#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY2
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy._lib import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY2:
instancemethod = types.MethodType
else:
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
    Non-central moment of order n.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
    Endpoints of the range that contains fraction alpha of the distribution.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, the `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
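# Illustrative check (a sketch, not part of the upstream module): for n <= 4,
# when all of mu, mu2, g1, g2 are provided, moment_func is never consulted.
# E.g. for a standard normal (mu=0, mu2=1, g1=0, g2=0), the 3rd non-central
# moment is mu3 + 3*mu*mu2 + mu**3 = 0:
#
# >>> _moment_from_stats(3, mu=0.0, mu2=1.0, g1=0.0, g2=0.0,
# ...                    moment_func=None, args=())
# 0.0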
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
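# Quick sanity check (illustrative only): for the symmetric sample [1, 2, 3],
# the skew is exactly 0 and the excess kurtosis is m4/m2**2 - 3 = -1.5:
#
# >>> _skew(np.array([1., 2., 3.]))
# 0.0
# >>> _kurtosis(np.array([1., 2., 3.]))
# -1.5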
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
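    # An inferred note on the trick below: (cond == cond) is an all-True array
    # with cond's shape, so multiplying each arg by it broadcasts scalars up
    # to that shape before np.extract selects the entries where cond is True.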
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
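# The identity used above, written out: the noncentral chi-square log-pdf is
#   log(1/2) - (x + nc)/2 + (df/4 - 1/2)*log(x/nc) + log(I_{df/2-1}(xs*ns))
# and with (x + nc)/2 = (xs - ns)**2/2 + xs*ns, the exp(-xs*ns) factor cancels
# against the scaling built into ive (ive(v, z) = iv(v, z)*exp(-abs(z)) for
# real z), so no large exponentials are formed explicitly.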
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __getstate__(self):
return self._updated_ctor_param(), self._random_state
def __setstate__(self, state):
ctor_param, r = state
self.__init__(**ctor_param)
self._random_state = r
return self
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for distribution \"%s\": %s" % (self.name, repr(e)))
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
    # Noncentral moments (also known as moments about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters.")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
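    # A worked example of the size rule above (illustrative, not normative):
    # with broadcast parameter shape (3,), ``size=(2, 3)`` is accepted because
    # the padded shapes (1, 3) and (2, 3) agree wherever the parameter
    # dimension is not 1, while ``size=(3, 2)`` raises ValueError since the
    # parameter dimension in the last axis is 3, which is neither 1 nor 2.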
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
return (a < x) & (x < b)
def _rvs(self, *args):
# This method must handle self._size being a tuple, and it must
# properly broadcast *args and self._size. self._size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
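    # Why inversion sampling works (a one-line derivation, for a continuous
    # cdf): if U ~ Uniform(0, 1) then P(ppf(U) <= x) = P(U <= cdf(x)) =
    # cdf(x), so ppf(U) has the target distribution.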
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
# `size` should just be an argument to _rvs(), but for, um,
# historical reasons, it is made an attribute that is read
# by _rvs().
self._size = size
vals = self._rvs(*args)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
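    # Spot-check of the binomial expansion above (illustrative): for n=1 the
    # loop adds comb(1, 0)*fac**0*E[Y^0] = 1, then fac**1*E[Y] is added, and
    # the final loc**1 factor gives loc*(1 + (scale/loc)*E[Y]) =
    # loc + scale*E[Y], i.e. E[loc + scale*Y], as expected.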
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
            end-points of the range that contains ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
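    # For example (illustrative): ``interval(0.95, ...)`` returns
    # ``(ppf(0.025, ...), ppf(0.975, ...))``, the central 95% range.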
def support(self, *args, **kwargs):
"""
Return the support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : float
end-points of the distribution's support.
"""
args, loc, scale = self._parse_args(*args, **kwargs)
_a, _b = self._get_support(*args)
return _a * scale + loc, _b * scale + loc
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
    at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
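# Illustrative usage (not part of the public API):
#
# >>> kwds = {'fa': 1.0, 'floc': 0.0}
# >>> _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
# 1.0
# >>> kwds   # 'fa' has been popped; unrelated keys are untouched
# {'floc': 0.0}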
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in result arrays that indicates a value for which some
        argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
    if the inverse cdf can be expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
    scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
      Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
_a, _b = self._get_support(*args)
if _a > -np.inf:
left = _a
if _b < np.inf:
right = _b
factor = 10.
        if left is None:  # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
        if right is None:  # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
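    # Sketch of the bracket expansion above: for support (-inf, inf) and
    # q = 0.975, the search starts from [-10, 10]; for, e.g., a standard
    # normal, cdf(-10) < 0.975 and cdf(10) > 0.975, so neither while loop
    # runs and brentq solves cdf(x) = 0.975 directly on that bracket.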
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
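    # The substitution behind _mom1_sc: with q = cdf(x), dq = pdf(x) dx, so
    # E[X**m] = integral(x**m * pdf(x) dx) = integral(ppf(q)**m dq, q=0..1),
    # which avoids integrating over an infinite support.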
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
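        Examples
        --------
        ``isf(q)`` matches ``ppf(1 - q)`` (a minimal sketch, assuming
        `scipy.stats.norm`):
        >>> from scipy.stats import norm
        >>> import numpy as np
        >>> np.allclose(norm.isf(0.025), norm.ppf(0.975))
        True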
"""
args, loc, scale = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
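        For example (a minimal sketch, assuming `scipy.stats.norm`, for
        which ``theta`` is just ``(loc, scale)``):
        >>> from scipy.stats import norm
        >>> import numpy as np
        >>> x = np.array([-1.0, 0.0, 1.0])
        >>> np.allclose(norm.nnlf((0, 1), x), -norm.logpdf(x).sum())
        True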
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa` or 'fix_a'. The following converts the latter two
# into the first (numeric) form.
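        # For example, a hypothetical call ``beta.fit(data, fix_a=1)`` or
        # ``beta.fit(data, fa=1)`` ends up stored under ``f0`` below.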
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape (if applicable), location, and scale
parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
        with starting estimates, ``self._fitstart(data)`` is called to
        generate them.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
          For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
mle_tuple : tuple of floats
MLEs for any shape parameters (if applicable), followed by those
for location and scale. For most random variables, shape statistics
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
        This fit is computed by maximizing a log-likelihood function, with
        a penalty applied for samples outside the support of the
        distribution. The returned answer is not guaranteed to be globally
        optimal; it may only be locally optimal, or the optimization may
        fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
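        Examples
        --------
        A minimal sketch, assuming `scipy.stats.norm` (for which the
        estimates reduce to the sample mean and standard deviation):
        >>> from scipy.stats import norm
        >>> import numpy as np
        >>> data = np.array([1.0, 3.0, 5.0, 7.0])
        >>> Lhat, Shat = norm.fit_loc_scale(data)
        >>> np.allclose((Lhat, Shat), (data.mean(), data.std()))
        True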
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, _a, _b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
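        # Sketch of the approach: first widen the bracket in steps of 10
        # until self._cdf(a) < q <= self._cdf(b), then bisect on integers,
        # returning (essentially) the smallest k with cdf(k) >= q.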
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None, axis=0):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=axis)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=axis)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
    axis : int, optional
The axis along which the entropy is calculated. Default is 0.
Returns
-------
S : float
The calculated entropy.
Examples
--------
>>> from scipy.stats import entropy
Bernoulli trial with different p.
The outcome of a fair coin is the most uncertain:
>>> entropy([1/2, 1/2], base=2)
1.0
The outcome of a biased coin is less uncertain:
>>> entropy([9/10, 1/10], base=2)
0.46899559358928117
Relative entropy:
>>> entropy([1/2, 1/2], qk=[9/10, 1/10])
0.5108256237659907
"""
pk = asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if qk.shape != pk.shape:
raise ValueError("qk and pk must have same shape.")
qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)
vec = rel_entr(pk, qk)
S = np.sum(vec, axis=axis)
if base is not None:
S /= log(base)
return S
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape.
inc : integer, optional
        Increment for the support of the distribution.
        Default is 1; other values have not been tested.
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
        The shape of the distribution. For example, "m, n" for a
        distribution that takes two integers as the two shape arguments for
        all its methods. If not provided, shape parameters will be inferred
        from the signatures of the private methods, ``_pmf`` and ``_cdf``,
        of the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
    >>> from scipy.stats import rv_discrete
    >>> from scipy.special import factorial
    >>> from numpy import exp
    >>> class poisson_gen(rv_discrete):
    ...     "Poisson distribution"
    ...     def _pmf(self, k, mu):
    ...         return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
    >>> import numpy as np
    >>> from scipy import stats
    >>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
            Defining the number of random variates (default is 1). Note that
            `size` has to be given as a keyword, not as a positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
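        Examples
        --------
        A minimal sketch with a two-point distribution constructed via
        ``rv_discrete(values=...)``:
        >>> from scipy import stats
        >>> rv = stats.rv_discrete(values=([0, 1], [0.25, 0.75]))
        >>> rv.pmf(1)
        0.75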
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
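        Examples
        --------
        A minimal sketch, assuming `scipy.stats.poisson` (the median of a
        Poisson distribution with ``mu=3`` is 3):
        >>> from scipy.stats import poisson
        >>> poisson.ppf(0.5, 3.0)
        3.0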
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1)
place(output, cond2, _b)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
_a, _b = self._get_support(*args)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _b)
place(output, cond2, _a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
        lb, ub : int, optional
            Lower and upper bound for the summation; the default is the
            support of the distribution, inclusive (``lb <= k <= ub``).
        conditional : bool, optional
            If True, the expectation is corrected by the conditional
            probability of the summation interval. The return value is the
            expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
            Iterate over the support of the distribution in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
        For heavy-tailed distributions, the expected value may or may not
        exist, depending on the function, `func`. If it does exist, but the
        sum converges slowly, the accuracy of the result may be rather low.
        For instance, for ``zipf(4)`` the accuracy of the mean and variance
        computed this way is only of the order of 1e-5. Increasing
        `maxcount` and/or `chunksize` may improve the result, but may also
        make zipf very slow.
The function is not vectorized.
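        Examples
        --------
        A minimal sketch, assuming `scipy.stats.poisson` (the mean of a
        Poisson distribution equals ``mu``):
        >>> from scipy.stats import poisson
        >>> import numpy as np
        >>> np.isclose(poisson.expect(args=(2.0,)), 2.0)
        True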
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
        # Historical note: pmf was once used here because _pmf does not
        # check the support (e.g. for randint) and self.a, self.b might not
        # have been set correctly at this stage; that no longer appears to
        # be an issue, and _pmf seems to work now.
self._argcheck(*args) # (re)generate scalar self.a and self.b
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The ctor ignores most of the arguments, only needs the `values` argument.
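    Not normally instantiated directly: ``rv_discrete(values=(xk, pk))``
    dispatches here via ``rv_discrete.__new__``. A minimal sketch:
    >>> from scipy import stats
    >>> rv = stats.rv_discrete(values=([0, 1, 2], [0.5, 0.3, 0.2]))
    >>> rv.pmf(2)
    0.2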
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._construct_docstrings(name, longname, extradoc)
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = self._random_state.random_sample(self._size)
if self._size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| jor-/scipy | scipy/stats/_distn_infrastructure.py | Python | bsd-3-clause | 125,438 | ["Gaussian"] | 016595588545d26605784d826bf8e635b3606009f9da366a65e6d38909294b12 |
#Standard dependencies
import os
import sys
import inspect
import time
import cPickle as pickle
import re
from copy import copy
from string import Template
#Non-standard dependencies
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline
import matplotlib as mpl
mpl.use('Agg')
import pylab as plt
import matplotlib.transforms as mtransforms
from matplotlib.mlab import griddata
import mpmath as mp
from ase.atoms import string2symbols
from ase.thermochemistry import IdealGasThermo, HarmonicThermo
from ase.structure import molecule
from catmap.model import ReactionModel
import data
__version__ = "0.2.27"
def load(setup_file):
rxm = ReactionModel(setup_file = setup_file)
return rxm
modified = []
class ReactionModelWrapper:
def __getattribute__(self,attr):
"Force use of custom getattr"
return self.__getattr__(self,attr)
def __getattr__(self,attr):
"Return the value of the reaction model instance if its there. Otherwise return the instances own value (or none if the instance does not have the attribute defined and the attribute is not private)"
if attr == '_rxm':
return object.__getattribute__(self,attr)
elif hasattr(self._rxm,attr):
return getattr(self._rxm,attr)
else:
if attr in self.__dict__:
val = object.__getattribute__(self,attr)
del self.__dict__[attr]
#this makes sure that the attr is read from _rxm
setattr(self._rxm,attr,val)
return val
elif attr.startswith('_'):
raise AttributeError()
else:
return None
def __setattr__(self,attr,val):
"Set attribute for the instance as well as the reaction_model instance"
accumulate = ['_required','_log_strings','_function_strings']
if attr == '_rxm':
self.__dict__[attr] = val
elif attr in accumulate:
self._rxm.__dict__[attr].update(val)
else:
setattr(self._rxm,attr,val)
| starry99/catmap | catmap/__init__.py | Python | gpl-3.0 | 2,096 | ["ASE"] | 95a051107eed23217bd39d6fa5d524b0f24549b1604c86494065d0c2c9af8144 |
|
""" A wrapper to get the needed preferences application wide """
# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and
# University Hospital Center and University of Lausanne (UNIL-CHUV)
#
# Modified BSD License
# Standard library imports
from os.path import join
import pkg_resources
# Enthought library imports
from traits.etsconfig.api import ETSConfig
from traits.api import Instance
from traitsui.api import View, Group, Item
from apptools.preferences.api import (ScopedPreferences, IPreferences,
PreferencesHelper)
# Mayavi2 imports
from mayavi.preferences.preference_manager import PreferenceManager
# ConnectomeViewer imports
from cviewer.plugins.ui.cviewer_ui_preferences_helper import CViewerUIPreferencesHelper
# Logging import
import logging
logger = logging.getLogger('root.'+__name__)
# This module's package.
PKG = '.'.join(__name__.split('.')[:-1])
# The application ID where the preferences are stored.
ID = 'ch.connectome.viewer'
################################################################################
# `PreferenceManager` class
################################################################################
class CViewerPreferenceManager(PreferenceManager):
# add the cviewer ui preferences
cviewerui = Instance(PreferencesHelper)
# The preferences.
preferences = Instance(IPreferences)
######################################################################
# Traits UI view.
traits_view = View(Group(
Group(Item(name='root', style='custom'),
show_labels=False, label='Root',
show_border=True
),
Group(Item(name='mlab', style='custom'),
show_labels=False, label='Mlab',
show_border=True,
),
Group(Item(name='cviewerui', style='custom'),
show_labels=False, label='ConnectomeViewer',
show_border=True
)
),
buttons=['OK', 'Cancel'],
resizable=True
)
######################################################################
# `HasTraits` interface.
######################################################################
def __init__(self, **traits):
super(PreferenceManager, self).__init__(**traits)
if 'preferences' not in traits:
self._load_preferences()
def _preferences_default(self):
return ScopedPreferences()
def _cviewerui_default(self):
"""Trait initializer."""
return CViewerUIPreferencesHelper(preferences=self.preferences)
def _load_preferences(self):
"""Load the default preferences."""
# Save current application_home.
app_home = ETSConfig.get_application_home()
logger.debug('Application home: ' + str(app_home))
# Set it to where the cviewer preferences are temporarily.
path = join(ETSConfig.get_application_data(), ID)
ETSConfig.application_home = path
try:
for pkg in ('cviewer.plugins.ui',
'mayavi.preferences',
'tvtk.plugins.scene'):
pref = 'preferences.ini'
pref_file = pkg_resources.resource_stream(pkg, pref)
preferences = self.preferences
default = preferences.node('default/')
default.load(pref_file)
pref_file.close()
finally:
# Set back the application home.
ETSConfig.application_home = app_home
def _preferences_changed(self, preferences):
"""Setup the helpers if the preferences trait changes."""
for helper in (self.root, ):
helper.preferences = preferences
##########################################################
# A Global preference manager that all other modules can use.
preference_manager = CViewerPreferenceManager()
| LTS5/connectomeviewer | cviewer/plugins/ui/preference_manager.py | Python | bsd-3-clause | 4,244 | ["Mayavi"] | 9961449fb45d1809238d96cffc96948956ceb97f03f8b5b95210b7d896aee200 |
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Directadmin API - Python implementation of Directadmin Web API
Copyright (C) 2009, Andrés Gattinoni
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=======================================================================
Proyect URL: http://code.google.com/p/python-directadmin/
For more information about Directadmin's Web API, visit:
http://www.directadmin.com/api.html
For more information about this module use PyDOC:
$ pydoc directadmin
Author: Andrés Gattinoni <andresgattinoni@gmail.com>
$Id$
"""
__author__ = "Andrés Gattinoni <andresgattinoni@gmail.com>"
__version__ = "$Revision$"
import urllib2
import urllib
import urlparse
import base64
_user_agent = "Python Directadmin"
class ApiError(Exception):
"""API Error
Generic exception for API error handling
"""
pass
class User(object):
"""User
Abstract representation of a Directadmin Panel User
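    A minimal usage sketch (values are illustrative only):
    >>> u = User('johndoe', 'john@example.com', 's3cret')
    >>> u['email']
    'john@example.com'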
"""
def __init__(self, username, email, password):
"""Constructor
Initializes the object with the basic information
for all kinds of users
Parameters:
username -- Admin's username 4-8 alphanumeric characters
email -- a valid email address
password -- Admin's password, +5 ascii characters
"""
        # Use a per-instance dict: a class-level mutable default would be
        # shared (and silently mutated) across all User instances.
        self._properties = {'username': username,
                            'email': email,
                            'passwd': password,
                            'passwd2': password}
def __getitem__(self, key):
"""Returns a user property"""
return self._properties[key]
def __setitem__(self, key, value):
"""Sets a user property"""
self._properties[key] = value
def update(self, properties):
"""Updates the properties dictionary"""
return self._properties.update(properties)
def get_list(self):
"""Returns a list of tuples with all the
properties of the User, to be sent in API commands"""
l = []
for key, value in self._properties.items():
l.append((key, value))
return l
class AdminUser(User):
"""AdminUser
Represents a Directadmin's Admin
"""
pass
class ResellerUser(User):
"""ResellerUser
Represents a Directadmin's reseller user
Usage:
# Define a Reseller with a Reseller Package
reseller = ResellerUser('username',
'email@domain.com',
'password',
'domain.com',
'package_1',
'shared')
OR
# Define a Reseller with a custom configuration
reseller = ResellerUser('username',
'email@domain.com',
'password',
'domain.com',
None,
'shared')
reseller['bandwidth'] = 1024
reseller['ubandwidth'] = "OFF"
Available properties:
username -- Admin's username 4-8 alphanumeric characters
email -- a valid email address
password -- Admin's password, +5 ascii characters
domain -- A valid domain name in the form: domain.com
package -- One of the Reseller packages created by an admin
(default: None)
ip -- shared or assign. If shared, domain will use the
server's main ip. assign will use one of the
reseller's ips
(default: shared)
bandwidth -- Amount of bandwidth Reseller will be allowed to use.
Number, in Megabytes
ubandwidth -- ON or OFF. If ON, bandwidth is ignored and no limit
is set
quota -- Amount of disk space Reseller will be allowed to
use. Number, in Megabytes
uquota -- ON or OFF. If ON, quota is ignored and no limit is
set
vdomains -- Number of domains the reseller and his/her User's
are allowed to create
uvdomains -- ON or OFF. If ON, vdomains is ignored and no limit
is set
nsubdomains -- Number of subdomains the reseller and his/her User's
are allowed to create
unsubdomains -- ON or OFF. If ON, nsubdomains is ignored and no
limit is set
ips -- Number of ips that will be allocated to the Reseller
                            upon account creation
nemails -- Number of pop accounts the reseller and his/her
User's are allowed to create
unemails -- ON or OFF Unlimited option for nemails
nemailf -- Number of forwarders the reseller and his/her User's
are allowed to create
unemailf -- ON or OFF Unlimited option for nemailf
nemailml -- Number of mailing lists the reseller and his/her
User's are allowed to create
unemailml -- ON or OFF Unlimited option for nemailml
nemailr -- Number of autoresponders the reseller and his/her
User's are allowed to create
unemailr -- ON or OFF Unlimited option for nemailr
mysql -- Number of MySQL databases the reseller and his/her
User's are allowed to create
umysql -- ON or OFF Unlimited option for mysql
domainptr -- Number of domain pointers the reseller and his/her
User's are allowed to create
udomainptr -- ON or OFF Unlimited option for domainptr
ftp -- Number of ftp accounts the reseller and his/her
User's are allowed to create
uftp -- ON or OFF Unlimited option for ftp
aftp -- ON or OFF If ON, the reseller and his/her users will
be able to have anonymous ftp accounts.
php -- ON or OFF If ON, the reseller and his/her users will
have the ability to run php scripts.
cgi -- ON or OFF If ON, the reseller and his/her users will
have the ability to run cgi scripts in their
cgi-bins.
ssl -- ON or OFF If ON, the reseller and his/her users will
have the ability to access their websites through
secure https://.
ssh -- ON or OFF If ON, the reseller will be have an ssh
account.
userssh -- ON or OFF If ON, the reseller will be allowed to
create ssh accounts for his/her users.
dnscontrol -- ON or OFF If ON, the reseller will be able to modify
his/her dns records and to create users with or
without this option.
dns -- "OFF" or "TWO" or "THREE".
If OFF, no dns's will be created.
TWO: domain ip for ns1 and another ip for ns2.
THREE: domain has own ip. ns1 and ns2 have their own
ips
serverip -- ON or OFF If ON, the reseller will have the ability
to create users using the servers main ip.
"""
def __init__(self,
username,
email,
password,
domain,
package=None,
ip="shared"):
"""Constructor
Initializes the Reseller user
Parameters:
username -- Admin's username 4-8 alphanumeric characters
email -- a valid email address
password -- Admin's password, +5 ascii characters
domain -- A valid domain name in the form: domain.com
package -- One of the Reseller packages created by an admin
(default: None)
ip -- shared or assign. If shared, domain will use the
server's main ip. assign will use one of the
reseller's ips (default: shared)
"""
super(ResellerUser, self).__init__(username, email, password)
self['domain'] = domain
self['ip'] = ip
if package is not None:
self['package'] = package
else:
self.update(self._get_default_config())
def _get_default_config(self):
"""Get dafault config
Returns a dictionary with the default
configuration for a reseller user
"""
return {'bandwidth': 0,
'ubandwidth': "OFF",
'quota': 0,
'uquota': "OFF",
'vdomains': 0,
'uvdomains': "OFF",
'nsubdomains': 0,
'unsubdomains': "OFF",
'ips': 0,
'nemails': 0,
'unemails': "OFF",
'nemailf': 0,
'unemailf': "OFF",
'nemailml': 0,
'unemailml': "OFF",
'nemailr': 0,
'unemailr': "OFF",
'mysql': 0,
'umysql': "OFF",
'domainptr': 0,
'udomainptr': "OFF",
'ftp': 0,
'uftp': "OFF",
'aftp': "OFF",
'php': "ON",
'cgi': "ON",
'ssl': "OFF",
'ssh': "OFF",
'userssh': "OFF",
'dnscontrol': "OFF",
'dns': "OFF",
'serverip': "OFF"}
class EndUser(User):
"""EndUser
Represents a Directadmin's end user
Usage:
# Define an End User with a package
user = EndUser('username',
'email@domain.com',
'password',
'domain.com',
'package_1',
'65.65.65.65')
OR
# Define an End User with a custom configuration
user = EndUser('username',
'email@domain.com',
'password',
'domain.com',
None,
'65.65.65.65')
        user['bandwidth'] = 1024
        user['ubandwidth'] = "OFF"
Available properties:
username -- Admin's username 4-8 alphanumeric characters
email -- a valid email address
password -- Admin's password, +5 ascii characters
domain -- A valid domain name in the form: domain.com
package -- One of the User packages created by the Reseller
(default: None)
ip -- One of the ips which is available for user creation.
Only free or shared ips are allowed.
bandwidth -- Amount of bandwidth User will be allowed to use.
Number, in Megabytes
ubandwidth -- ON or OFF. If ON, bandwidth is ignored and no limit
is set
quota -- Amount of disk space User will be allowed to use.
Number, in Megabytes
uquota -- ON or OFF. If ON, quota is ignored and no limit is
set
vdomains -- Number of domains the User will be allowed to create
uvdomains -- ON or OFF. If ON, vdomains is ignored and no limit
is set
nsubdomains -- Number of subdomains the User will be allowed to
create
unsubdomains -- ON or OFF. If ON, nsubdomains is ignored and no
limit is set
nemails -- Number of pop accounts the User will be allowed to
create
unemails -- ON or OFF Unlimited option for nemails
nemailf -- Number of forwarders the User will be allowed to
create
unemailf -- ON or OFF Unlimited option for nemailf
nemailml -- Number of mailing lists the User will be allowed to
create
unemailml -- ON or OFF Unlimited option for nemailml
nemailr -- Number of autoresponders the User will be allowed to
create
unemailr -- ON or OFF Unlimited option for nemailr
mysql -- Number of MySQL databases the User will be allowed
to create
umysql -- ON or OFF Unlimited option for mysql
domainptr -- Number of domain pointers the User will be allowed
to create
udomainptr -- ON or OFF Unlimited option for domainptr
ftp -- Number of ftp accounts the User will be allowed to
create
uftp -- ON or OFF Unlimited option for ftp
aftp -- ON or OFF If ON, the User will be able to have
anonymous ftp accounts.
cgi -- ON or OFF If ON, the User will have the ability to
run cgi scripts in their cgi-bin.
php -- ON or OFF If ON, the User will have the ability to
run php scripts.
spam -- ON or OFF If ON, the User will have the ability to
scan email with SpamAssassin.
cron -- ON or OFF If ON, the User will have the ability to
create cronjobs.
catchall -- ON or OFF If ON, the User will have the ability to
enable and customize a catch-all email
(*@domain.com).
ssl -- ON or OFF If ON, the User will have the ability to
access their websites through secure https://.
ssh -- ON or OFF If ON, the User will have an ssh account.
sysinfo -- ON or OFF If ON, the User will have access to a page
that shows the system information.
dnscontrol -- ON or OFF If ON, the User will be able to modify
his/her dns records.
"""
def __init__(self,
username,
email,
password,
domain,
package=None,
ip=None):
"""Constructor
Initializes the End user
Parameters:
username -- User's username, 4-8 alphanumeric characters
email -- a valid email address
password -- User's password, +5 ascii characters
domain -- A valid domain name in the form: domain.com
package -- One of the User packages created by the Reseller
(default: None)
ip -- One of the ips which is available for user creation.
Only free or shared ips are allowed.
"""
super(EndUser, self).__init__(username, email, password)
self['domain'] = domain
self['ip'] = ip
if package is not None:
self['package'] = package
else:
self.update(self._get_default_config())
def _get_default_config(self):
"""Get dafault config
Returns a dictionary with the default
configuration for a reseller user
"""
return {'bandwidth': 0,
'ubandwidth': "OFF",
'quota': 0,
'uquota': "OFF",
'vdomains': 0,
'uvdomains': "OFF",
'nsubdomains': 0,
'unsubdomains': "OFF",
'nemails': 0,
'unemails': "OFF",
'nemailf': 0,
'unemailf': "OFF",
'nemailml': 0,
'unemailml': "OFF",
'nemailr': 0,
'unemailr': "OFF",
'mysql': 0,
'umysql': "OFF",
'domainptr': 0,
'udomainptr': "OFF",
'ftp': 0,
'uftp': "OFF",
'aftp': "OFF",
'cgi': "ON",
'php': "ON",
'spam': "ON",
'cron': "ON",
'catchall': "OFF",
'ssl': "OFF",
'ssh': "OFF",
'sysinfo': "OFF",
'dnscontrol': "OFF"}
class ApiConnector(object):
"""API Connector
Basic object to handle API connection.
Connect and send commands.
"""
_hostname = None
_port = 0
_username = None
_password = None
_https = False
def __init__(self,
username,
password,
hostname="localhost",
port=2222,
https=False):
"""Constructor
Parameters:
username -- username to login on Directadmin
password -- password to login on Directadmin
hostname -- Directadmin's hostname (default: localhost)
port -- port on which Directadmin listens (default: 2222)
https -- boolean, if True all transactions will
be performed using HTTPS (default: False)
"""
self._hostname = hostname
self._port = int(port)
self._username = username
self._password = password
self._https = bool(https)
def execute(self, cmd, parameters=None, get=None):
"""Execute command
Executes a command of the API
processes the result and returns it
Parameters:
cmd -- command name
parameters -- list of tuples with parameters (default: None)
get -- list of tuples or dict with get parameters (default: None)
"""
url = self._get_url(cmd)
if get is not None:
url = '%s?%s' % (url, urllib.urlencode(get))
if parameters is not None:
parameters = urllib.urlencode(parameters)
request = urllib2.Request(url, parameters)
# Directadmin's API requires Basic HTTP Authentication
base_auth = base64.b64encode("%s:%s" %
(self._username, self._password))
request.add_header('Authorization', 'Basic %s' % base_auth)
# Identify our app with a custom User-Agent
request.add_header('User-Agent', _user_agent)
# Open the URL and handle the response
try:
return self._handle_response(urllib2.urlopen(request))
except urllib2.URLError, e:
raise ApiError("HTTP Error: %s" % e.reason)
def _get_url(self, cmd):
"""Get URL
Returns the URL for a specific command
"""
if self._https:
protocol = "https"
else:
protocol = "http"
return '%s://%s:%d/%s' % \
(protocol,
self._hostname,
self._port,
cmd)
def _handle_response(self, response):
"""Handle response
Takes the response string returned by
Directadmin server, checks for errors
and returns a python-friendly object
Parameters:
response -- response object
Returns a list or dictionary according
to the method
Raises ApiError on errors
"""
# Get response headers to check if there
# was any problem with login
info = response.info()
if info.getheader('X-DirectAdmin') == 'unauthorized':
raise ApiError("Invalid username or password")
# If we're getting HTML content we'll search for known
# error messages.
if info.getheader('Content-Type') == 'text/html':
errors = ['You cannot execute that command']
response = response.read()
for msg in errors:
if response.find(msg) > -1:
raise ApiError(msg)
# If we don't find any known error messages,
# we exit anyway, because we can't handle this
raise ApiError('Got unexpected HTML response from server')
# Parse the response query string
response = urlparse.parse_qs(response.read())
# Check for 'error' flag
if 'error' in response:
# If 'error' is 0, the operation was successful
if response['error'][0] == "0":
return True
# If not, check for details of the error
else:
if 'details' in response:
raise ApiError(response['details'][0])
if 'text' in response:
raise ApiError(response['text'][0])
else:
raise ApiError("Uknown error detected")
# If we got a 'list[]' keyword, we return only the list
elif 'list[]' in response:
return response['list[]']
# On any other case return the whole structure
else:
return response
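# Hedged illustration of the parsing performed by _handle_response above,
# assuming a typical Directadmin response body (the query string is
# invented for the example):
#
#   urlparse.parse_qs('error=1&text=Cannot+create+user&details=Exists')
#   # -> {'text': ['Cannot create user'], 'details': ['Exists'],
#   #     'error': ['1']}
#
# With error '1' and a 'details' key this raises ApiError('Exists'); with
# error '0' it returns True; a 'list[]' key returns just that list.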
class Api(object):
"""API
Directadmin API implementation
"""
_connector = None
def __init__(self,
username,
password,
hostname="localhost",
port=2222,
https=False):
"""Constructor
Initializes the connection for the API
Parameters:
username -- Directadmin username
password -- Directadmin password
hostname -- Directadmin server host (default: localhost)
port -- Directadmin server port (default: 2222)
https -- boolean, if True all transactions will
be performed using HTTPS (default: False)
"""
self._connector = ApiConnector(username,
password,
hostname,
port,
https)
def _execute_cmd(self, cmd, parameters=None, get=None):
"""Execute command
Executes a command using the Connection object
"""
return self._connector.execute(cmd, parameters, get)
def _yes_no(self, b):
"""Translates a boolean to "yes"/"no" """
if bool(b):
return "yes"
else:
return "no"
def create_admin(self, admin_user, notify=True):
"""Create admin
Implements command CMD_API_ACCOUNT_ADMIN
Creates a new admin user
Parameters:
admin_user -- AdminUser object with the information of the
admin to create
notify -- boolean: if true sends a notification email
Raises TypeError if admin_user is not an AdminUser object
"""
if not isinstance(admin_user, AdminUser):
raise TypeError("admin_user must be an AdminUser object")
parameters = [('action', 'create'),
('add', 'Submit'),
('notify', self._yes_no(notify))]
parameters.extend(admin_user.get_list())
return self._execute_cmd("CMD_API_ACCOUNT_ADMIN", parameters)
def create_reseller(self, reseller_user, notify=True):
"""Create reseller
Implements command CMD_API_ACCOUNT_RESELLER
Creates a reseller assigning him a reseller package
or with a custom configuration.
Parameters:
reseller_user -- ResellerUser object
notify -- boolean: if true sends a notification email
Raises TypeError if reseller_user is not a ResellerUser object
"""
if not isinstance(reseller_user, ResellerUser):
raise TypeError("reseller_user must be an ResellerUser object")
parameters = [('action', 'create'),
('add', 'Submit'),
('notify', self._yes_no(notify))]
parameters.extend(reseller_user.get_list())
return self._execute_cmd("CMD_API_ACCOUNT_RESELLER", parameters)
def create_user(self, end_user, notify=True):
"""Create user
Implements command CMD_API_ACCOUNT_USER
Creates an end user assigning him a package
or with a custom configuration.
Parameters:
end_user -- EndUser object
notify -- boolean: if true sends a notification email
Raises TypeError if end_user is not an EndUser object
"""
if not isinstance(end_user, EndUser):
raise TypeError("end_user must be an EndUser object")
parameters = [('action', 'create'),
('add', 'Submit'),
('notify', self._yes_no(notify))]
parameters.extend(end_user.get_list())
return self._execute_cmd("CMD_API_ACCOUNT_USER", parameters)
def show_ips(self, ip=None):
"""Show IPs
Implements command CMD_API_SHOW_RESELLER_IPS
Gets the list of IPs owned by the reseller or provides
information for a single IP if provided
Parameters:
ip -- IP address (optional)
"""
parameters = None
if ip is not None:
parameters = [('ip', ip)]
return self._execute_cmd("CMD_API_SHOW_RESELLER_IPS", parameters)
def delete_account(self, user):
"""Delete account
Implements command CMD_API_SELECT_USERS
Deletes an account of *ANY* type
Parameters:
user -- name of the Admin/Reseller/User to delete
it can also be a User object
"""
if isinstance(user, User):
username = user['username']
else:
username = user
parameters = [('confirmed', 'Confirm'),
('delete', 'yes'),
('select0', username)]
return self._execute_cmd("CMD_API_SELECT_USERS", parameters)
def _handle_suspensions(self, users, suspend):
"""Handle suspension
Internal method to handle suspensions/unsuspensions
of one or more users
Parameters:
users -- list of users to apply the suspension/unsuspension
the list can contain either usernames or User objects
suspend -- boolean (suspend/unsuspend)
"""
# Init params
parameters = []
# Define if we're suspending or unsuspending
if suspend:
parameters.append(('dosuspend', 'yes'))
else:
parameters.append(('dounsuspend', 'yes'))
# Add all the users to the list
for n, user in enumerate(users):
if isinstance(user, User):
username = user['username']
else:
username = user
parameters.append(('select%d' % n, username))
# Do the magic
return self._execute_cmd("CMD_API_SELECT_USERS", parameters)
def suspend_account(self, user):
"""Suspend account
Implements command CMD_API_SELECT_USERS
Suspends an account of *ANY* type
Parameters:
user -- name of the Admin/Reseller/User to suspend
it can also be a User object
"""
return self._handle_suspensions([user], True)
def suspend_accounts(self, users):
"""Suspend accounts
Implements command CMD_API_SELECT_USERS
Suspends a list of accounts of *ANY* type
Parameters:
users -- list of names or User objects of the
Admins/Resellers/Users to suspend
"""
return self._handle_suspensions(users, True)
def unsuspend_account(self, user):
"""Unsuspend account
Implements command CMD_API_SELECT_USERS
Unsuspends an account of *ANY* type
Parameters:
user -- name of the Admin/Reseller/User to unsuspend
it can also be a User object
"""
return self._handle_suspensions([user], False)
def unsuspend_accounts(self, users):
"""Unsuspend accounts
Implements command CMD_API_SELECT_USERS
Unsuspends a list of accounts of *ANY* type
Parameters:
users -- list of names or User objects of the
Admins/Resellers/Users to suspend
"""
return self._handle_suspensions(users, False)
def save_user_email(self, email, domain):
"""Save user email
Implements command CMD_API_CHANGE_INFO
Updates the email address for the logged user.
This does not affect the email address for the
ticketing/messaging system.
Parameters:
email -- a valid email address
domain -- any of the user's domains
"""
parameters = [('evalue', email),
('domain', domain),
('email', 'Save')]
return self._execute_cmd("CMD_API_CHANGE_INFO", parameters)
def list_all_users(self):
"""List All Users
Implements command CMD_API_SHOW_ALL_USERS
Returns a list of all the users on the server
Method info: http://www.directadmin.com/api.html#showallusers
"""
return self._execute_cmd("CMD_API_SHOW_ALL_USERS")
def list_users(self, reseller=None):
"""List Users
Implements command CMD_API_SHOW_USERS
Returns the list of users corresponding to the reseller logged in.
If a reseller username is provided, it will return the users for it.
Method info: http://www.directadmin.com/api.html#showusers
"""
parameters = None
if reseller is not None:
parameters = [('reseller', reseller)]
return self._execute_cmd("CMD_API_SHOW_USERS", parameters)
def list_resellers(self):
"""List Resellers
Implements command CMD_API_SHOW_RESELLERS
Returns the list of resellers on the server
Method info: http://www.directadmin.com/api.html#showresellers
"""
return self._execute_cmd("CMD_API_SHOW_RESELLERS")
def list_admins(self):
"""List Admins
Implements command CMD_API_SHOW_ADMINS
Returns the list of all the admins on the server
Method info: http://www.directadmin.com/api.html#showradmins
"""
return self._execute_cmd("CMD_API_SHOW_ADMINS")
def get_server_stats(self):
"""Get Server Statistics
Implements command CMD_API_ADMIN_STATS
Returns a dictionary with information of the server.
Note that disk info is also returned as a dictionary
with the following keys:
- 'filesystem'
- 'blocks'
- 'used'
- 'available'
- 'usedpercent'
- 'mounted'
Method info: http://www.directadmin.com/api.html#info
"""
# Execute command
stats = self._execute_cmd("CMD_API_ADMIN_STATS")
# Split disk info
options = ['filesystem',
'blocks',
'used',
'available',
'usedpercent',
'mounted']
for key in stats.keys():
if key.startswith('disk'):
items = stats[key][0].split(':')
stats[key][0] = {}
for option in options:
stats[key][0][option] = items.pop(0)
return stats
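# For reference, after the split above each 'disk*' entry should look
# roughly like this (values are illustrative, not real server output):
#
#   stats['disk1'][0] == {'filesystem': '/dev/sda1', 'blocks': '10321208',
#                         'used': '2840332', 'available': '6956520',
#                         'usedpercent': '29%', 'mounted': '/'}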
def get_user_usage(self, user):
"""Get User Usage
Implements command CMD_API_SHOW_USER_USAGE
Returns a dictionary with the usage information for a user
Method info: http://www.directadmin.com/api.html#info
"""
return self._execute_cmd("CMD_API_SHOW_USER_USAGE",
get=[('user', user)])
def get_user_limits(self, user):
"""Get User Limits
Implements command CMD_API_SHOW_USER_CONFIG
Returns a dictionary with the user's upper limits
and settings that defines their account
Method info: http://www.directadmin.com/api.html#info
"""
return self._execute_cmd("CMD_API_SHOW_USER_CONFIG",
get=[('user', user)])
def get_user_domains(self, user):
"""Get User Domains
Implements command CMD_API_SHOW_USER_DOMAINS
Returns a list of domains owned by the user
Method info: http://www.directadmin.com/api.html#info
"""
return self._execute_cmd("CMD_API_SHOW_USER_DOMAINS",
get=[('user', user)])
def list_reseller_packages(self):
"""List Reseller Packages
Implements command CMD_API_PACKAGES_RESELLER
Returns the list of all available reseller packages
Method info: http://www.directadmin.com/api.html#package
"""
return self._execute_cmd("CMD_API_PACKAGES_RESELLER")
def get_reseller_package(self, package):
"""Get Reseller Package
Implements command CMD_API_PACKAGES_RESELLER
Returns the information of a reseller package
Method info: http://www.directadmin.com/api.html#package
"""
return self._execute_cmd("CMD_API_PACKAGES_RESELLER",
[('package', package)])
def list_user_packages(self):
"""List User Packages
Implements command CMD_API_PACKAGES_USER
Returns the list of all available user packages
Method info: http://www.directadmin.com/api.html#package
"""
return self._execute_cmd("CMD_API_PACKAGES_USER")
def get_user_package(self, package):
"""Get User Package
Implements command CMD_API_PACKAGES_USER
Returns the information of a user package
Method info: http://www.directadmin.com/api.html#package
"""
return self._execute_cmd("CMD_API_PACKAGES_USER",
[('package', package)])
def list_domains(self):
"""List domains
Implements command CMD_API_SHOW_DOMAINS
Returns a list of all the logged user's domains
Method info: http://www.directadmin.com/api.html#user_apis
"""
return self._execute_cmd("CMD_API_SHOW_DOMAINS")
def list_subdomains(self, domain):
"""List subdomains
Implements command CMD_API_SUBDOMAINS
Returns a list of all the logged user's subdomains
Method info: http://www.directadmin.com/api.html#user_apis
Parameters:
domain -- the domain to be shown
"""
return self._execute_cmd("CMD_API_SUBDOMAINS",
[('domain', domain)])
def create_subdomain(self, domain, subdomain):
"""Create subdomain
Implements command CMD_API_SUBDOMAINS
Creates a new subdomain
Method info: http://www.directadmin.com/api.html#user_apis
Parameters:
domain -- main domain
subdomain -- subdomain to be created
"""
parameters = [('action', 'create'),
('domain', domain),
('subdomain', subdomain)]
return self._execute_cmd("CMD_API_SUBDOMAINS", parameters)
def delete_subdomain(self, domain, subdomain, remove_contents=False):
"""Delete subdomain
Implements command CMD_API_SUBDOMAINS
Deletes a subdomain.
Method info: http://www.directadmin.com/api.html#user_apis
Parameters:
domain -- main domain
subdomain -- subdomain to delete
remove_contents -- boolean, if True the directory
and its contents will be removed
Default: False
"""
parameters = [('action', 'delete'),
('domain', domain),
('select0', subdomain),
('contents', self._yes_no(remove_contents))]
return self._execute_cmd("CMD_API_SUBDOMAINS", parameters)
def list_databases(self):
"""List databases
Implements command CMD_API_DATABASES
Lists all the logged user's databases
Method info: http://www.directadmin.com/api.html#user_apis
"""
return self._execute_cmd("CMD_API_DATABASES")
def create_database(self, name, user, password):
"""Create database
Implements command CMD_API_DATABASES
Creates a new database for the logged user.
Method info: http://www.directadmin.com/api.html#user_apis
Parameters:
name -- database name (username_ will be prepended)
user -- database user (username_ will be prepended)
password -- username_user's password
"""
parameters = [('action', 'create'),
('name', name),
('user', user),
('passwd', password),
('passwd2', password)]
return self._execute_cmd("CMD_API_DATABASES", parameters)
def delete_databases(self, dbs):
"""Delete databases
Implements command CMD_API_DATABASES
Removes one or more databases.
Method info: http://www.directadmin.com/api.html#user_apis
Parameters:
dbs -- database name or list of databases names to delete
"""
parameters = [('action', 'delete')]
if isinstance(dbs, list):
for n, name in enumerate(dbs):
parameters.append(('select%d' % n, name))
else:
parameters.append(('select0', dbs))
return self._execute_cmd("CMD_API_DATABASES", parameters)
def update_pop_password(self, email, old_password, new_password):
"""Update POP password
Implements command CMD_CHANGE_EMAIL_PASSWORD
Updates the password of a POP account
Method info: http://www.directadmin.com/api.html#email
Parameters:
email -- email account to update its password
old_password -- current password of the account
new_password -- new password to define
"""
parameters = [('email', email),
('oldpassword', old_password),
('password1', new_password),
('password2', new_password),
('api', 'yes')]
return self._execute_cmd("CMD_API_CHANGE_EMAIL_PASSWORD",
parameters)
def list_pop_accounts(self, domain):
""" List POP accounts
Implements command CMD_API_POP
Lists all the POP accounts for a domain
Method info: http://www.directadmin.com/api.html#email
Parameters:
domain -- domain name of which the accounts will be listed
"""
parameters = [('action', 'list'),
('domain', domain)]
return self._execute_cmd("CMD_API_POP", parameters)
def create_pop_account(self, domain, user, password, quota=0):
"""Create POP account
Implements command CMD_API_POP
Creates a POP account on a domain
Method info: http://www.directadmin.com/api.html#email
Parameters:
domain -- domain on which the account will be created
user -- email username (what comes before the @)
password -- account password
quota -- quota in MB, zero is unlimited (default: 0)
"""
parameters = [('action', 'create'),
('domain', domain),
('user', user),
('passwd', password),
('quota', quota)]
return self._execute_cmd("CMD_API_POP", parameters)
def delete_pop_account(self, domain, user):
"""Delete POP account
Implements command CMD_API_POP
Deletes a POP account from a domain
Method info: http://www.directadmin.com/api.html#email
Parameters:
domain -- domain from which the account will be removed
user -- email username (what comes before the @)
"""
parameters = [('action', 'delete'),
('domain', domain),
('user', user)]
return self._execute_cmd("CMD_API_POP", parameters)
def check_pop_password(self, email, password):
"""Check POP password
Implements command CMD_API_EMAIL_AUTH
Checks the password of a POP account
Method info: http://www.directadmin.com/features.php?id=588
Parameters:
email -- email account to check its password
password -- current password of the account
"""
parameters = [('email', email),
('passwd', password)]
return self._execute_cmd("CMD_API_EMAIL_AUTH", parameters)
def get_pop_vacation(self, domain, user):
"""Get vacation details
Implements command CMD_API_EMAIL_VACATION_MODIFY
Gets vacation details of a POP account
Method info: http://www.directadmin.com/features.php?id=348
Parameters:
domain -- email domain (what comes after the @)
user -- email username (what comes before the @)
"""
parameters = [('domain', domain),
('user', user)]
return self._execute_cmd("CMD_API_EMAIL_VACATION_MODIFY", parameters)
def list_pop_vacations(self, domain):
"""List vacations
Implements command CMD_API_EMAIL_VACATION
Lists vacations of a domain
Method info: http://www.directadmin.com/features.php?id=348
Parameters:
domain -- email domain
"""
parameters = [('domain', domain)]
return self._execute_cmd("CMD_API_EMAIL_VACATION", parameters)
def create_pop_vacation(self, domain, user, text,
startyear, startmonth, startday, starttime,
endyear, endmonth, endday, endtime):
"""Create POP vacation
Implements command CMD_API_EMAIL_VACATION
Creates a vacation message for a given period for a POP account
on a domain
Method info: http://www.directadmin.com/features.php?id=348
Parameters:
domain -- email domain
user -- email username (what comes before the @)
text -- vacation message
startyear -- 4-digit year
startmonth -- 2-digit month (01-12)
startday -- 2-digit day (01-31)
starttime -- morning|afternoon|evening
endyear -- 4-digit year
endmonth -- 2-digit month (01-12)
endday -- 2-digit day (01-31)
endtime -- morning|afternoon|evening
"""
parameters = [('action', 'create'),
('domain', domain),
('user', user),
('text', text),
('startyear', startyear),
('startmonth', startmonth),
('startday', startday),
('starttime', starttime),
('endyear', endyear),
('endmonth', endmonth),
('endday', endday),
('endtime', endtime)]
return self._execute_cmd("CMD_API_EMAIL_VACATION", parameters)
def update_pop_vacation(self, domain, user, text,
startyear, startmonth, startday, starttime,
endyear, endmonth, endday, endtime):
"""Update POP vacation
Implements command CMD_API_EMAIL_VACATION
Updates a vacation message for a given period for a POP account
on a domain
Method info: http://www.directadmin.com/features.php?id=348
Parameters:
domain -- email domain
user -- email username (what comes before the @)
text -- vacation message
startyear -- 4-digit year
startmonth -- 2-digit month (01-12)
startday -- 2-digit day (01-31)
starttime -- morning|afternoon|evening
endyear -- 4-digit year
endmonth -- 2-digit month (01-12)
endday -- 2-digit day (01-31)
endtime -- morning|afternoon|evening
"""
parameters = [('action', 'modify'),
('domain', domain),
('user', user),
('text', text),
('startyear', startyear),
('startmonth', startmonth),
('startday', startday),
('starttime', starttime),
('endyear', endyear),
('endmonth', endmonth),
('endday', endday),
('endtime', endtime)]
return self._execute_cmd("CMD_API_EMAIL_VACATION", parameters)
def delete_pop_vacation(self, domain, user):
"""Delete POP vacation
Implements command CMD_API_EMAIL_VACATION
Delete the vacation message for a POP account on a domain
Method info: http://www.directadmin.com/features.php?id=348
Parameters:
domain -- email domain
user -- email username (what comes before the @)
"""
parameters = [('action', 'delete'),
('domain', domain),
('select0', user)]
return self._execute_cmd("CMD_API_EMAIL_VACATION", parameters)
def create_backup(self, domain=None, items=None):
"""Create User Level Backup
Implements command CMD_API_SITE_BACKUP
Schedules the creation of a user-level backup.
Further information: http://www.directadmin.com/features.php?id=512
Parameters:
domain -- one (any) of the user's domains
items -- list of items to backup
If domain is None, the first of the user's domains will be sent.
If items is None, all items will be included in the backup.
Available items:
* domain
* subdomain
* email
* forwarder
* autoresponder
* vacation
* list
* emailsettings
* ftp
* ftpsettings
* database
"""
available_items = ['domain',
'subdomain',
'email',
'forwarder',
'autoresponder',
'vacation',
'list',
'emailsettings',
'ftp',
'ftpsettings',
'database']
if domain is None:
domain = self.list_domains()[0]
if items is None:
items = available_items
parameters = [('action', 'backup'), ('domain', domain)]
for i, v in enumerate(items):
parameters.append(('select%d' % i, v))
return self._execute_cmd("CMD_API_SITE_BACKUP", parameters)
def list_email_list(self, domain):
"""List email lists
Implements command CMD_API_EMAIL_LIST
Returns a list of all the email lists for domain
Parameters:
domain -- the domain to be shown
"""
return self._execute_cmd("CMD_API_EMAIL_LIST",
[('domain', domain)])
def list_email_list_member(self, domain, name):
"""List members of email list
Implements command CMD_API_EMAIL_LIST
Returns a list of all the members of email list for domain
Parameters:
domain -- the domain to be shown
name -- the list name to be shown
"""
parameters = [('action', 'view'),
('domain', domain),
('name', name)]
return self._execute_cmd("CMD_API_EMAIL_LIST", parameters)
def list_autoresponder(self, domain):
"""List autoresponders info
Implements command CMD_API_EMAIL_AUTORESPONDER
Returns a list of all the autoresponders for domain
Method info: https://www.directadmin.com/features.php?id=348
Parameters:
domain -- the domain to be shown
"""
return self._execute_cmd("CMD_API_EMAIL_AUTORESPONDER",
[('domain', domain)])
def delete_autoresponder(self, domain, user):
"""Delete autoresponder
Implements command CMD_API_EMAIL_AUTORESPONDER
Returns action status
Method info: https://www.directadmin.com/features.php?id=348
Parameters:
domain -- the domain
user -- username of autoresponder
"""
parameters = [('action', 'delete'),
('domain', domain),
('select0', user)]
return self._execute_cmd("CMD_API_EMAIL_AUTORESPONDER", parameters)
def create_autoresponder(self, domain, user, message, cc=False, email=None):
"""Delete autoresponder
Implements command CMD_API_EMAIL_AUTORESPONDER
Returns action status
Method info: https://www.directadmin.com/features.php?id=348
Parameters:
domain -- the domain
user -- username of autoresponder
message -- text of the message
cc -- True or False (if you want to send a cc to email)
email -- email address to cc
"""
parameters = [('action', 'create'),
('domain', domain),
('user', user),
('text', message),
('cc', self._yes_no(cc)),
('email', email or '')]
return self._execute_cmd("CMD_API_EMAIL_AUTORESPONDER", parameters)
def get_autoresponder(self, domain, user):
"""Get autoresponder
Implements command CMD_API_EMAIL_AUTORESPONDER_MODIFY
Returns the settings of an autoresponder
Method info: https://www.directadmin.com/features.php?id=348
Parameters:
domain -- autoresponder domain (what comes after the @)
user -- username of autoresponder (what comes before the @)
"""
parameters = [('domain', domain),
('user', user)]
return self._execute_cmd("CMD_API_EMAIL_AUTORESPONDER_MODIFY", parameters)
|
el-barto/python-directadmin
|
directadmin/api.py
|
Python
|
gpl-3.0
| 51,302
|
[
"VisIt"
] |
bb028b6ab9b37afdf1da9806b5201824ee37ea4a7a4dfeb4174e8cfbd82baafc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
import collections
import numbers
import string
from itertools import combinations_with_replacement, product
import os
import re
from typing import Tuple, List
from functools import total_ordering
from monty.serialization import loadfn
from monty.fractions import gcd, gcd_float
from monty.json import MSONable
from pymatgen.core.periodic_table import get_el_sp, Element, Specie, DummySpecie
from pymatgen.util.string import formula_double_format
from pymatgen.core.units import Mass
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 10, 2012"
@total_ordering
class Composition(collections.abc.Hashable, collections.abc.Mapping, MSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Specie. Elements and Specie
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Specie and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Specie.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that much Composition-related functionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
# Tolerance in distinguishing different composition amounts.
# 1e-8 is fairly tight, but should cut out most floating point arithmetic
# errors.
amount_tolerance = 1e-8
# Special formula handling for peroxides and certain elements. This is so
# that formula output does not write LiO instead of Li2O2 for example.
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
oxi_prob = None # prior probability of oxidation used by oxi_state_guesses
def __init__(self, *args, strict=False, **kwargs): # allow_negative=False
r"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in dict() function.
1. A dict of either {Element/Specie: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Composition(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
strict: Only allow valid Elements and Species in the Composition.
allow_negative: Whether to allow negative compositions. This
argument must be popped from the **kwargs due to *args
ambiguity.
"""
self.allow_negative = kwargs.pop('allow_negative', False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]
elif len(args) == 1 and isinstance(args[0], str):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
elamt = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
elamt[get_el_sp(k)] = v
self._natoms += abs(v)
self._data = elamt
if strict and not self.valid:
raise ValueError("Composition is not valid, contains: {}"
.format(", ".join(map(str, self.elements))))
def __getitem__(self, item):
try:
sp = get_el_sp(item)
return self._data.get(sp, 0)
except ValueError as ex:
raise TypeError("Invalid key {}, {} for Composition\n"
"ValueError exception:\n{}".format(item,
type(item), ex))
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.keys().__iter__()
def __contains__(self, item):
try:
sp = get_el_sp(item)
return sp in self._data
except ValueError as ex:
raise TypeError("Invalid key {}, {} for Composition\n"
"ValueError exception:\n{}".format(item,
type(item), ex))
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
# composition's elements
if len(self) != len(other):
return False
for el, v in self.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
if self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
CompositionError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between
Compositions with different elements.
"""
hashcode = 0
for el, amt in self.items():
if abs(amt) > Composition.amount_tolerance:
hashcode += el.Z
return hashcode
@property
def average_electroneg(self) -> float:
"""
:return: Average electronegativity of the composition.
"""
return sum((el.X * abs(amt) for el, amt in self.items())) / self.num_atoms
@property
def total_electrons(self) -> float:
"""
:return: Total number of electrons in composition.
"""
return sum((el.Z * abs(amt) for el, amt in self.items()))
def almost_equals(self, other, rtol=0.1, atol=1e-8):
"""
Returns true if compositions are equal within a tolerance.
Args:
other (Composition): Other composition to check
rtol (float): Relative tolerance
atol (float): Absolute tolerance
"""
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
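# Hedged illustration of the tolerance check above (default rtol=0.1):
#
#   Composition('Fe2O3').almost_equals(Composition('Fe2O3.01'))  # True
#   Composition('Fe2O3').almost_equals(Composition('FeO'))       # False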
@property
def is_element(self) -> bool:
"""
True if composition is for an element.
"""
return len(self) == 1
def copy(self):
"""
:return: A copy of the composition.
"""
return Composition(self, allow_negative=self.allow_negative)
@property
def formula(self) -> str:
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self) -> str:
"""
Returns a formula string, with elements sorted by alphabetically
e.g., Fe4 Li4 O16 P4.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def iupac_formula(self) -> str:
"""
Returns a formula string, with elements sorted by the iupac
electronegativity ordering defined in Table VI of "Nomenclature of
Inorganic Chemistry (IUPAC Recommendations 2005)". This ordering
effectively follows the groups and rows of the periodic table, except
the Lanthanides, Actinides and hydrogen. Polyanions are still determined
based on the true electronegativity of the elements.
e.g. CH2(SO4)2
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(),
key=lambda s: get_el_sp(s).iupac_ordering)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self) -> 'Composition':
"""
Returns the composition replacing any species by the corresponding
element.
"""
return Composition(self.get_el_amt_dict(),
allow_negative=self.allow_negative)
@property
def fractional_composition(self) -> 'Composition':
"""
Returns the normalized composition in which the amounts of all
species sum to 1.
Returns:
Normalized composition in which the amounts of all species sum to 1.
"""
return self / self._natoms
@property
def reduced_composition(self) -> 'Composition':
"""
Returns the reduced composition, i.e. amounts normalized by greatest
common denominator. e.g., Composition("FePO4") for
Composition("Fe4P4O16").
"""
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self) -> Tuple['Composition', float]:
"""
Calculates a reduced composition and factor.
Returns:
A normalized composition and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
"""
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self, iupac_ordering=False) -> Tuple[str, float]:
"""
Calculates a reduced formula and factor.
Args:
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (LiFePO4, 4).
"""
all_int = all(abs(x - round(x)) < Composition.amount_tolerance
for x in self.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = {k: int(round(v)) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(
d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000,
iupac_ordering=False):
"""
Calculates an integer formula and factor.
Args:
max_denominator (int): all amounts in the el:amt dict are
first converted to a Fraction with this maximum denominator
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
"""
el_amt = self.get_el_amt_dict()
g = gcd_float(list(el_amt.values()), 1 / max_denominator)
d = {k: round(v / g) for k, v in el_amt.items()}
(formula, factor) = reduce_formula(
d, iupac_ordering=iupac_ordering)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * g
@property
def reduced_formula(self) -> str:
"""
Returns a pretty normalized formula, i.e., LiFePO4 instead of
Li4Fe4P4O16.
"""
return self.get_reduced_formula_and_factor()[0]
@property
def hill_formula(self) -> str:
"""
:return: Hill formula. The Hill system (or Hill notation) is a system
of writing empirical chemical formulas, molecular chemical formulas and
components of a condensed formula such that the number of carbon atoms
in a molecule is indicated first, the number of hydrogen atoms next,
and then the number of all other chemical elements subsequently, in
alphabetical order of the chemical symbols. When the formula contains
no carbon, all the elements, including hydrogen, are listed
alphabetically.
"""
c = self.element_composition
elements = sorted([el.symbol for el in c.keys()])
if "C" in elements:
elements = ["C"] + [el for el in elements if el != "C"]
formula = ["%s%s" % (el, formula_double_format(c[el]) if c[el] != 1 else "")
for el in elements]
return " ".join(formula)
@property
def elements(self) -> List[Element]:
"""
Returns view of elements in Composition.
"""
return list(self.keys())
def __str__(self):
return " ".join([
"{}{}".format(k, formula_double_format(v, ignore_ones=False))
for k, v in self.as_dict().items()])
@property
def num_atoms(self):
"""
Total number of atoms in Composition. For negative amounts, sum
of absolute values
"""
return self._natoms
@property
def weight(self):
"""
Total molecular weight of Composition
"""
return Mass(sum([amount * el.atomic_mass for el, amount in self.items()]), "amu")
def get_atomic_fraction(self, el):
"""
Calculate atomic fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Atomic fraction for element el in Composition
"""
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
"""
Calculate weight fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Weight fraction for element el in Composition
"""
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def contains_element_type(self, category):
"""
Check if Composition contains any elements matching a given category.
Args:
category (str): one of "noble_gas", "transition_metal",
"post_transition_metal", "rare_earth_metal", "metal", "metalloid",
"alkali", "alkaline", "halogen", "chalcogen", "lanthanoid",
"actinoid", "quadrupolar", "s-block", "p-block", "d-block", "f-block"
Returns:
True if any elements in Composition match category, otherwise False
"""
allowed_categories = ("noble_gas", "transition_metal", "post_transition_metal",
"rare_earth_metal", "metal", "metalloid", "alkali",
"alkaline", "halogen", "chalcogen", "lanthanoid",
"actinoid", "quadrupolar", "s-block", "p-block",
"d-block", "f-block")
if category not in allowed_categories:
raise ValueError("Please pick a category from: {}".format(
", ".join(allowed_categories)))
if "block" in category:
return any([category[0] in el.block for el in self.elements])
return any([getattr(el, "is_{}".format(category)) for el in self.elements])
def _parse_formula(self, formula):
"""
Args:
formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3
Returns:
Composition with that formula.
Notes:
In the case of Metallofullerene formula (e.g. Y3N@C80),
the @ mark will be dropped and passed to parser.
"""
# for Metallofullerene like "Y3N@C80"
formula = formula.replace("@", "")
def get_sym_dict(f, factor):
sym_dict = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.\d]*)", f):
el = m.group(1)
amt = 1
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
f = f.replace(m.group(), "", 1)
if f.strip():
raise CompositionError("{} is an invalid formula!".format(f))
return sym_dict
m = re.search(r"\(([^\(\)]+)\)\s*([\.\d]*)", formula)
if m:
factor = 1
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join(["{}{}".format(el, amt)
for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
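# Worked example of the recursion above: parsing "Li3Fe2(PO4)3" first
# expands the parenthesised group with factor 3 into "Li3Fe2P3.0O12.0",
# then re-parses that string, yielding
# {'Li': 3.0, 'Fe': 2.0, 'P': 3.0, 'O': 12.0}.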
@property
def anonymized_formula(self):
"""
An anonymized formula. Unique species are arranged in order of
increasing amount and assigned ascending letters. Useful for
prototyping formulas. For example, all stoichiometric perovskites have
anonymized_formula ABC3.
"""
reduced = self.element_composition
if all(x == int(x) for x in self.values()):
reduced /= gcd(*(int(i) for i in self.values()))
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += ("{}{}".format(e, amt_str))
return anon
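# Hedged example of the property above: Composition("LiFePO4") has reduced
# integer amounts [1, 1, 1, 4], so its anonymized_formula is "ABCD4"
# (amounts of 1 are printed without a number).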
@property
def chemical_system(self) -> str:
"""
Get the chemical system of a Composition, for example "O-Si" for
SiO2. Chemical system is a string of a list of elements
sorted alphabetically and joined by dashes, by convention for use
in database keys.
"""
return "-".join(sorted([str(el) for el in self.elements]))
@property
def valid(self):
"""
Returns True if Composition contains valid elements or species and
False if the Composition contains any dummy species.
"""
return not any([isinstance(el, DummySpecie) for el in self.elements])
def __repr__(self):
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d):
"""
Creates a composition from a dict generated by as_dict(). Strictly not
necessary given that the standard constructor already takes in such an
input, but this method preserves the standard pymatgen API of having
from_dict methods to reconstitute objects generated by as_dict(). Allows
for easier introspection.
Args:
d (dict): {symbol: amount} dict.
"""
return cls(d)
def get_el_amt_dict(self):
"""
Returns:
Dict with element symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self):
"""
Returns:
dict with species symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self):
"""
Returns:
Dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}
"""
c = Composition(self.reduced_formula)
return c.as_dict()
@property
def to_data_dict(self):
"""
Returns:
A dict with many keys and values relating to Composition/Formula,
including reduced_cell_composition, unit_cell_composition,
reduced_cell_formula, elements and nelements.
"""
return {"reduced_cell_composition": self.to_reduced_dict,
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": list(self.as_dict().keys()),
"nelements": len(self.as_dict().keys())}
def oxi_state_guesses(self, oxi_states_override=None, target_charge=0,
all_oxi_states=False, max_sites=None):
"""
Checks if the composition is charge-balanced and returns back all
charge-balanced oxidation state combinations. Composition must have
integer values. Note that a larger num_atoms in the composition gives
more degrees of freedom. e.g., if possible oxidation states of
element X are [2,4] and Y are [-3], then XY is not charge balanced
but X2Y2 is. Results are returned from most to least probable based
on ICSD statistics. Use max_sites to improve performance if needed.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
"""
return self._get_oxid_state_guesses(all_oxi_states, max_sites, oxi_states_override, target_charge)[0]
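# Hedged usage sketch; the exact ranking depends on the bundled ICSD
# statistics, but for a simple charge-balanced oxide one would expect:
#
#   Composition("Fe2O3").oxi_state_guesses()
#   # -> [{'Fe': 3.0, 'O': -2.0}]  (average oxidation state per element)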
def add_charges_from_oxi_state_guesses(self,
oxi_states_override=None,
target_charge=0,
all_oxi_states=False,
max_sites=None):
"""
Assign oxidation states based on guessed oxidation states.
See `oxi_state_guesses` for an explanation of how oxidation states are
guessed. This operation uses the set of oxidation states for each site
that were determined to be most likely from the oxidation state guessing
routine.
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
Composition, where the elements are assigned oxidation states based
on the results from guessing oxidation states. If no oxidation state
is possible, returns a Composition where all oxidation states are 0.
"""
_, oxidation_states = self._get_oxid_state_guesses(
all_oxi_states, max_sites, oxi_states_override, target_charge)
# Special case: No charged compound is possible
if not oxidation_states:
return Composition(dict((Specie(e, 0), f) for e, f in self.items()))
# Generate the species
species = []
for el, charges in oxidation_states[0].items():
species.extend([Specie(el, c) for c in charges])
# Return the new object
return Composition(collections.Counter(species))
def remove_charges(self):
"""
Removes the charges from any species in a Composition object.
Returns:
Composition object without charge decoration, for example
{"Fe3+": 2.0, "O2-":3.0} becomes {"Fe": 2.0, "O":3.0}
"""
d = collections.Counter()
for e, f in self.items():
e = re.findall(r"[A-z]+", str(e))[0]
d[str(e)] += f
return Composition(d)
def _get_oxid_state_guesses(self, all_oxi_states, max_sites,
oxi_states_override, target_charge):
"""
Utility operation for guessing oxidation states.
See `oxi_state_guesses` for full details. This operation does the
calculation of the most likely oxidation states
Args:
oxi_states_override (dict): dict of str->list to override an
element's common oxidation states, e.g. {"V": [2,3,4,5]}
target_charge (int): the desired total charge on the structure.
Default is 0 signifying charge balance.
all_oxi_states (bool): if True, an element defaults to
all oxidation states in pymatgen Element.icsd_oxidation_states.
Otherwise, default is Element.common_oxidation_states. Note
that the full oxidation state list is *very* inclusive and
can produce nonsensical results.
max_sites (int): if possible, will reduce Compositions to at most
this many sites to speed up oxidation state guesses. If the
composition cannot be reduced to this many sites a ValueError
will be raised. Set to -1 to just reduce fully. If set to a
number less than -1, the formula will be fully reduced but a
ValueError will be thrown if the number of atoms in the reduced
formula is greater than abs(max_sites).
Returns:
A list of dicts - each dict reports an element symbol and average
oxidation state across all sites in that composition. If the
composition is not charge balanced, an empty list is returned.
A list of dicts - each dict maps the element symbol to a list of
oxidation states for each site of that element. For example, Fe3O4 could
return a list of [2, 2, 2, 3, 3, 3] for the oxidation states of Fe. If the
composition is not charge balanced, an empty list is returned.
"""
comp = self.copy()
# reduce Composition if necessary
if max_sites and max_sites < 0:
comp = self.reduced_composition
if max_sites < -1 and comp.num_atoms > abs(max_sites):
raise ValueError(
"Composition {} cannot accommodate max_sites "
"setting!".format(comp))
elif max_sites and comp.num_atoms > max_sites:
reduced_comp, reduced_factor = self. \
get_reduced_composition_and_factor()
if reduced_factor > 1:
reduced_comp *= max(1, int(max_sites / reduced_comp.num_atoms))
comp = reduced_comp # as close to max_sites as possible
if comp.num_atoms > max_sites:
raise ValueError("Composition {} cannot accommodate max_sites "
"setting!".format(comp))
# Load prior probabilities of oxidation states, used to rank solutions
if not Composition.oxi_prob:
module_dir = os.path.join(os.path.
dirname(os.path.abspath(__file__)))
all_data = loadfn(os.path.join(module_dir, "..",
"analysis", "icsd_bv.yaml"))
Composition.oxi_prob = {Specie.from_string(sp): data
for sp, data in
all_data["occurrence"].items()}
oxi_states_override = oxi_states_override or {}
# assert: Composition only has integer amounts
if not all(amt == int(amt) for amt in comp.values()):
raise ValueError("Charge balance analysis requires integer "
"values in Composition!")
# for each element, determine all possible sum of oxidations
# (taking into account nsites for that particular element)
el_amt = comp.get_el_amt_dict()
els = el_amt.keys()
el_sums = [] # matrix: dim1= el_idx, dim2=possible sums
el_sum_scores = collections.defaultdict(set) # dict of el_idx, sum -> score
el_best_oxid_combo = {} # dict of el_idx, sum -> oxid combo with best score
for idx, el in enumerate(els):
el_sum_scores[idx] = {}
el_best_oxid_combo[idx] = {}
el_sums.append([])
if oxi_states_override.get(el):
oxids = oxi_states_override[el]
elif all_oxi_states:
oxids = Element(el).oxidation_states
else:
oxids = Element(el).icsd_oxidation_states or \
Element(el).oxidation_states
# get all possible combinations of oxidation states
# and sum each combination
for oxid_combo in combinations_with_replacement(oxids,
int(el_amt[el])):
# List this sum as a possible option
oxid_sum = sum(oxid_combo)
if oxid_sum not in el_sums[idx]:
el_sums[idx].append(oxid_sum)
# Determine how probable this combo is
score = sum([Composition.oxi_prob.get(Specie(el, o), 0) for
o in oxid_combo])
# If it is the most probable combo for a certain sum,
# store the combination
if oxid_sum not in el_sum_scores[idx] or score > el_sum_scores[idx].get(oxid_sum, 0):
el_sum_scores[idx][oxid_sum] = score
el_best_oxid_combo[idx][oxid_sum] = oxid_combo
# Determine which combination of oxidation states for each element
# is the most probable
all_sols = [] # will contain all solutions
all_oxid_combo = [] # will contain the best combination of oxidation states for each site
all_scores = [] # will contain a score for each solution
for x in product(*el_sums):
# each x is a trial of one possible oxidation sum for each element
if sum(x) == target_charge: # charge balance condition
el_sum_sol = dict(zip(els, x)) # element->oxid_sum
# normalize oxid_sum by amount to get avg oxid state
sol = {el: v / el_amt[el] for el, v in el_sum_sol.items()}
# add the solution to the list of solutions
all_sols.append(sol)
# determine the score for this solution
score = 0
for idx, v in enumerate(x):
score += el_sum_scores[idx][v]
all_scores.append(score)
# collect the combination of oxidation states for each site
all_oxid_combo.append(
dict((e, el_best_oxid_combo[idx][v]) for idx, (e, v) in enumerate(zip(els, x))))
# sort the solutions by highest to lowest score
if all_scores:
all_sols, all_oxid_combo = zip(*[(y, x) for (z, y, x) in sorted(zip(all_scores, all_sols, all_oxid_combo),
key=lambda pair: pair[0],
reverse=True)])
return all_sols, all_oxid_combo
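# Illustrative usage of the public wrapper around this helper (a sketch,
# assuming pymatgen's ``Composition.oxi_state_guesses``; the value shown is
# the expected most-probable solution, not verified output):
#
#     >>> Composition("Fe3O4").oxi_state_guesses()[0]
#     {'Fe': 2.6666666666666665, 'O': -2.0}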
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
lock_if_strict=True):
"""
Takes in a formula where capitalization may not be correct, and
suggests a ranked list of potential Composition matches.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations
lock_if_strict (bool): If true, a properly entered formula will
only return the one correct interpretation. For example,
"Co1" will only return "Co1" if true, but will return both
"Co1" and "C1 O1" if false.
Returns:
A ranked list of potential Composition matches
"""
# if we have an exact match and the user specifies lock_if_strict, just
# return the exact match!
if lock_if_strict:
# the strict composition parsing might throw an error, we can ignore
# it and just get on with fuzzy matching
try:
comp = Composition(fuzzy_formula)
return [comp]
except (CompositionError, ValueError):
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
# remove duplicates
all_matches = list(set(all_matches))
# sort matches by rank descending
all_matches = sorted(all_matches,
key=lambda match: match[1], reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
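# Illustrative usage (a sketch): a miscapitalized formula yields every
# plausible reading, best-scoring first, e.g.
#
#     >>> Composition.ranked_compositions_from_indeterminate_formula("co2o3")
#     # -> [Composition("Co2 O3"), Composition("C1 O5"), ...]
#     # (ordering follows the scoring in _comps_from_fuzzy_formula;
#     #  exact output not verified)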
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict=None, m_points=0,
factor=1):
"""
A recursive helper method for formula parsing that helps in
interpreting and ranking indeterminate formulas.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations.
m_dict (dict): A symbol:amt dictionary from the previously parsed
formula.
m_points: Number of points gained from the previously parsed
formula.
factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
as the fuzzy_formula with a coefficient of 2.
Returns:
A list of tuples, with the first element being a Composition and
the second element being the number of points awarded that
Composition interpretation.
"""
m_dict = m_dict or {}
def _parse_chomp_and_rank(m, f, m_dict, m_points):
"""
A helper method for formula parsing that helps in interpreting and
ranking indeterminate formulas
Author: Anubhav Jain
Args:
m: A regex match, with the first group being the element and
the second group being the amount
f: The formula part containing the match
m_dict: A symbol:amt dictionary from the previously parsed
formula
m_points: Number of points gained from the previously parsed
formula
Returns:
A tuple of (f, m_dict, points) where m_dict now contains data
from the match and the match has been removed (chomped) from
the formula f. The "goodness" of the match determines the
number of points returned for chomping. Returns
(None, None, None) if no element could be found.
"""
points = 0
# Points awarded if the first letter of the element is correctly
# specified as a capital
points_first_capital = 100
# Points awarded if the second letter of the element is correctly
# specified as lowercase
points_second_lowercase = 100
# get element and amount from regex match
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise CompositionError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
# convert the element string to proper [uppercase,lowercase] format
# and award points if it is already in that format
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
# if it's a valid element, chomp and add to the points
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
# else return None
return None, None, None
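# Scoring sketch (illustrative): parsing "Mn" earns both bonuses
# (200 points: capital first letter + lowercase second letter), while
# "MN" and "mn" each earn only one bonus (100 points). Interpretations
# accumulate these points, and the ranked list is sorted by the total.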
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
# The entire formula has been parsed into m_dict. Return the
# corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
# if there is a parenthesis, remove it and match the remaining stuff
# with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
# Match the stuff inside the parenthesis with the appropriate
# factor
for match in \
Composition._comps_from_fuzzy_formula(mp.group(1),
mp_dict,
mp_points,
factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in \
Composition._comps_from_fuzzy_formula(mp_form,
mp_dict,
mp_points,
factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
# if nothing matched outside the parentheses, the parenthesized
# part is the whole formula, so just yield its own match
if only_me:
yield match
return
# try to match the single-letter elements
m1 = re.match(r"([A-Za-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = \
_parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
# there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form1,
m_dict1,
m_points1,
factor):
yield match
# try to match two-letter elements
m2 = re.match(r"([A-Za-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = \
_parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
# there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
m_points2,
factor):
yield match
def reduce_formula(sym_amt, iupac_ordering=False):
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
iupac_ordering (bool, optional): Whether to order the
formula by the iupac "electronegativity" series, defined in
Table VI of "Nomenclature of Inorganic Chemistry (IUPAC
Recommendations 2005)". This ordering effectively follows
the groups and rows of the periodic table, except the
Lanthanides, Actinides and hydrogen. Note that polyanions
will still be determined based on the true electronegativity of
the elements.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(), key=lambda x: [get_el_sp(x).X, x])
syms = list(filter(
lambda x: abs(sym_amt[x]) > Composition.amount_tolerance, syms))
factor = 1
# Enforce integers for doing gcd.
if all((int(i) == i for i in sym_amt.values())):
factor = abs(gcd(*(int(i) for i in sym_amt.values())))
polyanion = []
# if the composition contains a poly anion
if len(syms) >= 3 and get_el_sp(syms[-1]).X - get_el_sp(syms[-2]).X < 1.65:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor
for i in [-2, -1]}
(poly_form, poly_factor) = reduce_formula(
poly_sym_amt, iupac_ordering=iupac_ordering)
if poly_factor != 1:
polyanion.append("({}){}".format(poly_form, int(poly_factor)))
syms = syms[:len(syms) - 2 if polyanion else len(syms)]
if iupac_ordering:
syms = sorted(syms,
key=lambda x: [get_el_sp(x).iupac_ordering, x])
reduced_form = []
for s in syms:
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(str(formula_double_format(normamt)))
reduced_form = "".join(reduced_form + polyanion)
return reduced_form, factor
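# Illustrative usage (a sketch):
#
#     >>> reduce_formula({"Fe": 6.0, "O": 8.0})
#     ('Fe3O4', 2)
#
# gcd(6, 8) = 2 is divided out, and symbols are ordered by electronegativity.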
class CompositionError(Exception):
"""Exception class for composition errors"""
class ChemicalPotential(dict, MSONable):
"""
Class to represent set of chemical potentials. Can be:
multiplied/divided by a Number
multiplied by a Composition (returns an energy)
added/subtracted with other ChemicalPotentials.
"""
def __init__(self, *args, **kwargs):
"""
Args:
*args, **kwargs: any valid dict init arguments
"""
d = dict(*args, **kwargs)
super().__init__((get_el_sp(k), v)
for k, v in d.items())
if len(d) != len(self):
raise ValueError("Duplicate potential specified")
def __mul__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v * other for k, v in self.items()})
raise NotImplementedError()
__rmul__ = __mul__
def __truediv__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v / other for k, v in self.items()})
raise NotImplementedError()
__div__ = __truediv__
def __sub__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
for e in els})
raise NotImplementedError()
def __add__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
for e in els})
raise NotImplementedError()
def get_energy(self, composition, strict=True):
"""
Calculates the energy of a composition.
Args:
composition (Composition): input composition
strict (bool): Whether all potentials must be specified
"""
if strict and set(composition.keys()) > set(self.keys()):
s = set(composition.keys()) - set(self.keys())
raise ValueError("Potentials not specified for {}".format(s))
return sum(self.get(k, 0) * v for k, v in composition.items())
def __repr__(self):
return "ChemPots: " + super().__repr__()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
fraricci/pymatgen
|
pymatgen/core/composition.py
|
Python
|
mit
| 51,187
|
[
"pymatgen"
] |
4f4dc51f05d675c77c3eaab519a50afae6825065cb3b1895c1cd84f316e35461
|
import re
from pycp2k import CP2K
from ase.lattice.cubic import Diamond
from ase.visualize import view
#===============================================================================
# Create the Si lattice here
lattice = Diamond(directions=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
symbol='Si',
latticeconstant=5.430697500,
size=(1, 1, 1))
# view(lattice)
#===============================================================================
# Setup directories and mpi
calc = CP2K()
calc.working_directory = "./"
calc.project_name = "si_bulk"
calc.mpi_n_processes = 2
#===============================================================================
# Create shortcuts for the most used subtrees of the input
CP2K_INPUT = calc.CP2K_INPUT
GLOBAL = CP2K_INPUT.GLOBAL
FORCE_EVAL = CP2K_INPUT.FORCE_EVAL_add()
SUBSYS = FORCE_EVAL.SUBSYS
DFT = FORCE_EVAL.DFT
SCF = DFT.SCF
#===============================================================================
# Fill input tree
GLOBAL.Run_type = "ENERGY"
GLOBAL.Print_level = "LOW"
calc.create_cell(SUBSYS, lattice)
calc.create_coord(SUBSYS, lattice)
FORCE_EVAL.Method = "Quickstep"
FORCE_EVAL.PRINT.FORCES.Section_parameters = "ON"
DFT.Basis_set_file_name = "BASIS_SET"
DFT.Potential_file_name = "GTH_POTENTIALS"
DFT.QS.Eps_default = 1.0E-10
DFT.MGRID.Ngrids = 4
DFT.MGRID.Cutoff = 300
DFT.MGRID.Rel_cutoff = 60
DFT.XC.XC_FUNCTIONAL.Section_parameters = "PADE"
SCF.Scf_guess = "ATOMIC"
SCF.Eps_scf = 1.0E-7
SCF.Max_scf = 300
SCF.DIAGONALIZATION.Section_parameters = "ON"
SCF.DIAGONALIZATION.Algorithm = "STANDARD"
SCF.MIXING.Section_parameters = "T"
SCF.MIXING.Method = "BROYDEN_MIXING"
SCF.MIXING.Alpha = 0.4
SCF.MIXING.Nbroyden = 8
KIND = SUBSYS.KIND_add("Si")
KIND.Basis_set = "DZVP-GTH-PADE"
KIND.Potential = "GTH-PADE-q4"
#===============================================================================
# Search for a good CUTOFF
energies = []
for cutoff in range(40, 90, 20):
DFT.MGRID.Cutoff = cutoff
calc.output_path = calc.working_directory + "/" + calc.project_name + str(cutoff) + ".out"
calc.run()
with open(calc.output_path, "r") as fin:
regex = re.compile(r" ENERGY\| Total FORCE_EVAL \( QS \) energy \(a\.u\.\):\s+(.+)\n")
for line in fin:
match = regex.match(line)
if match:
energies.append(match.groups()[0])
print(energies)
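# A possible follow-up (sketch): the captured energies are strings, so cast
# them before comparing successive cutoffs for convergence:
#
# energies = [float(e) for e in energies]
# cutoffs = list(range(40, 90, 20))
# for (c1, e1), (c2, e2) in zip(zip(cutoffs, energies),
#                               zip(cutoffs[1:], energies[1:])):
#     print("dE({} -> {} Ry) = {:.6e} a.u.".format(c1, c2, abs(e2 - e1)))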
|
SINGROUP/pycp2k
|
examples/example_si_cutoff.py
|
Python
|
lgpl-3.0
| 2,407
|
[
"ASE",
"CP2K"
] |
73ad4aea1d89dea080edd3402fee759be0fdf44e1d408bea5bae780d149505aa
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _, throw
from frappe.utils import add_days, cint, cstr, date_diff, formatdate, getdate
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
from erpnext.stock.utils import get_valid_serial_nos
from erpnext.utilities.transaction_base import TransactionBase, delete_events
class MaintenanceSchedule(TransactionBase):
@frappe.whitelist()
def generate_schedule(self):
if self.docstatus != 0:
return
self.set('schedules', [])
count = 1
for d in self.get('items'):
self.validate_maintenance_detail()
s_list = []
s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits, d.sales_person)
for i in range(d.no_of_visits):
child = self.append('schedules')
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count + 1
child.sales_person = d.sales_person
child.completion_status = "Pending"
child.item_reference = d.name
@frappe.whitelist()
def validate_end_date_visits(self):
days_in_period = {
"Weekly": 7,
"Monthly": 30,
"Quarterly": 91,
"Half Yearly": 182,
"Yearly": 365
}
for item in self.items:
if item.periodicity and item.periodicity != "Random" and item.start_date:
if not item.end_date:
if item.no_of_visits:
item.end_date = add_days(item.start_date, item.no_of_visits * days_in_period[item.periodicity])
else:
item.end_date = add_days(item.start_date, days_in_period[item.periodicity])
diff = date_diff(item.end_date, item.start_date) + 1
no_of_visits = cint(diff / days_in_period[item.periodicity])
if not item.no_of_visits or item.no_of_visits == 0:
item.end_date = add_days(item.start_date, days_in_period[item.periodicity])
diff = date_diff(item.end_date, item.start_date) + 1
item.no_of_visits = cint(diff / days_in_period[item.periodicity])
elif item.no_of_visits > no_of_visits:
item.end_date = add_days(item.start_date, item.no_of_visits * days_in_period[item.periodicity])
elif item.no_of_visits < no_of_visits:
item.end_date = add_days(item.start_date, item.no_of_visits * days_in_period[item.periodicity])
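# Worked example (illustrative): a "Monthly" item starting 2021-01-01 with
# no_of_visits=3 and no end date gets end_date = start_date + 3*30 days;
# conversely, given an end date 91 days out, no_of_visits = cint(91 / 30) = 3.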
def on_submit(self):
if not self.get('schedules'):
throw(_("Please click on 'Generate Schedule' to get schedule"))
self.check_serial_no_added()
self.validate_schedule()
email_map = {}
for d in self.get('items'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.validate_serial_no(d.item_code, serial_nos, d.start_date)
self.update_amc_date(serial_nos, d.end_date)
no_email_sp = []
if d.sales_person not in email_map:
sp = frappe.get_doc("Sales Person", d.sales_person)
try:
email_map[d.sales_person] = sp.get_email_id()
except frappe.ValidationError:
no_email_sp.append(d.sales_person)
if no_email_sp:
frappe.msgprint(
_("Setting Events to {0}, since the Employee attached to the below Sales Persons does not have a User ID{1}").format(
self.owner, "<br>" + "<br>".join(no_email_sp)
)
)
scheduled_date = frappe.db.sql("""select scheduled_date from
`tabMaintenance Schedule Detail` where sales_person=%s and item_code=%s and
parent=%s""", (d.sales_person, d.item_code, self.name), as_dict=1)
for key in scheduled_date:
description = frappe._("Reference: {0}, Item Code: {1} and Customer: {2}").format(self.name, d.item_code, self.customer)
event = frappe.get_doc({
"doctype": "Event",
"owner": email_map.get(d.sales_person, self.owner),
"subject": description,
"description": description,
"starts_on": cstr(key["scheduled_date"]) + " 10:00:00",
"event_type": "Private",
})
event.add_participant(self.doctype, self.name)
event.insert(ignore_permissions=1)
frappe.db.set(self, 'status', 'Submitted')
def create_schedule_list(self, start_date, end_date, no_of_visit, sales_person):
schedule_list = []
start_date_copy = start_date
date_diff = (getdate(end_date) - getdate(start_date)).days
add_by = date_diff / no_of_visit
for visit in range(cint(no_of_visit)):
if (getdate(start_date_copy) < getdate(end_date)):
start_date_copy = add_days(start_date_copy, add_by)
if len(schedule_list) < no_of_visit:
schedule_date = self.validate_schedule_date_for_holiday_list(getdate(start_date_copy),
sales_person)
if schedule_date > getdate(end_date):
schedule_date = getdate(end_date)
schedule_list.append(schedule_date)
return schedule_list
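# Worked example (illustrative): start 2021-01-01, end 2021-01-31 and
# no_of_visit=3 give add_by = 30 / 3 = 10, so visits land on roughly
# Jan 11, Jan 21 and Jan 31, each shifted earlier if it falls on a holiday.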
def validate_schedule_date_for_holiday_list(self, schedule_date, sales_person):
validated = False
employee = frappe.db.get_value("Sales Person", sales_person, "employee")
if employee:
holiday_list = get_holiday_list_for_employee(employee)
else:
holiday_list = frappe.get_cached_value('Company', self.company, "default_holiday_list")
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday` where parent=%s''', holiday_list)
if not validated and holidays:
# max iterations = len(holidays)
for i in range(len(holidays)):
if schedule_date in holidays:
schedule_date = add_days(schedule_date, -1)
else:
validated = True
break
return schedule_date
def validate_dates_with_periodicity(self):
for d in self.get("items"):
if d.start_date and d.end_date and d.periodicity and d.periodicity!="Random":
date_diff = (getdate(d.end_date) - getdate(d.start_date)).days + 1
days_in_period = {
"Weekly": 7,
"Monthly": 30,
"Quarterly": 90,
"Half Yearly": 180,
"Yearly": 365
}
if date_diff < days_in_period[d.periodicity]:
throw(_("Row {0}: To set {1} periodicity, difference between from and to date must be greater than or equal to {2}")
.format(d.idx, d.periodicity, days_in_period[d.periodicity]))
def validate_maintenance_detail(self):
if not self.get('items'):
throw(_("Please enter Maintaince Details first"))
for d in self.get('items'):
if not d.item_code:
throw(_("Please select item code"))
elif not d.start_date or not d.end_date:
throw(_("Please select Start Date and End Date for Item {0}").format(d.item_code))
elif not d.no_of_visits:
throw(_("Please mention no of visits required"))
elif not d.sales_person:
throw(_("Please select a Sales Person for item: {0}").format(d.item_name))
if getdate(d.start_date) >= getdate(d.end_date):
throw(_("Start date should be less than end date for Item {0}").format(d.item_code))
def validate_sales_order(self):
for d in self.get('items'):
if d.sales_order:
chk = frappe.db.sql("""select ms.name from `tabMaintenance Schedule` ms,
`tabMaintenance Schedule Item` msi where msi.parent=ms.name and
msi.sales_order=%s and ms.docstatus=1""", d.sales_order)
if chk:
throw(_("Maintenance Schedule {0} exists against {1}").format(chk[0][0], d.sales_order))
def validate_no_of_visits(self):
return len(self.schedules) != sum(d.no_of_visits for d in self.items)
def validate(self):
self.validate_end_date_visits()
self.validate_maintenance_detail()
self.validate_dates_with_periodicity()
self.validate_sales_order()
if not self.schedules or self.validate_no_of_visits():
self.generate_schedule()
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def update_amc_date(self, serial_nos, amc_expiry_date=None):
for serial_no in serial_nos:
serial_no_doc = frappe.get_doc("Serial No", serial_no)
serial_no_doc.amc_expiry_date = amc_expiry_date
serial_no_doc.save()
def validate_serial_no(self, item_code, serial_nos, amc_start_date):
for serial_no in serial_nos:
sr_details = frappe.db.get_value("Serial No", serial_no,
["warranty_expiry_date", "amc_expiry_date", "warehouse", "delivery_date", "item_code"], as_dict=1)
if not sr_details:
frappe.throw(_("Serial No {0} not found").format(serial_no))
if sr_details.get("item_code") != item_code:
frappe.throw(_("Serial No {0} does not belong to Item {1}")
.format(frappe.bold(serial_no), frappe.bold(item_code)), title="Invalid")
if sr_details.warranty_expiry_date \
and getdate(sr_details.warranty_expiry_date) >= getdate(amc_start_date):
throw(_("Serial No {0} is under warranty upto {1}")
.format(serial_no, sr_details.warranty_expiry_date))
if sr_details.amc_expiry_date and getdate(sr_details.amc_expiry_date) >= getdate(amc_start_date):
throw(_("Serial No {0} is under maintenance contract upto {1}")
.format(serial_no, sr_details.amc_expiry_date))
if not sr_details.warehouse and sr_details.delivery_date and \
getdate(sr_details.delivery_date) >= getdate(amc_start_date):
throw(_("Maintenance start date can not be before delivery date for Serial No {0}")
.format(serial_no))
def validate_schedule(self):
item_lst1 = []
item_lst2 = []
for d in self.get('items'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in self.get('schedules'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
throw(_("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'"))
else:
for x in item_lst1:
if x not in item_lst2:
throw(_("Please click on 'Generate Schedule'"))
def check_serial_no_added(self):
serial_present = []
for d in self.get('items'):
if d.serial_no:
serial_present.append(d.item_code)
for m in self.get('schedules'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
throw(_("Please click on 'Generate Schedule' to fetch Serial No added for Item {0}").format(m.item_code))
def on_cancel(self):
for d in self.get('items'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.update_amc_date(serial_nos)
frappe.db.set(self, 'status', 'Cancelled')
delete_events(self.doctype, self.name)
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def get_pending_data(self, data_type, s_date=None, item_name=None):
if data_type == "date":
dates = ""
for schedule in self.schedules:
if schedule.item_name == item_name and schedule.completion_status == "Pending":
dates = dates + "\n" + formatdate(schedule.scheduled_date, "dd-MM-yyyy")
return dates
elif data_type == "items":
items = ""
for item in self.items:
for schedule in self.schedules:
if item.item_name == schedule.item_name and schedule.completion_status == "Pending":
items = items + "\n" + item.item_name
break
return items
elif data_type == "id":
for schedule in self.schedules:
if schedule.item_name == item_name and s_date == formatdate(schedule.scheduled_date, "dd-mm-yyyy"):
return schedule.name
@frappe.whitelist()
def update_serial_nos(s_id):
serial_nos = frappe.db.get_value('Maintenance Schedule Detail', s_id, 'serial_no')
if serial_nos:
serial_nos = get_serial_nos(serial_nos)
return serial_nos
else:
return False
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None, item_name=None, s_id=None):
from frappe.model.mapper import get_mapped_doc
def update_status_and_detail(source, target, parent):
target.maintenance_type = "Scheduled"
target.maintenance_schedule = source.name
target.maintenance_schedule_detail = s_id
def update_sales(source, target, parent):
sales_person = frappe.db.get_value('Maintenance Schedule Detail', s_id, 'sales_person')
target.service_person = sales_person
target.serial_no = ''
doclist = get_mapped_doc("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status_and_detail
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"condition": lambda doc: doc.item_name == item_name,
"postprocess": update_sales
}
}, target_doc)
return doclist
|
mhbu50/erpnext
|
erpnext/maintenance/doctype/maintenance_schedule/maintenance_schedule.py
|
Python
|
gpl-3.0
| 12,406
|
[
"VisIt"
] |
8acc38c23db11086bd7627e0b6fd224ceff152034eb2211e253bede0a89db7c1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views import defaults as default_views
from django.conf.urls.i18n import i18n_patterns
from med.pages import views as pages_views
urlpatterns = [
# Django set_language, user {% load i18n %}{% url 'set_language' %}
url(r'^i18n/', include('django.conf.urls.i18n')),
]
urlpatterns += i18n_patterns(
url(r'^$', pages_views.HomeView.as_view(), name="home"),
url(r'^contact/$', pages_views.ContactView.as_view(), name="contact"),
url(r'^thankyou/$', pages_views.ThanksView.as_view(), name="thanks"),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("med.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
url(r'^avatar/', include("avatar.urls")),
# Search
url(r'^search/', include('haystack.urls')),
# Your stuff: custom urls with internationalization includes go here
url(r'^questions/', include('med.questions.urls', namespace='questions')),
)
urlpatterns += [
# Need no internationalization
url(r'api/', include('med.api.urls', namespace='api')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request),
url(r'^403/$', default_views.permission_denied),
url(r'^404/$', default_views.page_not_found),
url(r'^500/$', default_views.server_error),
]
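# e.g. with DEBUG=True and the dev server running, opening
# http://127.0.0.1:8000/404/ (or /400/, /403/, /500/) renders the
# corresponding error view for inspection.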
|
Uran198/med
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,852
|
[
"VisIt"
] |
dc61dbbe5d5a5797e8f6c81f2bbb8b000286b88f6c1022c722776dce79b790f1
|
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerFaucetCloseEnvV2(SawyerXYZEnv):
def __init__(self):
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.0)
obj_high = (0.1, 0.85, 0.0)
self._handle_length = 0.175
self._target_radius = 0.07
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.8, 0.0]),
'hand_init_pos': np.array([0., .4, .2])
}
self.hand_init_pos = self.init_config['hand_init_pos']
self.obj_init_pos = self.init_config['obj_init_pos']
goal_low = self.hand_low
goal_high = self.hand_high
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_faucet.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward, tcp_to_obj, _, target_to_obj, object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= 0.07),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
@property
def _target_site_config(self):
return [('goal_close', self._target_pos),
('goal_open', np.array([10., 10., 10.]))]
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('faucetBase')
def _get_pos_objects(self):
return self._get_site_pos('handleStartClose') + np.array(
[0., 0., -0.01])
def reset_model(self):
self._reset_hand()
# Compute faucet position
self.obj_init_pos = self._get_state_rand_vec() if self.random_init \
else self.init_config['obj_init_pos']
# Set mujoco body to computed position
self.sim.model.body_pos[self.model.body_name2id(
'faucetBase')] = self.obj_init_pos
self._target_pos = self.obj_init_pos + np.array(
[-self._handle_length, .0, .125])
return self._get_obs()
def _reset_hand(self):
super()._reset_hand()
self.reachCompleted = False
def compute_reward(self, action, obs):
obj = obs[4:7]
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = (obj - target)
target_to_obj = np.linalg.norm(target_to_obj)
target_to_obj_init = (self.obj_init_pos - target)
target_to_obj_init = np.linalg.norm(target_to_obj_init)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self._target_radius),
margin=abs(target_to_obj_init - self._target_radius),
sigmoid='long_tail',
)
faucet_reach_radius = 0.01
tcp_to_obj = np.linalg.norm(obj - tcp)
tcp_to_obj_init = np.linalg.norm(self.obj_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, faucet_reach_radius),
margin=abs(tcp_to_obj_init - faucet_reach_radius),
sigmoid='gaussian',
)
tcp_opened = 0
object_grasped = reach
reward = 2 * reach + 3 * in_place
reward *= 2
reward = 10 if target_to_obj <= self._target_radius else reward
return (reward, tcp_to_obj, tcp_opened, target_to_obj, object_grasped,
in_place)
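# Reward shaping note (illustrative): reward_utils.tolerance returns 1.0 when
# the measured distance is inside ``bounds`` and decays toward 0 over
# ``margin`` via the chosen sigmoid, so 2*reach + 3*in_place is at most 5;
# doubling it caps the shaped reward at 10, and success within _target_radius
# snaps the reward to exactly 10.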
|
rlworkgroup/metaworld
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_close_v2.py
|
Python
|
mit
| 4,061
|
[
"Gaussian"
] |
d0026644bf20660f301a2f0778950497e83f03f50d5c358aa5c7cf5b060504d2
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_seproperties
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SeProperties Avi RESTful Object
description:
- This module is used to configure SeProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
se_agent_properties:
description:
- Seagentproperties settings for seproperties.
se_bootup_properties:
description:
- Sebootupproperties settings for seproperties.
se_runtime_properties:
description:
- Seruntimeproperties settings for seproperties.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
- Default value when not specified in API or module is interpreted by Avi Controller as default.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SeProperties object
avi_seproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_seproperties
"""
RETURN = '''
obj:
description: SeProperties (api/seproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
se_agent_properties=dict(type='dict',),
se_bootup_properties=dict(type='dict',),
se_runtime_properties=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'seproperties',
set([]))
if __name__ == '__main__':
main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_seproperties.py
|
Python
|
bsd-3-clause
| 3,509
|
[
"VisIt"
] |
7d6728d1b697661c78e165805594ab8350cdb2e044bb2105b1245c4c4d3bce22
|
#Internet Archive ROM Launcher
#Zach Morris
#https://github.com/zach-morris/plugin.program.iarl
from resources.lib.xbmcswift2b import Plugin
from resources.lib.xbmcswift2b import actions
from resources.lib.xbmcswift2b import ListItem as LI
import os, sys, subprocess, xbmc, xbmcgui, xbmcaddon
from resources.lib.util import *
from resources.lib.webutils import *
import resources.lib.paginate as paginate
xbmc.log(msg="IARL: Let's Play!", level=xbmc.LOGNOTICE)
#Initialize Stuff
plugin = Plugin()
try: #Added for even more viewtypes depending on the skin
if plugin.get_setting('iarl_setting_setcontent',unicode) != 'None':
xbmcplugin.setContent(int(sys.argv[1]),str(plugin.get_setting('iarl_setting_setcontent',unicode)))
except:
xbmc.log(msg='IARL: Unable to set content type', level=xbmc.LOGDEBUG)
iarl_data = {
'settings' : { 'cache_list' : plugin.get_setting('iarl_setting_cache_list',bool),
'clean_list' : plugin.get_setting('iarl_setting_clean_list',bool),
'listing_convention' : plugin.get_setting('iarl_setting_listing',unicode),
'naming_convention' : plugin.get_setting('iarl_setting_naming',unicode),
'items_per_page_setting' : None, #Initialize variable and set later
'iarl_setting_history' : plugin.get_setting('iarl_setting_history',int),
'local_file_action' : plugin.get_setting('iarl_setting_localfile_action',unicode),
'game_select_action' : plugin.get_setting('iarl_setting_default_action',unicode),
'show_search_item' : None, #Initialize variable and set later
'show_randomplay_item' : None, #Initialize variable and set later
'show_history_item' : None, #Initialize variable and set later
'show_extras_item' : None, #Initialize variable and set later
'autoplay_trailer' : plugin.get_setting('iarl_setting_autoplay_trailer',unicode),
'download_cache' : None, #Initialize variable and set later
'ia_enable_login' : None, #Initialize variable and set later
'ia_username' : plugin.get_setting('iarl_setting_ia_username',unicode),
'ia_password' : plugin.get_setting('iarl_setting_ia_password',unicode),
'external_launch_env' : plugin.get_setting('iarl_external_user_external_env',unicode),
'external_launch_close_kodi' : plugin.get_setting('iarl_external_launch_close_kodi',unicode),
'path_to_retroarch' : xbmc.translatePath(plugin.get_setting('iarl_path_to_retroarch',unicode)),
'path_to_retroarch_system_dir' : xbmc.translatePath(plugin.get_setting('iarl_path_to_retroarch_system_dir',unicode)),
'path_to_retroarch_cfg' : xbmc.translatePath(plugin.get_setting('iarl_path_to_retroarch_cfg',unicode)),
'enable_additional_emulators' : [plugin.get_setting('iarl_additional_emulator_1_type',unicode),plugin.get_setting('iarl_additional_emulator_2_type',unicode),plugin.get_setting('iarl_additional_emulator_3_type',unicode)],
'path_to_additional_emulators' : [xbmc.translatePath(plugin.get_setting('iarl_additional_emulator_1_path',unicode)),xbmc.translatePath(plugin.get_setting('iarl_additional_emulator_2_path',unicode)),xbmc.translatePath(plugin.get_setting('iarl_additional_emulator_3_path',unicode))],
'enable_netplay' : None, #Initialize variable and set later
'netplay_host_or_client' : plugin.get_setting('iarl_netplay_hostclient',unicode),
'netplay_host_nickname' : plugin.get_setting('iarl_netplay_nickname1',unicode),
'netplay_client_nickname' : plugin.get_setting('iarl_netplay_nickname2',unicode),
'netplay_spectator_nickname' : plugin.get_setting('iarl_netplay_nickname3',unicode),
'netplay_host_IP' : plugin.get_setting('iarl_netplay_IP',unicode),
'netplay_host_port' : plugin.get_setting('iarl_netplay_port',unicode),
'netplay_sync_frames' : None, #Initialize variable and set later
'enable_postdl_edit' : None, #Initialize variable and set later
'hidden_setting_clear_cache_value' : plugin.get_setting('iarl_setting_clear_cache_value',bool),
'hidden_setting_clear_hidden_archives' : plugin.get_setting('iarl_setting_clear_hidden_archives',bool),
'hidden_setting_warn_chd' : plugin.get_setting('iarl_setting_warn_chd',bool),
'hidden_setting_warn_iso' : plugin.get_setting('iarl_setting_warn_iso',bool),
'hidden_setting_tou_agree' : plugin.get_setting('iarl_setting_tou',bool),
'launch_with_subprocess' : plugin.get_setting('iarl_setting_subprocess_launch',bool),
'hard_code_favorite_settings' : plugin.get_setting('iarl_setting_favorite_hard_code',bool),
'hard_coded_include_back_link' : plugin.get_setting('iarl_setting_back_link_hard_code',bool),
},
'addon_data':{ 'plugin_name' : 'plugin.program.iarl',
'log_level' : 'LOG_LEVEL_INFO',
'operating_system' : get_operating_system(),
'addon_media_path' : get_media_files_path(),
'addon_skin_path' : get_skin_files_path(),
'addon_dat_path' : get_XML_files_path(),
'addon_temp_dl_path' : get_userdata_temp_dir(),
'addon_list_cache_path' : get_userdata_list_cache_dir(),
'addon_install_path' : get_addon_install_path(),
'addon_bin_path' : get_addondata_bindir(),
'7za_path' : None,
'chdman_path' : None,
'default_icon' : 'arcade_default_box.jpg',
'default_header_color' : 'white.png',
'default_bg_color' : 'black.png',
'default_buttonfocustheme' : 'button-highlight1.png',
'default_buttonnofocustheme' : 'button-nofocus2.png',
},
'archive_data': None,
'current_archive_data':{'xml_id' : None,
'page_id' : None,
'emu_name' : None,
'emu_base_url' : None,
'emu_homepage' : None,
'emu_filepath' : None,
'emu_parser' : None,
'emu_category' : None,
'emu_version' : None,
'emu_date' : None,
'emu_author' : None,
'emu_description' : None,
'emu_plot' : None,
'emu_boxart' : None,
'emu_banner' : None,
'emu_fanart' : None,
'emu_logo' : None,
'emu_trailer' : None,
'emu_download_path' : None,
'emu_post_download_action' : None,
'emu_launcher' : None,
'emu_ext_launch_cmd' : None,
'total_num_archives' : None,
'emu_total_num_games' : None,
'category_id' : None,
'header_color' : None,
'background_color' : None,
'button_focus' : None,
'button_nofocus' : None,
},
'current_rom_data':{'rom_label' : None,
'rom_name' : None,
'rom_icon' : None,
'rom_thumbnail' : None,
'rom_title' : None,
'rom_filenames' : list(),
'rom_save_filenames' : list(),
'rom_supporting_filenames' : list(),
'rom_save_supporting_filenames' : list(),
'rom_emu_command' : None,
'rom_override_cmd' : None,
'rom_override_postdl' : None,
'rom_override_downloadpath' : None,
'rom_size' : list(),
'rom_plot' : None,
'rom_date' : None,
'rom_year' : None,
'rom_studio' : None,
'rom_genre' : None,
'rom_nplayers' : None,
'rom_tag' : None,
'rom_rating' : None,
'rom_perspective' : None,
'rom_esrb' : None,
'rom_trailer' : None,
'rom_boxarts' : [None,None,None,None,None,None,None,None,None,None],
'rom_snapshots' : [None,None,None,None,None,None,None,None,None,None],
'rom_fanarts' : [None,None,None,None,None,None,None,None,None,None],
'rom_banners' : [None,None,None,None,None,None,None,None,None,None],
'rom_logos' : [None,None,None,None,None,None,None,None,None,None],
},
'current_save_data':{'rom_save_filenames' : list(),
'rom_save_filenames_exist' : list(),
'matching_rom_save_filenames' : list(),
'rom_save_filenames_success' : list(),
'rom_supporting_filenames' : list(),
'rom_save_supporting_filenames' : list(),
'rom_save_supporting_filenames_exist' : list(),
'matching_rom_save_supporting_filenames' : list(),
'rom_save_supporting_filenames_success' : list(),
'rom_converted_filenames' : list(),
'rom_converted_filenames_success' : list(),
'rom_converted_supporting_filenames' : list(),
'rom_converted_supporting_filenames_success' : list(),
'overall_download_success' : True,
'overall_conversion_success' : True,
'overwrite_existing_files' : False,
'launch_filename' : None,
},
}
#Define number of items to display per page
items_pp_options = {'10':10,'25':25,'50':50,'100':100,'150':150,'200':200,'250':250,'300':300,'350':350,'400':400,'450':450,'500':500,'List All':99999}
try:
iarl_data['settings']['items_per_page_setting'] = items_pp_options[plugin.get_setting('iarl_setting_items_pp',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['items_per_page_setting'] = 99999 #Default to All if not initialized correctly
if iarl_data['settings']['items_per_page_setting'] is None:
iarl_data['settings']['items_per_page_setting'] = 99999 #Default to All if not initialized correctly
#Define temp download cache size
cache_options = {'Zero (One ROM and Supporting Files Only)':0,'10 MB':10*1e6,'25MB':25*1e6,'50MB':50*1e6,'100MB':100*1e6,'150MB':150*1e6,'200MB':200*1e6,'250MB':250*1e6,'300MB':300*1e6,'350MB':350*1e6,'400MB':400*1e6,'450MB':450*1e6,'500MB':500*1e6,'1GB':1000*1e6,'2GB':2000*1e6,'5GB':5000*1e6,'10GB':10000*1e6,'20GB':20000*1e6}
try:
iarl_data['settings']['download_cache'] = cache_options[plugin.get_setting('iarl_setting_dl_cache',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['download_cache'] = 0 #Default to 0 if not initialized correctly
if iarl_data['settings']['download_cache'] is None:
iarl_data['settings']['download_cache'] = 0 #Default to 0 if not initialized correctly
#Convert Show/Hide to True/False
show_hide_options = {'Show':True,'Hide':False}
try:
iarl_data['settings']['show_search_item'] = show_hide_options[plugin.get_setting('iarl_setting_show_search',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['show_search_item'] = True #Default to True if not initialized correctly
if iarl_data['settings']['show_search_item'] is None:
iarl_data['settings']['show_search_item'] = True #Default to True if not initialized correctly
try:
iarl_data['settings']['show_randomplay_item'] = show_hide_options[plugin.get_setting('iarl_setting_show_randomplay',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['show_randomplay_item'] = True #Default to True if not initialized correctly
if iarl_data['settings']['show_randomplay_item'] is None:
iarl_data['settings']['show_randomplay_item'] = True #Default to True if not initialized correctly
try:
iarl_data['settings']['show_history_item'] = show_hide_options[plugin.get_setting('iarl_setting_show_gamehistory',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['show_history_item'] = True #Default to True if not initialized correctly
if iarl_data['settings']['show_history_item'] is None:
iarl_data['settings']['show_history_item'] = True #Default to True if not initialized correctly
try:
iarl_data['settings']['show_extras_item'] = show_hide_options[plugin.get_setting('iarl_setting_show_extras',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['show_extras_item'] = True #Default to True if not initialized correctly
if iarl_data['settings']['show_extras_item'] is None:
iarl_data['settings']['show_extras_item'] = True #Default to True if not initialized correctly
#Convert Enabled/Disabled to True/False
enabled_disabled_options = {'Enabled':True,'Disabled':False}
try:
iarl_data['settings']['enable_netplay'] = enabled_disabled_options[plugin.get_setting('iarl_enable_netplay',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['enable_netplay'] = False #Default to False if not initialized correctly
if iarl_data['settings']['enable_netplay'] is None:
iarl_data['settings']['enable_netplay'] = False #Default to False if not initialized correctly
try:
iarl_data['settings']['netplay_sync_frames'] = enabled_disabled_options[plugin.get_setting('iarl_netplay_frames',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['netplay_sync_frames'] = False #Default to False if not initialized correctly
if iarl_data['settings']['netplay_sync_frames'] is None:
iarl_data['settings']['netplay_sync_frames'] = False #Default to False if not initialized correctly
try:
iarl_data['settings']['ia_enable_login'] = enabled_disabled_options[plugin.get_setting('iarl_enable_login',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['ia_enable_login'] = False #Default to False if not initialized correctly
if iarl_data['settings']['ia_enable_login'] is None:
iarl_data['settings']['ia_enable_login'] = False #Default to False if not initialized correctly
try:
iarl_data['settings']['enable_postdl_edit'] = enabled_disabled_options[plugin.get_setting('iarl_enable_post_dl_edit',unicode)]
except (KeyError, ValueError):
iarl_data['settings']['enable_postdl_edit'] = False #Default to False if not initialized correctly
if iarl_data['settings']['enable_postdl_edit'] is None:
iarl_data['settings']['enable_postdl_edit'] = False #Default to False if not initialized correctly
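# The repeated lookup-with-fallback pattern above could be factored into a
# helper (sketch; ``setting_lookup`` is hypothetical, not part of the addon):
#
# def setting_lookup(options, setting_id, default):
#     try:
#         value = options[plugin.get_setting(setting_id, unicode)]
#     except (KeyError, ValueError):
#         value = default
#     return value if value is not None else default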
#Define path to 7za binary
if xbmc.getCondVisibility('System.HasAddon(virtual.system-tools)'):
try:
iarl_data['addon_data']['7za_path'] = xbmc.translatePath('special://home/addons/virtual.system-tools/bin/7za')
xbmc.log(msg='IARL: 7ZA Path was found in virtual.system-tools', level=xbmc.LOGDEBUG)
except:
xbmc.log(msg='IARL: virtual.system-tools was found but the path could not be defined', level=xbmc.LOGDEBUG)
else:
if 'OSX' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.OSX')
elif 'Windows' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.exe')
elif 'Nix' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.Nix')
elif 'OpenElec RPi' in iarl_data['addon_data']['operating_system'] or 'LibreElec RPi' in iarl_data['addon_data']['operating_system'] or 'LibreElec SX05' in iarl_data['addon_data']['operating_system']:
try:
if 'v7' in os.uname()[4]:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.armv7l')
else:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.armv6l')
except:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.armv6l')
elif 'Android' in iarl_data['addon_data']['operating_system']: #Android. Your walled garden is confusing and generally sucks balls...
if os.path.isdir('/data/data/org.xbmc.kodi/lib'):
if not os.path.isfile('/data/data/org.xbmc.kodi/lib/7z.android'):
try:
copyFile(os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7z.android'),'/data/data/org.xbmc.kodi/lib/7z.android')
xbmc.log(msg='IARL: 7za was copied to /data/data/org.xbmc.kodi/lib/7z.android', level=xbmc.LOGDEBUG)
except:
xbmc.log(msg='IARL: Unable to copy 7z to /data/data/org.xbmc.kodi/lib/7z.android', level=xbmc.LOGDEBUG)
try:
os.chmod('/data/data/org.xbmc.kodi/lib/7z.android', 0o555)
# os.chmod('/data/data/org.xbmc.kodi/lib/7z.android', os.stat('/data/data/org.xbmc.kodi/lib/7z.android').st_mode | 0o111)
iarl_data['addon_data']['7za_path'] = '/data/data/org.xbmc.kodi/lib/7z.android'
except:
xbmc.log(msg='IARL: chmod failed for /data/data/org.xbmc.kodi/lib/7z.android', level=xbmc.LOGDEBUG)
iarl_data['addon_data']['7za_path'] = None
xbmc.log(msg='IARL: 7Z Path could not be defined', level=xbmc.LOGDEBUG)
else:
try:
os.chmod('/data/data/org.xbmc.kodi/lib/7z.android', os.stat('/data/data/org.xbmc.kodi/lib/7z.android').st_mode | 0o111)
iarl_data['addon_data']['7za_path'] = '/data/data/org.xbmc.kodi/lib/7z.android'
except:
xbmc.log(msg='IARL: chmod failed for /data/data/org.xbmc.kodi/lib/7z.android', level=xbmc.LOGDEBUG)
iarl_data['addon_data']['7za_path'] = None
xbmc.log(msg='IARL: 7Z Path could not be defined', level=xbmc.LOGDEBUG)
else: #The normal location isn't available, need to try and install the 7za binary in the kodi root dir-http://forum.kodi.tv/showthread.php?tid=231642
if not os.path.isfile(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')):
try:
copyFile(os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7z.android'),os.path.join(xbmc.translatePath('special://xbmc'),'7z.android'))
xbmc.log(msg='IARL: 7za was copied to '+str(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')), level=xbmc.LOGDEBUG)
except:
xbmc.log(msg='IARL: Unable to copy 7za to '+str(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')), level=xbmc.LOGDEBUG)
try:
os.chmod(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android'), os.stat(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')).st_mode | 0o111)
iarl_data['addon_data']['7za_path'] = os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')
except:
xbmc.log(msg='IARL: chmod failed for '+str(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')), level=xbmc.LOGDEBUG)
iarl_data['addon_data']['7za_path'] = None
xbmc.log(msg='IARL: 7ZA Path could not be defined', level=xbmc.LOGDEBUG)
else:
try:
os.chmod(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android'), os.stat(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')).st_mode | 0o111)
iarl_data['addon_data']['7za_path'] = os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')
except:
xbmc.log(msg='IARL: chmod failed for '+str(os.path.join(xbmc.translatePath('special://xbmc'),'7z.android')), level=xbmc.LOGDEBUG)
iarl_data['addon_data']['7za_path'] = None
xbmc.log(msg='IARL: 7ZA Path could not be defined', level=xbmc.LOGDEBUG)
elif 'OpenElec x86' in iarl_data['addon_data']['operating_system'] or 'LibreElec x86' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['7za_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'7za','7za.x86_64')
else:
iarl_data['addon_data']['7za_path'] = None
xbmc.log(msg='IARL: 7ZA Path could not be defined', level=xbmc.LOGDEBUG)
if iarl_data['addon_data']['7za_path'] is not None:
xbmc.log(msg='IARL: 7ZA Path is defined as '+str(iarl_data['addon_data']['7za_path']), level=xbmc.LOGDEBUG)
#Define path to CHDMAN binary
if 'OSX' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['chdman_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'chdman','chdman.OSX')
elif 'Windows' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['chdman_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'chdman','chdman.exe')
elif 'Nix' in iarl_data['addon_data']['operating_system']:
iarl_data['addon_data']['chdman_path'] = os.path.join(iarl_data['addon_data']['addon_bin_path'],'chdman','chdman.Nix')
else:
iarl_data['addon_data']['chdman'] = None
xbmc.log(msg='IARL: CHDMAN Path could not be defined', level=xbmc.LOGDEBUG)
#If cache list is false, then clear the listed cache every time the addon is run
if not iarl_data['settings']['cache_list']:
try:
plugin.clear_function_cache()
except:
pass
#If the advanced setting action 'Clear Addon Cache' was set, then run this one time clear cache function
if iarl_data['settings']['hidden_setting_clear_cache_value']:
advanced_setting_action_clear_cache(plugin)
#If the advanced setting action 'Unhide All Archives' was set, then run this one time clear hidden archives function
if iarl_data['settings']['hidden_setting_clear_hidden_archives']:
unhide_all_archives(plugin)
xbmcaddon.Addon(id='plugin.program.iarl').setSetting(id='iarl_setting_clear_hidden_archives',value='false')
xbmc.log(msg='IARL: Unhide All Archives set back to false', level=xbmc.LOGDEBUG)
#When addon is initialized, get all available archive infos
iarl_data['archive_data'] = get_archive_info()
##Start of Addon Routes
#Update XML Value (Context Menu Item)
@plugin.route('/update_xml/<xml_id>')
def update_xml_value(xml_id):
args_in = plugin.request.args
try:
tag_value = args_in['tag_value'][0]
except:
tag_value = None
if tag_value is None:
try:
tag_value = sys.argv[2].split('=')[-1]
except:
tag_value = None
try:
current_xml_name = str(os.path.split(xml_id)[-1])
except:
current_xml_name = str(xml_id)
if tag_value == 'emu_downloadpath':
xbmc.log(msg='IARL: Updating archive download path for: '+str(xml_id), level=xbmc.LOGDEBUG)
set_new_dl_path(xml_id,plugin)
elif tag_value == 'emu_postdlaction':
xbmc.log(msg='IARL: Updating archive post download action for: '+str(xml_id), level=xbmc.LOGDEBUG)
set_new_post_dl_action(xml_id,plugin)
elif tag_value == 'emu_launcher':
xbmc.log(msg='IARL: Updating internal/external emulator launcher for: '+str(xml_id), level=xbmc.LOGDEBUG)
set_new_emu_launcher(xml_id,plugin)
elif tag_value == 'emu_ext_launch_cmd':
xbmc.log(msg='IARL: Updating external launch command for: '+str(xml_id), level=xbmc.LOGDEBUG)
update_external_launch_commands(iarl_data,xml_id,plugin)
elif tag_value == 'emu_launch_cmd_review':
xbmc.log(msg='IARL: Showing launch command for: '+str(xml_id), level=xbmc.LOGDEBUG)
review_archive_launch_commands(xml_id)
elif tag_value == 'hide_archive':
xbmc.log(msg='IARL: Updating archive visibility for: '+str(xml_id), level=xbmc.LOGDEBUG)
hide_selected_archive(iarl_data,xml_id,plugin)
elif tag_value == 'refresh_archive_cache':
xbmc.log(msg='IARL: Refreshing list_cache for: '+str(xml_id), level=xbmc.LOGDEBUG)
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
clear_cache_success = False
try:
cache_category_id = iarl_data['archive_data']['category_id'][iarl_data['archive_data']['emu_filepath'].index(xml_id)]
clear_cache_success = delete_userdata_list_cache_file(cache_category_id)
except:
xbmc.log(msg='IARL: Unable to clear list_cache for: '+str(xml_id), level=xbmc.LOGERROR)
if clear_cache_success:
current_dialog = xbmcgui.Dialog()
ok_ret = current_dialog.ok('Complete','Archive Listing Refreshed')
elif tag_value == 'update_favorite_metadata':
xbmc.log(msg='IARL: Updating Favorites metadata for: '+str(xml_id), level=xbmc.LOGDEBUG)
current_dialog = xbmcgui.Dialog()
ret1 = current_dialog.select('Update Favorite Metadata for '+current_xml_name, ['Title','Description','Author','Thumbnail URL','Banner URL','Fanart URL','Logo URL','Youtube Trailer'])
if ret1 == 0: #Update Title
xbmc.log(msg='IARL: Updating Favorites title for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new title:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),0)
elif ret1 == 1: #Update Description
xbmc.log(msg='IARL: Updating Favorites description for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new description:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n','[CR]').replace('\r','[CR]').replace('<',' ').replace('>',' '),1)
elif ret1 == 2: #Update Author
xbmc.log(msg='IARL: Updating Favorites author for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new author:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),2)
elif ret1 == 3: #Update Thumbnail
xbmc.log(msg='IARL: Updating Favorites Thumbnail URL for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new Thumbnail URL:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),3)
elif ret1 == 4: #Update Banner
xbmc.log(msg='IARL: Updating Favorites Banner URL for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new Banner URL:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),4)
elif ret1 == 5: #Update Fanart
xbmc.log(msg='IARL: Updating Favorites Fanart URL for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new Fanart URL:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),5)
elif ret1 == 6: #Update Logo
xbmc.log(msg='IARL: Updating Favorites Logo URL for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new Logo URL:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),6)
elif ret1 == 7: #Update Video
xbmc.log(msg='IARL: Updating Favorites Video ID for: '+str(xml_id), level=xbmc.LOGDEBUG)
new_xml_text = current_dialog.input('Enter a new YouTube URL:')
set_new_favorite_metadata(xml_id,new_xml_text.replace('\n',' ').replace('\r',' ').replace('<',' ').replace('>',' '),7)
elif ret1 == -1: #Cancelled
xbmc.log(msg='IARL: Updating Favorites metadata was cancelled', level=xbmc.LOGDEBUG)
else: #Unknown
xbmc.log(msg='IARL: Unknown selection for metadata update for: '+str(xml_id), level=xbmc.LOGERROR)
elif tag_value == 'share_favorites_list':
xbmc.log(msg='IARL: Share Favorites List started for: '+str(xml_id), level=xbmc.LOGDEBUG)
share_my_iarl_favorite(xml_id)
else:
xbmc.log(msg='IARL: Context menu selection is not defined', level=xbmc.LOGERROR)
pass #Do Nothing
def update_context(xml_id_in,tag_value_in,context_label):
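	#Builds a single Kodi context-menu entry: a (label, action) tuple whose action
	#runs the update_xml_value route in the background for the given archive file and tag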
new_url = plugin.url_for('update_xml_value', xml_id=xml_id_in, tag_value = tag_value_in)
return (context_label, actions.background(new_url))
#Add Favorite (Context Menu Item)
@plugin.route('/update_favorites/<item_string>')
def update_favorite_items(item_string):
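	#Context-menu handler that snapshots the focused ListItem's info labels and
	#properties into iarl_data['current_rom_data'], then appends the game to a
	#favorites XML chosen (or created) by the user. ystr below maps Kodi's empty
	#InfoLabel strings to None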
ystr = lambda s: s if len(s) > 0 else None
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
iarl_data['current_rom_data']['rom_name'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_name)'))
iarl_data['current_rom_data']['rom_icon'] = ystr(xbmc.getInfoLabel('ListItem.Icon'))
iarl_data['current_rom_data']['rom_thumbnail'] = ystr(xbmc.getInfoLabel('ListItem.Thumb'))
iarl_data['current_rom_data']['rom_title'] = ystr(xbmc.getInfoLabel('ListItem.Title'))
iarl_data['current_rom_data']['rom_studio'] = ystr(xbmc.getInfoLabel('ListItem.Studio'))
iarl_data['current_rom_data']['rom_genre'] = ystr(xbmc.getInfoLabel('ListItem.Genre'))
iarl_data['current_rom_data']['rom_date'] = ystr(xbmc.getInfoLabel('ListItem.Date'))
iarl_data['current_rom_data']['rom_year'] = ystr(xbmc.getInfoLabel('ListItem.Year'))
iarl_data['current_rom_data']['rom_plot'] = ystr(xbmc.getInfoLabel('ListItem.Plot'))
iarl_data['current_rom_data']['rom_trailer'] = ystr(xbmc.getInfoLabel('ListItem.Trailer'))
iarl_data['current_rom_data']['rom_tag'] = ystr(xbmc.getInfoLabel('ListItem.Property(tag)'))
iarl_data['current_rom_data']['rom_nplayers'] = ystr(xbmc.getInfoLabel('ListItem.Property(nplayers)'))
iarl_data['current_rom_data']['rom_rating'] = ystr(xbmc.getInfoLabel('ListItem.Property(rating)'))
iarl_data['current_rom_data']['rom_esrb'] = ystr(xbmc.getInfoLabel('ListItem.Property(esrb)'))
iarl_data['current_rom_data']['rom_perspective'] = ystr(xbmc.getInfoLabel('ListItem.Property(perspective)'))
iarl_data['current_rom_data']['rom_label'] = ystr(xbmc.getInfoLabel('ListItem.Label'))
iarl_data['current_rom_data']['emu_ext_launch_cmd'] = ystr(xbmc.getInfoLabel('ListItem.Property(emu_ext_launch_cmd)')) #Needed to add this for xml favorites
iarl_data['current_rom_data']['emu_post_download_action'] = ystr(xbmc.getInfoLabel('ListItem.Property(emu_post_download_action)')) #Needed to add this for xml favorites
iarl_data['current_rom_data']['emu_download_path'] = ystr(xbmc.getInfoLabel('ListItem.Property(emu_download_path)')) #Needed to add this for xml favorites
if not iarl_data['settings']['hard_code_favorite_settings']: #Only provide link path to original XML
xbmc.log(msg='IARL: Generating IARL Favorite with plugin:// link', level=xbmc.LOGDEBUG)
iarl_data['current_rom_data']['rom_filenames'] = [ystr(xbmc.getInfoLabel('ListItem.FolderPath'))]
else: #Hard code settings into favorites XML
xbmc.log(msg='IARL: Generating IARL Favorite with hardcoded settings', level=xbmc.LOGDEBUG)
iarl_data['current_rom_data']['rom_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_supporting_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_supporting_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_save_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_save_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_save_supporting_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_save_supporting_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_emu_command'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_emu_command)'))
try:
iarl_data['current_rom_data']['rom_override_cmd'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_override_cmd)'))
except:
iarl_data['current_rom_data']['rom_override_cmd'] = None
try:
iarl_data['current_rom_data']['rom_override_postdl'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_override_postdl)'))
except:
iarl_data['current_rom_data']['rom_override_postdl'] = None
try:
iarl_data['current_rom_data']['rom_override_downloadpath'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_override_downloadpath)'))
except:
iarl_data['current_rom_data']['rom_override_downloadpath'] = None
iarl_data['current_rom_data']['rom_size'] = [int(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_file_sizes)').split(',')] #Split into list, convert to int
for ii in range(0,total_arts):
iarl_data['current_rom_data']['rom_fanarts'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(fanart'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_boxarts'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(boxart'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_banners'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(banner'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_snapshots'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(snapshot'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_logos'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(logo'+str(ii+1)+')'))
favorites_xml_filename = query_favorites_xml(iarl_data) #Find all the current favorite xml files, prompt for which to use, or make a new one
if favorites_xml_filename is not None:
		add_success = False #Default to False so the flag is defined if add_favorite_to_xml raises
		try:
add_success = add_favorite_to_xml(iarl_data,favorites_xml_filename)
if add_success:
current_dialog = xbmcgui.Dialog()
ok_ret = current_dialog.ok('Complete','Favorite Added:[CR]'+str(iarl_data['current_rom_data']['rom_name']))
xbmc.log(msg='IARL: Favorite was added: '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGNOTICE)
except:
xbmc.log(msg='IARL: There was an error adding the favorite '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
if add_success:
try:
cache_category_id = iarl_data['archive_data']['category_id'][iarl_data['archive_data']['emu_filepath'].index(favorites_xml_filename)]
clear_cache_success = delete_userdata_list_cache_file(cache_category_id)
except:
xbmc.log(msg='IARL: Unable to clear list_cache for the favorite list', level=xbmc.LOGERROR)
def update_context_favorite(item_in,context_label):
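	#Builds the 'Add to IARL Favorites' context-menu entry, routing to
	#update_favorite_items in the background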
new_url = plugin.url_for('update_favorite_items', item_string=item_in)
return (context_label, actions.background(new_url))
## Main Start/Index Page of Addon
@plugin.route('/')
def index():
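	#Builds the top-level directory: one entry per non-hidden archive (each with a
	#context menu), plus optional Search, Random Play, Last Played, and IARL Extras
	#items. If the Terms of Use have not been agreed to yet, the TOU window is
	#shown first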
items = []
initialize_userdata()
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
if len(iarl_data['archive_data']['emu_name'])<1: #This is a first run issue, check archive_data
iarl_data['archive_data'] = get_archive_info()
for ii in range(0,iarl_data['archive_data']['total_num_archives']):
#Generate the context menu
if iarl_data['settings']['enable_postdl_edit']:
context_menus = [update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_downloadpath','Update Download Path'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_postdlaction','Update Post DL Action'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_launcher','Update Launcher'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_ext_launch_cmd','Update Ext Launcher Command'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_launch_cmd_review','Review Launch Command'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'hide_archive','Hide This Archive'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'refresh_archive_cache','Refresh Archive Listing'),]
else:
context_menus = [update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_downloadpath','Update Download Path'),
			#update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_postdlaction','Update Post DL Action'), #Hidden by default since users shouldn't change this
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_launcher','Update Launcher'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_ext_launch_cmd','Update Ext Launcher Command'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'emu_launch_cmd_review','Review Launch Command'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'hide_archive','Hide This Archive'),
update_context(iarl_data['archive_data']['emu_filepath'][ii],'refresh_archive_cache','Refresh Archive Listing'),]
if 'favorites' in iarl_data['archive_data']['emu_category'][ii].lower(): #Add additional context to Favorites
context_menus = context_menus+[update_context(iarl_data['archive_data']['emu_filepath'][ii],'update_favorite_metadata','Update Favorite Metadata'),update_context(iarl_data['archive_data']['emu_filepath'][ii],'share_favorites_list','Share My List!'),]
if 'hidden' not in iarl_data['archive_data']['emu_category'][ii]: #Don't include the archive if it's tagged hidden
if 'alphabetical' in iarl_data['settings']['listing_convention'].lower(): #List alphabetically
current_plugin_path = plugin.url_for('get_rom_starting_letter_page', category_id=iarl_data['archive_data']['category_id'][ii])
else:
current_plugin_path = plugin.url_for('get_rom_page', category_id=iarl_data['archive_data']['category_id'][ii],page_id='1')
items.append(plugin._listitemify({
'label' : iarl_data['archive_data']['emu_name'][ii],
'path': current_plugin_path,
'icon': iarl_data['archive_data']['emu_logo'][ii],
'thumbnail' : iarl_data['archive_data']['emu_boxart'][ii],
'info' : {'genre': iarl_data['archive_data']['emu_category'][ii],
'credits': iarl_data['archive_data']['emu_author'][ii],
'date': iarl_data['archive_data']['emu_date'][ii],
'plot': iarl_data['archive_data']['emu_plot'][ii],
'trailer': get_youtube_plugin_url(iarl_data['archive_data']['emu_trailer'][ii]),
'FolderPath': iarl_data['archive_data']['emu_base_url'][ii]},
'properties' : {'fanart_image' : iarl_data['archive_data']['emu_fanart'][ii],
'banner' : iarl_data['archive_data']['emu_banner'][ii],
'clearlogo': iarl_data['archive_data']['emu_logo'][ii],
'poster': iarl_data['archive_data']['emu_boxart'][ii]},
'context_menu' : context_menus
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
items[-1].set_clearlogo(items[-1].get_property('clearlogo'))
items[-1].set_clearart(items[-1].get_property('clearlogo'))
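			#The five set_* calls above are repeated for every item added below; a small
			#helper would remove the duplication. A minimal sketch (the helper name is
			#illustrative, not part of the addon):
			#def _apply_art(list_item):
			#	list_item.set_banner(list_item.get_property('banner'))
			#	list_item.set_landscape(list_item.get_property('banner'))
			#	list_item.set_poster(list_item.get_property('poster'))
			#	list_item.set_clearlogo(list_item.get_property('clearlogo'))
			#	list_item.set_clearart(list_item.get_property('clearlogo'))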
#Append Search Function
if iarl_data['settings']['show_search_item']:
items.append(plugin._listitemify({
'label' : '\xc2\xa0Search',
'path' : plugin.url_for('search_roms_window'),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],'search.jpg'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],'search.jpg'),
'info' : {'genre': '\xc2\xa0',
'date': '01/01/2999',
'plot' : 'Search for a particular game.'},
'properties' : {'fanart_image' : os.path.join(iarl_data['addon_data']['addon_media_path'],'fanart.jpg'),
'banner' : os.path.join(iarl_data['addon_data']['addon_media_path'],'search_banner.jpg')}
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
items[-1].set_clearlogo(items[-1].get_property('clearlogo'))
items[-1].set_clearart(items[-1].get_property('clearlogo'))
#Append Random Play Function
if iarl_data['settings']['show_randomplay_item']:
items.append(plugin._listitemify({
'label' : '\xc2\xa0\xc2\xa0Random Play',
'path' : plugin.url_for('random_play'),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],'lucky.jpg'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],'lucky.jpg'),
'info' : {'genre': '\xc2\xa0\xc2\xa0', 'date': '01/01/2999', 'plot' : 'Play a random game from the archive.'},
'properties' : {'fanart_image' : os.path.join(iarl_data['addon_data']['addon_media_path'],'fanart.jpg'),
'banner' : os.path.join(iarl_data['addon_data']['addon_media_path'],'lucky_banner.jpg')}
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
items[-1].set_clearlogo(items[-1].get_property('clearlogo'))
items[-1].set_clearart(items[-1].get_property('clearlogo'))
#Append Last Played Function
if iarl_data['settings']['cache_list']: #Only show if history is turned ON
if iarl_data['settings']['show_history_item']: #And if enabled in settings
items.append(plugin._listitemify({
'label' : '\xc2\xa0\xc2\xa0\xc2\xa0Last Played',
'path' : plugin.url_for('last_played'),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],'last_played.jpg'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],'last_played.jpg'),
'info' : {'genre': '\xc2\xa0\xc2\xa0\xc2\xa0', 'date': '01/01/2999', 'plot' : 'View your game history.'},
'properties' : {'fanart_image' : os.path.join(iarl_data['addon_data']['addon_media_path'],'fanart.jpg'),
'banner' : os.path.join(iarl_data['addon_data']['addon_media_path'],'last_played_banner.jpg')}
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
items[-1].set_clearlogo(items[-1].get_property('clearlogo'))
items[-1].set_clearart(items[-1].get_property('clearlogo'))
#Append IARL Extras
if iarl_data['settings']['show_extras_item']:
extras_content = get_iarl_extras_update_content()
extras_plot = 'Download extra game lists from the community.'
extras_date = '01/01/2999'
if len(extras_content)>0:
try:
extras_date = extras_content.split('<last_update>')[1].split('</last_update>')[0]
extras_plot = extras_plot+'[CR]Last Updated: '+str(extras_date)+'[CR]Latest Additions: '+extras_content.split('<last_update_comment>')[1].split('</last_update_comment>')[0]
except:
extras_date = '01/01/2999'
extras_plot = 'Download extra game lists from the community.'
items.append(plugin._listitemify({
'label' : '\xc2\xa0IARL Extras',
'path' : plugin.url_for('get_iarl_extras'),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],'iarl_extras.jpg'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],'iarl_extras.jpg'),
'info' : {'genre': '\xc2\xa0',
'date': extras_date,
'plot' : extras_plot},
'properties' : {'fanart_image' : os.path.join(iarl_data['addon_data']['addon_media_path'],'fanart.jpg'),
'banner' : os.path.join(iarl_data['addon_data']['addon_media_path'],'extras_banner.png')}
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
items[-1].set_clearlogo(items[-1].get_property('clearlogo'))
items[-1].set_clearart(items[-1].get_property('clearlogo'))
#if TOU has not been agreed to, show TOU window first
if not iarl_data['settings']['hidden_setting_tou_agree']:
MyTOUWindow = TOUWindow('TOU.xml',iarl_data['addon_data']['addon_install_path'],'Default','720p')
MyTOUWindow.doModal()
if 'true' in xbmcaddon.Addon(id='plugin.program.iarl').getSetting(id='iarl_setting_tou'):
return plugin.finish(items, update_listing=True, sort_methods=[xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_GENRE])
else:
return plugin.finish([], update_listing=True)
else:
return plugin.finish(items, update_listing=True, sort_methods=[xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_GENRE])
@plugin.route('/Emulator/<category_id>/<page_id>')
def get_rom_page(category_id,page_id):
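	#Lists one page of games for the archive identified by category_id. page_id is
	#either a plain page number or 'letter,page' when browsing alphabetically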
#Re-scrape the current archive data if the index was not first visited
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
#Define current archive data based on the route category_id
try:
current_index = iarl_data['archive_data']['category_id'].index(category_id)
except:
xbmc.log(msg='IARL: The archive '+str(category_id)+' could not be found.', level=xbmc.LOGERROR)
current_index = None
if current_index is not None:
iarl_data['current_archive_data'] = define_current_archive_data(iarl_data,current_index,page_id)
if ',' in page_id: #If the list was requested alphabetically, define the page and alpha ID
alpha_id = page_id.split(',')[0]
page_id = page_id.split(',')[-1]
else:
alpha_id = None
#Parse XML ROM List
try:
if alpha_id is None: #No Alpha ID = One Big List
rom_list = [plugin._listitemify(x) for x in get_rom_list(iarl_data,current_index)]
else: #Only games that start with the selected letter
			if '#' in alpha_id: #List everything that doesn't start with a letter
				rom_list = [plugin._listitemify(list_item) for list_item in get_rom_list(iarl_data,current_index) if not list_item['label'][:1].isalpha()] #Check only the first character; isalpha() on the whole label would also catch titles containing spaces or digits
			else: #List everything that starts with the selected letter
				rom_list = [plugin._listitemify(list_item) for list_item in get_rom_list(iarl_data,current_index) if list_item['label'].lower().startswith(alpha_id.lower())]
except:
xbmc.log(msg='IARL: Unable to get ROM List: %s'%str(sys.exc_info()[0]), level=xbmc.LOGERROR)
		rom_list = [] #Use an empty list so the loop and pagination below don't choke on None
items = list()
for ii in range(0,len(rom_list)):
rom_list[ii].set_banner(rom_list[ii].get_property('banner'))
rom_list[ii].set_landscape(rom_list[ii].get_property('banner'))
rom_list[ii].set_poster(rom_list[ii].get_property('poster'))
rom_list[ii].set_clearlogo(rom_list[ii].get_property('clearlogo'))
rom_list[ii].set_clearart(rom_list[ii].get_property('clearlogo'))
#Paginate results
page = paginate.Page(rom_list, page=page_id, items_per_page=iarl_data['settings']['items_per_page_setting'])
#Create Page Controls
next_page = []
prev_page = []
if alpha_id is None: #One Big List
prev_page_str = str(page.previous_page)
next_page_str = str(page.next_page)
else:
prev_page_str = alpha_id+','+str(page.previous_page)
next_page_str = alpha_id+','+str(page.next_page)
prev_page.append(plugin._listitemify({
'label' : '\xc2\xa0Prev <<',
'path' : plugin.url_for('get_rom_page', category_id=category_id,page_id=prev_page_str),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],'Previous.png'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],'Previous.png'),
'info' : {'genre': '\xc2\xa0',
'date': '01/01/2999',
'plot' : 'Page ' + str(page.page) + ' of ' + str(page.page_count) + '. Prev page is ' + str(page.previous_page) + '. Total of ' + str(page.item_count) + ' games in this archive.'}
}))
next_page.append(plugin._listitemify({
'label' : '\xc2\xa0Next >>',
'path' : plugin.url_for('get_rom_page', category_id=category_id,page_id=next_page_str),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],'Next.png'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],'Next.png'),
'info' : {'genre': '\xc2\xa0',
'date': '01/01/2999',
'plot' : 'Page ' + str(page.page) + ' of ' + str(page.page_count) + '. Next page is ' + str(page.next_page) + '. Total of ' + str(page.item_count) + ' games in this archive.'}
}))
#Define the listitems to display
current_page = page.items
#Add next and prev page listitems
if iarl_data['settings']['hard_coded_include_back_link']:
if page.previous_page:
current_page.extend(prev_page)
if page.next_page:
current_page.extend(next_page)
# # plugin.finish(succeeded=True, update_listing=True,sort_methods=[xbmcplugin.SORT_METHOD_NONE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_DATE, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE])
return plugin.finish(current_page, sort_methods=[xbmcplugin.SORT_METHOD_NONE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_DATE, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE])
@plugin.route('/Emulator_Alpha/<category_id>')
def get_rom_starting_letter_page(category_id):
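	#Shows the A-Z (plus '#') index for an archive; each letter routes to
	#get_rom_page with a 'letter,1' page_id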
items = []
alpha_pages = ['#','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
for alpha_page in alpha_pages:
if '#' in alpha_page:
alpha_image_id = 'Numeric'
else:
alpha_image_id = alpha_page
items.append(plugin._listitemify({
'label' : alpha_page,
'path': plugin.url_for('get_rom_page', category_id=category_id,page_id=alpha_page+',1'),
'icon': os.path.join(iarl_data['addon_data']['addon_media_path'],alpha_image_id+'.png'),
'thumbnail' : os.path.join(iarl_data['addon_data']['addon_media_path'],alpha_image_id+'.png'),
'properties' : {'fanart_image' : os.path.join(iarl_data['addon_data']['addon_media_path'],'fanart.jpg'),
'banner' : os.path.join(iarl_data['addon_data']['addon_media_path'],alpha_image_id+'_banner.png')}
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
return plugin.finish(items, sort_methods=[xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE])
# @plugin.cached(TTL=24*60*30) #Using custom cache saving functions now
def get_rom_list(iarl_data,current_index):
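	#Returns the parsed game list for the archive at current_index. When list
	#caching is enabled, a pickled copy from the userdata list_cache folder is used
	#if present; otherwise the XML is parsed, an 'Add to IARL Favorites' context
	#menu is attached to each item, and the result is cached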
if iarl_data['settings']['cache_list']: #Try to load a cached list, otherwise parse and save it
if os.path.isfile(os.path.join(iarl_data['addon_data']['addon_list_cache_path'],iarl_data['archive_data']['category_id'][current_index]+'.pickle')): #Cached list exists
load_success, rom_list = load_userdata_list_cache_file(iarl_data['archive_data']['category_id'][current_index])
if not load_success:
xbmc.log(msg='IARL: Error loading cached list, re-parsing list instead', level=xbmc.LOGDEBUG)
rom_list = parse_xml_romfile(iarl_data,current_index,plugin)
for ii in range(0,len(rom_list)):
rom_list[ii]['context_menu'] = [update_context_favorite('%s'%str(rom_list[ii]['label2']),'Add to IARL Favorites')]
else:
rom_list = parse_xml_romfile(iarl_data,current_index,plugin)
for ii in range(0,len(rom_list)):
rom_list[ii]['context_menu'] = [update_context_favorite('%s'%str(rom_list[ii]['label2']),'Add to IARL Favorites')]
save_success = save_userdata_list_cache_file(rom_list,iarl_data['archive_data']['category_id'][current_index])
	else: #Cached list option is not selected
rom_list = parse_xml_romfile(iarl_data,current_index,plugin)
for ii in range(0,len(rom_list)):
rom_list[ii]['context_menu'] = [update_context_favorite('%s'%str(rom_list[ii]['label2']),'Add to IARL Favorites')]
return rom_list
@plugin.route('/Emulator/<category_id>/Game/<romname>')
def get_selected_rom(category_id,romname):
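	#Entry point when a game is selected. Populates iarl_data['current_rom_data']
	#from the focused ListItem when possible, or by re-scraping the archive XML
	#(e.g. for favorites/bookmark routes), then performs the game select action
	#chosen in settings (info window, download and launch, or download only)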
ystr = lambda s: s if len(s) > 0 else None
list_item_available = False
try:
current_index = iarl_data['archive_data']['category_id'].index(category_id)
except:
xbmc.log(msg='IARL: The archive '+str(category_id)+' could not be found.', level=xbmc.LOGERROR)
current_index = None
if current_index is not None:
iarl_data['current_archive_data'] = define_current_archive_data(iarl_data,current_index,None)
if len(xbmc.getInfoLabel('Listitem.Title'))>0:
if len(xbmc.getInfoLabel('ListItem.Property(rom_filenames)'))>0:
if 'plugin://' not in xbmc.getInfoLabel('ListItem.Property(rom_filenames)'): #Added for favorites bookmarks
list_item_available = True
if not list_item_available:
#The listitem is not defined, so we'll need to rescrape the xml for the game (most likely a favorite or other URL route)
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
#Define current archive data based on the route category_id
rom_list = get_rom_list(iarl_data,current_index)
try:
rom_idx = [romnames['label2'] for romnames in rom_list].index(romname)
except:
xbmc.log(msg='IARL: Unable to find the requested game '+str(romname), level=xbmc.LOGERROR)
rom_idx = None
#Define current_data by the rescraped rom_idx
if rom_idx is not None:
iarl_data['current_rom_data']['rom_name'] = rom_list[rom_idx]['properties']['rom_name']
iarl_data['current_rom_data']['rom_icon'] = rom_list[rom_idx]['properties']['rom_icon']
iarl_data['current_rom_data']['rom_thumbnail'] = rom_list[rom_idx]['properties']['rom_thumbnail']
iarl_data['current_rom_data']['rom_title'] = rom_list[rom_idx]['properties']['rom_title']
iarl_data['current_rom_data']['rom_studio'] = rom_list[rom_idx]['properties']['rom_studio']
iarl_data['current_rom_data']['rom_genre'] = rom_list[rom_idx]['properties']['rom_genre']
iarl_data['current_rom_data']['rom_date'] = rom_list[rom_idx]['properties']['rom_date']
iarl_data['current_rom_data']['rom_year'] = rom_list[rom_idx]['properties']['rom_year']
iarl_data['current_rom_data']['rom_plot'] = rom_list[rom_idx]['properties']['rom_plot']
iarl_data['current_rom_data']['rom_trailer'] = rom_list[rom_idx]['properties']['rom_trailer']
iarl_data['current_rom_data']['rom_tag'] = rom_list[rom_idx]['properties']['tag']
iarl_data['current_rom_data']['rom_nplayers'] = rom_list[rom_idx]['properties']['nplayers']
iarl_data['current_rom_data']['rom_rating'] = rom_list[rom_idx]['properties']['rating']
iarl_data['current_rom_data']['rom_esrb'] = rom_list[rom_idx]['properties']['esrb']
iarl_data['current_rom_data']['rom_perspective'] = rom_list[rom_idx]['properties']['perspective']
iarl_data['current_rom_data']['rom_emu_command'] = ystr(rom_list[rom_idx]['properties']['rom_emu_command'])
			try: #Leave as a try statement for now, to catch any issues with old lists that don't include these values
iarl_data['current_rom_data']['rom_override_cmd'] = ystr(rom_list[rom_idx]['properties']['rom_override_cmd'])
except:
iarl_data['current_rom_data']['rom_override_cmd'] = None
			try: #Leave as a try statement for now, to catch any issues with old lists that don't include these values
iarl_data['current_rom_data']['rom_override_postdl'] = ystr(rom_list[rom_idx]['properties']['rom_override_postdl'])
except:
iarl_data['current_rom_data']['rom_override_postdl'] = None
			try: #Leave as a try statement for now, to catch any issues with old lists that don't include these values
iarl_data['current_rom_data']['rom_override_downloadpath'] = ystr(rom_list[rom_idx]['properties']['rom_override_downloadpath'])
except:
iarl_data['current_rom_data']['rom_override_downloadpath'] = None
iarl_data['current_rom_data']['rom_label'] = rom_list[rom_idx]['properties']['rom_label']
iarl_data['current_rom_data']['rom_filenames'] = [ystr(x) for x in rom_list[rom_idx]['properties']['rom_filenames'].split(',')] #Split into list
iarl_data['current_rom_data']['rom_supporting_filenames'] = [ystr(x) for x in rom_list[rom_idx]['properties']['rom_supporting_filenames'].split(',')] #Split into list
iarl_data['current_rom_data']['rom_save_filenames'] = [ystr(x) for x in rom_list[rom_idx]['properties']['rom_save_filenames'].split(',')] #Split into list
iarl_data['current_rom_data']['rom_save_supporting_filenames'] = [ystr(x) for x in rom_list[rom_idx]['properties']['rom_save_supporting_filenames'].split(',')] #Split into list
iarl_data['current_rom_data']['rom_size'] = [int(x) for x in rom_list[rom_idx]['properties']['rom_file_sizes'].split(',')] #Split into list, convert to int
for ii in range(0,total_arts):
iarl_data['current_rom_data']['rom_fanarts'][ii] = ystr(rom_list[rom_idx]['properties']['fanart'+str(ii+1)])
iarl_data['current_rom_data']['rom_boxarts'][ii] = ystr(rom_list[rom_idx]['properties']['boxart'+str(ii+1)])
iarl_data['current_rom_data']['rom_banners'][ii] = ystr(rom_list[rom_idx]['properties']['banner'+str(ii+1)])
iarl_data['current_rom_data']['rom_snapshots'][ii] = ystr(rom_list[rom_idx]['properties']['snapshot'+str(ii+1)])
iarl_data['current_rom_data']['rom_logos'][ii] = ystr(rom_list[rom_idx]['properties']['logo'+str(ii+1)])
else:
#Define current_data by the selected list item
iarl_data['current_rom_data']['rom_name'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_name)'))
iarl_data['current_rom_data']['rom_icon'] = ystr(xbmc.getInfoLabel('ListItem.Icon'))
iarl_data['current_rom_data']['rom_thumbnail'] = ystr(xbmc.getInfoLabel('ListItem.Thumb'))
iarl_data['current_rom_data']['rom_title'] = ystr(xbmc.getInfoLabel('ListItem.Title'))
iarl_data['current_rom_data']['rom_studio'] = ystr(xbmc.getInfoLabel('ListItem.Studio'))
iarl_data['current_rom_data']['rom_genre'] = ystr(xbmc.getInfoLabel('ListItem.Genre'))
iarl_data['current_rom_data']['rom_date'] = ystr(xbmc.getInfoLabel('ListItem.Date'))
iarl_data['current_rom_data']['rom_year'] = ystr(xbmc.getInfoLabel('ListItem.Year'))
iarl_data['current_rom_data']['rom_plot'] = ystr(xbmc.getInfoLabel('ListItem.Plot'))
iarl_data['current_rom_data']['rom_trailer'] = ystr(xbmc.getInfoLabel('ListItem.Trailer'))
iarl_data['current_rom_data']['rom_tag'] = ystr(xbmc.getInfoLabel('ListItem.Property(tag)'))
iarl_data['current_rom_data']['rom_nplayers'] = ystr(xbmc.getInfoLabel('ListItem.Property(nplayers)'))
iarl_data['current_rom_data']['rom_rating'] = ystr(xbmc.getInfoLabel('ListItem.Property(rating)'))
iarl_data['current_rom_data']['rom_esrb'] = ystr(xbmc.getInfoLabel('ListItem.Property(esrb)'))
iarl_data['current_rom_data']['rom_perspective'] = ystr(xbmc.getInfoLabel('ListItem.Property(perspective)'))
iarl_data['current_rom_data']['rom_emu_command'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_emu_command)'))
try:
iarl_data['current_rom_data']['rom_override_cmd'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_override_cmd)'))
except:
iarl_data['current_rom_data']['rom_override_cmd'] = None
try:
iarl_data['current_rom_data']['rom_override_postdl'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_override_postdl)'))
except:
iarl_data['current_rom_data']['rom_override_postdl'] = None
try:
iarl_data['current_rom_data']['rom_override_downloadpath'] = ystr(xbmc.getInfoLabel('ListItem.Property(rom_override_downloadpath)'))
except:
iarl_data['current_rom_data']['rom_override_downloadpath'] = None
iarl_data['current_rom_data']['rom_label'] = ystr(xbmc.getInfoLabel('ListItem.Label'))
iarl_data['current_rom_data']['rom_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_supporting_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_supporting_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_save_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_save_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_save_supporting_filenames'] = [ystr(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_save_supporting_filenames)').split(',')] #Split into list
iarl_data['current_rom_data']['rom_size'] = [int(x) for x in xbmc.getInfoLabel('ListItem.Property(rom_file_sizes)').split(',')] #Split into list, convert to int
for ii in range(0,total_arts):
iarl_data['current_rom_data']['rom_fanarts'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(fanart'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_boxarts'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(boxart'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_banners'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(banner'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_snapshots'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(snapshot'+str(ii+1)+')'))
iarl_data['current_rom_data']['rom_logos'][ii] = ystr(xbmc.getInfoLabel('ListItem.Property(logo'+str(ii+1)+')'))
if 'plugin://plugin.program.iarl' in iarl_data['current_rom_data']['rom_filenames'][0]: #IARL Favorites bookmark link, will link back to original xml listing
plugin.redirect('plugin://'+iarl_data['current_rom_data']['rom_filenames'][0].split('plugin://')[-1])
else:
check_for_warn(iarl_data['current_rom_data']['rom_size']) #Added warning for file sizes over 100MB
#Show ROM Info window, skins can override the default window by including script-IARL-infodialog.xml in their skin
if 'ROM Info Page'.lower() in iarl_data['settings']['game_select_action'].lower():
MyROMWindow = ROMWindow('script-IARL-infodialog.xml',iarl_data['addon_data']['addon_install_path'],'Default','720p',iarl_data=iarl_data)
MyROMWindow.doModal()
#Download and launch selected in settings
elif 'Download and Launch'.lower() in iarl_data['settings']['game_select_action'].lower():
download_and_launch_rom(None,iarl_data)
#Download only selected in settings
elif 'Download Only'.lower() in iarl_data['settings']['game_select_action'].lower():
iarl_data['current_save_data'] = download_rom_only(iarl_data)
if iarl_data['current_save_data']['overall_download_success']:
current_dialog = xbmcgui.Dialog()
ok_ret = current_dialog.ok('Complete',iarl_data['current_rom_data']['rom_name']+' was successfully downloaded')
else:
xbmc.log(msg='IARL: Selected game action is unknown', level=xbmc.LOGERROR)
pass #Shouldn't ever see this
pass
@plugin.route('/Search_Results/<search_term>') #Not sure why normal routing with extra kwargs isn't working for this route...
def search_roms_results(search_term,**kwargs):
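	#Builds search results across the selected archives. search_term comes from
	#the route; the optional filters (include_archives, adv_search, region, genre,
	#studio, nplayers, datefrom, dateto) arrive as query kwargs and default to
	#'any'/all below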
# xbmc.executebuiltin("Dialog.Close(all, true)")
search_results = []
current_search_term = search_term.lower().strip()
	# args_in = plugin.request.args #This doesn't work in this instance when using url_for?
try:
current_includes = kwargs['include_archives'].split(',')
except:
current_includes = 'all'
try:
current_adv_search = kwargs['adv_search']
except:
current_adv_search = 'False'
try:
current_region = kwargs['region'].lower().strip()
except:
current_region = 'any'
try:
current_genre = kwargs['genre'].lower().strip()
except:
current_genre = 'any'
try:
current_studio = kwargs['studio'].lower().strip()
except:
current_studio = 'any'
try:
current_nplayers = kwargs['nplayers'].lower().strip()
except:
current_nplayers = 'any'
try:
current_datefrom = kwargs['datefrom'].lower().strip()
except:
current_datefrom = 'any'
try:
current_dateto = kwargs['dateto'].lower().strip()
except:
current_dateto = 'any'
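	#The eight try/except blocks above could be condensed with a small helper; a
	#minimal sketch, assuming the kwargs values are plain strings (the helper name
	#is illustrative, not part of the addon):
	#def _kwarg_or(kwargs_in, key, default='any'):
	#	try:
	#		return kwargs_in[key].lower().strip()
	#	except:
	#		return default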
if current_datefrom == 'any':
		datefrom_num = 1950 #Arbitrary year well before any archived game was released
else:
try:
datefrom_num = int(current_datefrom.lower().strip().split('/')[-1]) #No checking... yet
except:
xbmc.log(msg='IARL: Search start date is badly formatted, default year used', level=xbmc.LOGERROR)
			datefrom_num = 1950 #Fall back to the default start year on a badly formatted date
if current_dateto == 'any':
		dateto_num = 2999 #Arbitrary far-future year; this code will be long gone by then
else:
try:
dateto_num = int(current_dateto.lower().strip().split('/')[-1])+1
except:
xbmc.log(msg='IARL: Search end date is badly formatted, default year used', level=xbmc.LOGERROR)
			dateto_num = 2999 #Fall back to the default end year on a badly formatted date
date_list = range(datefrom_num,dateto_num) #List of years to look for
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
# #Create the search dict for archives that are not hidden
# search_archive_data = dict()
# for kk in iarl_data['archive_data'].keys():
# search_archive_data[kk] = list()
# for ii in range(0,len(iarl_data['archive_data']['emu_category'])):
# if 'hidden' not in iarl_data['archive_data']['emu_category'][ii]:
# for kk in iarl_data['archive_data'].keys():
# try:
# search_archive_data[kk].append(iarl_data['archive_data'][kk][ii])
# except:
# pass
progress_dialog = xbmcgui.DialogProgress()
progress_dialog.create('IARL', 'Searching...')
	#This probably isn't a very efficient method for filtering. Need to look into lambda dict filtering
if current_adv_search == 'False':
for ii in range(0,len(current_includes)):
progress_dialog.update(max(1,int(100*ii/len(current_includes))-10), 'Looking in '+iarl_data['archive_data']['emu_name'][ii])
if current_includes[ii] == '1':
iarl_data['current_archive_data'] = define_current_archive_data(iarl_data,ii,None)
if current_search_term == 'any':
for roms_in_list in get_rom_list(iarl_data,ii):
						if (progress_dialog.iscanceled()):
							xbmc.log(msg='IARL: Search was cancelled by the user', level=xbmc.LOGDEBUG)
							progress_dialog.close() #Close the dialog before bailing out
							return
search_results.append(roms_in_list)
else:
for roms_in_list in get_rom_list(iarl_data,ii):
						if (progress_dialog.iscanceled()):
							xbmc.log(msg='IARL: Search was cancelled by the user', level=xbmc.LOGDEBUG)
							progress_dialog.close() #Close the dialog before bailing out
							return
if current_search_term in roms_in_list['label'].lower().strip(): #search term is in label
search_results.append(roms_in_list)
else:
for ii in range(0,len(current_includes)):
progress_dialog.update(max(1,int(100*ii/len(current_includes))-10), 'Looking in '+iarl_data['archive_data']['emu_name'][ii])
if current_includes[ii] == '1':
iarl_data['current_archive_data'] = define_current_archive_data(iarl_data,ii,None)
for roms_in_list in get_rom_list(iarl_data,ii):
					if (progress_dialog.iscanceled()):
						xbmc.log(msg='IARL: Search was cancelled by the user', level=xbmc.LOGDEBUG)
						progress_dialog.close() #Close the dialog before bailing out
						return
include_this_rom = True #Default to include rom
try:
						if (current_search_term not in roms_in_list['label'].lower().strip()) and (current_search_term != 'any'):
include_this_rom = False #Filter out rom if the search term is not in the label and the search term isn't "any"
except:
pass
# include_this_rom = False
try:
						if (current_genre not in roms_in_list['info']['genre'].lower().strip()) and (current_genre != 'any'):
include_this_rom = False
except:
pass
# include_this_rom = False
try:
						if (current_studio not in roms_in_list['info']['studio'].lower().strip()) and (current_studio != 'any'):
include_this_rom = False
except:
pass
# include_this_rom = False
try:
						if (current_nplayers not in roms_in_list['properties']['nplayers'].lower().strip()) and (current_nplayers != 'any'):
include_this_rom = False
except:
pass
# include_this_rom = False
try:
						if (current_region not in roms_in_list['properties']['rom_tag'].lower().strip()) and (current_region != 'any'):
include_this_rom = False
except:
pass
# include_this_rom = False
try:
if (int(roms_in_list['info']['date'][-4:]) not in date_list):
include_this_rom = False
except:
pass
# include_this_rom = False
if include_this_rom: #Append to the list if include tag is still true
search_results.append(roms_in_list)
progress_dialog.update(95, 'Compiling Results...')
xbmc.log(msg='IARL: Search found '+str(len(search_results))+' matches', level=xbmc.LOGDEBUG)
	progress_dialog.close() #Close before returning; the original close sat after the return and was unreachable
	return plugin.finish(search_results,cache_to_disc=True,sort_methods=[xbmcplugin.SORT_METHOD_NONE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_DATE, xbmcplugin.SORT_METHOD_GENRE, xbmcplugin.SORT_METHOD_STUDIO_IGNORE_THE])
@plugin.route('/Search')
def search_roms_window():
MySearchWindow = SearchWindow('search.xml',iarl_data['addon_data']['addon_install_path'],'Default','720p')
MySearchWindow.doModal()
pass
@plugin.route('/Random')
def random_play():
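	#Picks a random non-hidden archive, then a random game from it, and presents
	#that single game as the directory listing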
import random
if iarl_data['archive_data'] is None:
iarl_data['archive_data'] = get_archive_info()
iarl_data_2 = { #Create temp dict to populate non-hidden archives into
'emu_filepath' : list(),
}
for ii in range(0,len(iarl_data['archive_data']['emu_name'])):
if 'hidden' not in iarl_data['archive_data']['emu_category'][ii]: #Don't include the archive if it's tagged hidden
iarl_data_2['emu_filepath'].append(iarl_data['archive_data']['emu_filepath'][ii])
	rand_int_1 = random.randint(0,len(iarl_data_2['emu_filepath'])-1) #randint is inclusive on both ends, so subtract 1 to stay in range
# rand_int_1 = 0 #For testing
try:
current_index = iarl_data['archive_data']['emu_filepath'].index(iarl_data_2['emu_filepath'][rand_int_1])
except:
try:
			rand_int_1 = random.randint(0,len(iarl_data_2['emu_filepath'])-1)
current_index = iarl_data['archive_data']['emu_filepath'].index(iarl_data_2['emu_filepath'][rand_int_1])
except:
xbmc.log(msg='IARL: Unable to generate a random archive for some unknown reason, try again', level=xbmc.LOGERROR)
current_index = None
if current_index is not None:
iarl_data['current_archive_data'] = define_current_archive_data(iarl_data,current_index,None)
try:
rom_list = get_rom_list(iarl_data,current_index)
except:
rom_list = None
try:
			rand_int_2 = random.randint(1,len(rom_list)) #Pages are 1-based with one item per page
page = paginate.Page(rom_list, page=rand_int_2, items_per_page=1)
except:
page = None
try:
xbmc.log(msg='IARL: Random play archive: '+str(page.items[0]['properties']['emu_name'])+', game: '+str(page.items[0]['properties']['rom_title']), level=xbmc.LOGDEBUG)
except:
pass
return plugin.finish(page.items,update_listing=False)
else:
return plugin.finish([],update_listing=False)
# pass
@plugin.route('/History')
def last_played():
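	#Displays the saved game history list (iarl_history.pickle), if one exists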
if os.path.isfile(os.path.join(iarl_data['addon_data']['addon_list_cache_path'],'iarl_history.pickle')): #Cached list exists
xbmc.log(msg='IARL: Loading game history file', level=xbmc.LOGDEBUG)
load_success, rom_list = load_userdata_list_cache_file('iarl_history')
else:
load_success = False
xbmc.log(msg='IARL: No game history file was found', level=xbmc.LOGNOTICE)
if load_success:
return rom_list
else:
pass
@plugin.route('/Extras')
def get_iarl_extras():
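	#Lists the community-contributed extra archive XMLs; selecting one routes to
	#download_iarl_extra with the dat file's name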
load_success, extras_data = load_iarl_extras()
items = []
if load_success:
for ii in range(0,len(extras_data['emu_extras_filename'])):
items.append(plugin._listitemify({
'label' : extras_data['emu_name'][ii],
'path': plugin.url_for('download_iarl_extra', xml_filename=extras_data['emu_extras_filename'][ii].split('/')[-1]),
'icon': extras_data['emu_logo'][ii],
'thumbnail' : extras_data['emu_thumb'][ii],
'info' : {'date': extras_data['emu_date'][ii],
'plot': extras_data['emu_plot'][ii],
'trailer': get_youtube_plugin_url(extras_data['emu_trailer'][ii])},
'properties' : {'fanart_image' : extras_data['emu_fanart'][ii],
'banner' : extras_data['emu_banner'][ii],
'clearlogo': extras_data['emu_logo'][ii],
'poster': extras_data['emu_thumb'][ii]},
# 'context_menu' : context_menus
}))
items[-1].set_banner(items[-1].get_property('banner'))
items[-1].set_landscape(items[-1].get_property('banner'))
items[-1].set_poster(items[-1].get_property('poster'))
items[-1].set_clearlogo(items[-1].get_property('clearlogo'))
items[-1].set_clearart(items[-1].get_property('clearlogo'))
return plugin.finish(items, update_listing=True, sort_methods=[xbmcplugin.SORT_METHOD_NONE, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE, xbmcplugin.SORT_METHOD_DATE])
@plugin.route('/Extras/<xml_filename>')
def download_iarl_extra(xml_filename):
extra_datfile_base_url = 'https://raw.githubusercontent.com/zach-morris/iarl.extras/master/dat_files/'
xbmc.log(msg='IARL: Requesting IARL extras file: '+str(extra_datfile_base_url+xml_filename), level=xbmc.LOGDEBUG)
download_success = download_iarl_extra_file(str(extra_datfile_base_url+xml_filename))
if download_success:
xbmc.log(msg='IARL: IARL extras file was downloaded: '+str(xml_filename), level=xbmc.LOGDEBUG)
else:
xbmc.log(msg='IARL: IARL extras file download failed: '+str(xml_filename), level=xbmc.LOGDEBUG)
pass
def download_rom_only(iarl_data):
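	#Downloads the selected game and any supporting files to the configured path,
	#honoring the local file overwrite policy, then runs the configured
	#post-download action. Returns the populated current_save_data dict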
xbmc.log(msg='IARL: Download started for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGNOTICE)
#Initialize current save data dict since it may have been populated with the last selected game
iarl_data['current_save_data']['rom_save_filenames'] = list()
iarl_data['current_save_data']['rom_save_filenames_exist'] = list()
iarl_data['current_save_data']['matching_rom_save_filenames'] = list()
iarl_data['current_save_data']['rom_save_filenames_success'] = list()
iarl_data['current_save_data']['rom_supporting_filenames'] = list()
iarl_data['current_save_data']['rom_save_supporting_filenames'] = list()
iarl_data['current_save_data']['rom_save_supporting_filenames_exist'] = list()
iarl_data['current_save_data']['matching_rom_save_supporting_filenames'] = list()
iarl_data['current_save_data']['rom_save_supporting_filenames_success'] = list()
iarl_data['current_save_data']['rom_converted_filenames'] = list()
iarl_data['current_save_data']['rom_converted_filenames_success'] = list()
iarl_data['current_save_data']['rom_converted_supporting_filenames'] = list()
iarl_data['current_save_data']['rom_converted_supporting_filenames_success'] = list()
iarl_data['current_save_data']['overwrite_existing_files'] = False #Default to not overwrite existing files, then check
iarl_data['current_save_data']['overall_download_success'] = True #Default to a good download, then check afterward
iarl_data['current_save_data']['overall_conversion_success'] = True #Default to a good conversion, then check afterward
iarl_data['current_save_data']['launch_filename'] = None #Default to no launch filename (will be populated by post_download_action)
#1. Check temp folder and clean if necessary
if 'default' in iarl_data['current_archive_data']['emu_download_path']:
check_temp_folder_and_clean(iarl_data['settings']['download_cache'])
#1b. Check about matching policy
exact_match_check = False #Default to False
if iarl_data['current_rom_data']['rom_override_postdl'] is not None and len(iarl_data['current_rom_data']['rom_override_postdl']) > 0:
try:
if iarl_data['current_rom_data']['rom_override_postdl'] in ['none','None','NONE']: #If the file will not be processed, it needs to match exactly
exact_match_check = True
else:
exact_match_check = False
except:
exact_match_check = False
else:
try:
if iarl_data['current_archive_data']['emu_post_download_action'] in ['none','None','NONE']: #If the file will not be processed, it needs to match exactly
exact_match_check = True
else:
exact_match_check = False
except:
exact_match_check = False
#2. Check if filename(s) already exist
for filenames in iarl_data['current_rom_data']['rom_save_filenames']:
if filenames:
if filenames.lower() != 'none':
iarl_data['current_save_data']['rom_save_filenames'].append(filenames)
# if os.path.exists(filenames):
file_exists_wc, file_found_wc = check_file_exists_wildcard(filenames,iarl_data['current_rom_data']['rom_name'],exact_match_check)
if file_exists_wc:
iarl_data['current_save_data']['rom_save_filenames_exist'].append(True)
iarl_data['current_save_data']['matching_rom_save_filenames'].append(file_found_wc)
iarl_data['current_save_data']['rom_save_filenames_success'].append(True)
else:
iarl_data['current_save_data']['rom_save_filenames_exist'].append(False)
iarl_data['current_save_data']['matching_rom_save_filenames'].append(None)
iarl_data['current_save_data']['rom_save_filenames_success'].append(False)
for filenames in iarl_data['current_rom_data']['rom_save_supporting_filenames']:
if filenames:
if filenames.lower() != 'none':
iarl_data['current_save_data']['rom_save_supporting_filenames'].append(filenames)
# if os.path.exists(filenames):
file_exists_wc, file_found_wc = check_file_exists_wildcard(filenames,iarl_data['current_rom_data']['rom_name'],True) #Supporting files require the exact correct name
if file_exists_wc:
iarl_data['current_save_data']['rom_save_supporting_filenames_exist'].append(True)
iarl_data['current_save_data']['matching_rom_save_supporting_filenames'].append(file_found_wc)
iarl_data['current_save_data']['rom_save_supporting_filenames_success'].append(True)
else:
iarl_data['current_save_data']['rom_save_supporting_filenames_exist'].append(False)
iarl_data['current_save_data']['matching_rom_save_supporting_filenames'].append(None)
iarl_data['current_save_data']['rom_save_supporting_filenames_success'].append(False)
#3. Determine action if file already exists
if (True in iarl_data['current_save_data']['rom_save_filenames_exist']) or (True in iarl_data['current_save_data']['rom_save_supporting_filenames_exist']):
if 'Prompt'.lower() in iarl_data['settings']['local_file_action'].lower():
current_dialog = xbmcgui.Dialog()
ret1 = current_dialog.select('Download and overwrite local files?', ['No','Yes'])
if ret1 == 0:
iarl_data['current_save_data']['overwrite_existing_files'] = False
xbmc.log(msg='IARL: File was found to exist locally, no overwrite option selected', level=xbmc.LOGDEBUG)
else:
iarl_data['current_save_data']['overwrite_existing_files'] = True
xbmc.log(msg='IARL: File was found to exist locally, overwrite option selected', level=xbmc.LOGDEBUG)
elif 'Do Not ReDownload'.lower() in iarl_data['settings']['local_file_action'].lower():
iarl_data['current_save_data']['overwrite_existing_files'] = False
xbmc.log(msg='IARL: File was found to exist locally, no overwrite option selected', level=xbmc.LOGDEBUG)
else:
iarl_data['current_save_data']['overwrite_existing_files'] = True
xbmc.log(msg='IARL: File was found to exist locally, overwrite option selected', level=xbmc.LOGDEBUG)
#4. Download the files, check the file downloaded
for ii in range (0,len(iarl_data['current_rom_data']['rom_save_filenames'])):
download_filename = False
if iarl_data['current_rom_data']['rom_save_filenames'][ii]:
if iarl_data['current_rom_data']['rom_save_filenames'][ii].lower() != 'none': #XBMC listitem uses none string
if iarl_data['current_save_data']['rom_save_filenames_exist'][ii]:
if iarl_data['current_save_data']['overwrite_existing_files']:
download_filename = True #Download the file if the file exists and overwrite was selected
else:
download_filename = True #Download the file if the file does not exist
if download_filename:
iarl_data['current_save_data']['rom_save_filenames_success'][ii] = download_tools().Downloader(quote_url(iarl_data['current_rom_data']['rom_filenames'][ii]),iarl_data['current_rom_data']['rom_save_filenames'][ii],iarl_data['settings']['ia_enable_login'],iarl_data['settings']['ia_username'],iarl_data['settings']['ia_password'],iarl_data['current_rom_data']['rom_size'][ii],iarl_data['current_rom_data']['rom_title'],'Downloading, please wait...')
if iarl_data['current_save_data']['rom_save_filenames_success'][ii]:
						if not check_downloaded_file(iarl_data['current_rom_data']['rom_save_filenames'][ii]): #Check the file; if it's 0 bytes, archive.org couldn't find it
iarl_data['current_save_data']['rom_save_filenames_exist'][ii] = True
else: #File was 0 bytes, delete it and call it a fail
iarl_data['current_save_data']['rom_save_filenames_success'][ii] = False
iarl_data['current_save_data']['rom_save_filenames_exist'][ii] = False
else: #File already exists locally, but potentially has a different file extension or naming convention
if iarl_data['current_rom_data']['rom_save_filenames'][ii] is not None:
if iarl_data['current_rom_data']['rom_save_filenames'][ii].lower() != 'none': #XBMC listitem uses none string
xbmc.log(msg='IARL: Matching file that already exists: '+str(iarl_data['current_save_data']['matching_rom_save_filenames'][ii]), level=xbmc.LOGDEBUG)
iarl_data['current_save_data']['rom_save_filenames'][ii] = iarl_data['current_save_data']['matching_rom_save_filenames'][ii]
for ii in range (0,len(iarl_data['current_rom_data']['rom_save_supporting_filenames'])):
download_filename = False
if iarl_data['current_rom_data']['rom_save_supporting_filenames'][ii]:
if iarl_data['current_rom_data']['rom_save_supporting_filenames'][ii].lower() != 'none': #XBMC listitem uses none string
if iarl_data['current_save_data']['rom_save_supporting_filenames_exist'][ii]:
if iarl_data['current_save_data']['overwrite_existing_files']:
download_filename = True #Download the file if the file exists and overwrite was selected
else:
download_filename = True #Download the file if the file does not exist
if download_filename:
iarl_data['current_save_data']['rom_save_supporting_filenames_success'][ii] = download_tools().Downloader(quote_url(iarl_data['current_rom_data']['rom_supporting_filenames'][ii]),iarl_data['current_rom_data']['rom_save_supporting_filenames'][ii],iarl_data['settings']['ia_enable_login'],iarl_data['settings']['ia_username'],iarl_data['settings']['ia_password'],9999999,iarl_data['current_rom_data']['rom_supporting_filenames'][ii],'Downloading, please wait...')
if iarl_data['current_save_data']['rom_save_supporting_filenames_success'][ii]:
if not check_downloaded_file(iarl_data['current_rom_data']['rom_save_supporting_filenames'][ii]):
iarl_data['current_save_data']['rom_save_supporting_filenames_exist'][ii] = True
else:
iarl_data['current_save_data']['rom_save_supporting_filenames_success'][ii] = False
iarl_data['current_save_data']['rom_save_supporting_filenames_exist'][ii] = False
else: #File already exists locally, but potentially has a different file extension or naming convention
if iarl_data['current_rom_data']['rom_save_supporting_filenames'][ii] is not None:
if iarl_data['current_rom_data']['rom_save_supporting_filenames'][ii].lower() != 'none': #XBMC listitem uses none string
xbmc.log(msg='IARL: Matching file that already exists: '+str(iarl_data['current_save_data']['matching_rom_save_supporting_filenames'][ii]), level=xbmc.LOGDEBUG)
iarl_data['current_save_data']['rom_save_supporting_filenames'][ii] = iarl_data['current_save_data']['matching_rom_save_supporting_filenames'][ii]
#5. Check to ensure each file was a success
for check in iarl_data['current_save_data']['rom_save_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_download_success'] = False
for check in iarl_data['current_save_data']['rom_save_supporting_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_download_success'] = False
#5. Post-download process the files if necessary
if iarl_data['current_save_data']['overall_download_success']:
if iarl_data['current_rom_data']['rom_override_postdl'] is not None and len(iarl_data['current_rom_data']['rom_override_postdl']) > 0: #Override postdl command detected, so use that
xbmc.log(msg='IARL: Post DL Override command detected for '+str(iarl_data['current_rom_data']['rom_name'])+' - '+str(iarl_data['current_rom_data']['rom_override_postdl']), level=xbmc.LOGDEBUG)
iarl_data['current_save_data']['launch_filename'], post_download_action_success = post_download_action(iarl_data,iarl_data['current_rom_data']['rom_override_postdl'],None)
else: #No override command was found, use the current_archive_data emu_post_download_action
iarl_data['current_save_data']['launch_filename'], post_download_action_success = post_download_action(iarl_data,iarl_data['current_archive_data']['emu_post_download_action'],None)
else:
xbmc.log(msg='IARL: There was a download error for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
return iarl_data['current_save_data']
def post_download_action(iarl_data,option,option2):
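	#Dispatches on the post-download option ('none', 'unzip_rom',
	#'unzip_and_rename_file', 'unzip_standalone_port_file', 'unzip_win31_file',
	#'unzip_update_rom_path_dosbox', ...) to convert the downloaded files and set
	#the launch filename. Callers receive (launch_filename, post_download_action_success)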
post_download_action_success = False
if option == 'none':
iarl_data['current_save_data']['overall_conversion_success'] = True
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_save_filenames'][0] #Define the launch filename as the first one
elif option == 'unzip_rom':
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = unzip_file(filenames)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
conversion_success, converted_filename = unzip_file(filenames)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error unzipping files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'unzip_and_rename_file':
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = unzip_and_rename_file(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
			xbmc.log(msg='IARL: There was an error unzipping and renaming for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'unzip_standalone_port_file':
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
if option2 is None:
option2 = iarl_data['current_rom_data']['rom_emu_command']
conversion_success, converted_filename = unzip_standalone_port_file(filenames,option2)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
if option2 is None:
option2 = iarl_data['current_rom_data']['rom_emu_command']
conversion_success, converted_filename = unzip_standalone_port_file(filenames,option2)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting Standalone Port files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'unzip_win31_file':
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = unzip_win31_file(iarl_data['current_rom_data']['rom_title'],iarl_data['current_save_data']['rom_save_filenames'],iarl_data['current_rom_data']['rom_emu_command'])
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
   xbmc.log(msg='IARL: There was an error converting Win31 files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'unzip_update_rom_path_dosbox':
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
if option2 is None:
option2 = iarl_data['current_rom_data']['rom_emu_command']
conversion_success, converted_filename = unzip_dosbox_file(filenames,option2)
# conversion_success, converted_filename = unzip_dosbox_file(filenames,iarl_data['current_rom_data']['rom_emu_command'])
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
if option2 is None:
option2 = iarl_data['current_rom_data']['rom_emu_command']
conversion_success, converted_filename = unzip_dosbox_file(filenames,option2)
# conversion_success, converted_filename = unzip_dosbox_file(filenames,iarl_data['current_rom_data']['rom_emu_command'])
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting DOSBox files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'unzip_dosbox_update_conf_file' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = unzip_dosbox_update_conf_file(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the DOSBox archive for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'unzip_scummvm_update_conf_file' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = unzip_scummvm_update_conf_file(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the ScummVM archive for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'convert_chd_bin':
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_chd_bin(filenames,iarl_data['addon_data']['chdman_path'],'bin')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
conversion_success, converted_filename = convert_chd_bin(filenames,iarl_data['addon_data']['chdman_path'],'bin')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting CHD to BIN/CUE for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'convert_chd_cue':
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_chd_bin(filenames,iarl_data['addon_data']['chdman_path'],'cue')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
conversion_success, converted_filename = convert_chd_bin(filenames,iarl_data['addon_data']['chdman_path'],'cue')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting CHD to CUE/BIN for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif option == 'lynx_header_fix':
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = lynx_header_fix(filenames)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
conversion_success, converted_filename = lynx_header_fix(filenames)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error attempting to fix Lynx ROM Header for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'rename_rom_postdl' in option:
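  #The new file extension is expected inside parentheses in the option
  #string, e.g. 'rename_rom_postdl(bin)' (illustrative); the regex below extracts it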
try:
new_extension = re.search(r'\([^)]*\)',option).group(0).replace('(','').replace(')','').strip()
except:
new_extension = ''
xbmc.log(msg='IARL: Rename ROM option extension could not be defined', level=xbmc.LOGERROR)
if iarl_data['current_save_data']['rom_save_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = rename_rom_postdl(filenames,new_extension)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
if iarl_data['current_save_data']['rom_save_supporting_filenames']:
for filenames in iarl_data['current_save_data']['rom_save_supporting_filenames']:
conversion_success, converted_filename = rename_rom_postdl(filenames,new_extension)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error attempting to rename the file extension for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'generate_uae_conf_file' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = generate_uae_conf_file(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error creating the FS-UAE conf files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'generate_uae4arm_conf_file' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = generate_uae4arm_conf_file(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error creating the UAE4ARM conf files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'generate_uae_cd32_conf_file' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = generate_uae_cd32_conf_file(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error creating the FS-UAE CD32 conf files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_7z_m3u' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_7z_m3u(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the 7z files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_zip_m3u' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_zip_m3u(iarl_data)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the zip files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_7z_track1_bin' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_7z_bin_cue_gdi(iarl_data,'track 1')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the 7z track 1 files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_7z_gdi' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_7z_bin_cue_gdi(iarl_data,'gdi')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the 7z gdi files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_7z_cue' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_7z_bin_cue_gdi(iarl_data,'cue')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the 7z cue files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_7z_iso' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_7z_bin_cue_gdi(iarl_data,'iso')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the 7z iso files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_zip_track1_bin' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_zip_bin_cue_gdi(iarl_data,'track 1')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the zip track 1 files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_zip_gdi' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_zip_bin_cue_gdi(iarl_data,'gdi')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the zip gdi files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_zip_cue' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_zip_bin_cue_gdi(iarl_data,'cue')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the zip cue files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_zip_iso' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_zip_bin_cue_gdi(iarl_data,'iso')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the zip iso files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_adf_folder' in option:
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = convert_adf_folder(iarl_data,'adf')
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error converting the zipped adf files for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_mame_softlist_dummy_file' in option:
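  #The softlist type is expected inside parentheses (optionally quoted) in the
  #option string, e.g. "convert_mame_softlist_dummy_file('a2600')" (illustrative)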
try:
softlist_type = re.search(r'\([^)]*\)',option).group(0).replace('(','').replace(')','').replace("'",'').strip()
except:
softlist_type = ''
xbmc.log(msg='IARL: MAME softlist type could not be defined', level=xbmc.LOGERROR)
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = setup_mame_softlist_game_dummy_file(iarl_data,softlist_type)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error setting up the MAME softlist game '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_mame_softlist' in option:
try:
softlist_type = re.search(r'\([^)]*\)',option).group(0).replace('(','').replace(')','').replace("'",'').strip()
except:
softlist_type = ''
xbmc.log(msg='IARL: MAME softlist type could not be defined', level=xbmc.LOGERROR)
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = setup_mame_softlist_game(iarl_data,softlist_type)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error setting up the MAME softlist game '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_mess2014_softlist_dummy_file' in option:
try:
softlist_type = re.search(r'\([^)]*\)',option).group(0).replace('(','').replace(')','').replace("'",'').strip()
except:
softlist_type = ''
xbmc.log(msg='IARL: MESS2014 softlist type could not be defined', level=xbmc.LOGERROR)
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = setup_mess2014_softlist_game_dummy_file(iarl_data,softlist_type)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error setting up the MESS2014 softlist game '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'convert_mess2014_softlist' in option:
try:
softlist_type = re.search(r'\([^)]*\)',option).group(0).replace('(','').replace(')','').replace("'",'').strip()
except:
softlist_type = ''
xbmc.log(msg='IARL: MESS2014 softlist type could not be defined', level=xbmc.LOGERROR)
if iarl_data['current_save_data']['rom_save_filenames']:
conversion_success, converted_filename = setup_mess2014_softlist_game(iarl_data,softlist_type)
iarl_data['current_save_data']['rom_converted_filenames'].append(converted_filename)
iarl_data['current_save_data']['rom_converted_filenames_success'].append(conversion_success)
for check in iarl_data['current_save_data']['rom_converted_filenames_success']:
if not check:
iarl_data['current_save_data']['overall_conversion_success'] = False
if iarl_data['current_save_data']['overall_conversion_success']:
iarl_data['current_save_data']['launch_filename'] = iarl_data['current_save_data']['rom_converted_filenames'][0] #Define the launch filename as the first one
else:
xbmc.log(msg='IARL: There was an error setting up the MESS2014 softlist game '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGERROR)
elif 'favorites_post_action' in option:
if '|' in option:
   option_1 = option.split('|')[0]
   option_2 = option.split('|')[-1]
else:
option_1 = option
option_2 = None
#Call post_download_action again with new arguments
new_launch_filename, overall_conversion_success_2 = post_download_action(iarl_data,option_1,option_2)
if overall_conversion_success_2:
iarl_data['current_save_data']['launch_filename'] = new_launch_filename
iarl_data['current_save_data']['overall_conversion_success'] = True
else:
iarl_data['current_save_data']['launch_filename'] = None
post_download_action_success = False
xbmc.log(msg='IARL: The post download action '+str(option)+' is unknown', level=xbmc.LOGERROR)
return iarl_data['current_save_data']['launch_filename'], iarl_data['current_save_data']['overall_conversion_success']
def download_and_launch_rom(romwindow,iarl_data):
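 """Download the current ROM and launch it.

 Uses the configured external launcher when the archive's emu_launcher
 is 'external'; otherwise the downloaded file is handed to Kodi
 Retroplayer via xbmc.Player().
 """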
xbmc.log(msg='IARL: Download and Launch started for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGNOTICE)
if 'plugin.program.iarl/History' not in xbmc.getInfoLabel('Container.FolderPath'): #Update history cache if not already in the history list
history_cache_success = update_history_cache_file(iarl_data,plugin)
#Use External Launcher
if iarl_data['current_archive_data']['emu_launcher'] == 'external':
  if 'select' in iarl_data['settings']['external_launch_env'].lower():
   current_dialog = xbmcgui.Dialog()
   ok_ret = current_dialog.ok('Error','External launching is not set up in addon settings')
   xbmc.log(msg='IARL: External launching is not set up in addon settings yet!', level=xbmc.LOGERROR)
else:
if iarl_data['current_archive_data']['emu_ext_launch_cmd'] != 'none':
#Download the required files, and process them if needed
iarl_data['current_save_data'] = download_rom_only(iarl_data)
if iarl_data['current_save_data']['overall_download_success']:
current_external_command = replace_external_launch_variables(iarl_data) #Function replaces command line variables
if '%' not in current_external_command:
#Close the Info Window if it's open
if romwindow is not None:
romwindow.closeDialog()
if 'Android' in iarl_data['addon_data']['operating_system']:
#Suspend audio for HDMI audio purposes on some systems
xbmc.log(msg='IARL: Android external command: '+str(current_external_command), level=xbmc.LOGNOTICE)
xbmc.audioSuspend()
xbmc.enableNavSounds(False)
xbmc.sleep(500) #This pause seems to help... I'm not really sure why
if iarl_data['settings']['launch_with_subprocess']:
execute_subprocess_command(current_external_command.encode('utf-8'))
else:
os.system(current_external_command.encode('utf-8')) #Android is frustrating...
#Resume audio after external command is complete
xbmc.audioResume()
xbmc.enableNavSounds(True)
else:
xbmc.log(msg='IARL: External launch command sent: '+str(current_external_command), level=xbmc.LOGNOTICE)
#Suspend audio for HDMI audio purposes on some systems
xbmc.audioSuspend()
xbmc.enableNavSounds(False)
xbmc.sleep(500) #This pause seems to help... I'm not really sure why
if iarl_data['settings']['launch_with_subprocess']:
execute_subprocess_command(current_external_command)
else:
external_command = subprocess.call(current_external_command,shell=True)
#Resume audio after external command is complete
xbmc.audioResume()
xbmc.enableNavSounds(True)
else:
current_dialog = xbmcgui.Dialog()
ok_ret = current_dialog.ok('Error','Settings are not defined for external launching.[CR]See log for more info')
xbmc.log(msg='IARL: There is an undefined value in the external launch command: '+str(current_external_command.split('%')[1]), level=xbmc.LOGERROR)
#Error downloading, so the game will not be launched
else:
xbmc.log(msg='IARL: There was an error downloading the requested files, so the game will not be launched.', level=xbmc.LOGERROR)
else:
current_dialog = xbmcgui.Dialog()
ok_ret = current_dialog.ok('Error','External launch command not defined.[CR]See log for more info')
xbmc.log(msg='IARL: External Launch Command is not defined for: '+str(iarl_data['current_archive_data']['emu_name']), level=xbmc.LOGERROR)
#Use Retroplayer
else:
iarl_data['current_save_data'] = download_rom_only(iarl_data)
if iarl_data['current_save_data']['overall_download_success']:
launch_game_listitem = xbmcgui.ListItem(iarl_data['current_save_data']['launch_filename'], "0", "", "")
parameters = {'title': iarl_data['current_rom_data']['rom_title'], 'url': iarl_data['current_save_data']['launch_filename']}
launch_game_listitem.setInfo(type='game', infoLabels=parameters)
if iarl_data['current_rom_data']['rom_boxarts'][0] is not None:
launch_game_listitem.setArt({'thumb': iarl_data['current_rom_data']['rom_boxarts'][0]})
launch_game_listitem.setThumbnailImage(iarl_data['current_rom_data']['rom_boxarts'][0])
if xbmc.Player().isPlaying():
xbmc.Player().stop()
xbmc.sleep(100)
#Close the Info Window if it's open
if romwindow is not None:
romwindow.closeDialog()
xbmc.sleep(500) #This pause seems to help... I'm not really sure why
xbmc.log(msg='IARL: Retroplayer launch for: '+str(iarl_data['current_save_data']['launch_filename']), level=xbmc.LOGNOTICE)
xbmc.Player().play(iarl_data['current_save_data']['launch_filename'],launch_game_listitem)
else:
xbmc.log(msg='IARL: There was an error downloading the requested files, so the game will not be launched.', level=xbmc.LOGERROR)
class ROMWindow(xbmcgui.WindowXMLDialog):
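 """Info dialog for a single ROM showing artwork, metadata, trailer, and
 download / download-and-launch buttons."""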
def __init__(self,strXMLname, strFallbackPath, strDefaultName, forceFallback, *args, **kwargs):
  # Changing the three variables passed here won't change anything;
  # doing strXMLname = "bah.xml" will not change anything.
  # Don't put GUI-sensitive stuff here (the xml hasn't been read yet).
# Idea to initialize your variables here
self.iarl_data = kwargs
xbmc.log(msg='IARL: ROMWindow Opened for '+str(iarl_data['current_rom_data']['rom_name']), level=xbmc.LOGDEBUG)
pass
def onInit(self):
self.action_exitkeys_id = [10, 13] #Default exit keys to close window via keyboard / controller
#Control ID's for InfoDialog
self.game_info_listitem_id = 113
self.left_art_list_id = 111
self.right_art_list_id = 112
self.download_button_id = 3001
self.download_and_launch_button_id = 3002
self.exit_button_id = 3003
self.play_trailer_button_id = 3005
self.stop_trailer_button_id = 3006
#Define theme for ROM Window - will likely phase this out
xbmcgui.Window(10000).setProperty('iarl.current_theme',str(iarl_data['current_archive_data']['emu_name']))
xbmcgui.Window(10000).setProperty('iarl.default_thumb',str(iarl_data['current_archive_data']['emu_boxart']))
xbmcgui.Window(10000).setProperty('iarl.header_color',str(iarl_data['current_archive_data']['header_color']))
xbmcgui.Window(10000).setProperty('iarl.bg_color',str(iarl_data['current_archive_data']['background_color']))
xbmcgui.Window(10000).setProperty('iarl.buttonfocustheme',str(iarl_data['current_archive_data']['button_focus']))
xbmcgui.Window(10000).setProperty('iarl.buttonnofocustheme',str(iarl_data['current_archive_data']['button_nofocus']))
#Define current ROM listitem for window
self.info_listitem = xbmcgui.ListItem(label=iarl_data['current_rom_data']['rom_name'])
self.info_listitem.setProperty('fanart_image', iarl_data['current_rom_data']['rom_fanarts'][0])
self.info_listitem.setProperty('banner', iarl_data['current_rom_data']['rom_banners'][0])
self.info_listitem.setProperty('clearlogo', iarl_data['current_rom_data']['rom_logos'][0])
self.info_listitem.setProperty('poster', iarl_data['current_rom_data']['rom_thumbnail'])
self.info_listitem.setProperty('tag', iarl_data['current_rom_data']['rom_tag'])
self.info_listitem.setProperty('rating', iarl_data['current_rom_data']['rom_rating'])
self.info_listitem.setProperty('perspective', iarl_data['current_rom_data']['rom_perspective'])
self.info_listitem.setProperty('esrb', iarl_data['current_rom_data']['rom_esrb'])
self.info_listitem.setProperty('rom_name', iarl_data['current_rom_data']['rom_name'])
self.info_listitem.setProperty('rom_icon', iarl_data['current_rom_data']['rom_icon'])
self.info_listitem.setProperty('rom_thumbnail', iarl_data['current_rom_data']['rom_thumbnail'])
self.info_listitem.setProperty('rom_title', iarl_data['current_rom_data']['rom_title'])
self.info_listitem.setProperty('rom_studio', iarl_data['current_rom_data']['rom_studio'])
self.info_listitem.setProperty('rom_genre', iarl_data['current_rom_data']['rom_genre'])
self.info_listitem.setProperty('rom_date', iarl_data['current_rom_data']['rom_date'])
if iarl_data['current_rom_data']['rom_date'] is not None:
self.info_listitem.setProperty('rom_date_string','Released: '+iarl_data['current_rom_data']['rom_date'])
else:
self.info_listitem.setProperty('rom_date_string',iarl_data['current_rom_data']['rom_date'])
self.info_listitem.setProperty('rom_year', iarl_data['current_rom_data']['rom_year'])
self.info_listitem.setProperty('rom_plot', iarl_data['current_rom_data']['rom_plot'])
self.info_listitem.setProperty('rom_trailer', iarl_data['current_rom_data']['rom_trailer'])
self.info_listitem.setProperty('rom_label', iarl_data['current_rom_data']['rom_label'])
self.info_listitem.setProperty('nplayers', iarl_data['current_rom_data']['rom_nplayers'])
if iarl_data['current_rom_data']['rom_nplayers'] is not None:
self.info_listitem.setProperty('nplayers_string','Players[CR]'+iarl_data['current_rom_data']['rom_nplayers'])
else:
self.info_listitem.setProperty('nplayers_string',iarl_data['current_rom_data']['rom_nplayers'])
self.info_listitem.setProperty('rom_size', str(sum(map(int,iarl_data['current_rom_data']['rom_size']))))
self.info_listitem.setProperty('emu_name', iarl_data['current_archive_data']['emu_name'])
self.info_listitem.setProperty('emu_boxart', iarl_data['current_archive_data']['emu_boxart'])
self.info_listitem.setProperty('emu_banner', iarl_data['current_archive_data']['emu_banner'])
self.info_listitem.setProperty('emu_fanart', iarl_data['current_archive_data']['emu_fanart'])
self.info_listitem.setProperty('emu_logo', iarl_data['current_archive_data']['emu_logo'])
self.info_listitem.setProperty('emu_trailer', iarl_data['current_archive_data']['emu_trailer'])
self.info_listitem.setProperty('emu_category', iarl_data['current_archive_data']['emu_category'])
self.info_listitem.setProperty('emu_plot', iarl_data['current_archive_data']['emu_plot'])
self.info_listitem.setProperty('current_window_id', str(xbmcgui.getCurrentWindowDialogId()))
xbmcgui.Window(10000).setProperty('iarl.trailer_started','False')
for ii in range(0,total_arts):
self.info_listitem.setProperty('fanart'+str(ii), iarl_data['current_rom_data']['rom_fanarts'][ii])
self.info_listitem.setProperty('banner'+str(ii), iarl_data['current_rom_data']['rom_banners'][ii])
self.info_listitem.setProperty('snapshot'+str(ii), iarl_data['current_rom_data']['rom_snapshots'][ii])
self.info_listitem.setProperty('boxart'+str(ii), iarl_data['current_rom_data']['rom_boxarts'][ii])
self.info_listitem.setProperty('logo'+str(ii), iarl_data['current_rom_data']['rom_logos'][ii])
self.info_list = self.getControl(self.game_info_listitem_id)
self.info_list.addItem(self.info_listitem)
#Get controls if available
try:
self.left_art_list = self.getControl(self.left_art_list_id) #Left Art List
except:
self.left_art_list = None
xbmc.log(msg='IARL: Left Art List (Control 111) is not present', level=xbmc.LOGDEBUG)
try:
self.right_art_list = self.getControl(self.right_art_list_id) #Right Art List
except:
self.right_art_list = None
xbmc.log(msg='IARL: Right Art List (Control 112) is not present', level=xbmc.LOGDEBUG)
try:
self.download_button = self.getControl(self.download_button_id) #Download Only
except:
self.download_button = None
xbmc.log(msg='IARL: Download Button (Control 3001) is not present', level=xbmc.LOGDEBUG)
try:
self.download_and_launch_button = self.getControl(self.download_and_launch_button_id) #Download and Launch
except:
self.download_and_launch_button = None
xbmc.log(msg='IARL: Download and Launch Button (Control 3002) is not present', level=xbmc.LOGDEBUG)
try:
self.exit_button = self.getControl(self.exit_button_id) #Close
except:
self.exit_button = None
xbmc.log(msg='IARL: Close Button (Control 3003) is not present', level=xbmc.LOGDEBUG)
try:
self.play_trailer_button = self.getControl(self.play_trailer_button_id) #Play Trailer
except:
self.play_trailer_button = None
xbmc.log(msg='IARL: Play Trailer Button (Control 3005) is not present', level=xbmc.LOGDEBUG)
try:
self.stop_trailer_button = self.getControl(self.stop_trailer_button_id) #Stop Trailer
except:
self.stop_trailer_button = None
xbmc.log(msg='IARL: Stop Trailer Button (Control 3006) is not present', level=xbmc.LOGDEBUG)
#Enable the buttons, these are disabled when one is selected to avoid double taps
if self.download_button is not None:
self.download_button.setEnabled(True)
if self.download_and_launch_button is not None:
self.download_and_launch_button.setEnabled(True)
  if self.exit_button is not None:
   self.exit_button.setEnabled(True)
#Populate the image listitems
left_art_found = False
right_art_found = False
if self.left_art_list is not None:
for rom_boxarts in filter(bool,iarl_data['current_rom_data']['rom_boxarts']):
left_art_found = True
self.left_art_list.addItem(xbmcgui.ListItem(label2=str(iarl_data['current_rom_data']['rom_name']), thumbnailImage=rom_boxarts)) #Add boxart to the left image slideshow
if not left_art_found:
self.left_art_list.addItem(xbmcgui.ListItem(label2=str(iarl_data['current_rom_data']['rom_name']), thumbnailImage=iarl_data['current_rom_data']['rom_icon'])) #If no boxart is found, make it the default box
if self.right_art_list is not None:
for rom_fanarts in filter(bool,iarl_data['current_rom_data']['rom_fanarts']):
right_art_found = True
self.right_art_list.addItem(xbmcgui.ListItem(label2=str(iarl_data['current_rom_data']['rom_name']), thumbnailImage=rom_fanarts)) #Add fanart to the right image slideshow
if self.right_art_list is not None:
for rom_snapshots in filter(bool,iarl_data['current_rom_data']['rom_snapshots']):
right_art_found = True
self.right_art_list.addItem(xbmcgui.ListItem(label2=str(iarl_data['current_rom_data']['rom_name']), thumbnailImage=rom_snapshots)) #Add snapshots to the right image slideshow
if not right_art_found:
self.right_art_list.addItem(xbmcgui.ListItem(label2=str(iarl_data['current_rom_data']['rom_name']), thumbnailImage=iarl_data['current_archive_data']['emu_fanart'])) #If no fanart is found, make it the current emulator fanart
#Auto play trailer if settings are defined
if 'yes' in iarl_data['settings']['autoplay_trailer'].lower():
if iarl_data['current_rom_data']['rom_trailer']:
if xbmc.Player().isPlaying():
xbmc.Player().stop()
xbmc.sleep(100)
xbmcgui.Window(10000).setProperty('iarl.trailer_started','True')
xbmc.sleep(250)
xbmc.Player().play(iarl_data['current_rom_data']['rom_trailer'], windowed=True)
def onAction(self, action):
# Same as normal python Windows.
if action in self.action_exitkeys_id:
self.closeDialog()
def onFocus(self, controlId):
#Not currently used
pass
def onClick(self, controlId):
#Download Only
if controlId == self.download_button_id:
#Disable buttons while we try to download (avoids double taps)
if self.download_button is not None:
self.download_button.setEnabled(False)
if self.download_and_launch_button is not None:
self.download_and_launch_button.setEnabled(False)
iarl_data['current_save_data'] = download_rom_only(iarl_data)
if iarl_data['current_save_data']['overall_download_success']:
current_dialog = xbmcgui.Dialog()
ok_ret = current_dialog.ok('Complete',iarl_data['current_rom_data']['rom_name']+' was successfully downloaded')
#Re-enable buttons after download executes
self.download_button.setEnabled(True)
if self.download_and_launch_button is not None:
self.download_and_launch_button.setEnabled(True)
#Download and Launch
if controlId == self.download_and_launch_button_id:
#Disable buttons while we try to download and launch (avoids double taps)
if self.download_and_launch_button is not None:
self.download_and_launch_button.setEnabled(False)
if self.download_button is not None:
self.download_button.setEnabled(False)
download_and_launch_rom(self,iarl_data)
#Re-enable buttons after download and launch executes
self.download_and_launch_button.setEnabled(True)
if self.download_button is not None:
self.download_button.setEnabled(True)
#Exit the window
elif controlId == self.exit_button_id:
self.download_button.setEnabled(True)
self.download_and_launch_button.setEnabled(True)
self.closeDialog()
def doAction(self, controlId):
# print controlId
pass
def closeDialog(self):
self.close()
class SearchWindow(xbmcgui.WindowXMLDialog):
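 """Search dialog: select archives to include, optionally set advanced
 filters, and kick off a ROM search."""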
def __init__(self,strXMLname, strFallbackPath, strDefaultName, forceFallback, *args, **kwargs):
  # Changing the three variables passed here won't change anything;
  # doing strXMLname = "bah.xml" will not change anything.
  # Don't put GUI-sensitive stuff here (the xml hasn't been read yet).
# Idea to initialize your variables here
self.iarl_data = kwargs
self.available_as_options = ['Genre','Release Date','Region','Num Players','Studio'] #Not currently used
def onInit(self):
# Put your List Populating code/ and GUI startup stuff here
# get control ids
self.action_exitkeys_id = [10, 13]
self.archive_list = self.getControl(101) #Archive List
self.search_box = self.getControl(102) #Search Box
self.genre_id = self.getControl(104) #Genre
self.nplayers_id = self.getControl(106) #Players
self.date_from_id = self.getControl(108) #Date From
self.date_to_id = self.getControl(109) #Date To
self.studio_id = self.getControl(111) #Studio
  self.region_id = self.getControl(113) #Region
self.control_id_button_1 = self.getControl(3001) #Add all archives button
self.control_id_button_2 = self.getControl(3002) #Remove all archives button
  self.control_id_button_3 = self.getControl(3003) #Turns advanced search off
  self.control_id_button_4 = self.getControl(3004) #Turns advanced search on
  self.as_group_control = self.getControl(3005) #Advanced search group control
self.control_id_button_5 = self.getControl(3006) #Search Button
  self.control_id_button_6 = self.getControl(3008) #Close Button
#Set initial vis
self.as_group_control.setVisible(False)
self.control_id_button_3.setVisible(False)
self.control_id_button_4.setVisible(True)
xbmcgui.Window(10000).setProperty('iarl.advanced_search','False') #Turn off AS by default
# translate buttons
self.control_id_button_1.setLabel('Select All')
self.control_id_button_2.setLabel('Select None')
self.control_id_button_5.setLabel('Search')
self.control_id_button_6.setLabel('Close')
#Populate Lists
for ii in range(0,len(iarl_data['archive_data']['emu_name'])):
if 'hidden' not in iarl_data['archive_data']['emu_category'][ii]: #Don't include the archive if it's tagged hidden
current_listitem = xbmcgui.ListItem(label=iarl_data['archive_data']['emu_name'][ii])
current_listitem.setIconImage(iarl_data['archive_data']['emu_boxart'][ii])
current_listitem.setProperty('include_in_search','0') #Default to not include in search
    current_listitem.setProperty('hide_in_search','1') #'1' = selectable in the search list
self.archive_list.addItem(current_listitem) #Add item to the filter list
else:
current_listitem = xbmcgui.ListItem(label=iarl_data['archive_data']['emu_name'][ii])
current_listitem.setIconImage(iarl_data['archive_data']['emu_boxart'][ii])
current_listitem.setProperty('include_in_search','0') #Default to not include in search
    current_listitem.setProperty('hide_in_search','0') #'0' = not selectable (archive is hidden)
self.archive_list.addItem(current_listitem) #Add item to the filter list
def onAction(self, action):
# Same as normal python Windows.
if action in self.action_exitkeys_id:
self.closeDialog()
# def onFocus(self, controlId):
# pass
def onClick(self, controlId):
if controlId == 101:
current_item = self.archive_list.getSelectedItem()
   if current_item.getProperty('include_in_search') == '0':
    if current_item.getProperty('hide_in_search') != '0': #Prevent selection if it's hidden
     current_item.setProperty('include_in_search','1') #It wasn't included, and now should be included
   else:
    current_item.setProperty('include_in_search','0') #It was included, and now shouldn't be included
if controlId == 3001:
for ii in range(0,self.archive_list.size()):
current_listitem = self.archive_list.getListItem(ii)
    if current_listitem.getProperty('hide_in_search') != '0': #Prevent selection if it's hidden
current_listitem.setProperty('include_in_search','1') #Select All
if controlId == 3002:
for ii in range(0,self.archive_list.size()):
current_listitem = self.archive_list.getListItem(ii)
current_listitem.setProperty('include_in_search','0') #Select None
if controlId == 3003:
self.as_group_control.setVisible(False)
self.control_id_button_3.setVisible(False)
self.control_id_button_4.setVisible(True)
xbmcgui.Window(10000).setProperty('iarl.advanced_search','False') #Turn off AS
if controlId == 3004:
self.as_group_control.setVisible(True)
self.control_id_button_3.setVisible(True)
self.control_id_button_4.setVisible(False)
   xbmcgui.Window(10000).setProperty('iarl.advanced_search','True') #Turn on AS
if controlId == 3008:
self.closeDialog()
if controlId == 3006:
#Define search criteria
include_text_arg = ''
at_least_one = False
for ii in range(0,self.archive_list.size()):
if self.archive_list.getListItem(ii).getProperty('include_in_search') == '1':
include_text_arg = include_text_arg+',1'
at_least_one = True
else:
include_text_arg = include_text_arg+',0'
include_text_arg = include_text_arg[1:] #Remove that first comma
current_search_term = 'any'
current_genre = 'any'
current_nplayers = 'any'
current_date_from = 'any'
current_date_to = 'any'
current_studio = 'any'
current_region = 'any'
if len(self.search_box.getText())>0:
current_search_term = self.search_box.getText()
if xbmcgui.Window(10000).getProperty('iarl.advanced_search') == 'True':
if len(self.genre_id.getText())>0:
current_genre = self.genre_id.getText()
if len(self.nplayers_id.getText())>0:
current_nplayers = self.nplayers_id.getText()
if len(self.date_from_id.getText())>0:
current_date_from = self.date_from_id.getText()
if len(self.date_to_id.getText())>0:
current_date_to = self.date_to_id.getText()
if len(self.studio_id.getText())>0:
current_studio = self.studio_id.getText()
if len(self.region_id.getText())>0:
current_region = self.region_id.getText()
current_dialog = xbmcgui.Dialog()
if not at_least_one:
current_dialog.ok('Wah Waaah','You must select at least one archive!')
ret1=1
else:
# ret1 = current_dialog.select('Start Search?', ['Yes','No']) #Removing redundant search dialog
ret1 = 0
if ret1 == 0:
xbmc.log(msg='IARL: Starting Search...', level=xbmc.LOGDEBUG)
    #Not sure why plugin.redirect doesn't work here; for some reason it will not pass kwargs.
# search_url = plugin.url_for('search_roms_results', search_term=current_search_term,include_archives=include_text_arg, adv_search=xbmcgui.Window(10000).getProperty('iarl.advanced_search'),genre=current_genre,nplayers=current_nplayers,datefrom=current_date_from,dateto=current_date_to,studio=current_studio,region=current_region)
self.closeDialog()
search_roms_results(current_search_term,include_archives=include_text_arg,adv_search=xbmcgui.Window(10000).getProperty('iarl.advanced_search'),genre=current_genre,nplayers=current_nplayers,datefrom=current_date_from,dateto=current_date_to,studio=current_studio,region=current_region)
# plugin.redirect(search_url)
else:
pass
def closeDialog(self):
self.close()
class TOUWindow(xbmcgui.WindowXMLDialog):
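 """Terms-of-use dialog; agreeing persists the iarl_setting_tou addon setting."""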
def __init__(self,strXMLname, strFallbackPath, strDefaultName, forceFallback, *args, **kwargs):
  # Changing the three variables passed here won't change anything;
  # doing strXMLname = "bah.xml" will not change anything.
  # Don't put GUI-sensitive stuff here (the xml hasn't been read yet).
# Idea to initialize your variables here
self.iarl_data = kwargs
xbmc.log(msg='IARL: TOUWindow Opened', level=xbmc.LOGDEBUG)
pass
def onInit(self):
self.action_exitkeys_id = [10, 13]
#Create invisible listitem for skinning purposes
# get control ids
self.control_id_button_action1 = 3001 #Agree and Close
self.control_id_button_exit = 3003 #Do not Agree and Close
self.control_id_label_action = 3011
# set actions
self.button_action1 = self.getControl(self.control_id_button_action1)
self.button_exit = self.getControl(self.control_id_button_exit)
def onAction(self, action):
# Same as normal python Windows. Same as do not agree
if action in self.action_exitkeys_id:
self.closeDialog()
def onFocus(self, controlId):
pass
def onClick(self, controlId):
#Agree and Close
if controlId == self.control_id_button_action1:
if xbmc.Player().isPlaying():
xbmc.Player().stop()
xbmc.sleep(100)
xbmcaddon.Addon(id='plugin.program.iarl').setSetting(id='iarl_setting_tou',value='true')
xbmc.sleep(500)
xbmc.log(msg='IARL: Terms of Use Agree', level=xbmc.LOGDEBUG)
self.closeDialog()
#Do not Agree
elif controlId == self.control_id_button_exit:
if xbmc.Player().isPlaying():
xbmc.Player().stop()
xbmc.sleep(100)
xbmc.log(msg='IARL: Terms of Use do not Agree', level=xbmc.LOGDEBUG)
self.closeDialog()
def doAction(self, controlId):
# print controlId
pass
def closeDialog(self):
self.close()
if __name__ == '__main__':
plugin.run()
| zach-morris/plugin.program.iarl | addon.py | Python | gpl-2.0 | 159,845 | ["ADF"] | 3ed386056670924dc1850adc2dd2d5b67c8d20a4534fd1502021e4dced4df5a7 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/384')
from data_384 import Fmat_original
def pca(X):
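 """PCA via the covariance method.

 Returns the eigenvectors (projection matrix), the eigenvalues, the mean
 of X, the mean-centered data M, and the covariance matrix of M.
 """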
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
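#Usage sketch (illustrative, not part of the original script): with rows as
#variables and columns as observations,
# vec, val, mu, M, C = pca(np.matrix(np.random.randn(10, 50)))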
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
 #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors; the x axis is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
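 #Illustrative: the smallest k explaining e.g. 95% of the variance would be
 # k = np.argmax(perc_total >= 0.95) + 1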
 # Reduced eigenvector matrix keeping the components with the highest eigenvalues (the first 7 are kept below, chosen from the cumulative-variance curve)
W = eigvec_total[:,0:7]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
    cvterr.confusion.plot(numbers=True, numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
    grid(True)
show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Final/results/objects/384/test10_cross_validate_objects_1200ms.py
|
Python
|
mit
| 4,252
|
[
"Mayavi"
] |
53db17e15a57dfd741f6e40859dc7bcff4515dc8bffc7c5bfd60a6c9c5a67c3f
|
from math import pi, sqrt
import numpy as np
from numpy.fft import fftn
from ase.units import Hartree
from gpaw.lfc import LocalizedFunctionsCollection as LFC
from gpaw.pair_density import PairDensity2 as PairDensity
from gpaw.poisson import PoissonSolver, FFTPoissonSolver
from gpaw.utilities import pack, unpack, packed_index, unpack2
from gpaw.utilities.tools import construct_reciprocal, tri2full, symmetrize
from gpaw.utilities.gauss import Gaussian
from gpaw.utilities.blas import r2k
def get_vxc(paw, spin=0, U=None):
"""Calculate matrix elements of the xc-potential."""
assert not paw.hamiltonian.xc.xcfunc.orbital_dependent, "LDA/GGA's only"
assert paw.wfs.dtype == float, 'Complex waves not implemented'
if U is not None: # Rotate xc matrix
return np.dot(U.T.conj(), np.dot(get_vxc(paw, spin), U))
gd = paw.hamiltonian.gd
psit_nG = paw.wfs.kpt_u[spin].psit_nG[:]
if paw.density.nt_sg is None:
paw.density.interpolate()
nt_g = paw.density.nt_sg[spin]
vxct_g = paw.density.finegd.zeros()
paw.hamiltonian.xc.get_energy_and_potential(nt_g, vxct_g)
vxct_G = gd.empty()
paw.hamiltonian.restrict(vxct_g, vxct_G)
Vxc_nn = np.zeros((paw.wfs.bd.nbands, paw.wfs.bd.nbands))
# Apply pseudo part
r2k(.5 * gd.dv, psit_nG, vxct_G * psit_nG, .0, Vxc_nn) # lower triangle
tri2full(Vxc_nn, 'L') # Fill in upper triangle from lower
gd.comm.sum(Vxc_nn)
# Add atomic PAW corrections
for a, P_ni in paw.wfs.kpt_u[spin].P_ani.items():
D_sp = paw.density.D_asp[a][:]
H_sp = np.zeros_like(D_sp)
paw.wfs.setups[a].xc_correction.calculate_energy_and_derivatives(
D_sp, H_sp)
H_ii = unpack(H_sp[spin])
Vxc_nn += np.dot(P_ni, np.dot(H_ii, P_ni.T))
return Vxc_nn * Hartree
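# Minimal numpy sketch (illustrative, independent of gpaw) of the pattern used
# in get_vxc above: compute only the lower triangle of a symmetric matrix and
# mirror it into the upper triangle, which is what tri2full(..., 'L') does.
def _fill_upper_from_lower(a):
    """a: square array with valid entries on and below the diagonal."""
    il = np.tril_indices_from(a, k=-1)
    a[il[1], il[0]] = a[il]  # a[j, i] = a[i, j] for i > j
    return a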
class Coulomb:
"""Class used to evaluate two index coulomb integrals."""
def __init__(self, gd, poisson=None):
"""Class should be initialized with a grid_descriptor 'gd' from
the gpaw module.
"""
self.gd = gd
self.poisson = poisson
def load(self, method):
"""Make sure all necessary attributes have been initialized"""
assert method in ('real', 'recip_gauss', 'recip_ewald'),\
str(method) + ' is an invalid method name,\n' +\
'use either real, recip_gauss, or recip_ewald'
if method.startswith('recip'):
if self.gd.comm.size > 1:
raise RuntimeError("Cannot do parallel FFT, use method='real'")
if not hasattr(self, 'k2'):
self.k2, self.N3 = construct_reciprocal(self.gd)
if method.endswith('ewald') and not hasattr(self, 'ewald'):
# cutoff radius
assert self.gd.orthogonal
rc = 0.5 * np.average(self.gd.cell_cv.diagonal())
# ewald potential: 1 - cos(k rc)
self.ewald = (np.ones(self.gd.n_c) -
np.cos(np.sqrt(self.k2) * rc))
# lim k -> 0 ewald / k2
self.ewald[0, 0, 0] = 0.5 * rc**2
elif method.endswith('gauss') and not hasattr(self, 'ng'):
gauss = Gaussian(self.gd)
self.ng = gauss.get_gauss(0) / sqrt(4 * pi)
self.vg = gauss.get_gauss_pot(0) / sqrt(4 * pi)
else: # method == 'real'
if not hasattr(self, 'solve'):
if self.poisson is not None:
self.solve = self.poisson.solve
else:
solver = PoissonSolver(nn=2)
solver.set_grid_descriptor(self.gd)
solver.initialize(load_gauss=True)
self.solve = solver.solve
def coulomb(self, n1, n2=None, Z1=None, Z2=None, method='recip_gauss'):
"""Evaluates the coulomb integral of n1 and n2
The coulomb integral is defined by::
*
/ / n1(r) n2(r')
(n1 | n2) = | dr | dr' -------------,
/ / |r - r'|
where n1 and n2 could be complex.
real:
Evaluate directly in real space using gaussians to neutralize
density n2, such that the potential can be generated by standard
procedures
recip_ewald:
Evaluate by Fourier transform.
Divergence at division by k^2 is avoided by utilizing the Ewald /
            Tuckermann trick, which formally requires the densities to be
localized within half of the unit cell.
recip_gauss:
Evaluate by Fourier transform.
Divergence at division by k^2 is avoided by removing total charge
of n1 and n2 with gaussian density ng::
* * *
(n1|n2) = (n1 - Z1 ng|n2 - Z2 ng) + (Z2 n1 + Z1 n2 - Z1 Z2 ng | ng)
The evaluation of the integral (n1 - Z1 ng|n2 - Z2 ng) is done in
k-space using FFT techniques.
"""
self.load(method)
# determine integrand using specified method
if method == 'real':
I = self.gd.zeros()
            if n2 is None: n2 = n1; Z2 = Z1
self.solve(I, n2, charge=Z2, eps=1e-12, zero_initial_phi=True)
I *= n1.conj()
elif method == 'recip_ewald':
n1k = fftn(n1)
            if n2 is None: n2k = n1k
            else: n2k = fftn(n2)
I = n1k.conj() * n2k * self.ewald * 4 * pi / (self.k2 * self.N3)
else: # method == 'recip_gauss':
# Determine total charges
            if Z1 is None: Z1 = self.gd.integrate(n1)
            if Z2 is None and n2 is not None: Z2 = self.gd.integrate(n2)
# Determine the integrand of the neutral system
# (n1 - Z1 ng)* int dr' (n2 - Z2 ng) / |r - r'|
nk1 = fftn(n1 - Z1 * self.ng)
            if n2 is None:
I = abs(nk1)**2 * 4 * pi / (self.k2 * self.N3)
else:
nk2 = fftn(n2 - Z2 * self.ng)
I = nk1.conj() * nk2 * 4 * pi / (self.k2 * self.N3)
# add the corrections to the integrand due to neutralization
            if n2 is None:
I += (2 * np.real(np.conj(Z1) * n1) -
abs(Z1)**2 * self.ng) * self.vg
else:
I += (np.conj(Z1) * n2 + Z2 * n1.conj() -
np.conj(Z1) * Z2 * self.ng) * self.vg
        if n1.dtype == float and (n2 is None or n2.dtype == float):
return np.real(self.gd.integrate(I))
else:
return self.gd.integrate(I)
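# Standalone sketch (plain numpy, no gpaw objects) of the Ewald screening used
# in Coulomb.load: the screened reciprocal-space kernel (1 - cos(k*rc))/k**2
# stays finite as k -> 0, where the bare 1/k**2 diverges; its k -> 0 limit is
# rc**2/2, matching the 0.5 * rc**2 value assigned above.
def _ewald_kernel(k, rc):
    k = np.asarray(k, dtype=float)
    out = np.empty_like(k)
    nz = k != 0.0
    out[nz] = (1.0 - np.cos(k[nz] * rc)) / k[nz]**2
    out[~nz] = 0.5 * rc**2  # lim_{k -> 0} (1 - cos(k*rc))/k**2
    return out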
class CoulombNEW:
def __init__(self, gd, setups, spos_ac, fft=False):
assert gd.comm.size == 1
self.rhot1_G = gd.empty()
self.rhot2_G = gd.empty()
self.pot_G = gd.empty()
self.dv = gd.dv
if fft:
self.poisson = FFTPoissonSolver()
else:
self.poisson = PoissonSolver(nn=3)
self.poisson.set_grid_descriptor(gd)
self.poisson.initialize()
self.setups = setups
# Set coarse ghat
self.Ghat = LFC(gd, [setup.ghat_l for setup in setups],
integral=sqrt(4 * pi))
self.Ghat.set_positions(spos_ac)
def calculate(self, nt1_G, nt2_G, P1_ap, P2_ap):
I = 0.0
self.rhot1_G[:] = nt1_G
self.rhot2_G[:] = nt2_G
Q1_aL = {}
Q2_aL = {}
for a, P1_p in P1_ap.items():
P2_p = P2_ap[a]
setup = self.setups[a]
# Add atomic corrections to integral
I += 2 * np.dot(P1_p, np.dot(setup.M_pp, P2_p))
# Add compensation charges to pseudo densities
Q1_aL[a] = np.dot(P1_p, setup.Delta_pL)
Q2_aL[a] = np.dot(P2_p, setup.Delta_pL)
self.Ghat.add(self.rhot1_G, Q1_aL)
self.Ghat.add(self.rhot2_G, Q2_aL)
# Add coulomb energy of compensated pseudo densities to integral
self.poisson.solve(self.pot_G, self.rhot2_G, charge=None,
eps=1e-12, zero_initial_phi=True)
I += np.vdot(self.rhot1_G, self.pot_G) * self.dv
return I * Hartree
class HF:
def __init__(self, paw):
paw.initialize_positions()
self.nspins = paw.wfs.nspins
self.nbands = paw.wfs.bd.nbands
self.restrict = paw.hamiltonian.restrict
self.pair_density = PairDensity(paw.density, paw.atoms, finegrid=True)
self.dv = paw.wfs.gd.dv
self.dtype = paw.wfs.dtype
self.setups = paw.wfs.setups
# Allocate space for matrices
self.nt_G = paw.wfs.gd.empty()
self.rhot_g = paw.density.finegd.empty()
self.vt_G = paw.wfs.gd.empty()
self.vt_g = paw.density.finegd.empty()
self.poisson_solve = paw.hamiltonian.poisson.solve
def apply(self, paw, u=0):
H_nn = np.zeros((self.nbands, self.nbands), self.dtype)
self.soft_pseudo(paw, H_nn, u=u)
self.atomic_val_val(paw, H_nn, u=u)
self.atomic_val_core(paw, H_nn, u=u)
return H_nn * Hartree
def soft_pseudo(self, paw, H_nn, h_nn=None, u=0):
if h_nn is None:
h_nn = H_nn
kpt = paw.wfs.kpt_u[u]
pd = self.pair_density
deg = 2 / self.nspins
fmin = 1e-9
Htpsit_nG = np.zeros(kpt.psit_nG.shape, self.dtype)
for n1 in range(self.nbands):
psit1_G = kpt.psit_nG[n1]
f1 = kpt.f_n[n1] / deg
for n2 in range(n1, self.nbands):
psit2_G = kpt.psit_nG[n2]
f2 = kpt.f_n[n2] / deg
if f1 < fmin and f2 < fmin:
continue
dc = 1 + (n1 != n2)
pd.initialize(kpt, n1, n2)
pd.get_coarse(self.nt_G)
pd.add_compensation_charges(self.nt_G, self.rhot_g)
self.poisson_solve(self.vt_g, -self.rhot_g,
charge=-float(n1 == n2), eps=1e-12,
zero_initial_phi=True)
self.restrict(self.vt_g, self.vt_G)
Htpsit_nG[n1] += f2 * self.vt_G * psit2_G
if n1 != n2:
Htpsit_nG[n2] += f1 * self.vt_G * psit1_G
v_aL = paw.density.ghat.dict()
paw.density.ghat.integrate(self.vt_g, v_aL)
for a, v_L in v_aL.items():
v_ii = unpack(np.dot(paw.wfs.setups[a].Delta_pL, v_L))
P_ni = kpt.P_ani[a]
h_nn[:, n1] += f2 * np.dot(P_ni, np.dot(v_ii, P_ni[n2]))
if n1 != n2:
h_nn[:, n2] += f1 * np.dot(P_ni,np.dot(v_ii, P_ni[n1]))
symmetrize(h_nn) # Grrrr why!!! XXX
# Fill in lower triangle
r2k(0.5 * self.dv, kpt.psit_nG[:], Htpsit_nG, 1.0, H_nn)
# Fill in upper triangle from lower
tri2full(H_nn, 'L')
def atomic_val_val(self, paw, H_nn, u=0):
deg = 2 / self.nspins
kpt = paw.wfs.kpt_u[u]
for a, P_ni in kpt.P_ani.items():
# Add atomic corrections to the valence-valence exchange energy
# --
# > D C D
# -- ii iiii ii
setup = paw.wfs.setups[a]
D_p = paw.density.D_asp[a][kpt.s]
H_p = np.zeros_like(D_p)
D_ii = unpack2(D_p)
ni = len(D_ii)
for i1 in range(ni):
for i2 in range(ni):
A = 0.0
for i3 in range(ni):
p13 = packed_index(i1, i3, ni)
for i4 in range(ni):
p24 = packed_index(i2, i4, ni)
A += setup.M_pp[p13, p24] * D_ii[i3, i4]
p12 = packed_index(i1, i2, ni)
H_p[p12] -= 2 / deg * A / ((i1 != i2) + 1)
H_nn += np.dot(P_ni, np.inner(unpack(H_p), P_ni.conj()))
def atomic_val_core(self, paw, H_nn, u=0):
kpt = paw.wfs.kpt_u[u]
for a, P_ni in kpt.P_ani.items():
dH_ii = unpack(-paw.wfs.setups[a].X_p)
H_nn += np.dot(P_ni, np.inner(dH_ii, P_ni.conj()))
|
ajylee/gpaw-rtxs
|
gpaw/coulomb.py
|
Python
|
gpl-3.0
| 12,422
|
[
"ASE",
"GPAW",
"Gaussian"
] |
ec8cf62eff4491c68a0c439c9e6776b43a9b1f3b4384e0b10bfb1208e8bbd3a3
|
from kameleon_mcmc.distribution.Gaussian import Gaussian
from kameleon_mcmc.mcmc.samplers.MCMCSampler import MCMCSampler
from numpy.lib.twodim_base import eye
class StandardMetropolis(MCMCSampler):
'''
Just a plain, old, boring Metropolis Algorithm
    with a fixed scale and a fixed covariance matrix
'''
is_symmetric = True
def __init__(self, distribution, scale=None, cov=None):
MCMCSampler.__init__(self, distribution)
if scale is None:
self.scale = (2.38 ** 2) / distribution.dimension
else:
self.scale = scale
if cov is None:
self.cov = eye(distribution.dimension)
else:
self.cov = cov
def __str__(self):
s = self.__class__.__name__ + "=["
s += "scale=" + str(self.scale)
s += ", " + MCMCSampler.__str__(self)
s += "]"
return s
def construct_proposal(self, y):
return Gaussian(y, self.scale * self.cov)
def adapt(self, mcmc_chain, step_output):
"""
Nothing to be seen here, this is a nonadaptive Sampler
"""
|
karlnapf/kameleon-mcmc
|
kameleon_mcmc/mcmc/samplers/StandardMetropolis.py
|
Python
|
bsd-2-clause
| 1,130
|
[
"Gaussian"
] |
8e4a48bc377b56968904b0c835c3746933b99c674a4d03272e5fa208e940e12c
|
import os
import numpy as np
os.environ['THEANO_FLAGS'] = 'floatX=float32'
import theano
import theano.tensor as tt
import theano.tensor.signal.pool # noqa: 401
# dtype = theano.config.floatX
from run_core import load_network, SoftLIFRate, round_layer
def compute_layer(layer, inputs, data):
assert isinstance(inputs, list)
assert len(layer.get('inputs', [])) == len(inputs)
print("Computing layer %s" % layer['name'])
if layer['type'] == 'data':
return data[layer['dataIdx']]
if layer['type'] == 'cost.logreg':
assert len(inputs) == 2
labels, probs = inputs
assert probs.ndim == 2
assert labels.ndim == 1
assert labels.shape[0] == probs.shape[0]
cost = -np.log(probs)[np.arange(probs.shape[0]), labels].mean()
inds = np.argsort(probs, axis=1)
top1error = (inds[:, -1] != labels).mean()
top5error = (inds[:, -5:] != labels[:, None]).all(axis=1).mean()
return cost, top1error, top5error
# single input layers
assert len(inputs) == 1
x = inputs[0]
if layer['type'] == 'fc':
weights = layer['weights'][0]
return np.dot(x.reshape(x.shape[0], -1), weights) + layer['biases']
if layer['type'] == 'neuron':
neuron = layer['neuron']
ntype = neuron['type']
if ntype == 'ident':
print(' %s' % ntype)
return x.copy()
if ntype == 'logistic':
print(' %s' % ntype)
return 1. / (1 + np.exp(-x))
if ntype == 'relu':
print(' %s' % ntype)
return np.maximum(0, x)
if ntype.startswith('softlif'): # includes softlifalpha and softlifalpharc
params = neuron['params']
print(' %s(%s)' % (ntype, ', '.join(
'%s=%0.3f' % (k, v) for k, v in params.items())))
if 't' not in params:
print("WARNING: using default neuron params")
tau_ref, tau_rc, alpha, amp = (0.001, 0.05, 0.825, 0.063)
sigma = params.get('g', params.get('a', None))
else:
tau_ref, tau_rc, alpha, amp, sigma = (
params[k] for k in ('t', 'r', 'a', 'm', 'g'))
lif = SoftLIFRate(sigma=sigma, tau_rc=tau_rc, tau_ref=tau_ref)
bias = 1.
r = amp * lif.rates(x, alpha, bias)
# r = amp * lif.rates(x.astype(np.float32), np.float32(alpha), np.float32(bias))
return r
raise NotImplementedError(ntype)
if layer['type'] == 'softmax':
assert x.ndim == 2
sx = tt.matrix()
sy = tt.nnet.softmax(sx)
f = theano.function([sx], sy)
return f(x)
if layer['type'] in ['dropout', 'dropout2']:
return layer['keep'] * x # scale all outputs by dropout factor
# layers that need square inputs
assert x.shape[-2] == x.shape[-1]
if layer['type'] == 'conv':
assert layer['sharedBiases']
n = x.shape[0]
nc = layer['channels'][0]
nx = layer['imgSize'][0]
ny = layer['modulesX']
nf = layer['filters']
s = layer['filterSize'][0]
st = layer['stride'][0]
p = -layer['padding'][0] # Alex makes -ve in layer.py (why?)
filters = layer['weights'][0].reshape(nc, s, s, nf)
filters = np.rollaxis(filters, axis=-1, start=0)
biases = layer['biases'].reshape(1, nf, 1, 1)
assert x.shape == (n, nc, nx, nx)
nx2 = (ny - 1) * st + s
xpad = np.zeros((n, nc, nx2, nx2), dtype=x.dtype)
xpad[:, :, p:p+nx, p:p+nx] = x
x = xpad
sx = tt.tensor4()
sy = tt.nnet.conv2d(
sx, filters, input_shape=x.shape, filter_shape=filters.shape,
subsample=(st, st), border_mode='valid', filter_flip=False)
sy = sy + biases
f = theano.function([sx], sy)
y = f(x)
print("Abs filters (mean, std, max) %s %s %s" % (
abs(filters).mean(), abs(filters).std(), abs(filters).max()))
print("Abs biases (mean, std, max) %s %s %s" % (
abs(biases).mean(), abs(biases).std(), abs(biases).max()))
assert y.shape == (n, nf, ny, ny)
return y
if layer['type'] == 'local':
n = x.shape[0]
nc = layer['channels'][0]
nxi = nxj = layer['imgSize'][0]
nyi = nyj = layer['modulesX']
nf = layer['filters']
si = sj = layer['filterSize'][0]
sti = stj = layer['stride'][0]
pi = pj = -layer['padding'][0] # Alex makes -ve in layer.py (why?)
assert x.shape == (n, nc, nxi, nxj)
filters = layer['weights'][0].reshape(nyi, nyj, nc, si, sj, nf)
filters = np.rollaxis(filters, axis=-1, start=0)
biases = layer['biases'].reshape(1, nf, nyi, nyj)
y = np.zeros((n, nf, nyi, nyj), dtype=x.dtype)
for i in range(nyi):
for j in range(nyj):
i0, j0 = i*sti - pi, j*stj - pj
i1, j1 = i0 + si, j0 + sj
sli = slice(max(-i0, 0), min(nxi + si - i1, si))
slj = slice(max(-j0, 0), min(nxj + sj - j1, sj))
w = filters[:, i, j, :, sli, slj].reshape(nf, -1)
xij = x[:, :, max(i0, 0):min(i1, nxi), max(j0, 0):min(j1, nxj)]
y[:, :, i, j] = np.dot(xij.reshape(n, -1), w.T)
y += biases
return y
if layer['type'] == 'pool':
assert layer['start'] == 0
n = x.shape[0]
nc = layer['channels']
nxi = nxj = layer['imgSize']
nyi = nyj = layer['outputsX']
st, s = layer['stride'], layer['sizeX']
mode = dict(max='max', avg='average_exc_pad')[layer['pool']]
assert x.shape == (n, nc, nxi, nxj)
sx = tt.tensor4()
sy = tt.signal.pool.pool_2d(
sx, (s, s), ignore_border=False, stride=(st, st), mode=mode)
f = theano.function([sx], sy)
y = f(x)
return y
raise NotImplementedError(layer['type'])
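# Illustrative sketch of the soft-LIF rate referenced in the 'softlif' branch
# above (the real curve comes from run_core.SoftLIFRate; this mirrors the
# standard Hunsberger & Eliasmith formulation and is an assumption, not a
# drop-in replacement): the hard rectification max(0, j) is smoothed to
# sigma * log1p(exp(j / sigma)) so the LIF rate curve is differentiable.
def _soft_lif_rate(x, alpha, bias, tau_ref, tau_rc, sigma):
    j = alpha * x + bias - 1.0
    j = sigma * np.log1p(np.exp(j / sigma))  # smooth rectifier
    r = np.zeros_like(j)
    nz = j > 0
    r[nz] = 1.0 / (tau_ref + tau_rc * np.log1p(1.0 / j[nz]))
    return r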
def compute_target_layer(target_key, layers, data, outputs=None):
if outputs is None:
outputs = {}
if target_key in outputs:
return
layer = layers[target_key]
input_keys = layer.get('inputs', [])
for input_key in input_keys:
if input_key not in outputs:
compute_target_layer(input_key, layers, data, outputs)
inputs = [outputs[key] for key in input_keys]
outputs[target_key] = compute_layer(layer, inputs, data)
return outputs
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Run network in Numpy")
parser.add_argument('loadfile', help="Checkpoint to load")
parser.add_argument('--histsave', help="Save layer histograms")
parser.add_argument('--n', type=int, help="Number of images to test")
args = parser.parse_args()
layers, data, dp = load_network(args.loadfile)
if 0:
# use fixed point weights
for layer in layers.values():
round_layer(layer, 2**8, clip_percent=0.1)
inds = slice(0, args.n)
data = [d[inds] for d in data]
if 0:
n = 10
images = data[0]
pimages = images[:n]
pimages = (pimages + dp.data_mean.reshape(1, 3, 24, 24)) / 255.
pimages = np.transpose(pimages, (0, 2, 3, 1))
pimages = pimages.clip(0, 1)
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 6))
for i in range(10):
plt.subplot(2, 5, i+1)
plt.imshow(pimages[i], vmin=0, vmax=1)
plt.show()
outputs = compute_target_layer('logprob', layers, data)
def print_acts(name):
for parent in layers[name].get('inputs', []):
print_acts(parent)
output = outputs[name]
print("%15s: %10.3f (%10.3f) [%10.3f %10.3f]" % (
name, output.mean(), output.std(), output.min(), output.max()))
print_acts('probs')
print("logprob: %10.6f, top-1: %0.6f, top-5: %0.6f" % outputs['logprob'])
if args.histsave is not None:
hist_dict = {}
def hist_acts(name):
output = outputs[name]
hist, edges = np.histogram(output.ravel(), bins=100)
hist_dict[name] = (hist, edges)
# compute parents
for parent in layers[name].get('inputs', []):
hist_acts(parent)
hist_acts('probs')
np.savez(args.histsave, **hist_dict)
print("Saved %r" % args.histsave)
|
hunse/cuda-convnet2
|
run_numpy.py
|
Python
|
apache-2.0
| 8,487
|
[
"NEURON"
] |
8888e4871d55ac993a70e64fc9f63160d42c3a0bdfe3e999c9b171815ccab364
|
"""
Parses an XML feed into a Python representation. You should probably use L{iface_cache.iface_cache} rather than the functions here.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import os
from logging import debug, info, warn
import errno
from zeroinstall.support import basedir
from zeroinstall.injector import qdom
from zeroinstall.injector.namespaces import config_site, config_prog, XMLNS_IFACE
from zeroinstall.injector.model import Interface, InvalidInterface, ZeroInstallFeed, escape, Feed, stability_levels
from zeroinstall.injector import model
class MissingLocalFeed(InvalidInterface):
pass
def update_from_cache(interface, iface_cache = None):
"""Read a cached interface and any native feeds or user overrides.
@param interface: the interface object to update
@type interface: L{model.Interface}
@return: True if cached version and user overrides loaded OK.
False if upstream not cached. Local interfaces (starting with /) are
always considered to be cached, although they are not actually stored in the cache.
	Internal: use L{iface_cache.IfaceCache.get_interface} instead.
@rtype: bool"""
interface.reset()
if iface_cache is None:
from zeroinstall.injector import policy
iface_cache = policy.get_deprecated_singleton_config().iface_cache
# Add the distribution package manager's version, if any
path = basedir.load_first_data(config_site, 'native_feeds', model._pretty_escape(interface.uri))
if path:
# Resolve any symlinks
info(_("Adding native packager feed '%s'"), path)
interface.extra_feeds.append(Feed(os.path.realpath(path), None, False))
update_user_overrides(interface)
main_feed = iface_cache.get_feed(interface.uri, force = True)
if main_feed:
update_user_feed_overrides(main_feed)
return main_feed is not None
def load_feed_from_cache(url, selections_ok = False):
"""Load a feed. If the feed is remote, load from the cache. If local, load it directly.
@return: the feed, or None if it's remote and not cached."""
try:
if os.path.isabs(url):
debug(_("Loading local feed file '%s'"), url)
return load_feed(url, local = True, selections_ok = selections_ok)
else:
cached = basedir.load_first_cache(config_site, 'interfaces', escape(url))
if cached:
debug(_("Loading cached information for %(interface)s from %(cached)s"), {'interface': url, 'cached': cached})
return load_feed(cached, local = False)
else:
return None
except InvalidInterface as ex:
ex.feed_url = url
raise
def update_user_feed_overrides(feed):
"""Update a feed with user-supplied information.
Sets last_checked and user_stability ratings.
@param feed: feed to update
	@since: 0.49
"""
user = basedir.load_first_config(config_site, config_prog,
'feeds', model._pretty_escape(feed.url))
if user is None:
# For files saved by 0launch < 0.49
user = basedir.load_first_config(config_site, config_prog,
'user_overrides', escape(feed.url))
if not user:
return
try:
root = qdom.parse(open(user))
except Exception as ex:
warn(_("Error reading '%(user)s': %(exception)s"), {'user': user, 'exception': ex})
raise
last_checked = root.getAttribute('last-checked')
if last_checked:
feed.last_checked = int(last_checked)
for item in root.childNodes:
if item.uri != XMLNS_IFACE: continue
if item.name == 'implementation':
id = item.getAttribute('id')
assert id is not None
impl = feed.implementations.get(id, None)
if not impl:
debug(_("Ignoring user-override for unknown implementation %(id)s in %(interface)s"), {'id': id, 'interface': feed})
continue
user_stability = item.getAttribute('user-stability')
if user_stability:
impl.user_stability = stability_levels[str(user_stability)]
def update_user_overrides(interface):
"""Update an interface with user-supplied information.
Sets preferred stability and updates extra_feeds.
@param interface: the interface object to update
@type interface: L{model.Interface}
"""
user = basedir.load_first_config(config_site, config_prog,
'interfaces', model._pretty_escape(interface.uri))
if user is None:
# For files saved by 0launch < 0.49
user = basedir.load_first_config(config_site, config_prog,
'user_overrides', escape(interface.uri))
if not user:
return
try:
root = qdom.parse(open(user))
except Exception as ex:
warn(_("Error reading '%(user)s': %(exception)s"), {'user': user, 'exception': ex})
raise
stability_policy = root.getAttribute('stability-policy')
if stability_policy:
interface.set_stability_policy(stability_levels[str(stability_policy)])
for item in root.childNodes:
if item.uri != XMLNS_IFACE: continue
if item.name == 'feed':
feed_src = item.getAttribute('src')
if not feed_src:
raise InvalidInterface(_('Missing "src" attribute in <feed>'))
interface.extra_feeds.append(Feed(feed_src, item.getAttribute('arch'), True, langs = item.getAttribute('langs')))
def check_readable(feed_url, source):
"""Test whether a feed file is valid.
@param feed_url: the feed's expected URL
@type feed_url: str
@param source: the name of the file to test
@type source: str
@return: the modification time in src (usually just the mtime of the file)
@rtype: int
@raise InvalidInterface: If the source's syntax is incorrect,
"""
try:
feed = load_feed(source, local = False)
if feed.url != feed_url:
raise InvalidInterface(_("Incorrect URL used for feed.\n\n"
"%(feed_url)s is given in the feed, but\n"
"%(interface_uri)s was requested") %
{'feed_url': feed.url, 'interface_uri': feed_url})
return feed.last_modified
except InvalidInterface as ex:
info(_("Error loading feed:\n"
"Interface URI: %(uri)s\n"
"Local file: %(source)s\n"
"%(exception)s") %
{'uri': feed_url, 'source': source, 'exception': ex})
raise InvalidInterface(_("Error loading feed '%(uri)s':\n\n%(exception)s") % {'uri': feed_url, 'exception': ex})
def update(interface, source, local = False, iface_cache = None):
"""Read in information about an interface.
Deprecated.
@param interface: the interface object to update
@type interface: L{model.Interface}
@param source: the name of the file to read
@type source: str
@param local: use file's mtime for last-modified, and uri attribute is ignored
@raise InvalidInterface: if the source's syntax is incorrect
@return: the new feed (since 0.32)
@see: L{update_from_cache}, which calls this"""
assert isinstance(interface, Interface)
feed = load_feed(source, local)
if not local:
if feed.url != interface.uri:
raise InvalidInterface(_("Incorrect URL used for feed.\n\n"
"%(feed_url)s is given in the feed, but\n"
"%(interface_uri)s was requested") %
{'feed_url': feed.url, 'interface_uri': interface.uri})
if iface_cache is None:
from zeroinstall.injector import policy
iface_cache = policy.get_deprecated_singleton_config().iface_cache
iface_cache._feeds[unicode(interface.uri)] = feed
return feed
def load_feed(source, local = False, selections_ok = False):
"""Load a feed from a local file.
@param source: the name of the file to read
@type source: str
@param local: this is a local feed
@type local: bool
@param selections_ok: if it turns out to be a local selections document, return that instead
@type selections_ok: bool
@raise InvalidInterface: if the source's syntax is incorrect
@return: the new feed
@since: 0.48
@see: L{iface_cache.iface_cache}, which uses this to load the feeds"""
try:
with open(source) as stream:
root = qdom.parse(stream)
except IOError as ex:
if ex.errno == errno.ENOENT and local:
raise MissingLocalFeed(_("Feed not found. Perhaps this is a local feed that no longer exists? You can remove it from the list of feeds in that case."))
raise InvalidInterface(_("Can't read file"), ex)
except Exception as ex:
raise InvalidInterface(_("Invalid XML"), ex)
if local:
if selections_ok and root.uri == XMLNS_IFACE and root.name == 'selections':
from zeroinstall.injector import selections
return selections.Selections(root)
local_path = source
else:
local_path = None
feed = ZeroInstallFeed(root, local_path)
feed.last_modified = int(os.stat(source).st_mtime)
return feed
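if __name__ == '__main__':
	# Minimal usage sketch (not part of the library): load a local feed file
	# named on the command line and report when it was last modified.
	import sys
	feed = load_feed(sys.argv[1], local = True)
	print feed.url, feed.last_modified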
|
dabrahams/zeroinstall
|
zeroinstall/injector/reader.py
|
Python
|
lgpl-2.1
| 8,295
|
[
"VisIt"
] |
f268d809183e404502485bbd0950db4b21ed6871867bdf32b8cb2e456c1618aa
|
import pandas as pd
from astropy.io import fits
import astropy.coordinates as cd
from ctc_observ import *
from ctc_arrays import *
from scipy.interpolate import interp1d
#load HSC catalog first
#hsc = pd.read_csv('/cuc36/xxl/multiwavelength/HSC/wide.csv')
def pdf_sep_gen(sep_arcsec,xposerr,opterr,pdf='Rayleigh'):
'''
PDF of angular separation between an X-ray object and the other input catalog
with positional error poserr
'''
if pdf == 'Gaussian':
#that was 2d-normal
poserr=2*(opterr**2+xposerr**2)# this is 2*sigma^2
return np.exp(-sep_arcsec**2/poserr)/(np.pi*poserr)
else:
poserr = (opterr**2+xposerr**2)
return (sep_arcsec/poserr)*np.exp((-sep_arcsec**2)/poserr)
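# Numeric sanity-check sketch (illustrative only). Note the convention above
# uses exp(-sep**2/poserr) rather than the textbook exp(-sep**2/(2*poserr)),
# so the Rayleigh branch integrates to 1/2 over separation while the Gaussian
# branch integrates to 1 over the plane; a constant factor in f(r) rescales
# every LR equally and is absorbed by the empirically tuned LR threshold.
def _pdf_radial_integral(xposerr=0.5, opterr=0.05, pdf='Rayleigh'):
    r = np.linspace(0., 20., 20001)
    p = pdf_sep_gen(r, xposerr, opterr, pdf=pdf)
    if pdf == 'Gaussian':
        p = p*2.*np.pi*r  # 2-d density: include the angular measure
    return np.trapz(p, r)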
def getbkgcat(xcat,catopt,optdf,r_in = 7., r_out=35.,\
nmagbin=15, magname = 'imag_psf', ora='ra',odec='dec',corr_glob=True):
    '''
    Takes in xcat and catopt,
    finds optical sources with separation from any X-ray source
    between r_in and r_out (in arcsec),
    and derives the magnitude dependence of these background sources.
    '''
    # Disabled input validation:
    # optdf = optdf_in.copy()
    # optdf.reset_index(inplace=True)
    # if len(catopt) != len(optdf):
    #     print("catopt should be the astropy coordinate object computed from optdf!")
    #     sys.exit(1)
idhsc,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_in*u.arcsec)
#Excluding each optical source with an x-ray source within r_in
itmp=np.arange(len(catopt))
itmp[np.unique(idhsc)]=-1
#indicies for optical sources with **NO** X-ray counterparts within r_in
idhsc_ext=np.where(np.equal(optdf.index.values, itmp))[0]
#Now search for X-ray and optical matches within r_out
idhsc_in,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_out*u.arcsec)
idhsc_in = np.unique(idhsc_in)
#Cross-correlated the ``no r_in list'', and the ``r_out list''
#This will create a list of ``background optical sources''
idhsc_bkgd=np.intersect1d(idhsc_ext,idhsc_in)
hsc_bkgd=optdf.loc[idhsc_bkgd].copy()
hsc_bkgd.reset_index(inplace=True)
#
out,rmagbin=pd.cut(hsc_bkgd[magname].values,bins=nmagbin,retbins=True)
groups=hsc_bkgd.groupby(out)
#number density = total number of sources divided by the area of annulus
N_xmm=len(xcat) #number of unique XMM sources
N_bkgd=len(hsc_bkgd)
nm=groups[ora].count().values/(np.pi*(r_out**2-r_in**2)*N_xmm)
if corr_glob:
#According to Brusa et al. 2007, at faint magnitudes
#nm is not correct and should use a global one.
out,rmagbin_global=pd.cut(optdf[magname].values,bins=nmagbin,retbins=True)
groups=optdf.groupby(out)
rmag_global = binvalue(rmagbin_global)
area = \
(optdf[ora].max() - optdf[ora].min())*(optdf[odec].max() - optdf[odec].min())*3600**2
nm_global = groups[ora].count().values/area
iglobal = np.where(rmagbin > 23.)[0][:-1]
nm[iglobal] = nm_global[iglobal]
return nm,rmagbin
#def getqm(match,rmagbin, Q, NX, nm, r0=2.5):
def getqm(match,rmagbin, Q):
'''
Estimate q(m) -- the expected optical counterpart magnitude
distribution of at magintude m
'''
grp=match.groupby(pd.cut(match['rmag'].values,bins=rmagbin))
real_m=grp.rax.count().values# - np.pi*r0**2*NX*nm
qm = real_m*Q/np.sum(real_m)
return qm, Q, real_m
def calc_RC(match, quntarr, Q,lxcat,LRfrac=0.2):
'''
    If quntarr is an array with length > 1 (and values between 0 and 1),
    this subroutine finds the LRth value that maximizes C and R.
    If quntarr is a single value,
    an array with the correctly matched sources is returned.
'''
if type(lxcat) != float:
lxcat = float(lxcat)
if np.isscalar(quntarr):
LRth = quntarr
tmp = match[match.LR > LRth].copy()
grp = tmp.groupby('xid')
#select sources with only one match
onematch = grp.filter(lambda x: len(x) == 1).copy()
onematch.reset_index(inplace=True)
onematch['Rc'] = pd.Series(onematch.LR.values/(onematch.LR.values + 1 - Q), index=onematch.index)
#these are sources with multiple matches
multimatch = grp.filter(lambda x: len(x) > 1).copy()
if len(multimatch) > 0:
#regroup, and for each group only keep sources with LR larger than 0.2*max(LR)
grp = multimatch.groupby('xid')
igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
multimatch = multimatch[igood]
multimatch.reset_index(inplace=True)
grp = multimatch.groupby('xid')
multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
multimatch['Rc'] = multiRc
allmatch = pd.concat([onematch,multimatch])
else:
allmatch = onematch
R = allmatch.Rc.mean()
C = allmatch.Rc.sum()/lxcat
return allmatch, R, C
else:
R = np.zeros(len(quntarr))
C = np.zeros(len(quntarr))
LRth = np.zeros(len(quntarr))
for index, i in enumerate(quntarr):
LRth[index] = match.LR.quantile(i)
tmp = match[match.LR > LRth[index]].copy()
grp = tmp.groupby('xid')
#select sources with only one match
onematch = grp.filter(lambda x: len(x) == 1).copy()
onematch.reset_index(inplace=True)
onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
#these are sources with multiple matches
multimatch = grp.filter(lambda x: len(x) > 1).copy()
#regroup, and for each group only keep sources with LR larger than 0.2*max(LR)
grp = multimatch.groupby('xid')
igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
if np.sum(igood) > 0:
multimatch = multimatch[igood]
if multimatch.xid.nunique() > 1:
grp = multimatch.groupby('xid')
multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
multimatch['Rc'] = multiRc
multimatch.reset_index(inplace=True)
allmatch = pd.concat([onematch,multimatch])
R[index] = allmatch.Rc.mean()
C[index] = allmatch.Rc.sum()/lxcat
else:
multimatch = multimatch[igood]
multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum()+(1-Q))
allmatch = pd.concat([onematch,multimatch])
else:
allmatch = onematch
R[index] = -1.
C[index] = -1.
return R, C, LRth
'''
func = interp1d(quntarr, T, bounds_error=False,fill_value='extrapolate')
lthmax = np.linspace(0.,1.,1000)[np.where(func(np.linspace(0.,1.,1000)) ==
max(func(np.linspace(0.,1.,1000))))]
return R, C, lthmax
'''
def calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=5.0,\
lth = np.linspace(0.05,0.9,10), LRfrac=0.2,lrmax=None,\
magname = 'imag_psf',xerrname='xposerr',
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
opticalid = 'hscid',opterr = 0.05,pdf='Rayleigh'):
'''
input variables:
xdf, xcat, optdf,catopt,optdf,nm, qm, Q, rmag, rsearch=5.0,\
magname = 'rmag_psf',xerrname='xposerr',
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
opticalid = 'hscid'
For computing LR for every optical source within rsearch:
'''
idxmm, idhsc, d2d , d3d=catopt.search_around_sky(xcat,rsearch*u.arcsec)
match = pd.DataFrame({'xid':idxmm,'optid':idhsc,'dist':d2d.arcsec,\
'rmag':optdf.loc[idhsc,magname].values,'xposerr':xdf.loc[idxmm,xerrname],\
'raopt':optdf.loc[idhsc,ora].values,'decopt':optdf.loc[idhsc,odec].values,\
'rax':xdf.loc[idxmm,xra].values,'decx':xdf.loc[idxmm,xdec].values,\
'optname':optdf.loc[idhsc,opticalid].values})
#print('match len = ',len(match), 'xid nunique = ', match.xid.nunique())
fr = pdf_sep_gen(match.dist.values,match.xposerr.values,opterr,pdf=pdf)
n_m = interp1d(rmag, nm, bounds_error=False,fill_value='extrapolate')
q_m = interp1d(rmag, qm, bounds_error=False,fill_value='extrapolate')
fnm = n_m(match.rmag.values)
fqm = q_m(match.rmag.values)
fqm[np.where(fqm < 0.)] = 1e-8
LR = fr*fqm/fnm
match['LR'] = pd.Series(LR, index=match.index)
match['matchid'] = pd.Series(range(len(match)),index=match.index)
match['raoff'] = pd.Series((match.rax - match.raopt)*3600., index=match.index)
match['decoff'] = pd.Series((match.decx - match.decopt)*3600., index=match.index)
if match.xid.nunique() - len(match) == 0:
return match, match, 1.0, 1.0, match.LR.min()
else:
if lrmax is None:
R, C, LRth = calc_RC(match, lth, Q, len(xcat),LRfrac=LRfrac)
func = interp1d(LRth, R+C, bounds_error=False,fill_value='extrapolate')
arr = match.LR.values
farr = func(arr)
lthmax = arr[np.where(farr == max(farr))]
if not np.isscalar(lthmax):
if len(lthmax) >= 1:
lthmax = lthmax[0]
goodmatch, R, C = calc_RC(match,lthmax, Q, len(xcat),LRfrac=LRfrac)
return match, goodmatch, R, C, lthmax
else:
goodmatch, R, C = calc_RC(match,lrmax, Q, len(xcat),LRfrac=LRfrac)
return match, goodmatch, R, C, lrmax
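def _lr_worked_example():
    '''
    Worked numbers (assumed, illustrative only -- not from any catalog):
    a candidate at 1" separation with 1" X-ray positional error gives
    f(r) ~ exp(-1) ~ 0.37 for the Rayleigh form above; with q(m) = 0.2 and
    n(m) = 0.05 arcsec^-2, LR = f*q/n ~ 1.47, and with Q = 0.8 and a single
    candidate the reliability is Rc = LR/(LR + 1 - Q) ~ 0.88.
    '''
    f_r = float(pdf_sep_gen(np.array([1.0]), np.array([1.0]), 0.05))
    LR = f_r*0.2/0.05
    Rc = LR/(LR + 1 - 0.8)
    return LR, Rc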
def likmatch(xdf, xcat, optdf_in, catopt, radecerr = False, r0=2.5,rsearch=5.0, \
r_in = 7., r_out=35., lth = np.linspace(0.05,0.9,10),LRfrac=0.2,lrmax=None,\
nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
opticalid = 'hscid',opterr=0.05,pdf='Rayleigh',verbose=True):
'''
Likelihood ratio based source matching.
Currently is based on HSC public data release 1
(wide survey) in the XMM-LSS region.
Input: source list data frame or fits filename of the source lists.
See the input parameters for default column names
***Note that ``opticalid''
should be provided for each unique optical source
Default : xdf is in XMM SRCLIST format
optdf is for HSC.
Input parameters:
r0 - radius used for defining q(m)
r_in and r_out - radius used for selecting background sources
(X-ray sources with distance from optical counterparts that's larger than
r_in and smaller than r_out are defined as background sources.)
    '''
    # Disabled input validation:
    # if (len(catopt) != len(optdf)) or (len(xcat) != len(xdf)):
    #     print("x/opt catalogs should be the astropy coordinate objects computed from the dataframes!!")
    #     sys.exit(1)
NX = float(len(xcat))
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
#making a copy for output
dfout = xdf.copy(deep=True)
dfout.reset_index(inplace=True)
#Background number surface density
if verbose:print('Calculating background mag. distribution, nm')
nm, rmagbin = getbkgcat(xcat,catopt,optdf,r_in = r_in, r_out=r_out,
nmagbin=nmagbin, magname = magname,ora=ora,odec=odec)
#Calculating qm for the first time using r0
if verbose:print('Calculating initial counterpart mag. dist., qm')
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
if (N1 != float(len(idopt_r0))):
print('duplicated optical sources in qm calculation')
opt_qm = optdf.loc[idopt_r0,:]
grp=opt_qm.groupby(pd.cut(opt_qm[magname].values,bins=rmagbin))
total_m=grp[ora].count().values
real_m0=total_m-np.pi*r0**2*NX*nm
real_m0[np.where(real_m0 < 0.)] = 0.1*nm[np.where(real_m0 < 0.)]*np.pi*NX*r0**2
qm0 = real_m0*(Q/np.sum(real_m0))
#for unrealistical qm values (<0), assuming the real counterpart distribution is the same
#as the background
#qm0[np.where(qm0 < 0.)] = nm[np.where(qm0 < 0.)]
rmag = binvalue(rmagbin)
density = pd.DataFrame({'rmag':rmag,'qms'+str(np.round(Q,2)):qm0,'nm':nm,'real_ms':real_m0})
#With qm, nm, and Q, calculate the first match
if verbose:print('First LR matching')
match, goodmatch, R, C, lthmax = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm0, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
if verbose:print('Q0='+str(Q), 'R0='+str(R),'C0='+str(C), len(goodmatch), lthmax)
#With the new ``matched sources'', recalculate qm again until C and R converges
if lrmax is None:
for i in range(niter):
if len(goodmatch) == 0:
print('No goodmatch in first round, breaking now')
break
lthmax0 = lthmax.copy()
if verbose:print('Iterative LR matching')
qm, Q, real_m = getqm(goodmatch,rmagbin, C)#, NX, nm)
match, goodmatch, R, C, lthmax = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
xra = xra, xdec = xdec, ora = ora, odec = odec,\
opticalid = opticalid,opterr=opterr,pdf=pdf)
density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
if verbose:print(R, C, len(goodmatch),lthmax)
if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i > 5):
if verbose:print('LR threshold converges, breaking now')
density['qmfinal'] = pd.Series(qm,index=density.index)
break
elif i == max(range(niter)):
density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, density, lthmax, rmagbin
else:
return match,goodmatch, R, C, density, lthmax, rmagbin
def likmatch_rerun(xdf, xcat, optdf_in, catopt, density, radecerr = False, r0=2.5,rsearch=5.0, \
r_in = 7., r_out=35., lth = np.linspace(0.05,0.9,10),LRfrac=0.2,lrmax=None,\
nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
opticalid = 'hscid',opterr=0.05,pdf='Rayleigh',verbose=True):
'''
similar to likmatch, but requires the density output from likmatch
useful for shift-and-rematch simulations
'''
optdf = optdf_in.copy(deep=True)
optdf.set_index(numid,inplace=True)
NX = float(len(xcat))
idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
N1 = float(len(np.unique(idopt_r0)))
Q = N1/NX
nm = density.nm.values
qm = density.qmfinal.values
rmag = density.rmag.values
match, goodmatch, R, C, lthmax = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
return match,goodmatch
def finalmatch(match,goodmatch):
match.set_index(match.matchid.values,inplace=True)
mid_all = np.arange(len(match))
mid_all[goodmatch.matchid.values] = -1
badmatch = match.loc[mid_all[mid_all > 0],:]
    #if an xid already has a counterpart in goodmatch, drop it.
badmatch = badmatch[np.in1d(badmatch.xid.values, goodmatch.xid.unique(),invert=True)].copy()
badmatch.reset_index(inplace=True)
bad_ok = badmatch.drop_duplicates('xid',keep=False)
ibad = np.arange(len(badmatch))
ibad[bad_ok.index.values] = -1
bad_bad = badmatch.loc[np.where(ibad > -1)[0],:]
bad_bad.drop('index',axis=1,inplace=True)
okmatch = pd.concat([goodmatch, bad_ok])
return okmatch, bad_bad
|
CTJChen/ctc_astropylib
|
lrmatch_old.py
|
Python
|
apache-2.0
| 14,519
|
[
"Gaussian"
] |
f06ef17cd3edd734fcca1fc3ecfba9adfe27a37058fdc806ab67861dd87a836f
|
from InputParameters import InputParameters
from Job import Job
import os, sys, subprocess, shutil
class PBSJob(Job):
def validParams():
params = Job.validParams()
params.addRequiredParam('chunks', "The number of PBS chunks.")
        # Only one of the next two parameters can be specified
params.addParam('mpi_procs', "The number of MPI processes per chunk.")
params.addParam('total_mpi_procs', "The total number of MPI processes to use divided evenly among chunks.")
params.addParam('place', 'scatter:excl', "The PBS job placement scheme to use.")
params.addParam('walltime', '4:00:00', "The requested walltime for this job.")
params.addParam('no_copy', "A list of files specifically not to copy")
params.addParam('copy_files', "A list of files specifically to copy")
        params.addStringSubParam('combine_streams', '#PBS -j oe', "Combine stdout and stderr into one file (needed for NO EXPECTED ERR)")
params.addStringSubParam('threads', '--n-threads=THREADS', "The number of threads to run per MPI process.")
params.addStringSubParam('queue', '#PBS -q QUEUE', "Which queue to submit this job to.")
params.addStringSubParam('module', 'module load MODULE', 'moose-dev-gcc', "The module to load.")
params.addStringSubParam('cli_args', 'CLI_ARGS', "Any extra command line arguments to tack on.")
params.addStringSubParam('notifications', '#PBS -m NOTIFICATIONS', "The PBS notifications to enable: 'b' for begin, 'e' for end, 'a' for abort.")
params.addStringSubParam('notify_address', '#PBS -M NOTIFY_ADDRESS', "The email address to use for PBS notifications")
# Soft linked output during run
params.addParam('soft_link_output', False, "Create links to your STDOUT and STDERR files in your working directory during the run.")
params.addRequiredParam('moose_application', "The full path to the application to run.")
params.addRequiredParam('input_file', "The input file name.")
return params
validParams = staticmethod(validParams)
def __init__(self, name, params):
Job.__init__(self, name, params)
# Called from the current directory to copy files (usually from the parent)
def copyFiles(self, job_file):
params = self.specs
        # Copy files (unless they are listed in "no_copy")
for file in os.listdir('../'):
if os.path.isfile('../' + file) and file != job_file and (not params.isValid('no_copy') or file not in params['no_copy']):
shutil.copy('../' + file, '.')
# Copy directories
if params.isValid('copy_files'):
for file in params['copy_files'].split():
print file
if os.path.isfile('../' + file):
shutil.copy('../' + file, '.')
elif os.path.isdir('../' + file):
shutil.copytree('../' + file, file)
def prepareJobScript(self):
f = open(self.specs['template_script'], 'r')
content = f.read()
f.close()
params = self.specs
# Error check
if params.isValid('mpi_procs') and params.isValid('total_mpi_procs'):
print "ERROR: 'mpi_procs' and 'total_mpi_procs' are exclusive. Only specify one!"
sys.exit(1)
# Do a few PBS job size calculations
if params.isValid('mpi_procs'):
params['mpi_procs_per_chunk'] = params['mpi_procs']
elif params.isValid('total_mpi_procs'):
params['mpi_procs_per_chunk'] = str(int(params['total_mpi_procs']) / int(params['chunks'])) # Need some more error checking here
else:
print "ERROR: You must specify either 'mpi_procs' or 'total_mpi_procs'"
sys.exit(1)
if params.isValid('threads'):
threads = int(params['threads'])
else:
threads = 1
params['ncpus_per_chunk'] = str(int(params['mpi_procs_per_chunk']) * threads)
# Soft Link output requires several substitutions in the template file
soft_link1 = ''
soft_link2 = ''
soft_link3 = ''
if params['soft_link_output'] == 'True':
soft_link1 = '#PBS -koe'
soft_link2 = 'ln -s $HOME/$PBS_JOBNAME.o$JOB_NUM $PBS_JOBNAME.o$JOB_NUM\nln -s $HOME/$PBS_JOBNAME.e$JOB_NUM $PBS_JOBNAME.e$JOB_NUM'
soft_link3 = 'rm $PBS_JOBNAME.o$JOB_NUM\nmv $HOME/$PBS_JOBNAME.o$JOB_NUM $PBS_JOBNAME.o$JOB_NUM\nmv $HOME/$PBS_JOBNAME.e$JOB_NUM $PBS_JOBNAME.e$JOB_NUM'
# Add substitutions on the fly
params.addStringSubParam('soft_link1', 'SOFT_LINK1', soft_link1, 'private')
params.addStringSubParam('soft_link2', 'SOFT_LINK2', soft_link2, 'private')
params.addStringSubParam('soft_link3', 'SOFT_LINK3', soft_link3, 'private')
f = open(os.path.split(params['template_script'])[1], 'w')
# Do all of the replacements for the valid parameters
for param in params.valid_keys():
if param in params.substitute:
params[param] = params.substitute[param].replace(param.upper(), params[param])
content = content.replace('<' + param.upper() + '>', str(params[param]))
# Make sure we strip out any string substitution parameters that were not supplied
for param in params.substitute_keys():
if not params.isValid(param):
content = content.replace('<' + param.upper() + '>', '')
f.write(content)
f.close()
def launch(self):
# Finally launch the job
my_process = subprocess.Popen('qsub ' + os.path.split(self.specs['template_script'])[1], stdout=subprocess.PIPE, shell=True)
print 'JOB_NAME:', self.specs['job_name'], 'JOB_ID:', my_process.communicate()[0].split('.')[0], 'TEST_NAME:', self.specs['test_name']
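# Standalone sketch (hypothetical two-line template, not a real PBS script) of
# the two-pass substitution performed in prepareJobScript: supplied parameters
# are first expanded through their substitution strings, then any leftover
# <UPPER> placeholders for unsupplied parameters are stripped.
def _demo_substitution():
    content = '#PBS -l walltime=<WALLTIME>\n<QUEUE>\n'
    supplied = {'walltime': '4:00:00'}         # 'queue' is not supplied
    substitute = {'queue': '#PBS -q QUEUE'}
    for param, value in supplied.items():
        if param in substitute:
            value = substitute[param].replace(param.upper(), value)
        content = content.replace('<' + param.upper() + '>', str(value))
    for param in substitute:
        if param not in supplied:
            content = content.replace('<' + param.upper() + '>', '')
    return content                             # '#PBS -l walltime=4:00:00\n\n'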
|
gleicher27/Tardigrade
|
moose/framework/scripts/ClusterLauncher/PBSJob.py
|
Python
|
lgpl-2.1
| 5,473
|
[
"MOOSE"
] |
2c3b7652547ea8dfab18aa60ac8d1b641ea014bdad03642acf3d7c2dca2a972c
|
# Written by Bram Cohen
# see LICENSE.txt for license information
# For a description of the algorithm see https://wiki.theory.org/BitTorrentSpecification#Choking_and_Optimistic_Unchoking.
from random import randrange
class Choker:
def __init__(self, max_uploads, schedule, done = lambda: False, min_uploads = None):
# Maximum and minimum number of peers we should unchoke.
self.max_uploads = max_uploads
if min_uploads is None:
min_uploads = max_uploads
self.min_uploads = min_uploads
# Function to schedule events in the reactor loop of RawServer.
self.schedule = schedule
# Instances of Connection defined in Connecter.py.
self.connections = []
# Counter that controls when we rotate the optimistically unchoked peer.
self.count = 0
# Returns whether we have all the pieces and are seeding.
self.done = done
schedule(self._round_robin, 10)
def _round_robin(self):
self.schedule(self._round_robin, 10)
self.count += 1
if self.count % 3 == 0:
# Visit here every 30 seconds.
for i in xrange(len(self.connections)):
u = self.connections[i].get_upload()
if u.is_choked() and u.is_interested():
# Rotate the connections so this choked but interested peer is at the front.
# The _rechoke will unchoke this peer, perhaps as the optimistic unchoke.
self.connections = self.connections[i:] + self.connections[:i]
break
self._rechoke()
def _snubbed(self, c):
if self.done():
return False
return c.get_download().is_snubbed()
def _rate(self, c):
if self.done():
# Return upload speed to peer if this client is a seed.
return c.get_upload().get_rate()
else:
# Return download speed from peer if this client is also a peer.
return c.get_download().get_rate()
def _rechoke(self):
preferred = []
for c in self.connections:
if not self._snubbed(c) and c.get_upload().is_interested():
# This peer isn't snubbing us and is interested in pieces we have.
preferred.append((-self._rate(c), c))
# Sort so maximum download or upload rates are at front of array.
preferred.sort()
# Get the (max_uploads - 1) fastest connections to peers.
del preferred[self.max_uploads - 1:]
preferred = [x[1] for x in preferred]
count = len(preferred)
# True if we have designated a peer as optimistically unchoked.
hit = False
for c in self.connections:
u = c.get_upload()
if c in preferred:
# Unchoke this connection if one of the fastest.
u.unchoke()
else:
# If max_uploads = min_uploads, then we should visit here at least once.
if count < self.min_uploads or not hit:
# Not enough uploads started, or haven't designated a peer as optimistically unchoked.
# Unchoke this connection.
# Note that this can unchoke peers that are not interested.
u.unchoke()
if u.is_interested():
# This is a peer that is actually going to download from us and consume bandwidth.
count += 1
hit = True
else:
u.choke()
def connection_made(self, connection, p = None):
# Give a new peer 3x chance of starting as optimistically unchoked.
if p is None:
p = randrange(-2, len(self.connections) + 1)
self.connections.insert(max(p, 0), connection)
self._rechoke()
def connection_lost(self, connection):
self.connections.remove(connection)
if connection.get_upload().is_interested() and not connection.get_upload().is_choked():
# Lost connection to this unchoked peer, so can now unchoke a different one.
self._rechoke()
def interested(self, connection):
if not connection.get_upload().is_choked():
self._rechoke()
def not_interested(self, connection):
if not connection.get_upload().is_choked():
self._rechoke()
def change_max_uploads(self, newval):
def foo(self=self, newval=newval):
self._change_max_uploads(newval)
        self.schedule(foo, 0)
def _change_max_uploads(self, newval):
self.max_uploads = newval
self._rechoke()
class DummyScheduler:
def __init__(self):
self.s = []
def __call__(self, func, delay):
self.s.append((func, delay))
class DummyConnection:
def __init__(self, v = 0):
self.u = DummyUploader()
self.d = DummyDownloader(self)
self.v = v
def get_upload(self):
return self.u
def get_download(self):
return self.d
class DummyDownloader:
def __init__(self, c):
self.s = False
self.c = c
def is_snubbed(self):
return self.s
def get_rate(self):
return self.c.v
class DummyUploader:
def __init__(self):
self.i = False
self.c = True
def choke(self):
if not self.c:
self.c = True
def unchoke(self):
if self.c:
self.c = False
def is_choked(self):
return self.c
def is_interested(self):
return self.i
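def demo_choke_slots():
    # Illustrative sketch (not one of the original tests): with max_uploads=2,
    # one slot goes to the fastest interested peer and the other is the
    # optimistic unchoke at the front of the list; the middle peer gets neither.
    s = DummyScheduler()
    choker = Choker(2, s)
    c_slow, c_mid, c_fast = DummyConnection(1), DummyConnection(3), DummyConnection(5)
    for c in (c_slow, c_mid, c_fast):
        c.u.i = True
    choker.connection_made(c_slow, 0)
    choker.connection_made(c_mid, 1)
    choker.connection_made(c_fast, 2)
    assert not c_fast.u.c  # fastest interested peer: regular unchoke slot
    assert not c_slow.u.c  # front of the list: optimistic unchoke
    assert c_mid.u.c       # neither slot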
def test_round_robin_with_no_downloads():
s = DummyScheduler()
Choker(2, s)
assert len(s.s) == 1
assert s.s[0][1] == 10
s.s[0][0]()
del s.s[0]
assert len(s.s) == 1
assert s.s[0][1] == 10
s.s[0][0]()
del s.s[0]
s.s[0][0]()
del s.s[0]
s.s[0][0]()
del s.s[0]
def test_resort():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection()
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c4 = DummyConnection(3)
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
assert not c1.u.c
choker.connection_made(c2, 1)
assert not c1.u.c
assert not c2.u.c
choker.connection_made(c3, 1)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
c2.v = 2
c3.v = 1
choker.connection_made(c4, 1)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
assert not c4.u.c
choker.connection_lost(c4)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
s.s[0][0]()
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
def test_interest():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection()
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
assert not c1.u.c
choker.connection_made(c2, 1)
assert not c1.u.c
assert not c2.u.c
choker.connection_made(c3, 1)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
c3.u.i = False
choker.not_interested(c3)
assert not c1.u.c
assert not c2.u.c
assert not c3.u.c
c3.u.i = True
choker.interested(c3)
assert not c1.u.c
assert c2.u.c
assert not c3.u.c
choker.connection_lost(c3)
assert not c1.u.c
assert not c2.u.c
def test_robin_interest():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c1.u.i = True
choker.connection_made(c2)
assert not c2.u.c
choker.connection_made(c1, 0)
assert not c1.u.c
assert c2.u.c
c1.u.i = False
choker.not_interested(c1)
assert not c1.u.c
assert not c2.u.c
c1.u.i = True
choker.interested(c1)
assert not c1.u.c
assert c2.u.c
choker.connection_lost(c1)
assert not c2.u.c
def test_skip_not_interested():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c1.u.i = True
c3.u.i = True
choker.connection_made(c2)
assert not c2.u.c
choker.connection_made(c1, 0)
assert not c1.u.c
assert c2.u.c
choker.connection_made(c3, 2)
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f = s.s[0][0]
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert c2.u.c
assert not c3.u.c
def test_connection_lost_no_interrupt():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c1.u.i = True
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
choker.connection_made(c2, 1)
choker.connection_made(c3, 2)
f = s.s[0][0]
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert not c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert not c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert not c2.u.c
assert c3.u.c
choker.connection_lost(c3)
assert c1.u.c
assert not c2.u.c
f()
assert not c1.u.c
assert c2.u.c
choker.connection_lost(c2)
assert not c1.u.c
def test_connection_made_no_interrupt():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c3 = DummyConnection(2)
c1.u.i = True
c2.u.i = True
c3.u.i = True
choker.connection_made(c1)
choker.connection_made(c2, 1)
f = s.s[0][0]
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
choker.connection_made(c3, 1)
assert not c1.u.c
assert c2.u.c
assert c3.u.c
f()
assert c1.u.c
assert c2.u.c
assert not c3.u.c
def test_round_robin():
s = DummyScheduler()
choker = Choker(1, s)
c1 = DummyConnection(0)
c2 = DummyConnection(1)
c1.u.i = True
c2.u.i = True
choker.connection_made(c1)
choker.connection_made(c2, 1)
f = s.s[0][0]
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
f()
assert not c1.u.c
assert c2.u.c
f()
assert c1.u.c
assert not c2.u.c
f()
assert c1.u.c
assert not c2.u.c
f()
assert c1.u.c
assert not c2.u.c
f()
assert not c1.u.c
assert c2.u.c
def test_multi():
s = DummyScheduler()
choker = Choker(4, s)
c1 = DummyConnection(0)
c2 = DummyConnection(0)
c3 = DummyConnection(0)
c4 = DummyConnection(8)
c5 = DummyConnection(0)
c6 = DummyConnection(0)
c7 = DummyConnection(6)
c8 = DummyConnection(0)
c9 = DummyConnection(9)
c10 = DummyConnection(7)
c11 = DummyConnection(10)
choker.connection_made(c1, 0)
choker.connection_made(c2, 1)
choker.connection_made(c3, 2)
choker.connection_made(c4, 3)
choker.connection_made(c5, 4)
choker.connection_made(c6, 5)
choker.connection_made(c7, 6)
choker.connection_made(c8, 7)
choker.connection_made(c9, 8)
choker.connection_made(c10, 9)
choker.connection_made(c11, 10)
c2.u.i = True
c4.u.i = True
c6.u.i = True
c8.u.i = True
c10.u.i = True
c2.d.s = True
c6.d.s = True
c8.d.s = True
s.s[0][0]()
assert not c1.u.c
assert not c2.u.c
assert not c3.u.c
assert not c4.u.c
assert not c5.u.c
assert not c6.u.c
assert c7.u.c
assert c8.u.c
assert c9.u.c
assert not c10.u.c
assert c11.u.c
|
mgp/bittorrent-dissected
|
Choker.py
|
Python
|
mit
| 11,757
|
[
"VisIt"
] |
ad784e1a58e85009360333148fed1994db864632471d53d0f7d3a29413bc9831
|
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Generate Climate Based Sky
This component generates a climate-based sky for any hour of the year
-
Provided by Honeybee 0.0.66
Args:
north_: Input a vector to be used as a true North direction for the sun path or a number between 0 and 360 that represents the degrees off from the y-axis to make North. The default North direction is set to the Y-axis (0 degrees).
_weatherFile: epw weather file address on your system
_month: Month of the study [1-12]
_day: Day of the study [1-31]
_hour: Hour of the study [1-24]
Returns:
radiationValues: Direct and diffuse radiation of the sky
skyFilePath: Sky file location on the local drive
"""
ghenv.Component.Name = "Honeybee_Generate Climate Based Sky"
ghenv.Component.NickName = 'genClimateBasedSky'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "02 | Daylight | Light Source"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import os
import scriptcontext as sc
import Grasshopper.Kernel as gh
import math
def date2Hour(month, day, hour):
# fix the end day
numOfDays = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
# dd = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
JD = numOfDays[int(month)-1] + int(day)
return (JD - 1) * 24 + hour
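# Editorial worked example: date2Hour(3, 21, 12) gives JD = 59 + 21 = 80,
# so the hour of year is (80 - 1) * 24 + 12 = 1908.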
def getRadiationValues(epw_file, HOY):
epwfile = open(epw_file,"r")
for lineCount, line in enumerate(epwfile):
if lineCount == int(HOY + 8 - 1):
dirRad = (float(line.split(',')[14]))
difRad = (float(line.split(',')[15]))
return dirRad, difRad
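# Editorial note: EPW files carry 8 header lines before the hourly records,
# hence the "HOY + 8 - 1" row offset above; 0-indexed comma-separated fields
# 14 and 15 hold the direct normal and diffuse horizontal radiation.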
def RADDaylightingSky(epwFileAddress, locName, lat, long, timeZone, hour, day, month, north = 0):
dirNrmRad, difHorRad = getRadiationValues(epwFileAddress, date2Hour(month, day, hour))
print "Direct: " + `dirNrmRad` + "| Diffuse: " + `difHorRad`
return "# start of sky definition for daylighting studies\n" + \
"# location name: " + locName + " LAT: " + lat + "\n" + \
"!gendaylit " + `month` + ' ' + `day` + ' ' + `hour` + \
" -a " + lat + " -o " + `-float(long)` + " -m " + `-float(timeZone) * 15` + \
" -W " + `dirNrmRad` + " " + `difHorRad` + " -O " + `outputType` + \
" | xform -rz " + str(north) + "\n" + \
"skyfunc glow sky_mat\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"1 1 1 0\n" + \
"sky_mat source sky\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"0 0 1 180\n" + \
"skyfunc glow ground_glow\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"1 .8 .5 0\n" + \
"ground_glow source ground\n" + \
"0\n" + \
"0\n" + \
"4\n" + \
"0 0 -1 180\n"
def main(outputType, weatherFile, month, day, hour, north = 0):
# import the classes
if sc.sticky.has_key('honeybee_release') and sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
if sc.sticky['honeybee_release'].isInputMissing(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
hb_folders = sc.sticky["honeybee_folders"]
hb_RADPath = hb_folders["RADPath"]
hb_RADLibPath = hb_folders["RADLibPath"]
else:
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let Ladybug and Honeybee to fly...")
return -1
# check that gendaylit exists
if not os.path.isfile(hb_RADPath + "\\gendaylit.exe"):
msg = "Cannot find gendaylit.exe at " + hb_RADPath + \
"Make sure that gendaylit is installed on your system."
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return -1
if weatherFile != None and weatherFile[-3:] == 'epw':
if not os.path.isfile(weatherFile):
print "Can't find the weather file at: " + weatherFile
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Can't find the weather file at: " + weatherFile)
return -1
# import location data from the epw file
locName, lat, lngt, timeZone, elev, locationStr = lb_preparation.epwLocation(weatherFile)
newLocName = lb_preparation.removeBlank(locName)
else:
print "epwWeatherFile address is not a valid .epw file"
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "epwWeatherFile address is not a valid .epw file")
return -1
# make new folder for each city
subWorkingDir = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "skylib\\climateBasedSkies\\", newLocName)
subWorkingDir = lb_preparation.makeWorkingDir(subWorkingDir)
# print 'Current working directory is set to: ', subWorkingDir
outputFile = subWorkingDir + "\\climateBasedSky@_" + `month` + "_" + `day` + "@" + ('%.2f'%hour).replace(".", "") + ".sky"
northAngle, northVector = lb_preparation.angle2north(north)
skyStr = RADDaylightingSky(weatherFile, newLocName, lat, lngt, timeZone, hour, day, month, math.degrees(northAngle))
skyFile = open(outputFile, 'w')
skyFile.write(skyStr)
skyFile.close()
return outputFile , `day` + "_" + `month` + "@" + ('%.2f'%hour).replace(".", "")
if _weatherFile!=None and _month!=None and _day!=None and _hour!=None:
outputType = 0
result = main(outputType, _weatherFile, _month, _day, _hour, north_)
if result!=-1:
skyFilePath, skyDescription = result
|
mostaphaRoudsari/Honeybee
|
src/Honeybee_Generate Climate Based Sky.py
|
Python
|
gpl-3.0
| 7,388
|
[
"EPW"
] |
6000e1c30d3ec37c3c76f4b254d460e7932d401a715070b60026916884147a93
|
import socket
import webbrowser
import httplib2
import oauth2client.clientsecrets as clientsecrets
from six.moves import input
from apiclient.discovery import build
from functools import wraps
from oauth2client.client import FlowExchangeError
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import OOB_CALLBACK_URN
from oauth2client.file import Storage
from oauth2client.file import CredentialsFileSymbolicLinkError
from oauth2client.tools import ClientRedirectHandler
from oauth2client.tools import ClientRedirectServer
from oauth2client.util import scopes_to_string
from .apiattr import ApiAttribute
from .apiattr import ApiAttributeMixin
from .settings import LoadSettingsFile
from .settings import ValidateSettings
from .settings import SettingsError
from .settings import InvalidConfigError
class AuthError(Exception):
"""Base error for authentication/authorization errors."""
class InvalidCredentialsError(IOError):
"""Error trying to read credentials file."""
class AuthenticationRejected(AuthError):
"""User rejected authentication."""
class AuthenticationError(AuthError):
"""General authentication error."""
class RefreshError(AuthError):
"""Access token refresh error."""
def LoadAuth(decoratee):
"""Decorator to check if the auth is valid and loads auth if not."""
@wraps(decoratee)
def _decorated(self, *args, **kwargs):
if self.auth is None: # Initialize auth if needed.
self.auth = GoogleAuth()
if self.auth.access_token_expired:
self.auth.LocalWebserverAuth()
if self.auth.service is None: # Check if drive api is built.
self.auth.Authorize()
return decoratee(self, *args, **kwargs)
return _decorated
def CheckAuth(decoratee):
"""Decorator to check if it requires OAuth2 flow request."""
@wraps(decoratee)
def _decorated(self, *args, **kwargs):
dirty = False
code = None
save_credentials = self.settings.get('save_credentials')
if self.credentials is None and save_credentials:
self.LoadCredentials()
if self.flow is None:
self.GetFlow()
if self.credentials is None:
code = decoratee(self, *args, **kwargs)
dirty = True
else:
if self.access_token_expired:
if self.credentials.refresh_token is not None:
self.Refresh()
else:
code = decoratee(self, *args, **kwargs)
dirty = True
if code is not None:
self.Auth(code)
if dirty and save_credentials:
self.SaveCredentials()
return _decorated
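# A minimal usage sketch (editorial; assumes a valid client_secrets.json in
# the working directory, per DEFAULT_SETTINGS below):
#
#   gauth = GoogleAuth()
#   gauth.LocalWebserverAuth()  # opens a browser and exchanges the code
#   drive = gauth.service       # 'drive' service built by Authorize()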
class GoogleAuth(ApiAttributeMixin, object):
"""Wrapper class for oauth2client library in google-api-python-client.
Loads all settings and credentials from one 'settings.yaml' file
and performs common OAuth2.0 related functionality such as authentication
and authorization.
"""
DEFAULT_SETTINGS = {
'client_config_backend': 'file',
'client_config_file': 'client_secrets.json',
'save_credentials': False,
'oauth_scope': ['https://www.googleapis.com/auth/drive']
}
CLIENT_CONFIGS_LIST = ['client_id', 'client_secret', 'auth_uri',
'token_uri', 'revoke_uri', 'redirect_uri']
settings = ApiAttribute('settings')
client_config = ApiAttribute('client_config')
flow = ApiAttribute('flow')
credentials = ApiAttribute('credentials')
http = ApiAttribute('http')
service = ApiAttribute('service')
def __init__(self, settings_file='settings.yaml', http_timeout=None):
"""Create an instance of GoogleAuth.
This constructor just sets the path of settings file.
It does not actually read the file.
:param settings_file: path of settings file. 'settings.yaml' by default.
:type settings_file: str.
:param http_timeout: HTTP timeout in seconds; None uses the library default.
:type http_timeout: float.
"""
self.http_timeout = http_timeout
ApiAttributeMixin.__init__(self)
self.client_config = {}
try:
self.settings = LoadSettingsFile(settings_file)
except SettingsError:
self.settings = self.DEFAULT_SETTINGS
else:
if self.settings is None:
self.settings = self.DEFAULT_SETTINGS
else:
ValidateSettings(self.settings)
@property
def access_token_expired(self):
"""Checks if access token doesn't exist or is expired.
:returns: bool -- True if access token doesn't exist or is expired.
"""
if self.credentials is None:
return True
return self.credentials.access_token_expired
@CheckAuth
def LocalWebserverAuth(self, host_name='localhost',
port_numbers=[8080, 8090]):
"""Authenticate and authorize from user by creating local webserver and
retrieving authentication code.
This function is not for webserver application. It creates local webserver
for user from standalone application.
:param host_name: host name of the local webserver.
:type host_name: str.
:param port_numbers: list of port numbers to try.
:type port_numbers: list.
:returns: str -- code returned from local webserver
:raises: AuthenticationRejected, AuthenticationError
"""
success = False
port_number = 0
for port in port_numbers:
port_number = port
try:
httpd = ClientRedirectServer((host_name, port), ClientRedirectHandler)
except socket.error:
pass
else:
success = True
break
if success:
oauth_callback = 'http://%s:%s/' % (host_name, port_number)
else:
print('Failed to start a local webserver. Please check your firewall')
print('settings and locally running programs that may be blocking or')
print('using configured ports. Default ports are 8080 and 8090.')
raise AuthenticationError()
self.flow.redirect_uri = oauth_callback
authorize_url = self.GetAuthUrl()
webbrowser.open(authorize_url, new=1, autoraise=True)
print('Your browser has been opened to visit:')
print()
print(' ' + authorize_url)
print()
httpd.handle_request()
if 'error' in httpd.query_params:
print('Authentication request was rejected')
raise AuthenticationRejected('User rejected authentication')
if 'code' in httpd.query_params:
return httpd.query_params['code']
else:
print('Failed to find "code" in the query parameters of the redirect.')
print('Try command-line authentication')
raise AuthenticationError('No code found in redirect')
@CheckAuth
def CommandLineAuth(self):
"""Authenticate and authorize from user by printing authentication url
retrieving authentication code from command-line.
:returns: str -- code returned from commandline.
"""
self.flow.redirect_uri = OOB_CALLBACK_URN
authorize_url = self.GetAuthUrl()
print('Go to the following link in your browser:')
print()
print(' ' + authorize_url)
print()
return input('Enter verification code: ').strip()
def LoadCredentials(self, backend=None):
"""Loads credentials or create empty credentials if it doesn't exist.
:param backend: target backend to save credential to.
:type backend: str.
:raises: InvalidConfigError
"""
if backend is None:
backend = self.settings.get('save_credentials_backend')
if backend is None:
raise InvalidConfigError('Please specify credential backend')
if backend == 'file':
self.LoadCredentialsFile()
else:
raise InvalidConfigError('Unknown save_credentials_backend')
def LoadCredentialsFile(self, credentials_file=None):
"""Loads credentials or create empty credentials if it doesn't exist.
Loads credentials file from path in settings if not specified.
:param credentials_file: path of credentials file to read.
:type credentials_file: str.
:raises: InvalidConfigError, InvalidCredentialsError
"""
if credentials_file is None:
credentials_file = self.settings.get('save_credentials_file')
if credentials_file is None:
raise InvalidConfigError('Please specify credentials file to read')
try:
storage = Storage(credentials_file)
self.credentials = storage.get()
except CredentialsFileSymbolicLinkError:
raise InvalidCredentialsError('Credentials file cannot be symbolic link')
def SaveCredentials(self, backend=None):
"""Saves credentials according to specified backend.
If you have any specific credentials backend in mind, don't use this
function and use the corresponding function you want.
:param backend: backend to save credentials.
:type backend: str.
:raises: InvalidConfigError
"""
if backend is None:
backend = self.settings.get('save_credentials_backend')
if backend is None:
raise InvalidConfigError('Please specify credential backend')
if backend == 'file':
self.SaveCredentialsFile()
else:
raise InvalidConfigError('Unknown save_credentials_backend')
def SaveCredentialsFile(self, credentials_file=None):
"""Saves credentials to the file in JSON format.
:param credentials_file: destination to save file to.
:type credentials_file: str.
:raises: InvalidConfigError, InvalidCredentialsError
"""
if self.credentials is None:
raise InvalidCredentialsError('No credentials to save')
if credentials_file is None:
credentials_file = self.settings.get('save_credentials_file')
if credentials_file is None:
raise InvalidConfigError('Please specify credentials file to save to')
try:
storage = Storage(credentials_file)
storage.put(self.credentials)
self.credentials.set_store(storage)
except CredentialsFileSymbolicLinkError:
raise InvalidCredentialsError('Credentials file cannot be symbolic link')
def LoadClientConfig(self, backend=None):
"""Loads client configuration according to specified backend.
If you have any specific backend to load client configuration from in mind,
don't use this function and use the corresponding function you want.
:param backend: backend to load client configuration from.
:type backend: str.
:raises: InvalidConfigError
"""
if backend is None:
backend = self.settings.get('client_config_backend')
if backend is None:
raise InvalidConfigError('Please specify client config backend')
if backend == 'file':
self.LoadClientConfigFile()
elif backend == 'settings':
self.LoadClientConfigSettings()
else:
raise InvalidConfigError('Unknown client_config_backend')
def LoadClientConfigFile(self, client_config_file=None):
"""Loads client configuration file downloaded from APIs console.
Loads client config file from path in settings if not specified.
:param client_config_file: path of client config file to read.
:type client_config_file: str.
:raises: InvalidConfigError
"""
if client_config_file is None:
client_config_file = self.settings['client_config_file']
try:
client_type, client_info = clientsecrets.loadfile(client_config_file)
except clientsecrets.InvalidClientSecretsError as error:
raise InvalidConfigError('Invalid client secrets file %s' % error)
if client_type not in (clientsecrets.TYPE_WEB,
clientsecrets.TYPE_INSTALLED):
raise InvalidConfigError('Unknown client_type of client config file')
try:
config_index = ['client_id', 'client_secret', 'auth_uri', 'token_uri']
for config in config_index:
self.client_config[config] = client_info[config]
self.client_config['revoke_uri'] = client_info.get('revoke_uri')
self.client_config['redirect_uri'] = client_info['redirect_uris'][0]
except KeyError:
raise InvalidConfigError('Insufficient client config in file')
def LoadClientConfigSettings(self):
"""Loads client configuration from settings file.
:raises: InvalidConfigError
"""
for config in self.CLIENT_CONFIGS_LIST:
try:
self.client_config[config] = self.settings['client_config'][config]
except KeyError:
raise InvalidConfigError('Insufficient client config in settings')
def GetFlow(self):
"""Gets Flow object from client configuration.
:raises: InvalidConfigError
"""
if not all(config in self.client_config
for config in self.CLIENT_CONFIGS_LIST):
self.LoadClientConfig()
constructor_kwargs = {
'redirect_uri': self.client_config['redirect_uri'],
'auth_uri': self.client_config['auth_uri'],
'token_uri': self.client_config['token_uri'],
}
if self.client_config['revoke_uri'] is not None:
constructor_kwargs['revoke_uri'] = self.client_config['revoke_uri']
self.flow = OAuth2WebServerFlow(
self.client_config['client_id'],
self.client_config['client_secret'],
scopes_to_string(self.settings['oauth_scope']),
**constructor_kwargs)
if self.settings.get('get_refresh_token'):
self.flow.params.update({
'access_type': 'offline',
'approval_prompt': 'force'
})
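# Editorial note: requesting access_type 'offline' above makes the OAuth2
# provider return a refresh_token on first consent, which Refresh() needs.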
def Refresh(self):
"""Refreshes the access_token.
:raises: RefreshError
"""
if self.credentials is None:
raise RefreshError('No credential to refresh.')
if self.credentials.refresh_token is None:
raise RefreshError('No refresh_token found. '
'Please set access_type of OAuth to offline.')
if self.http is None:
self.http = httplib2.Http(timeout=self.http_timeout)
try:
self.credentials.refresh(self.http)
except AccessTokenRefreshError as error:
raise RefreshError('Access token refresh failed: %s' % error)
def GetAuthUrl(self):
"""Creates authentication url where user visits to grant access.
:returns: str -- Authentication url.
"""
if self.flow is None:
self.GetFlow()
return self.flow.step1_get_authorize_url()
def Auth(self, code):
"""Authenticate, authorize, and build service.
:param code: Code for authentication.
:type code: str.
:raises: AuthenticationError
"""
self.Authenticate(code)
self.Authorize()
def Authenticate(self, code):
"""Authenticates given authentication code back from user.
:param code: Code for authentication.
:type code: str.
:raises: AuthenticationError
"""
if self.flow is None:
self.GetFlow()
try:
self.credentials = self.flow.step2_exchange(code)
except FlowExchangeError as e:
raise AuthenticationError('OAuth2 code exchange failed: %s' % e)
print('Authentication successful.')
def Authorize(self):
"""Authorizes and builds service.
:raises: AuthenticationError
"""
if self.http is None:
self.http = httplib2.Http(timeout=self.http_timeout)
if self.access_token_expired:
raise AuthenticationError('No valid credentials provided to authorize')
self.http = self.credentials.authorize(self.http)
self.service = build('drive', 'v2', http=self.http)
|
smeggingsmegger/PyDrive
|
pydrive/auth.py
|
Python
|
apache-2.0
| 15,036
|
[
"VisIt"
] |
9e82eddb9f827dccea99b37577a0f39f4d4f5b5403b4f98f08c1cf3fc504d193
|
import numpy as np
import re
class PwxOutput:
def __init__(self, out):
self.outfile = out
def total_energy(self):
"""Parse output file to get total energy
Because this method should also work for relaxation
runs, in which a total energy is printed for every iteration,
we traverse the file bottom-up, and return the first total energy we
find.
"""
with open(self.outfile) as handle:
for line in reversed(handle.readlines()):
words = line.split()
if words and words[0] == '!':
return words[4]
def fermi_energy(self):
"""Parse output file to get Fermi energy
Because this method should also work for relaxation
runs, in which the Fermi energy is printed for every iteration,
we traverse the file bottom-up, and return the first Fermi energy we
find.
"""
with open(self.outfile) as handle:
for line in reversed(handle.readlines()):
words = line.split()
if len(words) == 6 and words[1] == 'Fermi':
return words[4]
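# Hypothetical usage sketch (editorial; the file name is illustrative only):
#   scf = ScfOutput('pw.scf.out')
#   print(scf.total_energy(), scf.fermi_energy())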
class ScfOutput(PwxOutput):
def __init__(self, out):
super().__init__(out)
class RelaxOutput(PwxOutput):
""" A relaxation calculation with variable atomic positions
A structure relaxation where the cell geometry is kept fixed, and the atoms
are allowed to move. This is in essence a series of SCF calculations, and we
have the ability to get the 'relaxed atomic coordinates' from this.
"""
def __init__(self, out):
super().__init__(out)
def relaxed_coordinates(self):
""" Parse output file to get the relaxed coordinates
Split the file content at the 'Begin final coordinates' and "End final
coordinates' part, and extract the new coordinates.
"""
with open(self.outfile) as handle:
content = handle.read()
# Split content until we get the relevant section
before, relevant = content.split('Begin final coordinates', 1)
cruft, relevant = relevant.split('ATOMIC_POSITIONS (crystal)', 1)
relevant, after = relevant.split('End final coordinates', 1)
# Get coordinates as a list of coordinate lines; skip the empty remainder
# of the ATOMIC_POSITIONS line that splitlines() yields first
# (editorial fix: the original slice [0:] kept that empty element)
coordinates = relevant.splitlines()[1:]
# Convert them into arrays of floats
coordarrays = [np.array(list(map(float, x.split()[1:])))
for x in coordinates]
return coordarrays
class VcRelaxOutput(RelaxOutput):
""" Relaxation calculation with variable cell dimensions.
A special case of a structure relaxation, where both the atoms and the cell
geometry is allowed to vary. This class extends the usual relaxation
calculation, and has the added behavior that a 'relaxed lattice constant'
can be obtained.
"""
def __init__(self, out):
super().__init__(out)
def relaxed_alat(self):
""" Parse output file to get the relaxed coordinates
Split the file content at the 'Begin final coordinates' and "End final
coordinates' part, and extract the new lattice constant.
WARNING: We scale the lattice vectors such that the first lattice vector
is again (1.0, 0.0, 0.0). This means this routine will NOT work if the
first lattice vector is not along the x-direction!
"""
with open(self.outfile) as handle:
content = handle.read()
# Split content until we get the relevant section
before, relevant = content.split('Begin final coordinates', 1)
cruft, relevant = relevant.split('CELL_PARAMETERS', 1)
lines = relevant.splitlines()
alat = lines[0].split()[1] # Contains trailing ')' !
# Strip non-numeric characters from alat
non_decimal = re.compile(r'[^\d.]+')
alat = float(non_decimal.sub('', alat))
scale = float(lines[1].split()[0])
return alat*scale
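# Editorial note: pw.x prints a header like "CELL_PARAMETERS (alat= 10.20)",
# so lines[0].split()[1] is "10.20)" and the regex strips the trailing ')'.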
class BandsOutput(PwxOutput):
def __init__(self, out, dat):
super().__init__(out)
self.dat = dat
class ParitiesOutput(PwxOutput):
def __init__(self, outputfiles):
self.outfiles = outputfiles
|
sroelants/quint
|
output.py
|
Python
|
gpl-2.0
| 4,245
|
[
"CRYSTAL"
] |
7b4e8b0b3a819cb9864a74c3d71393269cc38cf035c973a0bb8b78773d5cedd4
|
# EmphysemaViewer by Corine Slagboom & Noeska Smit
#
#
# Based on SkeletonAUIViewer:
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
# skeleton of an AUI-based viewer module
# copy and modify for your own purposes.
# set to False for 3D viewer, True for 2D image viewer
IMAGE_VIEWER = True
# import the frame, i.e. the wx window containing everything
import EmphysemaViewerFrame
# and do a reload, so that the GUI is also updated at reloads of this
# module.
reload(EmphysemaViewerFrame)
from module_kits.misc_kit import misc_utils
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin
from comedi_utils import CMSliceViewer
from comedi_utils import SyncSliceViewers
import module_utils
import os
import sys
import traceback
import vtk
import wx
class EmphysemaViewer(IntrospectModuleMixin, ModuleBase):
"""Module to visualize lungemphysema in a CT scan. A lung mask is also needed.
EmphysemaViewer consists of a volume rendering and two linked slice-based views; one with the original data and one with an emphysema overlay. The volume rendering shows 3
contours: the lungedges and 2 different contours of emphysema; a normal one and a severe one.
There are two ways of setting the emphysema values.
- The first way is choosing the 'default' values, which are literature-based. They are set on -950 HU (emphysema) and -970 HU (severe).
- The other way is a computational way: The lowest 11% values, that are present in the data are marked as emphysema, the lowest 8,5% values are marked as severe emphysema.
The theory behind this is the hypothesis that the histograms of emphysema patients differ from healthy people in a way that in emphysema patients there are relatively more
lower values present. In both ways you can finetune the values, or completely change them (if you want to).
After loading your image data and mask data, you can inspect the data and examine the severity of the emphysema of the patient.
Controls:
LMB: The left mouse button can be used to rotate objects in the 3D scene, or to poll Hounsfield Units in areas of interest (click and hold to see the values)\n
RMB: For the slice viewers, you can set the window and level values by clicking and holding the right mouse button in a slice and moving your mouse. You can see the current
window and level values in the bottom of the viewer. Outside of the slice, this zooms the camera in and out\n
MMB: The middle mouse button enables stepping through the slices if clicked and held in the center of the slice. When clicking on the edges of a slice, this re-orients the
entire slice. Outside of the slice, this pans the camera\n
Scrollwheel: The scrollwheel can be used for zooming in and out of a scene, but also for sliceviewing if used with the CTRL- or SHIFT-key\n
SHIFT: By holding the SHIFT-key, it is possible to use the mouse scrollwheel to scroll through the slices.\n
CTRL: Holding the CTRL-key does the same, but enables stepping through the data in steps of 10 slices.\n
"""
def __init__(self, module_manager):
"""Standard constructor. All DeVIDE modules have these, we do
the required setup actions.
"""
# we record the setting here, in case the user changes it
# during the lifetime of this model, leading to different
# states at init and shutdown.
self.IMAGE_VIEWER = IMAGE_VIEWER
# we need all this for our contours
self.mask_data = None
self.image_data = None
self.lungVolume = None
self.contour_severe_actor = vtk.vtkActor()
self.contour_moderate_actor = vtk.vtkActor()
self.contour_lungedge_actor = vtk.vtkActor()
self.severe_mapper = vtk.vtkPolyDataMapper()
self.severe_mapper.ScalarVisibilityOff()
self.moderate_mapper = vtk.vtkPolyDataMapper()
self.moderate_mapper.ScalarVisibilityOff()
self.lung_mapper = vtk.vtkPolyDataMapper()
self.lung_mapper.ScalarVisibilityOff()
self.contour_severe_actor.SetMapper(self.severe_mapper)
self.contour_severe_actor.GetProperty().SetColor(1,0,0)
self.contour_severe_actor.GetProperty().SetOpacity(0.5)
self.contour_moderate_actor.SetMapper(self.moderate_mapper)
self.contour_moderate_actor.GetProperty().SetColor(0.5,0,1)
self.contour_moderate_actor.GetProperty().SetOpacity(0.25)
self.contour_lungedge_actor.SetMapper(self.lung_mapper)
self.contour_lungedge_actor.GetProperty().SetColor(0.9,0.9,0.9)
self.contour_lungedge_actor.GetProperty().SetOpacity(0.1)
ModuleBase.__init__(self, module_manager)
# create the view frame
self._view_frame = module_utils.instantiate_module_view_frame(
self, self._module_manager,
EmphysemaViewerFrame.EmphysemaViewerFrame)
# change the title to something more spectacular (or at least something non-default)
self._view_frame.SetTitle('EmphysemaViewer')
# create the necessary VTK objects: we only need a renderer,
# the RenderWindowInteractor in the view_frame has the rest.
self.ren = vtk.vtkRenderer()
self.ren.SetBackground(0.5,0.5,0.5)
self._view_frame.rwi.GetRenderWindow().AddRenderer(self.ren)
self.ren.AddActor(self.contour_severe_actor)
self.ren.AddActor(self.contour_moderate_actor)
self.ren.AddActor(self.contour_lungedge_actor)
self.ren2 = vtk.vtkRenderer()
self.ren2.SetBackground(0.5,0.5,0.5)
self._view_frame.overlay.GetRenderWindow().AddRenderer(self.ren2)
self.slice_viewer1 = CMSliceViewer(self._view_frame.overlay, self.ren2)
self.ren3 = vtk.vtkRenderer()
self.ren3.SetBackground(0.5,0.5,0.5)
self._view_frame.original.GetRenderWindow().AddRenderer(self.ren3)
self.slice_viewer2 = CMSliceViewer(self._view_frame.original, self.ren3)
self.slice_viewer3 = CMSliceViewer(self._view_frame.rwi, self.ren)
self.sync = SyncSliceViewers()
self.sync.add_slice_viewer(self.slice_viewer1)
self.sync.add_slice_viewer(self.slice_viewer2)
self.sync.add_slice_viewer2(self.slice_viewer3)
# hook up all event handlers
self._bind_events()
# anything you stuff into self._config will be saved
self._config.last_used_dir = ''
# make our window appear (this is a viewer after all)
self.view()
# all modules should toggle this once they have shown their
# views.
self.view_initialised = True
# apply config information to underlying logic
self.sync_module_logic_with_config()
# then bring it all the way up again to the view
self.sync_module_view_with_logic()
def close(self):
"""Clean-up method called on all DeVIDE modules when they are
deleted.
FIXME: Still get a nasty X error :(
"""
# with this complicated de-init, we make sure that VTK is
# properly taken care of
self.ren.RemoveAllViewProps()
self.ren2.RemoveAllViewProps()
self.ren3.RemoveAllViewProps()
# this finalize makes sure we don't get any strange X
# errors when we kill the module.
self.slice_viewer1.close()
self.slice_viewer2.close()
self.slice_viewer3.close()
self._view_frame.rwi.GetRenderWindow().Finalize()
self._view_frame.rwi.SetRenderWindow(None)
self._view_frame.overlay.GetRenderWindow().Finalize()
self._view_frame.overlay.SetRenderWindow(None)
self._view_frame.original.GetRenderWindow().Finalize()
self._view_frame.original.SetRenderWindow(None)
del self._view_frame.rwi
del self._view_frame.overlay
del self._view_frame.original
del self.slice_viewer3
del self.slice_viewer2
del self.slice_viewer1
# done with VTK de-init
# now take care of the wx window
self._view_frame.close()
# then shutdown our introspection mixin
IntrospectModuleMixin.close(self)
def get_input_descriptions(self):
# define this as a tuple of input descriptions if you want to
# take input data e.g. return ('vtkPolyData', 'my kind of
# data')
return ()
def get_output_descriptions(self):
# define this as a tuple of output descriptions if you want to
# generate output data.
return ()
def set_input(self, idx, input_stream):
# this gets called right before you get executed. take the
# input_stream and store it so that it's available during
# execute_module()
pass
def get_output(self, idx):
# this can get called at any time when a consumer module wants
# your output data.
pass
def execute_module(self):
# when it's your turn to execute as part of a network
# execution, this gets called.
pass
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def config_to_view(self):
pass
def view_to_config(self):
pass
def view(self):
self._view_frame.Show()
self._view_frame.Raise()
# because we have an RWI involved, we have to do this
# SafeYield, so that the window does actually appear before we
# call the render. If we don't do this, we get an initial
# empty renderwindow.
wx.SafeYield()
self.render()
def create_volumerender(self, contourValueModerate, contourValueSevere):
"""Creates a volumerender of the masked data using iso-contour surfaces
created by the Marching Cubes algorithm at the specified contourvalues.
"""
self._view_frame.SetStatusText("Creating Volumerender...")
mask = vtk.vtkImageMask()
severeFraction = 0.10
moderateFraction = 0.12
# We only want to contour the lungs, so mask it
mask.SetMaskInput(self.mask_data)
mask.SetInput(self.image_data)
mask.Update()
self.lungVolume = mask.GetOutput()
if contourValueModerate == 0 and contourValueSevere == 0: # This means we get to calculate the percentual values ourselves!
scalars = self.lungVolume.GetScalarRange()
value_range = scalars[1]-scalars[0]
contourValueSevere = scalars[0]+value_range*severeFraction
contourValueModerate = scalars[0]+value_range*moderateFraction
self._view_frame.upper_slider.SetValue(contourValueModerate)
self._view_frame.lower_slider.SetValue(contourValueSevere)
self.create_overlay(contourValueModerate,contourValueSevere)
# Create the contours
self.adjust_contour(self.lungVolume, contourValueSevere, self.severe_mapper)
self.adjust_contour(self.lungVolume, contourValueModerate, self.moderate_mapper)
#self.adjust_contour(self.mask_data, 0.5, self.lung_mapper)
self.create_lungcontour()
# Set the camera to a nice view
cam = self.ren.GetActiveCamera()
cam.SetPosition(0,-100,0)
cam.SetFocalPoint(0,0,0)
cam.SetViewUp(0,0,1)
self.ren.ResetCamera()
self.render()
self._view_frame.SetStatusText("Created Volumerender")
def adjust_contour(self, volume, contourValue, mapper):
"""Adjust or create an isocontour using the Marching Cubes surface at the given
value using the given mapper
"""
self._view_frame.SetStatusText("Calculating new volumerender...")
contour = vtk.vtkMarchingCubes()
contour.SetValue(0,contourValue)
contour.SetInput(volume)
mapper.SetInput(contour.GetOutput())
mapper.Update()
self.render()
self._view_frame.SetStatusText("Calculated new volumerender")
def create_lungcontour(self):
"""Create a lungcontour using the Marching Cubes algorithm and smooth the surface
"""
self._view_frame.SetStatusText("Calculating lungcontour...")
contourLung = vtk.vtkMarchingCubes()
contourLung.SetValue(0,1)
contourLung.SetInput(self.mask_data)
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInput(contourLung.GetOutput())
smoother.BoundarySmoothingOn()
smoother.SetNumberOfIterations(40)
smoother.Update()
self.lung_mapper.SetInput(smoother.GetOutput())
self.lung_mapper.Update()
self._view_frame.SetStatusText("Calculated lungcontour")
def create_overlay(self, emphysemavalue, severeemphysemavalue):
"""Creates an overlay for the slice-based volume view
0: no emphysema
1: moderate emphysema
2: severe emphysema
"""
self._view_frame.SetStatusText("Creating Overlay...")
mask = vtk.vtkImageMask()
mask2 = vtk.vtkImageMask()
threshold = vtk.vtkImageThreshold()
threshold2 = vtk.vtkImageThreshold()
math=vtk.vtkImageMathematics()
mask.SetInput(self.image_data)
mask.SetMaskInput(self.mask_data)
threshold.SetInput(mask.GetOutput())
threshold.ThresholdByLower(emphysemavalue)
threshold.SetOutValue(0)
threshold.SetInValue(1)
threshold2.SetInput(mask.GetOutput())
threshold2.ThresholdByLower(severeemphysemavalue)
threshold2.SetOutValue(1)
threshold2.SetInValue(2)
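# Editorial note: threshold maps a voxel to 1 below 'emphysemavalue' and to
# 0 otherwise; threshold2 maps it to 2 below 'severeemphysemavalue' and to 1
# otherwise. Their product is therefore 0 = none, 1 = moderate, 2 = severe,
# matching the legend in the docstring above.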
math.SetOperationToMultiply()
math.SetInput1(threshold.GetOutput())
math.SetInput2(threshold2.GetOutput())
math.Update()
overlay = math.GetOutput()
self.slice_viewer1.set_overlay_input(None)
self.slice_viewer1.set_overlay_input(overlay)
self.render()
self._view_frame.SetStatusText("Created Overlay")
def load_data_from_file(self, file_path):
"""Loads scanvolume data from file. Also sets the volume as input for the sliceviewers
"""
self._view_frame.SetStatusText("Opening file: %s..." % (file_path))
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
reader = vtk.vtkMetaImageReader()
reader.SetFileName(file_path)
reader.Update()
self.image_data = reader.GetOutput()
self.slice_viewer1.set_input(self.image_data)
self.slice_viewer1.reset_camera()
self.slice_viewer1.render()
self.slice_viewer2.set_input(self.image_data)
self.slice_viewer2.reset_camera()
self.slice_viewer2.render()
self.slice_viewer3.set_input(self.image_data)
self.slice_viewer3.render()
self.slice_viewer3.set_opacity(0.1)
cam = self.ren.GetActiveCamera()
cam.SetPosition(0,-100,0)
cam.SetFocalPoint(0,0,0)
cam.SetViewUp(0,0,1)
self.ren.ResetCamera()
if (self.mask_data) is not None: # We can start calculating the volumerender
self.create_volumerender(0,0)
else:
self._view_frame.SetStatusText("Opened file")
def load_mask_from_file(self, file_path):
"""Loads mask file
"""
self._view_frame.SetStatusText( "Opening mask: %s..." % (file_path))
filename = os.path.split(file_path)[1]
fileBaseName =os.path.splitext(filename)[0]
reader = vtk.vtkMetaImageReader()
reader.SetFileName(file_path)
reader.Update()
self.mask_data = reader.GetOutput()
if (self.image_data) is not None:
self.create_volumerender(0,0)
else:
self._view_frame.SetStatusText("Opened mask file")
def save_to_file(self, file_path):
"""Save data from main renderwindow (the contour one) to a PNG-file
"""
w2i = vtk.vtkWindowToImageFilter()
w2i.SetInput(self._view_frame.rwi.GetRenderWindow())
w2i.Update()
writer = vtk.vtkPNGWriter()
writer.SetInput(w2i.GetOutput())
writer.SetFileName(file_path)
writer.Update()
result = writer.Write()
if result == 0:
self._view_frame.SetStatusText( "Saved file")
else:
self._view_frame.SetStatusText( "Saved file to: %s..." % (file_path))
def _bind_events(self):
"""Bind wx events to Python callable object event handlers.
"""
vf = self._view_frame
vf.Bind(wx.EVT_MENU, self._handler_file_open,
id = vf.id_file_open)
vf.Bind(wx.EVT_MENU, self._handler_mask_open,
id = vf.id_mask_open)
vf.Bind(wx.EVT_MENU, self._handler_file_save,
id = vf.id_mask_save)
self._view_frame.button1.Bind(wx.EVT_BUTTON,
self._handler_button1)
self._view_frame.button2.Bind(wx.EVT_BUTTON,
self._handler_button2)
self._view_frame.button3.Bind(wx.EVT_BUTTON,
self._handler_button3)
self._view_frame.button4.Bind(wx.EVT_BUTTON,
self._handler_button4)
self._view_frame.button5.Bind(wx.EVT_BUTTON,
self._handler_button5)
self._view_frame.button6.Bind(wx.EVT_BUTTON,
self._handler_button6)
self._view_frame.upper_slider.Bind(wx.EVT_SCROLL_CHANGED, self._handler_slider1)
self._view_frame.lower_slider.Bind(wx.EVT_SCROLL_CHANGED, self._handler_slider2)
def _handler_button1(self, event):
"""Reset the camera of the main render window
"""
self.ren.ResetCamera()
self.render()
def _handler_button2(self, event):
"""Reset all for the main render window
"""
cam = self.ren.GetActiveCamera()
cam.SetPosition(0,-100,0)
cam.SetFocalPoint(0,0,0)
cam.SetViewUp(0,0,1)
self.ren.ResetCamera()
self.render()
def _handler_button3(self, event):
"""Reset the camera for the sliceviewers
"""
self.slice_viewer1.reset_camera()
self.slice_viewer2.reset_camera()
self.render()
def _handler_button4(self, event):
"""Reset all for the sliceviewers
"""
self.slice_viewer1.reset_to_default_view(2)
self.slice_viewer2.reset_to_default_view(2)
orientations = [2, 0, 1]
for i, ipw in enumerate(self.slice_viewer1.ipws):
ipw.SetPlaneOrientation(orientations[i]) # axial, sagittal, coronal
ipw.SetSliceIndex(0)
self.render()
for i, ipw in enumerate(self.slice_viewer2.ipws):
ipw.SetPlaneOrientation(orientations[i]) # axial, sagittal, coronal
ipw.SetSliceIndex(0)
self.render()
def _handler_button5(self, event):
"""Adjust the contourvalues to values recommended in literature
"""
if self.lungVolume is None:
return
else:
self._view_frame.upper_slider.SetValue(-950)
self._view_frame.lower_slider.SetValue(-970)
self.adjust_contour(self.lungVolume, -950, self.moderate_mapper)
self.adjust_contour(self.lungVolume, -970, self.severe_mapper)
self.create_overlay(-950,-970)
def _handler_button6(self, event):
"""Adjust the contourvalues to values calculated from data
"""
if self.lungVolume is None:
return
else:
self.create_volumerender(0, 0)
def _handler_file_open(self, event):
"""Handler for file opening
"""
filters = 'Volume files (*.mhd)|*.mhd;'
dlg = wx.FileDialog(self._view_frame, "Please choose a CT-thorax file", self._config.last_used_dir, "", filters, wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
full_file_path = "%s/%s" % (self._config.last_used_dir, filename)
self.load_data_from_file(full_file_path)
dlg.Destroy()
def _handler_mask_open(self, event):
"""Handler for mask opening
"""
filters = 'Mask files (*.mhd;*.mha)|*.mhd;*.mha;'
dlg = wx.FileDialog(self._view_frame, "Please choose a CT-thorax mask file", self._config.last_used_dir, "", filters, wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
full_file_path = "%s/%s" % (self._config.last_used_dir, filename)
self.load_mask_from_file(full_file_path)
dlg.Destroy()
def _handler_file_save(self, event):
"""Handler for filesaving
"""
self._view_frame.SetStatusText( "Saving file...")
filters = 'png file (*.png)|*.png'
dlg = wx.FileDialog(self._view_frame, "Choose a destination", self._config.last_used_dir, "", filters, wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
filename=dlg.GetFilename()
self._config.last_used_dir=dlg.GetDirectory()
file_path = "%s/%s" % (self._config.last_used_dir, filename)
self.save_to_file(file_path)
dlg.Destroy()
self._view_frame.SetStatusText( "Saved file")
def _handler_slider1(self, event):
"""Handler for slider adjustment (Severe emphysema)
"""
if self.lungVolume is None:
return
else:
contourValue = self._view_frame.upper_slider.GetValue()
self.adjust_contour(self.lungVolume, contourValue, self.moderate_mapper)
self.create_overlay(contourValue, self._view_frame.lower_slider.GetValue())
def _handler_slider2(self, event):
"""Handler for slider adjustment (Moderate emphysema)
"""
if self.lungVolume is None:
return
else:
contourValue = self._view_frame.lower_slider.GetValue()
self.adjust_contour(self.lungVolume, contourValue, self.severe_mapper)
self.create_overlay(self._view_frame.upper_slider.GetValue(),contourValue)
def render(self):
"""Method that calls Render() on the embedded RenderWindow.
Use this after having made changes to the scene.
"""
self._view_frame.render()
self.slice_viewer1.render()
|
nagyistoce/devide
|
modules/user/EmphysemaViewer/EmphysemaViewer.py
|
Python
|
bsd-3-clause
| 22,332
|
[
"VTK"
] |
54e37fdf8e8b7fb2c962e5e71502ea91ba6de28f6aa4f1e6fe1b1a8dafeabfb1
|
#!/usr/bin/env python
"""
Copyright 2016 Brian Quach
Licensed under MIT (https://github.com/brianquach/udacity-nano-fullstack-conference/blob/master/LICENSE) # noqa
"""
import endpoints
import json
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.ext import ndb
from api import FiveCardPokerAPI
from enum import HandState
from game import Card
from model import Game
from model import Hand
from model import User
from utility import get_by_urlsafe
class SendMoveEmail(webapp2.RequestHandler):
def post(self):
"""Send an email to a User that it is their turn."""
user = get_by_urlsafe(self.request.get('user_key'), User)
game = get_by_urlsafe(self.request.get('game_key'), Game)
player_hand = Hand.query(
ndb.AND(
Hand.player == user.key,
Hand.game == game.key,
Hand.state == HandState.STARTING.name
)
).get()
if not player_hand:
raise endpoints.NotFoundException(
'Hand not found for player key {0} and game key {1}'.format(
user.key, game.key
)
)
hand = json.loads(player_hand.hand)
cards = [Card(name=card['name'], suit=card['suit']) for card in hand]
hand_information = ''
for card in cards:
hand_information += 'Card: {0}\nCard Id: {1}\n\n'.format(
repr(card),
card.id
)
subject = 'Your Turn!'
body = '''
Hi {0}!
It's currently your turn to play five card poker! Choose the cards you want
to replace, if any, and respond to us. After your move, we will reveal your new
hand. After each player makes their move, the game will notify each player of
the winner by email. May the player with the best hand win!
The game key is:
{2}
Here is your hand:
{1}
Notice, below each listed card is a "Card Id"; this is what you will use to
tell the server which cards you want to exchange when you make your next
move.
'''.format(user.name, hand_information, game.key.urlsafe())
print body
mail.send_mail(
'noreply@{}.appspotmail.com'.format(
app_identity.get_application_id()
),
user.email,
subject,
body
)
class SendGameResultEmail(webapp2.RequestHandler):
def post(self):
"""Send an email to the players to notify them the game results."""
game = get_by_urlsafe(self.request.get('game_key'), Game)
player_one_hand = Hand.query(
ndb.AND(
Hand.player == game.player_one,
Hand.game == game.key,
Hand.state == HandState.ENDING.name
)
).get()
if not player_one_hand:
raise endpoints.NotFoundException(
'Hand not found for player key {0} and game key {1}'.format(
game.player_one, game.key
)
)
player_two_hand = Hand.query(
ndb.AND(
Hand.player == game.player_two,
Hand.game == game.key,
Hand.state == HandState.ENDING.name
)
).get()
if not player_two_hand:
raise endpoints.NotFoundException(
'Hand not found for player key {0} and game key {1}'.format(
game.player_two, game.key
)
)
player_one = game.player_one.get()
hand = json.loads(player_one_hand.hand)
cards = [Card(name=card['name'], suit=card['suit']) for card in hand]
p1_hand_information = ''
for card in cards:
p1_hand_information += 'Card: {0}\n'.format(repr(card))
player_two = game.player_two.get()
hand = json.loads(player_two_hand.hand)
cards = [Card(name=card['name'], suit=card['suit']) for card in hand]
p2_hand_information = ''
for card in cards:
p2_hand_information += 'Card: {0}\n'.format(repr(card))
subject = "It's a tie!"
if game.winner == game.player_one:
subject = '{0} Wins!'.format(player_one.name)
elif game.winner == game.player_two:
subject = '{0} Wins!'.format(player_two.name)
body = '''
Game finished! {0}
{1}'s hand:
{2}
{3}'s hand:
{4}
'''.format(
subject,
player_one.name,
p1_hand_information,
player_two.name,
p2_hand_information
)
print body
mail.send_mail(
'noreply@{}.appspotmail.com'.format(
app_identity.get_application_id()
),
player_one.email,
subject,
body
)
mail.send_mail(
'noreply@{}.appspotmail.com'.format(
app_identity.get_application_id()
),
player_two.email,
subject,
body
)
class SendPlayerForfeitEmail(webapp2.RequestHandler):
def post(self):
"""Send an email to a player to nofity an opponent forfeit."""
game_websafe_url = self.request.get('game_key')
winner = get_by_urlsafe(self.request.get('winner_key'), User)
loser_name = self.request.get('loser_name')
subject = '{0} has forfeited the game!'.format(loser_name)
body = '''Hi {2},
Your opponent {0} for game {1} has forfeited. You are the winner!
'''.format(
loser_name,
game_websafe_url,
winner.name
)
print body
mail.send_mail(
'noreply@{}.appspotmail.com'.format(
app_identity.get_application_id()
),
winner.email,
subject,
body
)
class SendReminderEmail(webapp2.RequestHandler):
def get(self):
"""Send a reminder email to users with a game in progress."""
players = User.query(User.email != None) # noqa
for player in players:
games = Game.query(
ndb.AND(
Game.game_over == False, # noqa
Game.active_player == player.key
)
)
game_keys = ', '.join(game.key.urlsafe() for game in games)
number_of_games = games.count()
if number_of_games > 0:
subject = 'This is a reminder!'
body = '''Hey {0}, you have {1} games in progress. It is your
turn to make a move in these games! Their url safe keys are: {2}'''.format(
player.name,
number_of_games,
game_keys
)
print body
mail.send_mail(
'noreply@{}.appspotmail.com'.format(
app_identity.get_application_id()
),
player.email,
subject,
body
)
app = webapp2.WSGIApplication(
[
('/tasks/send_move_email', SendMoveEmail),
('/tasks/send_game_result_email', SendGameResultEmail),
('/tasks/send_player_forfeit_email', SendPlayerForfeitEmail),
('/crons/send_reminder', SendReminderEmail)
],
debug=True
)
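# Hypothetical enqueue sketch (editorial; the game API is expected to queue
# a move notification roughly like this, variable names are illustrative):
#   from google.appengine.api import taskqueue
#   taskqueue.add(url='/tasks/send_move_email',
#                 params={'user_key': user.key.urlsafe(),
#                         'game_key': game.key.urlsafe()})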
|
brianquach/udacity-nano-fullstack-game
|
main.py
|
Python
|
mit
| 7,415
|
[
"Brian"
] |
2c4f3bd75260413ab7197a3b78fd47beb15e85a3ad727fe035563f12ec868bf1
|
# coding=utf-8
import logging
import threading
import time
from octopus import err
from octopus import constant
from octopus.service.selector._base import BaseSelector
from octopus.util import tools
log = logging.getLogger(constant.LOGGER_NAME)
class BaseClient(object):
def __init__(self, thrift_mod, selector, **options):
self._selector = selector
""":type: BaseSelector"""
self._thrift_mod = thrift_mod
""":type: """
self._raise_error = options.pop('raise_error', False)
""":type: bool"""
self._timeout = options.pop('timeout', None)
""":type: bool"""
def __del__(self):
# TODO close all connection
pass
def __getattr__(self, name):
def _(*args, **kwargs):
return self.call(name, *args, **kwargs)
return _
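# Editorial note: __getattr__ turns any unknown attribute into an RPC proxy,
# so client.echo('hi') is equivalent to client.call('echo', 'hi').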
def call(self, func_name, *args, **kwargs):
ret = None
while True:
try:
ret = self._call(func_name, *args, **kwargs)
except err.OctpServiceUnavailable:
continue
except err.OctpError as e:
log.warn('Call func(%s) encountered ERROR: %s', func_name, e)
raise e
return ret
def _deal_unavailable_service(self, service):
raise NotImplementedError()
def _call(self, func_name, *args, **kwargs):
pass
def _call_log(self, service, func_name, result, cost, in_param, out_param):
"""
:param result:
:param service:
:type service: Service
:return:
"""
info = {
'server': service.service_name,
'thread': threading.currentThread(),
'caller': 'octopus.service.thrift_client',
'type': 'RPC-CALL', # caller
'func': func_name,
'result': result,
'time': tools.human_time(time.time()),
'cost': tools.human_time(cost),
'in': in_param,
'out': out_param,
'tag': 'octopus,rpc-call,thrift',
# addition fields
'service_addr': service.addr,
}
info_list = ('{key}={value}'.format(key=key, value=value) for key, value in info.iteritems())
out_str = '|'.join(info_list)
log.info(out_str)
def _service_timeout(self, service):
"""
Get timeout which is the time call service.
self._timeout FIRST, then service.timeout.
:param service:
:type service: Service
:return:
"""
if self._timeout is not None:
return self._timeout
else:
return service.timeout
def _get_service(self):
"""
Get one service or else raise err.OctpServiceAllFault.
:return:
:rtype: service.Service
:raise: err.OctpServiceAllFault
"""
service = self._selector.get_service(0) # Don't wait
if service is None:
raise err.OctpServiceAllFault('No service is available!')
return service
|
ideascf/octopus
|
service/client/_base.py
|
Python
|
mit
| 3,049
|
[
"Octopus"
] |
778108edb1a10d5d39d4f0a491f6586e834642a7b48350a0f8dde4df7fa1be7c
|
"""
Generate a toy dataset for the matrix tri-factorisation case, and store it.
We use dimensions 100 by 50 for the dataset, 10 row clusters, and 5 column clusters.
As the prior for F, G we take value 1 for all entries (so exp 1), and for S value 2 (so exp 1/2).
As a result, each value in R has a value of around 25, and a variance of .
For contrast, the Sanger dataset is as follows:
Shape: (622,139). Fraction observed: 0.811307224317.
Mean: 11.9726909789. Variance: 34.1503768785. Maximum: 23.5959612058.
We add Gaussian noise of precision tau = 1 (prior for gamma: alpha=1,beta=1).
(Simply using the expectation of our Gamma distribution over tau)
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
from BNMTF.code.models.distributions.exponential import exponential_draw
from BNMTF.code.models.distributions.normal import normal_draw
from BNMTF.code.cross_validation.mask import generate_M
import numpy, itertools, matplotlib.pyplot as plt
def generate_dataset(I,J,K,L,lambdaF,lambdaS,lambdaG,tau):
# Generate U, V
F = numpy.zeros((I,K))
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
F[i,k] = exponential_draw(lambdaF[i,k])
S = numpy.zeros((K,L))
for k,l in itertools.product(xrange(0,K),xrange(0,L)):
S[k,l] = exponential_draw(lambdaS[k,l])
G = numpy.zeros((J,L))
for j,l in itertools.product(xrange(0,J),xrange(0,L)):
G[j,l] = exponential_draw(lambdaG[j,l])
# Generate R
true_R = numpy.dot(F,numpy.dot(S,G.T))
R = add_noise(true_R,tau)
return (F,S,G,tau,true_R,R)
def add_noise(true_R,tau):
if numpy.isinf(tau):
return numpy.copy(true_R)
(I,J) = true_R.shape
R = numpy.zeros((I,J))
for i,j in itertools.product(xrange(0,I),xrange(0,J)):
R[i,j] = normal_draw(true_R[i,j],tau)
return R
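# Editorial note: normal_draw is given the precision tau, so the added noise
# has standard deviation 1/sqrt(tau); tau = inf yields a noiseless copy.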
def try_generate_M(I,J,fraction_unknown,attempts):
for attempt in range(1,attempts+1):
try:
M = generate_M(I,J,fraction_unknown)
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction_unknown)
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction_unknown)
print "Took %s attempts to generate M." % attempt
return M
except AssertionError:
pass
raise Exception("Tried to generate M %s times, with I=%s, J=%s, fraction=%s, but failed." % (attempts,I,J,fraction_unknown))
##########
if __name__ == "__main__":
output_folder = project_location+"BNMTF/data_toy/bnmtf/"
I,J,K,L = 100, 80, 5, 5
fraction_unknown = 0.1
alpha, beta = 1., 1.
lambdaF = numpy.ones((I,K))
lambdaS = numpy.ones((K,L))
lambdaG = numpy.ones((J,L))
tau = alpha / beta
(F,S,G,tau,true_R,R) = generate_dataset(I,J,K,L,lambdaF,lambdaS,lambdaG,tau)
# Try to generate M
M = try_generate_M(I,J,fraction_unknown,attempts=1000)
# Store all matrices in text files
numpy.savetxt(open(output_folder+"F.txt",'w'),F)
numpy.savetxt(open(output_folder+"S.txt",'w'),S)
numpy.savetxt(open(output_folder+"G.txt",'w'),G)
numpy.savetxt(open(output_folder+"R_true.txt",'w'),true_R)
numpy.savetxt(open(output_folder+"R.txt",'w'),R)
numpy.savetxt(open(output_folder+"M.txt",'w'),M)
print "Mean R: %s. Variance R: %s. Min R: %s. Max R: %s." % (numpy.mean(R),numpy.var(R),R.min(),R.max())
fig = plt.figure()
plt.hist(R.flatten(),bins=range(0,int(R.max())+1))
plt.show()
|
ThomasBrouwer/BNMTF
|
data_toy/bnmtf/generate_bnmtf.py
|
Python
|
apache-2.0
| 3,776
|
[
"Gaussian"
] |
e039cda0a270c0fa59cb7cd8f6992b8110b5b3c070f9f1dea37c52be558ecead
|
"""
`cirrina` - Opinionated web framework
Simple cirrina server example.
:license: LGPL, see LICENSE for details
"""
import logging
import sys
import cirrina
from aiohttp import web
#: Holds the logger for the current example
logger = logging.getLogger(__name__)
#: Create cirrina app.
app = cirrina.Server()
app.enable_rpc('/jrpc')
wspath = '/ws'
@app.auth_handler
async def auth_handler(request, username, password):
# Example user and password
if username == 'admin' and password == 'admin':
return True
return False
@app.auth_unauthorized
async def auth_unauthorized(request):
response = web.Response(status=302)
response.headers['Location'] = '/login.html'
return response
@app.websocket_connect()
async def websocket_connected(wsclient):
username = wsclient.cirrina.web_session['username']
logger.info("websocket: new authenticated connection, user: %s", username)
@app.websocket_message(location=wspath)
async def websocket_message(wsclient, msg):
logger.info("websocket: got message: '%s'", msg)
await app.websocket_broadcast(msg)
@app.websocket_disconnect()
async def websocket_closed(wsclient):
logger.info('websocket connection closed')
@app.http_get('/')
@app.authenticated
async def default(request):
"""
---
description: This is the default page
tags:
- Defaulty Default
produces:
- text/html
responses:
"200":
description: successful operation.
"405":
description: invalid HTTP Method
"""
visit_count = 0
if 'visit_count' in request.cirrina.web_session:
visit_count = request.cirrina.web_session['visit_count']
request.cirrina.web_session['visit_count'] = visit_count + 1
html = '''<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<script type="text/javascript" src="cirrina.js"></script>
<script type="text/javascript">
var cirrina = new Cirrina('%s');
cirrina.onopen = function(ws)
{
log("websocket connected" );
sendmessage("Hello !");
}
cirrina.onmessage = function (ws, msg)
{
log("server: " + msg );
}
cirrina.onclose = function()
{
log("websocket disconnected");
}
function log(msg)
{
textbox = document.getElementById("websocket");
textbox.innerHTML += msg + "<br/>";
textbox.scrollTop = textbox.scrollHeight;
}
function sendmessage( msg )
{
log("client: " + msg );
cirrina.send( msg );
document.getElementById('text').value = "";
document.getElementById('text').focus();
}
function upload()
{
var form = document.getElementById('upload_form');
var form_data = new FormData(form);
var http = new XMLHttpRequest();
http.open('POST', '/upload', true);
http.addEventListener('load', function(event) {
if (http.status >= 200 && http.status < 300) {
console.log('file uploaded');
} else {
alert('Upload failed !');
}
});
if(http.upload) {
http.upload.onprogress = function(e) {
var done = e.position || e.loaded, total = e.totalSize || e.total;
console.log('upload progress: ' + done + ' / ' + total + ' = ' + (Math.floor(done/total*1000)/10) + '%%');
};
}
http.send(form_data);
}
</script>
</head>
<body>
<h1>Cirrina Example</h1>
Page Visit Count: %d <br/>
<h2>File Upload Example</h2>
<form id="upload_form" action="/upload" method="post" accept-charset="utf-8" enctype="multipart/form-data">
<label for="file">Select File: </label>
<input id="file" name="file" type="file" value=""/><br/>
<button type="button" onclick="upload();">Upload</button>
</form>
<h2>Websocket Example</h2>
<div id="websocket" style="width: 500px; border: 2px solid; padding: 15px; height: 150px; overflow-x: auto;"></div>
<input type="text" id="text">
<input type='button' value='Send' onclick="sendmessage(document.getElementById('text').value);">
</body>
</html>
''' % (wspath, visit_count)
resp = web.Response(text=html, content_type="text/html")
return resp
@app.jrpc
async def hello(request, session, msg, n, debug=False):
logger.info("jrpc hello called: %s - %d, debug: %d", msg, n, debug)
visit_count = session['visit_count'] if 'visit_count' in session else 1
session['visit_count'] = visit_count + 1
await app.websocket_broadcast(msg)
return {"status": msg, 'visit_count': visit_count - 1}
@app.startup
def onstart():
logger.info("starting up...")
@app.shutdown
def onstop():
logger.info("shutting down...")
@app.http_upload('/upload', upload_dir="upload/")
async def file_upload(request, session, upload_dir, filename):
return web.Response(text='file uploaded: {}'.format(filename))
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
port = 8765
if len(sys.argv) > 1:
port = int(sys.argv[1])
app.http_static("/", 'static/')
app.run('0.0.0.0', port, debug=True)
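# Hedged client sketch (not from the original example): a minimal aiohttp
# websocket client for manual testing. The '/ws' path and port 8765 are
# assumptions (the real path comes from `wspath`, defined earlier in the file),
# and the server above expects an authenticated session first.
#
# import asyncio, aiohttp
# async def main():
#     async with aiohttp.ClientSession() as session:
#         async with session.ws_connect('http://localhost:8765/ws') as ws:
#             await ws.send_str('hello from client')
#             print((await ws.receive()).data)
# asyncio.run(main())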
|
neolynx/cirrina
|
examples/basic/server.py
|
Python
|
lgpl-3.0
| 4,986
|
[
"VisIt"
] |
a7cd129ac9c7e2c25b2191fb5ca4294eabc3330d573e257b3782183f2a2a563b
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
# Author: Hackathon
# Description: I found this code at https://djangosnippets.org/snippets/3046/.
You can visit that site for more information.
bug: changed mimetype to content_type for Django 1.7
"""
import datetime
from django.db.models.query import QuerySet, ValuesQuerySet
from django.http import HttpResponse
class ExcelResponse(HttpResponse):
def __init__(self, data, output_name='excel_data', headers=None,
force_csv=False, encoding='utf8'):
# Make sure we've got the right type of data to work with
valid_data = False
if isinstance(data, ValuesQuerySet):
data = list(data)
elif isinstance(data, QuerySet):
data = list(data.values())
if hasattr(data, '__getitem__'):
if isinstance(data[0], dict):
if headers is None:
headers = data[0].keys()
data = [[row[col] for col in headers] for row in data]
data.insert(0, headers)
if hasattr(data[0], '__getitem__'):
valid_data = True
assert valid_data is True, "ExcelResponse requires a sequence of sequences"
import StringIO
output = StringIO.StringIO()
# Excel has a limit on number of rows; if we have more than that, make a csv
use_xls = False
if len(data) <= 65536 and force_csv is not True:
try:
import xlwt
except ImportError:
# xlwt doesn't exist; fall back to csv
pass
else:
use_xls = True
if use_xls:
book = xlwt.Workbook(encoding=encoding)
sheet = book.add_sheet('Sheet 1')
styles = {'datetime': xlwt.easyxf(num_format_str='yyyy-mm-dd hh:mm:ss'),
'date': xlwt.easyxf(num_format_str='yyyy-mm-dd'),
'time': xlwt.easyxf(num_format_str='hh:mm:ss'),
'default': xlwt.Style.default_style}
for rowx, row in enumerate(data):
for colx, value in enumerate(row):
if isinstance(value, datetime.datetime):
cell_style = styles['datetime']
elif isinstance(value, datetime.date):
cell_style = styles['date']
elif isinstance(value, datetime.time):
cell_style = styles['time']
else:
cell_style = styles['default']
sheet.write(rowx, colx, value, style=cell_style)
book.save(output)
content_type = 'application/vnd.ms-excel'
file_ext = 'xls'
else:
for row in data:
out_row = []
for value in row:
if not isinstance(value, basestring):
value = unicode(value)
value = value.encode(encoding)
out_row.append(value.replace('"', '""'))
output.write('"%s"\n' %
'","'.join(out_row))
content_type = 'text/csv'
file_ext = 'csv'
output.seek(0)
super(ExcelResponse, self).__init__(content=output.getvalue(),
content_type=content_type)
self['Content-Disposition'] = 'attachment;filename="%s.%s"' % \
(output_name.replace('"', '\\"'), file_ext)
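# Hedged usage sketch (not from the original snippet): besides QuerySets,
# ExcelResponse accepts any sequence of dicts, so a minimal Django view could be:
#
# def demo_view(request):
#     rows = [{'name': 'alpha', 'score': 1}, {'name': 'beta', 'score': 2}]
#     return ExcelResponse(rows, output_name='demo', force_csv=True)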
|
PegasusWang/Physics_web
|
physics/excel_response.py
|
Python
|
mit
| 3,534
|
[
"VisIt"
] |
d7748c01f54d147238a63abffdca3c893005544782e07857b14578e8106c17d8
|
"""Config flow for Elk-M1 Control integration."""
import asyncio
import logging
from urllib.parse import urlparse
import elkm1_lib as elkm1
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import (
CONF_ADDRESS,
CONF_HOST,
CONF_PASSWORD,
CONF_PROTOCOL,
CONF_TEMPERATURE_UNIT,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.util import slugify
from . import async_wait_for_elk_to_sync
from .const import CONF_AUTO_CONFIGURE, CONF_PREFIX
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
PROTOCOL_MAP = {"secure": "elks://", "non-secure": "elk://", "serial": "serial://"}
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_PROTOCOL, default="secure"): vol.In(
["secure", "non-secure", "serial"]
),
vol.Required(CONF_ADDRESS): str,
vol.Optional(CONF_USERNAME, default=""): str,
vol.Optional(CONF_PASSWORD, default=""): str,
vol.Optional(CONF_PREFIX, default=""): str,
vol.Optional(CONF_TEMPERATURE_UNIT, default=TEMP_FAHRENHEIT): vol.In(
[TEMP_FAHRENHEIT, TEMP_CELSIUS]
),
}
)
VALIDATE_TIMEOUT = 35
async def validate_input(data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
userid = data.get(CONF_USERNAME)
password = data.get(CONF_PASSWORD)
prefix = data[CONF_PREFIX]
url = _make_url_from_data(data)
requires_password = url.startswith("elks://")
if requires_password and (not userid or not password):
raise InvalidAuth
elk = elkm1.Elk(
{"url": url, "userid": userid, "password": password, "element_list": ["panel"]}
)
elk.connect()
if not await async_wait_for_elk_to_sync(elk, VALIDATE_TIMEOUT, url):
raise InvalidAuth
device_name = data[CONF_PREFIX] if data[CONF_PREFIX] else "ElkM1"
# Return info that you want to store in the config entry.
return {"title": device_name, CONF_HOST: url, CONF_PREFIX: slugify(prefix)}
def _make_url_from_data(data):
host = data.get(CONF_HOST)
if host:
return host
protocol = PROTOCOL_MAP[data[CONF_PROTOCOL]]
address = data[CONF_ADDRESS]
return f"{protocol}{address}"
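# Illustrative mapping (values are examples, not from the integration's tests):
# _make_url_from_data({CONF_PROTOCOL: "secure", CONF_ADDRESS: "192.168.1.2"})
# -> "elks://192.168.1.2", while protocol "serial" with CONF_ADDRESS
# "/dev/ttyS0" yields "serial:///dev/ttyS0".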
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Elk-M1 Control."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the elkm1 config flow."""
self.importing = False
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if self._url_already_configured(_make_url_from_data(user_input)):
return self.async_abort(reason="address_already_configured")
try:
info = await validate_input(user_input)
except asyncio.TimeoutError:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(user_input[CONF_PREFIX])
self._abort_if_unique_id_configured()
if self.importing:
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_create_entry(
title=info["title"],
data={
CONF_HOST: info[CONF_HOST],
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_AUTO_CONFIGURE: True,
CONF_TEMPERATURE_UNIT: user_input[CONF_TEMPERATURE_UNIT],
CONF_PREFIX: info[CONF_PREFIX],
},
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
self.importing = True
return await self.async_step_user(user_input)
def _url_already_configured(self, url):
"""See if we already have a elkm1 matching user input configured."""
existing_hosts = {
urlparse(entry.data[CONF_HOST]).hostname
for entry in self._async_current_entries()
}
return urlparse(url).hostname in existing_hosts
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
tboyce021/home-assistant
|
homeassistant/components/elkm1/config_flow.py
|
Python
|
apache-2.0
| 4,890
|
[
"Elk"
] |
2318a06e134eb007d3d1a69919b8860bc74e9c48b5c009c4e593777ec35150ce
|
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates the files featureconfig.h and featureconfig.c.
#
from __future__ import print_function
import time, string
import inspect, sys, os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..'))
import featuredefs
if len(sys.argv) != 4:
print("Usage: {} DEFFILE HPPFILE CPPFILE".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename, hfilename, cfilename = sys.argv[1:4]
print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")
print("Writing " + hfilename + "...")
hfile = open(hfilename, 'w')
hfile.write("""/*
WARNING: This file was autogenerated by
%s on %s
Do not modify it or your changes will be overwritten!
Modify features.def instead.
*/
#ifndef _FEATURECONFIG_HPP
#define _FEATURECONFIG_HPP
#include <cmake_config.hpp>
#include "myconfig-final.hpp"
#undef CUDA
#undef FFTW
#undef H5MD
#undef SCAFACOS
#undef GSL
// these undefs need to match the externals in ../features.def
""" % (sys.argv[0], time.asctime()))
# guards for externals
hfile.write('/* Guards for externals */')
external_template = string.Template("""
// $feature is external
#if defined($feature)
#error $feature is external and can not be user-defined
#endif
""")
for feature in defs.externals:
hfile.write(external_template.substitute(feature=feature))
# Include definitions from CMake
hfile.write("""
/* Definitions from CMake */
#include <cmake_config.hpp>
""")
# handle implications
hfile.write('/* Handle implications */')
implication_template = string.Template("""
// $feature implies $implied
#if defined($feature) && !defined($implied)
#define $implied
#endif
""")
for feature, implied in defs.implications:
hfile.write(implication_template.substitute(feature=feature, implied=implied))
# output warnings if internal features are set manually
hfile.write('/* Warn when derived switches are specified manually */')
derivation_template = string.Template("""
// $feature equals $expr
#ifdef $feature
#warning $feature is a derived switch and should not be set manually!
#elif $cppexpr
#define $feature
#endif
""")
for feature, expr, cppexpr in defs.derivations:
hfile.write(derivation_template.substitute(feature=feature, cppexpr=cppexpr, expr=expr))
# write footer
# define external FEATURES and NUM_FEATURES
hfile.write("""
extern const char* FEATURES[];
extern const int NUM_FEATURES;
#endif /* of _FEATURECONFIG_HPP */""")
hfile.close()
print("Done.")
print("Writing " + cfilename + "...")
cfile = open(cfilename, 'w')
# handle requirements
cfile.write("""/*
WARNING: This file was autogenerated by
{script}
on
{date}
Do not modify it or your changes will be overwritten!
Modify features.def instead.
*/
/* config.hpp includes config-features.hpp and myconfig.hpp */
#include "config.hpp"
""".format(script=sys.argv[0], date=time.asctime()))
cfile.write('/* Handle requirements */')
requirement_string = """
// {feature} requires {expr}
#if defined({feature}) && !({cppexpr})
#error Feature {feature} requires {expr}
#endif
"""
for feature, expr, cppexpr in defs.requirements:
cfile.write(
requirement_string.format(
feature=feature, cppexpr=cppexpr, expr=expr))
cfile.write("""
/* Feature list */
const char* FEATURES[] = {
""")
feature_string = """
#ifdef {feature}
"{feature}",
#endif
"""
for feature in defs.externals.union(defs.features, defs.derived):
cfile.write(feature_string.format(feature=feature))
cfile.write("""
};
const int NUM_FEATURES = sizeof(FEATURES)/sizeof(char*);
""");
cfile.close()
print("Done.")
|
KonradBreitsprecher/espresso
|
src/core/gen_featureconfig.py
|
Python
|
gpl-3.0
| 4,430
|
[
"ESPResSo"
] |
2cb11611a41cc760151c0bcbbef4b8283d71becdb264aa7ca31786188c1ce885
|
import mdtraj as md
import os
import operator
import numpy as np
from functools import reduce
class TrajReader:
'''
def __init__(self, trajlistName, atomlistName, trajDir, trajExt, File_TOP):
self.trajlistName = trajlistName
self.atomlistName = atomlistName
self.trajDir = trajDir
self.trajExt = trajExt
self.File_TOP = File_TOP
self.homedir = self.get_homedir()
self.trajlist_list = self.get_trajlist(trajlistName, self.homedir)
self.atom_indices = self.get_atom_indices( atomlistName, self.homedir)
#self.framelist = self.get_framelist()
'''
def walk_dir(self,input_traj_dir, input_traj_ext,topdown=True):
frame_list = []
for root, dirs, files in os.walk(input_traj_dir, topdown):
for name in files:
if os.path.splitext(name)[1] == input_traj_ext:
frame_list.append( os.path.join( root, name ))
return frame_list
def get_trajlist(self,trajlist_filename, trajlist_dir):
trajlist_file = open( trajlist_filename )
#trajlist_dir = self.get_homedir()
trajlist_list = []
for line in trajlist_file:
path = trajlist_dir + '/' + line.rstrip("\n")
path = path.strip() # remove trailing spaces, thanks to Yang Xi for reporting this bug, Stephen 20141208
trajlist_list.append(path)
trajlist_file.close()
return trajlist_list
def get_homedir(self):
return os.getcwd()
def get_atom_indices(self, indices_filename, indices_dir):
if indices_filename != None:
atom_indices = np.loadtxt(indices_filename, dtype=np.int32).tolist()
return atom_indices
else:
return None
def get_framefile_list(self, trajlist_list):
framefile_list = []
#trajlist_list = self.trajlist_list
Ext = '.' + self.trajExt
for trajlist in trajlist_list:
framefile_list.extend(self.walk_dir(trajlist, Ext))
return framefile_list
class XTCReader(TrajReader):
def __init__(self, trajlistName, atomlistName, homedir, trajExt, File_TOP, nSubSample=None):
self.trajlistName = trajlistName
self.atomlistName = atomlistName
self.trajDir = homedir
self.trajExt = trajExt
self.File_TOP = File_TOP
self.homedir = homedir
self.nSubSample = nSubSample
#self.homedir = self.get_homedir()
self.trajlist_list = self.get_trajlist(trajlistName, self.homedir)
self.atom_indices = self.get_atom_indices( atomlistName, self.homedir)
self.framefile_list = self.get_framefile_list(self.trajlist_list)
self.trajs, self.traj_len = self.read_trajs(self.framefile_list)
def read_trajs(self, framelist):
trajs = []
traj_len = []
print("Reading trajs...")
for frame in framelist:
print('Reading: ', frame)
#traj = md.load(frame, top=self.File_TOP, atom_indices=self.atom_indices)
traj = md.load(frame, discard_overlapping_frames=True, top=self.File_TOP, #atom_indices=self.atom_indices,
stride=self.nSubSample)
#traj = traj[:-1] #remove last one
trajs.append(traj)
traj_len.append(len(traj))
len_trajs = len(trajs)
whole_trajs= reduce(operator.add, (trajs[i] for i in range(len_trajs)))
print("Done.")
print(len_trajs, "trajs,", len(whole_trajs), "frames.")
#print "debug output: len_trajs", len_trajs, "len_whole_trajs", len(whole_trajs)
return whole_trajs, traj_len
def get_phipsi(self, trajs, phi, psi):
#phi = [6, 8, 14, 16]
#psi = [4, 6, 8, 14]
PHI_INDICES = []
PSI_INDICES = []
for i in range(len(phi)):
PHI_INDICES.append(self.atom_indices.index(phi[i]))
PSI_INDICES.append(self.atom_indices.index(psi[i]))
#len_trajs = len(trajs)
print("PSI:", PSI_INDICES)
print("PHI:", PHI_INDICES)
phi_angles = md.compute_dihedrals(trajs, [PHI_INDICES]) * 180.0 / np.pi
psi_angles = md.compute_dihedrals(trajs, [PSI_INDICES]) * 180.0 / np.pi
#phi_psi=np.column_stack((phi_angles, psi_angles))
#return phi_psi
return phi_angles, psi_angles
class DCDReader(TrajReader):
def __init__(self, trajlistName, atomlistName, homedir, trajExt, File_TOP, nSubSample):
self.trajlistName = trajlistName
self.atomlistName = atomlistName
self.trajDir = homedir
self.trajExt = trajExt
self.File_TOP = File_TOP
self.homedir = homedir
self.nSubSample = nSubSample
#self.homedir = self.get_homedir()
self.trajlist_list = self.get_trajlist(trajlistName, self.homedir)
self.atom_indices = self.get_atom_indices( atomlistName, self.homedir)
def read_trajs(self, framelist):
#data = []
trajs = []
for frame in framelist:
#framedata = []
#print 'Reading: ', frame
traj = md.load_dcd(frame, self.File_TOP, stride=self.nSubSample)
trajs.append(traj)
return trajs
class AmberReader(TrajReader):
def __init__(self, trajlistName, atomlistName, homedir, trajExt, File_TOP, nSubSample):
self.trajlistName = trajlistName
self.atomlistName = atomlistName
self.trajDir = homedir
self.trajExt = trajExt
self.File_TOP = File_TOP
self.homedir = homedir
self.nSubSample = nSubSample
#self.homedir = self.get_homedir()
self.trajlist_list = self.get_trajlist(trajlistName, self.homedir)
self.atom_indices = self.get_atom_indices( atomlistName, self.homedir)
def read_trajs(self, framelist):
#data = []
trajs = []
for frame in framelist:
#framedata = []
print('Reading: ', frame)
traj = md.load_netcdf(frame, self.File_TOP, stride=self.nSubSample)
trajs.append(traj)
return trajs
class VectorReader(TrajReader):
def __init__(self, trajlistName, atomlistName=None, homedir='.', trajExt='txt', File_TOP=None, stride=None, framefile=None):
self.trajlistName = trajlistName
#self.atomlistName = atomlistName
self.trajDir = homedir
self.trajExt = trajExt
#self.File_TOP = File_TOP
self.homedir = homedir
self.stride = stride
#self.homedir = self.get_homedir()
if framefile is not None:
self.framefile_list = self.get_framefile(framefile)
else:
self.trajlist_list = self.get_trajlist(trajlistName, self.homedir)
self.framefile_list = self.get_framefile_list(self.trajlist_list)
#self.atom_indices = self.get_atom_indices( atomlistName, self.homedir)
self.trajs, self.traj_len = self.read_trajs(self.framefile_list)
def read_trajs(self, framelist):
#data = []
trajs = []
traj_len = []
for frame in framelist:
#framedata = []
print('Reading: ', frame)
traj = np.loadtxt(frame, dtype='float32')
#traj = traj[:-1] #remove last one
if self.stride is not None:
len_traj = len(traj)
traj = traj[0:len_traj:self.stride]
len_traj = len(traj)
trajs.extend(traj)
traj_len.append(len_traj)
print("Total Points:", len(trajs))
return np.asarray(trajs), traj_len
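# Hedged usage sketch (all filenames are assumptions): read XTC trajectories
# listed one directory per line in 'trajlist.txt', using a PDB topology and
# keeping every 10th frame, then compute the backbone dihedrals with the
# example indices from the commented-out lines in get_phipsi above.
#
# reader = XTCReader('trajlist.txt', 'atoms.txt', os.getcwd(), 'xtc',
#                    'topology.pdb', nSubSample=10)
# phi_angles, psi_angles = reader.get_phipsi(reader.trajs,
#                                            phi=[6, 8, 14, 16],
#                                            psi=[4, 6, 8, 14])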
|
stephenliu1989/HK_DataMiner
|
hkdataminer/utils/reader_.py
|
Python
|
apache-2.0
| 7,594
|
[
"MDTraj"
] |
88c75df7025c33269a083bbb6bda4cb383ed2fccb9ab8f3c5a064bb9140ab198
|
#!/usr/bin/env python
"""
:Author: Martin Kircher
:Contact: mkircher@uw.edu
:Date: *13.04.2012
"""
import sys, os
from optparse import OptionParser
from collections import defaultdict
import pysam
parser = OptionParser("%prog [options]")
parser.add_option("-p","--prefix", dest="prefix", help="Prefix for output filenames (default Length)",default="Length")
parser.add_option("-l","--library", dest="library", help="Use library name from RG read header rather than the sample ID",default=False,action="store_true")
parser.add_option("--max_length", dest="max_length", help="Maximum length considered for the output (default 1000)",default=1000,type="int")
parser.add_option("--noRG", dest="noRG", help="Ignore read group information (output name = output prefix + .tsv )",default=False,action="store_true")
(options, args) = parser.parse_args()
if options.library: options.all=True
have_XP = False
rgroups = {}
for filename in args:
if os.path.exists(filename):
print "Reading %s..."%filename
cbamfile = pysam.Samfile(filename, "rb" )
id2lib = {}
if options.library and 'RG' in cbamfile.header:
for rgroup in cbamfile.header['RG']:
if 'LB' in rgroup and 'ID' in rgroup:
id2lib[rgroup['ID']] = rgroup['LB']
for read in cbamfile:
library,count = '',1
for (key,value) in read.tags:
if key == "RG":
if value in id2lib: library = id2lib[value]
else: library = value
elif key == "XP":
have_XP = True
count = value
if options.noRG: library = ''
if library not in rgroups: rgroups[library] = [defaultdict(int),defaultdict(int)]
if not read.is_paired:
rgroups[library][0][len(read.seq)]+=1
rgroups[library][1][len(read.seq)]+=count
elif read.is_read1:
length = min(options.max_length,abs(read.isize))
if length == 0: length = options.max_length
rgroups[library][0][length]+=1
rgroups[library][1][length]+=count
for library in rgroups:
if library != '': outfile = open("%s_%s.tsv"%(options.prefix.rstrip("_"),library),'w')
else: outfile = open("%s.tsv"%(options.prefix),'w')
if have_XP:
outfile.write('Length\tCounts\tInclDuplicates\n')
for length in range(min(rgroups[library][0].keys()),max(rgroups[library][0].keys())+1):
outfile.write("%d\t%d\t%d\n"%(length,rgroups[library][0][length],rgroups[library][1][length]))
else:
outfile.write('Length\tCounts\n')
for length in range(min(rgroups[library][0].keys()),max(rgroups[library][0].keys())+1):
outfile.write("%d\t%d\n"%(length,rgroups[library][0][length]))
outfile.close()
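# Invocation sketch (BAM filenames are assumptions):
#   python BAM_RG_Length.py -p Length -l sample1.bam sample2.bam
# writes one 'Length_<library>.tsv' length histogram per read-group library
# (or a single 'Length.tsv' when --noRG is given).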
|
shendurelab/cfDNA
|
BAM_RG_Length.py
|
Python
|
mit
| 2,686
|
[
"pysam"
] |
6c80f8fea1bd25f3c857cc33931d6401ef416dcce040dc103fd65ef9294600a7
|
"""
Copyright (c) 2016 Gianluca Gerard
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Portions of the code are
Copyright (c) 2010--2015, Deep Learning Tutorials Development Team
All rights reserved.
"""
from __future__ import print_function, division
import timeit
import sys
import os
import matplotlib.pyplot as plt
import numpy
import theano
from theano import tensor
#from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
#from theano.compile.nanguardmode import NanGuardMode
from utils import get_minibatches_idx
from utils import load_n_preprocess_data
from rbm import RBM
from rbm import GRBM
from mlp import HiddenLayer
from MNIST import MNIST
class DBN(object):
"""Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
used for classification, the DBN is treated as a MLP, by adding a logistic
regression layer on top.
Originally from: http://deeplearning.net/tutorial/code/DBN.py
"""
def __init__(self, numpy_rng=None, theano_rng=None, n_ins=784,
gauss=True,
hidden_layers_sizes=[400], n_outs=40,
W_list=None, b_list=None):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the DBN
:type gauss: bool
:param gauss: True if the first layer is Gaussian otherwise
the first layer is Bernoullian
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type W_list: list of numpy.ndarray
:param W_list: the list of weight matrices for each layer of the MLP; if
None each matrix is randomly initialized
:type b_list: list of numpy.ndarray
:param b_list: the list of bias vectors for each layer of the MLP; if
None each vector is randomly initialized
"""
self.n_ins = n_ins
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.stacked_layers_sizes = hidden_layers_sizes + [n_outs]
self.n_layers = len(self.stacked_layers_sizes)
assert self.n_layers > 0
if numpy_rng is None:
numpy_rng = numpy.random.RandomState(123)
if theano_rng is None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
# the data is presented as rasterized images
self.x = tensor.matrix('x')
# The DBN is an MLP, for which all weights of intermediate
# layers are shared with a different RBM. We will first
# construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoidal layer we also construct an RBM
# that shares weights with that layer. During pretraining we
# will train these RBMs (which will lead to changing the
# weights of the MLP as well).
for i in range(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden
# units of the layer below or the input size if we are on
# the first layer
if i == 0:
input_size = n_ins
else:
input_size = self.stacked_layers_sizes[i - 1]
# the input to this layer is either the activation of the
# hidden layer below or the input of the DBN if you are on
# the first layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
n_in = input_size
n_out= self.stacked_layers_sizes[i]
print('Adding a layer with %i input and %i outputs' %
(n_in, n_out))
if W_list is None:
W = numpy.asarray(numpy_rng.uniform(
low=-4.*numpy.sqrt(6. / (n_in + n_out)),
high=4.*numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),dtype=theano.config.floatX)
else:
W = W_list[i]
if b_list is None:
b = numpy.zeros((n_out,), dtype=theano.config.floatX)
else:
b = b_list[i]
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=n_in,
n_out=n_out,
W=theano.shared(W,name='W',borrow=True),
b=theano.shared(b,name='b',borrow=True),
activation=tensor.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
# it's arguably a philosophical question... but we are
# going to only declare that the parameters of the
# sigmoid_layers are parameters of the DBN. The visible
# biases in the RBM are parameters of those RBMs, but not
# of the DBN.
self.params.extend(sigmoid_layer.params)
# Construct an RBM that shared weights with this layer
if i==0 and gauss:
rbm_layer = GRBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=self.stacked_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b)
else:
rbm_layer = RBM(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=self.stacked_layers_sizes[i],
W=sigmoid_layer.W,
hbias=sigmoid_layer.b)
self.rbm_layers.append(rbm_layer)
def number_of_nodes(self):
'''
Generate a list with the number of nodes in each layer
:return: list of int representing the nodes at each layer
'''
return [self.n_ins] + self.stacked_layers_sizes
def get_output(self, input, layer=-1):
'''
Return the output of the MLP layer of index layer when the network
is presented a set of samples input.
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type layer: int
:param layer: the index of the layer; if None it defaults to the
last layer of the network
:return: a theano.function object or None if the input is None
'''
if input is not None:
fn = theano.function(inputs=[],
outputs=self.sigmoid_layers[layer].output,
givens={
self.x: input
})
return fn()
else:
return None
def training_functions(self, train_set_x, batch_size, k,
lambda_1 = 0.0, lambda_2 = 0.1,
monitor=False):
'''Generates a list of functions for performing one step of
gradient descent at a given layer. The function will require
as input the minibatch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the DBN
:type batch_size: int
:param batch_size: size of a [mini]batch
:type k: int
:param k: number of Gibbs steps to do in CD-k / PCD-k
:type lambda_1: float
:param lambda_1: parameter for tuning weight updates in CD-k/PCD-k
of Bernoullian RBM
:type lambda_2: float
:param lambda_2: parameter for tuning weight updates in CD-k/PCD-k
of Bernoullian RBM
:type monitor: bool
:param monitor: set to true to enable theano debugging Monitoring Mode;
default is false
'''
# index to a [mini]batch
indexes = tensor.lvector('indexes') # index to a minibatch
learning_rate = tensor.scalar('lr', dtype=theano.config.floatX) # learning rate to use
momentum = tensor.scalar('momentum', dtype=theano.config.floatX)
# TODO: deal with batch_size of 1
assert batch_size > 1
train_fns = []
free_energy_gap_fns = []
for i, rbm in enumerate(self.rbm_layers):
# get the cost and the updates list
# using CD-k here (persisent=None) for training each RBM.
# TODO: change cost function to reconstruction error
if isinstance(rbm, GRBM):
cost, updates = rbm.get_cost_updates(learning_rate,
lambda_1=lambda_1,
lambda_2 = lambda_2,
batch_size=batch_size,
persistent=None, k=k)
else:
cost, updates = rbm.get_cost_updates(learning_rate,
weightcost = 0.0002,
batch_size=batch_size,
persistent=None, k=k)
# compile the theano function
if monitor:
mode = theano.compile.MonitorMode(pre_func=self.inspect_inputs)
else:
mode = theano.config.mode
fn = theano.function(
inputs=[indexes, momentum, theano.In(learning_rate)],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[indexes],
rbm.momentum: momentum
},
mode = mode
# mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)
)
# append `fn` to the list of functions
train_fns.append(fn)
train_sample = tensor.matrix('train_sample', dtype=theano.config.floatX)
test_sample = tensor.matrix('validation_sample', dtype=theano.config.floatX)
feg = rbm.free_energies(train_sample, test_sample)
# Obtain the input of layer i as the output of the previous
# layer
fn = theano.function(
inputs=[train_sample, test_sample],
outputs=feg,
mode=mode
)
free_energy_gap_fns.append(fn)
return train_fns, free_energy_gap_fns
def training(self, train_set_x,
batch_size, k,
pretraining_epochs, pretrain_lr,
lambda_1 = 0.0,
lambda_2 = 0.1,
validation_set_x=None,
monitor=False, graph_output=False):
'''
Run the DBN pretraining.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared var. that contains all datapoints used
for training the DBN
:type batch_size: int
:param batch_size: size of a [mini]batch
:type k: int
:param k: number of Gibbs steps to do in CD-k / PCD-k
:type pretraining_epochs: int
:param pretraining_epochs: number of epochs used for pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate
:type lambda_1: float
:param lambda_1: parameter for tuning weight updates in CD-k/PCD-k
of Bernoullian RBM
:type lambda_2: float
:param lambda_2: parameter for tuning weight updates in CD-k/PCD-k
of Bernoullian RBM
:type validation_set_x: theano.tensor.TensorType
:param validation_set_x: Shared var. that contains all datapoints used
for validating the DBN
:type monitor: bool
:param monitor: set to true to enable theano debugging Monitoring Mode;
default is false
:type graph_output: bool
:param graph_output: set to true to enable graphical output;
default is false
:return:
'''
print('... getting the pretraining functions')
print('Training set sample size %i' % train_set_x.get_value().shape[0])
if validation_set_x is not None:
print('Validation set sample size %i' % validation_set_x.get_value().shape[0])
training_fns, free_energy_gap_fns = self.training_functions(train_set_x=train_set_x,
batch_size=batch_size,
k=k,
lambda_1=lambda_1,
lambda_2=lambda_2,
monitor=monitor)
print('... pre-training the model')
start_time = timeit.default_timer()
# train layer-wise
if graph_output:
plt.ion()
n_data = train_set_x.get_value().shape[0]
if validation_set_x is not None:
t_set = train_set_x.get_value(borrow=True)
v_set = validation_set_x.get_value(borrow=True)
# early-stopping parameters
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch
idx_minibatches, minibatches = get_minibatches_idx(n_data,
batch_size,
shuffle=True)
n_train_batches = idx_minibatches[-1] + 1
for i in range(self.n_layers):
if graph_output:
plt.figure(i+1)
if isinstance(self.rbm_layers[i], GRBM):
momentum = 0.0
else:
momentum = 0.6
# go through training epochs
best_cost = numpy.inf
epoch = 0
done_looping = False
patience = pretraining_epochs[i] # look at this many examples regardless
validation_frequency = min(20 * n_train_batches, patience // 2)
print('Validation frequency: %d' % validation_frequency)
while (epoch < pretraining_epochs[i]) and (not done_looping):
epoch = epoch + 1
idx_minibatches, minibatches = get_minibatches_idx(n_data,
batch_size,
shuffle=True)
# go through the training set
if not isinstance(self.rbm_layers[i], GRBM) and epoch == 6:
momentum = 0.9
for mb, minibatch in enumerate(minibatches):
current_cost = training_fns[i](indexes=minibatch,
momentum=momentum,
lr=pretrain_lr[i])
# iteration number
iter = (epoch - 1) * n_train_batches + mb
if (iter + 1) % validation_frequency == 0:
print('Pre-training cost (layer %i, epoch %d): ' % (i, epoch), end=' ')
print(current_cost)
# Plot the output
if graph_output:
plt.clf()
training_output = self.get_output(train_set_x, i)
plt.imshow(training_output, cmap='gray')
plt.axis('tight')
plt.title('epoch %d' % (epoch))
plt.draw()
plt.pause(1.0)
# if we got the best validation score until now
if current_cost < best_cost:
# improve patience if loss improvement is good enough
if (
current_cost < best_cost *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_cost = current_cost
best_iter = iter
if validation_set_x is not None:
# Compute the free energy gap
if i == 0:
input_t_set = t_set
input_v_set = v_set
else:
input_t_set = self.get_output(
t_set[range(v_set.shape[0])], i-1)
input_v_set = self.get_output(v_set, i-1)
free_energy_train, free_energy_test = free_energy_gap_fns[i](
input_t_set,
input_v_set)
free_energy_gap = free_energy_test.mean() - free_energy_train.mean()
print('Free energy gap (layer %i, epoch %i): ' % (i, epoch), end=' ')
print(free_energy_gap)
if patience <= iter:
done_looping = True
break
if graph_output:
plt.close()
end_time = timeit.default_timer()
print('The pretraining code for file ' + os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.), file=sys.stderr)
def MLP_output_from_datafile(self,
datafile,
holdout=0.0,
repeats=1,
clip=None,
transform_fn=None,
exponent=1.0,
datadir='data'):
train_set, validation_set = load_n_preprocess_data(datafile,
holdout=holdout,
clip=clip,
transform_fn=transform_fn,
exponent=exponent,
repeats=repeats,
shuffle=False,
datadir=datadir)
return (self.get_output(train_set), self.get_output(validation_set))
def inspect_inputs(self, i, node, fn):
'''
Helper function to inspect inputs of each node. For details see
http://deeplearning.net/software/theano/tutorial/debug_faq.html
:param i:
:param node:
:param fn:
:return: None
'''
print(i, node, "input(s) value(s):", [input[0] for input in fn.inputs],
end='\n')
def inspect_outputs(self, i, node, fn):
'''
Helper function to inspect outputs of each node. For details see
http://deeplearning.net/software/theano/tutorial/debug_faq.html
:param i:
:param node:
:param fn:
:return: None
'''
print(" output(s) value(s):", [output[0] for output in fn.outputs])
def train_top(batch_size, graph_output, joint_train_set, joint_val_set, rng):
top_DBN = DBN(numpy_rng=rng, n_ins=joint_train_set.get_value().shape[1],
gauss=False,
hidden_layers_sizes=[24],
n_outs=3)
top_DBN.training(joint_train_set,
batch_size, k=1,
pretraining_epochs=[800, 800],
pretrain_lr=[0.1, 0.1],
validation_set_x=joint_val_set,
graph_output=graph_output)
return top_DBN
def train_bottom_layer(train_set, validation_set,
batch_size=20,
k=1, layers_sizes=[40],
pretraining_epochs=[800],
pretrain_lr=[0.005],
lambda_1 = 0.0,
lambda_2 = 0.1,
rng=None,
graph_output=False
):
if rng is None:
rng = numpy.random.RandomState(123)
print('Visible nodes: %i' % train_set.get_value().shape[1])
print('Output nodes: %i' % layers_sizes[-1])
dbn = DBN(numpy_rng=rng, n_ins=train_set.get_value().shape[1],
hidden_layers_sizes=layers_sizes[:-1],
n_outs=layers_sizes[-1])
dbn.training(train_set,
batch_size, k=k,
pretraining_epochs=pretraining_epochs,
pretrain_lr=pretrain_lr,
lambda_1=lambda_1,
lambda_2=lambda_2,
validation_set_x=validation_set,
graph_output=graph_output)
output_train_set = dbn.get_output(train_set)
if validation_set is not None:
output_val_set = dbn.get_output(validation_set)
else:
output_val_set = None
return dbn, output_train_set, output_val_set
def train_MNIST_Gaussian(graph_output=False):
# Load the data
mnist = MNIST()
raw_dataset = mnist.images
n_data = raw_dataset.shape[0]
dataset = mnist.normalize(raw_dataset)
train_set = theano.shared(dataset[0:int(n_data*5/6)], borrow=True)
validation_set = theano.shared(dataset[-39:], borrow=True)
batch_size = 20
k = 1
layers_sizes = [1000, 500]
pretraining_epochs = [100, 100]
pretrain_lr = [0.01, 0.01]
lambda_1 = 0.0
lambda_2 = 0.1
print('*** Training on MNIST ***')
print('Visible nodes: %i' % train_set.get_value().shape[1])
print('Output nodes: %i' % layers_sizes[-1])
dbn = DBN(n_ins=dataset.shape[1],
hidden_layers_sizes=layers_sizes[:-1],
n_outs=layers_sizes[-1])
dbn.training(train_set,
batch_size, k=k,
pretraining_epochs=pretraining_epochs,
pretrain_lr=pretrain_lr,
lambda_1=lambda_1,
lambda_2=lambda_2,
validation_set_x=validation_set,
graph_output=graph_output)
output_train_set = dbn.get_output(train_set)
output_val_set = dbn.get_output(validation_set)
return dbn, output_train_set, output_val_set
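# Hedged sketch (variable names are assumptions): after pretraining, per-layer
# representations of a shared dataset can be read back via get_output, e.g.
#
# dbn, train_codes, val_codes = train_MNIST_Gaussian()
# first_layer_codes = dbn.get_output(theano.shared(some_numpy_batch), layer=0)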
if __name__ == '__main__':
train_MNIST_Gaussian(graph_output=True)
|
glgerard/MDBN
|
src/dbn.py
|
Python
|
apache-2.0
| 25,760
|
[
"Gaussian"
] |
2edebaba7f8fce8d0620ddae4fe95491f6fabd2553e9b41ac9ed795546b26de3
|
import pickle
from ase.structure import molecule as mol
from ase import Atoms
from multiasecalc.lammps.charmm import CHARMM
from multiasecalc.lammps.dynamics import LAMMPSOptimizer
from multiasecalc.utils import get_datafile
from energy import Fragmentation
from ase.data.s22 import data as s22_sim_data
from ase.data.s22 import get_number_of_dimer_atoms
from ase.data import s22
import numpy as np
import traceback
##atomization GPAW calculator
class CHARMMSystem():
def __init__(self,name, atoms = None, fragment_list=None, minimize = False):
self.name = name
if atoms:
self.system = atoms
else:
self.system = mol(name) #use the default g22
self.system.center(vacuum=10.0)
self.calc = None
if fragment_list:
self.fragment_list = fragment_list
else:
self.fragment_list = self.system.get_chemical_symbols()
self.minimize = minimize
def setup_calculator(self):
parameters = dict(
neighbor = '2.0 nsq', # bin mode seems to fail with dimers
)
calc = CHARMM(ff_file_path=get_datafile('par_all36_cgenff.prm'), parameters=parameters)
calc._custom_thermo_args += ['evdwl', 'ecoul', 'emol', 'ebond']
return calc
def get_potential_energy(self):
self.system.set_calculator(self.setup_calculator())
try:
if self.minimize:
optimizer = LAMMPSOptimizer(self.system)
optimizer.run()
e = self.system.get_potential_energy()
self.system._del_calculator()
except:
traceback.print_exc()
print "{0} molecule not converged".format(self.name)
e = np.nan #not converged value
return e
def calculate_charges(atoms):
from multiasecalc.lammps.reaxff import ReaxFF
atoms.center(vacuum=1)
atoms.calc = ReaxFF(ff_file_path=get_datafile('ffield.reax'), parameters = dict(neighbor='2.0 nsq'))
atoms.get_potential_energy()
def test_s22():
fragTest = Fragmentation(s22.s22)
minimize = False
#minimize = True
print 'Relaxed:', minimize
fragTest.molecules = []
fragTest.fragments = []
testSet = [
'Ammonia_dimer',
'Water_dimer',
'Formic_acid_dimer',
'Formamide_dimer',
'Uracil_dimer_h-bonded',
'2-pyridoxine_2-aminopyridine_complex',
'Adenine-thymine_Watson-Crick_complex',
'Methane_dimer',
'Ethene_dimer',
'Benzene-methane_complex',
'Benzene_dimer_parallel_displaced',
'Pyrazine_dimer',
'Uracil_dimer_stack',
'Indole-benzene_complex_stack',
'Adenine-thymine_complex_stack',
'Ethene-ethyne_complex',
'Benzene-water_complex',
'Benzene-ammonia_complex',
'Benzene-HCN_complex',
'Benzene_dimer_T-shaped',
'Indole-benzene_T-shape_complex',
'Phenol_dimer'
]
for moleculeName in testSet:
atoms = s22.create_s22_system(moleculeName)
dimer1End = s22.data[moleculeName]['dimer atoms'][0]
frag1Atoms = atoms.copy()[:dimer1End]
frag2Atoms = atoms.copy()[dimer1End:]
calculate_charges(frag1Atoms)
calculate_charges(frag2Atoms)
atoms.set_charges(np.append(frag1Atoms.get_charges(), frag2Atoms.get_charges()))
fragment1 = CHARMMSystem(moleculeName + '_f1', frag1Atoms, minimize = minimize)
fragment2 = CHARMMSystem(moleculeName + '_f2', frag2Atoms, minimize = minimize)
system = CHARMMSystem(moleculeName, atoms, minimize = minimize,
fragment_list = [fragment1.name, fragment2.name])
fragTest.molecules.append(system)
fragTest.fragments += [fragment1, fragment2]
fragTest.fill_data_reference(data_type='s22')
fragTest.run(write=True)
def testSingle(moleculeName = "Methane_dimer"):
#TEST fragmentation
print "Test fragmentation with s22 set"
#minimize = False
minimize = True
print 'Relaxed:', minimize
fragTest = Fragmentation([moleculeName])
#atoms = s22.create_s22_system(moleculeName)
data = s22.data[moleculeName]
atoms = Atoms(data['symbols'], data['positions'])
dimer1End = s22.data[moleculeName]['dimer atoms'][0]
frag1Atoms = atoms.copy()[:dimer1End]
frag2Atoms = atoms.copy()[dimer1End:]
calculate_charges(frag1Atoms)
calculate_charges(frag2Atoms)
atoms.set_charges(np.append(frag1Atoms.get_charges(), frag2Atoms.get_charges()))
fragment1 = CHARMMSystem(moleculeName + '_f1', frag1Atoms, minimize = minimize)
fragment2 = CHARMMSystem(moleculeName + '_f2', frag2Atoms, minimize = minimize)
system = CHARMMSystem(moleculeName, atoms, minimize = minimize,
fragment_list = [fragment1.name, fragment2.name])
fragTest.molecules = [system]
fragTest.fragments = [fragment1, fragment2]
fragTest.fill_data_reference(data_type='s22')
fragTest.run(write=True)
import ase.io
ase.io.write('molecule.xyz', atoms)
print fragTest.data
if __name__ == "__main__":
import sys
if len(sys.argv) == 2:
testSingle(sys.argv[1])
else:
test_s22()
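# Invocation sketch (grounded in the __main__ block above): run the full S22
# benchmark, or a single dimer by name, e.g.
#   python charmm_energy.py Water_dimer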
|
csmm/multiase
|
tests/lammps/charmm_energy.py
|
Python
|
gpl-2.0
| 5,314
|
[
"ASE",
"CHARMM",
"GPAW",
"LAMMPS"
] |
486e35aa636eef7c40526784872b6cd6f550d7d5662944dbfdfca8baf29d8305
|
#!/usr/bin/python
#
# Octopus - IT Automation Tool
# Coded by: Alisson Menezes
# - alisson.copyleft@gmail.com
#
# Date: 29/04/2014
#
# Central Configuration Server
#
# x01 - CRUD
# x02 - RN
# x03 - Sockets
#
import socket
import json
from pymongo import *
import pymongo
import thread
import time
import os
# --- configuration --- #
host = ''
port = 7000
# ----------------------#
#x01---- CRUD - MongoDb -----
def con_db():
client = MongoClient('localhost',27017)
db = client["octopus"]
return db
def insert_crud(data):
print "Recebido: ",data
db = con_db()
nodes = db.nodes.aggregate([
{"$project":{ "_id":1,"nodes.ip":1,"nodes.hostname":1,"nodes.feet":1}},
{ "$unwind":"$nodes" },
{ "$match":{"_id":data["_id"]}}
]);
print "============================"
print data['nodes']['ip']
for f in nodes['result']:
print f['nodes']['ip']
print "============================"
if len(nodes['result']) <= 0:
print "Nenhum agent cadastrado"
db.nodes.update({"_id":data["_id"]},{"$addToSet":{"nodes":data["nodes"]}},upsert=True)
return "Cadastrando agent!!!"
else:
e = 0
print "Entrou aqui"
for f in nodes['result']:
print "ips: ",f['nodes']['ip']
if data['nodes']['ip'] in f['nodes']['ip']:
print "Existe"
db.nodes.update({"_id":data['_id'],"nodes.ip":data['nodes']['ip']},{"$set":{"nodes.$":data['nodes']}})
retorno = "Atualizando agent"
e = 1
break
else:
print "Nao existe"
retorno = "Novo agent"
if e != 1:
db.nodes.update({"_id":data["_id"]},{"$addToSet":{"nodes":data["nodes"]}},upsert=True)
return retorno
def insert_grupo_crud(data):
db = con_db()
j = { "_id":data["_id"], "feet":[],"nodes":[]}
db.nodes.insert(j)
return {"retorno":"cadastrado"}
def insert_logs_crud(data):
db = con_db()
db.logs.insert(data)
return "salvo"
def remove_crud():
print "remove"
def retrieve_nodes_crud():
db = con_db()
nodes = db.nodes.find()
return nodes
def delete_group_crud(data):
if data['_id'] == 'default':
return {"retorno":"O Grupo default nao pode ser excluido"}
db = con_db()
nodes = db.nodes.aggregate([
{"$project":{ "_id":1,"nodes.ip":1,"nodes.hostname":1}},
{ "$unwind":"$nodes" },
{ "$match":data}
]);
print "#########################"
print nodes["result"]
for i in nodes['result']:
print "incluindo ",i['nodes']," para o grupo default!"
db.nodes.update({"_id":"default"},
{"$addToSet":{"nodes":i['nodes']}}
,upsert=True)
rem = db.nodes.remove(data)
return {"retorno":"Grupo excluido"}
def retrieve_logs_crud():
db = con_db()
logs = db.logs.find().sort("data",pymongo.DESCENDING).limit(20)
return logs
def retrieve_feet_crud():
db = con_db()
feet = db.feet.find()
return feet
def find_foot_crud(data):
db = con_db()
feet = db.feet.find_one(data)
return feet
def add_foot_crud(data):
db = con_db()
r = db.feet.update({"_id":data['_id']},data,upsert=True)
return {"retorno":"cadastrado com sucesso!"}
def delete_foot_crud(data):
db = con_db()
feet = db.feet.remove(data)
return {"retorno":"removido"}
def add_foot_to_node(data):
db = con_db()
db.nodes.update({"_id":data['grupo'],"nodes.hostname":data['hostname']},
{"$addToSet":{"nodes.$.feet":data['foot']}}
,upsert=True)
return {"retorno":"Adicionado!"}
def add_foot_to_group(data):
db = con_db()
db.nodes.update({"_id":data['grupo']},
{"$addToSet":{"feet":data['foot']}}
,upsert=True)
return {"retorno":"Adicionado!"}
def remove_foot_from_group(data):
db = con_db()
# use $pull (not $addToSet) so the foot is actually removed from the group
db.nodes.update({"_id":data['grupo']},
{"$pull":{"feet":data['foot']}})
return {"retorno":"Removed!"}
def retrieve_crud(data,campo):
try:
db = con_db()
#s = db.nodes.find_one({'_id':data})
s = db.nodes.aggregate([
{"$project":{ "_id":0,"nodes.ip":1,"nodes.hostname":1}},
{ "$unwind":"$nodes" },
{ "$match":{"nodes.hostname":data}}
]);
res = s["result"][0]["nodes"][campo]
print "[+] Campo: "+campo
print "[+] resultado: ",res
return res
except Exception, e:
print "[!] Falhou!"
print e
def retrieve_node_info(data):
try:
print "================"
print data
db = con_db()
#s = db.nodes.find_one({'_id':data})
s = db.nodes.aggregate([
{"$project":{ "_id":1,"feet":1,"nodes.ip":1,"nodes.hostname":1,"nodes.feet":1}},
{ "$unwind":"$nodes" },
{ "$match":{"nodes.hostname":data['hostname']}}
]);
res = s["result"][0]
print "[+] resultado: ",res
return res
except Exception, e:
print "[!] Falhou!"
print e
# EOF - CRUD
#x02 - RN
def comandos(com):
res = []
print "==============="
print com
print "==============="
maquinas = com['nodes']
comando = com['command']+" "+com['params']
print "========= MAQUINAS ======"
print maquinas
try:
for m in maquinas:
print "[+] servidor: "+m
res.append(retrieve_crud(m,"ip"))
print "[+] comando: "+comando
for i in res:
print "[-] IP: ",i
thread.start_new_thread(envia_comando,(i,comando))
return {'retorno':'sent'}
except Exception, e:
print "[!] Erro!"
print e
#EOF -- RN --
#x03 -- Sockets ----
def cria_socket(ip):
try:
addr = ((ip,port))
agent_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
agent_socket.connect(addr)
print "[+] conectou no ip: ",ip
return agent_socket
except Exception,e:
print e
def envia_comando(ip,com):
print "[+] Enviando comando"
try:
s = cria_socket(ip)
s.send(com)
except Exception, e:
print "[!] Falhou!"
print e
#EOF -- Sockets --
# nothing
def retorno_dict():
d = {"cabelo":"dedo"}
return d
#thread.start_new_thread(comandos,(comando,))
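# Illustrative note (an assumption inferred from insert_grupo_crud/insert_crud
# above): each group document in db.nodes is shaped roughly like
#   {"_id": "default", "feet": [...],
#    "nodes": [{"ip": "10.0.0.5", "hostname": "web01", "feet": [...]}]}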
|
AlissonMMenezes/Octopus
|
server/octopus/octopus_functions.py
|
Python
|
gpl-2.0
| 5,663
|
[
"Octopus"
] |
8fa636d60609d8e671696fed83c404cba004b01f7aaeb1a3e7f70f90d06d3c9a
|
import numpy as np
import mdtraj as md
from msmbuilder.example_datasets import fetch_alanine_dipeptide
from msmbuilder.featurizer import StrucRMSDFeaturizer
# np.testing.assert_array_almost_equal(array1,array2)
def test_alanine_dipeptide_basic():
# This test takes the rmsd of the 0th set of alanine dipeptide
# trajectories relative to the 0th frame of the dataset.
# The test asserts that all rmsd's calculated will be equal
# to the ones that would be calculated straight from mdtraj.
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
featurizer = StrucRMSDFeaturizer(trajectories[0][0])
data = featurizer.transform(trajectories[0:1])
true_rmsd = md.rmsd(trajectories[0], trajectories[0][0])
np.testing.assert_array_almost_equal(data[0][:,0], true_rmsd, decimal=4)
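# For reference (standard definition, not stated in the original file): for the
# N selected atoms, mdtraj's rmsd returns, after optimal superposition,
#   RMSD(x, y) = sqrt((1/N) * sum_i ||x_i - y_i||**2)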
def test_omitting_indices():
# This test verifies that the result produced when
# atom_indices are omitted is the same as the result
# produced when atom_indices is all atom indices.
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
featurizer_indices = StrucRMSDFeaturizer(trajectories[0][0],
np.arange(trajectories[0].n_atoms))
data_indices = featurizer_indices.transform(trajectories[0:1])
featurizer = StrucRMSDFeaturizer(trajectories[0][0])
data = featurizer.transform(trajectories[0:1])
np.testing.assert_array_almost_equal(data[0][:,0],
data_indices[0][:,0], decimal=4)
def test_different_indices():
# This test verifies that the rmsd's calculated from
# different sets of atom indices are not the same,
# but that the arrays are still the same shape.
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
n_atoms = trajectories[0].n_atoms
halfway_point = n_atoms//2
featurizer_first_half = StrucRMSDFeaturizer(trajectories[0][0],
np.arange(halfway_point))
data_first_half = featurizer_first_half.transform(trajectories[0:1])
featurizer_second_half = StrucRMSDFeaturizer(trajectories[0][0],
np.arange(halfway_point,n_atoms))
data_second_half = featurizer_second_half.transform(trajectories[0:1])
assert data_first_half[0].shape == data_second_half[0].shape
# janky way to show that the arrays shouldn't be equal here
assert sum(data_first_half[0][:,0]) != sum(data_second_half[0][:,0])
def test_two_refs_basic():
# This test uses the 0th and 1st frames of the 0th set of
# adp trajectories as the two reference trajectories and
# ensures that the rmsd of the 0th frame of the dataset with
# the 0th reference are identical and the 1st frame of the
# dataset with the 1st reference are identical.
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
featurizer = StrucRMSDFeaturizer(trajectories[0][0:2])
data = featurizer.transform(trajectories[0:1])
true_rmsd = np.zeros((trajectories[0].n_frames, 2))
for frame in range(2):
true_rmsd[:, frame] = md.rmsd(trajectories[0], trajectories[0][frame])
np.testing.assert_almost_equal(data[0][0,0], data[0][1,1], decimal=3)
np.testing.assert_almost_equal(data[0][1,0], data[0][0,1], decimal=3)
np.testing.assert_array_almost_equal(data[0], true_rmsd, decimal=4)
def test_two_refs_omitting_indices():
# This test verifies that the result produced when
# atom_indices are omitted is the same as the result
# produced when atom_indices is all atom indices.
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
featurizer_indices = StrucRMSDFeaturizer(trajectories[0][0:2],
np.arange(trajectories[0].n_atoms))
data_indices = featurizer_indices.transform(trajectories[0:1])
featurizer = StrucRMSDFeaturizer(trajectories[0][0:2])
data = featurizer.transform(trajectories[0:1])
np.testing.assert_array_almost_equal(data[0], data_indices[0], decimal=4)
def test_two_refs_different_indices():
# This test verifies that the rmsd's calculated from
# different sets of atom indices are not the same,
# but that the arrays are still the same shape.
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
n_atoms = trajectories[0].n_atoms
halfway_point = n_atoms//2
featurizer_first_half = StrucRMSDFeaturizer(trajectories[0][0:2],
np.arange(halfway_point))
data_first_half = featurizer_first_half.transform(trajectories[0:1])
featurizer_second_half = StrucRMSDFeaturizer(trajectories[0][0:2],
np.arange(halfway_point,n_atoms))
data_second_half = featurizer_second_half.transform(trajectories[0:1])
assert data_first_half[0].shape == data_second_half[0].shape
# janky way to show that the arrays shouldn't be equal here
assert sum(data_first_half[0][:,0]) != sum(data_second_half[0][:,0])
assert sum(data_first_half[0][:,1]) != sum(data_second_half[0][:,1])
|
stephenliu1989/msmbuilder
|
msmbuilder/tests/test_strucrmsdfeaturizer.py
|
Python
|
lgpl-2.1
| 5,164
|
[
"MDTraj"
] |
4218e4d3f71f5ca08203d0303804d6a213a1f1feb99a99535740ae1c0295633b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Low-level objects providing an abstraction for the objects involved in the calculation.
"""
from __future__ import unicode_literals, division, print_function
import collections
import abc
import six
import numpy as np
import pymatgen.core.units as units
from pprint import pformat
from monty.design_patterns import singleton
from monty.collections import AttrDict
from pymatgen.core.design_patterns import Enum
from pymatgen.serializers.json_coders import PMGSONable, pmg_serialize
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from monty.json import MontyEncoder, MontyDecoder
def contract(s):
"""
>>> assert contract("1 1 1 2 2 3") == "3*1 2*2 1*3"
>>> assert contract("1 1 3 2 3") == "2*1 1*3 1*2 1*3"
"""
if not s: return s
tokens = s.split()
old = tokens[0]
count = [[1, old]]
for t in tokens[1:]:
if t == old:
count[-1][0] += 1
else:
old = t
count.append([1, t])
return " ".join("%d*%s" % (c, t) for c, t in count)
class AbivarAble(six.with_metaclass(abc.ABCMeta, object)):
"""
An `AbivarAble` object provides a method `to_abivars`
that returns a dictionary with the abinit variables.
"""
@abc.abstractmethod
def to_abivars(self):
"""Returns a dictionary with the abinit variables."""
#@abc.abstractmethod
#def from_abivars(cls, vars):
# """Build the object from a dictionary with Abinit variables."""
def __str__(self):
return pformat(self.to_abivars(), indent=1, width=80, depth=None)
def __contains__(self, key):
return key in self.to_abivars()
@singleton
class MandatoryVariable(object):
"""
Singleton used to tag mandatory variables, just because I can use
the cool syntax: variable is MANDATORY!
"""
@singleton
class DefaultVariable(object):
"""Singleton used to tag variables that will have the default value"""
MANDATORY = MandatoryVariable()
DEFAULT = DefaultVariable()
class SpinMode(collections.namedtuple('SpinMode', "mode nsppol nspinor nspden"), AbivarAble, PMGSONable):
"""
Different configurations of the electron density as implemented in abinit:
One can use as_spinmode to construct the object via SpinMode.as_spinmode
(string) where string can assume the values:
- polarized
- unpolarized
- afm (anti-ferromagnetic)
- spinor (non-collinear magnetism)
- spinor_nomag (non-collinear, no magnetism)
"""
@classmethod
def as_spinmode(cls, obj):
"""Converts obj into a `SpinMode` instance"""
if isinstance(obj, cls):
return obj
else:
# Assume a string with mode
try:
return _mode2spinvars[obj]
except KeyError:
raise KeyError("Wrong value for spin_mode: %s" % str(obj))
def to_abivars(self):
return {
"nsppol": self.nsppol,
"nspinor": self.nspinor,
"nspden": self.nspden,
}
@pmg_serialize
def as_dict(self):
return {k: getattr(self, k) for k in self._fields}
@classmethod
def from_dict(cls, d):
return cls(**{k: d[k] for k in d if k in cls._fields})
# A handy multiton
_mode2spinvars = {
"unpolarized": SpinMode("unpolarized", 1, 1, 1),
"polarized": SpinMode("polarized", 2, 1, 2),
"afm": SpinMode("afm", 1, 1, 2),
"spinor": SpinMode("spinor", 1, 2, 4),
"spinor_nomag": SpinMode("spinor_nomag", 1, 2, 1),
}
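# Example (values read directly from the table above):
# SpinMode.as_spinmode("afm") -> SpinMode(mode="afm", nsppol=1, nspinor=1, nspden=2)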
class Smearing(AbivarAble, PMGSONable):
"""
Variables defining the smearing technique. The preferred way to instantiate
a `Smearing` object is via the class method Smearing.as_smearing(string)
"""
#: Mapping string_mode --> occopt
_mode2occopt = {
'nosmearing': 1,
'fermi_dirac': 3,
'marzari4': 4,
'marzari5': 5,
'methfessel': 6,
'gaussian': 7}
def __init__(self, occopt, tsmear):
self.occopt = occopt
self.tsmear = tsmear
def __str__(self):
s = "occopt %d # %s Smearing\n" % (self.occopt, self.mode)
if self.tsmear:
s += 'tsmear %s' % self.tsmear
return s
def __eq__(self, other):
return (self.occopt == other.occopt and
np.allclose(self.tsmear, other.tsmear))
def __ne__(self, other):
return not self == other
def __bool__(self):
return self.mode != "nosmearing"
# py2 old version
__nonzero__ = __bool__
@classmethod
def as_smearing(cls, obj):
"""
Constructs an instance of `Smearing` from obj. Accepts obj in the form:
* Smearing instance
* "name:tsmear" e.g. "gaussian:0.004" (Hartree units)
* "name:tsmear units" e.g. "gaussian:0.1 eV"
* None --> no smearing
"""
if obj is None:
return Smearing.nosmearing()
if isinstance(obj, cls):
return obj
# obj is a string
obj, tsmear = obj.split(":")
        obj = obj.strip()  # str.strip() returns a new string; reassign the result
if obj == "nosmearing":
return cls.nosmearing()
else:
occopt = cls._mode2occopt[obj]
try:
tsmear = float(tsmear)
except ValueError:
tsmear, unit = tsmear.split()
tsmear = units.Energy(float(tsmear), unit).to("Ha")
return cls(occopt, tsmear)
@property
def mode(self):
for (mode_str, occopt) in self._mode2occopt.items():
if occopt == self.occopt:
return mode_str
raise AttributeError("Unknown occopt %s" % self.occopt)
@staticmethod
def nosmearing():
return Smearing(1, None)
def to_abivars(self):
if self.mode == "nosmearing":
return {}
else:
return {"occopt": self.occopt,
"tsmear": self.tsmear,}
def as_dict(self):
"""json friendly dict representation of Smearing"""
return {"occopt": self.occopt, "tsmear": self.tsmear,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@staticmethod
def from_dict(d):
return Smearing(d["occopt"], d["tsmear"])
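# Hedged usage sketch (illustrative only): the "name:tsmear" string form is
# parsed by as_smearing; a bare float is interpreted in Hartree units.
if __name__ == "__main__":
    smear = Smearing.as_smearing("fermi_dirac:0.01")
    assert smear.mode == "fermi_dirac"
    assert smear.to_abivars() == {"occopt": 3, "tsmear": 0.01}
    assert not Smearing.nosmearing()  # falsy and emits no abinit variables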
class ElectronsAlgorithm(dict, AbivarAble):
"""Variables controlling the SCF/NSCF algorithm."""
# None indicates that we use abinit defaults.
_DEFAULT = dict(
iprcell=None, iscf=None, diemac=None, diemix=None, diemixmag=None,
dielam=None, diegap=None, dielng=None, diecut=None, nstep=50)
def __init__(self, *args, **kwargs):
super(ElectronsAlgorithm, self).__init__(*args, **kwargs)
for k in self:
if k not in self._DEFAULT:
raise ValueError("%s: No default value has been provided for "
"key %s" % (self.__class__.__name__, k))
def to_abivars(self):
return self.copy()
class Electrons(AbivarAble):
"""The electronic degrees of freedom"""
def __init__(self, spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
algorithm=None, nband=None, fband=None, charge=0.0, comment=None): # occupancies=None,
"""
Constructor for Electrons object.
Args:
comment: String comment for Electrons
charge: Total charge of the system. Default is 0.
"""
super(Electrons, self).__init__()
self.comment = comment
self.smearing = Smearing.as_smearing(smearing)
self.spin_mode = SpinMode.as_spinmode(spin_mode)
self.nband = nband
self.fband = fband
self.charge = charge
self.algorithm = algorithm
@property
def nsppol(self):
return self.spin_mode.nsppol
@property
def nspinor(self):
return self.spin_mode.nspinor
@property
def nspden(self):
return self.spin_mode.nspden
#@property
#def as_dict(self):
# "json friendly dict representation"
# d = {}
# d["@module"] = self.__class__.__module__
# d["@class"] = self.__class__.__name__
# raise NotImplementedError("")
# return d
#@staticmethod
#def from_dict(d):
# raise NotImplementedError("")
def to_abivars(self):
abivars = self.spin_mode.to_abivars()
abivars.update({
"nband" : self.nband,
"fband" : self.fband,
"charge" : self.charge,
})
if self.smearing:
abivars.update(self.smearing.to_abivars())
if self.algorithm:
abivars.update(self.algorithm)
abivars["#comment"] = self.comment
return abivars
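# Hedged usage sketch (illustrative only): an unpolarized Electrons block
# without smearing; None entries mean "use the abinit default".
if __name__ == "__main__":
    ele = Electrons(spin_mode="unpolarized", smearing=None, nband=12)
    print(ele.to_abivars())
    # -> {'nsppol': 1, 'nspinor': 1, 'nspden': 1, 'nband': 12,
    #     'fband': None, 'charge': 0.0, '#comment': None}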
class KSampling(AbivarAble, PMGSONable):
"""
Input variables defining the K-point sampling.
"""
# Modes supported by the constructor.
modes = Enum(('monkhorst', 'path', 'automatic',))
def __init__(self, mode="monkhorst", num_kpts= 0, kpts=((1, 1, 1),), kpt_shifts=(0.5, 0.5, 0.5),
kpts_weights=None, use_symmetries=True, use_time_reversal=True, chksymbreak=None,
comment=None):
"""
        Highly flexible constructor for KSampling objects. The flexibility comes
        at the cost of usability, and in general it is recommended that you use
        the default constructor only if you know exactly what you are doing and
        require the flexibility. For most use cases, the object can be constructed
        far more easily using the convenience static constructors:
#. gamma_only
#. gamma_centered
#. monkhorst
#. monkhorst_automatic
#. path
and it is recommended that you use those.
Args:
            mode: Mode for generating k-points. Use one of the KSampling.modes enum types.
num_kpts: Number of kpoints if mode is "automatic"
Number of division for the sampling of the smallest segment if mode is "path".
Not used for the other modes
kpts: Number of divisions. Even when only a single specification is
required, e.g. in the automatic scheme, the kpts should still
be specified as a 2D array. e.g., [[20]] or [[2,2,2]].
kpt_shifts: Shifts for Kpoints.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
kpts_weights: Optional weights for kpoints. For explicit kpoints.
chksymbreak: Abinit input variable: check whether the BZ sampling preserves the symmetry of the crystal.
comment: String comment for Kpoints
.. note::
The default behavior of the constructor is monkhorst.
"""
if mode not in KSampling.modes:
raise ValueError("Unknown kpoint mode %s" % mode)
super(KSampling, self).__init__()
self.mode = mode
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.kpt_shifts = kpt_shifts
self.kpts_weights = kpts_weights
self.use_symmetries = use_symmetries
self.use_time_reversal = use_time_reversal
self.chksymbreak = chksymbreak
abivars = {}
if mode in ("monkhorst",):
assert num_kpts == 0
ngkpt = np.reshape(kpts, (-1,3))
shiftk = np.reshape(kpt_shifts, (-1,3))
if use_symmetries and use_time_reversal: kptopt = 1
if not use_symmetries and use_time_reversal: kptopt = 2
if not use_symmetries and not use_time_reversal: kptopt = 3
if use_symmetries and not use_time_reversal: kptopt = 4
abivars.update({
"ngkpt" : ngkpt,
"shiftk" : shiftk,
"nshiftk" : len(shiftk),
"kptopt" : kptopt,
"chksymbreak": chksymbreak,
})
elif mode in ("path",):
if num_kpts <= 0:
raise ValueError("For Path mode, num_kpts must be specified and >0")
kptbounds = np.reshape(kpts, (-1,3,))
#print("in path with kptbound: %s " % kptbounds)
abivars.update({
"ndivsm" : num_kpts,
"kptbounds": kptbounds,
"kptopt" : -len(kptbounds)+1,
})
elif mode in ("automatic",):
kpts = np.reshape(kpts, (-1,3))
if len(kpts) != num_kpts:
raise ValueError("For Automatic mode, num_kpts must be specified.")
kptnrm = np.ones(num_kpts)
abivars.update({
"kptopt" : 0,
"kpt" : kpts,
"nkpt" : num_kpts,
"kptnrm" : kptnrm,
"wtk" : kpts_weights, # for iscf/=-2, wtk.
"chksymbreak": chksymbreak,
})
else:
raise ValueError("Unknown mode %s" % mode)
self.abivars = abivars
self.abivars["#comment"] = comment
@property
def is_homogeneous(self):
return self.mode not in ["path"]
@classmethod
def gamma_only(cls):
"""Gamma-only sampling"""
return cls(kpt_shifts=(0.0,0.0,0.0), comment="Gamma-only sampling")
@classmethod
def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):
"""
Convenient static constructor for an automatic Gamma centered Kpoint grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
Returns:
:class:`KSampling` object.
"""
return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0),
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
comment="gamma-centered mode")
@classmethod
def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True,
use_time_reversal=True, comment=None):
"""
Convenient static constructor for a Monkhorst-Pack mesh.
Args:
ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
shiftk: Shift to be applied to the kpoints.
use_symmetries: Use spatial symmetries to reduce the number of k-points.
use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
Returns:
:class:`KSampling` object.
"""
return cls(
kpts=[ngkpt], kpt_shifts=shiftk,
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,
comment=comment if comment else "Monkhorst-Pack scheme with user-specified shiftk")
@classmethod
def monkhorst_automatic(cls, structure, ngkpt,
use_symmetries=True, use_time_reversal=True, chksymbreak=None, comment=None):
"""
Convenient static constructor for an automatic Monkhorst-Pack mesh.
Args:
structure: pymatgen structure object.
ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: Use spatial symmetries to reduce the number of k-points.
use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
Returns:
:class:`KSampling` object.
"""
sg = SpacegroupAnalyzer(structure)
#sg.get_crystal_system()
#sg.get_point_group()
# TODO
nshiftk = 1
#shiftk = 3*(0.5,) # this is the default
shiftk = 3*(0.5,)
#if lattice.ishexagonal:
#elif lattice.isbcc
#elif lattice.isfcc
return cls.monkhorst(
ngkpt, shiftk=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
chksymbreak=chksymbreak, comment=comment if comment else "Automatic Monkhorst-Pack scheme")
@classmethod
def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):
"""
Static constructor for path in k-space.
Args:
structure: pymatgen structure.
kpath_bounds: List with the reduced coordinates of the k-points defining the path.
ndivsm: Number of division for the smallest segment.
comment: Comment string.
Returns:
:class:`KSampling` object.
"""
if kpath_bounds is None:
# Compute the boundaries from the input structure.
from pymatgen.symmetry.bandstructure import HighSymmKpath
sp = HighSymmKpath(structure)
            # Flatten the array since "path" is a list of lists!
kpath_labels = []
for labels in sp.kpath["path"]:
kpath_labels.extend(labels)
kpath_bounds = []
for label in kpath_labels:
red_coord = sp.kpath["kpoints"][label]
#print("label %s, red_coord %s" % (label, red_coord))
kpath_bounds.append(red_coord)
return cls(mode=KSampling.modes.path, num_kpts=ndivsm, kpts=kpath_bounds,
comment=comment if comment else "K-Path scheme")
@classmethod
def path_from_structure(cls, ndivsm, structure):
"""See _path for the meaning of the variables"""
return cls._path(ndivsm, structure=structure, comment="K-path generated automatically from pymatgen structure")
@classmethod
def explicit_path(cls, ndivsm, kpath_bounds):
"""See _path for the meaning of the variables"""
return cls._path(ndivsm, kpath_bounds=kpath_bounds, comment="Explicit K-path")
@classmethod
def automatic_density(cls, structure, kppa, chksymbreak=None, use_symmetries=True, use_time_reversal=True,
shifts=(0.5, 0.5, 0.5)):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure: Input structure
kppa: Grid density
"""
lattice = structure.lattice
lengths = lattice.abc
ngrid = kppa / structure.num_sites
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3.)
num_div = [int(round(1.0 / lengths[i] * mult)) for i in range(3)]
# ensure that num_div[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
angles = lattice.angles
hex_angle_tol = 5 # in degrees
hex_length_tol = 0.01 # in angstroms
right_angles = [i for i in range(3) if abs(angles[i] - 90) < hex_angle_tol]
hex_angles = [i for i in range(3)
if abs(angles[i] - 60) < hex_angle_tol or
abs(angles[i] - 120) < hex_angle_tol]
is_hexagonal = (len(right_angles) == 2 and len(hex_angles) == 1
and abs(lengths[right_angles[0]] -
lengths[right_angles[1]]) < hex_length_tol)
#style = Kpoints.modes.gamma
#if not is_hexagonal:
# num_div = [i + i % 2 for i in num_div]
# style = Kpoints.modes.monkhorst
comment = "pymatgen generated KPOINTS with grid density = " + "{} / atom".format(kppa)
shifts = np.reshape(shifts, (-1, 3))
return cls(
mode="monkhorst", num_kpts=0, kpts=[num_div], kpt_shifts=shifts,
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,
comment=comment)
def to_abivars(self):
return self.abivars
def as_dict(self):
enc = MontyEncoder()
return {'mode': self.mode, 'comment': self.comment, 'num_kpts': self.num_kpts,
'kpts': enc.default(np.array(self.kpts)), 'kpt_shifts': self.kpt_shifts,
'kpts_weights': self.kpts_weights, 'use_symmetries': self.use_symmetries,
'use_time_reversal': self.use_time_reversal, 'chksymbreak': self.chksymbreak,
'@module': self.__class__.__module__, '@class': self.__class__.__name__}
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop('@module', None)
d.pop('@class', None)
dec = MontyDecoder()
d['kpts'] = dec.process_decoded(d['kpts'])
return cls(**d)
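# Hedged usage sketch (illustrative only): a 4x4x4 Monkhorst-Pack mesh with
# the default (0.5, 0.5, 0.5) shift. With both symmetries enabled the
# constructor selects kptopt 1, as per the cases handled above.
if __name__ == "__main__":
    ksamp = KSampling.monkhorst((4, 4, 4))
    d = ksamp.to_abivars()
    assert d["kptopt"] == 1 and d["nshiftk"] == 1
    print(d["ngkpt"], d["shiftk"])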
class Constraints(AbivarAble):
"""This object defines the constraints for structural relaxation"""
def to_abivars(self):
raise NotImplementedError("")
class RelaxationMethod(AbivarAble, PMGSONable):
"""
This object stores the variables for the (constrained) structural optimization
ionmov and optcell specify the type of relaxation.
The other variables are optional and their use depend on ionmov and optcell.
    A None value indicates that we use the abinit default. Default values can
be modified by passing them to the constructor.
The set of variables are constructed in to_abivars depending on ionmov and optcell.
"""
_default_vars = {
"ionmov" : MANDATORY,
"optcell" : MANDATORY,
"ntime" : 80,
"dilatmx" : 1.05,
"ecutsm" : 0.5,
"strfact" : None,
"tolmxf" : None,
"strtarget" : None,
"atoms_constraints": {}, # Constraints are stored in a dictionary. {} means if no constraint is enforced.
}
IONMOV_DEFAULT = 3
OPTCELL_DEFAULT = 2
def __init__(self, *args, **kwargs):
        # Initialize abivars with a copy of the default values so that the
        # class-level defaults are not mutated by the update below.
        self.abivars = self._default_vars.copy()
# Overwrite the keys with the args and kwargs passed to constructor.
self.abivars.update(*args, **kwargs)
self.abivars = AttrDict(self.abivars)
for k in self.abivars:
if k not in self._default_vars:
raise ValueError("%s: No default value has been provided for key %s" % (self.__class__.__name__, k))
        # Check the values, not the keys: a value left at MANDATORY means the
        # caller forgot to provide it.
        for k, v in self.abivars.items():
            if v is MANDATORY:
                raise ValueError("%s: No value has been provided for the mandatory key %s" %
                                 (self.__class__.__name__, k))
@classmethod
def atoms_only(cls, atoms_constraints=None):
if atoms_constraints is None:
return cls(ionmov=cls.IONMOV_DEFAULT, optcell=0)
else:
return cls(ionmov=cls.IONMOV_DEFAULT, optcell=0, atoms_constraints=atoms_constraints)
@classmethod
def atoms_and_cell(cls, atoms_constraints=None):
if atoms_constraints is None:
return cls(ionmov=cls.IONMOV_DEFAULT, optcell=cls.OPTCELL_DEFAULT)
else:
            return cls(ionmov=cls.IONMOV_DEFAULT, optcell=cls.OPTCELL_DEFAULT, atoms_constraints=atoms_constraints)
@property
def move_atoms(self):
"""True if atoms must be moved."""
return self.abivars.ionmov != 0
@property
def move_cell(self):
"""True if lattice parameters must be optimized."""
return self.abivars.optcell != 0
def to_abivars(self):
"""Returns a dictionary with the abinit variables"""
# These variables are always present.
out_vars = {
"ionmov" : self.abivars.ionmov,
"optcell": self.abivars.optcell,
"ntime" : self.abivars.ntime,
}
# Atom relaxation.
if self.move_atoms:
out_vars.update({
"tolmxf": self.abivars.tolmxf,
})
if self.abivars.atoms_constraints:
# Add input variables for constrained relaxation.
raise NotImplementedError("")
out_vars.update(self.abivars.atoms_constraints.to_abivars())
# Cell relaxation.
if self.move_cell:
out_vars.update({
"dilatmx" : self.abivars.dilatmx,
"ecutsm" : self.abivars.ecutsm,
"strfact" : self.abivars.strfact,
"strtarget": self.abivars.strtarget,
})
return out_vars
def as_dict(self):
d = dict(self._default_vars)
d['@module'] = self.__class__.__module__
d['@class'] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
d = d.copy()
d.pop('@module', None)
d.pop('@class', None)
return cls(**d)
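# Hedged usage sketch (illustrative only): relaxing atomic positions only.
# With optcell == 0 the cell variables (dilatmx, ecutsm, ...) are omitted.
if __name__ == "__main__":
    relax = RelaxationMethod.atoms_only()
    assert relax.move_atoms and not relax.move_cell
    assert relax.to_abivars() == {"ionmov": 3, "optcell": 0, "ntime": 80, "tolmxf": None}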
class PPModel(AbivarAble, PMGSONable):
"""
Parameters defining the plasmon-pole technique.
    The common way to instantiate a PPModel object is via the class method PPModel.as_ppmodel(string)
"""
_mode2ppmodel = {
"noppmodel": 0,
"godby" : 1,
"hybersten": 2,
"linden" : 3,
"farid" : 4,
}
modes = Enum(k for k in _mode2ppmodel)
@classmethod
def as_ppmodel(cls, obj):
"""
Constructs an instance of PPModel from obj.
Accepts obj in the form:
            * PPModel instance
            * string, e.g. "godby:12.3 eV" or "linden".
"""
if isinstance(obj, cls):
return obj
# obj is a string
if ":" not in obj:
mode, plasmon_freq = obj, None
else:
# Extract mode and plasmon_freq
mode, plasmon_freq = obj.split(":")
try:
plasmon_freq = float(plasmon_freq)
except ValueError:
plasmon_freq, unit = plasmon_freq.split()
plasmon_freq = units.Energy(float(plasmon_freq), unit).to("Ha")
return cls(mode=mode, plasmon_freq=plasmon_freq)
def __init__(self, mode="godby", plasmon_freq=None):
assert mode in PPModel.modes
self.mode = mode
self.plasmon_freq = plasmon_freq
def __eq__(self, other):
if other is None:
return False
else:
if self.mode != other.mode:
return False
if self.plasmon_freq is None:
return other.plasmon_freq is None
else:
return np.allclose(self.plasmon_freq, other.plasmon_freq)
def __ne__(self, other):
return not self == other
def __bool__(self):
return self.mode != "noppmodel"
# py2 old version
__nonzero__ = __bool__
def __repr__(self):
return "<%s at %s, mode = %s>" % (self.__class__.__name__, id(self),
str(self.mode))
def to_abivars(self):
if self:
return {"ppmodel": self._mode2ppmodel[self.mode], "ppmfrq": self.plasmon_freq}
else:
return {}
@classmethod
def noppmodel(cls):
return cls(mode="noppmodel", plasmon_freq=None)
def as_dict(self):
return {"mode": self.mode, "plasmon_freq": self.plasmon_freq,
"@module": self.__class__.__module__,
"@class": self.__class__.__name__}
@staticmethod
def from_dict(d):
return PPModel(mode=d["mode"], plasmon_freq=d["plasmon_freq"])
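# Hedged usage sketch (illustrative only): the plasmon frequency is optional;
# when ppmfrq is None, abinit falls back to its internal default.
if __name__ == "__main__":
    ppm = PPModel.as_ppmodel("godby")
    assert ppm.mode == "godby" and ppm.plasmon_freq is None
    assert ppm.to_abivars() == {"ppmodel": 1, "ppmfrq": None}
    assert not PPModel.noppmodel()  # falsy and emits no abinit variables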
class HilbertTransform(AbivarAble):
"""
Parameters for the Hilbert-transform method (Screening code)
i.e. the parameters defining the frequency mesh used for the spectral function
and the frequency mesh used for the polarizability
"""
def __init__(self, nomegasf, domegasf=None, spmeth=1, nfreqre=None, freqremax=None, nfreqim=None, freqremin=None):
"""
Args:
nomegasf: Number of points for sampling the spectral function along the real axis.
domegasf: Step in Ha for the linear mesh used for the spectral function.
            spmeth: Algorithm for the representation of the delta function.
nfreqre: Number of points along the real axis (linear mesh).
freqremax: Maximum frequency for W along the real axis (in hartree).
            nfreqim: Number of points along the imaginary axis (Gauss-Legendre mesh).
freqremin: Minimum frequency for W along the real axis (in hartree).
"""
# Spectral function
self.nomegasf = nomegasf
self.domegasf = domegasf
self.spmeth = spmeth
# Mesh for the contour-deformation method used for the integration of the self-energy
self.nfreqre = nfreqre
self.freqremax = freqremax
self.freqremin = freqremin
self.nfreqim = nfreqim
def to_abivars(self):
"""Returns a dictionary with the abinit variables"""
return {
# Spectral function
"nomegasf": self.nomegasf,
"domegasf": self.domegasf,
"spmeth" : self.spmeth,
# Frequency mesh for the polarizability
"nfreqre" : self.nfreqre,
"freqremax": self.freqremax,
"nfreqim" : self.nfreqim,
"freqremin": self.freqremin,
}
class ModelDielectricFunction(AbivarAble):
"""Model dielectric function used for BSE calculation"""
def __init__(self, mdf_epsinf):
self.mdf_epsinf = mdf_epsinf
def to_abivars(self):
return {"mdf_epsinf": self.mdf_epsinf}
##########################################################################################
################################# WORK IN PROGRESS ######################################
##########################################################################################
class Screening(AbivarAble):
"""
This object defines the parameters used for the
computation of the screening function.
"""
# Approximations used for W
_WTYPES = {
"RPA": 0,
}
    # Self-consistency modes
_SC_MODES = {
"one_shot" : 0,
"energy_only" : 1,
"wavefunctions": 2,
}
def __init__(self, ecuteps, nband, w_type="RPA", sc_mode="one_shot",
hilbert=None, ecutwfn=None, inclvkb=2):
"""
Args:
ecuteps: Cutoff energy for the screening (Ha units).
            nband: Number of bands for the Green's function.
w_type: Screening type
sc_mode: Self-consistency mode.
            hilbert: Instance of :class:`HilbertTransform` defining the parameters for the Hilbert transform method.
ecutwfn: Cutoff energy for the wavefunctions (Default: ecutwfn == ecut).
inclvkb: Option for the treatment of the dipole matrix elements (NC pseudos).
"""
if w_type not in self._WTYPES:
raise ValueError("W_TYPE: %s is not supported" % w_type)
if sc_mode not in self._SC_MODES:
raise ValueError("Self-consistecy mode %s is not supported" % sc_mode)
self.ecuteps = ecuteps
self.nband = nband
self.w_type = w_type
self.sc_mode = sc_mode
self.ecutwfn = ecutwfn
self.inclvkb = inclvkb
if hilbert is not None:
raise NotImplementedError("Hilber transform not coded yet")
self.hilbert = hilbert
# Default values
# TODO Change abinit defaults
        self.gwpara = 2
        self.awtr = 1
        self.symchi = 1
@property
    def use_hilbert(self):
        # self.hilbert is always assigned in __init__ (possibly to None), so
        # hasattr would always be True; test against None instead.
        return self.hilbert is not None
#@property
#def gwcalctyp(self):
# "Return the value of the gwcalctyp input variable"
# dig0 = str(self._SIGMA_TYPES[self.type])
# dig1 = str(self._SC_MODES[self.sc_mode]
# return dig1.strip() + dig0.strip()
def to_abivars(self):
"""Returns a dictionary with the abinit variables"""
abivars = {
"ecuteps" : self.ecuteps,
"ecutwfn" : self.ecutwfn,
"inclvkb" : self.inclvkb,
"gwpara" : self.gwpara,
"awtr" : self.awtr,
"symchi" : self.symchi,
#"gwcalctyp": self.gwcalctyp,
#"fftgw" : self.fftgw,
}
        # Variables for the Hilbert transform.
if self.use_hilbert:
abivars.update(self.hilbert.to_abivars())
return abivars
class SelfEnergy(AbivarAble):
"""
This object defines the parameters used for the computation of the self-energy.
"""
_SIGMA_TYPES = {
"gw" : 0,
"hartree_fock": 5,
"sex" : 6,
"cohsex" : 7,
"model_gw_ppm": 8,
"model_gw_cd" : 9,
}
_SC_MODES = {
"one_shot" : 0,
"energy_only" : 1,
"wavefunctions": 2,
}
def __init__(self, se_type, sc_mode, nband, ecutsigx, screening,
gw_qprange=1, ppmodel=None, ecuteps=None, ecutwfn=None, gwpara=2):
"""
Args:
se_type: Type of self-energy (str)
sc_mode: Self-consistency mode.
nband: Number of bands for the Green's function
ecutsigx: Cutoff energy for the exchange part of the self-energy (Ha units).
screening: :class:`Screening` instance.
gw_qprange: Option for the automatic selection of k-points and bands for GW corrections.
                See Abinit docs for more detail. The default value makes the code compute the
                QP energies for all the points in the IBZ and one band above and one band below the Fermi level.
ppmodel: :class:`PPModel` instance with the parameters used for the plasmon-pole technique.
ecuteps: Cutoff energy for the screening (Ha units).
ecutwfn: Cutoff energy for the wavefunctions (Default: ecutwfn == ecut).
"""
if se_type not in self._SIGMA_TYPES:
raise ValueError("SIGMA_TYPE: %s is not supported" % se_type)
if sc_mode not in self._SC_MODES:
raise ValueError("Self-consistecy mode %s is not supported" % sc_mode)
self.type = se_type
self.sc_mode = sc_mode
self.nband = nband
self.ecutsigx = ecutsigx
self.screening = screening
self.gw_qprange = gw_qprange
self.gwpara = gwpara
if ppmodel is not None:
assert not screening.use_hilbert
self.ppmodel = PPModel.as_ppmodel(ppmodel)
self.ecuteps = ecuteps if ecuteps is not None else screening.ecuteps
self.ecutwfn = ecutwfn
#band_mode in ["gap", "full"]
#if isinstance(kptgw, str) and kptgw == "all":
# self.kptgw = None
# self.nkptgw = None
#else:
# self.kptgw = np.reshape(kptgw, (-1,3))
# self.nkptgw = len(self.kptgw)
#if bdgw is None:
# raise ValueError("bdgw must be specified")
#if isinstance(bdgw, str):
# # TODO add new variable in Abinit so that we can specify
# # an energy interval around the KS gap.
# homo = float(nele) / 2.0
# #self.bdgw =
#else:
# self.bdgw = np.reshape(bdgw, (-1,2))
#self.freq_int = freq_int
@property
def use_ppmodel(self):
"""True if we are using the plasmon-pole approximation."""
return hasattr(self, "ppmodel")
@property
def gwcalctyp(self):
"""Returns the value of the gwcalctyp input variable."""
dig0 = str(self._SIGMA_TYPES[self.type])
dig1 = str(self._SC_MODES[self.sc_mode])
return dig1.strip() + dig0.strip()
@property
def symsigma(self):
"""1 if symmetries can be used to reduce the number of q-points."""
return 1 if self.sc_mode == "one_shot" else 0
def to_abivars(self):
"""Returns a dictionary with the abinit variables."""
abivars = dict(
gwcalctyp=self.gwcalctyp,
ecuteps=self.ecuteps,
ecutsigx=self.ecutsigx,
symsigma=self.symsigma,
gw_qprange=self.gw_qprange,
gwpara=self.gwpara
#"ecutwfn" : self.ecutwfn,
#"kptgw" : self.kptgw,
#"nkptgw" : self.nkptgw,
#"bdgw" : self.bdgw,
)
# FIXME: problem with the spin
#assert len(self.bdgw) == self.nkptgw
# ppmodel variables
if self.use_ppmodel:
abivars.update(self.ppmodel.to_abivars())
return abivars
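# Hedged usage sketch (illustrative only): a one-shot G0W0 setup combining a
# Screening object with a plasmon-pole SelfEnergy. Note that gwcalctyp
# concatenates the self-consistency digit with the sigma-type digit.
if __name__ == "__main__":
    scr = Screening(ecuteps=4.0, nband=50)
    sigma = SelfEnergy("gw", "one_shot", nband=50, ecutsigx=8.0,
                       screening=scr, ppmodel="godby")
    assert sigma.gwcalctyp == "00" and sigma.symsigma == 1
    print(sigma.to_abivars())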
class ExcHamiltonian(AbivarAble):
"""This object contains the parameters for the solution of the Bethe-Salpeter equation."""
# Types of excitonic Hamiltonian.
_EXC_TYPES = {
"TDA": 0, # Tamm-Dancoff approximation.
"coupling": 1, # Calculation with coupling.
}
# Algorithms used to compute the macroscopic dielectric function
# and/or the exciton wavefunctions.
_ALGO2VAR = {
"direct_diago": 1,
"haydock" : 2,
"cg" : 3,
}
# Options specifying the treatment of the Coulomb term.
_COULOMB_MODES = [
"diago",
"full",
"model_df"
]
def __init__(self, bs_loband, nband, soenergy, coulomb_mode, ecuteps, spin_mode="polarized", mdf_epsinf=None,
exc_type="TDA", algo="haydock", with_lf=True, bs_freq_mesh=None, zcut=None, **kwargs):
"""
Args:
bs_loband: Lowest band index (Fortran convention) used in the e-h basis set.
Can be scalar or array of shape (nsppol,). Must be >= 1 and <= nband
nband: Max band index used in the e-h basis set.
soenergy: Scissors energy in Hartree.
coulomb_mode: Treatment of the Coulomb term.
ecuteps: Cutoff energy for W in Hartree.
            mdf_epsinf: Macroscopic dielectric function :math:`\epsilon_\infty` used in
the model dielectric function.
exc_type: Approximation used for the BSE Hamiltonian
with_lf: True if local field effects are included <==> exchange term is included
bs_freq_mesh: Frequency mesh for the macroscopic dielectric function (start, stop, step) in Ha.
zcut: Broadening parameter in Ha.
**kwargs:
Extra keywords
"""
spin_mode = SpinMode.as_spinmode(spin_mode)
# We want an array bs_loband(nsppol).
try:
bs_loband = np.reshape(bs_loband, spin_mode.nsppol)
except ValueError:
bs_loband = np.array(spin_mode.nsppol * [int(bs_loband)])
self.bs_loband = bs_loband
self.nband = nband
self.soenergy = soenergy
self.coulomb_mode = coulomb_mode
assert coulomb_mode in self._COULOMB_MODES
self.ecuteps = ecuteps
self.mdf_epsinf = mdf_epsinf
self.exc_type = exc_type
assert exc_type in self._EXC_TYPES
self.algo = algo
assert algo in self._ALGO2VAR
self.with_lf = with_lf
# if bs_freq_mesh is not given, abinit will select its own mesh.
self.bs_freq_mesh = np.array(bs_freq_mesh) if bs_freq_mesh is not None else bs_freq_mesh
self.zcut = zcut
# Extra options.
self.kwargs = kwargs
#if "chksymbreak" not in self.kwargs:
# self.kwargs["chksymbreak"] = 0
# Consistency check
        if any(bs_loband <= 0):
            raise ValueError("bs_loband must be >= 1 but got %s" % bs_loband)
if any(bs_loband >= nband):
raise ValueError("bs_loband (%s) >= nband (%s)" % (bs_loband, nband))
@property
def inclvkb(self):
"""Treatment of the dipole matrix element (NC pseudos, default is 2)"""
return self.kwargs.get("inclvkb", 2)
@property
def use_haydock(self):
"""True if we are using the Haydock iterative technique."""
return self.algo == "haydock"
@property
def use_cg(self):
"""True if we are using the conjugate gradient method."""
return self.algo == "cg"
@property
def use_direct_diago(self):
"""True if we are performing the direct diagonalization of the BSE Hamiltonian."""
return self.algo == "direct_diago"
def to_abivars(self):
"""Returns a dictionary with the abinit variables."""
abivars = dict(
bs_calctype=1,
bs_loband=self.bs_loband,
#nband=self.nband,
soenergy=self.soenergy,
ecuteps=self.ecuteps,
            bs_algorithm=self._ALGO2VAR[self.algo],
bs_coulomb_term=21,
mdf_epsinf=self.mdf_epsinf,
bs_exchange_term=1 if self.with_lf else 0,
inclvkb=self.inclvkb,
zcut=self.zcut,
bs_freq_mesh=self.bs_freq_mesh,
bs_coupling=self._EXC_TYPES[self.exc_type],
)
if self.use_haydock:
# FIXME
abivars.update(
bs_haydock_niter=100, # No. of iterations for Haydock
bs_hayd_term=0, # No terminator
bs_haydock_tol=[0.05, 0], # Stopping criteria
)
elif self.use_direct_diago:
raise NotImplementedError("")
elif self.use_cg:
raise NotImplementedError("")
else:
raise ValueError("Unknown algorithm for EXC: %s" % self.algo)
# Add extra kwargs
abivars.update(self.kwargs)
return abivars
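# Hedged usage sketch (illustrative only): a Tamm-Dancoff BSE run solved with
# the Haydock method. A scalar bs_loband is broadcast to one value per spin
# channel (two for the default polarized mode).
if __name__ == "__main__":
    exc = ExcHamiltonian(bs_loband=2, nband=8, soenergy=0.0,
                         coulomb_mode="model_df", ecuteps=2.0,
                         mdf_epsinf=12.0, algo="haydock")
    d = exc.to_abivars()
    assert d["bs_coupling"] == 0 and d["bs_algorithm"] == 2
    print(d["bs_loband"])  # -> [2 2]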
|
sonium0/pymatgen
|
pymatgen/io/abinitio/abiobjects.py
|
Python
|
mit
| 42,022
|
[
"ABINIT",
"CRYSTAL",
"Gaussian",
"pymatgen"
] |
e8f962b18411750e5b74cb174b1e1404965c9fef2bc86a1f0baeb90e94a120d6
|
""" This is the customize script for GPAW on Sherlock
Compiling gpaw with this customize.py (which you should keep in the
directory where you've cloned the repository) can be done by
python setup.py --remove-default-flags --customize=../sherlock.stanford.edu.customize.py build_ext
"""
#careful here:
# mpiicc is intelmpi wrapper for icc
# mpicc is openmpi wrapper for a c-compiler (which one?)
# at least with compiler = 'icc', mpicompiler = 'mpicc', mpilinker = 'mpicc'
# icc is used as compiler.
compiler = 'icc -mkl=sequential'
mpicompiler = 'mpicc -mkl=sequential' # use None if you don't want to build a gpaw-python
mpilinker = 'mpicc -mkl=sequential'
# platform_id = ''
scalapack = True
hdf5 = False
scalapack_dir = '$MKLROOT'
hdf5_dir = '/share/sw/free/hdf5/1.8.16/intel'
libxc_dir = '/home/rasmusk/programs/libxc-3.0.0/install'
libvdwxc_path = '/home/rasmusk/programs/libvdwxc_compiled'
# MKL
library_dirs = ['${MKLROOT}/lib/intel64']#, '${MPI_ROOT}/lib']
# MKL needs to be linked explicitly
libraries = ['mkl_intel_lp64',
'mkl_core',
'mkl_sequential',
'm',
'dl',
'pthread',
'open-rte', # We need these next four because intel IPO needs them defined explicitly
'open-pal',
'pmi2',
'pciaccess']
extra_compile_args = ['-Wall',
'-O3',
#'-Ofast', # O3 plus more, disregards strict standards compliance
#'-O2',
'-ipo0',
'-fPIC',
'-std=c99']
include_dirs += ['${MKLROOT}/include/']#, '${MPI_ROOT}/include']
if hdf5:
library_dirs += [hdf5_dir + '/lib']
include_dirs += [hdf5_dir + '/include']
# Use ScaLAPACK:
# Warning! At least scalapack 2.0.1 is required!
# See https://trac.fysik.dtu.dk/projects/gpaw/ticket/230
if scalapack:
# This actually works!
# To test it, go to a compute node and run
# gpaw-python $(which gpaw) info
# gpaw command cannot show scalapack, only parallel version of
# gpaw (=gpaw-python) can!
libraries += ['mkl_scalapack_lp64',
'mkl_blacs_openmpi_lp64']
#library_dirs += ['$MPI_ROOT/lib']
#extra_link_args += [#scalapack_dir + '/lib/intel64/libmkl_scalapack_lp64.a',
# scalapack_dir + '/lib/intel64/libmkl_blacs_openmpi_lp64.a']
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
# LibXC:
# In order to link libxc installed in a non-standard location
# (e.g.: configure --prefix=/home/user/libxc-2.0.1-1), use:
# - static linking:
if 0:
include_dirs += ['/home//include']
extra_link_args += ['/home/user/libxc-2.0.1-1/lib/libxc.a']
if 'xc' in libraries:
libraries.remove('xc')
# - dynamic linking (requires rpath or setting LD_LIBRARY_PATH at runtime):
if 1:
include_dirs += [libxc_dir + '/include']
library_dirs += [libxc_dir + '/lib']
# You can use rpath to avoid changing LD_LIBRARY_PATH:
# extra_link_args += ['-Wl,-rpath=/home/user/libxc-2.0.1-1/lib']
if 'xc' not in libraries:
libraries.append('xc')
# libvdwxc:
if 1:
libvdwxc = True
#extra_link_args += ['-Wl,-rpath=%s/lib' % libvdwxc_path]
library_dirs += ['%s/lib' % libvdwxc_path]
include_dirs += ['%s/include' % libvdwxc_path]
libraries += ['vdwxc', 'fftw3_mpi', 'fftw3']
# Build MPI-interface into _gpaw.so:
if 0:
compiler = 'mpicc'
define_macros += [('PARALLEL', '1')]
mpicompiler = None
|
RKBK/gpaw-customize-files
|
sherlock.stanford.edu.customize.py
|
Python
|
mit
| 3,589
|
[
"GPAW"
] |
81dfa426749cee270e67189255c3a715c3b95d1435f30e3ddf2e0f12dae85e26
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
#!/usr/bin/env python
from __future__ import division, unicode_literals
"""
#TODO: Write module doc.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
warnings.warn("pymatgen.io.gaussianio has been moved pymatgen.io.gaussian. "
"This stub will be removed in pymatgen 4.0.")
from .gaussian import *
|
sonium0/pymatgen
|
pymatgen/io/gaussianio.py
|
Python
|
mit
| 589
|
[
"Gaussian",
"pymatgen"
] |
ddeb75eddae3593863748eb4b68f4e9e7cc994a333611eb1d78d192e075f7975
|
import numpy as np  # needed by log_likelihoods below
import tensorflow as tf
from kentf.scoping import adapt_name
class DiagonalCovarianceGaussian:
def __init__(self, means, log_variances, name=None):
name = adapt_name(name, "gaussian")
with tf.name_scope(name):
self.name = name
self.N = tf.identity(tf.shape(means)[0], "N")
self.dims = tf.shape(means)[1:]
self.means = tf.identity(means, "mean")
self.means_with_L = tf.expand_dims(means, 1)
self.log_variances = tf.identity(log_variances, "log-variance")
self.log_variances_with_L = tf.expand_dims(log_variances, 1)
self.stddevs = tf.identity(tf.exp(0.5 * self.log_variances), "stddev")
self.variances = tf.identity(tf.exp(self.log_variances), "variance")
self.variances_with_L = tf.expand_dims(self.variances, 1)
def unit_gaussian(self, name=None):
name = adapt_name(name, "unit-gaussian")
with tf.name_scope(name):
            # A unit Gaussian has unit variance, i.e. zero log-variance
            # (the original passed tf.ones for the log-variances).
            return DiagonalCovarianceGaussian(
                tf.zeros(tf.shape(self.means)),
                tf.zeros(tf.shape(self.log_variances)), name=name)
def sample(self, L=1, name=None):
name = adapt_name(name, "sample_%s" % self.name)
with tf.name_scope(name):
shape = tf.concat([[self.N, L], self.dims], axis=0)
noise = tf.random_normal(shape, 0, 1, dtype=tf.float32, name="noise")
            # Reparameterization: scale the noise by the stddev, not the log-variance.
            samples = self.means_with_L + tf.exp(0.5 * self.log_variances_with_L) * noise
return tf.identity(samples, name)
def log_likelihoods(self, samples, name=None):
name = adapt_name(name, "log-likelihood_%s" % self.name)
with tf.name_scope(name):
out = np.log(2 * np.pi) + self.log_variances_with_L
out += tf.square(samples - self.means_with_L) / self.variances_with_L
out *= -0.5
return tf.identity(out, name)
def kl_divergence_from_unit(self, name=None):
name = adapt_name(name, "kl-divergence-from-unit_%s" % self.name)
return DiagonalCovarianceGaussian.kl_divergence(self,
self.unit_gaussian(), name=name)
@classmethod
def kl_divergence(cls, p, q, name=None):
name = adapt_name(name, "kl-divergence")
with tf.name_scope(name):
inner = p.variances + tf.square(p.means - q.means)
inner /= q.variances
inner = 1 + p.log_variances - q.log_variances - inner
inner *= -0.5
kl = tf.reduce_sum(inner, list(range(1, len(inner.shape))))
kl = tf.identity(kl, name)
return kl
@classmethod
def symmetric_kl_divergence(cls, p, q, name=None):
name = adapt_name(name, "symmetric-kl-divergence")
with tf.name_scope(name):
return tf.identity(cls.kl_divergence(p, q) + cls.kl_divergence(q, p), name)
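# Hedged usage sketch (assumes TensorFlow 1.x graph mode and an importable
# kentf package). A batch of four 3-dimensional standard Gaussians has zero
# KL divergence from the unit Gaussian by construction.
if __name__ == "__main__":
    gauss = DiagonalCovarianceGaussian(tf.zeros([4, 3]), tf.zeros([4, 3]))
    kl = gauss.kl_divergence_from_unit()  # shape (4,), all zeros
    samples = gauss.sample(L=2)           # shape (4, 2, 3)
    with tf.Session() as sess:
        print(sess.run(kl))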
|
kkleidal/kentf
|
gaussian.py
|
Python
|
mit
| 2,860
|
[
"Gaussian"
] |
dac680ecea605a7f8441d64caf784ba84bec1aa4224d740ec2f7535a46405933
|
"""Contains the parent class for MPF's Language module."""
# language.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import re
from mpf.system.config import Config
class Language(object):
"""MPF module which handles text, audio, and video replacement of objects
for multi-language environments.
Args:
machine: The main machine object
"""
def __init__(self, machine):
self.log = logging.getLogger('Language')
self.machine = machine
self.config = None
self.machine.language = None
self.current_language = None
# See if there's a Languages section in the config and it's not empty
if 'languages' in self.machine.config and (
self.machine.config['languages']):
self._configure()
def _configure(self):
self.config = self.machine.config['languages']
self.machine.language = self
self.languages = Config.string_to_list(
self.machine.config['languages'])
# Set the default language to the first entry in the list
self.set_language(self.languages[0])
self.default_language = self.languages[0]
        self.find_text = re.compile(r'(\(.*?\))')
def set_language(self, language_string):
"""Sets the current language based on the string passed.
Args:
language_string: The string name of the language you want to set the
machine to.
Language strings can be whatever you want, based on how you define them
in your config file. It can be an actual language, like English or
French, or it can simply be alternate assets, like "Kid-Friendly" versus
"Mature."
This language change is instant, and you can safely call it often.
Change languages for each player in the same game, or even in the middle
of a ball!
"""
self.log.debug('Setting language to: %s', language_string)
self.current_language = language_string
def get_language(self):
"""Returns the string name of the current language."""
return self.current_language
def text(self, text):
"""Translates a text string (or part of a text string) based on the
current language setting.
Args:
text: The string of text you want to translate.
Returns: A translated string.
The incoming text string is searched for text within parentheses, and
each of those segments is looked up for replacement. You can wrap the
entire string in parentheses, or just part of it, or multiple parts.
A new, translated string is returned with the parentheses removed. If
a translation is not found in the current language's translation
strings, the original text is returned.
The string lookup is case-sensitive since different languages have
different rules around casing.
It is not possible to display text with parentheses in it since this
method will remove them. If this is something you need, contact us and
we can add that feature.
"""
self.log.debug("Getting language for text: %s", text)
if self.config and '(' in text and ')' in text:
for match in self.find_text.findall(text):
replacement_string = match
text_string = replacement_string[1:-1]
modified_string = text_string
if (self.current_language in self.machine.config['languagestrings']
and text_string in self.machine.config['languagestrings']
[self.current_language]):
modified_string = (self.machine.config['languagestrings']
[self.current_language][text_string])
text = text.replace(replacement_string, modified_string)
return text
def get_text(self, text, language):
"""Returns a translated text string for a specific language string.
Args:
text: The text string you'd like to get the replacement for.
language: The language you'd like to lookup for the replacement.
If the specific text string and language combination doesn't exist in
the translation file, the original string is returned.
The string lookup is case-sensitive.
This method is similar to text(), except this method doesn't strip out
the parentheses. (i.e. it's just used to look up what's "inside" the
parentheses.)
"""
        # Look the string up in the config for the requested language; the
        # original referenced an undefined self.text_dict here.
        strings = self.machine.config.get('languagestrings', {})
        if language in strings and text in strings[language]:
            return strings[language][text]
        else:
            return text
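# Hedged usage sketch (illustrative only): a minimal stand-in machine whose
# config carries the 'languages' and 'languagestrings' sections. This assumes
# Config.string_to_list() splits a comma-separated string into a list.
if __name__ == "__main__":
    class _FakeMachine(object):
        config = {'languages': 'english, french',
                  'languagestrings': {'french': {'Hello': 'Bonjour'}}}
    lang = Language(_FakeMachine())
    lang.set_language('french')
    print(lang.text('(Hello) world'))  # -> "Bonjour world"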
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
mpf/media_controller/core/language.py
|
Python
|
mit
| 6,008
|
[
"Brian"
] |
5cb34fe9a20828434b89af9ea1408fdb46d48a3ed99dac71de7e7366305ddac2
|
from __future__ import print_function, division
import time
import warnings
from mdtraj.utils.delay_import import import_
from mdtraj.utils.validation import ensure_type, cast_indices, check_random_state
from mdtraj.utils.unit import in_units_of
from mdtraj.utils.rotation import rotation_matrix_from_quaternion, uniform_quaternion
from mdtraj.utils.unitcell import (lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles)
from mdtraj.utils.contextmanagers import timing, enter_temp_directory
__all__ = ["ensure_type", "import_", "in_units_of",
"lengths_and_angles_to_box_vectors",
"box_vectors_to_lengths_and_angles",
"ilen", "timing", "cast_indices", "check_random_state",
"rotation_matrix_from_quaternion", "uniform_quaternion",
"enter_temp_directory", "timing", "deprecated"]
# Make sure that DeprecationWarning get printed
warnings.simplefilter("always", DeprecationWarning)
def ilen(iterable):
"""Length of an iterator. Note, this consumes the iterator
Parameters
----------
iterable : iterable
An iterable, such as a generator, list, etc.
Returns
-------
length : int
The number of elements in the iterable
"""
return sum(1 for _ in iterable)
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Copied from scikit-learn: sklearn/utils/__init__.py
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
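# Hedged usage sketch (illustrative only): decorating a function both emits a
# DeprecationWarning when it is called and prepends "DEPRECATED" to its
# docstring; the names below are made up for the example.
if __name__ == "__main__":
    @deprecated("use shiny_new_function instead")
    def old_function():
        """Original docstring."""
        return 42
    print(old_function())        # warns, then returns 42
    print(old_function.__doc__)  # DEPRECATED: use shiny_new_function instead...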
|
kyleabeauchamp/mdtraj
|
mdtraj/utils/__init__.py
|
Python
|
lgpl-2.1
| 3,720
|
[
"MDTraj"
] |
c349c6eef34cb71e7f8e793f72f1986c967aee9f22a14a193068ee4aae96334a
|
from __future__ import unicode_literals
from django.conf import settings
import six.moves.urllib.parse as urlparse
from horizon.utils.memoized import memoized # noqa
import requests
import json
@memoized
def get_token(request):
return request.user.token.id
# -----------------------------------------------------------------------------
#
# Crystal Projects
#
def list_projects_crystal_enabled(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/"
headers["X-Auth-Token"] = str(token)
r = requests.get(url, headers=headers)
return r
def is_crystal_project(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/" + str(project_id)
headers["X-Auth-Token"] = str(token)
r = requests.post(url, headers=headers)
return r
def enable_crystal(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/" + str(project_id)
headers["X-Auth-Token"] = str(token)
r = requests.put(url, headers=headers)
return r
def disable_crystal(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/" + str(project_id)
headers["X-Auth-Token"] = str(token)
r = requests.delete(url, headers=headers)
return r
#
# Project Groups
#
def create_projects_group(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/groups/"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
data['attached_projects'] = json.dumps(data['attached_projects'])
r = requests.post(url, json.dumps(data), headers=headers)
return r
def update_projects_group(request, data, group_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/groups/" + str(group_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
data['attached_projects'] = json.dumps(data['attached_projects'])
r = requests.put(url, json.dumps(data), headers=headers)
return r
def get_all_project_groups(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/groups/"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def get_project_group(request, group_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/groups/" + str(group_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def delete_project_group(request, group_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/groups/" + str(group_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
#
# Project users
#
def get_project_users(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/" + str(project_id) + "/users"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
#
# Project groups
#
def get_project_groups(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/projects/" + str(project_id) + "/groups"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
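# Hedged usage sketch (illustrative only; requires a configured Horizon
# deployment with a reachable Crystal controller, so it is shown as comments):
#
#   r = enable_crystal(request, project_id)
#   if r.ok:
#       users = get_project_users(request, project_id).json()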
|
Crystal-SDS/dashboard
|
crystal_dashboard/api/projects.py
|
Python
|
gpl-3.0
| 3,910
|
[
"CRYSTAL"
] |
84f76d13b586580df2026ecfea7200c625a49e707892ca01f43e8a96c34e26f2
|
#!/usr/bin/env python
r"""
Constructing workflows (:mod:`skbio.workflow`)
==============================================
.. currentmodule:: skbio.workflow
Construct arbitrarily complex workflows in which the specific methods run are
determined at runtime. This module supports short circuiting a workflow if an
item fails, supports ordering methods, callbacks for processed items, and
deciding what methods are executed based on state or runtime options.
Classes
-------
.. autosummary::
:toctree: generated/
Workflow
Decorators
----------
.. autosummary::
:toctree: generated/
requires
method
Examples
--------
>>> from skbio.workflow import Workflow
As an example of the ``Workflow`` object, let's construct a sequence processor
that will filter sequences that are < 10 nucleotides, reverse the sequence
if the runtime options indicate to, and truncate if a specific nucleotide
pattern is observed. The ``Workflow`` object will only short circuit, and
evaluate requirements on methods decorated by ``method``. Developers are free
to define as many methods as they'd like within the object definition, which
can be called from workflow methods, but they will not be subjected
directly to workflow checks.
>>> nuc_pattern = 'AATTG'
>>> has_nuc_pattern = lambda s: s[:len(nuc_pattern)] == nuc_pattern
>>> class SequenceProcessor(Workflow):
... def initialize_state(self, item):
... # Setup the state for a new item (e.g., a new sequence)
... self.state = item
... @method(priority=100)
... def check_length(self):
... # Always make sure the sequence is at least 10 nucleotides
... if len(self.state) < 10:
... self.failed = True
... @method(priority=90)
... @requires(state=has_nuc_pattern)
... def truncate(self):
... # Truncate if a specific starting nucleotide pattern is observed
... self.state = self.state[len(nuc_pattern):]
... @method(priority=80)
... @requires(option='reverse', values=True)
... def reverse(self):
...     # Reverse the sequence if indicated at runtime
... self.state = self.state[::-1]
An instance of a ``Workflow`` must be passed a ``state`` object and any runtime
options. There are a few other useful parameters that can be specified but are
out of scope for the purposes of this example. We also do not need to provide
a state object as our ``initialize_state`` method overrides ``self.state``.
Now, let's create the instance.
>>> wf = SequenceProcessor(state=None, options={'reverse': False})
To run items through the ``SequenceProcessor``, we need to pass in an
iterable. So, lets create a ``list`` of sequences.
>>> seqs = ['AAAAAAATTTTTTT', 'ATAGACC', 'AATTGCCGGAC', 'ATATGAACAAA']
Before we run these sequences through, we're going to also define callbacks
that are applied to the result of a single pass through the ``Workflow``.
Callbacks are optional -- by default, a success will simply yield the state
member variable while failures are ignored -- but, depending on your workflow,
it can be useful to handle failures or potentially do something fun and
exciting on success.
>>> def success_f(obj):
... return "SUCCESS: %s" % obj.state
>>> def fail_f(obj):
... return "FAIL: %s" % obj.state
Now, lets process some data!
>>> for result in wf(seqs, success_callback=success_f, fail_callback=fail_f):
... print result
SUCCESS: AAAAAAATTTTTTT
FAIL: ATAGACC
SUCCESS: CCGGAC
SUCCESS: ATATGAACAAA
A few things of note just happened. First off, none of the sequences were
reversed as the ``SequenceProcessor`` did not have option "reverse"
set to ``True``. Second, you'll notice that the 3rd sequence was truncated,
which is expected as it matched our nucleotide pattern of interest. Finally,
of the sequences we processed, only a single sequence failed.
To assist in constructing workflows, debug information is available but it
must be turned on at instantiation. Let's do that, and while we're at it, let's
go ahead and enable the reversal method. This time through though, were going
to walk through an item at a time so we can examine the debug information.
>>> wf = SequenceProcessor(state=None, options={'reverse':True}, debug=True)
>>> gen = wf(seqs, fail_callback=lambda x: x.state)
>>> gen.next()
'TTTTTTTAAAAAAA'
>>> print wf.failed
False
>>> print wf.debug_trace
set([('check_length', 0), ('reverse', 2)])
The ``debug_trace`` specifies the methods executed, and the order of their
execution where closer to zero indicates earlier in the execution order. Gaps
indicate there was a method evaluated but not executed. Each of the items in
the ``debug_trace`` is a key into a few other ``dict`` of debug information
which we'll discuss in a moment. Did you see that the sequence was reversed
this time through the workflow?
Now, let's take a look at the next item, which on our prior run through the
workflow was a failed item.
>>> gen.next()
'ATAGACC'
>>> print wf.failed
True
>>> print wf.debug_trace
set([('check_length', 0)])
What we can see is that the failed sequence only executed the check_length
method. Since the sequence didn't pass our length filter of 10 nucleotides,
it was marked as failed within the ``check_length`` method. As a result, none
of the other methods were evaluated (note: this short circuiting behavior can
be disabled if desired).
This third item previously matched our nucleotide pattern of interest for
truncation. Let's see what that looks like in the debug output.
>>> gen.next()
'CAGGCC'
>>> print wf.failed
False
>>> wf.debug_trace
set([('check_length', 0), ('truncate', 1), ('reverse', 2)])
In this last example, we can see that the ``truncate`` method was executed
prior to the ``reverse`` method and following the ``check_length`` method. This
is as anticipated given the priorities we specified for these methods. Since
the ``truncate`` method is doing something interesting, let's take a closer
look at how the ``state`` is changing. First, we're going to dump out the
state of the workflow prior to the call to ``truncate`` and then we're going
to dump out the ``state`` following the call to ``truncate``, which will allow
us to rapidly see what is going on.
>>> wf.debug_pre_state[('truncate', 1)]
'AATTGCCGGAC'
>>> wf.debug_post_state[('truncate', 1)]
'CCGGAC'
As we expect, we have our original sequence going into ``truncate``, and
following the application of ``truncate``, our sequence is missing our
nucleotide pattern of interest. Awesome, right?
There is one final piece of debug output, ``wf.debug_runtime``, which can
be useful when diagnosing the amount of time required for individual methods
on a particular piece of state (as opposed to the aggregate as provided by
cProfile).
Three final components of the workflow that are quite handy are objects that
allow you to indicate ``anything`` as an option value, anything that is
``not_none``, and a mechanism to define a range of valid values.
>>> from skbio.workflow import not_none, anything
>>> class Ex(Workflow):
... @method()
... @requires(option='foo', values=not_none)
... def do_something(self):
... pass
... @method()
... @requires(option='bar', values=anything)
... def do_something_else(self):
... pass
... @method()
... @requires(option='foobar', values=[1,2,3])
... def do_something_awesome(self):
... pass
...
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from future.utils import viewitems
import sys
from copy import deepcopy
from time import time
from functools import update_wrapper
from collections import Iterable
from types import MethodType
class NotExecuted(object):
"""Helper object to track if a method was executed"""
def __init__(self):
self.msg = None
def __call__(self, msg):
self.msg = msg
return self
_not_executed = NotExecuted()
class Exists(object):
"""Stub object to assist with ``requires`` when a value exists"""
def __contains__(self, item):
return True
anything = Exists() # external, for when a value can be anything
class NotNone(object):
def __contains__(self, item):
if item is None:
return False
else:
return True
not_none = NotNone()
class Workflow(object):
"""Arbitrary workflow support structure
Methods that are considered to be directly part of the workflow must
be decorated with ``method``. The workflow methods offer a mechanism to
logically group functionality together, and are free to make subsequent
calls to other methods.
All methods of a subclass of Workflow (those with and without the
``method`` decoration) can take advantage of the ``requires`` decorator
to specify any option or state requirements for the decorated function.
Parameters
----------
state : object
State can be anything or nothing. This is dependent on the
workflow as in some cases, it is useful to preallocate state
while in other workflows state may be ignored.
short_circuit : bool
if True, enables ignoring function methods when a given item
has failed
debug : bool
Enable debug mode
options : dict
runtime options, {'option':values}, that the ``requires``
decorator can interrogate.
kwargs : dict
Additional arguments will be added as member variables to self.
This is handy if additional contextual information is needed by a
workflow method (e.g., a lookup table).
Attributes
----------
state
short_circuit
debug
options
failed
"""
def __init__(self, state, short_circuit=True, debug=False, options=None,
**kwargs):
r"""Build thy workflow of self"""
if options is None:
self.options = {}
else:
self.options = options
self.short_circuit = short_circuit
self.failed = False
self.debug = debug
self.state = state
self.iter_ = None
for k, v in viewitems(kwargs):
if hasattr(self, k):
raise AttributeError("'%s' already exists in self." % k)
setattr(self, k, v)
if self.debug:
self._setup_debug()
def initialize_state(self, item):
"""Initialize state
This method is called first on each item, prior to any other defined
workflow method (including ``_setup_debug_trace`` if self.debug is True)
Parameters
----------
item : anything
Workflow dependent
"""
raise NotImplementedError("Must implement this method")
def _setup_debug(self):
"""Wrap all methods with debug trace support"""
# ignore all members of the baseclass
ignore = set(dir(Workflow))
for attrname in dir(self):
if attrname in ignore:
continue
attr = getattr(self, attrname)
if isinstance(attr, MethodType):
setattr(self, attrname, self._debug_trace_wrapper(attr))
def _all_wf_methods(self):
"""Get all workflow methods
Methods are sorted by priority
"""
methods = []
for item in dir(self):
obj = getattr(self, item)
if hasattr(obj, 'priority'):
methods.append(obj)
key = lambda x: getattr(x, 'priority')
methods_sorted = sorted(methods, key=key, reverse=True)
if self.debug:
methods_sorted.insert(0, self._setup_debug_trace)
return methods_sorted
def _setup_debug_trace(self):
"""Setup a trace
The trace is per item iterated over by the workflow. Information about
each method executed is tracked and keyed by::
(function name, order of execution)
Order of execution starts from zero. Multiple calls to the same
function are independent in the trace.
The following information is tracked::
debug_trace : set([key])
debug_runtime : {key: runtime}
debug_pre_state : {key: deepcopy(Workflow.state)}, state prior to
method execution
debug_post_state : {key: deepcopy(Workflow.state)}, state following
method execution
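For example, the first method executed on an item would show up under the
key ``('check_length', 0)`` in ``debug_trace`` (method name hypothetical).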
"""
self.debug_counter = 0
self.debug_trace = set()
self.debug_runtime = {}
self.debug_pre_state = {}
self.debug_post_state = {}
def __call__(self, iter_, success_callback=None, fail_callback=None):
"""Operate on all the data
This is the processing engine of the workflow. Callbacks are executed
following applying all workflow methods to an item from ``iter_``
(unless ``short_circuit=True`` in which case method execution for an
item is stopped if ``failed=True``). Callbacks are provided ``self``
which allows them to examine any aspect of the workflow.
Parameters
----------
iter_ : an iterator
success_callback : method to call on a successful item prior to
yielding. By default, ``self.state`` is yielded.
fail_callback : method to call on a failed item prior to yielding. By
default, failures are ignored.
.. shownumpydoc
"""
if success_callback is None:
success_callback = lambda x: x.state
self.iter_ = iter_
workflow = self._all_wf_methods()
for item in self.iter_:
self.failed = False
self.initialize_state(item)
for func in workflow:
if self.short_circuit and self.failed:
break
else:
func()
if self.failed:
if fail_callback is not None:
yield fail_callback(self)
else:
yield success_callback(self)
self.iter_ = None
def _debug_trace_wrapper(self, func):
"""Trace a function call"""
def wrapped():
"""Track debug information about a method execution"""
if not hasattr(self, 'debug_trace'):
raise AttributeError(
"%s doesn't have debug_trace!" % self.__class__)
exec_order = self.debug_counter
name = func.__name__
key = (name, exec_order)
pre_state = deepcopy(self.state)
self.debug_trace.add(key)
self.debug_counter += 1
start_time = time()
if func() is _not_executed:
self.debug_trace.remove(key)
else:
self.debug_runtime[key] = time() - start_time
self.debug_pre_state[key] = pre_state
self.debug_post_state[key] = deepcopy(self.state)
return update_wrapper(wrapped, func)
class method(object):
"""Decorate a function to indicate it is a workflow method
Parameters
----------
priority : int
Specify a priority for the method, the higher the value the higher
the priority. Priorities are relative to a given workflow
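Examples
--------
A minimal sketch (hypothetical workflow): ``runs_first`` executes before
``runs_later`` because it carries the higher priority.
>>> class MyWorkflow(Workflow):
... @method(priority=10)
... def runs_first(self):
... pass
... @method(priority=1)
... def runs_later(self):
... pass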
"""
highest_priority = sys.maxsize
def __init__(self, priority=0):
self.priority = priority
def __call__(self, func):
func.priority = self.priority
return func
class requires(object):
"""Decorator that executes a function if requirements are met
Parameters
----------
option : any Hashable object
An option that is required for the decorated method to execute.
This option will be looked up within the containing ``Workflow``'s
``options``.
values : object
A required value. This defaults to ``anything`` indicating that
the only requirement is that the ``option`` exists. It can be
useful to specify ``not_none`` which indicates that the
requirement is satisfied if the ``option`` exists and it holds
a value that is not ``None``. Values also supports iterables
or singular values.
state : Function
A requirement on workflow state. This must be a function that
accepts a single argument, and returns ``True`` to indicate
the requirement is satisfied, or ``False`` to indicate the
requirement is not satisfied. This method will be passed the
containing ``Workflow``'s ``state`` member variable.
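Examples
--------
A minimal sketch (hypothetical): ``needs_foo`` only executes when the
workflow was constructed with an option ``foo`` whose value is 1 or 2.
>>> class MyWorkflow(Workflow):
... @method()
... @requires(option='foo', values=[1, 2])
... def needs_foo(self):
... pass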
"""
def __init__(self, option=None, values=anything, state=None):
# self here is the requires object
self.option = option
self.required_state = state
if values is anything:
self.values = anything
elif values is not_none:
self.values = not_none
elif isinstance(values, set):
self.values = values
else:
if isinstance(values, str):
self.values = values
elif isinstance(values, Iterable):
self.values = set(values)
else:
self.values = set([values])
def __call__(self, func):
"""Wrap a function
func : the function to wrap
"""
def decorated(dec_self):
"""A decorated function that has requirements
dec_self : this is "self" for the decorated function
"""
if self.required_state is not None:
if not self.required_state(dec_self.state):
return _not_executed
s_opt = self.option
ds_opts = dec_self.options
# if this is a function that doesn't have an option to validate
if s_opt is None:
func(dec_self)
# if the option exists in the Workflow
elif s_opt in ds_opts:
val = ds_opts[s_opt]
# if the value just needs to be not None
if self.values is not_none and val is not None:
func(dec_self)
# otherwise make sure the value is acceptable
elif val in self.values:
func(dec_self)
else:
return _not_executed
else:
return _not_executed
return update_wrapper(decorated, func)
|
Kleptobismol/scikit-bio
|
skbio/workflow.py
|
Python
|
bsd-3-clause
| 18,613
|
[
"exciting",
"scikit-bio"
] |
154978bc5cdfcef5b08f6c9d4bed5d14325f942ee6b42104bdbda281451443e8
|
from dnfpy.core.map2D import Map2D
import dnfpy.core.utils as utils
import numpy as np
class OnOffFilter(Map2D):
"""
One on cell in the center and two off cells
Parameters :
on cell stdXY, intXY
off cells stdXY, intXY
shift distance (distance between the center of the on-Gaussian and the center of the off-Gaussian)
TODO finish this class
"""
def _compute(self,size,onIntXY,onStdXY,offIntXY,offStdXY,shift):
onCell = utils.getAssymetricGaussian2D(size,onIntXY,onStdXY)
offCell1 = utils.getAssymetricGaussian2D(size,offIntXY,offStdXY)
offCell2 = np.array(offCell1)
#for now it is a vertical edge detector TODO generalize
offCell1 = np.roll(offCell1,-int(shift))
offCell2 = np.roll(offCell2,int(shift))
self._data = onCell - (offCell1 + offCell2)
def _onParamsUpdate(self,size,onStdXY,offStdXY,shift):
onStdXY *= size
offStdXY *= size
shift *= size
ret = dict(onStdXY=onStdXY,offStdXY=offStdXY,shift=shift)
return ret
|
bchappet/dnfpy
|
src/dnfpy/model/onOffFilter.py
|
Python
|
gpl-2.0
| 1,085
|
[
"Gaussian"
] |
5c84880744c06396a3bb9200ca094cc7cda92815b4719602c12ba3da352f55aa
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
import numpy as np
from monty.json import MSONable
from scipy.spatial import HalfspaceIntersection
from scipy.optimize import bisect
from itertools import chain
from pymatgen.electronic_structure.dos import FermiDos
from pymatgen.analysis.defects.core import DefectEntry
from pymatgen.analysis.structure_matcher import PointDefectComparator
import matplotlib.pyplot as plt
import matplotlib.cm as cm
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectPhaseDiagram(MSONable):
"""
This is similar to a PhaseDiagram object in pymatgen,
but has ability to do quick analysis of defect formation energies
when fed DefectEntry objects.
uses many of the capabilities from PyCDT's DefectsAnalyzer class...
This class is able to get:
a) stability of charge states for a given defect,
b) list of all formation energies,
c) transition levels in the gap
Args:
entries ([DefectEntry]): A list of DefectEntry objects
vbm (float): Valence Band energy to use for all defect entries.
NOTE if using band shifting-type correction then this VBM
should still be that of the GGA calculation
(the bandedgeshifting_correction accounts for shift's
contribution to formation energy).
band_gap (float): Band gap to use for all defect entries.
NOTE if using band shifting-type correction then this gap
should still be that of the Hybrid calculation you are shifting to.
filter_compatible (bool): Whether to filter out entries which were ruled
incompatible by the DefectCompatibility class. Note this must be set to False
if you desire a suggestion for larger supercell sizes.
Default is True (to omit calculations which have "is_compatible"=False in
DefectEntry's parameters)
metadata (dict): Dictionary of metadata to store with the PhaseDiagram. Has
no impact on calculations.
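Example:
A hypothetical usage sketch (entries, vbm and band_gap are illustrative):
dpd = DefectPhaseDiagram(entries, vbm=0.0, band_gap=1.5)
print(dpd.transition_levels) # {defect name: [transition levels in gap]}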
"""
def __init__(self, entries, vbm, band_gap, filter_compatible=True, metadata={}):
self.vbm = vbm
self.band_gap = band_gap
self.filter_compatible = filter_compatible
if filter_compatible:
self.entries = [e for e in entries if e.parameters.get("is_compatible", True)]
else:
self.entries = entries
for ent_ind, ent in enumerate(self.entries):
if 'vbm' not in ent.parameters.keys() or ent.parameters['vbm'] != vbm:
logger.info("Entry {} did not have vbm equal to given DefectPhaseDiagram value."
" Manually overriding.".format( ent.name))
new_ent = ent.copy()
new_ent.parameters['vbm'] = vbm
self.entries[ent_ind] = new_ent
self.metadata = metadata
self.find_stable_charges()
def as_dict(self):
"""
Json-serializable dict representation of DefectPhaseDiagram
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self.entries],
"vbm": self.vbm,
"band_gap": self.band_gap,
"filter_compatible": self.filter_compatible,
"metadata": self.metadata}
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a DefectPhaseDiagram object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of DefectPhaseDiagram.
Returns:
DefectPhaseDiagram object
"""
entries = [DefectEntry.from_dict(entry_dict) for entry_dict in d.get("entries")]
vbm = d["vbm"]
band_gap = d["band_gap"]
filter_compatible = d.get("filter_compatible", True)
metadata = d.get("metadata", {})
if 'entry_id' in d.keys() and 'entry_id' not in metadata:
metadata['entry_id'] = d['entry_id']
return cls(entries, vbm, band_gap, filter_compatible=filter_compatible,
metadata=metadata)
def find_stable_charges(self):
"""
Sets the stable charges and transition states for a series of
defect entries. This function uses scipy's HalfspaceIntersection
to construct the polygons corresponding to defect stability as
a function of the Fermi-level. The Halfspace Intersection
constructs N-dimensional hyperplanes, in this case N=2, based
on the equation of defect formation energy with considering chemical
potentials:
E_form = E_0^{Corrected} + Q_{defect}*(E_{VBM} + E_{Fermi})
Extra hyperplanes are constructed to bound this space so that
the algorithm can actually find an enclosed region.
This code was modeled after the Halfspace Intersection code for
the Pourbaix Diagram
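For example (hypothetical numbers), an entry with charge Q = +2,
corrected energy E_0 = 1.0 eV and VBM = 0.5 eV contributes the
hyperplane row [-Q, 1, -(E_0 + Q*VBM)] = [-2, 1, -2.0], i.e.
-2*E_fermi + E - 2.0 <= 0.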
"""
def similar_defects( entryset):
"""
Used for grouping similar defects of different charges
Can distinguish identical defects even if they are not in the same position
"""
pdc = PointDefectComparator( check_charge=False, check_primitive_cell=True,
check_lattice_scale=False)
grp_def_sets = []
grp_def_indices = []
for ent_ind, ent in enumerate( entryset):
# TODO: more pythonic way of grouping entry sets with PointDefectComparator.
# this is currently most time intensive part of DefectPhaseDiagram
matched_ind = None
for grp_ind, defgrp in enumerate(grp_def_sets):
if pdc.are_equal( ent.defect, defgrp[0].defect):
matched_ind = grp_ind
break
if matched_ind is not None:
grp_def_sets[matched_ind].append( ent.copy())
grp_def_indices[matched_ind].append( ent_ind)
else:
grp_def_sets.append( [ent.copy()])
grp_def_indices.append( [ent_ind])
return zip(grp_def_sets, grp_def_indices)
# Limits for search
# E_fermi = { -1 eV to band gap+1}
# E_formation = { (min(Eform) - 30) to (max(Eform) + 30)}
all_eform = [one_def.formation_energy(fermi_level=self.band_gap/2.) for one_def in self.entries]
min_y_lim = min(all_eform) - 30
max_y_lim = max(all_eform) + 30
limits = [[-1, self.band_gap + 1], [min_y_lim, max_y_lim]]
stable_entries = {}
finished_charges = {}
transition_level_map = {}
# Grouping by defect types
for defects, index_list in similar_defects( self.entries):
defects = list(defects)
# prepping coefficient matrix for half-space intersection
# [-Q, 1, -1*(E_form+Q*VBM)] -> -Q*E_fermi+E+-1*(E_form+Q*VBM) <= 0 where E_fermi and E are the variables in the hyperplanes
hyperplanes = np.array(
[[-1.0 * entry.charge, 1, -1.0 * (entry.energy + entry.charge * self.vbm)] for entry in defects])
border_hyperplanes = [[-1, 0, limits[0][0]], [1, 0, -1 * limits[0][1]], [0, -1, limits[1][0]],
[0, 1, -1 * limits[1][1]]]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = [self.band_gap / 2, min(all_eform) - 1.]
hs_ints = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
# Group the intersections and corresponding facets
ints_and_facets = zip(hs_ints.intersections, hs_ints.dual_facets)
# Only include the facets corresponding to entries, not the boundaries
total_entries = len(defects)
ints_and_facets = filter(lambda int_and_facet: all(np.array(int_and_facet[1]) < total_entries),
ints_and_facets)
# sort based on transition level
ints_and_facets = list(sorted(ints_and_facets, key=lambda int_and_facet: int_and_facet[0][0]))
# log a defect name for tracking (using full index list to avoid naming
# in-equivalent defects with same name)
str_index_list = [str(ind) for ind in sorted(index_list)]
track_name = defects[0].name + "@" + str("-".join(str_index_list))
if len(ints_and_facets):
# Unpack into lists
_, facets = zip(*ints_and_facets)
# Map of transition level: charge states
transition_level_map[track_name] = {
intersection[0]: [defects[i].charge for i in facet]
for intersection, facet in ints_and_facets
}
stable_entries[track_name] = list(set([defects[i] for dual in facets for i in dual]))
finished_charges[track_name] = [defect.charge for defect in defects]
else:
# if ints_and_facets is empty, then there is likely only one defect...
if len(defects) != 1:
#confirm formation energies dominant for one defect over other identical defects
name_set = [one_def.name+'_chg'+str(one_def.charge) for one_def in defects]
vb_list = [one_def.formation_energy( fermi_level=limits[0][0]) for one_def in defects]
cb_list = [one_def.formation_energy( fermi_level=limits[0][1]) for one_def in defects]
vbm_def_index = vb_list.index( min(vb_list))
name_stable_below_vbm = name_set[vbm_def_index]
cbm_def_index = cb_list.index( min(cb_list))
name_stable_above_cbm = name_set[cbm_def_index]
if name_stable_below_vbm != name_stable_above_cbm:
raise ValueError("HalfSpace identified only one stable charge out of list: {}\n"
"But {} is stable below vbm and {} is "
"stable above cbm.\nList of VBM formation energies: {}\n"
"List of CBM formation energies: {}"
"".format(name_set, name_stable_below_vbm, name_stable_above_cbm,
vb_list, cb_list))
else:
logger.info("{} is only stable defect out of {}".format( name_stable_below_vbm, name_set))
transition_level_map[track_name] = {}
stable_entries[track_name] = list([defects[vbm_def_index]])
finished_charges[track_name] = [one_def.charge for one_def in defects]
else:
transition_level_map[track_name] = {}
stable_entries[track_name] = list([defects[0]])
finished_charges[track_name] = [defects[0].charge]
self.transition_level_map = transition_level_map
self.transition_levels = {
defect_name: list(defect_tls.keys())
for defect_name, defect_tls in transition_level_map.items()
}
self.stable_entries = stable_entries
self.finished_charges = finished_charges
self.stable_charges = {
defect_name: [entry.charge for entry in entries]
for defect_name, entries in stable_entries.items()
}
@property
def defect_types(self):
"""
List types of defects existing in the DefectPhaseDiagram
"""
return list(self.finished_charges.keys())
@property
def all_stable_entries(self):
"""
List all stable entries (defect+charge) in the DefectPhaseDiagram
"""
return set(chain.from_iterable(self.stable_entries.values()))
@property
def all_unstable_entries(self):
"""
List all unstable entries (defect+charge) in the DefectPhaseDiagram
"""
all_stable_entries = self.all_stable_entries
return [e for e in self.entries if e not in all_stable_entries]
def defect_concentrations(self, chemical_potentials, temperature=300, fermi_level=0.):
"""
Give list of all concentrations at specified efermi in the DefectPhaseDiagram
args:
chemical_potentials = {Element: number} is dictionary of chemical potentials to provide formation energies for
temperature = temperature to produce concentrations from
fermi_level: (float) is fermi level relative to valence band maximum
Default efermi = 0 = VBM energy
returns:
list of dictionaries of defect concentrations
"""
concentrations = []
for dfct in self.all_stable_entries:
concentrations.append({
'conc':
dfct.defect_concentration(
chemical_potentials=chemical_potentials, temperature=temperature, fermi_level=fermi_level),
'name':
dfct.name,
'charge':
dfct.charge
})
return concentrations
def suggest_charges(self, tolerance=0.1):
"""
Suggest possible charges for defects to compute based on proximity
of known transitions from entries to the VBM and CBM
Args:
tolerance (float): tolerance with respect to the VBM and CBM to
continue to compute new charges
"""
recommendations = {}
for def_type in self.defect_types:
test_charges = np.arange(
np.min(self.stable_charges[def_type]) - 1,
np.max(self.stable_charges[def_type]) + 2)
test_charges = [charge for charge in test_charges if charge not in self.finished_charges[def_type]]
if len(self.transition_level_map[def_type].keys()):
# More positive charges will shift the minimum transition level down
# Max charge is limited by this if its transition level is close to VBM
min_tl = min(self.transition_level_map[def_type].keys())
if min_tl < tolerance:
max_charge = max(self.transition_level_map[def_type][min_tl])
test_charges = [charge for charge in test_charges if charge < max_charge]
# More negative charges will shift the maximum transition level up
# Minimum charge is limited by this if transition level is near CBM
max_tl = max(self.transition_level_map[def_type].keys())
if max_tl > (self.band_gap - tolerance):
min_charge = min(self.transition_level_map[def_type][max_tl])
test_charges = [charge for charge in test_charges if charge > min_charge]
else:
test_charges = [charge for charge in test_charges if charge not in self.stable_charges[def_type]]
recommendations[def_type] = test_charges
return recommendations
def suggest_larger_supercells(self, tolerance=0.1):
"""
Suggest larger supercells for different defect+chg combinations based on use of
compatibility analysis. Does this for any charged defects which have is_compatible = False,
and the defect+chg formation energy is stable at fermi levels within the band gap.
NOTE: Requires self.filter_compatible = False
Args:
tolerance (float): tolerance with respect to the VBM and CBM for considering
larger supercells for a given charge
"""
if self.filter_compatible:
raise ValueError("Cannot suggest larger supercells if filter_compatible is True.")
recommendations = {}
for def_type in self.defect_types:
template_entry = self.stable_entries[def_type][0].copy()
defect_indices = [int(def_ind) for def_ind in def_type.split('@')[-1].split('-')]
for charge in self.finished_charges[def_type]:
chg_defect = template_entry.defect.copy()
chg_defect.set_charge ( charge)
for entry_index in defect_indices:
entry = self.entries[entry_index]
if entry.charge == charge:
break
if entry.parameters.get("is_compatible", True):
continue
else:
# consider if transition level is within
# tolerance of band edges
suggest_bigger_supercell = True
for tl, chgset in self.transition_level_map.items():
sorted_chgset = list(chgset)
sorted_chgset.sort(reverse=True)
if charge == sorted_chgset[0] and tl < tolerance:
suggest_bigger_supercell = False
elif charge == sorted_chgset[1] and tl > (self.band_gap - tolerance):
suggest_bigger_supercell = False
if suggest_bigger_supercell:
if def_type not in recommendations:
recommendations[def_type] = []
recommendations[def_type].append( charge)
return recommendations
def solve_for_fermi_energy(self, temperature, chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy self-consistently as a function of T.
The charge balance includes the defect concentrations as well as the
electron and hole concentrations.
Args:
temperature: Temperature to equilibrate fermi energies for
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality
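Schematically, the root solved for is the charge-neutrality condition
sum_d q_d * c_d(E_F) + Q_dos(E_F) = 0,
where Q_dos is the doping contribution obtained from the bulk DOS; the
root is located by bisection of E_F over [-1, band_gap + 1].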
"""
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_,fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = sum([
d['charge'] * d['conc']
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials, temperature=temperature, fermi_level=ef)
])
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1., self.band_gap + 1.)
def solve_for_non_equilibrium_fermi_energy(self, temperature, quench_temperature,
chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy after quenching in the defect concentrations at a higher
temperature (the quench temperature),
as outlined in P. Canepa et al (2017) Chemistry of Materials (doi: 10.1021/acs.chemmater.7b02909)
Args:
temperature: Temperature to equilibrate fermi energy at after quenching in defects
quench_temperature: Temperature to equilibrate defect concentrations at (higher temperature)
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality with respect to frozen in defect concentrations
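Schematically: the total defect charge Q_frozen is evaluated once at the
quench temperature, and the neutrality condition
Q_frozen + Q_dos(E_F; T) = 0 is then solved at the lower temperature T
by bisection.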
"""
high_temp_fermi_level = self.solve_for_fermi_energy( quench_temperature, chemical_potentials,
bulk_dos)
fixed_defect_charge = sum([
d['charge'] * d['conc']
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials, temperature=quench_temperature,
fermi_level=high_temp_fermi_level)
])
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_,fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = fixed_defect_charge
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1., self.band_gap + 1.)
def get_dopability_limits(self, chemical_potentials):
"""
Find dopability limits for a given set of chemical potentials.
These are defined by the defect formation energies which first cross
zero as a function of the Fermi level.
This determines bounds on the Fermi level.
Does this by computing formation energy for every stable defect with non-zero charge.
If the formation energy value changes sign on either side of the band gap, then
compute the fermi level value where the formation energy is zero
(formation energies are lines and basic algebra shows: x_crossing = x1 - (y1 / q)
for fermi level, x1, producing formation energy y1)
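For example (hypothetical numbers), a defect with charge q = +2 whose
formation energy at x1 = 0 is y1 = 1 eV crosses zero at
x = 0 - 1/2 = -0.5 eV, since the line is E(x) = y1 + q*(x - x1).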
Args:
chemical_potentials: dict of chemical potentials to use for calculation fermi level
Returns:
lower dopability limit, upper dopability limit
(returns None if no limit exists for upper or lower, i.e. no defect
crossing within +/- 20 eV of the band edges OR defect formation
energies are negative across the entire range)
"""
min_fl_range = -20.
max_fl_range = self.band_gap + 20.
lower_lim = None
upper_lim = None
for def_entry in self.all_stable_entries:
min_fl_formen = def_entry.formation_energy(chemical_potentials = chemical_potentials,
fermi_level=min_fl_range)
max_fl_formen = def_entry.formation_energy(chemical_potentials = chemical_potentials,
fermi_level=max_fl_range)
if min_fl_formen < 0. and max_fl_formen < 0.:
logger.error("Formation energy is negative through entire gap for entry {} q={}." \
" Cannot return dopability limits.".format( def_entry.name, def_entry.charge))
return None, None
elif np.sign( min_fl_formen) != np.sign( max_fl_formen):
x_crossing = min_fl_range - (min_fl_formen / def_entry.charge)
if min_fl_formen < 0.:
if lower_lim is None or lower_lim < x_crossing:
lower_lim = x_crossing
else:
if upper_lim is None or upper_lim > x_crossing:
upper_lim = x_crossing
return lower_lim, upper_lim
def plot(self, mu_elts=None, xlim=None, ylim=None, ax_fontsize=1.3, lg_fontsize=1.,
lg_position=None, fermi_level = None, title=None, saved=False):
"""
Produce defect Formation energy vs Fermi energy plot
Args:
mu_elts:
a dictionary of {Element:value} giving the chemical
potential of each element
xlim:
Tuple (min,max) giving the range of the x (fermi energy) axis
ylim:
Tuple (min,max) giving the range for the formation energy axis
ax_fontsize:
float multiplier to change axis label fontsize
lg_fontsize:
float multiplier to change legend label fontsize
lg_position:
Tuple (horizontal-position, vertical-position) giving the position
to place the legend.
Example: (0.5,-0.75) will likely put it below the x-axis.
saved:
Whether to save the plot to a pdf file (named using ``title``)
instead of returning the matplotlib object
Returns:
a matplotlib object
"""
if xlim is None:
xlim = (-0.5, self.band_gap+0.5)
xy = {}
lower_cap = -100.
upper_cap = 100.
y_range_vals = [] # for finding max/min values on y-axis based on x-limits
for defnom, def_tl in self.transition_level_map.items():
xy[defnom] = [[],[]]
if def_tl:
org_x = list(def_tl.keys()) # list of transition levels
org_x.sort() # sorted with lowest first
#establish lower x-bound
first_charge = max(def_tl[org_x[0]])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == first_charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=lower_cap)
fe_left = chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=xlim[0])
xy[defnom][0].append(lower_cap)
xy[defnom][1].append(form_en)
y_range_vals.append( fe_left)
#iterate over stable charge state transitions
for fl in org_x:
charge = max(def_tl[fl])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=fl)
xy[defnom][0].append(fl)
xy[defnom][1].append(form_en)
y_range_vals.append( form_en)
#establish upper x-bound
last_charge = min(def_tl[org_x[-1]])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == last_charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=upper_cap)
fe_right = chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=xlim[1])
xy[defnom][0].append(upper_cap)
xy[defnom][1].append(form_en)
y_range_vals.append( fe_right)
else:
#no transition - just one stable charge
chg_ent = self.stable_entries[defnom][0]
for x_extrem in [lower_cap, upper_cap]:
xy[defnom][0].append( x_extrem)
xy[defnom][1].append( chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=x_extrem)
)
for x_window in xlim:
y_range_vals.append( chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=x_window)
)
if ylim is None:
window = max(y_range_vals) - min(y_range_vals)
spacer = 0.1 * window
ylim = (min(y_range_vals) - spacer, max(y_range_vals) + spacer)
if len(xy) <= 8:
colors=cm.Dark2(np.linspace(0, 1, len(xy)))
else:
colors=cm.gist_rainbow(np.linspace(0, 1, len(xy)))
plt.figure()
plt.clf()
width, height = 12, 8
#plot formation energy lines
for_legend = []
for cnt, defnom in enumerate(xy.keys()):
plt.plot(xy[defnom][0], xy[defnom][1], linewidth=3, color=colors[cnt])
for_legend.append( self.stable_entries[defnom][0].copy())
#plot transition levels
for cnt, defnom in enumerate(xy.keys()):
x_trans, y_trans = [], []
for x_val, chargeset in self.transition_level_map[defnom].items():
x_trans.append( x_val)
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == chargeset[0]:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts,
fermi_level=x_val)
y_trans.append( form_en)
if len(x_trans):
plt.plot(x_trans, y_trans, marker='*', color=colors[cnt], markersize=12, fillstyle='full')
#get latex-like legend titles
legends_txt = []
for dfct in for_legend:
flds = dfct.name.split('_')
if 'Vac' == flds[0]:
base = '$Vac'
sub_str = '_{'+flds[1]+'}$'
elif 'Sub' == flds[0]:
flds = dfct.name.split('_')
base = '$'+flds[1]
sub_str = '_{'+flds[3]+'}$'
elif 'Int' == flds[0]:
base = '$'+flds[1]
sub_str = '_{inter}$'
else:
base = dfct.name
sub_str = ''
legends_txt.append( base + sub_str)
if not lg_position:
plt.legend(legends_txt, fontsize=lg_fontsize*width, loc=0)
else:
plt.legend(legends_txt, fontsize=lg_fontsize*width, ncol=3,
loc='lower center', bbox_to_anchor=lg_position)
plt.ylim(ylim)
plt.xlim(xlim)
plt.plot([xlim[0], xlim[1]], [0, 0], 'k-') # black solid line for E_formation = 0
plt.axvline(x=0.0, linestyle='--', color='k', linewidth=3) # black dashed lines for gap edges
plt.axvline(x=self.band_gap, linestyle='--', color='k',
linewidth=3)
if fermi_level is not None:
plt.axvline(x=fermi_level, linestyle='-.', color='k', linewidth=2) # dash-dot line marking the Fermi level
plt.xlabel("Fermi energy (eV)", size=ax_fontsize*width)
plt.ylabel("Defect Formation\nEnergy (eV)", size=ax_fontsize*width)
if title:
plt.title("{}".format(title), size=ax_fontsize*width)
if saved:
plt.savefig(str(title) + "FreyplnravgPlot.pdf")
else:
return plt
|
blondegeek/pymatgen
|
pymatgen/analysis/defects/thermodynamics.py
|
Python
|
mit
| 30,426
|
[
"pymatgen"
] |
eadb2cb2d00faa4aa79e8c06ac9a02f4c50f4898bd706f80d1687b38d0be0920
|
#!/usr/bin/python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: gitim
=============
"""
from __future__ import print_function
import platform
import numpy as np
from scipy.spatial import distance
from . import utilities
from .sanity_check import SanityCheck
from .surface import SurfaceFlatInterface
from .surface import SurfaceGenericInterface
try:
from pytetgen import Delaunay
except ImportError:
from scipy.spatial import Delaunay
from .interface import Interface
from .patches import patchTrajectory, patchOpenMM, patchMDTRAJ
from circumradius import circumradius
class GITIM(Interface):
""" Identifies interfacial molecules at curved interfaces.
*(Sega, M.; Kantorovich, S.; Jedlovszky, P.; Jorge, M., \
J. Chem. Phys. 138, 044110, 2013)*
:param Object universe: The MDAnalysis_ Universe, MDTraj_ trajectory
or OpenMM_ Simulation objects.
:param Object group: An AtomGroup, or an array-like object with
the indices of the atoms in the group. Will
identify the interfacial molecules from
this group
:param float alpha: The probe sphere radius
:param str normal: 'x','y,'z' or 'guess'
(for planar interfaces only)
:param bool molecular: Switches between search of interfacial
molecules / atoms (default: True)
:param int max_layers: The number of layers to be identified
:param dict radii_dict: Dictionary with the atomic radii of the
elements in the group. If None is supplied,
the default one (from GROMOS 43a1) will be
used.
:param float cluster_cut: Cutoff used for neighbors or density-based
cluster search (default: None disables the
cluster analysis)
:param float cluster_threshold_density: Number density threshold for
the density-based cluster search. 'auto'
determines the threshold automatically.
Default: None uses simple neighbors cluster
search, if cluster_cut is not None
:param Object extra_cluster_groups: Additional groups, to allow for
mixed interfaces
:param bool biggest_cluster_only: Tag as surface atoms/molecules only
those in the largest cluster. Need to
specify also a :py:obj:`cluster_cut` value.
:param str symmetry: Gives the code a hint about the topology
of the interface: 'generic' (default)
or 'planar'
:param bool centered: Center the :py:obj:`group`
:param bool info: Print additional info
:param bool warnings: Print warnings
:param bool autoassign: If true (default) detect the interface
every time a new frame is selected.
Example:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(MICELLE_PDB)
>>> g = u.select_atoms('resname DPC')
>>>
>>> interface =pytim.GITIM(u,group=g,molecular=False, alpha=2.0)
>>> layer = interface.layers[0]
>>> interface.writepdb('gitim.pdb',centered=False)
>>> print (repr(layer))
<AtomGroup with 909 atoms>
Successive layers can be identified with :mod:`~pytim.gitim.GITIM`
as well. In this example we identify two solvation shells of glucose:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(GLUCOSE_PDB)
>>> g = u.select_atoms('name OW')
>>> # it is faster to consider only oxygens.
>>> # Hydrogen atoms are anyway within Oxygen's radius,
>>> # in SPC* models.
>>> interface =pytim.GITIM(u, group=g, alpha=2.0, max_layers=2)
>>>
>>> interface.writepdb('glucose_shells.pdb')
>>> print (repr(interface.layers[0]))
<AtomGroup with 54 atoms>
>>> print (repr(interface.layers[1]))
<AtomGroup with 117 atoms>
.. _MDAnalysis: http://www.mdanalysis.org/
.. _MDTraj: http://www.mdtraj.org/
.. _OpenMM: http://www.openmm.org/
"""
def __init__(self,
universe,
group=None,
alpha=2.0,
normal='guess',
molecular=True,
max_layers=1,
radii_dict=None,
cluster_cut=None,
cluster_threshold_density=None,
extra_cluster_groups=None,
biggest_cluster_only=False,
symmetry='generic',
centered=False,
info=False,
warnings=False,
autoassign=True,
_noextrapoints=False,
**kargs):
# this is just for debugging/testing
self._noextrapoints = _noextrapoints
self.autoassign = autoassign
self.system = platform.system()
self.do_center = centered
self.biggest_cluster_only = biggest_cluster_only
sanity = SanityCheck(self, warnings=warnings)
sanity.assign_universe(universe, group)
sanity.assign_alpha(alpha)
self.max_layers = max_layers
self._layers = np.empty([max_layers], dtype=type(self.universe.atoms))
self.info = info
self.normal = None
self.PDB = {}
self.molecular = molecular
sanity.assign_cluster_params(cluster_cut,
cluster_threshold_density, extra_cluster_groups)
sanity.check_multiple_layers_options()
sanity.assign_radii(radii_dict=radii_dict)
self._assign_symmetry(symmetry)
try:
self._buffer_factor = kargs['buffer_factor']
except KeyError:
self._buffer_factor = 3.5
if (self.symmetry == 'planar'):
sanity.assign_normal(normal)
self._surfaces = np.empty(
max_layers, dtype=type(SurfaceFlatInterface))
for nlayer in range(max_layers):
self._surfaces[nlayer] = SurfaceFlatInterface(
self, options={'layer': nlayer})
else: # generic
self._surfaces = np.empty(
max_layers, dtype=type(SurfaceGenericInterface))
for nlayer in range(max_layers):
self._surfaces[nlayer] = SurfaceGenericInterface(
self, options={'layer': nlayer})
patchTrajectory(self.universe.trajectory, self)
self._assign_layers()
def _sanity_checks(self):
""" Basic checks to be performed after the initialization.
"""
def alpha_shape(self, alpha, group, layer):
box = self.universe.dimensions[:3]
delta = np.array([self._buffer_factor * self.alpha] * 3)
delta = np.min([delta, box / 2.], axis=0)
points = group.positions[:]
nrealpoints = len(points)
if self._noextrapoints is False:
extrapoints, extraids = utilities.generate_periodic_border(
points, box, delta, method='3d')
# add points at the vertices of the expanded (by 2 alpha) box by
# generating general linear positions of the expanded box vertices
vertices = utilities.generate_cube_vertices(
box, delta, jitter=True)
n_cube = len(vertices)
extrapoints = np.vstack((extrapoints, vertices))
extraids = np.append(extraids, [-1] * n_cube)
else:
n_cube = 0
extrapoints = np.copy(points)
extraids = np.arange(len(points), dtype=int)
self.triangulation.append(Delaunay(extrapoints))
try:
triangulation = self.triangulation[layer]
except IndexError:
raise IndexError("alpha_shape called using a wrong layer")
triangulation.radii = np.append(group.radii[extraids[extraids >= 0]],
np.zeros(8))
simplices = triangulation.simplices
try:
_points = self.triangulation[layer].points
radii = self.triangulation[layer].radii
except IndexError:
raise IndexError("alpha_shape called using a wrong layer")
cr = circumradius(_points, radii, simplices)
# we filter first according to the touching sphere radius
a_shape = simplices[cr >= self.alpha]
# then we remove all simplices involving the 8 outer points, if any
cond = np.where(np.all(a_shape < len(_points) - n_cube, axis=1))[0]
a_shape = a_shape[np.unique(cond)]
# finally, we select only the ids of atoms in the basic cell.
return np.unique(a_shape[np.where(a_shape < nrealpoints)])
def _assign_layers_setup(self):
self.reset_labels()
self.prepare_box()
self._define_cluster_group()
self.centered_positions = None
if self.do_center:
self.center()
# first we label all atoms in group to be in the gas phase
self.label_group(self.analysis_group.atoms, beta=0.5)
# then all atoms in the largest group are labelled as liquid-like
self.label_group(self.cluster_group.atoms, beta=0.0)
alpha_group = self.cluster_group[:]
# TODO the successive layers analysis should be done by removing points
# from the triangulation and updating the circumradius of the neighbors
# of the removed points only.
dbs = utilities.do_cluster_analysis_dbscan
return alpha_group, dbs
def _assign_layers_postprocess(self, dbs, group, alpha_group, layer):
if len(group) > 0:
if self.molecular:
group = group.residues.atoms
self._layers[layer] = group
else:
self._layers[layer] = group.universe.atoms[:0]
alpha_group = alpha_group[:] - group[:]
self.label_group(
self._layers[layer], beta=1. * (layer + 1), layer=(layer + 1))
return alpha_group
def _assign_layers(self):
"""Determine the GITIM layers."""
alpha_group, dbs = self._assign_layers_setup()
self.triangulation = [] # storage for triangulations
for layer in range(0, self.max_layers):
alpha_ids = self.alpha_shape(self.alpha, alpha_group, layer)
group = alpha_group[alpha_ids]
alpha_group = self._assign_layers_postprocess(
dbs, group, alpha_group, layer)
# reset the interpolator
self._interpolator = None
@property
def layers(self):
"""Access the layers as numpy arrays of AtomGroups.
The object can be sliced as usual with numpy arrays.
Differently from :mod:`~pytim.itim.ITIM`, there are no sides. Example:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import MICELLE_PDB
>>>
>>> u = mda.Universe(MICELLE_PDB)
>>> micelle = u.select_atoms('resname DPC')
>>> inter = pytim.GITIM(u, group=micelle, max_layers=3,molecular=False)
>>> inter.layers #all layers
array([<AtomGroup with 909 atoms>, <AtomGroup with 301 atoms>,
<AtomGroup with 164 atoms>], dtype=object)
>>> inter.layers[0] # first layer (0)
<AtomGroup with 909 atoms>
"""
return self._layers
def _():
""" additional tests
>>> import numpy as np
>>> from circumradius import circumradius
>>> from scipy.spatial import Delaunay
>>> p = [0.,0,0,0,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,1,1,0,1]
>>> r = np.array(p).reshape(8,3)
>>> tri = Delaunay(r)
>>> radius = circumradius(r,np.ones(8)*0.5,tri.simplices)[0]
>>> print(np.isclose(radius, (np.sqrt(3)-1.)/2))
True
>>> import pytim
>>> import numpy as np
>>> from pytim.datafiles import _TEST_BCC_GRO
>>> import MDAnalysis as mda
>>> u= mda.Universe(_TEST_BCC_GRO)
>>> # we use a minimal system with one atom in the group
>>> # this will represent a cubic lattice, and test also PBCs
>>> u.atoms[0:1].positions = np.array([0., 0., 0.])
>>> u.dimensions = np.array([1., 1., 1., 90., 90., 90.])
>>> g = u.atoms[0:1]
>>> # the maximum value is (np.sqrt(3)-1.)/2) ~= 0.366025403
>>> inter = pytim.GITIM(u,group=g,radii_dict={'A':0.5},alpha=0.3660254)
>>> print(repr(inter.atoms))
<AtomGroup with 1 atom>
>>> # with alpha > (np.sqrt(3)-1.)/2) no atom is found as surface one
>>> inter = pytim.GITIM(u,group=g,radii_dict={'A':0.5},alpha=0.3660255)
>>> print(repr(inter.atoms))
<AtomGroup with 0 atoms>
"""
#
|
Marcello-Sega/pytim
|
pytim/gitim.py
|
Python
|
gpl-3.0
| 13,525
|
[
"GROMOS",
"MDAnalysis",
"MDTraj",
"OpenMM"
] |
f6e35ba21b7b9147fecbcab2e3583111fa71d97e9a38b4a3be47fb9f209647a3
|
#!/usr/bin/env python
from utilities import filesFromList, vtkWriteDataStructured2d, vtkWriteHeaderAndGridStructured2d
from utilities import vtkWritePointDataHeader, vtkWritePointDataStructured2D
from utilities import writeLog
from plotTools import addContourf, addToPlot
from footprintTools import *
from mapTools import readNumpyZTile, filterAndScale
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
'''
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
# = # = # = # Function definitions # = # = # = # = # = # = #
# = # = # = # = # = # = # = # = # = # = # = # = # = # = #
# = # = # = # End Function definitions # = # = # = # = # = #
#========================================================== #
parser = argparse.ArgumentParser(prog='footprintGather.py')
parser.add_argument("fileKey", help="Search string for collecting (.npz) files.",\
nargs='?', default="npz")
parser.add_argument("-a", "--allfiles", help="Select all files automatically.",\
action="store_true", default=False)
parser.add_argument("-fo", "--fileout", type=str, default='fp_gather',\
help="Footprint output file. (npz format)")
parser.add_argument("-ft", "--filetopo", type=str,\
help="File containing the topography data. (npz format)", default='')
helpFlt = ''' Filter type and its associated number. Available filters:
median, percentile, rank, gaussian, local, max.
Entering \"user num\" allows the user to specify <num> different filters consecutively.
Example entry: median 5'''
parser.add_argument("-fl","--filter",type=str,nargs=2,default=[None,None], help=helpFlt)
parser.add_argument("-n1", "--norm2one", help="Normalize by making global sum = 1.",\
action="store_true", default=False)
parser.add_argument("-v","--vtk", help="Write the results in VTK format with topography.",\
action="store_true", default=False)
parser.add_argument("-p", "--printOn", help="Print the contour of the footprint.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the contour. Don't save.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#========================================================== #
# Rename ... that's all.
fileKey = args.fileKey
fileout = args.fileout
filetopo = args.filetopo
flt = args.filter
allFiles = args.allfiles
norm2one = args.norm2one
printOn = args.printOn or args.printOnly
printOnly = args.printOnly
vtkOn = args.vtk
if( vtkOn and (filetopo == '')):
sys.exit(' Error! VTK results require -ft/--filetopo. Exiting ...')
# Gather footprint data files:
fileNos, fileList = filesFromList( "*"+fileKey+"*", allFiles )
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# xO := origin coords. # xt := target coords. # ut := target speed
Ft = None; Ct = None; Zt = None
for fn in fileNos:
print(' Processing file: {}'.format(fileList[fn]))
Fi, X, Y, Z, Ci = readNumpyZFootprint( fileList[fn] )
Fi *= Ci # Return the footprint into unscaled state.
if( Ft is None ):
Ft = Fi.copy()
Ct = Ci
Zt = Z.copy(); Xt = X.copy(); Yt = Y.copy()
else:
Ft += Fi # Accumulate the footprint data.
Ct += Ci # Accumulate the coefficient for normalization.
Zt = np.maximum( Zt, Z )
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Resolution:
dPx = np.array([ (Xt[0,1]-Xt[0,0]) , (Yt[1,0]-Yt[0,0]) ])
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Compute the final footprint:
Fi = X = Y = Z = Ci = None
Ft /= Ct
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Apply filter if desired.
if( flt.count(None) == 0):
Fft = np.zeros( np.shape(Ft) , float)
Fft = filterAndScale(Fft, Ft, flt )
Ft = Fft.copy(); Fft = None
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Kormann and Meixner analytical footprint.
# Tower: u = 4.86, sigma_v = 0.75,
# LES : u = 4.5 - 5.1, sigma_v = 0.72-0.74
# Upwind LES mean over 1200 m:
# u = 6.1, sigma_v = 0.95
L =10000.; z_m = (60.-14.9); z_0 = 1.4; sigma_v = 0.75; u=4.86
x_off = 2.*228.; y_off = 2.*508.
F_km = kormann_and_meixner_fpr(z_0, z_m, u, sigma_v, L, Xt, Yt, x_off, y_off)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Make the analytical and LES footprints comparable
# Force the global sum = 1.
if( norm2one ):
print(' Normalizing the footprints such that SUM(Fp) = 1 ...')
Cn = 1./np.sum( Ft * np.prod(dPx)); Ft *= Cn
Ca = 1./np.sum( F_km* np.prod(dPx)); F_km*= Ca
print('... done! C_les = {} and C_ana = {}'.format(Cn, Ca))
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Extract indices for partial (%) footprints
id50 = percentileFootprintIds( Ft, 50. )
id75 = percentileFootprintIds( Ft, 75. )
id90 = percentileFootprintIds( Ft, 90. )
id90_km = percentileFootprintIds( F_km, 90. ) # 90%
id75_km = percentileFootprintIds( F_km, 75. ) # 75%
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# Output to npz and vtk formats.
if( not printOnly ):
# Compute the cross wind mean of the footprint.
Ftm = writeCrossWindSum( Ft, Xt, fileout )
#Ftm50 = writeCrossWindSum( Ft, Xt, fileout+'-50' , id50 )
#Ftm75 = writeCrossWindSum( Ft, Xt, fileout+'-75' , id75 )
#Ftm90 = writeCrossWindSum( Ft, Xt, fileout+'-90' , id90 )
Fm_km = writeCrossWindSum( F_km, Xt, fileout+'_km' )
#Fm90_km = writeCrossWindSum( F_km, Xt, fileout+'-90_km', id90_km )
# Write the footprint in npz format.
IDict = {}
IDict[50] = id50[::-1,:]; IDict[75] = id75[::-1,:]; IDict[90] = id90[::-1,:]
writeNumpyZFootprint(fileout, Ft[::-1,:], Xt, Yt, Zt, Ct, IDict )
# Write also the Kormann-Meixner footprint
IDict = {}
IDict[75] = id75_km[::-1,:]; IDict[90] = id90_km[::-1,:]
writeNumpyZFootprint(fileout+'_KormannMeixner', F_km[::-1,:], Xt, Yt, Zt, Ct, IDict )
if( vtkOn ):
# Footprint to VTK-format together with the complete topography.
Ftmp = np.zeros( np.shape(Ft), float )
Rdict = readNumpyZTile( filetopo )
R = Rdict['R']
Rdims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
Rdict = None
if( all(Rdims != np.shape(Xt)) ):
sys.exit(' Error! Mismatch Topo_dims={} vs. Fp_dims={}'.format(Rdims,np.shape(Xt)))
f_vtk = vtkWriteHeaderAndGridStructured2d( Xt, Yt, R[::-1,:], fileout, 'Footprint'); R=None
f_vtk = vtkWritePointDataHeader( f_vtk, Ft, 5 )
# ======= Write 100% Ft ================
f_vtk = vtkWritePointDataStructured2D( f_vtk, Ft , Xt, 'fp' )
# ======= Write 75% Ft ================
Ftmp[:,:] = 0.; Ftmp += Ft*id75
f_vtk = vtkWritePointDataStructured2D( f_vtk, Ftmp , Xt, 'fp75' )
# ======= Write 90% Ft ================
Ftmp[:,:] = 0.; Ftmp += Ft*id90
f_vtk = vtkWritePointDataStructured2D( f_vtk, Ftmp , Xt, 'fp90' )
# ======= Write 100% F_km ================
f_vtk = vtkWritePointDataStructured2D( f_vtk, F_km, Xt, 'fp_km' )
# ======= Write 90% F_km ================
Ftmp[:,:] = 0.; Ftmp += F_km*id90_km
f_vtk = vtkWritePointDataStructured2D( f_vtk, Ftmp , Xt, 'fp90_km' )
# Close the file at the end.
f_vtk.close(); Ftmp = None
if( printOn ):
Cfp = addContourf( Xt, Yt, Ft , 'F(x,y)' , fileout )
Cfa = addContourf( Xt, Yt, F_km, 'F_km(x,y), Ana', fileout+'_km' )
Fym = writeCrossWindSum( Ft, Xt, None, None )
pfig = plt.figure(num=3, figsize=(12.,9.))
varLabel = '$fp_y(x) = \sum_y fp(x,y)$'
axLabels = ['Cross Wind Integrated Footprint', 'x', 'sum_y fp(x,y) ']
pfig = addToPlot(pfig, Xt[0,:], Fym, varLabel, axLabels, False )
Ft = Zt = Xt = Yt = None
F_km = None
plt.show()
|
saskartt/P4UL
|
pyFootprint/footprintGather.py
|
Python
|
mit
| 7,807
|
[
"Gaussian",
"VTK"
] |
abb826f06c3c4c3a836afb17e53a06d0fdbd20b5f6671a7d559b7122121acc75
|
import copy
from hearthbreaker.cards.base import SpellCard
from hearthbreaker.tags.action import AddCard
from hearthbreaker.tags.base import Effect, BuffUntil, Buff, AuraUntil, ActionTag
from hearthbreaker.tags.condition import IsSpell
from hearthbreaker.tags.event import TurnStarted, TurnEnded, SpellCast
from hearthbreaker.tags.selector import PlayerSelector, CardSelector
from hearthbreaker.tags.status import Stealth, ChangeAttack, ManaChange
import hearthbreaker.targeting
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
class Assassinate(SpellCard):
def __init__(self):
super().__init__("Assassinate", 5, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
target_func=hearthbreaker.targeting.find_enemy_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.die(self)
class Backstab(SpellCard):
def __init__(self):
super().__init__("Backstab", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
target_func=hearthbreaker.targeting.find_minion_spell_target,
filter_func=lambda target: target.health == target.calculate_max_health() and
target.spell_targetable())
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(2), self)
class Betrayal(SpellCard):
def __init__(self):
super().__init__("Betrayal", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_enemy_minion_spell_target)
def use(self, player, game):
super().use(player, game)
left_minion = None
right_minion = None
index = self.target.index
if index > 0:
left_minion = game.other_player.minions[index - 1]
if index < min(len(game.other_player.minions) - 1, 6):
right_minion = game.other_player.minions[index + 1]
original_immune = self.target.immune
self.target.immune = True
if left_minion is not None:
left_minion.damage(self.target.calculate_attack(), self.target)
if right_minion is not None:
right_minion.damage(self.target.calculate_attack(), self.target)
self.target.immune = original_immune
class BladeFlurry(SpellCard):
def __init__(self):
super().__init__("Blade Flurry", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
if player.weapon is not None:
# Yes, this card is affected by spell damage cards.
# Source: http://www.hearthhead.com/card=1064/blade-flurry#comments:id=1927317
attack_power = player.effective_spell_damage(player.hero.calculate_attack())
player.weapon.destroy()
for minion in copy.copy(game.other_player.minions):
minion.damage(attack_power, self)
game.other_player.hero.damage(attack_power, self)
class ColdBlood(SpellCard):
def __init__(self):
super().__init__("Cold Blood", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
if player.cards_played > 0:
self.target.change_attack(4)
else:
self.target.change_attack(2)
class Conceal(SpellCard):
def __init__(self):
super().__init__("Conceal", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for minion in player.minions:
if not minion.stealth:
minion.add_buff(BuffUntil(Stealth(), TurnStarted()))
class DeadlyPoison(SpellCard):
def __init__(self):
super().__init__("Deadly Poison", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
player.weapon.base_attack += 2
player.hero.change_temp_attack(2)
def can_use(self, player, game):
return super().can_use(player, game) and player.weapon is not None
class Eviscerate(SpellCard):
def __init__(self):
super().__init__("Eviscerate", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
if player.cards_played > 0:
self.target.damage(player.effective_spell_damage(4), self)
else:
self.target.damage(player.effective_spell_damage(2), self)
class FanOfKnives(SpellCard):
def __init__(self):
super().__init__("Fan of Knives", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for minion in copy.copy(game.other_player.minions):
minion.damage(player.effective_spell_damage(1), self)
player.draw()
class Headcrack(SpellCard):
def __init__(self):
super().__init__("Headcrack", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
game.other_player.hero.damage(player.effective_spell_damage(2), self)
if player.cards_played > 0:
player.add_effect(Effect(TurnEnded(), ActionTag(AddCard(self), PlayerSelector())))
class Preparation(SpellCard):
def __init__(self):
super().__init__("Preparation", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
def use(self, player, game):
super().use(player, game)
player.add_aura(AuraUntil(ManaChange(-3), CardSelector(condition=IsSpell()), SpellCast()))
class Sap(SpellCard):
def __init__(self):
super().__init__("Sap", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE,
target_func=hearthbreaker.targeting.find_enemy_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.bounce()
class Shadowstep(SpellCard):
def __init__(self):
super().__init__("Shadowstep", 0, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.bounce()
self.target.card.add_buff(Buff(ManaChange(-3)))
class Shiv(SpellCard):
def __init__(self):
super().__init__("Shiv", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(1), self)
player.draw()
class SinisterStrike(SpellCard):
def __init__(self):
super().__init__("Sinister Strike", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
game.other_player.hero.damage(player.effective_spell_damage(3), self)
class Sprint(SpellCard):
def __init__(self):
super().__init__("Sprint", 7, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for i in range(0, 4):
player.draw()
class Vanish(SpellCard):
def __init__(self):
super().__init__("Vanish", 6, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
targets = copy.copy(game.other_player.minions)
targets.extend(player.minions)
# Minions are returned to a player's hand in the order in which they were played.
# Source: http://www.hearthhead.com/card=196/vanish#comments:id=1908549
for minion in sorted(targets, key=lambda m: m.born):
minion.bounce()
class TinkersSharpswordOil(SpellCard):
def __init__(self):
super().__init__("Tinker's Sharpsword Oil", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
player.weapon.base_attack += 3
player.hero.change_temp_attack(3)
if player.cards_played > 0:
targets = hearthbreaker.targeting.find_friendly_minion_battlecry_target(player.game, lambda x: x)
if targets is not None:
target = player.game.random_choice(targets)
target.add_buff(Buff(ChangeAttack(3)))
def can_use(self, player, game):
return super().can_use(player, game) and player.weapon is not None
class Sabotage(SpellCard):
def __init__(self):
super().__init__("Sabotage", 4, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
def use(self, player, game):
super().use(player, game)
targets = hearthbreaker.targeting.find_enemy_minion_battlecry_target(player.game, lambda x: True)
target = game.random_choice(targets)
target.die(None)
game.check_delayed()
if player.cards_played > 0 and game.other_player.weapon is not None:
game.other_player.weapon.destroy()
def can_use(self, player, game):
return super().can_use(player, game) and len(game.other_player.minions) >= 1
class GangUp(SpellCard):
def __init__(self):
super().__init__("Gang Up", 2, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
for i in range(3):
player.put_back(type(self.target.card)())
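# A minimal usage sketch, assuming a `player`/`game` pair supplied by
# hearthbreaker's engine (not constructed here):
#
#     card = SinisterStrike()
#     if card.can_use(player, game):
#         card.use(player, game)  # 3 damage (plus spell damage) to the enemy hero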
|
Ragowit/hearthbreaker
|
hearthbreaker/cards/spells/rogue.py
|
Python
|
mit
| 9,707
|
[
"TINKER"
] |
eb820743bfaf84d9f2dc06e345aefcc6849a7682c74ced7c635066705e76e239
|
##
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ESMF, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Damian Alvarez (Forschungszentrum Juelich GmbH)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_ESMF(ConfigureMake):
"""Support for building/installing ESMF."""
def configure_step(self):
"""Custom configuration procedure for ESMF through environment variables."""
env.setvar('ESMF_DIR', self.cfg['start_dir'])
env.setvar('ESMF_INSTALL_PREFIX', self.installdir)
env.setvar('ESMF_INSTALL_BINDIR', 'bin')
env.setvar('ESMF_INSTALL_LIBDIR', 'lib')
env.setvar('ESMF_INSTALL_MODDIR', 'mod')
# specify compiler
comp_family = self.toolchain.comp_family()
if comp_family in [toolchain.GCC]:
compiler = 'gfortran'
else:
compiler = comp_family.lower()
env.setvar('ESMF_COMPILER', compiler)
env.setvar('ESMF_F90COMPILEOPTS', os.getenv('F90FLAGS'))
env.setvar('ESMF_CXXCOMPILEOPTS', os.getenv('CXXFLAGS'))
# specify MPI communications library
comm = None
mpi_family = self.toolchain.mpi_family()
if mpi_family in [toolchain.MPICH, toolchain.QLOGICMPI]:
# MPICH family for MPICH v3.x, which is MPICH2 compatible
comm = 'mpich2'
else:
comm = mpi_family.lower()
env.setvar('ESMF_COMM', comm)
# specify decent LAPACK lib
env.setvar('ESMF_LAPACK', 'user')
ldflags = os.getenv('LDFLAGS')
liblapack = os.getenv('LIBLAPACK_MT') or os.getenv('LIBLAPACK')
if liblapack is None:
raise EasyBuildError("$LIBLAPACK(_MT) not defined, no BLAS/LAPACK in %s toolchain?", self.toolchain.name)
else:
env.setvar('ESMF_LAPACK_LIBS', ldflags + ' ' + liblapack)
# specify netCDF
netcdf = get_software_root('netCDF')
if netcdf:
if LooseVersion(self.version) >= LooseVersion('7.1.0'):
env.setvar('ESMF_NETCDF', 'nc-config')
else:
env.setvar('ESMF_NETCDF', 'user')
netcdf_libs = ['-L%s/lib' % netcdf, '-lnetcdf']
# Fortran
netcdff = get_software_root('netCDF-Fortran')
if netcdff:
netcdf_libs = ["-L%s/lib" % netcdff] + netcdf_libs + ["-lnetcdff"]
else:
netcdf_libs.append('-lnetcdff')
# C++
netcdfcxx = get_software_root('netCDF-C++')
if netcdfcxx:
netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++"]
else:
netcdfcxx = get_software_root('netCDF-C++4')
if netcdfcxx:
netcdf_libs = ["-L%s/lib" % netcdfcxx] + netcdf_libs + ["-lnetcdf_c++4"]
else:
netcdf_libs.append('-lnetcdf_c++')
env.setvar('ESMF_NETCDF_LIBS', ' '.join(netcdf_libs))
# 'make info' provides useful debug info
cmd = "make info"
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def sanity_check_step(self):
"""Custom sanity check for ESMF."""
if LooseVersion(self.version) < LooseVersion('8.1.0'):
binaries = ['ESMF_Info', 'ESMF_InfoC', 'ESMF_Regrid', 'ESMF_RegridWeightGen',
'ESMF_Scrip2Unstruct', 'ESMF_WebServController']
else:
binaries = ['ESMF_PrintInfo', 'ESMF_PrintInfoC', 'ESMF_Regrid', 'ESMF_RegridWeightGen',
'ESMF_Scrip2Unstruct', 'ESMF_WebServController']
libs = ['libesmf.a', 'libesmf.%s' % get_shared_lib_ext()]
custom_paths = {
'files': [os.path.join('bin', x) for x in binaries] + [os.path.join('lib', x) for x in libs],
'dirs': ['include', 'mod'],
}
super(EB_ESMF, self).sanity_check_step(custom_paths=custom_paths)
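# A minimal usage sketch: a hypothetical easyconfig that would dispatch to this
# easyblock (EasyBuild derives the easyblock from the software name; the field
# values below are illustrative, not verified):
#
#     name = 'ESMF'
#     version = '8.1.1'
#     homepage = 'https://www.earthsystemmodeling.org/'
#     description = 'The Earth System Modeling Framework'
#     toolchain = {'name': 'foss', 'version': '2021a'}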
|
boegel/easybuild-easyblocks
|
easybuild/easyblocks/e/esmf.py
|
Python
|
gpl-2.0
| 5,444
|
[
"NetCDF"
] |
5d7133f8ca3bb4f7cd46e6a21e7340078b4a7fdad80b6faec655866836a92ef4
|
##############################################################
## Brian P. Kent
## cd_tree.py
## Created: 20130417
## Updated: 20130417
##############################################################
##############
### SET UP ###
##############
"""
Main functions and classes for construction and use of Chaudhuri-Dasgupta level
set trees. A companion to debacl.py, which has a more developed set of tools for
working with generic level set trees.
"""
try:
import numpy as np
import scipy.spatial.distance as spd
import scipy.io as spio
import igraph as igr
import utils as utl
except ImportError:
    raise ImportError("Critical packages are not installed.")
try:
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import pandas as pd
except ImportError:
    print "Matplotlib and/or Pandas packages are not installed, so plot and " +\
        "print functions may fail."
#####################
### BASIC CLASSES ###
#####################
class ConnectedComponent(object):
"""
Defines a connected component for level set tree construction. A level set
tree is really just a set of ConnectedComponents.
"""
def __init__(self, idnum, parent, children, start_radius, end_radius,
members):
self.idnum = idnum
self.parent = parent
self.children = children
self.start_radius = start_radius
self.end_radius = end_radius
self.members = members
def copy(self):
"""
        Creates and returns a copy of a ConnectedComponent object.
        Parameters
        ----------
        Returns
        -------
        component : ConnectedComponent
"""
component = ConnectedComponent(self.idnum, self.parent, self.children,
self.start_radius, self.end_radius, self.members)
return component
class CDTree(object):
"""
Defines methods and attributes for a Chaudhuri-Dasgupta level set tree.
"""
def __init__(self):
self.nodes = {}
self.subgraphs = {}
def __str__(self):
"""
Produce a tree summary table with Pandas. This can be printed to screen
or saved easily to CSV. This is much simpler than manually formatting
the strings in the CoAxLab version of DeBaCl, but it does require
Pandas.
"""
summary = pd.DataFrame()
for u, v in self.nodes.items():
row = {
'key': u,
'r1': v.start_radius,
'r2': v.end_radius,
'size': len(v.members),
'parent': v.parent,
'children': v.children
}
row = pd.DataFrame([row])
summary = summary.append(row)
summary.set_index('key', inplace=True)
out = summary.to_string()
return out
def prune(self, method='size-merge', **kwargs):
"""
Prune the tree. A dispatch function to other methods.
Parameters
----------
method : {'size-merge'}
gamma : integer
            Nodes smaller than this will be merged ('size-merge' is the only
            pruning method currently implemented).
Notes
-----
Modifies the tree in-place.
"""
if method == 'size-merge':
required = set(['gamma'])
if not set(kwargs.keys()).issuperset(required):
raise ValueError("Incorrect arguments for size-merge pruning.")
else:
gamma = kwargs.get('gamma')
self.mergeBySize(gamma)
else:
print "Pruning method not understood. 'size-merge' is the only " +\
"pruning method currently implemented. No changes were made to " + \
"the tree."
def save(self, fname):
"""
Save a level set tree object to file.
Saves a level set tree as a MATLAB struct using the scipy.io module.
Ignore the warning about using oned_as default value ('column').
Parameters
----------
fname : string
File to save the tree to. The .mat extension is not necessary.
"""
tree_dict = {
'bg_sets': self.bg_sets,
'levels': self.levels,
'idnums': [x.idnum for x in self.nodes.values()],
'start_radii': [x.start_radius for x in self.nodes.values()],
'end_radii': [x.end_radius for x in self.nodes.values()],
'parents': [(-1 if x.parent is None else x.parent)
for x in self.nodes.values()],
'children': [x.children for x in self.nodes.values()],
'members': [x.members for x in self.nodes.values()]
}
spio.savemat(fname, tree_dict)
def makeSubtree(self, ix):
"""
        Return the subtree with node 'ix' as the root, and all descendants of 'ix'.
Parameters
----------
ix : int
Node to use at the root of the new tree.
Returns
-------
        T : CDTree
            A completely independent level set tree, with 'ix' as the root node.
"""
T = CDTree()
T.nodes[ix] = self.nodes[ix].copy()
T.nodes[ix].parent = None
queue = self.nodes[ix].children[:]
while len(queue) > 0:
branch_ix = queue.pop()
T.nodes[branch_ix] = self.nodes[branch_ix]
queue += self.nodes[branch_ix].children
return T
def mergeBySize(self, threshold):
"""
Prune splits from a tree based on size of child nodes. Merge members of
child nodes rather than removing them.
Parameters
----------
threshold : numeric
Tree branches with fewer members than this will be merged into
larger siblings or parents.
Notes
-----
Modifies a level set tree in-place.
"""
## remove small root branches
        small_roots = [k for k, v in self.nodes.iteritems()
                       if v.parent is None and len(v.members) <= threshold]
        for root in small_roots:
            root_tree = self.makeSubtree(root)
            for ix in root_tree.nodes.iterkeys():
                del self.nodes[ix]
## main pruning
parents = [k for k, v in self.nodes.iteritems() if len(v.children) >= 1]
parents = np.sort(parents)[::-1]
for ix_parent in parents:
parent = self.nodes[ix_parent]
# get size of each child
kid_size = {k: len(self.nodes[k].members) for k in parent.children}
# count children larger than 'threshold'
n_bigkid = sum(np.array(kid_size.values()) >= threshold)
if n_bigkid == 0:
# update parent's end level and end mass
parent.end_radius = max([self.nodes[k].end_radius
for k in parent.children])
# remove small kids from the tree
for k in parent.children:
del self.nodes[k]
parent.children = []
            elif n_bigkid == 1:
                # identify the big kid
                ix_bigkid = [k for k, v in kid_size.iteritems() if v >= threshold][0]
                bigkid = self.nodes[ix_bigkid]
                # update the parent's end radius
                parent.end_radius = bigkid.end_radius
                # set grandkids' parent to the current parent
                for c in bigkid.children:
                    self.nodes[c].parent = ix_parent
                # delete small kids
                for k in parent.children:
                    if k != ix_bigkid:
                        del self.nodes[k]
                # set the parent's children to the grandkids
                parent.children = bigkid.children
                # delete the single big kid
                del self.nodes[ix_bigkid]
else:
pass # do nothing here
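    # A minimal usage sketch, assuming a constructed tree T; size-merge pruning
    # is normally reached through the prune() dispatcher:
    #
    #     T.prune(method='size-merge', gamma=10)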
def plot(self, width='uniform', gap=0.05):
"""
Create a static plot of a Chaudhuri-Dasgupta level set tree.
Parameters
----------
width : {'uniform', 'mass'}, optional
            Determines how much horizontal space each level set tree node is
given. The default of "uniform" gives each child node an equal
fraction of the parent node's horizontal space. If set to 'mass',
then horizontal space is allocated proportional to the mass (i.e.
fraction of points) of a node relative to its siblings.
gap : float
Fraction of vertical space to leave at the bottom. Default is 5%,
and 0% also works well. Higher values are used for interactive tools
to make room for buttons and messages.
Returns
-------
fig : matplotlib figure
Use fig.show() to view, fig.savefig() to save, etc.
"""
## Initialize the plot containers
segments = {}
splits = {}
segmap = []
splitmap = []
## Find the root connected components and corresponding plot intervals
ix_root = np.array([k for k, v in self.nodes.iteritems()
if v.parent is None])
n_root = len(ix_root)
census = np.array([len(self.nodes[x].members) for x in ix_root],
dtype=np.float)
n = sum(census)
## Order the roots by mass decreasing from left to right
seniority = np.argsort(census)[::-1]
ix_root = ix_root[seniority]
census = census[seniority]
if width == 'mass':
weights = census / n
intervals = np.cumsum(weights)
intervals = np.insert(intervals, 0, 0.0)
else:
intervals = np.linspace(0.0, 1.0, n_root+1)
## Do a depth-first search on each root to get segments for each branch
for i, ix in enumerate(ix_root):
branch = self.constructBranchMap(ix, (intervals[i], intervals[i+1]),
width)
branch_segs, branch_splits, branch_segmap, branch_splitmap = branch
segments = dict(segments.items() + branch_segs.items())
splits = dict(splits.items() + branch_splits.items())
segmap += branch_segmap
splitmap += branch_splitmap
        ## get the vertical line segments in order of the segment map (segmap)
verts = [segments[k] for k in segmap]
lats = [splits[k] for k in splitmap]
## Find the fraction of nodes in each segment (to use as linewidths)
thickness = [max(1.0, 12.0 * len(self.nodes[x].members)/n) for x in segmap]
## Find the right tick marks for the plot
# radius_ticks = np.sort(list(set(
# [v.start_radius for v in self.nodes.itervalues()] + \
# [v.end_radius for v in self.nodes.itervalues()])))
# radius_tick_labels = [str(round(lvl, 2)) for lvl in radius_ticks]
primary_ticks = [(x[0][1], x[1][1]) for x in segments.values()]
primary_ticks = np.unique(np.array(primary_ticks).flatten())
primary_labels = [str(round(tick, 2)) for tick in primary_ticks]
## Set up the plot framework
frame_dims = [0.15, 0.05, 0.8, 0.93]
fig, ax = plt.subplots()
ax.set_position(frame_dims)
ax.set_xlim((-0.04, 1.04))
ax.set_xticks([])
ax.set_xticklabels([])
ax.yaxis.grid(color='gray')
ax.set_yticks(primary_ticks)
ax.set_yticklabels(primary_labels)
## Add the line segments
segclr = np.array([[0.0, 0.0, 0.0]] * len(segmap))
splitclr = np.array([[0.0, 0.0, 0.0]] * len(splitmap))
linecol = LineCollection(verts, linewidths=thickness, colors=segclr)
ax.add_collection(linecol)
splitcol = LineCollection(lats, colors=splitclr)
ax.add_collection(splitcol)
## Make the plot
ax.set_ylabel("Radius")
ymax = max([v.start_radius for v in self.nodes.itervalues()])
ymin = min([v.end_radius for v in self.nodes.itervalues()])
rng = ymax - ymin
ax.set_ylim(ymin - gap*rng, ymax + 0.05*rng)
ax.invert_yaxis()
return fig
def getClusterLabels(self, method='all-mode', **kwargs):
"""
        Umbrella function for retrieving cluster labels from the level set tree.
Parameters
----------
method : {'all-mode', 'first-k', 'upper-set', 'k-level'}, optional
Method for obtaining cluster labels from the tree. 'all-mode' treats
            each leaf of the tree as a separate cluster. 'first-k' finds the
first K non-overlapping clusters from the roots of the tree.
'upper-set' returns labels by cutting the tree at a specified
density (lambda) or mass (alpha) level. 'k-level' returns labels at
the lowest density level that has k nodes.
k : integer
If method is 'first-k' or 'k-level', this is the desired number of
clusters.
threshold : float
If method is 'upper-set', this is the threshold at which to cut the
tree.
Returns
-------
labels : 2-dimensional numpy array
Each row corresponds to an observation. The first column indicates
the index of the observation in the original data matrix, and the
second column is the integer cluster label (starting at 0). Note
that the set of observations in this "foreground" set is typically
smaller than the original dataset.
nodes : list
Indices of tree nodes corresponding to foreground clusters.
"""
if method == 'all-mode':
labels, nodes = self.allModeCluster()
elif method == 'first-k':
required = set(['k'])
if not set(kwargs.keys()).issuperset(required):
raise ValueError("Incorrect arguments for the first-k " + \
"cluster labeling method.")
else:
k = kwargs.get('k')
labels, nodes = self.firstKCluster(k)
elif method == 'upper-set':
required = set(['threshold'])
if not set(kwargs.keys()).issuperset(required):
raise ValueError("Incorrect arguments for the upper-set " + \
"cluster labeling method.")
else:
threshold = kwargs.get('threshold')
labels, nodes = self.upperSetCluster(threshold)
else:
print 'method not understood'
labels = np.array([])
nodes = []
return labels, nodes
def upperSetCluster(self, threshold):
"""
Set foreground clusters by finding connected components at an upper
level set. This is slightly different than GeomTree.upperSetCluster in
that this method returns all members of tree nodes that cross the
desired threshold, rather than the components of the true upper level
set.
Parameters
----------
threshold : float
The radius that defines the foreground set of points.
Returns
-------
labels : 2-dimensional numpy array
Each row corresponds to an observation. The first column indicates
the index of the observation in the original data matrix, and the
second column is the integer cluster label (starting at 0). Note
that the set of observations in this "foreground" set is typically
smaller than the original dataset.
nodes : list
Indices of tree nodes corresponding to foreground clusters.
"""
## identify upper level points and the nodes active at the cut
nodes = [k for k, v in self.nodes.iteritems()
if v.start_radius >= threshold and v.end_radius < threshold]
## find intersection between upper set points and each active component
points = []
cluster = []
for i, c in enumerate(nodes):
points.extend(self.nodes[c].members)
cluster += ([i] * len(self.nodes[c].members))
labels = np.array([points, cluster], dtype=np.int).T
return labels, nodes
def allModeCluster(self):
"""
Set every leaf node as a foreground cluster.
Parameters
----------
Returns
-------
labels : 2-dimensional numpy array
Each row corresponds to an observation. The first column indicates
the index of the observation in the original data matrix, and the
second column is the integer cluster label (starting at 0). Note
that the set of observations in this "foreground" set is typically
smaller than the original dataset.
leaves : list
Indices of tree nodes corresponding to foreground clusters. This is
the same as 'nodes' for other clustering functions, but here they
are also the leaves of the tree.
"""
leaves = [k for k, v in self.nodes.items() if v.children == []]
## find components in the leaves
points = []
cluster = []
for i, k in enumerate(leaves):
points.extend(self.nodes[k].members)
cluster += ([i] * len(self.nodes[k].members))
labels = np.array([points, cluster], dtype=np.int).T
return labels, leaves
def firstKCluster(self, k):
"""
Returns foreground cluster labels for the 'k' modes with the lowest
start levels. In principle, this is the 'k' leaf nodes with the smallest
        indices, but this function double-checks by finding and ordering all
        leaf start values.
Parameters
----------
k : integer
The desired number of clusters.
Returns
-------
labels : 2-dimensional numpy array
Each row corresponds to an observation. The first column indicates
the index of the observation in the original data matrix, and the
second column is the integer cluster label (starting at 0). Note
that the set of observations in this "foreground" set is typically
smaller than the original dataset.
nodes : list
Indices of tree nodes corresponding to foreground clusters.
"""
parents = np.array([u for u, v in self.nodes.items()
if len(v.children) > 0])
roots = [u for u, v in self.nodes.items() if v.parent is None]
splits = [self.nodes[u].end_radius for u in parents]
order = np.argsort(splits)
star_parents = parents[order[:(k-len(roots))]]
children = [u for u, v in self.nodes.items() if v.parent is None]
for u in star_parents:
children += self.nodes[u].children
nodes = [x for x in children if
sum(np.in1d(self.nodes[x].children, children))==0]
points = []
cluster = []
for i, c in enumerate(nodes):
cluster_pts = self.nodes[c].members
points.extend(cluster_pts)
cluster += ([i] * len(cluster_pts))
labels = np.array([points, cluster], dtype=np.int).T
return labels, nodes
def constructBranchMap(self, ix, interval, width):
"""
Map level set tree nodes to locations in a plot canvas. Finds the plot
coordinates of vertical line segments corresponding to LST nodes and
horizontal line segments corresponding to node splits. Also provides
indices of vertical segments and splits for downstream use with
interactive plot picker tools. This function is not meant to be called
by the user; it is a helper function for the LevelSetTree.plot() method.
This function is recursive: it calls itself to map the coordinates of
children of the current node 'ix'.
Parameters
----------
ix : int
The tree node to map.
interval: length 2 tuple of floats
Horizontal space allocated to node 'ix'.
width : {'uniform', 'mass'}, optional
            Determines how much horizontal space each level set tree node is
given. See LevelSetTree.plot() for more information.
Returns
-------
segments : dict
A dictionary with values that contain the coordinates of vertical
line segment endpoints. This is only useful to the interactive
analysis tools.
segmap : list
Indicates the order of the vertical line segments as returned by the
recursive coordinate mapping function, so they can be picked by the
user in the interactive tools.
splits : dict
Dictionary values contain the coordinates of horizontal line
segments (i.e. node splits).
splitmap : list
Indicates the order of horizontal line segments returned by
recursive coordinate mapping function, for use with interactive
tools.
"""
## get children
children = np.array(self.nodes[ix].children)
n_child = len(children)
## if there's no children, just one segment at the interval mean
if n_child == 0:
xpos = np.mean(interval)
segments = {}
segmap = [ix]
splits = {}
splitmap = []
segments[ix] = (([xpos, self.nodes[ix].start_radius],
[xpos, self.nodes[ix].end_radius]))
## else, construct child branches then figure out parent's position
else:
parent_range = interval[1] - interval[0]
segments = {}
segmap = [ix]
splits = {}
splitmap = []
census = np.array([len(self.nodes[x].members) for x in children],
dtype=np.float)
weights = census / sum(census)
## sort branches by mass in decreasing order from left to right
seniority = np.argsort(weights)[::-1]
children = children[seniority]
weights = weights[seniority]
## get relative branch intervals
if width == 'mass':
child_intervals = np.cumsum(weights)
child_intervals = np.insert(child_intervals, 0, 0.0)
else:
child_intervals = np.linspace(0.0, 1.0, n_child+1)
## loop over the children
for j, child in enumerate(children):
## translate local interval to absolute interval
branch_interval = (interval[0] + child_intervals[j] * parent_range,
interval[0] + child_intervals[j+1] * parent_range)
## recurse on the child
branch = self.constructBranchMap(child, branch_interval, width)
branch_segs, branch_splits, branch_segmap, branch_splitmap = branch
segmap += branch_segmap
splitmap += branch_splitmap
splits = dict(splits.items() + branch_splits.items())
segments = dict(segments.items() + branch_segs.items())
## find the middle of the children's x-position and make vertical segment ix
children_xpos = np.array([segments[k][0][0] for k in children])
xpos = np.mean(children_xpos)
## add horizontal segments to the list
for child in children:
splitmap.append(child)
child_xpos = segments[child][0][0]
splits[child] = ([xpos, self.nodes[ix].end_radius],
[child_xpos, self.nodes[ix].end_radius])
## add vertical segment for current node
segments[ix] = (([xpos, self.nodes[ix].start_radius],
[xpos, self.nodes[ix].end_radius]))
return segments, splits, segmap, splitmap
#############################################
### LEVEL SET TREE CONSTRUCTION FUNCTIONS ###
#############################################
def cdTree(X, k, alpha=1.0, start='complete', verbose=False):
"""
Construct a Chaudhuri-Dasgupta level set tree. A level set tree is
constructed by identifying connected components of observations as edges are
removed from the geometric graph in descending order of pairwise distance.
Parameters
----------
X : 2D array
Data matrix, with observations as rows.
k : integer
Number of observations to consider as neighbors of each point.
alpha : float
A robustness parameter. Dilates the threshold for including edges in an
upper level set similarity graph.
start : {'complete', 'knn'}, optional
Initialization of the similarity graph. 'Complete' starts with a
complete similarity graph (as written in the Chaudhuri-Dasgupta paper)
and knn starts with a k-nearest neighbor similarity graph.
verbose: {False, True}, optional
        If set to True, prints a progress indicator to the screen every 1000
        levels.
Returns
-------
    T : CDTree
        See the CDTree class above for class and method definitions.
"""
n, p = X.shape
## Find the distance between each pair of points
r_node = spd.pdist(X, metric='euclidean')
D = spd.squareform(r_node)
## Get the k-neighbor radius for each point
rank = np.argsort(D, axis=1)
ix_nbr = rank[:, 0:k] # should this be k+1 to match Kpotufe paper?
k_nbr = ix_nbr[:, -1]
k_radius = D[np.arange(n), k_nbr]
## Construct a complete graph
G = igr.Graph.Full(n)
G.vs['name'] = range(n)
G.vs['radius'] = k_radius
G.es['name'] = range(G.ecount())
G.es['length'] = r_node
## Set all relevant distances
r_edge = r_node / alpha
r_levels = np.unique(np.append(r_node, r_edge))[::-1]
## Instantiate the tree
T = CDTree()
if start == 'complete':
T.subgraphs[0] = G
T.nodes[0] = ConnectedComponent(0, parent=None, children=[],
start_radius=r_levels[0], end_radius=None, members=G.vs['name'])
elif start == 'knn':
max_radius = max(k_radius)
# remove edges longer than the maximum k-neighbor radius
cut_edges = G.es.select(length_gt = max_radius)
if len(cut_edges) > 0:
cut_edges.delete()
# initialize a subgraph and node for each root component
cc0 = G.components()
for i, c in enumerate(cc0):
T.subgraphs[i] = G.subgraph(c)
T.nodes[i] = ConnectedComponent(i, parent=None, children=[],
start_radius=max_radius, end_radius=None,
members=G.vs[c]['name'])
else:
print "Start value not understood."
return
## Iterate through relevant threshold values in descending order
for i, r in enumerate(r_levels):
n_iter = len(r_levels)
if i % 1000 == 0 and verbose:
print "iteration:", i, "/", n_iter
deactivate_keys = []
activate_subgraphs = {}
for (k, H) in T.subgraphs.items():
# remove nodes and edges with large weight
cut_nodes = H.vs.select(radius_ge = r)
if len(cut_nodes) > 0:
cut_nodes.delete()
cut_edges = H.es.select(length_ge = alpha * r) # note this alpha
if len(cut_edges) > 0:
cut_edges.delete()
            # check if the component has vanished
if H.vcount() == 0:
T.nodes[k].end_radius = r
deactivate_keys.append(k)
# if the graph has changed, look for splits
if len(cut_edges) > 0 or len(cut_nodes) > 0:
cc = H.components()
if len(cc) > 1:
T.nodes[k].end_radius = r
deactivate_keys.append(k)
for c in cc:
new_key = max(T.nodes.keys()) + 1
T.nodes[k].children.append(new_key)
activate_subgraphs[new_key] = H.subgraph(c)
T.nodes[new_key] = ConnectedComponent(new_key, parent=k,
children=[], start_radius=r, end_radius=None,
members=H.vs[c]['name'])
# update active components
for k in deactivate_keys:
del T.subgraphs[k]
T.subgraphs.update(activate_subgraphs)
return T
def loadTree(fname):
"""
Load a saved tree from file.
Parameters
----------
fname : string
Filename to load. The .mat extension is not necessary.
Returns
-------
    T : CDTree
        The loaded and reconstituted level set tree object.
"""
indata = spio.loadmat(fname)
## format inputs
idnums = indata['idnums'].flatten()
levels = list(indata['levels'].flatten())
bg_sets = [np.array(x[0].flatten()) for x in indata['bg_sets']]
start_radii = indata['start_radii'].flatten()
end_radii = indata['end_radii'].flatten()
parents = [(None if x == -1 else x) for x in indata['parents'].flatten()]
children = [list(x[0].flatten()) for x in indata['children']]
members = [list(x[0].flatten()) for x in indata['members']]
if len(children) == 0:
children = [[]]*len(idnums)
## create tree
    T = CDTree()
## add nodes to the tree
nodes = {}
for i, k in enumerate(idnums):
nodes[k] = ConnectedComponent(k, parents[i], children[i],
start_radii[i], end_radii[i], members[i])
T.nodes = nodes
return T
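# A minimal end-to-end sketch on synthetic data, using only names defined in
# this module (data and parameter values are illustrative):
#
#     X = np.random.rand(200, 2)
#     T = cdTree(X, k=10, alpha=1.4, start='knn')
#     labels, nodes = T.getClusterLabels(method='all-mode')
#     fig = T.plot(width='mass')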
|
nicolasfauchereau/DeBaCl
|
debacl/cd_tree.py
|
Python
|
bsd-3-clause
| 25,521
|
[
"Brian"
] |
4c11c570a3b6e2b996fd10a66bcb0dad924ee143266480f00e5af767f725af43
|
import mock
import unittest
import tempfile
import os
import shutil
import errno
from DIRAC import S_OK, S_ERROR, gLogger
# gLogger.setLevel( 'DEBUG' )
from DIRAC.Resources.Storage.StorageElement import StorageElementItem
def mock_StorageFactory_getConfigStorageName( storageName, referenceType ):
resolvedName = storageName
return S_OK( resolvedName )
def mock_StorageFactory_getConfigStorageOptions( storageName ):
""" Get the options associated to the StorageElement as defined in the CS
"""
optionsDict = {'BackendType': 'local',
'ReadAccess': 'Active',
'WriteAccess': 'Active'}
return S_OK( optionsDict )
def mock_StorageFactory_getConfigStorageProtocols( storageName ):
""" Protocol specific information is present as sections in the Storage configuration
"""
protocolDetails = [{'Host': '',
'Path': '/tmp/se',
'PluginName': 'File',
'Port': '',
'Protocol': 'file',
'SpaceToken': '',
'WSUrl': ''}]
return S_OK( protocolDetails )
class TestBase( unittest.TestCase ):
""" Base test class. Defines all the method to test
"""
@mock.patch( 'DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageName',
side_effect = mock_StorageFactory_getConfigStorageName )
@mock.patch( 'DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageOptions',
side_effect = mock_StorageFactory_getConfigStorageOptions )
@mock.patch( 'DIRAC.Resources.Storage.StorageFactory.StorageFactory._getConfigStorageProtocols',
side_effect = mock_StorageFactory_getConfigStorageProtocols )
@mock.patch( 'DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE',
return_value = S_OK( True ) ) # Pretend it's local
def setUp( self, mk_getConfigStorageName, mk_getConfigStorageOptions, mk_getConfigStorageProtocols, mk_isLocalSE ):
self.se = StorageElementItem( 'FAKE' )
self.basePath = tempfile.mkdtemp( dir = '/tmp' )
# Update the basePath of the plugin
self.se.storages[0].basePath = self.basePath
self.srcPath = tempfile.mkdtemp( dir = '/tmp' )
self.destPath = tempfile.mkdtemp( dir = '/tmp' )
self.existingFile = '/lhcb/file.txt'
self.existingFileSize = 0
self.nonExistingFile = '/lhcb/nonExistingFile.txt'
self.subDir = '/lhcb/subDir'
self.subFile = os.path.join( self.subDir, 'subFile.txt' )
self.subFileSize = 0
self.FILES = [self.existingFile, self.nonExistingFile, self.subFile]
self.DIRECTORIES = [self.subDir]
self.ALL = self.FILES + self.DIRECTORIES
with open( os.path.join( self.srcPath, self.existingFile.replace( '/lhcb/', '' ) ), 'w' ) as f:
f.write( "I put something in the file so that it has a size\n" )
self.existingFileSize = os.path.getsize( os.path.join( self.srcPath, self.existingFile.replace( '/lhcb/', '' ) ) )
assert self.existingFileSize
os.mkdir( os.path.join( self.srcPath, os.path.basename( self.subDir ) ) )
with open( os.path.join( self.srcPath, self.subFile.replace( '/lhcb/', '' ) ), 'w' ) as f:
f.write( "This one should have a size as well\n" )
self.subFileSize = os.path.getsize( os.path.join( self.srcPath, self.subFile.replace( '/lhcb/', '' ) ) )
assert self.subFileSize
def tearDown(self):
shutil.rmtree( self.basePath )
shutil.rmtree( self.srcPath )
shutil.rmtree( self.destPath )
pass
def walkAll( self ):
for dirname in [self.basePath, self.destPath]:
self.walkPath( dirname )
def walkPath(self, path):
for root, dirs, files in os.walk( path ):
print root
print " dirs"
for d in dirs:
print " ", os.path.join( root, d )
print " files"
for f in files:
print " ", os.path.join( root, f )
@mock.patch( 'DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE',
return_value = S_OK( True ) ) # Pretend it's local
def test_01_getURL( self, mk_isLocalSE ):
"""Testing getURL"""
# Testing the getURL
res = self.se.getURL( self.ALL )
self.assert_( res['OK'], res )
self.assert_( not res['Value']['Failed'], res['Value']['Failed'] )
self.assert_( len( res['Value']['Successful'] ) == len( self.ALL ) )
for lfn, url in res['Value']['Successful'].items():
self.assertEqual( url, self.basePath.rstrip( '/' ) + lfn )
@mock.patch( 'DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE',
return_value = S_OK( True ) ) # Pretend it's local
def test_02_FileTest( self, mk_isLocalSE ):
"""Testing createDirectory"""
# Putting the files
def localPutFile( fn, size = 0 ):
"""If fn is '/lhcb/fn.txt', it calls
{ '/lhcb/fn.txt' : /tmp/generatedPath/fn.txt}
"""
transfDic = { fn : os.path.join( self.srcPath, fn.replace( '/lhcb/', '' ) )}
return self.se.putFile( transfDic, sourceSize = size )
# wrong size
res = localPutFile( self.existingFile, size = -1 )
self.assert_( res['OK'], res )
self.assert_( self.existingFile in res['Value']['Failed'] )
self.assert_( 'not match' in res['Value']['Failed'][self.existingFile], res )
self.assert_( not os.path.exists( self.basePath + self.existingFile ) )
# Correct size
res = localPutFile( self.existingFile, size = self.existingFileSize )
self.assert_( res['OK'], res )
self.assert_( self.existingFile in res['Value']['Successful'], res )
self.assert_( os.path.exists( self.basePath + self.existingFile ) )
# No size
res = localPutFile( self.existingFile )
self.assert_( res['OK'], res )
self.assert_( self.existingFile in res['Value']['Successful'], res )
self.assert_( os.path.exists( self.basePath + self.existingFile ) )
# No existing source file
res = localPutFile( self.nonExistingFile )
self.assert_( res['OK'], res )
self.assert_( self.nonExistingFile in res['Value']['Failed'], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
# sub file
res = localPutFile( self.subFile )
self.assert_( res['OK'], res )
self.assert_( self.subFile in res['Value']['Successful'], res )
self.assert_( os.path.exists( self.basePath + self.subFile ) )
# Directory
res = localPutFile( self.subDir )
self.assert_( res['OK'], res )
self.assert_( self.subDir in res['Value']['Failed'] )
self.assert_( os.strerror( errno.EISDIR ) in res['Value']['Failed'][self.subDir], res )
res = self.se.exists( self.FILES )
self.assert_( res['OK'], res )
self.assert_( not res['Value']['Failed'], res )
self.assert_( res['Value']['Successful'][self.existingFile], res )
self.assert_( not res['Value']['Successful'][self.nonExistingFile], res )
res = self.se.getFileSize( self.ALL )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful'][self.existingFile], self.existingFileSize )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.EISDIR ) in res['Value']['Failed'][self.subDir], res )
res = self.se.getFileMetadata( self.ALL )
self.assert_( res['OK'], res )
self.assert_( self.existingFile in res['Value']['Successful'] )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.EISDIR ) in res['Value']['Failed'][self.subDir], res )
res = self.se.isFile( self.ALL )
self.assert_( res['OK'], res )
self.assert_( res['Value']['Successful'][self.existingFile], res )
self.assert_( not res['Value']['Successful'][self.subDir], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
res = self.se.getFile( self.ALL, localPath = self.destPath )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful'][self.existingFile], self.existingFileSize )
self.assert_( os.path.exists( os.path.join( self.destPath, os.path.basename( self.existingFile ) ) ) )
self.assertEqual( res['Value']['Successful'][self.subFile], self.subFileSize )
self.assert_( os.path.exists( os.path.join( self.destPath, os.path.basename( self.subFile ) ) ) )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.EISDIR ) in res['Value']['Failed'][self.subDir], res )
res = self.se.removeFile( self.ALL )
self.assert_( res['OK'], res )
self.assert_( res['Value']['Successful'][self.existingFile] )
self.assert_( not os.path.exists( self.basePath + self.existingFile ) )
self.assert_( res['Value']['Successful'][self.subFile] )
self.assert_( not os.path.exists( self.basePath + self.subFile ) )
self.assert_( res['Value']['Successful'][self.nonExistingFile] )
self.assert_( os.strerror( errno.EISDIR ) in res['Value']['Failed'][self.subDir] )
@mock.patch( 'DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE',
return_value = S_OK( True ) ) # Pretend it's local
def test_03_createDirectory( self, mk_isLocalSE ):
"""Testing creating directories"""
res = self.se.createDirectory( self.subDir )
self.assert_( res['OK'], res )
self.assert_( self.subDir in res['Value']['Successful'] )
self.assert_( os.path.exists( self.basePath + self.subDir ) )
@mock.patch( 'DIRAC.Resources.Storage.StorageElement.StorageElementItem._StorageElementItem__isLocalSE',
return_value = S_OK( True ) ) # Pretend it's local
def test_04_putDirectory( self, mk_isLocalSE ):
"""Testing putDirectory"""
nonExistingDir = '/lhcb/forsuredoesnotexist'
localdirs = ['/lhcb', nonExistingDir]
# Correct size
res = self.se.putDirectory( { '/lhcb' : self.srcPath} )
self.assert_( res['OK'], res )
self.assert_( '/lhcb' in res['Value']['Successful'], res )
self.assertEqual( res['Value']['Successful']['/lhcb'], {'Files': 2, 'Size': self.existingFileSize + self.subFileSize} )
self.assert_( os.path.exists( self.basePath + '/lhcb' ) )
self.assert_( os.path.exists( self.basePath + self.existingFile ) )
self.assert_( os.path.exists( self.basePath + self.subFile ) )
# No existing source directory
res = self.se.putDirectory( { '/lhcb' : nonExistingDir} )
self.assert_( res['OK'], res )
self.assert_( '/lhcb' in res['Value']['Failed'], res )
self.assertEqual( res['Value']['Failed']['/lhcb'], {'Files': 0, 'Size': 0} )
# sub file
res = self.se.putDirectory( { '/lhcb' : self.existingFile} )
self.assert_( res['OK'], res )
self.assert_( '/lhcb' in res['Value']['Failed'], res )
self.assertEqual( res['Value']['Failed']['/lhcb'], {'Files': 0, 'Size': 0} )
res = self.se.exists( self.DIRECTORIES + localdirs )
self.assert_( res['OK'], res )
self.assert_( not res['Value']['Failed'], res )
self.assert_( res['Value']['Successful'][self.subDir], res )
self.assert_( not res['Value']['Successful'][nonExistingDir], res )
res = self.se.getDirectorySize( self.ALL + localdirs )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful'][self.subDir], { 'Files' : 1, 'Size' : self.subFileSize, 'SubDirs' : 0 } )
self.assertEqual( res['Value']['Successful']['/lhcb'], { 'Files' : 1, 'Size' : self.existingFileSize, 'SubDirs' : 1 } )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.ENOTDIR ) in res['Value']['Failed'][self.existingFile], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][nonExistingDir], res )
res = self.se.getDirectoryMetadata( self.ALL + localdirs )
self.assert_( res['OK'], res )
self.assert_( self.subDir in res['Value']['Successful'] )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][nonExistingDir], res )
self.assert_( os.strerror( errno.ENOTDIR ) in res['Value']['Failed'][self.existingFile], res )
res = self.se.isDirectory( self.ALL + localdirs )
self.assert_( res['OK'], res )
self.assert_( not res['Value']['Successful'][self.existingFile] )
self.assert_( res['Value']['Successful'][self.subDir], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][nonExistingDir], res )
res = self.se.listDirectory( self.ALL + localdirs )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful'][self.subDir], {'Files': [self.subFile], 'SubDirs': []} )
self.assertEqual( res['Value']['Successful']['/lhcb'], {'Files': [self.existingFile], 'SubDirs': [self.subDir]} )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][self.nonExistingFile], res )
self.assert_( os.strerror( errno.ENOTDIR ) in res['Value']['Failed'][self.existingFile], res )
self.assert_( os.strerror( errno.ENOENT ) in res['Value']['Failed'][nonExistingDir], res )
res = self.se.getDirectory( self.ALL + localdirs, localPath = self.destPath )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful']['/lhcb'], {'Files' : 2, 'Size' : self.existingFileSize + self.subFileSize} )
self.assert_( os.path.exists( self.destPath + self.existingFile ) )
self.assert_( os.path.exists( self.destPath + self.subFile ) )
self.assertEqual( res['Value']['Successful'][self.subDir], {'Files' : 1, 'Size' : self.subFileSize} )
self.assert_( os.path.exists( self.destPath + self.subFile.replace( '/lhcb', '' ) ) )
self.assertEqual( res['Value']['Failed'][self.nonExistingFile], {'Files': 0, 'Size': 0} )
self.assertEqual( res['Value']['Failed'][self.existingFile], {'Files': 0, 'Size': 0} )
self.assertEqual( res['Value']['Failed'][nonExistingDir], {'Files': 0, 'Size': 0} )
res = self.se.removeDirectory( nonExistingDir, recursive = False )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful'][nonExistingDir], True )
res = self.se.removeDirectory( nonExistingDir, recursive = True )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Failed'][nonExistingDir], {'FilesRemoved':0, 'SizeRemoved':0} )
res = self.se.removeDirectory( self.nonExistingFile, recursive = False )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful'][self.nonExistingFile], True )
res = self.se.removeDirectory( self.nonExistingFile, recursive = True )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Failed'][self.nonExistingFile], {'FilesRemoved':0, 'SizeRemoved':0} )
res = self.se.removeDirectory( self.existingFile, recursive = False )
self.assert_( res['OK'], res )
self.assert_( os.strerror( errno.ENOTDIR ) in res['Value']['Failed'][self.existingFile], res )
res = self.se.removeDirectory( self.existingFile, recursive = True )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Failed'][self.existingFile], {'FilesRemoved':0, 'SizeRemoved':0} )
res = self.se.removeDirectory( '/lhcb', recursive = False )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful']['/lhcb'], True )
self.assert_( not os.path.exists( self.basePath + self.existingFile ) )
self.assert_( os.path.exists( self.basePath + self.subFile ) )
res = self.se.removeDirectory( '/lhcb', recursive = True )
self.assert_( res['OK'], res )
self.assertEqual( res['Value']['Successful']['/lhcb'], {'FilesRemoved':1, 'SizeRemoved':self.subFileSize} )
self.assert_( not os.path.exists( self.basePath + '/lhcb' ) )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( TestBase )
unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
vmendez/DIRAC
|
Resources/Storage/test/TestFilePlugin.py
|
Python
|
gpl-3.0
| 16,320
|
[
"DIRAC"
] |
108540873e4994b5802df13af2931c1e8143d327813cf8cfad1acf815e03434b
|
import numpy as np
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse import linalg as spla
from pysal.model.spreg.utils import spdot, spmultiply
from .family import Binomial, Poisson
def _compute_betas(y, x):
"""
compute MLE coefficients using iwls routine
Methods: p189, Iteratively (Re)weighted Least Squares (IWLS),
Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).
Geographically weighted regression: the analysis of spatially varying relationships.
"""
xT = x.T
xtx = spdot(xT, x)
xtx_inv = la.inv(xtx)
xtx_inv = sp.csr_matrix(xtx_inv)
xTy = spdot(xT, y, array_out=False)
betas = spdot(xtx_inv, xTy)
return betas
def _compute_betas_gwr(y, x, wi):
"""
compute MLE coefficients using iwls routine
Methods: p189, Iteratively (Re)weighted Least Squares (IWLS),
Fotheringham, A. S., Brunsdon, C., & Charlton, M. (2002).
Geographically weighted regression: the analysis of spatially varying relationships.
"""
xT = (x * wi).T
xtx = np.dot(xT, x)
xtx_inv = la.inv(xtx)
xtx_inv_xt = np.dot(xtx_inv, xT)
betas = np.dot(xtx_inv_xt, y)
return betas, xtx_inv_xt
def iwls(y, x, family, offset, y_fix,
ini_betas=None, tol=1.0e-8, max_iter=200, wi=None):
"""
Iteratively re-weighted least squares estimation routine
Parameters
----------
y : array
n*1, dependent variable
x : array
               n*k, design matrix of k independent variables
family : family object
probability models: Gaussian, Poisson, or Binomial
offset : array
n*1, the offset variable for each observation.
y_fix : array
n*1, the fixed intercept value of y for each observation
ini_betas : array
1*k, starting values for the k betas within the iteratively
weighted least squares routine
tol : float
tolerance for estimation convergence
    max_iter : integer
               maximum number of iterations if convergence not met
wi : array
n*1, weights to transform observations from location i in GWR
Returns
-------
betas : array
k*1, estimated coefficients
mu : array
n*1, predicted y values
wx : array
n*1, final weights used for iwls for GLM
n_iter : integer
               number of iterations when the iwls algorithm terminates
w : array
n*1, final weights used for iwls for GWR
z : array
iwls throughput
v : array
iwls throughput
xtx_inv_xt : array
               iwls throughput used to compute the GWR hat matrix,
[X'X]^-1 X'
"""
n_iter = 0
diff = 1.0e6
if ini_betas is None:
betas = np.zeros((x.shape[1], 1), np.float)
else:
betas = ini_betas
if isinstance(family, Binomial):
y = family.link._clean(y)
if isinstance(family, Poisson):
y_off = y / offset
y_off = family.starting_mu(y_off)
v = family.predict(y_off)
mu = family.starting_mu(y)
else:
mu = family.starting_mu(y)
v = family.predict(mu)
while diff > tol and n_iter < max_iter:
n_iter += 1
w = family.weights(mu)
z = v + (family.link.deriv(mu) * (y - mu))
w = np.sqrt(w)
if not isinstance(x, np.ndarray):
w = sp.csr_matrix(w)
z = sp.csr_matrix(z)
wx = spmultiply(x, w, array_out=False)
wz = spmultiply(z, w, array_out=False)
if wi is None:
n_betas = _compute_betas(wz, wx)
else:
n_betas, xtx_inv_xt = _compute_betas_gwr(wz, wx, wi)
v = spdot(x, n_betas)
mu = family.fitted(v)
if isinstance(family, Poisson):
mu = mu * offset
diff = min(abs(n_betas - betas))
betas = n_betas
if wi is None:
return betas, mu, wx, n_iter
else:
return betas, mu, v, w, z, xtx_inv_xt, n_iter
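# A minimal usage sketch: fitting a plain GLM (no GWR weights) on synthetic
# Poisson data; shapes and values are illustrative only.
#
#     n = 100
#     x = np.hstack([np.ones((n, 1)), np.random.rand(n, 1)])
#     y = np.random.poisson(lam=2.0, size=(n, 1)).astype(float)
#     betas, mu, wx, n_iter = iwls(y, x, Poisson(), offset=np.ones((n, 1)),
#                                  y_fix=np.zeros((n, 1)))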
|
lixun910/pysal
|
pysal/model/spglm/iwls.py
|
Python
|
bsd-3-clause
| 4,241
|
[
"Gaussian"
] |
ac3b6c578739b757457074cac5095dc2026666ed33bb12d7c55f21a9a689983d
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('hack33.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
Rositsazz/hack33
|
config/urls.py
|
Python
|
mit
| 1,599
|
[
"VisIt"
] |
7704662bc6c35ff37360ccad2e69fc0a6066b0e30a8949bced61c44598c5b815
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module provides a set of teachers that deal with dialog.
``FixedDialogTeacher(Teacher)``
Base class for teachers in tasks that have fixed dialog - i.e., dialog
that is not dynamically generated but rather is pulled from set examples.
    However, the class can be extended to all tasks involving fixed data.
Implements much of the basic functionality of these teachers, including
``observe()``, ``act()``, ``next_example()``
``DialogTeacher(FixedDialogTeacher)``
Base teacher class for doing dialog specifically with fixed chat logs.
``ParlAIDialogTeacher(DialogTeacher)``
Teacher class that provides access to data in the ParlAI Dialog format.
See the class description for more details.
``ConversationTeacher(DialogTeacher)``
Teacher class that provides access to data in the Conversations format.
See the class description for more details.
``FbDeprecatedDialogTeacher(DialogTeacher)``
Teacher class that provides access to data in the Facebook Dialog format.
See the class description for more details. **This class is deprecated**.
This module also includes ``DataLoader``, a threadpool data loader for
``FixedDialogTeacher``, and ``DialogData``/``StreamDialogData``, data
structures for accessing textual dialog data and utilized by ``DialogTeacher``
"""
from parlai.core.params import ParlaiParser
from parlai.core.agents import Agent, create_agent_from_shared
from parlai.core.image_featurizers import ImageLoader
from parlai.core.loader import load_teacher_module
from parlai.core.loader import register_teacher # noqa: F401
from parlai.core.message import Message
from parlai.core.metrics import TeacherMetrics, aggregate_named_reports
from parlai.core.opt import Opt
from parlai.utils.conversations import Conversations
from parlai.utils.data import DatatypeHelper
from parlai.utils.misc import AttrDict, str_to_msg, warn_once, SimpleCounter
from parlai.utils.distributed import get_rank, num_workers, is_distributed
import parlai.utils.torch as torch_utils
import parlai.utils.logging as logging
from parlai.utils.io import PathManager
from parlai.core.mutators import Mutator
from abc import ABC, abstractmethod
import argparse
from collections import defaultdict
import concurrent.futures
import copy
import json
import os
import queue
import random
import yaml
from threading import Thread
import torch
from typing import List, Tuple, Optional, TypeVar, Any
ERROR_MESSAGE_NO_DATAFILE = (
"{class_name} is expected to set self.opt['datafile'] inside `__init__` "
"before calling `super().__init__`. This will passed to setup_data, "
"indicating what data to load. If you don't know what to use, set "
"`opt['datafile'] = parlai.utils.data.DatatypeHelper.fold(opt['datatype'])` "
"to receive the fold name in setup_data."
)
ChunkOutput = TypeVar('ChunkOutput')
class DataLoader(Thread):
"""
A worker thread that provides a threadpool for data loading.
A teacher may submit a request to the loader, which will return the
appropriate data.
To submit a request, a teacher should call ``request_load``.
"""
def __init__(self, opt):
Thread.__init__(self, daemon=True)
self.num_workers = opt.get('num_load_threads', 1)
self.request_queue = queue.Queue()
self.last_future = None
def request_load(self, receive_fn, load_fn, args):
"""
Queue a request for loading.
:param receive_fn:
a receive function (for receiving the data)
:param load_fn:
a load function (for loading the data)
:param args:
arguments for the load function. args can be either a dictionary of
arguments for a function, or a list of positional arguments
"""
self.request_queue.put((receive_fn, load_fn, args))
def run(self):
"""
Run the execution loop.
"""
executor = concurrent.futures.ThreadPoolExecutor(
max_workers=self.num_workers, thread_name_prefix=self.name
)
with executor:
while True:
receive_fn, load_fn, args = self.request_queue.get()
if receive_fn is StopIteration:
return
try:
if type(args) == dict:
future = executor.submit(load_fn, **args)
else:
future = executor.submit(load_fn, *args)
self.last_future = future
receive_fn(future)
except RuntimeError:
return
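# A minimal usage sketch of the loader; `my_receive`, `my_load` and `path` are
# hypothetical stand-ins for a teacher's callback, load function and arguments:
#
#     loader = DataLoader(opt)
#     loader.start()
#     loader.request_load(receive_fn=my_receive, load_fn=my_load, args=[path])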
class _ErrorThrowingDataLoader(object):
"""
A fake DataLoader which throws an exception when a work order is placed.
Since threads cannot be mixed with spawn_method='fork', we need to disallow users
from combining --num-workers with teachers that utilize threads. This placeholder
object is only useful for ensuring the user sees a loud error message when they
accidentally use a thread.
"""
def __init__(self, opt):
pass
def request_load(self, receive_fn, load_fn, args):
raise RuntimeError(
'One of your teachers uses a DataLoader or a thread. You may only '
'combine this with --num-workers 0.'
)
def start(self):
pass
class Teacher(Agent):
"""
Basic Teacher agent that keeps track of how many times it's received messages.
Teachers provide the ``report()`` method to get back metrics.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
parser.add_argument(
'--mutators',
'-mut',
default=None,
help='Apply one or more mutators to the data.',
)
        mutators = Mutator.load_mutator_types(
            partial_opt.get('mutators') if partial_opt else None
        )
for m in mutators:
m.add_cmdline_args(parser, partial_opt)
return parser
def __init__(self, opt: Opt, shared=None):
if not hasattr(self, 'opt'):
self.opt = copy.deepcopy(opt)
if not hasattr(self, 'id'):
self.id = opt.get('task', 'teacher')
if not hasattr(self, 'metrics'):
self.metrics = TeacherMetrics(
metrics_list=opt.get('metrics', 'default'),
shared=shared['metrics'] if shared is not None else None,
)
self.epochDone = False
# return state/action dict based upon passed state
def act(self):
"""
Act upon the previous observation.
"""
if self.observation is not None and 'text' in self.observation:
t = Message({'text': 'Hello agent!'})
return t
def epoch_done(self):
"""
Return whether the epoch is done.
"""
return self.epochDone
# Default unknown length
def num_examples(self):
"""
Return the number of examples (e.g. individual utterances) in the dataset.
Default implementation returns `None`, indicating an unknown number.
"""
return None
def num_episodes(self):
"""
Return the number of episodes (e.g. conversations) in the dataset.
Default implementation returns `None`, indicating an unknown number.
"""
return None
def report(self):
"""
Return metrics showing total examples and accuracy if available.
"""
return self.metrics.report()
def reset(self):
"""
Reset the teacher.
"""
super().reset()
self.reset_metrics()
self.epochDone = False
def reset_metrics(self):
"""
Reset metrics.
"""
self.metrics.clear()
def share(self):
"""
In addition to default Agent shared parameters, share metrics.
"""
shared = super().share()
shared['metrics'] = self.metrics.share()
return shared
def __iter__(self):
"""
Iterate through the examples of the teacher.
"""
clone = self.clone()
while True:
message = clone.act()
if not isinstance(message, Message):
# backwards compatibility with older agents
message = Message(message)
if message.is_padding():
break
yield message
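# A minimal usage sketch of the iteration protocol above; `my_teacher` stands
# in for any concrete Teacher subclass instance:
#
#     for message in my_teacher:
#         print(message.get('text'))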
class FixedDialogTeacher(Teacher):
"""
A teacher agent for all teachers involved in tasks with fixed data.
This class provides the following functionality for its subclasses:
- Resets a teacher
- Provides an observe method
- Computes and retrieves the next episode index for a teacher
- Provides a threadpool option for loading data (especially useful for
large data, e.g. images)
In order to take advantage of the first few features, all a subclass has to
implement is three functions: ``num_episodes``, ``num_examples``, and
``get`` (which returns a specific example from a specific episode).
To utilize the DataLoader for threadpool loading, a teacher should
implement the ``submit_load_request`` function to send a load request
to the DataLoader by calling ``self.data_loader.request_load`` with the
appropriate arguments (``receive_fn, load_fn, args``). The DataLoader then
returns the data to the teacher's ``data_queue``, which the teacher can
poll in its ``act`` method.
The following is an example of the DataLoader usage in the VQA-V1 teacher.
1. In the teacher's ``init`` function, the teacher calls its
``submit_load_request`` function to preload an image.
2. The ``submit_load_request`` function gets the next ``episode_idx``,
and computes the image path for the load request.
3. At the end of ``submit_load_request``, the teacher calls
``self.data_loader.request_load`` with three args:
    - ``self.receive_data`` - the function that the DataLoader calls to
      return the loaded object
- ``self.image_loader.load`` - the function used to load the image
from the image path
- ``[img_path]`` - a list of arguments for the load function, which
in this case is the path of the image.
4. In the teacher's ``act`` function, the teacher loads the data from
its data queue.
5. At the end of the ``act`` function, the teacher calls
``submit_load_request`` to preload an image for the next example.
To see this in action, take a look at this teacher in ``tasks.vqa_v1.agents``.
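    Below is a minimal sketch of this protocol. The teacher name and the
    ``_next_image_path``/``_load_image`` helpers are hypothetical, shown only
    to illustrate the ``receive_fn, load_fn, args`` convention described above:
    .. code-block:: python
        class MyImageTeacher(FixedDialogTeacher):
            def submit_load_request(self):
                # hypothetical helper computing the next image path
                img_path = self._next_image_path()
                self.data_loader.request_load(
                    self.receive_data,  # receive_fn: enqueues onto data_queue
                    self._load_image,   # load_fn: hypothetical image loader
                    [img_path],         # args passed to load_fn
                )
            def act(self):
                image = self.data_queue.get()  # block until the load finishes
                self.submit_load_request()  # preload the next example
                return Message({'image': image, 'episode_done': True})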
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if not hasattr(self, 'datatype'):
self.datatype = opt['datatype']
if not hasattr(self, 'random'):
self.random = self.datatype == 'train'
if not hasattr(self, 'training'):
self.training = DatatypeHelper.is_training(self.datatype)
if not hasattr(self, 'cycle'):
self.cycle = DatatypeHelper.should_cycle(self.datatype)
if not hasattr(self, 'datafile'):
self.datafile = opt.get('datafile')
# set up support for multithreaded data loading
self.data_queue = queue.Queue()
if shared:
self.index = shared['index']
if 'data_loader' in shared:
self.data_loader = shared['data_loader']
if 'threadindex' in shared:
self.threadindex = shared['threadindex']
if 'examples' in shared:
self.examples = shared['examples']
else:
self.index = AttrDict(value=-1)
if not hasattr(self, 'data_loader'):
if opt.get('background_index') is None:
self.data_loader = DataLoader(opt)
else:
self.data_loader = _ErrorThrowingDataLoader(opt)
self.data_loader.start()
# set up batching
self.bsz = opt.get('batchsize', 1)
if shared:
self.mutators = shared.get('mutators', [])
else:
mutator_types = Mutator.load_mutator_types(self.opt.get('mutators'))
self.mutators = [mutator(self.opt) for mutator in mutator_types]
self._episode_done = True
def reset(self):
"""
Reset the dialog to the start of the epoch, and reset all metrics.
"""
super().reset()
self.metrics.clear()
self.lastY = None
self.last_act = None
self._episode_done = True
self.epochDone = False
self.data_queue = queue.Queue()
self.episode_idx = -1
self.index.value = -1
def submit_load_request(self):
"""
Submit a load request.
An agent should implement this method to submit requests to the data
loader. At the end of this method, the agent should call
``self.data_loader.request_load()`` with the appropriate args.
By default, this method does nothing.
"""
# TODO: mark as abstract
pass
def receive_data(self, future: concurrent.futures.Future):
"""
Receive data from the data loader.
:param future: result from the load request.
"""
data = future.result()
self.data_queue.put(data)
def share(self):
"""
Share the data and dataloader.
"""
shared = super().share()
if hasattr(self, 'examples'):
shared['examples'] = self.examples
if hasattr(self, 'data_loader'):
shared['data_loader'] = self.data_loader
if hasattr(self, 'mutators'):
shared['mutators'] = self.mutators
shared['index'] = self.index
return shared
def next_episode_idx(self, num_eps=None, loop=None):
"""
Return the next episode index.
:param num_eps:
default None uses ``num_episodes`` value.
:param loop:
default None loops during training but not evaluation.
"""
if num_eps is None:
num_eps = self.num_episodes()
if loop is None:
loop = self.training
if self.random:
new_idx = random.randrange(num_eps)
else:
self.index.value += 1
if loop:
try:
self.index.value %= num_eps
except ZeroDivisionError:
                    raise ZeroDivisionError(
                        "The teacher has no data (e.g. setup_data yielded no "
                        "items, or self.num_episodes() == 0). We do not "
                        "support empty datasets (or folds) at this time."
                    )
new_idx = self.index.value
return new_idx
def next_example(self):
"""
Return the next example.
If there are multiple examples in the same episode, returns the next one in that
episode. If that episode is over, gets a new episode index and returns the first
example of that episode.
"""
if self._episode_done:
self.episode_idx = self.next_episode_idx()
self.entry_idx = 0
if self.episode_idx >= self.num_episodes():
return Message.padding_example(), True
# buffer the full conversation ahead of time for mutators
episode_buffer = []
buffer_entry_idx = 0
while True:
entry = self.get(self.episode_idx, buffer_entry_idx)
if not isinstance(entry, Message):
assert isinstance(entry, dict)
typ = type(self)
warn_once(
f"{typ.__module__}.{typ.__name__}' is outputting dicts "
"instead of messages. If this is a teacher that is part of "
"ParlAI, please file an issue on GitHub. If it is your own "
"teacher, please return a Message object instead."
)
entry = Message(entry)
episode_buffer.append(entry)
if entry.get('episode_done'):
break
buffer_entry_idx += 1
# apply mutators
if self.mutators:
episode_buffer = [m.copy() for m in episode_buffer]
for mutator in self.mutators:
episode_buffer = mutator(episode_buffer)
self.episode_buffer = list(episode_buffer)
if not self.episode_buffer:
# if we got back an empty episode after mutating, skip it
return self.next_example()
else:
self.entry_idx += 1
if self.episode_idx >= self.num_episodes():
return Message.padding_example(), True
# buffer the entire conversation so we can apply mutators
ex = self.episode_buffer[self.entry_idx]
self._episode_done = self.entry_idx == len(self.episode_buffer) - 1
if (
not self.cycle
and self._episode_done
and self.episode_idx + self.opt.get("batchsize", 1) >= self.num_episodes()
):
epoch_done = True
else:
epoch_done = False
return ex, epoch_done
def num_episodes(self) -> int:
"""
Get the number of episodes in this dataset.
"""
raise RuntimeError('"num_episodes" must be overridden by children.')
def num_examples(self) -> int:
"""
Get the total number of examples in this dataset.
"""
raise RuntimeError('"num_examples" must be overridden by children.')
def get(self, episode_idx, entry_idx=0):
"""
Get the specified episode and the specified entry in that episode.
Children must override this method in order to inherit the
`next_example` method.
:param episode_idx:
which episode to return examples from
:param entry_idx:
which example to return from the episode. Many datasets have only
single-entry episodes, so this defaults to zero.
"""
# TODO: mark as abstract, get rid of runtime error.
raise RuntimeError('"Get" method must be overridden by children.')
def observe(self, observation):
"""
Process observation for metrics.
"""
self.metrics.clear_recent()
if hasattr(self, 'lastY') and self.lastY is not None:
self.metrics.evaluate_response(observation, self.lastY)
self.custom_evaluation(self.last_act, self.lastY, observation)
self.lastY = None
recent_metrics = self.metrics.report_recent()
if recent_metrics:
# for display purposes (display_model), take all accumulated
# metrics back into the original observation. This is an abuse of
# Messages being pointers
if 'metrics' in observation:
# override agent-level metrics if present
observation.pop('metrics')
observation['metrics'] = recent_metrics
return observation
def custom_evaluation(
self,
teacher_action: Message,
labels: Optional[Tuple[str]],
model_response: Message,
) -> None:
"""
A method designated for hooking custom evaluations into teachers.
Generally, a user will want to use `self.metrics.add` to record any
specialized metrics that only make sense for this one dataset.
:param teacher_action:
The message last sent from this teacher.
:param labels:
The previous correct labels, if there were any.
:param model_response:
The raw response from the model. Generally you want to rely on the
text field, but others may be necessary in specific situations.
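        A minimal sketch (the metric name is made up; assumes ``AverageMetric``
        from ``parlai.core.metrics``):
        .. code-block:: python
            def custom_evaluation(self, teacher_action, labels, model_response):
                if labels and model_response.get('text'):
                    hit = int(model_response['text'] in labels)
                    self.metrics.add('my_exact_match', AverageMetric(hit))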
"""
pass
def act(self):
"""
Send new dialog message.
"""
orig_action = self.get_orig_action()
processed_action = self.process_action(orig_action)
return processed_action
def get_orig_action(self) -> Message:
"""
Get the unprocessed action and reset if needed.
This function will return the raw action from `self.next_example()`, before the
`self.last_act` and `self.lastY` attributes have been defined based on this
action for metrics or custom evaluations. This is so that wrapper teachers can
modify the raw action first, such as to change the contents of its 'text' and
'label' fields, without the action becoming out of sync with `self.last_act` and
`self.lastY`.
"""
if not hasattr(self, 'epochDone'):
# reset if haven't yet
self.reset()
# get next example, action is episode_done dict if already out of exs
action, self.epochDone = self.next_example()
if not isinstance(action, Message):
# TODO: all teachers should eventually create messages
# while setting up the data, so this won't be necessary
action = Message(action)
return action
def process_action(self, action: Message) -> Message:
"""
Remember the raw action and prepare its fields for passing out of the teacher.
"""
action.force_set('id', self.getID())
# remember correct answer if available
self.last_act = action
self.lastY = action.get('labels', action.get('eval_labels', None))
if not DatatypeHelper.is_training(self.datatype) and 'labels' in action:
# move labels to eval field so not used for training
# but this way the model can use the labels for perplexity or loss
action = action.copy()
labels = action.pop('labels')
if not self.opt.get('hide_labels', False):
action['eval_labels'] = labels
return action
class DialogTeacher(FixedDialogTeacher):
"""
A base teacher class for doing dialog with fixed chat logs.
    This class provides a set of basic functionality:
- uses data class to store and query text data
- generates action tables to send to the student agent from the data
In order to subclass this class, you must implement ``setup_data()`` in
your class, which reads your data file as an iterator.
"""
def __init__(self, opt, shared=None):
# Check for setup_data
if not hasattr(self, 'setup_data'):
raise RuntimeError(
'Must implement setup_data or subclass a class '
'which implements it (e.g. FbDeprecatedDialogTeacher) '
'in order to use this class.'
)
super().__init__(opt, shared)
self.datatype = opt['datatype']
self.training = DatatypeHelper.is_training(self.datatype)
self.cycle = DatatypeHelper.should_cycle(self.datatype)
self.stream = 'stream' in self.datatype
# first initialize any shared objects
data_class = StreamDialogData if self.stream else DialogData
kwargs = (
# never cycle if "ordered" is in the datatype. this is used by
# build_dict to enumerate through the data exactly once while still
# marking examples as training examples.
{'cycle': self.cycle}
if self.stream
else {}
)
if shared and shared.get('data'):
self.data = data_class(opt, shared=shared['data'], **kwargs)
else:
if 'datafile' not in self.opt:
raise KeyError(
ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
)
self.data = data_class(
opt,
data_loader=self.setup_data,
cands=self.label_candidates(),
**kwargs,
)
self.reset()
@abstractmethod
def setup_data(self, datafile: str):
"""
The core method which the user should override.
Yields the data, one message at a time, as well as markers indicating
new episodes.
:param str datafile:
If the initializer set a 'datafile' field within the initialization,
this will be provided here. Otherwise, datafile will be the fold:
either "train", "valid", or "test".
:return:
Yields pairs (message, new_episode) containing a Message object
and whether the message marks the beginning of a totally new
episode.
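        A minimal sketch of an implementation (the tab-separated file format
        here is hypothetical, purely for illustration):
        .. code-block:: python
            def setup_data(self, datafile):
                # each line: "question<TAB>answer"; every line is its own episode
                with PathManager.open(datafile) as f:
                    for line in f:
                        question, answer = line.rstrip('\n').split('\t')
                        msg = Message({'text': question, 'labels': [answer]})
                        yield msg, True  # True: this starts a new episode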
"""
pass
def reset(self):
"""
Reset the dialog to the start of the epoch, reset all metrics.
"""
super().reset()
if self.stream:
self.data.reset()
self.epochDone = False
def share(self):
"""
Share the data.
"""
shared = super().share()
if hasattr(self, 'data'):
shared['data'] = self.data.share()
return shared
def label_candidates(self):
"""
Provide consistent label candidates for all examples.
        Default implementation returns ``None`` always, but this may be overridden to
        provide candidates in all areas. See ``FbDeprecatedDialogTeacher``.
        """
        # TODO DEPRECATIONDAY: FbDeprecatedDialogTeacher is being deprecated;
        # should we remove this?
# TODO: mark as optionally abstract?
return None
def num_episodes(self) -> int:
"""
Return the number of episodes in the data.
"""
if hasattr(self, "_num_episodes_cache"):
return self._num_episodes_cache
try:
return self.data.num_episodes()
except AttributeError:
return super().num_episodes()
def num_examples(self) -> int:
"""
Return the number of examples in the data.
"""
if hasattr(self, '_num_examples_cache'):
return self._num_examples_cache
try:
self._num_examples_cache: int = self.data.num_examples()
except AttributeError:
self._num_examples_cache = super().num_examples()
return self._num_examples_cache
def get(self, episode_idx, entry_idx=0):
"""
Get a specific example.
"""
return self.data.get(episode_idx, entry_idx)[0]
def next_example(self):
"""
Get the next example.
"""
if self.stream:
            # unfortunately we need to also do the mutator buffering here.
            # it's difficult to structure it so the logic isn't duplicated
            # with FixedDialogTeacher.next_example.
if hasattr(self, 'episode_buffer') and self.episode_buffer:
action = self.episode_buffer.pop(0)
epoch_done = (not self.episode_buffer) and self._saw_epoch_done
return action, epoch_done
episode_buffer = []
while True:
action, epoch_done = self.data.get()
episode_buffer.append(action)
if action['episode_done']:
self._saw_epoch_done = epoch_done
break
# perform any mutations there are
if self.mutators:
episode_buffer = [m.copy() for m in episode_buffer]
for mutator in self.mutators:
episode_buffer = mutator(episode_buffer)
# make sure mutations are fully realized (not generators)
self.episode_buffer = list(episode_buffer)
# The recursive call has dual purpose:
# - if we get back an empty episode after mutating, skip it gracefully
            # - pull the first item of the episode with the epoch_done logic, but DRY
return self.next_example()
else:
action, epoch_done = super().next_example()
return action, epoch_done
class DialogData(object):
"""
Provides a data structure for accessing textual dialog data.
This can be used whenever the dialog data is a fixed log of chats
    (i.e. not a simulator setting). The logs can include dialog text and possibly
supervised labels, candidate labels and rewards.
All these are stored in this internal data format which is used by the
``DialogTeacher`` class.
:param opt:
options to initialize the class
:param data_loader:
an iterable with each call returning a tuple in the form
``((x, y, r, c, i), new_episode?)`` where the ``x`` and ``new_episode``
fields are mandatory and other fields may be omitted or ``None``.
:param cands:
can be set to provide a list of candidate labels for every example in
this dataset, which the agent can choose from (the correct answer
should be in this set).
:param random:
tells the data class whether or not to visit episodes sequentially or
randomly when returning examples to the caller.
The contents of the ``((x, y, r, c, i), new_episode?)`` tuples returned by
the data loader is the following:
- ``x`` (str) is a query and possibly context
- ``y`` (iter) is an iterable of label(s) for that query
- ``r`` (str) is the str reward for getting that query correct
- ``c`` (iter) is an iterable of label candidates that the student can choose from
- ``i`` (str) is a str path to an image on disk, which will be loaded by the
data class at request-time. should always point to the raw image file.
    - ``new_episode?`` (bool) is a boolean value specifying whether that example
      is the start of a new episode. If you don't use episodes, set this
      to ``True`` every time.
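    For example, a data_loader could yield entries like the following (a
    sketch; the values are made up):
    .. code-block:: python
        def data_loader(path):
            yield ('Where is the milk?', ['kitchen'], '1',
                   ['hallway', 'kitchen', 'bathroom'], None), True
            yield ('Where is Sam?', ['hallway'], '1',
                   ['hallway', 'kitchen', 'bathroom'], None), False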
"""
def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
# in case we need to shard the dataset
self.rank = get_rank()
self.num_workers = num_workers()
self.is_distributed_and_is_eval = is_distributed() and any(
x in opt['datatype'] for x in ('valid', 'test', 'train:evalmode')
)
# self.data is a list of episodes
# each episode is a tuple of entries
# each entry is a tuple of values for the action/observation table
if shared:
self.image_loader = shared.get('image_loader', None)
self.data = shared.get('data', [])
self.cands = shared.get('cands', None)
else:
self.image_loader = ImageLoader(opt)
self.data = []
if 'datafile' not in opt:
raise KeyError(
ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
)
self._load(data_loader, opt['datafile'])
self.cands = None if cands is None else set(c for c in cands)
self.addedCands = []
self.copied_cands = False
def share(self):
"""
Share the data.
"""
shared = {
'data': self.data,
'cands': self.cands,
'image_loader': self.image_loader,
}
return shared
def _read_episode(self, data_loader):
"""
Read one episode at a time from the provided iterable over entries.
:param data_loader:
an iterable which returns tuples in the format described in the
class docstring.
"""
episode = []
for entry, new in data_loader:
if new and len(episode) > 0:
yield episode
episode = []
episode.append(entry)
if len(episode) > 0:
yield episode
def _load(self, data_loader, datafile):
"""
Load up data from an iterable over tuples described in the class docs.
:param iter data_loader:
an iterator which returns tuples in the format described in the
class docstring.
:param str datafile:
"""
for i, episode in enumerate(self._read_episode(data_loader(datafile))):
if not self.is_distributed_and_is_eval or i % self.num_workers == self.rank:
self.data.append(episode)
def num_episodes(self):
"""
Return number of episodes in the dataset.
"""
return len(self.data)
def num_examples(self):
"""
Return total number of entries available.
Each episode has at least one entry, but might have many more.
"""
if hasattr(self, '_num_examples_cache'):
return self._num_examples_cache
self._num_examples_cache = sum(len(episode) for episode in self.data)
return self._num_examples_cache
def get(self, episode_idx, entry_idx=0):
"""
Get the specified episode and the specified entry in that episode.
:param episode_idx:
which episode to return examples from
:param entry_idx:
which example to return from the episode. Many datasets have only
single-entry episodes, so this defaults to zero.
"""
if episode_idx >= len(self.data):
return Message.padding_example(), True
next_episode_idx_for_rank = episode_idx + 1
# first look up data
episode = self.data[episode_idx]
entry = episode[entry_idx]
episode_done = entry_idx == len(episode) - 1
end_of_data = episode_done and next_episode_idx_for_rank >= len(self.data)
# now pack it in a action-observation dictionary
table = self.build_table(entry)
# last entry in this episode
table['episode_done'] = episode_done
return table, end_of_data
def build_table(self, entry):
"""
Packs an entry into an action-observation dictionary.
:param entry: a tuple in the form described in the class docstring.
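        For instance (a sketch of the mapping implemented below):
        .. code-block:: python
            build_table(('Where is the milk?', 'kitchen', '1'))
            # -> {'text': 'Where is the milk?', 'labels': ('kitchen',),
            #     'reward': '1', ...}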
"""
if isinstance(entry, (dict, Message)):
            # the user already provided the fields directly
if 'eval_labels' in entry or 'eval_label' in entry:
raise KeyError(
'Labels are converted to eval_labels automatically. Please do not '
'set them in setup_data.'
)
if 'episode_done' in entry:
raise KeyError(
"episode_done is set automatically for you. Please don't set it "
"in setup_data."
)
if 'label' in entry:
# for convenience, rename to the labels convention automatically
label = entry.pop('label')
assert isinstance(label, str)
entry['labels'] = (label,)
if 'labels' in entry and isinstance(entry['labels'], str):
entry['labels'] = (entry['labels'],)
table = entry.copy()
        elif isinstance(entry, (tuple, list)):
table = {}
if entry[0] is not None:
table['text'] = entry[0]
if len(entry) > 1 and entry[1] is not None:
l = entry[1]
if isinstance(l, str):
l = (l,)
table['labels'] = l
if len(entry) > 2 and entry[2] is not None:
table['reward'] = entry[2]
if len(entry) > 3 and entry[3] is not None:
table['label_candidates'] = entry[3]
if len(entry) > 4 and entry[4] is not None:
img = self.image_loader.load(entry[4])
if img is not None:
table['image'] = img
else:
raise TypeError(
f"items out of setup_data should be dict, Message, list, or tuple. "
f"Got {type(entry)})"
)
if table.get('labels', None) is not None and self.cands is not None:
if self.addedCands:
# remove elements in addedCands
self.cands.difference_update(self.addedCands)
self.addedCands.clear()
for label in table['labels']:
if label not in self.cands:
# add labels, queue them for removal next time
if not self.copied_cands:
self.cands = self.cands.copy()
self.copied_cands = True
self.cands.add(label)
self.addedCands.append(label)
table['label_candidates'] = self.cands
if 'labels' in table and 'label_candidates' in table:
if table['labels'][0] not in table['label_candidates']:
raise RuntimeError('true label missing from candidate labels')
# go ahead and make it a message
if isinstance(table, dict):
table = Message(table)
return table
class StreamDialogData(DialogData):
"""
Provides a data structure for streaming textual dialog data.
This can be used whenever the dialog data follows the format described in
DialogData but cannot fit entirely into memory.
    The additional keyword argument ``cycle`` defines whether the stream should
    restart from the beginning after an epoch is finished (defaults to ``True``).
:param opt:
options to initialize the class
:param data_loader:
an iterable with each call returning a tuple in the form
``((x, y, r, c, i), new_episode?)`` where the ``x`` and ``new_episode``
fields are mandatory and other fields may be omitted or ``None``.
:param cands:
can be set to provide a list of candidate labels for every example in
this dataset, which the agent can choose from (the correct answer
should be in this set).
:param random:
tells the data class whether or not to visit episodes sequentially or
randomly when returning examples to the caller.
:param cycle:
(default True) whether to restart at beginning when end of stream
reached without reset being called.
"""
# represents that we haven't read in any data at all
_FIRST_PASS = None
# represents that we are out of data.
_END_OF_EPOCH = -1
def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
# super() call initiates stream in self.data by calling _load()
super().__init__(opt, data_loader, cands, shared, **kwargs)
self.cycle = kwargs['cycle'] if 'cycle' in kwargs else True
if shared:
# auxiliary instances hold pointer to main datastream in self.data
self.reset_data = shared['reset']
# Share datafile and data_loader for computing num_exs and num_eps
self.datafile = shared['datafile']
self.data_loader = shared['data_loader']
if 'lock' in shared:
self.lock = shared['lock']
else:
# main instance holds the stream and shares pointer to it
self.data_loader = data_loader
if 'datafile' not in opt:
raise KeyError(
ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
)
self.datafile = opt['datafile']
self.reset_data = None
self.is_reset = True
self.entry_idx = 0
self.cur_episode = self._FIRST_PASS
self.num_eps = None
self.num_exs = None
self.rank = get_rank()
self.num_workers = num_workers()
self.is_distributed_and_is_eval = (
self.num_workers > 1 and not DatatypeHelper.is_training(opt['datatype'])
)
def share(self):
"""
Share the stream.
"""
shared = super().share()
# also share reset method to allow datastream to be reset
shared['reset'] = self.reset
# share datafile and data for loading length if necessary
shared['datafile'] = self.datafile
shared['data_loader'] = self.data_loader
if hasattr(self, 'lock'):
shared['lock'] = self.lock
return shared
def _load(self, data_loader, datafile):
"""
Load data generator into data field.
"""
self.data = self._data_generator(data_loader, datafile)
def _data_generator(self, data_loader, datafile):
"""
Generate data using the iterator over tuples constructed by data_loader.
"""
self.is_reset = False
idx = 0
while True:
for episode in self._read_episode(data_loader(datafile)):
# We only shard the data set at evaluation time, as training is
# done using sampling-with-replacement.
if not self.is_distributed_and_is_eval or (
idx % self.num_workers == self.rank
):
yield episode
idx += 1
while not self.cycle:
yield self._END_OF_EPOCH
def load_length(self):
"""
        Calculate the length of the dataset and cache it in a file.
Note that this can take some time for large datasets. Episode and entry indexes
cannot be specified during streaming.
"""
datafiles = self.datafile if type(self.datafile) is tuple else [self.datafile]
length_file = datafiles[0] + ".lengths"
if not PathManager.exists(length_file):
num_eps = 0
num_exs = 0
for episode in self._read_episode(self.data_loader(self.datafile)):
num_eps += 1
num_exs += len(episode)
with PathManager.open(length_file, 'w', encoding="utf-8") as f:
f.write("{}\n{}".format(num_eps, num_exs))
else:
with PathManager.open(length_file, 'r', encoding='utf-8') as f:
num_eps, num_exs = f.readlines()
return int(num_eps), int(num_exs)
def num_examples(self):
"""
Return the number of examples in the data.
"""
if not self.num_exs:
self.num_eps, self.num_exs = self.load_length()
return self.num_exs
def num_episodes(self):
"""
Return the number of episodes in the data.
"""
if not self.num_eps:
self.num_eps, self.num_exs = self.load_length()
return self.num_eps
def get(self):
"""
Get the next entry from the stream.
When episode is done returns first entry of next episode.
"""
if self.cur_episode is self._FIRST_PASS:
# first go around, always read off the episode
# maybe lock this line
self.cur_episode = next(self.data)
if self.cur_episode == self._END_OF_EPOCH:
# we're done here
return Message.padding_example(), True
entry = self.cur_episode[self.entry_idx]
table = self.build_table(entry)
episode_done = self.entry_idx == len(self.cur_episode) - 1
table['episode_done'] = episode_done
if episode_done:
# maybe lock this line
self.cur_episode = next(self.data)
self.entry_idx = 0
else:
self.entry_idx += 1
return table, self.cur_episode == self._END_OF_EPOCH
def reset(self):
"""
Reset the datastream to its beginning.
"""
if self.reset_data is not None:
# auxiliary instance, reset main datastream
self.data = self.reset_data()
elif not self.is_reset:
# if main instance is not reset, reset datastream
self._load(self.data_loader, self.datafile)
self.is_reset = True
self.entry_idx = 0
self.cur_episode = self._FIRST_PASS
return self.data
class FbDeprecatedDialogTeacher(DialogTeacher):
"""
This module provides access to data in the Facebook Dialog format.
Subclasses ``DialogTeacher`` for functionality and provides an
implementation of ``setup_data()`` which iterates over datasets in the
"fbdialog" format. If your data is in the format below, use this class to
handle file parsing for you.
The way FB Dialog data is set up is as follows:
::
1 Sam went to the kitchen.
2 Pat gave Sam the milk.
3 Where is the milk?<TAB>kitchen<TAB>1<TAB>hallway|kitchen|bathroom
4 Sam went to the hallway.
5 Pat went to the bathroom.
6 Where is the milk?<TAB>hallway<TAB>1<TAB>hallway|kitchen|bathroom
Lines 1-6 represent a single episode, with two different examples: the
first example is lines 1-3, and the second is lines 4-6.
Lines 1,2,4, and 5 represent contextual information.
Lines 3 and 6 contain a query, a label, a reward for getting the question
correct, and three label candidates.
Since both of these examples are part of the same episode, the information
provided in the first example is relevant to the query in the second
example and therefore the agent must remember the first example in order to
do well.
    In general, dialog in this format can contain any speech, not just QA pairs:
::
1 Hi how's it going?<TAB>It's going great. What's new?
2 Well I'm working on a new project at work.<TAB>Oh me too!
3 Oh cool!<TAB>Tell me about yours.
etc.
Note that dialogs are interpreted as being one-way. For example, consider
this dialog:
::
1 X1 Y1
2 X2 Y2
3 X3 Y3
A set of examples X1 => Y1, X2 => Y2, and X3 => Y3 will be generated.
However, Y1 => X2 and Y2 => X3 are not created as separate examples by
default. This makes sense for some data (we don't need to train on the idea
that "kitchen" should be followed by "Sam went to the hallway..." above),
but for other datasets it may be helpful to add additional examples in the
reverse direction ("Oh cool!" is a response to "Oh me too!" above).
"""
def __init__(self, opt, shared=None):
self.opt = opt
self.cloze = opt.get('cloze', False)
if shared and 'cands' in shared:
self.cands = shared['cands']
else:
self.cands = self.load_cands(opt.get('cands_datafile', None))
super().__init__(opt, shared)
def share(self):
"""
Share the data and candidates.
"""
shared = super().share()
shared['cands'] = self.cands
return shared
def label_candidates(self):
"""
Return the candidates.
"""
return self.cands
def load_cands(self, path):
"""
Load a global fixed set of candidates.
The candidates will be provided by the teacher for every example (the true
labels for a specific example are also added to this set, so that it's possible
to get the right answer).
"""
if path is None:
return None
cands = []
lines_have_ids = False
cands_are_replies = False
cnt = 0
with PathManager.open(path, encoding='utf-8') as read:
for line in read:
line = line.strip().replace('\\n', '\n')
if len(line) > 0:
cnt = cnt + 1
# If lines are numbered we strip them of numbers.
if cnt == 1 and line[0:2] == '1 ':
lines_have_ids = True
# If tabs then the label_candidates are all the replies.
if '\t' in line and not cands_are_replies:
cands_are_replies = True
cands = []
if lines_have_ids:
space_idx = line.find(' ')
line = line[space_idx + 1 :]
if cands_are_replies:
sp = line.split('\t')
if len(sp) > 1 and sp[1] != '':
cands.append(sp[1])
else:
cands.append(line)
else:
cands.append(line)
return cands
def setup_data(self, path):
r"""
Read data in the fbdialog format.
Returns ``((x,y,r,c), new_episode?)`` tuples.
``x`` represents a query, ``y`` represents the labels, ``r`` represents
any reward, and ``c`` represents any label_candidates.
The example above will be translated into the following tuples:
::
x: 'Sam went to the kitchen\nPat gave Sam the milk\nWhere is the milk?'
y: ['kitchen']
r: '1'
c: ['hallway', 'kitchen', 'bathroom']
new_episode = True (this is the first example in the episode)
::
x: 'Sam went to the hallway\\nPat went to the bathroom\\nWhere is the
milk?'
y: ['hallway']
r: '1'
c: ['hallway', 'kitchen', 'bathroom']
new_episode = False (this is the second example in the episode)
"""
logging.info(f"loading fbdialog data: {path}")
with PathManager.open(path, encoding='utf-8') as read:
start = True
x = ''
reward = 0
last_conv_id = None
for line in read:
line = line.strip().replace('\\n', '\n')
if len(line) == 0:
# empty response
continue
# first, get conversation index -- '1' means start of episode
space_idx = line.find(' ')
if space_idx == -1:
                    # no space found: the line is just the conv id (both
                    # utterances were empty/whitespace)
conv_id = int(line)
else:
conv_id = int(line[:space_idx])
# split line into constituent parts, if available:
# x<tab>y<tab>reward<tab>label_candidates
# where y, reward, and label_candidates are optional
split = line[space_idx + 1 :].split('\t')
# remove empty items and strip each one
for i in range(len(split)):
word = split[i].strip()
if len(word) == 0:
split[i] = ''
else:
split[i] = word
# Empty reward string same as None
if len(split) > 2 and split[2] == '':
split[2] = None
# now check if we're at a new episode
if last_conv_id is None or conv_id <= last_conv_id:
x = x.strip()
if x:
yield [x, None, reward], start
start = True
reward = 0
# start a new episode
if self.cloze:
x = 'Fill in the blank in the last sentence.\n{x}'.format(
x=split[0]
)
else:
x = split[0]
else:
if x:
# otherwise add current x to what we have so far
x = '{x}\n{next_x}'.format(x=x, next_x=split[0])
else:
x = split[0]
last_conv_id = conv_id
if len(split) > 2 and split[2]:
reward += float(split[2])
if len(split) > 1 and split[1]:
# only generate an example if we have a y
split[0] = x
# split labels
split[1] = split[1].split('|')
if len(split) > 3:
# split label_candidates
split[3] = split[3].split('|')
if len(split) > 2:
split[2] = reward
else:
split.append(reward)
if start:
yield split, True
start = False
else:
yield split, False
# reset x in case there is unlabeled data still left
x = ''
reward = 0
if x:
yield [x, None, reward], start
class ParlAIDialogTeacher(FixedDialogTeacher):
"""
This module provides access to data in the ParlAI Text Dialog format.
Subclasses ``FixedDialogTeacher`` for functionality and provides an
implementation of ``setup_data()`` which iterates over datasets in the
"ParlAI text" format. If your data is in the format below, use this class to
handle file parsing for you.
The way the data is set up is as follows:
::
text:Sam went to the kitchen. <NEWL>
Pat gave Sam the milk. <NEWL>
Where is the milk? <TAB> labels:kitchen <TAB> reward:1
<TAB> label_candidates:hallway|kitchen|bathroom
text:Sam went to the hallway. <NEWL>
Pat went to the bathroom. <NEWL>
Where is the milk? <TAB> labels:hallway <TAB> reward:1
<TAB> label_candidates:hallway|kitchen|bathroom <TAB> episode_done:True
    Lines 1-2 represent a single episode, with a different example on each line.
    The lines contain a query, a label, a reward for getting the question
    correct, and three label candidates.
Since both of these examples are part of the same episode, the information
provided in the first example is relevant to the query in the second
example and therefore the agent must remember the first example in order to
do well.
    In general, dialog in this format can contain any speech, not just QA pairs:
::
text:Hi how's it going?<TAB>labels:It's going great. What's new?
text:Well I'm working on a new project at work.<TAB>labels:Oh me too!
text:Oh cool!<TAB>labels:Tell me about yours.
etc.
Note that dialogs are interpreted as being one-way. For example, consider
this dialog:
::
1 X1 Y1
2 X2 Y2
3 X3 Y3
A set of examples X1 => Y1, X2 => Y2, and X3 => Y3 will be generated.
However, Y1 => X2 and Y2 => X3 are not created as separate examples by
default. This makes sense for some data (we don't need to train on the idea
that "kitchen" should be followed by "Sam went to the hallway..." above),
but for other datasets it may be helpful to add additional examples in the
reverse direction ("Oh cool!" is a response to "Oh me too!" above).
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if not shared:
self.episodes = []
self.num_exs = 0
if opt.get('parlaidialogteacher_datafile') is not None:
self._setup_data(opt.get('parlaidialogteacher_datafile'))
else:
self.episodes = shared['episodes']
self.num_exs = sum(len(e) for e in self.episodes)
self.id = opt['task']
self.reset()
def share(self):
"""
Share the episodes.
"""
shared = super().share()
shared['episodes'] = self.episodes
return shared
def num_examples(self):
"""
Return the number of examples from the data.
"""
return self.num_exs
def num_episodes(self):
"""
Return the number of episodes from the data.
"""
return len(self.episodes)
def get(self, episode_idx, entry_idx=None):
"""
Get a specific example from the dataset.
"""
return self.episodes[episode_idx][entry_idx]
def _setup_data(self, path):
logging.info(f"Loading ParlAI text data: {path}")
self.episodes = []
self.num_exs = 0
eps = []
with PathManager.open(path, newline='\n', encoding='utf-8') as read:
for line_no, line in enumerate(read, 1):
msg = str_to_msg(line.rstrip('\n'))
if msg and 'eval_labels' in msg:
raise ValueError(
f"It looks like you've written eval_labels as a key in your "
f"data file. This is not appropriate; labels will be converted "
f"for you automatically. This is happening on Line {line_no} "
f"in {path}. The line is:\n\t{line}"
)
if msg and 'text' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "text" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'labels' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "labels" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg:
self.num_exs += 1
eps.append(msg)
if msg.get('episode_done', False):
self.episodes.append(eps)
eps = []
if len(eps) > 0:
# add last episode
eps[-1].force_set('episode_done', True)
self.episodes.append(eps)
if len(self.episodes) == 1 and line_no > 100:
logging.error(
f'The data in {path} looks like one very long episode. If this '
f'is intentional, you may ignore this, but you MAY have a bug in '
f'your data.'
)
class YamlTeacher(DialogTeacher):
"""
Teacher which loads data generated by `parlai.utils.testing.AutoTeacherTest`.
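    The parsing logic in ``setup_data`` below assumes records shaped like the
    following (a sketch; real files are produced by AutoTeacherTest):
    .. code-block:: python
        # yaml.safe_load(f) is expected to return something like:
        {'acts': [[{'text': 'hello', 'labels': ['hi'], 'episode_done': True}]]}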
"""
def __init__(self, opt, shared=None):
# TODO: if we get rid of the streaming datafile num_episodes/num_examples
# cache then we can support streaming here. but for now let's
# just hardcode it
opt = opt.copy()
opt['datatype'] = opt['datatype'].replace(':stream', '')
super().__init__(opt, shared=shared)
def setup_data(self, datafile):
with PathManager.open(datafile) as f:
records = yaml.safe_load(f)
        next_episode_new = True
        for act in records['acts']:
            act = act[0]  # yaml wraps in a weird singleton list
            # an act's 'episode_done' marks the *following* act as the start
            # of a new episode, so delay it by one iteration
            episode_done = act.pop('episode_done')
            if 'eval_labels' in act:
                act['labels'] = act.pop('eval_labels')
            yield act, next_episode_new
            next_episode_new = episode_done
class ConversationTeacher(DialogTeacher):
"""
This module provides access to data in the Conversations format.
Subclasses ``DialogTeacher`` for functionality and provides an
implementation of ``setup_data()`` which iterates over datasets in the
"Conversations" format. If your data is in the format below, use this class to
handle file parsing for you.
The data should be set up so that each dialogue instance (or, episode)
occupies one line of valid JSON. The way the data is set up is as follows:
::
{ "dialog": [ [ {"id": "partner1", "text": "hello!"}, {"id": "partner2", "text": "hi back!"} ] ] }
NOTE: If the data is not on one line per dialogue, it will not load.
Further, note that by default, dialogs are interpreted as being one-way.
    For example, consider this dialog:
::
{
"dialog":[ [
{"id":"modelx", "text": X1},
{"id":"modely", "text": Y1},
{"id":"modelx", "text": X2},
{"id":"modely", "text": Y2},
{"id":"modelx", "text": X3},
{"id":"modely", "text": Y3},
] ]
}
(Note: we use line breaks for readability above, but this data will not load as
stated, it must be on one line.)
A set of examples X1 => Y1, X2 => Y2, and X3 => Y3 will be generated,
forming one episode. However, Y1 => X2 and Y2 => X3 are not created as
separate examples by default.
To change this behavior, you can set ``opt['label_turns']`` or ``--label-turns flag``.
The default value is 'secondspeaker' (i.e., the second speaker's utterances are
used as labels), but 'firstspeaker' and 'both' are also options. In the
case of 'both', two episodes are generated for each conversation.
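    For the dialogue above, ``--label-turns secondspeaker`` (the default)
    yields the pairs X1 => Y1, X2 => Y2, X3 => Y3, while ``firstspeaker``
    prepends a silence turn and yields __SILENCE__ => X1, Y1 => X2, Y2 => X3
    (see ``setup_data`` and ``_get_ep_from_turns`` below).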
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
agent = super().add_cmdline_args(parser, partial_opt)
agent.add_argument(
'--label-turns',
type=str,
help='which speaker to use as label',
choices=['firstspeaker', 'secondspeaker', 'both'],
default='secondspeaker',
)
return parser
def __init__(self, opt, shared=None):
if not opt.get('conversationteacher_datafile'):
raise RuntimeError('conversationteacher_datafile not specified')
opt = copy.deepcopy(opt)
opt['datafile'] = opt.get('conversationteacher_datafile')
self.label_turns = opt.get('label_turns')
super().__init__(opt, shared)
self.id = opt['task']
def _return_episode_examples(self, episode):
for idx, example in enumerate(episode):
episode_begin = idx == 0
if 'episode_done' in example:
example.pop('episode_done')
yield example, episode_begin
def setup_data(self, path):
logging.info(f"[loading data from json file into task: {path} ]")
conversations = Conversations(path)
for conv in conversations:
if conv.context:
warn_once(
'At least one of these conversations contains a context, which is not being used'
)
turns = [t for t in conv.turns if t.get('id') != 'context']
if len(turns) != len(conv.turns):
warn_once(
'At least one of these conversations contains a context within the dialogue, which is being discarded'
)
turns.insert(0, Message({'text': '__SILENCE__'}))
# train on odd turns as labels (turns w/ first speaker)
if self.label_turns in ['firstspeaker', 'both']:
eps = self._get_ep_from_turns(turns[::2], turns[1::2])
if eps:
for example, example_begins in self._return_episode_examples(eps):
yield example, example_begins
# train on even turns as labels (turns w/ second speaker)
if self.label_turns in ['secondspeaker', 'both']:
eps = self._get_ep_from_turns(turns[1::2], turns[2::2])
if eps:
for example, example_begins in self._return_episode_examples(eps):
yield example, example_begins
def _get_ep_from_turns(self, xturns, yturns):
eps = []
for xturn, yturn in zip(xturns, yturns):
turn = {}
turn['text'] = xturn.get('text').strip()
turn['labels'] = [yturn.get('text').strip()]
eps.append(turn)
return eps
class AbstractImageTeacher(FixedDialogTeacher):
"""
Abstract class to allow easier creation of image + dialogue tasks.
This class handles creating image features via ImageLoader if applicable
(resnet, resnext variants) or loading existing image features from a dict
path as per get_image_features_path().
Important methods and properties (override in subclass if needed):
- get_data_path(): where data file is found (default: <datapath>/<task>)
- get_image_path(): where images found (default: <datapath>/<task>/images)
- get_image_features_path(): dict of image features (default:
<datapath>/<task>/image_features)
- @property image_id_key: which key in data file objects represents image_id
- @property text_key: which key in data file objects represents text
Note: Assumes data files are named <dt>.json
@abstractmethod image_id_to_image_path() must be implemented in subclass
Example with the key defaults (but the keys can be customized):
.. code-block:: python
obs = {
'text': <caption>,
'image': <image features if specified else image>
}
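    A minimal subclass sketch (the class name and file layout are hypothetical):
    .. code-block:: python
        class MyImageTeacher(AbstractImageTeacher):
            def image_id_to_image_path(self, image_id):
                # images assumed stored as <image_path>/<image_id>.jpg
                return os.path.join(self.image_path, '%s.jpg' % image_id)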
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.opt = opt
self.task = opt['task'].split(':')[1] if ':' in opt['task'] else opt['task']
self.data_path = self.get_data_path(opt)
self.data = self.load_data(self.data_path, self.opt)
self.datatype = DatatypeHelper.fold(opt['datatype'])
# Example of available models: 'resnet152', 'resnext101_32x48d_wsl',
# and ImageLoader supports other resnet and resnext models too
# Raises an Exception if not valid
self._validate_image_mode_name(opt.get('image_mode'))
        # IMPORTANT NOTE: this teacher will be instantiated twice. The first
        # time is by build_dict, in which case image_mode is set to
        # 'no_image_model' to avoid calculating image features twice.
        self.image_mode = opt.get('image_mode')
        # Not using a default for the image_mode parameter b/c there is a
        # normalization (or bug) somewhere in build_dict that is setting it
        # to None
self.include_image = opt.get('image_mode') != 'no_image_model'
self.image_path = self.get_image_path(opt)
self.image_loader = None
self.image_features_dim = opt.get('image_features_dim')
self.blank_image_features = torch.FloatTensor(self.image_features_dim).fill_(0)
if shared and 'data' in shared:
self.data = shared['data']
self.image_loader = shared['image_loader']
if 'image_features_dict' in shared:
self.image_features_dict = shared['image_features_dict']
elif self.include_image:
self.setup_image_features(self.data_path)
else:
            # This will happen when building the dictionary; it is normal,
            # since build_dict disables the image mode
warn_once('AbstractImageTeacher self.include_image was False')
self.image_features_dict = None
# TODO: won't need this after we have proper logging levels set
self.__verbose = False
self.reset()
def get_available_image_mode_names(self):
"""
Available image model names.
        resnet and resnext variants are available from the ImageLoader. resnext101_XXXXX_wsl
is the open-sourced FB AI model (960m images, 1.5k hashtags, finetuned on
ImageNet).
"""
available_model_names = ImageLoader.get_available_model_names()
return ['no_image_model', 'raw', 'ascii'] + available_model_names
def _validate_image_mode_name(self, a):
"""
Validate the image_mode passed in.
Needed because image_mode used elsewhere in ParlAI is not always consistent with
what the image teacher allows.
"""
if not isinstance(a, str):
raise argparse.ArgumentTypeError(
'%s must be a string representing image model name' % a
)
available_model_names = self.get_available_image_mode_names()
if a not in available_model_names:
raise argparse.ArgumentTypeError(
'\"%s\" unknown image model name. Choose from: %s. Currently suggested resnet is resnet152 and resnext is resnext101_32x48d_wsl.'
% (a, available_model_names)
)
return a
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
# Be sure to call super() if overriding this method b/c
# AbstractImageTeacher has necessary params
parser = super().add_cmdline_args(parser, partial_opt)
agent = parser.add_argument_group('AbstractImageTeacher Arguments')
agent.add_argument(
'--image-path',
type=str,
default=None,
            help='Optional argument to specify where images for the dataset '
            'are stored if already downloaded. Most tasks will download the '
            'images to <datapath>/<task>_images/ if they are not present and '
            'this argument is not specified.',
)
agent.add_argument(
'--image-features-dim',
type=int,
default=2048,
help='Specify the size of image features Tensors.',
)
return parser
@property
def image_id_key(self):
"""
Which key in the input data dict objects uniquely identify each image.
Common image keys are "image_id" or "image_num". May be implemented by subclass.
"""
return 'image_id'
@property
def text_key(self):
"""
Which key in the input data dict objects identifies the text.
Common keys are "text" or "comment". May be implemented by subclass.
"""
return 'text'
@abstractmethod
def image_id_to_image_path(self, image_id):
"""
Get the path of the image on disk.
Must be implemented by subclass.
"""
pass
def get_data_path(self, opt):
"""
        Determine the path to the data file.
"""
task_name = opt['task'].split(':')[1] if ':' in opt['task'] else opt['task']
data_path = os.path.join(opt['datapath'], task_name)
return data_path
def get_image_path(self, opt):
"""
Return the path to the data directory and to the image directory.
Is based on opt fields: task, datatype (train, valid, test), datapath.
Subclass can override this.
"""
data_path = self.get_data_path(opt)
if opt.get('image_path', None):
image_path = opt['image_path']
else:
# other common choice: .join(opt['datapath'], task_name + '_images')
image_path = os.path.join(data_path, 'images')
return image_path
def get_image_features_path(self, task, image_model_name, dt):
"""
Image features for the dataset images are stored here.
Can be overridden in subclass to use custom paths. Image features can be
manually copied into this directory or in the case of ImageLoader eligible
models, they will be built and stored here if not already there.
"""
# In default implementation, self.data_path already has task name added
image_features_path = os.path.join(self.data_path, 'image_features')
PathManager.mkdirs(image_features_path)
return os.path.join(
image_features_path, '%s_%s_%s_features_dict' % (task, image_model_name, dt)
)
def is_image_mode_buildable(self, model_name):
"""
Is buildable if features can be calculated by ImageLoader.
Users may wish to compute features for the dataset offline and use in the model,
in which case, the image model should return False and get_image_features()
should be overridden in subclass.
"""
return model_name in ImageLoader.get_available_model_names()
def load_data(self, data_path, opt):
"""
        Load the data file, which is the index to the images and text.
        It is often a .json file named <datatype>.json (i.e. train.json).
        Stores the result in self.data.
        Can be overridden by a subclass.
"""
dt = DatatypeHelper.fold(opt['datatype'])
# Sometimes file is named "val" instead of "valid"
if dt not in ['train', 'valid', 'val', 'test']:
raise Exception(
                'Unknown dt parameter: %s. Expected "train", "valid"/"val", or "test".'
% dt
)
        # Assumes the file is named train.json, valid.json, etc.
data_file = os.path.join(self.data_path, '%s.json' % dt)
# Load the text data and image number indexes
with PathManager.open(data_file, encoding='utf-8') as f:
self.data = json.load(f)
if len(self.data) > 0 and self.image_id_key not in self.data[0]:
            # Data doesn't have an "image_id"-like field, so add each item's
            # index in the file to the data
for idx, d in enumerate(self.data):
d[self.image_id_key] = idx
return self.data
def setup_image_features(self, data_path):
"""
Load text and image data.
The image features all live in dicts by default in <data_path>/
image_features/ but get_image_features_path() above can be overridden by
subclass to put them elsewhere.
In the (very odd) case that the resnet or resnext dicts (models
buildable using ImageLoader) are not found, we build them.
"""
if self.image_mode in ['raw', 'ascii']:
self.image_features_dict = None
self.image_loader = ImageLoader(self.opt)
return
image_mode_features_dict_path = self.get_image_features_path(
self.task, self.image_mode, self.datatype
)
if PathManager.exists(image_mode_features_dict_path):
logging.info(
f'Loading existing image features dict for model: {self.image_mode} at: {image_mode_features_dict_path}'
)
with PathManager.open(image_mode_features_dict_path, 'rb') as f:
self.image_features_dict = torch.load(f, map_location='cpu')
else:
logging.warning('No existing image features, attempting to build.')
if self.is_image_mode_buildable(self.image_mode):
# TODO: Awkward to modify the input opt but needed to use
# TODO: ImageLoader functionality. Is from comment_battle,
# TODO: will refactor this at some point soon most likely
image_loader_opt = self.opt.copy()
image_loader_opt['image_mode'] = (
self.image_mode if self.include_image else 'no_image_model'
)
image_loader_opt['image_size'] = 256
image_loader_opt['image_cropsize'] = 224
self.image_loader = ImageLoader(image_loader_opt)
                # try to build with ImageLoader (i.e. resnet/resnext variants)
self.image_features_dict = self._build_image_features_dict(
self.data_path, self.datatype, image_mode_features_dict_path
)
else:
raise RuntimeError(
                    'Image model: %s is not buildable by ImageLoader and does '
                    'not already exist on disk as an image features dict for '
                    'this dataset.' % self.image_mode
)
def _build_image_features_dict(self, data_path, dt, store_dict_path):
"""
Build resne(x)t image features with ImageLoader.
(Or anything handleable by ImageLoader) and save to path. Only called if we
haven't already built the dict before.
"""
image_features_dict = {}
total = len(self.data)
import tqdm
pbar = tqdm.tqdm(
total=total,
unit='cand',
unit_scale=True,
desc='Building image features dict for %s with ImageLoader.'
% self.image_mode,
)
num = 0
for ex in self.data:
img_id = ex[self.image_id_key]
img_path = self.image_id_to_image_path(img_id)
image = self.image_loader.load(img_path).detach()
# spatial features are [1, image_dim, spatial_dim, spatial_dim] tensors.
# reduce non-spatial features to one-dimensional feature prior to saving.
if not self.image_loader.is_spatial(self.image_mode):
image = image[0, :, 0, 0]
image_features_dict[img_id] = image
num += 1
pbar.update(1)
if num % 1000 == 0:
logging.debug(f'Processing image index: {num}')
torch_utils.atomic_save(image_features_dict, store_dict_path)
return image_features_dict
def reset(self):
super().reset()
self.example = None
def num_episodes(self):
return self.num_examples()
def num_examples(self):
return len(self.data)
def get_image_features(self, example):
"""
Get image features for example.
Can be overridden in subclass for different behavior. For large datasets, it may
be more appropriate to use the ImageLoader.load() method to load image features
(as this is essentially streaming the features from disk, so that we do not have
        to load a large image feature dict in memory).
        TODO: this could be the default option if we are using -dt train:stream.
"""
if self.image_mode in ['raw', 'ascii']:
try:
image = self.image_loader.load(
self.image_id_to_image_path(example['image_id'])
)
except FileNotFoundError:
# No Image Here
image = None
return image
key = str(example[self.image_id_key])
if not self.include_image or key not in self.image_features_dict:
image_features = self.blank_image_features
else:
image_features = self.image_features_dict[key]
return image_features
def get(self, episode_idx, entry_idx=0):
"""
Override this in subclass if your data should be handled in a different format.
"""
example = self.data[episode_idx]
image_features = self.get_image_features(example)
return {
'labels': [example[self.text_key]],
'image': image_features,
'episode_idx': episode_idx,
'episode_done': True,
}
def share(self):
shared = super().share()
shared['data'] = self.data
shared['image_loader'] = self.image_loader
if hasattr(self, 'image_features_dict'):
shared['image_features_dict'] = self.image_features_dict
return shared
class MultiTaskTeacher(Teacher):
"""
MultiTaskTeacher which teaches multiple tasks.
Creates a teacher that is actually a set of teachers each based on a task
string -- each of these teachers will get called in turn,
either randomly or in order. They are all in the same world (they are the
same agent switching tasks).
The task string format is described for the ``create_task_agents()``
function above.
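    For example, ``opt['task'] = 'babi:task1k:1,squad'`` creates one teacher per
    comma-separated task, and ``--multitask-weights 2,1`` (or ``'stochastic'``)
    controls how often each subtask is sampled when shuffling.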
"""
def __init__(self, opt: Opt, shared=None):
self.tasks: List[Agent] = []
self.opt = opt
self.id = opt['task']
if shared and 'tasks' in shared:
self.tasks = [create_agent_from_shared(t) for t in shared['tasks']]
else:
tasks = opt['task'].split(',')
for k in tasks:
k = k.strip()
if k:
opt_singletask = copy.deepcopy(opt)
opt_singletask['task'] = k
self.tasks.extend(create_task_agent_from_taskname(opt_singletask))
self.task_idx = -1
self.new_task = True
self.random = DatatypeHelper.should_shuffle(opt['datatype'])
# Make multi-task task probabilities.
self.cum_task_weights = [1] * len(self.tasks)
self.task_choices = range(len(self.tasks))
weights = self.opt.get('multitask_weights', [1])
if weights == 'stochastic':
weights = [t.num_episodes() for t in self.tasks]
        running_total = 0
        for i in self.task_choices:
            if len(weights) > i:
                weight = weights[i]
            else:
                weight = 1
            self.cum_task_weights[i] = weight + running_total
            running_total += weight
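        # e.g. weights [2, 1, 1] produce cum_task_weights [2, 3, 4], so
        # random.choices(..., cum_weights=self.cum_task_weights) picks task 0
        # with probability 2/4 and tasks 1 and 2 with 1/4 each (see act()).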
def num_examples(self):
"""
Return the number of examples.
"""
if not hasattr(self, 'num_exs'):
# num_examples is sum of all examples in all tasks
tasks_num_exs = [t.num_examples() for t in self.tasks]
if any(num is None for num in tasks_num_exs):
self.num_exs = None
else:
self.num_exs = sum(tasks_num_exs)
return self.num_exs
def num_episodes(self):
"""
Return the number of episodes.
"""
if not hasattr(self, 'num_eps'):
# num_episodes is sum of all num_episodes in all tasks
tasks_num_eps = [t.num_episodes() for t in self.tasks]
if any(num is None for num in tasks_num_eps):
self.num_eps = None
else:
self.num_eps = sum(tasks_num_eps)
return self.num_eps
def observe(self, observation):
"""
Make an observation.
"""
return self.tasks[self.task_idx].observe(observation)
def act(self):
"""
Act on the previous observation.
"""
if self.new_task:
self.new_task = False
if self.random:
# select random teacher
self.task_idx = random.choices(
self.task_choices, cum_weights=self.cum_task_weights
)[0]
else:
# do at most one full loop looking for unfinished task
for _ in range(len(self.tasks)):
self.task_idx = (self.task_idx + 1) % len(self.tasks)
if not self.tasks[self.task_idx].epoch_done():
# if this task has examples ready, break
break
if self.tasks[self.task_idx].epoch_done():
# all tasks are done, so return empty action table
return Message.padding_example()
t = self.tasks[self.task_idx].act()
if t['episode_done']:
self.new_task = True
return t
def epoch_done(self):
"""
Return whether all subtasks are completed.
"""
for t in self.tasks:
if not t.epoch_done():
return False
return True
# return transformed metrics showing total examples and accuracy if avail.
def report(self):
"""
Report aggregated metrics across all subtasks.
"""
return aggregate_named_reports(
{t.getID(): t.report() for t in self.tasks},
micro_average=self.opt.get('aggregate_micro', False),
)
def reset(self):
"""
Reset all subtasks.
"""
for t in self.tasks:
t.reset()
def reset_metrics(self):
"""
Reset metrics for each subtask.
"""
for t in self.tasks:
t.reset_metrics()
def share(self):
"""
Shares this teacher by sharing each subtask.
"""
shared = {}
shared['class'] = type(self)
shared['opt'] = self.opt
shared['tasks'] = [t.share() for t in self.tasks]
return shared
def shutdown(self):
"""
Shutdown each agent.
"""
for t in self.tasks:
t.shutdown()
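# Usage sketch (hypothetical second task name; assumes a fully populated
# ParlAI opt): a comma-separated task string yields one sub-teacher per task.
#
#     opt['task'] = 'babi:Task1k:1,squad'
#     teacher = MultiTaskTeacher(opt)  # alternates or samples between tasks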
class ChunkTeacher(FixedDialogTeacher, ABC):
"""
Useful for loading large amounts of data.
Data is separated into chunks and loaded one chunk at a time. Loads the data off of
the main thread.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.buffersize = self.get_buffersize()
self.set_datasettings(opt)
# chunk teacher makes shuffling decisions based on training, but
# train:stream turns off shuffling in other teachers.
self.datatype = DatatypeHelper.strip_stream(opt['datatype'])
self.dws = int(self.opt.get('distributed_world_size', 1))
self.rank = int(self.opt.get('rank', 0))
self.bg_index = self.opt.get('background_index', None)
# If we're in training mode with --num-workers > 0, we will run the
# chunk teacher in single-threaded mode (self.threading is False). In
# this mode, we will block on chunk loading.
# If we're not using --num-workers, or we're in validation/testing, we
# _always_ run in normal threading mode, where chunk loading is pushed
# to a background thread. However, since Python threading is limited
# by the GIL, this only succeeds in backgrounding I/O.
# Potentially, in the future we may support --num-workers in validation,
# in which case we can get rid of one of these modes.
self.threading = not (opt.get('num_workers', 0) > 0 and self.is_train)
if not self.threading and opt.get('background_index') is None:
# don't start loading data on the main driver, we don't need it
opt['no_auto_enqueues'] = True
if not self.threading:
# if we're in single-threaded (background preprocessing) mode, we
# can't have a max queue size, or we will hang if we overfill it
self.buffersize = 0
if shared is not None:
self.is_root_teacher = False
self.chunks = shared['chunks']
self.samples = shared['samples']
self.reset_counter = shared['reset_counter']
self.rng = shared['rng']
self.tot_samples_loaded = shared['tot_samples_loaded']
else:
self.is_root_teacher = True
self.samples = queue.Queue(maxsize=self.buffersize)
self.chunks = queue.Queue()
self.reset_counter = SimpleCounter() # track no. of resets
if DatatypeHelper.should_shuffle(self.datatype):
# TODO: possibly need a fixed seed here in the future
self.rng = random.Random()
else:
self.rng = random.Random(42)
self._enqueue_chunks()
# launch queue loader on the main thread
self.tot_samples_loaded = defaultdict(int)
if not opt.get("no_auto_enqueues", False):
# we only enqueue the train thread because the reset() called at
# the top of training will handle this
self._enqueue_request()
self._episode_done = True
self.last_queue_output = None
def _get_data_folder(self):
if not self.opt.get('datafile'):
raise RuntimeError(
'Must specify datafile or override this function (_get_data_folder) '
'to return the data folder.'
)
return self.opt['datafile']
@abstractmethod
def get_num_samples(self, opt: Opt) -> Tuple[int, int]:
"""
[Abstract] Return the number of samples.
Returns a tuple of (num_examples, num_episodes) based on the data split.
"""
pass
@abstractmethod
def get_fold_chunks(self, opt: Opt) -> List[int]: # type: ignore
"""
[Abstract] Return a list of chunk IDs (integer).
Given the datatype (train/test/valid), return the list of chunk IDs that
correspond to that split.
"""
pass
def get_buffersize(self):
"""
Size of buffer.
Override this in your child class to change the buffer size.
"""
return 100000
def set_datasettings(self, opt: Opt):
self.folder = self._get_data_folder()
self.num_exs, self.num_eps = self.get_num_samples(opt)
self.fold_chunks = self.get_fold_chunks(opt)
self.is_train = DatatypeHelper.is_training(opt['datatype'])
def share(self):
shared = super().share()
shared['samples'] = self.samples
shared['chunks'] = self.chunks
shared['reset_counter'] = self.reset_counter
shared['rng'] = self.rng
shared['tot_samples_loaded'] = self.tot_samples_loaded
return shared
def _setup_data(self, datatype):
"""
Passthrough.
"""
pass
def num_episodes(self):
if self.is_train:
return self.num_eps
else:
return self.num_eps // self.dws + int((self.num_eps % self.dws) > self.rank)
def num_examples(self):
if self.is_train:
return self.num_exs
else:
return self.num_exs // self.dws + int((self.num_exs % self.dws) > self.rank)
def next_episode_idx(self):
# We don't actually track episodes in ChunkTeacher, we just blindly
# trust the queue. This hacks around FixedDialogTeacher's next_example
# check that the epoch is done.
return 0
def _enqueue_request(self):
"""
Queue a request for loading to the data loader.
"""
if self.threading:
self.data_loader.request_load(self.receive_data, self.get_chunk, ())
else:
self._process_data(self.get_chunk())
def receive_data(self, future):
"""
Receive loaded data and place it onto the sample queue.
:param future:
A Future object which will return a value from a call to get_chunk()
"""
return self._process_data(future.result())
def _process_data(self, output: Optional[Tuple[Any, int]]):
"""
Loads data.
Load data into self.samples until buffersize is reached.
:param output:
The output of an item from a call to get_chunk()
"""
if output is None:
return
chunk_output, chunk_reset_cnt = output
if chunk_output is None:
self.samples.put((None, chunk_reset_cnt))
return
if self.threading:
while chunk_output:
# self.samples is a queue with maxsize
# self.buffersize, so will block if the
# buffer gets full
sample = chunk_output.pop(0)
if (
self.is_train
or self.tot_samples_loaded[chunk_reset_cnt] % self.dws == self.rank
):
# log the reset count at the time the chunk was queued
self.samples.put((sample, chunk_reset_cnt))
self.tot_samples_loaded[chunk_reset_cnt] += 1
else:
# we're actually running in single processor mode so we'll just
# do a thread-unsafe hit of the python internals, which is much faster
# than trying to safely put things onto the queue
self.samples.queue.extend((co, chunk_reset_cnt) for co in chunk_output)
self.tot_samples_loaded[chunk_reset_cnt] += len(chunk_output)
if self.threading:
# and start loading the next chunk
self._enqueue_request()
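# A tiny stdlib sketch (not ParlAI code) of the bounded-queue behavior the
# threading branch above relies on: put() blocks once maxsize is reached.
#
#     import queue
#     q = queue.Queue(maxsize=2)
#     q.put('a')
#     q.put('b')  # buffer is now full
#     # q.put('c') would block here until a consumer calls q.get()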
def _enqueue_chunks(self):
"""
Shuffles and queues fold chunks for loading.
"""
if DatatypeHelper.should_shuffle(self.datatype):
self.rng.shuffle(self.fold_chunks)
# save the reset count at the time a chunk was queued
reset_cnt = self.reset_counter.value()
for c in self.fold_chunks:
self.chunks.put((c, reset_cnt))
self.chunks.put((None, reset_cnt))
# gross hack: in training models, when we get to validation, we enqueue
# a request in the constructor, followed by another enqueue from a
# reset immediately after. If the former is already running, we'll end
# up with one too many calls to get_chunk and block on termination.
# That's what I refer to as "losing" the race condition. If you look in
# get_chunk, you'll also find the spot where we "win" the race
# condition.
self.chunks.put((None, reset_cnt))
@abstractmethod
def load_from_chunk(self, chunk_idx: int) -> List[ChunkOutput]:
"""
[Abstract] Given the chunk index, load examples from that chunk.
Return a list of tuples. The function `_create_message` will take these tuples
to form the Message object that is returned by the teacher.
"""
pass
@abstractmethod
def create_message(self, queue_output: ChunkOutput, entry_idx=0) -> 'Message':
"""
[Abstract] Given the tuple output of the queue, return an act.
May depend on entry index if queue output is a multi-turn episode.
"""
pass
def get_chunk(self):
"""
Refill the buffer.
"""
next_chunk, chunk_reset_cnt = self.chunks.get()
if next_chunk is None:
if DatatypeHelper.should_cycle(self.datatype):
# start putting chunks back onto the queue
self._enqueue_chunks()
next_chunk, chunk_reset_cnt = self.chunks.get()
if next_chunk is None:
# See the race condition described around "gross hack" in
# _enqueue_chunks. if we win the race condition, then
# catch it here
next_chunk, chunk_reset_cnt = self.chunks.get()
else:
# if we're in valid/test, we need to actually signal the end
return (None, chunk_reset_cnt)
# abstract method `load_from_chunk` returns a list of tuples
output = self.load_from_chunk(next_chunk)
if DatatypeHelper.should_shuffle(self.datatype):
# randomize the samples
random.Random().shuffle(output)
return output, chunk_reset_cnt
def next_example(self):
# next_example will always fail to provide useful signal on whether
# we're at the end of an epoch in chunk teacher. Instead, the queue
# empties and we simply start outputting pads forever. As such, we'll
# measure epochs when we start receiving only pads.
# (This isn't relevant for the training loop, which loops forever and
# never ends an epoch.)
retval, fake_epoch_done = super().next_example()
real_epoch_done = retval.is_padding()
return retval, real_epoch_done
def get(self, episode_idx, entry_idx=0):
if not self.threading and self.samples.empty():
self._enqueue_request()
curr_reset_cnt = self.reset_counter.value()
if self._episode_done:
# Get the next episode or example
queue_output, reset_cnt = self.samples.get()
stale_exs = 0
while curr_reset_cnt > reset_cnt:
stale_exs += 1
queue_output, reset_cnt = self.samples.get()
if stale_exs > 0:
logging.debug(f"Removed {stale_exs} stale examples from the queue.")
if queue_output is None:
self.samples.put((None, reset_cnt))
return Message.padding_example()
# Update the last queue output in the case
# of multi-turn episodes
self.last_queue_output = queue_output
# create a Message object from the queue output
msg = self.create_message(self.last_queue_output, entry_idx)
self._episode_done = msg['episode_done']
return msg
def _drain(self, q):
while not q.empty():
try:
q.get()
except queue.Empty:
return
def reset(self):
super().reset()
if self.is_root_teacher:
self.reset_counter.increment()
# drain the queues and refill the chunk queue with a new epoch.
# additionally, we have to relaunch the loader
self._drain(self.samples)
self._drain(self.chunks)
self._enqueue_chunks()
self.tot_samples_loaded.clear() # reset the count of samples loaded
if self.threading:
self._enqueue_request()
def shutdown(self):
# Time to wrap up. We should rush out to the worker and tell them
# that they're "done" processing data.
# same signal as end of epoch
self.chunks.put((None, None))
self.chunks.put((None, None))
def _add_task_flags_to_agent_opt(agent, opt: Opt, flags):
"""
Handle task flags provided by the task name itself.
With this you can set specific opts with `-t task:flag=foo`.
"""
fl = flags.split(':')
task = []
for f in fl:
if '=' in f:
one_flag = f.split('=')
key = one_flag[0].replace('-', '_')
raw_value = one_flag[1].replace(';', ':')
# Convert to bool/int/float if necessary
if raw_value.lower() == 'true':
value = True
elif raw_value.lower() == 'false':
value = False
else:
try:
value = int(raw_value) # type: ignore
except ValueError:
try:
value = float(raw_value) # type: ignore
except ValueError:
value = raw_value # type: ignore
opt[key] = value
else:
task.append(f)
opt['task'] = ':'.join(task)
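# Worked example (hypothetical flag name): parsing 'babi:Task1k:flag=true'
# strips the flag from the task string and type-converts its value.
#
#     opt = {'task': 'babi:Task1k:flag=true'}
#     _add_task_flags_to_agent_opt(None, opt, opt['task'])
#     # now opt['flag'] is True and opt['task'] == 'babi:Task1k'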
def create_task_agent_from_taskname(opt: Opt):
"""
Create task agent(s) assuming the input ``task_dir:teacher_class``.
E.g. ``opt['task']`` is a shorthand path like ``babi:Task1k:1`` or ``#babi``, or a
complete path like ``parlai.tasks.babi.agents:Task1kTeacher:1``, which essentially
performs ``from parlai.tasks.babi.agents import Task1kTeacher`` with the parameter
``1`` in ``opt['task']`` to be used by the class ``Task1kTeacher``.
"""
if not opt.get('task'):
raise RuntimeError(
'No task specified. Please select a task with --task {task_name}.'
)
if ',' not in opt['task']:
# Single task
teacher_class = load_teacher_module(opt['task'])
_add_task_flags_to_agent_opt(teacher_class, opt, opt['task'])
task_agents = teacher_class(opt)
if not isinstance(task_agents, list):
task_agents = [task_agents]
return task_agents
else:
# Multitask teacher/agent
task_agents = MultiTaskTeacher(opt)
if not isinstance(task_agents, list):
task_agents = [task_agents]
return task_agents
| facebookresearch/ParlAI | parlai/core/teachers.py | Python | mit | 99,105 | ["VisIt"] | 6514023d33d948ae80e0bef4a87d77a1b23868abbf27d899a90836de5f612605 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
from paddle.utils import gast
from collections import defaultdict
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import NodeVarType
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import generate_name_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_attribute_full_name
from paddle.fluid.dygraph.dygraph_to_static.utils import ForLoopTuplePreTransformer
from paddle.fluid.dygraph.dygraph_to_static.utils import ForNodeVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import RenameTransformer
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node
__all__ = ['LoopTransformer', 'NameVisitor']
WHILE_CONDITION_PREFIX = 'while_condition'
WHILE_BODY_PREFIX = 'while_body'
FOR_CONDITION_PREFIX = 'for_loop_condition'
FOR_BODY_PREFIX = 'for_loop_body'
GENERATE_VARIABLE_PREFIX = 'generate_variable'
ATTRIBUTE_VARIABLE_PREFIX = '__attribute_variable'
def create_while_nodes(condition_name, body_name, loop_var_names):
"""
Returns a list of gast.Node which represents the call to the Paddle
control-flow while_loop.
Usually, the list contains just one statement, such as:
[a, b, c] = paddle.jit.dy2static.convert_while_loop(
condition_name, body_name, [a, b, c])
where a, b, c are in loop_var_names.
However, if loop_var_names contains a property such as foo.x, we cannot
assign the property as an output of convert_while_loop, because a Python
property may be a read-only attribute. To handle that case, we replace
the attributes that are outputs of convert_while_loop with generated
variables; then, if we find at runtime that the attribute is not read-only,
we assign the attribute. The created statements look like:
[a, b, __attribute_variable_1] = paddle.jit.dy2static.convert_while_loop(
condition_name, body_name, [a, b, foo.x])
if not isinstance(getattr(type(foo), x, None), property): foo.x = __attribute_variable_1
There can be more than one such statement, which is why the return type is
a list of gast.Node.
"""
# NOTE(liym27):
# It's better to parse source code into an AST node than to construct an AST
# node (with child nodes) by hand, because it is easy to get the node type
# wrong when building nodes manually.
#
# For example: loop_var_names = [a, b, foo.x], the type of `a` or `b` is gast.Name,
# but the type of `foo.x` is gast.Attribute.
unique_name_to_origin = {}
# We have to keep loop_var_names and assign_loop_var_names in the same
# order; a set has no order, so we convert it to a list.
loop_var_names = list(loop_var_names)
assign_loop_var_names = []
for name in loop_var_names:
if "." in name:
# name is an attribute variable such as foo.x
tmp_attr_name = unique_name.generate(ATTRIBUTE_VARIABLE_PREFIX)
unique_name_to_origin[tmp_attr_name] = name
assign_loop_var_names.append(tmp_attr_name)
else:
assign_loop_var_names.append(name)
while_func_name = "paddle.jit.dy2static.convert_while_loop"
while_node_str = "[{}] = {}({}, {}, [{}])".format(
",".join(assign_loop_var_names), while_func_name, condition_name,
body_name, ",".join(loop_var_names))
while_node = gast.parse(while_node_str).body[0]
ret = [while_node]
for tmp_attr_name in unique_name_to_origin:
origin_attr_var = unique_name_to_origin[tmp_attr_name]
dot_pos = origin_attr_var.rindex(".")
obj_name = origin_attr_var[0:dot_pos]
attr_name = origin_attr_var[dot_pos + 1:]
assign_if_not_prop_str = "if not isinstance(getattr(type({}), '{}', None), property): {} = {}".format(
obj_name, attr_name, origin_attr_var, tmp_attr_name)
assign_if_not_prop_node = gast.parse(assign_if_not_prop_str).body[0]
ret.append(assign_if_not_prop_node)
return ret
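# Illustrative sketch: create_while_nodes('cond_fn', 'body_fn', {'i', 'x'})
# returns AST nodes that unparse to roughly
#
#     [i,x] = paddle.jit.dy2static.convert_while_loop(cond_fn, body_fn, [i,x])
#
# plus one guarded assignment per attribute name (like foo.x) appended after
# the call, as described in the docstring above.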
class NameVisitor(gast.NodeVisitor):
'''
Analyze name liveness for the loop transformer.
'''
def __init__(self, root_node):
# Set of gast.Name or gast.Attribute for variables
self.current_seen_vars = set()
# List of gast.While/gast.For nodes
self.current_loop = []
# List of nodes that have scope of variables.
self.nodes_with_scope = []
self.blacklist_names = {"False", "True", "None"}
# Mapping from gast.While/gast.For to variable nodes
self.before_loop_body_vars = defaultdict(set)
# NOTE: Use ordered list as dict value
self.in_loop_vars = defaultdict(list)
# Mapping from gast.While/gast.For to variable nodes which are the loop
# condition or are modified during the loop
self.write_in_loop = defaultdict(set)
self.condition_vars = defaultdict(set)
self.in_condition = False
# Some names are types, we shouldn't record them as loop var names.
self.type_vars = set()
self.static_analysis_visitor = StaticAnalysisVisitor(root_node)
self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map(
)
self.visit(root_node)
def get_loop_var_names(self, node):
assert isinstance(
node, (gast.While, gast.For)), "Input node is not gast loop node"
loop_var_names = set()
create_var_names = set()
read_context = {type(gast.Load()), type(gast.AugLoad())}
in_loop_vars_list = self.in_loop_vars[node]
# get dict `var_name_to_ctxs`
var_name_to_ctxs = defaultdict(list)
for var_node in in_loop_vars_list:
var_name_to_ctxs[self._var_node_to_name(var_node)].append(
var_node.ctx)
in_loop_vars = set(in_loop_vars_list)
in_loop_vars = self._remove_unnecessary_vars(in_loop_vars, node)
in_loop_name_strs = self._var_nodes_to_names(in_loop_vars)
before_loop_body_vars = self.before_loop_body_vars[node]
before_loop_body_vars = self._remove_unnecessary_vars(
before_loop_body_vars, node)
before_loop_name_strs = self._var_nodes_to_names(before_loop_body_vars)
after_loop_vars = self.current_seen_vars - before_loop_body_vars - in_loop_vars
after_loop_vars = self._remove_unnecessary_vars(after_loop_vars, node)
after_loop_name_strs = self._var_nodes_to_names(after_loop_vars,
read_context)
condition_vars = self.condition_vars[node]
condition_names = self._var_nodes_to_names(condition_vars)
write_vars = self.write_in_loop[node]
write_names = self._var_nodes_to_names(write_vars)
name_to_type = {}
for var in in_loop_vars:
wrapper = self.node_to_wrapper_map[var]
name_to_type[self._var_node_to_name(var)] = wrapper.node_var_type
for name in in_loop_name_strs:
if name in before_loop_name_strs:
# If a variable is used in the loop and created before the loop:
# when it is a basic-type variable that is read-only and not a
# condition var, it need not be a loop var; otherwise it should
# be a loop var, as an input
if (not name in condition_names) and (
not name in write_names
) and self._node_var_type_is_basic(name_to_type[name]):
continue
loop_var_names.add(name)
elif name in after_loop_name_strs:
# If a variable is created in the while loop and read after the
# loop, it should be a loop var and we should create it, because
# a name in after_loop_name_strs must be initialized inside the
# loop. Such a var is write-only within the loop, so we don't
# have to filter read-only basic vars out
loop_var_names.add(name)
create_var_names.add(name)
else:
# If a variable is used and created in loop, but used before created,
# it should be in loop_var and we should create it.
# For example, `var_a` should be in loop_var and we should create it.
#
# res = 0
# for i, x in enumerate(x_array):
# if i > 2:
# x = func1(var_a)
# var_a = func2(x)
#
is_created = False
for ctx in var_name_to_ctxs[name]:
if isinstance(ctx, gast.Store):
is_created = True
if isinstance(var_name_to_ctxs[name][0],
gast.Load) and is_created:
loop_var_names.add(name)
create_var_names.add(name)
return loop_var_names, create_var_names
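# Illustrative sketch of the analysis above: for
#
#     x = 0
#     while x < 10:
#         x = x + 1
#
# `x` is a condition var and is written in the loop body, so it lands in
# loop_var_names; nothing is first created inside the loop, so
# create_var_names stays empty.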
def visit_Name(self, node):
if self._is_call_func_name_node(node):
self.generic_visit(node)
return
if node.id in self.blacklist_names:
self.generic_visit(node)
return
self.current_seen_vars.add(node)
write_context = {
type(gast.Store()), type(gast.AugStore()), type(gast.Del())
}
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].append(node)
if type(node.ctx) in write_context:
self.write_in_loop[loop_node].add(node)
if self.in_condition:
self.condition_vars[loop_node].add(node)
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.nodes_with_scope.append(node)
self.blacklist_names.add(node.name)
# The variables in the function are not visible to the outside scope.
before_func_seen_vars = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.nodes_with_scope.pop()
# After exiting the scope of the node, variables in this scope
# should be removed from self.current_seen_vars.
if self.nodes_with_scope:
self.current_seen_vars = before_func_seen_vars
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
ret = visitor(node)
return ret
def visit_Attribute(self, node):
if self._is_call_func_name_node(node):
return
attr_full_name = get_attribute_full_name(node)
# Instance attributes such as `self.x` are not allowed to appear in
# the argument list of a function defined inside a class method in Python.
"""
def class_func(self):
def while_loop_body(self.x, y) # `self.x` is illegal.
"""
# TODO: if the loop does modify a variable like `self.var`, we need a
# better way to deal with this case.
if attr_full_name.startswith("self."):
return
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].append(node)
# sub-nodes are visited during get_attribute_full_name and we shouldn't
# visit again
def visit_For(self, node):
self.current_loop.append(node)
self.in_condition = True
self.visit(node.target)
self.visit(node.iter)
self.in_condition = False
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def visit_While(self, node):
self.current_loop.append(node)
self.in_condition = True
self.visit(node.test)
self.in_condition = False
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def visit_Call(self, node):
# Store type-var names, as in "isinstance(x, some_type_names)", and
# remove them later
if isinstance(node.func, gast.Name) and node.func.id == 'isinstance':
type_node = node.args[1]
if isinstance(type_node, gast.Tuple):
for element in type_node.elts:
self.type_vars.add(ast_to_source_code(element).strip())
else:
self.type_vars.add(ast_to_source_code(type_node).strip())
self.generic_visit(node)
def _var_nodes_to_names(self, node_set, ctx_filter_set=None):
ret = set()
for node in node_set:
if ctx_filter_set is None or type(node.ctx) in ctx_filter_set:
ret.add(self._var_node_to_name(node))
return ret
def _var_node_to_name(self, node):
if isinstance(node, gast.Name):
return node.id
elif isinstance(node, gast.Attribute):
return get_attribute_full_name(node)
def _node_var_type_is_basic(self, node_var_type):
basic_types = {
NodeVarType.BOOLEAN, NodeVarType.INT, NodeVarType.FLOAT,
NodeVarType.STRING
}
for t in node_var_type:
if t in basic_types:
return True
return False
def _is_call_func_name_node(self, node):
parent_node = self._get_parent_node(node)
if isinstance(parent_node, gast.Call) and parent_node.func == node:
return True
return False
def _is_ancestor_node(self, ancestor_node, node):
parent_node = self._get_parent_node(node)
while parent_node is not None:
if parent_node == ancestor_node:
return True
parent_node = self._get_parent_node(parent_node)
return False
def _get_parent_node(self, node):
wrapper_node = self.node_to_wrapper_map.get(node)
if wrapper_node:
if wrapper_node.parent:
parent_node = wrapper_node.parent.node
return parent_node
return None
def _remove_unnecessary_vars(self, loop_vars, loop_node):
"""
Remove unnecessary vars from before_loop_vars, after_loop_vars, or in_loop_vars relating to loop_node.
1. Remove target vars of gast.For from before_loop_vars or after_loop_vars.
2. Remove vars only in gast.comprehension.
3. Remove vars that are type names, for example: "isinstance(x, var_type_name)"
:param loop_vars: before_loop_vars, after_loop_vars or in_loop_vars of loop_node.
:param loop_node: Current loop node.
"""
def filter_name_nodes_from(root_node, target_var_names):
"""
Filter gast.Name children from root_node (inclusively).
"""
name_nodes = set()
if isinstance(root_node, gast.Name):
if root_node.id in target_var_names:
name_nodes.add(root_node)
for child_node in gast.walk(root_node):
if isinstance(child_node, gast.Name):
if child_node.id in target_var_names:
name_nodes.add(child_node)
return name_nodes
vars_of_list_generator = set()
target_vars_of_for_node = set()
for name_node in loop_vars:
if not isinstance(name_node, gast.Name):
continue
parent_node = self._get_parent_node(name_node)
# NOTE: gast.For.target or gast.comprehension.target can be gast.Tuple.
# For examples:
# 1) `for i, j in enumerate(x)` has two target vars: i and j
# 2) `[x for x,y in array]` has two target vars: x and y
if isinstance(parent_node, gast.Tuple):
parent_node = self._get_parent_node(parent_node)
# 1. Get vars only in gast.comprehension.
# For examples:
# 1) [x for x,y in array] -> x, x, y
# 2) [f(x) for x in array] -> x
# 3) [func(x, y) for x in array] -> x, x
if isinstance(parent_node, gast.comprehension):
# 1.1 target vars in list/set comprehensions
target_node = parent_node.target
if isinstance(target_node, gast.Tuple):
target_vars = target_node.elts
else:
target_vars = [target_node]
vars_of_list_generator = vars_of_list_generator | set(
target_vars)
# 1.2 vars from target vars used in elt_node
target_var_names = {var.id for var in target_vars}
comp_node = self._get_parent_node(parent_node)
elt_nodes = []
if isinstance(comp_node, gast.ListComp):
elt_nodes.append(comp_node.elt)
elif isinstance(comp_node, gast.DictComp):
elt_nodes.extend([comp_node.key, comp_node.value])
for node in elt_nodes:
vars_of_list_generator |= filter_name_nodes_from(
node, target_var_names)
# 2. Get target vars or vars from target vars used in for-loop but the for-loop is
# 1) not the "loop_node" itself
# 2) not the ancestor of the "loop_node"
#
# For example:
# for k in range(x): # if it's this "loop_node", i and j should both be target vars.
#     # do something
#
# for i in range(a): # if it's this "loop_node", k and j should be target vars, but i should not.
#     for j in range(a): # if it's this "loop_node", k should be a target var, but neither i nor j should.
# x = i+j
elif isinstance(parent_node, gast.For):
if parent_node is loop_node:
continue
if self._is_ancestor_node(parent_node, loop_node):
continue
# 2.1 target vars in gast.For node.
target_node = parent_node.target
if isinstance(target_node, gast.Tuple):
target_vars = target_node.elts
else:
target_vars = [target_node]
target_vars_of_for_node = target_vars_of_for_node | set(
target_vars)
# 2.2 vars from target vars used in for-loop
target_vars_name_strs = {var.id for var in target_vars_of_for_node}
for var in loop_vars:
if not isinstance(var, gast.Name):
continue
if var.id in target_vars_name_strs and var not in self.condition_vars[
loop_node]:
target_vars_of_for_node.add(var)
removed_vars = target_vars_of_for_node | vars_of_list_generator
# 3. Remove var type names which are stored in self.type_vars
for var in loop_vars:
if ast_to_source_code(var).strip() in self.type_vars:
removed_vars.add(var)
return loop_vars - removed_vars
class LoopTransformer(gast.NodeTransformer):
"""
This class transforms Python while/for statements into static graph AST nodes.
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of LoopTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
def transform(self):
ForLoopTuplePreTransformer(self.wrapper_root).transform()
self.name_visitor = NameVisitor(self.root)
self.visit(self.root)
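# Typical driver sketch (assuming `wrapper_root` is the AstNodeWrapper for a
# parsed function, e.g. as produced via StaticAnalysisVisitor):
#
#     transformer = LoopTransformer(wrapper_root)
#     transformer.transform()  # rewrites while/for nodes in place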
def visit(self, node):
self.generic_visit(node)
# All parent nodes that may contain gast.While/gast.For
if hasattr(node, 'body'):
self.replace_stmt_list(node.body)
if hasattr(node, 'orelse'):
self.replace_stmt_list(node.orelse)
return node
def replace_stmt_list(self, body_list):
if not isinstance(body_list, list):
return
i = 0
while i < len(body_list):
if isinstance(body_list[i], gast.While):
new_stmts = self.get_while_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
elif isinstance(body_list[i], gast.For):
new_stmts = self.get_for_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
else:
i += 1
def get_for_stmt_nodes(self, node):
# TODO: consider for-else in Python
# 1. get key statements for different cases
# NOTE 1: three key statements:
# 1). init_stmts: list[node], nodes that prepare the for loop; there may be more than one
# 2). cond_stmt: node, condition node that decides whether to continue the loop
# 3). body_stmts: list[node], updated loop body; sometimes we must change
# the original statements in the body, not just append new ones
#
# NOTE 2: The following `for` statements will be transformed to `while` statements:
# 1). for x in range(*)
# 2). for x in iter_var
# 3). for i, x in enumerate(*)
current_for_node_parser = ForNodeVisitor(node)
stmts_tuple = current_for_node_parser.parse()
if stmts_tuple is None:
return [node]
init_stmts, cond_stmt, body_stmts = stmts_tuple
# 2. get original loop vars
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
# NOTE: in the 'for x in var' or 'for i, x in enumerate(var)' cases,
# we need to append a new loop var & remove the useless loop var:
# 1. for x in var -> x is not needed
# 2. for i, x in enumerate(var) -> x is not needed
if current_for_node_parser.is_for_iter(
) or current_for_node_parser.is_for_enumerate_iter():
iter_var_name = current_for_node_parser.iter_var_name
iter_idx_name = current_for_node_parser.iter_idx_name
loop_var_names.add(iter_idx_name)
if iter_var_name not in create_var_names:
loop_var_names.remove(iter_var_name)
# 3. prepare result statement list
new_stmts = []
# Python can create a variable in a loop and use it outside the loop, e.g.
#
# for x in range(10):
#     y += x
# print(x) # x = 9
#
# We need to create static variables for those variables
for name in create_var_names:
if "." not in name:
new_stmts.append(create_static_variable_gast_node(name))
# 4. append init statements
new_stmts.extend(init_stmts)
# 5. create & append condition function node
condition_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_stmt)],
decorator_list=[],
returns=None,
type_comment=None)
for name in loop_var_names:
if "." in name:
rename_transformer = RenameTransformer(condition_func_node)
rename_transformer.rename(
name, unique_name.generate(GENERATE_VARIABLE_PREFIX))
new_stmts.append(condition_func_node)
# 6. create & append loop body function node
# append return values for loop body
body_stmts.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load(), gen_tuple_if_single=True)))
body_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=body_stmts,
decorator_list=[],
returns=None,
type_comment=None)
for name in loop_var_names:
if "." in name:
rename_transformer = RenameTransformer(body_func_node)
rename_transformer.rename(
name, unique_name.generate(GENERATE_VARIABLE_PREFIX))
new_stmts.append(body_func_node)
# 7. create & append while loop node
while_loop_nodes = create_while_nodes(
condition_func_node.name, body_func_node.name, loop_var_names)
new_stmts.extend(while_loop_nodes)
return new_stmts
def get_while_stmt_nodes(self, node):
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
# Python can create a variable in a loop and use it outside the loop, e.g.
#
# while x < 10:
#     x += 1
#     y = x
# z = y
#
# We need to create static variables for those variables
for name in create_var_names:
if "." not in name:
new_stmts.append(create_static_variable_gast_node(name))
condition_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=node.test)],
decorator_list=[],
returns=None,
type_comment=None)
for name in loop_var_names:
if "." in name:
rename_transformer = RenameTransformer(condition_func_node)
rename_transformer.rename(
name, unique_name.generate(GENERATE_VARIABLE_PREFIX))
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
for name in loop_var_names:
if "." in name:
rename_transformer = RenameTransformer(body_func_node)
rename_transformer.rename(
name, unique_name.generate(GENERATE_VARIABLE_PREFIX))
new_stmts.append(body_func_node)
while_loop_nodes = create_while_nodes(
condition_func_node.name, body_func_node.name, loop_var_names)
new_stmts.extend(while_loop_nodes)
return new_stmts
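# End-to-end sketch of the rewrite (hypothetical generated names): a loop like
#
#     while x < 10:
#         x += 1
#
# becomes, roughly,
#
#     def while_condition_0(x):
#         return x < 10
#
#     def while_body_0(x):
#         x += 1
#         return x
#
#     [x] = paddle.jit.dy2static.convert_while_loop(
#         while_condition_0, while_body_0, [x])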
| luotao1/Paddle | python/paddle/fluid/dygraph/dygraph_to_static/loop_transformer.py | Python | apache-2.0 | 29,117 | ["VisIt"] | e6db81d944f84fd1f35eef78c807bb9b1b5adbada35cfa5029360fbbc8ffbe22 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for dissociation curves of bimolecular complexes.
| Geometries and reference interaction energies from Grafova et al. JCTC 6 2365 (2010).
| Note that the S22by5-N-1.0 members are essentially the same geometries as S22-N (there's trivial round-off error) but the reference interaction energies for S22by5 are of lower quality than those of S22.
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
- ``'equilibrium'``
- ``'mol1'`` five-point (0.9, 1.0, 1.2, 1.5, 2.0) :math:`\\times R_{eq}` dissociation curve for molecule 1
- ...
- ``'mol22'`` five-point (0.9, 1.0, 1.2, 1.5, 2.0) :math:`\\times R_{eq}` dissociation curve for molecule 22
"""
import re
import qcdb
# <<< S22by5 Database Module >>>
dbse = 'S22by5'
# <<< Database Members >>>
mol1 = []
mol2 = []
mol3 = []
mol4 = []
mol5 = []
mol6 = []
mol7 = []
mol8 = []
mol9 = []
mol10 = []
mol11 = []
mol12 = []
mol13 = []
mol14 = []
mol15 = []
mol16 = []
mol17 = []
mol18 = []
mol19 = []
mol20 = []
mol21 = []
mol22 = []
dist = [0.9, 1.0, 1.2, 1.5, 2.0]
for d in dist:
mol1.append('1-' + str(d))
mol2.append('2-' + str(d))
mol3.append('3-' + str(d))
mol4.append('4-' + str(d))
mol5.append('5-' + str(d))
mol6.append('6-' + str(d))
mol7.append('7-' + str(d))
mol8.append('8-' + str(d))
mol9.append('9-' + str(d))
mol10.append('10-' + str(d))
mol11.append('11-' + str(d))
mol12.append('12-' + str(d))
mol13.append('13-' + str(d))
mol14.append('14-' + str(d))
mol15.append('15-' + str(d))
mol16.append('16-' + str(d))
mol17.append('17-' + str(d))
mol18.append('18-' + str(d))
mol19.append('19-' + str(d))
mol20.append('20-' + str(d))
mol21.append('21-' + str(d))
mol22.append('22-' + str(d))
temp = [mol1, mol2, mol3, mol4, mol5, mol6, mol7, mol8, mol9, mol10, mol11,
mol12, mol13, mol14, mol15, mol16, mol17, mol18, mol19, mol20, mol21, mol22]
HRXN = sum(temp, [])
HRXN_SM = ['1-0.9', '2-1.0', '8-1.5', '16-2.0']
HRXN_LG = ['15-0.9']
HRXN_EQ = []
for m in range(1, 23):
HRXN_EQ.append(str(m) + '-1.0')
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supramolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
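# Worked example: per the RXNM coefficients above, the counterpoise-corrected
# interaction energy for a reaction such as S22by5-1-1.0 is assembled as
#
#     E_int(CP) = E(dimer) - E(monoA-CP) - E(monoB-CP)
#
# while the uncorrected value substitutes the monoA/B-unCP reagents.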
# <<< Reference Values >>>
BIND = {}
BIND['%s-1-0.9' % (dbse)] = -2.41
BIND['%s-1-1.0' % (dbse)] = -3.14
BIND['%s-1-1.2' % (dbse)] = -2.36
BIND['%s-1-1.5' % (dbse)] = -1.11
BIND['%s-1-2.0' % (dbse)] = -0.36
BIND['%s-2-0.9' % (dbse)] = -4.32
BIND['%s-2-1.0' % (dbse)] = -4.97
BIND['%s-2-1.2' % (dbse)] = -4.04
BIND['%s-2-1.5' % (dbse)] = -2.29
BIND['%s-2-2.0' % (dbse)] = -0.96
BIND['%s-3-0.9' % (dbse)] = -16.34
BIND['%s-3-1.0' % (dbse)] = -18.59
BIND['%s-3-1.2' % (dbse)] = -15.62
BIND['%s-3-1.5' % (dbse)] = -9.24
BIND['%s-3-2.0' % (dbse)] = -3.63
BIND['%s-4-0.9' % (dbse)] = -14.14
BIND['%s-4-1.0' % (dbse)] = -15.95
BIND['%s-4-1.2' % (dbse)] = -13.40
BIND['%s-4-1.5' % (dbse)] = -8.10
BIND['%s-4-2.0' % (dbse)] = -3.51
BIND['%s-5-0.9' % (dbse)] = -18.73
BIND['%s-5-1.0' % (dbse)] = -20.46
BIND['%s-5-1.2' % (dbse)] = -17.16
BIND['%s-5-1.5' % (dbse)] = -10.46
BIND['%s-5-2.0' % (dbse)] = -4.58
BIND['%s-6-0.9' % (dbse)] = -15.13
BIND['%s-6-1.0' % (dbse)] = -16.70
BIND['%s-6-1.2' % (dbse)] = -13.93
BIND['%s-6-1.5' % (dbse)] = -8.18
BIND['%s-6-2.0' % (dbse)] = -3.26
BIND['%s-7-0.9' % (dbse)] = -15.02
BIND['%s-7-1.0' % (dbse)] = -16.37
BIND['%s-7-1.2' % (dbse)] = -13.30
BIND['%s-7-1.5' % (dbse)] = -7.43
BIND['%s-7-2.0' % (dbse)] = -2.59
BIND['%s-8-0.9' % (dbse)] = -0.34
BIND['%s-8-1.0' % (dbse)] = -0.53
BIND['%s-8-1.2' % (dbse)] = -0.25
BIND['%s-8-1.5' % (dbse)] = -0.06
BIND['%s-8-2.0' % (dbse)] = -0.01
BIND['%s-9-0.9' % (dbse)] = -0.68
BIND['%s-9-1.0' % (dbse)] = -1.48
BIND['%s-9-1.2' % (dbse)] = -0.81
BIND['%s-9-1.5' % (dbse)] = -0.20
BIND['%s-9-2.0' % (dbse)] = -0.03
BIND['%s-10-0.9' % (dbse)] = -1.09
BIND['%s-10-1.0' % (dbse)] = -1.50
BIND['%s-10-1.2' % (dbse)] = -1.13
BIND['%s-10-1.5' % (dbse)] = -0.48
BIND['%s-10-2.0' % (dbse)] = -0.12
BIND['%s-11-0.9' % (dbse)] = -0.15
BIND['%s-11-1.0' % (dbse)] = -2.81
BIND['%s-11-1.2' % (dbse)] = -1.92
BIND['%s-11-1.5' % (dbse)] = -0.53
BIND['%s-11-2.0' % (dbse)] = -0.07
BIND['%s-12-0.9' % (dbse)] = -1.69
BIND['%s-12-1.0' % (dbse)] = -4.51
BIND['%s-12-1.2' % (dbse)] = -3.02
BIND['%s-12-1.5' % (dbse)] = -0.98
BIND['%s-12-2.0' % (dbse)] = -0.19
BIND['%s-13-0.9' % (dbse)] = -6.76
BIND['%s-13-1.0' % (dbse)] = -9.87
BIND['%s-13-1.2' % (dbse)] = -6.26
BIND['%s-13-1.5' % (dbse)] = -2.42
BIND['%s-13-2.0' % (dbse)] = -0.69
BIND['%s-14-0.9' % (dbse)] = -2.13
BIND['%s-14-1.0' % (dbse)] = -5.18
BIND['%s-14-1.2' % (dbse)] = -3.61
BIND['%s-14-1.5' % (dbse)] = -1.08
BIND['%s-14-2.0' % (dbse)] = -0.10
BIND['%s-15-0.9' % (dbse)] = -7.99
BIND['%s-15-1.0' % (dbse)] = -12.22
BIND['%s-15-1.2' % (dbse)] = -8.23
BIND['%s-15-1.5' % (dbse)] = -3.25
BIND['%s-15-2.0' % (dbse)] = -0.92
BIND['%s-16-0.9' % (dbse)] = -1.17
BIND['%s-16-1.0' % (dbse)] = -1.49
BIND['%s-16-1.2' % (dbse)] = -1.08
BIND['%s-16-1.5' % (dbse)] = -0.49
BIND['%s-16-2.0' % (dbse)] = -0.15
BIND['%s-17-0.9' % (dbse)] = -3.01
BIND['%s-17-1.0' % (dbse)] = -3.27
BIND['%s-17-1.2' % (dbse)] = -2.47
BIND['%s-17-1.5' % (dbse)] = -1.30
BIND['%s-17-2.0' % (dbse)] = -0.49
BIND['%s-18-0.9' % (dbse)] = -2.04
BIND['%s-18-1.0' % (dbse)] = -2.35
BIND['%s-18-1.2' % (dbse)] = -1.75
BIND['%s-18-1.5' % (dbse)] = -0.85
BIND['%s-18-2.0' % (dbse)] = -0.28
BIND['%s-19-0.9' % (dbse)] = -4.02
BIND['%s-19-1.0' % (dbse)] = -4.52
BIND['%s-19-1.2' % (dbse)] = -3.68
BIND['%s-19-1.5' % (dbse)] = -2.09
BIND['%s-19-2.0' % (dbse)] = -0.85
BIND['%s-20-0.9' % (dbse)] = -2.20
BIND['%s-20-1.0' % (dbse)] = -2.80
BIND['%s-20-1.2' % (dbse)] = -2.25
BIND['%s-20-1.5' % (dbse)] = -1.12
BIND['%s-20-2.0' % (dbse)] = -0.35
BIND['%s-21-0.9' % (dbse)] = -4.99
BIND['%s-21-1.0' % (dbse)] = -5.74
BIND['%s-21-1.2' % (dbse)] = -4.88
BIND['%s-21-1.5' % (dbse)] = -2.80
BIND['%s-21-2.0' % (dbse)] = -1.10
BIND['%s-22-0.9' % (dbse)] = -6.42
BIND['%s-22-1.0' % (dbse)] = -7.05
BIND['%s-22-1.2' % (dbse)] = -5.79
BIND['%s-22-1.5' % (dbse)] = -3.41
BIND['%s-22-2.0' % (dbse)] = -1.38
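# Lookup sketch: reference interaction energies (kcal/mol) are keyed by
# '<dbse>-<molecule>-<distance>', e.g.
#
#     BIND['S22by5-1-1.0']  # -3.14, the ammonia dimer at 1.0 x Req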
# <<< Comment Lines >>>
TAGL = {}
rxnpattern = re.compile(r'^(.+)-(.+)$')
for item in mol1:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-1 Ammonia Dimer at %s Req, C2H' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Ammonia Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Ammonia from Ammonia Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Ammonia from Ammonia Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Ammonia from Ammonia Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Ammonia from Ammonia Dimer at %s Req' % (molname.group(2))
for item in mol2:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-2 Water Dimer at %s Req, CS' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Water Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Water from Water Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Water from Water Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Water from Water Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Water from Water Dimer at %s Req' % (molname.group(2))
for item in mol3:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-3 Formic Acid Dimer at %s Req, C2H' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Formic Acid Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Formic Acid from Formic Acid Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Formic Acid from Formic Acid Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Formic Acid from Formic Acid Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Formic Acid from Formic Acid Dimer at %s Req' % (molname.group(2))
for item in mol4:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-4 Formamide Dimer at %s Req, C2H' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Formamide Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Formamide from Formamide Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Formamide from Formamide Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Formamide from Formamide Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Formamide from Formamide Dimer at %s Req' % (molname.group(2))
for item in mol5:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-5 Uracil Dimer HB at %s Req, C2H' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Uracil Dimer HB at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Uracil from Uracil Dimer HB at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Uracil from Uracil Dimer HB at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Uracil from Uracil Dimer HB at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Uracil from Uracil Dimer HB at %s Req' % (molname.group(2))
for item in mol6:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-6 2-Pyridone-2-Aminopyridine Complex at %s Req, C1' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = '2-Pyridone-2-Aminopyridine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = '2-Pyridone from 2-Pyridone-2-Aminopyridine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = '2-Aminopyridine from 2-Pyridone-2-Aminopyridine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = '2-Pyridone from 2-Pyridone-2-Aminopyridine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = '2-Aminopyridine from 2-Pyridone-2-Aminopyridine Complex at %s Req' % (molname.group(2))
for item in mol7:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'HB-7 Adenine-Thymine Complex WC at %s Req, C1' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Adenine-Thymine Complex WC at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Adenine from Adenine-Thymine Complex WC at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Thymine from Adenine-Thymine Complex WC at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Adenine from Adenine-Thymine Complex WC at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Thymine from Adenine-Thymine Complex WC at %s Req' % (molname.group(2))
for item in mol8:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-1 Methane Dimer at %s Req, D3D' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Methane Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Methane from Methane Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Methane from Methane Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Methane from Methane Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Methane from Methane Dimer at %s Req' % (molname.group(2))
for item in mol9:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-2 Ethene Dimer at %s Req, D2D' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Ethene Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Ethene from Ethene Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Ethene from Ethene Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Ethene from Ethene Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Ethene from Ethene Dimer at %s Req' % (molname.group(2))
for item in mol10:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-3 Benzene-Methane Complex at %s Req, C3' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-Methane Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-Methane Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Methane from Benzene-Methane Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Benzene-Methane Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Methane from Benzene-Methane Complex at %s Req' % (molname.group(2))
for item in mol11:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-4 Benzene Dimer Parallel-Disp at %s Req, C2H' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene Dimer PD at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene Dimer PD at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Benzene from Benzene Dimer PD at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Benzene Dimer PD at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Benzene from Benzene Dimer PD at %s Req' % (molname.group(2))
for item in mol12:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-6 Pyrazine Dimer at %s Req, CS' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Pyrazine Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Pyrazine from Pyrazine Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Pyrazine from Pyrazine Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Pyrazine from Pyrazine Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Pyrazine from Pyrazine Dimer at %s Req' % (molname.group(2))
for item in mol13:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-5 Uracil Dimer Stack at %s Req, C2' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Uracil Dimer Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Uracil from Uracil Dimer Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Uracil from Uracil Dimer Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Uracil from Uracil Dimer Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Uracil from Uracil Dimer Stack at %s Req' % (molname.group(2))
for item in mol14:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-7 Indole-Benzene Complex Stack at %s Req, C1' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Indole-Benzene Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Indole-Benzene Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Indole from Indole-Benzene Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Indole-Benzene Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Indole from Indole-Benzene Complex Stack at %s Req' % (molname.group(2))
for item in mol15:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-8 Adenine-Thymine Complex Stack at %s Req, C1' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Adenine-Thymine Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Adenine from Adenine-Thymine Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Thymine from Adenine-Thymine Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Adenine from Adenine-Thymine Complex Stack at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Thymine from Adenine-Thymine Complex Stack at %s Req' % (molname.group(2))
for item in mol16:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-1 Ethene-Ethine Complex at %s Req, C2V' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Ethene-Ethine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Ethene from Ethene-Ethine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Ethine from Ethene-Ethine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Ethene from Ethene-Ethine Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Ethine from Ethene-Ethine Complex at %s Req' % (molname.group(2))
for item in mol17:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-2 Benzene-Water Complex at %s Req, CS' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-Water Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-Water Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Water from Benzene-Water Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Benzene-Water Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Water from Benzene-Water Complex at %s Req' % (molname.group(2))
for item in mol18:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-3 Benzene-Ammonia Complex at %s Req, CS' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-Ammonia Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-Ammonia Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Ammonia from Benzene-Ammonia Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Benzene-Ammonia Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Ammonia from Benzene-Ammonia Complex at %s Req' % (molname.group(2))
for item in mol19:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-4 Benzene-HCN Complex at %s Req, CS' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-HCN Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-HCN Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'HCN from Benzene-HCN Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Benzene-HCN Complex at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'HCN from Benzene-HCN Complex at %s Req' % (molname.group(2))
for item in mol20:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'DD-5 Benzene Dimer T-Shape at %s Req, C2V' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene Dimer T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene Dimer T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Benzene from Benzene Dimer T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Benzene Dimer T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Benzene from Benzene Dimer T-Shape at %s Req' % (molname.group(2))
for item in mol21:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-6 Indole-Benzene Complex T-Shape at %s Req, C1' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Indole-Benzene Complex T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Indole-Benzene Complex T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Indole from Indole-Benzene Complex T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Benzene from Indole-Benzene Complex T-Shape at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Indole from Indole-Benzene Complex T-Shape at %s Req' % (molname.group(2))
for item in mol22:
molname = rxnpattern.match(item)
TAGL['%s-%s' % (dbse, item)] = 'MX-7 Phenol Dimer at %s Req, C1' % (molname.group(2))
TAGL['%s-%s-dimer' % (dbse, item)] = 'Phenol Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Phenol from Phenol Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Phenol from Phenol Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoA-unCP' % (dbse, item)] = 'Phenol from Phenol Dimer at %s Req' % (molname.group(2))
TAGL['%s-%s-monoB-unCP' % (dbse, item)] = 'Phenol from Phenol Dimer at %s Req' % (molname.group(2))
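# Note on TAGL keys: for each reaction ITEM, '<dbse>-<ITEM>-dimer' labels the
# full complex, '-monoA-CP'/'-monoB-CP' label the monomers as used for
# counterpoise-corrected (CP) interaction energies, and
# '-monoA-unCP'/'-monoB-unCP' the uncorrected monomers in their own basis.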
# <<< Geometry Specification Strings >>>
GEOS = {}
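# Each GEOS key has the form '<dbse>-<system>-<scale>-dimer', where <system>
# indexes the complex and <scale> is the multiple of the equilibrium
# intermolecular separation Req at which the monomers are placed
# (0.9, 1.0, 1.2, 1.5, 2.0). Within each qcdb.Molecule string, '--' separates
# the two rigid fragments, each fragment opens with its own
# 'charge multiplicity' line (here '0 1'), and coordinates are Cartesian,
# in the units given by the trailing 'units angstrom' directive.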
GEOS['%s-%s-dimer' % (dbse, '1-0.9')] = qcdb.Molecule("""
0 1
N -0.535020551 -0.861570006 0.000000000
H -1.142058700 -0.825740733 -0.809565000
H -1.142058700 -0.825740733 0.809565000
H 0.000000000 0.000000000 0.000000000
--
0 1
N 2.253621272 0.000000000 0.000000000
H 2.860659421 -0.035829274 -0.809565000
H 1.718600721 -0.861570006 0.000000000
H 2.860659421 -0.035829274 0.809565000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '1-1.0')] = qcdb.Molecule("""
0 1
N -1.578718000 -0.046611000 0.000000000
H -2.158621000 0.136396000 -0.809565000
H -2.158621000 0.136396000 0.809565000
H -0.849471000 0.658193000 0.000000000
--
0 1
N 1.578718000 0.046611000 0.000000000
H 2.158621000 -0.136396000 -0.809565000
H 0.849471000 -0.658193000 0.000000000
H 2.158621000 -0.136396000 0.809565000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '1-1.2')] = qcdb.Molecule("""
0 1
N -0.535020551 -0.861570006 0.000000000
H -1.142058700 -0.825740733 -0.809565000
H -1.142058700 -0.825740733 0.809565000
H 0.000000000 0.000000000 0.000000000
--
0 1
N 3.004828362 0.000000000 0.000000000
H 3.611866511 -0.035829274 -0.809565000
H 2.469807811 -0.861570006 0.000000000
H 3.611866511 -0.035829274 0.809565000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '1-1.5')] = qcdb.Molecule("""
0 1
N -0.535020551 -0.861570006 0.000000000
H -1.142058700 -0.825740733 -0.809565000
H -1.142058700 -0.825740733 0.809565000
H 0.000000000 0.000000000 0.000000000
--
0 1
N 3.756035452 0.000000000 0.000000000
H 4.363073601 -0.035829274 -0.809565000
H 3.221014901 -0.861570006 0.000000000
H 4.363073601 -0.035829274 0.809565000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '1-2.0')] = qcdb.Molecule("""
0 1
N -0.535020551 -0.861570006 0.000000000
H -1.142058700 -0.825740733 -0.809565000
H -1.142058700 -0.825740733 0.809565000
H 0.000000000 0.000000000 0.000000000
--
0 1
N 5.008047270 0.000000000 0.000000000
H 5.615085419 -0.035829274 -0.809565000
H 4.473026719 -0.861570006 0.000000000
H 5.615085419 -0.035829274 0.809565000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2-0.9')] = qcdb.Molecule("""
0 1
O -0.956332646 -0.120638358 0.000000000
H -1.307535174 0.769703274 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
O 1.756426600 0.000000000 0.000000000
H 2.068390928 -0.496847294 -0.758561000
H 2.068390928 -0.496847294 0.758561000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2-1.0')] = qcdb.Molecule("""
0 1
O -1.551007000 -0.114520000 0.000000000
H -1.934259000 0.762503000 0.000000000
H -0.599677000 0.040712000 0.000000000
--
0 1
O 1.350625000 0.111469000 0.000000000
H 1.680398000 -0.373741000 -0.758561000
H 1.680398000 -0.373741000 0.758561000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2-1.2')] = qcdb.Molecule("""
0 1
O -0.956332646 -0.120638358 0.000000000
H -1.307535174 0.769703274 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
O 2.341902133 0.000000000 0.000000000
H 2.653866461 -0.496847294 -0.758561000
H 2.653866461 -0.496847294 0.758561000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2-1.5')] = qcdb.Molecule("""
0 1
O -0.956332646 -0.120638358 0.000000000
H -1.307535174 0.769703274 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
O 2.927377666 0.000000000 0.000000000
H 3.239341994 -0.496847294 -0.758561000
H 3.239341994 -0.496847294 0.758561000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2-2.0')] = qcdb.Molecule("""
0 1
O -0.956332646 -0.120638358 0.000000000
H -1.307535174 0.769703274 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
O 3.903170222 0.000000000 0.000000000
H 4.215134550 -0.496847294 -0.758561000
H 4.215134550 -0.496847294 0.758561000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3-0.9')] = qcdb.Molecule("""
0 1
C -1.434944263 -1.236643950 0.000000000
O -0.995009531 0.001876693 0.000000000
O -0.752030700 -2.248465543 0.000000000
H -2.527660580 -1.276950582 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
C 2.186205474 -1.011821594 0.000000000
O 1.746270742 -2.250342236 0.000000000
O 1.503291911 0.000000000 0.000000000
H 3.278921791 -0.971514961 0.000000000
H 0.751261211 -2.248465543 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3-1.0')] = qcdb.Molecule("""
0 1
C -1.888896000 -0.179692000 0.000000000
O -1.493280000 1.073689000 0.000000000
O -1.170435000 -1.166590000 0.000000000
H -2.979488000 -0.258829000 0.000000000
H -0.498833000 1.107195000 0.000000000
--
0 1
C 1.888896000 0.179692000 0.000000000
O 1.493280000 -1.073689000 0.000000000
O 1.170435000 1.166590000 0.000000000
H 2.979488000 0.258829000 0.000000000
H 0.498833000 -1.107195000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3-1.2')] = qcdb.Molecule("""
0 1
C -1.434944263 -1.236643950 0.000000000
O -0.995009531 0.001876693 0.000000000
O -0.752030700 -2.248465543 0.000000000
H -2.527660580 -1.276950582 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
C 2.687302778 -1.011821594 0.000000000
O 2.247368046 -2.250342236 0.000000000
O 2.004389215 0.000000000 0.000000000
H 3.780019095 -0.971514961 0.000000000
H 1.252358515 -2.248465543 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3-1.5')] = qcdb.Molecule("""
0 1
C -1.434944263 -1.236643950 0.000000000
O -0.995009531 0.001876693 0.000000000
O -0.752030700 -2.248465543 0.000000000
H -2.527660580 -1.276950582 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
C 3.188400082 -1.011821594 0.000000000
O 2.748465350 -2.250342236 0.000000000
O 2.505486519 0.000000000 0.000000000
H 4.281116399 -0.971514961 0.000000000
H 1.753455819 -2.248465543 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3-2.0')] = qcdb.Molecule("""
0 1
C -1.434944263 -1.236643950 0.000000000
O -0.995009531 0.001876693 0.000000000
O -0.752030700 -2.248465543 0.000000000
H -2.527660580 -1.276950582 0.000000000
H 0.000000000 0.000000000 0.000000000
--
0 1
C 4.023562255 -1.011821594 0.000000000
O 3.583627523 -2.250342236 0.000000000
O 3.340648692 0.000000000 0.000000000
H 5.116278572 -0.971514961 0.000000000
H 2.588617992 -2.248465543 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4-0.9')] = qcdb.Molecule("""
0 1
C -0.604120150 -1.070346233 0.000000000
O 0.000000000 0.000000000 0.000000000
N -0.035273679 -2.286277608 0.000000000
H -0.620847527 -3.100915874 0.000000000
H 0.982356530 -2.387103713 0.000000000
H -1.704185444 -1.098607493 0.000000000
--
0 1
C 3.242982655 -1.316757480 0.000000000
O 2.638862505 -2.387103713 0.000000000
N 2.674136184 -0.100826104 0.000000000
H 3.259710032 0.713812161 0.000000000
H 1.656505975 0.000000000 0.000000000
H 4.343047949 -1.288496220 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4-1.0')] = qcdb.Molecule("""
0 1
C -2.018649000 0.052883000 0.000000000
O -1.452200000 1.143634000 0.000000000
N -1.407770000 -1.142484000 0.000000000
H -1.964596000 -1.977036000 0.000000000
H -0.387244000 -1.207782000 0.000000000
H -3.117061000 -0.013701000 0.000000000
--
0 1
C 2.018649000 -0.052883000 0.000000000
O 1.452200000 -1.143634000 0.000000000
N 1.407770000 1.142484000 0.000000000
H 1.964596000 1.977036000 0.000000000
H 0.387244000 1.207782000 0.000000000
H 3.117061000 0.013701000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4-1.2')] = qcdb.Molecule("""
0 1
C -0.604120150 -1.070346233 0.000000000
O 0.000000000 0.000000000 0.000000000
N -0.035273679 -2.286277608 0.000000000
H -0.620847527 -3.100915874 0.000000000
H 0.982356530 -2.387103713 0.000000000
H -1.704185444 -1.098607493 0.000000000
--
0 1
C 3.795151314 -1.316757480 0.000000000
O 3.191031164 -2.387103713 0.000000000
N 3.226304843 -0.100826104 0.000000000
H 3.811878691 0.713812161 0.000000000
H 2.208674634 0.000000000 0.000000000
H 4.895216608 -1.288496220 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4-1.5')] = qcdb.Molecule("""
0 1
C -0.604120150 -1.070346233 0.000000000
O 0.000000000 0.000000000 0.000000000
N -0.035273679 -2.286277608 0.000000000
H -0.620847527 -3.100915874 0.000000000
H 0.982356530 -2.387103713 0.000000000
H -1.704185444 -1.098607493 0.000000000
--
0 1
C 4.347319973 -1.316757480 0.000000000
O 3.743199823 -2.387103713 0.000000000
N 3.778473502 -0.100826104 0.000000000
H 4.364047350 0.713812161 0.000000000
H 2.760843293 0.000000000 0.000000000
H 5.447385267 -1.288496220 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4-2.0')] = qcdb.Molecule("""
0 1
C -0.604120150 -1.070346233 0.000000000
O 0.000000000 0.000000000 0.000000000
N -0.035273679 -2.286277608 0.000000000
H -0.620847527 -3.100915874 0.000000000
H 0.982356530 -2.387103713 0.000000000
H -1.704185444 -1.098607493 0.000000000
--
0 1
C 5.267601070 -1.316757480 0.000000000
O 4.663480920 -2.387103713 0.000000000
N 4.698754599 -0.100826104 0.000000000
H 5.284328447 0.713812161 0.000000000
H 3.681124390 0.000000000 0.000000000
H 6.367666364 -1.288496220 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5-0.9')] = qcdb.Molecule("""
0 1
O 0.000000000 0.000000000 0.000000000
C -0.664243938 1.036879148 0.000000000
N -0.108663437 2.286389518 0.000000000
C -0.864691937 3.427521953 0.000000000
C -2.214231597 3.403909532 0.000000000
C -2.909869859 2.131803891 0.000000000
N -2.034924624 1.029301194 0.000000000
O -4.115521524 1.958733959 0.000000000
H -2.793840332 4.310799346 0.000000000
H 0.917908194 2.334329905 0.000000000
H -2.469325804 0.116551326 0.000000000
H -0.300037631 4.348024043 0.000000000
--
0 1
O 2.515009084 2.334329905 0.000000000
C 3.179253022 1.297450757 0.000000000
N 2.623672521 0.047940387 0.000000000
C 3.379701020 -1.093192048 0.000000000
C 4.729240680 -1.069579627 0.000000000
C 5.424878943 0.202526014 0.000000000
N 4.549933708 1.305028711 0.000000000
O 6.630530608 0.375595946 0.000000000
H 5.308849416 -1.976469441 0.000000000
H 1.597100890 0.000000000 0.000000000
H 4.984334888 2.217778579 0.000000000
H 2.815046715 -2.013694138 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5-1.0')] = qcdb.Molecule("""
0 1
O -1.466332000 1.012169000 0.000000000
C -0.628146000 1.914268000 0.000000000
N 0.720509000 1.688269000 0.000000000
C 1.636729000 2.705276000 0.000000000
C 1.276904000 4.006176000 0.000000000
C -0.128601000 4.362155000 0.000000000
N -0.977723000 3.239643000 0.000000000
O -0.597223000 5.486407000 0.000000000
H 2.010350000 4.793864000 0.000000000
H 1.023251000 0.706182000 0.000000000
H -1.970027000 3.432385000 0.000000000
H 2.669062000 2.388342000 0.000000000
--
0 1
O 1.466332000 -1.012169000 0.000000000
C 0.628146000 -1.914268000 0.000000000
N -0.720509000 -1.688269000 0.000000000
C -1.636729000 -2.705276000 0.000000000
C -1.276904000 -4.006176000 0.000000000
C 0.128601000 -4.362155000 0.000000000
N 0.977723000 -3.239643000 0.000000000
O 0.597223000 -5.486407000 0.000000000
H -2.010350000 -4.793864000 0.000000000
H -1.023251000 -0.706182000 0.000000000
H 1.970027000 -3.432385000 0.000000000
H -2.669062000 -2.388342000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5-1.2')] = qcdb.Molecule("""
0 1
O 0.000000000 0.000000000 0.000000000
C -0.664243938 1.036879148 0.000000000
N -0.108663437 2.286389518 0.000000000
C -0.864691937 3.427521953 0.000000000
C -2.214231597 3.403909532 0.000000000
C -2.909869859 2.131803891 0.000000000
N -2.034924624 1.029301194 0.000000000
O -4.115521524 1.958733959 0.000000000
H -2.793840332 4.310799346 0.000000000
H 0.917908194 2.334329905 0.000000000
H -2.469325804 0.116551326 0.000000000
H -0.300037631 4.348024043 0.000000000
--
0 1
O 3.047376048 2.334329905 0.000000000
C 3.711619986 1.297450757 0.000000000
N 3.156039485 0.047940387 0.000000000
C 3.912067984 -1.093192048 0.000000000
C 5.261607644 -1.069579627 0.000000000
C 5.957245907 0.202526014 0.000000000
N 5.082300672 1.305028711 0.000000000
O 7.162897572 0.375595946 0.000000000
H 5.841216380 -1.976469441 0.000000000
H 2.129467854 0.000000000 0.000000000
H 5.516701852 2.217778579 0.000000000
H 3.347413679 -2.013694138 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5-1.5')] = qcdb.Molecule("""
0 1
O 0.000000000 0.000000000 0.000000000
C -0.664243938 1.036879148 0.000000000
N -0.108663437 2.286389518 0.000000000
C -0.864691937 3.427521953 0.000000000
C -2.214231597 3.403909532 0.000000000
C -2.909869859 2.131803891 0.000000000
N -2.034924624 1.029301194 0.000000000
O -4.115521524 1.958733959 0.000000000
H -2.793840332 4.310799346 0.000000000
H 0.917908194 2.334329905 0.000000000
H -2.469325804 0.116551326 0.000000000
H -0.300037631 4.348024043 0.000000000
--
0 1
O 3.579743012 2.334329905 0.000000000
C 4.243986950 1.297450757 0.000000000
N 3.688406449 0.047940387 0.000000000
C 4.444434948 -1.093192048 0.000000000
C 5.793974608 -1.069579627 0.000000000
C 6.489612871 0.202526014 0.000000000
N 5.614667636 1.305028711 0.000000000
O 7.695264536 0.375595946 0.000000000
H 6.373583344 -1.976469441 0.000000000
H 2.661834818 0.000000000 0.000000000
H 6.049068816 2.217778579 0.000000000
H 3.879780643 -2.013694138 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5-2.0')] = qcdb.Molecule("""
0 1
O 0.000000000 0.000000000 0.000000000
C -0.664243938 1.036879148 0.000000000
N -0.108663437 2.286389518 0.000000000
C -0.864691937 3.427521953 0.000000000
C -2.214231597 3.403909532 0.000000000
C -2.909869859 2.131803891 0.000000000
N -2.034924624 1.029301194 0.000000000
O -4.115521524 1.958733959 0.000000000
H -2.793840332 4.310799346 0.000000000
H 0.917908194 2.334329905 0.000000000
H -2.469325804 0.116551326 0.000000000
H -0.300037631 4.348024043 0.000000000
--
0 1
O 4.467021284 2.334329905 0.000000000
C 5.131265222 1.297450757 0.000000000
N 4.575684721 0.047940387 0.000000000
C 5.331713220 -1.093192048 0.000000000
C 6.681252880 -1.069579627 0.000000000
C 7.376891143 0.202526014 0.000000000
N 6.501945908 1.305028711 0.000000000
O 8.582542808 0.375595946 0.000000000
H 7.260861616 -1.976469441 0.000000000
H 3.549113090 0.000000000 0.000000000
H 6.936347088 2.217778579 0.000000000
H 4.767058915 -2.013694138 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6-0.9')] = qcdb.Molecule("""
0 1
O -0.969652624 -2.245611164 -0.386822525
N -1.037789793 0.004508753 -0.001131127
C -3.759261297 0.014028068 -0.018375760
C -3.057727058 1.221631156 0.204402100
C -1.692392879 1.172000703 0.205277859
C -1.650068007 -1.222514751 -0.217981663
C -3.088264390 -1.161828225 -0.221825966
H -4.841300764 0.016708498 -0.026892047
H -3.567221821 2.156831083 0.369386687
H -1.068064568 2.038779450 0.367771502
H -3.612088503 -2.090701001 -0.390563867
H 0.000000000 0.000000000 0.000000000
--
0 1
N 1.673493386 0.000000000 0.000000000
C 2.352093429 -1.145324213 0.192591910
C 3.760459273 -1.168677470 0.196637005
C 4.459573002 0.005477083 -0.001723239
C 3.755182987 1.194447664 -0.202961469
C 2.372894041 1.130328028 -0.192845808
H 4.279274134 -2.103975233 0.356345736
H 5.541001766 -0.003103367 -0.001911235
H 4.259765167 2.134632052 -0.364687797
H 1.782114958 2.025258423 -0.349790900
N 1.620216197 -2.272201547 0.435153550
H 2.101618920 -3.145888174 0.315408858
H 0.644520940 -2.270442069 0.133172072
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6-1.0')] = qcdb.Molecule("""
0 1
O -1.397621000 -1.885837000 -0.367306000
N -1.464255000 0.364183000 0.019230000
C -4.185740000 0.369667000 0.036096000
C -3.483260000 1.578311000 0.250075000
C -2.117950000 1.530705000 0.233838000
C -2.077383000 -0.863749000 -0.189941000
C -3.515603000 -0.805195000 -0.175759000
H -5.267804000 0.370743000 0.041142000
H -3.992033000 2.512756000 0.421441000
H -1.492920000 2.398410000 0.388502000
H -4.040123000 -1.734845000 -0.337927000
H -0.426527000 0.361213000 0.007354000
--
0 1
N 1.432762000 0.363970000 -0.015951000
C 2.115420000 -0.780345000 0.168110000
C 3.523759000 -0.801610000 0.154503000
C 4.218590000 0.373578000 -0.052593000
C 3.509971000 1.561501000 -0.244976000
C 2.128014000 1.495332000 -0.217537000
H 4.045921000 -1.736136000 0.307688000
H 5.299943000 0.366601000 -0.066335000
H 4.011092000 2.502431000 -0.413005000
H 1.533988000 2.389384000 -0.367057000
N 1.388312000 -1.908304000 0.419815000
H 1.869471000 -2.781277000 0.294038000
H 0.408907000 -1.907994000 0.130086000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6-1.2')] = qcdb.Molecule("""
0 1
O -0.969652624 -2.245611164 -0.386822525
N -1.037789793 0.004508753 -0.001131127
C -3.759261297 0.014028068 -0.018375760
C -3.057727058 1.221631156 0.204402100
C -1.692392879 1.172000703 0.205277859
C -1.650068007 -1.222514751 -0.217981663
C -3.088264390 -1.161828225 -0.221825966
H -4.841300764 0.016708498 -0.026892047
H -3.567221821 2.156831083 0.369386687
H -1.068064568 2.038779450 0.367771502
H -3.612088503 -2.090701001 -0.390563867
H 0.000000000 0.000000000 0.000000000
--
0 1
N 2.231324514 0.000000000 0.000000000
C 2.909924557 -1.145324213 0.192591910
C 4.318290401 -1.168677470 0.196637005
C 5.017404130 0.005477083 -0.001723239
C 4.313014115 1.194447664 -0.202961469
C 2.930725169 1.130328028 -0.192845808
H 4.837105262 -2.103975233 0.356345736
H 6.098832894 -0.003103367 -0.001911235
H 4.817596295 2.134632052 -0.364687797
H 2.339946086 2.025258423 -0.349790900
N 2.178047325 -2.272201547 0.435153550
H 2.659450048 -3.145888174 0.315408858
H 1.202352068 -2.270442069 0.133172072
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6-1.5')] = qcdb.Molecule("""
0 1
O -0.969652624 -2.245611164 -0.386822525
N -1.037789793 0.004508753 -0.001131127
C -3.759261297 0.014028068 -0.018375760
C -3.057727058 1.221631156 0.204402100
C -1.692392879 1.172000703 0.205277859
C -1.650068007 -1.222514751 -0.217981663
C -3.088264390 -1.161828225 -0.221825966
H -4.841300764 0.016708498 -0.026892047
H -3.567221821 2.156831083 0.369386687
H -1.068064568 2.038779450 0.367771502
H -3.612088503 -2.090701001 -0.390563867
H 0.000000000 0.000000000 0.000000000
--
0 1
N 2.789155642 0.000000000 0.000000000
C 3.467755685 -1.145324213 0.192591910
C 4.876121529 -1.168677470 0.196637005
C 5.575235258 0.005477083 -0.001723239
C 4.870845243 1.194447664 -0.202961469
C 3.488556297 1.130328028 -0.192845808
H 5.394936390 -2.103975233 0.356345736
H 6.656664022 -0.003103367 -0.001911235
H 5.375427423 2.134632052 -0.364687797
H 2.897777214 2.025258423 -0.349790900
N 2.735878453 -2.272201547 0.435153550
H 3.217281176 -3.145888174 0.315408858
H 1.760183196 -2.270442069 0.133172072
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6-2.0')] = qcdb.Molecule("""
0 1
O -0.969652624 -2.245611164 -0.386822525
N -1.037789793 0.004508753 -0.001131127
C -3.759261297 0.014028068 -0.018375760
C -3.057727058 1.221631156 0.204402100
C -1.692392879 1.172000703 0.205277859
C -1.650068007 -1.222514751 -0.217981663
C -3.088264390 -1.161828225 -0.221825966
H -4.841300764 0.016708498 -0.026892047
H -3.567221821 2.156831083 0.369386687
H -1.068064568 2.038779450 0.367771502
H -3.612088503 -2.090701001 -0.390563867
H 0.000000000 0.000000000 0.000000000
--
0 1
N 3.718874190 0.000000000 0.000000000
C 4.397474233 -1.145324213 0.192591910
C 5.805840077 -1.168677470 0.196637005
C 6.504953806 0.005477083 -0.001723239
C 5.800563791 1.194447664 -0.202961469
C 4.418274845 1.130328028 -0.192845808
H 6.324654938 -2.103975233 0.356345736
H 7.586382570 -0.003103367 -0.001911235
H 6.305145971 2.134632052 -0.364687797
H 3.827495762 2.025258423 -0.349790900
N 3.665597001 -2.272201547 0.435153550
H 4.146999724 -3.145888174 0.315408858
H 2.689901744 -2.270442069 0.133172072
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7-0.9')] = qcdb.Molecule("""
0 1
N 0.000000000 0.000000000 0.000000000
C -0.738685058 -0.157889771 1.110355410
C -2.139452884 -0.168053559 0.964712563
C -2.629497187 -0.008665792 -0.331201352
N -1.918309833 0.152634753 -1.454844039
C -0.614262216 0.143659867 -1.193547121
N -3.152980999 -0.310697201 1.883518666
C -4.247466012 -0.237200328 1.144874976
N -3.994250734 -0.056604504 -0.187030096
N -0.136179412 -0.289433845 2.300428025
H 0.055161346 0.265959015 -2.035655088
H -5.252585445 -0.308958331 1.525406574
H -4.668404863 0.026245320 -0.929656824
H 0.876876426 -0.329105732 2.359811410
H -0.708581316 -0.452407073 3.108240602
--
0 1
N 4.674076612 0.155627547 -1.128075158
C 5.366947235 -0.031573530 0.039652507
C 4.745331442 -0.213180550 1.225999310
C 3.289690418 -0.205459536 1.237959001
N 2.678823212 -0.008913767 0.013109028
C 3.292432779 0.176239188 -1.205417098
C 5.464603172 -0.419950938 2.517000917
O 2.621308338 -0.362031655 2.261654302
O 2.694203350 0.342506569 -2.253367774
H 5.154382378 0.288458351 -2.002300903
H 1.636966971 0.000000000 0.000000000
H 6.444191927 -0.024779868 -0.049650000
H 5.195022957 0.354841198 3.233018736
H 5.183915029 -1.373098243 2.962397530
H 6.542374655 -0.403617008 2.368385087
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7-1.0')] = qcdb.Molecule("""
0 1
N 0.935015000 -0.027980000 -0.378892000
C 1.673964000 -0.035777000 0.742432000
C 3.074796000 -0.009448000 0.599456000
C 3.564611000 0.019545000 -0.705987000
N 2.853151000 0.025803000 -1.840960000
C 1.549076000 0.001257000 -1.580801000
N 4.088582000 -0.005443000 1.528979000
C 5.182992000 0.025397000 0.787218000
N 4.929487000 0.041240000 -0.556727000
N 1.071618000 -0.076537000 1.939139000
H 0.879444000 0.005026000 -2.431571000
H 6.188259000 0.037554000 1.173882000
H 5.603537000 0.064876000 -1.303681000
H 0.058692000 -0.042376000 2.003918000
H 1.644380000 -0.034739000 2.761916000
--
0 1
N -3.921173000 -0.000965000 -1.516366000
C -4.613683000 0.016905000 -0.333652000
C -3.991739000 0.021935000 0.866334000
C -2.536137000 0.007465000 0.876672000
N -1.925648000 -0.011059000 -0.363895000
C -2.539590000 -0.014947000 -1.596236000
C -4.710613000 0.041337000 2.173864000
O -1.867473000 0.011209000 1.912083000
O -1.941678000 -0.029188000 -2.657378000
H -4.401717000 -0.003608000 -2.400492000
H -0.883826000 -0.021617000 -0.378427000
H -5.690922000 0.026935000 -0.422718000
H -4.443928000 -0.830257000 2.769566000
H -4.426706000 0.918618000 2.753026000
H -5.788397000 0.050553000 2.024728000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7-1.2')] = qcdb.Molecule("""
0 1
N 0.000000000 0.000000000 0.000000000
C -0.738685058 -0.157889771 1.110355410
C -2.139452884 -0.168053559 0.964712563
C -2.629497187 -0.008665792 -0.331201352
N -1.918309833 0.152634753 -1.454844039
C -0.614262216 0.143659867 -1.193547121
N -3.152980999 -0.310697201 1.883518666
C -4.247466012 -0.237200328 1.144874976
N -3.994250734 -0.056604504 -0.187030096
N -0.136179412 -0.289433845 2.300428025
H 0.055161346 0.265959015 -2.035655088
H -5.252585445 -0.308958331 1.525406574
H -4.668404863 0.026245320 -0.929656824
H 0.876876426 -0.329105732 2.359811410
H -0.708581316 -0.452407073 3.108240602
--
0 1
N 5.219732269 0.155627547 -1.128075158
C 5.912602892 -0.031573530 0.039652507
C 5.290987099 -0.213180550 1.225999310
C 3.835346075 -0.205459536 1.237959001
N 3.224478869 -0.008913767 0.013109028
C 3.838088436 0.176239188 -1.205417098
C 6.010258829 -0.419950938 2.517000917
O 3.166963995 -0.362031655 2.261654302
O 3.239859007 0.342506569 -2.253367774
H 5.700038035 0.288458351 -2.002300903
H 2.182622628 0.000000000 0.000000000
H 6.989847584 -0.024779868 -0.049650000
H 5.740678614 0.354841198 3.233018736
H 5.729570686 -1.373098243 2.962397530
H 7.088030312 -0.403617008 2.368385087
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7-1.5')] = qcdb.Molecule("""
0 1
N 0.000000000 0.000000000 0.000000000
C -0.738685058 -0.157889771 1.110355410
C -2.139452884 -0.168053559 0.964712563
C -2.629497187 -0.008665792 -0.331201352
N -1.918309833 0.152634753 -1.454844039
C -0.614262216 0.143659867 -1.193547121
N -3.152980999 -0.310697201 1.883518666
C -4.247466012 -0.237200328 1.144874976
N -3.994250734 -0.056604504 -0.187030096
N -0.136179412 -0.289433845 2.300428025
H 0.055161346 0.265959015 -2.035655088
H -5.252585445 -0.308958331 1.525406574
H -4.668404863 0.026245320 -0.929656824
H 0.876876426 -0.329105732 2.359811410
H -0.708581316 -0.452407073 3.108240602
--
0 1
N 5.765387926 0.155627547 -1.128075158
C 6.458258549 -0.031573530 0.039652507
C 5.836642756 -0.213180550 1.225999310
C 4.381001732 -0.205459536 1.237959001
N 3.770134526 -0.008913767 0.013109028
C 4.383744093 0.176239188 -1.205417098
C 6.555914486 -0.419950938 2.517000917
O 3.712619652 -0.362031655 2.261654302
O 3.785514664 0.342506569 -2.253367774
H 6.245693692 0.288458351 -2.002300903
H 2.728278285 0.000000000 0.000000000
H 7.535503241 -0.024779868 -0.049650000
H 6.286334271 0.354841198 3.233018736
H 6.275226343 -1.373098243 2.962397530
H 7.633685969 -0.403617008 2.368385087
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7-2.0')] = qcdb.Molecule("""
0 1
N 0.000000000 0.000000000 0.000000000
C -0.738685058 -0.157889771 1.110355410
C -2.139452884 -0.168053559 0.964712563
C -2.629497187 -0.008665792 -0.331201352
N -1.918309833 0.152634753 -1.454844039
C -0.614262216 0.143659867 -1.193547121
N -3.152980999 -0.310697201 1.883518666
C -4.247466012 -0.237200328 1.144874976
N -3.994250734 -0.056604504 -0.187030096
N -0.136179412 -0.289433845 2.300428025
H 0.055161346 0.265959015 -2.035655088
H -5.252585445 -0.308958331 1.525406574
H -4.668404863 0.026245320 -0.929656824
H 0.876876426 -0.329105732 2.359811410
H -0.708581316 -0.452407073 3.108240602
--
0 1
N 6.674814021 0.155627547 -1.128075158
C 7.367684644 -0.031573530 0.039652507
C 6.746068851 -0.213180550 1.225999310
C 5.290427827 -0.205459536 1.237959001
N 4.679560621 -0.008913767 0.013109028
C 5.293170188 0.176239188 -1.205417098
C 7.465340581 -0.419950938 2.517000917
O 4.622045747 -0.362031655 2.261654302
O 4.694940759 0.342506569 -2.253367774
H 7.155119787 0.288458351 -2.002300903
H 3.637704380 0.000000000 0.000000000
H 8.444929336 -0.024779868 -0.049650000
H 7.195760366 0.354841198 3.233018736
H 7.184652438 -1.373098243 2.962397530
H 8.543112064 -0.403617008 2.368385087
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8-0.9')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
H 0.364514644 0.513239461 -0.888512354
H 0.364514644 0.513105641 0.888589641
H 0.364215723 -1.026226426 -0.000077278
H -1.089122980 0.000311014 0.000000023
--
0 1
C 3.346489810 0.000000000 0.000000000
H 4.435612789 -0.000311014 -0.000000023
H 2.981975165 -0.513105641 -0.888589641
H 2.981975165 -0.513239461 0.888512354
H 2.982274086 1.026226426 0.000077278
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8-1.0')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.000140000 1.859161000
H -0.888551000 0.513060000 1.494685000
H 0.888551000 0.513060000 1.494685000
H 0.000000000 -1.026339000 1.494868000
H 0.000000000 0.000089000 2.948284000
--
0 1
C 0.000000000 0.000140000 -1.859161000
H 0.000000000 -0.000089000 -2.948284000
H -0.888551000 -0.513060000 -1.494685000
H 0.888551000 -0.513060000 -1.494685000
H 0.000000000 1.026339000 -1.494868000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8-1.2')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
H 0.364514644 0.513239461 -0.888512354
H 0.364514644 0.513105641 0.888589641
H 0.364215723 -1.026226426 -0.000077278
H -1.089122980 0.000311014 0.000000023
--
0 1
C 4.461986413 0.000000000 0.000000000
H 5.551109392 -0.000311014 -0.000000023
H 4.097471768 -0.513105641 -0.888589641
H 4.097471768 -0.513239461 0.888512354
H 4.097770689 1.026226426 0.000077278
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8-1.5')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
H 0.364514644 0.513239461 -0.888512354
H 0.364514644 0.513105641 0.888589641
H 0.364215723 -1.026226426 -0.000077278
H -1.089122980 0.000311014 0.000000023
--
0 1
C 5.577483016 0.000000000 0.000000000
H 6.666605995 -0.000311014 -0.000000023
H 5.212968371 -0.513105641 -0.888589641
H 5.212968371 -0.513239461 0.888512354
H 5.213267292 1.026226426 0.000077278
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8-2.0')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
H 0.364514644 0.513239461 -0.888512354
H 0.364514644 0.513105641 0.888589641
H 0.364215723 -1.026226426 -0.000077278
H -1.089122980 0.000311014 0.000000023
--
0 1
C 7.436644022 0.000000000 0.000000000
H 8.525767001 -0.000311014 -0.000000023
H 7.072129377 -0.513105641 -0.888589641
H 7.072129377 -0.513239461 0.888512354
H 7.072428298 1.026226426 0.000077278
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9-0.9')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.471925000 0.471925000
C 0.000000000 0.471925000 -0.471925000
H 0.922986000 -0.872422000 0.872422000
H 0.922986000 0.872422000 -0.872422000
H -0.924197000 -0.870464000 0.870464000
H -0.924197000 0.870464000 -0.870464000
--
0 1
C 3.346399800 0.471925000 0.471925000
C 3.346399800 -0.471925000 -0.471925000
H 2.423413800 0.872422000 0.872422000
H 2.423413800 -0.872422000 -0.872422000
H 4.270596800 0.870464000 0.870464000
H 4.270596800 -0.870464000 -0.870464000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9-1.0')] = qcdb.Molecule("""
0 1
C -0.471925000 -0.471925000 -1.859111000
C 0.471925000 0.471925000 -1.859111000
H -0.872422000 -0.872422000 -0.936125000
H 0.872422000 0.872422000 -0.936125000
H -0.870464000 -0.870464000 -2.783308000
H 0.870464000 0.870464000 -2.783308000
--
0 1
C -0.471925000 0.471925000 1.859111000
C 0.471925000 -0.471925000 1.859111000
H -0.872422000 0.872422000 0.936125000
H 0.872422000 -0.872422000 0.936125000
H -0.870464000 0.870464000 2.783308000
H 0.870464000 -0.870464000 2.783308000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9-1.2')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.471925000 0.471925000
C 0.000000000 0.471925000 -0.471925000
H 0.922986000 -0.872422000 0.872422000
H 0.922986000 0.872422000 -0.872422000
H -0.924197000 -0.870464000 0.870464000
H -0.924197000 0.870464000 -0.870464000
--
0 1
C 4.461866400 0.471925000 0.471925000
C 4.461866400 -0.471925000 -0.471925000
H 3.538880400 0.872422000 0.872422000
H 3.538880400 -0.872422000 -0.872422000
H 5.386063400 0.870464000 0.870464000
H 5.386063400 -0.870464000 -0.870464000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9-1.5')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.471925000 0.471925000
C 0.000000000 0.471925000 -0.471925000
H 0.922986000 -0.872422000 0.872422000
H 0.922986000 0.872422000 -0.872422000
H -0.924197000 -0.870464000 0.870464000
H -0.924197000 0.870464000 -0.870464000
--
0 1
C 5.577333000 0.471925000 0.471925000
C 5.577333000 -0.471925000 -0.471925000
H 4.654347000 0.872422000 0.872422000
H 4.654347000 -0.872422000 -0.872422000
H 6.501530000 0.870464000 0.870464000
H 6.501530000 -0.870464000 -0.870464000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9-2.0')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.471925000 0.471925000
C 0.000000000 0.471925000 -0.471925000
H 0.922986000 -0.872422000 0.872422000
H 0.922986000 0.872422000 -0.872422000
H -0.924197000 -0.870464000 0.870464000
H -0.924197000 0.870464000 -0.870464000
--
0 1
C 7.436444000 0.471925000 0.471925000
C 7.436444000 -0.471925000 -0.471925000
H 6.513458000 0.872422000 0.872422000
H 6.513458000 -0.872422000 -0.872422000
H 8.360641000 0.870464000 0.870464000
H 8.360641000 -0.870464000 -0.870464000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10-0.9')] = qcdb.Molecule("""
0 1
C 0.000011002 0.036291078 -1.393218002
C -0.000011075 -1.188401879 -0.728035925
C 0.000010922 -1.224707791 0.665180078
C -0.000011002 -0.036296745 1.393204002
C 0.000011075 1.188416213 0.728037925
C -0.000010922 1.224699125 -0.665168078
H 0.001567004 0.064448010 -2.474274004
H 0.001550866 -2.110540915 -1.292958866
H 0.001566862 -2.175007759 1.181323138
H 0.001550996 -0.064464677 2.474261004
H 0.001567134 2.110560249 1.292950866
H 0.001551138 2.175006092 -1.181303138
--
0 1
C 3.452913900 -0.000000069 0.000000000
H 3.816671953 0.838173871 -0.586878053
H 3.816671906 0.089163973 1.019318994
H 2.366964900 0.000000000 0.000000000
H 3.816671841 -0.927338119 -0.432440941
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10-1.0')] = qcdb.Molecule("""
0 1
C 1.393218000 0.036291000 -0.633280000
C 0.728036000 -1.188402000 -0.633302000
C -0.665180000 -1.224708000 -0.633280000
C -1.393204000 -0.036297000 -0.633302000
C -0.728038000 1.188416000 -0.633280000
C 0.665168000 1.224699000 -0.633302000
H 2.474274000 0.064448000 -0.631724000
H 1.292959000 -2.110541000 -0.631740000
H -1.181323000 -2.175008000 -0.631724000
H -2.474261000 -0.064465000 -0.631740000
H -1.292951000 2.110560000 -0.631724000
H 1.181303000 2.175006000 -0.631740000
--
0 1
C 0.000000000 0.000000000 3.082619000
H 0.586878000 0.838174000 3.446377000
H -1.019319000 0.089164000 3.446377000
H 0.000000000 0.000000000 1.996670000
H 0.432441000 -0.927338000 3.446377000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10-1.2')] = qcdb.Molecule("""
0 1
C 0.000011002 0.036291078 -1.393218002
C -0.000011075 -1.188401879 -0.728035925
C 0.000010922 -1.224707791 0.665180078
C -0.000011002 -0.036296745 1.393204002
C 0.000011075 1.188416213 0.728037925
C -0.000010922 1.224699125 -0.665168078
H 0.001567004 0.064448010 -2.474274004
H 0.001550866 -2.110540915 -1.292958866
H 0.001566862 -2.175007759 1.181323138
H 0.001550996 -0.064464677 2.474261004
H 0.001567134 2.110560249 1.292950866
H 0.001551138 2.175006092 -1.181303138
--
0 1
C 4.241902200 -0.000000069 0.000000000
H 4.605660253 0.838173871 -0.586878053
H 4.605660206 0.089163973 1.019318994
H 3.155953200 0.000000000 0.000000000
H 4.605660141 -0.927338119 -0.432440941
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10-1.5')] = qcdb.Molecule("""
0 1
C 0.000011002 0.036291078 -1.393218002
C -0.000011075 -1.188401879 -0.728035925
C 0.000010922 -1.224707791 0.665180078
C -0.000011002 -0.036296745 1.393204002
C 0.000011075 1.188416213 0.728037925
C -0.000010922 1.224699125 -0.665168078
H 0.001567004 0.064448010 -2.474274004
H 0.001550866 -2.110540915 -1.292958866
H 0.001566862 -2.175007759 1.181323138
H 0.001550996 -0.064464677 2.474261004
H 0.001567134 2.110560249 1.292950866
H 0.001551138 2.175006092 -1.181303138
--
0 1
C 5.030890500 -0.000000069 0.000000000
H 5.394648553 0.838173871 -0.586878053
H 5.394648506 0.089163973 1.019318994
H 3.944941500 0.000000000 0.000000000
H 5.394648441 -0.927338119 -0.432440941
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10-2.0')] = qcdb.Molecule("""
0 1
C 0.000011002 0.036291078 -1.393218002
C -0.000011075 -1.188401879 -0.728035925
C 0.000010922 -1.224707791 0.665180078
C -0.000011002 -0.036296745 1.393204002
C 0.000011075 1.188416213 0.728037925
C -0.000010922 1.224699125 -0.665168078
H 0.001567004 0.064448010 -2.474274004
H 0.001550866 -2.110540915 -1.292958866
H 0.001566862 -2.175007759 1.181323138
H 0.001550996 -0.064464677 2.474261004
H 0.001567134 2.110560249 1.292950866
H 0.001551138 2.175006092 -1.181303138
--
0 1
C 6.345871000 -0.000000069 0.000000000
H 6.709629053 0.838173871 -0.586878053
H 6.709629006 0.089163973 1.019318994
H 5.259922000 0.000000000 0.000000000
H 6.709628941 -0.927338119 -0.432440941
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11-0.9')] = qcdb.Molecule("""
0 1
C 0.629051507 -1.244058476 0.000000000
C 0.314072291 -0.622134657 1.206205000
C 0.314072291 -0.622134657 -1.206205000
C -0.314813547 0.621699240 1.206954000
C -0.627568995 1.244929310 0.000000000
C -0.314813547 0.621699240 -1.206954000
H 0.563930576 -1.102778154 -2.142315000
H -0.559388819 1.104085746 -2.143798000
H -1.116894124 2.209685917 0.000000000
H -0.559388819 1.104085746 2.143798000
H 0.563930576 -1.102778154 2.142315000
H 1.129721711 -2.202462660 0.000000000
--
0 1
C 2.759649224 1.244058476 0.000000000
C 3.074628440 0.622134657 -1.206205000
C 3.074628440 0.622134657 1.206205000
C 3.703514278 -0.621699240 -1.206954000
C 4.016269727 -1.244929310 0.000000000
C 3.703514278 -0.621699240 1.206954000
H 2.258979020 2.202462660 0.000000000
H 2.824770156 1.102778154 2.142315000
H 3.948089550 -1.104085746 2.143798000
H 4.505594855 -2.209685917 0.000000000
H 3.948089550 -1.104085746 -2.143798000
H 2.824770156 1.102778154 -2.142315000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11-1.0')] = qcdb.Molecule("""
0 1
C -1.047825000 -1.421674000 0.000000000
C -1.454503000 -0.855446000 1.206205000
C -1.454503000 -0.855446000 -1.206205000
C -2.266797000 0.277161000 1.206954000
C -2.671478000 0.845021000 0.000000000
C -2.266797000 0.277161000 -1.206954000
H -1.133853000 -1.292059000 -2.142315000
H -2.582494000 0.716307000 -2.143798000
H -3.303042000 1.723270000 0.000000000
H -2.582494000 0.716307000 2.143798000
H -1.133853000 -1.292059000 2.142315000
H -0.406025000 -2.291905000 0.000000000
--
0 1
C 1.047825000 1.421674000 0.000000000
C 1.454503000 0.855446000 -1.206205000
C 1.454503000 0.855446000 1.206205000
C 2.266797000 -0.277161000 -1.206954000
C 2.671478000 -0.845021000 0.000000000
C 2.266797000 -0.277161000 1.206954000
H 0.406025000 2.291905000 0.000000000
H 1.133853000 1.292059000 2.142315000
H 2.582494000 -0.716307000 2.143798000
H 3.303042000 -1.723270000 0.000000000
H 2.582494000 -0.716307000 -2.143798000
H 1.133853000 1.292059000 -2.142315000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11-1.2')] = qcdb.Molecule("""
0 1
C 0.629051507 -1.244058476 0.000000000
C 0.314072291 -0.622134657 1.206205000
C 0.314072291 -0.622134657 -1.206205000
C -0.314813547 0.621699240 1.206954000
C -0.627568995 1.244929310 0.000000000
C -0.314813547 0.621699240 -1.206954000
H 0.563930576 -1.102778154 -2.142315000
H -0.559388819 1.104085746 -2.143798000
H -1.116894124 2.209685917 0.000000000
H -0.559388819 1.104085746 2.143798000
H 0.563930576 -1.102778154 2.142315000
H 1.129721711 -2.202462660 0.000000000
--
0 1
C 3.889216135 1.244058476 0.000000000
C 4.204195351 0.622134657 -1.206205000
C 4.204195351 0.622134657 1.206205000
C 4.833081189 -0.621699240 -1.206954000
C 5.145836638 -1.244929310 0.000000000
C 4.833081189 -0.621699240 1.206954000
H 3.388545931 2.202462660 0.000000000
H 3.954337067 1.102778154 2.142315000
H 5.077656461 -1.104085746 2.143798000
H 5.635161766 -2.209685917 0.000000000
H 5.077656461 -1.104085746 -2.143798000
H 3.954337067 1.102778154 -2.142315000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11-1.5')] = qcdb.Molecule("""
0 1
C 0.629051507 -1.244058476 0.000000000
C 0.314072291 -0.622134657 1.206205000
C 0.314072291 -0.622134657 -1.206205000
C -0.314813547 0.621699240 1.206954000
C -0.627568995 1.244929310 0.000000000
C -0.314813547 0.621699240 -1.206954000
H 0.563930576 -1.102778154 -2.142315000
H -0.559388819 1.104085746 -2.143798000
H -1.116894124 2.209685917 0.000000000
H -0.559388819 1.104085746 2.143798000
H 0.563930576 -1.102778154 2.142315000
H 1.129721711 -2.202462660 0.000000000
--
0 1
C 5.018783046 1.244058476 0.000000000
C 5.333762262 0.622134657 -1.206205000
C 5.333762262 0.622134657 1.206205000
C 5.962648100 -0.621699240 -1.206954000
C 6.275403549 -1.244929310 0.000000000
C 5.962648100 -0.621699240 1.206954000
H 4.518112842 2.202462660 0.000000000
H 5.083903978 1.102778154 2.142315000
H 6.207223372 -1.104085746 2.143798000
H 6.764728677 -2.209685917 0.000000000
H 6.207223372 -1.104085746 -2.143798000
H 5.083903978 1.102778154 -2.142315000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11-2.0')] = qcdb.Molecule("""
0 1
C 0.629051507 -1.244058476 0.000000000
C 0.314072291 -0.622134657 1.206205000
C 0.314072291 -0.622134657 -1.206205000
C -0.314813547 0.621699240 1.206954000
C -0.627568995 1.244929310 0.000000000
C -0.314813547 0.621699240 -1.206954000
H 0.563930576 -1.102778154 -2.142315000
H -0.559388819 1.104085746 -2.143798000
H -1.116894124 2.209685917 0.000000000
H -0.559388819 1.104085746 2.143798000
H 0.563930576 -1.102778154 2.142315000
H 1.129721711 -2.202462660 0.000000000
--
0 1
C 6.901394563 1.244058476 0.000000000
C 7.216373779 0.622134657 -1.206205000
C 7.216373779 0.622134657 1.206205000
C 7.845259617 -0.621699240 -1.206954000
C 8.158015066 -1.244929310 0.000000000
C 7.845259617 -0.621699240 1.206954000
H 6.400724359 2.202462660 0.000000000
H 6.966515495 1.102778154 2.142315000
H 8.089834889 -1.104085746 2.143798000
H 8.647340194 -2.209685917 0.000000000
H 8.089834889 -1.104085746 -2.143798000
H 6.966515495 1.102778154 -2.142315000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12-0.9')] = qcdb.Molecule("""
0 1
C 0.395653045 1.059432142 -0.696139000
C 0.395653045 1.059432142 0.696139000
N -0.003263357 0.000227377 1.414480000
C -0.391847355 -1.059697307 0.696729000
C -0.391847355 -1.059697307 -0.696729000
N -0.003263357 0.000227377 -1.414480000
H 0.718983381 1.933370245 -1.247280000
H 0.718983381 1.933370245 1.247280000
H -0.713152254 -1.934362753 1.247560000
H -0.713152254 -1.934362753 -1.247560000
--
0 1
C 3.398538200 0.643131999 1.130045000
C 2.862793235 -0.642689433 1.130631000
N 2.589772167 -1.306738847 0.000000000
C 2.862793235 -0.642689433 -1.130631000
C 3.398538200 0.643131999 -1.130045000
N 3.676023139 1.305979850 0.000000000
H 3.609496345 1.152471205 2.061864000
H 2.643057716 -1.147744338 2.062399000
H 2.643057716 -1.147744338 -2.062399000
H 3.609496345 1.152471205 -2.061864000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12-1.0')] = qcdb.Molecule("""
0 1
C -1.247189000 -1.171821000 -0.696139000
C -1.247189000 -1.171821000 0.696139000
N -0.258951000 -1.723577000 1.414480000
C 0.731533000 -2.265222000 0.696729000
C 0.731533000 -2.265222000 -0.696729000
N -0.258951000 -1.723577000 -1.414480000
H -2.063436000 -0.722320000 -1.247280000
H -2.063436000 -0.722320000 1.247280000
H 1.548800000 -2.712828000 1.247560000
H 1.548800000 -2.712828000 -1.247560000
--
0 1
C -0.338003000 2.080061000 1.130045000
C 0.854025000 1.359347000 1.130631000
N 1.470179000 0.990760000 0.000000000
C 0.854025000 1.359347000 -1.130631000
C -0.338003000 2.080061000 -1.130045000
N -0.952306000 2.452884000 0.000000000
H -0.810376000 2.364303000 2.061864000
H 1.320858000 1.067061000 2.062399000
H 1.320858000 1.067061000 -2.062399000
H -0.810376000 2.364303000 -2.061864000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12-1.2')] = qcdb.Molecule("""
0 1
C 0.395653045 1.059432142 -0.696139000
C 0.395653045 1.059432142 0.696139000
N -0.003263357 0.000227377 1.414480000
C -0.391847355 -1.059697307 0.696729000
C -0.391847355 -1.059697307 -0.696729000
N -0.003263357 0.000227377 -1.414480000
H 0.718983381 1.933370245 -1.247280000
H 0.718983381 1.933370245 1.247280000
H -0.713152254 -1.934362753 1.247560000
H -0.713152254 -1.934362753 -1.247560000
--
0 1
C 4.442367465 0.643131999 1.130045000
C 3.906622500 -0.642689433 1.130631000
N 3.633601432 -1.306738847 0.000000000
C 3.906622500 -0.642689433 -1.130631000
C 4.442367465 0.643131999 -1.130045000
N 4.719852404 1.305979850 0.000000000
H 4.653325610 1.152471205 2.061864000
H 3.686886981 -1.147744338 2.062399000
H 3.686886981 -1.147744338 -2.062399000
H 4.653325610 1.152471205 -2.061864000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12-1.5')] = qcdb.Molecule("""
0 1
C 0.395653045 1.059432142 -0.696139000
C 0.395653045 1.059432142 0.696139000
N -0.003263357 0.000227377 1.414480000
C -0.391847355 -1.059697307 0.696729000
C -0.391847355 -1.059697307 -0.696729000
N -0.003263357 0.000227377 -1.414480000
H 0.718983381 1.933370245 -1.247280000
H 0.718983381 1.933370245 1.247280000
H -0.713152254 -1.934362753 1.247560000
H -0.713152254 -1.934362753 -1.247560000
--
0 1
C 5.486196730 0.643131999 1.130045000
C 4.950451765 -0.642689433 1.130631000
N 4.677430697 -1.306738847 0.000000000
C 4.950451765 -0.642689433 -1.130631000
C 5.486196730 0.643131999 -1.130045000
N 5.763681669 1.305979850 0.000000000
H 5.697154875 1.152471205 2.061864000
H 4.730716246 -1.147744338 2.062399000
H 4.730716246 -1.147744338 -2.062399000
H 5.697154875 1.152471205 -2.061864000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12-2.0')] = qcdb.Molecule("""
0 1
C 0.395653045 1.059432142 -0.696139000
C 0.395653045 1.059432142 0.696139000
N -0.003263357 0.000227377 1.414480000
C -0.391847355 -1.059697307 0.696729000
C -0.391847355 -1.059697307 -0.696729000
N -0.003263357 0.000227377 -1.414480000
H 0.718983381 1.933370245 -1.247280000
H 0.718983381 1.933370245 1.247280000
H -0.713152254 -1.934362753 1.247560000
H -0.713152254 -1.934362753 -1.247560000
--
0 1
C 7.225912172 0.643131999 1.130045000
C 6.690167207 -0.642689433 1.130631000
N 6.417146139 -1.306738847 0.000000000
C 6.690167207 -0.642689433 -1.130631000
C 7.225912172 0.643131999 -1.130045000
N 7.503397111 1.305979850 0.000000000
H 7.436870317 1.152471205 2.061864000
H 6.470431688 -1.147744338 2.062399000
H 6.470431688 -1.147744338 -2.062399000
H 7.436870317 1.152471205 -2.061864000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13-0.9')] = qcdb.Molecule("""
0 1
N -0.277905006 1.293679543 0.176141970
C -0.313143400 0.778657200 -1.090194030
H -0.556628453 1.482976305 -1.871437030
C -0.054429325 -0.522034140 -1.338280030
H -0.083339176 -0.920071815 -2.337796030
C 0.315741834 -1.403319766 -0.246380030
O 0.657066634 -2.571655559 -0.351837030
N 0.272892517 -0.783286382 1.008844970
H 0.575575188 -1.342483138 1.797579970
C 0.057676398 0.551482081 1.292935970
O 0.162197796 1.034239706 2.404014970
H -0.355882042 2.285950208 0.331021970
--
0 1
N 3.306699593 -1.293679543 0.176141970
C 3.341937987 -0.778657200 -1.090194030
H 3.585423040 -1.482976305 -1.871437030
C 3.083223911 0.522034140 -1.338280030
H 3.112133763 0.920071815 -2.337796030
C 2.713052753 1.403319766 -0.246380030
O 2.371727953 2.571655559 -0.351837030
N 2.755902070 0.783286382 1.008844970
H 2.453219399 1.342483138 1.797579970
C 2.971118189 -0.551482081 1.292935970
O 2.866596791 -1.034239706 2.404014970
H 3.384676629 -2.285950208 0.331021970
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13-1.0')] = qcdb.Molecule("""
0 1
N 2.011359000 -1.213207000 -0.098067000
C 2.025708000 -0.697180000 -1.364403000
H 2.297521000 -1.391059000 -2.145646000
C 1.714523000 0.591965000 -1.612489000
H 1.727287000 0.990847000 -2.612005000
C 1.308960000 1.457534000 -0.520589000
O 0.920593000 2.611086000 -0.626046000
N 1.376888000 0.839745000 0.734636000
H 1.051804000 1.386223000 1.523371000
C 1.645991000 -0.485211000 1.018727000
O 1.561109000 -0.971806000 2.129806000
H 2.129463000 -2.201505000 0.056813000
--
0 1
N -2.011359000 1.213207000 -0.098067000
C -2.025708000 0.697180000 -1.364403000
H -2.297521000 1.391059000 -2.145646000
C -1.714523000 -0.591965000 -1.612489000
H -1.727287000 -0.990847000 -2.612005000
C -1.308960000 -1.457534000 -0.520589000
O -0.920593000 -2.611086000 -0.626046000
N -1.376888000 -0.839745000 0.734636000
H -1.051804000 -1.386223000 1.523371000
C -1.645991000 0.485211000 1.018727000
O -1.561109000 0.971806000 2.129806000
H -2.129463000 2.201505000 0.056813000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13-1.2')] = qcdb.Molecule("""
0 1
N -0.277905006 1.293679543 0.176141970
C -0.313143400 0.778657200 -1.090194030
H -0.556628453 1.482976305 -1.871437030
C -0.054429325 -0.522034140 -1.338280030
H -0.083339176 -0.920071815 -2.337796030
C 0.315741834 -1.403319766 -0.246380030
O 0.657066634 -2.571655559 -0.351837030
N 0.272892517 -0.783286382 1.008844970
H 0.575575188 -1.342483138 1.797579970
C 0.057676398 0.551482081 1.292935970
O 0.162197796 1.034239706 2.404014970
H -0.355882042 2.285950208 0.331021970
--
0 1
N 4.316297789 -1.293679543 0.176141970
C 4.351536183 -0.778657200 -1.090194030
H 4.595021236 -1.482976305 -1.871437030
C 4.092822107 0.522034140 -1.338280030
H 4.121731959 0.920071815 -2.337796030
C 3.722650949 1.403319766 -0.246380030
O 3.381326149 2.571655559 -0.351837030
N 3.765500266 0.783286382 1.008844970
H 3.462817595 1.342483138 1.797579970
C 3.980716385 -0.551482081 1.292935970
O 3.876194987 -1.034239706 2.404014970
H 4.394274825 -2.285950208 0.331021970
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13-1.5')] = qcdb.Molecule("""
0 1
N -0.277905006 1.293679543 0.176141970
C -0.313143400 0.778657200 -1.090194030
H -0.556628453 1.482976305 -1.871437030
C -0.054429325 -0.522034140 -1.338280030
H -0.083339176 -0.920071815 -2.337796030
C 0.315741834 -1.403319766 -0.246380030
O 0.657066634 -2.571655559 -0.351837030
N 0.272892517 -0.783286382 1.008844970
H 0.575575188 -1.342483138 1.797579970
C 0.057676398 0.551482081 1.292935970
O 0.162197796 1.034239706 2.404014970
H -0.355882042 2.285950208 0.331021970
--
0 1
N 5.325895984 -1.293679543 0.176141970
C 5.361134378 -0.778657200 -1.090194030
H 5.604619431 -1.482976305 -1.871437030
C 5.102420302 0.522034140 -1.338280030
H 5.131330154 0.920071815 -2.337796030
C 4.732249144 1.403319766 -0.246380030
O 4.390924344 2.571655559 -0.351837030
N 4.775098461 0.783286382 1.008844970
H 4.472415790 1.342483138 1.797579970
C 4.990314580 -0.551482081 1.292935970
O 4.885793182 -1.034239706 2.404014970
H 5.403873020 -2.285950208 0.331021970
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13-2.0')] = qcdb.Molecule("""
0 1
N -0.277905006 1.293679543 0.176141970
C -0.313143400 0.778657200 -1.090194030
H -0.556628453 1.482976305 -1.871437030
C -0.054429325 -0.522034140 -1.338280030
H -0.083339176 -0.920071815 -2.337796030
C 0.315741834 -1.403319766 -0.246380030
O 0.657066634 -2.571655559 -0.351837030
N 0.272892517 -0.783286382 1.008844970
H 0.575575188 -1.342483138 1.797579970
C 0.057676398 0.551482081 1.292935970
O 0.162197796 1.034239706 2.404014970
H -0.355882042 2.285950208 0.331021970
--
0 1
N 7.008559644 -1.293679543 0.176141970
C 7.043798038 -0.778657200 -1.090194030
H 7.287283091 -1.482976305 -1.871437030
C 6.785083962 0.522034140 -1.338280030
H 6.813993814 0.920071815 -2.337796030
C 6.414912804 1.403319766 -0.246380030
O 6.073588004 2.571655559 -0.351837030
N 6.457762121 0.783286382 1.008844970
H 6.155079450 1.342483138 1.797579970
C 6.672978240 -0.551482081 1.292935970
O 6.568456842 -1.034239706 2.404014970
H 7.086536680 -2.285950208 0.331021970
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14-0.9')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
C -0.044485647 -1.177978626 0.743160105
C -0.010824638 -2.411208517 0.095333145
C 0.064150773 -2.466933785 -1.295623602
C 0.100950904 -1.287437054 -2.038959973
C 0.067356799 -0.053500209 -1.391376263
H -0.013797739 0.956881587 0.503348328
H -0.091346970 -1.134458005 1.822398921
H -0.039754009 -3.325680275 0.672358669
H 0.085389531 -3.424849020 -1.798373823
H 0.146442780 -1.330172544 -3.119514770
H 0.100852832 0.862456237 -1.964945566
--
0 1
H 2.717766027 -0.578056849 3.494904751
C 2.793508398 -0.571969873 2.415753956
C 2.753054336 0.633650134 1.734349558
H 2.645935858 1.567038531 2.272036098
C 2.855804852 0.624347564 0.333339655
C 2.845637545 1.633662034 -0.673499279
H 2.762013625 2.698030593 -0.533251753
C 2.976224608 0.992808148 -1.884517470
N 3.081930238 -0.360086596 -1.675422891
C 2.997750328 -0.624347564 -0.333339655
C 3.046288127 -1.839842986 0.351754941
H 3.153106953 -2.780217935 -0.172940228
C 2.941516868 -1.796211682 1.733036170
H 2.973148444 -2.718261443 2.297634930
H 3.103876306 -1.056446212 -2.398978775
H 3.012441631 1.398036276 -2.881807744
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14-1.0')] = qcdb.Molecule("""
0 1
C -0.021074000 1.531861000 -1.363935000
C -1.274679000 0.974103000 -1.607410000
C -1.378305000 -0.225698000 -2.308415000
C -0.228943000 -0.866405000 -2.768794000
C 1.024788000 -0.303517000 -2.531241000
C 1.129000000 0.896679000 -1.829983000
H 0.060074000 2.456563000 -0.809396000
H -2.165100000 1.465452000 -1.240568000
H -2.350973000 -0.661612000 -2.492670000
H -0.310342000 -1.795576000 -3.317270000
H 1.916585000 -0.794084000 -2.899394000
H 2.100035000 1.332676000 -1.640042000
--
0 1
H -2.941765000 0.895383000 2.223905000
C -2.022067000 0.425854000 1.901355000
C -0.814942000 1.074045000 2.106698000
H -0.785153000 2.044381000 2.585609000
C 0.370429000 0.449285000 1.684746000
C 1.750862000 0.803894000 1.719400000
H 2.187011000 1.699828000 2.127590000
C 2.445136000 -0.231074000 1.135331000
N 1.564646000 -1.213781000 0.755538000
C 0.286121000 -0.826949000 1.061875000
C -0.928467000 -1.485312000 0.860694000
H -0.972920000 -2.455485000 0.383401000
C -2.079285000 -0.841767000 1.287644000
H -3.038997000 -1.320385000 1.146840000
H 1.807574000 -2.036696000 0.233304000
H 3.502879000 -0.348534000 0.969523000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14-1.2')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
C -0.044485647 -1.177978626 0.743160105
C -0.010824638 -2.411208517 0.095333145
C 0.064150773 -2.466933785 -1.295623602
C 0.100950904 -1.287437054 -2.038959973
C 0.067356799 -0.053500209 -1.391376263
H -0.013797739 0.956881587 0.503348328
H -0.091346970 -1.134458005 1.822398921
H -0.039754009 -3.325680275 0.672358669
H 0.085389531 -3.424849020 -1.798373823
H 0.146442780 -1.330172544 -3.119514770
H 0.100852832 0.862456237 -1.964945566
--
0 1
H 3.693358557 -0.578056849 3.494904751
C 3.769100928 -0.571969873 2.415753956
C 3.728646866 0.633650134 1.734349558
H 3.621528388 1.567038531 2.272036098
C 3.831397382 0.624347564 0.333339655
C 3.821230075 1.633662034 -0.673499279
H 3.737606155 2.698030593 -0.533251753
C 3.951817138 0.992808148 -1.884517470
N 4.057522768 -0.360086596 -1.675422891
C 3.973342858 -0.624347564 -0.333339655
C 4.021880657 -1.839842986 0.351754941
H 4.128699483 -2.780217935 -0.172940228
C 3.917109398 -1.796211682 1.733036170
H 3.948740974 -2.718261443 2.297634930
H 4.079468836 -1.056446212 -2.398978775
H 3.988034161 1.398036276 -2.881807744
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14-1.5')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
C -0.044485647 -1.177978626 0.743160105
C -0.010824638 -2.411208517 0.095333145
C 0.064150773 -2.466933785 -1.295623602
C 0.100950904 -1.287437054 -2.038959973
C 0.067356799 -0.053500209 -1.391376263
H -0.013797739 0.956881587 0.503348328
H -0.091346970 -1.134458005 1.822398921
H -0.039754009 -3.325680275 0.672358669
H 0.085389531 -3.424849020 -1.798373823
H 0.146442780 -1.330172544 -3.119514770
H 0.100852832 0.862456237 -1.964945566
--
0 1
H 4.668951087 -0.578056849 3.494904751
C 4.744693458 -0.571969873 2.415753956
C 4.704239396 0.633650134 1.734349558
H 4.597120918 1.567038531 2.272036098
C 4.806989912 0.624347564 0.333339655
C 4.796822605 1.633662034 -0.673499279
H 4.713198685 2.698030593 -0.533251753
C 4.927409668 0.992808148 -1.884517470
N 5.033115298 -0.360086596 -1.675422891
C 4.948935388 -0.624347564 -0.333339655
C 4.997473187 -1.839842986 0.351754941
H 5.104292013 -2.780217935 -0.172940228
C 4.892701928 -1.796211682 1.733036170
H 4.924333504 -2.718261443 2.297634930
H 5.055061366 -1.056446212 -2.398978775
H 4.963626691 1.398036276 -2.881807744
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14-2.0')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 0.000000000
C -0.044485647 -1.177978626 0.743160105
C -0.010824638 -2.411208517 0.095333145
C 0.064150773 -2.466933785 -1.295623602
C 0.100950904 -1.287437054 -2.038959973
C 0.067356799 -0.053500209 -1.391376263
H -0.013797739 0.956881587 0.503348328
H -0.091346970 -1.134458005 1.822398921
H -0.039754009 -3.325680275 0.672358669
H 0.085389531 -3.424849020 -1.798373823
H 0.146442780 -1.330172544 -3.119514770
H 0.100852832 0.862456237 -1.964945566
--
0 1
H 6.294938637 -0.578056849 3.494904751
C 6.370681008 -0.571969873 2.415753956
C 6.330226946 0.633650134 1.734349558
H 6.223108468 1.567038531 2.272036098
C 6.432977462 0.624347564 0.333339655
C 6.422810155 1.633662034 -0.673499279
H 6.339186235 2.698030593 -0.533251753
C 6.553397218 0.992808148 -1.884517470
N 6.659102848 -0.360086596 -1.675422891
C 6.574922938 -0.624347564 -0.333339655
C 6.623460737 -1.839842986 0.351754941
H 6.730279563 -2.780217935 -0.172940228
C 6.518689478 -1.796211682 1.733036170
H 6.550321054 -2.718261443 2.297634930
H 6.681048916 -1.056446212 -2.398978775
H 6.589614241 1.398036276 -2.881807744
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15-0.9')] = qcdb.Molecule("""
0 1
N 0.067390759 1.213806097 -1.171192513
C -0.034440687 0.160916029 -2.035179690
H -0.037909102 0.307694674 -3.102311444
N -0.122286497 -1.014214485 -1.431659388
C -0.061278153 -0.690156063 -0.097738525
C -0.083866474 -1.480006435 1.065121981
N -0.207551291 -2.830167865 1.008466281
H 0.020236002 -3.318294510 1.858492777
H 0.100823981 -3.261839820 0.151791829
N -0.015107287 -0.872886238 2.254820437
C 0.095534438 0.468473589 2.286592142
H 0.148443656 0.902433537 3.277055537
N 0.150791629 1.330817541 1.268232413
C 0.061278153 0.690156063 0.097738525
H 0.213123816 2.178532043 -1.420082564
--
0 1
N 2.995457244 1.318912569 0.115169333
C 3.033773997 0.544134785 1.248235461
H 3.166936649 1.084216460 2.174491246
C 2.913123372 -0.802036026 1.213306349
C 2.965573998 -1.664227788 2.429380731
H 2.009790775 -2.161867438 2.585037720
H 3.726416066 -2.435033978 2.315487569
H 3.189128467 -1.070628980 3.313538183
C 2.718644614 -1.440326451 -0.080379664
O 2.558245305 -2.640081851 -0.255033817
N 2.729839539 -0.560837886 -1.168484485
H 2.554150647 -0.977998743 -2.072617562
C 2.814781928 0.814169728 -1.152798148
O 2.732113465 1.513854058 -2.149163262
H 3.033823338 2.322516737 0.179118562
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15-1.0')] = qcdb.Molecule("""
0 1
N 0.279301000 2.406839000 -0.605752000
C -1.084857000 2.445746000 -0.551161000
H -1.659440000 3.023029000 -1.256090000
N -1.597712000 1.717988000 0.428754000
C -0.489725000 1.171436000 1.030191000
C -0.346137000 0.291471000 2.117234000
N -1.418709000 -0.167777000 2.810144000
H -1.238875000 -0.959480000 3.404758000
H -2.291873000 -0.178822000 2.307362000
N 0.885763000 -0.070076000 2.491949000
C 1.935235000 0.407288000 1.796802000
H 2.906033000 0.078841000 2.145818000
N 1.940978000 1.224202000 0.740220000
C 0.695219000 1.577986000 0.406398000
H 0.861007000 2.829804000 -1.310450000
--
0 1
N 1.275461000 -0.647899000 -1.977910000
C 1.413053000 -1.553685000 -0.955067000
H 2.425877000 -1.867078000 -0.746878000
C 0.357598000 -2.023950000 -0.253057000
C 0.482129000 -3.017949000 0.852122000
H 0.175770000 -2.575607000 1.798628000
H -0.160169000 -3.877041000 0.663950000
H 1.511244000 -3.357277000 0.951366000
C -0.968471000 -1.529811000 -0.593979000
O -2.002928000 -1.839696000 -0.019945000
N -0.995692000 -0.638387000 -1.672042000
H -1.901406000 -0.250172000 -1.898576000
C 0.068470000 -0.119176000 -2.376376000
O -0.039788000 0.722701000 -3.253108000
H 2.085329000 -0.276018000 -2.445458000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15-1.2')] = qcdb.Molecule("""
0 1
N 0.067390759 1.213806097 -1.171192513
C -0.034440687 0.160916029 -2.035179690
H -0.037909102 0.307694674 -3.102311444
N -0.122286497 -1.014214485 -1.431659388
C -0.061278153 -0.690156063 -0.097738525
C -0.083866474 -1.480006435 1.065121981
N -0.207551291 -2.830167865 1.008466281
H 0.020236002 -3.318294510 1.858492777
H 0.100823981 -3.261839820 0.151791829
N -0.015107287 -0.872886238 2.254820437
C 0.095534438 0.468473589 2.286592142
H 0.148443656 0.902433537 3.277055537
N 0.150791629 1.330817541 1.268232413
C 0.061278153 0.690156063 0.097738525
H 0.213123816 2.178532043 -1.420082564
--
0 1
N 3.951238365 1.318912569 0.115169333
C 3.989555118 0.544134785 1.248235461
H 4.122717770 1.084216460 2.174491246
C 3.868904493 -0.802036026 1.213306349
C 3.921355119 -1.664227788 2.429380731
H 2.965571896 -2.161867438 2.585037720
H 4.682197187 -2.435033978 2.315487569
H 4.144909588 -1.070628980 3.313538183
C 3.674425735 -1.440326451 -0.080379664
O 3.514026426 -2.640081851 -0.255033817
N 3.685620660 -0.560837886 -1.168484485
H 3.509931768 -0.977998743 -2.072617562
C 3.770563049 0.814169728 -1.152798148
O 3.687894586 1.513854058 -2.149163262
H 3.989604459 2.322516737 0.179118562
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15-1.5')] = qcdb.Molecule("""
0 1
N 0.067390759 1.213806097 -1.171192513
C -0.034440687 0.160916029 -2.035179690
H -0.037909102 0.307694674 -3.102311444
N -0.122286497 -1.014214485 -1.431659388
C -0.061278153 -0.690156063 -0.097738525
C -0.083866474 -1.480006435 1.065121981
N -0.207551291 -2.830167865 1.008466281
H 0.020236002 -3.318294510 1.858492777
H 0.100823981 -3.261839820 0.151791829
N -0.015107287 -0.872886238 2.254820437
C 0.095534438 0.468473589 2.286592142
H 0.148443656 0.902433537 3.277055537
N 0.150791629 1.330817541 1.268232413
C 0.061278153 0.690156063 0.097738525
H 0.213123816 2.178532043 -1.420082564
--
0 1
N 4.907019487 1.318912569 0.115169333
C 4.945336240 0.544134785 1.248235461
H 5.078498892 1.084216460 2.174491246
C 4.824685615 -0.802036026 1.213306349
C 4.877136241 -1.664227788 2.429380731
H 3.921353018 -2.161867438 2.585037720
H 5.637978309 -2.435033978 2.315487569
H 5.100690710 -1.070628980 3.313538183
C 4.630206857 -1.440326451 -0.080379664
O 4.469807548 -2.640081851 -0.255033817
N 4.641401782 -0.560837886 -1.168484485
H 4.465712890 -0.977998743 -2.072617562
C 4.726344171 0.814169728 -1.152798148
O 4.643675708 1.513854058 -2.149163262
H 4.945385581 2.322516737 0.179118562
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15-2.0')] = qcdb.Molecule("""
0 1
N 0.067390759 1.213806097 -1.171192513
C -0.034440687 0.160916029 -2.035179690
H -0.037909102 0.307694674 -3.102311444
N -0.122286497 -1.014214485 -1.431659388
C -0.061278153 -0.690156063 -0.097738525
C -0.083866474 -1.480006435 1.065121981
N -0.207551291 -2.830167865 1.008466281
H 0.020236002 -3.318294510 1.858492777
H 0.100823981 -3.261839820 0.151791829
N -0.015107287 -0.872886238 2.254820437
C 0.095534438 0.468473589 2.286592142
H 0.148443656 0.902433537 3.277055537
N 0.150791629 1.330817541 1.268232413
C 0.061278153 0.690156063 0.097738525
H 0.213123816 2.178532043 -1.420082564
--
0 1
N 6.499988023 1.318912569 0.115169333
C 6.538304776 0.544134785 1.248235461
H 6.671467428 1.084216460 2.174491246
C 6.417654151 -0.802036026 1.213306349
C 6.470104777 -1.664227788 2.429380731
H 5.514321554 -2.161867438 2.585037720
H 7.230946845 -2.435033978 2.315487569
H 6.693659246 -1.070628980 3.313538183
C 6.223175393 -1.440326451 -0.080379664
O 6.062776084 -2.640081851 -0.255033817
N 6.234370318 -0.560837886 -1.168484485
H 6.058681426 -0.977998743 -2.072617562
C 6.319312707 0.814169728 -1.152798148
O 6.236644244 1.513854058 -2.149163262
H 6.538354117 2.322516737 0.179118562
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16-0.9')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.667578000 0.000000000
C 0.000000000 0.667578000 0.000000000
H -0.001526000 -1.232253000 -0.923621000
H -0.001526000 -1.232253000 0.923621000
H -0.001526000 1.232253000 0.923621000
H -0.001526000 1.232253000 -0.923621000
--
0 1
C 4.749960900 0.000000000 0.000000000
C 3.542697900 0.000000000 0.000000000
H 2.476809900 0.000000000 0.000000000
H 5.813386900 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16-1.0')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.667578000 -2.124659000
C 0.000000000 0.667578000 -2.124659000
H 0.923621000 -1.232253000 -2.126185000
H -0.923621000 -1.232253000 -2.126185000
H -0.923621000 1.232253000 -2.126185000
H 0.923621000 1.232253000 -2.126185000
--
0 1
C 0.000000000 0.000000000 2.900503000
C 0.000000000 0.000000000 1.693240000
H 0.000000000 0.000000000 0.627352000
H 0.000000000 0.000000000 3.963929000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16-1.2')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.667578000 0.000000000
C 0.000000000 0.667578000 0.000000000
H -0.001526000 -1.232253000 -0.923621000
H -0.001526000 -1.232253000 0.923621000
H -0.001526000 1.232253000 0.923621000
H -0.001526000 1.232253000 -0.923621000
--
0 1
C 5.575564200 0.000000000 0.000000000
C 4.368301200 0.000000000 0.000000000
H 3.302413200 0.000000000 0.000000000
H 6.638990200 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16-1.5')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.667578000 0.000000000
C 0.000000000 0.667578000 0.000000000
H -0.001526000 -1.232253000 -0.923621000
H -0.001526000 -1.232253000 0.923621000
H -0.001526000 1.232253000 0.923621000
H -0.001526000 1.232253000 -0.923621000
--
0 1
C 6.401167500 0.000000000 0.000000000
C 5.193904500 0.000000000 0.000000000
H 4.128016500 0.000000000 0.000000000
H 7.464593500 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16-2.0')] = qcdb.Molecule("""
0 1
C 0.000000000 -0.667578000 0.000000000
C 0.000000000 0.667578000 0.000000000
H -0.001526000 -1.232253000 -0.923621000
H -0.001526000 -1.232253000 0.923621000
H -0.001526000 1.232253000 0.923621000
H -0.001526000 1.232253000 -0.923621000
--
0 1
C 7.777173000 0.000000000 0.000000000
C 6.569910000 0.000000000 0.000000000
H 5.504022000 0.000000000 0.000000000
H 8.840599000 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17-0.9')] = qcdb.Molecule("""
0 1
C 0.068736158 1.392383840 -1.207543000
C 0.000000000 0.000000000 -1.207904000
C -0.034807303 -0.696435878 0.000000000
C 0.000000000 0.000000000 1.207904000
C 0.068736158 1.392383840 1.207543000
C 0.102581137 2.088313342 0.000000000
H 0.096477114 1.931999350 -2.144148000
H -0.022815407 -0.540397951 -2.144055000
H -0.086694943 -1.776497744 0.000000000
H -0.022815407 -0.540397951 2.144055000
H 0.096477114 1.931999350 2.144148000
H 0.153430751 3.168579194 0.000000000
--
0 1
O 3.175061618 0.124369730 0.000000000
H 3.265337861 1.079117991 0.000000000
H 2.221117117 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17-1.0')] = qcdb.Molecule("""
0 1
C 0.780612000 -0.609888000 -1.207543000
C 0.478404000 0.751041000 -1.207904000
C 0.327659000 1.431857000 0.000000000
C 0.478404000 0.751041000 1.207904000
C 0.780612000 -0.609888000 1.207543000
C 0.932151000 -1.289961000 0.000000000
H 0.896669000 -1.137605000 -2.144148000
H 0.357390000 1.278209000 -2.144055000
H 0.091859000 2.487141000 0.000000000
H 0.357390000 1.278209000 2.144055000
H 0.896669000 -1.137605000 2.144148000
H 1.169006000 -2.345167000 0.000000000
--
0 1
O -2.788527000 -0.274485000 0.000000000
H -2.622911000 -1.219083000 0.000000000
H -1.901510000 0.097911000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17-1.2')] = qcdb.Molecule("""
0 1
C 0.068736158 1.392383840 -1.207543000
C 0.000000000 0.000000000 -1.207904000
C -0.034807303 -0.696435878 0.000000000
C 0.000000000 0.000000000 1.207904000
C 0.068736158 1.392383840 1.207543000
C 0.102581137 2.088313342 0.000000000
H 0.096477114 1.931999350 -2.144148000
H -0.022815407 -0.540397951 -2.144055000
H -0.086694943 -1.776497744 0.000000000
H -0.022815407 -0.540397951 2.144055000
H 0.096477114 1.931999350 2.144148000
H 0.153430751 3.168579194 0.000000000
--
0 1
O 3.915433991 0.124369730 0.000000000
H 4.005710234 1.079117991 0.000000000
H 2.961489490 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17-1.5')] = qcdb.Molecule("""
0 1
C 0.068736158 1.392383840 -1.207543000
C 0.000000000 0.000000000 -1.207904000
C -0.034807303 -0.696435878 0.000000000
C 0.000000000 0.000000000 1.207904000
C 0.068736158 1.392383840 1.207543000
C 0.102581137 2.088313342 0.000000000
H 0.096477114 1.931999350 -2.144148000
H -0.022815407 -0.540397951 -2.144055000
H -0.086694943 -1.776497744 0.000000000
H -0.022815407 -0.540397951 2.144055000
H 0.096477114 1.931999350 2.144148000
H 0.153430751 3.168579194 0.000000000
--
0 1
O 4.655806363 0.124369730 0.000000000
H 4.746082606 1.079117991 0.000000000
H 3.701861862 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17-2.0')] = qcdb.Molecule("""
0 1
C 0.068736158 1.392383840 -1.207543000
C 0.000000000 0.000000000 -1.207904000
C -0.034807303 -0.696435878 0.000000000
C 0.000000000 0.000000000 1.207904000
C 0.068736158 1.392383840 1.207543000
C 0.102581137 2.088313342 0.000000000
H 0.096477114 1.931999350 -2.144148000
H -0.022815407 -0.540397951 -2.144055000
H -0.086694943 -1.776497744 0.000000000
H -0.022815407 -0.540397951 2.144055000
H 0.096477114 1.931999350 2.144148000
H 0.153430751 3.168579194 0.000000000
--
0 1
O 5.889760317 0.124369730 0.000000000
H 5.980036560 1.079117991 0.000000000
H 4.935815816 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18-0.9')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 -1.207108000
C -0.094723910 -0.690687169 0.000000000
C 0.000000000 0.000000000 1.207108000
C 0.189293052 1.381194838 1.207073000
C 0.284209467 2.071771374 0.000000000
C 0.189293052 1.381194838 -1.207073000
H -0.070884435 -0.536454706 -2.143289000
H -0.235335157 -1.762640796 0.000000000
H -0.070884435 -0.536454706 2.143289000
H 0.262434233 1.916830087 2.143695000
H 0.430373810 3.143257869 0.000000000
H 0.262434233 1.916830087 -2.143695000
--
0 1
N 3.322432676 -0.175158455 0.000000000
H 3.685723470 0.316960994 -0.806073000
H 3.685723470 0.316960994 0.806073000
H 2.324338249 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18-1.0')] = qcdb.Molecule("""
0 1
C -0.739281000 0.515879000 -1.207108000
C -1.426144000 0.396545000 0.000000000
C -0.739281000 0.515879000 1.207108000
C 0.634227000 0.754640000 1.207073000
C 1.321043000 0.873757000 0.000000000
C 0.634227000 0.754640000 -1.207073000
H -1.271950000 0.420632000 -2.143289000
H -2.490220000 0.205238000 0.000000000
H -1.271950000 0.420632000 2.143289000
H 1.166800000 0.847488000 2.143695000
H 2.386359000 1.059631000 0.000000000
H 1.166800000 0.847488000 -2.143695000
--
0 1
N 0.180393000 -2.949123000 0.000000000
H 0.759549000 -3.145948000 -0.806073000
H 0.759549000 -3.145948000 0.806073000
H 0.044417000 -1.944940000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18-1.2')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 -1.207108000
C -0.094723910 -0.690687169 0.000000000
C 0.000000000 0.000000000 1.207108000
C 0.189293052 1.381194838 1.207073000
C 0.284209467 2.071771374 0.000000000
C 0.189293052 1.381194838 -1.207073000
H -0.070884435 -0.536454706 -2.143289000
H -0.235335157 -1.762640796 0.000000000
H -0.070884435 -0.536454706 2.143289000
H 0.262434233 1.916830087 2.143695000
H 0.430373810 3.143257869 0.000000000
H 0.262434233 1.916830087 -2.143695000
--
0 1
N 4.097212092 -0.175158455 0.000000000
H 4.460502886 0.316960994 -0.806073000
H 4.460502886 0.316960994 0.806073000
H 3.099117665 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18-1.5')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 -1.207108000
C -0.094723910 -0.690687169 0.000000000
C 0.000000000 0.000000000 1.207108000
C 0.189293052 1.381194838 1.207073000
C 0.284209467 2.071771374 0.000000000
C 0.189293052 1.381194838 -1.207073000
H -0.070884435 -0.536454706 -2.143289000
H -0.235335157 -1.762640796 0.000000000
H -0.070884435 -0.536454706 2.143289000
H 0.262434233 1.916830087 2.143695000
H 0.430373810 3.143257869 0.000000000
H 0.262434233 1.916830087 -2.143695000
--
0 1
N 4.871991508 -0.175158455 0.000000000
H 5.235282302 0.316960994 -0.806073000
H 5.235282302 0.316960994 0.806073000
H 3.873897081 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18-2.0')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 -1.207108000
C -0.094723910 -0.690687169 0.000000000
C 0.000000000 0.000000000 1.207108000
C 0.189293052 1.381194838 1.207073000
C 0.284209467 2.071771374 0.000000000
C 0.189293052 1.381194838 -1.207073000
H -0.070884435 -0.536454706 -2.143289000
H -0.235335157 -1.762640796 0.000000000
H -0.070884435 -0.536454706 2.143289000
H 0.262434233 1.916830087 2.143695000
H 0.430373810 3.143257869 0.000000000
H 0.262434233 1.916830087 -2.143695000
--
0 1
N 6.163290535 -0.175158455 0.000000000
H 6.526581329 0.316960994 -0.806073000
H 6.526581329 0.316960994 0.806073000
H 5.165196108 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19-0.9')] = qcdb.Molecule("""
0 1
C -0.023100946 0.696978594 1.207702000
C -0.046160335 1.393808033 0.000000000
C -0.023100946 0.696978594 -1.207702000
C 0.023085816 -0.696895106 -1.207865000
C 0.046190594 -1.393975010 0.000000000
C 0.023085816 -0.696895106 1.207865000
H -0.038624622 1.237369182 2.144051000
H -0.079148681 2.474493071 0.000000000
H -0.038624622 1.237369182 -2.144051000
H 0.042839694 -1.237142510 -2.144256000
H 0.083401415 -2.474593580 0.000000000
H 0.042839694 -1.237142510 2.144256000
--
0 1
N 4.308034683 0.304536859 0.000000000
C 3.151543935 0.145763954 0.000000000
H 2.093660645 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19-1.0')] = qcdb.Molecule("""
0 1
C -0.709774000 -0.990423000 1.207702000
C -1.406534000 -0.965353000 0.000000000
C -0.709774000 -0.990423000 -1.207702000
C 0.683965000 -1.040510000 -1.207865000
C 1.380978000 -1.065552000 0.000000000
C 0.683965000 -1.040510000 1.207865000
H -1.249948000 -0.968628000 2.144051000
H -2.486920000 -0.923706000 0.000000000
H -1.249948000 -0.968628000 -2.144051000
H 1.224288000 -1.058075000 -2.144256000
H 2.461589000 -1.102982000 0.000000000
H 1.224288000 -1.058075000 2.144256000
--
0 1
N -0.003412000 3.535393000 0.000000000
C 0.075196000 2.370704000 0.000000000
H 0.147629000 1.305285000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19-1.2')] = qcdb.Molecule("""
0 1
C -0.023100946 0.696978594 1.207702000
C -0.046160335 1.393808033 0.000000000
C -0.023100946 0.696978594 -1.207702000
C 0.023085816 -0.696895106 -1.207865000
C 0.046190594 -1.393975010 0.000000000
C 0.023085816 -0.696895106 1.207865000
H -0.038624622 1.237369182 2.144051000
H -0.079148681 2.474493071 0.000000000
H -0.038624622 1.237369182 -2.144051000
H 0.042839694 -1.237142510 -2.144256000
H 0.083401415 -2.474593580 0.000000000
H 0.042839694 -1.237142510 2.144256000
--
0 1
N 5.005921565 0.304536859 0.000000000
C 3.849430817 0.145763954 0.000000000
H 2.791547527 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19-1.5')] = qcdb.Molecule("""
0 1
C -0.023100946 0.696978594 1.207702000
C -0.046160335 1.393808033 0.000000000
C -0.023100946 0.696978594 -1.207702000
C 0.023085816 -0.696895106 -1.207865000
C 0.046190594 -1.393975010 0.000000000
C 0.023085816 -0.696895106 1.207865000
H -0.038624622 1.237369182 2.144051000
H -0.079148681 2.474493071 0.000000000
H -0.038624622 1.237369182 -2.144051000
H 0.042839694 -1.237142510 -2.144256000
H 0.083401415 -2.474593580 0.000000000
H 0.042839694 -1.237142510 2.144256000
--
0 1
N 5.703808447 0.304536859 0.000000000
C 4.547317699 0.145763954 0.000000000
H 3.489434409 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19-2.0')] = qcdb.Molecule("""
0 1
C -0.023100946 0.696978594 1.207702000
C -0.046160335 1.393808033 0.000000000
C -0.023100946 0.696978594 -1.207702000
C 0.023085816 -0.696895106 -1.207865000
C 0.046190594 -1.393975010 0.000000000
C 0.023085816 -0.696895106 1.207865000
H -0.038624622 1.237369182 2.144051000
H -0.079148681 2.474493071 0.000000000
H -0.038624622 1.237369182 -2.144051000
H 0.042839694 -1.237142510 -2.144256000
H 0.083401415 -2.474593580 0.000000000
H 0.042839694 -1.237142510 2.144256000
--
0 1
N 6.866953250 0.304536859 0.000000000
C 5.710462502 0.145763954 0.000000000
H 4.652579212 0.000000000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20-0.9')] = qcdb.Molecule("""
0 1
C -1.080615000 0.000000000 0.000000000
C -1.779254000 -1.206008000 0.000000000
C -3.173171000 -1.207177000 0.000000000
C -3.870155000 0.000000000 0.000000000
C -3.173171000 1.207177000 0.000000000
C -1.779254000 1.206008000 0.000000000
H 0.000000000 0.000000000 0.000000000
H -1.236002000 -2.141639000 0.000000000
H -3.714575000 -2.143566000 0.000000000
H -4.951730000 0.000000000 0.000000000
H -3.714575000 2.143566000 0.000000000
H -1.236002000 2.141639000 0.000000000
--
0 1
C 2.189283067 0.000000000 -1.394063000
C 2.189759067 1.207238000 -0.697047000
C 2.189759067 1.207238000 0.697047000
C 2.189283067 0.000000000 1.394063000
C 2.189759067 -1.207238000 0.697047000
C 2.189759067 -1.207238000 -0.697047000
H 2.185453067 0.000000000 -2.475399000
H 2.188807067 2.143565000 -1.238232000
H 2.188807067 2.143565000 1.238232000
H 2.185453067 0.000000000 2.475399000
H 2.188807067 -2.143565000 1.238232000
H 2.188807067 -2.143565000 -1.238232000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20-1.0')] = qcdb.Molecule("""
0 1
C 0.000000000 0.000000000 1.059035000
C 0.000000000 -1.206008000 1.757674000
C 0.000000000 -1.207177000 3.151591000
C 0.000000000 0.000000000 3.848575000
C 0.000000000 1.207177000 3.151591000
C 0.000000000 1.206008000 1.757674000
H 0.000000000 0.000000000 -0.021580000
H 0.000000000 -2.141639000 1.214422000
H 0.000000000 -2.143566000 3.692995000
H 0.000000000 0.000000000 4.930150000
H 0.000000000 2.143566000 3.692995000
H 0.000000000 2.141639000 1.214422000
--
0 1
C -1.394063000 0.000000000 -2.454152000
C -0.697047000 1.207238000 -2.454628000
C 0.697047000 1.207238000 -2.454628000
C 1.394063000 0.000000000 -2.454152000
C 0.697047000 -1.207238000 -2.454628000
C -0.697047000 -1.207238000 -2.454628000
H -2.475399000 0.000000000 -2.450322000
H -1.238232000 2.143565000 -2.453676000
H 1.238232000 2.143565000 -2.453676000
H 2.475399000 0.000000000 -2.450322000
H 1.238232000 -2.143565000 -2.453676000
H -1.238232000 -2.143565000 -2.453676000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20-1.2')] = qcdb.Molecule("""
0 1
C -1.080615000 0.000000000 0.000000000
C -1.779254000 -1.206008000 0.000000000
C -3.173171000 -1.207177000 0.000000000
C -3.870155000 0.000000000 0.000000000
C -3.173171000 1.207177000 0.000000000
C -1.779254000 1.206008000 0.000000000
H 0.000000000 0.000000000 0.000000000
H -1.236002000 -2.141639000 0.000000000
H -3.714575000 -2.143566000 0.000000000
H -4.951730000 0.000000000 0.000000000
H -3.714575000 2.143566000 0.000000000
H -1.236002000 2.141639000 0.000000000
--
0 1
C 2.919149867 0.000000000 -1.394063000
C 2.919625867 1.207238000 -0.697047000
C 2.919625867 1.207238000 0.697047000
C 2.919149867 0.000000000 1.394063000
C 2.919625867 -1.207238000 0.697047000
C 2.919625867 -1.207238000 -0.697047000
H 2.915319867 0.000000000 -2.475399000
H 2.918673867 2.143565000 -1.238232000
H 2.918673867 2.143565000 1.238232000
H 2.915319867 0.000000000 2.475399000
H 2.918673867 -2.143565000 1.238232000
H 2.918673867 -2.143565000 -1.238232000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20-1.5')] = qcdb.Molecule("""
0 1
C -1.080615000 0.000000000 0.000000000
C -1.779254000 -1.206008000 0.000000000
C -3.173171000 -1.207177000 0.000000000
C -3.870155000 0.000000000 0.000000000
C -3.173171000 1.207177000 0.000000000
C -1.779254000 1.206008000 0.000000000
H 0.000000000 0.000000000 0.000000000
H -1.236002000 -2.141639000 0.000000000
H -3.714575000 -2.143566000 0.000000000
H -4.951730000 0.000000000 0.000000000
H -3.714575000 2.143566000 0.000000000
H -1.236002000 2.141639000 0.000000000
--
0 1
C 3.649016667 0.000000000 -1.394063000
C 3.649492667 1.207238000 -0.697047000
C 3.649492667 1.207238000 0.697047000
C 3.649016667 0.000000000 1.394063000
C 3.649492667 -1.207238000 0.697047000
C 3.649492667 -1.207238000 -0.697047000
H 3.645186667 0.000000000 -2.475399000
H 3.648540667 2.143565000 -1.238232000
H 3.648540667 2.143565000 1.238232000
H 3.645186667 0.000000000 2.475399000
H 3.648540667 -2.143565000 1.238232000
H 3.648540667 -2.143565000 -1.238232000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20-2.0')] = qcdb.Molecule("""
0 1
C -1.080615000 0.000000000 0.000000000
C -1.779254000 -1.206008000 0.000000000
C -3.173171000 -1.207177000 0.000000000
C -3.870155000 0.000000000 0.000000000
C -3.173171000 1.207177000 0.000000000
C -1.779254000 1.206008000 0.000000000
H 0.000000000 0.000000000 0.000000000
H -1.236002000 -2.141639000 0.000000000
H -3.714575000 -2.143566000 0.000000000
H -4.951730000 0.000000000 0.000000000
H -3.714575000 2.143566000 0.000000000
H -1.236002000 2.141639000 0.000000000
--
0 1
C 4.865461333 0.000000000 -1.394063000
C 4.865937333 1.207238000 -0.697047000
C 4.865937333 1.207238000 0.697047000
C 4.865461333 0.000000000 1.394063000
C 4.865937333 -1.207238000 0.697047000
C 4.865937333 -1.207238000 -0.697047000
H 4.861631333 0.000000000 -2.475399000
H 4.864985333 2.143565000 -1.238232000
H 4.864985333 2.143565000 1.238232000
H 4.861631333 0.000000000 2.475399000
H 4.864985333 -2.143565000 1.238232000
H 4.864985333 -2.143565000 -1.238232000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21-0.9')] = qcdb.Molecule("""
0 1
C -0.052652077 -1.393225783 0.000000000
C -0.025543347 -0.696940104 -1.208292000
C 0.026348254 0.696724226 -1.208365000
C 0.051042263 1.393657541 0.000000000
C 0.026348254 0.696724226 1.208365000
C -0.025543347 -0.696940104 1.208292000
H -0.097430661 -2.473655966 0.000000000
H -0.040509756 -1.237360068 -2.144590000
H 0.050955575 1.236531293 -2.144838000
H 0.089657645 2.474412421 0.000000000
H 0.050955575 1.236531293 2.144838000
H -0.040509756 -1.237360068 2.144590000
--
0 1
H 2.007797424 0.000000000 0.000000000
N 3.015114828 0.005056388 0.000000000
C 3.796769012 1.132604937 0.000000000
C 5.125653739 0.772354616 0.000000000
C 5.167047225 -0.653193161 0.000000000
C 3.817202589 -1.104920876 0.000000000
C 3.482542920 -2.462094972 0.000000000
C 4.524735226 -3.376178892 0.000000000
C 5.869058665 -2.951641292 0.000000000
C 6.199398544 -1.606705567 0.000000000
H 3.343074787 2.109594763 0.000000000
H 5.961043541 1.451489921 0.000000000
H 2.450153978 -2.785730808 0.000000000
H 4.303017780 -4.434822780 0.000000000
H 6.655123584 -3.694570139 0.000000000
H 7.235724321 -1.294593877 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21-1.0')] = qcdb.Molecule("""
0 1
C 2.511900000 1.625015000 0.000000000
C 2.713009000 0.957854000 -1.208292000
C 3.117782000 -0.376744000 -1.208365000
C 3.321385000 -1.043731000 0.000000000
C 3.117782000 -0.376744000 1.208365000
C 2.713009000 0.957854000 1.208292000
H 2.202404000 2.661136000 0.000000000
H 2.551176000 1.473691000 -2.144590000
H 3.270300000 -0.895141000 -2.144838000
H 3.636814000 -2.078152000 0.000000000
H 3.270300000 -0.895141000 2.144838000
H 2.551176000 1.473691000 2.144590000
--
0 1
H 0.806524000 -0.435887000 0.000000000
N -0.144241000 -0.768693000 0.000000000
C -0.516112000 -2.089322000 0.000000000
C -1.889876000 -2.181449000 0.000000000
C -2.393232000 -0.847083000 0.000000000
C -1.264065000 0.019589000 0.000000000
C -1.389600000 1.411767000 0.000000000
C -2.672650000 1.936645000 0.000000000
C -3.805451000 1.097479000 0.000000000
C -3.679817000 -0.281721000 0.000000000
H 0.231002000 -2.865317000 0.000000000
H -2.458576000 -3.095605000 0.000000000
H -0.518873000 2.053952000 0.000000000
H -2.807757000 3.009786000 0.000000000
H -4.790599000 1.543937000 0.000000000
H -4.558019000 -0.914292000 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21-1.2')] = qcdb.Molecule("""
0 1
C -0.052652077 -1.393225783 0.000000000
C -0.025543347 -0.696940104 -1.208292000
C 0.026348254 0.696724226 -1.208365000
C 0.051042263 1.393657541 0.000000000
C 0.026348254 0.696724226 1.208365000
C -0.025543347 -0.696940104 1.208292000
H -0.097430661 -2.473655966 0.000000000
H -0.040509756 -1.237360068 -2.144590000
H 0.050955575 1.236531293 -2.144838000
H 0.089657645 2.474412421 0.000000000
H 0.050955575 1.236531293 2.144838000
H -0.040509756 -1.237360068 2.144590000
--
0 1
H 2.677063232 0.000000000 0.000000000
N 3.684380636 0.005056388 0.000000000
C 4.466034820 1.132604937 0.000000000
C 5.794919547 0.772354616 0.000000000
C 5.836313033 -0.653193161 0.000000000
C 4.486468397 -1.104920876 0.000000000
C 4.151808728 -2.462094972 0.000000000
C 5.194001034 -3.376178892 0.000000000
C 6.538324473 -2.951641292 0.000000000
C 6.868664352 -1.606705567 0.000000000
H 4.012340595 2.109594763 0.000000000
H 6.630309349 1.451489921 0.000000000
H 3.119419786 -2.785730808 0.000000000
H 4.972283588 -4.434822780 0.000000000
H 7.324389392 -3.694570139 0.000000000
H 7.904990129 -1.294593877 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21-1.5')] = qcdb.Molecule("""
0 1
C -0.052652077 -1.393225783 0.000000000
C -0.025543347 -0.696940104 -1.208292000
C 0.026348254 0.696724226 -1.208365000
C 0.051042263 1.393657541 0.000000000
C 0.026348254 0.696724226 1.208365000
C -0.025543347 -0.696940104 1.208292000
H -0.097430661 -2.473655966 0.000000000
H -0.040509756 -1.237360068 -2.144590000
H 0.050955575 1.236531293 -2.144838000
H 0.089657645 2.474412421 0.000000000
H 0.050955575 1.236531293 2.144838000
H -0.040509756 -1.237360068 2.144590000
--
0 1
H 3.346329040 0.000000000 0.000000000
N 4.353646444 0.005056388 0.000000000
C 5.135300628 1.132604937 0.000000000
C 6.464185355 0.772354616 0.000000000
C 6.505578841 -0.653193161 0.000000000
C 5.155734205 -1.104920876 0.000000000
C 4.821074536 -2.462094972 0.000000000
C 5.863266842 -3.376178892 0.000000000
C 7.207590281 -2.951641292 0.000000000
C 7.537930160 -1.606705567 0.000000000
H 4.681606403 2.109594763 0.000000000
H 7.299575157 1.451489921 0.000000000
H 3.788685594 -2.785730808 0.000000000
H 5.641549396 -4.434822780 0.000000000
H 7.993655200 -3.694570139 0.000000000
H 8.574255937 -1.294593877 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21-2.0')] = qcdb.Molecule("""
0 1
C -0.052652077 -1.393225783 0.000000000
C -0.025543347 -0.696940104 -1.208292000
C 0.026348254 0.696724226 -1.208365000
C 0.051042263 1.393657541 0.000000000
C 0.026348254 0.696724226 1.208365000
C -0.025543347 -0.696940104 1.208292000
H -0.097430661 -2.473655966 0.000000000
H -0.040509756 -1.237360068 -2.144590000
H 0.050955575 1.236531293 -2.144838000
H 0.089657645 2.474412421 0.000000000
H 0.050955575 1.236531293 2.144838000
H -0.040509756 -1.237360068 2.144590000
--
0 1
H 4.461772054 0.000000000 0.000000000
N 5.469089458 0.005056388 0.000000000
C 6.250743642 1.132604937 0.000000000
C 7.579628369 0.772354616 0.000000000
C 7.621021855 -0.653193161 0.000000000
C 6.271177219 -1.104920876 0.000000000
C 5.936517550 -2.462094972 0.000000000
C 6.978709856 -3.376178892 0.000000000
C 8.323033295 -2.951641292 0.000000000
C 8.653373174 -1.606705567 0.000000000
H 5.797049417 2.109594763 0.000000000
H 8.415018171 1.451489921 0.000000000
H 4.904128608 -2.785730808 0.000000000
H 6.756992410 -4.434822780 0.000000000
H 9.109098214 -3.694570139 0.000000000
H 9.689698951 -1.294593877 0.000000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22-0.9')] = qcdb.Molecule("""
0 1
C -1.445967355 -1.221065858 0.265808750
O -0.945229913 -0.047318091 -0.209467563
H 0.000000000 0.000000000 0.000000000
C -0.683142700 -2.127785201 1.005109011
C -1.257798399 -3.314090975 1.456540663
C -2.590627730 -3.605427919 1.179051667
C -3.348500619 -2.695116849 0.443286115
C -2.782549405 -1.509701903 -0.013287247
H 0.352786431 -1.905463972 1.224781047
H -0.656349187 -4.009576034 2.026231320
H -3.032993188 -4.526384329 1.531085059
H -4.385512900 -2.907317436 0.221017935
H -3.357888956 -0.796017014 -0.586234960
--
0 1
O 1.743489077 0.000000000 0.000000000
C 2.341981491 -1.142898789 -0.483732445
H 2.342838533 0.417604441 0.628041164
C 1.645485086 -1.867622674 -1.447211527
C 2.204739700 -3.035912794 -1.954567993
C 3.449296078 -3.479350313 -1.509647408
C 4.136609561 -2.744696418 -0.547410307
C 3.584309534 -1.574952605 -0.029436748
H 0.681454799 -1.513028491 -1.784467064
H 1.661729182 -3.600082357 -2.699896207
H 3.877956013 -4.387511286 -1.908204233
H 5.102623102 -3.077497147 -0.194005162
H 4.116289930 -1.004251641 0.722333197
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22-1.0')] = qcdb.Molecule("""
0 1
C -2.007106000 0.763846000 -0.108351000
O -1.388504000 1.929852000 -0.443121000
H -0.523812000 1.964652000 -0.006461000
C -1.463081000 -0.151912000 0.794993000
C -2.147579000 -1.329509000 1.088368000
C -3.374321000 -1.603143000 0.489586000
C -3.914373000 -0.683855000 -0.409103000
C -3.237050000 0.492961000 -0.709613000
H -0.510651000 0.056657000 1.264256000
H -1.715113000 -2.032145000 1.787842000
H -3.902466000 -2.517387000 0.719795000
H -4.867073000 -0.882294000 -0.881132000
H -3.643166000 1.213434000 -1.405759000
--
0 1
O 1.353117000 1.938272000 0.472313000
C 2.036975000 0.786504000 0.149549000
H 1.784285000 2.348749000 1.229711000
C 1.590403000 0.069686000 -0.957415000
C 2.241737000 -1.106977000 -1.312811000
C 3.331567000 -1.566560000 -0.574864000
C 3.769684000 -0.839690000 0.528644000
C 3.122484000 0.338350000 0.896049000
H 0.744551000 0.436798000 -1.521858000
H 1.892146000 -1.664973000 -2.170184000
H 3.833023000 -2.481154000 -0.856667000
H 4.613763000 -1.185010000 1.109263000
H 3.459885000 0.903038000 1.756949000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22-1.2')] = qcdb.Molecule("""
0 1
C -1.445967355 -1.221065858 0.265808750
O -0.945229913 -0.047318091 -0.209467563
H 0.000000000 0.000000000 0.000000000
C -0.683142700 -2.127785201 1.005109011
C -1.257798399 -3.314090975 1.456540663
C -2.590627730 -3.605427919 1.179051667
C -3.348500619 -2.695116849 0.443286115
C -2.782549405 -1.509701903 -0.013287247
H 0.352786431 -1.905463972 1.224781047
H -0.656349187 -4.009576034 2.026231320
H -3.032993188 -4.526384329 1.531085059
H -4.385512900 -2.907317436 0.221017935
H -3.357888956 -0.796017014 -0.586234960
--
0 1
O 2.324652103 0.000000000 0.000000000
C 2.923144517 -1.142898789 -0.483732445
H 2.924001559 0.417604441 0.628041164
C 2.226648112 -1.867622674 -1.447211527
C 2.785902726 -3.035912794 -1.954567993
C 4.030459104 -3.479350313 -1.509647408
C 4.717772587 -2.744696418 -0.547410307
C 4.165472560 -1.574952605 -0.029436748
H 1.262617825 -1.513028491 -1.784467064
H 2.242892208 -3.600082357 -2.699896207
H 4.459119039 -4.387511286 -1.908204233
H 5.683786128 -3.077497147 -0.194005162
H 4.697452956 -1.004251641 0.722333197
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22-1.5')] = qcdb.Molecule("""
0 1
C -1.445967355 -1.221065858 0.265808750
O -0.945229913 -0.047318091 -0.209467563
H 0.000000000 0.000000000 0.000000000
C -0.683142700 -2.127785201 1.005109011
C -1.257798399 -3.314090975 1.456540663
C -2.590627730 -3.605427919 1.179051667
C -3.348500619 -2.695116849 0.443286115
C -2.782549405 -1.509701903 -0.013287247
H 0.352786431 -1.905463972 1.224781047
H -0.656349187 -4.009576034 2.026231320
H -3.032993188 -4.526384329 1.531085059
H -4.385512900 -2.907317436 0.221017935
H -3.357888956 -0.796017014 -0.586234960
--
0 1
O 2.905815129 0.000000000 0.000000000
C 3.504307543 -1.142898789 -0.483732445
H 3.505164585 0.417604441 0.628041164
C 2.807811138 -1.867622674 -1.447211527
C 3.367065752 -3.035912794 -1.954567993
C 4.611622130 -3.479350313 -1.509647408
C 5.298935613 -2.744696418 -0.547410307
C 4.746635586 -1.574952605 -0.029436748
H 1.843780851 -1.513028491 -1.784467064
H 2.824055234 -3.600082357 -2.699896207
H 5.040282065 -4.387511286 -1.908204233
H 6.264949154 -3.077497147 -0.194005162
H 5.278615982 -1.004251641 0.722333197
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22-2.0')] = qcdb.Molecule("""
0 1
C -1.445967355 -1.221065858 0.265808750
O -0.945229913 -0.047318091 -0.209467563
H 0.000000000 0.000000000 0.000000000
C -0.683142700 -2.127785201 1.005109011
C -1.257798399 -3.314090975 1.456540663
C -2.590627730 -3.605427919 1.179051667
C -3.348500619 -2.695116849 0.443286115
C -2.782549405 -1.509701903 -0.013287247
H 0.352786431 -1.905463972 1.224781047
H -0.656349187 -4.009576034 2.026231320
H -3.032993188 -4.526384329 1.531085059
H -4.385512900 -2.907317436 0.221017935
H -3.357888956 -0.796017014 -0.586234960
--
0 1
O 3.874420172 0.000000000 0.000000000
C 4.472912586 -1.142898789 -0.483732445
H 4.473769628 0.417604441 0.628041164
C 3.776416181 -1.867622674 -1.447211527
C 4.335670795 -3.035912794 -1.954567993
C 5.580227173 -3.479350313 -1.509647408
C 6.267540656 -2.744696418 -0.547410307
C 5.715240629 -1.574952605 -0.029436748
H 2.812385894 -1.513028491 -1.784467064
H 3.792660277 -3.600082357 -2.699896207
H 6.008887108 -4.387511286 -1.908204233
H 7.233554197 -3.077497147 -0.194005162
H 6.247221025 -1.004251641 0.722333197
units angstrom
""")
# <<< Derived Geometry Strings >>>
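# Four monomer variants are derived from each dimer below: the 'unCP'
# monomers contain only the indicated fragment, while the 'CP' monomers
# keep the partner fragment's atoms as ghosts for counterpoise-corrected
# computations (e.g., extract_fragments(1, 2) returns fragment 1 real
# with fragment 2 ghosted).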
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
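# Illustrative consistency check (a sketch, not part of the database proper):
# it assumes qcdb.Molecule exposes nuclear_repulsion_energy() returning
# Hartree, as in Psi4's qcdb, and it is deliberately never called at import
# time. The reference values it compares against are tabulated below.
def _check_nre(rxn_geom='1-1.0', tol=1.0e-6):
    """Recompute the nuclear repulsion energy of one stored dimer and
    compare it against the tabulated value (assumed tolerance)."""
    mol = GEOS['%s-%s-dimer' % (dbse, rxn_geom)]
    ref = DATA['NUCLEAR REPULSION ENERGY']['%s-%s-dimer' % (dbse, rxn_geom)]
    assert abs(mol.nuclear_repulsion_energy() - ref) < tol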
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-0.9-dimer' ] = 41.68443604
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-0.9-monoA-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-0.9-monoB-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.0-dimer' ] = 40.31423984
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.0-monoA-unCP' ] = 11.94743172
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.0-monoB-unCP' ] = 11.94743172
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.2-dimer' ] = 38.12133822
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.2-monoA-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.2-monoB-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.5-dimer' ] = 35.74458853
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.5-monoA-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.5-monoB-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-2.0-dimer' ] = 33.16044361
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-2.0-monoA-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-2.0-monoB-unCP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-0.9-dimer' ] = 38.01573644
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-0.9-monoA-unCP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-0.9-monoB-unCP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.0-dimer' ] = 36.66284785
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.0-monoA-unCP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.0-monoB-unCP' ] = 9.17803890
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.2-dimer' ] = 34.45708029
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.2-monoA-unCP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.2-monoB-unCP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.5-dimer' ] = 32.00182680
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.5-monoA-unCP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.5-monoB-unCP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-2.0-dimer' ] = 29.24367241
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-2.0-monoA-unCP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-2.0-monoB-unCP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-0.9-dimer' ] = 241.06935387
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-0.9-monoA-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-0.9-monoB-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.0-dimer' ] = 235.94662032
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.0-monoA-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.0-monoB-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.2-dimer' ] = 227.13906126
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.2-monoA-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.2-monoB-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.5-dimer' ] = 216.59223679
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.5-monoA-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.5-monoB-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-2.0-dimer' ] = 203.69065134
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-2.0-monoA-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-2.0-monoB-unCP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-0.9-dimer' ] = 235.63918473
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-0.9-monoA-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-0.9-monoB-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.0-dimer' ] = 230.79485521
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.0-monoA-unCP' ] = 71.07286375
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.0-monoB-unCP' ] = 71.07286375
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.2-dimer' ] = 222.48256856
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.2-monoA-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.2-monoB-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.5-dimer' ] = 212.56291415
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.5-monoA-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.5-monoB-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-2.0-dimer' ] = 200.48924225
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-2.0-monoA-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-2.0-monoB-unCP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-0.9-dimer' ] = 1043.41428619
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-0.9-monoA-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-0.9-monoB-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.0-dimer' ] = 1032.28187517
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.0-monoA-unCP' ] = 357.22675307
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.0-monoB-unCP' ] = 357.22675307
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.2-dimer' ] = 1012.32214892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.2-monoA-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.2-monoB-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.5-dimer' ] = 986.94222381
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.5-monoA-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.5-monoB-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-2.0-dimer' ] = 953.38226556
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-2.0-monoA-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-2.0-monoB-unCP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-0.9-dimer' ] = 822.43713935
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-0.9-monoA-unCP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-0.9-monoB-unCP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.0-dimer' ] = 812.28851500
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.0-monoA-unCP' ] = 275.70186300
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.0-monoB-unCP' ] = 275.67198277
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.2-dimer' ] = 794.18088651
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.2-monoA-unCP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.2-monoB-unCP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.5-dimer' ] = 771.37080294
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.5-monoA-unCP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.5-monoB-unCP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-2.0-dimer' ] = 741.67097558
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-2.0-monoA-unCP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-2.0-monoB-unCP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-0.9-dimer' ] = 1379.46455665
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-0.9-monoA-unCP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-0.9-monoB-unCP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.0-dimer' ] = 1365.23225970
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.0-monoA-unCP' ] = 503.39628585
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.0-monoB-unCP' ] = 440.30157446
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.2-dimer' ] = 1339.53568799
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.2-monoA-unCP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.2-monoB-unCP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.5-dimer' ] = 1306.57985526
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.5-monoA-unCP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.5-monoB-unCP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-2.0-dimer' ] = 1262.60816943
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-2.0-monoA-unCP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-2.0-monoB-unCP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-0.9-dimer' ] = 42.51958713
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-0.9-monoA-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-0.9-monoB-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.0-dimer' ] = 41.00026380
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.0-monoA-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.0-monoB-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.2-dimer' ] = 38.69237776
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.2-monoA-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.2-monoB-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.5-dimer' ] = 36.35739726
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.5-monoA-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.5-monoB-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-2.0-dimer' ] = 34.00360791
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-2.0-monoA-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-2.0-monoB-unCP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-0.9-dimer' ] = 105.78748629
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-0.9-monoA-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-0.9-monoB-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.0-dimer' ] = 102.16530928
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.0-monoA-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.0-monoB-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.2-dimer' ] = 96.54599785
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.2-monoA-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.2-monoB-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.5-dimer' ] = 90.75195168
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.5-monoA-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.5-monoB-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-2.0-dimer' ] = 84.83436234
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-2.0-monoA-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-2.0-monoB-unCP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-0.9-dimer' ] = 276.01204527
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-0.9-monoA-unCP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-0.9-monoB-unCP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.0-dimer' ] = 272.46180693
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.0-monoA-unCP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.0-monoB-unCP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.2-dimer' ] = 266.43391366
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.2-monoA-unCP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.2-monoB-unCP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.5-dimer' ] = 259.41406121
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.5-monoA-unCP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.5-monoB-unCP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-2.0-dimer' ] = 251.20587713
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-2.0-monoA-unCP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-2.0-monoB-unCP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-0.9-dimer' ] = 648.07922043
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-0.9-monoA-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-0.9-monoB-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.0-dimer' ] = 628.97202476
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.0-monoA-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.0-monoB-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.2-dimer' ] = 597.97029184
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.2-monoA-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.2-monoB-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.5-dimer' ] = 564.14537638
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.5-monoA-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.5-monoB-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-2.0-dimer' ] = 527.66680634
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-2.0-monoA-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-2.0-monoB-unCP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-0.9-dimer' ] = 674.23986713
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-0.9-monoA-unCP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-0.9-monoB-unCP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.0-dimer' ] = 654.13200064
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.0-monoA-unCP' ] = 208.63967421
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.0-monoB-unCP' ] = 208.62628027
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.2-dimer' ] = 621.43592234
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.2-monoA-unCP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.2-monoB-unCP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.5-dimer' ] = 585.61705547
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.5-monoA-unCP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.5-monoB-unCP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-2.0-dimer' ] = 546.77330917
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-2.0-monoA-unCP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-2.0-monoB-unCP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-0.9-dimer' ] = 1195.17642590
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-0.9-monoA-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-0.9-monoB-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.0-dimer' ] = 1161.47071638
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.0-monoA-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.0-monoB-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.2-dimer' ] = 1105.10369422
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.2-monoA-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.2-monoB-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.5-dimer' ] = 1041.01622426
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.5-monoA-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.5-monoB-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-2.0-dimer' ] = 968.73081054
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-2.0-monoA-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-2.0-monoB-unCP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-0.9-dimer' ] = 958.32945282
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-0.9-monoA-unCP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-0.9-monoB-unCP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.0-dimer' ] = 935.53014764
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.0-monoA-unCP' ] = 203.66956608
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.0-monoB-unCP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.2-dimer' ] = 896.86323089
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.2-monoA-unCP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.2-monoB-unCP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.5-dimer' ] = 852.00454788
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.5-monoA-unCP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.5-monoB-unCP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-2.0-dimer' ] = 800.10296417
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-2.0-monoA-unCP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-2.0-monoB-unCP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-0.9-dimer' ] = 1583.70519732
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-0.9-monoA-unCP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-0.9-monoB-unCP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.0-dimer' ] = 1542.14304855
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.0-monoA-unCP' ] = 503.36563836
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.0-monoB-unCP' ] = 440.14698895
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.2-dimer' ] = 1471.85303122
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.2-monoA-unCP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.2-monoB-unCP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.5-dimer' ] = 1390.54653938
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.5-monoA-unCP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.5-monoB-unCP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-2.0-dimer' ] = 1296.67402837
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-2.0-monoA-unCP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-2.0-monoB-unCP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-0.9-dimer' ] = 87.03519106
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-0.9-monoA-unCP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-0.9-monoB-unCP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.0-dimer' ] = 85.18906420
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.0-monoA-unCP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.0-monoB-unCP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.2-dimer' ] = 82.12691022
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.2-monoA-unCP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.2-monoB-unCP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.5-dimer' ] = 78.64799841
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.5-monoA-unCP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.5-monoB-unCP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-2.0-dimer' ] = 74.65765021
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-2.0-monoA-unCP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-2.0-monoB-unCP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-0.9-dimer' ] = 277.22718975
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-0.9-monoA-unCP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-0.9-monoB-unCP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.0-dimer' ] = 273.32940378
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.0-monoA-unCP' ] = 203.63369789
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.0-monoB-unCP' ] = 9.16734036
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.2-dimer' ] = 266.69472124
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.2-monoA-unCP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.2-monoB-unCP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.5-dimer' ] = 258.94739117
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.5-monoA-unCP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.5-monoB-unCP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-2.0-dimer' ] = 249.87743360
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-2.0-monoA-unCP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-2.0-monoB-unCP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-0.9-dimer' ] = 276.96602136
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-0.9-monoA-unCP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-0.9-monoB-unCP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.0-dimer' ] = 273.27963627
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.0-monoA-unCP' ] = 203.67277417
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.0-monoB-unCP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.2-dimer' ] = 266.99529350
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.2-monoA-unCP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.2-monoB-unCP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.5-dimer' ] = 259.64238957
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.5-monoA-unCP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.5-monoB-unCP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-2.0-dimer' ] = 251.01630493
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-2.0-monoA-unCP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-2.0-monoB-unCP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-0.9-dimer' ] = 307.56164739
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-0.9-monoA-unCP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-0.9-monoB-unCP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.0-dimer' ] = 303.28139253
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.0-monoA-unCP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.0-monoB-unCP' ] = 23.66987364
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.2-dimer' ] = 295.87805947
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.2-monoA-unCP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.2-monoB-unCP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.5-dimer' ] = 287.02868743
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.5-monoA-unCP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.5-monoB-unCP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-2.0-dimer' ] = 276.34590865
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-2.0-monoA-unCP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-2.0-monoB-unCP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-0.9-dimer' ] = 601.46920410
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-0.9-monoA-unCP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-0.9-monoB-unCP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.0-dimer' ] = 592.41663921
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.0-monoA-unCP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.0-monoB-unCP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.2-dimer' ] = 576.54530095
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.2-monoA-unCP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.2-monoB-unCP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.5-dimer' ] = 557.14862254
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.5-monoA-unCP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.5-monoB-unCP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-2.0-dimer' ] = 532.99122415
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-2.0-monoA-unCP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-2.0-monoB-unCP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-0.9-dimer' ] = 888.79508333
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-0.9-monoA-unCP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-0.9-monoB-unCP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.0-dimer' ] = 876.91918503
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.0-monoA-unCP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.0-monoB-unCP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.2-dimer' ] = 855.79228809
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.2-monoA-unCP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.2-monoB-unCP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.5-dimer' ] = 829.42534245
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.5-monoA-unCP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.5-monoB-unCP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-2.0-dimer' ] = 795.71041545
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-2.0-monoA-unCP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-2.0-monoB-unCP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-0.9-dimer' ] = 814.74763476
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-0.9-monoA-unCP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-0.9-monoB-unCP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.0-dimer' ] = 805.11772632
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.0-monoA-unCP' ] = 271.43868470
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.0-monoB-unCP' ] = 271.34619734
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.2-dimer' ] = 787.64120113
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.2-monoA-unCP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.2-monoB-unCP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.5-dimer' ] = 765.14830882
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.5-monoA-unCP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.5-monoB-unCP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-2.0-dimer' ] = 735.23075037
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-2.0-monoA-unCP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-2.0-monoB-unCP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-0.9-monoA-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-0.9-monoB-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.0-monoA-CP' ] = 11.94743172
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.0-monoB-CP' ] = 11.94743172
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.2-monoA-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.2-monoB-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.5-monoA-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-1.5-monoB-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-2.0-monoA-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-1-2.0-monoB-CP' ] = 11.94743173
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-0.9-monoA-CP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-0.9-monoB-CP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.0-monoA-CP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.0-monoB-CP' ] = 9.17803890
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.2-monoA-CP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.2-monoB-CP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.5-monoA-CP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-1.5-monoB-CP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-2.0-monoA-CP' ] = 9.16383015
DATA['NUCLEAR REPULSION ENERGY']['S22by5-2-2.0-monoB-CP' ] = 9.17803891
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-0.9-monoA-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-0.9-monoB-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.0-monoA-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.0-monoB-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.2-monoA-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.2-monoB-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.5-monoA-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-1.5-monoB-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-2.0-monoA-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-3-2.0-monoB-CP' ] = 70.11578330
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-0.9-monoA-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-0.9-monoB-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.0-monoA-CP' ] = 71.07286375
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.0-monoB-CP' ] = 71.07286375
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.2-monoA-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.2-monoB-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.5-monoA-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-1.5-monoB-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-2.0-monoA-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-4-2.0-monoB-CP' ] = 71.07286374
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-0.9-monoA-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-0.9-monoB-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.0-monoA-CP' ] = 357.22675307
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.0-monoB-CP' ] = 357.22675307
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.2-monoA-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.2-monoB-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.5-monoA-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-1.5-monoB-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-2.0-monoA-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-5-2.0-monoB-CP' ] = 357.22675306
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-0.9-monoA-CP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-0.9-monoB-CP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.0-monoA-CP' ] = 275.70186300
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.0-monoB-CP' ] = 275.67198277
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.2-monoA-CP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.2-monoB-CP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.5-monoA-CP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-1.5-monoB-CP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-2.0-monoA-CP' ] = 275.70186301
DATA['NUCLEAR REPULSION ENERGY']['S22by5-6-2.0-monoB-CP' ] = 275.67198279
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-0.9-monoA-CP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-0.9-monoB-CP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.0-monoA-CP' ] = 503.39628585
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.0-monoB-CP' ] = 440.30157446
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.2-monoA-CP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.2-monoB-CP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.5-monoA-CP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-1.5-monoB-CP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-2.0-monoA-CP' ] = 503.39628584
DATA['NUCLEAR REPULSION ENERGY']['S22by5-7-2.0-monoB-CP' ] = 440.30157444
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-0.9-monoA-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-0.9-monoB-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.0-monoA-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.0-monoB-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.2-monoA-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.2-monoB-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.5-monoA-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-1.5-monoB-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-2.0-monoA-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-8-2.0-monoB-CP' ] = 13.44804227
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-0.9-monoA-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-0.9-monoB-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.0-monoA-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.0-monoB-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.2-monoA-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.2-monoB-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.5-monoA-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-1.5-monoB-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-2.0-monoA-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-9-2.0-monoB-CP' ] = 33.36026958
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-0.9-monoA-CP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-0.9-monoB-CP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.0-monoA-CP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.0-monoB-CP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.2-monoA-CP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.2-monoB-CP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.5-monoA-CP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-1.5-monoB-CP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-2.0-monoA-CP' ] = 203.70797334
DATA['NUCLEAR REPULSION ENERGY']['S22by5-10-2.0-monoB-CP' ] = 13.48552804
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-0.9-monoA-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-0.9-monoB-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.0-monoA-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.0-monoB-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.2-monoA-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.2-monoB-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.5-monoA-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-1.5-monoB-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-2.0-monoA-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-11-2.0-monoB-CP' ] = 203.71090864
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-0.9-monoA-CP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-0.9-monoB-CP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.0-monoA-CP' ] = 208.63967421
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.0-monoB-CP' ] = 208.62628027
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.2-monoA-CP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.2-monoB-CP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.5-monoA-CP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-1.5-monoB-CP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-2.0-monoA-CP' ] = 208.63967419
DATA['NUCLEAR REPULSION ENERGY']['S22by5-12-2.0-monoB-CP' ] = 208.62628028
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-0.9-monoA-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-0.9-monoB-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.0-monoA-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.0-monoB-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.2-monoA-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.2-monoB-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.5-monoA-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-1.5-monoB-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-2.0-monoA-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-13-2.0-monoB-CP' ] = 357.16045924
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-0.9-monoA-CP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-0.9-monoB-CP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.0-monoA-CP' ] = 203.66956608
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.0-monoB-CP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.2-monoA-CP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.2-monoB-CP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.5-monoA-CP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-1.5-monoB-CP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-2.0-monoA-CP' ] = 203.66956609
DATA['NUCLEAR REPULSION ENERGY']['S22by5-14-2.0-monoB-CP' ] = 401.14359309
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-0.9-monoA-CP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-0.9-monoB-CP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.0-monoA-CP' ] = 503.36563836
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.0-monoB-CP' ] = 440.14698895
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.2-monoA-CP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.2-monoB-CP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.5-monoA-CP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-1.5-monoB-CP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-2.0-monoA-CP' ] = 503.36563835
DATA['NUCLEAR REPULSION ENERGY']['S22by5-15-2.0-monoB-CP' ] = 440.14698892
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-0.9-monoA-CP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-0.9-monoB-CP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.0-monoA-CP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.0-monoB-CP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.2-monoA-CP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.2-monoB-CP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.5-monoA-CP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-1.5-monoB-CP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-2.0-monoA-CP' ] = 33.35807208
DATA['NUCLEAR REPULSION ENERGY']['S22by5-16-2.0-monoB-CP' ] = 24.69794610
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-0.9-monoA-CP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-0.9-monoB-CP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.0-monoA-CP' ] = 203.63369789
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.0-monoB-CP' ] = 9.16734036
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.2-monoA-CP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.2-monoB-CP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.5-monoA-CP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-1.5-monoB-CP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-2.0-monoA-CP' ] = 203.63369790
DATA['NUCLEAR REPULSION ENERGY']['S22by5-17-2.0-monoB-CP' ] = 9.16734035
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-0.9-monoA-CP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-0.9-monoB-CP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.0-monoA-CP' ] = 203.67277417
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.0-monoB-CP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.2-monoA-CP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.2-monoB-CP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.5-monoA-CP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-1.5-monoB-CP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-2.0-monoA-CP' ] = 203.67277418
DATA['NUCLEAR REPULSION ENERGY']['S22by5-18-2.0-monoB-CP' ] = 11.96105518
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-0.9-monoA-CP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-0.9-monoB-CP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.0-monoA-CP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.0-monoB-CP' ] = 23.66987364
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.2-monoA-CP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.2-monoB-CP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.5-monoA-CP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-1.5-monoB-CP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-2.0-monoA-CP' ] = 203.59513507
DATA['NUCLEAR REPULSION ENERGY']['S22by5-19-2.0-monoB-CP' ] = 23.66987363
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-0.9-monoA-CP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-0.9-monoB-CP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.0-monoA-CP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.0-monoB-CP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.2-monoA-CP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.2-monoB-CP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.5-monoA-CP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-1.5-monoB-CP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-2.0-monoA-CP' ] = 203.68142723
DATA['NUCLEAR REPULSION ENERGY']['S22by5-20-2.0-monoB-CP' ] = 203.66408617
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-0.9-monoA-CP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-0.9-monoB-CP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.0-monoA-CP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.0-monoB-CP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.2-monoA-CP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.2-monoB-CP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.5-monoA-CP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-1.5-monoB-CP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-2.0-monoA-CP' ] = 203.56579265
DATA['NUCLEAR REPULSION ENERGY']['S22by5-21-2.0-monoB-CP' ] = 401.05660150
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-0.9-monoA-CP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-0.9-monoB-CP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.0-monoA-CP' ] = 271.43868470
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.0-monoB-CP' ] = 271.34619734
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.2-monoA-CP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.2-monoB-CP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.5-monoA-CP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-1.5-monoB-CP' ] = 271.34619735
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-2.0-monoA-CP' ] = 271.43868469
DATA['NUCLEAR REPULSION ENERGY']['S22by5-22-2.0-monoB-CP' ] = 271.34619735
| ashutoshvt/psi4 | psi4/share/psi4/databases/S22by5.py | Python | lgpl-3.0 | 176,348 | ["Psi4"] | 18888a04b1e12fd8a6d57153067d107dd368df7f6bd105d2c401f70b19008945 |
#!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
The core object model for the Decoder Generator. The dg_input and
dg_output modules both operate in terms of these classes.
"""
NUM_INST_BITS = 32
NEWLINE_STR="""
"""
def _popcount(value):
  """Returns the number of 1 bits in the input."""
  count = 0
  for bit in range(0, NUM_INST_BITS):
    count = count + ((value >> bit) & 1)
  return count
def neutral_repr(value):
"""Returns a neutral representation for the value.
Used to remove identifier references from values, so that we can
merge rows/actions of the table.
"""
if (isinstance(value, BitExpr) or isinstance(value, SymbolTable) or
isinstance(value, Row) or isinstance(value, DecoderAction) ):
return value.neutral_repr()
elif isinstance(value, list):
return '[' + ',\n '.join([ neutral_repr(v) for v in value ]) + ']'
else:
return repr(value)
def sub_bit_exprs(value):
"""Returns the list of (immediate) sub bit expressions within the
given value.
Used to find the set of identifier references used by a value.
"""
if isinstance(value, BitExpr) or isinstance(value, SymbolTable):
return value.sub_bit_exprs()
elif isinstance(value, list):
exprs = set()
_add_if_bitexpr(value, exprs)
return list(exprs)
else:
return []
def _add_if_bitexpr(value, set):
"""Adds value to set if value is a BitExpr (or sub values if a list)."""
if value:
if isinstance(value, BitExpr):
set.add(value)
elif isinstance(value, list):
for v in value:
_add_if_bitexpr(v, set)
def _close_referenced_bit_exprs(values):
"""Adds to values, all indirect BitExprs referenced by BitExprs already
in the set of values."""
workset = list(values)
while workset:
value = workset.pop()
for exp in sub_bit_exprs(value):
if exp not in values:
values.add(exp)
workset.append(exp)
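# Illustrative sketch (hypothetical helper, not part of the generator):
# _close_referenced_bit_exprs computes the transitive closure of
# sub-expression references, which is how the generator discovers every
# identifier a value depends on.
def _example_transitive_refs():
  """Returns all BitExprs reachable from a single bitfield."""
  field = BitField('inst', 3, 0)          # classes defined below
  exprs = set([field])
  _close_referenced_bit_exprs(exprs)
  # exprs now also holds the implicit IdRef('inst', ...) and the
  # Instruction() object it wraps.
  return exprs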
class BitExpr(object):
"""Define a bit expression."""
def negate(self):
"""Returns the negation of self."""
return NegatedTest(self)
def to_bitfield(self, options={}):
"""Returns the bit field (i.e. sequence of bits) described by the
BitExpr. Returns an instance of BitField.
"""
# Default implementation is to convert it by adding unsigned
# int mask.
return BitField(self, NUM_INST_BITS - 1, 0)
def to_type(self, type, options={}):
"""Converts the expression to the given type."""
if type == 'bool':
return self.to_bool(options)
if type == 'uint32':
return self.to_uint32(options)
if type == 'register':
return self.to_register(options)
if type == 'register_list':
return self.to_register_list(options)
if type == 'bitfield':
return self.to_bitfield(options)
raise Exception("to_type(%s): can't convert %s" % (type, self))
def to_bool(self, options={}):
"""Returns a string describing this as a C++ boolean
expression."""
return "(%s != 0)" % self.to_uint32(options)
def to_commented_bool(self, options={}):
"""Returns a string describing this as a C++ boolean expression,
with a comment describing the corresponding BitExpr it came
from."""
return '%s /* %s */' % (self.to_bool(options), repr(self))
def to_register(self, options={}):
"""Returns a string describing this as a C++ Register."""
return 'Register(%s)' % self.to_uint32(options)
def to_commented_register(self, options={}):
"""Returns a string describing this as a C++ Register expression,
with a comment describing the corresponding BitExpr it came
from."""
return '%s /* %s */' % (self.to_register(options), repr(self))
def to_register_list(self, options={}):
"""Returns a string describing this as a C++ RegisterList
expression."""
raise Exception("to_register_list not defined for %s %s." %
(type(self), self))
  def to_commented_register_list(self, options={}):
"""Returns a string describing this as a C++ RegisterList
Expression, with a comment describing the corresponding BitExpr
it came from."""
return '%s /* %s */' % (self.to_register_list(options), repr(self))
def to_uint32(self, options={}):
"""Returns a string describing this as a C++ uint32_t
expression."""
raise Exception("to_uint32 not defined for %s." % type(self))
def to_commented_uint32(self, options={}):
"""Returns a string describing this as a C++ uint32_t expression,
with a comment describing the corresponding BitExpr it came
from."""
return '%s /* %s */' % (self.to_uint32(options), repr(self))
def sub_bit_exprs(self):
"""Returns a list of (immediate) sub bit expressions within the
bit expr."""
return []
def must_be_valid_shift_op(self):
"""Returns true only if it is provable that the corresponding
value generated by to_uint32 is greater than or equal to zero,
and not greater than 32. In such cases, we know that the
    shift will not cause a runtime error, and need not be checked.
"""
return self.must_be_in_range(0, 33)
def must_be_in_range(self, min_include, max_exclude):
"""Returns true only if it is provable that the corresponding
value returned by to_uint32 is greater than or equal to
min_include, and less than max_exclude."""
return False
def to_uint32_constant(self):
"""Returns (uint32) constant denoted by self, or None if
it can't be converted to a constant."""
return None
def neutral_repr(self):
"""Like repr(self) except identifier references are replaced with
their definition.
Used to define a form for comparison/hashing that is neutral to
the naming conventions used in the expression.
"""
raise Exception("neutral_repr not defined for %s" % type(self))
def __hash__(self):
return hash(neutral_repr(self))
def __cmp__(self, other):
return (cmp(type(self), type(other)) or
cmp(neutral_repr(self), neutral_repr(other)))
class IdRef(BitExpr):
"""References an (already defined) name, and the value associated
with the name.
"""
def __init__(self, name, value=None):
self._name = name
self._value = value
def name(self):
return self._name
def value(self):
return self._value
def to_bitfield(self, options={}):
return self._value.to_bitfield(options)
def to_bool(self, options={}):
return self._value.to_bool(options)
def to_register(self, options={}):
return self._value.to_register(options)
def to_register_list(self, options={}):
return self._value.to_register_list(options)
def to_uint32(self, options={}):
return self._value.to_uint32(options)
def __repr__(self):
return '%s' % self._name
def sub_bit_exprs(self):
return [self._value]
def must_be_in_range(self, min_include, max_exclude):
return self._value.must_be_in_range(min_include, max_exclude)
def to_uint32_constant(self):
return self._value.to_uint32_constant()
def neutral_repr(self):
return self._value.neutral_repr()
def __hash__(self):
return hash(self._name) + hash(self._value)
def __cmp__(self, other):
return (cmp(type(self), type(other)) or
cmp(self._name, other._name) or
cmp(self._value, other._value))
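# Illustrative sketch (hypothetical helper, not part of the generator):
# neutral_repr() reduces an IdRef to its definition, so two rows that
# bind different names to the same value can still be merged.
def _example_neutral_repr():
  """Shows name-neutral comparison of identifier references."""
  rd = IdRef('Rd', Literal(15))           # Literal is defined below
  rn = IdRef('Rn', Literal(15))
  assert repr(rd) != repr(rn)             # 'Rd' vs 'Rn'
  assert neutral_repr(rd) == neutral_repr(rn)  # both reduce to '15'
  return neutral_repr(rd)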
_AND_PRINT_OP=""" &&
"""
class AndExp(BitExpr):
"""Models an anded expression."""
def __init__(self, args):
if not isinstance(args, list) or len(args) < 2:
raise Exception(
"AndExp(%s) expects at least two elements" % args)
self._args = args
def args(self):
return self._args[:]
def to_bool(self, options={}):
value = '(%s)' % self._args[0].to_bool(options)
for arg in self._args[1:]:
value = '%s%s(%s)' % (value, _AND_PRINT_OP, arg.to_bool(options))
return value
def to_uint32(self, options={}):
value = self._args[0].to_uint32(options)
for arg in self._args[1:]:
value = '%s & %s' % (value, arg.to_uint32(options))
return '(%s)' % value
def to_register(self, options={}):
raise Exception("to_register not defined for %s" % self)
def to_register_list(self, options={}):
raise Exception("to_register_list not defined for %s" % self)
def sub_bit_exprs(self):
return list(self._args)
def __repr__(self):
return _AND_PRINT_OP.join([repr(a) for a in self._args])
def neutral_repr(self):
return _AND_PRINT_OP.join([neutral_repr(a) for a in self._args])
_OR_PRINT_OP=""" ||
"""
class OrExp(BitExpr):
"""Models an or-ed expression."""
def __init__(self, args):
if not isinstance(args, list) or len(args) < 2:
raise Exception(
"OrExp(%s) expects at least two elements" % args)
self._args = args
def args(self):
return self._args[:]
def to_bool(self, options={}):
value = '(%s)' % self._args[0].to_bool(options)
for arg in self._args[1:]:
value = '%s%s(%s)' % (value, _OR_PRINT_OP, arg.to_bool(options))
return value
def to_register(self, options={}):
raise Exception("to_register not defined for %s" % self)
def to_register_list(self, options={}):
raise Exception("to_register_list not defined for %s" % self)
def to_uint32(self, options={}):
    value = self._args[0].to_uint32(options)
for arg in self._args[1:]:
value = '%s | %s' % (value, arg.to_uint32(options))
return '(%s)' % value
def sub_bit_exprs(self):
return list(self._args)
def __repr__(self):
return _OR_PRINT_OP.join([repr(a) for a in self._args])
def neutral_repr(self):
return _OR_PRINT_OP.join([neutral_repr(a) for a in self._args])
# Defines the negated comparison operator.
_NEGATED_COMPARE_OP = {
'<': '>=',
'<=': '>',
'==': '!=',
'!=': '==',
'>=': '<',
'>': '<=',
}
_COMPARE_OP_FORMAT=""" %s
"""
class CompareExp(BitExpr):
"""Models the comparison of two values."""
def __init__(self, op, arg1, arg2):
if not _NEGATED_COMPARE_OP.get(op):
raise Exception("Unknown compare operator: %s" % op)
self._op = op
self._args = [arg1, arg2]
def op(self):
return self._op
def args(self):
return self._args[:]
def negate(self):
return CompareExp(_NEGATED_COMPARE_OP[self._op],
self._args[0], self._args[1])
def to_bool(self, options={}):
return '((%s) %s (%s))' % (self._args[0].to_uint32(options),
self._op,
self._args[1].to_uint32(options))
def to_register(self, options={}):
raise Exception("to_register not defined for %s" % self)
def to_register_list(self, options={}):
raise Exception("to_register_list not defined for %s" % self)
def to_uint32(self, options={}):
raise Exception("to_uint32 not defined for %s" % self)
def sub_bit_exprs(self):
return list(self._args)
def __repr__(self):
return '%s %s %s' % (repr(self._args[0]),
self.compare_op(),
repr(self._args[1]))
def neutral_repr(self):
    # Note: We canonicalize the tests to improve the chances that we
# merge more expressions.
arg1 = neutral_repr(self._args[0])
arg2 = neutral_repr(self._args[1])
if self._op in ['==', '!=']:
# Order arguments based on comparison value.
cmp_value = cmp(arg1, arg2)
if cmp_value < 0:
return '%s %s %s' % (arg1, self.compare_op(), arg2)
elif cmp_value > 0:
return '%s %s %s' % (arg2, self.compare_op(), arg1)
else:
# comparison against self can be simplified.
return BoolValue(self._op == '==').neutral_repr()
elif self._op in ['>', '>=']:
return '%s %s %s' % (arg2,
self.compare_op(_NEGATED_COMPARE_OP.get(self._op)),
arg1)
else:
# Assume in canonical order.
return '%s %s %s' % (arg1, self.compare_op(), arg2)
def compare_op(self, op=None):
if op == None: op = self._op
return _COMPARE_OP_FORMAT % op
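# Illustrative sketch (hypothetical helper, not part of the generator):
# neutral_repr() canonicalizes comparisons, so 'a == b' and 'b == a'
# merge, and '>' is rewritten in its negated '<=' form.
def _example_compare_canonicalization():
  a = Literal(1, name='a')
  b = Literal(2, name='b')
  assert (CompareExp('==', a, b).neutral_repr() ==
          CompareExp('==', b, a).neutral_repr())
  return CompareExp('>', a, b).neutral_repr()   # rendered as 'b <= a'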
class ShiftOp(BitExpr):
"""Models a left/right shift operator."""
def __init__(self, op, arg1, arg2):
if op not in ['<<', '>>']:
raise Exception("Not shift op: %s" % op)
if not arg2.must_be_valid_shift_op():
raise Exception("Can't statically determine shift value.")
self._op = op
self._args = [arg1, arg2]
def args(self):
return list(self._args)
def to_uint32(self, options={}):
return '(%s %s %s)' % (self._args[0].to_uint32(options),
self._op,
self._args[1].to_uint32(options))
def __repr__(self):
return '%s %s %s' % (self._args[0],
self._op,
self._args[1])
def neutral_repr(self):
return '%s %s %s' % (neutral_repr(self._args[0]),
self._op,
neutral_repr(self._args[1]))
def sub_bit_exprs(self):
return list(self._args)
class AddOp(BitExpr):
"""Models an additive operator."""
def __init__(self, op, arg1, arg2):
if op not in ['+', '-']:
raise Exception("Not add op: %s" % op)
self._op = op
self._args = [arg1, arg2]
def args(self):
return self._args[:]
def to_register_list(self, options={}):
rl = self._args[0].to_register_list(options)
if self._op == '+':
return '%s.\n Add(%s)' % (rl, self._args[1].to_register(options))
elif self._op == '-':
return '%s.\n Remove(%s)' % (rl, self._args[1].to_register(options))
else:
raise Exception("Bad op %s" % self._op)
def to_uint32(self, options={}):
# Check subtraction as a special case. By default, we assume that all
# integers are unsigned. However, a difference may generate a negative
# value. In C++, the subtraction of unsigned integers is an unsigned
# integer, which is not a difference. To fix this, we insert integer
# typecasts.
if self._is_subtract_bitfields():
# Cast each argument to an int, so that we can do subtraction that
# can result in negative values.
args = [TypeCast('int', a) for a in self._args]
return AddOp('-', args[0], args[1]).to_uint32(options)
else:
return '%s %s %s' % (self._args[0].to_uint32(options),
self._op,
self._args[1].to_uint32(options))
def _is_subtract_bitfields(self):
"""Returns true if the subtraction of bitfields that are not defined
by typecasts."""
if self._op != '-': return False
for arg in self.args():
if isinstance(arg, TypeCast):
return False
try:
bf = arg.to_bitfield()
except:
return False
return True
def to_uint32_constant(self):
args = [a.to_uint32_constant() for a in self._args]
if None in args or len(args) != 2:
return None
return self._eval(args[0], self._op, args[1])
def sub_bit_exprs(self):
return list(self._args)
def must_be_in_range(self, min_include, max_exclude):
for i in [0, 1]:
c = self._args[i].to_uint32_constant()
      if c is not None:
if self._op == '+':
# Adjust the range by the constant c, and then test if
# the other argument is in that range. We can do this,
# since addition is commutative. Note that the range
# of the other argument is defined by subtracting c from
# the range of the result.
return self._args[1-i].must_be_in_range(
self._eval(min_include, '-', c),
self._eval(max_exclude, '-', c))
elif i == 1: # i.e. of form: _args[0] - c
# Adjust the range by the constant c, and test if the
# first argument is in that range. Note that the range
# of the first argument is defined by adding c to the
# range of the result.
return self._args[0].must_be_in_range(
self._eval(min_include, '+', c),
self._eval(max_exclude, '+', c))
# If reached, don't know how to prove.
return False
def _eval(self, x, op, y):
"""Calculates 'x op y', assuming op in {'-', '+'})."""
if op == '+':
return x + y
elif op == '-':
return x - y
else:
raise Exception("Don't know how to apply: %s %s %s" %
(x, op, y))
def __repr__(self):
return '%s %s %s' % (repr(self._args[0]),
self._op,
repr(self._args[1]))
def neutral_repr(self):
return '%s %s %s' % (neutral_repr(self._args[0]),
self._op,
neutral_repr(self._args[1]))
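# Illustrative sketch (hypothetical helper, not part of the generator):
# AddOp propagates range facts through constants, which is what lets
# ShiftOp prove a shift amount is safe. Relies on Literal exposing
# to_uint32_constant() as defined later in this file.
def _example_addop_range():
  field = BitField('inst', 3, 0)          # 4 bits: values in [0, 16)
  expr = AddOp('+', field, Literal(1))    # so expr is in [1, 17)
  assert expr.must_be_in_range(1, 17)
  return expr.to_uint32()                 # '(inst.Bits() & 0x0000000F) + 1'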
# Defines the C++ operator for the given mulop. Note that the mapped
# text is spliced into '%s %s %s' as a plain format *argument*, so the
# C++ modulus operator must be a single '%' (a doubled '%%' would be
# emitted literally into the generated code).
_CPP_MULOP = {
    '*': '*',
    '/': '/',
    'mod': '%',
    }
class MulOp(BitExpr):
"""Models an additive operator."""
def __init__(self, op, arg1, arg2):
if not _CPP_MULOP.get(op):
raise Exception("Not mul op: %s" % op)
self._op = op
self._args = [arg1, arg2]
def args(self):
return self._args[:]
def to_uint32(self, options={}):
return '%s %s %s' % (self._args[0].to_uint32(options),
_CPP_MULOP[self._op],
self._args[1].to_uint32(options))
def sub_bit_exprs(self):
return list(self._args)
def __repr__(self):
return '%s %s %s' % (repr(self._args[0]),
self._op,
repr(self._args[1]))
def neutral_repr(self):
return '%s %s %s' % (neutral_repr(self._args[0]),
self._op,
neutral_repr(self._args[1]))
class Concat(BitExpr):
"""Models a value generated by concatentation bitfields."""
def __init__(self, args):
if not isinstance(args, list) or len(args) < 2:
raise Exception(
"Concat(%s) expects at least two arguments" % args)
self._args = args
def args(self):
return self._args[:]
def to_bitfield(self, options={}):
# Assume we can generate a bitfield from the expression, by
# concatenating subfields, if each subfield is a bitfield.
try:
      # As long as each subfield is convertible, and the max number
      # of bits is not exceeded, generate the corresponding bitfield.
      bits = sum([ a.to_bitfield(options).num_bits() for a in self._args ])
      if bits > 32:
        raise Exception("can't compose bitfield from concat")
      return BitField(self, bits - 1, 0)
    except:
      # Can't convert piecewise, so just assume that we can
      # convert to an unsigned integer, and then define a
      # bitfield on the unsigned integer.
      return BitField(self, NUM_INST_BITS - 1, 0)
def to_uint32(self, options={}):
value = self._args[0].to_uint32()
for arg in self._args[1:]:
bitfield = arg.to_bitfield(options)
value = ("(((%s) << %s) | %s)" %
(value, bitfield.num_bits(), bitfield.to_uint32()))
return value
def sub_bit_exprs(self):
return list(self._args)
def __repr__(self):
return ':'.join([repr(a) for a in self._args])
def neutral_repr(self):
return ':'.join([a.neutral_repr() for a in self._args])
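# Illustrative sketch (hypothetical helper, not part of the generator):
# concatenated fields are lowered to shift-and-or C++ expressions.
def _example_concat():
  hi4 = BitField('inst', 19, 16)
  lo4 = BitField('inst', 3, 0)
  # Produces '(((<hi4>) << 4) | <lo4>)' where each operand is a masked
  # (and, for hi4, shifted) extraction of inst.Bits().
  return Concat([hi4, lo4]).to_uint32()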
class BitSet(BitExpr):
"""Models a set of expressions."""
def __init__(self, values):
self._values = values
def args(self):
return self._values[:]
def to_register_list(self, options={}):
code = 'RegisterList()'
for value in self._values:
code = '%s.\n Add(%s)' % (code, value.to_register(options))
return code
def sub_bit_exprs(self):
return list(self._values)
def __repr__(self):
return '{%s}' % ', '.join([repr(v) for v in self._values])
def neutral_repr(self):
return '{%s}' % ', '.join([neutral_repr(v) for v in self._values])
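# Illustrative sketch (hypothetical helper, not part of the generator):
# a literal set of registers renders as a chain of RegisterList Adds.
def _example_bitset_registers():
  regs = BitSet([Literal(13, name='Sp'), Literal(14, name='Lr')])
  # 'RegisterList().\n   Add(Register(13)).\n   Add(Register(14))'
  return regs.to_register_list()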
"""Defines a map from a function name, to the list of possible signatures
for the function. The signature is a two-tuple where the first element
is a list of parameter types, and the second is the result type.
If a function does not appear in this list, all arguments are assumed
to be of type uint32, and return type uint32.
NOTE: Currently, one can't allow full polymorphism in the signatures,
because there is no way to test if an expression can be of a particular
type.
"""
_FUNCTION_SIGNATURE_MAP = {
'Add': [(['register_list', 'register'], 'register_list')],
'Contains': [(['register_list', 'register'], 'bool')],
'Union': [(['register_list', 'register_list'], 'register_list')],
'NumGPRs': [(['register_list'], 'uint32')],
'SmallestGPR': [(['register_list'], 'uint32')],
'Register': [(['uint32'], 'register')],
'RegisterList': [([], 'register_list'),
(['uint32'], 'register_list'),
                   # (['register'], 'register_list'),
],
}
# Models how each DGEN type is represented as a C++ type cast.
DGEN_TYPE_TO_CPP_TYPE = {
'int': 'int32_t',
'unsigned': 'uint32_t'
}
class TypeCast(BitExpr):
"""Allow some simple type castings."""
def __init__(self, type, arg):
self._type = type
self._arg = arg
if type not in DGEN_TYPE_TO_CPP_TYPE.keys():
raise Exception('TypeCast(%s, %s): type not understood.' %
(type, arg))
# Verify we can convert arg to an integer.
arg.to_uint32()
  def name(self):
    # There is no _name attribute on a TypeCast; report the target type.
    return self._type
def arg(self):
return self._arg
def to_uint32(self, options={}):
return ('static_cast<%s>(%s)' %
(DGEN_TYPE_TO_CPP_TYPE[self._type],
self._arg.to_uint32(options)))
def sub_bit_exprs(self):
return [ self._arg ]
def __repr__(self):
return '%s(%s)' % (self._type, self._arg)
def neutral_repr(self):
return '%s(%s)' % (self._type, neutral_repr(self._arg))
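# Illustrative sketch (hypothetical helper, not part of the generator):
# subtracting two bitfields triggers AddOp's special case, which wraps
# both operands in TypeCast('int', ...) so the generated C++ difference
# can go negative instead of wrapping around as unsigned arithmetic.
def _example_signed_subtraction():
  imm_hi = BitField('inst', 19, 16)
  imm_lo = BitField('inst', 3, 0)
  # 'static_cast<int32_t>(<imm_hi>) - static_cast<int32_t>(<imm_lo>)'
  return AddOp('-', imm_hi, imm_lo).to_uint32()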
class FunctionCall(BitExpr):
"""Abstract class defining an (external) function call."""
def __init__(self, name, args):
self._name = name
self._args = args
def name(self):
return self._name
def args(self):
return self._args[:]
def to_bitfield(self, options={}):
raise Exception('to_bitfield not defined for %s' % self)
def to_bool(self, options={}):
return self._to_call('bool', self._add_namespace_option(options))
def to_register(self, options={}):
return self._to_call('register', self._add_namespace_option(options))
def to_register_list(self, options={}):
return self._to_call('register_list', self._add_namespace_option(options))
def to_uint32(self, options={}):
return self._to_call('uint32', self._add_namespace_option(options))
def sub_bit_exprs(self):
return list(self._args)
def __repr__(self):
return "%s(%s)" % (self._name,
', '.join([repr(a) for a in self._args]))
def neutral_repr(self):
return "%s(%s)" % (self._name,
', '.join([neutral_repr(a) for a in self._args]))
def matches_signature(self, signature, return_type, options={}):
"""Checks whether the function call matches the signature.
If so, returns the corresponding (translated) arguments
to use for the call. Otherwise returns None.
"""
params, result = signature
if result != return_type: return None
if len(params) != len(self.args()): return None
args = []
for (type, arg) in zip(params, self.args()):
args.append(arg.to_type(type, options))
return args
def _to_call(self, return_type, options={}):
"""Generates a call to the external function."""
# Try (pseudo) translation functions.
trans_fcns = _FUNCTION_TRANSLATION_MAP.get(self._name)
if trans_fcns:
for fcn in trans_fcns:
exp = fcn(self, return_type, options)
if exp: return exp
# Convert arguments to corresponding signatures, and
# return corresponding call.
namespace = (('%s::' % options.get('namespace'))
if options.get('namespace') else '')
signatures = _FUNCTION_SIGNATURE_MAP.get(self._name)
    if signatures == None:
      args = [a.to_uint32(options) for a in self.args()]
    else:
      good = False
      for signature in signatures:
        args = self.matches_signature(signature, return_type, options)
        if args != None:
          good = True
          # Stop at the first match so args isn't clobbered by a later,
          # non-matching signature.
          break
      if not good:
        raise Exception("don't know how to translate to %s: %s" %
                        (return_type, self))
return '%s(%s)' % ('%s%s' % (namespace, self._name), ', '.join(args))
def _add_namespace_option(self, options):
if not options.get('namespace'):
options['namespace'] = 'nacl_arm_dec'
return options
class InSet(BitExpr):
"""Abstract class defining set containment."""
def __init__(self, value, bitset):
self._value = value
self._bitset = bitset
def value(self):
"""Returns the value to test membership on."""
return self._value
def bitset(self):
"""Returns the set of values to test membership on."""
return self._bitset
def to_bitfield(self, options={}):
raise Exception("to_bitfield not defined for %s" % self)
def to_bool(self, options={}):
return self._simplify().to_bool(options)
def to_register(self, options={}):
raise Exception("to_register not defined for %s" % self)
def to_register_list(self, options={}):
raise Exception("to_register_list not defined for %s" % self)
def to_uint32(self, options={}):
return self._simplify().to_uint32(options)
def sub_bit_exprs(self):
return [self._value, self._bitset]
def neutral_repr(self):
return self._simplify().neutral_repr()
def _simplify(self):
"""Returns the simplified or expression that implements the
membership tests."""
args = self._bitset.args()
if not args: return BoolValue(False)
if len(args) == 1: return self._simplify_test(args[0])
return OrExp([self._simplify_test(a) for a in args])
def _simplify_test(self, arg):
"""Returns how to test if the value matches arg."""
raise Exception("InSet._simplify_test not defined for type %s" % type(self))
class InUintSet(InSet):
"""Models testing a value in a set of integers."""
def __init__(self, value, bitset):
InSet.__init__(self, value, bitset)
def _simplify_test(self, arg):
return CompareExp("==", self._value, arg)
def __repr__(self):
return "%s in %s" % (repr(self._value), repr(self._bitset))
class InBitSet(InSet):
"""Models testing a value in a set of bit patterns"""
def __init__(self, value, bitset):
InSet.__init__(self, value, bitset)
# Before returning, be sure the value/bitset entries correctly
# correspond, by forcing construction of the simplified expression.
self.neutral_repr()
def _simplify_test(self, arg):
return BitPattern.parse(arg, self._value)
def __repr__(self):
return "%s in bitset %s" % (repr(self._value), repr(self._bitset))
_IF_THEN_ELSE_CPP_FORMAT="""(%s
? %s
: %s)"""
_IF_THEN_ELSE_DGEN_FORMAT="""%s
if %s
else %s"""
class IfThenElse(BitExpr):
"""Models a conditional expression."""
def __init__(self, test, then_value, else_value):
self._test = test
self._then_value = then_value
self._else_value = else_value
def test(self):
return self._test
def then_value(self):
return self._then_value
def else_value(self):
return self._else_value
def negate(self):
return IfThenElse(self._test, self._else_value, self._then_value)
def to_bool(self, options={}):
return _IF_THEN_ELSE_CPP_FORMAT % (
self._test.to_bool(options),
self._then_value.to_bool(options),
self._else_value.to_bool(options))
def to_register_list(self, options={}):
return _IF_THEN_ELSE_CPP_FORMAT % (
self._test.to_bool(options),
self._then_value.to_register_list(options),
self._else_value.to_register_list(options))
def to_uint32(self, options={}):
return _IF_THEN_ELSE_CPP_FORMAT % (
self._test.to_bool(options),
self._then_value.to_uint32(options),
self._else_value.to_uint32(options))
def sub_bit_exprs(self):
return [self._test, self._then_value, self._else_value]
def __repr__(self):
return _IF_THEN_ELSE_DGEN_FORMAT % (
self._then_value, self._test, self._else_value)
def neutral_repr(self):
return _IF_THEN_ELSE_DGEN_FORMAT % (
neutral_repr(self._then_value),
neutral_repr(self._test),
neutral_repr(self._else_value))
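# Illustrative sketch (hypothetical helper, not part of the generator):
# conditional expressions render as C++ ternaries.
def _example_if_then_else():
  is_set = CompareExp('==', BitField('inst', 20, 20), Literal(1))
  # '(<is_set> ? 4 : 0)', formatted across lines by the template above.
  return IfThenElse(is_set, Literal(4), Literal(0)).to_uint32()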
class ParenthesizedExp(BitExpr):
"""Models a parenthesized expression."""
def __init__(self, exp):
self._exp = exp
def exp(self):
return self._exp
def negate(self):
value = self._exp.negate()
return ParenthesizedExp(value)
def to_bitfield(self, options={}):
return self._exp.to_bitfield(options)
def to_bool(self, options={}):
return '(%s)' % self._exp.to_bool(options)
def to_register(self, options={}):
return '(%s)' % self._exp.to_register(options)
def to_register_list(self, options={}):
return '(%s)' % self._exp.to_register_list(options)
def to_uint32(self, options={}):
return '(%s)' % self._exp.to_uint32(options)
def sub_bit_exprs(self):
return [self._exp]
def must_be_in_range(self, min_include, max_exclude):
return self._exp.must_be_in_range(min_include, max_exclude)
def to_uint32_constant(self):
return self._exp.to_uint32_constant()
def __repr__(self):
return '(%s)' % repr(self._exp)
def neutral_repr(self):
return '(%s)' % neutral_repr(self._exp)
class Implicit(BitExpr):
"""Models an implicit method definition, based on the values of
other method definitions."""
def __init__(self, methods):
self._methods = methods
def methods(self):
return list(self._methods)
def __repr__(self):
return ("implied by %s" %
', '.join([repr(m) for m in self._methods]))
def neutral_repr(self):
return repr(self)
class QuotedString(BitExpr):
"""Models a quoted string."""
def __init__(self, text, name=None):
if not isinstance(text, str):
raise Exception("Can't create a quoted string from %s" % text)
self._text = text
if name == None: name = repr(text)
self._name = name
def name(self):
return self._name
def text(self):
return self._text
def __repr__(self):
return self.name()
def to_cstring(self):
return '"%s"' % self._text
def neutral_repr(self):
return self.name()
class Literal(BitExpr):
"""Models a literal unsigned integer."""
def __init__(self, value, name=None):
if not isinstance(value, int):
raise Exception("Can't create literal from %s" % value)
self._value = value
if name == None: name = repr(value)
self._name = name
def name(self):
return self._name
def value(self):
return self._value
def to_uint32(self, options={}):
return repr(self._value)
  def must_be_in_range(self, min_include, max_exclude):
    c = self.to_uint32_constant()
    return min_include <= c and c < max_exclude
  def to_uint32_constant(self):
    # Public (non-underscore) name, overriding the BitExpr default, so
    # that AddOp's constant folding and range analysis can see literals.
    return self._value
def neutral_repr(self):
return self.name()
def __repr__(self):
return self.name()
class BoolValue(BitExpr):
"""Models true and false."""
def __init__(self, value):
if not isinstance(value, bool):
raise Exception("Can't create boolean value from %s" % value)
self._value = value
def value(self):
return self._value
def to_bool(self, options={}):
return 'true' if self._value else 'false'
def to_uint32(self, options={}):
    return '1' if self._value else '0'
def __repr__(self):
return self.to_bool()
def neutral_repr(self):
return self.to_bool()
class NegatedTest(BitExpr):
"""Models a negated (test) value."""
def __init__(self, test):
self._test = test
def negate(self):
return self._test
def to_bitfield(self, options={}):
raise Exception("to_bitfield not defined for %s" % self)
def to_bool(self, options={}):
return "!(%s)" % self._test.to_bool(options)
def to_register(self, options={}):
raise Exception('to_register not defined for %s' % self)
def to_uint32(self, options={}):
return "((uint32_t) %s)" % self.to_bool(options)
def sub_bit_exprs(self):
return [self._test]
def __repr__(self):
return 'not %s' % self._test
def neutral_repr(self):
return 'not %s' % self._test.neutral_repr()
class BitField(BitExpr):
"""Defines a bitfield within an instruction."""
def __init__(self, name, hi, lo, options={}):
if not isinstance(name, BitExpr):
# Unify non-bit expression bitfield to corresponding bitfield
# version, so that there is a uniform representation.
name = name if isinstance(name, str) else repr(name)
name = IdRef(name, Instruction())
self._name = name
self._hi = hi
self._lo = lo
max_bits = self._max_bits(options)
if not (0 <= lo and lo <= hi and hi < max_bits):
raise Exception('BitField %s: range illegal' % repr(self))
def num_bits(self):
""""returns the number of bits represented by the bitfield."""
return self._hi - self._lo + 1
def mask(self):
mask = 0
for i in range(0, self.num_bits()):
mask = (mask << 1) + 1
mask = mask << self._lo
return mask
def name(self):
return self._name
def hi(self):
return self._hi
def lo(self):
return self._lo
def to_bitfield(self, option={}):
return self
def to_uint32(self, options={}):
masked_value ="(%s & 0x%08X)" % (self._name.to_uint32(options), self.mask())
if self._lo != 0:
masked_value = '(%s >> %s)' % (masked_value, self._lo)
return masked_value
def must_be_in_range(self, min_include, max_exclude):
if min_include > 0: return False
if (1 << (self.hi() + 1 - self.lo())) > max_exclude: return False
return True
def _max_bits(self, options):
"""Returns the maximum number of bits allowed."""
max_bits = options.get('max_bits')
if not max_bits:
max_bits = 32
return max_bits
def sub_bit_exprs(self):
return [self._name]
def __repr__(self):
return self._named_repr(repr(self._name))
def neutral_repr(self):
return self._named_repr(self._name.neutral_repr())
def _named_repr(self, name):
if self._hi == self._lo:
return '%s(%s)' % (name, self._hi)
else:
return '%s(%s:%s)' % (name, self._hi, self._lo)
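# Illustrative sketch (hypothetical helper, not part of the generator):
# a named bitfield extracts with a mask and shift against inst.Bits().
def _example_bitfield():
  rn = BitField('Rn', 19, 16)
  assert rn.mask() == 0x000F0000
  return rn.to_uint32()   # '((inst.Bits() & 0x000F0000) >> 16)'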
class Instruction(BitExpr):
"""Models references to the intruction being decoded."""
def to_uint32(self, options={}):
return '%s.Bits()' % self._inst_name(options)
def __repr__(self):
return self._inst_name()
def neutral_repr(self):
return self._inst_name()
def _inst_name(self, options={}):
inst = options.get('inst')
if not inst:
inst = 'inst'
return inst
class SafetyAction(BitExpr):
"""Models a safety check, and the safety action returned."""
def __init__(self, test, action):
# Note: The following list is from inst_classes.h, and should
# be kept in sync with that file (see type SafetyLevel).
if action not in ['UNINITIALIZED',
'UNKNOWN', 'UNDEFINED', 'NOT_IMPLEMENTED',
'UNPREDICTABLE', 'DEPRECATED', 'FORBIDDEN',
'FORBIDDEN_OPERANDS', 'DECODER_ERROR', 'MAY_BE_SAFE']:
raise Exception("Safety action %s => %s not understood" %
(test, action))
self._test = test
self._action = action
def test(self):
return self._test
def action(self):
return self._action
def to_bitfield(self, options={}):
raise Exception("to_bitfield not defined for %s" % self)
def to_bool(self, options={}):
# Be sure to handle inflection of safety value when defining the boolean
# value the safety action corresponds to.
if self._action == 'MAY_BE_SAFE':
return self._test.to_bool(options)
else:
return self._test.negate().to_bool(options)
def to_register(self, options={}):
raise Exception("to_register not defined for %s" % self)
def to_register_list(self, options={}):
raise Exception("to_register_list not defined for %s" % self)
def to_uint32(self, options={}):
raise Exception("to_uint32 not defined for %s" % self)
def sub_bit_exprs(self):
return [self._test]
def __repr__(self):
return '%s => %s' % (repr(self._test), self._action)
def neutral_repr(self):
return '%s => %s' % (neutral_repr(self._test), self._action)
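# Illustrative sketch (hypothetical helper, not part of the generator):
# for any action other than MAY_BE_SAFE, to_bool() emits the *negated*
# test, i.e. a boolean that is true exactly when the check passes.
def _example_safety_action():
  writes_pc = CompareExp('==', BitField('Rd', 15, 12), Literal(15))
  safety = SafetyAction(writes_pc, 'UNPREDICTABLE')
  return safety.to_bool()   # '((<Rd field>) != (15))'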
class Violation(BitExpr):
"""Models a (conditional) violation."""
  def __init__(self, test, print_args):
    self._test = test
    self._print_args = print_args
    # No violation type is attached by default; callers may set one.
    # Without this, violation_type() would raise an AttributeError.
    self._violation_type = None
  def test(self):
    return self._test
  def violation_type(self):
    return self._violation_type
def print_args(self):
return list(self._print_args)
def to_bool(self, options={}):
return self.test().to_bool(options)
def sub_bit_exprs(self):
return [self._test] + self._print_args
def __repr__(self):
return '%s =>\n error(%s)' % (
self._test,
', '.join([repr(a) for a in self._print_args]))
def neutral_repr(self):
return '%s =>\n error(%s)' % (
neutral_repr(self._test),
', '.join([neutral_repr(a) for a in self._print_args]))
_INHERITS_SYMBOL = '$inherits$'
_INHERITS_EXCLUDES_SYMBOL = '$inherits-excludes$'
class SymbolTable(object):
"""Holds mapping from names to corresponding value."""
def __init__(self):
self._dict = {}
self._frozen = False
def copy(self):
"""Returns a copy of the symbol table"""
st = SymbolTable()
for k in self._dict:
st._dict[k] = self._dict[k]
return st
def find(self, name, install_inheriting=True):
value = self._dict.get(name)
if value: return value
inherits = self._dict.get(_INHERITS_SYMBOL)
if not inherits: return None
excludes = self._dict.get(_INHERITS_EXCLUDES_SYMBOL)
if excludes and name in excludes:
return None
value = inherits.find(name)
if value == None: return value
if self._frozen:
raise Exception(
"Can't copy inherited value of %s, symbol table frozen" % name)
# Install locally before going on, so that the same
# definition is consistently used.
if install_inheriting:
self._dict[name] = value
return value
def define(self, name, value, fail_if_defined = True):
"""Adds (name, value) pair to symbol table if not already defined.
Returns True if added, otherwise False.
"""
if self._dict.get(name):
if fail_if_defined:
raise Exception('%s: multiple definitions' % name)
return False
elif self._frozen:
raise Exception("Can't assign %s, symbol table is frozen" % name)
else:
self._dict[name] = value
return True
def freeze(self):
"""Freeze symbol table, i.e. no longer allow assignments into the symbol
table."""
self._frozen = True
def remove(self, name):
self._dict.pop(name, None)
def inherits(self, context, excludes):
"""Adds inheriting symbol table."""
self.define(_INHERITS_SYMBOL, context)
self.define(_INHERITS_EXCLUDES_SYMBOL, excludes)
def disinherit(self):
"""Removes inheriting symbol tables."""
# Install inheriting values not explicitly overridden.
excludes = set([_INHERITS_EXCLUDES_SYMBOL, _INHERITS_SYMBOL])
current_st = self
inherits_st = current_st.find(_INHERITS_SYMBOL)
while inherits_st:
# Start by updating symbols to be excluded from the current
# symbol table.
inherits_excludes = current_st.find(
_INHERITS_EXCLUDES_SYMBOL, install_inheriting=False)
if inherits_excludes:
for sym in inherits_excludes:
excludes.add(sym)
# Copy definitions in inherits to this, excluding symbols that
# should not be inherited.
for key in inherits_st.keys():
if key not in excludes:
# If the key defines a fields argument, remove references
# to excluded fields.
value = inherits_st.find(key)
if key == 'fields' and isinstance(value, list):
filtered_fields = []
for field in value:
subfield = field
if isinstance(subfield, BitField):
subfield = subfield.name()
if (isinstance(subfield, IdRef)
and subfield.name() in excludes):
continue
filtered_fields.append(field)
value = filtered_fields
# Install value.
self.define(key, value, fail_if_defined=False)
current_st = inherits_st
inherits_st = current_st.find(_INHERITS_SYMBOL)
# Before returning, remove inheriting entries.
self.remove(_INHERITS_EXCLUDES_SYMBOL)
self.remove(_INHERITS_SYMBOL)
  def keys(self):
    # Filter out both bookkeeping symbols used to implement inheritance.
    return [k for k in self._dict.keys()
            if k not in (_INHERITS_SYMBOL, _INHERITS_EXCLUDES_SYMBOL)]
def sub_bit_exprs(self):
exprs = set()
    for (key, value) in self._dict.items():
_add_if_bitexpr(key, exprs)
_add_if_bitexpr(value, exprs)
return list(exprs)
def __hash__(self):
return hash(self.neutral_repr())
def __cmp__(self, other):
return (cmp(type(self), type(other)) or
cmp(self.neutral_repr(), other.neutral_repr()))
def neutral_repr(self):
neutral_dict = {}
for k in self._dict.keys():
value = neutral_repr(self._dict[k])
neutral_dict[k] = value
return self._describe(neutral_dict)
def __repr__(self):
return self._describe(self._dict)
def _describe(self, dict):
dict_rep = '{'
is_first = True
for k in sorted(dict.keys()):
if is_first:
is_first = False
else:
dict_rep = '%s,%s ' % (dict_rep, NEWLINE_STR)
value = dict[k]
# Try to better pretty-print lists.
if isinstance(value, list) and len(repr(value)) > 60:
value = '[' + ',\n '.join([repr(v) for v in value]) + ']'
dict_rep = "%s%s: %s" % (dict_rep, k, value)
dict_rep += '}'
return dict_rep
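# Illustrative sketch (hypothetical helper, not part of the generator):
# lookups fall through to an inherited table (minus excluded names) and
# the found value is cached locally for consistent reuse.
def _example_symbol_inheritance():
  base = SymbolTable()
  base.define('cond', Literal(14))
  derived = SymbolTable()
  derived.inherits(base, ['safety'])   # inherit everything but 'safety'
  assert derived.find('cond').value() == 14
  return derived.find('cond')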
class BitPattern(BitExpr):
"""A pattern for matching strings of bits. See parse() for
syntax."""
@staticmethod
def parse(pattern, column):
"""Parses a string pattern describing some bits. The string
can consist of '1' and '0' to match bits explicitly, 'x' or
'X' to ignore bits, '_' as an ignored separator, and an
optional leading '~' to negate the entire pattern.
Examples:
10xx0
1111_xxxx
~111x
The pattern may also optionally be '-', which is equivalent to
a sequence of 'xxx...xxx' of the requested width.
Args:
pattern: a string in the format described above.
column: The tuple (name, hi, lo) defining a column.
Returns:
A BitPattern instance that describes the match, and is capable of
transforming itself to a C expression.
Raises:
Exception: the input didn't meet the rules described above.
"""
col = column.to_bitfield()
hi_bit = col.hi()
lo_bit = col.lo()
num_bits = col.num_bits()
# Convert - into a full-width don't-care pattern.
if pattern == '-':
return BitPattern.parse('x' * num_bits, column)
# Derive the operation type from the presence of a leading
# tilde.
if pattern.startswith('~'):
op = '!='
pattern = pattern[1:]
else:
op = '=='
# Allow use of underscores anywhere in the pattern, as a
# separator.
pattern = pattern.replace('_', '')
if len(pattern) != num_bits:
raise Exception('Pattern %s is wrong length for %d:%u'
% (pattern, hi_bit, lo_bit))
mask = 0
value = 0
for c in pattern:
if c == '1':
mask = (mask << 1) | 1
value = (value << 1) | 1
elif c == '0':
mask = (mask << 1) | 1
value = value << 1
elif c.isalpha(): # covers both rule patterns and table patterns
mask = mask << 1
value = value << 1
else:
raise Exception('Invalid characters in pattern %s' % pattern)
mask = mask << lo_bit
value = value << lo_bit
return BitPattern(mask, value, op, col)
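# Worked example (illustrative, assuming a column covering bits 4:0):
# parse('10xx0', col) scans the characters left to right and yields
# mask=0b11001 (bits 4, 3 and 0 are significant), value=0b10000 (only
# bit 4 must be set) and op '=='; a leading '~' would flip op to '!='.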
@staticmethod
def parse_catch(pattern, column):
""""Calls parse with given arguments, and catches exceptions
raised. Prints raised exceptions and returns None.
"""
try:
return BitPattern.parse(pattern, column)
except Exception as ex:
print "Error: %s" % ex
return None
@staticmethod
def always_matches(column=None):
"""Returns a bit pattern corresponding to always matches."""
return BitPattern(0, 0, '==', column)
def matches_any(self):
"""Returns true if pattern matches any pattern of bits."""
return self.mask == 0
def negate(self):
"""Returns pattern that is negation of given pattern"""
if self.is_equal_op():
return BitPattern(self.mask, self.value, '!=', self.column)
else:
return BitPattern(self.mask, self.value, '==', self.column)
def __init__(self, mask, value, op, column=None):
"""Initializes a BitPattern.
Args:
mask: an integer with 1s in the bit positions we care about (e.g.
those that are not X)
value: an integer that would match our pattern, subject to the mask.
op: either '==' or '!=', if the pattern is positive or negative,
respectively.
column: If specified, the corresponding column information for
the bit pattern.
"""
self.mask = mask
self.value = value
self.op = op
self.column = column
# Fail if we get something we don't know how to handle.
if column:
Good = isinstance(column, BitField)
if isinstance(column, IdRef):
Good = isinstance(column.value, BitField)
if not Good:
raise Exception(
"Don't know how to generate bit pattern for %s" % column)
def signif_bits(self):
"""Returns the number of signifcant bits in the pattern
(i.e. occurrences of 0/1 in the pattern."""
return _popcount(self.mask)
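# Example (illustrative): for the '10xx0' pattern above, mask=0b11001,
# so signif_bits() == _popcount(0b11001) == 3.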
def copy(self):
"""Returns a copy of the given bit pattern."""
return BitPattern(self.mask, self.value, self.op, self.column)
def union_mask_and_value(self, other):
"""Returns a new bit pattern unioning the mask and value of the
other bit pattern."""
return BitPattern(self.mask | other.mask, self.value | other.value,
self.op, self.column)
def is_equal_op(self):
"""Returns true if the bit pattern is an equals (rather than a
not equals)."""
return self.op == '=='
def conflicts(self, other):
"""Returns an integer with a 1 in each bit position that
conflicts between the two patterns, and 0s elsewhere. Note
that this is only useful if the masks and ops match.
"""
return (self.mask & self.value) ^ (other.mask & other.value)
def is_complement(self, other):
"""Checks if two patterns are complements of each other. This
means they have the same mask and pattern bits, but one is
negative.
"""
return (self.op != other.op
and self.mask == other.mask
and self.value == other.value)
def strictly_overlaps(self, other):
"""Checks if patterns overlap, and aren't equal."""
return ((self.mask & other.mask) != 0) and (self != other)
def is_strictly_compatible(self, other):
"""Checks if two patterns are safe to merge using +, but are
not ==."""
if self.is_complement(other):
return True
elif self.op == other.op:
return (self.mask == other.mask
and _popcount(self.conflicts(other)) == 1)
return False
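# Example (illustrative, 3-bit column): '100' and '101' share mask 0b111
# and their values differ in exactly one significant bit, so they are
# strictly compatible and may be merged via + into '10x'. '100' and '111'
# differ in two bits and are not.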
def categorize_match(self, pattern):
""" Compares this pattern againts the given pattern, and returns one
of the following values:
'match' - All specified bits in this match the corresponding bits in
the given pattern.
'conflicts' - There are bits in this pattern that conflict with the
given pattern. Hence, there is no way this pattern will
succeed for instructions matching the given pattern.
'consistent' - The specified bits in this pattern neither match
nor conflict with the given pattern. No conclusions
can be drawn from the overlapping bits of this and the
given pattern.
"""
if self.is_equal_op():
# Compute the significant bits that overlap between this pattern and
# the given pattern.
mask = (self.mask & pattern.mask)
if pattern.is_equal_op():
# Testing if significant bits of this pattern differ (i.e. conflict)
# with the given pattern.
if mask & (self.value ^ pattern.value):
# Conflicts, no pattern match.
return 'conflicts'
else:
# Matches on significant bits in mask.
return 'match'
else:
# Test if negated given pattern matches the significant
# bits of this pattern.
if mask & (self.value ^ ~pattern.value):
# Conflicts, so given pattern can't match negation. Hence,
# this pattern succeeds.
return 'match'
else:
# Consistent with negation. For now, we don't try any harder,
# since it is not needed to add rule patterns to decoder table
# rows.
return 'consistent'
else:
# self match on negation.
negated_self = self.copy()
negated_self.op = '=='
result = negated_self.categorize_match(pattern)
if result == 'match':
return 'match'
else:
# Not exact match. Can only assume they are consistent (since none
# of the bits conflicted).
return 'consistent'
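# Examples (illustrative, 2-bit column, both patterns using '=='):
# '10' vs '11' -> 'conflicts' (bit 0 disagrees on a shared significant bit);
# '1x' vs '11' -> 'match' (all overlapping significant bits agree);
# '1x' vs 'x1' -> 'match' (no significant bits overlap, so the overlap
# test is vacuously satisfied).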
def remove_overlapping_bits(self, pattern):
"""Returns a copy of this with overlapping significant bits of this
and the given pattern.
"""
# Compute significant bits that overlap between this pattern and
# the given pattern, and build a mask to remove those bits.
mask = ~(self.mask & pattern.mask)
# Now build a new bit pattern with overlapping bits removed.
return BitPattern((mask & self.mask),
(mask & self.value),
self.op,
self.column)
def __add__(self, other):
"""Merges two compatible patterns into a single pattern that matches
everything either pattern would have matched.
"""
assert (self == other) or self.is_strictly_compatible(other)
if self.op == other.op:
c = self.conflicts(other)
return BitPattern((self.mask | other.mask) ^ c,
(self.value | other.value) ^ c, self.op, self.column)
else:
return BitPattern.always_matches(self.column)
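# Example (illustrative): '100' + '101' computes conflict bits c=0b001 and
# clears them from the merged mask and value, yielding mask=0b110,
# value=0b100, i.e. the pattern '10x'. Adding a pattern to its complement
# (same mask/value, opposite op) collapses to an always-matches pattern.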
def to_bool(self, options={}):
# Generate expression corresponding to bit pattern.
if self.mask == 0:
value = 'true'
else:
inst = self.column.name().to_uint32(options)
value = ('(%s & 0x%08X) %s 0x%08X'
% (inst, self.mask,
_COMPARE_OP_FORMAT % self.op,
self.value))
return value
def to_commented_bool(self, options={}):
if not self.column and self.mask == 0:
# No information is provided by the comment, so don't add!
return 'true'
return BitExpr.to_commented_bool(self, options)
def bitstring(self):
"""Returns a string describing the bitstring of the pattern."""
bits = self._bits_repr()
if self.column:
col = self.column.to_bitfield()
bits = bits[col.lo() : col.hi() + 1]
bits.reverse()
return ''.join(bits)
def __hash__(self):
value = hash(self.mask) + hash(self.value) + hash(self.op)
if self.column:
value += hash(neutral_repr(self.column))
return value
def __cmp__(self, other):
"""Compares two patterns for sorting purposes. We sort by
- # of significant bits, DESCENDING,
- then mask value, numerically,
- then value, numerically,
- and finally op.
This is also used for equality comparison using ==.
"""
return (cmp(type(self), type(other))
or cmp(other.signif_bits(), self.signif_bits())
or cmp(self.mask, other.mask)
or cmp(self.value, other.value)
or cmp(self.op, other.op)
or cmp(neutral_repr(self.column), neutral_repr(other.column)))
def first_bit(self):
"""Returns the index of the first 0/1 bit in the pattern. or
None if no significant bits exist for the pattern.
"""
for i in range(0, NUM_INST_BITS):
if (self.mask >> i) & 1:
return i
return None
def add_column_info(self, columns):
"""If the bit pattern doesn't have column information, add
it based on the columns passed in. Otherwise return self.
"""
if self.column: return self
for c in columns:
hi_bit = c.hi()
lo_bit = c.lo()
index = self.first_bit()
if index is None: continue
if index >= lo_bit and index <= hi_bit:
return BitPattern(self.mask, self.value, self.op, c)
return self
def sub_bit_exprs(self):
return [self.column]
def _bits_repr(self):
"""Returns the 0/1/x's of the bit pattern as a list (indexed
by bit position).
"""
pat = []
for i in range(0, NUM_INST_BITS):
if (self.mask >> i) & 1:
pat.append(str((self.value >> i) & 1))
else:
pat.append('x')
return pat
def neutral_repr(self):
if self.column:
return '%s=%s%s' % (self.column.neutral_repr(),
'' if self.is_equal_op() else '~',
self.bitstring())
elif self.is_equal_op():
return self.bitstring()
else:
return "~%s" % self.bitstring()
def __repr__(self):
"""Returns the printable string for the bit pattern."""
if self.column:
return '%s=%s%s' % (self.column,
'' if self.is_equal_op() else '~',
self.bitstring())
elif self.is_equal_op():
return self.bitstring()
else:
return "~%s" % self.bitstring()
TABLE_FORMAT="""
Table %s
%s
%s
"""
class Table(object):
"""A table in the instruction set definition. Each table contains 1+
columns, and 1+ rows. Each row contains a bit pattern for each column, plus
the action to be taken if the row matches."""
def __init__(self, name, citation):
"""Initializes a new Table.
Args:
name: a name for the table, used to reference it from other tables.
citation: the section in the ISA spec this table was derived from.
"""
self.name = name
self.citation = citation
self.default_row = None
self._rows = []
self._columns = []
def columns(self):
return self._columns[:]
def add_column(self, column):
"""Adds a column to the table. Returns true if successful."""
for col in self._columns:
if repr(col) == repr(column):
return False
self._columns.append(column)
return True
def rows(self, default_also = True):
"""Returns all rows in table (including the default row
as the last element if requested).
"""
r = self._rows[:]
if default_also and self.default_row:
r.append(self.default_row)
return r
def add_default_row(self, action):
"""Adds a default action to use if none of the rows apply.
Returns True if able to define.
"""
if self.default_row: return False
self.default_row = Row([BitPattern.always_matches()], action)
return True
def add_row(self, patterns, action):
"""Adds a row to the table.
Args:
patterns: a list containing a BitPattern for every column in the
table.
action: The action associated with the row. Must either be
a DecoderAction or a DecoderMethod.
"""
row = Row(patterns, action)
self._rows.append(row)
return row
def remove_table(self, name):
"""Removes method calls to the given table name from the table"""
for row in self._rows:
row.remove_table(name)
def define_pattern(self, pattern, column):
"""Converts the given input pattern (for the given column) to the
internal form. Returns None if pattern is bad.
"""
if column >= len(self._columns): return None
return BitPattern.parse_catch(pattern, self._columns[column])
def copy(self):
"""Returns a copy of the table."""
table = Table(self.name, self.citation)
table._columns = self._columns
for r in self._rows:
table.add_row(r.patterns, r.action)
if self.default_row:
table.add_default_row(self.default_row.action)
return table
def row_filter(self, filter):
"""Returns a copy of the table, filtering each row with the
replacement row defined by function argument filter (of
form: lambda row:).
"""
table = Table(self.name, self.citation)
table._columns = self._columns
for r in self._rows:
row = filter(r)
if row:
table.add_row(row.patterns, row.action)
if self.default_row:
row = filter(self.default_row)
if row:
table.add_default_row(row.action)
return table
def action_filter(self, names):
"""Returns a table with DecoderActions reduced to the given field names.
Used to optimize out duplicates, depending on context.
"""
return self.row_filter(
lambda r: Row(r.patterns, r.action.action_filter(names)))
def add_column_to_rows(self, rows):
"""Add column information to each row, returning a copy of the rows
with column information added.
"""
new_rows = []
for r in rows:
new_patterns = []
for p in r.patterns:
new_patterns.append(p.add_column_info(self._columns))
new_rows.append(Row(new_patterns, r.action))
return new_rows
def methods(self):
"""Returns the (sorted) list of methods called by the table."""
methods = set()
for r in self.rows(True):
if r.action.__class__.__name__ == 'DecoderMethod':
methods.add(r.action)
return sorted(methods)
def __repr__(self):
rows = list(self._rows)
if self.default_row:
rows.append(self.default_row)
return TABLE_FORMAT % (self.name,
' '.join([repr(c) for c in self._columns]),
NEWLINE_STR.join([repr(r) for r in rows]))
# Defines a mapping from decoder action field names to the
# corresponding type of the expression. The domain is a field
# name. The range is a set whose elements are either type names
# defined in BitExpr.to_type, or functions (taking a single
# argument) which do the type checking. Note: This is filled
# dynamically during import time, so that circular dependencies can be
# handled. In particular, dgen_decoder.py fills in types for method
# fields.
_DECODER_ACTION_FIELD_TYPE_MAP = {}
def DefineDecoderFieldType(name, type):
"""Adds the corresponding type association to the list of known
types for decoder fields."""
global _DECODER_ACTION_FIELD_TYPE_MAP
types = _DECODER_ACTION_FIELD_TYPE_MAP.get(name)
if types is None:
types = set()
_DECODER_ACTION_FIELD_TYPE_MAP[name] = types
types.add(type)
class DecoderAction:
"""An action defining a class decoder to apply.
Fields are:
_st - Symbol table of other information stored on the decoder action.
_neutral_st - Symbol table to use for neutral_repr. Note: This
symbol table is the same as _st, except when method action_filter
is called. In the latter case, it has the minimal needed fields
(based on the filter) so that the neutral representation does
not get messed up with aliases added to _st during filtering.
"""
def __init__(self, baseline=None, actual=None):
self._st = SymbolTable()
self._neutral_st = self._st
if baseline != None:
self._st.define('baseline', baseline)
if actual != None:
self._st.define('actual', actual)
# The following field is set by method force_type_checking, and is
# used to force type checking while parsing a decoder action. This
# allows the parser to report problems at the corresponding source
# line that defined the value of a field to something we don't
# understand.
self._force_type_checking = False
def force_type_checking(self, value):
"""Sets field defining if type checking will be done as symbols
are added to the decoder action. This allows the parser to
report problems at the corresponding source line that defined
the field."""
self._force_type_checking = value
def find(self, name, install_inheriting=True):
return self._st.find(name, install_inheriting)
def define(self, name, value, fail_if_defined=True):
if self._force_type_checking:
types = _DECODER_ACTION_FIELD_TYPE_MAP.get(name)
if types:
# Now try translating value for each type, so that
# if there is a problem, a corresponding exception
# will be raised.
for type in types:
if isinstance(type, str):
if not isinstance(value, BitExpr):
raise Exception(
"Defining %s:%s. Value must be BitExpr" %
(name, value))
value.to_type(type)
else:
type(value)
return self._st.define(name, value, fail_if_defined)
def freeze(self):
"""Don't allow any modifications of fields (unless copying)."""
self._st.freeze()
def remove(self, name):
self._st.remove(name)
def inherits(self, context, excludes):
self._st.inherits(context, excludes)
def disinherit(self):
self._st.disinherit()
def keys(self):
return self._st.keys()
def copy(self):
"""Returns a copy of the decoder action."""
action = DecoderAction()
action._st = SymbolTable()
action._neutral_st = action._st
for field in self._st.keys():
action.define(field, self.find(field))
return action
def action_filter(self, names):
"""Filters fields in the decoder to only include fields in names.
for most operations, we build a symbol table (_st) that contains
not only the specified fields, but any implicit dependent fields.
For method neutral_repr, we create a special symbol table _neutral_st
that only contains the fields specified.
Note: actual and actual-not-baseline are handled specially. See
code of function body for details.
"""
action = DecoderAction()
action._st = SymbolTable()
action._neutral_st = SymbolTable()
# Copy direct values in.
values = set()
for n in names:
name = n
if name == 'actual':
# Check for special case symbol that copies the baseline if the
# actual field is not defined.
value = self.actual()
elif name == 'actual-not-baseline':
# Check for special case symbol that defines actual only if
# the actual is different than the baseline.
actual = self.actual()
if actual and actual != self.baseline():
name = 'actual'
value = actual
else:
value = None
else:
# Copy over if defined.
value = self._st.find(n)
if value != None:
# Copy to both the copy, and the neutral form.
action._st.define(name, value, fail_if_defined=False)
neutral_value = value
if name == 'safety':
# To get better compression of actual decoders,
# merge and order alternatives.
neutral_value = set()
strs = set()
for v in value:
if isinstance(v, BitExpr):
neutral_value.add(v)
else:
strs.add(v)
neutral_value = sorted(neutral_value) + sorted(strs)
action._neutral_st.define(name, neutral_value, fail_if_defined=False)
_add_if_bitexpr(value, values)
# Collect sub expressions (via closure) and add names of id refs
# if applicable.
_close_referenced_bit_exprs(values)
for v in values:
if isinstance(v, IdRef):
v_name = v.name()
v_value = self._st.find(v_name)
if v_value:
action._st.define(v_name, v_value, fail_if_defined=False)
# Fill in fields.
fields = self._st.find('fields')
if fields:
new_fields = []
for f in fields:
if f in values:
new_fields.append(f)
action._st.define('fields', new_fields, fail_if_defined=False)
action._st.freeze()
action._neutral_st.freeze()
return action
def actual(self):
"""Returns the actual decoder class defined for the action if defined.
Otherwise, returns the baseline."""
act = self.find('actual')
return act if act != None else self.baseline()
def baseline(self):
"""Returns the baseline decoder class defined for the action."""
return self.find('baseline')
def pattern(self):
"""Returns the pattern associated with the action."""
return self.find('pattern')
def rule(self):
"""Returns the rule associated with the action."""
return self.find('rule')
def safety(self):
"""Returns the safety associated with the action."""
s = self.find('safety')
return s if s else []
def defs(self):
"""Returns the defs defined for the instruction, or None if undefined."""
return self.find('defs')
def __eq__(self, other):
return (type(self) == type(other) and
neutral_repr(self) == neutral_repr(other))
def __cmp__(self, other):
# Order lexicographically on type/fields.
return cmp(type(self), type(other)) or cmp(neutral_repr(self._st),
neutral_repr(other._st))
def __hash__(self):
return hash(self.neutral_repr())
def __repr__(self):
return self._describe(repr(self._st))
def neutral_repr(self):
return self._describe(neutral_repr(self._neutral_st))
def _describe(self, text):
return "= %s" % text.replace(NEWLINE_STR, NEWLINE_STR + ' ').rstrip(' ')
class DecoderMethod(object):
"""An action defining a parse method to call.
Corresponds to the parsed decoder method:
decoder_method ::= '->' id
Fields are:
name - The name of the corresponding table (i.e. method) that
should be used to continue searching for a matching class
decoder.
"""
def __init__(self, name):
self.name = name
def action_filter(self, unused_names):
return self
def copy(self):
"""Returns a copy of the decoder method."""
return DecoderMethod(self.name)
def __eq__(self, other):
return (self.__class__.__name__ == 'DecoderMethod'
and self.name == other.name)
def __cmp__(self, other):
# Lexicographically compare types/fields.
return (cmp(type(self), type(other)) or
cmp(self.name, other.name))
def __hash__(self):
return hash('DecoderMethod') + hash(self.name)
def __repr__(self):
return '-> %s' % self.name
class Row(object):
""" A row in a Table."""
def __init__(self, patterns, action):
"""Initializes a Row.
Args:
patterns: a list of BitPatterns that must match for this Row to
match.
action: the action to be taken if this Row matches.
"""
self.patterns = patterns
self.action = action
self.significant_bits = 0
for p in patterns:
self.significant_bits += p.signif_bits()
def add_pattern(self, pattern):
"""Adds a pattern to the row."""
self.patterns.append(pattern)
def remove_table(self, name):
"""Removes method call to the given table name from the row,
if applicable.
"""
if (isinstance(self.action, DecoderMethod) and
self.action.name == name):
self.action = DecoderAction('NotImplemented', 'NotImplemented')
def strictly_overlaps_bits(self, bitpat):
"""Checks if bitpat strictly overlaps a bit pattern in the row."""
for p in self.patterns:
if bitpat.strictly_overlaps(p):
return True
return False
def can_merge(self, other):
"""Determines if we can merge two Rows."""
if self.action != other.action:
return False
equal_columns = 0
compat_columns = 0
for (a, b) in zip(self.patterns, other.patterns):
if a == b:
equal_columns += 1
# Be sure the column doesn't overlap with other columns in pattern.
if (not self.strictly_overlaps_bits(a) and
not other.strictly_overlaps_bits(b) and
a.is_strictly_compatible(b)):
compat_columns += 1
cols = len(self.patterns)
return (equal_columns == cols
or (equal_columns == cols - 1 and compat_columns == 1))
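# Example (illustrative): two rows with the same action whose patterns
# are equal in every column but one, where the remaining pair differs by
# a single significant bit (e.g. '100' vs '101'), are mergeable; __add__
# below then folds that column into '10x'.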
def copy_with_action(self, action):
return Row(self.patterns, action)
def copy_with_patterns(self, patterns):
return Row(patterns, self.action)
def __add__(self, other):
assert self.can_merge(other) # Caller is expected to check!
return Row([a + b for (a, b) in zip(self.patterns, other.patterns)],
self.action)
def __cmp__(self, other):
"""Compares two rows, so we can order pattern matches by specificity.
"""
return (cmp(type(self), type(other))
or cmp(self.patterns, other.patterns)
or cmp(self.action, other.action))
def __repr__(self):
return self._describe([repr(p) for p in self.patterns], repr(self.action))
def neutral_repr(self):
return self._describe([neutral_repr(p) for p in self.patterns],
neutral_repr(self.action))
def _describe(self, patterns, action):
return (ROW_PATTERN_ACTION_FORMAT %
(' & '.join(patterns), action.replace(NEWLINE_STR,
ROW_ACTION_INDENT)))
ROW_PATTERN_ACTION_FORMAT="""%s
%s"""
ROW_ACTION_INDENT="""
"""
class Decoder(object):
"""Defines a class decoder which consists of set of tables.
A decoder has a primary (i.e. start) table to parse instructions (and
select the proper ClassDecoder), as well as a set of additional
tables to complete the selection of a class decoder. Instances of
this class correspond to the internal representation of parsed
decoder tables recognized by dgen_input.py (see that file for
details).
Fields are:
primary - The entry parse table to find a class decoder.
tables - The (sorted) set of tables defined by a decoder.
value_map - Saved values of the decoder.
Note: maintains restriction that tables have unique names.
"""
def __init__(self):
self.primary = None
self._is_sorted = False
self._tables = []
self._value_map = {}
def value_keys(self):
return self._value_map.keys()
def define_value(self, name, value):
"""Associate value with name, for the given decoder."""
self._value_map[name] = value
def get_value(self, name, default_value=None):
"""Returns the associated value with the given name. Use the
default if the name is not bound."""
return self._value_map.get(name, default_value)
def add(self, table):
"""Adds the table to the set of tables. Returns true if successful.
"""
if filter(lambda t: t.name == table.name, self._tables):
# Table with name already defined, report error.
return False
else:
if not self._tables:
self.primary = table
self._tables.append(table)
self._is_sorted = False
return True
def tables(self):
"""Returns the sorted (by table name) list of tables"""
if self._is_sorted: return self._tables
self._tables = sorted(self._tables, key=lambda tbl: tbl.name)
self._is_sorted = True
return self._tables
def table_names(self):
"""Returns the names of all tables in the decoder."""
return sorted([tbl.name for tbl in self.tables()])
def get_table(self, name):
"""Returns the table with the given name"""
for tbl in self._tables:
if tbl.name == name:
return tbl
return None
def remove_table(self, name):
"""Removes the given table from the decoder"""
new_tables = []
for table in self._tables:
if table.name != name:
new_tables = new_tables + [table]
table.remove_table(name)
self._tables = new_tables
def table_filter(self, filter):
"""Returns a copy of the decoder, filtering each table with
the replacement row defined by function argument filter (of
form: lambda table:).
Note: The filter can't change the name of the primary table.
"""
decoder = Decoder()
tables = set()
for tbl in self._tables:
filtered_table = filter(tbl)
if filtered_table != None:
tables.add(filtered_table)
if tbl.name == self.primary.name:
decoder.primary = filtered_table
elif tbl.name == self.primary.name:
raise Exception("table_filter: can't filter out table %s" %
self.primary.name)
decoder._tables = sorted(tables, key=lambda tbl: tbl.name)
decoder._value_map = self._value_map.copy()
return decoder
def action_filter(self, names):
"""Returns a new set of tables, with actions reduced to the set of
specified field names.
"""
# Filter actions in tables.
decoder = self.table_filter(lambda tbl: tbl.action_filter(names))
# Now filter other decoders associated with the specification.
for key in decoder.value_keys():
action = decoder.get_value(key)
if isinstance(action, DecoderAction):
decoder.define_value(key, action.action_filter(names))
return decoder
def decoders(self):
"""Returns the sorted sequence of DecoderAction's defined in the tables."""
decoders = set()
# Add other decoders associated with specification, but not in tables.
for key in self.value_keys():
action = self.get_value(key)
if isinstance(action, DecoderAction):
decoders.add(action)
# Add decoders specified in the tables.
for t in self._tables:
for r in t.rows(True):
if isinstance(r.action, DecoderAction):
decoders.add(r.action)
return sorted(decoders)
def rules(self):
"""Returns the sorted sequence of DecoderActions that define
the rule field.
"""
return sorted(filter(lambda r: r.rule(), self.decoders()))
def show_table(self, table):
tbl = self.get_table(table)
if tbl != None:
print "%s" % tbl
else:
raise Exception("Can't find table %s" % table)
def _TranslateSignExtend(exp, return_type, options):
"""Implements SignExtend(x, i):
if i == len(x) then x else Replicate(TopBit(x), i - len(x)):x
where i >= len(x)
"""
# Note: Returns None if not translatible.
args = _ExtractExtendArgs(exp, return_type)
if args == None: return None
if exp.name() != 'SignExtend': return None
(i, x, len_x) = args
value_x = x.to_uint32(options)
if i == len_x:
return value_x
else:
top_bit_mask = 1
for n in range(1, len_x):
top_bit_mask = top_bit_mask << 1
replicate_mask = 0
for n in range(0, i - len_x):
replicate_mask = (replicate_mask << 1) | 1
replicate_mask = replicate_mask << len_x
text = ("""(((%s) & 0x%08X)
? ((%s) | 0x%08X)
: %s)""" %
(value_x, top_bit_mask, value_x, replicate_mask, value_x))
return text
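# Example (illustrative): sign-extending a 4-bit field x to i=8 bits uses
# top_bit_mask=0x00000008 and replicate_mask=0x000000F0, so the generated
# C text has the form (((x) & 0x00000008) ? ((x) | 0x000000F0) : x).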
def _TranslateZeroExtend(exp, return_type, options):
"""Implements ZeroExtend(x, i):
if i == len(x) then x else Replicate('0', i-len(x)):x
"""
args = _ExtractExtendArgs(exp, return_type)
if args == None: return None
if exp.name() != 'ZeroExtend': return None
(i, x, len_x) = args
# Note: Converting to an unsigned integer is the same as zero extending.
return x.to_uint32(options)
def _ToBitFieldExtend(exp, return_type, options):
"""Implements a to_bitfield conversion for ZeroExtend and
SignExtend.
"""
args = _ExtractExtendArgs(exp, return_type)
if not args or exp.name() not in ['ZeroExtend', 'SignExtend']:
return None
i, x, len_x = args
return BitField(exp, len_x - 1, 0)
def _ExtractExtendArgs(exp, return_type):
"""Returns (i, x, len(x)) if exp is one of the following forms:
ZeroExtend(x, i)
SignExtend(x, i)
Otherwise, returns None.
"""
if not isinstance(exp, FunctionCall): return None
if return_type != 'uint32': return None
if len(exp.args()) != 2: return None
args = exp.args()
x = args[0]
i = args[1]
try:
bf = x.to_bitfield()
except Exception:
return None
if not isinstance(i, Literal): return None
i = i.value()
if i < 0 or i > NUM_INST_BITS: return None
len_x = bf.num_bits()
if i < len_x: return None
return (i, x, len_x)
# Holds the set of installed parameters. Map is from parameter name,
# to the previous value defined in _FUNCTION_TRANSLATION_MAP.
_INSTALLED_PARAMS_MAP = {}
def InstallParameter(name, type):
"""Installs parameter in as a preprocessing fuction of no arguments."""
global _INSTALLED_PARAMS_MAP
global _FUNCTION_TRANSLATION_MAP
installed = _FUNCTION_TRANSLATION_MAP.get(name)
_INSTALLED_PARAMS_MAP[name] = installed
if installed:
installed = list(installed)
else:
installed = []
installed.insert(0, _BuildParameter(name, type))
_FUNCTION_TRANSLATION_MAP[name] = installed
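# Usage sketch (illustrative; the parameter name is hypothetical):
# InstallParameter('cond', 'uint32') prepends a translation that rewrites
# a no-argument call cond() of matching type into the literal text 'cond';
# UninstallParameter('cond') later restores the previous translation list.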
def UninstallParameter(name):
"""Restores the function translation map to its former state before
the previous call to InstallParameter with the given name.
"""
global _INSTALLED_PARAMS_MAP
global _FUNCTION_TRANSLATION_MAP
_FUNCTION_TRANSLATION_MAP[name] = _INSTALLED_PARAMS_MAP[name]
_INSTALLED_PARAMS_MAP[name] = None
def _BuildParameter(name, type):
"""Builds a parameter translation function for the correspondin
parameter macro.
"""
return (lambda exp, return_type, options:
(name if exp.matches_signature(([], type), return_type) != None
else None))
"""Defines special processing fuctions if the signature matches."""
_FUNCTION_TRANSLATION_MAP = {
'ZeroExtend': [_TranslateZeroExtend, _ToBitFieldExtend],
'SignExtend': [_TranslateSignExtend, _ToBitFieldExtend],
}
|
Lind-Project/native_client
|
src/trusted/validator_arm/dgen_core.py
|
Python
|
bsd-3-clause
| 80,167
|
[
"ASE"
] |
0c9ee8f2d4e43df5aac8843c639d477adc3f57478a563738d9a5b5c0cbe27778
|
#!/usr/bin/env python
#=======================================================================================================================
# Created on 2014-08-11
# @author: Yi Li
#
# PyLOH
# Copyright (c) 2014 Yi Li <yil8@uci.edu>
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of GNU GPL v2.0 (see the file LICENSE included with the distribution).
#=======================================================================================================================
import argparse
import sys
import numpy as np
import pysam
CHROM_LIST = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8',
'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15',
'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
'13', '14', '15', '16', '17', '18', '19', '20', '21', '22']
def main():
parser = argparse.ArgumentParser(description='Converting paired normal and tumor BAM files to input for DNAcopy')
parser.add_argument('normal_bam', help='''Input BAM file for normal sample.''')
parser.add_argument('tumor_bam', help='''Input BAM file for tumor sample.''')
parser.add_argument('exons_bed', help='''Input BED file for all exon regions.''')
parser.add_argument('DNAcopy_bed', help='''Output BED file for DNAcopy.''')
parser.add_argument('--min_depth', default=100, type=int,
help='''Minimum read depth required for each exon region in both
normal and tumor samples. Default is 100.''')
args = parser.parse_args()
normal_bam = pysam.Samfile(args.normal_bam, 'rb')
tumor_bam = pysam.Samfile(args.tumor_bam, 'rb')
exons_bed = open(args.exons_bed)
DNAcopy_bed = open(args.DNAcopy_bed, 'w')
depth_thresh = args.min_depth
i = 0
for line in exons_bed:
i += 1
if i % 1000 == 0:
print '%s exons processed...' % (i)
sys.stdout.flush()
fields = line.strip('\n').split('\t')
chrom = fields[0]
start = int(fields[1])
end = int(fields[2])
fields2 = [chrom, fields[1], fields[2]]
normal_reads = normal_bam.count(chrom, start, end)
tumor_reads = tumor_bam.count(chrom, start, end)
if normal_reads < depth_thresh or tumor_reads < depth_thresh or chrom not in CHROM_LIST:
continue
log2_ratio = np.log2(tumor_reads*1.0/normal_reads)
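# Illustrative example (added comment): an exon with 150 normal reads
# and 300 tumor reads gives log2(300/150) = 1.0, i.e. tumor coverage is
# double the normal coverage over this exon.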
pos = (end + start)/2
fields.extend(map(str, [normal_reads, tumor_reads, log2_ratio, pos]))
fields2.extend(map(str, [normal_reads, tumor_reads, log2_ratio, pos]))
if len(fields)==7:
DNAcopy_bed.write('\t'.join(fields) + '\n')
else:
DNAcopy_bed.write('\t'.join(fields2) + '\n')
normal_bam.close()
tumor_bam.close()
exons_bed.close()
DNAcopy_bed.close()
if __name__ == '__main__':
main()
|
uros-sipetic/PyLOH
|
bin/bam2DNAcopy.py
|
Python
|
gpl-2.0
| 3,083
|
[
"pysam"
] |
0d53d47f5afb0bb51a84d92c9bbc7f5ff61144cab3ad42a4ea5ad27935f4e71b
|
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
import sys
from splinter import Browser
from .base import BaseBrowserTests
from .fake_webapp import EXAMPLE_APP
from .is_element_present_nojs import IsElementPresentNoJSTest
@unittest.skipIf(
sys.version_info[0] > 2,
"zope.testbrowser is not currently compatible with Python 3",
)
class ZopeTestBrowserDriverTest(
BaseBrowserTests, IsElementPresentNoJSTest, unittest.TestCase
):
@classmethod
def setUpClass(cls):
cls.browser = Browser("zope.testbrowser", wait_time=0.1)
def setUp(self):
self.browser.visit(EXAMPLE_APP)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def test_should_support_with_statement(self):
with Browser("zope.testbrowser"):
pass
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
self.browser.attach_file("file", file_path)
self.browser.find_by_name("upload").click()
html = self.browser.html
self.assertIn("text/plain", html)
self.assertIn(open(file_path).read().encode("utf-8"), html)
def test_forward_to_none_page(self):
"should not fail when trying to forward to none"
browser = Browser("zope.testbrowser")
browser.visit(EXAMPLE_APP)
browser.forward()
self.assertEqual(EXAMPLE_APP, browser.url)
browser.quit()
def test_cant_switch_to_frame(self):
"zope.testbrowser should not be able to switch to frames"
with self.assertRaises(NotImplementedError) as cm:
self.browser.get_iframe("frame_123")
self.fail()
e = cm.exception
self.assertEqual("zope.testbrowser doesn't support frames.", e.args[0])
def test_simple_type(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method")
def test_simple_type_on_element(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").type("with type method")
def test_can_clear_password_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("password").first.clear()
def test_can_clear_tel_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("telephone").first.clear()
def test_can_clear_text_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").first.clear()
def test_slowly_typing(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method", slowly=True)
def test_slowly_typing_on_element(self):
"""
zope.testbrowser won't support type method
on element because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
query = self.browser.find_by_name("query")
query.type("with type method", slowly=True)
def test_cant_mouseover(self):
"zope.testbrowser should not be able to put the mouse over the element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_over()
def test_cant_mouseout(self):
"zope.testbrowser should not be able to mouse out of an element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_out()
def test_links_with_nested_tags_xpath(self):
links = self.browser.find_by_xpath('//a/span[text()="first bar"]/..')
self.assertEqual(
len(links),
1,
'Found not exactly one link with a span with text "first bar". %s'
% (map(lambda item: item.outer_html, links)),
)
def test_finding_all_links_by_non_ascii_text(self):
"should find links by non ascii text"
non_ascii_encodings = {
"pangram_pl": u"Jeżu klątw, spłódź Finom część gry hańb!",
"pangram_ja": u"天 地 星 空",
"pangram_ru": u"В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!",
"pangram_eo": u"Laŭ Ludoviko Zamenhof bongustas freŝa ĉeĥa manĝaĵo kun spicoj.",
}
for key, text in non_ascii_encodings.iteritems():
link = self.browser.find_link_by_text(text)
self.assertEqual(key, link["id"])
|
bmcculley/splinter
|
tests/test_zopetestbrowser.py
|
Python
|
bsd-3-clause
| 5,355
|
[
"VisIt"
] |
37fb6e7867c4e6586212474351e1d01151af270c59fae69e29b826466d289972
|
# Orca
#
# Copyright 2009 Eitan Isaacson
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" Custom script for The notify-osd"""
from script import Script
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/notify-osd/__init__.py
|
Python
|
gpl-3.0
| 838
|
[
"ORCA"
] |
608cd593124afb8f0c7cb15e9a2cfa220c509fbf602f6fc51eaa7ad2afc8e5d0
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import copy
import os
import platform
import re
import urllib2
import getpass
from mozharness.base.config import ReadOnlyDict, parse_config_file
from mozharness.base.errors import BaseErrorList
from mozharness.base.log import FATAL
from mozharness.base.python import (
ResourceMonitoringMixin,
VirtualenvMixin,
virtualenv_config_options,
)
from mozharness.mozilla.buildbot import BuildbotMixin
from mozharness.mozilla.proxxy import Proxxy
from mozharness.mozilla.structuredlog import StructuredOutputParser
from mozharness.mozilla.testing.unittest import DesktopUnittestOutputParser
from mozharness.lib.python.authentication import get_credentials
INSTALLER_SUFFIXES = ('.tar.bz2', '.zip', '.dmg', '.exe', '.apk', '.tar.gz')
testing_config_options = [
[["--installer-url"],
{"action": "store",
"dest": "installer_url",
"default": None,
"help": "URL to the installer to install",
}],
[["--installer-path"],
{"action": "store",
"dest": "installer_path",
"default": None,
"help": "Path to the installer to install. This is set automatically if run with --download-and-extract.",
}],
[["--binary-path"],
{"action": "store",
"dest": "binary_path",
"default": None,
"help": "Path to installed binary. This is set automatically if run with --install.",
}],
[["--exe-suffix"],
{"action": "store",
"dest": "exe_suffix",
"default": None,
"help": "Executable suffix for binaries on this platform",
}],
[["--test-url"],
{"action": "store",
"dest": "test_url",
"default": None,
"help": "URL to the zip file containing the actual tests",
}],
[["--jsshell-url"],
{"action": "store",
"dest": "jsshell_url",
"default": None,
"help": "URL to the jsshell to install",
}],
[["--download-symbols"],
{"action": "store",
"dest": "download_symbols",
"type": "choice",
"choices": ['ondemand', 'true'],
"help": "Download and extract crash reporter symbols.",
}],
] + copy.deepcopy(virtualenv_config_options)
# TestingMixin {{{1
class TestingMixin(VirtualenvMixin, BuildbotMixin, ResourceMonitoringMixin):
"""
The steps to identify + download the proper bits for [browser] unit
tests and Talos.
"""
installer_url = None
installer_path = None
binary_path = None
test_url = None
test_zip_path = None
tree_config = ReadOnlyDict({})
symbols_url = None
symbols_path = None
jsshell_url = None
minidump_stackwalk_path = None
default_tools_repo = 'https://hg.mozilla.org/build/tools'
proxxy = None
def _query_proxxy(self):
"""manages the proxxy"""
if not self.proxxy:
self.proxxy = Proxxy(self.config, self.log_obj)
return self.proxxy
def download_proxied_file(self, url, file_name=None, parent_dir=None,
create_parent_dir=True, error_level=FATAL,
exit_code=3):
proxxy = self._query_proxxy()
return proxxy.download_proxied_file(url=url, file_name=file_name,
parent_dir=parent_dir,
create_parent_dir=create_parent_dir,
error_level=error_level,
exit_code=exit_code)
def download_file(self, *args, **kwargs):
'''
This function helps not to use download of proxied files
since it does not support authenticated downloads.
This could be re-factored and fixed in bug 1087664.
'''
if self.config.get("developer_mode"):
return super(TestingMixin, self).download_file(*args, **kwargs)
else:
return self.download_proxied_file(*args, **kwargs)
def query_value(self, key):
"""
This function allows us to check for a value
in the self.tree_config first and then on self.config
"""
return self.tree_config.get(key, self.config.get(key))
def query_jsshell_url(self):
"""
Attempt to determine the url of the js shell package given
the installer url.
"""
if self.jsshell_url:
return self.jsshell_url
if not self.installer_url:
self.fatal("Can't figure out jsshell without an installer_url!")
last_slash = self.installer_url.rfind('/')
base_url = self.installer_url[:last_slash]
for suffix in INSTALLER_SUFFIXES:
if self.installer_url.endswith(suffix):
no_suffix = self.installer_url[:-len(suffix)]
last_dot = no_suffix.rfind('.')
pf = no_suffix[last_dot + 1:]
self.jsshell_url = base_url + '/jsshell-' + pf + '.zip'
return self.jsshell_url
else:
self.fatal("Can't figure out jsshell from installer_url %s!" % self.installer_url)
def query_symbols_url(self):
if self.symbols_url:
return self.symbols_url
if not self.installer_url:
self.fatal("Can't figure out symbols_url without an installer_url!")
for suffix in INSTALLER_SUFFIXES:
if self.installer_url.endswith(suffix):
self.symbols_url = self.installer_url[:-len(suffix)] + '.crashreporter-symbols.zip'
return self.symbols_url
else:
self.fatal("Can't figure out symbols_url from installer_url %s!" % self.installer_url)
def _pre_config_lock(self, rw_config):
for i, (target_file, target_dict) in enumerate(rw_config.all_cfg_files_and_dicts):
if 'developer_config' in target_file:
self._developer_mode_changes(rw_config)
def _developer_mode_changes(self, rw_config):
""" This function is called when you append the config called
developer_config.py. This allows you to run a job
outside of the Release Engineering infrastructure.
What this functions accomplishes is:
* read-buildbot-config is removed from the list of actions
* --installer-url is set
* --test-url is set if needed
* every url is substituted by another external to the
Release Engineering network
"""
c = self.config
orig_config = copy.deepcopy(c)
self.warning("When you use developer_config.py, we drop " \
"'read-buildbot-config' from the list of actions.")
if "read-buildbot-config" in rw_config.actions:
rw_config.actions.remove("read-buildbot-config")
self.actions = tuple(rw_config.actions)
def _replace_url(url, changes):
for from_, to_ in changes:
if url.startswith(from_):
new_url = url.replace(from_, to_)
self.info("Replacing url %s -> %s" % (url, new_url))
return new_url
return url
assert c["installer_url"], "You must use --installer-url with developer_config.py"
if c.get("require_test_zip"):
assert c["test_url"], "You must use --test-url with developer_config.py"
c["installer_url"] = _replace_url(c["installer_url"], c["replace_urls"])
if c.get("test_url"):
c["test_url"] = _replace_url(c["test_url"], c["replace_urls"])
for key, value in self.config.iteritems():
if type(value) == str and value.startswith("http"):
self.config[key] = _replace_url(value, c["replace_urls"])
# Any changes to c means that we need credentials
if not c == orig_config:
get_credentials()
def _urlopen(self, url, **kwargs):
'''
This function helps dealing with downloading files while outside
of the releng network.
'''
# Code based on http://code.activestate.com/recipes/305288-http-basic-authentication
def _urlopen_basic_auth(url, **kwargs):
self.info("We want to download this file %s" % url)
if not hasattr(self, "https_username"):
self.info("NOTICE: Files downloaded from outside of "
"Release Engineering network require LDAP "
"credentials.")
self.https_username, self.https_password = get_credentials()
# This creates a password manager
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
# Because we have put None at the start it will use this username/password combination from here on
passman.add_password(None, url, self.https_username, self.https_password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
return urllib2.build_opener(authhandler).open(url, **kwargs)
# If we have the developer_run flag enabled then we will switch
# URLs to the right place and enable http authentication
if "developer_config.py" in self.config["config_files"]:
return _urlopen_basic_auth(url, **kwargs)
else:
return urllib2.urlopen(url, **kwargs)
# read_buildbot_config is in BuildbotMixin.
def postflight_read_buildbot_config(self):
"""
Determine which files to download from the buildprops.json file
created via the buildbot ScriptFactory.
"""
if self.buildbot_config:
c = self.config
message = "Unable to set %s from the buildbot config"
if c.get("installer_url"):
self.installer_url = c['installer_url']
if c.get("test_url"):
self.test_url = c['test_url']
try:
files = self.buildbot_config['sourcestamp']['changes'][-1]['files']
buildbot_prop_branch = self.buildbot_config['properties']['branch']
# Bug 868490 - Only require exactly two files if require_test_zip;
# otherwise accept either 1 or 2, since we'll be getting a
# test_zip url that we don't need.
expected_length = [1, 2, 3]
if c.get("require_test_zip") and not self.test_url:
expected_length = [2, 3]
if buildbot_prop_branch.startswith('gaia-try'):
expected_length = range(1, 1000)
actual_length = len(files)
if actual_length not in expected_length:
self.fatal("Unexpected number of files in buildbot config %s.\nExpected these number(s) of files: %s, but got: %d" %
(c['buildbot_json_path'], str(expected_length), actual_length))
for f in files:
if f['name'].endswith('tests.zip'): # yuk
if not self.test_url:
# str() because of unicode issues on mac
self.test_url = str(f['name'])
self.info("Found test url %s." % self.test_url)
elif f['name'].endswith('crashreporter-symbols.zip'): # yuk
self.symbols_url = str(f['name'])
self.info("Found symbols url %s." % self.symbols_url)
else:
if not self.installer_url:
self.installer_url = str(f['name'])
self.info("Found installer url %s." % self.installer_url)
except IndexError, e:
self.error(str(e))
missing = []
if not self.installer_url:
missing.append("installer_url")
if c.get("require_test_zip") and not self.test_url:
missing.append("test_url")
if missing:
self.fatal("%s!" % (message % ('+'.join(missing))))
else:
self.fatal("self.buildbot_config isn't set after running read_buildbot_config!")
def _query_binary_version(self, regex, cmd):
output = self.get_output_from_command(cmd, silent=False)
return regex.search(output).group(1)
def preflight_download_and_extract(self):
message = ""
if not self.installer_url:
message += """installer_url isn't set!
You can set this by:
1. specifying --installer-url URL, or
2. running via buildbot and running the read-buildbot-config action
"""
if self.config.get("require_test_zip") and not self.test_url:
message += """test_url isn't set!
You can set this by:
1. specifying --test-url URL, or
2. running via buildbot and running the read-buildbot-config action
"""
if message:
self.fatal(message + "Can't run download-and-extract... exiting")
if self.config.get("developer_mode") and self._is_darwin():
# Bug 1066700 only affects Mac users that try to run mozharness locally
version = self._query_binary_version(
regex=re.compile("UnZip\ (\d+\.\d+)\ .*",re.MULTILINE),
cmd=[self.query_exe('unzip'), '-v']
)
if float(version) < 6:
self.fatal("We require a more recent version of unzip to unpack our tests.zip files.\n" \
"You are currently using version %s. Please update to at least 6.0.\n" \
"You can visit http://www.info-zip.org/UnZip.html" % version)
def _download_test_zip(self):
dirs = self.query_abs_dirs()
file_name = None
if self.test_zip_path:
file_name = self.test_zip_path
# try to use our proxxy servers
# create a proxxy object and get the binaries from it
source = self.download_file(self.test_url, file_name=file_name,
parent_dir=dirs['abs_work_dir'],
error_level=FATAL)
self.test_zip_path = os.path.realpath(source)
def _download_unzip(self, url, parent_dir):
"""Generic download+unzip.
This is hardcoded to halt on failure.
We should probably change some other methods to call this."""
dirs = self.query_abs_dirs()
zipfile = self.download_file(url, parent_dir=dirs['abs_work_dir'],
error_level=FATAL)
command = self.query_exe('unzip', return_type='list')
command.extend(['-q', '-o', zipfile])
self.run_command(command, cwd=parent_dir, halt_on_failure=True,
fatal_exit_code=3, output_timeout=1760)
def _extract_test_zip(self, target_unzip_dirs=None):
dirs = self.query_abs_dirs()
unzip = self.query_exe("unzip")
test_install_dir = dirs.get('abs_test_install_dir',
os.path.join(dirs['abs_work_dir'], 'tests'))
self.mkdir_p(test_install_dir)
# adding overwrite flag otherwise subprocess.Popen hangs on waiting for
# input in a hidden pipe whenever this action is run twice without
# clobber
unzip_cmd = [unzip, '-q', '-o', self.test_zip_path]
if target_unzip_dirs:
unzip_cmd.extend(target_unzip_dirs)
# TODO error_list
# unzip return code 11 is 'no matching files were found'
self.run_command(unzip_cmd, cwd=test_install_dir,
halt_on_failure=True, success_codes=[0, 11],
fatal_exit_code=3)
def _read_tree_config(self):
"""Reads an in-tree config file"""
dirs = self.query_abs_dirs()
test_install_dir = dirs.get('abs_test_install_dir',
os.path.join(dirs['abs_work_dir'], 'tests'))
if 'in_tree_config' in self.config:
rel_tree_config_path = self.config['in_tree_config']
tree_config_path = os.path.join(test_install_dir, rel_tree_config_path)
if not os.path.isfile(tree_config_path):
self.fatal("The in-tree configuration file '%s' does not exist!"
"It must be added to '%s'. See bug 1035551 for more details." %
(tree_config_path, os.path.join('gecko', 'testing', rel_tree_config_path)))
try:
self.tree_config.update(parse_config_file(tree_config_path))
except:
msg = "There was a problem parsing the in-tree configuration file '%s'!" % \
os.path.join('gecko', 'testing', rel_tree_config_path)
self.exception(message=msg, level=FATAL)
self.dump_config(file_path=os.path.join(dirs['abs_log_dir'], 'treeconfig.json'),
config=self.tree_config)
self.tree_config.lock()
def structured_output(self, suite_category):
"""Defines whether structured logging is in use in this configuration. This
may need to be replaced with data from a different config at the resolution
of bug 1070041 and related bugs.
"""
return ('structured_suites' in self.tree_config and
suite_category in self.tree_config['structured_suites'])
def get_test_output_parser(self, suite_category, strict=False,
fallback_parser_class=DesktopUnittestOutputParser,
**kwargs):
"""Derive and return an appropriate output parser, either the structured
output parser or a fallback based on the type of logging in use as determined by
configuration.
"""
if not self.structured_output(suite_category):
if fallback_parser_class is DesktopUnittestOutputParser:
return DesktopUnittestOutputParser(suite_category=suite_category, **kwargs)
return fallback_parser_class(**kwargs)
self.info("Structured output parser in use for %s." % suite_category)
return StructuredOutputParser(suite_category=suite_category, strict=strict, **kwargs)
def _download_installer(self):
file_name = None
if self.installer_path:
file_name = self.installer_path
dirs = self.query_abs_dirs()
source = self.download_file(self.installer_url,
file_name=file_name,
parent_dir=dirs['abs_work_dir'],
error_level=FATAL)
self.installer_path = os.path.realpath(source)
self.set_buildbot_property("build_url", self.installer_url, write_to_file=True)
def _download_and_extract_symbols(self):
dirs = self.query_abs_dirs()
self.symbols_url = self.query_symbols_url()
if self.config.get('download_symbols') == 'ondemand':
self.symbols_path = self.symbols_url
return
if not self.symbols_path:
self.symbols_path = os.path.join(dirs['abs_work_dir'], 'symbols')
self.mkdir_p(self.symbols_path)
source = self.download_file(self.symbols_url,
parent_dir=self.symbols_path,
error_level=FATAL)
self.set_buildbot_property("symbols_url", self.symbols_url,
write_to_file=True)
self.run_command(['unzip', '-q', source], cwd=self.symbols_path,
halt_on_failure=True, fatal_exit_code=3)
def download_and_extract(self, target_unzip_dirs=None):
"""
download and extract test zip / download installer
"""
# Swap plain http for https when we're downloading from ftp
# See bug 957502 and friends
from_ = "http://ftp.mozilla.org"
to_ = "https://ftp-ssl.mozilla.org"
for attr in 'test_url', 'symbols_url', 'installer_url':
url = getattr(self, attr)
if url and url.startswith(from_):
new_url = url.replace(from_, to_)
self.info("Replacing url %s -> %s" % (url, new_url))
setattr(self, attr, new_url)
if self.test_url:
self._download_test_zip()
self._extract_test_zip(target_unzip_dirs=target_unzip_dirs)
self._read_tree_config()
self._download_installer()
if self.config.get('download_symbols'):
self._download_and_extract_symbols()
# create_virtualenv is in VirtualenvMixin.
def preflight_install(self):
if not self.installer_path:
if self.config.get('installer_path'):
self.installer_path = self.config['installer_path']
else:
self.fatal("""installer_path isn't set!
You can set this by:
1. specifying --installer-path PATH, or
2. running the download-and-extract action
""")
if not self.is_python_package_installed("mozInstall"):
self.fatal("""Can't call install() without mozinstall!
Did you run with --create-virtualenv? Is mozinstall in virtualenv_modules?""")
def install(self):
""" Dependent on mozinstall """
# install the application
cmd = self.query_exe("mozinstall", default=self.query_python_path("mozinstall"), return_type="list")
if self.config.get('application'):
cmd.extend(['--app', self.config['application']])
# Remove the below when we no longer need to support mozinstall 0.3
self.info("Detecting whether we're running mozinstall >=1.0...")
output = self.get_output_from_command(cmd + ['-h'])
if '--source' in output:
cmd.append('--source')
# End remove
dirs = self.query_abs_dirs()
target_dir = dirs.get('abs_app_install_dir',
os.path.join(dirs['abs_work_dir'],
'application'))
self.mkdir_p(target_dir)
cmd.extend([self.installer_path,
'--destination', target_dir])
# TODO we'll need some error checking here
self.binary_path = self.get_output_from_command(cmd, halt_on_failure=True,
fatal_exit_code=3)
def install_minidump_stackwalk(self):
dirs = self.query_abs_dirs()
if not os.path.isdir(os.path.join(dirs['abs_work_dir'], 'tools', 'breakpad')):
# clone hg.m.o/build/tools
repos = [{
'repo': self.config.get('tools_repo') or self.default_tools_repo,
'vcs': 'hg',
'dest': os.path.join(dirs['abs_work_dir'], "tools")
}]
self.vcs_checkout(**repos[0])
def query_minidump_stackwalk(self):
if self.minidump_stackwalk_path:
return self.minidump_stackwalk_path
dirs = self.query_abs_dirs()
env = self.query_env()
if os.path.isdir(os.path.join(dirs['abs_work_dir'], 'tools', 'breakpad')):
# find binary for platform/architecture
path = os.path.join(dirs['abs_work_dir'], 'tools', 'breakpad', '%s', 'minidump_stackwalk')
pltfrm = platform.platform().lower()
arch = platform.architecture()[0]
if 'linux' in pltfrm:
if '64' in arch:
self.minidump_stackwalk_path = path % 'linux64'
else:
self.minidump_stackwalk_path = path % 'linux'
elif any(s in pltfrm for s in ('mac', 'osx', 'darwin')):
if '64' in arch:
self.minidump_stackwalk_path = path % 'osx64'
else:
self.minidump_stackwalk_path = path % 'osx'
elif 'win' in pltfrm:
self.minidump_stackwalk_path = path % 'win32' + '.exe'
elif os.path.isfile(env.get('MINIDUMP_STACKWALK', '')):
self.minidump_stackwalk_path = env['MINIDUMP_STACKWALK']
elif os.path.isfile(os.path.join(dirs['abs_work_dir'], 'minidump_stackwalk')):
self.minidump_stackwalk_path = os.path.join(dirs['abs_work_dir'], 'minidump_stackwalk')
return self.minidump_stackwalk_path
def _run_cmd_checks(self, suites):
if not suites:
return
dirs = self.query_abs_dirs()
for suite in suites:
# XXX platform.architecture() may give incorrect values for some
# platforms like mac as executable files may be universal
# files containing multiple architectures
# NOTE 'enabled' is only here while we have unconsolidated configs
if not suite['enabled']:
continue
if suite.get('architectures'):
arch = platform.architecture()[0]
if arch not in suite['architectures']:
continue
cmd = suite['cmd']
name = suite['name']
self.info("Running pre test command %(name)s with '%(cmd)s'"
% {'name': name, 'cmd': ' '.join(cmd)})
if self.buildbot_config: # this cmd is for buildbot
# TODO rather than checking for formatting on every string
# in every preflight enabled cmd: find a better solution!
# maybe I can implement WithProperties in mozharness?
cmd = [x % (self.buildbot_config.get('properties'))
for x in cmd]
self.run_command(cmd,
cwd=dirs['abs_work_dir'],
error_list=BaseErrorList,
halt_on_failure=suite['halt_on_failure'],
fatal_exit_code=suite.get('fatal_exit_code', 3))
def preflight_run_tests(self):
"""preflight commands for all tests"""
# If the in tree config hasn't been loaded by a previous step, load it here.
if len(self.tree_config) == 0:
self._read_tree_config()
c = self.config
if c.get('run_cmd_checks_enabled'):
self._run_cmd_checks(c.get('preflight_run_cmd_suites', []))
elif c.get('preflight_run_cmd_suites'):
self.warning("Proceeding without running prerun test commands."
" These are often OS specific and disabling them may"
" result in spurious test results!")
def postflight_run_tests(self):
"""preflight commands for all tests"""
c = self.config
if c.get('run_cmd_checks_enabled'):
self._run_cmd_checks(c.get('postflight_run_cmd_suites', []))
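# --- Illustrative sketch (not part of the original mixin) ---
# The query_minidump_stackwalk() lookup above boils down to mapping the host
# platform and bitness onto a tools/breakpad subdirectory. A minimal
# standalone version of that mapping, assuming the same directory names
# (linux, linux64, osx, osx64, win32) are wanted:
if __name__ == '__main__':
    import platform
    plat = platform.platform().lower()
    bits = platform.architecture()[0]  # e.g. '64bit' or '32bit'
    if 'linux' in plat:
        subdir = 'linux64' if '64' in bits else 'linux'
    elif any(s in plat for s in ('mac', 'osx', 'darwin')):
        subdir = 'osx64' if '64' in bits else 'osx'
    else:
        subdir = 'win32'
    print('would use tools/breakpad/%s/minidump_stackwalk' % subdir)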
|
mrrrgn/build-mozharness
|
mozharness/mozilla/testing/testbase.py
|
Python
|
mpl-2.0
| 26,922
|
[
"VisIt"
] |
485014c6ebb07a01f4e9fd14ab1efabcaaca5479b984ac1c1ece4b87e502c538
|
"""
Definitions of the various forms used
"""
from wtforms import validators, widgets, Form, Field, FormField, FieldList, TextField, TextAreaField, BooleanField, DecimalField, IntegerField, SelectField, SelectMultipleField, FileField, PasswordField, StringField, SubmitField, HiddenField, compat
from flask import g
from werkzeug.datastructures import MultiDict  # needed by BaseForm.reset below
from .models import User
from .util import VALID_UNITS
# TODO refactor with flask_wtf which presets form csrfs (or roll my own I guess)
class BaseForm(Form):
"""Custom Form class that implements csrf by default
render with {{ form.csrf }} in templates
"""
def reset(self):
blankData = MultiDict([('csrf', self.reset_csrf())])
self.process(blankData)
class CSVField(Field):
"""Text field that parses data as comma separated values
"""
widget = widgets.TextInput()
def _value(self):
if self.data:
return ', '.join(self.data)
else:
return ''
def process_formdata(self, valuelist):
if valuelist[0]:
self.data = [x.strip().lower() for x in valuelist[0].split(',') if x.strip()]
else:
self.data = []
class SelectExtended(widgets.Select):
"""
Renders a select field, with disabled options
If `multiple` is True, then the `size` property should be specified on
rendering to make the field useful.
The field must provide an `iter_choices()` method which the widget will
call on rendering; this method must yield tuples of
`(value, label, selected, disabled)`.
"""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
if self.multiple:
kwargs['multiple'] = True
html = ['<select %s>' % widgets.html_params(name=field.name, **kwargs)]
for val, label, selected, disabled in field.iter_choices():
html.append(self.render_option(val, label, selected, disabled))
html.append('</select>')
return widgets.HTMLString(''.join(html))
@classmethod
def render_option(cls, value, label, selected, disabled, **kwargs):
if value is True:
# Handle the special case of a 'True' value.
value = compat.text_type(value)
options = dict(kwargs, value=value)
if selected:
options['selected'] = True
if disabled:
options['disabled'] = True
return widgets.HTMLString('<option %s>%s</option>' % (widgets.html_params(**options), compat.text_type(label)))
class SelectWithPlaceholderField(SelectField):
"""Make the first option disabled selected, thus like a placeholder
<option value="" disabled selected>Select your option</option>
"""
widget = SelectExtended()
def iter_choices(self):
first = True
for value, label in self.choices:
yield (value, label, self.coerce(value) == self.data, first)
first = False
class ToggleField(BooleanField):
"""Subclass check field to take advantge of the
bootstrap toggle plugin
"""
def __init__(self, label='', validators=None, **kwargs):
"""on and off are the text for on/off states
onstyle/offstyle are the button style, see bs buttons
"""
render_kw = kwargs.pop('render_kw', {})
render_kw['data-toggle'] = 'toggle'
for field in 'on,onstyle,off,offstyle,size'.split(','):
arg = kwargs.pop(field, None)
if arg:
render_kw['data-{}'.format(field)] = arg
super(ToggleField, self).__init__(label, validators, render_kw=render_kw, **kwargs)
class HiddenIntField(IntegerField):
"""Hidden field and have it coerced into an integer on validate
"""
widget = widgets.HiddenInput()
class EmailField(TextField):
"""Todo
"""
def process_formdata(self, valuelist):
if valuelist[0]:
self.data = valuelist[0].strip()
else:
self.data = ""
def pairs(l):
return [(x,x) for x in l]
class DrinksForm(BaseForm):
# display options
prices = BooleanField("Prices", description="Display prices for drinks based on stock")
prep_line = BooleanField("Preparation", description="Display a line showing glass, ice, and prep")
stats = BooleanField("Stats", description="Print out a detailed statistics block for the selected recipes")
examples = BooleanField("Examples", description="Show specific examples of a recipe based on the ingredient stock")
all_ingredients = BooleanField("All Ingredients", description="Show every ingredient instead of just the main liquors with each example")
convert = TextField("Convert", description="Convert recipes to a different primary unit", default=None, validators=[validators.AnyOf(VALID_UNITS), validators.Optional()])
markup = DecimalField("Margin", description="Drink markup: price = ceil((base_cost+1)*markup)", default=1.1) # TODO config management (current_bar)
info = BooleanField("Info", description="Show the info line for recipes")
origin = BooleanField("Origin", description="Check origin and mark drinks as Schubar originals")
variants = BooleanField("Variants", description="Show variants for drinks")
# filtering options
search = TextField("", description="")
all_ = BooleanField("Allow all ingredients", description="Include all recipes, regardless of if they can be made from the loaded barstock")
include = CSVField("Include Ingredients", description="Recipes that contain any/all of these comma separated ingredient(s)")
exclude = CSVField("Exclude Ingredients", description="Recipes that don't contain any/all of these comma separated ingredient(s)")
include_use_or = ToggleField("<br>", on="any", off="all", onstyle="secondary", offstyle="secondary")
exclude_use_or = ToggleField("<br>", default='y', on="any", off="all", onstyle="secondary", offstyle="secondary")
tag = TextField("Tag", description="Filter by tag")
style = SelectField("Style", description="", choices=pairs(['','All Day Cocktail','Before Dinner Cocktail','After Dinner Cocktail','Longdrink', 'Hot Drink', 'Sparkling Cocktail', 'Wine Cocktail']))
glass = SelectField("Glass", description="", choices=pairs(['','cocktail','martini','collins','rocks','highball','flute','shot','shooter','mug']))
prep = SelectField("Prep", description="", choices=pairs(['','shake', 'stir', 'build', 'throw']))
ice = SelectField("Ice", description="", choices=pairs(['','cubed','crushed','neat']))
# sorting options
# abv, cost, alcohol content
sorting = SelectWithPlaceholderField("", choices=[
('None', "Sort Results..."),
('abv', "ABV (Low to High)"),
('abvX', "ABV (High to Low)"),
('cost', "Cost ($ to $$$)"),
('costX', "Cost ($$$ to $)"),
('std_drinks', "Total Alcohol (Low to High)"),
('std_drinksX', "Total Alcohol (High to Low)"),
])
# pdf options
pdf_filename = TextField("Filename to use", description="Basename of the pdf and tex files generated", default="web_drinks_file")
ncols = IntegerField("Number of columns", default=2, description="Number of columns to use for the menu")
liquor_list = BooleanField("Liquor list", description="Show list of the available ingredients")
liquor_list_own_page = BooleanField("Liquor list (own page)", description="Show list of the available ingredients on a separate page")
debug = BooleanField("LaTeX debug output", description="Add debugging output to the pdf")
align = BooleanField("Align items", description="Align drink names across columns")
title = TextField("Title", description="Title to use")
tagline = TextField("Tagline", description="Tagline to use below the title")
class RecipeIngredientForm(BaseForm):
ingredient = TextField("Ingredient", validators=[validators.InputRequired()])
quantity = DecimalField("Quantity", validators=[validators.InputRequired()])
is_optional = BooleanField("Optional")
class RecipeForm(BaseForm):
name = TextField("Name", description="The recipe name", validators=[validators.InputRequired()])
info = TextField("Info", description="Additional information about the recipe")
ingredients = FieldList(FormField(RecipeIngredientForm), min_entries=1, validators=[validators.InputRequired()])
unit = SelectField("Unit", choices=pairs(VALID_UNITS), validators=[validators.InputRequired()])
#glass =
#unit =
#prep =
#ice =
#garnish =
class RecipeListSelector(BaseForm):
recipes = SelectMultipleField("Available Recipe Lists", description="Select recipe lists to be used for generating a menu",
choices=[("recipes_schubar.json", "Core Recipes (from @Schubar)"),
("IBA_unforgettables.json", "IBA Unforgettables"),
("IBA_contemporary_classics.json", "IBA Contemporary Classics"),
("IBA_new_era_drinks.json", "IBA New Era Drinks")])
class UploadBarstockForm(BaseForm):
upload_csv = FileField("Choose file", validators=[validators.regexp(r'^[^/\\]\.csv$')])
replace_existing = BooleanField("Replace existing stock?", default=False)
class BarstockForm(BaseForm):
categories = 'Spirit,Liqueur,Vermouth,Bitters,Syrup,Dry,Juice,Mixer,Wine,Ice'.split(',')
# TODO maybe as an "other" then fill...
types = 'Brandy,Dry Gin,Genever,Amber Rum,White Rum,Dark Rum,Rye Whiskey,Vodka,Orange Liqueur,Dry Vermouth,Sweet Vermouth,Aromatic Bitters,Orange Bitters,Fruit Bitters,Bourbon Whiskey,Tennessee Whiskey,Irish Whiskey,Scotch Whisky,Silver Tequila,Gold Tequila,Mezcal,Aquavit,Amaretto,Blackberry Liqueur,Raspberry Liqueur,Campari,Amaro,Cynar,Aperol,Creme de Cacao,Creme de Menthe,Grenadine,Simple Syrup,Rich Simple Syrup,Honey Syrup,Orgeat,Maple Syrup,Sugar'.split(',')
def types_list(self):
    return ', '.join(self.types)
category = SelectField("Category", validators=[validators.InputRequired()], choices=pairs(categories))
type_ = TextField("Ingredient", validators=[validators.InputRequired()])
kind = TextField("Kind", validators=[validators.InputRequired()])
abv = DecimalField("ABV", description='Alcohol by Volume (percentage), i.e. enter "20" if the ABV is 20%', validators=[validators.InputRequired(), validators.NumberRange(min=0, max=100)])
unit = SelectField("Unit", choices=pairs([VALID_UNITS[1],VALID_UNITS[0]]+VALID_UNITS[2:]), validators=[validators.InputRequired()])
size = DecimalField("Size", description="Volume in selected unit", validators=[validators.InputRequired(), validators.NumberRange(min=0, max=20000)])
price = DecimalField("Price ($)", description="$ paid or ~USD value for Size", validators=[validators.InputRequired(), validators.NumberRange(min=0, max=9999999999)])
class OrderForm(BaseForm):
notes = TextField("Notes")
class OrderFormAnon(OrderForm):
name = TextField("Your Name", validators=[validators.InputRequired()])
email = EmailField("Confirmation Email", validators=[validators.Email("Invalid email address"), validators.InputRequired()])
class LoginForm(BaseForm):
#name = TextField("Your Name", validators=[validators.InputRequired()])
email = EmailField("Email", validators=[validators.InputRequired()])
password = PasswordField("Password", validators=[validators.InputRequired()])
class EditUserForm(BaseForm):
first_name = StringField('First Name')
last_name = StringField('Last Name')
nickname = StringField('Nickname')
venmo_id = StringField('Venmo ID')
submit = SubmitField('Save Profile')
class CreateBarForm(BaseForm):
cname = TextField("Bar Unique Name", description="Unique name for the bar", validators=[validators.InputRequired()])
name = TextField("Bar Display Name", description="Display name for the bar, leave blank to use unique name")
tagline = TextField("Tagline", description="Tag line or slogan for the bar")
create_bar = SubmitField("Create Bar", render_kw={"class": "btn btn-success"})
class EditBarForm(BaseForm):
def __init__(self, *args, **kwargs):
super(EditBarForm, self).__init__(*args, **kwargs)
choices = [('', '')]+[(user.email, user.get_name_with_email()) for user in User.query.all()]
self.bartender.choices = choices
name = TextField("Bar Name", description="Display name for the bar")
tagline = TextField("Tagline", description="Tag line or slogan for the bar")
ONTEXT = "On"
OFFTEXT = "Off"
ONSTYLE = "secondary"
OFFSTYLE = None
# TODO use just "bartenders" for the current bar after there's a real syetem
# for bars to pick bartenders - maybe off the user page
# user table on dashboard could generate links to edit the user page, user has a selectmultiple for roles
status = ToggleField("Bar Status", description="Open or close the bar to orders",
on="Open", off="Closed", onstyle="success", offstyle="danger")
is_public = ToggleField("Public", description="Make the bar available to browse",
on="Visible", off="Hidden", onstyle="success", offstyle="danger")
bartender = SelectField("Assign Bartender On Duty", description="Assign a bartender to receive orders", choices=[])
prices = ToggleField("Prices", description="Show prices",
on="Included", off="Free", onstyle="success", offstyle="secondary")
prep_line = ToggleField("Preparation", description="Show preparation instructions",
on=ONTEXT, off=OFFTEXT, onstyle=ONSTYLE, offstyle=OFFSTYLE)
examples = ToggleField("Examples", description="Show specific examples for each recipe",
on=ONTEXT, off=OFFTEXT, onstyle=ONSTYLE, offstyle=OFFSTYLE)
convert = SelectField("Convert to", choices=[('', 'None')]+pairs(VALID_UNITS))
markup = DecimalField("Margin", description="Recommended Tip = ceil((material_cost+1)*margin)")
info = ToggleField("Info", description="Adds info tidbit to recipes",
on=ONTEXT, off=OFFTEXT, onstyle=ONSTYLE, offstyle=OFFSTYLE)
origin = ToggleField("Origin", description="Denote drinks originating at Schubar",
on=ONTEXT, off=OFFTEXT, onstyle=ONSTYLE, offstyle=OFFSTYLE)
variants = ToggleField("Variants", description="List variants for drinks",
on=ONTEXT, off=OFFTEXT, onstyle=ONSTYLE, offstyle=OFFSTYLE)
summarize = ToggleField("Summarize", description="List ingredient names instead of full recipe",
on=ONTEXT, off=OFFTEXT, onstyle=ONSTYLE, offstyle=OFFSTYLE)
edit_bar = SubmitField("Commit Changes", render_kw={"class": "btn btn-primary"})
class SetBarOwnerForm(BaseForm):
def __init__(self, *args, **kwargs):
super(SetBarOwnerForm, self).__init__(*args, **kwargs)
choices = [('', '')]+[(user.email, user.get_name_with_email()) for user in User.query.all()]
self.owner.choices = choices
owner = SelectField("Assign Bar Owner", description="Assign an owner who can manage the bar's stock and settings", choices=[])
submit = SubmitField("Commit Changes", render_kw={"class": "btn btn-primary"})
|
twschum/mix-mind
|
mixmind/forms.py
|
Python
|
apache-2.0
| 15,031
|
[
"Amber"
] |
5281bea9d94aab4127edd3a38967f751f5370d32b7445d4b7ad656afa0175eb5
|
from ovito import *
from ovito.io import *
from ovito.modifiers import *
import numpy
node = import_file("../../files/LAMMPS/animation.dump.gz")
modifier = ColorCodingModifier()
node.modifiers.append(modifier)
print("Parameter defaults:")
print(" start_value: {}".format(modifier.start_value))
print(" end_value: {}".format(modifier.end_value))
print(" gradient: {}".format(modifier.gradient))
print(" only_selected: {}".format(modifier.only_selected))
print(" assign_to: {}".format(modifier.assign_to))
print(" particle_property: {}".format(modifier.particle_property))
print(" bond_property: {}".format(modifier.bond_property))
modifier.gradient = ColorCodingModifier.Rainbow()
modifier.gradient = ColorCodingModifier.Jet()
modifier.gradient = ColorCodingModifier.Hot()
modifier.gradient = ColorCodingModifier.Grayscale()
modifier.gradient = ColorCodingModifier.BlueWhiteRed()
modifier.gradient = ColorCodingModifier.Viridis()
modifier.gradient = ColorCodingModifier.Magma()
modifier.gradient = ColorCodingModifier.Custom("../../../doc/manual/images/modifiers/color_coding_custom_map.png")
print(node.compute().particle_properties.color.array)
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/color_coding_modifier.py
|
Python
|
gpl-3.0
| 1,160
|
[
"LAMMPS",
"OVITO"
] |
9e480c336b3a0e31e538be127871cdd3ea52efa8c389bd055126ec8488d40b32
|
import os
from mpp.models import SQLTestCase
from mpp.lib.config import GPDBConfig
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.gpfilespace import HAWQGpfilespace
def setUpFilespaceForCTAS(isForHawq):
config = GPDBConfig()
if isForHawq:
filespace = HAWQGpfilespace()
else:
filespace = Gpfilespace()
if config.is_not_insync_segments():
filespace.create_filespace('tincrepo_qp_ddl_ctas')
class TestCTASWithOrcaInGPDB(SQLTestCase):
'''
Testing CREATE TABLE AS with Orca. optimizer=on is added in gucs
because the _setup files ignore the optimizer_mode parameter.
@optimizer_mode on
@gucs optimizer_enable_ctas=on; optimizer_log=on; gp_create_table_random_default_distribution=on; optimizer=on
@product_version gpdb: [4.3.3-]
'''
sql_dir = 'sql/'
ans_dir = 'expected/'
out_dir = 'output_orca/'
@classmethod
def setUpClass(cls):
super(TestCTASWithOrcaInGPDB, cls).setUpClass()
setUpFilespaceForCTAS(False)
@classmethod
def get_substitutions(cls):
MYD = os.path.dirname(os.path.realpath(__file__))
substitutions = { '%MYD%' : MYD, '%USED_OPT%' : 'orca' }
return substitutions
class TestCTASWithPlannerInGPDB(SQLTestCase):
'''
Testing CREATE TABLE AS with Planner. optimizer=off is added in gucs
because the _setup files ignore the optimizer_mode parameter.
@optimizer_mode off
@gucs gp_create_table_random_default_distribution=on; optimizer=off
@product_version gpdb: [4.3.3-]
'''
sql_dir = 'sql/'
ans_dir = 'expected/'
out_dir = 'output_planner/'
@classmethod
def setUpClass(cls):
super(TestCTASWithPlannerInGPDB, cls).setUpClass()
setUpFilespaceForCTAS(False)
@classmethod
def get_substitutions(cls):
MYD = os.path.dirname(os.path.realpath(__file__))
substitutions = { '%MYD%' : MYD, '%USED_OPT%' : 'planner' }
return substitutions
|
cjcjameson/gpdb
|
src/test/tinc/tincrepo/ddl/create_table_as/test_ctas.py
|
Python
|
apache-2.0
| 2,141
|
[
"ORCA"
] |
06a848f26b0b7bb58814f43881955922c8c737f12b7ef0a85eeed1fe974bc395
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import numbers
import os
def is_integral(x):
"""Any integer value"""
try:
return isinstance(int(x), numbers.Integral) and \
not isinstance(x, bool) and int(x) > 0
except ValueError:
return False
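# For illustration (not in the original package): is_integral('150000') and
# is_integral(150000) are True; is_integral(0), is_integral(True) and
# is_integral('abc') are all False, so the MAXNEL variant below only
# accepts positive integral values.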
class Nek5000(Package):
"""A fast and scalable high-order solver for computational fluid
dynamics"""
homepage = "https://nek5000.mcs.anl.gov/"
url = "https://github.com/Nek5000/Nek5000"
tags = ['cfd', 'flow', 'hpc', 'solver', 'navier-stokes',
'spectral-elements', 'fluid']
version('17.0', '6a13bfad2ce023897010dd88f54a0a87',
url="https://github.com/Nek5000/Nek5000/releases/download/"
"v17.0/Nek5000-v17.0.tar.gz")
version('develop', git='https://github.com/Nek5000/Nek5000.git',
branch='master')
# MPI, Profiling and Visit variants
variant('mpi', default=True, description='Build with MPI.')
variant('profiling', default=True, description='Build with profiling data.')
variant('visit', default=False, description='Build with Visit.')
# TODO: add a variant 'blas' or 'external-blas' to enable the usage of
# Spack installed/configured blas.
# Variant for MAXNEL; we need to read this from the user
variant(
'MAXNEL',
default=150000,
description='Maximum number of elements for Nek5000 tools.',
values=is_integral
)
# Variants for Nek tools
variant('genbox', default=True, description='Build genbox tool.')
variant('int_tp', default=True, description='Build int_tp tool.')
variant('n2to3', default=True, description='Build n2to3 tool.')
variant('postnek', default=True, description='Build postnek tool.')
variant('reatore2', default=True, description='Build reatore2 tool.')
variant('genmap', default=True, description='Build genmap tool.')
variant('nekmerge', default=True, description='Build nekmerge tool.')
variant('prenek', default=True, description='Build prenek tool.')
# Dependencies
depends_on('mpi', when="+mpi")
depends_on('libx11', when="+prenek")
depends_on('libx11', when="+postnek")
# libxt is needed for X11/Intrinsic.h but not for linking
depends_on('libxt', when="+prenek")
depends_on('libxt', when="+postnek")
depends_on('visit', when="+visit")
@run_before('install')
def fortran_check(self):
if not self.compiler.f77:
msg = 'Cannot build Nek5000 without a Fortran 77 compiler.'
raise RuntimeError(msg)
@run_after('install')
def test_install(self):
currentDir = os.getcwd()
eddyDir = 'short_tests/eddy'
os.chdir(eddyDir)
os.system(join_path(self.prefix.bin, 'makenek') + ' eddy_uv')
if not os.path.isfile(join_path(os.getcwd(), 'nek5000')):
msg = 'Cannot build example: short_tests/eddy.'
raise RuntimeError(msg)
os.chdir(currentDir)
def install(self, spec, prefix):
toolsDir = 'tools'
binDir = 'bin'
# Do not use the Spack compiler wrappers.
# Use directly the compilers:
FC = self.compiler.f77
CC = self.compiler.cc
fflags = spec.compiler_flags['fflags']
cflags = spec.compiler_flags['cflags']
if ('+prenek' in spec) or ('+postnek' in spec):
libx11_h = find_headers('Xlib', spec['libx11'].prefix.include,
recursive=True)
if not libx11_h:
raise RuntimeError('Xlib.h not found in %s' %
spec['libx11'].prefix.include)
cflags += ['-I%s' % os.path.dirname(libx11_h.directories[0])]
libxt_h = find_headers('Intrinsic', spec['libxt'].prefix.include,
recursive=True)
if not libxt_h:
raise RuntimeError('X11/Intrinsic.h not found in %s' %
spec['libxt'].prefix.include)
cflags += ['-I%s' % os.path.dirname(libxt_h.directories[0])]
if self.compiler.name in ['xl', 'xl_r']:
# Use '-qextname' to add underscores.
# Use '-WF,-qnotrigraph' to fix an error about a string: '... ??'
fflags += ['-qextname', '-WF,-qnotrigraph']
fflags = ' '.join(fflags)
cflags = ' '.join(cflags)
# Build the tools, maketools copy them to Nek5000/bin by default.
# We will then install Nek5000/bin under prefix after that.
with working_dir(toolsDir):
# Update the maketools script to use correct compilers
filter_file(r'^#FC\s*=.*', 'FC="{0}"'.format(FC), 'maketools')
filter_file(r'^#CC\s*=.*', 'CC="{0}"'.format(CC), 'maketools')
if fflags:
filter_file(r'^#FFLAGS=.*', 'FFLAGS="{0}"'.format(fflags),
'maketools')
if cflags:
filter_file(r'^#CFLAGS=.*', 'CFLAGS="{0}"'.format(cflags),
'maketools')
if self.compiler.name in ['xl', 'xl_r']:
# Patch 'maketools' to use '-qextname' when checking for
# underscore because 'xl'/'xl_r' use this option to enable the
# addition of the underscore.
filter_file(r'^\$FC -c ', '$FC -qextname -c ', 'maketools')
libx11_lib = find_libraries('libX11', spec['libx11'].prefix.lib,
shared=True, recursive=True)
if not libx11_lib:
libx11_lib = \
find_libraries('libX11', spec['libx11'].prefix.lib64,
shared=True, recursive=True)
if not libx11_lib:
raise RuntimeError('libX11 not found in %s/{lib,lib64}' %
spec['libx11'].prefix)
# There is no other way to set the X11 library path except brute
# force:
filter_file(r'-L\$\(X\)', libx11_lib.search_flags,
join_path('prenek', 'makefile'))
filter_file(r'-L\$\(X\)', libx11_lib.search_flags,
join_path('postnek', 'makefile'))
if self.compiler.name in ['xl', 'xl_r']:
# Use '-qextname' when compiling mxm.f
filter_file(r'\$\(OLAGS\)', '-qextname $(OLAGS)',
join_path('postnek', 'makefile'))
# Define 'rename_' function that calls 'rename'
with open(join_path('postnek', 'xdriver.c'), 'a') as xdriver:
xdriver.write('\nvoid rename_(char *from, char *to)\n{\n'
' rename(from, to);\n}\n')
maxnel = self.spec.variants['MAXNEL'].value
filter_file(r'^#MAXNEL\s*=.*', 'MAXNEL=' + maxnel, 'maketools')
makeTools = Executable('./maketools')
# Build the tools
if '+genbox' in spec:
makeTools('genbox')
# "ERROR: int_tp does not exist!"
# if '+int_tp' in spec:
# makeTools('int_tp')
if '+n2to3' in spec:
makeTools('n2to3')
if '+postnek' in spec:
makeTools('postnek')
if '+reatore2' in spec:
makeTools('reatore2')
if '+genmap' in spec:
makeTools('genmap')
if '+nekmerge' in spec:
makeTools('nekmerge')
if '+prenek' in spec:
makeTools('prenek')
with working_dir(binDir):
if '+mpi' in spec:
FC = spec['mpi'].mpif77
CC = spec['mpi'].mpicc
else:
filter_file(r'^#MPI=0', 'MPI=0', 'makenek')
if '+profiling' not in spec:
filter_file(r'^#PROFILING=0', 'PROFILING=0', 'makenek')
if '+visit' in spec:
filter_file(r'^#VISIT=1', 'VISIT=1', 'makenek')
filter_file(r'^#VISIT_INSTALL=.*', 'VISIT_INSTALL=\"' +
spec['visit'].prefix.bin + '\"', 'makenek')
# Update the makenek to use correct compilers and
# Nek5000 source.
filter_file(r'^#FC\s*=.*', 'FC="{0}"'.format(FC), 'makenek')
filter_file(r'^#CC\s*=.*', 'CC="{0}"'.format(CC), 'makenek')
filter_file(r'^#SOURCE_ROOT\s*=\"\$H.*', 'SOURCE_ROOT=\"' +
prefix.bin.Nek5000 + '\"', 'makenek')
if fflags:
filter_file(r'^#FFLAGS=.*', 'FFLAGS="{0}"'.format(fflags),
'makenek')
if cflags:
filter_file(r'^#CFLAGS=.*', 'CFLAGS="{0}"'.format(cflags),
'makenek')
with working_dir('core'):
if self.compiler.name in ['xl', 'xl_r']:
# Patch 'core/makenek.inc' and 'makefile.template' to use
# '-qextname' when checking for underscore because 'xl'/'xl_r'
# use this option to enable the addition of the underscore.
filter_file(r'^\$FCcomp -c ', '$FCcomp -qextname -c ',
'makenek.inc')
filter_file(r'\$\(FC\) -c \$\(L0\)',
'$(FC) -c -qextname $(L0)', 'makefile.template')
# Install Nek5000/bin in prefix/bin
install_tree(binDir, prefix.bin)
# Copy Nek5000 source to prefix/bin
install_tree('../Nek5000', prefix.bin.Nek5000)
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/nek5000/package.py
|
Python
|
lgpl-2.1
| 10,813
|
[
"VisIt"
] |
26c8aa1c4ada66b124bee5c52cdfd13eec14a38f0a35525da580dea0c886fdf8
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This module analyzes and estimates the distribution of averaged anomaly scores
from a given model. Given a new anomaly score ``s``, estimates
``P(score >= s)``.
The number ``P(score >= s)`` represents the likelihood of the current state of
predictability. For example, a likelihood of 0.01 or 1% means we see this much
predictability about one out of every 100 records. The number is not as unusual
as it seems. For records that arrive every minute, this means once every hour
and 40 minutes. A likelihood of 0.0001 or 0.01% means we see it once out of
10,000 records, or about once every 7 days.
USAGE
+++++
There are two ways to use the code: using the
:class:`.anomaly_likelihood.AnomalyLikelihood` helper class or using the raw
individual functions :func:`~.anomaly_likelihood.estimateAnomalyLikelihoods` and
:func:`~.anomaly_likelihood.updateAnomalyLikelihoods`.
Low-Level Function Usage
++++++++++++++++++++++++
There are two primary interface routines.
- :func:`~.anomaly_likelihood.estimateAnomalyLikelihoods`: batch routine, called
initially and once in a while
- :func:`~.anomaly_likelihood.updateAnomalyLikelihoods`: online routine, called
for every new data point
Initially:
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
estimateAnomalyLikelihoods(metric_data)
Whenever you get new data:
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
updateAnomalyLikelihoods(data2, estimatorParams)
And again (make sure you use the new estimatorParams returned in the above call
to updateAnomalyLikelihoods!).
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
updateAnomalyLikelihoods(data3, estimatorParams)
Every once in a while update estimator with a lot of recent data.
.. code-block:: python
likelihoods, avgRecordList, estimatorParams = \\
estimateAnomalyLikelihoods(lots_of_metric_data)
PARAMS
++++++
The parameters dict returned by the above functions has the following
structure. Note: the client does not need to know the details of this.
::
{
"distribution": # describes the distribution
{
"name": STRING, # name of the distribution, such as 'normal'
"mean": SCALAR, # mean of the distribution
"variance": SCALAR, # variance of the distribution
# There may also be some keys that are specific to the distribution
},
"historicalLikelihoods": [] # Contains the last windowSize likelihood
# values returned
"movingAverage": # stuff needed to compute a rolling average
# of the anomaly scores
{
"windowSize": SCALAR, # the size of the averaging window
"historicalValues": [], # list with the last windowSize anomaly
# scores
"total": SCALAR, # the total of the values in historicalValues
},
}
"""
import collections
import math
import numbers
import numpy
from nupic.serializable import Serializable
from nupic.utils import MovingAverage
class AnomalyLikelihood(Serializable):
"""
Helper class for running anomaly likelihood computation. To use it simply
create an instance and then feed it successive anomaly scores:
.. code-block:: python
anomalyLikelihood = AnomalyLikelihood()
while still_have_data:
# Get anomaly score from model
# Compute probability that an anomaly has occurred
anomalyProbability = anomalyLikelihood.anomalyProbability(
value, anomalyScore, timestamp)
"""
def __init__(self,
claLearningPeriod=None,
learningPeriod=288,
estimationSamples=100,
historicWindowSize=8640,
reestimationPeriod=100):
"""
NOTE: Anomaly likelihood scores are reported at a flat 0.5 for
learningPeriod + estimationSamples iterations.
claLearningPeriod and learningPeriod specify the same variable,
although claLearningPeriod is a deprecated name for it.
:param learningPeriod: (claLearningPeriod: deprecated) - (int) the number of
iterations required for the algorithm to learn the basic patterns in the
dataset and for the anomaly score to 'settle down'. The default is based
on empirical observations but in reality this could be larger for more
complex domains. The downside of setting this too large is that real
anomalies might be ignored and not flagged.
:param estimationSamples: (int) the number of reasonable anomaly scores
required for the initial estimate of the Gaussian. The default of 100
records is reasonable - we just need sufficient samples to get a decent
estimate for the Gaussian. It's unlikely you will need to tune this since
the Gaussian is re-estimated every 10 iterations by default.
:param historicWindowSize: (int) size of sliding window of historical
data points to maintain for periodic reestimation of the Gaussian. Note:
the default of 8640 is based on a month's worth of history at 5-minute
intervals.
:param reestimationPeriod: (int) how often we re-estimate the Gaussian
distribution. The ideal is to re-estimate every iteration but this is a
performance hit. In general the system is not very sensitive to this
number as long as it is small relative to the total number of records
processed.
"""
if historicWindowSize < estimationSamples:
raise ValueError("estimationSamples exceeds historicWindowSize")
self._iteration = 0
self._historicalScores = collections.deque(maxlen=historicWindowSize)
self._distribution = None
if claLearningPeriod is not None:
print("claLearningPeriod is deprecated, use learningPeriod instead.")
self._learningPeriod = claLearningPeriod
else:
self._learningPeriod = learningPeriod
self._probationaryPeriod = self._learningPeriod + estimationSamples
self._reestimationPeriod = reestimationPeriod
def __eq__(self, o):
# pylint: disable=W0212
return (isinstance(o, AnomalyLikelihood) and
self._iteration == o._iteration and
self._historicalScores == o._historicalScores and
self._distribution == o._distribution and
self._probationaryPeriod == o._probationaryPeriod and
self._learningPeriod == o._learningPeriod and
self._reestimationPeriod == o._reestimationPeriod)
# pylint: enable=W0212
def __str__(self):
return ("AnomalyLikelihood: %s %s %s %s %s %s" % (
self._iteration,
self._historicalScores,
self._distribution,
self._probationaryPeriod,
self._learningPeriod,
self._reestimationPeriod) )
@staticmethod
def computeLogLikelihood(likelihood):
"""
Compute a log scale representation of the likelihood value. Since the
likelihood computations return low probabilities that often go into four 9's
or five 9's, a log value is more useful for visualization, thresholding,
etc.
"""
# The log formula is:
# Math.log(1.0000000001 - likelihood) / Math.log(1.0 - 0.9999999999)
return math.log(1.0000000001 - likelihood) / -23.02585084720009
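    # Worked example (illustrative): computeLogLikelihood(0.9999) is
    # log(1.0000000001 - 0.9999) / -23.02585... ~= log(1e-4) / -23.0259 ~= 0.4,
    # so four 9's of likelihood map to roughly 0.4 on the log scale.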
@staticmethod
def _calcSkipRecords(numIngested, windowSize, learningPeriod):
"""Return the value of skipRecords for passing to estimateAnomalyLikelihoods
If `windowSize` is very large (bigger than the amount of data) then this
could just return `learningPeriod`. But when some values have fallen out of
the historical sliding window of anomaly records, then we have to take those
into account as well so we return the `learningPeriod` minus the number
shifted out.
:param numIngested - (int) number of data points that have been added to the
sliding window of historical data points.
:param windowSize - (int) size of sliding window of historical data points.
:param learningPeriod - (int) the number of iterations required for the
algorithm to learn the basic patterns in the dataset and for the anomaly
score to 'settle down'.
"""
numShiftedOut = max(0, numIngested - windowSize)
return min(numIngested, max(0, learningPeriod - numShiftedOut))
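    # Worked example (illustrative): with learningPeriod=288 and
    # windowSize=8640, after ingesting 9000 points 360 have been shifted out
    # of the window, so min(9000, max(0, 288 - 360)) == 0 records are skipped.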
@classmethod
def read(cls, proto):
""" capnp deserialization method for the anomaly likelihood object
:param proto: (Object) capnp proto object specified in
nupic.regions.AnomalyLikelihoodRegion.capnp
:returns: (Object) the deserialized AnomalyLikelihood object
"""
# pylint: disable=W0212
anomalyLikelihood = object.__new__(cls)
anomalyLikelihood._iteration = proto.iteration
anomalyLikelihood._historicalScores = collections.deque(
maxlen=proto.historicWindowSize)
for i, score in enumerate(proto.historicalScores):
anomalyLikelihood._historicalScores.append((i, score.value,
score.anomalyScore))
if proto.distribution.name: # is "" when there is no distribution.
anomalyLikelihood._distribution = dict()
anomalyLikelihood._distribution['distribution'] = dict()
anomalyLikelihood._distribution['distribution']["name"] = proto.distribution.name
anomalyLikelihood._distribution['distribution']["mean"] = proto.distribution.mean
anomalyLikelihood._distribution['distribution']["variance"] = proto.distribution.variance
anomalyLikelihood._distribution['distribution']["stdev"] = proto.distribution.stdev
anomalyLikelihood._distribution["movingAverage"] = {}
anomalyLikelihood._distribution["movingAverage"]["windowSize"] = proto.distribution.movingAverage.windowSize
anomalyLikelihood._distribution["movingAverage"]["historicalValues"] = []
for value in proto.distribution.movingAverage.historicalValues:
anomalyLikelihood._distribution["movingAverage"]["historicalValues"].append(value)
anomalyLikelihood._distribution["movingAverage"]["total"] = proto.distribution.movingAverage.total
anomalyLikelihood._distribution["historicalLikelihoods"] = []
for likelihood in proto.distribution.historicalLikelihoods:
anomalyLikelihood._distribution["historicalLikelihoods"].append(likelihood)
else:
anomalyLikelihood._distribution = None
anomalyLikelihood._probationaryPeriod = proto.probationaryPeriod
anomalyLikelihood._learningPeriod = proto.learningPeriod
anomalyLikelihood._reestimationPeriod = proto.reestimationPeriod
# pylint: enable=W0212
return anomalyLikelihood
def write(self, proto):
""" capnp serialization method for the anomaly likelihood object
:param proto: (Object) capnp proto object specified in
nupic.regions.AnomalyLikelihoodRegion.capnp
"""
proto.iteration = self._iteration
pHistScores = proto.init('historicalScores', len(self._historicalScores))
for i, score in enumerate(list(self._historicalScores)):
_, value, anomalyScore = score
record = pHistScores[i]
record.value = float(value)
record.anomalyScore = float(anomalyScore)
proto.distribution.name = self._distribution["distribution"]["name"]
proto.distribution.mean = float(self._distribution["distribution"]["mean"])
proto.distribution.variance = float(self._distribution["distribution"]["variance"])
proto.distribution.stdev = float(self._distribution["distribution"]["stdev"])
proto.distribution.movingAverage.windowSize = float(self._distribution["movingAverage"]["windowSize"])
historicalValues = self._distribution["movingAverage"]["historicalValues"]
pHistValues = proto.distribution.movingAverage.init(
"historicalValues", len(historicalValues))
for i, value in enumerate(historicalValues):
pHistValues[i] = float(value)
#proto.distribution.movingAverage.historicalValues = self._distribution["movingAverage"]["historicalValues"]
proto.distribution.movingAverage.total = float(self._distribution["movingAverage"]["total"])
historicalLikelihoods = self._distribution["historicalLikelihoods"]
pHistLikelihoods = proto.distribution.init("historicalLikelihoods",
len(historicalLikelihoods))
for i, likelihood in enumerate(historicalLikelihoods):
pHistLikelihoods[i] = float(likelihood)
proto.probationaryPeriod = self._probationaryPeriod
proto.learningPeriod = self._learningPeriod
proto.reestimationPeriod = self._reestimationPeriod
proto.historicWindowSize = self._historicalScores.maxlen
def anomalyProbability(self, value, anomalyScore, timestamp=None):
"""
Compute the probability that the current value plus anomaly score represents
an anomaly given the historical distribution of anomaly scores. The closer
the number is to 1, the higher the chance it is an anomaly.
:param value: the current metric ("raw") input value, eg. "orange", or
'21.2' (deg. Celsius), ...
:param anomalyScore: the current anomaly score
:param timestamp: [optional] timestamp of the occurrence,
default (None) results in using iteration step.
:returns: the anomalyLikelihood for this record.
"""
if timestamp is None:
timestamp = self._iteration
dataPoint = (timestamp, value, anomalyScore)
# We ignore the first probationaryPeriod data points
if self._iteration < self._probationaryPeriod:
likelihood = 0.5
else:
# On a rolling basis we re-estimate the distribution
if ( (self._distribution is None) or
(self._iteration % self._reestimationPeriod == 0) ):
numSkipRecords = self._calcSkipRecords(
numIngested=self._iteration,
windowSize=self._historicalScores.maxlen,
learningPeriod=self._learningPeriod)
_, _, self._distribution = estimateAnomalyLikelihoods(
self._historicalScores,
skipRecords=numSkipRecords)
likelihoods, _, self._distribution = updateAnomalyLikelihoods(
[dataPoint],
self._distribution)
likelihood = 1.0 - likelihoods[0]
# Before we exit update historical scores and iteration
self._historicalScores.append(dataPoint)
self._iteration += 1
return likelihood
def estimateAnomalyLikelihoods(anomalyScores,
averagingWindow=10,
skipRecords=0,
verbosity=0):
"""
Given a series of anomaly scores, compute the likelihood for each score. This
function should be called once on a bunch of historical anomaly scores for an
initial estimate of the distribution. It should be called again every so often
(say every 50 records) to update the estimate.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
For best results, the list should be between 1000
and 10,000 records
:param averagingWindow: integer number of records to average over
:param skipRecords: integer specifying number of records to skip when
estimating distributions. If skip records are >=
len(anomalyScores), a very broad distribution is returned
that makes everything pretty likely.
:param verbosity: integer controlling extent of printouts for debugging
0 = none
1 = occasional information
2 = print every record
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
a small JSON dict that contains the state of the estimator
"""
if verbosity > 1:
print("In estimateAnomalyLikelihoods.")
print("Number of anomaly scores:", len(anomalyScores))
print("Skip records=", skipRecords)
print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
# Compute averaged anomaly scores
aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(
anomalyScores,
windowSize = averagingWindow,
verbosity = verbosity)
s = [r[2] for r in aggRecordList]
dataValues = numpy.array(s)
# Estimate the distribution of anomaly scores based on aggregated records
if len(aggRecordList) <= skipRecords:
distributionParams = nullDistribution(verbosity = verbosity)
else:
distributionParams = estimateNormal(dataValues[skipRecords:])
# HACK ALERT! The HTMPredictionModel currently does not handle constant
# metric values very well (time of day encoder changes sometimes lead to
# unstable SDR's even though the metric is constant). Until this is
# resolved, we explicitly detect and handle completely flat metric values by
# reporting them as not anomalous.
s = [r[1] for r in aggRecordList]
# Only do this if the values are numeric
if all([isinstance(r[1], numbers.Number) for r in aggRecordList]):
metricValues = numpy.array(s)
metricDistribution = estimateNormal(metricValues[skipRecords:],
performLowerBoundCheck=False)
if metricDistribution["variance"] < 1.5e-5:
distributionParams = nullDistribution(verbosity = verbosity)
# Estimate likelihoods based on this distribution
likelihoods = numpy.array(dataValues, dtype=float)
for i, s in enumerate(dataValues):
likelihoods[i] = tailProbability(s, distributionParams)
# Filter likelihood values
filteredLikelihoods = numpy.array(
_filterLikelihoods(likelihoods) )
params = {
"distribution": distributionParams,
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": averagingWindow,
},
"historicalLikelihoods":
list(likelihoods[-min(averagingWindow, len(likelihoods)):]),
}
if verbosity > 1:
print("Discovered params=")
print(params)
print("Number of likelihoods:", len(likelihoods))
print("First 20 likelihoods:", (
filteredLikelihoods[0:min(20, len(filteredLikelihoods))] ))
print("leaving estimateAnomalyLikelihoods")
return (filteredLikelihoods, aggRecordList, params)
def updateAnomalyLikelihoods(anomalyScores,
params,
verbosity=0):
"""
Compute updated probabilities for anomalyScores using the given params.
:param anomalyScores: a list of records. Each record is a list with the
following three elements: [timestamp, value, score]
Example::
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
:param params: the JSON dict returned by estimateAnomalyLikelihoods
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: 3-tuple consisting of:
- likelihoods
numpy array of likelihoods, one for each aggregated point
- avgRecordList
list of averaged input records
- params
an updated JSON object containing the state of this metric.
"""
if verbosity > 3:
print("In updateAnomalyLikelihoods.")
print("Number of anomaly scores:", len(anomalyScores))
print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])
print("Params:", params)
if len(anomalyScores) == 0:
raise ValueError("Must have at least one anomalyScore")
if not isValidEstimatorParams(params):
raise ValueError("'params' is not a valid params structure")
# For backward compatibility.
if "historicalLikelihoods" not in params:
params["historicalLikelihoods"] = [1.0]
# Compute moving averages of these new scores using the previous values
# as well as likelihood for these scores using the old estimator
historicalValues = params["movingAverage"]["historicalValues"]
total = params["movingAverage"]["total"]
windowSize = params["movingAverage"]["windowSize"]
aggRecordList = numpy.zeros(len(anomalyScores), dtype=float)
likelihoods = numpy.zeros(len(anomalyScores), dtype=float)
for i, v in enumerate(anomalyScores):
newAverage, historicalValues, total = (
MovingAverage.compute(historicalValues, total, v[2], windowSize)
)
aggRecordList[i] = newAverage
likelihoods[i] = tailProbability(newAverage, params["distribution"])
# Filter the likelihood values. First we prepend the historical likelihoods
# to the current set. Then we filter the values. We peel off the likelihoods
# to return and the last windowSize values to store for later.
likelihoods2 = params["historicalLikelihoods"] + list(likelihoods)
filteredLikelihoods = _filterLikelihoods(likelihoods2)
likelihoods[:] = filteredLikelihoods[-len(likelihoods):]
historicalLikelihoods = likelihoods2[-min(windowSize, len(likelihoods2)):]
# Update the estimator
newParams = {
"distribution": params["distribution"],
"movingAverage": {
"historicalValues": historicalValues,
"total": total,
"windowSize": windowSize,
},
"historicalLikelihoods": historicalLikelihoods,
}
assert len(newParams["historicalLikelihoods"]) <= windowSize
if verbosity > 3:
print("Number of likelihoods:", len(likelihoods))
print("First 20 likelihoods:", likelihoods[0:min(20, len(likelihoods))])
print("Leaving updateAnomalyLikelihoods.")
return (likelihoods, aggRecordList, newParams)
def _filterLikelihoods(likelihoods,
redThreshold=0.99999, yellowThreshold=0.999):
"""
Filter the list of raw (pre-filtered) likelihoods so that we only preserve
sharp increases in likelihood. 'likelihoods' can be a numpy array of floats or
a list of floats.
:returns: A new list of floats likelihoods containing the filtered values.
"""
redThreshold = 1.0 - redThreshold
yellowThreshold = 1.0 - yellowThreshold
# The first value is untouched
filteredLikelihoods = [likelihoods[0]]
for i, v in enumerate(likelihoods[1:]):
if v <= redThreshold:
# Value is in the redzone
if likelihoods[i] > redThreshold:
# Previous value is not in redzone, so leave as-is
filteredLikelihoods.append(v)
else:
filteredLikelihoods.append(yellowThreshold)
else:
# Value is not in the redzone, so leave as-is
filteredLikelihoods.append(v)
return filteredLikelihoods
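# Worked example (illustrative): with the default thresholds, the internal
# cutoffs become redThreshold=1e-5 and yellowThreshold=1e-3. For the input
# [0.1, 1e-6, 1e-6, 0.2] the first 1e-6 is kept (its predecessor 0.1 is not
# in the red zone) but the second is flattened to 1e-3, giving
# [0.1, 1e-6, 1e-3, 0.2].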
def _anomalyScoreMovingAverage(anomalyScores,
windowSize=10,
verbosity=0,
):
"""
Given a list of anomaly scores return a list of averaged records.
anomalyScores is assumed to be a list of records of the form:
[datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]
Each record in the returned list contains:
[datetime, value, averagedScore]
*Note:* we only average the anomaly score.
"""
historicalValues = []
total = 0.0
averagedRecordList = [] # Aggregated records
for record in anomalyScores:
# Skip (but log) records without correct number of entries
if not isinstance(record, (list, tuple)) or len(record) != 3:
if verbosity >= 1:
print("Malformed record:", record)
continue
avg, historicalValues, total = (
MovingAverage.compute(historicalValues, total, record[2], windowSize)
)
averagedRecordList.append( [record[0], record[1], avg] )
if verbosity > 2:
print("Aggregating input record:", record)
print("Result:", [record[0], record[1], avg])
return averagedRecordList, historicalValues, total
def estimateNormal(sampleData, performLowerBoundCheck=True):
"""
:param sampleData:
:type sampleData: Numpy array.
:param performLowerBoundCheck:
:type performLowerBoundCheck: bool
:returns: A dict containing the parameters of a normal distribution based on
the ``sampleData``.
"""
params = {
"name": "normal",
"mean": numpy.mean(sampleData),
"variance": numpy.var(sampleData),
}
if performLowerBoundCheck:
# Handle edge case of almost no deviations and super low anomaly scores. We
# find that such low anomaly means can happen, but then the slightest blip
# of anomaly score can cause the likelihood to jump up to red.
if params["mean"] < 0.03:
params["mean"] = 0.03
# Catch all for super low variance to handle numerical precision issues
if params["variance"] < 0.0003:
params["variance"] = 0.0003
# Compute standard deviation
if params["variance"] > 0:
params["stdev"] = math.sqrt(params["variance"])
else:
params["stdev"] = 0
return params
def nullDistribution(verbosity=0):
"""
:param verbosity: integer controlling extent of printouts for debugging
:type verbosity: int
:returns: A distribution that is very broad and makes every anomaly score
between 0 and 1 pretty likely.
"""
if verbosity>0:
print("Returning nullDistribution")
return {
"name": "normal",
"mean": 0.5,
"variance": 1e6,
"stdev": 1e3,
}
def tailProbability(x, distributionParams):
"""
Given the normal distribution specified by the mean and standard deviation
in distributionParams, return the probability of getting samples further
from the mean. For values above the mean, this is the probability of getting
samples > x and for values below the mean, the probability of getting
samples < x. This is the Q-function: the tail probability of the normal distribution.
:param distributionParams: dict with 'mean' and 'stdev' of the distribution
"""
if "mean" not in distributionParams or "stdev" not in distributionParams:
raise RuntimeError("Insufficient parameters to specify the distribution.")
if x < distributionParams["mean"]:
# Gaussian is symmetrical around mean, so flip to get the tail probability
xp = 2 * distributionParams["mean"] - x
return tailProbability(xp, distributionParams)
# Calculate the Q function with the complementary error function, explained
# here: http://www.gaussianwaves.com/2012/07/q-function-and-error-functions
z = (x - distributionParams["mean"]) / distributionParams["stdev"]
return 0.5 * math.erfc(z/1.4142)
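# Worked example (illustrative): at the mean the tail probability is exactly
# 0.5, e.g. tailProbability(0.5, nullDistribution()) gives z = 0 and
# 0.5 * erfc(0) == 0.5; one stdev above the mean gives roughly 0.159.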
def isValidEstimatorParams(p):
"""
:returns: ``True`` if ``p`` is a valid estimator params as might be returned
by ``estimateAnomalyLikelihoods()`` or ``updateAnomalyLikelihoods``,
``False`` otherwise. Just does some basic validation.
"""
if not isinstance(p, dict):
return False
if "distribution" not in p:
return False
if "movingAverage" not in p:
return False
dist = p["distribution"]
if not ("mean" in dist and "name" in dist
and "variance" in dist and "stdev" in dist):
return False
return True
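# --- Illustrative usage sketch (not part of the original module) ---
# A minimal end-to-end run of the low-level API described in the module
# docstring, using synthetic scores; timestamps are plain ints here.
if __name__ == "__main__":
    import random as _rnd
    _rnd.seed(42)
    history = [(i, float(i % 7), _rnd.random() * 0.2) for i in range(1000)]
    _, _, params = estimateAnomalyLikelihoods(history)
    # A sudden spike in the anomaly score should come back as a low
    # likelihood, i.e. a high anomaly probability of 1 - likelihood.
    likelihoods, _, params = updateAnomalyLikelihoods([(1000, 3.0, 0.9)], params)
    print("likelihood of the spike:", likelihoods[0])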
|
scottpurdy/nupic
|
src/nupic/algorithms/anomaly_likelihood.py
|
Python
|
agpl-3.0
| 28,330
|
[
"Gaussian"
] |
479befa6640ebf35a8d73b087266fad1af93203464a3ab4b0831950fd6c80d2a
|
import os
import sys
from ase import Atoms, Atom, QuasiNewton, PickleTrajectory
from gpaw import *
from gpaw.cluster import Cluster
from gpaw.utilities.viewmol import ViewmolTrajectory, write_viewmol
s = Cluster([Atom('H'), Atom('H',(0,0,3))])
s.minimal_box(2)
c = GPAW(h=0.3, nbands=2)
s.set_calculator(c)
vfname='traj.vmol'
pfname='traj.pickle'
vmt = ViewmolTrajectory(s, vfname)
traj = PickleTrajectory(pfname, 'w', s)
#c.attach(vmt.add, 100000)
#sys.exit()
# Find the theoretical bond length:
dyn = QuasiNewton(s)
dyn.attach(traj.write)
dyn.attach(vmt.add)
dyn.run(fmax=0.05)
traj = PickleTrajectory(pfname, 'r')
vfname2='pickle.vmol'
write_viewmol(traj, vfname2)
|
robwarm/gpaw-symm
|
oldtest/viewmol_trajectory.py
|
Python
|
gpl-3.0
| 674
|
[
"ASE",
"GPAW"
] |
44d87955409a5eac6432a4c3cc33abc910f4a6b419095b18d0c6f1915c55c70c
|
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the
semantics of jumpahead(n) are weakened to simply jump
to another distant state and rely on the large period
to avoid overlapping sequences.
* The random() method is implemented in C, executes in
a single Python step, and is, therefore, threadsafe.
"""
from types import BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from math import floor as _floor
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"cunifvariate","expovariate","vonmisesvariate","gammavariate",
"stdgamma","gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead", "WichmannHill"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister core generator.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state. Especially useful for multi-threaded programs, creating
a different instance of Random for each thread, and using the jumpahead()
method to ensure that the generated sequences seen by each thread don't
overlap.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), setstate() and jumpahead().
"""
VERSION = 2 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time.
If a is not None or an int or long, hash(a) is used instead.
"""
if a is None:
import time
a = long(time.time() * 256) # use fractional seconds
super(Random, self).seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super(Random, self).getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 2:
version, internalstate, self.gauss_next = state
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1L<<BPF, _BuiltinMethod=_BuiltinMethodType):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int', 'default', and 'maxwidth' arguments.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
if istart > 0:
if istart >= maxwidth and type(self.random) is _BuiltinMethod:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*(istop - istart))
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*(istop - istart))
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth and type(self.random) is _BuiltinMethod:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange()"
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (width + istep - 1) / istep
elif istep < 0:
n = (width + istep + 1) / istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
if n >= maxwidth and type(self.random) is _BuiltinMethod:
return istart + self._randbelow(n)
return istart + istep*int(self.random() * n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
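# Editor's note -- an illustrative sketch, not part of the original
# module: randrange() excludes its endpoint while randint() includes it:
#
#   r = Random(42)
#   assert 3 not in [r.randrange(0, 3) for _ in range(1000)]  # never 3
#   assert 3 in [r.randint(0, 3) for _ in range(1000)]        # reaches 3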
def _randbelow(self, n, bpf=BPF, maxwidth=1L<<BPF,
long=long, _log=_log, int=int):
"""Return a random int in the range [0,n)
Handles the case where n has more bits than returned
by a single call to the underlying generator.
"""
# k is a sometimes over but never under estimate of the bits in n
k = int(1.00001 + _log(n-1, 2)) # 2**k > n-1 >= 2**(k-2)
random = self.random
r = n
while r >= n:
# In Py2.4, this section becomes: r = self.getrandbits(k)
r = long(random() * maxwidth)
bits = bpf
while bits < k:
r = (r << bpf) | (long(random() * maxwidth))
bits += bpf
r >>= (bits - k)
return r
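# Editor's worked example (sketch, not in the original source): for
# n = 10**20, n-1 needs 67 bits, so k = 67 exceeds bpf = 53 and the
# loop above splices two 53-bit draws before trimming the excess:
#
#   r = long(random() * maxwidth)               # first 53 bits
#   r = (r << 53) | long(random() * maxwidth)   # now 106 bits
#   r >>= 106 - 67                              # keep the top 67 bits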
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))]
def shuffle(self, x, random=None, int=int):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
Note that for even rather small len(x), the total number of
permutations of x is larger than the period of most random number
generators; this implies that "most" permutations of a long
sequence can never be generated.
"""
if random is None:
random = self.random
for i in xrange(len(x)-1, 0, -1):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use xrange as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(xrange(10000000), 60)
"""
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a
# dictionary.
# When the number of selections is small compared to the population,
# then tracking selections is efficient, requiring only a small
# dictionary and an occasional reselection. For a larger number of
# selections, the pool tracking method is preferred since the list takes
# less space than the dictionary and it doesn't suffer from frequent
# reselections.
n = len(population)
if not 0 <= k <= n:
raise ValueError, "sample larger than population"
random = self.random
_int = int
result = [None] * k
if n < 6 * k: # if n len list takes less space than a k len dict
pool = list(population)
for i in xrange(k): # invariant: non-selected at [0,n-i)
j = _int(random() * (n-i))
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
try:
n > 0 and (population[0], population[n//2], population[n-1])
except (TypeError, KeyError): # handle sets and dictionaries
population = tuple(population)
selected = {}
for i in xrange(k):
j = _int(random() * n)
while j in selected:
j = _int(random() * n)
result[i] = selected[j] = population[j]
return result
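# Editor's usage sketch (not part of the original module): per the
# docstring above, a large virtual population can be sampled without
# materializing it, and sub-slices of the result are themselves valid
# random samples:
#
#   winners = Random(0).sample(xrange(10000000), 60)
#   grand_prize, second_place = winners[:10], winners[10:]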
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"""Get a random number in the range [a, b)."""
return a + (b-a) * self.random()
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while True:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- circular uniform --------------------
def cunifvariate(self, mean, arc):
"""Circular uniform distribution.
mean is the mean angle, and arc is the range of the distribution,
centered around the mean angle. Both values must be expressed in
radians. Returned values range between mean - arc/2 and
mean + arc/2 and are normalized to between 0 and pi.
Deprecated in version 2.3. Use:
(mean + arc * (Random.random() - 0.5)) % math.pi
"""
# mean: mean angle (in radians between 0 and pi)
# arc: range of distribution (in radians between 0 and pi)
import warnings
warnings.warn("The cunifvariate function is deprecated; Use (mean "
"+ arc * (Random.random() - 0.5)) % Math.pi instead.",
DeprecationWarning, 2)
return (mean + arc * (self.random() - 0.5)) % _pi
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. (The parameter would be
called "lambda", but that is a reserved word in Python.) Returned
values range from 0 to positive infinity.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
random = self.random
u = random()
while u <= 1e-7:
u = random()
return -_log(u)/lambd
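# Editor's note (added derivation, not in the original source): this is
# inverse-transform sampling.  The exponential CDF is
# F(x) = 1 - exp(-lambd*x); setting u = 1 - F(x) = exp(-lambd*x) and
# solving gives x = -log(u)/lambd, so a uniform u on (0, 1] yields an
# exponential variate with mean 1/lambd.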
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
r = (1.0 + b * b)/(2.0 * b)
while True:
u1 = random()
z = _cos(_pi * u1)
f = (1.0 + r * z)/(r + z)
c = kappa * (r - f)
u2 = random()
if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)):
break
u3 = random()
if u3 > 0.5:
theta = (mu % TWOPI) + _acos(f)
else:
theta = (mu % TWOPI) - _acos(f)
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while True:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while True:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = pow(p, 1.0/alpha)
else:
# p > 1
x = -_log((b-p)/alpha)
u1 = random()
if not (((p <= 1.0) and (u1 > _exp(-x))) or
((p > 1) and (u1 > pow(x, alpha - 1.0)))):
break
return x * beta
def stdgamma(self, alpha, ainv, bbb, ccc):
# This method was (and shall remain) undocumented.
# This method is deprecated
# for the following reasons:
# 1. Returns same as .gammavariate(alpha, 1.0)
# 2. Requires caller to provide 3 extra arguments
# that are functions of alpha anyway
# 3. Can't be used for alpha < 0.5
# ainv = sqrt(2 * alpha - 1)
# bbb = alpha - log(4)
# ccc = alpha + ainv
import warnings
warnings.warn("The stdgamma function is deprecated; "
"use gammavariate() instead.",
DeprecationWarning, 2)
return self.gammavariate(alpha, 1.0)
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
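# Editor's sketch (not in the original source): the sin-branch deviate
# is cached in gauss_next, so calls alternate between computing a fresh
# Box-Muller cos/sin pair and returning the cached half:
#
#   g = Random(0)
#   a = g.gauss(0.0, 1.0)   # draws two uniforms, caches the sin part
#   b = g.gauss(0.0, 1.0)   # returns the cached value, no new draws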
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
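# Editor's note (added derivation, not in the original source): another
# inverse transform.  The Pareto CDF (with x_m = 1) is
# F(x) = 1 - x**-alpha; setting u = 1 - F(x) = x**-alpha and solving
# gives x = u**(-1.0/alpha) = 1.0 / pow(u, 1.0/alpha), matching the
# return statement above.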
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
VERSION = 1 # used by getstate/setstate
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time.
If a is not None and not an int or long, hash(a) is used instead.
If a is an int or long, a is used directly. Distinct values between
0 and 27814431486575L inclusive are guaranteed to yield distinct
internal states (this guarantee is specific to the default
Wichmann-Hill generator).
"""
if a is None:
# Initialize from current time
import time
a = long(time.time() * 256)
if not isinstance(a, (int, long)):
a = hash(a)
a, x = divmod(a, 30268)
a, y = divmod(a, 30306)
a, z = divmod(a, 30322)
self._seed = int(x)+1, int(y)+1, int(z)+1
self.gauss_next = None
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
# Wichmann-Hill random number generator.
#
# Wichmann, B. A. & Hill, I. D. (1982)
# Algorithm AS 183:
# An efficient and portable pseudo-random number generator
# Applied Statistics 31 (1982) 188-190
#
# see also:
# Correction to Algorithm AS 183
# Applied Statistics 33 (1984) 123
#
# McLeod, A. I. (1985)
# A remark on Algorithm AS 183
# Applied Statistics 34 (1985),198-200
# This part is thread-unsafe:
# BEGIN CRITICAL SECTION
x, y, z = self._seed
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
self._seed = x, y, z
# END CRITICAL SECTION
# Note: on a platform using IEEE-754 double arithmetic, this can
# never return 0.0 (asserted by Tim; proof too long for a comment).
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
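# Editor's note (added context, not in the original source): the three
# LCGs above have periods 30268, 30306 and 30322 respectively; their
# combined period is about 6.95e12 -- ample for casual use, but far
# shorter than the Mersenne Twister's 2**19937-1.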
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, self._seed, self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 1:
version, self._seed, self.gauss_next = state
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
def jumpahead(self, n):
"""Act as if n calls to random() were made, but quickly.
n is an int, greater than or equal to 0.
Example use: If you have 2 threads and know that each will
consume no more than a million random numbers, create two Random
objects r1 and r2, then do
r2.setstate(r1.getstate())
r2.jumpahead(1000000)
Then r1 and r2 will use guaranteed-disjoint segments of the full
period.
"""
if not n >= 0:
raise ValueError("n must be >= 0")
x, y, z = self._seed
x = int(x * pow(171, n, 30269)) % 30269
y = int(y * pow(172, n, 30307)) % 30307
z = int(z * pow(170, n, 30323)) % 30323
self._seed = x, y, z
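# Editor's note (added reasoning, not in the original source): each
# component satisfies x_n = (x_0 * 171**n) % 30269 (similarly for y and
# z), so the three-argument pow() above advances the generator n steps
# in O(log n) multiplications instead of n -- this is what makes cheap,
# guaranteed-disjoint per-thread streams possible.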
def __whseed(self, x=0, y=0, z=0):
"""Set the Wichmann-Hill seed from (x, y, z).
These must be integers in the range [0, 256).
"""
if not type(x) == type(y) == type(z) == int:
raise TypeError('seeds must be integers')
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError('seeds must be in range(0, 256)')
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
self.gauss_next = None
def whseed(self, a=None):
"""Seed from hashable object's hash code.
None or no argument seeds from current time. It is not guaranteed
that objects with distinct hash codes lead to distinct internal
states.
This is obsolete, provided for compatibility with the seed routine
used prior to Python 2.1. Use the .seed() method instead.
"""
if a is None:
self.__whseed()
return
a = hash(a)
a, x = divmod(a, 256)
a, y = divmod(a, 256)
a, z = divmod(a, 256)
x = (x + a) % 256 or 1
y = (y + a) % 256 or 1
z = (z + a) % 256 or 1
self.__whseed(x, y, z)
## -------------------- test program --------------------
def _test_generator(n, funccall):
import time
print n, 'times', funccall
code = compile(funccall, funccall, 'eval')
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = eval(code)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print round(t1-t0, 3), 'sec,',
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print 'avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest)
def _test(N=2000):
_test_generator(N, 'random()')
_test_generator(N, 'normalvariate(0.0, 1.0)')
_test_generator(N, 'lognormvariate(0.0, 1.0)')
_test_generator(N, 'cunifvariate(0.0, 1.0)')
_test_generator(N, 'vonmisesvariate(0.0, 1.0)')
_test_generator(N, 'gammavariate(0.01, 1.0)')
_test_generator(N, 'gammavariate(0.1, 1.0)')
_test_generator(N, 'gammavariate(0.1, 2.0)')
_test_generator(N, 'gammavariate(0.5, 1.0)')
_test_generator(N, 'gammavariate(0.9, 1.0)')
_test_generator(N, 'gammavariate(1.0, 1.0)')
_test_generator(N, 'gammavariate(2.0, 1.0)')
_test_generator(N, 'gammavariate(20.0, 1.0)')
_test_generator(N, 'gammavariate(200.0, 1.0)')
_test_generator(N, 'gauss(0.0, 1.0)')
_test_generator(N, 'betavariate(3.0, 3.0)')
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
cunifvariate = _inst.cunifvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
stdgamma = _inst.stdgamma
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
if __name__ == '__main__':
_test()
|
trivoldus28/pulsarch-verilog
|
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/random.py
|
Python
|
gpl-2.0
| 29,339
|
[
"Gaussian"
] |
71e2e04d1bfd07bbacd00482d059cee3f1f8d5e409153d55632055729bf26022
|
from __future__ import print_function
##################################################
# Data utilities package
#
# Generic routines, useful for all data
##################################################
import sys
print("Loading data utilities")
# Load routines from separate files
#try:
# from plotdata import plotdata
#except:
# print "No plotdata"
try:
from boututils.datafile import DataFile
except:
print("No datafile")
try:
from boututils.file_import import file_import
except:
print("No file_import")
try:
from boututils.calculus import deriv, integrate
except:
print("No calculus")
try:
from boututils.linear_regression import linear_regression
except:
print("No linear regression")
try:
from boututils.shell import shell
except:
print("No shell commands")
try:
from boututils.ncpus import determineNumberOfCPUs
except:
print("No determineNumberOfCPUs")
try:
from boututils.launch import launch
except:
print("No launch command")
try:
from boututils.getmpirun import getmpirun
except:
print("No getmpirun command")
try:
from boututils.fft_integrate import fft_integrate
except:
print("No fft_integrate command")
try:
from boututils.mode_structure import mode_structure
except:
print("No mode_structure command")
try:
if sys.version_info[0]==3:
print("polplotslice uses the VTK library through mayavi, which"+\
" is currently only available in python 2")
else:
from boututils.plotpolslice import plotpolslice
except:
print("No plotpolslice command")
try:
from boututils.moment_xyzt import moment_xyzt
except:
print("No moment_xyzt command")
try:
from boututils.volume_integral import volume_integral
except:
print("No volume_integral command")
try:
from boututils.surface_average import surface_average
except:
print("No surface_average command")
try:
from boututils.showdata import showdata
except:
print("No showdata")
try:
from boututils.closest_line import closest_line
except:
print("No closest_line")
try:
from boututils.fft_deriv import fft_deriv
except:
print("No fft_deriv")
try:
from boututils.int_func import int_func
except:
print("No int_func")
try:
from boututils.efit_analyzer import View2D
except:
print("No View2D ")
try:
if sys.version_info[0]==3:
print("mlab uses the VTK library through mayavi, which"+\
" is currently only available in python 2")
else:
from mayavi import mlab
except:
print("No mlab")
try:
if sys.version_info[0]==3:
print("anim uses the VTK library through mayavi, which"+\
" is currently only available in python 2")
else:
from boututils.anim import anim
except:
print("No anim")
try:
if sys.version_info[0]==3:
print("View3D uses the VTK library through mayavi, which"+\
" is currently only available in python 2")
else:
from boututils.View3D import View3D
except:
print("No View3D")
|
kevinpetersavage/BOUT-dev
|
tools/pylib/boututils/__init__.py
|
Python
|
gpl-3.0
| 3,195
|
[
"Mayavi",
"VTK"
] |
eb563c8e828705a552e7c06a61b79d071de3a20ed481c1d5a0280d66a4187701
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes for non-standard space-group settings
"""
import re
from fractions import Fraction
from typing import List, Tuple, Union
import numpy as np
from pymatgen.core import Lattice
from pymatgen.core.operations import MagSymmOp, SymmOp
from pymatgen.util.string import transformation_to_string
__author__ = "Matthew Horton"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Development"
__date__ = "Apr 2017"
class JonesFaithfulTransformation:
"""
Transformation for space-groups defined in a non-standard setting
"""
def __init__(self, P, p):
"""
Transform between settings using matrix P and origin shift vector p,
using same notation as reference.
Should initialize using `from_transformation_string` in Jones
faithful notation, given by a string specifying both a
transformation matrix and an origin shift, with parts delimited
by a semi-colon. Best shown by example:
* `a,b,c;0,0,0` is the identity (no change)
* `-b+c,a+c,-a+b+c;0,0,0` is R3:r to R3:h (rhombohedral to
hexagonal setting)
* `a,b,c;-1/4,-1/4,-1/4` is Pnnn:1 to Pnnn:2 (change in origin
choice)
* `b,c,a;-1/2,-1/2,-1/2` is Bbab:1 to Ccca:2 (change in setting
and origin)
Can transform points (coords), lattices and symmetry operations.
Used for transforming magnetic space groups since these are
commonly used in multiple settings, due to needing to transform
between magnetic and non-magnetic settings.
See: International Tables for Crystallography (2016). Vol. A,
Chapter 1.5, pp. 75–106.
"""
# using capital letters in violation of PEP8 to
# be consistent with variables in supplied reference,
# for easier debugging in future
self._P, self._p = P, p
@classmethod
def from_transformation_string(cls, transformation_string="a,b,c;0,0,0"):
"""
Construct JonesFaithfulTransformation from its transformation string.
:param transformation_string: transformation string in Jones faithful notation
:return:
"""
P, p = JonesFaithfulTransformation.parse_transformation_string(transformation_string)
return cls(P, p)
@classmethod
def from_origin_shift(cls, origin_shift="0,0,0"):
"""
Construct JonesFaithfulTransformation from its origin shift string.
:param origin_shift: origin shift string, e.g. "0,0,0"
:return:
"""
P = np.identity(3)
p = [float(Fraction(x)) for x in origin_shift.split(",")]
return cls(P, p)
@staticmethod
def parse_transformation_string(
transformation_string: str = "a,b,c;0,0,0",
) -> Tuple[Union[List[List[float]], np.ndarray], List[float]]:
"""
Args:
transformation_string (str, optional): Defaults to "a,b,c;0,0,0".
Raises:
ValueError: When transformation string fails to parse.
Returns:
Tuple[Union[List[List[float]], np.ndarray], List[float]]: transformation matrix & vector
"""
try:
a = np.array([1, 0, 0])
b = np.array([0, 1, 0])
c = np.array([0, 0, 1])
b_change, o_shift = transformation_string.split(";")
basis_change = b_change.split(",")
origin_shift = o_shift.split(",")
# add implicit multiplication symbols
basis_change = [
re.sub(
r"(?<=\w|\))(?=\() | (?<=\))(?=\w) | (?<=(\d|a|b|c))(?=([abc]))",
r"*",
x,
flags=re.X,
)
for x in basis_change
]
# should be fine to use eval here but be mindful for security
# reasons
# see http://lybniz2.sourceforge.net/safeeval.html
# could replace with regex? or sympy expression?
P = np.array([eval(x, {"__builtins__": None}, {"a": a, "b": b, "c": c}) for x in basis_change])
P = P.transpose() # by convention
p = [float(Fraction(x)) for x in origin_shift]
return P, p
except Exception:
raise ValueError("Failed to parse transformation string.")
@property
def P(self) -> List[List[float]]:
"""
:return: transformation matrix
"""
return self._P
@property
def p(self) -> List[float]:
"""
:return: translation vector
"""
return self._p
@property
def inverse(self) -> "JonesFaithfulTransformation":
"""
:return: JonesFaithfulTransformation
"""
Q = np.linalg.inv(self.P)
return JonesFaithfulTransformation(Q, -np.matmul(Q, self.p))
@property
def transformation_string(self) -> str:
"""
:return: transformation string
"""
return self._get_transformation_string_from_Pp(self.P, self.p)
@staticmethod
def _get_transformation_string_from_Pp(P: Union[List[List[float]], np.ndarray], p: List[float]) -> str:
P = np.array(P).transpose()
P_string = transformation_to_string(P, components=("a", "b", "c"))
p_string = transformation_to_string(np.zeros((3, 3)), p)
return P_string + ";" + p_string
def transform_symmop(self, symmop: Union[SymmOp, MagSymmOp]) -> Union[SymmOp, MagSymmOp]:
"""
Takes a symmetry operation and transforms it.
:param symmop: SymmOp or MagSymmOp
:return:
"""
W = symmop.rotation_matrix
w = symmop.translation_vector
Q = np.linalg.inv(self.P)
W_ = np.matmul(np.matmul(Q, W), self.P)
I = np.identity(3)
w_ = np.matmul(Q, (w + np.matmul(W - I, self.p)))
w_ = np.mod(w_, 1.0)
if isinstance(symmop, MagSymmOp):
return MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=W_,
translation_vec=w_,
time_reversal=symmop.time_reversal,
tol=symmop.tol,
)
if isinstance(symmop, SymmOp):
return SymmOp.from_rotation_and_translation(rotation_matrix=W_, translation_vec=w_, tol=symmop.tol)
raise RuntimeError("symmop must be a SymmOp or MagSymmOp")
def transform_coords(self, coords: Union[List[List[float]], np.ndarray]) -> List[List[float]]:
"""
Takes a list of coordinates and transforms them.
:param coords: List of coords
:return:
"""
new_coords = []
for x in coords:
x = np.array(x)
Q = np.linalg.inv(self.P)
x_ = np.matmul(Q, (x - self.p)) # type: ignore
new_coords.append(x_.tolist())
return new_coords
def transform_lattice(self, lattice):
# type: (Lattice) -> Lattice
"""
Takes a Lattice object and transforms it.
:param lattice: Lattice
:return:
"""
return Lattice(np.matmul(lattice.matrix, self.P))
def __eq__(self, other):
return np.allclose(self.P, other.P) and np.allclose(self.p, other.p)
def __str__(self):
return self.transformation_string
def __repr__(self):
return f"JonesFaithfulTransformation with P:\n{self.P}\nand p:\n{self.p}"
|
materialsproject/pymatgen
|
pymatgen/symmetry/settings.py
|
Python
|
mit
| 7,555
|
[
"pymatgen"
] |
6d469786151f9907ac0403bce7dd212ec2ffc437b4893a15362ee8907f6f9e70
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import functools
import itertools
import math
import os
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
class RGBToYIQTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yiq and yiq_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class RGBToYUVTest(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yuv and yuv_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YUV and back, as a batch and individually
with self.cached_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yuv(batch0)
batch2 = image_ops.yuv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yuv, split0))
split2 = list(map(image_ops.yuv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
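# Editor's note (illustrative sketch, not part of the original test):
# the per-pixel loops above apply the BT.601 luma weights; a vectorized
# NumPy equivalent would be
#
#   gray = images[..., :3].dot([0.2989, 0.5870, 0.1140])
#   out = gray[..., np.newaxis].astype(np.uint8)  # truncates like int()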
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testGrayscaleToRGBInputValidation(self):
# tests whether the grayscale_to_rgb function raises
# an exception if the input images' last dimension is
# not of size 1, i.e. the images have shape
# [batch size, height, width] or [height, width]
# tests if an exception is raised if a three dimensional
# input is used, i.e. the images have shape [batch size, height, width]
with self.cached_session(use_gpu=True):
# 3-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "Last dimension of a grayscale image should be size 1"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
# tests if an exception is raised if a two dimensional
# input is used, i.e. the images have shape [height, width]
with self.cached_session(use_gpu=True):
# 1-D input without batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([2])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# this is the error message we expect the function to raise
err_msg = "must be at least two-dimensional"
with self.assertRaisesRegex(ValueError, err_msg):
image_ops.grayscale_to_rgb(x_tf)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.cached_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.cached_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.cached_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.cached_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_less_zero_float32(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_uint8(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image_ops.adjust_gamma(x, gamma=-1)
def test_adjust_gamma_less_zero_tensor(self):
"""White image should be returned for gamma equal to zero"""
with self.cached_session():
x_data = np.random.uniform(0, 1.0, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = constant_op.constant(-1.0, dtype=dtypes.float32)
err_msg = "Gamma should be a non-negative real number"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
image = image_ops.adjust_gamma(x, gamma=y)
self.evaluate(image)
def _test_adjust_gamma_uint8(self, gamma):
"""Verifying the output with expected results for gamma
correction for uint8 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 255, (8, 8)).astype(np.uint8)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = np.trunc(self.evaluate(y))
# calculate gamma correction using numpy
# firstly, transform uint8 to float representation
# then perform correction
y_np = np.power(x_np / 255.0, gamma)
# convert correct numpy image back to uint8 type
y_np = np.trunc(np.clip(y_np * 255.5, 0, 255.0))
self.assertAllClose(y_tf, y_np, 1e-6)
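# Editor's worked example (not part of the original test): with
# gamma = 0.5 a mid-grey pixel brightens -- (128 / 255.) ** 0.5 ~= 0.7085
# and trunc(0.7085 * 255.5) = 181, so uint8 value 128 maps to 181.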
def _test_adjust_gamma_float32(self, gamma):
"""Verifying the output with expected results for gamma
correction for float32 images
"""
with self.cached_session():
x_np = np.random.uniform(0, 1.0, (8, 8))
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=gamma)
y_tf = self.evaluate(y)
y_np = np.clip(np.power(x_np, gamma), 0, 1.0)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_one_float32(self):
"""Same image should be returned for gamma equal to one"""
self._test_adjust_gamma_float32(1.0)
def test_adjust_gamma_one_uint8(self):
self._test_adjust_gamma_uint8(1.0)
def test_adjust_gamma_zero_uint8(self):
"""White image should be returned for gamma equal
to zero for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.0)
def test_adjust_gamma_less_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=0.5)
def test_adjust_gamma_greater_one_uint8(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for uint8 images
"""
self._test_adjust_gamma_uint8(gamma=2.0)
def test_adjust_gamma_less_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half for float32 images
"""
self._test_adjust_gamma_float32(0.5)
def test_adjust_gamma_greater_one_float32(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two for float32 images
"""
self._test_adjust_gamma_float32(2.0)
def test_adjust_gamma_zero_float32(self):
"""White image should be returned for gamma equal
to zero for float32 images
"""
self._test_adjust_gamma_float32(0.0)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = self.evaluate(y)
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class FlipImageBenchmark(test.Benchmark):
def _benchmarkFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkRandomFlipLeftRight(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkRandomFlipLeftRight_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkRandomFlipLeftRight_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def _benchmarkBatchedRandomFlipLeftRight(self, device, cpu_count):
image_shape = [16, 299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
run_op = image_ops.random_flip_left_right(inputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s step_time: "
"%.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkBatchedRandomFlipLeftRight_16_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkFlipLeftRightCpu1(self):
self._benchmarkFlipLeftRight("/cpu:0", 1)
def benchmarkFlipLeftRightCpuAll(self):
self._benchmarkFlipLeftRight("/cpu:0", None)
def benchmarkFlipLeftRightGpu(self):
self._benchmarkFlipLeftRight(test.gpu_device_name(), None)
def benchmarkRandomFlipLeftRightCpu1(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", 1)
def benchmarkRandomFlipLeftRightCpuAll(self):
self._benchmarkRandomFlipLeftRight("/cpu:0", None)
def benchmarkRandomFlipLeftRightGpu(self):
self._benchmarkRandomFlipLeftRight(test.gpu_device_name(), None)
def benchmarkBatchedRandomFlipLeftRightCpu1(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", 1)
def benchmarkBatchedRandomFlipLeftRightCpuAll(self):
self._benchmarkBatchedRandomFlipLeftRight("/cpu:0", None)
def benchmarkBatchedRandomFlipLeftRightGpu(self):
self._benchmarkBatchedRandomFlipLeftRight(test.gpu_device_name(), None)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustHue_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with self.benchmark_session() as sess:
self.evaluate(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_area_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" %
(results["name"],
1000 * results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testBatchSaturation(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
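  # A worked example (illustration only, not called by the tests) of the
  # per-pixel math in _adjustSaturationNp: scale the HSV saturation channel,
  # clip it back into [0, 1], and leave hue and value untouched.
  @staticmethod
  def _saturation_pixel_example():
    r, g, b = 90.0, 255.0, 1.0  # one RGB pixel from the test data above
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    s = min(1.0, max(0.0, s * 0.5))  # saturation_factor = 0.5
    return colorsys.hsv_to_rgb(h, s, v)  # approximately (172.5, 255.0, 128.0)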
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session(use_gpu=True):
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self.evaluate(image_ops.adjust_saturation(x_np, scale))
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRightStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
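        # (Flips ~ Binomial(n=100, p=0.5), so the std dev is
        # sqrt(n * p * (1 - p)) = sqrt(25) = 5, hence the bound of 20.)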
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down),
)
def testRandomFlipStateless(self, func):
with test_util.use_gpu():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
if y_tf_eval[0][0] == 1:
self.assertAllEqual(y_tf_eval, x_np)
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval, y_np)
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
# Verify that results are deterministic.
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down)
)
def testRandomFlipStatelessWithBatch(self, func):
with test_util.use_gpu():
batch_size = 16
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
for j in range(batch_size):
if y_tf_eval[j][0][0] == 1:
self.assertAllEqual(y_tf_eval[j], x_np[j])
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval[j], y_np[j])
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDownStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
      # Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertIsNone(transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegex(ValueError, "must be > 0"):
op(p_zero_dim)
      # Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegex(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session(use_gpu=True):
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session(use_gpu=True):
for k in xrange(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testFlipImageUnknownShape(self):
expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],
[[9, 10, 11], [6, 7, 8]]]])
def generator():
image_input = np.array(
[[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)
yield image_input
dataset = dataset_ops.Dataset.from_generator(
generator,
output_types=dtypes.int32,
output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))
dataset = dataset.map(image_ops.flip_left_right)
image_flipped_via_dataset_map = get_single_element.get_single_element(
dataset.take(1))
self.assertAllEqual(image_flipped_via_dataset_map, expected_output)
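    # Note on this test: routing flip_left_right through a tf.data map means
    # the op may see inputs whose static shape is known only from the dataset
    # signature, which exercises the unknown-shape code path of the kernel.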
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
    y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
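  # Worked example of the formula above (illustration only): with a channel
  # mean of 100 and contrast_factor = 2.0, a pixel value of 135 maps to
  # 100 + 2.0 * (135 - 100) = 170; values are stretched away from (or, for
  # factors < 1, squeezed towards) the per-image, per-channel mean.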
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"contrast_factor must be scalar|"
"Shape must be rank 0 but is rank 1"):
image_ops.adjust_contrast(x_np, [2.0])
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, tol)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat32(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat16(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float16).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float16).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255., tol=1e-3)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
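# Note on the brightness deltas above: adjust_brightness operates in the
# [0, 1] float range, so delta = 10/255 shifts a uint8 image by 10 gray
# levels with saturation at 0 and 255, while float images are shifted without
# clipping, which is why 255 + 10 = 265 appears in the float32 and float16
# expectations.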
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
mn = np.mean(x)
std = np.std(x)
stddev = max(std, 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
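  # The stddev floor above, 1 / sqrt(num_pixels), is what keeps a perfectly
  # uniform image (std = 0) from dividing by zero; see testUniformImage. As a
  # worked example (illustration only): a 19x19x3 image of constant 249 has
  # num_pixels = 1083, so stddev = max(0, 1 / sqrt(1083)) ~= 0.0304 and every
  # output value is (249 - 249) / 0.0304 = 0.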
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.float32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.cached_session(use_gpu=True):
whiten_np = self.evaluate(whiten)
self.assertFalse(np.any(np.isnan(whiten_np)))
def testBatchWhitening(self):
imgs_np = np.random.uniform(0., 255., [4, 24, 24, 3])
whiten_np = [self._NumpyPerImageWhitening(img) for img in imgs_np]
with self.cached_session(use_gpu=True):
imgs = constant_op.constant(imgs_np)
whiten = image_ops.per_image_standardization(imgs)
whiten_tf = self.evaluate(whiten)
for w_tf, w_np in zip(whiten_tf, whiten_np):
self.assertAllClose(w_tf, w_np, atol=1e-4)
def testPreservesDtype(self):
imgs_npu8 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.uint8)
imgs_tfu8 = constant_op.constant(imgs_npu8)
whiten_tfu8 = image_ops.per_image_standardization(imgs_tfu8)
self.assertEqual(whiten_tfu8.dtype, dtypes.uint8)
imgs_npf16 = np.random.uniform(0., 255., [2, 5, 5, 3]).astype(np.float16)
imgs_tff16 = constant_op.constant(imgs_npf16)
whiten_tff16 = image_ops.per_image_standardization(imgs_tff16)
self.assertEqual(whiten_tff16.dtype, dtypes.float16)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
with self.cached_session(use_gpu=True):
return self.evaluate(y)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against the generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (
([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], r"height must be >= target \+ offset"),
([0, 2, 3, 3], r"width must be >= target \+ offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shapes = [[13, 9, 3], [5, 13, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_shape = [4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
x_shape = [2, 4, 8, 1]
x_np = np.array(
[[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1],
[8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 2, 1]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[[3, 4, 5, 6], [3, 4, 5, 6]],
[[6, 5, 4, 3], [6, 5, 4, 3]]]).reshape([2, 2, 4, 1])
with self.cached_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testCropping2(self):
    # Test case for GitHub issue 10315.
x_shapes = [[240, 320, 3], [5, 240, 320, 3]]
expected_y_shapes = [[80, 106, 3], [5, 80, 106, 3]]
for x_shape, y_shape in zip(x_shapes, expected_y_shapes):
x_np = np.zeros(x_shape, dtype=np.int32)
y_np = np.zeros(y_shape, dtype=np.int32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y_tf = self.evaluate(image_ops.central_crop(x_np, 0.33))
self.assertAllEqual(y_tf, y_np)
self.assertAllEqual(y_tf.shape, y_np.shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test no-op fraction=1.0, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
# Test no-op fraction=0.5, with 3-D tensors.
self._assertShapeInference([50, 60, 3], 0.5, [26, 30, 3])
self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
self._assertShapeInference([50, None, 3], 0.5, [26, None, 3])
self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
self._assertShapeInference([50, 60, None], 0.5, [26, 30, None])
self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# Test no-op fraction=1.0, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 1.0, [5, 50, 60, 3])
self._assertShapeInference([5, None, 60, 3], 1.0, [5, None, 60, 3])
self._assertShapeInference([5, 50, None, 3], 1.0, [5, 50, None, 3])
self._assertShapeInference([5, None, None, 3], 1.0, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 1.0, [5, 50, 60, None])
self._assertShapeInference([5, None, None, None], 1.0,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 1.0,
[None, None, None, None])
# Test no-op fraction=0.5, with 4-D tensors.
self._assertShapeInference([5, 50, 60, 3], 0.5, [5, 26, 30, 3])
self._assertShapeInference([5, None, 60, 3], 0.5, [5, None, 30, 3])
self._assertShapeInference([5, 50, None, 3], 0.5, [5, 26, None, 3])
self._assertShapeInference([5, None, None, 3], 0.5, [5, None, None, 3])
self._assertShapeInference([5, 50, 60, None], 0.5, [5, 26, 30, None])
self._assertShapeInference([5, None, None, None], 0.5,
[5, None, None, None])
self._assertShapeInference([None, None, None, None], 0.5,
[None, None, None, None])
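  # A sketch of the size arithmetic consistent with the shapes asserted above
  # (inferred from the expected values, not quoted from the central_crop
  # implementation): the crop start is floored to an integer, which is why a
  # 50-pixel dimension at fraction 0.5 yields 26 rather than 25.
  @staticmethod
  def _central_crop_dim_example(d=50, fraction=0.5):
    start = int((d - d * fraction) / 2)  # int((50 - 25) / 2) = 12
    return d - 2 * start  # 50 - 24 = 26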
def testErrorOnInvalidCentralCropFractionValues(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
def testErrorOnInvalidShapes(self):
x_shapes = [None, [], [3], [3, 9], [3, 9, 3, 9, 3]]
for x_shape in x_shapes:
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.5)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
y = image_ops.central_crop(x_np, 1.0)
self.assertTrue(y.op.name.startswith("central_crop"))
class PadToBoundingBoxTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def pad_bbox(*args):
return image_ops.pad_to_bounding_box(*args)
with self.cached_session(use_gpu=True):
return self.evaluate(pad_bbox(x_tensor, offset_height, offset_width,
target_height, target_width))
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testInt64(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
i = constant_op.constant([1, 0, 4, 3], dtype=dtypes.int64)
y_tf = image_ops.pad_to_bounding_box(x, i[0], i[1], i[2], i[3])
with self.cached_session(use_gpu=True):
self.assertAllClose(y, self.evaluate(y_tf))
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2), ([2, 0, 2], 2, 2), ([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain back slashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParamsScalarInputs(self):
# In this test, inputs do not get converted to tensors before calling the
    # tf.function. The error message here is raised in Python
    # since the Python function has direct access to the scalars.
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[False])
def testBadParamsTensorInputsEager(self):
    # In this test, inputs get converted to EagerTensors before calling the
    # tf.function. The error message here is raised in Python
    # since the Python function has direct access to the tensor's values.
with context.eager_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = (
(-1, 0, 4, 4,
"offset_height must be >= 0"),
(0, -1, 4, 4,
"offset_width must be >= 0"),
(2, 0, 4, 4,
"height must be <= target - offset"),
(0, 2, 4, 4,
"width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(
x, x_shape, *config_item, use_tensor_inputs_options=[True])
@parameterized.named_parameters([("OffsetHeight", (-1, 0, 4, 4)),
("OffsetWidth", (0, -1, 4, 4)),
("Height", (2, 0, 4, 4)),
("Width", (0, 2, 4, 4))])
def testBadParamsTensorInputsGraph(self, config):
    # In this test, inputs get converted to tensors before calling the
    # tf.function. The error message here is raised during shape inference.
with context.graph_mode():
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
self._assertRaises(
x,
x_shape,
*config,
"Paddings must be non-negative",
use_tensor_inputs_options=[True])
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.pad_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.op.name.startswith("pad_to_bounding_box"))
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.cached_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# min_object_covered as tensor
min_object_covered_t = ops.convert_to_tensor(min_object_covered)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered_t,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
# be fixed to not use rejection sampling and generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
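  # Worked example of the relative-coordinate convention above (illustration
  # only): with height = 40, width = 50 and the pixel box ymin, xmin, ymax,
  # xmax = 3, 2, 13, 12, the normalized box is
  # (3/40, 2/50, 13/40, 12/50) = (0.075, 0.04, 0.325, 0.24).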
def testSampleDistortedBoundingBoxShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=array_ops.placeholder(dtypes.float32),
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testDefaultMinObjectCovered(self):
    # By default, min_object_covered=0.1 if not provided.
with self.cached_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
def _testStatelessSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered,
aspect_ratio_range, area_range):
with test_util.use_gpu():
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (
np.array(bounding_box, dtype=np.float32).reshape([1, 1, 4]))
iterations = 2
test_seeds = [(1, 2), (3, 4), (5, 6)]
for seed in test_seeds:
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
for _ in range(iterations):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(bounding_box_np,
dtype=dtypes.float32,
shape=bounding_box_np.shape)
begin, size, _ = image_ops.stateless_sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
seed=seed,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
y_tf = self.evaluate(y)
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratio = area / original_area
area_ratios.append(area_ratio)
fraction_object_covered.append(
float(np.sum(y_tf)) / bounding_box_area)
# Check that `area_ratio` is within valid range.
self.assertLessEqual(area_ratio, area_range[1])
self.assertGreaterEqual(area_ratio, area_range[0])
        # Each array should consist of one value just repeated `iterations`
        # times because the same seed is used.
self.assertEqual(len(set(aspect_ratios)), 1)
self.assertEqual(len(set(area_ratios)), 1)
self.assertEqual(len(set(fraction_object_covered)), 1)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWholeImageBoundingBoxStateless(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testWithBoundingBoxStateless(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
# Test both scalar and tensor input for `min_object_covered`.
for min_obj_covered in [0.1, constant_op.constant(0.1)]:
self._testStatelessSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_obj_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShapeStateless(self):
with test_util.use_gpu():
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[[[0.0, 0.0, 1.0, 1.0]]],
shape=[1, 1, 4],
dtype=dtypes.float32,
)
bbox_func = functools.partial(
image_ops.stateless_sample_distorted_bounding_box,
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Check error is raised with wrong seed shapes.
for seed in [1, (1, 2, 3)]:
with self.assertRaises((ValueError, errors.InvalidArgumentError)):
begin, end, bbox_for_drawing = bbox_func(seed=seed)
test_seed = (1, 2)
begin, end, bbox_for_drawing = bbox_func(seed=test_seed)
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
# Actual run to make sure shape is correct inside Compute().
begin = self.evaluate(begin)
end = self.evaluate(end)
bbox_for_drawing = self.evaluate(bbox_for_drawing)
self.assertAllEqual([3], begin.shape)
self.assertAllEqual([3], end.shape)
self.assertAllEqual([1, 1, 4], bbox_for_drawing.shape)
class ResizeImagesV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
  # Some resize methods, such as Gaussian, are non-interpolating: they change
  # the image even when there is no scale change. For some tests we therefore
  # only check the values of the value-preserving methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
  def shouldRunOnGPU(self, method, nptype):
    # Only nearest-neighbor with float inputs is exercised on the GPU here.
    return (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
            nptype in [np.float32, np.float64])
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers unsupported in ResizeBilinear
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images_v2(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images_v2(image, target_shape, target_method)
        if target_method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
    # Test a size whose width is specified as a tensor that is the sum of two
    # tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
for legacy_method, new_method in methods_to_test:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testBfloat16MultipleOps(self):
target_height = 8
target_width = 12
img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)
img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16")
new_size = constant_op.constant([target_height, target_width])
img_methods = [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA
]
for method in img_methods:
out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)
out_op_f32 = image_ops.resize_images_v2(img, new_size, method)
bf16_val = self.evaluate(out_op_bf16)
f32_val = self.evaluate(out_op_f32)
self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = (max_h, max_w)
x_tensor = x
def resize_func(t,
target_max=target_max,
preserve_aspect_ratio=preserve_aspect_ratio):
return image_ops.resize_images(
t, ops.convert_to_tensor(target_max),
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return self.evaluate(resize_func(x_tensor))
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 80, 10]
x = np.random.uniform(size=x_shape)
for preserve_aspect_ratio in [True, False]:
with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):
        expect_shape = ([10, 250, 200, 10]
                        if preserve_aspect_ratio else [10, 250, 250, 10])
self._assertResizeCheckShape(
x,
x_shape, [250, 250],
expect_shape,
preserve_aspect_ratio=preserve_aspect_ratio)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
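# The helper below is an illustrative sketch added for reference; it is not
# part of the test suite and is not called by any test.
def _preserve_aspect_ratio_shape_sketch(height, width, target_height,
                                        target_width):
  """Shape rule asserted by the preserve-aspect-ratio tests above and below.
  Both dimensions are scaled by the single ratio that fits the image inside
  the target, min(target_height / height, target_width / width); e.g. a
  [100, 80] image resized toward [250, 250] becomes [250, 200]. Rounding
  details are approximate here.
  """
  scale = min(float(target_height) / height, float(target_width) / width)
  return [int(round(height * scale)), int(round(width * scale))]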
class ResizeImagesTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
METHODS = [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.BICUBIC, image_ops.ResizeMethodV1.AREA
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
  def shouldRunOnGPU(self, method, nptype):
    # Only nearest-neighbor with float inputs is exercised on the GPU here.
    return (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR and
            nptype in [np.float32, np.float64])
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
        with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.cached_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethodV1.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethodV1.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2], [
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images(image, target_shape, method)
if (method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images(image, target_shape, target_method)
        if (target_method == image_ops.ResizeMethodV1.NEAREST_NEIGHBOR or
target_shape == image.shape[1:3]):
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
    # Test a size whose width is specified as a tensor that is the sum of two
    # tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
      with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
64.0, 48.0, 32.0, 32.0, 48.0, 48.0, 48.0, 48.0, 32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0, 50.0, 75.0, 100.0, 100.0, 50.0, 75.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethodV1.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=False)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
img_shape = [1, 3, 2, 1]
data = [6, 3, 3, 6, 6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethodV1.BILINEAR] = [
6.0, 5.0, 4.0, 3.0, 4.5, 4.5, 4.5, 4.5, 3.0, 4.0, 5.0, 6.0, 4.5, 5.5,
6.5, 7.5, 6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethodV1.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0,
9.0, 9.0, 6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethodV1.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethodV1.AREA] = [
6.0, 6.0, 6.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 6.0, 3.0, 3.0,
3.0, 6.0, 6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for method in [
image_ops.ResizeMethodV1.BILINEAR,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
image_ops.ResizeMethodV1.AREA
]:
with self.cached_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], method, align_corners=True)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
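  # The helper below is an illustrative sketch added for reference; it is not
  # called by any test.
  def _legacySourceCoordinate(self, out_index, in_size, out_size,
                              align_corners):
    """Legacy coordinate mapping behind the align_corners expectations above.
    With align_corners=True the input and output corner pixels line up, so
    src = i * (in - 1) / (out - 1); with align_corners=False (and no
    half-pixel centers) it is src = i * in / out. The real kernels also clamp
    and interpolate; this states the geometry only.
    """
    del self  # Pure arithmetic; kept as a method only for locality.
    if align_corners and out_size > 1:
      return out_index * (in_size - 1) / (out_size - 1)
    return out_index * in_size / out_size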
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [
128, 128, 64, 64, 128, 128, 64, 64, 64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [
128, 135, 96, 55, 64, 114, 134, 128, 78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84, 74, 70, 95, 122, 115, 69, 49, 55, 100,
105, 75, 43, 50, 89, 105, 100, 57, 54, 74, 96, 91, 65, 55, 58, 70, 69,
75, 81, 80, 72, 69, 70, 105, 112, 75, 36, 45, 92, 111, 105
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.BICUBIC)
resized = self.evaluate(y)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethodV1.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethodV1.BILINEAR,
align_corners=align_corners)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session(use_gpu=True):
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = [max_h, max_w]
x_tensor = x
y = image_ops.resize_images(
x_tensor, target_max, preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session(use_gpu=True):
return self.evaluate(y)
def _assertResizeEqual(self, x, x_shape, y, y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self, x, x_shape, target_shape,
y_shape, preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [250, 250], [10, 250, 250, 10],
preserve_aspect_ratio=False)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
class ResizeImageWithPadV1Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session(use_gpu=True):
return self.evaluate(
image_ops.resize_image_with_pad_v1(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_pad_v1(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 3, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [1, 3]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
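# Illustrative sketch of the geometry behind the pad expectations above; a
# hypothetical helper added for reference, not called by any test.
def _resize_with_pad_geometry_sketch(height, width, target_height,
                                     target_width):
  """Approximate geometry of resize_image_with_pad.
  The image is first resized to fit inside the target while preserving the
  aspect ratio (the min-ratio rule sketched earlier), then zero-padded, with
  any odd leftover pixel placed after the image. Rounding details are
  approximate here; the real op may round differently at exact halves.
  """
  scale = min(float(target_height) / height, float(target_width) / width)
  resized_h = int(height * scale)
  resized_w = int(width * scale)
  pad_top = (target_height - resized_h) // 2
  pad_left = (target_width - resized_w) // 2
  return (resized_h, resized_w), (pad_top, pad_left)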
# half_pixel_centers not supported by XLA
@test_util.for_all_test_methods(test_util.disable_xla, "b/127616992")
class ResizeImageWithPadV2Test(test_util.TensorFlowTestCase):
def _ResizeImageWithPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
with self.cached_session(use_gpu=True):
return self.evaluate(
image_ops.resize_image_with_pad_v2(x_tensor, target_height,
target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
    y = image_ops.resize_image_with_pad_v2(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
# Test with 3-D tensors.
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
# Test with 4-D tensors.
self._assertShapeInference([5, 55, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 66, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, 60, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 50, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, None, None, 3], 55, 66, [5, 55, 66, 3])
self._assertShapeInference([5, 55, 66, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, 50, 60, None], 55, 66, [5, 55, 66, None])
self._assertShapeInference([5, None, None, None], 55, 66,
[5, 55, 66, None])
self._assertShapeInference([None, None, None, None], 55, 66,
[None, 55, 66, None])
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Reduce vertical dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 3.5, 5.5, 0]
y_shape = [1, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Reduce horizontal dimension
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5, 0, 0]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [3.5, 5.5]
y_shape = [1, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
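# Illustrative sketch of the centering rule asserted by the crop-or-pad tests
# below; a hypothetical helper added for reference, not called by any test.
def _crop_or_pad_offset_sketch(size, target):
  """Per-dimension rule behind the resize_image_with_crop_or_pad tests.
  When padding, the smaller half of the zeros goes before the image (floor
  division); when cropping, the kept window starts at the same floored
  offset. Applied independently to rows and columns.
  """
  if target > size:
    pad_before = (target - size) // 2
    return "pad", pad_before, target - size - pad_before
  crop_before = (size - target) // 2
  return "crop", crop_before, size - target - crop_before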
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
@def_function.function
def resize_crop_or_pad(*args):
return image_ops.resize_image_with_crop_or_pad(*args)
with self.cached_session(use_gpu=True):
return self.evaluate(
resize_crop_or_pad(x_tensor, target_height, target_width))
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0, 0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3, 6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4, 8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [3, 4, 5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
x_shape = [8, 2, 1]
y = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 2, 3, 6, 7, 0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2, 3, 4, 5, 6, 7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0, 0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behavior
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"inner 3 dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_image_with_crop_or_pad(image, 55, 66)
self.assertTrue(y.op.name.startswith("resize_image_with_crop_or_pad"))
def simple_color_ramp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
    with self.cached_session(use_gpu=True):
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = self.evaluate([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
      with self.cached_session(use_gpu=True):
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = self.evaluate([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testCropAndDecodeJpeg(self):
    with self.cached_session():
      # Read a real jpeg, then decode and crop it in two ways.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
crop_windows = [[0, 0, 5, 5], [0, 0, 5, w], [0, 0, h, 5],
[h - 6, w - 5, 6, 5], [6, 5, 15, 10], [0, 0, h, w]]
for crop_window in crop_windows:
# Explicit two stages: decode + crop.
image1 = image_ops.decode_jpeg(jpeg0)
y, x, h, w = crop_window
image1_crop = image_ops.crop_to_bounding_box(image1, y, x, h, w)
# Combined decode+crop.
image2 = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
# Combined decode+crop should have the same shape inference
self.assertAllEqual(image1_crop.get_shape().as_list(),
image2.get_shape().as_list())
# CropAndDecode should be equal to DecodeJpeg+Crop.
image1_crop, image2 = self.evaluate([image1_crop, image2])
self.assertAllEqual(image1_crop, image2)
def testCropAndDecodeJpegWithInvalidCropWindow(self):
    with self.cached_session():
      # Read a real jpeg, then attempt decode-and-crop with invalid windows.
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
h, w, _ = 256, 128, 3
# Invalid crop windows.
crop_windows = [[-1, 11, 11, 11], [11, -1, 11, 11], [11, 11, -1, 11],
[11, 11, 11, -1], [11, 11, 0, 11], [11, 11, 11, 0],
[0, 0, h + 1, w], [0, 0, h, w + 1]]
for crop_window in crop_windows:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Invalid JPEG data or crop window"):
result = image_ops.decode_and_crop_jpeg(jpeg0, crop_window)
self.evaluate(result)
def testSynthetic(self):
    with self.cached_session(use_gpu=True):
# Encode it, then decode it, then encode it
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.cached_session(use_gpu=True) as sess:
# Encode the ramp, decode it, then re-encode and decode again.
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = self.evaluate(
[jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.cached_session(use_gpu=True) as sess:
# Compare decoding with dct_method=INTEGER_FAST and with the default.
# They should be the same.
image0 = constant_op.constant(simple_color_ramp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = self.evaluate([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
def testExtractJpegShape(self):
# Read a real jpeg and verify shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.cached_session(use_gpu=True):
jpeg = io_ops.read_file(path)
# Extract shape without decoding.
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
self.assertAllEqual(image_shape, [256, 128, 3])
def testExtractJpegShapeforCmyk(self):
# Read a cmyk jpeg image, and verify its shape.
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1_cmyk.jpg")
with self.cached_session(use_gpu=True):
jpeg = io_ops.read_file(path)
image_shape = self.evaluate(image_ops.extract_jpeg_shape(jpeg))
# Cmyk jpeg image has 4 channels.
self.assertAllEqual(image_shape, [256, 128, 4])
def testRandomJpegQuality(self):
# A previous implementation of random_jpeg_quality had a bug. This unit
# test exercises the fixed version, but due to forward compatibility it
# can only run when the fixed version is in use.
# Test jpeg quality dynamic randomization.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
random_jpeg_image = image_ops.random_jpeg_quality(image, 40, 100)
with self.cached_session(use_gpu=True) as sess:
# Test randomization.
random_jpeg_images = [sess.run(random_jpeg_image) for _ in range(5)]
are_images_equal = []
for i in range(1, len(random_jpeg_images)):
# Most of them should be different if randomization is occurring
# correctly.
are_images_equal.append(
np.array_equal(random_jpeg_images[0], random_jpeg_images[i]))
self.assertFalse(all(are_images_equal))
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
def testStatelessRandomJpegQuality(self):
# Test deterministic randomness in jpeg quality by checking that the same
# sequence of jpeg quality adjustments are returned each round given the
# same seed.
with test_util.use_gpu():
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
jpeg_quality = (40, 100)
seeds_list = [(1, 2), (3, 4)]
iterations = 2
random_jpeg_images_all = [[] for _ in range(iterations)]
for random_jpeg_images in random_jpeg_images_all:
for seed in seeds_list:
distorted_jpeg = image_ops.stateless_random_jpeg_quality(
image, jpeg_quality[0], jpeg_quality[1], seed=seed)
# Verify that the random jpeg image is different from the original
# jpeg image.
self.assertNotAllEqual(image, distorted_jpeg)
random_jpeg_images.append(self.evaluate(distorted_jpeg))
# Verify that the results are identical given the same seed.
for i in range(1, iterations):
self.assertAllEqual(random_jpeg_images_all[0],
random_jpeg_images_all[i])
def testAdjustJpegQuality(self):
# Test that image_ops.adjust_jpeg_quality works when jpeg quality
# is an int (not a tensor), for backward compatibility.
with ops.Graph().as_default(), self.test_session():
np.random.seed(7)
jpeg_quality = np.random.randint(40, 100)
path = ("tensorflow/core/lib/jpeg/testdata/medium.jpg")
jpeg = io_ops.read_file(path)
image = image_ops.decode_jpeg(jpeg)
adjust_jpeg_quality_image = image_ops.adjust_jpeg_quality(
image, jpeg_quality)
with self.cached_session(use_gpu=True) as sess:
sess.run(adjust_jpeg_quality_image)
def testAdjustJpegQualityShape(self):
with self.cached_session(use_gpu=True):
image = constant_op.constant(
np.arange(24, dtype=np.uint8).reshape([2, 4, 3]))
adjusted_image = image_ops.adjust_jpeg_quality(image, 80)
adjusted_image.shape.assert_is_compatible_with([None, None, 3])
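# The crop windows used above are [crop_y, crop_x, crop_height, crop_width].
# A minimal numpy sketch of the reference cropping behavior that
# decode_and_crop_jpeg is checked against (hypothetical helper, not used by
# the tests).
def crop_window_sketch(image, crop_window):
  y, x, h, w = crop_window
  return image[y:y + h, x:x + w, :]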
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = ((1, "lena_gray.png"), (4, "lena_rgba.png"),
(3, "lena_palette.png"), (4, "lena_palette_trns.png"))
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.cached_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = self.evaluate([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, self.evaluate(image1))
def testSynthetic(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.cached_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(simple_color_ramp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.cached_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.cached_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = simple_color_ramp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = self.evaluate([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def _testValid(self, filename):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.cached_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = self.evaluate([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testValid(self):
self._testValid("scan.gif")
self._testValid("optimized.gif")
def testShape(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y, y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate, y_np, atol=1e-5)
def testNoConvert(self):
# Tests with Tensor.op requires a graph.
with ops.Graph().as_default():
# Make sure converting to the same data type creates only an identity op
with self.cached_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEqual(y.op.type, "Identity")
self.assertEqual(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
# Make sure converting between integer types scales appropriately
# (see the scaling sketch after this class).
with self.cached_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
# Make sure converting between float types does nothing interesting
with self.cached_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.cached_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.cached_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
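# A sketch of the power-of-two scaling that convert_image_dtype applies
# between integer types, matching the expectations asserted above (the
# exact rule is an assumption stated here for intuition only).
def integer_scaling_sketch():
  # uint8 has 8 value bits and int16 has 15 (one bit is the sign), so
  # widening multiplies by 2**(15 - 8) == 128; narrowing divides by it.
  assert 255 * 128 == 32640         # uint8 -> int16, as in the tests above
  assert (255 * 256) // 256 == 255  # uint16 -> uint8 divides by 2**(16 - 8)
  assert (2**32) // (2**32) == 1    # int64 -> int32 collapses by 2**32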
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be floating-point numbers, so we only test
for approximate equality within a narrow error bound.
"""
# Create a TensorFlow session.
with self.cached_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = self.evaluate(y)
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
  x_np: Numpy array with 3 or 4 dimensions.

Returns:
  The total variation as a scalar for 3-D input, or a 1-D array with one
  value per image for 4-D input.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum over all axes.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum over the last 3 axes.
sum_axis = (1, 2, 3)
else:
  # The tests only pass 3- or 4-dim arrays; fail loudly otherwise.
  raise ValueError("x_np must have 3 or 4 dimensions, got %d" % dim)
tot_var = (np.sum(np.abs(dif1), axis=sum_axis) +
           np.sum(np.abs(dif2), axis=sum_axis))
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
# TODO(b/133851381): re-enable this test.
def disabled_testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2], [4, 7]]
# Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18], [29, 47]]
# Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120], [193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as
# well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :], b[np.newaxis, :], c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
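# A pure-numpy worked example of the total-variation arithmetic in
# testTotalVariationHandmade above (hypothetical helper, not used by the
# tests).
def total_variation_sketch():
  import numpy as np
  r = np.array([[1., 2.], [4., 7.]])
  row_diffs = np.abs(r[1:, :] - r[:-1, :]).sum()  # |4-1| + |7-2| = 8
  col_diffs = np.abs(r[:, 1:] - r[:, :-1]).sum()  # |2-1| + |7-4| = 4
  assert row_diffs + col_diffs == 12.0
  return row_diffs + col_diffs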
class FormatTest(test_util.TensorFlowTestCase):
def testFormats(self):
prefix = "tensorflow/core/lib"
paths = ("png/testdata/lena_gray.png", "jpeg/testdata/jpeg_merge_test1.jpg",
"gif/testdata/lena.gif")
decoders = {
"jpeg": functools.partial(image_ops.decode_jpeg, channels=3),
"png": functools.partial(image_ops.decode_png, channels=3),
"gif": lambda s: array_ops.squeeze(image_ops.decode_gif(s), axis=0),
}
with self.cached_session():
for path in paths:
contents = self.evaluate(io_ops.read_file(os.path.join(prefix, path)))
images = {}
for name, decode in decoders.items():
image = self.evaluate(decode(contents))
self.assertEqual(image.ndim, 3)
for prev_name, prev in images.items():
print("path %s, names %s %s, shapes %s %s" %
(path, name, prev_name, image.shape, prev.shape))
self.assertAllEqual(image, prev)
images[name] = image
def testError(self):
path = "tensorflow/core/lib/gif/testdata/scan.gif"
with self.cached_session():
for decode in image_ops.decode_jpeg, image_ops.decode_png:
with self.assertRaisesOpError(r"Got 12 frames"):
decode(io_ops.read_file(path)).eval()
class CombinedNonMaxSuppressionTest(test_util.TensorFlowTestCase):
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Due to failures, a separate test `testInvalidTensorInput` was
# created; it is identical to this one except that the input here is a
# scalar as opposed to a tensor.
def testInvalidPyInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = 2**31
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
# NOTE(b/142795960): parameterized tests do not work well with tf.tensor
# inputs. Due to failures, this test is identical to `testInvalidPyInput`
# except that the input here is a tensor as opposed to a scalar.
def testInvalidTensorInput(self):
boxes_np = [[[[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]]]
scores_np = [[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]
max_output_size_per_class = 5
max_total_size = ops.convert_to_tensor(2**31)
with self.assertRaisesRegex(
(TypeError, ValueError),
"type int64 that does not match expected type of int32|"
"Tensor conversion requested dtype int32 for Tensor with dtype int64"):
image_ops.combined_non_max_suppression(
boxes=boxes_np,
scores=scores_np,
max_output_size_per_class=max_output_size_per_class,
max_total_size=max_total_size)
class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
def testNonMaxSuppression(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [3, 0, 5])
def testInvalidShape(self):
def nms_func(box, score, max_output_size, iou_threshold):
  return image_ops.non_max_suppression(box, score, max_output_size,
                                       iou_threshold)
max_output_size = 3
iou_threshold = 0.5
# The boxes should be 2D of shape [num_boxes, 4].
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 2 but is rank 1"):
boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_threshold)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Dimension must be 4 but is 3"):
boxes = constant_op.constant([[0.0, 0.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, iou_threshold)
# `boxes` has shape [num_boxes, 4] and `scores` has shape [num_boxes];
# mismatched num_boxes dimensions raise an error.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Dimensions must be equal, but are 1 and 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9, 0.75])
nms_func(boxes, scores, max_output_size, iou_threshold)
# The scores should be 1D of shape [num_boxes].
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 1 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([[0.9]])
nms_func(boxes, scores, max_output_size, iou_threshold)
# The max_output_size should be a scalar (0-D).
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 0 but is rank 1"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, [max_output_size], iou_threshold)
# The iou_threshold should be a scalar (0-D).
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Shape must be rank 0 but is rank 2"):
boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
scores = constant_op.constant([0.9])
nms_func(boxes, scores, max_output_size, [[iou_threshold]])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testDataTypes(self):
# Test case for GitHub issue 20199.
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = float("-inf")
# Note: There are multiple versions of non_max_suppression: v2, v3, v4,
# and v5.
# gen_image_ops.non_max_suppression_v2:
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v2(
boxes, scores, max_output_size, iou_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v3
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices = gen_image_ops.non_max_suppression_v3(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v4.
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
selected_indices, _ = gen_image_ops.non_max_suppression_v4(
boxes, scores, max_output_size, iou_threshold, score_threshold)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
# gen_image_ops.non_max_suppression_v5.
soft_nms_sigma_np = float(0.0)
for dtype in [np.float16, np.float32]:
with self.cached_session():
boxes = constant_op.constant(boxes_np, dtype=dtype)
scores = constant_op.constant(scores_np, dtype=dtype)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np, dtype=dtype)
score_threshold = constant_op.constant(score_threshold_np, dtype=dtype)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np, dtype=dtype)
selected_indices, _, _ = gen_image_ops.non_max_suppression_v5(
boxes, scores, max_output_size, iou_threshold, score_threshold,
soft_nms_sigma)
selected_indices = self.evaluate(selected_indices)
self.assertAllClose(selected_indices, [3, 0, 5])
def testZeroIOUThreshold(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [1., 1., 1., 1., 1., 1.]
max_output_size_np = 3
iou_threshold_np = 0.0
with self.cached_session():
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices, [0, 3, 5])
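# A simplified pure-numpy greedy hard-NMS, to make the expected selections
# above ([3, 0, 5] and [0, 3, 5]) concrete. Boxes are [y1, x1, y2, x2].
# This is an illustrative sketch, not the TF kernel.
def greedy_nms_sketch(boxes, scores, max_output_size, iou_threshold):
  import numpy as np
  boxes = np.asarray(boxes, dtype=float)
  scores = np.asarray(scores, dtype=float)
  selected = []
  for i in np.argsort(-scores, kind="stable"):  # highest score first
    if len(selected) == max_output_size:
      break
    suppressed = False
    for j in selected:
      y1 = max(boxes[i, 0], boxes[j, 0])
      x1 = max(boxes[i, 1], boxes[j, 1])
      y2 = min(boxes[i, 2], boxes[j, 2])
      x2 = min(boxes[i, 3], boxes[j, 3])
      inter = max(0.0, y2 - y1) * max(0.0, x2 - x1)
      area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
      area_j = (boxes[j, 2] - boxes[j, 0]) * (boxes[j, 3] - boxes[j, 1])
      if inter / (area_i + area_j - inter) > iou_threshold:
        suppressed = True
        break
    if not suppressed:
      selected.append(int(i))
  return selected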
class NonMaxSuppressionWithScoresTest(test_util.TensorFlowTestCase):
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersWithSoftNMS(self):
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 6
iou_threshold_np = 1.0
score_threshold_np = 0.0
soft_nms_sigma_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
soft_nms_sigma = constant_op.constant(soft_nms_sigma_np)
selected_indices, selected_scores = \
image_ops.non_max_suppression_with_scores(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold,
soft_nms_sigma)
selected_indices, selected_scores = self.evaluate(
[selected_indices, selected_scores])
self.assertAllClose(selected_indices, [3, 0, 1, 5, 4, 2])
self.assertAllClose(selected_scores,
[0.95, 0.9, 0.384, 0.3, 0.256, 0.197],
rtol=1e-2, atol=1e-2)
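# The decayed scores asserted above are consistent with Gaussian soft-NMS
# decay, new_score = score * exp(-iou**2 / (2 * sigma)); the exact decay
# form is an assumption, shown here only to explain the 0.384 value.
def soft_nms_decay_sketch():
  import math
  iou = 0.9 / 1.1  # IoU of boxes 0 and 1 in the test above
  decayed = 0.75 * math.exp(-(iou ** 2) / (2 * 0.5))
  assert abs(decayed - 0.384) < 1e-2
  return decayed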
class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The padded output shape must be fully defined; without padding it is
# dynamic.
self.assertTrue(selected_indices_padded.shape.is_fully_defined())
self.assertFalse(selected_indices.shape.is_fully_defined())
with self.cached_session():
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold):
boxes = constant_op.constant(boxes)
scores = constant_op.constant(scores)
max_output_size = constant_op.constant(max_output_size)
iou_threshold = constant_op.constant(iou_threshold)
yp, nvp = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
y, n = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The padded output shape must be fully defined; without padding it is
# dynamic.
self.assertTrue(yp.shape.is_fully_defined())
self.assertFalse(y.shape.is_fully_defined())
return yp, nvp, y, n
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
selected_indices_padded, num_valid_padded, selected_indices, num_valid = \
func(boxes_np, scores_np, max_output_size_np, iou_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(self.evaluate(num_valid_padded), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(self.evaluate(num_valid), 3)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# Without pad_to_max_output_size, the output shape is dynamic.
self.assertFalse(selected_indices.shape.is_fully_defined())
with self.cached_session():
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold, score_threshold):
boxes = constant_op.constant(boxes)
scores = constant_op.constant(scores)
max_output_size = constant_op.constant(max_output_size)
iou_threshold = constant_op.constant(iou_threshold)
score_threshold = constant_op.constant(score_threshold)
y, nv = image_ops.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold, score_threshold)
# Without pad_to_max_output_size, the output shape is dynamic.
self.assertFalse(y.shape.is_fully_defined())
return y, nv
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
selected_indices, num_valid = func(boxes_np, scores_np,
max_output_size_np, iou_threshold_np,
score_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(self.evaluate(num_valid), 3)
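# With pad_to_max_output_size=True the index list is padded with zeros to a
# static length, which is why [3, 0, 5] becomes [3, 0, 5, 0, 0] with
# num_valid == 3 above (hypothetical illustration of the padding contract).
def pad_selection_sketch(selected, max_output_size):
  num_valid = len(selected)
  return selected + [0] * (max_output_size - num_valid), num_valid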
class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
def testSelectOneFromThree(self):
overlaps_np = [
[1.0, 0.7, 0.2],
[0.7, 1.0, 0.0],
[0.2, 0.0, 1.0],
]
scores_np = [0.7, 0.9, 0.1]
max_output_size_np = 3
overlaps = constant_op.constant(overlaps_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
overlap_threshold = 0.6
score_threshold = 0.4
selected_indices = image_ops.non_max_suppression_with_overlaps(
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices, [1])
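# A sketch of greedy NMS driven by a precomputed overlap matrix, as in
# testSelectOneFromThree above: box 1 wins on score (0.9); box 0 overlaps
# it at 0.7 > 0.6 and is suppressed; box 2 falls below the 0.4 score
# threshold. Illustrative only, not the TF kernel.
def overlaps_nms_sketch(overlaps, scores, max_output_size,
                        overlap_threshold, score_threshold):
  import numpy as np
  scores = np.asarray(scores, dtype=float)
  selected = []
  for i in np.argsort(-scores, kind="stable"):
    if len(selected) == max_output_size:
      break
    if scores[i] < score_threshold:
      continue
    if all(overlaps[i][j] <= overlap_threshold for j in selected):
      selected.append(int(i))
  return selected  # [1] for the inputs in the test above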
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):
"""Tests utility function used by ssim() and psnr()."""
def testWrongDims(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img = array_ops.placeholder(dtype=dtypes.float32)
img_np = np.array((2, 2))
with self.cached_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(img, img)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img: img_np})
def testShapeMismatch(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
img1 = array_ops.placeholder(dtype=dtypes.float32)
img2 = array_ops.placeholder(dtype=dtypes.float32)
img1_np = np.array([1, 2, 2, 1])
img2_np = np.array([1, 3, 3, 1])
with self.cached_session(use_gpu=True) as sess:
_, _, checks = image_ops_impl._verify_compatible_image_shapes(
img1, img2)
with self.assertRaises(errors.InvalidArgumentError):
sess.run(checks, {img1: img1_np, img2: img2_np})
class PSNRTest(test_util.TensorFlowTestCase):
"""Tests for PSNR."""
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/psnr/testdata", filename))
im = image_ops.decode_jpeg(content, dct_method="INTEGER_ACCURATE")
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
q20 = self._LoadTestImage(sess, "cat_q20.jpg")
q72 = self._LoadTestImage(sess, "cat_q72.jpg")
q95 = self._LoadTestImage(sess, "cat_q95.jpg")
return q20, q72, q95
def _PSNR_NumPy(self, orig, target, max_value):
"""Numpy implementation of PSNR."""
mse = ((orig - target) ** 2).mean(axis=(-3, -2, -1))
return 20 * np.log10(max_value) - 10 * np.log10(mse)
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testPSNRSingleImage(self):
image1 = self._RandomImage((8, 8, 1), 1)
image2 = self._RandomImage((8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1.0, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testPSNRMultiImage(self):
image1 = self._RandomImage((10, 8, 8, 1), 1)
image2 = self._RandomImage((10, 8, 8, 1), 1)
psnr = self._PSNR_NumPy(image1, image2, 1)
with self.cached_session(use_gpu=True):
tf_image1 = constant_op.constant(image1, shape=image1.shape,
dtype=dtypes.float32)
tf_image2 = constant_op.constant(image2, shape=image2.shape,
dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_image1, tf_image2, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testGoldenPSNR(self):
q20, q72, q95 = self._LoadTestImages()
# Verify NumPy implementation first.
# Golden values are generated using GNU Octave's psnr() function.
psnr1 = self._PSNR_NumPy(q20, q72, 1)
self.assertNear(30.321, psnr1, 0.001, msg="q20.dtype=" + str(q20.dtype))
psnr2 = self._PSNR_NumPy(q20, q95, 1)
self.assertNear(29.994, psnr2, 0.001)
psnr3 = self._PSNR_NumPy(q72, q95, 1)
self.assertNear(35.302, psnr3, 0.001)
# Test TensorFlow implementation.
with self.cached_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_q72 = constant_op.constant(q72, shape=q72.shape, dtype=dtypes.float32)
tf_q95 = constant_op.constant(q95, shape=q95.shape, dtype=dtypes.float32)
tf_psnr1 = self.evaluate(image_ops.psnr(tf_q20, tf_q72, 1, "psnr1"))
tf_psnr2 = self.evaluate(image_ops.psnr(tf_q20, tf_q95, 1, "psnr2"))
tf_psnr3 = self.evaluate(image_ops.psnr(tf_q72, tf_q95, 1, "psnr3"))
self.assertAllClose(psnr1, tf_psnr1, atol=0.001)
self.assertAllClose(psnr2, tf_psnr2, atol=0.001)
self.assertAllClose(psnr3, tf_psnr3, atol=0.001)
def testInfinity(self):
q20, _, _ = self._LoadTestImages()
psnr = self._PSNR_NumPy(q20, q20, 1)
with self.cached_session(use_gpu=True):
tf_q20 = constant_op.constant(q20, shape=q20.shape, dtype=dtypes.float32)
tf_psnr = self.evaluate(image_ops.psnr(tf_q20, tf_q20, 1, "psnr"))
self.assertAllClose(psnr, tf_psnr, atol=0.001)
def testInt(self):
img1 = self._RandomImage((10, 8, 8, 1), 255)
img2 = self._RandomImage((10, 8, 8, 1), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
psnr_uint8 = image_ops.psnr(img1, img2, 255)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
psnr_float32 = image_ops.psnr(img1, img2, 1.0)
with self.cached_session(use_gpu=True):
self.assertAllClose(
self.evaluate(psnr_uint8), self.evaluate(psnr_float32), atol=0.001)
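# The numpy PSNR above relies on the logarithm identity
# 10*log10(max**2 / mse) == 20*log10(max) - 10*log10(mse); a quick check:
def psnr_identity_sketch():
  import numpy as np
  mse, max_value = 0.01, 1.0
  lhs = 10 * np.log10(max_value ** 2 / mse)
  rhs = 20 * np.log10(max_value) - 10 * np.log10(mse)
  assert np.isclose(lhs, rhs)
  return rhs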
class SSIMTest(test_util.TensorFlowTestCase):
"""Tests for SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_ssim = np.asarray([[1.000000, 0.230880, 0.231153],
[0.230880, 1.000000, 0.996828],
[0.231153, 0.996828, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against values produced by Matlab."""
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testBatch(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBatchNumpyInputs(self):
img = self._LoadTestImages()
expected = self._ssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
with self.cached_session(use_gpu=True):
img1 = self.evaluate(constant_op.constant(img1))
img2 = self.evaluate(constant_op.constant(img2))
ssim = image_ops.ssim(
img1,
img2,
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testBroadcast(self):
img = self._LoadTestImages()[:2]
expected = self._ssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
ssim = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(ssim), atol=1e-4)
def testNegative(self):
"""Tests against negative SSIM index."""
step = np.expand_dims(np.arange(0, 256, 16, dtype=np.uint8), axis=0)
img1 = np.tile(step, (16, 1))
img2 = np.fliplr(img1)
img1 = img1.reshape((1, 16, 16, 1))
img2 = img2.reshape((1, 16, 16, 1))
ssim = image_ops.ssim(
constant_op.constant(img1),
constant_op.constant(img2),
255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertLess(self.evaluate(ssim), 0)
def testInt(self):
img1 = self._RandomImage((1, 16, 16, 3), 255)
img2 = self._RandomImage((1, 16, 16, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
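# For context: in the standard SSIM definition the k1/k2 arguments used
# above become the stabilizing constants c1 = (k1 * L)**2 and
# c2 = (k2 * L)**2, where L is max_val (stated as background, not as a
# description of TF internals).
def ssim_constants_sketch(k1=0.01, k2=0.03, max_val=1.0):
  c1 = (k1 * max_val) ** 2
  c2 = (k2 * max_val) ** 2
  return c1, c2  # (1e-4, 9e-4) for the defaults used in these tests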
class MultiscaleSSIMTest(test_util.TensorFlowTestCase):
"""Tests for MS-SSIM."""
_filenames = ["checkerboard1.png",
"checkerboard2.png",
"checkerboard3.png",]
_msssim = np.asarray([[1.000000, 0.091016, 0.091025],
[0.091016, 1.000000, 0.999567],
[0.091025, 0.999567, 1.000000]])
def _LoadTestImage(self, sess, filename):
content = io_ops.read_file(os.path.join(
"tensorflow/core/lib/ssim/testdata", filename))
im = image_ops.decode_png(content)
im = image_ops.convert_image_dtype(im, dtypes.float32)
im, = self.evaluate([im])
return np.expand_dims(im, axis=0)
def _LoadTestImages(self):
with self.cached_session(use_gpu=True) as sess:
return [self._LoadTestImage(sess, f) for f in self._filenames]
def _RandomImage(self, shape, max_val):
"""Returns an image or image batch with given shape."""
return np.random.rand(*shape).astype(np.float32) * max_val
def testAgainstMatlab(self):
"""Tests against MS-SSIM computed with Matlab implementation.
For color images, MS-SSIM scores are averaged over color channels.
"""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3)]
def ssim_func(x):
return image_ops.ssim_multiscale(
*x, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
scores = [
self.evaluate(ssim_func(t))
for t in itertools.combinations_with_replacement(img, 2)
]
self.assertAllClose(expected, np.squeeze(scores), atol=1e-4)
def testUnweightedIsDifferentiable(self):
img = self._LoadTestImages()
@def_function.function
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testUnweightedIsDifferentiableEager(self):
if not context.executing_eagerly():
self.skipTest("Eager mode only")
img = self._LoadTestImages()
def msssim_func(x1, x2, scalar):
return image_ops.ssim_multiscale(
x1 * scalar,
x2 * scalar,
max_val=1.0,
power_factors=(1, 1, 1, 1, 1),
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
scalar = constant_op.constant(1.0, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(scalar)
y = msssim_func(img[0], img[1], scalar)
grad = tape.gradient(y, scalar)
np_grads = self.evaluate(grad)
self.assertTrue(np.isfinite(np_grads).all())
def testBatch(self):
"""Tests MS-SSIM computed in batch."""
img = self._LoadTestImages()
expected = self._msssim[np.triu_indices(3, k=1)]
img1, img2 = zip(*itertools.combinations(img, 2))
img1 = np.concatenate(img1)
img2 = np.concatenate(img2)
msssim = image_ops.ssim_multiscale(
constant_op.constant(img1),
constant_op.constant(img2),
1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(msssim), 1e-4)
def testBroadcast(self):
"""Tests MS-SSIM broadcasting."""
img = self._LoadTestImages()[:2]
expected = self._msssim[:2, :2]
img = constant_op.constant(np.concatenate(img))
img1 = array_ops.expand_dims(img, axis=0) # batch dims: 1, 2.
img2 = array_ops.expand_dims(img, axis=1) # batch dims: 2, 1.
score_tensor = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(expected, self.evaluate(score_tensor), 1e-4)
def testRange(self):
"""Tests against low MS-SSIM score.
MS-SSIM is a geometric mean of SSIM and CS scores of various scales.
If any of the values is negative, making the geometric mean undefined,
the MS-SSIM score is treated as zero.
"""
with self.cached_session(use_gpu=True) as sess:
img1 = self._LoadTestImage(sess, "checkerboard1.png")
img2 = self._LoadTestImage(sess, "checkerboard3.png")
images = [img1, img2, np.zeros_like(img1),
np.full_like(img1, fill_value=255)]
images = [ops.convert_to_tensor(x, dtype=dtypes.float32) for x in images]
msssim_ops = [
image_ops.ssim_multiscale(
x, y, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
for x, y in itertools.combinations(images, 2)
]
msssim = self.evaluate(msssim_ops)
msssim = np.squeeze(msssim)
self.assertTrue(np.all(msssim >= 0.0))
self.assertTrue(np.all(msssim <= 1.0))
def testInt(self):
img1 = self._RandomImage((1, 180, 240, 3), 255)
img2 = self._RandomImage((1, 180, 240, 3), 255)
img1 = constant_op.constant(img1, dtypes.uint8)
img2 = constant_op.constant(img2, dtypes.uint8)
ssim_uint8 = image_ops.ssim_multiscale(
img1, img2, 255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
img1 = image_ops.convert_image_dtype(img1, dtypes.float32)
img2 = image_ops.convert_image_dtype(img2, dtypes.float32)
ssim_float32 = image_ops.ssim_multiscale(
img1, img2, 1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
with self.cached_session(use_gpu=True):
self.assertAllClose(
self.evaluate(ssim_uint8), self.evaluate(ssim_float32), atol=0.001)
def testNumpyInput(self):
"""Test case for GitHub issue 28241."""
image = np.random.random([512, 512, 1])
score_tensor = image_ops.ssim_multiscale(image, image, max_val=1.0)
with self.cached_session(use_gpu=True):
_ = self.evaluate(score_tensor)
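# MS-SSIM combines per-scale contrast/structure scores with a weighted
# geometric product, prod(score_i ** weight_i); the weights below are the
# conventional five power factors and the scores are hypothetical, shown
# only to illustrate why a negative factor would make the mean undefined.
def msssim_geometric_mean_sketch():
  import numpy as np
  weights = np.array([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
  scores = np.array([0.9, 0.8, 0.85, 0.9, 0.95])  # hypothetical per-scale
  return float(np.prod(scores ** weights))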
class ImageGradientsTest(test_util.TensorFlowTestCase):
def testImageGradients(self):
shape = [1, 2, 4, 1]
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
img = array_ops.reshape(img, shape)
expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)
dy, dx = image_ops.image_gradients(img)
with self.cached_session():
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsMultiChannelBatch(self):
batch = [[[[1, 2], [2, 5], [3, 3]],
[[8, 4], [5, 1], [9, 8]]],
[[[5, 3], [7, 9], [1, 6]],
[[1, 2], [6, 3], [6, 3]]]]
expected_dy = [[[[7, 2], [3, -4], [6, 5]],
[[0, 0], [0, 0], [0, 0]]],
[[[-4, -1], [-1, -6], [5, -3]],
[[0, 0], [0, 0], [0, 0]]]]
expected_dx = [[[[1, 3], [1, -2], [0, 0]],
[[-3, -3], [4, 7], [0, 0]]],
[[[2, 6], [-6, -3], [0, 0]],
[[5, 1], [0, 0], [0, 0]]]]
batch = constant_op.constant(batch)
assert batch.get_shape().as_list() == [2, 2, 3, 2]
dy, dx = image_ops.image_gradients(batch)
with self.cached_session(use_gpu=True):
actual_dy = self.evaluate(dy)
actual_dx = self.evaluate(dx)
self.assertAllClose(expected_dy, actual_dy)
self.assertAllClose(expected_dx, actual_dx)
def testImageGradientsBadShape(self):
# [2 x 4] image but missing batch and depth dimensions.
img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
with self.assertRaises(ValueError):
image_ops.image_gradients(img)
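# A pure-numpy sketch of the forward-difference convention checked above:
# dy/dx are one-step differences with a zero pad at the trailing edge.
def image_gradients_sketch():
  import numpy as np
  img = np.array([[1, 3, 4, 2], [8, 7, 5, 6]], dtype=float)
  dy = np.zeros_like(img)
  dx = np.zeros_like(img)
  dy[:-1, :] = img[1:, :] - img[:-1, :]  # [[7, 4, 1, 4], [0, 0, 0, 0]]
  dx[:, :-1] = img[:, 1:] - img[:, :-1]  # [[2, 1, -2, 0], [-1, -2, 1, 0]]
  return dy, dx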
class SobelEdgesTest(test_util.TensorFlowTestCase):
def disabled_testSobelEdges1x2x3x1(self):
img = constant_op.constant([[1, 3, 6], [4, 1, 5]],
dtype=dtypes.float32, shape=[1, 2, 3, 1])
expected = np.reshape([[[0, 0], [0, 12], [0, 0]],
[[0, 0], [0, 12], [0, 0]]], [1, 2, 3, 1, 2])
sobel = image_ops.sobel_edges(img)
with self.cached_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected, actual_sobel)
def testSobelEdges5x3x4x2(self):
batch_size = 5
plane = np.reshape([[1, 3, 6, 2], [4, 1, 5, 7], [2, 5, 1, 4]],
[1, 3, 4, 1])
two_channel = np.concatenate([plane, plane], axis=3)
batch = np.concatenate([two_channel] * batch_size, axis=0)
img = constant_op.constant(batch, dtype=dtypes.float32,
shape=[batch_size, 3, 4, 2])
expected_plane = np.reshape([[[0, 0], [0, 12], [0, 10], [0, 0]],
[[6, 0], [0, 6], [-6, 10], [-6, 0]],
[[0, 0], [0, 0], [0, 10], [0, 0]]],
[1, 3, 4, 1, 2])
expected_two_channel = np.concatenate(
[expected_plane, expected_plane], axis=3)
expected_batch = np.concatenate([expected_two_channel] * batch_size, axis=0)
sobel = image_ops.sobel_edges(img)
with self.cached_session(use_gpu=True):
actual_sobel = self.evaluate(sobel)
self.assertAllClose(expected_batch, actual_sobel)
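# For reference, the standard 3x3 Sobel kernels whose per-pixel [dy, dx]
# responses the tests above expect (boundary handling in the TF op may
# differ from a plain convolution; shown as background only).
def sobel_kernels_sketch():
  import numpy as np
  kernel_dy = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
  kernel_dx = kernel_dy.T
  return kernel_dy, kernel_dx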
@test_util.run_all_in_graph_and_eager_modes
class DecodeImageTest(test_util.TensorFlowTestCase, parameterized.TestCase):
_FORWARD_COMPATIBILITY_HORIZONS = [
(2020, 1, 1),
(2020, 7, 14),
(2525, 1, 1), # future behavior
]
def testBmpChannels(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with test_util.use_gpu():
base = "tensorflow/core/lib/bmp/testdata"
# `rgba_small.bmp` has 4 channels with transparent pixels.
# Test consistency between `decode_image` and `decode_bmp` functions.
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
image0 = image_ops.decode_image(bmp0, channels=4)
image1 = image_ops.decode_bmp(bmp0, channels=4)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# Test that 3 channels is returned with user request of `channels=3`
# even though image has 4 channels.
# Note that this operation simply drops 4th channel information. This
# is the same behavior as `decode_png`.
# e.g. pixel values [25, 25, 25, 100] becomes [25, 25, 25].
bmp1 = io_ops.read_file(os.path.join(base, "rgb_small.bmp"))
image2 = image_ops.decode_bmp(bmp0, channels=3)
image3 = image_ops.decode_bmp(bmp1)
image2, image3 = self.evaluate([image2, image3])
self.assertAllEqual(image2, image3)
# Test that 4 channels is returned with user request of `channels=4`
# even though image has 3 channels. Alpha channel should be set to
# UINT8_MAX.
bmp3 = io_ops.read_file(os.path.join(base, "rgb_small_255.bmp"))
bmp4 = io_ops.read_file(os.path.join(base, "rgba_small_255.bmp"))
image4 = image_ops.decode_bmp(bmp3, channels=4)
image5 = image_ops.decode_bmp(bmp4)
image4, image5 = self.evaluate([image4, image5])
self.assertAllEqual(image4, image5)
# Test that 3 channels is returned with user request of `channels=3`
# even though image has 1 channel (grayscale).
bmp6 = io_ops.read_file(os.path.join(base, "grayscale_small.bmp"))
bmp7 = io_ops.read_file(
os.path.join(base, "grayscale_small_3channels.bmp"))
image6 = image_ops.decode_bmp(bmp6, channels=3)
image7 = image_ops.decode_bmp(bmp7)
image6, image7 = self.evaluate([image6, image7])
self.assertAllEqual(image6, image7)
# Test that 4 channels is returned with user request of `channels=4`
# even though image has 1 channel (grayscale). Alpha channel should be
# set to UINT8_MAX.
bmp9 = io_ops.read_file(
os.path.join(base, "grayscale_small_4channels.bmp"))
image8 = image_ops.decode_bmp(bmp6, channels=4)
image9 = image_ops.decode_bmp(bmp9)
image8, image9 = self.evaluate([image8, image9])
self.assertAllEqual(image8, image9)
def testJpegUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
# A NumPy uint16 image should round-trip through encode_png / decode_image.
x = np.random.randint(256, size=(4, 4, 3), dtype=np.uint16)
x_str = image_ops_impl.encode_png(x)
x_dec = image_ops_impl.decode_image(
x_str, channels=3, dtype=dtypes.uint16)
self.assertAllEqual(x, x_dec)
def testGifUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpUint16(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.uint16)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.uint16)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testJpegFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/jpeg/testdata"
jpeg0 = io_ops.read_file(os.path.join(base, "jpeg_merge_test1.jpg"))
image0 = image_ops.decode_image(jpeg0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_jpeg(jpeg0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testPngFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/png/testdata"
png0 = io_ops.read_file(os.path.join(base, "lena_rgba.png"))
image0 = image_ops.decode_image(png0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(
image_ops.decode_png(png0, dtype=dtypes.uint16), dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testGifFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
image0 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_gif(gif0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testBmpFloat32(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "lena.bmp"))
image0 = image_ops.decode_image(bmp0, dtype=dtypes.float32)
image1 = image_ops.convert_image_dtype(image_ops.decode_bmp(bmp0),
dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertAllEqual(image0, image1)
def testExpandAnimations(self):
for horizon in self._FORWARD_COMPATIBILITY_HORIZONS:
with compat.forward_compatibility_horizon(*horizon):
with self.cached_session(use_gpu=True) as sess:
base = "tensorflow/core/lib/gif/testdata"
gif0 = io_ops.read_file(os.path.join(base, "scan.gif"))
# Test `expand_animations=False` case.
image0 = image_ops.decode_image(
gif0, dtype=dtypes.float32, expand_animations=False)
          # `decode_gif` always returns a 4D (num_frames, height, width, 3) tensor.
animation = image_ops.decode_gif(gif0)
first_frame = array_ops.gather(animation, 0)
image1 = image_ops.convert_image_dtype(first_frame, dtypes.float32)
image0, image1 = self.evaluate([image0, image1])
self.assertLen(image0.shape, 3)
self.assertAllEqual(list(image0.shape), [40, 20, 3])
self.assertAllEqual(image0, image1)
# Test `expand_animations=True` case.
image2 = image_ops.decode_image(gif0, dtype=dtypes.float32)
image3 = image_ops.convert_image_dtype(animation, dtypes.float32)
image2, image3 = self.evaluate([image2, image3])
self.assertLen(image2.shape, 4)
self.assertAllEqual(list(image2.shape), [12, 40, 20, 3])
self.assertAllEqual(image2, image3)
def testImageCropAndResize(self):
if test_util.is_gpu_available():
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
else:
message = "Boxes contains at least one element that is not finite"
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
message):
op = image_ops_impl.crop_and_resize_v2(
image=array_ops.zeros((2, 1, 1, 1)),
boxes=[[1.0e+40, 0, 0, 0]],
box_indices=[1],
crop_size=[1, 1])
self.evaluate(op)
@parameterized.named_parameters(
("_jpeg", "JPEG", "jpeg_merge_test1.jpg"),
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
)
def testWrongOpBmp(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = "Trying to decode " + img_format + " format using DecodeBmp op"
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_bmp(img_bytes)
self.evaluate(img)
@parameterized.named_parameters(
("_jpeg", image_ops.decode_jpeg, "DecodeJpeg"),
("_png", image_ops.decode_png, "DecodePng"),
("_gif", image_ops.decode_gif, "DecodeGif"),
)
def testWrongOp(self, decode_op, op_used):
base = "tensorflow/core/lib/bmp/testdata"
bmp0 = io_ops.read_file(os.path.join(base, "rgba_small.bmp"))
err_msg = ("Trying to decode BMP format using a wrong op. Use `decode_bmp` "
"or `decode_image` instead. Op used: ") + op_used
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img = decode_op(bmp0)
self.evaluate(img)
@parameterized.named_parameters(
("_png", "PNG", "lena_rgba.png"),
("_gif", "GIF", "scan.gif"),
("_bmp", "BMP", "rgba_small.bmp"),
)
def testWrongOpJpeg(self, img_format, filename):
base_folder = "tensorflow/core/lib"
base_path = os.path.join(base_folder, img_format.lower(), "testdata")
err_msg = ("DecodeAndCropJpeg operation can run on JPEG only, but "
"detected ") + img_format
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
img_bytes = io_ops.read_file(os.path.join(base_path, filename))
img = image_ops.decode_and_crop_jpeg(img_bytes, [1, 1, 2, 2])
self.evaluate(img)
def testGifFramesWithDiffSize(self):
"""Test decoding an animated GIF.
    This test verifies that the `decode_image` op can decode animated GIFs whose
first frame does not fill the canvas. The unoccupied areas should be filled
with zeros (black).
`squares.gif` is animated with two images of different sizes. It
alternates between a smaller image of size 10 x 10 and a larger image of
size 16 x 16. Because it starts animating with the smaller image, the first
frame does not fill the canvas. (Canvas size is equal to max frame width x
max frame height.)
`red_black.gif` has just a single image in a GIF format. It is the same
image as the smaller image (size 10 x 10) of the two images in
`squares.gif`. The only difference is that its background (canvas - smaller
    image) is pre-filled with zeros (black); it is the ground truth.
"""
base = "tensorflow/core/lib/gif/testdata"
gif_bytes0 = io_ops.read_file(os.path.join(base, "squares.gif"))
image0 = image_ops.decode_image(gif_bytes0, dtype=dtypes.float32,
expand_animations=False)
gif_bytes1 = io_ops.read_file(os.path.join(base, "red_black.gif"))
image1 = image_ops.decode_image(gif_bytes1, dtype=dtypes.float32)
image1_0 = array_ops.gather(image1, 0)
image0, image1_0 = self.evaluate([image0, image1_0])
self.assertAllEqual(image0, image1_0)
if __name__ == "__main__":
googletest.main()
| cxxgtxy/tensorflow | tensorflow/python/ops/image_ops_test.py | Python | apache-2.0 | 240,446 | ["Gaussian"] | ccced6757f958347e5ef3d728109587eeb1a3a76ee5a8f9d37476367bd0fe42b |
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_native, to_text
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
self.parser.set_description("Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.set_description("Import a role.")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
self.parser.set_description("View more details about a specific role.")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.set_description("Initialize new role with the base structure of a role.")
self.parser.add_option('--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.set_description("Install Roles from file(s), URL(s) or tar file(s)")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False, help='Use tar instead of the scm archive option when packaging the role')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
self.parser.set_description("Delete a role from roles_path.")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
self.parser.set_description("Show the name and version of each role installed in the roles_path.")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.set_description("Login to api.github.com server in order to use ansible-galaxy sub command such as 'import', 'delete' and 'setup'.")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
self.parser.set_description("Search the Galaxy database by tags, platforms, author and multiple keywords.")
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
self.parser.set_description("Manage the integration between Galaxy and the given source.")
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete", "import", "init", "login", "setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
' file (/etc/ansible/roles if not configured)', type='str')
if self.action in ("init", "install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
desc="Perform various Role related operations.",
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
super(GalaxyCLI, self).parse()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.options.ignore_errors:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = self.options.init_path
force = self.options.force
role_skeleton = self.options.role_skeleton
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPLv2, CC-BY, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='2.4',
role_type=self.options.role_type
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.options.roles_path
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
        uses the args list of roles to be installed, unless -r/--role-file was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
"""
role_file = self.options.role_file
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = self.options.no_deps
force = self.options.force
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
raise AnsibleError("Invalid role requirements file")
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, to_native(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
            # when a roles file and role names were both given, only process matching roles
if role_file and self.args and role.name not in self.args:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % str(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(str(dep_role), role.name, dep_role.install_info['version']))
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if len(self.args) == 1:
# show the requested role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.options.roles_path
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
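        # The doubled %% survives this first substitution, yielding a template
        # like u" %-20s %s" when name_len is 20 (a left-aligned name column).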
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify the user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
github_user = to_text(self.args.pop(), errors='surrogate_or_strict')
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(self.args) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
| veger/ansible | lib/ansible/cli/galaxy.py | Python | gpl-3.0 | 32,900 | ["Galaxy"] | cf9d210a177f34cdddd683d75e67ac129ef3ec4e60077997e04f9efca9774075 |
#
# Copyright (C) 2009 Niek Linnenbank
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# vim: set et sw=3 tw=0 fo=awqorc ft=python:
# DistTarBuilder: tool to generate tar files using SCons
# Copyright (C) 2005, 2006 Matthew A. Nicholson
# Copyright (C) 2006 John Pye
#
# This file is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
import tarfile
import version
import checksum
import SCons.Builder
from build import *
from SCons.Script import *
def DistTarEmitter(target,source,env):
source,origsource = [], source
excludeexts = env.Dictionary().get('DISTTAR_EXCLUDEEXTS',[])
excludedirs = env.Dictionary().get('DISTTAR_EXCLUDEDIRS',[])
# assume the sources are directories... need to check that
for item in origsource:
if os.path.isdir(str(item)):
for root, dirs, files in os.walk(str(item)):
            # don't make directory dependencies, as that triggers a full build
            # of that directory
if root in source:
#print "Removing directory %s" % root
source.remove(root)
# loop through files in a directory
for name in files:
ext = os.path.splitext(name)
if not ext[1] in excludeexts:
relpath = os.path.join(root,name)
source.append(relpath)
for d in excludedirs:
if d in dirs:
dirs.remove(d) # don't visit CVS directories etc
else:
ext = os.path.splitext(str(item))
if not ext[1] in excludeexts:
source.append(str(item))
return target, source
def DistTarString(target, source, env):
"""
    This is what gets printed on the console. We'll strip out the list of source
    files, since it tends to get very long. If you want to see the contents, the
easiest way is to uncomment the line 'Adding to TAR file' below.
"""
return " TAR " + str(target[0])
def DistTar(target, source, env):
"""tar archive builder"""
import tarfile
env_dict = env.Dictionary()
if env_dict.get("DISTTAR_FORMAT") in ["gz", "bz2"]:
tar_format = env_dict["DISTTAR_FORMAT"]
else:
tar_format = ""
    # split the target into directory, base filename, and suffix
base_name = str(target[0]).split('.tar')[0]
(target_dir, dir_name) = os.path.split(base_name)
# create the target directory if it does not exist
if target_dir and not os.path.exists(target_dir):
os.makedirs(target_dir)
# open our tar file for writing
tar = tarfile.open(str(target[0]), "w:%s" % (tar_format,))
# write sources to our tar file
for item in source:
item = str(item)
tar.add(item,'%s/%s' % (dir_name,item))
# all done
tar.close()
def DistTarSuffix(env, sources):
"""tar archive suffix generator"""
env_dict = env.Dictionary()
    if env_dict.get("DISTTAR_FORMAT") in ["gz", "bz2"]:
return ".tar." + env_dict["DISTTAR_FORMAT"]
else:
return ".tar"
#
# Registers the DistTar builder.
#
target.Append(BUILDERS =
{
'DistTar': target.Builder(
action = SCons.Action.Action(DistTar, DistTarString),
suffix = DistTarSuffix,
emitter = DistTarEmitter,
target_factory = target.fs.Entry,
),
})
target.Append(
DISTTAR_EXCLUDEEXTS=['.o','.os','.so','.a','.dll','.cc','.cache',
'.pyc','.cvsignore','.dblite','.log', '.gz',
'.bz2', '.zip', '.bak', '.BAK', '.md5', '.sha1',
'.tar', '.img', '.ext2', '.iso']
, DISTTAR_EXCLUDEDIRS=['CVS','.svn','.sconf_temp', 'dist', 'host']
)
#
# Create a release GZipped TAR archive.
#
releaseTarGz = target.DistTar("AMAYA-OS-" + version.current + ".tar.gz",
[target.Dir('#')],
DISTTAR_FORMAT = "gz")
releaseTarGzMd5 = target.Checksum("AMAYA-OS-" + version.current + ".tar.gz.md5",
"AMAYA-OS-" + version.current + ".tar.gz")
releaseTarGzSha1 = target.Checksum("AMAYA-OS-" + version.current + ".tar.gz.sha1",
"AMAYA-OS-" + version.current + ".tar.gz")
#
# Create a release BZipped TAR archive.
#
releaseTarBz2 = target.DistTar("AMAYA-OS-" + version.current + ".tar.bz2",
[target.Dir("#")],
DISTTAR_FORMAT = "bz2")
releaseTarBz2Md5 = target.Checksum("AMAYA-OS-" + version.current + ".tar.bz2.md5",
"AMAYA-OS-" + version.current + ".tar.bz2")
releaseTarBz2Sha1 = target.Checksum("AMAYA-OS-" + version.current + ".tar.bz2.sha1",
"AMAYA-OS-" + version.current + ".tar.bz2")
#
# Create a snapshot GZipped TAR archive.
#
snapshotTarGz = target.DistTar("AMAYA-OS-" + version.currentRev + ".tar.gz",
[target.Dir("#")],
DISTTAR_FORMAT = "gz")
snapshotTarGzMd5 = target.Checksum("AMAYA-OS-" + version.currentRev + ".tar.gz.md5",
"AMAYA-OS-" + version.currentRev + ".tar.gz")
snapshotTarGzSha1 = target.Checksum("AMAYA-OS-" + version.currentRev + ".tar.gz.sha1",
"AMAYA-OS-" + version.currentRev + ".tar.gz")
#
# Create a snapshot BZipped TAR archive.
#
snapshotTarBz2 = target.DistTar("AMAYA-OS-" + version.currentRev + ".tar.bz2",
[target.Dir("#")],
DISTTAR_FORMAT = "bz2")
snapshotTarBz2Md5 = target.Checksum("AMAYA-OS-" + version.currentRev + ".tar.bz2.md5",
"AMAYA-OS-" + version.currentRev + ".tar.bz2")
snapshotTarBz2Sha1 = target.Checksum("AMAYA-OS-" + version.currentRev + ".tar.bz2.sha1",
"AMAYA-OS-" + version.currentRev + ".tar.bz2")
Alias("release", [ releaseTarGz, releaseTarGzMd5, releaseTarGzSha1,
releaseTarBz2, releaseTarBz2Md5, releaseTarBz2Sha1 ])
Alias("snapshot", [ snapshotTarGz, snapshotTarGzMd5, snapshotTarGzSha1,
snapshotTarBz2, snapshotTarBz2Md5, snapshotTarBz2Sha1 ])
| dalmemail/AmayaOS-English | site_scons/dist.py | Python | gpl-3.0 | 6,941 | ["VisIt"] | 09738ad8e3a1e06ddf88941efab13e80fa3a2928b73d9f53ba795a7bc56b017d |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating qualified names.
A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite
(e.g. 'foo.bar') syntactic symbols.
This is *not* related to the __qualname__ attribute used by inspect, which
refers to scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class Symbol(collections.namedtuple('Symbol', ['name'])):
"""Represents a Python symbol."""
class StringLiteral(collections.namedtuple('StringLiteral', ['value'])):
"""Represents a Python string literal."""
def __str__(self):
return '\'%s\'' % self.value
def __repr__(self):
return str(self)
class NumberLiteral(collections.namedtuple('NumberLiteral', ['value'])):
"""Represents a Python numeric literal."""
def __str__(self):
return '%s' % self.value
def __repr__(self):
return str(self)
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
class QN(object):
"""Represents a qualified name."""
def __init__(self, base, attr=None, subscript=None):
if attr is not None and subscript is not None:
raise ValueError('A QN can only be either an attr or a subscript, not '
'both: attr={}, subscript={}.'.format(attr, subscript))
self._has_attr = False
self._has_subscript = False
if attr is not None:
if not isinstance(base, QN):
raise ValueError(
'for attribute QNs, base must be a QN; got instead "%s"' % base)
if not isinstance(attr, str):
raise ValueError('attr may only be a string; got instead "%s"' % attr)
self._parent = base
# TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
self.qn = (base, attr)
self._has_attr = True
elif subscript is not None:
if not isinstance(base, QN):
raise ValueError('For subscript QNs, base must be a QN.')
self._parent = base
self.qn = (base, subscript)
self._has_subscript = True
else:
if not isinstance(base, (str, StringLiteral, NumberLiteral)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
'for simple QNs, base must be a string or a Literal object;'
' got instead "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
def is_symbol(self):
return isinstance(self.qn[0], str)
def is_simple(self):
return len(self.qn) <= 1
def is_composite(self):
return len(self.qn) > 1
def has_subscript(self):
return self._has_subscript
def has_attr(self):
return self._has_attr
@property
def parent(self):
if self._parent is None:
raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
return self._parent
@property
def owner_set(self):
"""Returns all the symbols (simple or composite) that own this QN.
In other words, if this symbol was modified, the symbols in the owner set
may also be affected.
Examples:
'a.b[c.d]' has two owners, 'a' and 'a.b'
"""
owners = set()
if self.has_attr() or self.has_subscript():
owners.add(self.parent)
owners.update(self.parent.owner_set)
return owners
@property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
This would be the smallest set of symbols necessary for the QN to
statically resolve (assuming properties and index ranges are verified
at runtime).
Examples:
'a.b' has only one support symbol, 'a'
'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
if self.has_attr():
roots.update(self.parent.support_set)
elif self.has_subscript():
roots.update(self.parent.support_set)
roots.update(self.qn[1].support_set)
else:
roots.add(self)
return roots
def __hash__(self):
return hash(self.qn + (self._has_attr, self._has_subscript))
def __eq__(self, other):
return (isinstance(other, QN) and self.qn == other.qn and
self.has_subscript() == other.has_subscript() and
self.has_attr() == other.has_attr())
def __str__(self):
if self.has_subscript():
return str(self.qn[0]) + '[' + str(self.qn[1]) + ']'
if self.has_attr():
return '.'.join(map(str, self.qn))
else:
return str(self.qn[0])
def __repr__(self):
return str(self)
def ssf(self):
"""Simple symbol form."""
ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
ssf_string = ''
for i in range(0, len(self.qn) - 1):
if self.has_subscript():
delimiter = '_sub_'
else:
delimiter = '_'
ssf_string += ssfs[i] + delimiter
return ssf_string + ssfs[-1]
def ast(self):
# The caller must adjust the context appropriately.
if self.has_subscript():
return gast.Subscript(self.parent.ast(), gast.Index(self.qn[-1].ast()),
None)
if self.has_attr():
return gast.Attribute(self.parent.ast(), self.qn[-1], None)
base = self.qn[0]
if isinstance(base, str):
return gast.Name(base, None, None)
elif isinstance(base, StringLiteral):
return gast.Str(base.value)
elif isinstance(base, NumberLiteral):
return gast.Num(base.value)
else:
assert False, ('the constructor should prevent types other than '
'str, StringLiteral and NumberLiteral')
class QnResolver(gast.NodeTransformer):
"""Annotates nodes with QN information.
Note: Not using NodeAnnos to avoid circular dependencies.
"""
def visit_Name(self, node):
node = self.generic_visit(node)
anno.setanno(node, anno.Basic.QN, QN(node.id))
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))
return node
def visit_Subscript(self, node):
# TODO(mdan): This may no longer apply if we overload getitem.
node = self.generic_visit(node)
s = node.slice
if not isinstance(s, gast.Index):
# TODO(mdan): Support range and multi-dimensional indices.
# Continuing silently because some demos use these.
return node
if isinstance(s.value, gast.Num):
subscript = QN(NumberLiteral(s.value.n))
elif isinstance(s.value, gast.Str):
subscript = QN(StringLiteral(s.value.s))
else:
      # The index may be an expression, in which case a name doesn't make sense.
if anno.hasanno(node.slice.value, anno.Basic.QN):
subscript = anno.getanno(node.slice.value, anno.Basic.QN)
else:
return node
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN),
subscript=subscript))
return node
def resolve(node):
return QnResolver().visit(node)
def from_str(qn_str):
node = parser.parse_expression(qn_str)
node = resolve(node)
return anno.getanno(node, anno.Basic.QN)
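# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows `from_str` plus the QN introspection helpers on the composite name
# 'a.b[i]'; assumes this module and its autograph dependencies are importable.
if __name__ == '__main__':
  _qn = from_str('a.b[i]')
  print(_qn.is_composite())                       # True
  print(_qn.has_subscript())                      # True
  print(sorted(str(s) for s in _qn.support_set))  # ['a', 'i']
  print(sorted(str(s) for s in _qn.owner_set))    # ['a', 'a.b']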
| ghchinoy/tensorflow | tensorflow/python/autograph/pyct/qual_names.py | Python | apache-2.0 | 8,125 | ["VisIt"] | 95d77da66dfb59087a196a8cf191d6f07cb17559dea6e50b6ebb89de03245244 |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import os
from matplotlib import pyplot as plt
def test1():
img = cv2.imread('./../img/human.jpg', 0)
ret, thresh1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
ret, thresh2 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
ret, thresh4 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO)
ret, thresh5 = cv2.threshold(img, 127, 255, cv2.THRESH_TOZERO_INV)
titles = ['Original Image', 'BINARY', 'BINARY_INV', 'TRUNC', 'TOZERO', 'TOZERO_INV']
images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    for i in range(6):
plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
def test2():
img = cv2.imread('./../img/human.jpg', 0)
img = cv2.medianBlur(img, 5)
ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
titles = ['Original Image', 'Global Thresholding (v = 127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
    for i in range(4):
plt.subplot(2, 2, i + 1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
if __name__ == '__main__':
test2()
| KaoruNasuno/opencv_tutorials | image_processing_in_opencv/image_thresholding.py | Python | mit | 1,574 | ["Gaussian"] | d628a1d1bb0da7cc5617e1e14c7bb6db9b3b8ef1d555a2b2c6670b5d607db0f9 |
#!/usr/bin/python
from multiprocessing import Pool
import time
import os
import sys
import simplejson as json
import argparse
from Bio import SeqIO
from Bio.SeqUtils import GC
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
# Copyright(C) 2014 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
parser = argparse.ArgumentParser(description='Convert an operon file into a BLAST query.')
parser.add_argument("-i", "--infolder", dest="infolder", metavar="FOLDER", default='/home/dave/Desktop/all_genbank',
help="Folder containing all genbank files for use by the program.")
parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="FOLDER", default='./operon_query_files/',
help="Folder where the BLAST queries derived from an operon file will be stored.")
parser.add_argument("-f", "--filter", dest="filter", metavar="FILE", default='',
help="File restrictiong which accession numbers this script will process. If no file is provided, filtering is not performed.")
parser.add_argument("-p", "--operon_file", dest="operon_file", metavar="FILE", default='./regulonDB/operon_names_and_genes.txt',
help="File which contains operon information for use in custom operon queries. The file format is opern_name followed by the constituent gene names, tab delineated.")
parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
    # These two variables are necessary if we plan on doing more complex operon/neighborhood investigation. There is no way around this, and I know it looks ugly.
    # Screw it, at the end here, these two options will yield a list of organisms that are the reference, even if that list has only one value.
parser.add_argument("-r", "--refrence", dest="refrence", metavar="STRING", default = 'NC_000913',
help="Accession number of the refrence organism. This information is used to determine the product type of each gene (RNA/Protein), a necessary piece of information to classify the operons that are under investigation.")
parser.add_argument("-R", "--refrence_file", dest="refrence_file", metavar="FILE", default = '',
help="File containing a list of accession number(s) of refrence organism(s) . This information is used to determine the product type of each gene (RNA/Protein), a necessary piece of information to classify the operons that are under investigation.")
return parser.parse_args()
def check_options(parsed_args):
# section of code that checks the infolder entry
if os.path.isdir(parsed_args.infolder):
infolder = parsed_args.infolder
else:
print "The folder %s does not exist." % parsed_args.infolder
sys.exit()
if infolder[-1] != '/':
infolder = infolder + '/'
# if the directory that the user specifies does not exist, then the program makes it for them.
if not os.path.isdir(parsed_args.outfolder):
os.makedirs(parsed_args.outfolder)
if parsed_args.outfolder[-1] == '/':
outfolder = parsed_args.outfolder
else:
outfolder = parsed_args.outfolder + '/'
# Check the filter file
if parsed_args.filter == '' or os.path.exists(parsed_args.filter):
filter_file = parsed_args.filter
else:
print "The file %s does not exist." % parsed_args.filter
sys.exit()
    # section of code that deals with determining the number of CPU cores that will be used by the program
if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
num_proc = os.sysconf("SC_NPROCESSORS_CONF")
elif parsed_args.num_proc < 1:
num_proc = 1
else:
num_proc = int(parsed_args.num_proc)
# Whole lotta code to determine what the refrence organism(s) are, and return the result as a list.
if parsed_args.refrence_file == '' or os.path.exists(parsed_args.refrence_file):
refrence_file = parsed_args.refrence_file
else:
print "The file %s does not exist." % parsed_args.refrence_file
sys.exit()
if refrence_file == '':
refrence = [parsed_args.refrence]
else:
refrence = [i.strip() for i in open(parsed_args.refrence_file).readlines()]
# Check the custom operon file
if parsed_args.operon_file == '' or os.path.exists(parsed_args.operon_file):
operon_file = parsed_args.operon_file
else:
print "The file %s does not exist." % parsed_args.operon_file
sys.exit()
return infolder, outfolder, filter_file, num_proc, refrence, operon_file
#this function will return all of the files that are in a directory. os.walk is recursive traversal.
def returnRecursiveDirFiles(root_dir):
result = []
for path, dir_name, flist in os.walk(root_dir):
for f in flist:
fname = os.path.join(path, f)
if os.path.isfile(fname):
result.append(fname)
return result
# convert a file of format : operon name then a list of the full names of the genes within that operon
# into a dictionary that can be easily accessed or filtered later.
def parse_operon_file(fname):
result = {}
for line in [i.strip().split('\t') for i in open(fname).readlines()]:
result.update({line[0]: line[1:]})
return result
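# Hedged example (hypothetical input, for illustration): a line reading
# "araBAD\taraB\taraA\taraD" yields {'araBAD': ['araB', 'araA', 'araD']}.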
# This function creates a dictionary indexed by locus from the input genbank file
# and for my purposes now, it will index genes based on their annotation in genbank
# seq_type will allow us to determine the type of sequence returned in the dict. The default
# will be amino acid because this is a lot less noisy.
def return_genbank_dict(gb_file, key = 'annotation', seq_type = 'amino_acid'):
"""Overview: This function will return a dictionary generated from a genbank file with key value supplied by caller.
Returns: A dictionary created by the supplied genbank file (gb_file) indexed off the key value supplied.
    Default: The default key is locus, and this is generally the most useful key type since it is guaranteed to be
    unique within the genbank file. This condition is not necessarily true for any other attribute.
"""
result = {}
seq_record = SeqIO.parse(open(gb_file), "genbank").next()
accession = seq_record.annotations['accessions'][0].split('.')[0]
common_name = seq_record.annotations['organism'].replace(' ', '_')
result.update({'accession': accession})
result.update({'common_name': common_name})
cnt = 0
# loop over the genbank file
unk_cnt = 1
for fnum, feature in enumerate(seq_record.features):
# here i simply check the gene coding type, and identify them in a way that can be used later.
if feature.type == 'CDS' or feature.type == 'ncRNA' or feature.type == 'tRNA' or feature.type == 'mRNA' or feature.type == 'rRNA':
start = feature.location.start
stop = feature.location.end
#print start, stop
strand = feature.strand
synonyms = 'NONE'
'''
try:
gene = feature.qualifiers['gene'][0]
except:
gene = 'unknown'
'''
            # this line might be wrong, just trying to get rid of an unnecessary try/except clause
if 'gene' in feature.qualifiers:
gene = feature.qualifiers['gene'][0]
else:
gene = 'unknown'
if 'gene_synonym' in feature.qualifiers:
synonym_list = feature.qualifiers['gene_synonym'][0].replace(' ', '').split(';')
synonyms = ':'.join(synonym_list)
try:
locus = feature.qualifiers['locus_tag'][0]
except:
try:
locus = feature.qualifiers['gene'][0]
except:
locus = ''
print 'No locus associated. This should never be invoked meaning you are proper fracked. (The gbk file has an error).'
try:
seq = feature.qualifiers['translation']
seq_type = 'Protein'
except:
cnt = cnt + 1
seq = seq_record.seq[start:stop]
seq_type = feature.type
if feature.type == 'CDS':
seq_type = 'Pseudo_Gene'
# attempt to fix an error
if strand == 1:
seq = seq.translate()
else:
seq = seq.reverse_complement().translate()
gc = "%2.1f" % GC(seq_record.seq[start:stop])
# Debugging something odd
#print feature.qualifiers['gene_synonym']
#method = "exact"
if key == 'locus':
result.update({locus: (locus, gene, seq, seq_type, synonyms)})
elif key == 'annotation':
if gene == 'unknown':
new_gene = 'unknown_' + str(unk_cnt)
header = '|'.join([accession, common_name, locus, gene, str(start), str(stop), str(strand), seq_type, synonyms, gc])
result.update({new_gene: [header, ''.join(seq)]})
unk_cnt +=1
else:
header = '|'.join([accession, common_name, locus, gene, str(start), str(stop), str(strand), seq_type, synonyms, gc])
result.update({gene: [header, ''.join(seq)]})
try:
for syn in synonym_list:
result.update({syn: [header, ''.join(seq)]})
except:
pass
#print 'The number of non-protein regions in %s is: %i.' % (common_name, cnt)
return result
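# Shape of the result when key='annotation': each gene name maps to
# [header, sequence], where header is the '|'-joined record
# accession|common_name|locus|gene|start|stop|strand|seq_type|synonyms|gc.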
# This function will allow me to do the main work of make_operon_fasta, but allow parallel
# processing. I will have to make a parallel array, which will take some time to learn. I
# will keep this stub for later to implement.
def parallel_operon_fasta(genome):
organism = genome.split('/')[-1].split('.')[0]
organism_dict_for_recovery = {}
org_dict = return_genbank_dict(genome)
organism_dict_for_recovery.update({organism: org_dict})
return (organism, org_dict)
# This function will make a BLAST query from the parsed operon file. The default behavior of the function is
# to make a query file from all genes in all organisms that are annotated. Later the results will be sorted based on
# the needs of the programmer. The defaulted variables allow a single operon to be chosen individually. The program
# will also store the results of this function in a folder titled blast_query_files, in the recovery folder.
def make_operon_fasta(gene_list, genbank_list, num_processors, folder, ref_list):
pool = Pool(processes = num_processors)
organism_dict_for_recovery = dict(pool.map(parallel_operon_fasta, genbank_list))
protein_match = []
rna_match = []
pseudogene_match = []
missing_list = []
refrence_prot = []
refrence_rna = []
# This list should be updated should other types of RNAs be annotated later as parts of operons (siRNA comes to mind).
RNA_codes = ['rRNA', 'tRNA', 'ncRNA']
for org in organism_dict_for_recovery.keys():
for gene in gene_list:
if gene in organism_dict_for_recovery[org].keys():
if organism_dict_for_recovery[org][gene][0].split('|')[7] == 'Protein':
outseq = SeqRecord(Seq(organism_dict_for_recovery[org][gene][1]),
id=organism_dict_for_recovery[org][gene][0], description = '')
protein_match.append(outseq)
if org in ref_list:
outseq = SeqRecord(Seq(organism_dict_for_recovery[org][gene][1]),
id=organism_dict_for_recovery[org][gene][0], description = '')
refrence_prot.append(outseq)
elif organism_dict_for_recovery[org][gene][0].split('|')[7] in RNA_codes:
outseq = SeqRecord(Seq(organism_dict_for_recovery[org][gene][1]),
id=organism_dict_for_recovery[org][gene][0], description = '')
rna_match.append(outseq)
if org in ref_list:
outseq = SeqRecord(Seq(organism_dict_for_recovery[org][gene][1]),
id=organism_dict_for_recovery[org][gene][0], description = '')
refrence_rna.append(outseq)
elif organism_dict_for_recovery[org][gene][0].split('|')[7] == 'Pseudo_Gene':
outseq = SeqRecord(Seq(organism_dict_for_recovery[org][gene][1]),
id=organism_dict_for_recovery[org][gene][0], description = '')
pseudogene_match.append(outseq)
if org in ref_list:
outseq = SeqRecord(Seq(organism_dict_for_recovery[org][gene][1]),
id=organism_dict_for_recovery[org][gene][0], description = '')
refrence_prot.append(outseq)
else:
print organism_dict_for_recovery[org][gene][0]
else: # The gene is missing, we will look for it at a later stage in the program
item = '\t'.join([org, gene])
missing_list.append(item)
handle = open(folder + 'protein_matches.fa', 'w')
SeqIO.write(protein_match, handle,"fasta")
handle.close()
handle = open(folder + 'rna_matches.fa', 'w')
SeqIO.write(rna_match, handle,"fasta")
handle.close()
handle = open(folder + 'pseudogene_matches.fa', 'w')
SeqIO.write(pseudogene_match, handle,"fasta")
handle.close()
handle = open(folder + 'missing_operon_genes.txt', 'w')
handle.write('\n'.join(missing_list))
handle.close()
# The goal here is to return the refrence fasta files, so that we can do self homolog dict, which is needed to detect
# fusion proteins later.
ref_prot_outfile = folder + 'refrence_prot.fa'
ref_rna_outfile = folder + 'refrence_rna.fa'
#print "len(gene_list)", len(gene_list)
handle = open(ref_prot_outfile, 'w')
SeqIO.write(refrence_prot, handle, "fasta")
#print "len(refrence_prot)", len(refrence_prot)
handle.close()
handle = open(ref_rna_outfile, 'w')
SeqIO.write(refrence_rna, handle, "fasta")
#print "len(refrence_rna)", len(refrence_rna)
handle.close()
return ref_prot_outfile, ref_rna_outfile, organism_dict_for_recovery
# This function validates the operons against the reference; any operon that cannot be located in its entirety is omitted.
# It returns two dicts: one maps each operon to the list of its genes, the other maps each operon to the list of genes
# tagged with the type of product produced.
def categorize_operons(ref_org, genbank_list, operon_dict):
ref_path = [i for i in genbank_list if i.split('/')[-1].split('.')[0] in ref_org]
# dict of the operons that are validated (all constituent genes could be found in the reference)
result = {}
# as above, but each gene is tagged with the type of product produced
result_categorized = {}
#print "operon_dict", len(operon_dict), operon_dict
prot_operons = []
mixed_operons = []
#ref_dict = return_genbank_dict(ref_path)
# TODO: update this to handle a list of reference organisms; currently only the first is used.
ref_dict = return_genbank_dict(ref_path[0])
for operon in sorted(operon_dict.keys()):
operon_error = False
type_list = []
for gene in operon_dict[operon]:
try:
gene_type = ref_dict[gene][0].split('|')[7]
# if protein
if gene_type == 'Protein' or gene_type == 'Pseudo_Gene':
type_list.append("%s:p" % gene)
# if RNA
else:
type_list.append("%s:r" % gene)
#print "RNA", gene, gene_type
except KeyError:
print "Operon", operon, "is not usable and will be removed. The gene in error is", gene
operon_error = True
# no errors
if not operon_error:
result.update({operon: operon_dict[operon]})
result_categorized.update({operon:type_list})
#print "result", result
#print "result_categorized", result_categorized
return result, result_categorized
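# Illustrative output shapes (hypothetical operon/gene names, not from the data):
#
#   result             == {'atpOperon': ['atpA', 'atpB']}
#   result_categorized == {'atpOperon': ['atpA:p', 'atpB:p']}
#
# where ':p' marks a protein product and ':r' an RNA product.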
# TODO: include RNA-coding genes (currently only proteins are handled here).
def return_self_homolog_dict(filtered_operon_dict, reference_fasta_prot, outfile, outfolder):
gene_to_operon_dict = {}
#print "filtered_operon_dict", filtered_operon_dict
for operon in filtered_operon_dict.keys():
for gene in filtered_operon_dict[operon]:
gene_to_operon_dict.update({gene:operon})
#print "booyah", len(filtered_operon_dict), len(gene_to_operon_dict)
# set up databases for the different types of genes
# for proteins -p must be set to true
cmd = "formatdb -i %s -p T -o F" % (reference_fasta_prot)
os.system(cmd)
# for RNA genes -p must be set to false
#cmd = "formatdb -i %s -p F -o F" % (rna_file)
#os.system(cmd)
# blast each set of genes against itself
#cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 9" % (os.sysconf("SC_NPROCESSORS_ONLN"), reference_fasta_prot, reference_fasta_prot, '1e-10', outfolder + 'self_prot.txt')
#print "outfolder + 'self_prot.txt'", outfolder + 'self_prot.txt'
cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 9" % (1, reference_fasta_prot, reference_fasta_prot, '1e-10', outfolder + 'self_prot.txt')
os.system( cmd )
#cmd = "blastall -p blastn -a %i -i %s -d %s -e %s -o %s -m 9" % (os.sysconf("SC_NPROCESSORS_ONLN"), rna_file, rna_file, '1e-10', './recover/self_rna.txt')
#os.system( cmd )
# In this next section we read in the resulting BLAST output and construct a dictionary keyed by gene name that provides
# a list of homologs from the operon set. This list lets the program filter out spurious results. We will miss fusions of
# homologous genes, but hopefully this is a rare event in our dataset, until this can be revised.
lst = [i.strip() for i in open(outfolder + 'self_prot.txt').readlines() if i[0] != '#']
result = {}
for line in lst:
source, hit = line.split('\t')[0:2]
#source_annotation = source.split('|')[2]
source_annotation = source.split('|')[3]
#hit_annotation = hit.split('|')[2]
hit_annotation = hit.split('|')[3]
# we have two genes in the test set that are homologous
if source_annotation != hit_annotation:
if source_annotation not in result.keys():
result.update({source_annotation: [hit_annotation]})
else:
result[source_annotation].append(hit_annotation)
if hit_annotation not in result.keys():
result.update({hit_annotation: [source_annotation]})
else:
result[hit_annotation].append(source_annotation)
#print "result1", result
for key in result.keys():
result.update({key: list(set(result[key]))})
#print "result2", result
json_handle = open(outfile, 'w')
json.dump(result, json_handle)
json_handle.close()
return result
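# Illustrative shape of the returned self-homolog dict (hypothetical gene names):
#
#   {'geneA': ['geneB', 'geneC'], 'geneB': ['geneA'], ...}
#
# i.e. each annotated gene maps to the deduplicated list of other operon genes
# it hit in the self-BLAST.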
def make_operon_individual_gene_fasta(validated_operon_dict, org_annotation_dict, refrence, outfolder):
# I use this in a later stage, but declare here.
intermediate_operon_fasta_folder = outfolder + 'cleaned_gene_fasta/'
if not os.path.isdir(intermediate_operon_fasta_folder):
os.makedirs(intermediate_operon_fasta_folder)
for operon in sorted(validated_operon_dict.keys()):
operon_folder = outfolder + operon + '/'
if not os.path.isdir(operon_folder):
os.makedirs(operon_folder)
for gene in validated_operon_dict[operon]:
gene_fasta_outfile = operon_folder + gene + '.fa'
result = []
for org in org_annotation_dict.keys():
if gene in org_annotation_dict[org].keys():
#print "org_annotation_dict[org][gene]", org_annotation_dict[org][gene]
outseq = SeqRecord(Seq(org_annotation_dict[org][gene][1]),
id=org_annotation_dict[org][gene][0], description = '')
result.append(outseq)
handle = open(gene_fasta_outfile, 'w')
SeqIO.write(result, handle, "fasta")
#print "len(refrence_prot)", len(refrence_prot)
handle.close()
# Step1: Cluster the annotated operon genes using CD-HIT, 60% ident seems to work out just fine in my testing so far.
cd_hit_clustered_file = operon_folder + gene + '_cd_hit.fa'
cmd = "cdhit -i %s -o %s -c %s -n %s >> cdhit_output_dump.txt" % (gene_fasta_outfile, cd_hit_clustered_file, '.6', '4')
os.system(cmd)
# Step2: From the representative sequences in step 1, make a BLAST database so an all vs all BLAST search can be performed.
cmd = "formatdb -i %s -p T -o F" % (cd_hit_clustered_file)
os.system(cmd)
# Step3: Run all vs all BLAST to see if there are representative genes that do not have homologs. These genes will be considered misannotated.
blast_result_file = operon_folder + gene + '_BLAST.txt'
cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 9" % (1, cd_hit_clustered_file, cd_hit_clustered_file, '1e-03', blast_result_file)
os.system( cmd )
# Step4: Read the tabular BLAST file resulting from step 3. Make a count dict keyed on the query (first field) and count the number of hits.
# Any query with fewer than 2 hits is removed, since that gene is likely misannotated in the organism's genbank file. (Genes will self-hit.)
hit_cnt_dict = {}
for hit in [i.split('\t')[0] for i in open(blast_result_file).readlines() if i[0] != '#']:
if hit in hit_cnt_dict.keys():
hit_cnt_dict[hit] += 1
else:
hit_cnt_dict.update({hit:1})
# The situation is this: if you have the same number of hits as you do representatives, then you do not want to remove singletons.
# This block of code allows this to be considered.
sum_dict_vals = sum(hit_cnt_dict.values())
# Data from the cluster representatives is not factored in at this point. I suspect that I should include this information.
# This way I will not remove clusters that have many constituent genes but are not homologous with others in the group.
sequences_to_remove = []
for i in hit_cnt_dict.keys():
#print "i.split('|')[0]", i.split('|')[0], "refrence", refrence
if hit_cnt_dict[i] == 1 and len(hit_cnt_dict) != sum_dict_vals and i.split('|')[0] not in refrence:
#print "misannotation", i
sequences_to_remove.append(i)
# Step5: Filter the results, since at this point we have a list of genes that may be misannotated.
fasta_genes_to_keep = []
handle = open(cd_hit_clustered_file, "rU")
for rec in SeqIO.parse(handle, "fasta"):
if rec.id not in sequences_to_remove:
fasta_genes_to_keep.append(rec)
# Step6: Record the results of this pipeline for the individual gene.
final_outfile = operon_folder + gene + '_misannotations_removed.fa'
handle = open(final_outfile, 'w')
SeqIO.write(fasta_genes_to_keep, handle,"fasta")
handle.close()
# I am going to make a new folder to aggregate all of the final results, then cat them into a single file saved in the main folder
final_outfile = intermediate_operon_fasta_folder + gene + '.fa'
handle = open(final_outfile, 'w')
SeqIO.write(fasta_genes_to_keep, handle,"fasta")
handle.close()
# Step7: Aggregate the results of the previous steps into a single operon query file. This file holds representative gene block genes
# that are filtered for obvious possible misannotations. This likely throws out more genes than necessary, but I prefer caution here.
final_result_file = outfolder + "operon_blast_query.fa"
cat_list = returnRecursiveDirFiles(intermediate_operon_fasta_folder)
with open(final_result_file, 'w') as handle:
for fname in cat_list:
with open(fname) as infile:
handle.write(infile.read())
def main():
start = time.time()
parsed_args = parser_code()
infolder, outfolder, filter_file, num_proc, refrence, operon_file = check_options(parsed_args)
print infolder, outfolder, filter_file, num_proc, refrence, operon_file
# This section of code returns the parsed operon file as a dictionary keyed by operon name.
operon_dict = parse_operon_file(operon_file)
# This section of code returns the full paths to the genbank files of interest.
if filter_file == '':
genbank_list = returnRecursiveDirFiles(infolder)
else:
filter_list = [i.strip() for i in open(filter_file).readlines()]
genbank_list = [i for i in returnRecursiveDirFiles(infolder) if i.split('/')[-1].split('.')[0] in filter_list]
# Check that every operon under consideration can be found in its entirety before using it as part of the
# data set. The validated operon dict contains information about the type (protein/RNA) of gene product.
validated_operon_dict, validated_operon_dict_more_info = categorize_operons(refrence, genbank_list, operon_dict)
# This is a list of all genes in the dataset, regardless of whether we can find them in the reference or not.
unvalidated_gene_list = []
# This is a list of all genes in the validated dataset: every gene of the entire operon was found in the reference organism.
validated_gene_list = []
for operon in operon_dict.keys():
unvalidated_gene_list.extend(operon_dict[operon])
for operon in validated_operon_dict.keys():
validated_gene_list.extend(validated_operon_dict[operon])
ref_prot_outfile, ref_rna_outfile, org_annotation_dict = make_operon_fasta(validated_gene_list, genbank_list, num_proc, outfolder, refrence)
# currently, we are only looking at protein sequences. The following code reflects this.
self_homolog_dict = return_self_homolog_dict(validated_operon_dict, ref_prot_outfile, outfolder + 'operon_homolog_dict.json', outfolder)
# At this point we have the self-homolog dict, a file with all the E. coli operons validated (for one version of K-12),
# and the operons categorized, at least in terms of protein or RNA. Next we take the operons individually, make a folder
# for each in the outfolder (named for the operon), and create a fasta file of all the annotated examples. Once this is
# done, we run CD-HIT on the individual genes to get a set of representatives per gene; clustering is done on the
# amino-acid sequence at 60% identity (matching the CD-HIT call above). Then the clusters are screened to try to remove
# possible mis-annotations.
make_operon_individual_gene_fasta(validated_operon_dict, org_annotation_dict, refrence, outfolder)
# easy way to run this, using all the defaults that make sense
# ./cdhit_operons.py -f phylo_order.txt -p regulonDB/operon_names_and_genes.txt >> test.txt
# ./cdhit_operons.py -f phylo_order.txt -p regulonDB/operon_names_and_genes.txt -o cd_hit_test
# ./cdhit_operons.py -f phylo_order.txt -p regulonDB/operon_names_and_genes.txt -o cd_hit_test_2
print time.time() - start
if __name__ == '__main__':
main()
|
reamdc1/gene_block_evolution_old
|
cdhit_operons.py
|
Python
|
gpl-3.0
| 29,487
|
[
"BLAST"
] |
dd8fcb71d198e4a091254c79915972765a1629227f0b82d14a14720a9d8f747e
|
#!/usr/bin/env python
"""
dirac-rss-set-token
Script that helps setting the token of the elements in RSS. It can acquire or
release the token. If the releaseToken switch is used, no matter what was the
previous token, it will be set to rs_svc ( RSS owns it ). If not set, the token
will be set to whatever username is defined on the proxy loaded while issuing
this command. In the second case, the token lasts one day.
"""
from datetime import datetime, timedelta
# DIRAC
from DIRAC import gLogger, exit as DIRACExit, S_OK, version
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
__RCSID__ = '$Id: $'
subLogger = None
switchDict = {}
#...............................................................................
def registerSwitches():
"""
Registers all switches that can be used while calling the script from the
command line interface.
"""
switches = (
( 'element=', 'Element family to be Synchronized ( Site, Resource or Node )' ),
( 'name=', 'Name, name of the element where the change applies' ),
( 'statusType=', 'StatusType, if none applies to all possible statusTypes' ),
( 'reason=', 'Reason to set the Status' ),
( 'releaseToken', 'Release the token and let the RSS go' )
)
for switch in switches:
Script.registerSwitch( '', switch[ 0 ], switch[ 1 ] )
def registerUsageMessage():
"""
Takes the script __doc__ and adds the DIRAC version to it
"""
hLine = ' ' + '='*78 + '\n'
usageMessage = hLine
usageMessage += ' DIRAC %s\n' % version
usageMessage += __doc__
usageMessage += '\n' + hLine
Script.setUsageMessage( usageMessage )
def parseSwitches():
"""
Parses the arguments passed by the user
"""
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if args:
subLogger.error( "Found the following positional args '%s', but we only accept switches" % args )
subLogger.error( "Please, check documentation below" )
Script.showHelp()
DIRACExit( 1 )
switches = dict( Script.getUnprocessedSwitches() )
switches.setdefault( 'statusType' , None )
switches.setdefault( 'releaseToken', False )
for key in ( 'element', 'name', 'reason' ):
if not key in switches:
subLogger.error( "%s Switch missing" % key )
subLogger.error( "Please, check documentation below" )
Script.showHelp()
DIRACExit( 1 )
if not switches[ 'element' ] in ( 'Site', 'Resource', 'Node' ):
subLogger.error( "Found %s as element switch" % switches[ 'element' ] )
subLogger.error( "Please, check documentation below" )
Script.showHelp()
DIRACExit( 1 )
subLogger.debug( "The switches used are:" )
map( subLogger.debug, switches.iteritems() )
return switches
#...............................................................................
def proxyUser():
"""
Read proxy to get username.
"""
res = getProxyInfo()
if not res[ 'OK' ]:
return res
return S_OK( res[ 'Value' ][ 'username' ] )
def setToken( user ):
'''
Function that gets the user token, sets the validity for it. Gets the elements
in the database for a given name and statusType(s). Then updates the status
of all them adding a reason and the token.
'''
rssClient = ResourceStatusClient()
# This is a bit of a kludge, and certainly needs to be improved.
# To modify a list of elements, we have to do it one by one. However, the
# modify method does not discover the StatusTypes ( which in this script is
# an optional parameter ). So, we get them from the DB and iterate over them.
elements = rssClient.selectStatusElement( switchDict[ 'element' ], 'Status',
name = switchDict[ 'name' ],
statusType = switchDict[ 'statusType' ],
meta = { 'columns' : [ 'StatusType', 'TokenOwner' ] } )
if not elements[ 'OK']:
return elements
elements = elements[ 'Value' ]
# If the list is empty, the elements do not exist in the DB!
if not elements:
subLogger.warn( 'Nothing found for %s, %s, %s' % ( switchDict[ 'element' ],
switchDict[ 'name' ],
switchDict[ 'statusType' ] ) )
return S_OK()
# If we want to release the token
if switchDict[ 'releaseToken' ] != False:
tokenExpiration = datetime.max
newTokenOwner = 'rs_svc'
else:
tokenExpiration = datetime.utcnow().replace( microsecond = 0 ) + timedelta( days = 1 )
newTokenOwner = user
subLogger.info( 'New token : %s until %s' % ( newTokenOwner, tokenExpiration ) )
for statusType, tokenOwner in elements:
# If a user different than the one issuing the command and RSS
if tokenOwner != user and tokenOwner != 'rs_svc':
subLogger.info( '%s(%s) belongs to the user: %s' % ( switchDict[ 'name' ], statusType, tokenOwner ) )
# does the job
result = rssClient.modifyStatusElement( switchDict[ 'element' ], 'Status',
name = switchDict[ 'name' ],
statusType = statusType,
reason = switchDict[ 'reason'],
tokenOwner = newTokenOwner,
tokenExpiration = tokenExpiration )
if not result[ 'OK' ]:
return result
if tokenOwner == newTokenOwner:
msg = '(extended)'
elif newTokenOwner == 'rs_svc':
msg = '(released)'
else:
msg = '(acquired from %s)' % tokenOwner
subLogger.info( '%s:%s %s' % ( switchDict[ 'name' ], statusType, msg ) )
return S_OK()
def main():
"""
Main function of the script. Gets the username from the proxy loaded and sets
the token taking into account that user and the switchDict parameters.
"""
user = proxyUser()
if not user[ 'OK' ]:
subLogger.error( user[ 'Message' ] )
DIRACExit( user[ 'Message' ] )
user = user[ 'Value' ]
res = setToken( user )
if not res[ 'OK' ]:
subLogger.error( res[ 'Message' ] )
DIRACExit( res[ 'Message' ] )
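# Example invocations (illustrative element names; adjust to your infrastructure):
#
#   dirac-rss-set-token --element=Resource --name=ce01.example.org --reason="scheduled downtime"
#   dirac-rss-set-token --element=Site --name=LCG.Example.org --reason="done" --releaseToken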
#...............................................................................
if __name__ == '__main__':
# Logger initialization
subLogger = gLogger.getSubLogger( __file__ )
# Script initialization
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
main()
DIRACExit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
vmendez/DIRAC
|
ResourceStatusSystem/scripts/dirac-rss-set-token.py
|
Python
|
gpl-3.0
| 7,075
|
[
"DIRAC"
] |
7547568ccdd3bc1fb2e6ee218017dc3b28d80cbba7b52b336dafd6d0a7c3508f
|
#!/usr/bin/env python
import os
import discogs_client as dc
from discogs_client.exceptions import HTTPError
USER_AGENT = "discogs-banner +http://github.com/jesseward"
class DiscogsWrapper(object):
"""
Returns an OAuth authentication handle for requests against the
Discogs API.
"""
def __init__(self, config):
self.token_file = os.path.expanduser(config.get('discogs-banner',
'auth_token'))
self.consumer_key = config.get('discogs-auth', 'consumer_key')
self.consumer_secret = config.get('discogs-auth', 'consumer_secret')
if self.is_authenticated:
token, secret = self._get_access_token()
self.discogs = dc.Client(USER_AGENT, consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret, token=token,
secret=secret)
# otherwise handle authentication process.
else:
self.discogs = dc.Client(USER_AGENT)
self._get_request_token()
def _get_request_token(self):
"""
completes the oauth handshakes for the request_token, verification and
access_token. Then persists the access_token to disk.
"""
self.discogs.set_consumer_key(self.consumer_key, self.consumer_secret)
token, secret, url = self.discogs.get_authorize_url()
auth = False
while not auth:
print '=== ACTION REQUIRED ==='
print 'In order to fetch images from discogs, you\'re required to grant the discogs-banner application access to perform actions on behalf of your discogs account.'
print 'Please visit {url} and accept the authentication request'.format(
url=url)
verification_code = raw_input('Verification code :').decode('utf8')
try:
access_token, access_secret = self.discogs.get_access_token(
verification_code)
except HTTPError:
print 'Unable to authenticate.'
raise
if access_token:
auth = True
# persist token to disk.
with open(self.token_file, 'w') as fh:
fh.write('{token}||{secret}'.format(token=access_token, secret=
access_secret))
def _get_access_token(self):
"""
:return: two strings str a = auth token, str b = auth token secret
"""
with open(self.token_file, 'r') as fh:
token, secret = fh.read().split('||')
return token.decode('utf8'), secret.decode('utf8')
@property
def is_authenticated(self):
""" Return True if a token exists on the local file system. """
# Very rudimentary check: simply ensures the file exists on the local disk.
return os.path.isfile(self.token_file)
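# The token file written by _get_request_token() and read back by
# _get_access_token() is a single '||'-delimited line (illustrative values):
#
#   sometoken||somesecret
#
# A typical round trip, where 'config' is a ConfigParser-like object:
#
#   dw = DiscogsWrapper(config)
#   if dw.is_authenticated:
#       token, secret = dw._get_access_token()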
|
jesseward/discogs-banner
|
discogs_banner/discogs_wrapper.py
|
Python
|
mit
| 2,835
|
[
"VisIt"
] |
6ec2cab4374654aac9419b4356ea0457ea478d8e4d6f06d8310a8d04e1ac695f
|
# Taken from: https://raw.githubusercontent.com/lennax/biopython/f_loc5/Bio/SeqUtils/Mapper/MapPositions.py
# Copyright 2012 Lenna X. Peterson <arklenna@gmail.com>
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from __future__ import print_function
from copy import copy
import re
class InvalidPositionError(ValueError):
"Exception for bad coordinates"
class GenomePositionError(InvalidPositionError):
"Exception for bad genome coordinates"
class CDSPositionError(InvalidPositionError):
"Exception for bad CDS coordinates"
class ProteinPositionError(InvalidPositionError):
"Exception for bad protein coordinates"
sentinel = object()
class MapPosition(object):
"""Generic position for coordinate mapping."""
def __init__(self, pos, index=None, **kwargs):
"""Init class from HGVS position.
Parameters
----------
pos : int, str
Position to convert
index : int
Start of counting (e.g. 1 for GenBank or HGVS)
"""
self.index = index
if pos and self.index:
pos -= index
self.pos = pos
@classmethod
def from_dialect(cls, dialect, pos, strand=None):
"""Init class from given dialect.
Parameters
----------
dialect : str
input dialect (HGVS or GenBank)
pos : int, str
dialect position to convert
strand : int
Strand of position (-1 or +1, default None)
Returns
-------
cls
"""
if dialect is None:
return cls(pos, strand=strand)
try:
from_func = getattr(cls, "from_" + dialect.lower())
except AttributeError:
raise ValueError("Dialect '%s' not valid" % dialect)
return from_func(pos, strand)
@classmethod
def from_hgvs(cls, hgvs_pos, strand=None):
"""Init class from HGVS position.
Parameters
----------
hgvs_pos : int, str
HGVS position to convert
strand : int
Strand of position (-1 or +1, default None)
Returns
-------
cls
"""
return cls(hgvs_pos, index=1, strand=strand, post_fmt="*%d")
@classmethod
def from_genbank(cls, gbk_pos, strand=None):
"""Init class from GenBank position.
Parameters
----------
gbk_pos : int, str
GenBank position to convert
strand : int
Strand of position (-1 or +1, default None)
Returns
-------
cls
"""
return cls(gbk_pos, index=1, strand=strand)
def to(self, dialect):
"""Convert position to specified dialect.
Parameters
----------
dialect : str
Output dialect (HGVS or GenBank)
"""
if dialect is None:
return self.to_str()
try:
to_func = getattr(self, "to_" + dialect.lower())
except AttributeError:
raise ValueError("Dialect '%s' not valid" % dialect)
return to_func()
def to_hgvs(self):
"""Convert position to HGVS"""
if self.pos or self.pos == 0:
return self.pos + 1
return None
def to_genbank(self):
"""Convert position to GenBank"""
if self.pos or self.pos == 0:
return self.pos + 1
return None
def to_str(self):
"""Make string representation without conversion"""
return self.pos
def __str__(self):
"""String representation"""
return str(self.to_str())
def __int__(self):
"""Integer representation"""
return self.pos
def __repr__(self):
"""Detailed representation for debugging"""
return "%s(%s)" % (self.__class__.__name__, self.to_str())
class GenomePosition(MapPosition):
"""Genome position for coordinate mapping"""
def __init__(self, gpos, index=None, strand=None, **kwargs):
# FIXME if index is string, error may be raised
if gpos < (index or 0):
raise GenomePositionError("Genome position cannot be negative.")
# call superclass constructor
MapPosition.__init__(self, gpos, index)
self.strand = strand
def __eq__(self, other):
"""Compare equal to other GenomePosition with same pos
or integer equal to pos"""
if isinstance(other, int):
return self.pos == other
return isinstance(other, GenomePosition) and self.pos == other.pos
class ProteinPosition(MapPosition):
"""Protein position for coordinate mapping"""
def __init__(self, ppos, index=None, **kwargs):
# call superclass constructor
MapPosition.__init__(self, ppos, index)
def __eq__(self, other):
"""Compare equal to other ProteinPosition with same pos
or integer equal to pos"""
if isinstance(other, int):
return self.pos == other
return isinstance(other, ProteinPosition) and self.pos == other.pos
class CDSPosition(MapPosition):
"""CDS position for coordinate mapping"""
def __init__(self, cpos, index=None,
pre_fmt=None, post_fmt=None, **kwargs):
# Dispatch types and return anchor, offset
if isinstance(cpos, int):
anchor, offset = self.parse_int(cpos)
elif isinstance(cpos, str):
anchor, offset = self.parse_str(cpos, pre_fmt, post_fmt)
else:
raise CDSPositionError("'%s' is of unknown type" % cpos)
# Set instance anchor and offset
# call superclass constructor
MapPosition.__init__(self, anchor, index)
self._offset = offset
self.validate()
@classmethod
def from_anchor(cls, anchor=None, offset=None):
"""Init CDSPosition with anchor, offset pair.
Parameters
----------
anchor : int
CDS anchor (coordinate of nearest exon position)
offset : int
Offset from nearest exon position
Returns
-------
CDSPosition
"""
if anchor is None:
pos = cls("%+d" % offset)
elif anchor < 0:
raise CDSPositionError("Anchor cannot be negative.")
else:
pos = cls(anchor)
pos.offset = offset
return pos
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, val):
"Validate new offset, then update"
self.validate(offset=val)
self._offset = val
@property
def anchor(self):
return self.pos
@anchor.setter
def anchor(self, val):
"Validate new anchor, then update pos"
self.validate(anchor=val)
self.pos = val
def validate(self, anchor=sentinel, offset=sentinel):
"""Check whether anchor and offset yield a valid position.
Parameters
----------
anchor : int
CDS anchor (coordinate of nearest exon position)
offset : int
Offset from nearest exon position
Returns
-------
bool
"""
if anchor is sentinel:
anchor = self.anchor
if offset is sentinel:
offset = self.offset
if offset == 0:
raise CDSPositionError(
"Offset may not be 0. For no offset, use None.")
if not anchor and anchor != 0 and not offset:
raise CDSPositionError(
"At least one of pos or offset must be defined")
if anchor and anchor < 0:
raise CDSPositionError("CDS anchor may not be negative.")
return True
@property
def pos_type(self):
"Type of CDS position, dynamically determined from values"
# inside CDS
if self.pos or self.pos == 0:
if not self.offset:
return "exon"
return "intron"
# outside CDS
elif self.offset > 0:
return "post-CDS"
else:
return "pre-CDS"
assert False # all integers should return
@property
def sub_dict(self):
if self.pos_type == 'intron':
return {'pos': self.pos, 'offset': self.offset}
if self.pos_type == 'exon':
return {'pos': self.pos}
if self.pos_type == 'post-CDS' or self.pos_type == 'pre-CDS':
return {'offset': self.offset}
fmt_dict = {
'exon': "{pos:d}",
'intron': "{pos:d}{offset:+d}",
'post-CDS': "{offset:+d}",
'pre-CDS': "{offset:+d}",
}
@staticmethod
def _shift_index(pos_dict, idx):
"""Increment value of dict key 'pos' by given index.
Parameters
----------
pos_dict : dict
Dictionary to search for 'pos'
idx : int
Index to add to pos_dict['pos']
Returns
-------
dict
"""
if 'pos' in pos_dict and idx:
pos_dict['pos'] += idx
return pos_dict
def _make_str(self, val_dict=None, fmt_dict=None):
"""Retrieve format string and substitute values.
Parameters
----------
val_dict : dict
Dictionary of values to substitute into string
fmt_dict : dict
Dictionary of format strings for each pos type
Returns
-------
str
"""
# set default dicts if parameter dicts are false
fmt_dict = fmt_dict or self.fmt_dict
val_dict = val_dict or self.sub_dict
return fmt_dict[self.pos_type].format(**val_dict)
@staticmethod
def parse_int(cpos):
"""Parse int to anchor, offset pair
Parameters
----------
cpos : int
Integer position to convert
Returns
-------
tuple
"""
# treat negative int as offset
if cpos < 0:
return (None, cpos)
# treat positive int as anchor
return (cpos, None)
@staticmethod
def parse_str(cpos, pre_fmt, post_fmt):
"""Parse string to anchor, offset pair
Parameters
----------
cpos : str
String position to convert
pre_fmt : str
Format string for pre-CDS positions
post_fmt : str
Format string for post-CDS positions
Returns
-------
tuple
"""
delimiters = r"\+\-"
if post_fmt and "*" in post_fmt:
delimiters += r"\*"
# capturing parentheses cause the split delimiter to be kept in the result
delim_rx = re.compile("([%s])" % delimiters)
parsed = delim_rx.split(cpos, 1)
if len(parsed) == 1: # may be int
return CDSPosition.parse_int(int(cpos))
# 1 split is normally length 2 but delimiter is also kept
elif len(parsed) != 3:
raise CDSPositionError(
"String '%s' not parseable for this position." % cpos)
if parsed[0] == "":
anchor = None
else:
anchor = int(parsed[0])
if post_fmt and parsed[1] == "*" == post_fmt[0]:
parsed[1] = "+"
offset = int("".join(parsed[1:]))
return (anchor, offset)
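# Illustrative parses (a sketch, assuming the HGVS-style post_fmt "*%d" used
# by from_hgvs above):
#
#   parse_str("6+1", None, "*%d")  ->  (6, 1)      # intron: anchor 6, offset +1
#   parse_str("*1",  None, "*%d")  ->  (None, 1)   # post-CDS offset
#   parse_str("-3",  None, "*%d")  ->  (None, -3)  # pre-CDS offset
#   parse_str("6",   None, "*%d")  ->  (6, None)   # plain exon position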
def to_hgvs(self):
"""Convert CDS position to HGVS"""
fmt_dict = copy(self.fmt_dict)
fmt_dict['post-CDS'] = "*{offset:d}"
sub_dict = self._shift_index(self.sub_dict, 1)
return self._make_str(sub_dict, fmt_dict)
def to_genbank(self):
"""Convert CDS position to GenBank"""
sub_dict = self._shift_index(self.sub_dict, 1)
return self._make_str(sub_dict)
def to_str(self):
"""Make string representation of CDS position"""
return self._make_str()
def __int__(self):
"""Integer representation of CDS exon, otherwise NotImplemented"""
if self.pos_type == "exon":
return MapPosition.__int__(self)
return NotImplemented
def __eq__(self, other):
"""Compare equal to other MapPosition with same pos and offset
or int if exon"""
if isinstance(other, int) and self.pos_type == "exon":
return self.pos == other
return isinstance(other, CDSPosition) and \
self.pos == other.pos and \
self.offset == other.offset
if __name__ == "__main__":
def print_pos(pos_obj):
print("object: %s" % pos_obj)
print("repr: %s" % repr(pos_obj))
print("HGVS: %s" % pos_obj.to_hgvs())
print()
g = GenomePosition.from_hgvs(6)
print_pos(g)
test_g = GenomePosition(5)
#test_c = CDSPosition("6+1")
#test_c = CDSPosition.from_hgvs("6+1")
test_c = CDSPosition.from_hgvs("*1")
#test_c = CDSPosition(6)
#test_c = CDSPosition(-1)
print_pos(test_g)
print(test_c.pos_type)
print_pos(test_c)
|
kantale/MutationInfo
|
biopython_mapper/MapPositions.py
|
Python
|
mit
| 13,008
|
[
"Biopython"
] |
917378c4de14541210d214d9220de58f26e1d78a18fc7d4ad19bd1be2b45234c
|
#
# pynestml_frontend.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from pynestml.cocos.co_cos_manager import CoCosManager
from pynestml.codegeneration.nest_codegeneration import analyse_and_generate_neurons, generate_nest_module_code
from pynestml.frontend.frontend_configuration import FrontendConfiguration, InvalidPathException, \
qualifier_store_log_arg, qualifier_module_name_arg, qualifier_logging_level_arg, qualifier_dry_arg, \
qualifier_target_arg, qualifier_path_arg, qualifier_dev_arg
from pynestml.symbols.predefined_functions import PredefinedFunctions
from pynestml.symbols.predefined_types import PredefinedTypes
from pynestml.symbols.predefined_units import PredefinedUnits
from pynestml.symbols.predefined_variables import PredefinedVariables
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
from pynestml.utils.model_parser import ModelParser
from pynestml.utils.model_installer import install_nest as nest_installer
def to_nest(path, target = None, dry = False, logging_level = 'ERROR', module_name = None, store_log = False,
dev = False):
args = list()
args.append(qualifier_path_arg)
args.append(str(path))
if target is not None:
args.append(qualifier_target_arg)
args.append(str(target))
if dry:
args.append(qualifier_dry_arg)
args.append(qualifier_logging_level_arg)
args.append(str(logging_level))
if module_name is not None:
args.append(qualifier_module_name_arg)
args.append(str(module_name))
if store_log:
args.append(qualifier_store_log_arg)
if dev:
args.append(qualifier_dev_arg)
FrontendConfiguration.parse_config(args)
process()
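# A minimal usage sketch (illustrative paths and module name; adjust to your setup):
#
#   to_nest(path='models/izhikevich.nestml', target='build',
#           logging_level='INFO', module_name='izhikevichmodule')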
def install_nest(models_path, nest_path):
# type: (str,str) -> None
"""
This procedure can be used to install generated models into the NEST simulator.
:param models_path: the path to the generated models, should contain the cmake file (automatically generated).
:param nest_path: the path to the NEST installation, should point to the dir where nest is installed, a.k.a.
the -Dwith-nest argument of the make command. The postfix /bin/nest-config is automatically attached.
:return:
"""
nest_installer(models_path, nest_path)
def main(args):
try:
FrontendConfiguration.parse_config(args)
except InvalidPathException:
print('Not a valid path to model or directory: "%s"!' % FrontendConfiguration.get_path())
return
# after all arguments have been collected, start the actual processing
process()
def process():
# init log dir
create_report_dir()
# The parameters handed over appear to be correct; proceed with the main routine
init_predefined()
# now proceed to parse all models
compilation_units = list()
for m_file in FrontendConfiguration.get_files():
parsed_unit = ModelParser.parse_model(m_file)
if parsed_unit is not None:
compilation_units.append(parsed_unit)
# generate a list of all neurons
neurons = list()
for compilationUnit in compilation_units:
neurons.extend(compilationUnit.get_neuron_list())
# check whether two neurons with the same name have been defined across different files
CoCosManager.check_not_two_neurons_across_units(compilation_units)
# now exclude those which are broken, i.e. have errors.
if not FrontendConfiguration.is_dev():
for neuron in neurons:
if Logger.has_errors(neuron):
code, message = Messages.get_neuron_contains_errors(neuron.get_name())
Logger.log_message(neuron=neuron, code=code, message=message,
error_position=neuron.get_source_position(),
log_level=LoggingLevel.INFO)
neurons.remove(neuron)
if not FrontendConfiguration.is_dry_run():
analyse_and_generate_neurons(neurons)
generate_nest_module_code(neurons)
else:
code, message = Messages.get_dry_run()
Logger.log_message(neuron=None, code=code, message=message, log_level=LoggingLevel.INFO)
if FrontendConfiguration.store_log:
store_log_to_file()
return
def init_predefined():
# initialize the predefined elements
PredefinedUnits.register_units()
PredefinedTypes.register_types()
PredefinedFunctions.register_functions()
PredefinedVariables.register_variables()
def create_report_dir():
if not os.path.isdir(os.path.join(FrontendConfiguration.get_target_path(), '..', 'report')):
os.makedirs(os.path.join(FrontendConfiguration.get_target_path(), '..', 'report'))
def store_log_to_file():
with open(str(os.path.join(FrontendConfiguration.get_target_path(), '..', 'report',
'log')) + '.txt', 'w+') as f:
f.write(str(Logger.get_json_format()))
if __name__ == '__main__':
main(sys.argv[1:])
|
kperun/nestml
|
pynestml/frontend/pynestml_frontend.py
|
Python
|
gpl-2.0
| 5,775
|
[
"NEURON"
] |
fbf20ef14c8eda55100010a40ec8c5126cb02e7428b7a9e5e1007db8733f2229
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
freeseer - vga/presentation capture software
Copyright (C) 2013 Free and Open Source Software Learning Centre
http://fosslc.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer/
@author: Thanh Ha
'''
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QComboBox
from PyQt4.QtGui import QFormLayout
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtGui import QStackedWidget
from PyQt4.QtGui import QToolButton
from PyQt4.QtGui import QWidget
class ConfigWidget(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
layout = QFormLayout()
self.setLayout(layout)
self.label = QLabel("Audio Input")
self.inputLayout = QHBoxLayout()
self.combobox = QComboBox()
self.combobox.setMinimumWidth(150)
self.inputSettingsToolButton = QToolButton()
self.inputSettingsToolButton.setText("Settings")
configIcon = QIcon.fromTheme("preferences-other")
self.inputSettingsToolButton.setIcon(configIcon)
self.inputSettingsToolButton.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
self.inputSettingsToolButton.setToolButtonStyle(Qt.ToolButtonIconOnly)
self.inputSettingsStack = QStackedWidget()
blankWidget = QWidget()
self.inputSettingsStack.addWidget(blankWidget)
self.inputSettingsStack.addWidget(self.inputSettingsToolButton)
self.inputLayout.addWidget(self.combobox)
self.inputLayout.addWidget(self.inputSettingsStack)
layout.addRow(self.label, self.inputLayout)
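# Minimal standalone preview of this widget (illustrative; not part of the
# plugin wiring):
#
#   from PyQt4.QtGui import QApplication
#   app = QApplication([])
#   w = ConfigWidget()
#   w.show()
#   app.exec_()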
|
Freeseer/freeseer
|
src/freeseer/plugins/audiomixer/audiopassthrough/widget.py
|
Python
|
gpl-3.0
| 2,366
|
[
"VisIt"
] |
a72defc90ba5f1488e6c147aff2d48ba4355be7edc32e3792878f95d0ce5dd1f
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import os
from setuptools import setup, find_packages
version = '0.0.9'
description = "CP2K tools & scripts"
cur_dir = os.path.dirname(__file__)
try:
long_description = open(os.path.join(cur_dir, 'README.md')).read()
except:
long_description = description
setup(
name="cp2k-tools",
version=version,
url='http://github.com/dev-zero/cp2k-tools',
license='GPL3',
description=description,
long_description=long_description,
author=u'Tiziano Müller',
author_email='tiziano.mueller@chem.uzh.ch',
packages=find_packages(),
install_requires=[
'setuptools',
'docopt',
'numpy',
'parsimonious>=0.8',
'click>=6.7',
'regex>=2017.09.23',
],
entry_points={
'console_scripts': [
'oq = cp2k_tools.cli:oq',
'extract_last_frame = cp2k_tools.cli:extract_last_frame',
'generate_input = cp2k_tools.cli:generate_input',
'cp2k_inp2json = cp2k_tools.parser.input_cli:cli',
'cp2k_json2inp = cp2k_tools.generator.cli:cli',
'cp2k_xyz_restart_cleaner = cp2k_tools.parser.xyz_cli:xyz_restart_cleaner',
],
},
scripts=[
'scripts/cp2k_bs2csv.py',
'scripts/cp2k_pdos.py',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Programming Language :: Python',
],
test_suite = 'tests'
)
|
dev-zero/cp2k-tools
|
setup.py
|
Python
|
apache-2.0
| 1,640
|
[
"CP2K"
] |
7982f4dd1a29917088de2436c9f26804ea1ebd089b5092a388d52bc0ac4243da
|
#!/usr/bin/env python
""" update local cfg
"""
import os
from DIRAC.Core.Base.Script import Script
Script.setUsageMessage("\n".join([__doc__.split("\n")[1], "Usage:", " %s [options] ... DB ..." % Script.scriptName]))
Script.parseCommandLine()
args = Script.getPositionalArgs()
setupName = args[0]
# Where to store outputs
if not os.path.isdir("%s/sandboxes" % setupName):
os.makedirs("%s/sandboxes" % setupName)
# now updating the CS
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
csAPI = CSAPI()
csAPI.setOption("Systems/WorkloadManagement/Production/Services/SandboxStore/BasePath", "%s/sandboxes" % setupName)
csAPI.setOption("Systems/WorkloadManagement/Production/Services/SandboxStore/LogLevel", "DEBUG")
# Now setting a SandboxSE as the following:
# ProductionSandboxSE
# {
# BackendType = DISET
# AccessProtocol = dips
# DIP
# {
# Host = localhost
# Port = 9196
# ProtocolName = DIP
# Protocol = dips
# Path = /scratch/workspace/%s/sandboxes % setupName
# Access = remote
# }
# }
res = csAPI.createSection("Resources/StorageElements/")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Resources/StorageElements/ProductionSandboxSE")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/BackendType", "DISET")
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/AccessProtocol", "dips")
res = csAPI.createSection("Resources/StorageElements/ProductionSandboxSE/DIP")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/DIP/Host", "localhost")
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/DIP/Port", "9196")
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/DIP/ProtocolName", "DIP")
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/DIP/Protocol", "dips")
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/DIP/Access", "remote")
csAPI.setOption("Resources/StorageElements/ProductionSandboxSE/DIP/Path", "%s/sandboxes" % setupName)
# Now setting a FileCatalogs section as the following:
# FileCatalogs
# {
# FileCatalog
# {
# AccessType = Read-Write
# Status = Active
# Master = True
# }
# TSCatalog
# {
# CatalogType = TSCatalog
# AccessType = Write
# Status = Active
# CatalogURL = Transformation/TransformationManager
# }
# }
res = csAPI.createSection("Resources/FileCatalogs/")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Resources/FileCatalogs/FileCatalog")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/FileCatalogs/FileCatalog/AccessType", "Read-Write")
csAPI.setOption("Resources/FileCatalogs/FileCatalog/Status", "Active")
csAPI.setOption("Resources/FileCatalogs/FileCatalog/Master", "True")
res = csAPI.createSection("Resources/FileCatalogs/TSCatalog")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/FileCatalogs/TSCatalog/CatalogType", "TSCatalog")
csAPI.setOption("Resources/FileCatalogs/TSCatalog/AccessType", "Write")
csAPI.setOption("Resources/FileCatalogs/TSCatalog/Status", "Active")
csAPI.setOption("Resources/FileCatalogs/TSCatalog/CatalogURL", "Transformation/TransformationManager")
res = csAPI.createSection("Resources/FileCatalogs/MultiVOFileCatalog")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/FileCatalogs/MultiVOFileCatalog/CatalogType", "FileCatalog")
csAPI.setOption("Resources/FileCatalogs/MultiVOFileCatalog/AccessType", "Read-Write")
csAPI.setOption("Resources/FileCatalogs/MultiVOFileCatalog/Status", "Active")
csAPI.setOption("Resources/FileCatalogs/MultiVOFileCatalog/CatalogURL", "DataManagement/MultiVOFileCatalog")
# Now setting up the following option:
# Resources
# {
# Sites
# {
# DIRAC
# {
# DIRAC.Jenkins.ch
# {
# CEs
# {
# jenkins.cern.ch
# {
# CEType = Test
# Queues
# {
# jenkins-queue_not_important
# {
# maxCPUTime = 200000
# SI00 = 2400
# }
# }
# }
# }
# }
# }
# }
for st in [
"Resources/Sites/DIRAC/",
"Resources/Sites/DIRAC/DIRAC.Jenkins.ch",
"Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch",
"Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch/Queues"
"Resources/Sites/DIRAC/DIRAC.Jenkins.ch/jenkins.cern.ch/Queues/jenkins-queue_not_important",
"Resources/StorageElements",
"Resources/StorageElements/SE-1",
"Resources/StorageElements/SE-1/DIP",
"Resources/StorageElements/SE-2",
"Resources/StorageElements/SE-2/DIP",
]:
res = csAPI.createSection(st)
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/CEType", "Test")
csAPI.setOption(
"Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/Queues/jenkins-queue_not_important/maxCPUTime", "200000"
)
csAPI.setOption(
"Resources/Sites/DIRAC/DIRAC.Jenkins.ch/CEs/jenkins.cern.ch/Queues/jenkins-queue_not_important/SI00", "2400"
)
csAPI.setOption("Resources/StorageElements/SE-1/AccessProtocol", "dips")
csAPI.setOption("Resources/StorageElements/SE-1/DIP/Host", "server")
csAPI.setOption("Resources/StorageElements/SE-1/DIP/Port", "9148")
csAPI.setOption("Resources/StorageElements/SE-1/DIP/Protocol", "dips")
csAPI.setOption("Resources/StorageElements/SE-1/DIP/Path", "/DataManagement/SE-1")
csAPI.setOption("Resources/StorageElements/SE-1/DIP/Access", "remote")
csAPI.setOption("Resources/StorageElements/SE-2/AccessProtocol", "dips")
csAPI.setOption("Resources/StorageElements/SE-2/DIP/Host", "server")
csAPI.setOption("Resources/StorageElements/SE-2/DIP/Port", "9147")
csAPI.setOption("Resources/StorageElements/SE-2/DIP/Protocol", "dips")
csAPI.setOption("Resources/StorageElements/SE-2/DIP/Path", "/DataManagement/SE-2")
csAPI.setOption("Resources/StorageElements/SE-2/DIP/Access", "remote")
# Setting up S3 resources for the Test_Resources_S3.py
# Resources
# {
# StorageElements
# {
# S3-DIRECT
# {
# AccessProtocols = s3
# WriteProtocols = s3
# S3
# {
# Host = s3-direct
# Port = 9090
# Protocol = s3
# Path = myFirstBucket
# Access = remote
# SecureConnection = False
# Aws_access_key_id = fakeId #useless
# Aws_secret_access_key = fakeKey #useless
# }
# }
# }
# }
for st in [
"Resources/StorageElements",
"Resources/StorageElements/S3-DIRECT",
"Resources/StorageElements/S3-DIRECT/S3",
]:
res = csAPI.createSection(st)
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/StorageElements/S3-DIRECT/AccessProtocols", "s3")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/WriteProtocols", "s3")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Host", "s3-direct")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Port", "9090")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Protocol", "s3")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Path", "myFirstBucket")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Access", "remote")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/SecureConnection", "False")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Aws_access_key_id", "FakeId")
csAPI.setOption("Resources/StorageElements/S3-DIRECT/S3/Aws_secret_access_key", "True")
# Setting up S3 indirect resources for the Test_Resources_S3.py
# The Aws_access_key_id and Aws_secret_access_key have to be in the server local file only
# so cannot be added here
# Resources
# {
# StorageElements
# {
# S3-INDIRECT
# {
# AccessProtocols = s3
# WriteProtocols = s3
# S3
# {
# Host = s3-direct
# Port = 9090
# Protocol = s3
# Path = myFirstBucket
# Access = remote
# SecureConnection = False
# }
# }
# }
# }
for st in [
"Resources/StorageElements",
"Resources/StorageElements/S3-INDIRECT",
"Resources/StorageElements/S3-INDIRECT/S3",
]:
res = csAPI.createSection(st)
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/AccessProtocols", "s3")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/WriteProtocols", "s3")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/S3/Host", "s3-direct")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/S3/Port", "9090")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/S3/Protocol", "s3")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/S3/Path", "myFirstBucket")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/S3/Access", "remote")
csAPI.setOption("Resources/StorageElements/S3-INDIRECT/S3/SecureConnection", "False")
# Now setting up the following option:
# Resources
# {
# FTSEndpoints
# {
# FTS3
# {
# JENKINS-FTS3 = https://jenkins-fts3.cern.ch:8446
# }
# }
for st in ["Resources/FTSEndpoints/", "Resources/FTSEndpoints/FTS3/"]:
res = csAPI.createSection(st)
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Resources/FTSEndpoints/FTS3/JENKINS-FTS3", "https://jenkins-fts3.cern.ch:8446")
# Now setting a RSS section as the following inside /Operations/Defaults:
#
# ResourceStatus
# {
# Config
# {
# Cache = 600
# State = Active
# FromAddress = fstagni@cern.ch
# notificationGroups = ShiftersGroup
# StatusTypes
# {
# default = all
# StorageElement = ReadAccess
# StorageElement += WriteAccess
# StorageElement += CheckAccess
# StorageElement += RemoveAccess
# }
# }
# Policies
# {
# AlwaysActiveForResource
# {
# matchParams
# {
# element = Resource
# }
# policyType = AlwaysActive
# }
# AlwaysBannedForSE1SE2
# {
# matchParams
# {
# name = SE1,SE2
# }
# policyType = AlwaysBanned
# }
# AlwaysBannedForSite
# {
# matchParams
# {
# element = Site
# }
# policyType = AlwaysBanned
# }
# }
# }
res = csAPI.createSection("Operations/")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults/ResourceStatus")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Config")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/ResourceStatus/Config/Cache", "600")
csAPI.setOption("Operations/Defaults/ResourceStatus/Config/State", "Active")
csAPI.setOption("Operations/Defaults/ResourceStatus/Config/FromAddress", "fstagni@cern.ch")
csAPI.setOption("Operations/Defaults/ResourceStatus/Config/notificationGroups", "ShiftersGroup")
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Config/StatusTypes")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/ResourceStatus/Config/StatusTypes/default", "all")
csAPI.setOption(
"Operations/Defaults/ResourceStatus/Config/StatusTypes/StorageElement",
"ReadAccess,WriteAccess,CheckAccess,RemoveAccess",
)
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/policyType", "AlwaysActive")
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/matchParams")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/ResourceStatus/Policies/AlwaysActiveForResource/matchParams/element", "Resource")
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/policyType", "AlwaysBanned")
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/matchParams")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSE1SE2/matchParams/name", "SE1,SE2")
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/matchParams")
csAPI.setOption("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/policyType", "AlwaysBanned")
csAPI.setOption("Operations/Defaults/ResourceStatus/Policies/AlwaysBannedForSite/matchParams/element", "Site")
# Now setting the catalog list in Operations/Defaults/Services/Catalogs/CatalogList
#
# Services
# {
# Catalogs
# {
# CatalogList = FileCatalog, TSCatalog, MultiVOFileCatalog
# }
# }
res = csAPI.createSection("Operations/Defaults/Services")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults/Services/Catalogs")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Operations/Defaults/Services/Catalogs/CatalogList")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/Services/Catalogs/CatalogList", "FileCatalog, TSCatalog, MultiVOFileCatalog")
# Adding DataManagement section of Operations
# Operations
# {
# Defaults
# {
# DataManagement
# {
# RegistrationProtocols = srm,dips,s3
# }
# }
# }
res = csAPI.createSection("Operations/Defaults/DataManagement")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Operations/Defaults/DataManagement/RegistrationProtocols", "srm,dips,s3")
# Now setting the Registry section
#
# Registry
# {
# VO
# {
# Jenkins
# {
# VOMSName = myVOMS
# }
# }
# }
res = csAPI.createSection("Registry")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Registry/VO/")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Registry/VO/Jenkins")
if not res["OK"]:
print(res["Message"])
exit(1)
res = csAPI.createSection("Registry/VO/Jenkins/VOMSName")
if not res["OK"]:
print(res["Message"])
exit(1)
csAPI.setOption("Registry/VO/Jenkins/VOMSName", "myVOMS")
csAPI.setOption("Registry/Groups/jenkins_fcadmin/VO", "Jenkins")
csAPI.setOption("Registry/Groups/jenkins_user/VO", "Jenkins")
# Final action: commit in CS
res = csAPI.commit()
if not res["OK"]:
print(res["Message"])
exit(1)
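# A minimal refactoring sketch (hypothetical helper, not part of the original
# script): the createSection / exit-on-error pattern above repeats many times
# and could be wrapped once, assuming the same CSAPI result-dict convention.
def _create_section_or_exit(api, path):
    """Create a CS section and exit on failure, mirroring the checks above."""
    result = api.createSection(path)
    if not result["OK"]:
        print(result["Message"])
        exit(1)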
|
DIRACGrid/DIRAC
|
tests/Jenkins/dirac-cfg-update-server.py
|
Python
|
gpl-3.0
| 15,704
|
[
"DIRAC"
] |
4350d8a380bed56e21a8bc0b4da46627633e0e5b1b32cbf4219e6e514e4b6173
|
import numpy as np
from scipy.stats import gaussian_kde, mvn
from .model_GP import normal_pdf, GP_K, Geweke_Z
def logistic(y, C=2):
"""logistic function: transfer y to psi
"""
return np.exp(y) / (C - 1.0 + np.exp(y))
def logit(psi, C=2):
"""logistic function: transfer psi to y
"""
return np.log((C-1.0) * psi / (1-psi))
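# Round-trip sketch: logit and logistic are inverse maps between psi in (0, 1)
# and the unconstrained y, for a fixed number of isoforms C:
# >>> y = logit(0.3, C=2)          # log(0.3 / 0.7)
# >>> logistic(y, C=2)             # recovers 0.3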
def miso_BF(sample1, sample2, max_diff=0.0, bootstrap_size=None,
max_bf=1e12, min_unique=5, log=True):
"""
Bayes factor calculator in MISO.
Assumptions
-----------
    prior :
        uniform distribution for psi in both conditions.
        The prior distribution of the difference is then
        derived analytically and has a triangular shape.
    posterior :
        the posterior of the difference is smoothed by a
        Gaussian kernel density estimate.
Parameters
----------
sample1 : array_like or list
samples of psi in condition 1.
sample2 : array_like or list
samples of psi in condition 2.
    max_diff : float or int
        null hypothesis: abs(condition1 - condition2) <= max_diff.
        max_diff ranges from 0 to 1.
bootstrap_size : int or None
if None, don't do bootstrapping.
if int, resampling with size of bootstrap_size.
max_bf : float or int
upper bound of Bayes factor.
    min_unique : int
        minimum number of unique samples in each condition.
        If either condition has fewer unique samples than
        min_unique, the Null hypothesis is supported.
Returns
-------
    bayes_factor : float
        The Bayes factor supporting the alternative hypothesis
        abs(condition1 - condition2) > max_diff
"""
    if min(len(np.unique(sample1)), len(np.unique(sample2))) < min_unique:
print("Improper sampling of psi! Supporting Null hypothesis.")
return 0 #None
if bootstrap_size is None:
idx1 = np.arange(len(sample1))
idx2 = np.arange(len(sample2))
else:
idx1 = np.random.randint(len(sample1), size=bootstrap_size)
idx2 = np.random.randint(len(sample2), size=bootstrap_size)
samp_diff = sample1[idx1] - sample2[idx2]
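    # If psi1 and psi2 are independently Uniform(0, 1), the density of the
    # difference d = psi1 - psi2 is the triangle f(d) = 1 - |d| on [-1, 1],
    # which is what the lambda below encodes.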
prior_density = lambda x: 1 + x if x <= 0 else 1 - x
posterior_density = gaussian_kde(samp_diff, bw_method='scott')
    #TODO: the decimals make a huge difference!
    #Maybe the interval can be less sensitive!
if max_diff == 0:
diff_prior = np.log(prior_density(max_diff))
diff_posterior = np.log(posterior_density.evaluate(max_diff)[0])
else:
diff_prior = np.log((prior_density(0.0) + prior_density(max_diff)) * (max_diff))
diff_posterior = np.log(posterior_density.integrate_box_1d(-max_diff, max_diff))
if diff_posterior == -np.inf:
bayes_factor = max_bf
elif diff_posterior == np.inf:
bayes_factor = -max_bf
else:
bayes_factor = diff_prior - diff_posterior
if log is False:
bayes_factor = np.exp(bayes_factor)
return bayes_factor
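# Usage sketch (assumption: psi samples are 1-D numpy arrays with values
# strictly inside (0, 1)):
# >>> s1 = np.random.beta(2.0, 8.0, size=1000)
# >>> s2 = np.random.beta(8.0, 2.0, size=1000)
# >>> log_bf = miso_BF(s1, s2, max_diff=0.1)  # log BF for |psi1 - psi2| > 0.1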
def dice_BF(sample1, sample2, bootstrap_size=None, max_bf=1e12,
min_unique=5, log=True):
"""
Bayes factor calculator in DICE-diff.
Assumptions
-----------
    prior :
        uniform distribution for psi in both conditions.
        The prior distribution of the difference is then
        derived analytically and has a triangular shape.
    posterior :
        the posterior of the difference is smoothed by a
        Gaussian kernel density estimate.
Parameters
----------
sample1 : array_like or list
samples of psi in condition 1.
sample2 : array_like or list
samples of psi in condition 2.
bootstrap_size : int or None
if None, don't do bootstrapping.
if int, resampling with size of bootstrap_size.
max_bf : float or int
upper bound of Bayes factor.
    min_unique : int
        minimum number of unique samples in each condition.
        If either condition has fewer unique samples than
        min_unique, the Null hypothesis is supported.
Returns
-------
    bayes_factor : float
        The Bayes factor supporting the alternative hypothesis
        abs(condition1 - condition2) > max_diff
"""
    sample1 = np.asarray(sample1)
    sample2 = np.asarray(sample2)
    if len(sample1.shape) == 1:
        sample1 = sample1.reshape(-1, 1)
        sample2 = sample2.reshape(-1, 1)
k = sample1.shape[1]
# if min(len(np.unique(sample1)), len(np.unique(sample1))) < min_unique:
# print("Improper sampling of psi! Supporting Null hypothesis.")
# return 0 #None
if bootstrap_size is None:
idx1 = np.arange(len(sample1))
idx2 = np.arange(len(sample2))
else:
idx1 = np.random.randint(len(sample1), size=bootstrap_size)
idx2 = np.random.randint(len(sample2), size=bootstrap_size)
samp_diff = sample1[idx1,:] - sample2[idx2,:]
posterior_density = gaussian_kde(samp_diff.T, bw_method='scott')
    #TODO: the decimals make a huge difference!
    #Maybe the interval can be less sensitive!
mu = np.zeros(k)
cov = GP_K(np.arange(k), [3.0, 5.0])
diff_prior_log = normal_pdf(mu, mu, 2*cov, log=True)
diff_posterior_log = np.log(posterior_density.evaluate(mu)[0])
if diff_posterior_log == -np.inf:
bayes_factor = max_bf
elif diff_posterior_log == np.inf:
bayes_factor = -max_bf
else:
bayes_factor = diff_prior_log - diff_posterior_log
if log is False:
bayes_factor = np.exp(bayes_factor)
return bayes_factor
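# Usage sketch (assumption: inputs are (n_samples, k) arrays of logit-psi
# values; GP_K and normal_pdf come from model_GP as imported above):
# >>> s1 = np.random.randn(500, 4)
# >>> s2 = np.random.randn(500, 4) + 1.0
# >>> log_bf = dice_BF(s1, s2)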
def dicediff_BF(samples1, samples2, bio_cov=None, bootstrap_size=None,
max_diff=0.0, max_bf=1e12, min_unique=5, log=True,
post_mode="sample", prior_mode="GP", prior_param=[3, 5],
time_corr=True, is_logit=True):
"""
Bayes factor calculator in DICE-diff.
Assumptions
-----------
    prior : uniform or GP
        uniform: uniform distribution for psi in both conditions;
        the prior of the difference is derived analytically and
        has a triangular shape.
    posterior : mean, kde, or sample
        the posterior of the difference is smoothed by a
        Gaussian kernel density estimate.
Parameters
----------
    samples1 : array_like or list of arrays
        samples of psi in condition 1, one array per replicate.
    samples2 : array_like or list of arrays
        samples of psi in condition 2, one array per replicate.
    max_diff : float or int
        null hypothesis: abs(condition1 - condition2) <= max_diff.
        max_diff ranges from 0 to 1.
bootstrap_size : int or None
if None, don't do bootstrapping.
if int, resampling with size of bootstrap_size.
max_bf : float or int
upper bound of Bayes factor.
    min_unique : int
        minimum number of unique samples in each condition.
        If either condition has fewer unique samples than
        min_unique, the Null hypothesis is supported.
Returns
-------
    bayes_factor : float
        The Bayes factor supporting the alternative hypothesis
        abs(condition1 - condition2) > max_diff
    #TODO: the decimals make a huge difference!
    #Maybe the interval can be less sensitive!
"""
    if not isinstance(samples1, list):
        samples1 = [samples1]
    if not isinstance(samples2, list):
        samples2 = [samples2]
    for i in range(len(samples1)):
        samples1[i] = np.asarray(samples1[i])
        if len(samples1[i].shape) == 1:
            samples1[i] = samples1[i].reshape(-1, 1)
    for i in range(len(samples2)):
        samples2[i] = np.asarray(samples2[i])
        if len(samples2[i].shape) == 1:
            samples2[i] = samples2[i].reshape(-1, 1)
k = samples1[0].shape[1]
for samp in samples1 + samples2:
for j in range(samp.shape[1]):
if len(np.unique(samp[:,j])) < min_unique:
print("Improper sampling of psi! Supporting Null hypothesis.")
return 0 #None
if is_logit is False:
for s in range(len(samples1)):
samples1[s] = logistic(samples1[s])
for s in range(len(samples2)):
samples2[s] = logistic(samples2[s])
samp_mu = np.zeros((len(samples1)*len(samples2), k))
samp_cov = np.zeros((k, k))
samp_diff = np.zeros((0, k))
for i in range(len(samples1)):
for j in range(len(samples2)):
if bootstrap_size is None:
# idx1 = np.random.permutation(len(samples1[i]))
# idx2 = np.random.permutation(len(samples2[j]))
idx1 = np.arange(len(samples1[i]))
idx2 = np.arange(len(samples2[j]))
else:
idx1 = np.random.randint(len(samples1[i]), size=bootstrap_size)
idx2 = np.random.randint(len(samples2[j]), size=bootstrap_size)
samp_temp = samples1[i][idx1,:] - samples2[j][idx2,:]
samp_cov += np.cov(samp_temp.T)
            samp_mu[i*len(samples2)+j,:] = np.mean(samp_temp, axis=0)
samp_diff = np.append(samp_diff, samp_temp, axis=0)
# define biological variance
if bio_cov is None:
if samp_mu.shape[0] == 1:
bio_cov = np.diag(np.zeros(k))
else:
bio_cov = np.diag(np.var(samp_mu, axis=0))
sum_cov = samp_cov / samp_mu.shape[0] + bio_cov
mu = np.zeros(k)
cov = GP_K(np.arange(k), prior_param)
if time_corr is False:
cov = np.diag(np.diag(cov))
sum_cov = np.diag(np.diag(sum_cov))
if max_diff == 0:
if prior_mode.lower() == "uniform":
diff_prior_log = 0.0 # check it!!!
else: #GP
diff_prior_log = normal_pdf(mu, mu, 2*cov, log=True)
if post_mode.lower() == "kde":
posterior_density = gaussian_kde(samp_diff.T, bw_method='scott')
diff_posterior_log = np.log(posterior_density.evaluate(mu)[0])
elif post_mode.lower() == "mean":
diff_posterior_log = np.log(np.mean(normal_pdf(
samp_diff.mean(axis=0), mu, sum_cov, log=False)))
else: #sample
diff_posterior_log = np.log(np.mean(normal_pdf(
samp_diff, mu, sum_cov, log=False)))
else: #integral
upp = np.ones(k) * max_diff
diff_prior_log = np.log(mvn.mvnun(-upp, upp, mu, 2*cov)[0])
temp = np.zeros(samp_diff.shape[0])
for i in range(temp.shape[0]):
temp[i] = np.log(mvn.mvnun(-upp, upp, samp_diff[i,:], sum_cov)[0])
diff_posterior_log = np.sum(temp)
if diff_posterior_log == -np.inf:
bayes_factor = max_bf
elif diff_posterior_log == np.inf:
bayes_factor = -max_bf
else:
bayes_factor = diff_prior_log - diff_posterior_log
if log is False:
bayes_factor = np.exp(bayes_factor)
return bayes_factor
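# Usage sketch (assumption: each condition is a list of replicate
# (n_samples, k) arrays of logit-psi values across k time points):
# >>> rep1 = [np.random.randn(500, 3), np.random.randn(500, 3)]
# >>> rep2 = [np.random.randn(500, 3) + 0.5, np.random.randn(500, 3) + 0.5]
# >>> log_bf = dicediff_BF(rep1, rep2, post_mode="sample", prior_mode="GP")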
def get_BioVar(samp_mean1, samp_mean2, share=True, diagonal=True):
"""get the biological variance.
"""
N = samp_mean1.shape[0]
T = samp_mean1.shape[1]
R1 = samp_mean1.shape[2]
R2 = samp_mean2.shape[2]
mean_n_diff = np.zeros((N, T*2+1))
mean_n_diff[:,:T] = samp_mean1.mean(axis=2)
mean_n_diff[:,T:T*2] = samp_mean2.mean(axis=2)
_mean = mean_n_diff[:,:T] + mean_n_diff[:,T:T*2]
_diff = mean_n_diff[:,:T] - mean_n_diff[:,T:T*2]
mean_n_diff[:,T*2] = np.sqrt(np.sum(_diff**2, axis=1))
samp_diff_all = np.zeros((N,T,R1*R2))
for i in range(R1):
for j in range(R2):
            samp_diff_all[:,:,i*R2+j] = samp_mean1[:,:,i] - samp_mean2[:,:,j]
bio_var = np.zeros((N, T, T))
if R1*R2 > 1:
        for i in range(N):
if diagonal is True:
bio_var[i,:,:] = np.diag(np.var(samp_diff_all[i,:,:], axis=1))
else:
bio_var[i,:,:] = np.cov(samp_diff_all[i,:,:])
if share is True and R1*R2 > 1:
sort_idx = np.argsort(np.sum(_mean**2, axis=1))
for i in range(N):
idx1 = max(0, i-250)
idx2 = min(N, i+250)
idx_use = sort_idx[idx1:idx2]
bio_var[sort_idx[i],:,:] = np.mean(bio_var[idx_use,:,:], axis=0)
return bio_var, mean_n_diff
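# Shape sketch (assumption: inputs are (N genes, T time points, R replicates)
# arrays of per-replicate posterior means):
# >>> m1 = np.random.rand(100, 5, 2)
# >>> m2 = np.random.rand(100, 5, 3)
# >>> bio_var, mean_n_diff = get_BioVar(m1, m2)
# >>> bio_var.shape, mean_n_diff.shape   # ((100, 5, 5), (100, 11))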
|
huangyh09/diceseq
|
diceseq/models/bayes_factor.py
|
Python
|
apache-2.0
| 12,071
|
[
"Gaussian"
] |
d233ee2e90a80b5b8cb75ba637b9b88c857565bba14061ef7c1df8d4cee61ebc
|
# -*- coding: utf-8 -*-
"""
@namespace Area
Tools and events manipulation
Copyright 2007, NATE-LSI-EPUSP
Oficina is developed in Brazil at Escola Politécnica of
Universidade de São Paulo. NATE is part of LSI (Integrable
Systems Laboratory) and stands for Learning, Work and Entertainment
Research Group. Visit our web page:
www.lsi.usp.br/nate
Suggestions, bugs and doubts, please email oficina@lsi.usp.br
Oficina is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation version 2 of
the License.
Oficina is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with Oficina; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301 USA.
The copy of the GNU General Public License is found in the
COPYING file included in the source distribution.
Authors:
Joyce Alessandra Saul (joycealess@gmail.com)
Andre Mossinato (andremossinato@gmail.com)
Nathalia Sautchuk Patrício (nathalia.sautchuk@gmail.com)
Pedro Kayatt (pekayatt@gmail.com)
Rafael Barbolo Lopes (barbolo@gmail.com)
Alexandre A. Gonçalves Martinazzo (alexandremartinazzo@gmail.com)
Collaborators:
Bruno Gola (brunogola@gmail.com)
Group Manager:
Irene Karaguilla Ficheman (irene@lsi.usp.br)
Scientific Coordinator:
Roseli de Deus Lopes (roseli@lsi.usp.br)
UI Design (OLPC):
Eben Eliason (eben@laptop.org)
Project Coordinator (OLPC):
Manusheel Gupta (manu@laptop.org)
Project Advisor (OLPC):
Walter Bender (walter@laptop.org)
"""
try:
    from gi.repository import Gst
    _HAS_GST = True
except ImportError:
    _HAS_GST = False
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GObject
from gi.repository import Pango
from gi.repository import PangoCairo
import logging
import os
import math
import cairo
import StringIO
import array
from Desenho import Desenho
from urlparse import urlparse
from sugar3.graphics import style
from sugar3.activity import activity
FALLBACK_FILL = True
try:
    from fill import fill
    FALLBACK_FILL = False
    logging.error('Found fill binaries.')
except ImportError:
    logging.error('No valid fill binaries. Using slower python code')
# Tools and events manipulation are handled by this class.
TARGET_URI = 0
MAX_UNDO_STEPS = 12
RESIZE_ARROW_SIZE = style.GRID_CELL_SIZE / 2
SOUNDS = {'arrow': ['oneclick.ogg', False, True, True],
'brush': ['brush.ogg', True, False, False],
'bucket': ['bucket.ogg', False, True, True],
'ellipse': ['oneclick.ogg', False, True, True],
'eraser': ['eraser.ogg', True, False, False],
'freeform': ['oneclick.ogg', False, True, True],
'heart': ['oneclick.ogg', False, True, True],
'kalidoscope': ['brush.ogg', True, False, False],
'line': ['oneclick.ogg', False, True, True],
'marquee-rectangular': ['oneclick.ogg', False, True, True],
'parallelogram': ['oneclick.ogg', False, True, True],
'polygon_regular': ['oneclick.ogg', False, True, True],
'rainbow': ['brush.ogg', True, False, False],
'rectangle': ['oneclick.ogg', False, True, True],
'star': ['oneclick.ogg', False, True, True],
'trapezoid': ['oneclick.ogg', False, True, True],
'triangle': ['oneclick.ogg', False, True, True]}
# this list contains the sounds that should be played manually,
# not automatically.
IGNORE_AUTO_PLAY = ['bucket']
if _HAS_GST:
Gst.init([])
def _get_screen_dpi():
xft_dpi = Gtk.Settings.get_default().get_property('gtk-xft-dpi')
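    # Note: gtk-xft-dpi stores the DPI multiplied by 1024, so e.g. a
    # reported value of 98304 corresponds to 98304 / 1024 = 96.0 dpi.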
    dpi = xft_dpi / 1024.0
logging.error('Setting dpi to: %f', dpi)
return dpi
bundle_path = activity.get_bundle_path()
class Area(Gtk.DrawingArea):
__gsignals__ = {
'undo': (GObject.SignalFlags.ACTION, None, ([])),
'redo': (GObject.SignalFlags.ACTION, None, ([])),
'action-saved': (GObject.SignalFlags.ACTION, None, ([])),
'select': (GObject.SignalFlags.ACTION, None, ([])),
}
PENCIL_LIKE_TOOLS = ['pencil', 'eraser', 'brush', 'kalidoscope', 'rainbow',
'stamp', 'load-stamp']
def __init__(self, activity):
""" Initialize the object from class Area which is derived
from Gtk.DrawingArea.
@param self -- the Area object (GtkDrawingArea)
@param activity -- the parent window
"""
GObject.GObject.__init__(self)
self.set_events(Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK |
Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.BUTTON_MOTION_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.TOUCH_MASK)
self.connect('event', self.__event_cb)
self.connect("draw", self.draw)
self.connect("motion_notify_event", self.mousemove)
self.connect("key_press_event", self.key_press)
self.connect("leave_notify_event", self.mouseleave)
self.connect("enter_notify_event", self.mouseenter)
target = [Gtk.TargetEntry.new('text/uri-list', 0, TARGET_URI)]
self.drag_dest_set(Gtk.DestDefaults.ALL, target,
Gdk.DragAction.COPY | Gdk.DragAction.MOVE)
self.connect('drag_data_received', self.drag_data_received)
self.set_can_focus(True)
self.grab_focus()
# TODO gtk3
# self.set_extension_events(Gdk.EXTENSION_EVENTS_CURSOR)
        # Define which tool is being used.
        # It is now described as a dictionary,
        # with the following keys:
        # - 'name' : a string
        # - 'line size' : an integer
        # - 'stamp size' : an integer
        # - 'line shape' : a string - 'circle' or 'square', for now
        # - 'fill' : a Boolean value
        # - 'vertices' : an integer
        # All values might be None, except for the 'name' key.
self.tool = {
'name': 'brush',
'line size': 4,
'stamp size': self._get_stamp_size(),
'line shape': 'circle',
'fill': True,
'cairo_stroke_color': (0.0, 0.0, 0.0, 1.0),
'cairo_fill_color': (0.0, 0.0, 0.0, 1.0),
'bucket_color': (0, 0, 0),
'alpha': 1.0,
'vertices': 6.0,
'font_description': 'Sans 12'}
self.desenha = False
self._selmove = False
self._selresize = False
self.oldx = 0
self.oldy = 0
self.drawing_canvas = None
        # This surface is used when we need to load data from a file or a process
self.drawing_canvas_data = None
self.textos = []
self.text_in_progress = False
self.activity = activity
self.d = Desenho(self)
self.last = []
self.keep_aspect_ratio = False
self.keep_shape_ratio = False
self._selection_finished = False
self._set_screen_dpi()
self._font_description = None
self.set_font_description(
Pango.FontDescription(self.tool['font_description']))
# selection properties
self.clear_selection()
self.pending_clean_selection_background = False
# List of pixbuf for the Undo function:
self._undo_list = []
self._undo_index = None
self._keep_undo = False
# variables to show the tool shape
self.drawing = False
self.x_cursor = 0
self.y_cursor = 0
# touch cache position
self._last_x_touch = 0
self._last_y_touch = 0
# used to identify emulated mouse
self._on_touch = False
self._update_timer = None
self._resize_hq_timer = None
self._player = None
self._sounds_enabled = False
if _HAS_GST:
try:
self._player = Gst.ElementFactory.make('playbin', 'Player')
self._pipeline = Gst.Pipeline()
self._bus = self._pipeline.get_bus()
self._bus.add_signal_watch()
self._bus.connect('message::eos', self.replay_tool_sound)
self._pipeline.add(self._player)
except:
logging.error(
"Sound player is not installed/available in the system.")
def _set_screen_dpi(self):
dpi = _get_screen_dpi()
font_map_default = PangoCairo.font_map_get_default()
font_map_default.set_resolution(dpi)
def set_font_description(self, fd):
self._font_description = fd
self.activity.textview.modify_font(fd)
self.tool['font_description'] = fd.to_string()
if self.text_in_progress:
# set the focus in the textview to enable resize if needed
GObject.idle_add(self.activity.textview.grab_focus)
def get_font_description(self):
return Pango.FontDescription(self.tool['font_description'])
def _get_stamp_size(self):
"""Set the stamp initial size, based on the display DPI."""
return style.zoom(44)
def load_from_file(self, file_path):
# load using a pixbuf to be able to read different formats
loaded_pxb = GdkPixbuf.Pixbuf.new_from_file(file_path)
self.drawing_canvas_data = cairo.ImageSurface(
cairo.FORMAT_ARGB32, loaded_pxb.get_width(),
loaded_pxb.get_height())
ctx = cairo.Context(self.drawing_canvas_data)
Gdk.cairo_set_source_pixbuf(ctx, loaded_pxb, 0, 0)
ctx.paint()
def setup(self, width, height):
"""Configure the Area object."""
logging.debug('Area.setup: w=%s h=%s', width, height)
self.set_size_request(width, height)
self.drawing_canvas = None
self._width = width
self._height = height
self.enable_undo()
        # Setting an initial tool
self.set_tool(self.tool)
return True
def get_size(self):
rect = self.get_allocation()
return rect.width, rect.height
def _init_temp_canvas(self, area=None):
# logging.error('init_temp_canvas. area %s', area)
# self.drawing_canvas.flush()
if area is None:
width, height = self.get_size()
self.temp_ctx.rectangle(0, 0, width, height)
else:
self.temp_ctx.rectangle(area.x, area.y, area.width, area.height)
self.temp_ctx.set_source_surface(self.drawing_canvas)
self.temp_ctx.paint()
def display_selection_border(self, ctx):
if not self.is_selected():
return
x, y, width, height = self.get_selection_bounds()
if self._selection_finished:
ctx.save()
selection_surface = self.get_selection()
ctx.translate(x, y)
ctx.set_source_surface(selection_surface)
ctx.rectangle(0, 0, width, height)
ctx.paint()
ctx.restore()
ctx.save()
ctx.set_line_width(1)
ctx.set_source_rgba(1., 1., 1., 1.)
ctx.set_line_cap(cairo.LINE_CAP_ROUND)
ctx.set_line_join(cairo.LINE_JOIN_ROUND)
# draw a dotted rectangle around the selection
ctx.rectangle(x, y, width, height)
ctx.stroke_preserve()
ctx.set_dash([5, 5], 0)
ctx.set_source_rgba(0., 0., 0., 1.)
ctx.stroke()
# draw a triangle to resize the selection
arrow_width = RESIZE_ARROW_SIZE
ctx.new_path()
ctx.move_to(x + width + arrow_width, y + height)
ctx.line_to(x + width + arrow_width, y + height + arrow_width)
ctx.line_to(x + width, y + height + arrow_width)
ctx.close_path()
ctx.set_dash([2, 2], 0)
ctx.set_source_rgba(0., 0., 0., 1.)
ctx.stroke()
ctx.restore()
def configure_line(self, size):
"""Configure the new line's size.
@param self -- the Area object (GtkDrawingArea)
@param size -- the size of the new line
"""
self.drawing_ctx.set_line_width(size)
def draw(self, widget, context):
""" This function define which canvas will be showed to the user.
Show up the Area object (GtkDrawingArea).
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
@param event -- GdkEvent
"""
        # This is the main canvas, which is displayed most of the time.
        # If drawing_canvas_data is not None, it was read from a file.
if self.drawing_canvas is None:
self.drawing_canvas = context.get_target().create_similar(
cairo.CONTENT_COLOR_ALPHA, self._width, self._height)
self.drawing_ctx = cairo.Context(self.drawing_canvas)
# paint background white
self.drawing_ctx.rectangle(0, 0, self._width, self._height)
if self.drawing_canvas_data is None:
self.drawing_ctx.set_source_rgb(1.0, 1.0, 1.0)
self.drawing_ctx.fill()
else:
self.drawing_ctx.set_source_surface(self.drawing_canvas_data)
self.drawing_ctx.paint()
self.drawing_canvas_data = None
        # canvas shown when we need to display something without drawing it
self.temp_canvas = context.get_target().create_similar(
cairo.CONTENT_COLOR_ALPHA, self._width, self._height)
self.temp_ctx = cairo.Context(self.temp_canvas)
self._init_temp_canvas()
if self.desenha:
# logging.error('Expose use temp canvas area')
# Paint the canvas in the widget:
context.set_source_surface(self.temp_canvas)
context.paint()
else:
# logging.error('Expose use drawing canvas area')
context.set_source_surface(self.drawing_canvas)
context.paint()
self.show_tool_shape(context)
        # TODO: gtk3 - how to get the area to avoid redrawing everything?
self._init_temp_canvas() # area)
self.display_selection_border(context)
if self._keep_undo:
self.keep_undo()
def show_tool_shape(self, context):
"""
Show the shape of the tool selected for pencil, brush,
rainbow and eraser
"""
if self.tool['name'] in self.PENCIL_LIKE_TOOLS:
if not self.drawing:
context.set_source_rgba(*self.tool['cairo_stroke_color'])
context.set_line_width(1)
# draw stamp border in widget.window
if self.tool['name'] in ('stamp', 'load-stamp'):
wr, hr = self.stamp_dimentions
context.rectangle(self.x_cursor - wr / 2,
self.y_cursor - hr / 2, wr, hr)
context.stroke()
# draw shape of the brush, square or circle
elif self.tool['line shape'] == 'circle':
size = self.tool['line size']
context.arc(self.x_cursor,
self.y_cursor, size / 2, 0.,
2 * math.pi)
context.stroke()
else:
size = self.tool['line size']
context.move_to(self.x_cursor - size / 2,
self.y_cursor - size / 2)
context.rectangle(self.x_cursor - size / 2,
self.y_cursor - size / 2, size, size)
context.stroke()
self.last_x_cursor = self.x_cursor
self.last_y_cursor = self.y_cursor
def __event_cb(self, widget, event):
if event.type in (Gdk.EventType.TOUCH_BEGIN,
Gdk.EventType.TOUCH_CANCEL, Gdk.EventType.TOUCH_END,
Gdk.EventType.BUTTON_PRESS,
Gdk.EventType.BUTTON_RELEASE):
x = int(event.get_coords()[1])
y = int(event.get_coords()[2])
# seq = str(event.touch.sequence)
# logging.error('event x %d y %d type %s', x, y, event.type)
if event.type in (Gdk.EventType.TOUCH_BEGIN,
Gdk.EventType.BUTTON_PRESS):
if event.type == Gdk.EventType.BUTTON_PRESS:
# http://developer.gnome.org/gtk3/3.4/
# GtkWidget.html#gtk-widget-get-pointer
_pointer, x, y, state = event.window.get_pointer()
button1_pressed = state & Gdk.ModifierType.BUTTON1_MASK
else:
self._on_touch = True
button1_pressed = True
self.tool_start(x, y, button1_pressed)
elif event.type in (Gdk.EventType.TOUCH_END,
Gdk.EventType.BUTTON_RELEASE):
# set _update_timer = None to avoid executing
# toolmove code after mouse release or touch end
self._update_timer = None
if not self._tool_sound['full_play']:
self.stop_sound()
if event.type == Gdk.EventType.BUTTON_RELEASE:
_pointer, x, y, state = event.window.get_pointer()
shift_pressed = state & Gdk.ModifierType.SHIFT_MASK
else:
self._on_touch = False
shift_pressed = False
GObject.timeout_add(10, self.tool_end, x, y, shift_pressed)
def tool_start(self, coord_x, coord_y, button1_pressed):
width, height = self.get_size()
# text
design_mode = True
if self.tool['name'] == 'text':
self.d.text(self, coord_x, coord_y)
design_mode = False
# This fixes a bug that made the text viewer get stuck in the canvas
elif self.text_in_progress:
design_mode = False
try:
# This works for a Gtk.Entry
text = self.activity.textview.get_text()
except AttributeError:
# This works for a Gtk.TextView
buf = self.activity.textview.get_buffer()
start, end = buf.get_bounds()
text = buf.get_text(start, end, True)
if text is not None:
self.d.text(self, coord_x, coord_y)
self.text_in_progress = False
self.activity.textview.hide()
coords = (coord_x, coord_y)
if not self._selresize:
        # if resizing, don't update, so the previous resize is remembered
self.oldx, self.oldy = coords
if self.tool['name'] == 'picker':
self.pick_color(coord_x, coord_y)
if button1_pressed:
            # Handle the left button click event.
if self._sounds_enabled and not self._tool_sound[
'play_after_release'] and not self.tool[
'name'] in IGNORE_AUTO_PLAY:
self.play_tool_sound()
if self.tool['name'] == 'eraser':
self.last = []
self.d.eraser(self, coords, self.last)
self.last = coords
self.drawing = True
elif self.tool['name'] == 'brush':
self.last = []
self.d.brush(self, coords, self.last)
self.last = coords
self.drawing = True
elif self.tool['name'] == 'kalidoscope':
self.last = []
self.d.kalidoscope(self, coords, self.last)
self.last = coords
self.drawing = True
elif self.tool['name'] in ('stamp', 'load-stamp'):
self.last = []
self.d.stamp(self, coords, self.last)
self.last = coords
self.drawing = True
elif self.tool['name'] == 'rainbow':
self.last = []
self.d.rainbow(self, coords, self.last)
self.last = coords
self.drawing = True
elif self.tool['name'] == 'freeform':
self.configure_line(self.tool['line size'])
self.d.freeform(self, coords, True,
self.tool['fill'], "motion")
if self.tool['name'] == 'marquee-rectangular':
if self.is_selected():
                # verify whether the click is outside the selected area
sel_x, sel_y, sel_width, sel_height = \
self.get_selection_bounds()
if self.check_point_in_area(coords[0], coords[1],
sel_x, sel_y, sel_width,
sel_height):
                    # be sure to use the latest coords,
                    # because they can be stale if the selection was resized before
self.oldx, self.oldy = coords
# inside the selected area
self.d.move_selection(self, coords)
self._selmove = True
self._selresize = False
elif self.check_point_in_area(coords[0], coords[1],
sel_x + sel_width,
sel_y + sel_height,
RESIZE_ARROW_SIZE,
RESIZE_ARROW_SIZE):
                    # in the resize area
self._selmove = False
self._selresize = True
else:
self.end_selection()
design_mode = False
else:
self._selmove = False
if design_mode:
self.desenha = True
def end_selection(self):
if self.is_selected():
self.getout()
self._selmove = False
self._selresize = False
self.queue_draw()
def calculate_damaged_area(self, points):
min_x = points[0][0]
min_y = points[0][1]
max_x = 0
max_y = 0
for point in points:
if point[0] < min_x:
min_x = point[0]
if point[0] > max_x:
max_x = point[0]
if point[1] < min_y:
min_y = point[1]
if point[1] > max_y:
max_y = point[1]
# add the tool size
if self.tool['name'] in ('stamp', 'load-stamp'):
wr, hr = self.stamp_dimentions
elif self.tool['name'] == 'freeform':
wr = hr = 20
else:
wr = hr = self.tool['line size'] * 2
min_x = min_x - wr
min_y = min_y - wr
max_x = max_x + hr
max_y = max_y + hr
return (min_x, min_y, max_x - min_x, max_y - min_y)
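    # Worked example (assuming a 4-pixel brush, so wr = hr = 8): for the
    # points [(10, 10), (30, 40)] the bounding box (10, 10)-(30, 40) grows
    # by the tool margin on each side, returning (2, 2, 36, 46).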
def mousemove(self, widget, event):
"""Make the Area object (GtkDrawingArea)
recognize that the mouse is moving.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
@param event -- GdkEvent
"""
if event.get_source_device().get_name().find('touchscreen') >= 0 and \
not self._on_touch:
return
x = event.x
y = event.y
shift_pressed = event.get_state() & Gdk.ModifierType.SHIFT_MASK
button1_pressed = event.get_state() & Gdk.ModifierType.BUTTON1_MASK
if self._update_timer is None:
self._update_timer = GObject.timeout_add(5, self.tool_move, x, y,
button1_pressed,
shift_pressed)
def tool_move(self, x, y, button1_pressed, shift_pressed):
if self._update_timer is None:
return False
self._update_timer = None
self.x_cursor, self.y_cursor = int(x), int(y)
        # the touch driver triggers many events sensing movements up and down
        # by only a pixel. This code caches the last position and ignores
        # the movement if it is not bigger than one pixel, to avoid redraws
        if abs(x - self._last_x_touch) > 1 or \
                abs(y - self._last_y_touch) > 1:
self._last_x_touch = x
self._last_y_touch = y
else:
return
coords = int(x), int(y)
if self.tool['name'] in ['rectangle', 'ellipse', 'line']:
if shift_pressed or self.keep_shape_ratio:
if self.tool['name'] in ['rectangle', 'ellipse']:
coords = self._keep_selection_ratio(coords)
elif self.tool['name'] == 'line':
coords = self._keep_line_ratio(coords)
if button1_pressed:
if self.tool['name'] == 'eraser':
self.d.eraser(self, coords, self.last)
self.last = coords
elif self.tool['name'] == 'brush':
self.d.brush(self, coords, self.last)
self.last = coords
elif self.tool['name'] == 'kalidoscope':
self.d.kalidoscope(self, coords, self.last)
self.last = coords
elif self.tool['name'] in ('stamp', 'load-stamp'):
self.d.stamp(self, coords, self.last,
self.tool['stamp size'])
self.last = coords
elif self.tool['name'] == 'rainbow':
self.d.rainbow(self, coords, self.last)
self.last = coords
if self.desenha:
if self.tool['name'] == 'line':
self.d.line(self, coords, True)
elif self.tool['name'] == 'ellipse':
self.d.circle(self, coords, True, self.tool['fill'])
elif self.tool['name'] == 'rectangle':
self.d.square(self, coords, True,
self.tool['fill'])
elif self.tool['name'] == 'marquee-rectangular':
if self._selmove:
# is inside a selected area, move it
self.d.move_selection(self, coords)
elif self._selresize:
self.d.resize_selection(self, coords)
else:
# create a selected area
if shift_pressed or self.keep_aspect_ratio:
coords = self._keep_selection_ratio(coords)
self.d.selection(self, coords)
elif self.tool['name'] == 'freeform':
self.configure_line(self.tool['line size'])
self.d.freeform(self, coords, True,
self.tool['fill'], "motion")
elif self.tool['name'] == 'triangle':
self.d.triangle(self, coords, True, self.tool['fill'])
elif self.tool['name'] == 'trapezoid':
self.d.trapezoid(self, coords, True, self.tool['fill'])
elif self.tool['name'] == 'arrow':
self.d.arrow(self, coords, True, self.tool['fill'])
elif self.tool['name'] == 'parallelogram':
self.d.parallelogram(self, coords, True,
self.tool['fill'])
elif self.tool['name'] == 'star':
self.d.star(self, coords, self.tool['vertices'],
True, self.tool['fill'])
elif self.tool['name'] == 'polygon_regular':
self.d.polygon_regular(self, coords,
self.tool['vertices'], True,
self.tool['fill'])
elif self.tool['name'] == 'heart':
self.d.heart(self, coords, True, self.tool['fill'])
else:
if self.tool['name'] in ['brush', 'eraser', 'rainbow', 'pencil',
'stamp', 'load-stamp']:
# define area to update (only to show the brush shape)
last_coords = (self.last_x_cursor, self.last_y_cursor)
area = self.calculate_damaged_area([last_coords, coords])
self.queue_draw_area(*area)
if self.tool['name'] == 'marquee-rectangular':
sel_x, sel_y, sel_width, sel_height = \
self.get_selection_bounds()
                # show appropriate cursor
if self.check_point_in_area(coords[0], coords[1], sel_x, sel_y,
sel_width, sel_height):
# inside the selected area
cursor = Gdk.Cursor.new(Gdk.CursorType.FLEUR)
elif self.check_point_in_area(coords[0], coords[1],
sel_x + sel_width,
sel_y + sel_height,
RESIZE_ARROW_SIZE,
RESIZE_ARROW_SIZE):
                    # in the resize area
cursor = Gdk.Cursor.new(Gdk.CursorType.BOTTOM_RIGHT_CORNER)
else:
cursor = Gdk.Cursor.new(Gdk.CursorType.CROSS)
self.get_window().set_cursor(cursor)
elif self.tool['name'] == 'freeform':
self.desenha = True
self.configure_line(self.tool['line size'])
self.d.freeform(self, coords, True, self.tool['fill'],
"moving")
self.get_window().process_all_updates()
return False
def check_point_in_area(self, x_point, y_point, x_min, y_min,
width, height):
return not ((x_point < x_min) or (x_point > x_min + width) or
(y_point < y_min) or (y_point > y_min + height))
def tool_end(self, coord_x, coord_y, shift_pressed):
coords = (coord_x, coord_y)
if self.tool['name'] in ['rectangle', 'ellipse', 'line']:
if shift_pressed or self.keep_shape_ratio:
if self.tool['name'] in ['rectangle', 'ellipse']:
coords = self._keep_selection_ratio(coords)
if self.tool['name'] == 'line':
coords = self._keep_line_ratio(coords)
width, height = self.get_size()
private_undo = False
if self.desenha:
if self.tool['name'] == 'line':
self.d.line(self, coords, False)
elif self.tool['name'] == 'ellipse':
self.d.circle(self, coords, False, self.tool['fill'])
elif self.tool['name'] == 'rectangle':
self.d.square(self, coords, False, self.tool['fill'])
elif self.tool['name'] == 'marquee-rectangular':
private_undo = True
if self.is_selected() and not self._selmove and \
not self._selresize:
self.create_selection_surface()
self.emit('select')
else:
self.apply_temp_selection()
elif self.tool['name'] == 'freeform':
self.d.freeform(self, coords, False,
self.tool['fill'], 'release')
private_undo = True
elif self.tool['name'] == 'bucket':
self.get_window().set_cursor(Gdk.Cursor.new(
Gdk.CursorType.WATCH))
GObject.idle_add(self.flood_fill, coords[0], coords[1])
elif self.tool['name'] == 'triangle':
self.d.triangle(self, coords, False, self.tool['fill'])
elif self.tool['name'] == 'trapezoid':
self.d.trapezoid(self, coords, False, self.tool['fill'])
elif self.tool['name'] == 'arrow':
self.d.arrow(self, coords, False, self.tool['fill'])
elif self.tool['name'] == 'parallelogram':
self.d.parallelogram(self, coords, False, self.tool['fill'])
elif self.tool['name'] == 'star':
self.d.star(self, coords, self.tool['vertices'], False,
self.tool['fill'])
elif self.tool['name'] == 'polygon_regular':
self.d.polygon_regular(self, coords, self.tool['vertices'],
False, self.tool['fill'])
elif self.tool['name'] == 'heart':
self.d.heart(self, coords, False, self.tool['fill'])
if self._sounds_enabled and self._tool_sound[
'play_after_release'] and not self.tool[
'name'] in IGNORE_AUTO_PLAY:
self.play_tool_sound()
else:
if self.tool['name'] == 'marquee-rectangular':
if self.is_selected():
self.getout()
if self.tool['name'] in ['brush', 'eraser', 'rainbow', 'pencil',
'stamp', 'load-stamp']:
self.last = []
self.d.finish_trace(self)
self.drawing = False
if not private_undo and \
self.tool['name'] not in ['bucket', 'marquee-rectangular']:
            # We have to avoid saving an undo state if the bucket tool
            # is selected, because this undo state is saved before the
            # GObject.idle_add (with the flood_fill function) finishes
            # and an inconsistent undo state would be saved
self.enable_undo()
if self.tool['name'] not in ('marquee-rectangular', 'freeform'):
self.desenha = False
self.queue_draw()
self.d.clear_control_points()
def flood_fill(self, x, y):
bucket_color = self.tool['bucket_color']
r, g, b = bucket_color[0], bucket_color[1], bucket_color[2]
# pack the color in a int as 0xAARRGGBB
fill_color = 0xff000000 + (int(r / 255 * 65536) +
int(g / 255 * 256) +
int(b / 255))
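        # This scales each 16-bit Gdk component down to roughly 8 bits
        # (dividing by 255 is close to a shift by 8) and places it in the
        # 0xAARRGGBB layout; e.g. with Python 2 integer division, mid-gray
        # (32768, 32768, 32768) packs to about 0xff808080.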
logging.error('fill_color %d', fill_color)
# load a array with the surface data
for array_type in ['H', 'I', 'L']:
pixels = array.array(array_type)
if pixels.itemsize == 4:
_array_type_used = array_type
break
else:
raise AssertionError()
        # need to copy self.drawing_canvas into an ImageSurface
        # because cairo.XlibSurface does not have get_data
image_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self._width,
self._height)
ctx = cairo.Context(image_surface)
ctx.set_source_surface(self.drawing_canvas)
ctx.paint()
        pixels.fromstring(image_surface.get_data())
# process the pixels in the array
width = self.drawing_canvas.get_width()
height = self.drawing_canvas.get_height()
old_color = pixels[x + y * width]
if old_color == fill_color:
logging.debug('Already filled')
# reset the cursor
display = Gdk.Display.get_default()
cursor = Gdk.Cursor.new_from_name(display, 'paint-bucket')
self.get_window().set_cursor(cursor)
return
if FALLBACK_FILL:
logging.debug('using python flood_fill')
def within(x, y):
if x < 0 or x >= width:
return False
if y < 0 or y >= height:
return False
return True
if not within(x, y):
return
edge = [(x, y)]
pixels[x + y * width] = fill_color
while len(edge) > 0:
newedge = []
for (x, y) in edge:
for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1),
(x, y - 1)):
if within(s, t) and \
pixels[s + t * width] == old_color:
pixels[s + t * width] = fill_color
newedge.append((s, t))
edge = newedge
else:
logging.debug('using c flood_fill')
pixels2 = fill(pixels, x, y, width, height, fill_color)
# the c implementation returns a list instead of array.array
pixels = array.array(_array_type_used, pixels2)
del(pixels2)
        # create an updated drawing_canvas
self.drawing_canvas_data = cairo.ImageSurface.create_for_data(
pixels, cairo.FORMAT_ARGB32, width, height)
del(pixels)
self.setup(width, height)
self.queue_draw()
self.enable_undo()
display = Gdk.Display.get_default()
cursor = Gdk.Cursor.new_from_name(display, 'paint-bucket')
if self._sounds_enabled:
self.play_tool_sound()
self.get_window().set_cursor(cursor)
def pick_color(self, x, y):
# create a new 1x1 cairo surface
cairo_surface = cairo.ImageSurface(cairo.FORMAT_RGB24, 1, 1)
cairo_context = cairo.Context(cairo_surface)
# translate xlib_surface so that target pixel is at 0, 0
cairo_context.set_source_surface(self.drawing_canvas, -x, -y)
cairo_context.rectangle(0, 0, 1, 1)
cairo_context.set_operator(cairo.OPERATOR_SOURCE)
cairo_context.fill()
cairo_surface.flush()
# Read the pixel
pixels = cairo_surface.get_data()
# the values are between 0 and 255
red = ord(pixels[2]) / 255.0 * 65535.0
green = ord(pixels[1]) / 255.0 * 65535.0
blue = ord(pixels[0]) / 255.0 * 65535.0
stroke_color = Gdk.Color(red, green, blue)
# set in the area
self.set_stroke_color(stroke_color)
# update the stroke_color in the button
self.activity.get_toolbar_box().brush_button.set_color(stroke_color)
self.activity.get_toolbar_box().brush_button.stop_stamping()
def mouseleave(self, widget, event):
if self.tool['name'] in self.PENCIL_LIKE_TOOLS:
self.drawing = True
size = self.tool['line size']
widget.queue_draw_area(self.x_cursor - size, self.y_cursor - size,
size * 2, size * 2)
def mouseenter(self, widget, event):
if self.tool['name'] in self.PENCIL_LIKE_TOOLS:
self.drawing = False
size = self.tool['line size']
widget.queue_draw_area(self.x_cursor - size, self.y_cursor - size,
size * 2, size * 2)
def setup_stamp(self, stamp=None):
"""Prepare for stamping from the selected area.
@param self -- the Area object (GtkDrawingArea)
"""
if self.is_selected() or stamp:
# Change stamp, get it from selection:
if stamp:
self.pixbuf_stamp = GdkPixbuf.Pixbuf.new_from_file(stamp)
elif self.is_selected() and not stamp:
pixbuf_data = StringIO.StringIO()
self.get_selection().write_to_png(pixbuf_data)
pxb_loader = GdkPixbuf.PixbufLoader.new_with_type('png')
pxb_loader.write(pixbuf_data.getvalue())
self.pixbuf_stamp = pxb_loader.get_pixbuf()
else:
return
self.stamp_size = 0
# Set white color as transparent:
stamp_alpha = self.pixbuf_stamp.add_alpha(True, 255, 255, 255)
self.pixbuf_stamp = stamp_alpha
return self.resize_stamp(self.tool['stamp size'])
def resize_stamp(self, stamp_size):
"""Change stamping pixbuffer from the given size.
@param self -- the Area object (GtkDrawingArea)
@param stamp_size -- the stamp will be inscripted in this size
"""
# Area.setup_stamp needs to be called first:
assert self.pixbuf_stamp
self.stamp_size = stamp_size
w = self.pixbuf_stamp.get_width()
h = self.pixbuf_stamp.get_height()
if w >= h:
wr, hr = stamp_size, int(stamp_size * h * 1.0 / w)
else:
wr, hr = int(stamp_size * w * 1.0 / h), stamp_size
self.stamp_dimentions = wr, hr
self.resized_stamp = self.pixbuf_stamp.scale_simple(
wr, hr, GdkPixbuf.InterpType.HYPER)
# Remove selected area
self.getout()
return self.resized_stamp
def undo(self):
"""Undo the last drawing change.
@param self -- the Area object (GtkDrawingArea)
"""
logging.debug('Area.undo(self)')
if self.is_selected():
self.getout(undo=True)
if self.text_in_progress:
# apply the text
self.d.text(self, 0, 0)
self.activity.textview.hide()
if self._undo_index > 0:
self._undo_index -= 1
undo_surface = self._undo_list[self._undo_index]
self.drawing_ctx.set_source_surface(undo_surface, 0, 0)
self.drawing_ctx.paint()
self.queue_draw()
self.emit('undo')
def redo(self):
"""Redo the last undo operation.
@param self -- the Area object (GtkDrawingArea)
"""
logging.debug('Area.redo(self)')
if self.is_selected():
self.getout()
if self._undo_index < len(self._undo_list) - 1:
self._undo_index += 1
undo_surface = self._undo_list[self._undo_index]
self.drawing_ctx.set_source_surface(undo_surface, 0, 0)
self.drawing_ctx.paint()
self.queue_draw()
self.emit('redo')
def enable_undo(self):
"""Save a flag to keep the last change in a list for Undo/Redo.
"""
self._keep_undo = True
def keep_undo(self):
"""Keep the last change in a list for Undo/Redo commands.
"""
self._keep_undo = False
if len(self._undo_list) == 0:
# first undo pix, start index:
self._undo_index = 0
elif len(self._undo_list) == MAX_UNDO_STEPS:
# drop the oldest undo pix:
self._undo_list.pop(0)
            # we could be in the middle of the list (after clicking
            # undo many times) and then draw something, so we should
            # discard the next redos because they are obsolete now.
self._undo_list = self._undo_list[:self._undo_index]
else:
self._undo_index += 1
# Forget the redos after this one:
self._undo_list = self._undo_list[:self._undo_index]
if self.is_selected():
self.getout(clear_selection=False)
# copy the drawing surface in a new surface
width = self.drawing_canvas.get_width()
height = self.drawing_canvas.get_height()
undo_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
undo_ctx = cairo.Context(undo_surface)
undo_ctx.set_source_surface(self.drawing_canvas, 0, 0)
undo_ctx.paint()
undo_surface.flush()
self._undo_list.append(undo_surface)
self.emit('action-saved')
def copy(self):
""" Copy Image.
When the tool selection is working make the change
the copy of selectioned area
@param self -- the Area object (GtkDrawingArea)
"""
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
selection_surface = self.get_selection()
if selection_surface is None:
selection_surface = self.drawing_canvas
pxb = self._surface_to_pixbuf(selection_surface)
clipboard.set_image(pxb)
def drag_data_received(self, w, context, x, y, data, info, time):
if data and data.format == 8:
self.load_image(urlparse(data.data).path, self)
context.finish(True, False, time)
else:
context.finish(False, False, time)
def paste(self, widget):
""" Paste image.
Paste image that is in canvas
@param self -- the Area object (GtkDrawingArea)
"""
width, height = self.get_size()
clipBoard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
if clipBoard.wait_is_text_available():
logging.debug('Area.paste(self): Wait is text available')
selection = clipBoard.wait_for_text()
props = self.tool
props['name'] = 'text'
self.set_tool(props)
self.tool_start(0, 0, False)
self.activity.textview.get_buffer().set_text(selection)
elif clipBoard.wait_is_image_available():
logging.error('Area.paste(self): Wait is image available')
pixbuf_sel = clipBoard.wait_for_image()
self.load_pixbuf(pixbuf_sel)
self.pending_clean_selection_background = False
elif clipBoard.wait_is_uris_available():
logging.error('Area.paste(self): is uris available')
selection = clipBoard.wait_for_contents('text/uri-list')
if selection is not None:
for uri in selection.get_uris():
self.load_image(urlparse(uri).path, self)
else:
tempPath = os.path.join("/tmp", "tempFile")
tempPath = os.path.abspath(tempPath)
self.load_image(tempPath, self)
logging.error('Area.paste(self): Load from clipboard fails')
logging.error('loading from tempPath')
self.queue_draw()
def set_fill_color(self, color):
"""Set fill color.
@param self -- the Area object (GtkDrawingArea)
@param color -- a Gdk.Color object
"""
alpha = self.tool['alpha']
red = color.red / 65535.0
green = color.green / 65535.0
blue = color.blue / 65535.0
self.tool['cairo_fill_color'] = (red, green, blue, alpha)
def set_stroke_color(self, color):
"""Set cairo_stroke_color.
@param self -- the Area object (GtkDrawingArea)
@param color -- a Gdk.Color object
"""
alpha = self.tool['alpha']
red = color.red / 65535.0
green = color.green / 65535.0
blue = color.blue / 65535.0
# for bucket operation, store the integer values
self.tool['bucket_color'] = (color.red, color.green, color.blue)
self.tool['cairo_stroke_color'] = (red, green, blue, alpha)
rgba = Gdk.RGBA()
rgba.red, rgba.green, rgba.blue, rgba.alpha = red, green, blue, alpha
self.activity.textview.override_color(Gtk.StateFlags.NORMAL, rgba)
def set_alpha(self, alpha):
"""
Set the alpha value used to draw
        @param alpha -- float between 0.0 and 1.0
"""
self.tool['alpha'] = alpha
stroke_color = self.tool['cairo_stroke_color']
self.tool['cairo_stroke_color'] = (stroke_color[0], stroke_color[1],
stroke_color[2], alpha)
fill_color = self.tool['cairo_fill_color']
self.tool['cairo_fill_color'] = (fill_color[0], fill_color[1],
fill_color[2], alpha)
def grayscale(self, widget):
"""Apply grayscale effect.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
"""
def proc_grayscale(temp_pix):
temp_pix.saturate_and_pixelate(temp_pix, 0, 0)
return temp_pix
self._do_process(widget, proc_grayscale)
def invert_colors(self):
"""Apply invert effect.
@param self -- the Area object (GtkDrawingArea)
"""
def internal_invert(self, old_cursor):
# load a array with the surface data
for array_type in ['H', 'I', 'L']:
pixels = array.array(array_type)
if pixels.itemsize == 4:
break
else:
raise AssertionError()
            # need to copy self.drawing_canvas into an ImageSurface
            # because cairo.XlibSurface does not have get_data
image_surface = cairo.ImageSurface(
cairo.FORMAT_ARGB32, self._width, self._height)
ctx = cairo.Context(image_surface)
ctx.set_source_surface(self.drawing_canvas)
ctx.paint()
            pixels.fromstring(image_surface.get_data())
# process the pixels in the array
new_array = array.array(pixels.typecode, len(pixels) * [0])
for i in range(len(pixels)):
new_array[i] = 0xffffffff - pixels[i] | 0xff000000
            # create an updated drawing_canvas
width = self.drawing_canvas.get_width()
height = self.drawing_canvas.get_height()
self.drawing_canvas_data = cairo.ImageSurface.create_for_data(
new_array, cairo.FORMAT_ARGB32, width, height)
self.setup(width, height)
self.queue_draw()
self.enable_undo()
self.get_toplevel().get_window().set_cursor(old_cursor)
old_cursor = self.get_window().get_cursor()
self.get_toplevel().get_window().set_cursor(
Gdk.Cursor.new(Gdk.CursorType.WATCH))
GObject.idle_add(internal_invert, self, old_cursor)
def mirror(self, widget, horizontal=True):
"""Apply mirror horizontal/vertical effect.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
@param horizontal -- If true sets flip as horizontal else vertical
"""
old_cursor = self.get_window().get_cursor()
self.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
GObject.idle_add(self._mirror_internal, widget, horizontal, old_cursor)
def _mirror_internal(self, widget, horizontal, old_cursor):
"""Mirror the image.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
"""
if self.is_selected():
x, y, width, height = self.get_selection_bounds()
surface = self.get_selection()
else:
x, y = 0, 0
width, height = self.get_size()
surface = self.drawing_canvas
        # create a surface and paste the image mirrored
        logging.error('create mirror surface')
mirror_surface = surface.create_similar(cairo.CONTENT_COLOR_ALPHA,
width, height)
mirror_ctx = cairo.Context(mirror_surface)
if horizontal:
mirror_ctx.scale(-1, 1)
mirror_ctx.translate(-width, 0)
else:
mirror_ctx.scale(1, -1)
mirror_ctx.translate(0, -height)
mirror_ctx.set_source_surface(surface)
mirror_ctx.paint()
# copy from the surface to the drawing context
if self.is_selected():
            # clear the background before mirroring the selection
self.clear_selection_background()
self.clear_selection_background(temp_canvas=True)
self.set_selection_bounds(x, y, width, height)
self.temp_ctx.save()
self.temp_ctx.translate(x, y)
self.temp_ctx.set_source_surface(mirror_surface)
self.temp_ctx.paint()
self.temp_ctx.restore()
self.create_selection_surface(temp_canvas=True)
else:
self.drawing_ctx.save()
self.drawing_ctx.set_source_surface(mirror_surface)
self.drawing_ctx.paint()
self.drawing_ctx.restore()
self.queue_draw()
if not self.is_selected():
self.enable_undo()
self.get_window().set_cursor(old_cursor)
def _do_process(self, widget, apply_process):
self.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
GObject.idle_add(self._do_process_internal, widget, apply_process)
def _surface_to_pixbuf(self, surface):
# copy from the surface to the pixbuf
pixbuf_data = StringIO.StringIO()
surface.write_to_png(pixbuf_data)
pxb_loader = GdkPixbuf.PixbufLoader.new_with_type('png')
pxb_loader.write(pixbuf_data.getvalue())
pxb_loader.close()
return pxb_loader.get_pixbuf()
def _pixbuf_to_context(self, pixbuf, context, x=0, y=0):
# copy from the pixbuf to the drawing context
context.save()
context.translate(x, y)
Gdk.cairo_set_source_pixbuf(context, pixbuf, 0, 0)
context.paint()
context.restore()
def _do_process_internal(self, widget, apply_process):
if self.is_selected():
x, y, _width, _height = self.get_selection_bounds()
surface = self.get_selection()
else:
x, y = 0, 0
surface = self.drawing_canvas
temp_pix = self._surface_to_pixbuf(surface)
# process the pixbuf
temp_pix = apply_process(temp_pix)
self._pixbuf_to_context(temp_pix, self.drawing_ctx, x, y)
self.create_selection_surface()
del temp_pix
self.queue_draw()
if not self.is_selected():
self.enable_undo()
self.set_tool_cursor()
def rotate_left(self, widget):
"""Rotate the image.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
"""
self.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
GObject.idle_add(self._rotate, widget, 270)
def rotate_right(self, widget):
"""Rotate the image.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
"""
self.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
GObject.idle_add(self._rotate, widget, 90)
def _rotate(self, widget, angle):
"""Rotate the image.
@param self -- the Area object (GtkDrawingArea)
@param widget -- the Area object (GtkDrawingArea)
"""
self.get_window().set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))
if self.is_selected():
x, y, width, height = self.get_selection_bounds()
surface = self.get_selection()
else:
x, y = 0, 0
width, height = self.get_size()
surface = self.drawing_canvas
# create a surface and paste the image rotated
logging.error('create rotate surface')
rotate_surface = surface.create_similar(cairo.CONTENT_COLOR_ALPHA,
height, width)
rotate_ctx = cairo.Context(rotate_surface)
radians_angle = math.pi * float(angle) / 180.0
rotate_ctx.rotate(radians_angle)
if radians_angle > math.pi:
rotate_ctx.translate(-width, 0)
else:
rotate_ctx.translate(0, -height)
rotate_ctx.set_source_surface(surface)
rotate_ctx.paint()
        # copy from the rotated surface to the drawing context
if self.is_selected():
            # clear the background before rotating the selection
self.clear_selection_background()
self.clear_selection_background(temp_canvas=True)
self.set_selection_bounds(x, y, height, width)
self.temp_ctx.save()
self.temp_ctx.translate(x, y)
self.temp_ctx.set_source_surface(rotate_surface)
self.temp_ctx.paint()
self.temp_ctx.restore()
# assign the rotated surface as the selection surface
self.selection_surface = rotate_surface
else:
# create a new canvas with permuted dimensions
self.drawing_canvas_data = surface.create_similar(
cairo.CONTENT_COLOR_ALPHA, height, width)
ctx = cairo.Context(self.drawing_canvas_data)
ctx.save()
ctx.set_source_surface(rotate_surface)
ctx.paint()
ctx.restore()
self.setup(height, width)
self.queue_draw()
if not self.is_selected():
self.enable_undo()
self.set_tool_cursor()
def can_undo(self):
"""
        Indicate whether there is some action to undo
@param self -- the Area object (GtkDrawingArea)
"""
return self._undo_index > 0
def can_redo(self):
"""
        Indicate whether there is some action to redo
@param self -- the Area object (GtkDrawingArea)
"""
return self._undo_index < len(self._undo_list) - 1
def is_selected(self):
"""
        Return True if there is something selected
"""
return self.get_selection_bounds() != (0, 0, 0, 0)
def clear_selection(self):
self.set_selection_bounds(0, 0, 0, 0)
self._selection_horizontal_scale = 1.0
self._selection_vertical_scale = 1.0
self.selection_resized_surface = None
self._selection_finished = False
def set_selection_bounds(self, x, y, width, height):
"""
Set selection bounds
@param x, y, width, height - the rectangle to define the area
"""
self._selection_bounds = (x, y, width, height)
def set_selection_start(self, x, y):
self._selection_bounds = (x, y, self._selection_bounds[2],
self._selection_bounds[3])
def get_selection_bounds(self):
"""
@return x1, y1, width, height - the rectangle to define the area
"""
x, y = self._selection_bounds[0], self._selection_bounds[1]
width, height = self._selection_bounds[2], self._selection_bounds[3]
width = width * self._selection_horizontal_scale
height = height * self._selection_vertical_scale
return (x, y, int(width), int(height))
def create_selection_surface(self, clear_background=True,
temp_canvas=False):
x, y, width, height = self.get_selection_bounds()
logging.error('create_selection_surface %s', (x, y, width, height))
self.selection_surface = cairo.ImageSurface(
cairo.FORMAT_ARGB32, width, height)
selection_ctx = cairo.Context(self.selection_surface)
selection_ctx.translate(-x, -y)
if not temp_canvas:
selection_ctx.set_source_surface(self.drawing_canvas)
else:
selection_ctx.set_source_surface(self.temp_canvas)
selection_ctx.paint()
self.selection_resized_surface = None
self._selection_finished = True
if clear_background:
self.pending_clean_selection_background = True
def clear_selection_background(self, temp_canvas=False):
# clear the selection background
x, y, width, height = self.get_selection_bounds()
if not temp_canvas:
ctx = self.drawing_ctx
else:
ctx = self.temp_ctx
ctx.save()
ctx.new_path()
ctx.rectangle(x, y, width, height)
ctx.set_source_rgb(1.0, 1.0, 1.0)
ctx.fill()
ctx.restore()
def resize_selection_surface(self, horizontal_scale, vertical_scale,
fast=True):
x, y = self._selection_bounds[0], self._selection_bounds[1]
        new_width = int(self.selection_surface.get_width() * horizontal_scale)
new_height = int(self.selection_surface.get_height() * vertical_scale)
# create a surface with the selection scaled to the new size
self.selection_resized_surface = cairo.ImageSurface(
cairo.FORMAT_ARGB32, new_width, new_height)
temp_ctx = cairo.Context(self.selection_resized_surface)
temp_ctx.scale(horizontal_scale, vertical_scale)
temp_ctx.set_source_surface(self.selection_surface)
temp_ctx.paint()
# draw over temp canvas
self.temp_ctx.save()
self.temp_ctx.translate(x, y)
self.temp_ctx.set_source_surface(self.selection_resized_surface)
self.temp_ctx.rectangle(0, 0, new_width, new_height)
if fast:
self.temp_ctx.get_source().set_filter(cairo.FILTER_NEAREST)
# Add a timer for resize with high quality:
if self._resize_hq_timer is not None:
GObject.source_remove(self._resize_hq_timer)
self._resize_hq_timer = GObject.timeout_add(
200, self.resize_selection_surface, horizontal_scale,
vertical_scale, False)
else:
self._resize_hq_timer = None
self.temp_ctx.paint()
self.temp_ctx.restore()
self._selection_horizontal_scale = horizontal_scale
self._selection_vertical_scale = vertical_scale
self.desenha = True
self.queue_draw()
return False
def get_selection(self):
if self.selection_resized_surface is not None:
return self.selection_resized_surface
if self.selection_surface is not None:
return self.selection_surface
else:
return None
def load_image(self, name, widget=None):
"""Load an image.
@param self -- Area instance
@param name -- string (image file path)
@param widget -- GtkDrawingArea
"""
logging.debug('Area.load_image Loading file %s', name)
pixbuf = GdkPixbuf.Pixbuf.new_from_file(name)
self.load_pixbuf(pixbuf)
def load_pixbuf(self, pixbuf):
width, height = int(pixbuf.get_width()), int(pixbuf.get_height())
logging.debug('image size %d x %d', width, height)
# load in the selection surface
self.selection_surface = cairo.ImageSurface(
cairo.FORMAT_ARGB32, width, height)
selection_ctx = cairo.Context(self.selection_surface)
self._pixbuf_to_context(pixbuf, selection_ctx)
# show in the temp context too
self.temp_ctx.save()
self.temp_ctx.translate(0, 0)
self.temp_ctx.set_source_surface(self.selection_surface)
self.temp_ctx.paint()
self.temp_ctx.restore()
self.set_selection_bounds(0, 0, width, height)
self.desenha = True
self._selmove = True
self._selection_finished = True
self.tool['name'] = 'marquee-rectangular'
self.emit('select')
self.queue_draw()
def clear(self):
""" Clear Canvas
@param self -- Area instance
"""
logging.debug('Area.clear')
self.d.clear(self)
# If something is selected, the action will be saved
# after it is unselected
if not self.is_selected():
self.enable_undo()
def set_tool(self, tool):
'''
Method to configure all tools.
@param - tool: a dictionary with the tool keys
'''
# logging.debug('Area.set_tool %s', tool)
self.tool = tool
try:
if self.tool['line size'] is not None:
self.configure_line(self.tool['line size'])
# if self.tool['fill color'] is not None:
# self.set_fill_color(self.tool['fill color'])
# else:
# # use black
# self.set_fill_color(self.black)
# if self.tool['stroke color'] is not None:
# self.set_stroke_color(self.tool['stroke color'])
# else:
# # use black
# self.set_stroke_color(self.black)
except AttributeError:
pass
self.set_tool_cursor()
# clear points in Desenha
self.d.points = []
self.select_sound()
def set_tool_cursor(self):
# Setting the cursor
try:
cursors = {'pencil': 'pencil',
'brush': 'paintbrush',
'kalidoscope': 'paintbrush',
'eraser': 'eraser',
'bucket': 'paint-bucket'}
display = Gdk.Display.get_default()
if self.tool['name'] in cursors:
name = cursors[self.tool['name']]
cursor = Gdk.Cursor.new_from_name(display, name)
elif self.tool['name'] == 'marquee-rectangular':
cursor = Gdk.Cursor.new(Gdk.CursorType.CROSS)
else:
name = self.tool['name']
if name == 'load-stamp':
name = 'stamp'
filename = os.path.join('images', name + '.png')
pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)
# Decide which is the cursor hot spot offset:
if self.tool['name'] in ('stamp', 'load-stamp'):
hotspot_x, hotspot_y = 20, 38
# horizontal center and bottom
elif self.tool['name'] == 'picker':
hotspot_x, hotspot_y = 1, 38 # bottom left corner
else:
hotspot_x, hotspot_y = 0, 0
cursor = Gdk.Cursor.new_from_pixbuf(display, pixbuf,
hotspot_x, hotspot_y)
except GObject.GError:
cursor = None
if self.get_window() is not None:
self.get_window().set_cursor(cursor)
def getout(self, undo=False, clear_selection=True):
"""
Apply the selected area in the canvas.
@param - undo: enable undo
"""
try:
# apply selection over canvas
if self.is_selected():
x, y, width, height = self.get_selection_bounds()
selection_surface = self.get_selection()
self.drawing_ctx.save()
self.drawing_ctx.translate(x, y)
self.drawing_ctx.set_source_surface(selection_surface)
self.drawing_ctx.rectangle(0, 0, width, height)
self.drawing_ctx.paint()
self.drawing_ctx.restore()
self.desenha = False
if clear_selection:
self.clear_selection()
if undo:
self.enable_undo()
except NameError as message:
logging.debug(message)
except Exception as message:
logging.debug('Unexpected error: %s', message)
def apply_temp_selection(self):
"""
Apply the selected area in the temp canvas.
"""
# apply selection over canvas
if self.is_selected():
x, y, width, height = self.get_selection_bounds()
selection_surface = self.get_selection()
self.temp_ctx.save()
self.temp_ctx.translate(x, y)
self.temp_ctx.set_source_surface(selection_surface)
self.temp_ctx.rectangle(0, 0, width, height)
self.temp_ctx.paint()
self.temp_ctx.restore()
def key_press(self, widget, event):
if event.keyval == Gdk.KEY_BackSpace:
if self.is_selected():
# Remove selection
# TODO
if self.tool['name'] == 'marquee-rectangular':
self.get_window().set_cursor(Gdk.Cursor.new(
Gdk.CursorType.CROSS))
widget.queue_draw()
self.enable_undo()
elif event.keyval == Gdk.KEY_a and event.state & Gdk.ModifierType.CONTROL_MASK:
if self.is_selected():
self.getout()
width, height = self.get_size()
if self.tool['name'] == 'marquee-rectangular':
self.get_window().set_cursor(Gdk.Cursor.new(
Gdk.CursorType.FLEUR))
self.set_selection_bounds(0, 0, width - 1, height - 1)
self.emit('select')
widget.queue_draw()
elif event.keyval == Gdk.KEY_d and event.state & Gdk.ModifierType.CONTROL_MASK:
if self.is_selected():
self.getout(True)
if self.tool['name'] == 'marquee-rectangular':
self.get_window().set_cursor(Gdk.Cursor.new(
Gdk.CursorType.CROSS))
widget.queue_draw()
elif event.keyval == Gdk.KEY_Return:
self.getout(True)
if self.tool['name'] == 'marquee-rectangular':
self.get_window().set_cursor(Gdk.Cursor.new(
Gdk.CursorType.CROSS))
widget.queue_draw()
def change_line_size(self, delta):
# Used from OficinaActivity
if self.tool['name'] in ['pencil', 'eraser', 'brush', 'rainbow']:
size = self.tool['line size'] + delta
if size < 1:
size = 1
self.tool['line size'] = size
self.configure_line(size)
# TODO: clip
self.queue_draw()
if self.tool['name'] in ('stamp', 'load-stamp'):
self.resize_stamp(self.stamp_size + delta)
# TODO: clip
self.queue_draw()
def _keep_selection_ratio(self, coords):
def sign(x):
return x and x / abs(x) or 0
dx = int(coords[0]) - self.oldx
dy = int(coords[1]) - self.oldy
size = max(abs(dx), abs(dy))
return (self.oldx + sign(dx) * size,
self.oldy + sign(dy) * size)
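# Illustrative behaviour (hypothetical coordinates): with the drag origin
# (self.oldx, self.oldy) == (10, 10), _keep_selection_ratio((30, 15))
# returns (30, 30): the larger delta (20 vs. 5) is applied on both axes
# with each axis keeping its own sign, so the selection stays square.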
def _keep_line_ratio(self, coords):
def sign(x):
return x and x / abs(x) or 0
dx = int(coords[0]) - self.oldx
dy = int(coords[1]) - self.oldy
size = max(abs(dx), abs(dy))
if abs(dx) > 0.5 * size and abs(dy) > 0.5 * size:
return (self.oldx + sign(dx) * size, self.oldy + sign(dy) * size)
elif abs(dx) < 0.5 * size and abs(dy) > 0.5 * size:
return (self.oldx, self.oldy + sign(dy) * size)
elif abs(dx) > 0.5 * size and abs(dy) < 0.5 * size:
return (self.oldx + sign(dx) * size, self.oldy)
# fall back to the raw coordinates on boundary cases (e.g. no movement
# at all) instead of implicitly returning None
return coords
def play_tool_sound(self):
if not self._player:
return
if self._pipeline.get_state(0)[1] == Gst.State.PLAYING:
self.stop_sound()
self._pipeline.set_state(Gst.State.PLAYING)
def replay_tool_sound(self, bus, msg):
if not self._tool_sound['loop']:
return
if self._sounds_enabled:
self._pipeline.seek_simple(
Gst.Format.TIME,
Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
0)
def stop_sound(self):
if self._player:
self._pipeline.set_state(Gst.State.NULL)
def enable_sounds(self, enabled):
self._sounds_enabled = enabled
self.select_sound()
def select_sound(self):
try:
soundinfo = SOUNDS[self.tool['name']]
self._tool_sound = {
'filepath': os.path.join(
bundle_path,
'sounds',
soundinfo[0]),
'loop': soundinfo[1],
'full_play': soundinfo[2],
'play_after_release': soundinfo[3]}
except KeyError:
self._tool_sound = {
'filepath': '',
'loop': False,
'full_play': False,
'play_after_release': False}
if not self._player:
return
self._pipeline.set_state(Gst.State.READY)
self._player.set_property(
'uri',
'file://%s' %
self._tool_sound['filepath'])
|
samdroid-apps/paint-activity
|
Area.py
|
Python
|
gpl-2.0
| 72,770
|
[
"FLEUR",
"VisIt"
] |
f569199b6405ee5f280ef2503cd0f2d455f3b03de2486400153c049d0707a44b
|
from tools.load import LoadMatrix
from numpy import where
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list=[[traindat,testdat, 1.3],[traindat,testdat, 1.4]]
def kernel_gaussian_modular(fm_train_real=traindat, fm_test_real=testdat, width=1.3):
from shogun.Features import RealFeatures
from shogun.Kernel import GaussianKernel
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
kernel=GaussianKernel(feats_train, feats_train, width)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
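# Note on the width parameter: Shogun's GaussianKernel uses it directly as
# the denominator, k(x, y) = exp(-||x - y||^2 / width), so it plays the
# role often written as 2*sigma^2 rather than sigma itself.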
if __name__=='__main__':
print('Gaussian')
kernel_gaussian_modular(*parameter_list[0])
|
ratschlab/ASP
|
examples/undocumented/python_modular/kernel_gaussian_modular.py
|
Python
|
gpl-2.0
| 795
|
[
"Gaussian"
] |
edee5982048b7f3408be7a7dd866314f6f7fe3b7bbaa85ecbeca328f11749d4f
|
import ast
from unittest import (
TestCase,
)
from darglint.analysis.yield_visitor import (
YieldVisitor,
)
from .utils import (
reindent,
)
class YieldsVisitorTests(TestCase):
def assertFound(self, program):
"""Assert that the yield was found.
Args:
program: The program to run the analysis on.
Returns:
The visitor, in case you want to do more analysis.
"""
function = ast.parse(reindent(program)).body[0]
visitor = YieldVisitor()
visitor.visit(function)
self.assertTrue(visitor.yields)
return visitor
def assertNoneFound(self, program):
"""Assert that no yield was found.
Args:
program: The program to run the analysis on.
Returns:
The visitor, in case you want to do more analysis.
"""
function = ast.parse(reindent(program)).body[0]
visitor = YieldVisitor()
visitor.visit(function)
self.assertEqual(visitor.yields, [])
return visitor
def test_no_yield(self):
program = r'''
def f():
pass
'''
self.assertNoneFound(program)
def test_nested_no_yield(self):
program = r'''
def f():
def g():
pass
'''
self.assertNoneFound(program)
def test_simplest_function(self):
program = r'''
def f():
yield 3
'''
self.assertFound(program)
def test_early_yield(self):
program = r'''
def f(x):
if x < 0:
yield -1
for i in range(x):
if complex_condition(x, i):
yield i
'''
self.assertFound(program)
def test_conditional_yield(self):
program = r'''
def f():
if MY_GLOBAL:
yield 1
else:
yield 2
'''
self.assertFound(program)
def test_yield_in_context(self):
program = r'''
def f():
with open('/tmp/input', 'r') as fin:
yield fin.readlines()
'''
self.assertFound(program)
def test_yields_none(self):
program = r'''
def f():
yield
'''
visitor = self.assertFound(program)
self.assertEqual(
visitor.yields[0].value,
None,
)
def test_yields_non_none(self):
program = r'''
def f():
yield 3
'''
visitor = self.assertFound(program)
self.assertTrue(
isinstance(visitor.yields[0].value, ast.AST),
)
def test_yield_from(self):
program = r'''
def f():
yield from (x for x in range(10))
'''
self.assertFound(program)
|
terrencepreilly/darglint
|
tests/test_yield_visitor.py
|
Python
|
mit
| 2,964
|
[
"VisIt"
] |
be91769e8e18c1835566b58a9f8878eeeb41a1ec6ca5d9a30716bd4f85ab934b
|
# convcmd - convert extension commands definition
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, MissingTool, SKIPREV, mapfile
from cvs import convert_cvs
from darcs import darcs_source
from git import convert_git
from hg import mercurial_source, mercurial_sink
from subversion import svn_source, svn_sink
from monotone import monotone_source
from gnuarch import gnuarch_source
from bzr import bzr_source
from p4 import p4_source
import filemap
import os, shutil, shlex
from mercurial import hg, util, encoding
from mercurial.i18n import _
orig_encoding = 'ascii'
def recode(s):
if isinstance(s, unicode):
return s.encode(orig_encoding, 'replace')
else:
return s.decode('utf-8').encode(orig_encoding, 'replace')
source_converters = [
('cvs', convert_cvs, 'branchsort'),
('git', convert_git, 'branchsort'),
('svn', svn_source, 'branchsort'),
('hg', mercurial_source, 'sourcesort'),
('darcs', darcs_source, 'branchsort'),
('mtn', monotone_source, 'branchsort'),
('gnuarch', gnuarch_source, 'branchsort'),
('bzr', bzr_source, 'branchsort'),
('p4', p4_source, 'branchsort'),
]
sink_converters = [
('hg', mercurial_sink),
('svn', svn_sink),
]
def convertsource(ui, path, type, rev):
exceptions = []
if type and type not in [s[0] for s in source_converters]:
raise util.Abort(_('%s: invalid source repository type') % type)
for name, source, sortmode in source_converters:
try:
if not type or name == type:
return source(ui, path, rev), sortmode
except (NoRepo, MissingTool), inst:
exceptions.append(inst)
if not ui.quiet:
for inst in exceptions:
ui.write("%s\n" % inst)
raise util.Abort(_('%s: missing or unsupported repository') % path)
def convertsink(ui, path, type):
if type and type not in [s[0] for s in sink_converters]:
raise util.Abort(_('%s: invalid destination repository type') % type)
for name, sink in sink_converters:
try:
if not type or name == type:
return sink(ui, path)
except NoRepo, inst:
ui.note(_("convert: %s\n") % inst)
except MissingTool, inst:
raise util.Abort('%s\n' % inst)
raise util.Abort(_('%s: unknown repository type') % path)
class progresssource(object):
def __init__(self, ui, source, filecount):
self.ui = ui
self.source = source
self.filecount = filecount
self.retrieved = 0
def getfile(self, file, rev):
self.retrieved += 1
self.ui.progress(_('getting files'), self.retrieved,
item=file, total=self.filecount)
return self.source.getfile(file, rev)
def lookuprev(self, rev):
return self.source.lookuprev(rev)
def close(self):
self.ui.progress(_('getting files'), None)
class converter(object):
def __init__(self, ui, source, dest, revmapfile, opts):
self.source = source
self.dest = dest
self.ui = ui
self.opts = opts
self.commitcache = {}
self.authors = {}
self.authorfile = None
# Record converted revisions persistently: maps source revision
# ID to target revision ID (both strings). (This is how
# incremental conversions work.)
self.map = mapfile(ui, revmapfile)
# Read first the dst author map if any
authorfile = self.dest.authorfile()
if authorfile and os.path.exists(authorfile):
self.readauthormap(authorfile)
# Extend/Override with new author map if necessary
if opts.get('authormap'):
self.readauthormap(opts.get('authormap'))
self.authorfile = self.dest.authorfile()
self.splicemap = self.parsesplicemap(opts.get('splicemap'))
self.branchmap = mapfile(ui, opts.get('branchmap'))
def parsesplicemap(self, path):
""" check and validate the splicemap format and
return a child/parents dictionary.
Format checking has two parts.
1. generic format which is same across all source types
2. specific format checking which may be different for
different source type. This logic is implemented in
checkrevformat function in source files like
hg.py, subversion.py etc.
"""
if not path:
return {}
m = {}
try:
fp = open(path, 'r')
for i, line in enumerate(fp):
line = line.splitlines()[0].rstrip()
if not line:
# Ignore blank lines
continue
# split line
lex = shlex.shlex(line, posix=True)
lex.whitespace_split = True
lex.whitespace += ','
line = list(lex)
# check number of parents
if not (2 <= len(line) <= 3):
raise util.Abort(_('syntax error in %s(%d): child parent1'
'[,parent2] expected') % (path, i + 1))
for part in line:
self.source.checkrevformat(part)
child, p1, p2 = line[0], line[1:2], line[2:]
if p1 == p2:
m[child] = p1
else:
m[child] = p1 + p2
# if file does not exist or error reading, exit
except IOError:
raise util.Abort(_('splicemap file not found or error reading %s:')
% path)
return m
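# A hypothetical splicemap file (illustrative only) lists one child
# revision per line, followed by one or two parent revisions separated
# by whitespace and/or commas:
#
#   <child-rev> <parent1-rev>                 (graft onto a new parent)
#   <child-rev> <parent1-rev>,<parent2-rev>   (turn the child into a merge)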
def walktree(self, heads):
'''Return a mapping that identifies the uncommitted parents of every
uncommitted changeset.'''
visit = heads
known = set()
parents = {}
while visit:
n = visit.pop(0)
if n in known or n in self.map:
continue
known.add(n)
self.ui.progress(_('scanning'), len(known), unit=_('revisions'))
commit = self.cachecommit(n)
parents[n] = []
for p in commit.parents:
parents[n].append(p)
visit.append(p)
self.ui.progress(_('scanning'), None)
return parents
def mergesplicemap(self, parents, splicemap):
"""A splicemap redefines child/parent relationships. Check the
map contains valid revision identifiers and merge the new
links in the source graph.
"""
for c in sorted(splicemap):
if c not in parents:
if not self.dest.hascommit(self.map.get(c, c)):
# Could be in source but not converted during this run
self.ui.warn(_('splice map revision %s is not being '
'converted, ignoring\n') % c)
continue
pc = []
for p in splicemap[c]:
# We do not have to wait for nodes already in dest.
if self.dest.hascommit(self.map.get(p, p)):
continue
# Parent is not in dest and not being converted, not good
if p not in parents:
raise util.Abort(_('unknown splice map parent: %s') % p)
pc.append(p)
parents[c] = pc
def toposort(self, parents, sortmode):
'''Return an ordering such that every uncommitted changeset is
preceded by all its uncommitted ancestors.'''
def mapchildren(parents):
"""Return a (children, roots) tuple where 'children' maps parent
revision identifiers to children ones, and 'roots' is the list of
revisions without parents. 'parents' must be a mapping of revision
identifier to its parents ones.
"""
visit = sorted(parents)
seen = set()
children = {}
roots = []
while visit:
n = visit.pop(0)
if n in seen:
continue
seen.add(n)
# Ensure that nodes without parents are present in the
# 'children' mapping.
children.setdefault(n, [])
hasparent = False
for p in parents[n]:
if p not in self.map:
visit.append(p)
hasparent = True
children.setdefault(p, []).append(n)
if not hasparent:
roots.append(n)
return children, roots
# Sort functions are supposed to take a list of revisions which
# can be converted immediately and pick one
def makebranchsorter():
"""If the previously converted revision has a child in the
eligible revisions list, pick it. Return the list head
otherwise. Branch sort attempts to minimize branch
switching, which is harmful for Mercurial backend
compression.
"""
prev = [None]
def picknext(nodes):
next = nodes[0]
for n in nodes:
if prev[0] in parents[n]:
next = n
break
prev[0] = next
return next
return picknext
def makesourcesorter():
"""Source specific sort."""
keyfn = lambda n: self.commitcache[n].sortkey
def picknext(nodes):
return sorted(nodes, key=keyfn)[0]
return picknext
def makeclosesorter():
"""Close order sort."""
keyfn = lambda n: ('close' not in self.commitcache[n].extra,
self.commitcache[n].sortkey)
def picknext(nodes):
return sorted(nodes, key=keyfn)[0]
return picknext
def makedatesorter():
"""Sort revisions by date."""
dates = {}
def getdate(n):
if n not in dates:
dates[n] = util.parsedate(self.commitcache[n].date)
return dates[n]
def picknext(nodes):
return min([(getdate(n), n) for n in nodes])[1]
return picknext
if sortmode == 'branchsort':
picknext = makebranchsorter()
elif sortmode == 'datesort':
picknext = makedatesorter()
elif sortmode == 'sourcesort':
picknext = makesourcesorter()
elif sortmode == 'closesort':
picknext = makeclosesorter()
else:
raise util.Abort(_('unknown sort mode: %s') % sortmode)
children, actives = mapchildren(parents)
s = []
pendings = {}
while actives:
n = picknext(actives)
actives.remove(n)
s.append(n)
# Update dependents list
for c in children.get(n, []):
if c not in pendings:
pendings[c] = [p for p in parents[c] if p not in self.map]
try:
pendings[c].remove(n)
except ValueError:
raise util.Abort(_('cycle detected between %s and %s')
% (recode(c), recode(n)))
if not pendings[c]:
# Parents are converted, node is eligible
actives.insert(0, c)
pendings[c] = None
if len(s) != len(parents):
raise util.Abort(_("not all revisions were sorted"))
return s
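# Illustrative example (hypothetical revision ids): with
#   parents = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}
# every valid ordering starts with 'a' and ends with 'd', e.g.
# ['a', 'b', 'c', 'd']; the sort mode only decides how ties such as
# 'b' vs. 'c' are broken.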
def writeauthormap(self):
authorfile = self.authorfile
if authorfile:
self.ui.status(_('writing author map file %s\n') % authorfile)
ofile = open(authorfile, 'w+')
for author in self.authors:
ofile.write("%s=%s\n" % (author, self.authors[author]))
ofile.close()
def readauthormap(self, authorfile):
afile = open(authorfile, 'r')
for line in afile:
line = line.strip()
if not line or line.startswith('#'):
continue
try:
srcauthor, dstauthor = line.split('=', 1)
except ValueError:
msg = _('ignoring bad line in author map file %s: %s\n')
self.ui.warn(msg % (authorfile, line.rstrip()))
continue
srcauthor = srcauthor.strip()
dstauthor = dstauthor.strip()
if self.authors.get(srcauthor) in (None, dstauthor):
msg = _('mapping author %s to %s\n')
self.ui.debug(msg % (srcauthor, dstauthor))
self.authors[srcauthor] = dstauthor
continue
m = _('overriding mapping for author %s, was %s, will be %s\n')
self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
afile.close()
def cachecommit(self, rev):
commit = self.source.getcommit(rev)
commit.author = self.authors.get(commit.author, commit.author)
commit.branch = self.branchmap.get(commit.branch, commit.branch)
self.commitcache[rev] = commit
return commit
def copy(self, rev):
commit = self.commitcache[rev]
changes = self.source.getchanges(rev)
if isinstance(changes, basestring):
if changes == SKIPREV:
dest = SKIPREV
else:
dest = self.map[changes]
self.map[rev] = dest
return
files, copies = changes
pbranches = []
if commit.parents:
for prev in commit.parents:
if prev not in self.commitcache:
self.cachecommit(prev)
pbranches.append((self.map[prev],
self.commitcache[prev].branch))
self.dest.setbranch(commit.branch, pbranches)
try:
parents = self.splicemap[rev]
self.ui.status(_('spliced in %s as parents of %s\n') %
(parents, rev))
parents = [self.map.get(p, p) for p in parents]
except KeyError:
parents = [b[0] for b in pbranches]
source = progresssource(self.ui, self.source, len(files))
newnode = self.dest.putcommit(files, copies, parents, commit,
source, self.map)
source.close()
self.source.converted(rev, newnode)
self.map[rev] = newnode
def convert(self, sortmode):
try:
self.source.before()
self.dest.before()
self.source.setrevmap(self.map)
self.ui.status(_("scanning source...\n"))
heads = self.source.getheads()
parents = self.walktree(heads)
self.mergesplicemap(parents, self.splicemap)
self.ui.status(_("sorting...\n"))
t = self.toposort(parents, sortmode)
num = len(t)
c = None
self.ui.status(_("converting...\n"))
for i, c in enumerate(t):
num -= 1
desc = self.commitcache[c].desc
if "\n" in desc:
desc = desc.splitlines()[0]
# convert log message to local encoding without using
# tolocal() because the encoding.encoding convert()
# uses is 'utf-8'
self.ui.status("%d %s\n" % (num, recode(desc)))
self.ui.note(_("source: %s\n") % recode(c))
self.ui.progress(_('converting'), i, unit=_('revisions'),
total=len(t))
self.copy(c)
self.ui.progress(_('converting'), None)
tags = self.source.gettags()
ctags = {}
for k in tags:
v = tags[k]
if self.map.get(v, SKIPREV) != SKIPREV:
ctags[k] = self.map[v]
if c and ctags:
nrev, tagsparent = self.dest.puttags(ctags)
if nrev and tagsparent:
# write another hash correspondence to override the previous
# one so we don't end up with extra tag heads
tagsparents = [e for e in self.map.iteritems()
if e[1] == tagsparent]
if tagsparents:
self.map[tagsparents[0][0]] = nrev
bookmarks = self.source.getbookmarks()
cbookmarks = {}
for k in bookmarks:
v = bookmarks[k]
if self.map.get(v, SKIPREV) != SKIPREV:
cbookmarks[k] = self.map[v]
if c and cbookmarks:
self.dest.putbookmarks(cbookmarks)
self.writeauthormap()
finally:
self.cleanup()
def cleanup(self):
try:
self.dest.after()
finally:
self.source.after()
self.map.close()
def convert(ui, src, dest=None, revmapfile=None, **opts):
global orig_encoding
orig_encoding = encoding.encoding
encoding.encoding = 'UTF-8'
# support --authors as an alias for --authormap
if not opts.get('authormap'):
opts['authormap'] = opts.get('authors')
if not dest:
dest = hg.defaultdest(src) + "-hg"
ui.status(_("assuming destination %s\n") % dest)
destc = convertsink(ui, dest, opts.get('dest_type'))
try:
srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
opts.get('rev'))
except Exception:
for path in destc.created:
shutil.rmtree(path, True)
raise
sortmodes = ('branchsort', 'datesort', 'sourcesort', 'closesort')
sortmode = [m for m in sortmodes if opts.get(m)]
if len(sortmode) > 1:
raise util.Abort(_('more than one sort mode specified'))
sortmode = sortmode and sortmode[0] or defaultsort
if sortmode == 'sourcesort' and not srcc.hasnativeorder():
raise util.Abort(_('--sourcesort is not supported by this data source'))
if sortmode == 'closesort' and not srcc.hasnativeclose():
raise util.Abort(_('--closesort is not supported by this data source'))
fmap = opts.get('filemap')
if fmap:
srcc = filemap.filemap_source(ui, srcc, fmap)
destc.setfilemapmode(True)
if not revmapfile:
revmapfile = destc.revmapfile()
c = converter(ui, srcc, destc, revmapfile, opts)
c.convert(sortmode)
|
jordigh/mercurial-crew
|
hgext/convert/convcmd.py
|
Python
|
gpl-2.0
| 18,870
|
[
"VisIt"
] |
8d2cfa03202ab59547d1f1b249b7f28f6586e70e280a4ae4985224b411e5a791
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Superimpose two structures."""
import numpy
from Bio.SVDSuperimposer import SVDSuperimposer
from Bio.PDB.PDBExceptions import PDBException
class Superimposer(object):
"""
Rotate/translate one set of atoms on top of another,
thereby minimizing the RMSD.
"""
def __init__(self):
self.rotran=None
self.rms=None
def set_atoms(self, fixed, moving):
"""
Compute the rotation/translation that superimposes the moving
atoms onto the fixed atoms, minimizing the RMSD.
@param fixed: list of (fixed) atoms
@param moving: list of (moving) atoms
@type fixed,moving: [L{Atom}, L{Atom},...]
"""
if not (len(fixed)==len(moving)):
raise PDBException("Fixed and moving atom lists differ in size")
l=len(fixed)
fixed_coord=numpy.zeros((l, 3))
moving_coord=numpy.zeros((l, 3))
for i in range(0, len(fixed)):
fixed_coord[i]=fixed[i].get_coord()
moving_coord[i]=moving[i].get_coord()
sup=SVDSuperimposer()
sup.set(fixed_coord, moving_coord)
sup.run()
self.rms=sup.get_rms()
self.rotran=sup.get_rotran()
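# After set_atoms, self.rms holds the minimized root-mean-square
# deviation between the two coordinate sets and self.rotran holds the
# optimal (rotation, translation) pair, later consumed by apply().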
def apply(self, atom_list):
"""
Rotate/translate a list of atoms.
"""
if self.rotran is None:
raise PDBException("No transformation has been calculated yet")
rot, tran=self.rotran
rot=rot.astype('f')
tran=tran.astype('f')
for atom in atom_list:
atom.transform(rot, tran)
if __name__=="__main__":
import sys
from Bio.PDB import PDBParser, Selection
p=PDBParser()
s1=p.get_structure("FIXED", sys.argv[1])
fixed=Selection.unfold_entities(s1, "A")
s2=p.get_structure("MOVING", sys.argv[1])
moving=Selection.unfold_entities(s2, "A")
rot=numpy.identity(3).astype('f')
tran=numpy.array((1.0, 2.0, 3.0), 'f')
for atom in moving:
atom.transform(rot, tran)
sup=Superimposer()
sup.set_atoms(fixed, moving)
print sup.rotran
print sup.rms
sup.apply(moving)
|
bryback/quickseq
|
genescript/Bio/PDB/Superimposer.py
|
Python
|
mit
| 2,351
|
[
"Biopython"
] |
a8a210a5a80cc971b5e1028eb01454b103136cd1531cccbcf10fb937e9de33ae
|
import tyre.utils
import re
import pandas as pd
##
## normalizeXXX
##
## input: item
## output: item
def normalize_brand(item):
if "brand" in item:
s=item["brand"]
s = s.replace("-", " ").replace("_", " ")
item["brand"] = s
return item
def normalize_load_index(item):
if "load_index" in item:
item["load_index"] = re.sub("\(.*\)","",item["load_index"]).strip()
return item
def normalize_label_fuel(item):
if "label_fuel" in item:
item["label_fuel"].upper()
return item
def normalize_label_noise(item):
if "label_noise" in item:
item["label_noise"].upper().replace("DB","").strip()
return item
def normalize_label_wet(item):
if "label_wet" in item:
item["label_wet"].upper()
return item
def normalize_price(item):
if "price" in item:
s=item["price"]
if bool(len(re.findall("€", s))):
item["currency"] = "EUR"
elif bool(len(re.findall("\$", s))):
item["currency"] = "USD"
s = s.replace("$", "").replace("€", "").replace(",", ".").strip()
item["price"] = s
return item
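# Illustrative inputs (made up): normalize_price({"price": "€ 89,90"})
# yields {"price": "89.90", "currency": "EUR"}, while a price of
# "$120.00" yields {"price": "120.00", "currency": "USD"}.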
def normalize_product(item):
if "brand" not in item:
return item
item = normalize_brand(item)
if "product" in item:
item["product"] = item["product"].replace(item["brand"], "").strip()
return item
def normalize_size(item):
if "size" in item:
s = item["size"]
s = s.replace("rinnovati", " ").replace(",", " ").strip()
item["size"] = s
return item
def normalize_seasonality(item):
if "seasonality" in item:
s = item["seasonality"]
s.replace("(EURO)","").strip()
if bool(len(re.findall("WINTER|INVERNAL|M\+S|SNOW|KITKARENGAS|NASTARENGAS", s))):
season = "WINTER"
elif bool(len(re.findall("SUMMER|ESTIV|KESÄRENGAS", s))):
season = "SUMMER"
elif bool(len(re.findall("SEASON|STAGIONI|4MEVSIM|JOKASÄÄNRENGAS", s))):
season = "ALL_SEASONS"
else:
season = s
item["seasonality"] = season
return item
def normalize_vehicle(item):
if "vehicle" in item:
s = item["vehicle"].upper()
if bool(len(re.findall("AUTO|PKW", s))):
result = "CAR"
elif s == "HA":
result = "CAR"
elif s == "PA":
result = "VAN"
elif s == "4x4":
result = "SUV"
else:
result = s
item["vehicle"] = result
return item
def mergeItems(item1, item2, append=False):
## TODO: append=True does not work
for f in item2:
if not f in item1:
item1[f] = item2[f]
elif append and f not in [ "brand", "ean", "manufacturer_number", "product"]:
## multivalues are allowed
v1 = item1[f]
if not isinstance(v1, list):
v1 = [v1]
v2 = item2[f]
if not isinstance(v2, list):
v2 = [v2]
## only unique values
v = list(set(v1 + v2))
## value will be a string if there is only 1 element in the list
if len(v) == 1:
v = v[0]
item1[f] = v
return item1
def mergeItemIntoTyre(item, tyre=None):
if tyre is None:
tyre = {}
if item is None:
return tyre
if "source" in item and item["source"]:
source = item["source"]
else:
source = "Unknow"
for f in item:
if item[f] is not None and not f.startswith("_"):
if f not in tyre:
tyre[f] = {}
if f == "ean":
tyre[f] = item[f]
else:
tyre[f][source] = item[f]
return tyre
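# Illustrative usage (made-up values): per-source fields are nested under
# the item's "source", while "ean" stays flat:
#
#   t = mergeItemIntoTyre({"source": "shopA", "ean": "1234567890123",
#                          "price": "99.90"})
#   t = mergeItemIntoTyre({"source": "shopB", "price": "95.00"}, t)
#   # t["ean"]   -> "1234567890123"
#   # t["price"] -> {"shopA": "99.90", "shopB": "95.00"}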
##
## extractXXX
##
## input: string
## output: dict, to be merged with item
def extractBrand(s):
result = {}
list = s.split(" ")
if len(list) < 3:
if not "logging" in result:
result["logging"] = []
result["logging"].append("cannot extract brand from description '%s'" % s)
else:
brand = list[0]
if len(brand) < 3:
brand = "%s %s" % (brand, list[1])
result['brand'] = brand
result = normalize_brand(result)
return(result)
def extractOEMark(s):
result = {}
l = re.findall(" ?(MOE|N0|N1|MO|AO|RO\d|NH|MCLAREN|LRO|F0\d|\*)", s, flags=re.IGNORECASE)
if len(l) > 0:
result["oe_mark"] = l[0]
return result
def extractOEModels(s, filename="data/oe_manufacturers.csv"):
oe = pd.read_csv(filename)
result = {"oe_models": []}
for manu in list(oe.MANUFACTURER):
regexp = r"(%s ?.*$)" % manu
l = re.findall(regexp, s)
if len(l) > 0:
result["oe_models"] = result["oe_models"] + l
return result
def extractProduct(s):
result = extractBrand(s)
if "brand" in result:
brand = result['brand']
s = s.replace(brand,"").strip()
regexp_product = "(.+) \d+[/ ]\d* ?Z?R\d+"
m = re.findall(regexp_product, s)
if len(m) > 0:
result["product"] = m[0]
else:
if not "logging" in result:
result["logging"] = []
result["logging"].append("cannot extract product from description '%s'" % s)
return result
def extractEan(s):
result = {}
l = re.findall(" ?(\d{13})", s)
if len(l) > 0:
result["ean"] = l[0]
return result
def extractIndexes(s):
## TODO: gestire 107/110
## re.findall("(\d+)/?(\d+)([RSTUVZ])", s)
result = {}
m = re.search("\(?(\d+/?\d+)\)?([I-Z])", s)
if m and m.groups and len(m.groups()) > 0:
result['speed_index'] = m.groups('')[1]
load_index = m.groups('')[0]
load_index_list = load_index.split("/")
result['load_index'] = load_index_list[0]
if len(load_index_list) == 2:
result['load_index2'] = load_index_list[1]
result["index"] = m.group()
return result
def extractSize(s):
## TODO: managing the trailing 'C', e.g.
## Hankook RW06 175 R14C 99Q
result = {}
if s is not None:
match = re.search("(\d+\.?\d*)/?(\d+\.?\d*)? ?(ZR|R|-)(\d+)(C)?", s)
if match and len(match.groups()) >= 4:
## size
result["width"] = match.groups('')[0]
result["series"] = match.groups('')[1]
result["radial"] = match.groups('')[2]
result["diameter"] = match.groups('')[3]
# the optional fifth regex group captures a trailing 'C' (light truck)
if match.groups('')[4].upper() == "C":
result["vehicle"] = "LT"
result["size"] = match.group()
else:
if not "logging" in result:
result["logging"] = []
result["logging"].append("Cannot extract the 'size' from '%s'" % s)
return result
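# Illustrative parses (made-up strings): extractSize("205/55 R16") yields
# width '205', series '55', radial 'R', diameter '16' and size
# '205/55 R16'; a trailing 'C' as in "175 R14C" additionally marks a
# light-truck tyre by setting vehicle to 'LT'.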
def extractSeasonality(s):
result = {}
season = normalize_seasonality({"seasonality": s}).get("seasonality")
if s != season:
result["seasonality"] = season
return result
def extractExtraInfos(s):
extra = {}
if tyre.utils.isExtraLoad(s):
extra["xl"] = True
if tyre.utils.isMFS(s):
extra["mfs"] = True
if tyre.utils.isNCS(s):
extra["ncs"] = True
if tyre.utils.isReinforced(s):
extra["reinforced"] = True
if tyre.utils.isRunflat(s):
extra["runflat"] = True
if tyre.utils.isSelfSeal(s):
extra["self_seal"] = True
if tyre.utils.isStuddable(s):
extra["studdable"] = True
if tyre.utils.isStudded(s):
extra["studded"] = True
return extra
def extractAll(item):
result = {}
if "description" not in item:
return result
s = item["description"]
result = extractProduct(s)
if "brand" in result:
s = s.replace(result["brand"],"")
if "product" in result:
s = s.replace(result["product"],"")
s = s.strip()
result.update(extractSize(s))
if "size" in result:
s = s.replace(result["size"],"").strip()
result.update(extractIndexes(s))
if "index" in result:
s = s.replace(result["index"],"").strip()
result.update(extractExtraInfos(s))
result.update(extractEan(s))
result.update(extractOEMark(s))
result.update(extractOEModels(s))
return(result)
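# Illustrative end-to-end run (made-up description; assumes the
# data/oe_manufacturers.csv file is present): with
#   item = {"description": "Michelin Primacy 4 205/55 R16 91V"}
# extractAll(item) should yield, among other fields, brand 'Michelin',
# product 'Primacy 4', size '205/55 R16', load_index '91' and
# speed_index 'V'.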
|
matteoredaelli/scrapy_tyres
|
tyre/item.py
|
Python
|
gpl-3.0
| 8,288
|
[
"MOE"
] |
85043b4a0d70cfb8ab8c5ab3570cc8feb6fcd4675a7792e07ce573b24e000e00
|
""" push new fasta files to EW
Conducts a scan of a directory for fasta files, and pushes those that don't exist into the EW server
THIS ASSUMES THERE IS A CONFIG FILE, PASSED AS sys.argv[1] (i.e. the first and
only argument), CONTAINING A DICTIONARY CONFIG WITH THE KEYS 'DESCRIPTION',
'IP', 'PORT', 'TMP_FASTA' AND 'GLOBPATH'.
An example of such a file would read:
{
"DESCRIPTION":"A test server operating in ../unittest_tmp, only suitable for testing",
"PORT":8185,
"IP":"127.0.0.1",
"TMP_FASTA":"/home/compass/tmpFasta", # to covert fasta.gz to fasta
"GLOBPATH":"/home/compass/data/relatednesstest/TB_FASTA/*_v3.fasta" }
"""
import logging
import readline
import time
import json
import datetime
import gzip
import os.path
import xmlrpc.client
import socket
import sys
import os
import glob
import ntpath
from Bio import SeqIO
# set up logging
# if not within the COMPASS framework, need to decide where to log to
#logging.getLogger()
# check input
json_config_file=sys.argv[1]
with open(json_config_file, 'rt') as f:
txt=f.read()
logging.info("Read config file: {0}".format(txt))
CONFIG=json.loads(txt)
if not type(CONFIG)==dict:
raise KeyError("CONFIG must be either a dictionary or a JSON string encoding a dictionary. It is: {0}".format(CONFIG))
if not set(CONFIG.keys()) == set(['DESCRIPTION','IP','PORT','TMP_FASTA','GLOBPATH']):
raise KeyError("The dictionary must have five components: DESCRIPTION, IP, PORT,TMP_FASTA and GLOBPATH. The latter is passed to glob.glob to find the files. The dictionary actually looks like this: {0}".format(CONFIG))
# try to start the client. Will throw and error if it cannot connect.
# should wrap with try/catch to log to logfile, if any
logging.info("Trying to make server connection ...")
try:
client=xmlrpc.client.ServerProxy("http://{0}:{1}".format(CONFIG['IP'],CONFIG['PORT']))
except Exception as e:
logging.exception(e)
raise e
logging.info("Connected, checking existing guids ...")
guidlist = json.loads(client.get_all_guids())
print(guidlist)
guids = set()
for item in guidlist:
guids.add(item['guid'])
nTested=0
nAdded=0
logging.info("Connected, checking existing guids vs. those found using the glob pattern ...")
fastaFiles = glob.glob(CONFIG['GLOBPATH'])
for fastaFileGz in fastaFiles:
#Check if the sample is already in EW
guid = str(ntpath.basename(fastaFileGz)[0:36])
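#The file name is assumed to start with the 36-character sample guid
#(the length of a standard hyphenated UUID), e.g.
#'123e4567-e89b-12d3-a456-426614174000_v3.fasta'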
print(guid)
if guid in guids:
print ('Sample ' + guid + ' is already in the database')
continue
#Convert fasta.gz to fasta to a tmp file
fastaFile=os.path.join(CONFIG['TMP_FASTA'], "{0}.fasta".format(guid))
fo=open(fastaFile,'wb')
# it appears that BioPython can't cope with reading the gzip file on the fly
with gzip.open(fastaFileGz,'rb') as fi:
fileContent=fi.read()
fo.write(fileContent) # so we decompress it
fo.close()
#Process the tmp fasta file
with open(fastaFile, 'rt') as f:
for seq_record in SeqIO.parse(f, 'fasta'):
guid = str(os.path.basename(fastaFile)[0:36])
nTested += 1
if not client.exist_sample(guid):
seq = str(seq_record.seq)
print ('Inserting: ' + guid)
result = client.insert(guid,seq)
print (' DONE')
nAdded += 1
#Delete the tmp file
os.remove(fastaFile)
logging.info("Scan finished. Added {0}".format(nAdded))
|
davidhwyllie/findNeighbour2
|
testScripts/push_new_samples.py
|
Python
|
lgpl-3.0
| 3,251
|
[
"Biopython"
] |
6eef7a360ae0755178ea28d4bda698b572a04c85afe15e8144e30d15dbf04c6a
|
# -*- coding: utf-8 -*-
# Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides getCharacterName that maps punctuation marks and other
individual characters into localized words."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
from orca_i18n import _ # for gettext support
# chnames is a dictionary where the keys represent a unicode character
# and the values represent the common term used for the character.
#
chnames = {}
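# Illustrative only: a minimal helper (not part of the original module,
# which provides its own getCharacterName) showing how this table is
# meant to be consulted: look the character up and fall back to the
# character itself when no entry exists.
def _example_getCharacterName(character):
return chnames.get(character, character)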
# Translators: this is the spoken word for the space character
#
chnames[" "] = _("space")
# Translators: this is the spoken word for the newline character
#
chnames["\n"] = _("newline")
# Translators: this is the spoken word for the tab character
#
chnames["\t"] = _("tab")
# Translators: this is the spoken word for the character '!' (U+0021)
#
chnames["!"] = _("exclaim")
# Translators: this is the spoken word for the character '"' (U+0022)
#
chnames["\""] = _("quote")
# Translators: this is the spoken word for the character '#' (U+0023)
#
chnames["#"] = _("number")
# Translators: this is the spoken word for the character '$' (U+0024)
#
chnames["$"] = _("dollar")
# Translators: this is the spoken word for the character '%' (U+0025)
#
chnames["%"] = _("percent")
# Translators: this is the spoken word for the character '&' (U+0026)
#
chnames["&"] = _("and")
# Translators: this is the spoken word for the character ''' (U+0027)
#
chnames["'"] = _("apostrophe")
# Translators: this is the spoken word for the character '(' (U+0028)
#
chnames["("] = _("left paren")
# Translators: this is the spoken word for the character ')' (U+0029)
#
chnames[")"] = _("right paren")
# Translators: this is the spoken word for the character '*' (U+002a)
#
chnames["*"] = _("star")
# Translators: this is the spoken word for the character '+' (U+002b)
#
chnames["+"] = _("plus")
# Translators: this is the spoken word for the character ',' (U+002c)
#
chnames[","] = _("comma")
# Translators: this is the spoken word for the character '-' (U+002d)
#
chnames["-"] = _("dash")
# Translators: this is the spoken word for the character '.' (U+002e)
#
chnames["."] = _("dot")
# Translators: this is the spoken word for the character '/' (U+002f)
#
chnames["/"] = _("slash")
# Translators: this is the spoken word for the character ':' (U+003a)
#
chnames[":"] = _("colon")
# Translators: this is the spoken word for the character ';' (U+003b)
#
chnames[";"] = _("semicolon")
# Translators: this is the spoken word for the character '< ' (U+003c)
#
chnames["<"] = _("less")
# Translators: this is the spoken word for the character '=' (U+003d)
#
chnames["="] = _("equals")
# Translators: this is the spoken word for the character '> ' (U+003e)
#
chnames[">"] = _("greater")
# Translators: this is the spoken word for the character '?' (U+003f)
#
chnames["?"] = _("question")
# Translators: this is the spoken word for the character '@' (U+0040)
#
chnames["@"] = _("at")
# Translators: this is the spoken word for the character '[' (U+005b)
#
chnames["["] = _("left bracket")
# Translators: this is the spoken word for the character '\' (U+005c)
#
chnames["\\"] = _("backslash")
# Translators: this is the spoken word for the character ']' (U+005d)
#
chnames["]"] = _("right bracket")
# Translators: this is the spoken word for the character '^' (U+005e)
#
chnames["^"] = _("caret")
# Translators: this is the spoken word for the character '_' (U+005f)
#
chnames["_"] = _("underline")
# Translators: this is the spoken word for the character '`' (U+0060)
#
chnames["`"] = _("grave")
# Translators: this is the spoken word for the character '{' (U+007b)
#
chnames["{"] = _("left brace")
# Translators: this is the spoken word for the character '|' (U+007c)
#
chnames["|"] = _("vertical bar")
# Translators: this is the spoken word for the character '}' (U+007d)
#
chnames["}"] = _("right brace")
# Translators: this is the spoken word for the character '~' (U+007e)
#
chnames["~"] = _("tilde")
# Translators: this is the spoken character for the no break space
# character (e.g., " " in HTML -- U+00a0)
#
chnames[u'\u00a0'] = _("no break space")
# Translators: this is the spoken word for the character '¡' (U+00a1)
#
chnames[u'\u00a1'] = _("inverted exclamation point")
# Translators: this is the spoken word for the character '¢' (U+00a2)
#
chnames[u'\u00a2'] = _("cents")
# Translators: this is the spoken word for the character '£' (U+00a3)
#
chnames[u'\u00a3'] = _("pounds")
# Translators: this is the spoken word for the character '¤' (U+00a4)
#
chnames[u'\u00a4'] = _("currency sign")
# Translators: this is the spoken word for the character '¥' (U+00a5)
#
chnames[u'\u00a5'] = _("yen")
# Translators: this is the spoken word for the character '¦' (U+00a6)
#
chnames[u'\u00a6'] = _("broken bar")
# Translators: this is the spoken word for the character '§' (U+00a7)
#
chnames[u'\u00a7'] = _("section")
# Translators: this is the spoken word for the character '¨' (U+00a8)
#
chnames[u'\u00a8'] = _("umlaut")
# Translators: this is the spoken word for the character '©' (U+00a9)
#
chnames[u'\u00a9'] = _("copyright")
# Translators: this is the spoken word for the character 'ª' (U+00aa)
#
chnames[u'\u00aa'] = _("superscript a")
# Translators: this is the spoken word for the character '«' (U+00ab)
#
chnames[u'\u00ab'] = _("left double angle bracket")
# Translators: this is the spoken word for the character '¬' (U+00ac)
#
chnames[u'\u00ac'] = _("logical not")
# Translators: this is the spoken word for the character '' (U+00ad)
#
chnames[u'\u00ad'] = _("soft hyphen")
# Translators: this is the spoken word for the character '®' (U+00ae)
#
chnames[u'\u00ae'] = _("registered")
# Translators: this is the spoken word for the character '¯' (U+00af)
#
chnames[u'\u00af'] = _("macron")
# Translators: this is the spoken word for the character '°' (U+00b0)
#
chnames[u'\u00b0'] = _("degrees")
# Translators: this is the spoken word for the character '±' (U+00b1)
#
chnames[u'\u00b1'] = _("plus or minus")
# Translators: this is the spoken word for the character '²' (U+00b2)
#
chnames[u'\u00b2'] = _("superscript 2")
# Translators: this is the spoken word for the character '³' (U+00b3)
#
chnames[u'\u00b3'] = _("superscript 3")
# Translators: this is the spoken word for the character '´' (U+00b4)
#
chnames[u'\u00b4'] = _("acute accent")
# Translators: this is the spoken word for the character 'µ' (U+00b5)
#
chnames[u'\u00b5'] = _("mu")
# Translators: this is the spoken word for the character '¶' (U+00b6)
#
chnames[u'\u00b6'] = _("paragraph marker")
# Translators: this is the spoken word for the character '·' (U+00b7)
#
chnames[u'\u00b7'] = _("middle dot")
# Translators: this is the spoken word for the character '¸' (U+00b8)
#
chnames[u'\u00b8'] = _("cedilla")
# Translators: this is the spoken word for the character '¹' (U+00b9)
#
chnames[u'\u00b9'] = _("superscript 1")
# Translators: this is the spoken word for the character 'º' (U+00ba)
#
chnames[u'\u00ba'] = _("ordinal")
# Translators: this is the spoken word for the character '»' (U+00bb)
#
chnames[u'\u00bb'] = _("right double angle bracket")
# Translators: this is the spoken word for the character '¼' (U+00bc)
#
chnames[u'\u00bc'] = _("one fourth")
# Translators: this is the spoken word for the character '½' (U+00bd)
#
chnames[u'\u00bd'] = _("one half")
# Translators: this is the spoken word for the character '¾' (U+00be)
#
chnames[u'\u00be'] = _("three fourths")
# Translators: this is the spoken word for the character '¿' (U+00bf)
#
chnames[u'\u00bf'] = _("inverted question mark")
# Translators: this is the spoken word for the character 'á' (U+00e1)
#
chnames[u'\u00e1'] = _("a acute")
# Translators: this is the spoken word for the character 'À' (U+00c0)
#
chnames[u'\u00c0'] = _("A GRAVE")
# Translators: this is the spoken word for the character 'Á' (U+00c1)
#
chnames[u'\u00c1'] = _("A ACUTE")
# Translators: this is the spoken word for the character 'Â' (U+00c2)
#
chnames[u'\u00c2'] = _("A CIRCUMFLEX")
# Translators: this is the spoken word for the character 'Ã' (U+00c3)
#
chnames[u'\u00c3'] = _("A TILDE")
# Translators: this is the spoken word for the character 'Ä' (U+00c4)
#
chnames[u'\u00c4'] = _("A UMLAUT")
# Translators: this is the spoken word for the character 'Å' (U+00c5)
#
chnames[u'\u00c5'] = _("A RING")
# Translators: this is the spoken word for the character 'Æ' (U+00c6)
#
chnames[u'\u00c6'] = _("A E")
# Translators: this is the spoken word for the character 'Ç' (U+00c7)
#
chnames[u'\u00c7'] = _("C CEDILLA")
# Translators: this is the spoken word for the character 'È' (U+00c8)
#
chnames[u'\u00c8'] = _("E GRAVE")
# Translators: this is the spoken word for the character 'É' (U+00c9)
#
chnames[u'\u00c9'] = _("E ACUTE")
# Translators: this is the spoken word for the character 'Ê' (U+00ca)
#
chnames[u'\u00ca'] = _("E CIRCUMFLEX")
# Translators: this is the spoken word for the character 'Ë' (U+00cb)
#
chnames[u'\u00cb'] = _("E UMLAUT")
# Translators: this is the spoken word for the character 'Ì' (U+00cc)
#
chnames[u'\u00cc'] = _("I GRAVE")
# Translators: this is the spoken word for the character 'Í' (U+00cd)
#
chnames[u'\u00cd'] = _("I ACUTE")
# Translators: this is the spoken word for the character 'Î' (U+00ce)
#
chnames[u'\u00ce'] = _("I CIRCUMFLEX")
# Translators: this is the spoken word for the character 'Ï' (U+00cf)
#
chnames[u'\u00cf'] = _("I UMLAUT")
# Translators: this is the spoken word for the character 'Ð' (U+00d0)
#
chnames[u'\u00d0'] = _("ETH")
# Translators: this is the spoken word for the character 'Ñ' (U+00d1)
#
chnames[u'\u00d1'] = _("N TILDE")
# Translators: this is the spoken word for the character 'Ò' (U+00d2)
#
chnames[u'\u00d2'] = _("O GRAVE")
# Translators: this is the spoken word for the character 'Ó' (U+00d3)
#
chnames[u'\u00d3'] = _("O ACUTE")
# Translators: this is the spoken word for the character 'Ô' (U+00d4)
#
chnames[u'\u00d4'] = _("O CIRCUMFLEX")
# Translators: this is the spoken word for the character 'Õ' (U+00d5)
#
chnames[u'\u00d5'] = _("O TILDE")
# Translators: this is the spoken word for the character 'Ö' (U+00d6)
#
chnames[u'\u00d6'] = _("O UMLAUT")
# Translators: this is the spoken word for the character '×' (U+00d7)
#
chnames[u'\u00d7'] = _("times")
# Translators: this is the spoken word for the character 'Ø' (U+00d8)
#
chnames[u'\u00d8'] = _("O STROKE")
# Translators: this is the spoken word for the character 'Ù' (U+00d9)
#
chnames[u'\u00d9'] = _("U GRAVE")
# Translators: this is the spoken word for the character 'Ú' (U+00da)
#
chnames[u'\u00da'] = _("U ACUTE")
# Translators: this is the spoken word for the character 'Û' (U+00db)
#
chnames[u'\u00db'] = _("U CIRCUMFLEX")
# Translators: this is the spoken word for the character 'Ü' (U+00dc)
#
chnames[u'\u00dc'] = _("U UMLAUT")
# Translators: this is the spoken word for the character 'Ý' (U+00dd)
#
chnames[u'\u00dd'] = _("Y ACUTE")
# Translators: this is the spoken word for the character 'Þ' (U+00de)
#
chnames[u'\u00de'] = _("THORN")
# Translators: this is the spoken word for the character 'ß' (U+00df)
#
chnames[u'\u00df'] = _("s sharp")
# Translators: this is the spoken word for the character 'à' (U+00e0)
#
chnames[u'\u00e0'] = _("a grave")
# Translators: this is the spoken word for the character 'â' (U+00e2)
#
chnames[u'\u00e2'] = _("a circumflex")
# Translators: this is the spoken word for the character 'ã' (U+00e3)
#
chnames[u'\u00e3'] = _("a tilde")
# Translators: this is the spoken word for the character 'ä' (U+00e4)
#
chnames[u'\u00e4'] = _("a umlaut")
# Translators: this is the spoken word for the character 'å' (U+00e5)
#
chnames[u'\u00e5'] = _("a ring")
# Translators: this is the spoken word for the character 'æ' (U+00e6)
#
chnames[u'\u00e6'] = _("a e")
# Translators: this is the spoken word for the character 'ç' (U+00e7)
#
chnames[u'\u00e7'] = _("c cedilla")
# Translators: this is the spoken word for the character 'è' (U+00e8)
#
chnames[u'\u00e8'] = _("e grave")
# Translators: this is the spoken word for the character 'é' (U+00e9)
#
chnames[u'\u00e9'] = _("e acute")
# Translators: this is the spoken word for the character 'ê' (U+00ea)
#
chnames[u'\u00ea'] = _("e circumflex")
# Translators: this is the spoken word for the character 'ë' (U+00eb)
#
chnames[u'\u00eb'] = _("e umlaut")
# Translators: this is the spoken word for the character 'ì' (U+00ec)
#
chnames[u'\u00ec'] = _("i grave")
# Translators: this is the spoken word for the character 'í' (U+00ed)
#
chnames[u'\u00ed'] = _("i acute")
# Translators: this is the spoken word for the character 'î' (U+00ee)
#
chnames[u'\u00ee'] = _("i circumflex")
# Translators: this is the spoken word for the character 'ï' (U+00ef)
#
chnames[u'\u00ef'] = _("i umlaut")
# Translators: this is the spoken word for the character 'ð' (U+00f0)
#
chnames[u'\u00f0'] = _("eth")
# Translators: this is the spoken word for the character 'ñ' (U+00f1)
#
chnames[u'\u00f1'] = _("n tilde")
# Translators: this is the spoken word for the character 'ò' (U+00f2)
#
chnames[u'\u00f2'] = _("o grave")
# Translators: this is the spoken word for the character 'ó' (U+00f3)
#
chnames[u'\u00f3'] = _("o acute")
# Translators: this is the spoken word for the character 'ô' (U+00f4)
#
chnames[u'\u00f4'] = _("o circumflex")
# Translators: this is the spoken word for the character 'õ' (U+00f5)
#
chnames[u'\u00f5'] = _("o tilde")
# Translators: this is the spoken word for the character 'ö' (U+00f6)
#
chnames[u'\u00f6'] = _("o umlaut")
# Translators: this is the spoken word for the character '÷' (U+00f7)
#
chnames[u'\u00f7'] = _("divided by")
# Translators: this is the spoken word for the character 'ø' (U+00f8)
#
chnames[u'\u00f8'] = _("o stroke")
# Translators: this is the spoken word for the character 'þ' (U+00fe)
#
chnames[u'\u00fe'] = _("thorn")
# Translators: this is the spoken word for the character 'ú' (U+00fa)
#
chnames[u'\u00fa'] = _("u acute")
# Translators: this is the spoken word for the character 'ù' (U+00f9)
#
chnames[u'\u00f9'] = _("u grave")
# Translators: this is the spoken word for the character 'û' (U+00fb)
#
chnames[u'\u00fb'] = _("u circumflex")
# Translators: this is the spoken word for the character 'ü' (U+00fc)
#
chnames[u'\u00fc'] = _("u umlaut")
# Translators: this is the spoken word for the character 'ý' (U+00fd)
#
chnames[u'\u00fd'] = _("y acute")
# Translators: this is the spoken word for the character 'ÿ' (U+00ff)
#
chnames[u'\u00ff'] = _("y umlaut")
# Translators: this is the spoken word for the character 'Ÿ' (U+0178)
#
chnames[u'\u0178'] = _("Y UMLAUT")
# Translators: this is the spoken word for the character 'ƒ' (U+0192)
#
chnames[u'\u0192'] = _("florin")
# Translators: this is the spoken word for the character '–' (U+2013)
#
chnames[u'\u2013'] = _("en dash")
# Translators: this is the spoken word for the left single quote: ‘
# (U+2018)
#
chnames[u'\u2018'] = _("left single quote")
# Translators: this is the spoken word for the right single quote: ’
# (U+2019)
#
chnames[u'\u2019'] = _("right single quote")
# Translators: this is the spoken word for the character '‚' (U+201a)
#
chnames[u'\u201a'] = _("single low quote")
# Translators: this is the spoken word for the character '“' (U+201c)
#
chnames[u'\u201c'] = _("left double quote")
# Translators: this is the spoken word for the character '”' (U+201d)
#
chnames[u'\u201d'] = _("right double quote")
# Translators: this is the spoken word for the character '„' (U+201e)
#
chnames[u'\u201e'] = _("double low quote")
# Translators: this is the spoken word for the character '†' (U+2020)
#
chnames[u'\u2020'] = _("dagger")
# Translators: this is the spoken word for the character '‡' (U+2021)
#
chnames[u'\u2021'] = _("double dagger")
# Translators: this is the spoken word for the character '•' (U+2022)
#
chnames[u'\u2022'] = _("bullet")
# Translators: this is the spoken word for the character '‣' (U+2023)
#
chnames[u'\u2023'] = _("triangular bullet")
# Translators: this is the spoken word for the character '‰' (U+2030)
#
chnames[u'\u2030'] = _("per mille")
# Translators: this is the spoken word for the character '′' (U+2032)
#
chnames[u'\u2032'] = _("prime")
# Translators: this is the spoken word for the character '″' (U+2033)
#
chnames[u'\u2033'] = _("double prime")
# Translators: this is the spoken word for the character '⁃' (U+2043)
#
chnames[u'\u2043'] = _("hyphen bullet")
# Translators: this is the spoken word for the character '€' (U+20ac)
#
chnames[u'\u20ac'] = _("euro")
# Translators: this is the spoken word for the character '™' (U+2122)
#
chnames[u'\u2122'] = _("trademark")
# Translators: this is the spoken word for the character '←' (U+2190)
#
chnames[u'\u2190'] = _("left arrow")
# Translators: this is the spoken word for the character '→' (U+2192)
#
chnames[u'\u2192'] = _("right arrow")
# Translators: this is the spoken word for the character '≈' (U+2248)
#
chnames[u'\u2248'] = _("almost equal to")
# Translators: this is the spoken word for the character '≠' (U+2260)
#
chnames[u'\u2260'] = _("not equal to")
# Translators: this is the spoken word for the character '≤' (U+2264)
#
chnames[u'\u2264'] = _("less than or equal to")
# Translators: this is the spoken word for the character '≥' (U+2265)
#
chnames[u'\u2265'] = _("greater than or equal to")
# Translators: this is the spoken word for the character '√' (U+221a)
#
chnames[u'\u221a'] = _("square root")
# Translators: this is the spoken word for the character '∛' (U+221b)
#
chnames[u'\u221b'] = _("cube root")
# Translators: this is the spoken word for the character '∞' (U+221e)
#
chnames[u'\u221e'] = _("infinity")
# Translators: this is the spoken word for the character '■' (U+25a0)
# It can be used as a bullet in a list.
#
chnames[u'\u25a0'] = _("black square")
# Translators: this is the spoken word for the character '□' (U+25a1)
# It can be used as a bullet in a list.
#
chnames[u'\u25a1'] = _("white square")
# Translators: this is the spoken word for the character '◆' (U+25c6)
# It can be used as a bullet in a list.
#
chnames[u'\u25c6'] = _("black diamond")
# Translators: this is the spoken word for the character '○' (U+25cb)
# It can be used as a bullet in a list.
#
chnames[u'\u25cb'] = _("white circle")
# Translators: this is the spoken word for the character '●' (U+25cf)
# It can be used as a bullet in a list.
#
chnames[u'\u25cf'] = _("black circle")
# Translators: this is the spoken word for the character '◦' (U+25e6)
#
chnames[u'\u25e6'] = _("white bullet")
# Translators: this is the spoken word for the character '✓' (U+2713)
# It can be used as a bullet in a list.
#
chnames[u'\u2713'] = _("check mark")
# Translators: this is the spoken word for the character '✔' (U+2714)
# It can be used as a bullet in a list.
#
chnames[u'\u2714'] = _("heavy check mark")
# Translators: this is the spoken word for the character '✗' (U+2717)
# This symbol is included here because it can be used as a bullet in
# an OOo list. The goal is to inform the user of the appearance of
# the bullet, while making it clear that it is a bullet and not simply
# the typed letter 'x'. "Ballot x" might confuse the user. Hence the
# use of "x-shaped bullet".
#
chnames[u'\u2717'] = _("x-shaped bullet")
# Translators: this is the spoken word for the character '➔' (U+2794)
# This symbol is included here because it can be used as a bullet in
# an OOo list. The goal is to inform the user of the appearance of
# the bullet without too much verbiage, hence simply "right-pointing arrow".
#
chnames[u'\u2794'] = _("right-pointing arrow")
# Translators: this is the spoken word for the character '⁰' (U+2070)
#
chnames[u'\u2070'] = _("superscript 0")
# Translators: this is the spoken word for the character '⁴' (U+2074)
#
chnames[u'\u2074'] = _("superscript 4")
# Translators: this is the spoken word for the character '⁵' (U+2075)
#
chnames[u'\u2075'] = _("superscript 5")
# Translators: this is the spoken word for the character '⁶' (U+2076)
#
chnames[u'\u2076'] = _("superscript 6")
# Translators: this is the spoken word for the character '⁷' (U+2077)
#
chnames[u'\u2077'] = _("superscript 7")
# Translators: this is the spoken word for the character '⁸' (U+2078)
#
chnames[u'\u2078'] = _("superscript 8")
# Translators: this is the spoken word for the character '⁹' (U+2079)
#
chnames[u'\u2079'] = _("superscript 9")
# Translators: this is the spoken word for the character '⁺' (U+207a)
#
chnames[u'\u207a'] = _("superscript plus")
# Translators: this is the spoken word for the character '⁻' (U+207b)
#
chnames[u'\u207b'] = _("superscript minus")
# Translators: this is the spoken word for the character '⁼' (U+207c)
#
chnames[u'\u207c'] = _("superscript equals")
# Translators: this is the spoken word for the character '⁽' (U+207d)
#
chnames[u'\u207d'] = _("superscript left paren")
# Translators: this is the spoken word for the character '⁾' (U+207e)
#
chnames[u'\u207e'] = _("superscript right paren")
# Translators: this is the spoken word for the character 'ⁿ' (U+207f)
#
chnames[u'\u207f'] = _("superscript n")
# Translators: this is the spoken word for the character '₀' (U+2080)
#
chnames[u'\u2080'] = _("subscript 0")
# Translators: this is the spoken word for the character '₁' (U+2081)
#
chnames[u'\u2081'] = _("subscript 1")
# Translators: this is the spoken word for the character '₂' (U+2082)
#
chnames[u'\u2082'] = _("subscript 2")
# Translators: this is the spoken word for the character '₃' (U+2083)
#
chnames[u'\u2083'] = _("subscript 3")
# Translators: this is the spoken word for the character '₄' (U+2084)
#
chnames[u'\u2084'] = _("subscript 4")
# Translators: this is the spoken word for the character '₅' (U+2085)
#
chnames[u'\u2085'] = _("subscript 5")
# Translators: this is the spoken word for the character '₆' (U+2086)
#
chnames[u'\u2086'] = _("subscript 6")
# Translators: this is the spoken word for the character '₇' (U+2087)
#
chnames[u'\u2087'] = _("subscript 7")
# Translators: this is the spoken word for the character '₈' (U+2088)
#
chnames[u'\u2088'] = _("subscript 8")
# Translators: this is the spoken word for the character '₉' (U+2089)
#
chnames[u'\u2089'] = _("subscript 9")
# Translators: this is the spoken word for the character '₊' (U+208a)
#
chnames[u'\u208a'] = _("subscript plus")
# Translators: this is the spoken word for the character '₋' (U+208b)
#
chnames[u'\u208b'] = _("subscript minus")
# Translators: this is the spoken word for the character '₌' (U+208c)
#
chnames[u'\u208c'] = _("subscript equals")
# Translators: this is the spoken word for the character '₍' (U+208d)
#
chnames[u'\u208d'] = _("subscript left paren")
# Translators: this is the spoken word for the character '₎' (U+208e)
#
chnames[u'\u208e'] = _("subscript right paren")
# Translators: this is the spoken word for the character '➢' (U+27a2)
# This symbol is included here because it can be used as a bullet in an
# OOo list. The goal is to inform the user of the appearance of the bullet
# without too much verbiage, hence simply "right-pointing arrowhead".
#
chnames[u'\u27a2'] = _("right-pointing arrowhead")
# Translators: StarOffice/OOo includes private-use unicode character U+E00A
# as a bullet which looks like the black square: ■ (U+25A0). Therefore,
# please use the same translation for this character.
#
chnames[u'\ue00a'] = _("black square")
# Translators: StarOffice/OOo includes private-use unicode character U+E00C
# as a bullet which looks like the black diamond: ◆ (U+25C6). Therefore,
# please use the same translation for this character.
#
chnames[u'\ue00c'] = _("black diamond")
def getCharacterName(character):
"""Given a character, return its name as people might refer to it
in ordinary conversation.
Arguments:
- character: the character to get the name for
Returns a string representing the name for the character
"""
if not isinstance(character, unicode):
character = character.decode("UTF-8")
    try:
        return chnames[character]
    except KeyError:
        # No spoken name registered for this character; fall back to itself.
        return character
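# Usage sketch (hypothetical helper, not part of the original module; assumes
# a Python 2 runtime where the translations above have been registered):
def _character_name_example():
    euro = getCharacterName(u'\u20ac')  # -> u'euro'
    plain = getCharacterName('Q')       # unnamed characters pass through as u'Q'
    return euro, plain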
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/chnames.py
|
Python
|
gpl-3.0
| 25,110
|
[
"ORCA"
] |
ccec3316816d8b6fa7d37464243f601a3a4a47c6369b64b67456fa465a7ef870
|
from __future__ import absolute_import
from __future__ import print_function
import sys,os
import pandas as pd
import get_compounds_from_wikidata as wd  # used by get_wikidata() below
sys.path.append('/global/project/projectdirs/openmsi/jupyterhub_libs/anaconda/lib/python2.7/site-packages')
from rdkit import Chem
import numpy as np
from rdkit.Chem import PandasTools
# MolToSmiles( (Mol)mol [, (bool)isomericSmiles=False
# http://www.rdkit.org/Python_Docs/rdkit.Chem.rdmolfiles-module.html#MolToSmiles
# - isomericSmiles: (optional) include information about stereochemistry in
# the SMILES. Defaults to false.
# https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/RZJQGNCSTQAWON-UHFFFAOYSA-N/synonyms/json
def desalt(mol):
#input is an rdkit mol
#returns an rdkit mol keeping the biggest component
#returns original mol if only one component
    #returns a boolean indicating whether cleaning was necessary
d = Chem.rdmolops.GetMolFrags(mol) #these are atom indices
if len(d) == 1: #If there are fragments or multiple molecules this will be greater than 1
return mol,False
    my_smiles = Chem.MolToSmiles(mol, True)
    parent_atom_count = 0
    parent_mol = mol  # fallback so parent_mol is always bound even if no fragment parses
    disconnected = my_smiles.split('.')
#With GetMolFrags, we've already established that there is more than one disconnected structure
status = False
for s in disconnected:
little_mol=Chem.MolFromInchi(Chem.MolToInchi(Chem.MolFromSmiles(s)))
if little_mol is not None:
count = little_mol.GetNumAtoms()
if count > parent_atom_count:
parent_atom_count = count
parent_mol = little_mol
status = True
return parent_mol,status
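# Minimal usage sketch (hypothetical helper; the salt SMILES below is an
# illustrative sodium acetate, not taken from any of the source databases):
def _desalt_example():
    mol = Chem.MolFromSmiles('CC(=O)[O-].[Na+]')
    parent, was_desalted = desalt(mol)
    return Chem.MolToSmiles(parent, True), was_desalted  # acetate kept, True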
def get_wikidata(terms_to_keep):
prop_file = '/project/projectdirs/openmsi/projects/compound_data/wikidata/wikidata_compound_properties.xlsx'
result = wd.get_wikidata(prop_file)
df = pd.DataFrame(result)
df.rename(columns={'canonicalSMILES': 'smiles'}, inplace=True)
df.rename(columns={'InChI': 'inchi'}, inplace=True)
df.rename(columns={'compoundLabel': 'common_name'}, inplace=True)
df['source_database'] = 'wikidata'
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_img(terms_to_keep):
df = pd.read_csv('/project/projectdirs/openmsi/projects/compound_data/img_abc/NPlist32763_30-mar-2016.xls',delimiter='\t')
df.rename(columns={'SMILES': 'smiles'}, inplace=True)
df.rename(columns={'InChl': 'inchi'}, inplace=True)
df.rename(columns={'SM ID': 'img_abc_id'}, inplace=True)
df.rename(columns={'Secondary Metabolite (SM) Name': 'common_name'}, inplace=True)
df['source_database'] = 'img'
df['ROMol'] = ''
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_enzo(terms_to_keep):
df = pd.read_csv('/project/projectdirs/openmsi/projects/compound_data/enzo/BML-2865.txt',delimiter='\t')
df.rename(columns={'SMILES': 'smiles'}, inplace=True)
df.rename(columns={'Name': 'common_name'}, inplace=True)
df['inchi'] = np.nan
df['source_database'] = 'enzo'
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_msmls(terms_to_keep):
df = pd.read_excel('/project/projectdirs/openmsi/projects/compound_data/msmls/MSMLS map - mz overlap edit sk.xlsx')
df.rename(columns={'SMILES': 'smiles'}, inplace=True)
df.rename(columns={'CNAME': 'common_name'}, inplace=True)
df.rename(columns={'PC_CID': 'pubchem_compound_id'}, inplace=True)
df['source_database'] = 'msmls'
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def dequote(s):
"""
If a string has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the string unchanged.
"""
    if len(s) >= 2 and (s[0] == s[-1]) and s.startswith(("'", '"')):
return s[1:-1]
return s
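# Quick sanity checks for dequote (hypothetical helper):
def _dequote_example():
    assert dequote('"abc"') == 'abc'
    assert dequote("'abc'") == 'abc'
    assert dequote('"abc\'') == '"abc\''  # mismatched quotes: unchanged
    return True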
def get_metacyc(terms_to_keep):
df = pd.read_excel('/project/projectdirs/openmsi/projects/compound_data/metacyc/MetAtlas_Export_MetaCyc_Compounds.xlsx', encoding='utf-8')
df.rename(columns={'InChI': 'inchi'}, inplace=True)
df.rename(columns={'KEGG': 'kegg_id'}, inplace=True)
df.rename(columns={'PubChem': 'pubchem_compound_id'}, inplace=True)
df.rename(columns={'Common-Name': 'common_name'}, inplace=True)
df.rename(columns={'Names': 'synonyms'}, inplace=True) # reduced DCPIP // "2,6-dichloro-4-[(4-hydroxyphenyl)amino]phenol" // "reduced dichloroindophenol" // "reduced 2,6-dichlorophenolindophenol" // "reduced DCIP"
df.rename(columns={'Object ID': 'metacyc_id'}, inplace=True)
#df.synonyms.astype(str,inplace=True)
#df['synonyms'].astype(basestring)
df.loc[:,'synonyms'] = [[ s.strip() for s in mystr.split('//')] for mystr in df['synonyms'].astype(str).tolist() ]
df.loc[:,'synonyms'] = [[ dequote(s) for s in mystr] for mystr in df['synonyms'].tolist() ]
df['source_database'] = 'metacyc'
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
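# Sketch of the synonym normalisation above (hypothetical helper): the raw
# MetaCyc field packs optionally quoted names separated by '//'; split,
# strip, then dequote each entry:
def _metacyc_synonyms_example():
    raw = 'reduced DCPIP // "reduced dichloroindophenol" // "reduced DCIP"'
    return [dequote(s.strip()) for s in raw.split('//')]
    # -> ['reduced DCPIP', 'reduced dichloroindophenol', 'reduced DCIP']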
# terms_to_keep = ['smiles','inchi','source_database','ROMol','common_name','synonyms','pubchem_compound_id','lipidmaps_id','metacyc_id','hmdb_id','img_abc_id','chebi_id','kegg_id']
def get_gnps(terms_to_keep):
from pyteomics import mgf
gnps = [s['params'] for s in mgf.read('/project/projectdirs/openmsi/projects/compound_data/gnps/ALL_GNPS (1).mgf')]
df = pd.DataFrame(gnps)
df['source_database'] = 'gnps'
# df.rename(columns={'name': 'name'}, inplace=True)
#name has adduct "Hoiamide B M+H"
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_dr_dukes():
df = pd.read_csv('/project/projectdirs/openmsi/projects/compound_data/dr_dukes_phytochemicals/CHEMICALS.csv',delimiter=',')
df['source_database'] = 'dr_dukes'
print(list(df.keys()))
return df
def get_lipid_maps(terms_to_keep):
df = PandasTools.LoadSDF('/project/projectdirs/openmsi/projects/compound_data/lipidmaps/LMSDFDownload28Jun15FinalAll.sdf')
df['source_database'] = 'lipidmaps'
df.rename(columns={'KEGG_ID': 'kegg_id'}, inplace=True)
df.rename(columns={'PUBCHEM_CID': 'pubchem_compound_id'}, inplace=True)
df.rename(columns={'COMMON_NAME': 'common_name'}, inplace=True)
df.rename(columns={'SYNONYMS': 'synonyms'}, inplace=True)
# Decanohydroxamic acid; caprinohydroxamic acid; n-Decanohydroxamic acid
df.loc[:,'synonyms'] = [[ s.strip() for s in mystr.split(';')] for mystr in df['synonyms'].astype(str).tolist() ]
df.rename(columns={'ID': 'lipidmaps_id'}, inplace=True)
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_hmdb(terms_to_keep):
df = PandasTools.LoadSDF('/project/projectdirs/openmsi/projects/compound_data/hmdb/structures.sdf')
df['source_database'] = 'hmdb'
df.rename(columns={'GENERIC_NAME': 'common_name'}, inplace=True)
df.rename(columns={'SYNONYMS': 'synonyms'}, inplace=True)
df.loc[:,'synonyms'] = [[ s.strip() for s in mystr.split(';')] for mystr in df['synonyms'].astype(str).tolist() ]
# 2-(8S,9S,13S,14S)-3-Hydroxy-2-methoxy-13-methyl-7,8,9,11,12,14,15,16-octahydro-6H-cyclopenta[a]phenanthren-17-one; 2-Hydroxyestrone 2-methyl ether; 2-Methoxy-17-oxoestra-1,3,5(10)-trien-3-ol; 2-Methoxy-3-hydroxyestra-1,3,5(10)-trien-17-one; 3-Hydroxy-2-methoxy-Estra-1,3,5(10)-trien-17-one; 3-Hydroxy-2-methoxyestra-1,3,5(10)-trien-17-one; Methoxy-Estrone
df.rename(columns={'HMDB_ID': 'hmdb_id'}, inplace=True)
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_chembl(terms_to_keep):
sdf_file = '/project/projectdirs/openmsi/projects/compound_data/chembl/chembl_21.sdf.gz'
df = PandasTools.LoadSDF(sdf_file)
df['source_database'] = 'chembl'
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
def get_chebi(terms_to_keep):
df = PandasTools.LoadSDF('/project/projectdirs/openmsi/projects/compound_data/chebi/ChEBI_complete.sdf.gz')
# df = PandasTools.LoadSDF('/project/projectdirs/openmsi/projects/compound_data/chebi/ChEBI_complete_3star.sdf.gz')
for index, row in df.iterrows():
mol = row['ROMol']
try:
df.loc[index,'inchi'] = Chem.MolToInchi(mol)
except:
pass
df['source_database'] = 'chebi'
df.rename(columns={'KEGG COMPOUND Database Links': 'kegg_id'}, inplace=True)
df.rename(columns={'ChEBI Name': 'common_name'}, inplace=True)
df.rename(columns={'Synonyms': 'synonyms'}, inplace=True)
df.loc[:,'synonyms'] = [[ s.strip() for s in mystr.split('\n')] for mystr in df['synonyms'].astype(str).tolist() ]
# (-)-Epicatechin\n(-)-Epicatechol\n(2R,3R)-(-)-Epicatechin\n(2R,3R)-2-(3,4-dihydroxyphenyl)-3,4-dihydro-2H-1-benzopyran-3,5,7-triol\n3,3',4',5,7-Pentahydroxyflavane\nEpicatechol\nEpigallocatechin\nL(-)-Epicatechin\nL-Acacatechin\nL-Epicatechin\nL-Epicatechol\nalpha-Catechin
df.rename(columns={'ChEBI ID': 'chebi_id'}, inplace=True)
k = list(df.keys())
for t in terms_to_keep:
if not t in k:
df[t] = ''
return df
""" contribution from Hans de Winter """
def _InitialiseNeutralisationReactions():
patts= (
# Imidazoles
('[n+;H]','n'),
# Amines
('[N+;!H0]','N'),
# Carboxylic acids and alcohols
('[$([O-]);!$([O-][#7])]','O'),
# Thiols
('[S-;X1]','S'),
# Sulfonamides
('[$([N-;X2]S(=O)=O)]','N'),
# Enamines
('[$([N-;X2][C,N]=C)]','N'),
# Tetrazoles
('[n-]','[nH]'),
# Sulfoxides
('[$([S-]=O)]','S'),
# Amides
('[$([N-]C=O)]','N'),
)
return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y)) for x,y in patts]
_reactions=None
def NeutraliseCharges(mol, reactions=None):
global _reactions
if reactions is None:
if _reactions is None:
_reactions=_InitialiseNeutralisationReactions()
reactions=_reactions
# mol = Chem.MolFromSmiles(smiles)
replaced = False
for i,(reactant, product) in enumerate(reactions):
while mol.HasSubstructMatch(reactant):
replaced = True
# print Chem.MolToSmiles(mol,True)
# print Chem.MolToSmiles(mol), Chem.MolToSmarts(reactant),Chem.MolToSmiles(product)
rms = Chem.AllChem.ReplaceSubstructs(mol, reactant, product, replaceAll=True)
# rms_smiles = Chem.MolToSmiles(rms[0],True)
# mol = Chem.MolFromSmiles(rms_smiles)
mol = rms[0]
# print Chem.MolToSmiles(mol,True)
if replaced:
return (mol, True) #Chem.MolToSmiles(mol,True)
else:
return (mol, False)
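# Usage sketch (hypothetical helper; assumes Chem.AllChem is importable, as
# the PandasTools import above normally ensures). The zwitterion below is an
# illustrative glycine, chosen to hit the amine and carboxylate patterns:
def _neutralise_example():
    mol = Chem.MolFromSmiles('C(C(=O)[O-])[NH3+]')
    neutral, changed = NeutraliseCharges(mol)
    return Chem.MolToSmiles(neutral, True), changed  # neutral glycine, True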
# _reactions=None
# def NeutraliseCharges(smiles, reactions=None):
# global _reactions
# if reactions is None:
# if _reactions is None:
# _reactions=_InitialiseNeutralisationReactions()
# reactions=_reactions
# mol = Chem.MolFromSmiles(smiles)
# replaced = False
# for i,(reactant, product) in enumerate(reactions):
# while mol.HasSubstructMatch(reactant):
# replaced = True
# rms = AllChem.ReplaceSubstructs(mol, reactant, product)
# mol = rms[0]
# if replaced:
# return (Chem.MolToSmiles(mol,True), True)
# else:
# return (smiles, False)
|
biorack/metatlas
|
metatlas/interfaces/compounds/not_used/get_compounds_each_provider.py
|
Python
|
bsd-3-clause
| 11,717
|
[
"RDKit"
] |
9d0c51f707a9019c09b0505ce503cf5193f952368f603234540574cc8e8fcb11
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import requests
import time
# from common import basename
from boltons.iterutils import remap
__author__ = 'Austin West'
__license__ = 'MPL 2.0'
TYPE = 'Detonation'
NAME = 'VxStream Sandbox'
DEFAULTCONF = {
'ENABLED': False,
'BASE URL': 'http://localhost',
'API URL': 'http://localhost/api/',
'API key': '',
'API secret': '',
'Environment ID': 1,
'Verify': False,
'timeout': 360,
'running timeout': 120,
}
EMPTY_STR_TO_OBJ = {
'runtime': [
'additionalContext',
'apidb',
'chronology',
'console',
'handles',
'hooks',
'mutants',
'network',
'parameterdb',
'vbeevents',
'createdfiles',
],
'hybridanalysis': [
'streams',
],
'final': [
'business_threats',
'signatures_chronology',
'engines',
'delayed',
'multiscan',
'warnings',
'similarity',
'imageprocessing',
],
'general': [
'yarahits',
'exec_options',
'verinfo',
'tls_callbacks',
'resources',
'exports',
'certificate',
'dictionary',
],
}
EMPTY_STR_TO_LS = {
'runtime': [
'targets', ],
'hybridanalysis': [
'targets',
'dropped', ],
}
# we could use the full path of the keys
# known to cause issues
def visit(path, key, value):
if value == '':
if key in EMPTY_STR_TO_OBJ['runtime'] or \
key in EMPTY_STR_TO_OBJ['hybridanalysis'] or \
key in EMPTY_STR_TO_OBJ['final'] or \
key in EMPTY_STR_TO_OBJ['general']:
# null values should be empty dict
return key, {}
elif key in EMPTY_STR_TO_LS['runtime'] or \
key in EMPTY_STR_TO_LS['hybridanalysis']:
# null values should be empty list
return key, []
elif 'runtime' in path and key == 'parentuid':
# first parentuid is always int, rest are strings...
return key, str(value)
elif key == 'netsim' and type(value) == int:
# sometimes uses 0 / 1 for false / true,
# make everything string
return key, str(bool(value)).lower()
elif '_entropy' in str(key) and type(value) == str:
# entropy shows up as float and str,
# make all of them floats
return key, float(value)
return key, value
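# Sketch of how visit() plugs into boltons' remap (hypothetical helper; the
# report fragment is made up, shaped only to exercise each rule above):
def _visit_example():
    blob = {'runtime': {'network': '', 'targets': '', 'parentuid': 7},
            'netsim': 1, 'total_entropy': '6.2'}
    return remap(blob, visit=visit)
    # -> {'runtime': {'network': {}, 'targets': [], 'parentuid': '7'},
    #     'netsim': 'true', 'total_entropy': 6.2}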
def post_to_vxstream(f_name, environment_id,
submit_url, apikey, secret, runtime, verify):
with open(f_name, 'rb') as f:
files = {'file': f}
data = {
'apikey': apikey,
'secret': secret,
'environmentId': environment_id,
'customruntime': runtime,
}
try:
user_agent = {'User-agent': 'VxStream Sandbox'}
res = requests.post(submit_url, data=data, headers=user_agent, files=files, verify=verify)
if res.status_code == 200:
return res.json()
else:
print('Error code: {}, returned when uploading: {}'.format(res.status_code, f.name))
except requests.exceptions.HTTPError as err:
print(err)
def get_file_status(file_sha256, status_url, environment_id, apikey, secret, verify):
user_agent = {'User-agent': 'VxStream Sandbox'}
params = {'apikey': apikey, 'secret': secret, 'environmentId': environment_id}
resource_url = '%s/%s' % (status_url, file_sha256)
try:
res = requests.get(resource_url, headers=user_agent, params=params, verify=verify)
if res.status_code == 200:
return res.json()
else:
print('Error code: {}, returned when getting file status: {}'.format(res.status_code, file_sha256))
return res
except requests.exceptions.HTTPError as err:
print(err)
def get_file_report(file_sha256, report_url, environment_id, type_, apikey, secret, verify):
user_agent = {'User-agent': 'VxStream Sandbox'}
params = {'apikey': apikey, 'secret': secret, 'environmentId': environment_id, 'type': type_}
resource_url = '%s/%s' % (report_url, file_sha256)
try:
res = requests.get(resource_url, headers=user_agent, params=params, verify=verify)
if res.status_code == 200:
# walk entire json blob to fix
# the keys known to cause issues
remapped = remap(res.json(), visit=visit)
return remapped
else:
print('Error code: {}, returned when getting report: {}'.format(res.status_code, file_sha256))
return res
except requests.exceptions.HTTPError as err:
print(err)
def check(conf=DEFAULTCONF):
return conf['ENABLED']
def scan(filelist, conf=DEFAULTCONF):
resultlist = []
tasks = []
if conf['API URL'].endswith('/'):
url = conf['API URL']
else:
url = conf['API URL'] + '/'
submit_url = url + 'submit'
status_url = url + 'state'
report_url = url + 'result'
for fname in filelist:
response = post_to_vxstream(
fname, environment_id=conf['Environment ID'],
submit_url=submit_url, apikey=conf['API key'],
secret=conf['API secret'], runtime=conf['running timeout'],
verify=conf['Verify'])
try:
file_sha256 = response['response']['sha256']
except Exception as e:
print(e, fname)
continue
if file_sha256 is not None:
tasks.append((fname, file_sha256))
# Wait for tasks to finish
task_status = {}
while tasks:
for fname, file_sha256 in tasks[:]:
status_dict = get_file_status(
file_sha256, status_url, conf['Environment ID'],
apikey=conf['API key'], secret=conf['API secret'],
verify=conf['Verify']
)
status = status_dict.get('response', {}).get('state', 'ERROR')
# If we have a report
if status == 'SUCCESS':
report = get_file_report(
file_sha256, report_url, conf['Environment ID'],
apikey=conf['API key'], secret=conf['API secret'],
type_='json', verify=conf['Verify']
)
if report:
# Drop some additional values from report
for field in ['strings', 'signatures_chronology',
'imageprocessing', 'multiscan']:
try:
report['analysis']['final'].pop(field)
except KeyError:
pass
# Add the link to Web Report
report['analysis']['final']['web_report'] = (
'<a href="{base_url}/sample/{file_sha256}?environmentId={env_id}" target="_blank">'
'View the report in VxStream</a>'
).format(base_url=conf['BASE URL'], file_sha256=file_sha256, env_id=conf['Environment ID'])
resultlist.append((fname, report.get('analysis', {}).get('final')))
tasks.remove((fname, file_sha256))
# Check for dead tasks
elif status == 'IN_PROGRESS':
if file_sha256 not in task_status:
task_status[file_sha256] = time.time() + conf['timeout'] + conf['running timeout']
else:
if time.time() > task_status[file_sha256]:
# TODO Log timeout
tasks.remove((fname, file_sha256))
# If there is an unknown status
elif status == 'ERROR':
# TODO Log errors better
tasks.remove((fname, file_sha256))
time.sleep(15)
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
metadata["Include"] = False
return (resultlist, metadata)
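# Usage sketch (hypothetical values throughout; scan() expects a
# MultiScanner-style conf dict shaped like DEFAULTCONF, with real
# VxStream credentials):
def _scan_example():
    conf = dict(DEFAULTCONF)
    conf.update({'ENABLED': True, 'API URL': 'http://sandbox.example/api/',
                 'API key': 'my-key', 'API secret': 'my-secret'})
    return scan(['/tmp/sample.exe'], conf=conf)  # -> (resultlist, metadata)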
|
mitre/multiscanner
|
multiscanner/modules/Detonation/VxStream.py
|
Python
|
mpl-2.0
| 8,242
|
[
"VisIt"
] |
ef8ce46037179240cf9001ac8209f59791ca7fb02702059a88e414277c66d6c4
|
#!/usr/bin/env python
import vtk
def pine_root_connectivity(fileName, noConnectivity):
def NumberOfTriangles(pd):
"""
Count the number of triangles.
:param pd: vtkPolyData.
:return: The number of triangles.
"""
cells = pd.GetPolys()
numOfTriangles = 0
idList = vtk.vtkIdList()
for i in range(0, cells.GetNumberOfCells()):
cells.GetNextCell(idList)
# If a cell has three points it is a triangle.
if idList.GetNumberOfIds() == 3:
numOfTriangles += 1
return numOfTriangles
colors = vtk.vtkNamedColors()
# Create the pipeline.
reader = vtk.vtkMCubesReader()
reader.SetFileName(fileName)
if not noConnectivity:
reader.Update()
print("Before Connectivity.")
print("There are: ", NumberOfTriangles(reader.GetOutput()), "triangles")
connect = vtk.vtkPolyDataConnectivityFilter()
connect.SetInputConnection(reader.GetOutputPort())
connect.SetExtractionModeToLargestRegion()
if not noConnectivity:
connect.Update()
print("After Connectivity.")
print("There are: ", NumberOfTriangles(connect.GetOutput()), "triangles")
isoMapper = vtk.vtkPolyDataMapper()
if noConnectivity:
isoMapper.SetInputConnection(reader.GetOutputPort())
else:
isoMapper.SetInputConnection(connect.GetOutputPort())
isoMapper.ScalarVisibilityOff()
isoActor = vtk.vtkActor()
isoActor.SetMapper(isoMapper)
isoActor.GetProperty().SetColor(colors.GetColor3d("raw_sienna"))
# Get an outline of the data set for context.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(reader.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(colors.GetColor3d("Black"))
# Create the Renderer, RenderWindow and RenderWindowInteractor.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size.
ren.AddActor(outlineActor)
ren.AddActor(isoActor)
renWin.SetSize(512, 512)
ren.SetBackground(colors.GetColor3d("SlateGray"))
# render the image
#
# iren AddObserver UserEvent {wm deiconify .vtkInteract}
cam = ren.GetActiveCamera()
cam.SetFocalPoint(40.6018, 37.2813, 50.1953)
cam.SetPosition(40.6018, -280.533, 47.0172)
cam.ComputeViewPlaneNormal()
cam.SetClippingRange(26.1073, 1305.36)
cam.SetViewAngle(20.9219)
cam.SetViewUp(0.0, 0.0, 1.0)
iren.Initialize()
renWin.Render()
iren.Start()
def main():
fileName, noConnectivity = get_program_parameters()
noConnectivity = noConnectivity != 0
pine_root_connectivity(fileName, noConnectivity)
def get_program_parameters():
import argparse
description = 'Applying connectivity filter to remove noisy isosurfaces.'
epilogue = '''
Applying connectivity filter to remove noisy isosurfaces.
This example demonstrates how to use the vtkConnectivityFilter.
If the extra parameter 'noConnectivity' is non zero, the connectivity filter will not be used.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='pine_root.tri.')
parser.add_argument('noConnectivity', default=0, type=int, nargs='?',
help='If non-zero do not use the connectivity filter.')
args = parser.parse_args()
return args.filename, args.noConnectivity
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/VisualizationAlgorithms/PineRootConnectivity.py
|
Python
|
apache-2.0
| 3,904
|
[
"VTK"
] |
7632866f78ac7a7e94fcccd96e4df31348c222967f2fb6852d55ab94e9eb1b96
|
from hic import *
import networkx as nx
import datetime as dt
import os.path
import argparse
import logging
import pysam
import vcf
import sys
# log config
logging.basicConfig(filename='split-network.log', level=logging.DEBUG)
#
# Read base and quality
#
def read_base_and_quality(alignedRead, pos):
return alignedRead.seq[pos], ord(alignedRead.qual[pos]) - 33
#
# Test for file existence or raise an error
#
def file_exists(fnames):
for fn in fnames:
if not os.path.exists(fn):
raise IOError('Error: \"{0}\" does not exist'.format(fn))
#
# Conditionally add new nodes to the graph, where kwargs contains additional
# key/value attributes for the node.
#
def add_node(g, id, **kwargs):
if not g.has_node(id):
g.add_node(id, kwargs)
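# Tiny sketch (hypothetical helper): add_node() only creates a node on first
# sight, so later calls with the same id keep the original attributes:
def _add_node_example():
    g = nx.Graph()
    add_node(g, 'snp1.A', quality=30)
    add_node(g, 'snp1.A', quality=99)  # no-op; node already exists
    return g.node['snp1.A']            # {'quality': 30} under networkx 1.x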
#
# User interface
#
parser = argparse.ArgumentParser(description='Build snp graph from HiC sequencing data')
parser.add_argument('-b', '--base_quality', help='Minimum base quality', type=int, default=0)
parser.add_argument('-m', '--map_quality', help='Minimum mapping quality', type=int, default=0)
parser.add_argument('-v', '--variant_quality', help='Minimum mapping quality', type=int, default=0)
parser.add_argument('vcf_file', help='VCF file of predicted variant sites', metavar='VCF_FILE')
parser.add_argument('r1_file', help='BAM file for R1 reads', metavar='R1_BAM')
parser.add_argument('r2_file', help='BAM file for R2 reads', metavar='R2_BAM')
parser.add_argument('output', help='Output graph')
args = parser.parse_args()
try:
# check input files exist
file_exists([args.vcf_file, args.r1_file, args.r2_file])
# get the total number of VCF records for progress reporting
n_vcf = count_vcf_records(args.vcf_file)
vcfFile = vcf.Reader(filename=args.vcf_file)
samR1 = pysam.Samfile(args.r1_file, 'rb')
samR2 = pysam.Samfile(args.r2_file, 'rb')
# Some user-feedback progress reporting variables
varCount = 0
firstTime = dt.datetime.now()
lastTime = firstTime
# Registry objects for tracking fragments and snps
frgRegistry = RegisteredObjectFactory(Fragment)
snpRegistry = RegisteredObjectFactory(SNP)
#
# Here, we iterate over each variant site and find the pileup column
# within R1 and then R2 bam files. For each site, we track Fragments and
    # their associated reads, as well as the SNP itself.
#
# These build up a registry of fragments and snps, accessed through object
# identity.
#
for variant in vcfFile:
# Skip any variant that isn't a SNP
if not variant.is_snp:
continue
# impose minimum quality on variants. Quality will depend on tool
# which predicted variant site.
if variant.QUAL < args.variant_quality:
logging.info('%s was below quality threshold: vq=%d', variant, variant.QUAL)
continue
# register/get a SNP
try:
snp = snpRegistry.requestObject(vcfRecord=variant)
except Exception as ex:
logging.info('Skipping: %s', ex)
continue
# Iterate over R1 mapping, looking at the pileup at SNP position
for n, col in enumerate(samR1.pileup(reference=snp.contig, start=snp.position-1, end=snp.position, truncate=True)):
# probably unnecessary, added while trying to understand under-documented API
if n > 1:
                raise Exception('Pileup is assumed to produce only one exact column')
for pr in col.pileups:
aln = pr.alignment
# skip secondary alignments and indels
if aln.is_secondary or pr.indel != 0:
logging.debug('Skipped secondary or indel: %s', aln)
continue
# nucleotide at SNP site for this read.
base, bq = read_base_and_quality(aln, pr.qpos)
mq = aln.mapq
                # impose minimum quality threshold on basecall and alignment
if bq < args.base_quality or mq < args.map_quality:
logging.info('%s was below quality thresholds: bq=%d, mq=%s', base, bq, mq)
continue
# skip undefined variants
if snp.isUndefinedAllele(base):
logging.info('%s was not ref %s nor alt %s', base, snp.reference, snp.variant)
continue
# Obtain fragment from registry
frg = frgRegistry.requestObject(name=aln.qname)
rpl = ReadPlacement(snp.contig, aln.pos)
if not frg.read1:
frg.read1 = rpl
elif rpl != frg.read1:
logging.warn('Tried to assign different read placement r1 [%s] to fragment [%s]', rpl, frg)
# register allele
frg.read1.addSnpInstance(snp, base)
# Now iterate over R2 mapping, looking at the pileup at SNP position
for n, col in enumerate(samR2.pileup(reference=snp.contig, start=snp.position-1, end=snp.position, truncate=True)):
if n > 1:
                raise Exception('Pileup is assumed to produce only one exact column')
for pr in col.pileups:
aln = pr.alignment
# skip secondary alignments and indels
if aln.is_secondary or pr.indel != 0:
logging.debug('Skipped secondary or indel: %s', aln)
continue
base, bq = read_base_and_quality(aln, pr.qpos)
mq = aln.mapq
                # impose minimum quality threshold on basecall and alignment
if bq < args.base_quality or mq < args.map_quality:
logging.info('%s was below quality thresholds: bq=%d, mq=%s', base, bq, mq)
continue
# skip undefined variants
if snp.isUndefinedAllele(base):
logging.info('%s was not ref %s nor alt %s', base, snp.reference, snp.variant)
continue
# Obtain fragment from registry
frg = frgRegistry.requestObject(name=aln.qname)
rpl = ReadPlacement(snp.contig, aln.pos)
if not frg.read2:
frg.read2 = rpl
elif rpl != frg.read2:
logging.warn('Tried to assign different read placement r2 [%s] to fragment [%s]', rpl, frg)
# register allele
frg.read2.addSnpInstance(snp, base)
varCount += 1
if varCount % 100 == 0:
curTime = dt.datetime.now()
ta = (curTime - lastTime)
tb = (curTime - firstTime)
print "... processed {0}/{1} variants in {2} walltime: {3}".format(varCount, n_vcf, ta, tb)
lastTime = curTime
print 'Finished reading data, {0} variants'.format(varCount)
samR1.close()
samR2.close()
print 'Registered {0} fragments'.format(len(frgRegistry))
#
# Now we just build the graph.
#
# - Nodes are uniquely defined SNP as: contig, pos and variant base
# - Edges are defined by fragment (R1,R2) placement, where edge weight represents
# accumulated fragment count.
#
# counting various things causing rejection
rejCount = {'contradictory': 0, 'self-loop': 0, 'unpaired': 0}
# empty graph
g = nx.Graph(type='split', version=1)
# iterate over all fragments in registry
for frg in frgRegistry.elements():
if not frg.isPaired():
rejCount['unpaired'] += 1
continue
# iterate over all SNPs contained within a fragment
# nested loops as this is n-way.
for snpR1, baseR1 in frg.read1.snpInstances.iteritems():
# TODO: labels should be generated in class, this is really a form of object identity
u = '{0}.{1}'.format(snpR1, baseR1) # origin
for snpR2, baseR2 in frg.read2.snpInstances.iteritems():
# TODO: ditto above
v = '{0}.{1}'.format(snpR2, baseR2) # destination
# Skipping self loops, often occurring if read pairs overlap
if snpR1 == snpR2:
# just interesting to capture contradictions info.
if baseR1 != baseR2:
logging.warn('Contradictory bases for R1/R2 {0}/{1} at an overlapping SNP position {2}'.format(
baseR1, baseR2, snpR1))
rejCount['contradictory'] += 1
continue
else:
logging.warn('Self-loop for R1/R2 {0}/{1} at an overlapping SNP position {2}'.format(
baseR1, baseR2, snpR1))
rejCount['self-loop'] += 1
continue
#
# WEIGHTING!
#
# Weights by simple occurrence
#
if g.has_edge(u, v):
# add one to edge
g[u][v]['weight'] += 1
else:
# pre-create nodes so we can add some attributes
# TODO: add more or more pertinent attributes.
add_node(g, u, quality=snpR1.quality)
add_node(g, v, quality=snpR2.quality)
# new edge
g.add_edge(u, v, weight=1)
print "Created {0} nodes".format(g.number_of_nodes())
print 'Rejected snp instances: {0}'.format(rejCount)
nx.write_graphml(g, args.output)
except Exception as e:
print e
sys.exit(1)
|
koadman/proxigenomics
|
graph/src/splitNetwork.py
|
Python
|
gpl-2.0
| 9,668
|
[
"pysam"
] |
1831c6c47537c6b076dc737e579e5b3977cb2dec91be88923eb3d4705df6b697
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for Google Cloud Storage command line tool."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import datetime
import errno
import getopt
import logging
import os
import re
import signal
import socket
import sys
import textwrap
import traceback
import six
from six.moves import configparser
from six.moves import range
import gslib.exception
from gslib.exception import CommandException
from gslib.exception import ControlCException
from gslib.utils.version_check import check_python_version_support
from gslib.utils.arg_helper import GetArgumentsAndOptions
from gslib.utils.user_agent_helper import GetUserAgent
# Load the gsutil version number and append it to boto.UserAgent so the value is
# set before anything instantiates boto. This has to run after THIRD_PARTY_DIR
# is modified (done in gsutil.py) but before any calls are made that would cause
# boto.s3.Connection to be loaded - otherwise the Connection class would end up
# with a static reference to the pre-modified version of the UserAgent field,
# so boto requests would not include gsutil/version# in the UserAgent string.
import boto
import gslib
from gslib.utils import system_util, text_util
# pylint: disable=g-import-not-at-top
# This module also imports boto, and will override the UserAgent global variable
# if imported above.
from gslib import metrics
# We parse the options and arguments here so we can pass the results to the user
# agent helper.
try:
opts, args = GetArgumentsAndOptions()
except CommandException as e:
reason = e.reason if e.informational else 'CommandException: %s' % e.reason
err = '%s\n' % reason
try:
text_util.print_to_fd(err, end='', file=sys.stderr)
except UnicodeDecodeError:
# Can happen when outputting invalid Unicode filenames.
sys.stderr.write(err)
if e:
metrics.LogFatalError(e)
sys.exit(1)
# This calculated user agent can be stored for use in StorageV1.
gslib.USER_AGENT = GetUserAgent(args, metrics.MetricsCollector.IsDisabled())
boto.UserAgent += gslib.USER_AGENT
# pylint: disable=g-bad-import-order
import httplib2
import oauth2client
from google_reauth import reauth_creds
from google_reauth import errors as reauth_errors
from gslib import context_config
from gslib import wildcard_iterator
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import ProjectIdException
from gslib.cloud_api import ServiceException
from gslib.command_runner import CommandRunner
import apitools.base.py.exceptions as apitools_exceptions
from gslib.utils import boto_util
from gslib.utils import constants
from gslib.utils import system_util
from gslib.sig_handling import GetCaughtSignals
from gslib.sig_handling import InitializeSignalHandling
from gslib.sig_handling import RegisterSignalHandler
CONFIG_KEYS_TO_REDACT = ['proxy', 'proxy_port', 'proxy_user', 'proxy_pass']
# We don't use the oauth2 authentication plugin directly; importing it here
# ensures that it's loaded and available by default when an operation requiring
# authentication is performed.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
except ImportError:
pass
DEBUG_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with debug output enabled.
*** Be aware that debug output includes authentication credentials.
*** Make sure to remove the value of the Authorization header for
*** each HTTP request printed to the console prior to posting to
*** a public medium such as a forum post or Stack Overflow.
***************************** WARNING *****************************
""".lstrip()
TRACE_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with trace output enabled.
*** Be aware that trace output includes authentication credentials
*** and may include the contents of any files accessed during the trace.
***************************** WARNING *****************************
""".lstrip()
HTTP_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with the "https_validate_certificates" config
*** variable set to False. This option should always be set to True in
*** production environments to protect against man-in-the-middle attacks,
*** and leaking of user data.
***************************** WARNING *****************************
""".lstrip()
debug_level = 0
test_exception_traces = False
# pylint: disable=unused-argument
def _CleanupSignalHandler(signal_num, cur_stack_frame):
"""Cleans up if process is killed with SIGINT, SIGQUIT or SIGTERM.
Note that this method is called after main() has been called, so it has
access to all the modules imported at the start of main().
Args:
signal_num: Unused, but required in the method signature.
cur_stack_frame: Unused, but required in the method signature.
"""
_Cleanup()
if (gslib.utils.parallelism_framework_util.
CheckMultiprocessingAvailableAndInit().is_available):
gslib.command.TeardownMultiprocessingProcesses()
def _Cleanup():
for fname in boto_util.GetCleanupFiles():
try:
os.unlink(fname)
except: # pylint: disable=bare-except
pass
def _OutputAndExit(message, exception=None):
"""Outputs message to stderr and exits gsutil with code 1.
This function should only be called in single-process, single-threaded mode.
Args:
message: Message to print to stderr.
exception: The exception that caused gsutil to fail.
"""
if debug_level >= constants.DEBUGLEVEL_DUMP_REQUESTS or test_exception_traces:
stack_trace = traceback.format_exc()
err = ('DEBUG: Exception stack trace:\n %s\n%s\n' %
(re.sub('\\n', '\n ', stack_trace), message))
else:
err = '%s\n' % message
try:
text_util.print_to_fd(err, end='', file=sys.stderr)
except UnicodeDecodeError:
# Can happen when outputting invalid Unicode filenames.
sys.stderr.write(err)
if exception:
metrics.LogFatalError(exception)
sys.exit(1)
def _OutputUsageAndExit(command_runner):
command_runner.RunNamedCommand('help')
sys.exit(1)
class GsutilFormatter(logging.Formatter):
"""A logging.Formatter that supports logging microseconds (%f)."""
def formatTime(self, record, datefmt=None):
if datefmt:
return datetime.datetime.fromtimestamp(record.created).strftime(datefmt)
# Use default implementation if datefmt is not specified.
return super(GsutilFormatter, self).formatTime(record, datefmt=datefmt)
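# Standalone sketch (hypothetical helper, not part of gsutil): the custom
# formatTime() above is what makes the %f microseconds directive usable.
def _gsutil_formatter_example():
  logger = logging.getLogger('gsutil-formatter-example')
  handler = logging.StreamHandler()
  handler.setFormatter(
      GsutilFormatter(fmt='%(levelname)s %(asctime)s] %(message)s',
                      datefmt='%m%d %H:%M:%S.%f'))
  logger.addHandler(handler)
  return logger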
def _ConfigureRootLogger(level=logging.INFO):
"""Similar to logging.basicConfig() except it always adds a handler."""
log_format = '%(levelname)s %(asctime)s %(filename)s] %(message)s'
date_format = '%m%d %H:%M:%S.%f'
formatter = GsutilFormatter(fmt=log_format, datefmt=date_format)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(level)
def main():
InitializeSignalHandling()
# Any modules used in initializing multiprocessing variables must be
# imported after importing gslib.__main__.
# pylint: disable=redefined-outer-name,g-import-not-at-top
import gslib.boto_translation
import gslib.command
import gslib.utils.parallelism_framework_util
# pylint: disable=unused-variable
from gcs_oauth2_boto_plugin import oauth2_client
from apitools.base.py import credentials_lib
# pylint: enable=unused-variable
if (gslib.utils.parallelism_framework_util.
CheckMultiprocessingAvailableAndInit().is_available):
# These setup methods must be called, and, on Windows, they can only be
# called from within an "if __name__ == '__main__':" block.
gslib.command.InitializeMultiprocessingVariables()
gslib.boto_translation.InitializeMultiprocessingVariables()
else:
gslib.command.InitializeThreadingVariables()
# This needs to be done after InitializeMultiprocessingVariables(), since
# otherwise we can't call CreateLock.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
gsutil_client_id, gsutil_client_secret = (
system_util.GetGsutilClientIdAndSecret())
gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
gsutil_client_id, gsutil_client_secret)
gcs_oauth2_boto_plugin.oauth2_helper.SetLock(
gslib.utils.parallelism_framework_util.CreateLock())
credentials_lib.SetCredentialsCacheFileLock(
gslib.utils.parallelism_framework_util.CreateLock())
except ImportError:
pass
global debug_level
global test_exception_traces
supported, err = check_python_version_support()
if not supported:
raise CommandException(err)
sys.exit(1)
boto_util.MonkeyPatchBoto()
system_util.MonkeyPatchHttp()
# In gsutil 4.0 and beyond, we don't use the boto library for the JSON
# API. However, we still store gsutil configuration data in the .boto
# config file for compatibility with previous versions and user convenience.
# Many users have a .boto configuration file from previous versions, and it
# is useful to have all of the configuration for gsutil stored in one place.
command_runner = CommandRunner()
if not boto_util.BOTO_IS_SECURE:
raise CommandException('\n'.join(
textwrap.wrap(
'Your boto configuration has is_secure = False. Gsutil cannot be '
'run this way, for security reasons.')))
headers = {}
parallel_operations = False
quiet = False
version = False
debug_level = 0
trace_token = None
perf_trace_token = None
test_exception_traces = False
user_project = None
# If user enters no commands just print the usage info.
if len(sys.argv) == 1:
sys.argv.append('help')
# Change the default of the 'https_validate_certificates' boto option to
# True (it is currently False in boto).
if not boto.config.has_option('Boto', 'https_validate_certificates'):
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.setbool('Boto', 'https_validate_certificates', True)
for signal_num in GetCaughtSignals():
RegisterSignalHandler(signal_num, _CleanupSignalHandler)
try:
for o, a in opts:
if o in ('-d', '--debug'):
# Also causes boto to include httplib header output.
debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS
elif o in ('-D', '--detailedDebug'):
# We use debug level 3 to ask gsutil code to output more detailed
# debug output. This is a bit of a hack since it overloads the same
# flag that was originally implemented for boto use. And we use -DD
# to ask for really detailed debugging (i.e., including HTTP payload).
if debug_level == constants.DEBUGLEVEL_DUMP_REQUESTS:
debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS_AND_PAYLOADS
else:
debug_level = constants.DEBUGLEVEL_DUMP_REQUESTS
elif o in ('-?', '--help'):
_OutputUsageAndExit(command_runner)
elif o in ('-h', '--header'):
(hdr_name, _, hdr_val) = a.partition(':')
if not hdr_name:
_OutputUsageAndExit(command_runner)
headers[hdr_name.lower()] = hdr_val
elif o in ('-m', '--multithreaded'):
parallel_operations = True
elif o in ('-q', '--quiet'):
quiet = True
elif o == '-u':
user_project = a
elif o in ('-v', '--version'):
version = True
elif o in ('-i', '--impersonate-service-account'):
constants.IMPERSONATE_SERVICE_ACCOUNT = a
elif o == '--perf-trace-token':
perf_trace_token = a
elif o == '--trace-token':
trace_token = a
elif o == '--testexceptiontraces': # Hidden flag for integration tests.
test_exception_traces = True
# Avoid printing extra warnings to stderr regarding long retries by
# setting the threshold very high.
constants.LONG_RETRY_WARN_SEC = 3600
elif o in ('-o', '--option'):
(opt_section_name, _, opt_value) = a.partition('=')
if not opt_section_name:
_OutputUsageAndExit(command_runner)
(opt_section, _, opt_name) = opt_section_name.partition(':')
if not opt_section or not opt_name:
_OutputUsageAndExit(command_runner)
if not boto.config.has_section(opt_section):
boto.config.add_section(opt_section)
boto.config.set(opt_section, opt_name, opt_value)
# Now that any Boto option overrides (via `-o` args) have been parsed,
# perform initialization that depends on those options.
boto_util.configured_certs_file = (boto_util.ConfigureCertsFile())
metrics.LogCommandParams(global_opts=opts)
httplib2.debuglevel = debug_level
if trace_token:
sys.stderr.write(TRACE_WARNING)
if debug_level >= constants.DEBUGLEVEL_DUMP_REQUESTS:
sys.stderr.write(DEBUG_WARNING)
_ConfigureRootLogger(level=logging.DEBUG)
command_runner.RunNamedCommand('ver', ['-l'])
config_items = []
for config_section in ('Boto', 'GSUtil'):
try:
config_items.extend(boto.config.items(config_section))
except configparser.NoSectionError:
pass
for i in range(len(config_items)):
config_item_key = config_items[i][0]
if config_item_key in CONFIG_KEYS_TO_REDACT:
config_items[i] = (config_item_key, 'REDACTED')
sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv))
sys.stderr.write('config_file_list: %s\n' %
boto_util.GetFriendlyConfigFilePaths())
sys.stderr.write('config: %s\n' % str(config_items))
else: # Non-debug log level.
root_logger_level = logging.WARNING if quiet else logging.INFO
# oauth2client uses INFO and WARNING logging in places that would better
# correspond to gsutil's debug logging (e.g., when refreshing
# access tokens), so we bump the threshold one level higher where
# appropriate. These log levels work for regular- and quiet-level logging.
oa2c_logger_level = logging.WARNING
oa2c_multiprocess_file_storage_logger_level = logging.ERROR
_ConfigureRootLogger(level=root_logger_level)
oauth2client.client.logger.setLevel(oa2c_logger_level)
oauth2client.contrib.multiprocess_file_storage.logger.setLevel(
oa2c_multiprocess_file_storage_logger_level)
# pylint: disable=protected-access
oauth2client.transport._LOGGER.setLevel(oa2c_logger_level)
reauth_creds._LOGGER.setLevel(oa2c_logger_level)
# pylint: enable=protected-access
# TODO(reauth): Fix once reauth pins to pyu2f version newer than 0.1.3.
# Fixes pyu2f v0.1.3 bug.
import six # pylint: disable=g-import-not-at-top
six.input = six.moves.input
if not boto_util.CERTIFICATE_VALIDATION_ENABLED:
sys.stderr.write(HTTP_WARNING)
if version:
command_name = 'version'
elif not args:
command_name = 'help'
else:
command_name = args[0]
if command_name != 'test':
# Don't initialize mTLS authentication because
# tests that need it will do this initialization themselves.
context_config.create_context_config(logging.getLogger())
_CheckAndWarnForProxyDifferences()
# Both 1 and 2 are valid _ARGCOMPLETE values; this var tells argcomplete at
# what argv[] index the command to match starts. We want it to start at the
# value for the path to gsutil, so:
# $ gsutil <command> # Should be the 1st argument, so '1'
# $ python gsutil <command> # Should be the 2nd argument, so '2'
# Both are valid; most users invoke gsutil in the first style, but our
# integration and prerelease tests invoke it in the second style, as we need
# to specify the Python interpreter used to run gsutil.
if os.environ.get('_ARGCOMPLETE', '0') in ('1', '2'):
return _PerformTabCompletion(command_runner)
return _RunNamedCommandAndHandleExceptions(
command_runner,
command_name,
args=args[1:],
headers=headers,
debug_level=debug_level,
trace_token=trace_token,
parallel_operations=parallel_operations,
perf_trace_token=perf_trace_token,
user_project=user_project)
finally:
_Cleanup()
def _CheckAndWarnForProxyDifferences():
# If there are both boto config and environment variable config present for
# proxies, unset the environment variable and warn if it differs.
boto_port = boto.config.getint('Boto', 'proxy_port', 0)
if boto.config.get('Boto', 'proxy', None) or boto_port:
for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']:
if proxy_env_var in os.environ and os.environ[proxy_env_var]:
differing_values = []
proxy_info = boto_util.ProxyInfoFromEnvironmentVar(proxy_env_var)
if proxy_info.proxy_host != boto.config.get('Boto', 'proxy', None):
differing_values.append(
'Boto proxy host: "%s" differs from %s proxy host: "%s"' %
(boto.config.get('Boto', 'proxy',
None), proxy_env_var, proxy_info.proxy_host))
if (proxy_info.proxy_user != boto.config.get('Boto', 'proxy_user',
None)):
differing_values.append(
'Boto proxy user: "%s" differs from %s proxy user: "%s"' %
(boto.config.get('Boto', 'proxy_user',
None), proxy_env_var, proxy_info.proxy_user))
if (proxy_info.proxy_pass != boto.config.get('Boto', 'proxy_pass',
None)):
differing_values.append(
'Boto proxy password differs from %s proxy password' %
proxy_env_var)
# Only compare ports if at least one is present, since the
# boto logic for selecting default ports has not yet executed.
if ((proxy_info.proxy_port or boto_port) and
proxy_info.proxy_port != boto_port):
differing_values.append(
'Boto proxy port: "%s" differs from %s proxy port: "%s"' %
(boto_port, proxy_env_var, proxy_info.proxy_port))
if differing_values:
sys.stderr.write('\n'.join(
textwrap.wrap(
'WARNING: Proxy configuration is present in both the %s '
'environment variable and boto configuration, but '
'configuration differs. boto configuration proxy values will '
'be used. Differences detected:' % proxy_env_var)))
sys.stderr.write('\n%s\n' % '\n'.join(differing_values))
# Regardless of whether the proxy configuration values matched,
# delete the environment variable so as not to confuse boto.
del os.environ[proxy_env_var]
def _HandleUnknownFailure(e):
  # Called if we fall through all known/handled exceptions.
  _OutputAndExit(message='Failure: %s.' % e, exception=e)
def _HandleCommandException(e):
if e.informational:
_OutputAndExit(message=e.reason, exception=e)
else:
_OutputAndExit(message='CommandException: %s' % e.reason, exception=e)
# pylint: disable=unused-argument
def _HandleControlC(signal_num, cur_stack_frame):
"""Called when user hits ^C.
This function prints a brief message instead of the normal Python stack trace
(unless -D option is used).
Args:
signal_num: Signal that was caught.
cur_stack_frame: Unused.
"""
if debug_level >= 2:
stack_trace = ''.join(traceback.format_list(traceback.extract_stack()))
_OutputAndExit('DEBUG: Caught CTRL-C (signal %d) - Exception stack trace:\n'
' %s' %
(signal_num, re.sub('\\n', '\n ', stack_trace)),
exception=ControlCException())
else:
_OutputAndExit('Caught CTRL-C (signal %d) - exiting' % signal_num,
exception=ControlCException())
def _HandleSigQuit(signal_num, cur_stack_frame):
r"""Called when user hits ^\, so we can force breakpoint a running gsutil."""
import pdb # pylint: disable=g-import-not-at-top
pdb.set_trace()
def _ConstructAccountProblemHelp(reason):
"""Constructs a help string for an access control error.
Args:
reason: e.reason string from caught exception.
Returns:
Contructed help text.
"""
default_project_id = boto.config.get_value('GSUtil', 'default_project_id')
# pylint: disable=line-too-long, g-inconsistent-quotes
acct_help = (
"Your request resulted in an AccountProblem (403) error. Usually this "
"happens if you attempt to create a bucket without first having "
"enabled billing for the project you are using. Please ensure billing is "
"enabled for your project by following the instructions at "
"`Google Cloud Platform Console<https://support.google.com/cloud/answer/6158867>`. "
)
if default_project_id:
acct_help += (
"In the project overview, ensure that the Project Number listed for "
"your project matches the project ID (%s) from your boto config file. "
% default_project_id)
acct_help += (
"If the above doesn't resolve your AccountProblem, please send mail to "
"buganizer-system+187143@google.com requesting assistance, noting the "
"exact command you ran, the fact that you received a 403 AccountProblem "
"error, and your project ID. Please do not post your project ID on "
"StackOverflow. "
"Note: It's possible to use Google Cloud Storage without enabling "
"billing if you're only listing or reading objects for which you're "
"authorized, or if you're uploading objects to a bucket billed to a "
"project that has billing enabled. But if you're attempting to create "
"buckets or upload objects to a bucket owned by your own project, you "
"must first enable billing for that project.")
return acct_help
def _CheckAndHandleCredentialException(e, args):
# Provide detail to users who have no boto config file (who might previously
# have been using gsutil only for accessing publicly readable buckets and
# objects).
if (not boto_util.HasConfiguredCredentials() and not boto.config.get_value(
'Tests', 'bypass_anonymous_access_warning', False)):
# The check above allows tests to assert that we get a particular,
# expected failure, rather than always encountering this error message
# when there are no configured credentials. This allows tests to
# simulate a second user without permissions, without actually requiring
# two separate configured users.
if system_util.InvokedViaCloudSdk():
message = '\n'.join(
textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gcloud auth login" command to '
'configure gsutil to use these credentials.'))
else:
message = '\n'.join(
textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gsutil config" command to configure '
'gsutil to use these credentials.'))
_OutputAndExit(message=message, exception=e)
elif (e.reason and
(e.reason == 'AccountProblem' or e.reason == 'Account disabled.' or
'account for the specified project has been disabled' in e.reason) and
','.join(args).find('gs://') != -1):
_OutputAndExit('\n'.join(
textwrap.wrap(_ConstructAccountProblemHelp(e.reason))),
exception=e)
def _RunNamedCommandAndHandleExceptions(command_runner,
command_name,
args=None,
headers=None,
debug_level=0,
trace_token=None,
parallel_operations=False,
perf_trace_token=None,
user_project=None):
"""Runs the command and handles common exceptions."""
# Note that this method is run at the end of main() and thus has access to
# all of the modules imported there.
# pylint: disable=g-import-not-at-top
try:
# Catch ^C so we can print a brief message instead of the normal Python
# stack trace. Register as a final signal handler because this handler kills
# the main gsutil process (so it must run last).
RegisterSignalHandler(signal.SIGINT, _HandleControlC, is_final_handler=True)
# Catch ^\ so we can force a breakpoint in a running gsutil.
if not system_util.IS_WINDOWS:
RegisterSignalHandler(signal.SIGQUIT, _HandleSigQuit)
return command_runner.RunNamedCommand(command_name,
args,
headers,
debug_level,
trace_token,
parallel_operations,
perf_trace_token=perf_trace_token,
collect_analytics=True,
user_project=user_project)
except AttributeError as e:
if str(e).find('secret_access_key') != -1:
_OutputAndExit(
'Missing credentials for the given URI(s). Does your '
'boto config file contain all needed credentials?',
exception=e)
else:
_OutputAndExit(message=str(e), exception=e)
except CommandException as e:
_HandleCommandException(e)
except getopt.GetoptError as e:
_HandleCommandException(CommandException(e.msg))
except boto.exception.InvalidUriError as e:
_OutputAndExit(message='InvalidUriError: %s.' % e.message, exception=e)
except gslib.exception.InvalidUrlError as e:
_OutputAndExit(message='InvalidUrlError: %s.' % e.message, exception=e)
except boto.auth_handler.NotReadyToAuthenticate as e:
_OutputAndExit(message='NotReadyToAuthenticate', exception=e)
except gslib.exception.ExternalBinaryError as e:
_OutputAndExit(message=str(e), exception=e)
except OSError as e:
    # In Python 3, IOError (caught in the next except clause) is an alias for
    # OSError, so the same broken-pipe logic is needed here.
    if ((e.errno == errno.EPIPE or
         (system_util.IS_WINDOWS and e.errno == errno.EINVAL)) and
        not system_util.IsRunningInteractively()):
# If we get a pipe error, this just means that the pipe to stdout or
# stderr is broken. This can happen if the user pipes gsutil to a command
# that doesn't use the entire output stream. Instead of raising an error,
# just swallow it up and exit cleanly.
sys.exit(0)
else:
_OutputAndExit(message='OSError: %s.' % e.strerror, exception=e)
except IOError as e:
    if ((e.errno == errno.EPIPE or
         (system_util.IS_WINDOWS and e.errno == errno.EINVAL)) and
        not system_util.IsRunningInteractively()):
# If we get a pipe error, this just means that the pipe to stdout or
# stderr is broken. This can happen if the user pipes gsutil to a command
# that doesn't use the entire output stream. Instead of raising an error,
# just swallow it up and exit cleanly.
sys.exit(0)
else:
raise
except wildcard_iterator.WildcardException as e:
_OutputAndExit(message=e.reason, exception=e)
except ProjectIdException as e:
_OutputAndExit(
'You are attempting to perform an operation that requires a '
'project id, with none configured. Please re-run '
'gsutil config and make sure to follow the instructions for '
'finding and entering your default project id.',
exception=e)
except BadRequestException as e:
if e.reason == 'MissingSecurityHeader':
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(message=e, exception=e)
except AccessDeniedException as e:
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(message=e, exception=e)
except ArgumentException as e:
_OutputAndExit(message=e, exception=e)
except ServiceException as e:
_OutputAndExit(message=e, exception=e)
except oauth2client.client.HttpAccessTokenRefreshError as e:
if system_util.InvokedViaCloudSdk():
_OutputAndExit(
'Your credentials are invalid. '
'Please run\n$ gcloud auth login',
exception=e)
else:
_OutputAndExit(
'Your credentials are invalid. For more help, see '
'"gsutil help creds", or re-run the gsutil config command (see '
'"gsutil help config").',
exception=e)
except apitools_exceptions.HttpError as e:
# These should usually be retried by the underlying implementation or
# wrapped by CloudApi ServiceExceptions, but if we do get them,
# print something useful.
_OutputAndExit('HttpError: %s, %s' %
(getattr(e.response, 'status', ''), e.content or ''),
exception=e)
except socket.error as e:
if e.args[0] == errno.EPIPE:
# Retrying with a smaller file (per suggestion below) works because
# the library code send loop (in boto/s3/key.py) can get through the
# entire file and then request the HTTP response before the socket
# gets closed and the response lost.
_OutputAndExit(
'Got a "Broken pipe" error. This can happen to clients using Python '
'2.x, when the server sends an error response and then closes the '
'socket (see http://bugs.python.org/issue5542). If you are trying to '
'upload a large object you might retry with a small (say 200k) '
'object, and see if you get a more specific error code.',
exception=e)
    elif e.args[0] == errno.ECONNRESET and 's3://' in ' '.join(args or []):
_OutputAndExit('\n'.join(
textwrap.wrap(
'Got a "Connection reset by peer" error. One way this can happen is '
'when copying data to/from an S3 regional bucket. If you are using a '
'regional S3 bucket you could try re-running this command using the '
'regional S3 endpoint, for example '
's3://s3-<region>.amazonaws.com/your-bucket. For details about this '
'problem see https://github.com/boto/boto/issues/2207')),
exception=e)
else:
_HandleUnknownFailure(e)
except oauth2client.client.FlowExchangeError as e:
_OutputAndExit('\n%s\n\n' % '\n'.join(
textwrap.wrap(
'Failed to retrieve valid credentials (%s). Make sure you selected and '
'pasted the ENTIRE authorization code (including any numeric prefix '
"e.g. '4/')." % e)),
exception=e)
except reauth_errors.ReauthSamlLoginRequiredError:
if system_util.InvokedViaCloudSdk():
_OutputAndExit('You must re-authenticate with your SAML IdP. '
'Please run\n$ gcloud auth login')
else:
_OutputAndExit('You must re-authenticate with your SAML IdP. '
'Please run\n$ gsutil config')
except Exception as e: # pylint: disable=broad-except
config_paths = ', '.join(boto_util.GetFriendlyConfigFilePaths())
# Check for two types of errors related to service accounts. These errors
# appear to be the same except for their messages, but they are caused by
# different problems and both have unhelpful error messages. Moreover,
# the error type belongs to PyOpenSSL, which is not necessarily installed.
if 'mac verify failure' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file_password field in your config file(s),'
'\n%s, is correct.' % config_paths,
exception=e)
elif 'asn1 encoding routines' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file field in your config file(s),\n%s, is correct.' %
config_paths,
exception=e)
_HandleUnknownFailure(e)
def _PerformTabCompletion(command_runner):
"""Performs gsutil-specific tab completion for the shell."""
# argparse and argcomplete are bundled with the Google Cloud SDK.
# When gsutil is invoked from the Google Cloud SDK, both should be available.
try:
import argcomplete
import argparse
except ImportError as e:
_OutputAndExit('A library required for performing tab completion was'
' not found.\nCause: %s' % e,
exception=e)
parser = argparse.ArgumentParser(add_help=False)
command_runner.ConfigureCommandArgumentParsers(parser)
argcomplete.autocomplete(parser, exit_method=sys.exit)
return 0
if __name__ == '__main__':
sys.exit(main())
|
GoogleCloudPlatform/gsutil
|
gslib/__main__.py
|
Python
|
apache-2.0
| 34,238
|
[
"VisIt"
] |
6f607a5c21fcf75808e8261f9a3f053e08979c0e01e54f0378b21c009ec5d322
|
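The broken-pipe handling in the OSError/IOError clauses of gslib/__main__.py above is a general pattern worth isolating: when a downstream consumer of stdout exits early, writes fail with EPIPE, and a clean exit beats a stack trace. Below is a minimal, self-contained sketch of that pattern; the demo main() and its output volume are illustrative and not part of gsutil.

import errno
import sys

def main():
    # Emit enough output that a consumer such as `python demo.py | head -1`
    # closes the read end of the pipe while we are still writing.
    for i in range(100000):
        print(i)

if __name__ == '__main__':
    try:
        main()
    except OSError as e:  # IOError is an alias for OSError on Python 3
        if e.errno == errno.EPIPE:
            # Close stdout now so the interpreter's exit-time flush does not
            # raise a second broken-pipe error.
            try:
                sys.stdout.close()
            except OSError:
                pass
            sys.exit(0)
        raise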
"""this is python equivalent of ./Wrapping/Tcl/vtktesting/backdrop.tcl
This script is used while running python tests translated from Tcl."""
import vtk
basePlane = None
baseMapper = None
base = None
backPlane = None
backMapper = None
back = None
leftPlane = None
leftMapper = None
left = None
def BuildBackdrop (minX, maxX, minY, maxY, minZ, maxZ, thickness):
global basePlane
global baseMapper
global base
global backPlane
global backMapper
global back
global left
global leftPlane
global leftMapper
if not basePlane:
basePlane = vtk.vtkCubeSource()
basePlane.SetCenter( (maxX + minX)/2.0, minY, (maxZ + minZ)/2.0)
basePlane.SetXLength(maxX-minX)
basePlane.SetYLength(thickness)
basePlane.SetZLength(maxZ - minZ)
if not baseMapper:
baseMapper = vtk.vtkPolyDataMapper()
baseMapper.SetInput(basePlane.GetOutput())
if not base:
base = vtk.vtkActor()
base.SetMapper(baseMapper)
if not backPlane:
backPlane = vtk.vtkCubeSource()
backPlane.SetCenter( (maxX + minX)/2.0, (maxY + minY)/2.0, minZ)
backPlane.SetXLength(maxX-minX)
backPlane.SetYLength(maxY - minY)
backPlane.SetZLength(thickness)
if not backMapper:
backMapper = vtk.vtkPolyDataMapper()
backMapper.SetInput(backPlane.GetOutput())
if not back:
back = vtk.vtkActor()
back.SetMapper(backMapper)
if not leftPlane:
leftPlane = vtk.vtkCubeSource()
leftPlane.SetCenter( minX, (maxY+minY)/2.0, (maxZ+minZ)/2.0)
leftPlane.SetXLength(thickness)
leftPlane.SetYLength(maxY-minY)
leftPlane.SetZLength(maxZ-minZ)
if not leftMapper:
leftMapper = vtk.vtkPolyDataMapper()
leftMapper.SetInput(leftPlane.GetOutput())
if not left:
left = vtk.vtkActor()
left.SetMapper(leftMapper)
return [base, back, left]
|
b3c/VTK-5.8
|
Utilities/vtkTclTest2Py/backdrop.py
|
Python
|
bsd-3-clause
| 1,885
|
[
"VTK"
] |
2ab6e5c454743977a9cdcbf2633665b335fd9fedd6641b1dcef7f50f7a4d5e6e
|
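BuildBackdrop only constructs and returns the three actors; the caller is expected to add them to a renderer. A minimal usage sketch against the VTK 5.x API used above follows (it assumes backdrop.py is importable as a module; the bounds and thickness values are arbitrary):

import vtk
from backdrop import BuildBackdrop  # the module defined above

renderer = vtk.vtkRenderer()
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)

# Floor, back wall and left wall spanning [-1, 1]^3, each 0.1 units thick.
for actor in BuildBackdrop(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 0.1):
    renderer.AddActor(actor)

window.Render()

Note that vtkPolyDataMapper.SetInput() was removed in VTK 6; on modern VTK the mappers in backdrop.py would need SetInputConnection(plane.GetOutputPort()) instead.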
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment, Hen, Chick, A, B, C)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
        Regression test for bug #8036: the first related model in the tests
        below ("state") is empty and we try to select the more remotely
        related state__country. The regression here was not skipping the
        empty column results for country before getting status.
"""
Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
Item.objects.create(name="item1", child=c1)
Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
bob = Client.objects.create(name='Bob', status=active)
jack = Client.objects.create(name='Jack', status=active, state=wa)
qs = Client.objects.filter(state=wa).select_related('state')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [jack])
self.assertEqual(qs[0].state, wa)
# The select_related join wasn't promoted as there was already an
# existing (even if trimmed) inner join to state.
self.assertFalse('LEFT OUTER' in str(qs.query))
qs = Client.objects.select_related('state').order_by('name')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [bob, jack])
self.assertIs(qs[0].state, None)
self.assertEqual(qs[1].state, wa)
# The select_related join was promoted as there is already an
# existing join.
self.assertTrue('LEFT OUTER' in str(qs.query))
def test_regression_19870(self):
"""
Regression for #19870
"""
hen = Hen.objects.create(name='Hen')
Chick.objects.create(name='Chick', mother=hen)
self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_ticket_10733(self):
a = A.objects.create(name='a', lots_of_text='lots_of_text_a', a_field='a_field')
b = B.objects.create(name='b', lots_of_text='lots_of_text_b', b_field='b_field')
c = C.objects.create(name='c', lots_of_text='lots_of_text_c', is_published=True,
c_a=a, c_b=b)
results = C.objects.all().only('name', 'lots_of_text', 'c_a', 'c_b', 'c_b__lots_of_text',
'c_a__name', 'c_b__name').select_related()
self.assertQuerysetEqual(results, [c], lambda x: x)
with self.assertNumQueries(0):
qs_c = results[0]
self.assertEqual(qs_c.name, 'c')
self.assertEqual(qs_c.lots_of_text, 'lots_of_text_c')
self.assertEqual(qs_c.c_b.lots_of_text, 'lots_of_text_b')
self.assertEqual(qs_c.c_a.name, 'a')
self.assertEqual(qs_c.c_b.name, 'b')
|
liavkoren/djangoDev
|
tests/select_related_regress/tests.py
|
Python
|
bsd-3-clause
| 9,045
|
[
"Brian"
] |
cdb411c140f006695b0093340e0635236dc8158becc00d64b2622af0538ea29c
|
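The regression tests above all revolve around select_related() fetching foreign-key targets through SQL JOINs instead of per-row queries. A short sketch of the behaviour under test, reusing the Client/State/ClientStatus models from the same test app (the relative import path is assumed):

from .models import Client

# Eagerly JOIN the state and status tables: iterating the queryset and
# touching client.state / client.status triggers no additional queries.
for client in Client.objects.select_related('state', 'status'):
    print(client.name, client.state, client.status)

# Without select_related(), each attribute access below lazily issues one
# extra query per row (the classic N+1 pattern the tests guard against).
for client in Client.objects.all():
    print(client.name, client.status)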