text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# viz_pymol.py
# Purpose: viz running LAMMPS simulation via PyMol
# Syntax: viz_pymol.py in.lammps Nfreq Nsteps
#   in.lammps = LAMMPS input script
#   Nfreq = dump and viz snapshot every this many steps
#   Nsteps = run for this many steps
#
# NOTE(review): this is a Python 2 script (print statement below).  It drives
# LAMMPS in nfreq-step increments and displays each dump snapshot in PyMOL,
# converting the native dump to PDB via the Pizza.py dump/pdbfile tools.

import sys
sys.path.append("./pizza")

# parse command line

argv = sys.argv
if len(argv) != 4:
  print "Syntax: viz_pymol.py in.lammps Nfreq Nsteps"
  sys.exit()

infile = sys.argv[1]     # LAMMPS input script to run
nfreq = int(sys.argv[2])   # steps between dump/viz snapshots
nsteps = int(sys.argv[3])  # total steps to advance the simulation

me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

from lammps import lammps
lmp = lammps()

# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool

lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)

# initial 0-step run to generate dump file and image

lmp.command("run 0 pre yes post no")
ntimestep = 0

# wrapper on PyMol
# just proc 0 handles reading of dump file and viz

if me == 0:
  import pymol
  pymol.finish_launching()
  from dump import dump
  from pdbfile import pdbfile
  from pymol import cmd as pm
  # incremental dump reader: d.next() reads one snapshot at a time
  d = dump("tmp.dump",0)
  p = pdbfile(d)
  d.next()
  d.unscale()
  p.single(ntimestep)
  pm.load("tmp.pdb")
  pm.show("spheres","tmp")

# run nfreq steps at a time w/out pre/post, read dump snapshot, display it

while ntimestep < nsteps:
  lmp.command("run %d pre no post no" % nfreq)
  ntimestep += nfreq
  if me == 0:
    d.next()
    d.unscale()
    p.single(ntimestep)
    # each load adds a new PyMOL state; forward() advances the movie frame
    pm.load("tmp.pdb")
    pm.forward()

# final 0-step run to print post-run thermo/timing info

lmp.command("run 0 pre no post yes")

# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
|
slitvinov/lammps-sph-multiphase
|
python/examples/viz_pymol.py
|
Python
|
gpl-2.0
| 1,874
|
[
"LAMMPS",
"PyMOL"
] |
9f41969562cda3ceda62589cefc8a68af4030eb552290073d1ab6a508884f5a0
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
from scipy.special import comb
from scipy.misc.doccer import inherit_docstring_from
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import (gammaln as gamln, gamma as gam, boxcox, boxcox1p,
inv_boxcox, inv_boxcox1p, erfc, chndtr, chndtrix)
from numpy import (where, arange, putmask, ravel, sum, shape,
log, sqrt, exp, arctanh, tan, sin, arcsin, arctan,
tanh, cos, cosh, sinh)
from numpy import polyval, place, extract, any, asarray, nan, inf, pi
import numpy as np
from . import vonmises_cython
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
rv_continuous, valarray, _skew, _kurtosis, _lazywhere,
_ncx2_log_pdf, _ncx2_pdf, _ncx2_cdf, get_distribution_names,
)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
    """General Kolmogorov-Smirnov one-sided test.

    %(default)s

    """
    def _cdf(self, x, n):
        # The CDF is the complement of the one-sided Smirnov survival
        # function for sample size n.
        sf = special.smirnov(n, x)
        return 1.0 - sf

    def _ppf(self, q, n):
        # Invert by feeding the complementary probability to the
        # inverse Smirnov function.
        return special.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
    """Kolmogorov-Smirnov two-sided test for large N.

    %(default)s

    """
    def _cdf(self, x):
        # Complement of the asymptotic Kolmogorov survival function.
        return 1.0 - special.kolmogorov(x)

    def _sf(self, x):
        # Survival function is available directly from scipy.special.
        return special.kolmogorov(x)

    def _ppf(self, q):
        # kolmogi inverts the survival function, so pass 1 - q.
        return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return special.log_ndtr(x)
def _norm_ppf(q):
return special.ndtri(q)
def _norm_sf(x):
return special.ndtr(-x)
def _norm_logsf(x):
return special.log_ndtr(-x)
def _norm_isf(q):
return -special.ndtri(q)
class norm_gen(rv_continuous):
    """A normal continuous random variable.

    The location (loc) keyword specifies the mean.
    The scale (scale) keyword specifies the standard deviation.

    %(before_notes)s

    Notes
    -----
    The probability density function for `norm` is::

        norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)

    %(after_notes)s

    %(example)s

    """
    def _rvs(self):
        # Draw from the standard normal; loc/scale applied by the framework.
        return self._random_state.standard_normal(self._size)

    def _pdf(self, x):
        # Delegate to the module-level helpers shared with other
        # normal-based distributions.
        return _norm_pdf(x)

    def _logpdf(self, x):
        return _norm_logpdf(x)

    def _cdf(self, x):
        return _norm_cdf(x)

    def _logcdf(self, x):
        return _norm_logcdf(x)

    def _sf(self, x):
        return _norm_sf(x)

    def _logsf(self, x):
        return _norm_logsf(x)

    def _ppf(self, q):
        return _norm_ppf(q)

    def _isf(self, q):
        return _norm_isf(q)

    def _stats(self):
        # Standardized distribution: mean 0, variance 1, skew 0, kurtosis 0.
        return 0.0, 1.0, 0.0, 0.0

    def _entropy(self):
        # Differential entropy of N(0, 1): 0.5*log(2*pi*e).
        return 0.5*(log(2*pi)+1)

    @inherit_docstring_from(rv_continuous)
    def fit(self, data, **kwds):
        """%(super)s
        This function (norm_gen.fit) uses explicit formulas for the maximum
        likelihood estimation of the parameters, so the `optimizer` argument
        is ignored.
        """
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)

        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        data = np.asarray(data)

        if floc is None:
            # MLE of the mean is the sample mean.
            loc = data.mean()
        else:
            loc = floc

        if fscale is None:
            # MLE of the std deviation: biased estimator (ddof=0), taken
            # about the (possibly fixed) location.
            scale = np.sqrt(((data - loc)**2).mean())
        else:
            scale = fscale

        return loc, scale
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
    """An alpha continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `alpha` is::

        alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),

    where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.

    `alpha` takes ``a`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, a):
        # Normal pdf at (a - 1/x), normalized by x**2 * Phi(a).
        return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)

    def _logpdf(self, x, a):
        return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))

    def _cdf(self, x, a):
        # CDF follows from the substitution u = a - 1/x.
        return special.ndtr(a-1.0/x) / special.ndtr(a)

    def _ppf(self, q, a):
        return 1.0/asarray(a-special.ndtri(q*special.ndtr(a)))

    def _stats(self, a):
        # No finite moments exist: mean/variance infinite, skew/kurtosis
        # undefined.
        return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
    """An anglit continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `anglit` is::

        anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),

    for ``-pi/4 <= x <= pi/4``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # pdf is cos(2x) on [-pi/4, pi/4].
        return cos(2*x)

    def _cdf(self, x):
        # Integral of cos(2x) from -pi/4, written as sin(x + pi/4)**2.
        return sin(x+pi/4)**2.0

    def _ppf(self, q):
        # Invert the squared-sine CDF.
        root = sqrt(q)
        return (arcsin(root)-pi/4)

    def _stats(self):
        # Closed-form mean, variance, skewness and excess kurtosis.
        return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2

    def _entropy(self):
        return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
class arcsine_gen(rv_continuous):
    """An arcsine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `arcsine` is::

        arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))

    for ``0 < x < 1``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # Density diverges at both endpoints of (0, 1).
        return 1.0/pi/sqrt(x*(1-x))

    def _cdf(self, x):
        # Eponymous arcsine form of the CDF.
        return 2.0/pi*arcsin(sqrt(x))

    def _ppf(self, q):
        # Inverse of the arcsine CDF.
        return sin(pi/2.0*q)**2.0

    def _stats(self):
        # Closed-form moments of the arcsine law on [0, 1].
        mu = 0.5
        mu2 = 1.0/8
        g1 = 0
        g2 = -3.0/2.0
        return mu, mu2, g1, g2

    def _entropy(self):
        # Differential entropy: log(pi/4), precomputed.
        return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
    # Raised (e.g. by beta_gen.fit) when both floc and fscale are fixed
    # and some data values fall outside the open interval
    # (floc, floc + fscale).
    def __init__(self, distr, lower, upper):
        message = ("Invalid values in `data`. Maximum likelihood "
                   "estimation with {distr!r} requires that {lower!r} < x "
                   "< {upper!r} for each x in `data`.".format(
                       distr=distr, lower=lower, upper=upper))
        self.args = (message,)
class FitSolverError(RuntimeError):
    # Raised (e.g. by beta_gen.fit) when optimize.fsolve reports
    # non-convergence (ier != 1); `mesg` is fsolve's message.
    def __init__(self, mesg):
        self.args = ("Solver for the MLE equations failed to converge: "
                     + mesg.replace('\n', ''),)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = special.psi(a + b)
func = s1 - n * (-psiab + special.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
class beta_gen(rv_continuous):
    """A beta continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `beta` is::

                            gamma(a+b) * x**(a-1) * (1-x)**(b-1)
        beta.pdf(x, a, b) = ------------------------------------
                                     gamma(a)*gamma(b)

    for ``0 < x < 1``, ``a > 0``, ``b > 0``, where ``gamma(z)`` is the gamma
    function (`scipy.special.gamma`).

    `beta` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a, b):
        return self._random_state.beta(a, b, self._size)

    def _pdf(self, x, a, b):
        # Exponentiate the log-pdf for numerical stability at the edges.
        return np.exp(self._logpdf(x, a, b))

    def _logpdf(self, x, a, b):
        # xlog1py/xlogy return 0 for 0*log(0), handling the boundaries.
        lPx = special.xlog1py(b-1.0, -x) + special.xlogy(a-1.0, x)
        lPx -= special.betaln(a, b)
        return lPx

    def _cdf(self, x, a, b):
        return special.btdtr(a, b, x)

    def _ppf(self, q, a, b):
        return special.btdtri(a, b, q)

    def _stats(self, a, b):
        # Closed-form mean, variance, skewness, excess kurtosis.
        mn = a*1.0 / (a + b)
        var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
        g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
        g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
        g2 /= a*b*(a+b+2)*(a+b+3)
        return mn, var, g1, g2

    def _fitstart(self, data):
        # Initial guess for the solver: match sample skew and kurtosis.
        g1 = _skew(data)
        g2 = _kurtosis(data)

        def func(x):
            # Residuals of the skew/kurtosis equations in (a, b).
            a, b = x
            sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
            ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
            ku /= a*b*(a+b+2)*(a+b+3)
            ku *= 6
            return [sk-g1, ku-g2]
        a, b = optimize.fsolve(func, (1.0, 1.0))
        return super(beta_gen, self)._fitstart(data, args=(a, b))

    @inherit_docstring_from(rv_continuous)
    def fit(self, data, *args, **kwds):
        """%(super)s
        In the special case where both `floc` and `fscale` are given, a
        `ValueError` is raised if any value `x` in `data` does not satisfy
        `floc < x < floc + fscale`.
        """
        # Override rv_continuous.fit, so we can more efficiently handle the
        # case where floc and fscale are given.

        # Accept all the aliases rv_continuous.fit recognizes for fixing
        # the two shape parameters.
        f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
              kwds.get('fix_a', None))
        f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
              kwds.get('fix_b', None))
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)

        if floc is None or fscale is None:
            # do general fit
            return super(beta_gen, self).fit(data, *args, **kwds)

        if f0 is not None and f1 is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        # Special case: loc and scale are constrained, so we are fitting
        # just the shape parameters.  This can be done much more efficiently
        # than the method used in `rv_continuous.fit`.  (See the subsection
        # "Two unknown parameters" in the section "Maximum likelihood" of
        # the Wikipedia article on the Beta distribution for the formulas.)

        # Normalize the data to the interval [0, 1].
        data = (ravel(data) - floc) / fscale
        if np.any(data <= 0) or np.any(data >= 1):
            raise FitDataError("beta", lower=floc, upper=floc + fscale)
        xbar = data.mean()

        if f0 is not None or f1 is not None:
            # One of the shape parameters is fixed.

            if f0 is not None:
                # The shape parameter a is fixed, so swap the parameters
                # and flip the data.  We always solve for `a`.  The result
                # will be swapped back before returning.
                b = f0
                data = 1 - data
                xbar = 1 - xbar
            else:
                b = f1

            # Initial guess for a.  Use the formula for the mean of the beta
            # distribution, E[x] = a / (a + b), to generate a reasonable
            # starting point based on the mean of the data and the given
            # value of b.
            a = b * xbar / (1 - xbar)

            # Compute the MLE for `a` by solving _beta_mle_a.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_a, a,
                args=(b, len(data), np.log(data).sum()),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a = theta[0]

            if f0 is not None:
                # The shape parameter a was fixed, so swap back the
                # parameters.
                a, b = b, a

        else:
            # Neither of the shape parameters is fixed.

            # s1 and s2 are used in the extra arguments passed to _beta_mle_ab
            # by optimize.fsolve.
            s1 = np.log(data).sum()
            s2 = np.log(1 - data).sum()

            # Use the "method of moments" to estimate the initial
            # guess for a and b.
            fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
            a = xbar * fac
            b = (1 - xbar) * fac

            # Compute the MLE for a and b by solving _beta_mle_ab.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_ab, [a, b],
                args=(len(data), s1, s2),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a, b = theta

        return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
    """A beta prime continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `betaprime` is::

        betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)

    for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
    function (see `scipy.special.beta`).

    `betaprime` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a, b):
        # Sample as a ratio of two independent gamma variates; `gamma`
        # is the gamma distribution instance defined later in this module.
        sz, rndm = self._size, self._random_state
        u1 = gamma.rvs(a, size=sz, random_state=rndm)
        u2 = gamma.rvs(b, size=sz, random_state=rndm)
        return (u1 / u2)

    def _pdf(self, x, a, b):
        # Exponentiate the log-pdf for numerical stability.
        return np.exp(self._logpdf(x, a, b))

    def _logpdf(self, x, a, b):
        return (special.xlogy(a-1.0, x) - special.xlog1py(a+b, x) -
                special.betaln(a, b))

    def _cdf(self, x, a, b):
        # CDF via the regularized incomplete beta function at x/(1+x).
        return special.betainc(a, b, x/(1.+x))

    def _munp(self, n, a, b):
        # Raw moments in closed form; the n-th moment is finite only
        # when b > n, hence the `where` guards returning inf.
        if (n == 1.0):
            return where(b > 1, a/(b-1.0), inf)
        elif (n == 2.0):
            return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
        elif (n == 3.0):
            return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
                         inf)
        elif (n == 4.0):
            return where(b > 4,
                         a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0)
                                                    * (b-2.0)*(b-1.0)), inf)
        else:
            raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
    """A Bradford continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `bradford` is::

        bradford.pdf(x, c) = c / (k * (1+c*x)),

    for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.

    `bradford` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        return c / (c*x + 1.0) / log(1.0+c)

    def _cdf(self, x, c):
        return log(1.0+c*x) / log(c+1.0)

    def _ppf(self, q, c):
        # Inverse of the logarithmic CDF.
        return ((1.0+c)**q-1)/c

    def _stats(self, c, moments='mv'):
        # Closed-form moments with k = log(1+c); skew/kurtosis are only
        # computed when requested via the `moments` string.
        k = log(1.0+c)
        mu = (c-k)/(c*k)
        mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
        g1 = None
        g2 = None
        if 's' in moments:
            g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
            g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
        if 'k' in moments:
            g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
                  + 6*c*k*k*(3*k-14) + 12*k**3)
            g2 /= 3*c*(c*(k-2)+2*k)**2
        return mu, mu2, g1, g2

    def _entropy(self, c):
        k = log(1+c)
        return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
    """A Burr continuous random variable.

    %(before_notes)s

    See Also
    --------
    fisk : a special case of `burr` with ``d = 1``

    Notes
    -----
    The probability density function for `burr` is::

        burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)

    for ``x > 0``.

    `burr` takes ``c`` and ``d`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c, d):
        # Split into the two power factors of the Burr XII density.
        xc = x**(-c*1.0)
        return c*d*(x**(-c-1.0))*((1+xc)**(-d-1.0))

    def _cdf(self, x, c, d):
        # F(x) = (1 + x**-c)**(-d)
        base = 1+x**(-c*1.0)
        return base**(-d*1.0)

    def _ppf(self, q, c, d):
        # Invert the CDF in two power steps.
        inner = q**(-1.0/d)-1
        return inner**(-1.0/c)

    def _munp(self, n, c, d):
        # n-th raw moment via the beta function; finite for n < c*d.
        nc = 1. * n / c
        return d * special.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class fisk_gen(burr_gen):
    """A Fisk continuous random variable.

    The Fisk distribution is also known as the log-logistic distribution, and
    equals the Burr distribution with ``d == 1``.

    `fisk` takes ``c`` as a shape parameter.

    %(before_notes)s

    Notes
    -----
    The probability density function for `fisk` is::

        fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)

    for ``x > 0``.

    %(after_notes)s

    See Also
    --------
    burr

    %(example)s

    """
    # All methods delegate to burr_gen with the second shape fixed at d=1.
    def _pdf(self, x, c):
        return burr_gen._pdf(self, x, c, 1.0)

    def _cdf(self, x, c):
        return burr_gen._cdf(self, x, c, 1.0)

    def _ppf(self, q, c):
        # Parameter renamed from `x` to `q`: it is a probability, matching
        # the _ppf convention used throughout this module.
        return burr_gen._ppf(self, q, c, 1.0)

    def _munp(self, n, c):
        return burr_gen._munp(self, n, c, 1.0)

    def _entropy(self, c):
        return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
    """A Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `cauchy` is::

        cauchy.pdf(x) = 1 / (pi * (1 + x**2))

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # Standard Cauchy (Lorentzian) density.
        return 1.0/pi/(1.0+x*x)

    def _cdf(self, x):
        return 0.5 + 1.0/pi*arctan(x)

    def _ppf(self, q):
        # Inverse CDF via the tangent function.
        return tan(pi*q-pi/2.0)

    def _sf(self, x):
        return 0.5 - 1.0/pi*arctan(x)

    def _isf(self, q):
        return tan(pi/2.0-pi*q)

    def _stats(self):
        # No moments exist for the Cauchy distribution.
        return nan, nan, nan, nan

    def _entropy(self):
        return log(4*pi)

    def _fitstart(self, data, args=None):
        # Initialize ML guesses using quartiles instead of moments,
        # since sample moments do not converge for Cauchy data.
        p25, p50, p75 = np.percentile(data, [25, 50, 75])
        loc_guess = p50
        scale_guess = (p75 - p25)/2
        return loc_guess, scale_guess
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
    """A chi continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `chi` is::

        chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))

    for ``x > 0``.

    Special cases of `chi` are:

        - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
        - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
        - ``chi(3, 0, scale)`` is equivalent to `maxwell`

    `chi` takes ``df`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, df):
        # Chi variate = sqrt of a chi-squared variate; `chi2` is the
        # chi-squared distribution instance defined later in this module.
        sz, rndm = self._size, self._random_state
        return sqrt(chi2.rvs(df, size=sz, random_state=rndm))

    def _pdf(self, x, df):
        # Exponentiate the log-pdf for numerical stability.
        return np.exp(self._logpdf(x, df))

    def _logpdf(self, x, df):
        # l collects the log of the normalizing constant.
        l = np.log(2) - .5*np.log(2)*df - special.gammaln(.5*df)
        return l + special.xlogy(df-1.,x) - .5*x**2

    def _cdf(self, x, df):
        # CDF via the regularized lower incomplete gamma function.
        return special.gammainc(.5*df, .5*x**2)

    def _ppf(self, q, df):
        return sqrt(2*special.gammaincinv(.5*df, q))

    def _stats(self, df):
        # Closed-form moments built from gamma-function ratios.
        mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
        mu2 = df - mu*mu
        g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(np.power(mu2, 1.5))
        g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
        g2 /= asarray(mu2**2.0)
        return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return special.xlogy(df/2.-1, x) - x/2. - gamln(df/2.) - (log(2)*df)/2.
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
    """A cosine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The cosine distribution is an approximation to the normal distribution.
    The probability density function for `cosine` is::

        cosine.pdf(x) = 1/(2*pi) * (1+cos(x))

    for ``-pi <= x <= pi``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # Raised-cosine density on [-pi, pi].
        return 1.0/2/pi*(1+cos(x))

    def _cdf(self, x):
        # Antiderivative of the pdf, anchored at F(-pi) = 0.
        return 1.0/2/pi*(pi + x + sin(x))

    def _stats(self):
        # Symmetric: zero mean and skew; closed-form variance and
        # excess kurtosis.
        return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)

    def _entropy(self):
        return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
class dgamma_gen(rv_continuous):
    """A double gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dgamma` is::

        dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))

    for ``a > 0``.

    `dgamma` takes ``a`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a):
        # Gamma magnitude with a random sign; `gamma` is the gamma
        # distribution instance defined later in this module.
        sz, rndm = self._size, self._random_state
        u = rndm.random_sample(size=sz)
        gm = gamma.rvs(a, size=sz, random_state=rndm)
        return gm * where(u >= 0.5, 1, -1)

    def _pdf(self, x, a):
        # Symmetric in x: half the gamma density of |x| on each side.
        ax = abs(x)
        return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)

    def _logpdf(self, x, a):
        ax = abs(x)
        return special.xlogy(a-1.0, ax) - ax - log(2) - gamln(a)

    def _cdf(self, x, a):
        # Fold the gamma CDF of |x| around the median at 0.
        fac = 0.5*special.gammainc(a, abs(x))
        return where(x > 0, 0.5 + fac, 0.5 - fac)

    def _sf(self, x, a):
        fac = 0.5*special.gammainc(a, abs(x))
        return where(x > 0, 0.5-fac, 0.5+fac)

    def _ppf(self, q, a):
        # Invert via the complemented incomplete gamma inverse, mirrored
        # about q = 0.5.
        fac = special.gammainccinv(a, 1-abs(2*q-1))
        return where(q > 0.5, fac, -fac)

    def _stats(self, a):
        # Odd moments vanish by symmetry.
        mu2 = a*(a+1.0)
        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
    """A double Weibull continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dweibull` is::

        dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)

    `dweibull` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, c):
        # Weibull magnitude with a random sign; `weibull_min` is the
        # Weibull distribution instance defined elsewhere in this module.
        sz, rndm = self._size, self._random_state
        u = rndm.random_sample(size=sz)
        w = weibull_min.rvs(c, size=sz, random_state=rndm)
        return w * (where(u >= 0.5, 1, -1))

    def _pdf(self, x, c):
        # Symmetric in x: half the Weibull density of |x| on each side.
        ax = abs(x)
        Px = c / 2.0 * ax**(c-1.0) * exp(-ax**c)
        return Px

    def _logpdf(self, x, c):
        ax = abs(x)
        return log(c) - log(2.0) + special.xlogy(c - 1.0, ax) - ax**c

    def _cdf(self, x, c):
        # Fold the Weibull survival function around the median at 0.
        Cx1 = 0.5 * exp(-abs(x)**c)
        return where(x > 0, 1 - Cx1, Cx1)

    def _ppf(self, q, c):
        # Mirror the inverse about q = 0.5.
        fac = 2. * where(q <= 0.5, q, 1. - q)
        fac = np.power(-log(fac), 1.0 / c)
        return where(q > 0.5, fac, -fac)

    def _munp(self, n, c):
        # Odd raw moments vanish by symmetry; even ones reduce to the
        # Weibull moment gamma(1 + n/c).
        return (1 - (n % 2)) * special.gamma(1.0 + 1.0 * n / c)

    # since we know that all odd moments are zeros, return them at once.
    # returning Nones from _stats makes the public stats call _munp
    # so overall we're saving one or two gamma function evaluations here.
    def _stats(self, c):
        return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
    """An exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `expon` is::

        expon.pdf(x) = exp(-x)

    for ``x >= 0``.

    %(after_notes)s

    A common parameterization for `expon` is in terms of the rate parameter
    ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``.  This
    parameterization corresponds to using ``scale = 1 / lambda``.

    %(example)s

    """
    def _rvs(self):
        return self._random_state.standard_exponential(self._size)

    def _pdf(self, x):
        return exp(-x)

    def _logpdf(self, x):
        return -x

    def _cdf(self, x):
        # expm1 keeps precision for small x, where exp(-x) is near 1.
        return -special.expm1(-x)

    def _ppf(self, q):
        # log1p keeps precision for q near 0.
        return -special.log1p(-q)

    def _sf(self, x):
        return exp(-x)

    def _logsf(self, x):
        return -x

    def _isf(self, q):
        return -log(q)

    def _stats(self):
        # Unit-rate exponential: mean 1, variance 1, skew 2,
        # excess kurtosis 6.
        return 1.0, 1.0, 2.0, 6.0

    def _entropy(self):
        return 1.0
expon = expon_gen(a=0.0, name='expon')
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
    r"""An exponentially modified Normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `exponnorm` is::

        exponnorm.pdf(x, K) = 1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc(-(x - 1/K) / sqrt(2))

    where the shape parameter ``K > 0``.

    It can be thought of as the sum of a normally distributed random
    value with mean ``loc`` and sigma ``scale`` and an exponentially
    distributed random number with a pdf proportional to ``exp(-lambda * x)``
    where ``lambda = (K * scale)**(-1)``.

    %(after_notes)s

    An alternative parameterization of this distribution (for example, in
    `Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
    involves three parameters, :math:`\mu`, :math:`\lambda` and :math:`\sigma`.
    In the present parameterization this corresponds to having ``loc`` and
    ``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
    shape parameter :math:`K = 1/\sigma\lambda`.

    .. versionadded:: 0.16.0

    %(example)s

    """
    def _rvs(self, K):
        # Sum of a scaled exponential and a standard normal variate.
        expval = self._random_state.standard_exponential(self._size) * K
        gval = self._random_state.standard_normal(self._size)
        return expval + gval

    def _pdf(self, x, K):
        invK = 1.0 / K
        exparg = 0.5 * invK**2 - invK * x
        # Avoid overflows; setting exp(exparg) to the max float works
        # all right here
        expval = _lazywhere(exparg < _LOGXMAX, (exparg,), exp, _XMAX)
        return 0.5 * invK * expval * erfc(-(x - invK) / sqrt(2))

    def _logpdf(self, x, K):
        invK = 1.0 / K
        exparg = 0.5 * invK**2 - invK * x
        return exparg + log(0.5 * invK * erfc(-(x - invK) / sqrt(2)))

    def _cdf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        return special.ndtr(x) - exp(expval) * special.ndtr(x - invK)

    def _sf(self, x, K):
        invK = 1.0 / K
        expval = invK * (0.5 * invK - x)
        return special.ndtr(-x) + exp(expval) * special.ndtr(x - invK)

    def _stats(self, K):
        # Closed-form moments of the EMG distribution in terms of K.
        K2 = K * K
        opK2 = 1.0 + K2
        skw = 2 * K**3 * opK2**(-1.5)
        krt = 6.0 * K2 * K2 * opK2**(-2)
        return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
`exponweib` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
return exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -special.expm1(negxc)
logp = (log(a) + log(c) + special.xlogy(a - 1.0, exm1c) +
negxc + special.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -special.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-special.log1p(-q**(1.0/a)))**asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
    """An exponential power continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `exponpow` is::

        exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))

    for ``x >= 0``, ``b > 0``.  Note that this is a different distribution
    from the exponential power distribution that is also known under the names
    "generalized normal" or "generalized Gaussian".

    `exponpow` takes ``b`` as a shape parameter.

    %(after_notes)s

    References
    ----------
    http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf

    %(example)s

    """
    def _pdf(self, x, b):
        # Exponentiate the log-pdf for numerical stability.
        return exp(self._logpdf(x, b))

    def _logpdf(self, x, b):
        xb = x**b
        f = 1 + log(b) + special.xlogy(b - 1.0, x) + xb - exp(xb)
        return f

    def _cdf(self, x, b):
        # Nested expm1 calls keep precision near x = 0.
        return -special.expm1(-special.expm1(x**b))

    def _sf(self, x, b):
        return exp(-special.expm1(x**b))

    def _isf(self, x, b):
        return (special.log1p(-log(x)))**(1./b)

    def _ppf(self, q, b):
        # Invert the doubly-exponential CDF with nested log1p calls.
        return pow(special.log1p(-special.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
    """A fatigue-life (Birnbaum-Saunders) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `fatiguelife` is::

        fatiguelife.pdf(x, c) =
            (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))

    for ``x > 0``.

    `fatiguelife` takes ``c`` as a shape parameter.

    %(after_notes)s

    References
    ----------
    .. [1] "Birnbaum-Saunders distribution",
           http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution

    %(example)s

    """
    def _rvs(self, c):
        # Transform a standard normal z via the Birnbaum-Saunders
        # representation t = (1 + 2x^2 + 2x*sqrt(1+x^2)) with x = c*z/2.
        z = self._random_state.standard_normal(self._size)
        x = 0.5*c*z
        x2 = x*x
        t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
        return t

    def _pdf(self, x, c):
        # Exponentiate the log-pdf for numerical stability.
        return np.exp(self._logpdf(x, c))

    def _logpdf(self, x, c):
        return (log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) -
                0.5*(log(2*pi) + 3*log(x)))

    def _cdf(self, x, c):
        # Normal CDF of the monotone transform (sqrt(x) - 1/sqrt(x))/c.
        return special.ndtr(1.0 / c * (sqrt(x) - 1.0/sqrt(x)))

    def _ppf(self, q, c):
        # Invert the transform used in _cdf.
        tmp = c*special.ndtri(q)
        return 0.25 * (tmp + sqrt(tmp**2 + 4))**2

    def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41.  At least it disagrees with the one from Wolfram
        # Alpha.  And the latter one, below, passes the tests, while the wiki
        # one doesn't So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
        c2 = c*c
        mu = c2 / 2.0 + 1.0
        den = 5.0 * c2 + 4.0
        mu2 = c2*den / 4.0
        g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
        g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
        return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
    """A folded Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `foldcauchy` is::

        foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))

    for ``x >= 0``.

    `foldcauchy` takes ``c`` as a shape parameter.

    %(example)s

    """
    def _rvs(self, c):
        # |Cauchy(loc=c)|; `cauchy` is the Cauchy distribution instance
        # defined earlier in this module.
        return abs(cauchy.rvs(loc=c, size=self._size,
                              random_state=self._random_state))

    def _pdf(self, x, c):
        # Sum of the two Cauchy branches folded onto x >= 0.
        return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))

    def _cdf(self, x, c):
        return 1.0/pi*(arctan(x-c) + arctan(x+c))

    def _stats(self, c):
        # No finite moments: mean/variance infinite, skew/kurtosis
        # undefined.
        return inf, inf, nan, nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
    """An F continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `f` is::

                             df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
        F.pdf(x, df1, df2) = --------------------------------------------
                             (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)

    for ``x > 0``.

    `f` takes ``dfn`` and ``dfd`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, dfn, dfd):
        return self._random_state.f(dfn, dfd, self._size)

    def _pdf(self, x, dfn, dfd):
        # Exponentiate the log-pdf for numerical stability.
        return exp(self._logpdf(x, dfn, dfd))

    def _logpdf(self, x, dfn, dfd):
        # Force float arithmetic on the degrees of freedom.
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = m/2 * log(m) + n/2 * log(n) + (n/2 - 1) * log(x)
        lPx -= ((n+m)/2) * log(m + n*x) + special.betaln(n/2, m/2)
        return lPx

    def _cdf(self, x, dfn, dfd):
        return special.fdtr(dfn, dfd, x)

    def _sf(self, x, dfn, dfd):
        return special.fdtrc(dfn, dfd, x)

    def _ppf(self, q, dfn, dfd):
        return special.fdtri(dfn, dfd, q)

    def _stats(self, dfn, dfd):
        # Each moment exists only for large enough dfd; _lazywhere
        # evaluates the closed form where defined and fills inf/nan
        # elsewhere.
        v1, v2 = 1. * dfn, 1. * dfd
        v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.

        # Mean requires dfd > 2.
        mu = _lazywhere(
            v2 > 2, (v2, v2_2),
            lambda v2, v2_2: v2 / v2_2,
            np.inf)

        # Variance requires dfd > 4.
        mu2 = _lazywhere(
            v2 > 4, (v1, v2, v2_2, v2_4),
            lambda v1, v2, v2_2, v2_4:
            2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
            np.inf)

        # Skewness requires dfd > 6.
        g1 = _lazywhere(
            v2 > 6, (v1, v2_2, v2_4, v2_6),
            lambda v1, v2_2, v2_4, v2_6:
            (2 * v1 + v2_2) / v2_6 * sqrt(v2_4 / (v1 * (v1 + v2_2))),
            np.nan)
        g1 *= np.sqrt(8.)

        # Excess kurtosis requires dfd > 8.
        g2 = _lazywhere(
            v2 > 8, (g1, v2_6, v2_8),
            lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
            np.nan)
        g2 *= 3. / 2.
        return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
    """A folded normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `foldnorm` is::

        foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)

    for ``c >= 0``.

    `foldnorm` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        # c = |mu|/sigma of the underlying normal; c = 0 gives half-normal.
        return (c >= 0)
    def _rvs(self, c):
        # |Z + c| with Z standard normal is folded normal with shape c.
        return abs(self._random_state.standard_normal(self._size) + c)
    def _pdf(self, x, c):
        # Density of |X| is the normal density reflected about the origin.
        return _norm_pdf(x + c) + _norm_pdf(x-c)
    def _cdf(self, x, c):
        return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
    def _stats(self, c):
        # Moment formulas from:
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # http://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*pi)
        mu = 2.*expfac + c * special.erf(c/sqrt(2))
        mu2 = c2 + 1 - mu*mu
        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)
        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        # Convert the 4th central moment to excess kurtosis.
        g2 = g2 / mu2**2.0 - 3.
        return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
    """A Frechet right (or Weibull minimum) continuous random variable.

    %(before_notes)s

    See Also
    --------
    weibull_min : The same distribution as `frechet_r`.
    frechet_l, weibull_max

    Notes
    -----
    The probability density function for `frechet_r` is::

        frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)

    for ``x > 0``, ``c > 0``.

    `frechet_r` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # pdf(x) = c * x**(c-1) * exp(-x**c)
        return c*x**(c-1)*exp(-x**c)
    def _logpdf(self, x, c):
        # Log-density, used where the pdf would underflow.
        return log(c) + (c-1)*log(x) - x**c
    def _cdf(self, x, c):
        # 1 - exp(-x**c), written via expm1 for accuracy near zero.
        return -special.expm1(-x**c)
    def _ppf(self, q, c):
        # Invert the cdf; log1p keeps precision for small q.
        return (-special.log1p(-q))**(1.0/c)
    def _munp(self, n, c):
        # n-th raw moment: Gamma(1 + n/c).
        return special.gamma(1.0 + n*1.0/c)
    def _entropy(self, c):
        # Differential entropy of the Weibull-minimum distribution.
        return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
    """A Frechet left (or Weibull maximum) continuous random variable.

    %(before_notes)s

    See Also
    --------
    weibull_max : The same distribution as `frechet_l`.
    frechet_r, weibull_min

    Notes
    -----
    The probability density function for `frechet_l` is::

        frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)

    for ``x < 0``, ``c > 0``.

    `frechet_l` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # Mirror image of the Weibull-minimum pdf (support is x < 0).
        return c*(-x)**(c-1)*exp(-(-x)**c)
    def _cdf(self, x, c):
        return exp(-(-x)**c)
    def _ppf(self, q, c):
        # Invert the cdf and reflect back onto the negative axis.
        return -(-log(q))**(1.0/c)
    def _munp(self, n, c):
        # Raw moments of |X| alternate sign with the order n.
        val = special.gamma(1.0 + n*1.0/c)
        sgn = -1 if int(n) % 2 else 1
        return sgn * val
    def _entropy(self, c):
        # Entropy is reflection-invariant, so identical to frechet_r.
        return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
    """A generalized logistic continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `genlogistic` is::

        genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)

    for ``x > 0``, ``c > 0``.

    `genlogistic` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # Exponentiate the log-density to avoid intermediate overflow.
        return exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        return log(c) - x - (c+1.0)*special.log1p(exp(-x))
    def _cdf(self, x, c):
        # cdf(x) = (1 + exp(-x))**(-c)
        return (1+exp(-x))**(-c)
    def _ppf(self, q, c):
        # Inverse of the cdf above.
        return -log(pow(q, -1.0/c)-1)
    def _stats(self, c):
        # Central moments via polygamma/zeta identities.
        zeta = special.zeta
        mu = _EULER + special.psi(c)
        mu2 = pi*pi/6.0 + zeta(2, c)
        g1 = -2*zeta(3, c) + 2*_ZETA3
        g1 /= np.power(mu2, 1.5)
        g2 = pi**4/15.0 + 6*zeta(4, c)
        g2 /= mu2**2.0
        return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
    """A generalized Pareto continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `genpareto` is::

        genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)

    defined for ``x >= 0`` if ``c >=0``, and for
    ``0 <= x <= -1/c`` if ``c < 0``.

    `genpareto` takes ``c`` as a shape parameter.

    For ``c == 0``, `genpareto` reduces to the exponential
    distribution, `expon`::

        genpareto.pdf(x, c=0) = exp(-x)

    For ``c == -1``, `genpareto` is uniform on ``[0, 1]``::

        genpareto.cdf(x, c=-1) = x

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        # For c < 0 the support is bounded above at -1/c; set the
        # instance's upper bound accordingly. All real c are accepted.
        c = asarray(c)
        self.b = _lazywhere(c < 0, (c,),
                            lambda c: -1. / c, np.inf)
        return True
    def _pdf(self, x, c):
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        # (x == x) masks out NaNs; at c == 0 fall back to the
        # exponential log-density -x instead of dividing by zero.
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -special.xlog1py(c+1., c*x) / c,
                          -x)
    def _cdf(self, x, c):
        # The cdf/sf/ppf/isf are expressed through the (inverse) Box-Cox
        # transforms, which handle the c -> 0 limit smoothly.
        return -inv_boxcox1p(-x, -c)
    def _sf(self, x, c):
        return inv_boxcox(-x, -c)
    def _logsf(self, x, c):
        return _lazywhere((x == x) & (c != 0), (x, c),
                          lambda x, c: -special.log1p(c*x) / c,
                          -x)
    def _ppf(self, q, c):
        return -boxcox1p(-q, -c)
    def _isf(self, q, c):
        return -boxcox(q, -c)
    def _munp(self, n, c):
        # Raw moments via the binomial expansion; the n-th moment exists
        # only when c*n < 1. The c == 0 limit is the exponential moment
        # Gamma(n + 1).
        def __munp(n, c):
            val = 0.0
            k = arange(0, n + 1)
            for ki, cnk in zip(k, comb(n, k)):
                val = val + cnk * (-1) ** ki / (1.0 - c * ki)
            return where(c * n < 1, val * (-1.0 / c) ** n, inf)
        return _lazywhere(c != 0, (c,),
                          lambda c: __munp(n, c),
                          gam(n + 1))
    def _entropy(self, c):
        return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
class genexpon_gen(rv_continuous):
    """A generalized exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `genexpon` is::

        genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
            exp(-a*x - b*x + b/c * (1-exp(-c*x)))

    for ``x >= 0``, ``a, b, c > 0``.

    `genexpon` takes ``a``, ``b`` and ``c`` as shape parameters.

    %(after_notes)s

    References
    ----------
    H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
    Distribution", Journal of the American Statistical Association, 1993.

    N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
    Applications", Asit P. Basu.

    %(example)s

    """
    # The common subexpression 1 - exp(-c*x) is computed as
    # -expm1(-c*x) throughout, which is accurate for small c*x.
    def _pdf(self, x, a, b, c):
        omexp = -special.expm1(-c*x)
        return (a + b*omexp)*exp((-a-b)*x + b*omexp/c)
    def _cdf(self, x, a, b, c):
        omexp = -special.expm1(-c*x)
        return -special.expm1((-a-b)*x + b*omexp/c)
    def _logpdf(self, x, a, b, c):
        omexp = -special.expm1(-c*x)
        return np.log(a + b*omexp) + (-a-b)*x + b*omexp/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
    """A generalized extreme value continuous random variable.

    %(before_notes)s

    See Also
    --------
    gumbel_r

    Notes
    -----
    For ``c=0``, `genextreme` is equal to `gumbel_r`.
    The probability density function for `genextreme` is::

        genextreme.pdf(x, c) =
            exp(-exp(-x))*exp(-x),                    for c==0
            exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1),    for x <= 1/c, c > 0

    Note that several sources and software packages use the opposite
    convention for the sign of the shape parameter ``c``.

    `genextreme` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        # The support depends on the sign of c: bounded above by 1/c for
        # c > 0 and below by 1/c for c < 0. Clamp with _XMIN to avoid
        # division by zero when c is (numerically) zero.
        min = np.minimum
        max = np.maximum
        self.b = where(c > 0, 1.0 / max(c, _XMIN), inf)
        self.a = where(c < 0, 1.0 / min(c, -_XMIN), -inf)
        return where(abs(c) == inf, 0, 1)
    def _pdf(self, x, c):
        # Evaluate in log space; the (c == 0)*(x == x) masks select the
        # Gumbel limit while propagating NaNs.
        cx = c*x
        logex2 = where((c == 0)*(x == x), 0.0, special.log1p(-cx))
        logpex2 = where((c == 0)*(x == x), -x, logex2/c)
        pex2 = exp(logpex2)
        # Handle special cases
        logpdf = where((cx == 1) | (cx == -inf), -inf, -pex2+logpex2-logex2)
        putmask(logpdf, (c == 1) & (x == 1), 0.0)
        return exp(logpdf)
    def _cdf(self, x, c):
        loglogcdf = where((c == 0)*(x == x), -x, special.log1p(-c*x)/c)
        return exp(-exp(loglogcdf))
    def _ppf(self, q, c):
        # Invert via the Gumbel quantile, then map through the shape
        # transform (expm1 keeps accuracy for small c*x).
        x = -log(-log(q))
        return where((c == 0)*(x == x), x, -special.expm1(-c*x)/c)
    def _stats(self, c):
        # Moments in terms of Gamma(n*c + 1); near c == 0 the closed
        # forms are numerically unstable, so series limits are
        # substituted below eps-dependent thresholds.
        g = lambda n: gam(n*c+1)
        g1 = g(1)
        g2 = g(2)
        g3 = g(3)
        g4 = g(4)
        g2mg12 = where(abs(c) < 1e-7, (c*pi)**2.0/6.0, g2-g1**2.0)
        gam2k = where(abs(c) < 1e-7, pi**2.0/6.0,
                      special.expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0)
        eps = 1e-14
        gamk = where(abs(c) < eps, -_EULER, special.expm1(gamln(c+1))/c)
        # The n-th moment exists only for c > -1/n; nan otherwise.
        m = where(c < -1.0, nan, -gamk)
        v = where(c < -0.5, nan, g1**2.0*gam2k)
        # skewness
        sk1 = where(c < -1./3, nan,
                    np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
        sk = where(abs(c) <= eps**0.29, 12*sqrt(6)*_ZETA3/pi**3, sk1)
        # kurtosis
        ku1 = where(c < -1./4, nan,
                    (g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
        ku = where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
        return m, v, sk, ku
    def _fitstart(self, data):
        # This is better than the default shape of (1,).
        # The sign of the sample skewness picks the starting half-line.
        g = _skew(data)
        if g < 0:
            a = 0.5
        else:
            a = -0.5
        return super(genextreme_gen, self)._fitstart(data, args=(a,))
    def _munp(self, n, c):
        # Raw moments by binomial expansion; finite only when c*n > -1.
        k = arange(0, n+1)
        vals = 1.0/c**n * sum(
            comb(n, k) * (-1)**k * special.gamma(c*k + 1),
            axis=0)
        return where(c*n > -1, vals, inf)
    def _entropy(self, c):
        return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: special.digamma(x) - y
if y > -0.125:
x0 = exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# must faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
    """A gamma continuous random variable.

    %(before_notes)s

    See Also
    --------
    erlang, expon

    Notes
    -----
    The probability density function for `gamma` is::

        gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)

    for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.

    `gamma` has a shape parameter `a` which needs to be set explicitly.

    When ``a`` is an integer, `gamma` reduces to the Erlang
    distribution, and when ``a=1`` to the exponential distribution.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a):
        return self._random_state.standard_gamma(a, self._size)
    def _pdf(self, x, a):
        return exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        # xlogy handles the x == 0, a == 1 corner without log(0).
        return special.xlogy(a-1.0, x) - x - gamln(a)
    def _cdf(self, x, a):
        return special.gammainc(a, x)
    def _sf(self, x, a):
        return special.gammaincc(a, x)
    def _ppf(self, q, a):
        return special.gammaincinv(a, q)
    def _stats(self, a):
        # mean = a, var = a, skew = 2/sqrt(a), excess kurtosis = 6/a.
        return a, a, 2.0/sqrt(a), 6.0/a
    def _entropy(self, a):
        return special.psi(a)*(1-a) + a + gamln(a)
    def _fitstart(self, data):
        # The skewness of the gamma distribution is `4 / sqrt(a)`.
        # We invert that to estimate the shape `a` using the skewness
        # of the data.  The formula is regularized with 1e-8 in the
        # denominator to allow for degenerate data where the skewness
        # is close to 0.
        a = 4 / (1e-8 + _skew(data)**2)
        return super(gamma_gen, self)._fitstart(data, args=(a,))
    @inherit_docstring_from(rv_continuous)
    def fit(self, data, *args, **kwds):
        # Accept the shape-fixing keyword under several aliases.
        f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
              kwds.get('fix_a', None))
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)

        if floc is None:
            # loc is not fixed.  Use the default fit method.
            return super(gamma_gen, self).fit(data, *args, **kwds)

        # Special case: loc is fixed.

        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")

        # Fixed location is handled by shifting the data.
        data = np.asarray(data)
        if np.any(data <= floc):
            raise FitDataError("gamma", lower=floc, upper=np.inf)
        if floc != 0:
            # Don't do the subtraction in-place, because `data` might be a
            # view of the input array.
            data = data - floc
        xbar = data.mean()

        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed

        if fscale is None:
            # scale is free
            if f0 is not None:
                # shape is fixed
                a = f0
            else:
                # shape and scale are both free.
                # The MLE for the shape parameter `a` is the solution to:
                # log(a) - special.digamma(a) - log(xbar) + mean(log(data)) = 0
                s = log(xbar) - log(data).mean()
                func = lambda a: log(a) - special.digamma(a) - s
                # Start brentq from the Greenwood & Durand style
                # approximation of the MLE, bracketing +/- 40%.
                aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
                xa = aest*(1-0.4)
                xb = aest*(1+0.4)
                a = optimize.brentq(func, xa, xb, disp=0)

            # The MLE for the scale parameter is just the data mean
            # divided by the shape parameter.
            scale = xbar / a
        else:
            # scale is fixed, shape is free
            # The MLE for the shape parameter `a` is the solution to:
            # special.digamma(a) - log(data).mean() + log(fscale) = 0
            c = log(data).mean() - log(fscale)
            a = _digammainv(c)
            scale = fscale

        return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
class erlang_gen(gamma_gen):
    """An Erlang continuous random variable.

    %(before_notes)s

    See Also
    --------
    gamma

    Notes
    -----
    The Erlang distribution is a special case of the Gamma distribution, with
    the shape parameter `a` an integer.  Note that this restriction is not
    enforced by `erlang`. It will, however, generate a warning the first time
    a non-integer value is used for the shape parameter.

    Refer to `gamma` for examples.

    """
    def _argcheck(self, a):
        # Accept any positive shape, but warn on non-integers since the
        # Erlang distribution is conventionally integer-shaped.
        allint = np.all(np.floor(a) == a)
        allpos = np.all(a > 0)
        if not allint:
            # An Erlang distribution shouldn't really have a non-integer
            # shape parameter, so warn the user.
            warnings.warn(
                'The shape parameter of the erlang distribution '
                'has been given a non-integer value %r.' % (a,),
                RuntimeWarning)
        return allpos
    def _fitstart(self, data):
        # Override gamma_gen_fitstart so that an integer initial value is
        # used.  (Also regularize the division, to avoid issues when
        # _skew(data) is 0 or close to 0.)
        a = int(4.0 / (1e-8 + _skew(data)**2))
        return super(gamma_gen, self)._fitstart(data, args=(a,))

    # Trivial override of the fit method, so we can monkey-patch its
    # docstring.
    def fit(self, data, *args, **kwds):
        return super(erlang_gen, self).fit(data, *args, **kwds)

    # fit.__doc__ may be None when Python runs with -OO (docstrings
    # stripped); only extend it when it exists.
    if fit.__doc__ is not None:
        fit.__doc__ = (rv_continuous.fit.__doc__ +
            """
            Notes
            -----
            The Erlang distribution is generally defined to have integer values
            for the shape parameter.  This is not enforced by the `erlang` class.
            When fitting the distribution, it will generally return a non-integer
            value for the shape parameter.  By using the keyword argument
            `f0=<integer>`, the fit method can be constrained to fit the data to
            a specific integer shape parameter.
            """)
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
    """A generalized gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gengamma` is::

        gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)

    for ``x > 0``, ``a > 0``, and ``c != 0``.

    `gengamma` takes ``a`` and ``c`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a, c):
        # c may be negative (inverse-gamma family) but not zero.
        return (a > 0) & (c != 0)
    def _pdf(self, x, a, c):
        return np.exp(self._logpdf(x, a, c))
    def _logpdf(self, x, a, c):
        # Log-density avoids overflow in gamma(a) for large a.
        return np.log(abs(c)) + special.xlogy(c*a - 1, x) - x**c - special.gammaln(a)
    # For c > 0 the map x -> x**c is increasing, so the cdf is the lower
    # regularized incomplete gamma; for c < 0 it is decreasing and the
    # roles of gammainc/gammaincc swap. Same logic drives sf/ppf/isf.
    def _cdf(self, x, a, c):
        transformed = x**c
        lower = special.gammainc(a, transformed)
        upper = special.gammaincc(a, transformed)
        return np.where(c > 0, lower, upper)
    def _sf(self, x, a, c):
        transformed = x**c
        lower = special.gammainc(a, transformed)
        upper = special.gammaincc(a, transformed)
        return np.where(c > 0, upper, lower)
    def _ppf(self, q, a, c):
        lower_inv = special.gammaincinv(a, q)
        upper_inv = special.gammainccinv(a, q)
        return np.where(c > 0, lower_inv, upper_inv)**(1.0/c)
    def _isf(self, q, a, c):
        lower_inv = special.gammaincinv(a, q)
        upper_inv = special.gammainccinv(a, q)
        return np.where(c > 0, upper_inv, lower_inv)**(1.0/c)
    def _munp(self, n, a, c):
        # Pochhammer symbol:  poch(a,n) = gamma(a+n)/gamma(a)
        return special.poch(a, n*1.0/c)
    def _entropy(self, a, c):
        val = special.psi(a)
        return a*(1-val) + 1.0/c*val + special.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
    """A generalized half-logistic continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `genhalflogistic` is::

        genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2

    for ``0 <= x <= 1/c``, and ``c > 0``.

    `genhalflogistic` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, c):
        # Support is the finite interval [0, 1/c].
        self.b = 1.0 / c
        return (c > 0)
    def _pdf(self, x, c):
        inv_c = 1.0/c
        base = asarray(1-c*x)
        powm1 = base**(inv_c-1)          # (1 - c*x)**(1/c - 1)
        full = powm1*base                # (1 - c*x)**(1/c)
        return 2*powm1 / (1+full)**2
    def _cdf(self, x, c):
        inv_c = 1.0/c
        base = asarray(1-c*x)
        full = base**(inv_c)
        return (1.0-full) / (1+full)
    def _ppf(self, q, c):
        # Invert the cdf in closed form.
        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
    def _entropy(self, c):
        return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
    """A Gompertz (or truncated Gumbel) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gompertz` is::

        gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))

    for ``x >= 0``, ``c > 0``.

    `gompertz` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, c):
        # Exponentiate the log-density; expm1 keeps exp(x) - 1 accurate.
        return exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        return log(c) + x - c * special.expm1(x)
    def _cdf(self, x, c):
        return -special.expm1(-c * special.expm1(x))
    def _ppf(self, q, c):
        # Invert the cdf; log1p avoids cancellation for small q.
        inner = special.log1p(-q)
        return special.log1p(-1.0 / c * inner)
    def _entropy(self, c):
        # expn(1, c) is the exponential integral E1(c).
        return 1.0 - log(c) - exp(c)*special.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
    """A right-skewed Gumbel continuous random variable.

    %(before_notes)s

    See Also
    --------
    gumbel_l, gompertz, genextreme

    Notes
    -----
    The probability density function for `gumbel_r` is::

        gumbel_r.pdf(x) = exp(-(x + exp(-x)))

    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution.  It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        # log pdf = -(x + exp(-x))
        return -x - exp(-x)
    def _cdf(self, x):
        # Double exponential: exp(-exp(-x)).
        return exp(self._logcdf(x))
    def _logcdf(self, x):
        return -exp(-x)
    def _ppf(self, q):
        inner = -log(q)
        return -log(inner)
    def _stats(self):
        # Mean is Euler's gamma; variance pi^2/6; fixed skew/kurtosis.
        return _EULER, pi*pi/6.0, 12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        # http://en.wikipedia.org/wiki/Gumbel_distribution
        return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
    """A left-skewed Gumbel continuous random variable.

    %(before_notes)s

    See Also
    --------
    gumbel_r, gompertz, genextreme

    Notes
    -----
    The probability density function for `gumbel_l` is::

        gumbel_l.pdf(x) = exp(x - exp(x))

    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution.  It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        # Mirror image of gumbel_r's log-density.
        return x - exp(x)
    def _cdf(self, x):
        return 1.0-exp(-exp(x))
    def _ppf(self, q):
        inner = -log(1-q)
        return log(inner)
    def _stats(self):
        # Reflection of gumbel_r: negated mean and skewness.
        return -_EULER, pi*pi/6.0, \
            -12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        # Entropy is reflection-invariant, same as gumbel_r.
        return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
    """A Half-Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halfcauchy` is::

        halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))

    for ``x >= 0``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # Twice the standard Cauchy density, folded onto [0, inf).
        return 2.0/pi/(1.0+x*x)
    def _logpdf(self, x):
        return np.log(2.0/pi) - special.log1p(x*x)
    def _cdf(self, x):
        return 2.0/pi*arctan(x)
    def _ppf(self, q):
        # Inverse of the cdf above.
        return tan(pi/2*q)
    def _stats(self):
        # Heavy tails: no finite moments.
        return inf, inf, nan, nan
    def _entropy(self):
        return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
    """A half-logistic continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halflogistic` is::

        halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2

    for ``x >= 0``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        return log(2) - x - 2. * special.log1p(exp(-x))
    def _cdf(self, x):
        # cdf simplifies to tanh(x/2).
        return tanh(x/2.0)
    def _ppf(self, q):
        return 2*arctanh(q)
    def _munp(self, n):
        # Closed forms for the first four raw moments; general formula
        # in terms of the Hurwitz zeta function otherwise.
        if n == 1:
            return 2*log(2)
        if n == 2:
            return pi*pi/3.0
        if n == 3:
            return 9*_ZETA3
        if n == 4:
            return 7*pi**4 / 15.0
        return 2*(1-pow(2.0, 1-n))*special.gamma(n+1)*special.zeta(n, 1)
    def _entropy(self):
        return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
    """A half-normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halfnorm` is::

        halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)

    for ``x > 0``.

    `halfnorm` is a special case of `chi` with ``df == 1``.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self):
        # |Z| for standard normal Z.
        return abs(self._random_state.standard_normal(size=self._size))
    def _pdf(self, x):
        # Twice the standard normal density on [0, inf).
        return sqrt(2.0/pi)*exp(-x*x/2.0)
    def _logpdf(self, x):
        return 0.5 * np.log(2.0/pi) - x*x/2.0
    def _cdf(self, x):
        return special.ndtr(x)*2-1.0
    def _ppf(self, q):
        return special.ndtri((1+q)/2.0)
    def _stats(self):
        return (sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5,
                8*(pi-3)/(pi-2)**2)
    def _entropy(self):
        return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
    """A hyperbolic secant continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `hypsecant` is::

        hypsecant.pdf(x) = 1/pi * sech(x)

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # sech(x)/pi, written via cosh.
        return 1.0/(pi*cosh(x))
    def _cdf(self, x):
        # The Gudermannian-style closed form of the cdf.
        return 2.0/pi*arctan(exp(x))
    def _ppf(self, q):
        half_angle = pi*q/2.0
        return log(tan(half_angle))
    def _stats(self):
        return 0, pi*pi/4, 0, 2
    def _entropy(self):
        return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
    """A Gauss hypergeometric continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gausshyper` is::

        gausshyper.pdf(x, a, b, c, z) =
            C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)

    for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
    ``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``

    `gausshyper` takes ``a``, ``b``, ``c`` and ``z`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a, b, c, z):
        # c and z may be any real; (v == v) rejects NaN only.
        return (a > 0) & (b > 0) & (c == c) & (z == z)
    def _pdf(self, x, a, b, c, z):
        # Normalizing constant: B(a, b) * 2F1(c, a; a+b; -z).
        norm_const_inv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c, a, a+b, -z)
        return 1.0/norm_const_inv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
    def _munp(self, n, a, b, c, z):
        # Raw moments as a ratio of beta factors and 2F1 values.
        beta_ratio = special.beta(n+a, b) / special.beta(a, b)
        hyp_num = special.hyp2f1(c, a+n, a+b+n, -z)
        hyp_den = special.hyp2f1(c, a, a+b, -z)
        return beta_ratio*hyp_num / hyp_den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
    """An inverted gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invgamma` is::

        invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)

    for x > 0, a > 0.

    `invgamma` takes ``a`` as a shape parameter.

    `invgamma` is a special case of `gengamma` with ``c == -1``.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x, a):
        return exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        # Log-density avoids overflow in gamma(a) for large a.
        return (-(a+1) * log(x) - gamln(a) - 1.0/x)
    def _cdf(self, x, a):
        # If X ~ invgamma(a) then 1/X ~ gamma(a), so
        # P(X <= x) = P(1/X >= 1/x) = gammaincc(a, 1/x).
        # Using the upper incomplete gamma directly (instead of
        # 1 - gammainc(a, 1/x)) avoids cancellation for small cdf values.
        return special.gammaincc(a, 1.0/x)
    def _ppf(self, q, a):
        # Inverse of the cdf above; gammainccinv avoids forming 1 - q.
        return 1.0 / special.gammainccinv(a, q)
    def _sf(self, x, a):
        # Survival function, exact in the upper tail.
        return special.gammainc(a, 1.0/x)
    def _isf(self, q, a):
        return 1.0 / special.gammaincinv(a, q)
    def _stats(self, a, moments='mvsk'):
        # Moments exist only for large enough a (mean a>1, var a>2,
        # skew a>3, kurtosis a>4); _lazywhere substitutes inf/nan.
        m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
        m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
                        np.inf)

        g1, g2 = None, None
        if 's' in moments:
            g1 = _lazywhere(
                a > 3, (a,),
                lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
        if 'k' in moments:
            g2 = _lazywhere(
                a > 4, (a,),
                lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
        return m1, m2, g1, g2
    def _entropy(self, a):
        return a - (a+1.0) * special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
    """An inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invgauss` is::

        invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))

    for ``x > 0``.

    `invgauss` takes ``mu`` as a shape parameter.

    %(after_notes)s

    When `mu` is too small, evaluating the cumulative density function will be
    inaccurate due to ``cdf(mu -> 0) = inf * 0``.
    NaNs are returned for ``mu <= 0.0028``.

    %(example)s

    """
    def _rvs(self, mu):
        # numpy's Wald sampler with scale lambda = 1.
        return self._random_state.wald(mu, 1.0, size=self._size)
    def _pdf(self, x, mu):
        return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
    def _logpdf(self, x, mu):
        return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
    def _cdf(self, x, mu):
        fac = sqrt(1.0/x)
        # Numerical accuracy for small `mu` is bad.  See #869.
        C1 = _norm_cdf(fac*(x-mu)/mu)
        # The analytic term is exp(2/mu)*Phi(...); it is deliberately
        # written as exp(1/mu)*Phi(...)*exp(1/mu) to delay overflow of
        # the exponential factor for small mu. Do not "simplify" this.
        C1 += exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
        return C1
    def _stats(self, mu):
        # mean mu, variance mu^3, skew 3*sqrt(mu), excess kurtosis 15*mu.
        return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class invweibull_gen(rv_continuous):
    """An inverted Weibull continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invweibull` is::

        invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))

    for ``x > 0``, ``c > 0``.

    `invweibull` takes ``c`` as a shape parameter.

    %(after_notes)s

    References
    ----------
    F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
    Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.

    %(example)s

    """
    def _pdf(self, x, c):
        # pdf(x) = c * x**(-c-1) * exp(-x**(-c))
        power_term = np.power(x, -c - 1.0)
        exp_term = exp(-np.power(x, -c))
        return c * power_term * exp_term
    def _cdf(self, x, c):
        return exp(-np.power(x, -c))
    def _ppf(self, q, c):
        # Invert the cdf: x = (-log(q))**(-1/c).
        return np.power(-log(q), -1.0/c)
    def _munp(self, n, c):
        # Finite only for n < c.
        return special.gamma(1 - n / c)
    def _entropy(self, c):
        return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
    """A Johnson SB continuous random variable.

    %(before_notes)s

    See Also
    --------
    johnsonsu

    Notes
    -----
    The probability density function for `johnsonsb` is::

        johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))

    for ``0 < x < 1`` and ``a, b > 0``, and ``phi`` is the normal pdf.

    `johnsonsb` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a, b):
        # Any real a (a == a rejects NaN); b must be positive.
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # Change of variables from a standard normal via the logit map.
        logit = log(x/(1.0-x))
        return b*1.0/(x*(1-x))*_norm_pdf(a + b*logit)
    def _cdf(self, x, a, b):
        logit = log(x/(1.0-x))
        return _norm_cdf(a + b*logit)
    def _ppf(self, q, a, b):
        shifted = _norm_ppf(q) - a
        return 1.0 / (1 + exp(-1.0 / b * shifted))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
    """A Johnson SU continuous random variable.

    %(before_notes)s

    See Also
    --------
    johnsonsb

    Notes
    -----
    The probability density function for `johnsonsu` is::

        johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
                                 phi(a + b * log(x + sqrt(x**2 + 1)))

    for all ``x, a, b > 0``, and `phi` is the normal pdf.

    `johnsonsu` takes ``a`` and ``b`` as shape parameters.

    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a, b):
        # Any real a (a == a rejects NaN); b must be positive.
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # Change of variables from a standard normal via arcsinh:
        # log(x + sqrt(x**2 + 1)) == arcsinh(x).
        xsq = x*x
        asinh_x = log(x + sqrt(xsq+1))
        return b*1.0/sqrt(xsq+1.0)*_norm_pdf(a + b * asinh_x)
    def _cdf(self, x, a, b):
        return _norm_cdf(a + b * log(x + sqrt(x*x + 1)))
    def _ppf(self, q, a, b):
        standardized = (_norm_ppf(q) - a) / b
        return sinh(standardized)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
    """A Laplace continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `laplace` is::

        laplace.pdf(x) = 1/2 * exp(-abs(x))

    %(after_notes)s

    %(example)s

    """
    def _rvs(self):
        return self._random_state.laplace(0, 1, size=self._size)
    def _pdf(self, x):
        # Symmetric double exponential.
        return 0.5*exp(-abs(x))
    def _cdf(self, x):
        # Piecewise: exponential tail on each side of the origin.
        return where(x <= 0, 0.5*exp(x), 1.0-0.5*exp(-x))
    def _ppf(self, q):
        return where(q <= 0.5, log(2*q), -log(2*(1-q)))
    def _stats(self):
        return 0, 2, 0, 3
    def _entropy(self):
        return log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
    """A Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy_stable, levy_l

    Notes
    -----
    The probability density function for `levy` is::

        levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))

    for ``x > 0``.

    This is the same as the Levy-stable distribution with a=1/2 and b=1.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        coeff = 1 / sqrt(2*pi*x) / x
        return coeff * exp(-1/(2*x))
    def _cdf(self, x):
        # Equivalent to 2*norm.sf(sqrt(1/x))
        return special.erfc(sqrt(0.5 / x))
    def _ppf(self, q):
        # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
        ndtri_val = -special.ndtri(q/2)
        return 1.0 / (ndtri_val * ndtri_val)
    def _stats(self):
        # Extremely heavy-tailed: no finite moments.
        return inf, inf, nan, nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
    """A left-skewed Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy, levy_stable

    Notes
    -----
    The probability density function for `levy_l` is::

        levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))

    for ``x < 0``.

    This is the same as the Levy-stable distribution with a=1/2 and b=-1.

    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # Mirror image of the Levy pdf (support is x < 0).
        absx = abs(x)
        return 1/sqrt(2*pi*absx)/absx*exp(-1/(2*absx))
    def _cdf(self, x):
        absx = abs(x)
        return 2 * _norm_cdf(1 / sqrt(absx)) - 1
    def _ppf(self, q):
        normal_q = _norm_ppf((q + 1.0) / 2)
        return -1.0 / (normal_q * normal_q)
    def _stats(self):
        # Extremely heavy-tailed: no finite moments.
        return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
    """A Levy-stable continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy, levy_l

    Notes
    -----
    Levy-stable distribution (only random variates available -- ignore other
    docs)

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, alpha, beta):
        # Random variates via the Chambers-Mallows-Stuck method:
        # combine a uniform angle TH on (-pi/2, pi/2) with an
        # independent standard exponential W.
        sz = self._size
        TH = uniform.rvs(loc=-pi/2.0, scale=pi, size=sz)
        W = expon.rvs(size=sz)
        if alpha == 1:
            # Special closed form for the alpha == 1 (Cauchy-like) case.
            return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))

        ialpha = 1.0/alpha
        aTH = alpha*TH
        if beta == 0:
            # Symmetric case simplifies: no skewness offset angle.
            return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha

        # General skewed case: th0 is the shift angle induced by beta.
        val0 = beta*tan(pi*alpha/2)
        th0 = arctan(val0)/alpha
        val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
        res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
        return res3

    def _argcheck(self, alpha, beta):
        # At the extreme skewness values the support becomes one-sided.
        if beta == -1:
            self.b = 0.0
        elif beta == 1:
            self.a = 0.0
        return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)

    def _pdf(self, x, alpha, beta):
        # No closed-form density; only sampling is supported.
        raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
    """A logistic (or Sech-squared) continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `logistic` is::

        logistic.pdf(x) = exp(-x) / (1+exp(-x))**2

    `logistic` is a special case of `genlogistic` with ``c == 1``.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self):
        return self._random_state.logistic(size=self._size)
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        return -x - 2. * special.log1p(exp(-x))
    def _cdf(self, x):
        # The logistic sigmoid is exactly the cdf.
        return special.expit(x)
    def _ppf(self, q):
        # Logit function: inverse of the sigmoid.
        return -log(1.0/q-1)
    def _stats(self):
        return 0, pi*pi/3.0, 0, 6.0/5.0
    def _entropy(self):
        # http://en.wikipedia.org/wiki/Logistic_distribution
        return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
    """A log gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `loggamma` is::

        loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)

    for all ``x, c > 0``.

    `loggamma` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, c):
        # log of a gamma variate is loggamma-distributed.
        return log(self._random_state.gamma(c, size=self._size))
    def _pdf(self, x, c):
        return exp(c*x-exp(x)-gamln(c))
    def _cdf(self, x, c):
        # Map back to the gamma cdf via y = exp(x).
        return special.gammainc(c, exp(x))
    def _ppf(self, q, c):
        return log(special.gammaincinv(c, q))
    def _stats(self, c):
        # Cumulants are polygamma functions of the shape parameter.
        # See, for example, "A Statistical Study of Log-Gamma Distribution", by
        # Ping Shing Chan (thesis, McMaster University, 1993).
        mean = special.digamma(c)
        var = special.polygamma(1, c)
        skewness = special.polygamma(2, c) / np.power(var, 1.5)
        excess_kurtosis = special.polygamma(3, c) / (var*var)
        return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
    """A log-Laplace continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `loglaplace` is::
        loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
                             = c / 2 * x**(-c-1), for x >= 1
    for ``c > 0``.
    `loglaplace` takes ``c`` as a shape parameter.
    %(after_notes)s
    References
    ----------
    T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
    The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
    %(example)s
    """
    def _pdf(self, x, c):
        # The exponent flips sign at x == 1; fold that into one power term.
        half_c = c/2.0
        signed_c = where(x < 1, c, -c)
        return half_c*x**(signed_c-1)
    def _cdf(self, x, c):
        # Piecewise CDF: power branch below 1, mirrored tail above.
        lower = 0.5*x**c
        upper = 1-0.5*x**(-c)
        return where(x < 1, lower, upper)
    def _ppf(self, q, c):
        # Invert each CDF branch; the split is at q == 0.5 (x == 1).
        low = (2.0*q)**(1.0/c)
        high = (2*(1.0-q))**(-1.0/c)
        return where(q < 0.5, low, high)
    def _munp(self, n, c):
        # n-th raw moment: c**2 / (c**2 - n**2).
        c_sq = c**2
        return c_sq / (c_sq - n**2)
    def _entropy(self, c):
        return 1.0 + log(2.0/c)
# Module-level instance; support starts at a = 0 (x > 0).
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return -log(x)**2 / (2*s**2) + np.where(x == 0, 0, -log(s*x*sqrt(2*pi)))
class lognorm_gen(rv_continuous):
    """A lognormal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `lognorm` is::
        lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
    for ``x > 0``, ``s > 0``.
    `lognorm` takes ``s`` as a shape parameter.
    %(after_notes)s
    A common parametrization for a lognormal random variable ``Y`` is in
    terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
    unique normally distributed random variable ``X`` such that exp(X) = Y.
    This parametrization corresponds to setting ``s = sigma`` and ``scale =
    exp(mu)``.
    %(example)s
    """
    def _rvs(self, s):
        # exp of a scaled standard-normal draw.
        return exp(s * self._random_state.standard_normal(self._size))
    def _pdf(self, x, s):
        # Evaluate via the log-density for numerical stability.
        return exp(self._logpdf(x, s))
    def _logpdf(self, x, s):
        return _lognorm_logpdf(x, s)
    def _cdf(self, x, s):
        # log(X)/s is standard normal.
        return _norm_cdf(log(x) / s)
    def _ppf(self, q, s):
        return exp(s * _norm_ppf(q))
    def _stats(self, s):
        # Closed-form moments in terms of p = exp(s**2).
        p = exp(s*s)
        mu = sqrt(p)
        mu2 = p*(p-1)
        g1 = sqrt((p-1))*(2+p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self, s):
        return 0.5 * (1 + log(2*pi) + 2 * log(s))
# Module-level instance; support starts at a = 0 (x > 0).
lognorm = lognorm_gen(a=0.0, name='lognorm')
class gilbrat_gen(rv_continuous):
    """A Gilbrat continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gilbrat` is::
        gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
    `gilbrat` is a special case of `lognorm` with ``s = 1``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self):
        # exp of a standard normal draw (lognorm with s = 1).
        return exp(self._random_state.standard_normal(self._size))
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        # Reuse the shared lognormal log-density with s fixed at 1.
        return _lognorm_logpdf(x, 1.0)
    def _cdf(self, x):
        return _norm_cdf(log(x))
    def _ppf(self, q):
        return exp(_norm_ppf(q))
    def _stats(self):
        # Same closed forms as lognorm._stats evaluated at p = exp(1).
        p = np.e
        mu = sqrt(p)
        mu2 = p * (p - 1)
        g1 = sqrt((p - 1)) * (2 + p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self):
        return 0.5 * log(2 * pi) + 0.5
# Module-level instance; support starts at a = 0 (x > 0).
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
    """A Maxwell continuous random variable.
    %(before_notes)s
    Notes
    -----
    A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
    and given ``scale = a``, where ``a`` is the parameter used in the
    Mathworld description [1]_.
    The probability density function for `maxwell` is::
        maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
    for ``x > 0``.
    %(after_notes)s
    References
    ----------
    .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
    %(example)s
    """
    def _rvs(self):
        # Maxwell is chi with 3 degrees of freedom.
        return chi.rvs(3.0, size=self._size, random_state=self._random_state)
    def _pdf(self, x):
        # sqrt(2/pi) * x**2 * exp(-x**2 / 2)
        return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
    def _cdf(self, x):
        # CDF via the regularized lower incomplete gamma function.
        return special.gammainc(1.5, x*x/2.0)
    def _ppf(self, q):
        # Invert the CDF through gammaincinv.
        return sqrt(2*special.gammaincinv(1.5, q))
    def _stats(self):
        # Closed-form moments; val = 3*pi - 8 appears in both shape factors.
        val = 3*pi-8
        mean = 2*sqrt(2.0/pi)
        variance = 3-8/pi
        skew = sqrt(2)*(32-10*pi)/val**1.5
        kurt = (-12*pi*pi + 160*pi - 384) / val**2.0
        return mean, variance, skew, kurt
    def _entropy(self):
        return _EULER + 0.5*log(2*pi)-0.5
# Module-level instance; support starts at a = 0 (x > 0).
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
    """A Mielke's Beta-Kappa continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `mielke` is::
        mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
    for ``x > 0``.
    `mielke` takes ``k`` and ``s`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, k, s):
        # Numerator and denominator of the Beta-Kappa density.
        numer = k*x**(k-1.0)
        denom = (1.0+x**s)**(1.0+k*1.0/s)
        return numer / denom
    def _cdf(self, x, k, s):
        # Closed-form CDF: x**k / (1 + x**s)**(k/s).
        return x**k / (1.0+x**s)**(k*1.0/s)
    def _ppf(self, q, k, s):
        # Analytic inverse of the CDF.
        qsk = pow(q, s*1.0/k)
        return pow(qsk/(1.0-qsk), 1.0/s)
# Module-level instance; support starts at a = 0 (x > 0).
mielke = mielke_gen(a=0.0, name='mielke')
class nakagami_gen(rv_continuous):
    """A Nakagami continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `nakagami` is::
        nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
                              x**(2*nu-1) * exp(-nu*x**2)
    for ``x > 0``, ``nu > 0``.
    `nakagami` takes ``nu`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, nu):
        # 2 * nu**nu / Gamma(nu) * x**(2*nu - 1) * exp(-nu * x**2)
        return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
    def _cdf(self, x, nu):
        # X**2 is Gamma(nu, scale=1/nu); use the regularized incomplete gamma.
        return special.gammainc(nu, nu*x*x)
    def _ppf(self, q, nu):
        return sqrt(1.0/nu*special.gammaincinv(nu, q))
    def _stats(self, nu):
        # Closed-form mean; remaining moments expressed through it.
        mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
        mu2 = 1.0-mu*mu
        g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
        g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
        g2 /= nu*mu2**2.0
        return mu, mu2, g1, g2
# Module-level instance; support starts at a = 0 (x > 0).
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
    """A non-central chi-squared continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `ncx2` is::
        ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
                              * I[(df-2)/2](sqrt(nc*x))
    for ``x > 0``.
    `ncx2` takes ``df`` and ``nc`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, df, nc):
        return self._random_state.noncentral_chisquare(df, nc, self._size)
    def _logpdf(self, x, df, nc):
        # Delegates to the module-level helper defined earlier in this file.
        return _ncx2_log_pdf(x, df, nc)
    def _pdf(self, x, df, nc):
        return _ncx2_pdf(x, df, nc)
    def _cdf(self, x, df, nc):
        return _ncx2_cdf(x, df, nc)
    def _ppf(self, q, df, nc):
        # Inverse CDF via the non-central chi-squared quantile function.
        return special.chndtrix(q, df, nc)
    def _stats(self, df, nc):
        # Closed-form moments; val = df + 2*nc is the variance/2.
        val = df + 2.0*nc
        return (df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5,
                12.0*(val+2*nc)/val**2.0)
# Module-level instance; support starts at a = 0 (x > 0).
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
    """A non-central F distribution continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `ncf` is::
        ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
                    df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
                    (df2+df1*x)**(-(df1+df2)/2) *
                    gamma(df1/2)*gamma(1+df2/2) *
                    L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
                    (B(v1/2, v2/2) * gamma((v1+v2)/2))
    for ``df1, df2, nc > 0``.
    `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, dfn, dfd, nc):
        return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
    def _pdf_skip(self, x, dfn, dfd, nc):
        # Deliberately disabled (note the _skip suffix): the generic pdf is
        # used instead.  Kept for reference.
        n1, n2 = dfn, dfd
        term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
        term -= gamln((n1+n2)/2.0)
        Px = exp(term)
        Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
        Px *= (n2+n1*x)**(-(n1+n2)/2)
        Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
        Px /= special.beta(n1/2, n2/2)
        # This function does not have a return. Drop it for now, the generic
        # function seems to work OK.
    def _cdf(self, x, dfn, dfd, nc):
        return special.ncfdtr(dfn, dfd, nc, x)
    def _ppf(self, q, dfn, dfd, nc):
        return special.ncfdtri(dfn, dfd, nc, q)
    def _munp(self, n, dfn, dfd, nc):
        # Raw moments via the confluent hypergeometric function 1F1.
        val = (dfn * 1.0/dfd)**n
        term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
        val *= exp(-nc / 2.0+term)
        val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
        return val
    def _stats(self, dfn, dfd, nc):
        # Mean finite for dfd > 2, variance for dfd > 4; inf otherwise.
        mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
        mu2 = where(dfd <= 4, inf, 2*(dfd*1.0/dfn)**2.0 *
                    ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
                    ((dfd-2.0)**2.0 * (dfd-4.0)))
        return mu, mu2, None, None
# Module-level instance; support starts at a = 0 (x > 0).
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
    """A Student's T continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `t` is::
                                       gamma((df+1)/2)
        t.pdf(x, df) = ---------------------------------------------------
                       sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
    for ``df > 0``.
    `t` takes ``df`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, df):
        # numpy provides a direct Student's t sampler.
        return self._random_state.standard_t(df, size=self._size)
    def _pdf(self, x, df):
        # Normalizing constant divided by the polynomial kernel.
        r = asarray(df*1.0)
        norm_const = exp(gamln((r+1)/2)-gamln(r/2))
        kernel = sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
        return norm_const / kernel
    def _logpdf(self, x, df):
        # Same factorization as _pdf, carried out in log space.
        r = df*1.0
        log_norm = gamln((r+1)/2)-gamln(r/2)
        log_kernel = 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
        return log_norm - log_kernel
    def _cdf(self, x, df):
        return special.stdtr(df, x)
    def _sf(self, x, df):
        # By symmetry, sf(x) = cdf(-x).
        return special.stdtr(df, -x)
    def _ppf(self, q, df):
        return special.stdtrit(df, q)
    def _isf(self, q, df):
        # Symmetric inverse survival function.
        return -special.stdtrit(df, q)
    def _stats(self, df):
        # Variance finite for df > 2; skewness defined for df > 3;
        # excess kurtosis finite for df > 4.
        mu2 = where(df > 2, df / (df-2.0), inf)
        g1 = where(df > 3, 0.0, nan)
        g2 = where(df > 4, 6.0/(df-4.0), nan)
        return 0, mu2, g1, g2
# Module-level instance exposing the public `t` distribution.
t = t_gen(name='t')
class nct_gen(rv_continuous):
    """A non-central Student's T continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `nct` is::
                                            df**(df/2) * gamma(df+1)
        nct.pdf(x, df, nc) = ----------------------------------------------------
                             2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
    for ``df > 0``.
    `nct` takes ``df`` and ``nc`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, df, nc):
        # nc == nc filters out NaN non-centrality; nc itself may be any real.
        return (df > 0) & (nc == nc)
    def _rvs(self, df, nc):
        # T = N(nc, 1) / sqrt(chi2(df)/df)
        sz, rndm = self._size, self._random_state
        n = norm.rvs(loc=nc, size=sz, random_state=rndm)
        c2 = chi2.rvs(df, size=sz, random_state=rndm)
        return n * sqrt(df) / sqrt(c2)
    def _pdf(self, x, df, nc):
        # Density via two confluent hypergeometric (1F1) terms.
        n = df*1.0
        nc = nc*1.0
        x2 = x*x
        ncx2 = nc*nc*x2
        fac1 = n + x2
        trm1 = n/2.*log(n) + gamln(n+1)
        trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
        Px = exp(trm1)
        valF = ncx2 / (2*fac1)
        trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1, 1.5, valF)
        trm1 /= asarray(fac1*special.gamma((n+1)/2))
        trm2 = special.hyp1f1((n+1)/2, 0.5, valF)
        trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1))
        Px *= trm1+trm2
        return Px
    def _cdf(self, x, df, nc):
        return special.nctdtr(df, nc, x)
    def _ppf(self, q, df, nc):
        return special.nctdtrit(df, nc, q)
    def _stats(self, df, nc, moments='mv'):
        #
        # See D. Hogben, R.S. Pinkham, and M.B. Wilk,
        # 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
        # e.g. http://www.jstor.org/stable/2332772 (gated)
        #
        mu, mu2, g1, g2 = None, None, None, None
        # c_ij coefficients from the reference above.
        gfac = gam(df/2.-0.5) / gam(df/2.)
        c11 = sqrt(df/2.) * gfac
        c20 = df / (df-2.)
        c22 = c20 - c11*c11
        # Mean exists for df > 1, variance for df > 2.
        mu = np.where(df > 1, nc*c11, np.inf)
        mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
        if 's' in moments:
            c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
            c31t = 3.*df / (df-2.) / (df-3.)
            mu3 = (c33t*nc*nc + c31t) * c11*nc
            g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
        #kurtosis
        if 'k' in moments:
            c44 = df*df / (df-2.) / (df-4.)
            c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
            c44 -= 3.*c11**4
            c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
            c42 *= 6.*df / (df-2.)
            c40 = 3.*df*df / (df-2.) / (df-4.)
            mu4 = c44 * nc**4 + c42*nc**2 + c40
            g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
        return mu, mu2, g1, g2
# Module-level instance exposing the public `nct` distribution.
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
    """A Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pareto` is::
        pareto.pdf(x, b) = b / x**(b+1)
    for ``x >= 1``, ``b > 0``.
    `pareto` takes ``b`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, b):
        return b * x**(-b-1)
    def _cdf(self, x, b):
        return 1 - x**(-b)
    def _ppf(self, q, b):
        return pow(1-q, -1.0/b)
    def _stats(self, b, moments='mv'):
        # Each moment only exists above a threshold on b; entries are filled
        # with inf/nan where undefined (extract/place work on the valid mask).
        mu, mu2, g1, g2 = None, None, None, None
        if 'm' in moments:
            mask = b > 1
            bt = extract(mask, b)
            mu = valarray(shape(b), value=inf)
            place(mu, mask, bt / (bt-1.0))
        if 'v' in moments:
            mask = b > 2
            bt = extract(mask, b)
            mu2 = valarray(shape(b), value=inf)
            place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
        if 's' in moments:
            mask = b > 3
            bt = extract(mask, b)
            g1 = valarray(shape(b), value=nan)
            vals = 2 * (bt + 1.0) * sqrt(bt - 2.0) / ((bt - 3.0) * sqrt(bt))
            place(g1, mask, vals)
        if 'k' in moments:
            mask = b > 4
            bt = extract(mask, b)
            g2 = valarray(shape(b), value=nan)
            vals = (6.0*polyval([1.0, 1.0, -6, -2], bt) /
                    polyval([1.0, -7.0, 12.0, 0.0], bt))
            place(g2, mask, vals)
        return mu, mu2, g1, g2
    def _entropy(self, c):
        # NOTE(review): the parameter is named `c` here but is the same shape
        # parameter called `b` in the other methods.
        return 1 + 1.0/c - log(c)
# Module-level instance; support starts at a = 1 (x >= 1).
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
    """A Lomax (Pareto of the second kind) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The Lomax distribution is a special case of the Pareto distribution, with
    (loc=-1.0).
    The probability density function for `lomax` is::
        lomax.pdf(x, c) = c / (1+x)**(c+1)
    for ``x >= 0``, ``c > 0``.
    `lomax` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        return c*1.0/(1.0+x)**(c+1.0)
    def _logpdf(self, x, c):
        # log1p keeps precision for small x.
        return log(c) - (c+1)*special.log1p(x)
    def _cdf(self, x, c):
        # 1 - (1+x)**(-c), written with expm1/log1p for stability.
        return -special.expm1(-c*special.log1p(x))
    def _sf(self, x, c):
        return exp(-c*special.log1p(x))
    def _logsf(self, x, c):
        return -c*special.log1p(x)
    def _ppf(self, q, c):
        return special.expm1(-special.log1p(-q)/c)
    def _stats(self, c):
        # Delegate to the shifted Pareto distribution (loc = -1).
        mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
        return mu, mu2, g1, g2
    def _entropy(self, c):
        return 1+1.0/c-log(c)
# Module-level instance; support starts at a = 0 (x >= 0).
lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
    """A pearson type III continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pearson3` is::
        pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
            (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
    where::
            beta = 2 / (skew * stddev)
            alpha = (stddev * beta)**2
            zeta = loc - alpha / beta
    `pearson3` takes ``skew`` as a shape parameter.
    %(after_notes)s
    %(example)s
    References
    ----------
    R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
    Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
    Resources Research, Vol.27, 3149-3158 (1991).
    L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
    Vol.1, 191-198 (1930).
    "Using Modern Computing Tools to Fit the Pearson Type III Distribution to
    Aviation Loads Data", Office of Aviation Research (2003).
    """
    def _preprocess(self, x, skew):
        # Shared setup for all methods: broadcast inputs, split them into a
        # near-zero-skew mask (handled as normal) and the rest (handled as a
        # shifted/scaled gamma), and compute the gamma parameters.
        # The real 'loc' and 'scale' are handled in the calling pdf(...). The
        # local variables 'loc' and 'scale' within pearson3._pdf are set to
        # the defaults just to keep them as part of the equations for
        # documentation.
        loc = 0.0
        scale = 1.0
        # If skew is small, return _norm_pdf. The divide between pearson3
        # and norm was found by brute force and is approximately a skew of
        # 0.000016. No one, I hope, would actually use a skew value even
        # close to this small.
        norm2pearson_transition = 0.000016
        ans, x, skew = np.broadcast_arrays([1.0], x, skew)
        ans = ans.copy()
        mask = np.absolute(skew) < norm2pearson_transition
        invmask = ~mask
        # beta/alpha/zeta are only computed for the non-normal entries.
        beta = 2.0 / (skew[invmask] * scale)
        alpha = (scale * beta)**2
        zeta = loc - alpha / beta
        transx = beta * (x[invmask] - zeta)
        return ans, x, transx, skew, mask, invmask, beta, alpha, zeta
    def _argcheck(self, skew):
        # The _argcheck function in rv_continuous only allows positive
        # arguments. The skew argument for pearson3 can be zero (which I want
        # to handle inside pearson3._pdf) or negative. So just return True
        # for all skew args.
        return np.ones(np.shape(skew), dtype=bool)
    def _stats(self, skew):
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess([1], skew))
        # Standard gamma moment formulas in the (alpha, beta, zeta)
        # parametrization; sign of beta carries the skew direction.
        m = zeta + alpha / beta
        v = alpha / (beta**2)
        s = 2.0 / (alpha**0.5) * np.sign(beta)
        k = 6.0 / alpha
        return m, v, s, k
    def _pdf(self, x, skew):
        # Do the calculation in _logpdf since helps to limit
        # overflow/underflow problems
        ans = exp(self._logpdf(x, skew))
        if ans.ndim == 0:
            if np.isnan(ans):
                return 0.0
            return ans
        ans[np.isnan(ans)] = 0.0
        return ans
    def _logpdf(self, x, skew):
        #   PEARSON3 logpdf                           GAMMA logpdf
        #   np.log(abs(beta))
        # + (alpha - 1)*log(beta*(x - zeta))          + (a - 1)*log(x)
        # - beta*(x - zeta)                           - x
        # - gamln(alpha)                              - gamln(a)
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess(x, skew))
        ans[mask] = np.log(_norm_pdf(x[mask]))
        ans[invmask] = log(abs(beta)) + gamma._logpdf(transx, alpha)
        return ans
    def _cdf(self, x, skew):
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess(x, skew))
        ans[mask] = _norm_cdf(x[mask])
        ans[invmask] = gamma._cdf(transx, alpha)
        return ans
    def _rvs(self, skew):
        # Sample from a standard normal (tiny skew) or a transformed gamma.
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess([0], skew))
        if mask[0]:
            return self._random_state.standard_normal(self._size)
        ans = self._random_state.standard_gamma(alpha, self._size)/beta + zeta
        if ans.size == 1:
            return ans[0]
        return ans
    def _ppf(self, q, skew):
        ans, q, transq, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess(q, skew))
        ans[mask] = _norm_ppf(q[mask])
        ans[invmask] = special.gammaincinv(alpha, q[invmask])/beta + zeta
        return ans
# Module-level instance exposing the public `pearson3` distribution.
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
    """A power-function continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlaw` is::
        powerlaw.pdf(x, a) = a * x**(a-1)
    for ``0 <= x <= 1``, ``a > 0``.
    `powerlaw` takes ``a`` as a shape parameter.
    %(after_notes)s
    `powerlaw` is a special case of `beta` with ``b == 1``.
    %(example)s
    """
    def _pdf(self, x, a):
        # a * x**(a - 1) on [0, 1]
        return a*x**(a-1.0)
    def _logpdf(self, x, a):
        # xlogy handles the x == 0 boundary cleanly.
        return log(a) + special.xlogy(a - 1, x)
    def _cdf(self, x, a):
        return x**(a*1.0)
    def _logcdf(self, x, a):
        return a*log(x)
    def _ppf(self, q, a):
        # Analytic inverse of the CDF.
        return pow(q, 1.0/a)
    def _stats(self, a):
        # Closed-form mean, variance, skewness, excess kurtosis.
        mean = a / (a + 1.0)
        variance = a / (a + 2.0) / (a + 1.0) ** 2
        skew = -2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a)
        kurt = 6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4))
        return mean, variance, skew, kurt
    def _entropy(self, a):
        return 1 - 1.0/a - log(a)
# Module-level instance; finite support [0, 1].
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
    """A power log-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlognorm` is::
        powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
                                                (Phi(-log(x)/s))**(c-1),
    where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
    and ``x > 0``, ``s, c > 0``.
    `powerlognorm` takes ``c`` and ``s`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c, s):
        # c/(x*s) * phi(log(x)/s) * Phi(-log(x)/s)**(c - 1)
        return (c/(x*s) * _norm_pdf(log(x)/s) *
                pow(_norm_cdf(-log(x)/s), c*1.0-1.0))
    def _cdf(self, x, c, s):
        return 1.0 - pow(_norm_cdf(-log(x)/s), c*1.0)
    def _ppf(self, q, c, s):
        # Analytic inverse of the CDF.
        return exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
# Module-level instance; support starts at a = 0 (x > 0).
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
    """A power normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powernorm` is::
        powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
    where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
    and ``x > 0``, ``c > 0``.
    `powernorm` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        # c * phi(x) * Phi(-x)**(c - 1)
        return (c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0)))
    def _logpdf(self, x, c):
        return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
    def _cdf(self, x, c):
        return 1.0-_norm_cdf(-x)**(c*1.0)
    def _ppf(self, q, c):
        # Analytic inverse of the CDF.
        return -_norm_ppf(pow(1.0 - q, 1.0 / c))
# Module-level instance exposing the public `powernorm` distribution.
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
    """An R-distributed continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rdist` is::
        rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
    for ``-1 <= x <= 1``, ``c > 0``.
    `rdist` takes ``c`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x, c):
        return np.power((1.0 - x**2), c / 2.0 - 1) / special.beta(0.5, c / 2.0)
    def _cdf(self, x, c):
        # Closed form via the Gauss hypergeometric function 2F1.
        term1 = x / special.beta(0.5, c / 2.0)
        res = 0.5 + term1 * special.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
        # There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
        # Use the generic implementation in that case. See gh-1285 for
        # background.
        if any(np.isnan(res)):
            return rv_continuous._cdf(self, x, c)
        return res
    def _munp(self, n, c):
        # Odd moments vanish by symmetry; even moments via beta functions.
        numerator = (1 - (n % 2)) * special.beta((n + 1.0) / 2, c / 2.0)
        return numerator / special.beta(1. / 2, c / 2.)
# Module-level instance; finite support [-1, 1].
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
    """A Rayleigh continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rayleigh` is::
        rayleigh.pdf(r) = r * exp(-r**2/2)
    for ``x >= 0``.
    `rayleigh` is a special case of `chi` with ``df == 2``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self):
        # Rayleigh is chi with 2 degrees of freedom.
        return chi.rvs(2, size=self._size, random_state=self._random_state)
    def _pdf(self, r):
        # r * exp(-r**2 / 2)
        return r * exp(-0.5 * r**2)
    def _cdf(self, r):
        # expm1 keeps precision for small r.
        return -special.expm1(-0.5 * r**2)
    def _ppf(self, q):
        # Analytic inverse of the CDF, stable near q == 0 via log1p.
        return sqrt(-2 * special.log1p(-q))
    def _sf(self, r):
        return exp(-0.5 * r**2)
    def _isf(self, q):
        return sqrt(-2 * log(q))
    def _stats(self):
        # Closed-form moments; val = 4 - pi appears in both shape factors.
        val = 4 - pi
        mean = np.sqrt(pi/2)
        variance = val/2
        skew = 2*(pi-3)*sqrt(pi)/val**1.5
        kurt = 6*pi/val-16/val**2
        return mean, variance, skew, kurt
    def _entropy(self):
        return _EULER/2.0 + 1 - 0.5*log(2)
# Module-level instance; support starts at a = 0 (x >= 0).
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
    """A reciprocal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `reciprocal` is::
        reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
    for ``a <= x <= b``, ``a, b > 0``.
    `reciprocal` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # NOTE(review): _argcheck caches the support and the normalization
        # constant d = log(b/a) on self; the other methods rely on this
        # side effect having run first.
        self.a = a
        self.b = b
        self.d = log(b*1.0 / a)
        return (a > 0) & (b > 0) & (b > a)
    def _pdf(self, x, a, b):
        return 1.0 / (x * self.d)
    def _logpdf(self, x, a, b):
        return -log(x) - log(self.d)
    def _cdf(self, x, a, b):
        return (log(x)-log(a)) / self.d
    def _ppf(self, q, a, b):
        # Geometric interpolation between a and b.
        return a*pow(b*1.0/a, q)
    def _munp(self, n, a, b):
        # Raw moments: (b**n - a**n) / (n * log(b/a)).
        return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
    def _entropy(self, a, b):
        return 0.5*log(a*b)+log(log(b/a))
# Module-level instance exposing the public `reciprocal` distribution.
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
    """A Rice continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rice` is::
        rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
    for ``x > 0``, ``b > 0``.
    `rice` takes ``b`` as a shape parameter.
    %(after_notes)s
    The Rice distribution describes the length, ``r``, of a 2-D vector
    with components ``(U+u, V+v)``, where ``U, V`` are constant, ``u, v``
    are independent Gaussian random variables with standard deviation
    ``s``.  Let ``R = (U**2 + V**2)**0.5``. Then the pdf of ``r`` is
    ``rice.pdf(x, R/s, scale=s)``.
    %(example)s
    """
    def _argcheck(self, b):
        # b == 0 is allowed (reduces to Rayleigh).
        return b >= 0
    def _rvs(self, b):
        # http://en.wikipedia.org/wiki/Rice_distribution
        sz = self._size if self._size else 1
        t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2, sz))
        return np.sqrt((t*t).sum(axis=0))
    def _cdf(self, x, b):
        # X**2 is non-central chi-squared with 2 df and nc = b**2.
        return chndtr(np.square(x), 2, np.square(b))
    def _ppf(self, q, b):
        return np.sqrt(chndtrix(q, 2, np.square(b)))
    def _pdf(self, x, b):
        # We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
        # The factor of exp(-xb) is then included in the i0e function
        # in place of the modified Bessel function, i0, improving
        # numerical stability for large values of xb.
        return x * exp(-(x-b)*(x-b)/2.0) * special.i0e(x*b)
    def _munp(self, n, b):
        # Raw moments via the confluent hypergeometric function 1F1.
        nd2 = n/2.0
        n1 = 1 + nd2
        b2 = b*b/2.0
        return (2.0**(nd2) * exp(-b2) * special.gamma(n1) *
                special.hyp1f1(n1, 1, b2))
# Module-level instance; support starts at a = 0 (x > 0).
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
    """A reciprocal inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `recipinvgauss` is::
        recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
    for ``x >= 0``.
    `recipinvgauss` takes ``mu`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, mu):
        # Reciprocal of a Wald (inverse Gaussian) draw.
        return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
    def _pdf(self, x, mu):
        return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
    def _logpdf(self, x, mu):
        return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
    def _cdf(self, x, mu):
        # CDF expressed through two normal CDF terms.
        trm1 = 1.0/mu - x
        trm2 = 1.0/mu + x
        isqx = 1.0/sqrt(x)
        return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
# Module-level instance; support starts at a = 0 (x >= 0).
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
    """A semicircular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `semicircular` is::
        semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
    for ``-1 <= x <= 1``.
    %(after_notes)s
    %(example)s
    """
    def _pdf(self, x):
        # (2/pi) * sqrt(1 - x**2): a half-disc of unit radius.
        return 2.0/pi*sqrt(1-x*x)
    def _cdf(self, x):
        # Closed form from integrating the circular segment.
        return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
    def _stats(self):
        # mean, variance, skewness, excess kurtosis
        return 0, 0.25, 0, -1.0
    def _entropy(self):
        # log(pi) - 0.5, precomputed to full double precision.
        return 0.64472988584940017414
# Module-level instance; finite support [-1, 1].
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class triang_gen(rv_continuous):
    """A triangular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The triangular distribution can be represented with an up-sloping line from
    ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
    to ``(loc+scale)``.
    `triang` takes ``c`` as a shape parameter.
    %(after_notes)s
    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.
    %(example)s
    """
    def _rvs(self, c):
        return self._random_state.triangular(0, c, 1, self._size)
    def _argcheck(self, c):
        # The mode c may sit at either edge of [0, 1]; the degenerate shapes
        # c == 0 and c == 1 are handled explicitly in _pdf/_cdf below.
        return (c >= 0) & (c <= 1)
    def _pdf(self, x, c):
        # The generic two-branch formula divides by c or (1 - c), which
        # produced NaN at x == c == 1 (and divide warnings at c == 0).
        # Select the degenerate single-slope shapes explicitly instead.
        x, c = np.asarray(x, dtype=float), np.asarray(c, dtype=float)
        with np.errstate(divide='ignore', invalid='ignore'):
            return np.select(
                [c == 0, x < c, (x >= c) & (c != 1), c == 1],
                [2 - 2*x, 2*x/c, 2*(1 - x)/(1 - c), 2*x])
    def _cdf(self, x, c):
        # Same edge handling as _pdf: pure down-slope (c == 0), generic
        # two-piece quadratic, pure up-slope (c == 1, cdf = x**2).
        x, c = np.asarray(x, dtype=float), np.asarray(c, dtype=float)
        with np.errstate(divide='ignore', invalid='ignore'):
            return np.select(
                [c == 0, x < c, (x >= c) & (c != 1), c == 1],
                [2*x - x*x, x*x/c, (x*x - 2*x + c)/(c - 1), x*x])
    def _ppf(self, q, c):
        # Invert each quadratic branch; split at q == c (i.e. x == c).
        return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
    def _stats(self, c):
        # mean, variance, skewness, excess kurtosis (closed forms).
        return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
            (5 * np.power((1.0-c+c*c), 1.5)), -3.0/5.0
    def _entropy(self, c):
        # 1/2 - log(2), independent of the mode c.
        return 0.5-log(2)
# Module-level instance; finite support [0, 1].
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
    """A truncated exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `truncexpon` is::
        truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
    for ``0 < x < b``.
    `truncexpon` takes ``b`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, b):
        # NOTE(review): the upper support bound is cached on self here.
        self.b = b
        return (b > 0)
    def _pdf(self, x, b):
        # -expm1(-b) == 1 - exp(-b), accurate for small b.
        return exp(-x)/(-special.expm1(-b))
    def _logpdf(self, x, b):
        return -x - log(-special.expm1(-b))
    def _cdf(self, x, b):
        return special.expm1(-x)/special.expm1(-b)
    def _ppf(self, q, b):
        return -special.log1p(q*special.expm1(-b))
    def _munp(self, n, b):
        # wrong answer with formula, same as in continuous.pdf
        # return gam(n+1)-special.gammainc(1+n, b)
        if n == 1:
            return (1-(b+1)*exp(-b))/(-special.expm1(-b))
        elif n == 2:
            return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-special.expm1(-b))
        else:
            # return generic for higher moments
            # return rv_continuous._mom1_sc(self, n, b)
            return self._mom1_sc(n, b)
    def _entropy(self, b):
        eB = exp(b)
        return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
# Module-level instance; support starts at a = 0.
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
    """A truncated normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The standard form of this distribution is a standard normal truncated to
    the range [a, b] --- notice that a and b are defined over the domain of the
    standard normal.  To convert clip values for a specific mean and standard
    deviation, use::
        a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    `truncnorm` takes ``a`` and ``b`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, a, b):
        # NOTE(review): _argcheck caches the support, the tail probabilities
        # and the normalization mass self._delta on the instance; the other
        # methods depend on this side effect having run first.
        self.a = a
        self.b = b
        self._nb = _norm_cdf(b)
        self._na = _norm_cdf(a)
        self._sb = _norm_sf(b)
        self._sa = _norm_sf(a)
        if self.a > 0:
            # Work with survival functions in the right tail for accuracy.
            self._delta = -(self._sb - self._sa)
        else:
            self._delta = self._nb - self._na
        self._logdelta = log(self._delta)
        return (a != b)
    def _pdf(self, x, a, b):
        # Standard normal density renormalized by the truncated mass.
        return _norm_pdf(x) / self._delta
    def _logpdf(self, x, a, b):
        return _norm_logpdf(x) - self._logdelta
    def _cdf(self, x, a, b):
        return (_norm_cdf(x) - self._na) / self._delta
    def _ppf(self, q, a, b):
        if self.a > 0:
            # Right-tail formulation mirrors the _delta computation above.
            return _norm_isf(q*self._sb + self._sa*(1.0-q))
        else:
            return _norm_ppf(q*self._nb + self._na*(1.0-q))
    def _stats(self, a, b):
        nA, nB = self._na, self._nb
        d = nB - nA
        pA, pB = _norm_pdf(a), _norm_pdf(b)
        mu = (pA - pB) / d   # correction sign
        mu2 = 1 + (a*pA - b*pB) / d - mu*mu
        return mu, mu2, None, None
# Module-level instance exposing the public `truncnorm` distribution.
truncnorm = truncnorm_gen(name='truncnorm')
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
    """A Tukey-Lamdba continuous random variable.
    %(before_notes)s
    Notes
    -----
    A flexible distribution, able to represent and interpolate between the
    following distributions:
    - Cauchy                (lam=-1)
    - logistic              (lam=0.0)
    - approx Normal         (lam=0.14)
    - u-shape               (lam = 0.5)
    - uniform from -1 to 1  (lam = 1)
    `tukeylambda` takes ``lam`` as a shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, lam):
        # Any real lam is valid.
        return np.ones(np.shape(lam), dtype=bool)
    def _pdf(self, x, lam):
        # Density expressed through the CDF (tklmbda); support is bounded
        # by 1/lam when lam > 0.
        Fx = asarray(special.tklmbda(x, lam))
        Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0)
        Px = 1.0/asarray(Px)
        return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0)
    def _cdf(self, x, lam):
        return special.tklmbda(x, lam)
    def _ppf(self, q, lam):
        # Quantile function: boxcox(q) - boxcox(1 - q).
        return special.boxcox(q, lam) - special.boxcox1p(-q, lam)
    def _stats(self, lam):
        # Variance/kurtosis helpers are module-level functions in this file.
        return 0, _tlvar(lam), 0, _tlkurt(lam)
    def _entropy(self, lam):
        def integ(p):
            return log(pow(p, lam-1)+pow(1-p, lam-1))
        return integrate.quad(integ, 0, 1)[0]
# Module-level instance exposing the public `tukeylambda` distribution.
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
    """A uniform continuous random variable.
    This distribution is constant between `loc` and ``loc + scale``.
    %(before_notes)s
    %(example)s
    """
    def _rvs(self):
        # Draw directly from U(0, 1).
        return self._random_state.uniform(0.0, 1.0, self._size)
    def _pdf(self, x):
        # Constant density 1 on the support; (x == x) yields an array of the
        # right shape (and 0.0 at NaN inputs, matching the original form).
        return (x == x) * 1.0
    def _cdf(self, x):
        # CDF is the identity on [0, 1].
        return x
    def _ppf(self, q):
        # Quantile function is also the identity.
        return q
    def _stats(self):
        # mean, variance, skewness, excess kurtosis of U(0, 1)
        return 0.5, 1.0/12, 0, -1.2
    def _entropy(self):
        # log(1) == 0 for the unit interval.
        return 0.0
# Module-level instance; standard support [0, 1].
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
    """A Von Mises continuous random variable.
    %(before_notes)s
    Notes
    -----
    If `x` is not in range or `loc` is not in range it assumes they are angles
    and converts them to [-pi, pi] equivalents.
    The probability density function for `vonmises` is::
        vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
    for ``-pi <= x <= pi``, ``kappa > 0``.
    `vonmises` takes ``kappa`` as a shape parameter.
    %(after_notes)s
    See Also
    --------
    vonmises_line : The same distribution, defined on a [-pi, pi] segment
                    of the real line.
    %(example)s
    """
    def _rvs(self, kappa):
        return self._random_state.vonmises(0.0, kappa, size=self._size)
    def _pdf(self, x, kappa):
        # exp(kappa*cos(x)) normalized by 2*pi*I0(kappa).
        return exp(kappa * cos(x)) / (2*pi*special.i0(kappa))
    def _cdf(self, x, kappa):
        # Delegates to the compiled helper imported at module level.
        return vonmises_cython.von_mises_cdf(kappa, x)
    def _stats_skip(self, kappa):
        # Disabled (_skip suffix): generic machinery computes moments instead.
        return 0, None, 0, None
# Circular variant (default) and the line-segment variant on [-pi, pi].
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
    """A Wald continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wald` is::
        wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
    for ``x > 0``.
    `wald` is a special case of `invgauss` with ``mu == 1``.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self):
        return self._random_state.wald(1.0, 1.0, size=self._size)
    def _pdf(self, x):
        # Delegate to invgauss with mu fixed at 1.
        return invgauss._pdf(x, 1.0)
    def _logpdf(self, x):
        return invgauss._logpdf(x, 1.0)
    def _cdf(self, x):
        return invgauss._cdf(x, 1.0)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis (closed forms at mu = 1).
        return 1.0, 1.0, 3.0, 15.0
# Module-level instance; support starts at a = 0 (x > 0).
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
    """A wrapped Cauchy continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `wrapcauchy` is::

        wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))

    for ``0 <= x <= 2*pi``, ``0 < c < 1``.

    `wrapcauchy` takes ``c`` as a shape parameter.

    %(after_notes)s

    %(example)s

    """

    def _argcheck(self, c):
        # Shape parameter must lie strictly inside the unit interval.
        return (c > 0) & (c < 1)

    def _pdf(self, x, c):
        return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))

    def _cdf(self, x, c):
        # The CDF is evaluated piecewise: x < pi uses the direct arctan
        # form, x >= pi uses the reflected form around 2*pi.  Values are
        # gathered with `extract` and scattered back with `place`.
        output = 0.0*x
        val = (1.0+c)/(1.0-c)
        c1 = x < pi
        c2 = 1-c1
        xp = extract(c1, x)
        xn = extract(c2, x)
        if (any(xn)):
            valn = extract(c2, np.ones_like(x)*val)
            # reflect into [0, pi) before applying the arctan form
            xn = 2*pi - xn
            yn = tan(xn/2.0)
            on = 1.0-1.0/pi*arctan(valn*yn)
            place(output, c2, on)
        if (any(xp)):
            valp = extract(c1, np.ones_like(x)*val)
            yp = tan(xp/2.0)
            op = 1.0/pi*arctan(valp*yp)
            place(output, c1, op)
        return output

    def _ppf(self, q, c):
        # Inverse of the piecewise CDF above; q < 1/2 selects the direct
        # branch, otherwise the branch reflected about 2*pi.
        val = (1.0-c)/(1.0+c)
        rcq = 2*arctan(val*tan(pi*q))
        rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
        return where(q < 1.0/2, rcq, rcmq)

    def _entropy(self, c):
        return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
    """A generalized normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `gennorm` is [1]_::

                                     beta
        gennorm.pdf(x, beta) =  ---------------  exp(-|x|**beta)
                                2 gamma(1/beta)

    `gennorm` takes ``beta`` as a shape parameter.
    For ``beta = 1``, it is identical to a Laplace distribution.
    For ``beta = 2``, it is identical to a normal distribution
    (with ``scale=1/sqrt(2)``).

    See Also
    --------
    laplace : Laplace distribution
    norm : normal distribution

    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1

    %(example)s

    """

    def _pdf(self, x, beta):
        # Exponentiate the log-density to avoid duplicating the formula.
        return np.exp(self._logpdf(x, beta))

    def _logpdf(self, x, beta):
        inv_beta = 1. / beta
        return np.log(.5 * beta) - special.gammaln(inv_beta) - abs(x)**beta

    def _cdf(self, x, beta):
        half_sign = .5 * np.sign(x)
        # evaluating (.5 + half_sign) first prevents numerical cancellation
        return (.5 + half_sign) - half_sign * special.gammaincc(1. / beta,
                                                                abs(x)**beta)

    def _ppf(self, x, beta):
        sign = np.sign(x - .5)
        # evaluating (1. + sign) first prevents numerical cancellation
        arg = (1. + sign) - 2.*sign*x
        return sign * special.gammainccinv(1. / beta, arg)**(1. / beta)

    def _sf(self, x, beta):
        # Survival function via the symmetry of the distribution.
        return self._cdf(-x, beta)

    def _isf(self, x, beta):
        return -self._ppf(x, beta)

    def _stats(self, beta):
        # Central moments from log-gamma values of 1/beta, 3/beta, 5/beta.
        lg1, lg3, lg5 = special.gammaln([1./beta, 3./beta, 5./beta])
        variance = np.exp(lg3 - lg1)
        excess_kurtosis = np.exp(lg5 + lg1 - 2. * lg3) - 3.
        return 0., variance, 0., excess_kurtosis

    def _entropy(self, beta):
        return 1. / beta - np.log(.5 * beta) + special.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
class halfgennorm_gen(rv_continuous):
    """The upper half of a generalized normal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `halfgennorm` is::

                                        beta
        halfgennorm.pdf(x, beta) =  -------------  exp(-|x|**beta)
                                    gamma(1/beta)

    `halfgennorm` takes ``beta`` as a shape parameter.
    For ``beta = 1``, it is identical to an exponential distribution.
    For ``beta = 2``, it is identical to a half normal distribution
    (with ``scale=1/sqrt(2)``).

    See Also
    --------
    gennorm : generalized normal distribution
    expon : exponential distribution
    halfnorm : half normal distribution

    References
    ----------
    .. [1] "Generalized normal distribution, Version 1",
           https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1

    %(example)s

    """

    def _pdf(self, x, beta):
        # Exponentiate the log-density to avoid duplicating the formula.
        return np.exp(self._logpdf(x, beta))

    def _logpdf(self, x, beta):
        # log(beta) - log(gamma(1/beta)) - x**beta
        return np.log(beta) - special.gammaln(1. / beta) - x**beta

    def _cdf(self, x, beta):
        # Regularized lower incomplete gamma function.
        return special.gammainc(1. / beta, x**beta)

    def _ppf(self, x, beta):
        return special.gammaincinv(1. / beta, x)**(1. / beta)

    def _sf(self, x, beta):
        # Regularized upper incomplete gamma function.
        return special.gammaincc(1. / beta, x**beta)

    def _isf(self, x, beta):
        return special.gammainccinv(1. / beta, x)**(1. / beta)

    def _entropy(self, beta):
        return 1. / beta - np.log(beta) + special.gammaln(1. / beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
# Collect names of classes and objects in this module.
# `get_distribution_names` splits them into frozen distribution instances
# and their generator classes; together they define the public API.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)

__all__ = _distn_names + _distn_gen_names
|
felipebetancur/scipy
|
scipy/stats/_continuous_distns.py
|
Python
|
bsd-3-clause
| 119,828
|
[
"Gaussian"
] |
bdcd2fb237df0904845c90e816dc8c7ee9f2131dd3ededc05dea8b6d5f6d52a9
|
import unittest
import os
import warnings
import json
from sympy import Number, Symbol
from pymatgen.analysis.surface_analysis import SlabEntry, SurfaceEnergyPlotter, \
NanoscaleStability, WorkFunctionAnalyzer
from pymatgen.util.testing import PymatgenTest
from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.analysis.wulff import WulffShape
__author__ = "Richard Tran"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Richard Tran"
__email__ = "rit001@eng.ucsd.edu"
__date__ = "Aug 24, 2017"
def get_path(path_str):
    """Return the path to *path_str* inside the surface test-files directory."""
    module_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(module_dir, "..", "..", "..", "test_files",
                        "surface_tests", path_str)
class SlabEntryTest(PymatgenTest):
    """Tests for SlabEntry: adsorption quantities, labels and surface energies."""

    def setUp(self):
        # Warnings from entry (de)serialization are irrelevant to these tests.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            # Bulk unit-cell entries shared by all test cases.
            with open(os.path.join(get_path(""), 'ucell_entries.txt')) as ucell_entries:
                ucell_entries = json.loads(ucell_entries.read())
            self.ucell_entries = ucell_entries

            # Load objects for O adsorption tests
            self.metals_O_entry_dict = load_O_adsorption()

            # Load objects for Cu test
            self.Cu_entry_dict = get_entry_dict(os.path.join(get_path(""),
                                                             "Cu_entries.txt"))
            self.assertEqual(len(self.Cu_entry_dict.keys()), 13)
            self.Cu_ucell_entry = ComputedStructureEntry.from_dict(self.ucell_entries["Cu"])

            # Load dummy MgO slab entries
            self.MgO_ucell_entry = ComputedStructureEntry.from_dict(self.ucell_entries["MgO"])
            self.Mg_ucell_entry = ComputedStructureEntry.from_dict(self.ucell_entries["Mg"])
            self.MgO_slab_entry_dict = get_entry_dict(os.path.join(get_path(""),
                                                                   "MgO_slab_entries.txt"))

    def test_properties(self):
        # Test cases for getting adsorption related quantities for a 1/4
        # monolayer adsorption of O on the low MMI surfaces of Pt, Ni and Rh
        for el in self.metals_O_entry_dict.keys():
            el_ucell = ComputedStructureEntry.from_dict(self.ucell_entries[el])
            for hkl in self.metals_O_entry_dict[el].keys():
                for clean in self.metals_O_entry_dict[el][hkl].keys():
                    for ads in self.metals_O_entry_dict[el][hkl][clean]:
                        ml = ads.get_unit_primitive_area
                        self.assertAlmostEqual(ml, 4, 2)
                        self.assertAlmostEqual(ads.get_monolayer, 1/4, 2)
                        Nads = ads.Nads_in_slab
                        self.assertEqual(Nads, 1)
                        self.assertEqual(ads.Nsurfs_ads_in_slab, 1)

                        # Determine the correct binding energy
                        with open(os.path.join(get_path(""),
                                               'isolated_O_entry.txt')) as isolated_O_entry:
                            isolated_O_entry = json.loads(isolated_O_entry.read())
                        O = ComputedStructureEntry.from_dict(isolated_O_entry)
                        gbind = (ads.energy - ml*clean.energy)/Nads - O.energy_per_atom
                        self.assertEqual(gbind, ads.gibbs_binding_energy())
                        # Determine the correct Gibbs adsorption energy
                        eads = Nads * gbind
                        self.assertEqual(eads, ads.gibbs_binding_energy(eads=True))
                        se = ads.surface_energy(el_ucell)
                        # delu_O coefficient should equal -1/(2 * surface area)
                        self.assertAlmostEqual(se.as_coefficients_dict()[Symbol("delu_O")],
                                               (-1/2)*ads.surface_area**(-1))

    def test_create_slab_label(self):
        for el in self.metals_O_entry_dict.keys():
            for hkl in self.metals_O_entry_dict[el].keys():
                # Test labels of both clean and adsorbed surfaces
                for clean in self.metals_O_entry_dict[el][hkl]:
                    label = clean.create_slab_label
                    comp = str(clean.composition.reduced_composition)
                    self.assertEqual(str(hkl)+" %s" %(comp), label)
                    for ads in self.metals_O_entry_dict[el][hkl][clean]:
                        label = ads.create_slab_label
                        self.assertEqual(label, str(hkl)+" %s+O, 0.250 ML" %(comp))

    def test_surface_energy(self):
        # For a nonstoichiometric case, the chemical potentials do not
        # cancel out, they serve as a reservoir for any missing atoms
        for slab_entry in self.MgO_slab_entry_dict[(1,1,1)].keys():
            se = slab_entry.surface_energy(self.MgO_ucell_entry,
                                           ref_entries=[self.Mg_ucell_entry])
            self.assertEqual(tuple(se.as_coefficients_dict().keys()),
                             (Number(1), Symbol("delu_Mg")))

        # For the case of a clean, stoichiometric slab, the surface energy
        # should be constant (i.e. surface energy is a constant).
        all_se = []
        ECu = self.Cu_ucell_entry.energy_per_atom
        for hkl in self.Cu_entry_dict.keys():
            slab_entry = list(self.Cu_entry_dict[hkl].keys())[0]
            se = slab_entry.surface_energy(self.Cu_ucell_entry)
            all_se.append(se)
            # Manually calculate surface energy
            manual_se = (slab_entry.energy - \
                         ECu *len(slab_entry.structure))/(2*slab_entry.surface_area)
            self.assertArrayAlmostEqual(float(se), manual_se, 10)

        # The (111) facet should be the most stable
        clean111_entry = list(self.Cu_entry_dict[(1,1,1)].keys())[0]
        se_Cu111 = clean111_entry.surface_energy(self.Cu_ucell_entry)
        self.assertEqual(min(all_se), se_Cu111)

    def test_cleaned_up_slab(self):
        # The cleaned up slab should have the same reduced formula as a clean slab
        for el in self.metals_O_entry_dict.keys():
            for hkl in self.metals_O_entry_dict[el].keys():
                for clean in self.metals_O_entry_dict[el][hkl].keys():
                    for ads in self.metals_O_entry_dict[el][hkl][clean]:
                        s = ads.cleaned_up_slab
                        self.assertEqual(s.composition.reduced_composition,
                                         clean.composition.reduced_composition)
class SurfaceEnergyPlotterTest(PymatgenTest):
    """Tests for SurfaceEnergyPlotter: stable entries, Wulff shapes and
    chemical-potential ranges for clean (Cu) and O-adsorbed (Pt/Ni/Rh) slabs."""

    def setUp(self):
        # Clean stoichiometric Cu system.
        entry_dict = get_entry_dict(os.path.join(get_path(""),
                                                 "Cu_entries.txt"))
        self.Cu_entry_dict = entry_dict
        with open(os.path.join(get_path(""), 'ucell_entries.txt')) as ucell_entries:
            ucell_entries = json.loads(ucell_entries.read())
        self.Cu_ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Cu"])
        self.Cu_analyzer = SurfaceEnergyPlotter(entry_dict, self.Cu_ucell_entry)

        # O-adsorbed systems: one analyzer per metal.
        self.metals_O_entry_dict = load_O_adsorption()
        ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Pt"])
        self.Pt_analyzer = SurfaceEnergyPlotter(self.metals_O_entry_dict["Pt"],
                                                ucell_entry)
        ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Ni"])
        self.Ni_analyzer = SurfaceEnergyPlotter(self.metals_O_entry_dict["Ni"],
                                                ucell_entry)
        ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["Rh"])
        self.Rh_analyzer = SurfaceEnergyPlotter(self.metals_O_entry_dict["Rh"],
                                                ucell_entry)
        self.Oads_analyzer_dict = {"Pt": self.Pt_analyzer,
                                   "Ni": self.Ni_analyzer,
                                   "Rh": self.Rh_analyzer}

    def test_get_stable_entry_at_u(self):
        for el in self.Oads_analyzer_dict.keys():
            plotter = self.Oads_analyzer_dict[el]
            for hkl in plotter.all_slab_entries.keys():
                # Test that the surface energy is clean for specific range of chempot
                entry1, gamma1 = \
                    plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): -7})
                entry2, gamma2 = \
                    plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): -6})
                self.assertEqual(gamma1, gamma2)
                self.assertEqual(entry1.label, entry2.label)

                # Now test that for a high chempot, adsorption
                # occurs and gamma is not equal to clean gamma
                entry3, gamma3 = \
                    plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): -1})
                self.assertNotEqual(entry3.label, entry2.label)
                self.assertNotEqual(gamma3, gamma2)

                # For any chempot greater than -6, surface energy should vary
                # but the configuration should remain the same
                entry4, gamma4 = \
                    plotter.get_stable_entry_at_u(hkl, delu_dict={Symbol("delu_O"): 0})
                self.assertEqual(entry3.label, entry4.label)
                self.assertNotEqual(gamma3, gamma4)

    def test_wulff_from_chempot(self):
        # Test if it generates a Wulff shape, test if
        # all the facets for Cu wulff shape are inside.
        Cu_wulff = self.Cu_analyzer.wulff_from_chempot()
        area_frac_dict = Cu_wulff.area_fraction_dict
        facets_hkl = [(1,1,1), (3,3,1), (3,1,0), (1,0,0),
                      (3,1,1), (2,1,0), (2,2,1)]
        for hkl in area_frac_dict.keys():
            if hkl in facets_hkl:
                self.assertNotEqual(area_frac_dict[hkl], 0)
            else:
                self.assertEqual(area_frac_dict[hkl], 0)

        for el in self.Oads_analyzer_dict.keys():
            # Test WulffShape for adsorbed surfaces
            analyzer = self.Oads_analyzer_dict[el]
            # chempot = analyzer.max_adsorption_chempot_range(0)
            wulff = analyzer.wulff_from_chempot(delu_default=-6)
            se = wulff.weighted_surface_energy

        # Test if a different Wulff shape is generated
        # for Ni when adsorption comes into play
        wulff_neg7 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-7)
        wulff_neg6 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-6)
        self.assertEqual(wulff_neg7.weighted_surface_energy,
                         wulff_neg6.weighted_surface_energy)
        wulff_neg55 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-5.5)
        self.assertNotEqual(wulff_neg55.weighted_surface_energy,
                            wulff_neg6.weighted_surface_energy)
        wulff_neg525 = self.Oads_analyzer_dict["Ni"].wulff_from_chempot(delu_default=-5.25)
        self.assertNotEqual(wulff_neg55.weighted_surface_energy,
                            wulff_neg525.weighted_surface_energy)

    def test_color_palette_dict(self):
        # Every clean and adsorbed entry should have an assigned color.
        for el in self.metals_O_entry_dict.keys():
            analyzer = self.Oads_analyzer_dict[el]
            color_dict = analyzer.color_palette_dict()
            for hkl in self.metals_O_entry_dict[el].keys():
                for clean in self.metals_O_entry_dict[el][hkl].keys():
                    color = color_dict[clean]
                    for ads in self.metals_O_entry_dict[el][hkl][clean]:
                        color = color_dict[ads]

    def test_get_surface_equilibrium(self):
        # For clean stoichiometric system, the two equations should
        # be parallel because the surface energy is a constant. Then
        # get_surface_equilibrium should return None
        clean111_entry = list(self.Cu_entry_dict[(1, 1, 1)].keys())[0]
        clean100_entry = list(self.Cu_entry_dict[(1, 0, 0)].keys())[0]
        soln = self.Cu_analyzer.get_surface_equilibrium([clean111_entry,
                                                         clean100_entry])
        self.assertFalse(soln)

        # For adsorbed system, we should find one intercept
        Pt_entries = self.metals_O_entry_dict["Pt"]
        clean = list(Pt_entries[(1, 1, 1)].keys())[0]
        ads = Pt_entries[(1, 1, 1)][clean][0]
        Pt_analyzer = self.Oads_analyzer_dict["Pt"]
        soln = Pt_analyzer.get_surface_equilibrium([clean, ads])
        self.assertNotEqual(list(soln.values())[0], list(soln.values())[1])

        # Check if the number of parameters for adsorption are correct
        self.assertEqual((Symbol("delu_O"), Symbol("gamma")), tuple(soln.keys()))
        # Adsorbed systems have a b2=(-1*Nads) / (Nsurfs * Aads)
        se = ads.surface_energy(Pt_analyzer.ucell_entry, Pt_analyzer.ref_entries)
        self.assertAlmostEqual(se.as_coefficients_dict()[Symbol("delu_O")],
                               -1 / (2 * ads.surface_area))

    def test_stable_u_range_dict(self):
        # Each entry should claim a non-trivial chempot stability window.
        for el in self.Oads_analyzer_dict.keys():
            analyzer = self.Oads_analyzer_dict[el]
            stable_u_range = analyzer.stable_u_range_dict([-1,0], Symbol("delu_O"),
                                                          no_doped=False)
            all_u = []
            for entry in stable_u_range.keys():
                all_u.extend(stable_u_range[entry])
            self.assertGreater(len(all_u), 1)

    def test_entry_dict_from_list(self):
        # Plug in a list of entries to see if it works
        all_Pt_slab_entries = []
        Pt_entries = self.Pt_analyzer.all_slab_entries
        for hkl in Pt_entries.keys():
            for clean in Pt_entries[hkl].keys():
                all_Pt_slab_entries.append(clean)
                all_Pt_slab_entries.extend(Pt_entries[hkl][clean])
        a = SurfaceEnergyPlotter(all_Pt_slab_entries,
                                 self.Pt_analyzer.ucell_entry)
        self.assertEqual(type(a).__name__, "SurfaceEnergyPlotter")

    # NOTE(review): the plotting tests below are disabled; presumably they
    # require a display/matplotlib backend — confirm before re-enabling.
    # def test_monolayer_vs_BE(self):
    #     for el in self.Oads_analyzer_dict.keys():
    #         # Test WulffShape for adsorbed surfaces
    #         analyzer = self.Oads_analyzer_dict[el]
    #         plt = analyzer.monolayer_vs_BE()
    #
    # def test_area_frac_vs_chempot_plot(self):
    #
    #     for el in self.Oads_analyzer_dict.keys():
    #         # Test WulffShape for adsorbed surfaces
    #         analyzer = self.Oads_analyzer_dict[el]
    #         plt = analyzer.area_frac_vs_chempot_plot(x_is_u_ads=True)
    #
    # def test_chempot_vs_gamma_clean(self):
    #
    #     plt = self.Cu_analyzer.chempot_vs_gamma_clean()
    #     for el in self.Oads_analyzer_dict.keys():
    #         # Test WulffShape for adsorbed surfaces
    #         analyzer = self.Oads_analyzer_dict[el]
    #         plt = analyzer.chempot_vs_gamma_clean(x_is_u_ads=True)
    #
    # def test_chempot_vs_gamma_facet(self):
    #
    #     for el in self.metals_O_entry_dict.keys():
    #         for hkl in self.metals_O_entry_dict[el].keys():
    #             # Test WulffShape for adsorbed surfaces
    #             analyzer = self.Oads_analyzer_dict[el]
    #             plt = analyzer.chempot_vs_gamma_facet(hkl)
    # def test_surface_chempot_range_map(self):
    #
    #     for el in self.metals_O_entry_dict.keys():
    #         for hkl in self.metals_O_entry_dict[el].keys():
    #             # Test WulffShape for adsorbed surfaces
    #             analyzer = self.Oads_analyzer_dict[el]
    #             plt = analyzer.chempot_vs_gamma_facet(hkl)
class WorkfunctionAnalyzerTest(PymatgenTest):
    """Tests for WorkFunctionAnalyzer built from gzipped VASP output files."""

    def setUp(self):
        self.kwargs = {
            "poscar_filename": get_path("CONTCAR.relax1.gz"),
            "locpot_filename": get_path("LOCPOT.gz"),
            "outcar_filename": get_path("OUTCAR.relax1.gz"),
        }
        self.wf_analyzer = WorkFunctionAnalyzer.from_files(**self.kwargs)

    def test_shift(self):
        # Shifting the slab along the surface normal should leave the
        # average bulk potential unchanged (compared at whole-eV precision).
        shifted = WorkFunctionAnalyzer.from_files(shift=-0.25, blength=3.7,
                                                  **self.kwargs)
        self.assertEqual("%.f" % self.wf_analyzer.ave_bulk_p,
                         "%.f" % shifted.ave_bulk_p)

    def test_is_converged(self):
        self.assertTrue(self.wf_analyzer.is_converged())
class NanoscaleStabilityTest(PymatgenTest):
    """Tests for NanoscaleStability using fcc and hcp polymorphs of La."""

    def setUp(self):
        # Load all entries
        La_hcp_entry_dict = get_entry_dict(os.path.join(get_path(""),
                                                        "La_hcp_entries.txt"))
        La_fcc_entry_dict = get_entry_dict(os.path.join(get_path(""),
                                                        "La_fcc_entries.txt"))
        with open(os.path.join(get_path(""), 'ucell_entries.txt')) as ucell_entries:
            ucell_entries = json.loads(ucell_entries.read())
        La_hcp_ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["La_hcp"])
        La_fcc_ucell_entry = ComputedStructureEntry.from_dict(ucell_entries["La_fcc"])

        # Set up the NanoscaleStabilityClass
        self.La_hcp_analyzer = SurfaceEnergyPlotter(La_hcp_entry_dict,
                                                    La_hcp_ucell_entry)
        self.La_fcc_analyzer = SurfaceEnergyPlotter(La_fcc_entry_dict,
                                                    La_fcc_ucell_entry)
        self.nanoscale_stability = NanoscaleStability([self.La_fcc_analyzer,
                                                       self.La_hcp_analyzer])

    def test_stability_at_r(self):
        # Check that we have a different polymorph that is
        # stable below or above the equilibrium particle size
        r = self.nanoscale_stability.solve_equilibrium_point(self.La_hcp_analyzer,
                                                             self.La_fcc_analyzer)*10

        # hcp phase of La particle should be the stable
        # polymorph above the equilibrium radius
        hcp_wulff = self.La_hcp_analyzer.wulff_from_chempot()
        bulk = self.La_hcp_analyzer.ucell_entry
        ghcp, rhcp = self.nanoscale_stability.wulff_gform_and_r(hcp_wulff, bulk, r+10,
                                                                from_sphere_area=True)
        fcc_wulff = self.La_fcc_analyzer.wulff_from_chempot()
        bulk = self.La_fcc_analyzer.ucell_entry
        gfcc, rfcc = self.nanoscale_stability.wulff_gform_and_r(fcc_wulff, bulk, r+10,
                                                                from_sphere_area=True)
        self.assertGreater(gfcc, ghcp)

        # fcc phase of La particle should be the stable
        # polymorph below the equilibrium radius
        hcp_wulff = self.La_hcp_analyzer.wulff_from_chempot()
        bulk = self.La_hcp_analyzer.ucell_entry
        ghcp, rhcp = self.nanoscale_stability.wulff_gform_and_r(hcp_wulff, bulk, r-10,
                                                                from_sphere_area=True)
        fcc_wulff = self.La_fcc_analyzer.wulff_from_chempot()
        bulk = self.La_fcc_analyzer.ucell_entry
        gfcc, rfcc = self.nanoscale_stability.wulff_gform_and_r(fcc_wulff, bulk, r-10,
                                                                from_sphere_area=True)
        self.assertLess(gfcc, ghcp)

    def test_scaled_wulff(self):
        # Ensure for a given radius, the effective radius
        # of the Wulff shape is the same (correctly scaled)
        hcp_wulff = self.La_hcp_analyzer.wulff_from_chempot()
        fcc_wulff = self.La_fcc_analyzer.wulff_from_chempot()
        w1 = self.nanoscale_stability.scaled_wulff(hcp_wulff, 10)
        w2 = self.nanoscale_stability.scaled_wulff(fcc_wulff, 10)
        self.assertAlmostEqual(w1.effective_radius, w2.effective_radius)
        self.assertAlmostEqual(w1.effective_radius, 10)
        self.assertAlmostEqual(10, w2.effective_radius)
def get_entry_dict(filename):
    """Build a ``{miller_index: {SlabEntry: []}}`` dict from a JSON entry file."""
    entry_dict = {}
    with open(filename) as f:
        entries = json.loads(f.read())
    for label in entries:
        # The Miller index is encoded in the key starting at character 25:
        # digits (each optionally preceded by "-") terminated by "_".
        encoded = label[25:]
        miller_index = []
        for pos, ch in enumerate(encoded):
            if ch == "_":
                break
            if ch == "-":
                continue
            digit = int(ch)
            # A preceding "-" negates the digit.  NOTE(review): at pos 0
            # this wraps to the last character of `encoded`, reproducing
            # the original behavior — confirm that keys never start with
            # an unsigned digit preceded by a trailing "-".
            if encoded[pos - 1] == "-":
                digit = -digit
            miller_index.append(digit)
        hkl = tuple(miller_index)
        entry = ComputedStructureEntry.from_dict(entries[label])
        slab_entry = SlabEntry(entry.structure, entry.energy, hkl, label=label)
        entry_dict.setdefault(hkl, {})[slab_entry] = []
    return entry_dict
def load_O_adsorption():
    """Load clean and O-adsorbed slab entries for Ni, Rh and Pt.

    Returns a nested dict ``{element: {hkl: {clean_entry: [ads_entries]}}}``.
    """
    # Load the isolated O adsorbate as an entry
    with open(os.path.join(get_path(""),
                           'isolated_O_entry.txt')) as f:
        O = ComputedStructureEntry.from_dict(json.loads(f.read()))

    # entry_dict for the adsorption case, O adsorption on Ni, Rh and Pt
    metals_O_entry_dict = {"Ni": {(1, 1, 1): {}, (1, 0, 0): {}},
                           "Pt": {(1, 1, 1): {}},
                           "Rh": {(1, 0, 0): {}}
                           }
    # Facet substrings in the entry keys mapped to Miller indices,
    # in the same order the original checks were applied.
    hkl_codes = {"111": (1, 1, 1), "110": (1, 1, 0), "100": (1, 0, 0)}

    # First pass: clean slab entries.
    with open(os.path.join(get_path(""), "csentries_slabs.json")) as f:
        entries = json.loads(f.read())
    for k, entry_d in entries.items():
        entry = ComputedStructureEntry.from_dict(entry_d)
        for el in metals_O_entry_dict:
            if el not in k:
                continue
            for code, hkl in hkl_codes.items():
                if code in k:
                    clean = SlabEntry(entry.structure, entry.energy, hkl,
                                      label=k + "_clean")
                    metals_O_entry_dict[el][hkl][clean] = []

    # Second pass: O-adsorbed slabs, attached to their clean counterparts.
    with open(os.path.join(get_path(""), "csentries_o_ads.json")) as f:
        entries = json.loads(f.read())
    for k, entry_d in entries.items():
        entry = ComputedStructureEntry.from_dict(entry_d)
        for el in metals_O_entry_dict:
            if el not in k:
                continue
            for code, hkl in hkl_codes.items():
                if code in k:
                    clean = list(metals_O_entry_dict[el][hkl].keys())[0]
                    ads = SlabEntry(entry.structure, entry.energy, hkl,
                                    label=k + "_O", adsorbates=[O],
                                    clean_entry=clean)
                    metals_O_entry_dict[el][hkl][clean] = [ads]

    return metals_O_entry_dict
# Run the tests in this module when executed directly.
if __name__ == "__main__":
    unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/analysis/tests/test_surface_analysis.py
|
Python
|
mit
| 23,554
|
[
"pymatgen"
] |
465a0bf0cc1795ab552a517d08fe8618f39d4107adb7f43a963cad569b79d22b
|
#! /usr/bin/python
"""versioneer.py
(like a rocketeer, but for versions)
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Version: 0.7+
This file helps distutils-based projects manage their version number by just
creating version-control tags.
For developers who work from a VCS-generated tree (e.g. 'git clone' etc),
each 'setup.py version', 'setup.py build', 'setup.py sdist' will compute a
version number by asking your version-control tool about the current
checkout. The version number will be written into a generated _version.py
file of your choosing, where it can be included by your __init__.py
For users who work from a VCS-generated tarball (e.g. 'git archive'), it will
compute a version number by looking at the name of the directory created when
the tarball is unpacked. This conventionally includes both the name of the
project and a version number.
For users who work from a tarball built by 'setup.py sdist', it will get a
version number from a previously-generated _version.py file.
As a result, loading code directly from the source tree will not result in a
real version. If you want real versions from VCS trees (where you frequently
update from the upstream repository, or do new development), you will need to
do a 'setup.py version' after each update, and load code from the build/
directory.
You need to provide this code with a few configuration values:
versionfile_source:
A project-relative pathname into which the generated version strings
should be written. This is usually a _version.py next to your project's
main __init__.py file. If your project uses src/myproject/__init__.py,
this should be 'src/myproject/_version.py'. This file should be checked
in to your VCS as usual: the copy created below by 'setup.py
update_files' will include code that parses expanded VCS keywords in
generated tarballs. The 'build' and 'sdist' commands will replace it with
a copy that has just the calculated version string.
versionfile_build:
Like versionfile_source, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have package_dir={'myproject': 'src/myproject'},
then you will probably have versionfile_build='myproject/_version.py' and
versionfile_source='src/myproject/_version.py'.
tag_prefix: a string, like 'PROJECTNAME-', which appears at the start of all
VCS tags. If your tags look like 'myproject-1.2.0', then you
should use tag_prefix='myproject-'. If you use unprefixed tags
like '1.2.0', this should be an empty string.
parentdir_prefix: a string, frequently the same as tag_prefix, which
appears at the start of all unpacked tarball filenames. If
your tarball unpacks into 'myproject-1.2.0', this should
be 'myproject-'.
To use it:
1: include this file in the top level of your project
2: make the following changes to the top of your setup.py:
import versioneer
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
4: run 'setup.py update_files', which will create _version.py, and will
append the following to your __init__.py:
from _version import __version__
5: modify your MANIFEST.in to include versioneer.py
6: add both versioneer.py and the generated _version.py to your VCS
"""
import os
import sys
import re
import subprocess
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# Per-project configuration.  The project's setup.py must assign these four
# values before calling get_version()/get_cmdclass() (see module docstring).
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None

VCS = "git"  # only git is supported by this version of versioneer
IN_LONG_VERSION_PY = False
GIT = "git"
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
GIT = "git"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%%s', no digits" %% ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %%s" %% ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.realpath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.realpath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.realpath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
def run_command(args, cwd=None, verbose=False):
    """Run *args* as a subprocess and return its stripped stdout.

    Returns None (optionally reporting to stdout when *verbose*) if the
    executable cannot be launched or exits with a nonzero status.
    """
    try:
        # remember shell=False, so use git.cmd on windows, not just git
        p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
    except EnvironmentError:
        e = sys.exc_info()[1]  # py2/py3-compatible exception capture
        if verbose:
            print("unable to run %s" % args[0])
            print(e)
        return None
    stdout = p.communicate()[0].strip()
    # BUG FIX: comparing the sys.version *string* ('3' <= ...) is fragile
    # (lexicographic); sys.version_info compares the major version numerically.
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
def get_expanded_variables(versionfile_source):
    """Scan *versionfile_source* for expanded git_refnames/git_full values.

    The code embedded in _version.py can just fetch the value of these
    variables; when used from setup.py we don't want to import _version.py,
    so we grab them with a regexp instead. Not used from _version.py.

    Returns a dict holding whichever of "refnames"/"full" were found
    (empty dict when the file is unreadable).
    """
    variables = {}
    try:
        # 'with' guarantees the handle is closed (the original leaked it)
        with open(versionfile_source, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        variables["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        variables["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
    """Derive {'version', 'full'} from git-archive expanded keyword values."""
    refnames = variables["refnames"].strip()
    if refnames.startswith("$Format"):
        # keyword was never expanded: not an unpacked git-archive tarball
        if verbose:
            print("variables are unexpanded, not using")
        return {}
    full = variables["full"].strip()
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # Assume all version tags have a digit. git's %d expansion
    # behaves like git log --decorate=short and strips out the
    # refs/heads/ and refs/tags/ prefixes that would let us
    # distinguish between branches and tags. By ignoring refnames
    # without digits, we filter out many common branch names like
    # "release" and "stabilization", as well as "HEAD" and "master".
    for ref in list(refs):
        if not re.search(r'\d', ref):
            if verbose:
                print("discarding '%s', no digits" % ref)
            refs.discard(ref)
    if verbose:
        print("remaining refs: %s" % ",".join(sorted(refs)))
    for ref in sorted(refs):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r, "full": full}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full, "full": full}
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
    """Compute version info by running git inside the source checkout.

    This runs 'git' from the root of the source tree. That either means
    someone ran a setup.py command (and this code is in versioneer.py, so
    IN_LONG_VERSION_PY=False, thus the containing directory is the root of
    the source tree), or someone ran a project-specific entry point (and
    this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
    containing directory is somewhere deeper in the source tree). This only
    gets called if the git-archive 'subst' variables were *not* expanded,
    and _version.py hasn't already been rewritten with a short version
    string, meaning we're inside a checked out source tree.
    """
    try:
        here = os.path.realpath(__file__)
    except NameError:
        # some py2exe/bbfreeze/non-CPython implementations don't do __file__
        return {}  # not always correct
    # versionfile_source is the relative path from the top of the source tree
    # (where the .git directory might live) to this file. Invert this to find
    # the root from __file__.
    if IN_LONG_VERSION_PY:
        root = here
        for _ in versionfile_source.split("/"):
            root = os.path.dirname(root)
    else:
        root = os.path.dirname(here)
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    described = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
                            cwd=root)
    if described is None:
        return {}
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (described, tag_prefix))
        return {}
    tag = described[len(tag_prefix):]
    head = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
    if head is None:
        return {}
    full = head.strip()
    if tag.endswith("-dirty"):
        full += "-dirty"
    # accommodate our devel build process: a conda build, when present,
    # overrides the git-derived tag
    try:
        from bokeh.__conda_version__ import conda_version
        tag = conda_version.replace("'", "")
        del conda_version
    except ImportError:
        pass
    return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
    """Guess the version from the name of the directory holding the source."""
    if IN_LONG_VERSION_PY:
        # We're running from _version.py. If it's from a source tree
        # (execute-in-place), we can work upwards to find the root of the
        # tree, and then check the parent directory for a version string. If
        # it's in an installed application, there's no hope.
        try:
            here = os.path.realpath(__file__)
        except NameError:
            # py2exe/bbfreeze/non-CPython don't have __file__
            return {}  # without __file__, we have no hope
        # versionfile_source is the relative path from the top of the source
        # tree to _version.py. Invert this to find the root from __file__.
        root = here
        for _ in versionfile_source.split("/"):
            root = os.path.dirname(root)
    else:
        # we're running from versioneer.py, which means we're running from
        # the setup.py in a source tree. sys.argv[0] is setup.py in the root.
        root = os.path.dirname(os.path.realpath(sys.argv[0]))
    # Source tarballs conventionally unpack into a directory that includes
    # both the project name and a version string.
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
                  (root, dirname, parentdir_prefix))
        return None
    return {"version": dirname[len(parentdir_prefix):], "full": ""}
def do_vcs_install(versionfile_source, ipy):
    """git-add the versioneer files and make sure .gitattributes marks
    *versionfile_source* 'export-subst' so git-archive expands its keywords.
    """
    run_command([GIT, "add", "versioneer.py"])
    run_command([GIT, "add", versionfile_source])
    run_command([GIT, "add", ipy])
    present = False
    try:
        # 'with' closes the handle even if reading raises; the original's
        # explicit close() was skipped on error
        with open(".gitattributes", "r") as f:
            for line in f:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Parse version_version / version_full out of a generated _version.py.

    Returns a dict with whichever keys were found ({} if the file is
    unreadable).
    """
    versions = {}
    try:
        # 'with' closes the file; the original leaked the handle
        with open(filename) as f:
            lines = f.readlines()
    except EnvironmentError:
        return versions
    for line in lines:
        mo = re.match("version_version = '([^']+)'", line)
        if mo:
            versions["version"] = mo.group(1)
        mo = re.match("version_full = '([^']+)'", line)
        if mo:
            versions["full"] = mo.group(1)
    return versions
def write_to_version_file(filename, versions):
    """Render SHORT_VERSION_PY with *versions* and write it to *filename*."""
    # 'with' guarantees the file is flushed/closed even if the write raises
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % versions)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
                      default=DEFAULT, verbose=False):
    """Return a dict with two keys, 'version' and 'full'.

    Extracts the version from the first strategy that works: expanded
    git-archive keywords, a previously written _version.py, 'git describe',
    then the parent directory name. This is meant to work for developers
    using a source checkout, for users of a tarball created by 'setup.py
    sdist', and for users of a tarball/zipball created by 'git archive' or
    github's download-from-tag feature. Falls back to *default*.
    """
    # NOTE(review): this reads the module-global versionfile_source rather
    # than the 'versionfile' parameter -- matches upstream versioneer, so
    # left as-is.
    variables = get_expanded_variables(versionfile_source)
    if variables:
        ver = versions_from_expanded_variables(variables, tag_prefix)
        if ver:
            if verbose: print("got version from expanded variable %s" % ver)
            return ver
    ver = versions_from_file(versionfile)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile, ver))
        return ver
    ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from git %s" % ver)
        return ver
    ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return ver
    # BUG FIX: the original printed 'ver' here, which is always empty/None on
    # this path; report the default that is actually returned.
    if verbose: print("got version from default %s" % default)
    return default
def get_versions(default=DEFAULT, verbose=False):
    """Compute version info after validating the module configuration.

    The three module-level globals must be assigned by the project's
    setup.py before this is called.
    """
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
                             default=default, verbose=verbose)
def get_version(verbose=False):
    """Convenience wrapper returning just the version string."""
    info = get_versions(verbose=verbose)
    return info["version"]
class cmd_version(Command):
    """Distutils command that reports the computed version string."""
    description = "report generated version string"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print("Version is currently: %s" % get_version(verbose=True))
class cmd_build(_build):
    """'build' command that rewrites the built _version.py with real data."""
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        # 'with' closes the file even on a failed write (original used an
        # explicit open/close pair)
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % versions)
class cmd_sdist(_sdist):
    """'sdist' command that ships a pre-populated _version.py."""
    def run(self):
        versions = get_versions(verbose=True)
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        # 'with' closes the file even on a failed write (original used an
        # explicit open/close pair)
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
# Snippet appended to the package __init__.py so that
# package.__version__ reflects the generated _version.py.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
    """Write _version.py, splice the import snippet into __init__.py, and
    git-add everything."""
    description = "modify __init__.py and create _version.py"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        print(" creating %s" % versionfile_source)
        # 'with' blocks replace the original open/close pairs so handles are
        # closed even when a write raises
        with open(versionfile_source, "w") as f:
            f.write(LONG_VERSION_PY % {"DOLLAR": "$",
                                       "TAG_PREFIX": tag_prefix,
                                       "PARENTDIR_PREFIX": parentdir_prefix,
                                       "VERSIONFILE_SOURCE": versionfile_source,
                                       })
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
        do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
    """Map distutils command names to the versioneer-aware command classes."""
    commands = {
        'version': cmd_version,
        'update_files': cmd_update_files,
        'build': cmd_build,
        'sdist': cmd_sdist,
    }
    return commands
|
htygithub/bokeh
|
versioneer.py
|
Python
|
bsd-3-clause
| 25,745
|
[
"Brian"
] |
4dbcd0d1a24b9ce40a18a281d096f086557192eb160aafddae0805b581d9b2f2
|
# Build a chromosome-5 binary methylation matrix from RRBS single-cell
# files and write it in a PHYLIP-like format.
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
# All input files live in this fixed cluster directory.
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# Collect the four cell-population file groups; each filename doubles as
# the sample identifier.
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
# Sanity-check the group sizes before combining.
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
# Read each file, keep only chromosome-5 positions, and collect the frames.
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    df = df.drop("Unnamed: 0", axis=1)  # drop the CSV's saved index column
    # the first 5 characters of 'position' encode the chromosome, e.g. "chr5_"
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr5_"]
    df = df.drop("chromosome", axis=1)
    df_list.append(df)
print(len(df_list))
# Align all per-file frames on 'position' (outer join on the index), one
# column per sample.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)  # NOTE(review): value unused -- likely a leftover notebook cell
# Assign explicit sample IDs (flowcell lane + barcode) as column names.
# NOTE(review): this hard-coded order must match the glob order of
# 'totalfiles' above -- verify against the counts printed earlier.
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
                        "RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
                        "RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
                        "RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
                        "RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
                        "RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
                        "RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
                        "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
                        "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
                        "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
                        "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
                        "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
                        "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
                        "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
                        "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
                        "RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
                        "RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
                        "RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
                        "RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
print(total_matrix.shape)
# Encode each cell as 0/1, with "?" for missing values (PHYLIP's unknown
# character).
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse each column (sample) into a single character string.
total_matrix = total_matrix.astype(str).apply(''.join)
# Produce "<sample name> <binary string>" lines and write them out.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_chrom5.phy", header=None, index=None)
print(tott.shape)
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/normal_chr05.py
|
Python
|
mit
| 25,843
|
[
"MCell"
] |
9f5cbdb6b6871cf850a2bc563197f96c545addb5d44cf9f6cf553085080b0845
|
# Module metadata for the XNAT-Slicer Timer utility.
__author__ = "Sunil Kumar (kumar.sunil.p@gmail.com)"
__copyright__ = "Copyright 2014, Washington University in St. Louis"
__credits__ = ["Sunil Kumar", "Steve Pieper", "Dan Marcus"]
__license__ = "XNAT Software License Agreement " + \
              "(see: http://xnat.org/about/license.php)"
__version__ = "2.1.1"
__maintainer__ = "Rick Herrick"
__email__ = "herrickr@mir.wustl.edu"
__status__ = "Production"
# Slicer exposes its toolkit modules through __main__ at runtime.
from __main__ import vtk, ctk, qt, slicer
import datetime, time
import os
import sys
# Usage documentation, kept in a module-level string (original style).
comment = """
Timer manages time logging for performance testing and
allows the user to write the log to a file.
Usage as follows:
from Timer import *
timer = Timer(writePath = "./", writeFileName = "timelog.txt")
timer.start(processName = "Download", debugStr = "Downloading...")
>>> Download
>>> 2013-08-21 09:27:11.673000 <--Start timer before Downloading....
# Download code here
timer.stop()
>>> 2013-08-21 09:27:18.396000 <---Stop timer after Downloading....
>>> TOTAL TIME ELAPSED FOR Download: 0:00:06.723000
"""
class Timer(object):
    """ Logs start/stop times of named processes and accumulates the
        log strings so they can be appended to a file.  See the
        module-level 'comment' string for usage.
    """
    def __init__(self, MODULE = None, writePath = './', writeFileName = 'timerlog.txt', fileOverWrite = False):
        """ Init function. Defines necessary variables.

            MODULE:        optional module whose settings URI overrides 'writePath'.
            writePath:     directory the log file is written to.
            writeFileName: log file name inside 'writePath'.
            fileOverWrite: stored flag (write() currently always appends).
        """
        self.prev = None          # datetime recorded by start()
        self.curr = None          # datetime recorded by stop()
        self.debugStr = None
        self.processName = None
        self.timerStrs = []       # accumulated log lines, flushed by write()
        #-------------------------
        # Override the defaulted 'writePath' if the 'MODULE' argument
        # is provided.
        #-------------------------
        if MODULE:
            self.writePath = MODULE.GLOBALS.LOCAL_URIS['settings']
        else:
            # BUGFIX: was 'self.writePath = self.writePath', which raised
            # AttributeError because the attribute did not exist yet.
            self.writePath = writePath
        #-------------------------
        # Make the 'writeFileName'
        #-------------------------
        if not writeFileName:
            self.writeFileName = os.path.join(self.writePath, 'timerLog.txt')
        else:
            self.writeFileName = os.path.join(self.writePath, writeFileName)
        self.fileOverWrite = fileOverWrite
        self.startCalled = False
    def start(self, processName = None, debugStr = None):
        """ Starts the timer process and tracks
            the variables accordingly. Timer process is provided
            by the user in the 'processName' argument.
        """
        #-------------------------
        # Record the start time in the accumulated log strings.
        #-------------------------
        self.startCalled = True
        self.debugStr = debugStr
        self.prev = datetime.datetime.now()
        currStr = ""
        if processName:
            self.processName = processName
            self.timerStrs.append('\n' + processName + '\n')
        if self.debugStr:
            currStr = "before " + self.debugStr + "."
        # Renamed from 'str', which shadowed the builtin.
        startStr = ("%s <--Start timer %s"%(self.prev, currStr))
        self.timerStrs.append(startStr + '\n')
    def stop(self, fileWrite = True, printTimeDiff = True):
        """ Writes the stop time (and its associated process name) to
            file and to console. Only works if the 'start' function was
            called before it.
        """
        if self.startCalled:
            currStr = ""
            elapseStr = ""
            #-------------------------
            # Write the stop time to console and the file.
            #-------------------------
            self.curr = datetime.datetime.now()
            if self.debugStr:
                currStr = "after " + self.debugStr + "."
            if self.processName:
                elapseStr = "FOR " + self.processName
            str1 = ("%s <---Stop timer %s"%(self.curr, currStr))
            self.timerStrs.append(str1 + '\n')
            # Single-argument print() behaves the same on Python 2 and 3
            # (was a Python-2-only 'print x' statement).
            print(str1)
            if printTimeDiff:
                str2 = ("\n\nTOTAL TIME ELAPSED %s: \t\t%s"%(elapseStr, (self.curr-self.prev)))
                self.timerStrs.append(str2 + '\n')
                print(str2)
            if fileWrite: self.write()
            self.clear()
    def write(self):
        """ Appends the accumulated log strings to 'writeFileName'. """
        if self.writeFileName:
            f = open(self.writeFileName, 'a')
            f.writelines(self.timerStrs)
            f.close()
    def clear(self):
        """ Clears the variables. """
        self.curr = None
        self.prev = None
        self.debugStr = None
        self.processName = None
        del self.timerStrs[:]
        self.startCalled = False
|
MokaCreativeLLC/XNATSlicer
|
XNATSlicer/XnatSlicerLib/utils/Timer.py
|
Python
|
bsd-3-clause
| 4,718
|
[
"VTK"
] |
ad2652286b06195f42d717ba5c5f90780131a8e0ea1be0fe018aac6a0c8caf48
|
from .generic import ObsIO
import numpy as np
import pandas as pd
import xarray as xr
class NcObsIO(ObsIO):
    """ObsIO to read observations from a local NetCDF store created by ObsIO.to_netcdf
    """
    def __init__(self, fpath, elems):
        """
        Parameters
        ----------
        fpath : str
            The local file path of the NetCDF store
        elems : list
            Observation elements to load from NetCDF store when read_obs is
            called
        """
        self.ds = xr.open_dataset(fpath)
        self.elems = elems
        self._stns = None
    def _read_stns(self):
        """Build the station metadata DataFrame from the variables indexed
        solely by the station dimension."""
        vnames = np.array(list(self.ds.variables.keys()))
        is_stn_var = np.array([self.ds[avar].dims==('station_id',)
                               for avar in vnames])
        vnames = vnames[is_stn_var]
        stns = self.ds[list(vnames)].to_dataframe()
        stns['station_id'] = stns.index
        stns['station_index'] = np.arange(len(stns))
        # Make sure all object columns are str
        # BUGFIX: use the builtin str; the np.str alias was deprecated in
        # NumPy 1.20 and removed in 1.24.
        stns.loc[:, stns.dtypes == object] = stns.loc[:, stns.dtypes == object].astype(str)
        stns = stns.set_index('station_id', drop=False)
        return stns
    def _read_obs(self, stns_ids=None):
        """Return a long-format DataFrame of observations for the requested
        station ids (all stations when stns_ids is None), indexed by
        (station_id, elem, time)."""
        if stns_ids is None:
            stns_ids = self.stns.station_id
        obs = []
        for aelem in self.elems:
            obs_df = (pd.DataFrame(self.ds[aelem].loc[:, list(stns_ids)].
                                   to_pandas().stack(dropna=False)))
            obs_df['elem'] = aelem
            obs_df = obs_df.rename(columns={0:'obs_value'})
            obs_df = obs_df.set_index('elem', append=True)
            obs.append(obs_df)
        obs = pd.concat(obs)
        # BUGFIX: DataFrame.sortlevel() was removed in pandas 0.23;
        # sort_index(level=...) is the long-supported equivalent.
        obs = obs.reorder_levels(['station_id', 'elem',
                                  'time']).sort_index(level=0, sort_remaining=True)
        return obs
    def close(self):
        """Close the underlying xarray dataset and drop the reference."""
        self.ds.close()
        self.ds = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
|
jaredwo/obsio
|
obsio/providers/netcdf.py
|
Python
|
gpl-3.0
| 2,227
|
[
"NetCDF"
] |
421ee90bc798ae0764d930d9859aa79b09bc81763b4c8e5d81fe074eb39004b8
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import logging
logger = logging.getLogger('remindor_common')
try:
    import dbus
    import dbus.service

    class dbus_service(dbus.service.Object):
        """DBus object exposing indicator-remindor commands. Each emit*
        method fires the 'command' signal with a fixed command string.
        Only defined when the dbus bindings import successfully.
        """
        def __init__(self, bus, path='/com/bhdouglass/indicator_remindor/object'):
            dbus.service.Object.__init__(self, bus, path)
            self._bus = bus
            self._path = path
            self._interface = 'com.bhdouglass.indicator_remindor'
        @dbus.service.signal(dbus_interface='com.bhdouglass.indicator_remindor', signature='s')
        def command(self, command):
            # Signal carrier: the body is intentionally empty.
            pass
        @dbus.service.method(dbus_interface='com.bhdouglass.indicator_remindor')
        def emitUpdate(self):
            self.command('update')
            return 'Signal emitted'
        @dbus.service.method(dbus_interface='com.bhdouglass.indicator_remindor')
        def emitStop(self):
            self.command('stop')
            return 'Signal emitted'
        @dbus.service.method(dbus_interface='com.bhdouglass.indicator_remindor')
        def emitManage(self):
            self.command('manage')
            return 'Signal emitted'
        @dbus.service.method(dbus_interface='com.bhdouglass.indicator_remindor')
        def emitAttention(self):
            self.command('attention')
            return 'Signal emitted'
        @dbus.service.method(dbus_interface='com.bhdouglass.indicator_remindor')
        def emitActive(self):
            self.command('active')
            return 'Signal emitted'
        @dbus.service.method(dbus_interface='com.bhdouglass.indicator_remindor')
        def emitClose(self):
            self.command('close')
            return 'Signal emitted'
        def bus(self):
            return self._bus
        def path(self):
            return self._path
        def interface(self):
            return self._interface
except Exception:
    # BUGFIX: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
    # Exception still covers ImportError and dbus registration failures.
    logger.debug('Unable to initialize dbus in remindor_common.dbus_service, these features will be disabled')
|
bhdouglass/remindor-common
|
remindor_common/dbus_service.py
|
Python
|
gpl-3.0
| 2,729
|
[
"Brian"
] |
563c183139a7187dbbcb74cd810d85fbc3f8870b97c48b9c422a7f41c616e0cd
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._hascitation import HasCitation
from ._allcitations import AllCitations
from ._changedsince import ChangedSince
from ._citationprivate import CitationPrivate
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._hasnote import HasNote
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasnoteregexp import HasNoteRegexp
from ._hasreferencecountof import HasReferenceCountOf
from ._hassource import HasSource
from ._hassourceidof import HasSourceIdOf
from ._hassourcenoteregexp import HasSourceNoteRegexp
from ._matchesfilter import MatchesFilter
from ._matchespagesubstringof import MatchesPageSubstringOf
from ._matchesrepositoryfilter import MatchesRepositoryFilter
from ._matchessourcefilter import MatchesSourceFilter
from ._regexpidof import RegExpIdOf
from ._regexpsourceidof import RegExpSourceIdOf
from ._hastag import HasTag
# Rules offered in the citation filter editor, in display order.
# NOTE(review): HasNoteMatchingSubstringOf is imported above but not
# listed here — presumably superseded by HasNoteRegexp; confirm.
editor_rule_list = [
    HasCitation,
    AllCitations,
    ChangedSince,
    CitationPrivate,
    HasGallery,
    HasIdOf,
    HasNote,
    HasNoteRegexp,
    HasReferenceCountOf,
    HasSource,
    HasSourceIdOf,
    HasSourceNoteRegexp,
    MatchesFilter,
    MatchesPageSubstringOf,
    MatchesRepositoryFilter,
    MatchesSourceFilter,
    RegExpIdOf,
    RegExpSourceIdOf,
    HasTag
]
|
beernarrd/gramps
|
gramps/gen/filters/rules/citation/__init__.py
|
Python
|
gpl-2.0
| 2,223
|
[
"Brian"
] |
ee9248ce75ea925fe6731afa1f403ffde13ef8862ab9991d5bc7be166600442b
|
import sys, shutil
sys.path.insert(1, "../../../")
import h2o
import random
def milsong_checkpoint(ip,port):
    """GBM checkpoint test on the million-songs data: train a model with
    random hyperparameters, save/restore it, continue training from the
    checkpoint, then build the equivalent model in a single shot.

    :param ip: h2o cluster ip, supplied by the test runner.
    :param port: h2o cluster port, supplied by the test runner.
    """
    milsong_train = h2o.upload_file(h2o.locate("bigdata/laptop/milsongs/milsongs-train.csv.gz"))
    milsong_valid = h2o.upload_file(h2o.locate("bigdata/laptop/milsongs/milsongs-test.csv.gz"))
    distribution = "gaussian"

    # build first model with randomized hyperparameters
    ntrees1 = random.sample(range(50,100),1)[0]
    max_depth1 = random.sample(range(2,6),1)[0]
    min_rows1 = random.sample(range(10,16),1)[0]
    # BUGFIX: 'print x' statements were Python-2-only syntax; single-argument
    # print() calls behave identically on Python 2 and 3.
    print("ntrees model 1: {0}".format(ntrees1))
    print("max_depth model 1: {0}".format(max_depth1))
    print("min_rows model 1: {0}".format(min_rows1))
    model1 = h2o.gbm(x=milsong_train[1:],y=milsong_train[0],ntrees=ntrees1,max_depth=max_depth1, min_rows=min_rows1,
                     distribution=distribution,validation_x=milsong_valid[1:],validation_y=milsong_valid[0])

    # save the model, then load the model
    model_path = h2o.save_model(model1, name="delete_model", force=True)
    restored_model = h2o.load_model(model_path)
    shutil.rmtree("delete_model")

    # continue building the model from the restored checkpoint
    ntrees2 = ntrees1 + 50
    max_depth2 = max_depth1
    min_rows2 = min_rows1
    print("ntrees model 2: {0}".format(ntrees2))
    print("max_depth model 2: {0}".format(max_depth2))
    print("min_rows model 2: {0}".format(min_rows2))
    model2 = h2o.gbm(x=milsong_train[1:],y=milsong_train[0],ntrees=ntrees2,max_depth=max_depth2, min_rows=min_rows2,
                     distribution=distribution,validation_x=milsong_valid[1:],validation_y=milsong_valid[0],
                     checkpoint=restored_model._id)

    # build the equivalent of model 2 in one shot (intentionally unused:
    # the build itself exercises the non-checkpointed code path)
    model3 = h2o.gbm(x=milsong_train[1:],y=milsong_train[0],ntrees=ntrees2,max_depth=max_depth2, min_rows=min_rows2,
                     distribution=distribution,validation_x=milsong_valid[1:],validation_y=milsong_valid[0])
# Standalone entry point: the h2o test runner calls milsong_checkpoint
# with the cluster ip/port parsed from argv.
if __name__ == "__main__":
    h2o.run_test(sys.argv, milsong_checkpoint)
|
weaver-viii/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_milsongs_largeGBM.py
|
Python
|
apache-2.0
| 1,982
|
[
"Gaussian"
] |
e6700394557860e8132f756632881a2119ab541c1184394baf7dcbb9b449ac75
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from bs4 import BeautifulSoup
import re
from frappe.utils import set_request
from frappe.website.render import render
from frappe.utils import random_string
from frappe.website.doctype.blog_post.blog_post import get_blog_list
from frappe.website.website_generator import WebsiteGenerator
class TestBlogPost(unittest.TestCase):
    """Website rendering tests for the Blog Post website generator."""

    def test_generator_view(self):
        """A published blog post route renders the blog article markup."""
        pages = frappe.get_all('Blog Post', fields=['name', 'route'],
            filters={'published': 1, 'route': ('!=', '')}, limit =1)

        set_request(path=pages[0].route)
        response = render()
        # BUGFIX: was assertTrue(response.status_code, 200) — the second
        # argument of assertTrue is the failure *message*, so the check
        # passed for any non-zero status. assertEqual is what was intended.
        self.assertEqual(response.status_code, 200)

        html = response.get_data().decode()
        self.assertTrue('<article class="blog-content" itemscope itemtype="http://schema.org/BlogPosting">' in html)

    def test_generator_not_found(self):
        """An unpublished blog post's route renders as a 404."""
        pages = frappe.get_all('Blog Post', fields=['name', 'route'],
            filters={'published': 0}, limit =1)

        frappe.db.set_value('Blog Post', pages[0].name, 'route', 'test-route-000')

        set_request(path='test-route-000')
        response = render()
        # BUGFIX: was assertTrue(response.status_code, 404), which could
        # never fail; see test_generator_view.
        self.assertEqual(response.status_code, 404)

    def test_category_link(self):
        """The blog page links to its category page, which lists the post."""
        # Make a temporary Blog Post (and a Blog Category)
        blog = make_test_blog()

        # Visit the blog post page
        set_request(path=blog.route)
        blog_page_response = render()
        blog_page_html = frappe.safe_decode(blog_page_response.get_data())

        # On blog post page find link to the category page
        soup = BeautifulSoup(blog_page_html, "lxml")
        category_page_link = list(soup.find_all('a', href=re.compile(blog.blog_category)))[0]
        category_page_url = category_page_link["href"]

        # Visit the category page (by following the link found in above stage)
        set_request(path=category_page_url)
        category_page_response = render()
        category_page_html = frappe.safe_decode(category_page_response.get_data())

        # Category page should contain the blog post title
        self.assertIn(blog.title, category_page_html)

        # Cleanup afterwards
        frappe.delete_doc("Blog Post", blog.name)
        frappe.delete_doc("Blog Category", blog.blog_category)

    def test_blog_pagination(self):
        """get_blog_list honors its start/limit paging arguments."""
        # Create some Blog Posts for a Blog Category
        category_title, blogs, BLOG_COUNT = "List Category", [], 4

        for index in range(BLOG_COUNT):
            blog = make_test_blog(category_title)
            blogs.append(blog)

        filters = frappe._dict({"blog_category": scrub(category_title)})

        # Assert that get_blog_list returns results as expected
        self.assertEqual(len(get_blog_list(None, None, filters, 0, 3)), 3)
        self.assertEqual(len(get_blog_list(None, None, filters, 0, BLOG_COUNT)), BLOG_COUNT)
        self.assertEqual(len(get_blog_list(None, None, filters, 0, 2)), 2)
        self.assertEqual(len(get_blog_list(None, None, filters, 2, BLOG_COUNT)), 2)

        # Cleanup Blog Post and linked Blog Category
        for blog in blogs:
            frappe.delete_doc(blog.doctype, blog.name)
        frappe.delete_doc("Blog Category", blogs[0].blog_category)
def scrub(text):
    # Delegates to WebsiteGenerator.scrub with None as 'self' — presumably
    # the method does not touch instance state; confirm against the class.
    return WebsiteGenerator.scrub(None, text)
def make_test_blog(category_title="Test Blog Category"):
    """Create (if missing) the Blog Category and a test Blogger, then
    insert and return a new published Blog Post with random title,
    route and content."""
    category_name = scrub(category_title)
    if not frappe.db.exists('Blog Category', category_name):
        frappe.get_doc({
            'doctype': 'Blog Category',
            'title': category_title,
        }).insert()
    if not frappe.db.exists('Blogger', 'test-blogger'):
        frappe.get_doc({
            'doctype': 'Blogger',
            'short_name': 'test-blogger',
            'full_name': 'Test Blogger',
        }).insert()
    return frappe.get_doc({
        'doctype': 'Blog Post',
        'blog_category': category_name,
        'blogger': 'test-blogger',
        'title': random_string(20),
        'route': random_string(20),
        'content': random_string(20),
        'published': 1,
    }).insert()
|
adityahase/frappe
|
frappe/website/doctype/blog_post/test_blog_post.py
|
Python
|
mit
| 3,774
|
[
"VisIt"
] |
bd92984b2bc19e3725910ea0c3269dc34883899565cbe4c37cf6e25978bd7c99
|
import os
import unittest
from custodian.vasp.validators import VasprunXMLValidator, VaspFilesValidator, \
VaspNpTMDValidator
# Directory of canned VASP output fixtures, relative to this test file.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files')
# Original working directory; restored by each TestCase's tearDownClass
# because the tests chdir into fixture directories.
cwd = os.getcwd()
class VasprunXMLValidatorTest(unittest.TestCase):
    """Checks VasprunXMLValidator against canned run directories."""

    def test_check_and_correct(self):
        validator = VasprunXMLValidator()
        # A corrupt vasprun.xml is flagged as a problem...
        os.chdir(os.path.join(test_dir, "bad_vasprun"))
        self.assertTrue(validator.check())
        # ...but an unconverged run still has a valid vasprun.
        os.chdir(os.path.join(test_dir, "unconverged"))
        self.assertFalse(validator.check())

    def test_as_dict(self):
        validator = VasprunXMLValidator()
        roundtripped = VasprunXMLValidator.from_dict(validator.as_dict())
        self.assertIsInstance(roundtripped, VasprunXMLValidator)

    @classmethod
    def tearDownClass(cls):
        # Undo the chdir calls made by the tests above.
        os.chdir(cwd)
class VaspFilesValidatorTest(unittest.TestCase):
    """Checks VaspFilesValidator against canned run directories."""

    def test_check_and_correct(self):
        validator = VaspFilesValidator()
        # Example where CONTCAR is not present: flagged as a problem.
        os.chdir(os.path.join(test_dir, "positive_energy"))
        self.assertTrue(validator.check())
        # A complete output set passes.
        os.chdir(os.path.join(test_dir, "postprocess"))
        self.assertFalse(validator.check())

    def test_as_dict(self):
        validator = VaspFilesValidator()
        roundtripped = VaspFilesValidator.from_dict(validator.as_dict())
        self.assertIsInstance(roundtripped, VaspFilesValidator)

    @classmethod
    def tearDownClass(cls):
        # Undo the chdir calls made by the tests above.
        os.chdir(cwd)
class VaspNpTMDValidatorTest(unittest.TestCase):
    """Checks VaspNpTMDValidator on NpT/NVT AIMD fixture runs."""

    def test_check_and_correct(self):
        validator = VaspNpTMDValidator()
        # NPT-AIMD produced by a correct VASP build: clean.
        os.chdir(os.path.join(test_dir, "npt_common"))
        self.assertFalse(validator.check())
        # NVT-AIMD produced by a correct VASP build: also clean.
        os.chdir(os.path.join(test_dir, "npt_nvt"))
        self.assertFalse(validator.check())
        # NPT-AIMD produced by an incorrect VASP build: flagged.
        os.chdir(os.path.join(test_dir, "npt_bad_vasp"))
        self.assertTrue(validator.check())

    def test_as_dict(self):
        validator = VaspNpTMDValidator()
        roundtripped = VaspNpTMDValidator.from_dict(validator.as_dict())
        self.assertIsInstance(roundtripped, VaspNpTMDValidator)

    @classmethod
    def tearDownClass(cls):
        # Undo the chdir calls made by the tests above.
        os.chdir(cwd)
# Allow running this test module directly via the unittest runner.
if __name__ == "__main__":
    unittest.main()
|
xhqu1981/custodian
|
custodian/vasp/tests/test_validators.py
|
Python
|
mit
| 2,302
|
[
"VASP"
] |
2d69c5a9042c73ce4865f1c107e311f9bbc6a3d4bc9add6f6989a4d70d8dc7ca
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
from TestHarnessTestCase import TestHarnessTestCase
class TestHarnessTester(TestHarnessTestCase):
    """Tests of the TestHarness --recover option."""

    def testRecover(self):
        """
        Test that --recover returns two passing statuses (part1 and the OK)
        """
        output = self.runTests('-i', 'always_ok', '--recover').decode('utf-8')
        self.assertIn('PART1', output)
        self.assertIn('RECOVER', output)

        # Assert if not exactly two tests ran and passed
        self.assertIn('2 passed', output)
        self.assertIn('0 skipped', output)
        self.assertIn('0 failed', output)

    def testRecoverPart1Fail(self):
        """
        Test that --recover still checks status on Part1 tests
        """
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            self.runTests('-i', 'exception_transient', '--recover').decode('utf-8')

        e = cm.exception
        output = e.output.decode('utf-8')
        # BUGFIX: assertRegexpMatches is a deprecated alias that was removed
        # in Python 3.12; assertRegex is the supported name (since 3.2).
        self.assertRegex(output, r'test_harness.*?part1.*?FAILED \(CRASH\)')
|
nuclear-wizard/moose
|
python/TestHarness/tests/test_Recover.py
|
Python
|
lgpl-2.1
| 1,329
|
[
"MOOSE"
] |
8e1875258afb7ad9f6c3bc7ef1e8263dc363287e93a415b5a4ff604b2fbf0fca
|
#-----------------------------------------------------
# Author: Yaqiang Wang
# Date: 2014-12-27
# Purpose: MeteoInfo Dataset module
# Note: Jython
#-----------------------------------------------------
from org.meteoinfo.data.meteodata import MeteoDataType
from ucar.ma2 import DataType
from ucar.nc2 import Attribute
from dimvariable import DimVariable, TDimVariable
from mipylib.numeric.dimarray import DimArray, PyGridData, PyStationData
from mipylib.geolib.milayer import MILayer, MIXYListData
from mipylib.numeric.miarray import MIArray
from mipylib.dataframe.dataframe import DataFrame
import mipylib.miutil as miutil
import mipylib.numeric.minum as minum
import datetime
from java.util import Calendar
from java.lang import Float
import jarray
# Dimension dataset
class DimDataFile(object):
# dataset must be org.meteoinfo.data.meteodata.MeteoDataInfo
def __init__(self, dataset=None, access='r', ncfile=None, arldata=None, bufrdata=None):
self.dataset = dataset
self.access = access
if not dataset is None:
self.filename = dataset.getFileName()
self.nvar = dataset.getDataInfo().getVariableNum()
self.fill_value = dataset.getMissingValue()
self.proj = dataset.getProjectionInfo()
self.ncfile = ncfile
self.arldata = arldata
self.bufrdata = bufrdata
def __getitem__(self, key):
if isinstance(key, basestring):
vnames = self.dataset.getDataInfo().getVariableNames()
if key in vnames:
return DimVariable(self.dataset.getDataInfo().getVariable(key), self)
else:
print key + ' is not a variable name'
raise ValueError()
else:
print key + ' is not a variable name'
raise ValueError()
def __str__(self):
if self.dataset is None:
return 'None'
return self.dataset.getInfoText()
def __repr__(self):
if self.dataset is None:
return 'None'
return self.dataset.getInfoText()
def close(self):
'''
Close the opended dataset
'''
if not self.dataset is None:
self.dataset.close()
elif not self.ncfile is None:
self.ncfile.close()
elif not self.arldata is None:
self.arldata.closeDataFile()
elif not self.bufrdata is None:
self.bufrdata.closeDataFile()
def dimensions(self):
'''
Get dimensions
'''
return self.dataset.getDataInfo().getDimensions()
def finddim(self, name):
'''
Find a dimension by name
:name: (*string*) Dimension name
'''
for dim in self.dataset.getDataInfo().getDimensions():
if name == dim.getShortName():
return dim
return None
def attributes(self):
'''
Get global attributes.
'''
return self.dataset.getDataInfo().getGlobalAttributes()
def attrvalue(self, key):
'''
Get a global attribute value by key.
'''
attr = self.dataset.getDataInfo().findGlobalAttribute(key)
if attr is None:
return None
v = MIArray(attr.getValues())
return v
def variables(self):
'''
Get all variables.
'''
return self.dataset.getDataInfo().getVariables()
def varnames(self):
'''
Get all variable names.
'''
return self.dataset.getDataInfo().getVariableNames()
def read(self, varname, origin=None, size=None, stride=None):
'''
Read data array from a variable.
:varname: (*string*) Variable name
'''
if origin is None:
return self.dataset.read(varname)
else:
return self.dataset.read(varname, origin, size, stride)
def dump(self):
'''
Print data file information
'''
print self.dataset.getInfoText()
def read_dataframe(self):
'''
Read data frame from dataset.
'''
df = self.dataset.getDataInfo().readDataFrame()
return DataFrame(dataframe=df)
def read_table(self):
'''
Read data table from dataset.
'''
dt = self.dataset.getDataInfo().readTable()
return minum.datatable(dt)
def griddata(self, varname='var', timeindex=0, levelindex=0, yindex=None, xindex=None):
if self.dataset.isGridData():
self.dataset.setTimeIndex(timeindex)
self.dataset.setLevelIndex(levelindex)
gdata = PyGridData(self.dataset.getGridData(varname))
return gdata
else:
return None
def stationdata(self, varname='var', timeindex=0, levelindex=0):
if self.dataset.isStationData():
self.dataset.setTimeIndex(timeindex)
self.dataset.setLevelIndex(levelindex)
sdata = PyStationData(self.dataset.getStationData(varname))
return sdata
else:
return None
def stinfodata(self):
'''
Get station info data
'''
if self.dataset.isStationData():
sidata = self.dataset.getStationInfoData()
return sidata
else:
return None
def smodeldata(self, timeindex=0, levelindex=0):
'''
Get station model data.
'''
if self.dataset.isStationData():
self.dataset.setTimeIndex(timeindex)
self.dataset.setLevelIndex(levelindex)
smdata = self.dataset.getStationModelData()
return smdata
else:
return None
def trajlayer(self):
'''
Create trajectory polyline layer.
'''
if self.dataset.isTrajData():
return MILayer(self.dataset.getDataInfo().createTrajLineLayer())
else:
return None
def trajplayer(self):
'''
Create trajectory point layer.
'''
if self.dataset.isTrajData():
return MILayer(self.dataset.getDataInfo().createTrajPointLayer())
else:
return None
def trajsplayer(self):
'''
Create trajectory start point layer.
'''
if self.dataset.isTrajData():
return MILayer(self.dataset.getDataInfo().createTrajStartPointLayer())
else:
return None
def trajvardata(self, varidx, hourx=False):
'''
Get trajectory variable data.
'''
if self.dataset.isTrajData():
if hourx:
return MIXYListData(self.dataset.getDataInfo().getXYDataset_HourX(varidx))
else:
return MIXYListData(self.dataset.getDataInfo().getXYDataset(varidx))
else:
return None
def timenum(self):
"""
Get time dimension length
:returns: (*int*) Time dimension length.
"""
return self.dataset.getDataInfo().getTimeNum()
def gettime(self, idx):
'''
Get time by index.
:param idx: (*int*) Time index.
:returns: (*datetime*) The time
'''
t = self.dataset.getDataInfo().getTimes().get(idx)
t = miutil.pydate(t)
return t
def gettimes(self):
'''
Get time list.
'''
tt = self.dataset.getDataInfo().getTimes()
times = []
for t in tt:
times.append(miutil.pydate(t))
return times
def bigendian(self, big_endian):
'''
Set dataset as big_endian or little_endian. Only for GrADS binary data.
:param big_endian: (*boolean*) Big endian or not.
'''
datatype = self.dataset.getDataInfo().getDataType()
if datatype.isGrADS() or datatype == MeteoDataType.HYSPLIT_Conc:
self.dataset.getDataInfo().setBigEndian(big_endian)
def tostation(self, varname, x, y, z, t):
'''
Interpolate data to a point.
'''
if isinstance(t, datetime.datetime):
cal = Calendar.getInstance()
cal.set(t.year, t.month - 1, t.day, t.hour, t.minute, t.second)
t = cal.getTime()
if z is None:
return self.dataset.toStation(varname, x, y, t)
else:
return self.dataset.toStation(varname, x, y, z, t)
####################################################################
#Write netCDF data
def adddim(self, dimname, dimsize, group=None):
'''
Add a dimension.
:param dimname: (*string*) Dimension name.
:param dimsize: (*int*) Dimension size.
:param group: None means global dimension.
'''
return self.ncfile.addDimension(group, dimname, dimsize)
def addgroupattr(self, attrname, attrvalue, group=None, float=False):
'''
Add a global attribute.
:param attrname: (*string*) Attribute name.
:param attrvalue: (*object*) Attribute value.
:param group: None means global attribute.
:param float: (*boolean*) Transfer data as float or not.
'''
if float:
if isinstance(attrvalue, (list, tuple)):
for i in range(len(attrvalue)):
attrvalue[i] = Float(attrvalue[i])
else:
attrvalue = Float(attrvalue)
if isinstance(attrvalue, MIArray):
attrvalue = attrvalue.array
return self.ncfile.addGroupAttribute(group, Attribute(attrname, attrvalue))
def __getdatatype(self, datatype):
if isinstance(datatype, str):
if datatype == 'string':
dt = DataType.STRING
elif datatype == 'int':
dt = DataType.INT
elif datatype == 'long':
dt = DataType.LONG
elif datatype == 'float':
dt = DataType.FLOAT
elif datatype == 'double':
dt = DataType.DOUBLE
elif datatype == 'char':
dt = DataType.CHAR
else:
dt = DataType.STRING
return dt
else:
return datatype
def addvar(self, varname, datatype, dims, group=None):
'''
Add a variable.
:param varname: (*string*) Variable name.
:param datatype: (*string*) Data type [string | int | long | float | double |
char].
:param dims: (*list*) Dimensions.
'''
dt = self.__getdatatype(datatype)
return DimVariable(ncvariable=self.ncfile.addVariable(group, varname, dt, dims))
def create(self):
'''
Create a netCDF data file according the settings of dimensions, global attributes
and variables
'''
self.ncfile.create()
def write(self, variable, value, origin=None):
'''
Write variable value.
:param variable: (*Variable*) Variable object.
:param value: (*array_like*) Data array to be write.
:param origin: (*list*) Dimensions origin indices. None means all from 0.
'''
if isinstance(value, MIArray):
value = value.array
if self.access == 'c':
ncvariable = variable.ncvariable
else:
ncvariable = self.dataset.getDataInfo().findNCVariable(variable.name)
if origin is None:
self.ncfile.write(ncvariable, value)
else:
origin = jarray.array(origin, 'i')
self.ncfile.write(ncvariable, origin, value)
def flush(self):
'''
Flush the data.
'''
self.ncfile.flush()
def largefile(self, islarge=True):
'''
Set the netCDF file is large file (more than 2G) nor not.
:param islarge: (*boolean*) Is large file or not.
'''
self.ncfile.setLargeFile(islarge)
##################################################################
# Write ARL data
def setx(self, x):
'''
Set x (longitude) dimension value.
:param x: (*array_like*) X dimension value.
'''
self.arldata.setX(x.aslist())
def sety(self, y):
'''
Set y (latitude) dimension value.
:param y: (*array_like*) Y dimension value.
'''
self.arldata.setY(y.aslist())
def setlevels(self, levels):
'''
Set vertical levels.
:param leveles: (*list*) Vertical levels.
'''
if isinstance(levels, MIArray):
levels = levels.aslist()
if levels[0] != 1:
levels.insert(0, 1)
self.arldata.levels = levels
def set2dvar(self, vnames):
'''
Set surface variables (2 dimensions ignore time dimension).
:param vnames: (*list*) Variable names.
'''
self.arldata.LevelVarList.add(vnames)
def set3dvar(self, vnames):
'''
Set level variables (3 dimensions ignore time dimension).
:param vnames: (*list*) Variable names.
'''
self.arldata.LevelVarList.add(vnames)
def getdatahead(self, proj, model, vertical, icx=0, mn=0):
'''
Get data head.
:param proj: (*ProjectionInfo*) Projection information.
:param model: (*string*) Model name with 4 characters.
:param vertical: (*int*) Vertical coordinate system flag. 1-sigma (fraction);
2-pressure (mb); 3-terrain (fraction); 4-hybrid (mb: offset.fraction)
:param icx: (*int*) Forecast hour (>99 the header forecast hr = 99)
:param mn: (*int*) Minutes associated with data time.
'''
return self.arldata.getDataHead(proj, model, vertical, icx, mn)
def writeindexrec(self, t, datahead, ksums=None):
'''
Write index record.
:param t: (*datatime*) The time of the data.
:param datahead: (*DataHeader') Data header of the record.
:param ksums: (*list*) Check sum list.
'''
cal = Calendar.getInstance()
cal.set(t.year, t.month - 1, t.day, t.hour, t.minute, t.second)
t = cal.getTime()
self.arldata.writeIndexRecord(t, datahead, ksums)
def writedatarec(self, t, lidx, vname, fhour, grid, data):
'''
Write data record.
:param t: (*datatime*) The time of the data.
:param lidx: (*int*) Level index.
:param vname: (*string*) Variable name.
:param fhour: (*int*) Forecasting hour.
:param grid: (*int*) Grid id to check if the data grid is bigger than 999. Header
record does not support grids of more than 999, therefore in those situations
the grid number is converted to character to represent the 1000s digit,
e.g. @(64)=<1000, A(65)=1000, B(66)=2000, etc.
:param data: (*array_like*) Data array.
:returns: (*int*) Check sum of the record data.
'''
cal = Calendar.getInstance()
cal.set(t.year, t.month - 1, t.day, t.hour, t.minute, t.second)
t = cal.getTime()
ksum = self.arldata.writeGridData(t, lidx, vname, fhour, grid, data.asarray())
return ksum
########################################################################
# Write Bufr data
def write_indicator(self, bufrlen, edition=3):
'''
Write indicator section with arbitrary length.
:param bufrlen: (*int*) The total length of the message.
:param edition: (*int*) Bruf edition.
:returns: (*int*) Indicator section length.
'''
return self.bufrdata.writeIndicatorSection(bufrlen, edition)
def rewrite_indicator(self, bufrlen, edition=3):
'''
Write indicator section with correct length.
:param bufrlen: (*int*) The total length of the message.
:param edition: (*int*) Bruf edition.
'''
self.bufrdata.reWriteIndicatorSection(bufrlen, edition)
def write_identification(self, **kwargs):
'''
Write identification section.
:param length: (*int*) Section length
:param master_table: (*int*) Master table
:param subcenter_id: (*int*) Subcenter id
:param center_id: (*int*) Center id
:param update: (*int*) Update sequency
:param optional: (*int*) Optional
:param category: (*int*) Category
:param sub_category: (*int*) Sub category
:param master_table_version: (*int*) Master table version
:param local_table_version: (*int*) Local table version
:param year: (*int*) Year
:param month: (*int*) Month
:param day: (*int*) Day
:param hour: (*int*) Hour
:param minute: (*int*) Minute
:returns: (*int*) Section length
'''
length = kwargs.pop('length', 18)
master_table = kwargs.pop('master_table', 0)
subcenter_id = kwargs.pop('subcenter_id', 0)
center_id = kwargs.pop('center_id', 74)
update = kwargs.pop('update', 0)
optional = kwargs.pop('optional', 0)
category = kwargs.pop('category', 7)
sub_category = kwargs.pop('sub_category', 0)
master_table_version = kwargs.pop('master_table_version', 11)
local_table_version = kwargs.pop('local_table_version', 1)
year = kwargs.pop('year', 2016)
month = kwargs.pop('month', 1)
day = kwargs.pop('day', 1)
hour = kwargs.pop('hour', 0)
minute = kwargs.pop('minute', 0)
return self.bufrdata.writeIdentificationSection(length, master_table, subcenter_id, center_id,\
update, optional, category, sub_category, master_table_version,\
local_table_version, year, month, day, hour, minute)
def write_datadescription(self, n, datatype, descriptors):
    '''
    Write data description section.

    :param n: (*int*) Numer of dataset.
    :param datatype: (*int*) Data type.
    :param descriptors: (*list*) Data descriptors.

    :returns: result of the underlying writer -- presumably the section
        length, matching the sibling ``write_*`` methods; confirm against
        the Java ``writeDataDescriptionSection`` implementation.
    '''
    return self.bufrdata.writeDataDescriptionSection(n, datatype, descriptors)
def write_datahead(self, len):
    '''
    Write data header with arbitrary data length.

    NOTE(review): the parameter name ``len`` shadows the builtin; it is
    kept because renaming would break keyword-argument callers.

    :param len: (*int*) Data section length.

    :returns: (*int*) Data section head length - always 4.
    '''
    return self.bufrdata.writeDataSectionHead(len)
def rewrite_datahead(self, len):
    '''
    Write data header with correct data length.

    Companion to ``write_datahead``: re-writes the head once the actual
    data section length is known.

    :param len: (*int*) Data section length.
    '''
    self.bufrdata.reWriteDataSectionHead(len)
def write_data(self, value, nbits=None):
    '''
    Write data.

    :param value: (*int*) Value.
    :param nbits: (*int*) Bit number; ``None`` is passed through to the
        underlying writer (its default behavior is defined there).

    :returns: (*int*) Data value length.
    '''
    return self.bufrdata.write(value, nbits)
def write_end(self):
    '''
    Write end section ('7777').

    :returns: (*int*) End section length - always 4.
    '''
    return self.bufrdata.writeEndSection()
#*********************************************
# Created by addfiles function in midata module - multiple data files with difference only
# on time dimension.
class DimDataFiles(list):
    '''
    A sequence of ``DimDataFile`` objects that differ only in their time
    dimension.  Files are kept sorted by their first time stamp so the
    combined time axis (``self.times``) is monotonically increasing.
    '''
    # dataset must be list of DimDataFile
    def __init__(self, dataset=None):
        '''
        :param dataset: (*list of DimDataFile*) Data files to combine; they
            are sorted here by their first time.
        '''
        list.__init__(self)
        if dataset is None:
            # Avoid the mutable default argument the original used
            # (``dataset=[]`` is shared across calls).
            dataset = []
        # Insertion-sort the incoming files by their first time stamp.
        ndataset = []
        ftimes = []
        for ds in dataset:
            if len(ndataset) == 0:
                ndataset.append(ds)
                ftimes.append(ds.gettime(0))
            else:
                idx = len(ndataset)
                ftime = ds.gettime(0)
                for i in range(len(ndataset)):
                    if ftime < ftimes[i]:
                        idx = i
                        break
                ndataset.insert(idx, ds)
                ftimes.insert(idx, ftime)
        self.extend(ndataset)
        # Combined time axis, per-file time counts and total time count.
        self.times = []
        self.tnums = []
        self.tnum = 0
        for ds in ndataset:
            tts = ds.gettimes()
            self.times.extend(tts)
            self.tnums.append(len(tts))
            self.tnum += len(tts)

    def append(self, ddf):
        '''
        Append a data file and extend the combined time axis.

        :param ddf: (*DimDataFile*) The data file to append.
        '''
        # BUGFIX: the original called ``self.append(ddf)`` here, recursing
        # infinitely.  Delegate to the list implementation instead.
        list.append(self, ddf)
        tts = ddf.gettimes()
        self.times.extend(tts)
        self.tnums.append(len(tts))
        self.tnum += len(tts)

    def __getitem__(self, key):
        '''
        A string key returns a time-merged variable across all files;
        any other key behaves like a normal list lookup.
        '''
        if isinstance(key, str):
            return TDimVariable(self[0].dataset.getDataInfo().getVariable(key), self)
        else:
            return list.__getitem__(self, key)

    def filenames(self):
        '''
        Get file names.

        :returns: File name list
        '''
        return [df.filename for df in self]

    def datafileindex(self, t):
        """
        Get the index of the data file that contains time ``t``.

        :param t: (*datetime or int*) Time value or index on the combined axis.
        :returns: (*int*) Data file index
        """
        if isinstance(t, datetime.datetime):
            t = self.timeindex(t)
        nn = 0
        idx = 0
        for n in self.tnums:
            nn += n
            if t < nn:
                break
            idx += 1
        return idx

    def datafile(self, t):
        """
        Get data file by time.

        :param t: (*datetime or int*) Time value or index.
        :returns: (*DimDataFile*) Data file
        """
        idx = self.datafileindex(t)
        return self[idx]

    def dftindex(self, t):
        '''
        Get data file index and the time index within that file.

        :param t: (*datetime or int*) Time value or index.
        :returns: (*tuple of int*) Data file index and time index within it.
        '''
        if isinstance(t, datetime.datetime):
            t = self.timeindex(t)
        nn = 0
        dfidx = 0
        tidx = 0
        sn = 0
        for n in self.tnums:
            nn += n
            if t < nn:
                tidx = t - sn
                break
            dfidx += 1
            sn = nn
        return dfidx, tidx

    def timeindex(self, t):
        '''
        Get the index of a time on the combined (ascending) time axis.

        :param t: (*datetime*) Given time
        :returns: (*int*) Index of the first time that is >= ``t``.
        '''
        # BUGFIX: the original tested ``t >= tt``, which matches the very
        # first (earliest) entry for any in-range ``t`` and therefore always
        # returned 0.  Since ``self.times`` is ascending, break on the first
        # entry that is >= ``t``.
        idx = 0
        for tt in self.times:
            if t <= tt:
                break
            idx += 1
        return idx

    def gettime(self, idx):
        '''
        Get time by index.

        :param idx: (*int*) Time index.
        :returns: (*datetime*) The time
        '''
        return self.times[idx]

    def varnames(self):
        '''
        Get variable names of the first data file.
        '''
        return self[0].varnames()
#############################################
|
meteoinfo/meteoinfolab
|
pylib/mipylib/dataset/dimdatafile.py
|
Python
|
lgpl-3.0
| 24,253
|
[
"NetCDF"
] |
d6c2a0d6d1903c2612901cdadce04af274bfb8b28da5808e445fd2255ceee08a
|
#!/usr/bin/env python
# Convert DES_BCC Galaxy catalogs (Risa Wechler et al.) to a Root tree
#
# NOTE(review): Python 2 script (print statements); requires astropy and
# root_numpy.  Reads a FITS catalog and writes <same name>.root next to it.
import os
import argparse
import numpy as np
from astropy.io import fits
from root_numpy import array2root

# Only the input catalog path is taken from the command line.
parser = argparse.ArgumentParser(description="Convert a DES_BCC Galay Catalog (fits) into a Root tree")
parser.add_argument("--input", action="store", help="input file path")
args = parser.parse_args()

# Derive the output path: same directory, same basename, ".root" suffix.
ext = os.path.splitext(args.input)[1]
fn = os.path.splitext(args.input)[0].split("/")[-1]
path = os.path.dirname(args.input)
output = os.path.join(path, fn + ".root")

# The catalog table lives in the first FITS extension.
hdulist = fits.open(args.input)
bcc = hdulist[1].data
length = len(bcc)

# Root N-Tuple content description
root = np.zeros(length, dtype=[('id','i4'),('index','i4'),('ra','f4'),('dec','f4'),('z','f4'),('gamma1','f4'),('gamma2','f4'),('kappa','f4'),
    ('size','f4'),('eps1','f4'),('eps2','f4'),('mag','f4'),('teps1','f4'),('teps2','f4'),('tra','f4'),('tdec','f4'),('mu','f4'),('tsize','f4')])

# Copy columns one by one; the prints are progress markers for large files.
root['id'] = bcc['ID']
print "ID done..."
root['index'] = bcc['INDEX']
print "INDEX done..."
root['ra'] = bcc['RA']
print "RA done..."
root['dec'] = bcc['DEC']
print "DEC done"
root['z'] = bcc['Z']
print "Z done"
root['gamma1'] = bcc['GAMMA1']
print "GAMMA1 done..."
root['gamma2'] = bcc['GAMMA2']
print "GAMMA2 done..."
root['kappa'] = bcc['KAPPA']
print "KAPPA done..."
root['size'] = bcc['SIZE']
print "SIZE done..."
# Vector columns: EPSILON/TE hold two components, TMAG holds one band per
# column (index 2 is used here -- presumably a specific filter; confirm).
root['eps1'] = bcc["EPSILON"][0:,0]
print "EPSILON 1 done..."
root['eps2'] = bcc["EPSILON"][0:,1]
print "EPSILON 2 done..."
root["mag"] = bcc["TMAG"][0:,2]
print "TMAG done..."
root["teps1"] = bcc["TE"][0:,0]
print "TEPS1 done..."
root["teps2"] = bcc["TE"][0:,1]
print "TEPS2 done..."
root["tra"] = bcc["TRA"]
print "TRA done..."
root["tdec"] = bcc["TDEC"]
print "TDEC done..."
root["tsize"] = bcc["TSIZE"]
print "TSIZE done..."
root["mu"] = bcc["MU"]
print "All Done !"
# Write the structured array as a Root tree named 'bcc'.
array2root(root, output, 'bcc')
|
boutigny/GPU4Cosmo
|
util/readCat.py
|
Python
|
gpl-2.0
| 1,897
|
[
"Galaxy"
] |
d0e85151c53dbfcd5cf4a3b0f793f577147d105dde7adf59b0143625d92a7422
|
# Copyright (C) 2016 The BET Development Team
# -*- coding: utf-8 -*-
import numpy as np
from dolfin import *
from meshDS import meshDS
from projectKL import projectKL
from poissonRandField import solvePoissonRandomField
import scipy.io as sio
import sys
def computeSaveKL(numKL):
    '''
    ++++++++++++++++ Steps in Computing the Numerical KL Expansion ++++++++++
    We proceed by loading the mesh and defining the function space for which
    the eigenfunctions are defined upon.
    Then, we define the covariance kernel which requires correlation lengths
    and a standard deviation.
    We then compute and save the terms in a truncated KL expansion.
    +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    :param numKL: number of Karhunen-Loeve terms to compute and save.

    Side effects: reads "Lshaped.xml" from the working directory and writes
    "KL_expansion.mat" via scipy.io.savemat.
    '''
    # Step 1: Set up the Mesh and Function Space
    mesh = Mesh("Lshaped.xml")
    # initialize the mesh to generate connectivity
    mesh.init()
    # Random field is projected on the space of Hat functions in the mesh
    # NOTE(review): V is constructed but never used below -- presumably kept
    # for documentation/parity with the accompanying solver script.
    V = FunctionSpace(mesh, "CG", 1)
    # Step 2: Project covariance in the mesh and get the eigenfunctions
    # Initialize the projectKL object with the mesh
    Lmesh = projectKL(mesh)
    # Create the covariance expression to project on the mesh.
    # etaX/etaY are correlation lengths; C scales the variance.
    etaX = 10.0
    etaY = 10.0
    C = 1
    # Pick your favorite covariance. Popular choices are Gaussian (of course),
    # Exponential, triangular (has finite support which is nice). Check out
    # Ghanem and Spanos' book for more classical options.
    # A Gaussian Covariance
    '''
    cov = Expression("C*exp(-((x[0]-x[1]))*((x[0]-x[1]))/ex - \
    ((x[2]-x[3]))*((x[2]-x[3]))/ey)",
    ex=etaX,ey=etaY, C=C)
    '''
    # An Exponential Covariance
    cov = Expression("C*exp(-fabs(x[0]-x[1])/ex - fabs(x[2]-x[3])/ey)", ex=etaX, ey=etaY, C=C,
                     degree=10)
    # Solve the discrete covariance relation on the mesh
    Lmesh.projectCovToMesh(numKL, cov)
    # Get the eigenfunctions and eigenvalues
    eigen_val = Lmesh.eigen_vals
    eigen_func_mat = np.zeros(
        (numKL, Lmesh.eigen_funcs[0].vector().array().size))
    for i in range(0, numKL):
        eigen_func_mat[i, :] = Lmesh.eigen_funcs[i].vector().array()
    # Pack into a MATLAB-compatible dict and save to KL_expansion.mat.
    kl_mdat = dict()
    kl_mdat['KL_eigen_funcs'] = eigen_func_mat
    kl_mdat['KL_eigen_vals'] = eigen_val
    sio.savemat("KL_expansion", kl_mdat)
|
smattis/BET-1
|
examples/FEniCS/Compute_Save_KL.py
|
Python
|
gpl-3.0
| 2,350
|
[
"Gaussian"
] |
000781159459b815d5687737d4d70af64be7ee1028842cdfdda10a633a148e0e
|
"""
Pydwolla is a client library for Dwolla's API version 2. By using Pydwolla, you can
do Dwolla things like register new user, add bank account, or transfer funds.
For more information, visit https://github.com/roycehaynes/pydwolla
Author: Royce, royce.haynes@gmail.com
Publish Date: 01 Jul 2013
Reference(s):
http://developers.dwolla.com/dev/docs/auth
"""
import requests
import json
import urllib
# Dwolla OAuth/REST endpoints used by all resources below.
BASE_OAUTH_URL = "https://www.dwolla.com/oauth"
BASE_REST_URL = BASE_OAUTH_URL + "/rest"
API_VERSION = "v2"
# Module-level credential state: populated by init() and read as defaults
# throughout the module.
OAUTH_TOKEN = None
CLIENT_ID = None
CLIENT_SECRET = None
class DwollaError(Exception):
    """
    Base class for every error raised by this Dwolla client.

    The raw pieces of the HTTP response are stored on the instance so
    callers can inspect exactly what the API returned.
    """

    def __init__(self, message=None, http_body=None, http_status=None, json_body=None):
        super(DwollaError, self).__init__(message)
        # Independent attribute assignments -- order is irrelevant.
        self.json_body = json_body
        self.http_status = http_status
        self.http_body = http_body
class APIError(DwollaError):
    """Raised for invalid or incomplete API requests (e.g. missing fields)."""
    pass
class APIConnectionError(DwollaError):
    """Raised when the client cannot reach the Dwolla API."""
    pass
class AuthenticationError(DwollaError):
    """Raised when credentials or OAuth tokens are rejected."""
    pass
def init(client_id, client_secret, oauth_token=None):
    """Configure module-level Dwolla credentials.

    :param client_id: application client id (stored in CLIENT_ID).
    :param client_secret: application secret (stored in CLIENT_SECRET).
    :param oauth_token: optional access token; when omitted the existing
        OAUTH_TOKEN is left untouched.
    """
    global CLIENT_ID, CLIENT_SECRET, OAUTH_TOKEN
    CLIENT_ID = client_id
    CLIENT_SECRET = client_secret
    # BUGFIX: the original used ``oauth_token if not None else OAUTH_TOKEN``;
    # ``not None`` is always True, so the "keep existing token" branch never
    # ran (and the very first call with no token read an undefined global).
    if oauth_token is not None:
        OAUTH_TOKEN = oauth_token
def request_token_url(**kwargs):
    """ Use this method to create and return a URL that sends folks to Dwolla's OAuth permissions dialog pop-up.

    Defaults: response_type='code', scope='AccountInfoFull', client_id from
    module state.  Any extra kwargs are appended as query parameters.

    NOTE(review): ``urllib.urlencode`` is Python 2 only; under Python 3 this
    would need ``urllib.parse.urlencode``.
    """
    global CLIENT_ID, BASE_OAUTH_URL, API_VERSION
    data = {
        'client_id': kwargs.get('client_id', CLIENT_ID),
        'response_type': kwargs.get('response_type', 'code'),
        'scope': kwargs.get('scope','AccountInfoFull')
    }
    # Forward any additional, non-conflicting kwargs as query params.
    data.update({k:v for (k,v) in kwargs.items() if k not in data})
    request_token_url = "{0}/{1}/authenticate".format(BASE_OAUTH_URL, API_VERSION)
    return "{0}?{1}".format(request_token_url, urllib.urlencode(data))
def get_oauth_token(**kwargs):
    """ Use this method to exchange code for oauth_token.

    Required kwarg: ``code``.  Optional: client_id / client_secret /
    grant_type (defaulting from module state) and ``redirect_uri``.

    :returns: the access token string on success; otherwise the raw JSON
        error payload returned by Dwolla.
    :raises APIError: when ``code`` is missing.
    """
    global OAUTH_TOKEN, CLIENT_ID, CLIENT_SECRET, BASE_OAUTH_URL, API_VERSION
    try:
        data = {
            'code': kwargs['code'],
            'client_id': kwargs.get('client_id', CLIENT_ID),
            'client_secret': kwargs.get('client_secret', CLIENT_SECRET),
            'grant_type': kwargs.get('grant_type', 'authorization_code')
        }
    except KeyError as e:
        # BUGFIX: the original constructed APIError without raising it, then
        # fell through to a NameError on the undefined ``data``.
        raise APIError(message='Missing required field {0}'.format(e))
    # BUGFIX: the original assigned from an undefined name ``redirect_uri``.
    redirect_uri = kwargs.get('redirect_uri', None)
    if redirect_uri:
        data['redirect_uri'] = redirect_uri
    oauth_token_url = "{0}/{1}/token".format(BASE_OAUTH_URL, API_VERSION)
    resp = requests.get(oauth_token_url, params=data, verify=True)
    if 'access_token' not in resp.json():
        return resp.json()
    OAUTH_TOKEN = resp.json()['access_token']
    return OAUTH_TOKEN
class Resource(object):
    """ Dwolla Resource abstract interface.

    Each instance owns a Requestor for HTTP transport; subclasses override
    the classmethods they support and leave the rest raising
    NotImplementedError.
    """
    def __init__(self, **kwargs):
        # Transport is created per resource instance.
        self.request = Requestor()
    @classmethod
    def retrieve(cls):
        raise NotImplementedError()
    @classmethod
    def create(cls, **kwargs):
        raise NotImplementedError()
    @classmethod
    def all(cls):
        raise NotImplementedError()
    @classmethod
    def delete(cls):
        raise NotImplementedError()
    @classmethod
    def filter(cls):
        raise NotImplementedError()
class Requestor(requests.Session):
    """ Network transport.

    Thin requests.Session wrapper that prefixes the REST base URL, sends
    JSON request bodies, and degrades non-JSON responses into an error dict
    instead of raising.
    """
    def __init__(self, *args, **kwargs):
        super(Requestor, self).__init__(*args, **kwargs)
        self.api_url = BASE_REST_URL
        # NOTE(review): this replaces the Session's default headers wholesale
        # (dropping e.g. User-Agent / Accept-Encoding) -- presumably
        # intentional; confirm before changing.
        self.headers = {
            'Content-Type': 'application/json'
        }
    def get(self, controller, append_slash=True, *args, **kwargs):
        # Build <base>/<controller>[/] and decode the JSON response body.
        url = '{0}/{1}'.format(self.api_url, controller)
        url = '{0}/'.format(url) if append_slash else url
        resp = super(Requestor, self).get(url, *args, **kwargs)
        try:
            return resp.json()
        except ValueError as e:
            # Non-JSON response: surface it as an error payload.
            return {'Success': False, 'Message': 'Something bad happened: {0}'.format(e), 'Response': str(resp.content)}
    def post(self, controller, append_slash=True, *args, **kwargs):
        url = '{0}/{1}'.format(self.api_url, controller)
        url = '{0}/'.format(url) if append_slash else url
        data = kwargs.get('data', None)
        if data:
            # Serialize the payload since Content-Type is application/json.
            kwargs['data'] = json.dumps(data)
        resp = super(Requestor, self).post(url, *args, **kwargs)
        try:
            return resp.json()
        except ValueError as e:
            return {'Success': False, 'Message': 'Something bad happened: {0}'.format(e), 'Response': str(resp.content)}
class User(Resource):
    """ A Dwolla User.
    """
    @classmethod
    def all(cls, **kwargs):
        """ Grabs account information for the authorized user.
        """
        return cls().request.get(
            'users',
            params={'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN)}
        )
    @classmethod
    def retrieve(cls, account_identifier, **kwargs):
        """ Grabs basic information about the given account.

        NOTE(review): kwargs is accepted but never consulted -- the module's
        client credentials are always used; confirm that is intended.
        """
        params = {
            'client_id': CLIENT_ID,
            'client_secret': CLIENT_SECRET
        }
        return cls().request.get(
            'users/{0}'.format(account_identifier),
            append_slash=False,
            params=params
        )
    @classmethod
    def create(cls, **kwargs):
        """ Create a new Dwolla account.

        :raises APIError: when any required field is missing.  Extra kwargs
            are forwarded in the POST body.
        """
        try:
            data = {
                "client_id": kwargs.get('client_id', CLIENT_ID),
                "client_secret": kwargs.get('client_secret', CLIENT_SECRET),
                "pin": kwargs['pin'],
                "email": kwargs['email'],
                "password": kwargs['password'],
                "firstName": kwargs['firstName'],
                "lastName": kwargs['lastName'],
                "address": kwargs['address'],
                "city": kwargs['city'],
                "state": kwargs['state'],
                "zip": kwargs['zip'],
                "phone": kwargs['phone'],
                "dateOfBirth": kwargs['dateOfBirth'],
                "type": kwargs.get('type', 'Personal'),
                "acceptTerms": kwargs.get('acceptTerms', 'false')
            }
        except KeyError as e:
            raise APIError(message="Missing required field {0}".format(e))
        data.update({k:v for (k,v) in kwargs.items() if k not in data})
        return cls().request.post('register', data=data)
class FundingSource(Resource):
    """ Sources of funding (bank accounts attached to a Dwolla account).
    """
    @classmethod
    def create(cls, **kwargs):
        # Attach a new bank account; all four fields are required.
        data = {}
        try:
            data = {
                'account_number': kwargs['account_number'],
                'routing_number': kwargs['routing_number'],
                'name': kwargs['name'],
                'account_type': kwargs['account_type'],
                'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN)
            }
        except KeyError as e:
            raise APIError(message="Missing required field {0}".format(e))
        return cls().request.post('fundingsources', data=data)
    @classmethod
    def all(cls, **kwargs):
        # List the user's funding sources.
        return cls().request.get(
            'fundingsources',
            params={'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN)}
        )
    @classmethod
    def retrieve(cls, funding_id, **kwargs):
        # Fetch a single funding source by id.
        return cls().request.get(
            'fundingsources/{0}'.format(funding_id),
            append_slash=False,
            params={'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN)}
        )
    @classmethod
    def verify(cls, **kwargs):
        # Confirm micro-deposit amounts for a funding source.
        # NOTE(review): this posts to '<funding_id>/verify' without the
        # 'fundingsources/' prefix used by retrieve() -- looks inconsistent;
        # verify against the Dwolla API documentation.
        try:
            data = {
                'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN),
                'deposit1': kwargs['deposit1'],
                'deposit2': kwargs['deposit2'],
            }
            funding_id = kwargs['funding_id']
        except KeyError as e:
            raise APIError(message="Missing required field {0}".format(e))
        return cls().request.post(
            '{0}/verify'.format(funding_id),
            append_slash=False,
            data=data
        )
class Transaction(Resource):
    """ Dwolla money-transfer operations: send, list, fetch and stats.
    """
    @classmethod
    def all(cls, **kwargs):
        """ List all transactions for a user.

        Uses the module OAUTH_TOKEN when set, otherwise client credentials.
        Extra kwargs are forwarded as query parameters.
        """
        params = {}
        if OAUTH_TOKEN:
            params['oauth_token'] = OAUTH_TOKEN
        else:
            params['client_id'] = CLIENT_ID
            params['client_secret'] = CLIENT_SECRET
        params.update({k: v for (k, v) in kwargs.items() if k not in params})
        return cls().request.get(
            'transactions',
            params=params
        )
    @classmethod
    def retrieve(cls, transaction_id, **kwargs):
        """ Fetch a single transaction by id.

        Auth handling mirrors ``all``: OAUTH_TOKEN when available,
        otherwise client credentials; extra kwargs become query params.
        """
        params = {}
        if OAUTH_TOKEN:
            params['oauth_token'] = OAUTH_TOKEN
        else:
            params['client_id'] = CLIENT_ID
            params['client_secret'] = CLIENT_SECRET
        params.update({k: v for (k, v) in kwargs.items() if k not in params})
        return cls().request.get(
            '{0}/{1}'.format('transactions', transaction_id),
            append_slash=False,
            params=params
        )
    @classmethod
    def create(cls, is_guest=False, **kwargs):
        """ Send funds to a user.

        :param is_guest: when True, use the guest-send endpoint, which
            requires the sender's bank and identity details.
        :raises APIError: when any required field is missing.
        """
        controller = 'transactions/send'
        try:
            if is_guest:
                controller = 'transactions/guestsend'
                data = {
                    'client_id': kwargs.get('client_id', CLIENT_ID),
                    'client_secret': kwargs.get('client_secret', CLIENT_SECRET),
                    'firstName': kwargs['firstName'],
                    'lastName': kwargs['lastName'],
                    'emailAddress': kwargs['emailAddress'],
                    'routingNumber': kwargs['routingNumber'],
                    'accountNumber': kwargs['accountNumber'],
                    'accountType': kwargs['accountType'],
                    'destinationId': kwargs['destinationId'],
                    'amount': kwargs['amount']
                }
            else:
                data = {
                    'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN),
                    'pin': kwargs['pin'],
                    'destinationId': kwargs['destinationId'],
                    'amount': kwargs['amount']
                }
        except KeyError as e:
            raise APIError(message="Missing required field {0}".format(e))
        data.update({k: v for (k, v) in kwargs.items() if k not in data})
        return cls().request.post(
            controller,
            append_slash=False,
            data=data
        )
    @classmethod
    def stats(cls, **kwargs):
        """ Fetch transaction statistics for the authorized user. """
        params = {
            'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN)
        }
        # BUGFIX: the original filtered with ``if k not in data`` -- ``data``
        # is undefined in this method, so any extra kwarg raised NameError.
        params.update({k: v for (k, v) in kwargs.items() if k not in params})
        return cls().request.get(
            'transactions/stats',
            append_slash=False,
            params=params
        )
class Request(Resource):
    """ Money requests (asking another user for funds).
    """
    @classmethod
    def retrieve(cls, request_id, **kwargs):
        """ Fetch a single money request by id.

        NOTE(review): kwargs is ignored and the global OAUTH_TOKEN is used
        directly, unlike sibling resources which allow an override; confirm
        that is intended.
        """
        return cls().request.get(
            '{0}/{1}'.format('requests', request_id),
            append_slash=False,
            params={'oauth_token': OAUTH_TOKEN}
        )
    @classmethod
    def create(cls, **kwargs):
        # Required: sourceId and amount; optional oauth_token override.
        try:
            data = {
                'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN),
                'sourceId': kwargs['sourceId'],
                'amount': kwargs['amount']
            }
        except KeyError as e:
            raise APIError(message="Missing required field {0}".format(e))
        data.update({k:v for (k,v) in kwargs.items() if k not in data})
        return cls().request.post(
            'requests',
            data=data
        )
class Balance(Resource):
    """ Represents a user's Dwolla balance.
    """
    @classmethod
    def show(cls, **kwargs):
        # Fetch the current balance for the authorized user.
        return cls().request.get(
            'balance',
            params={'oauth_token': kwargs.get('oauth_token', OAUTH_TOKEN)}
        )
|
roycehaynes/pydwolla
|
dwolla/__init__.py
|
Python
|
bsd-3-clause
| 12,175
|
[
"VisIt"
] |
70d656c5d6a8a2bc4a611117940bc9963e2487971a37020edebe2f2c1584cf1f
|
# TODO
# * default settings for color and rep
# * make the final viewing step a function
# * setup_map(name,levels=,colors=,reps=)
import pymol
from pymol import headering
import Pmw
import Tkinter as TK
import os
import string
class PyMOLMapLoad:
    """
    Dialog that reads column metadata from a crystallographic reflection
    file (MTZ/CIF/CNS) and drives pymol.cmd.map_generate to synthesize an
    electron-density map, then displays it.
    """
    def __init__(self,parent,app,f):
        # parent: Tk parent widget; app: PyMOL external GUI widget (used
        # for window placement in pack()); f: reflection file path.
        self._parent = parent
        self._app    = app
        self._fileName = f
        self._fileData = None
        # model state
        self._amplitudes = None
        self._phases     = None
        self._weights    = None
        self._min_res    = None
        self._max_res    = None
        self._fofc       = None
        self._name_prefix= None
        # reflection file header data -- parser picked by file extension.
        # NOTE(review): any other extension leaves _fileData as None, which
        # fails later in pack(); confirm callers pre-filter file types.
        if f[-3:] in ("MTZ", "mtz"):
            self._fileData = headering.MTZHeader(f)
        elif f[-3:] in ("CIF", "cif"):
            self._fileData = headering.CIFHeader(f)
        elif f[-3:] in ("CNS", "cns", "hkl", "HKL"):
            self._fileData = headering.CNSHeader(f)
    def pack_and_show(self):
        # Convenience: build the dialog then display it.
        self.pack()
        return self.show()
    def pack(self):
        """Build the Pmw dialog: column choosers, resolution fields and
        map options.  Reads column metadata from self._fileData."""
        # MAIN DIALOG
        self._d = Pmw.Dialog(self._parent,
                             buttons = ("OK", "Cancel", "Help"),
                             defaultbutton = "OK",
                             title = "PyMOL Map Generation",
                             command = self.run)
        self._d.geometry("+%d+%d" % (self._app.winfo_reqwidth(), self._app.winfo_reqheight()))
        self._d.withdraw()
        self._d.protocol('WM_DELETE_WINDOW', self.quit)
        #
        # COLUMN LABEL GROUP
        #
        self._col_gp = Pmw.Group(self._d.interior(),
                                 tag_text="Column Labels",)
        self._col_gp.pack(fill='x', expand='yes')
        defaultListHeight = 125
        # Amplitude candidates: F (structure factor) and G type columns.
        FCols = []
        FCols.extend(self._fileData.getColumnsOfType("F"))
        FCols.extend(self._fileData.getColumnsOfType("G"))
        if not len(FCols): FCols = [ "" ]
        self._ampl_chooser = Pmw.ComboBox(self._col_gp.interior(),
                                          label_text = "Amplitudes",
                                          labelpos = "nw",
                                          selectioncommand = self.set_amplitudes,
                                          scrolledlist_items = FCols,
                                          dropdown = 1,
                                          listheight=defaultListHeight,
                                          sticky='ew')
        self._ampl_chooser.pack(fill='both',expand=1,padx=7,pady=4)
        # Let the header parser guess likely FoFc / 2FoFc column pairs.
        _FC, _PC, _looksLike = self._fileData.guessCols("FoFc")
        _2FC, _2PC, _looksLike = self._fileData.guessCols("2FoFc")
        # be nice and choose the most appropriate col
        if _2FC!=None:
            if _2FC in FCols:
                self._ampl_chooser.selectitem(_2FC)
        elif _FC!=None:
            if _FC in FCols:
                self._ampl_chooser.selectitem(_FC)
        else:
            self._ampl_chooser.selectitem(FCols[0])
        # Phase candidates: P type columns.
        PCols = []
        PCols.extend(self._fileData.getColumnsOfType("P"))
        if not len(PCols): PCols = [ "" ]
        self._phase_chooser = Pmw.ComboBox(self._col_gp.interior(),
                                           label_text = "Phases",
                                           labelpos = "nw",
                                           selectioncommand = self.set_phases,
                                           scrolledlist_items = PCols,
                                           dropdown = 1,
                                           listheight=defaultListHeight)
        self._phase_chooser.pack(fill='both', expand=1,padx=7,pady=4)
        # be nice and choose the most appropriate col
        # NOTE(review): selectitem is given PCols.index(...) here but the
        # column NAME above for amplitudes -- Pmw accepts both forms;
        # confirm intentional.
        if _2PC!=None:
            if _2PC in PCols:
                self._phase_chooser.selectitem(PCols.index(_2PC))
        elif _PC!=None:
            if _PC in PCols:
                self._phase_chooser.selectitem(PCols.index(_PC))
        else:
            self._phase_chooser.selectitem(PCols[0])
        # Weight candidates: W and Q type columns; "None" disables weighting.
        WCols = [ "None", ]
        WCols.extend(self._fileData.getColumnsOfType("W"))
        WCols.extend(self._fileData.getColumnsOfType("Q"))
        self._wt_chooser = Pmw.ComboBox(self._col_gp.interior(),
                                        label_text = "Weights",
                                        labelpos = "nw",
                                        selectioncommand = self.set_weights,
                                        scrolledlist_items = WCols,
                                        dropdown = 1,
                                        listheight=defaultListHeight)
        self._wt_chooser.pack(fill='both', expand=1,padx=7,pady=4)
        self._wt_chooser.selectitem("None")
        #
        # INPUT OPTIONS GROUP
        #
        self._input_gp = Pmw.Group(self._d.interior(),
                                   tag_text="Input Options",)
        self._input_gp.pack(fill='both', expand='yes')
        # Resolution limits default to the range found in the file header.
        if self._fileData.reso_min!=None:
            default_min_res = float("%3.5f"%float(self._fileData.reso_min))
        else:
            default_min_res = ""
        if self._fileData.reso_max!=None:
            default_max_res = float("%3.5f"%float(self._fileData.reso_max))
        else:
            default_max_res = ""
        self._min_res_fld = Pmw.EntryField(self._input_gp.interior(),
                                           labelpos="wn",
                                           label_text="Min. Resolution",
                                           value = default_min_res,
                                           validate = { "validator" : 'real' },
                                           entry_width=7,
                                           modifiedcommand=self.set_min_res,
                                           command = self.set_min_res)
        self._min_res_fld.grid(row=1,column=0,rowspan=2,sticky='ew',pady=4)
        self._max_res_fld = Pmw.EntryField(self._input_gp.interior(),
                                           labelpos="wn",
                                           label_text = "Max Resolution",
                                           value = default_max_res,
                                           validate = { "validator" : 'real' },
                                           entry_width=7,
                                           modifiedcommand=self.set_max_res,
                                           command = self.set_max_res)
        self._max_res_fld.grid(row=1,column=1,rowspan=2,sticky='ew',pady=4)
        #
        # MAP OPTIONS GROUP
        #
        self._options_gp = Pmw.Group(self._d.interior(),
                                     tag_text="Map Options",)
        self._options_gp.pack(fill='x', expand='yes')
        self._name_prefix_fld = Pmw.EntryField(self._options_gp.interior(),
                                               labelpos="wn",
                                               label_text = "New Map Name Prefix",
                                               value = "",
                                               validate = { "validator" : 'alphanumeric' },
                                               entry_width=20,
                                               modifiedcommand=self.set_name_prefix,
                                               command = self.set_name_prefix)
        self._name_prefix_fld.pack(fill="x", expand=0, anchor='w')
        # Checkbutton: treat the selected columns as a difference (FoFc) map.
        self._fofc_chooser = Pmw.RadioSelect(self._options_gp.interior(),
                                             command = self.set_fofc,
                                             buttontype="checkbutton",)
        self._fofc_chooser.add("FoFc")
        self._fofc_chooser.pack(fill="none", expand=0, anchor="w")
    def show(self):
        # Display the (previously packed) modal dialog.
        self._d.show()
    def quit(self):
        # Tear down: in standalone dev mode destroy the whole Tk root,
        # otherwise just this dialog.
        if __name__=="__main__":
            # TODO--remove me; use for development only!
            self._parent.destroy()
        else:
            # TODO -- use only this in release
            self._d.destroy()
    # UI SETTERS -- callbacks wired to the Pmw widgets in pack(); each
    # mirrors one widget's current value into the model state.
    def set_amplitudes(self,arg):
        self._amplitudes = arg
    def set_phases(self,arg):
        self._phases = arg
    def set_weights(self,arg):
        self._weights = arg
    def set_min_res(self):
        self._min_res = self._min_res_fld.getvalue()
    def set_max_res(self):
        self._max_res = self._max_res_fld.getvalue()
    def set_fofc(self,arg,state):
        # RadioSelect checkbutton callback: state is the button's on/off.
        self._fofc = state
    def set_name_prefix(self):
        self._name_prefix = self._name_prefix_fld.getvalue()
    def update_state(self):
        # grab all values from the widgets in one sweep (used on OK, in
        # case a callback was missed).
        self._amplitudes = self._ampl_chooser.get()
        self._phases     = self._phase_chooser.get()
        self._weights    = self._wt_chooser.get()
        self._min_res    = self._min_res_fld.getvalue()
        self._max_res    = self._max_res_fld.getvalue()
        # RadioSelect.getvalue() returns the list of selected buttons.
        self._fofc       = len(self._fofc_chooser.getvalue())>0
        self._name_prefix= self._name_prefix_fld.getvalue()
    def report_state(self):
        # Debug helper (Python 2 prints); normally commented out in run().
        print "Here is the state of the box"
        print "Amplitudes:\t%s" % self._amplitudes
        print "Phases    :\t%s" % self._phases
        print "Weights   :\t%s" % self._weights
        print "Min Res   :\t%s" % self._min_res
        print "Max Res   :\t%s" % self._max_res
        print "FoFc      :\t%s" % str(self._fofc)
        print "Name Prefix :\t'%s'" % self._name_prefix
    def show_help(self,msg=None,title=None):
        """Display *msg* (default: map_generate's docstring) in a read-only
        Pmw text dialog titled *title*."""
        # TODO -- CHANGE THE HELP TEXT
        if msg==None:
            helpText = pymol.cmd.map_generate.__doc__
        else:
            helpText = msg
        if title==None:
            title="PyMOL Map Loading Help"
        h = Pmw.TextDialog(self._parent,
                           title=title,)
        h.insert("end", helpText)
        h.configure(text_state='disabled')
    def run(self,action):
        """Dialog button dispatcher: OK generates and displays the map,
        Cancel closes, Help shows the help text."""
        if action=="OK":
            self.update_state()
            #self.report_state()
            # Choose a map name: explicit prefix, or derived from the
            # amplitude column ("dataset/column" -> "column").
            if self._name_prefix==None or self._name_prefix=="":
                # grep the dataset name from amplitudes
                if '/' in self._amplitudes:
                    pfx = string.split(self._amplitudes,'/')
                    # NOTE(review): if the split yields fewer than 2 parts,
                    # pfx stays a list and get_unused_name below receives a
                    # list -- presumably unreachable given the '/' test;
                    # confirm.
                    if len(pfx)>=2:
                        pfx = pfx[1]
                else:
                    pfx = self._amplitudes
            else:
                pfx = self._name_prefix
            # to ensure a clean name
            pfx = pymol.cmd.get_unused_name(pfx)
            # Validate the two required column selections before calling out.
            if not len(self._amplitudes):
                missing_ampl = """
To synthesize a map from reflection data you need to specify at
leastone column for amplitudes and one column for phases.  The
amplitudes column name was blank, and therefore PyMOL cannot create
the map.  Please select an amplitude column name from the file and try
again.
"""
                self.show_help(missing_ampl,"Missing Amplitudes Column Name")
                return None
            if not len(self._phases):
                missing_phases = """
To synthesize a map from reflection data you need to specify at least
one column for amplitudes and one column for phases. The phases column
name was blank, and therefore PyMOL cannot create the map.  Please
select an amplitude column name from the file and try again.
"""
                self.show_help(missing_phases, "Missing Phases Column Name")
                return None
            try:
                r = pymol.cmd.map_generate(pfx, self._fileName,
                                           self._amplitudes, self._phases, self._weights,
                                           self._min_res, self._max_res, 1, 1)
            except pymol.CmdException as e:
                print " MapLoad-Error:", e.args
                return None
            if r==None or r=="None" or r=="":
                print " MapLoad-Error: PyMOL could not load the MTZ file '%s' due to an unspecified error." % self._fileName
                print " MapLoad-Error: This typically occurs with bad data or blank column names.  Please try again"
                print " MapLoad-Error: or contact 'help@schrodinger.com' for more information."
                return None
            skin = pymol._ext_gui.skin
            # Display the freshly generated map using the user's preferred
            # representation setting; suspend updates while building.
            try:
                pymol.cmd.set("suspend_updates", 1)
                if self._fofc:
                    # Difference map: +/- 3 sigma contours by convention.
                    toShow = pymol.cmd.get_setting_text("default_fofc_map_rep")
                    if toShow=="isosurface":
                        pymol.cmd.isosurface(pymol.cmd.get_unused_name(r+"-srf"),
                                             pfx, level=1.0)
                    elif toShow=="isomesh":
                        meshName=pymol.cmd.get_unused_name(r+"-msh3")
                        pymol.cmd.isomesh(meshName, pfx, level=3.0)
                        pymol.cmd.color("green", meshName)
                        meshName=pymol.cmd.get_unused_name(r+"-msh-3")
                        pymol.cmd.isomesh(meshName, pfx, level=-3.0)
                        pymol.cmd.color("red", meshName)
                    else:
                        # setup volume view
                        volName = pymol.cmd.get_unused_name(r+"-vol")
                        pymol.cmd.volume(volName, pfx)
                        # if you don't do this, PyMOL will crash
                        # when it tries to load the panel
                        if skin.volFrame not in skin.dataArea.slaves():
                            skin.toggleFrame(skin.volFrame,startup=1)
                        skin.volFrame.addWithoutGUI(volName, data=3.0, alpha=0.5,
                                                    col=[0.0, 0.85, 0.0 ],
                                                    kind="triplet")
                        skin.volFrame.addWithoutGUI(volName, data=-3.0, alpha=0.5,
                                                    col=[0.85, 0.0, 0.0 ],
                                                    kind="triplet")
                else:
                    # 2FoFc-style map: single 1 sigma contour.
                    toShow = pymol.cmd.get_setting_text("default_2fofc_map_rep")
                    if toShow=="isosurface":
                        surfName=pymol.cmd.get_unused_name(r+"-srf")
                        pymol.cmd.isosurface(surfName, pfx, level=1.0)
                        pymol.cmd.color("blue", surfName)
                    elif toShow=="isomesh":
                        meshName=pymol.cmd.get_unused_name(r+"-msh")
                        pymol.cmd.isomesh(meshName, pfx, level=1.0)
                        pymol.cmd.color("blue", meshName)
                    else:
                        # setup volume view
                        volName = pymol.cmd.get_unused_name(r+"-vol")
                        pymol.cmd.volume(volName, pfx)
                        # if you don't do this, PyMOL will crash
                        # when it tries to load the panel
                        if skin.volFrame not in skin.dataArea.slaves():
                            skin.toggleFrame(skin.volFrame,startup=1)
                        skin.volFrame.addWithoutGUI(volName, data=1.0, alpha=0.5,
                                                    col=[0.0, 0.0, 0.85 ],
                                                    kind="triplet")
            except:
                # NOTE(review): bare except deliberately swallows any display
                # error so suspend_updates is always restored below.
                pass
            finally:
                pymol.cmd.set("suspend_updates", 0)
            if r!=None:
                # setting?
                if pymol.cmd.get_setting_boolean("autoclose_dialogs"):
                    self.quit()
        elif action=="Cancel":
            self.quit()
        elif action=="Help":
            self.show_help()
if __name__=="__main__":
    # Development harness only.
    # BUGFIX: the original called PyMOLMapLoad(a, None) -- __init__ takes
    # (parent, app, f), so that call always raised TypeError.  Use the Tk
    # root as both parent and app (it supports winfo_reqwidth/height) and
    # take the reflection-file path from the command line.
    import sys
    a = TK.Tk()
    map_path = sys.argv[1] if len(sys.argv) > 1 else ""
    t = PyMOLMapLoad(a, a, map_path)
    t.pack_and_show()
    a.mainloop()
|
gratefulfrog/lib
|
python/pmg_tk/PyMOLMapLoad.py
|
Python
|
gpl-2.0
| 15,967
|
[
"PyMOL"
] |
30ec8d0005a8c3852108d18cdeb9a4ab2ae265fa222ca54c58266be625e9781f
|
# Licensed under an MIT open source license - see LICENSE
"""
Utility functions for fil-finder package
"""
import itertools
import numpy as np
from scipy import optimize as op
import thread
import threading
import time
import os
def removearray(l, arr):
    '''
    Remove the first array in *l* that equals *arr* (``list.remove`` cannot
    be used because array comparison is elementwise).  Adapted from
    http://stackoverflow.com/questions/3157374/
    how-do-you-remove-a-numpy-array-from-a-list-of-numpy-arrays

    :raises ValueError: if no matching array is found.
    '''
    for pos, candidate in enumerate(l):
        if np.array_equal(candidate, arr):
            del l[pos]
            return
    raise ValueError('Array not contained in this list.')
def weighted_av(items, weight):
    '''
    Weighted average of *items*, ignoring entries whose weight is NaN.

    :param items: sequence of values.
    :param weight: sequence of weights, same length as *items*.
    :returns: the weighted average; the plain mean when every weight is
        NaN; ``None`` when the valid weights sum to zero.
    '''
    # BUGFIX: the original filtered ``weight`` first and then recomputed the
    # NaN mask from the already-filtered array, so ``items`` was indexed
    # with a mask of the wrong length whenever any weight was NaN.  Compute
    # the mask once so items and weights stay aligned.
    valid = ~np.isnan(weight)
    weight = np.array(weight)[valid]
    if len(weight) == 0:
        return sum(items) / len(items)
    else:
        items = np.array(items)[valid]
        num = sum(items[i] * weight[i] for i in range(len(items)))
        denom = sum(weight[i] for i in range(len(items)))
        return (num / denom) if denom != 0 else None
def raw_input_with_timeout(prompt, timeout=30.0):
    '''
    Manual input with a timeout. Code from
    http://stackoverflow.com/questions/2933399/how-to-set-time-limit-on-input.

    Python 2 only (``thread`` module, ``raw_input``, print statement).

    :param prompt: string shown to the user.
    :param timeout: seconds before the main thread is interrupted.
    :returns: the entered string, or None when the timer fired first.
    '''
    print prompt
    # interrupt_main delivers KeyboardInterrupt to the main thread on expiry.
    timer = threading.Timer(timeout, thread.interrupt_main)
    astring = None
    try:
        timer.start()
        astring = raw_input(prompt)
    except KeyboardInterrupt:
        pass
    timer.cancel()
    return astring
def find_nearest(array, value):
    '''Return the element of *array* closest (absolute difference) to *value*.'''
    nearest_idx = np.argmin(np.abs(array - value))
    return array[nearest_idx]
def timeit(method):
    '''
    Timing decorator from
    https://www.andreas-jung.com/contents/
    a-python-decorator-for-measuring-the-execution-time-of-methods.

    Wraps *method* and prints its wall-clock runtime per call
    (Python 2 print statement).
    '''
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print '%r (%r, %r) %2.2f sec' % \
            (method.__name__, args, kw, te - ts)
        return result
    return timed
##########################################################################
# 2D Gaussian Fit Code from
# http://www.scipy.org/Cookbook/FittingData
# (functions twodgaussian,moments,fit2dgaussian)
##########################################################################
def twodgaussian(h, cx, cy, wx, wy, b):
    '''
    Build a callable 2D Gaussian g(x, y) with height *h*, center (cx, cy),
    widths (wx, wy) and constant background *b*.  From the scipy cookbook
    (see banner above).
    '''
    wx = float(wx)
    wy = float(wy)

    def gauss(x, y):
        ux = ((cx - x) / wx) ** 2.
        uy = ((cy - y) / wy) ** 2.
        return h * np.exp(-(ux + uy) / 2) + b

    return gauss
def moments(data):
    '''
    Estimate initial 2D-Gaussian parameters (h, x, y, wx, wy, b) from image
    moments; used to seed fit2dgaussian.  From the scipy cookbook (see
    banner above).
    '''
    total = data.sum()
    X, Y = np.indices(data.shape)
    # Intensity-weighted centroid.
    x = (X * data).sum() / total
    y = (Y * data).sum() / total
    # Widths from second moments along a central column/row.
    # NOTE(review): col is selected with int(y) but weighted about y (and
    # likewise row/x) -- the axis convention mirrors the cookbook recipe;
    # verify if fits look transposed.
    col = data[:, int(y)]
    wx = np.sqrt(np.abs((np.arange(col.size) - y) ** 2 * col).sum()/col.sum())
    row = data[int(x), :]
    wy = np.sqrt(np.abs((np.arange(row.size) - x) ** 2 * row).sum()/row.sum())
    # Background from the median of all pixels; height above background.
    b = abs(np.median(data.ravel()))
    h = data.max() - b
    return h, x, y, wx, wy, b
def fit2dgaussian(data):
    '''
    Least-squares fit of a 2D Gaussian to *data* (seeded by moments()).

    :returns: (fit, fiterr) -- parameter vector (h, cx, cy, wx, wy, b) and
        its 1-sigma uncertainties; when the covariance is unavailable the
        absolute parameter values are returned as the error estimate.
    '''
    params = moments(data)
    # Residuals between the model evaluated on the pixel grid and the data.
    errorfunction = lambda p: np.ravel(
        twodgaussian(*p)(*np.indices(data.shape)) - data)
    fit, cov = op.leastsq(
        errorfunction, params, maxfev=(1000 * len(data)), full_output=True)[:2]
    if cov is None:  # Bad fit
        fiterr = np.abs(fit)
    else:
        fiterr = np.sqrt(np.diag(cov))
    return fit, fiterr
##########################################################################
# Simple fcns used throughout module
##########################################################################
def chunks(l, n):
    '''Split *l* into consecutive pieces of length *n* (last may be shorter).'''
    return [l[start:start + n] for start in range(0, len(l), n)]
def eight_con():
    '''3x3 array of ones: the 8-connectivity structuring element.'''
    return np.ones([3, 3])
def distance(x, x1, y, y1):
    '''Euclidean distance between points (x, y) and (x1, y1).'''
    dx = x - x1
    dy = y - y1
    return np.sqrt(dx ** 2.0 + dy ** 2.0)
def padwithzeros(vector, pad_width, iaxis, kwargs):
    '''
    np.pad callback: zero-fill the leading and trailing pad regions of
    *vector* in place and return it.
    '''
    left, right = pad_width[0], pad_width[1]
    vector[:left] = 0
    # Guard right == 0: vector[-0:] would address the whole array.
    if right > 0:
        vector[-right:] = 0
    return vector
def padwithnans(vector, pad_width, iaxis, kwargs):
    '''
    np.pad callback: NaN-fill the leading and trailing pad regions of
    *vector* in place and return it.
    '''
    vector[:pad_width[0]] = np.nan
    # BUGFIX: the original unconditionally wrote vector[-pad_width[1]:];
    # when pad_width[1] == 0 that slice is vector[-0:] -- the WHOLE array --
    # so every element was overwritten with NaN.  Guard like padwithzeros.
    if pad_width[1] > 0:
        vector[-pad_width[1]:] = np.nan
    return vector
def round_figs(x, n):
    '''Round *x* to *n* significant figures.'''
    decimal_places = int(n - np.ceil(np.log10(abs(x))))
    return round(x, decimal_places)
def shifter(l, n):
    '''Rotate list *l* left by *n* positions (negative *n* rotates right).'''
    head = l[:n]
    tail = l[n:]
    return tail + head
def product_gen(n):
    '''
    Yield spreadsheet-style labels built from the alphabet *n*:
    'a', 'b', ..., 'aa', 'ab', ... (lengths grow without bound).
    '''
    length = 1
    while True:
        for combo in itertools.product(n, repeat=length):
            yield "".join(combo)
        length += 1
def planck(T, freq):
    '''
    Planck function at temperature *T* and frequency *freq*, with
    hard-coded SI constants.

    NOTE(review): 6.63e-34 is Planck's constant h and 1.38e-23 is k_B; the
    9e16 divisor is presumably c**2 (8.99e16 m^2/s^2) rounded -- confirm
    before relying on absolute values.
    '''
    return ((2.0 * (6.63 * 10 ** (-34)) * freq ** 3) / (9 * 10 ** 16)) *\
        (1 / (np.expm1((6.63 * 10 ** (-34) * freq) / (1.38 * 10 ** (-23) * float(T)))))
def dens_func(B, kappa, I):
    '''
    Column density from intensity *I*, Planck function value *B* and
    opacity *kappa*.

    NOTE(review): the factor of 100 on kappa, the 1e20 divisor and the 4787
    conversion constant are undocumented magic numbers (the trailing
    comment says the result is in solar masses per pc) -- confirm units
    with the caller before modifying.
    '''
    kappa = 100 * kappa
    return (I / (B * 10 ** 20)) * (1 / (kappa)) * 4787  # into sol.mass/pc
def red_chisq(data, fit, nparam, sd):
    '''
    Reduced chi-squared of *fit* against *data* with uncertainties *sd*,
    using N - nparam - 1 degrees of freedom (N = data.shape[0]).
    '''
    dof = data.shape[0] - nparam - 1
    residuals = (fit - data) / sd
    return np.sum(residuals ** 2.) / float(dof)
def try_mkdir(name):
    '''
    Checks if a folder exists, and makes it if it doesn't.

    :param name: directory name, resolved relative to the current working
        directory.  Only a single level is created (os.mkdir, not makedirs).
    '''
    if not os.path.isdir(os.path.join(os.getcwd(), name)):
        os.mkdir(os.path.join(os.getcwd(), name))
def in_ipynb():
    """Best-effort check for running inside an IPython notebook kernel.

    Returns False when no IPython session is active (``get_ipython`` is
    undefined, raising NameError).
    """
    try:
        cfg = get_ipython().config
        return cfg['IPKernelApp']['parent_appname'] == 'ipython-notebook'
    except NameError:
        return False
|
keflavich/fil_finder
|
fil_finder/utilities.py
|
Python
|
mit
| 5,115
|
[
"Gaussian"
] |
e5927f1c17b40e8b0b6a1c0fd607daa1e55c5a015ba00289afdcdd42d07070b7
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the astrophysics-specific units. They are also
available in the `astropy.units` namespace.
"""

from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
                   set_enabled_units)

# To ensure si units of the constants can be interpreted.
set_enabled_units([si])

import numpy as _numpy

# def_unit() injects each unit object into this module's namespace by name
# via the `namespace=_ns` argument.
_ns = globals()

###########################################################################
# LENGTH

def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True,
         doc="astronomical unit: approximately the mean Earth--Sun "
         "distance.")

def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True,
         doc="parsec: approximately 3.26 light-years.")

def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns,
         doc="Solar radius", prefixes=False,
         format={'latex': r'R_{\odot}', 'unicode': 'R⊙'})
def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'],
         _si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius",
         # LaTeX jupiter symbol requires wasysym
         format={'latex': r'R_{\rm J}', 'unicode': 'R♃'})
def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns,
         prefixes=False, doc="Earth radius",
         # LaTeX earth symbol requires wasysym
         format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'})
def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m),
         namespace=_ns, prefixes=True, doc="Light year")

###########################################################################
# AREAS

def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
         doc="barn: unit of area used in HEP")

###########################################################################
# ANGULAR MEASUREMENTS

def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
         namespace=_ns, prefixes=False,
         doc="cycle: angular measurement, a full turn or rotation")

###########################################################################
# MASS

def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns,
         prefixes=False, doc="Solar mass",
         format={'latex': r'M_{\odot}', 'unicode': 'M⊙'})
def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'],
         _si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass",
         # LaTeX jupiter symbol requires wasysym
         format={'latex': r'M_{\rm J}', 'unicode': 'M♃'})
def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns,
         prefixes=False, doc="Earth mass",
         # LaTeX earth symbol requires wasysym
         format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'})
def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
         format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
         format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit ('a' and 'da' prefixes excluded to avoid
# clashing with other unit names)
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
         prefixes=True, exclude_prefixes=['a', 'da'],
         doc="Unified atomic mass unit")

##########################################################################
# ENERGY

# Here, explicitly convert the planck constant to 'eV s' since the constant
# can override that to give a more precise value that takes into account
# covariances between e and h. Eventually, this may also be replaced with
# just `_si.Ryd.to(eV)`.
def_unit(['Ry', 'rydberg'],
         (_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV),
         namespace=_ns, prefixes=True,
         doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg "
         "constant",
         format={'latex': r'R_{\infty}', 'unicode': 'R∞'})

###########################################################################
# ILLUMINATION

def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns,
         prefixes=False, doc="Solar luminance",
         format={'latex': r'L_{\odot}', 'unicode': 'L⊙'})

###########################################################################
# SPECTRAL DENSITY

def_unit((['ph', 'photon'], ['photon']),
         format={'ogip': 'photon', 'vounit': 'photon'},
         namespace=_ns, prefixes=True)
def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz,
         namespace=_ns, prefixes=True,
         doc="Jansky: spectral flux density")
# Uses the `ph` unit defined just above.
def_unit(['R', 'Rayleigh', 'rayleigh'],
         (1e10 / (4 * _numpy.pi)) *
         ph * si.m ** -2 * si.s ** -1 * si.sr ** -1,
         namespace=_ns, prefixes=True,
         doc="Rayleigh: photon flux")

###########################################################################
# MISCELLANEOUS

# Some of these are very FITS-specific and perhaps considered a mistake.
# Maybe they should be moved into the FITS format class?
# TODO: This is defined by the FITS standard as "relative to the sun".
# Is that mass, volume, what?
def_unit(['Sun'], namespace=_ns)

###########################################################################
# EVENTS

def_unit((['ct', 'count'], ['count']),
         format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'},
         namespace=_ns, prefixes=True, exclude_prefixes=['p'])
def_unit((['pix', 'pixel'], ['pixel']),
         format={'ogip': 'pixel', 'vounit': 'pixel'},
         namespace=_ns, prefixes=True)

###########################################################################
# MISCELLANEOUS

def_unit(['chan'], namespace=_ns, prefixes=True)
def_unit(['bin'], namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
         format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
         namespace=_ns, prefixes=True)
def_unit((['bit', 'b'], ['bit']), namespace=_ns,
         prefixes=si_prefixes + binary_prefixes)
# byte is defined as 8 bit; 'd' prefix excluded so 'dB' stays unambiguous
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
         format={'vounit': 'byte'},
         prefixes=si_prefixes + binary_prefixes,
         exclude_prefixes=['d'])
def_unit(['adu'], namespace=_ns, prefixes=True)
def_unit(['beam'], namespace=_ns, prefixes=True)
def_unit(['electron'], doc="Number of electrons", namespace=_ns,
         format={'latex': r'e^{-}', 'unicode': 'e⁻'})
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is apparently disallowed
def_unit(['littleh'], namespace=_ns, prefixes=False,
         doc="Reduced/\"dimensionless\" Hubble constant",
         format={'latex': r'h_{100}'})

# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
         prefixes=[(['m'], ['milli'], 1.e-3)],
         doc="Unit of pressure based on an absolute scale, now defined as "
             "exactly 1/760 of a standard atmosphere")

###########################################################################
# CLEANUP

# Remove the helpers so they do not show up as (fake) units in this
# namespace when it is introspected.
del UnitBase
del def_unit
del si

###########################################################################
# DOCSTRING

# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
|
stargaser/astropy
|
astropy/units/astrophys.py
|
Python
|
bsd-3-clause
| 7,750
|
[
"Dalton"
] |
df0d564a9a79b30f97724c0935e4f4c87d1c0f0898a8a271b9ed6f8bb500b298
|
"""
This class builds the structure of k-knights and chessboard
"""
from libs.graph.Graph import GraphAdjacenceList
class Knight:
    """A k-knight on a chessboard: board position plus k-value, with cached
    move generation and tour/visit bookkeeping used by the Match solvers."""

    def __init__(self, x, y, k):
        """
        This class builds up the knight with its attributes and methods.
        @param x: int, the row position in chessboard
        @param y: int, the column position in chessboard
        @param k: int, the value of k-knight
        """
        self._row = x
        self._col = y
        self._value = k
        self._k_moves = 0  # number of move-expansion rounds computed so far (0 = none)
        self._moves = []  # cached moves; reset to the integer sentinel 0 by move()/reset()
        self._tour_buffer = []  # temporary buffer of positions seen while building a tour
        self._distance_from_target = -1  # -1 means "distance to target not yet set"
        self._turn_from_target = -1

    def move(self, pos):
        """
        This method allows to move the knight. It also clears the cached moves
        of the previous position (reset to the integer sentinel 0, not []).
        @param pos: tuple, the new position (row, col)
        """
        self._row = pos[0]
        self._col = pos[1]
        self._k_moves = 0
        # NOTE: 0 (not []) is the "moves not yet computed" sentinel tested elsewhere.
        self._moves = 0

    def set_value(self, k):
        """
        This method sets k-value of the knight.
        @param k: int, the new k-value
        """
        self._value = k

    def get_moves(self, r, c, debug=False):
        """
        This method allows to get all the possible moves that a k-knight can be done in a r x c chessboard. It checks if
        the moves was already calculated in this case it return that, otherwise it calculates.
        @param r: int, the number of rows in chessboard
        @param c: int, the number of columns in chessboard
        @param debug: boolean, a boolean that control debug
        @return: list, the list of possible moves
        """
        if self._k_moves == 0:
            moves = []
            # (i, j) with i, j in {-2,-1,1,2} and |i| != |j| enumerates the
            # eight L-shaped knight offsets
            for i in range(-2, 3):
                if i != 0:
                    for j in range(-2, 3):
                        if j != 0 and abs(j) != abs(i):
                            row = self._row + i
                            col = self._col + j
                            if 0 <= row <= r - 1 and 0 <= col <= c - 1:
                                moves.append((row, col))
            if debug:
                print(str(self.get_position()) + " can moves: " + str(moves))
            self._k_moves = 1
            self._moves = moves
            return moves
        else:
            if debug:
                print(str(self.get_position()) + " can moves: " + str(self._moves))
            return self._moves

    def get_other_moves(self, r, c, debug=False):
        """
        This method returns other moves that a k-knight can do. It appends these moves to the already
        calculated ones but it returns only the new moves.
        @param r: int, the number of rows in chessboard
        @param c: int, the number of columns in chessboard
        @param debug: boolean, a boolean that control debug
        @return: list, the list of possible new moves
        """
        moves = []
        assert self._moves != 0, "Non sono ancora state calcolate le mosse base"
        assert self._k_moves < self._value, "Non possono essere calcolate altre mosse"
        # NOTE: range() is evaluated once, so only the moves present at entry
        # are expanded even though self._moves grows inside the loop.
        for n in range(0, len(self._moves)):
            for i in range(-2, 3):
                if i != 0:
                    for j in range(-2, 3):
                        if j != 0 and abs(j) != abs(i):
                            row = self._moves[n][0] + i
                            col = self._moves[n][1] + j
                            if 0 <= row <= r - 1 and 0 <= col <= c - 1:
                                if not (row, col) in self._moves:
                                    moves.append((row, col))
                                    self._moves.append((row, col))
        if debug:
            print(str(self.get_position()) + " can other moves: " + str(moves))
        self._k_moves += 1
        return moves

    def get_knight(self):
        """
        This method returns the knight.
        @return: tuple, the tuple with all attributes of knight
        """
        return self._row, self._col, self._value

    def get_position(self):
        """
        This method returns the position of the knight.
        @return: tuple, the tuple with row position and column position
        """
        return self._row, self._col

    def get_row(self):
        """
        This method return the row position of the knight.
        @return: int, the row position
        """
        return self._row

    def get_col(self):
        """
        This method return the column position of the knight.
        @return: int, the column position
        """
        return self._col

    def get_value(self):
        """
        This method return the value of the knight.
        @return: int, the value
        """
        return self._value

    def set_distance(self, distance):
        """
        Store the distance from the target and derive the number of turns
        needed: each turn the knight covers up to self._value distance units.
        @param distance: int, the distance from the target
        """
        self._distance_from_target = distance
        self._turn_from_target = 0
        if distance != 0:
            while True:
                distance = distance - self._value
                self._turn_from_target += 1
                if distance <= 0:
                    break

    def get_distance(self):
        """
        This method return the distance from the target.
        @return: int, the distance
        """
        assert self._distance_from_target != -1, "Non e' ancora stata settata la distanza dal target"
        return self._distance_from_target

    def get_turn(self):
        """
        This method return the turns that knight have to do in order to arrive in target position.
        @return: int, the turns
        """
        assert self._distance_from_target != -1, "Non e' ancora stata settata la distanza dal target"
        return self._turn_from_target

    def is_found(self):
        """
        Return True once a distance from the target has been set via set_distance().
        @return: bool
        """
        if self._distance_from_target != -1:
            return True
        else:
            return False

    def getMoves(self):
        """
        this function can be used as an interface to manipulate the next-move list
        :return: list, the knight's next moves
        """
        return self._moves

    def refreshBuffer(self):
        """
        this function is invoked to refresh (empty) the moves' buffer
        :return: None
        """
        self._tour_buffer = []

    def calculateWeight(self, dist):
        """
        this function calculate the effective weight of a move using the distance between
        the initial position of the knight and its specific k-value
        :param dist: int, distance of the move
        :return: int, the weight (number of turns) for that distance
        """
        weight = 0
        knight = self.get_value()
        while dist > 0.0:
            weight += 1
            dist -= knight
        return weight

    def singleMove(self, position, rows, cols):
        """
        this function calculate all the available moves of the knight from a specific position
        this function is specifically optimized to check and avoid cycles in the knight moves
        (it is achieved using a list as a buffer of the knight moves previously calculated)
        :param position: tuple, the position by which calculate the moves
        :param rows: int, rows of the chessboard
        :param cols: int, cols of the chessboard
        :return: list, if moves have been added, the list of the moves calculated, else an empty one
        """
        x = position[0]
        y = position[1]
        move_list = []
        for i in range(-2, 3):
            if i == 0:
                continue
            newX = x + i
            if (newX < 0) or (newX > rows - 1):
                continue
            # for |i| == 2 the column offset is +/-1, for |i| == 1 it is +/-2
            if abs(i) % 2 == 0:
                val = abs(i) - 1
                newY = y + val
                newY_bis = y - val
            else:
                val = abs(i) + 1
                newY = y + val
                newY_bis = y - val
            # we will bufferize the moves previously calculated using a support list
            if not (newY < 0 or newY > cols - 1):
                pos = (newX, newY)
                if not pos == self.get_position():
                    if not pos in self._tour_buffer:
                        self._tour_buffer.append(pos)
                        move_list.append(pos)
            if not (newY_bis < 0 or newY_bis > cols - 1):
                pos_bis = (newX, newY_bis)
                if not pos_bis == self.get_position():
                    if not pos_bis in self._tour_buffer:
                        self._tour_buffer.append(pos_bis)
                        move_list.append(pos_bis)
        return move_list

    def completeTour(self, rows, cols):
        """
        this function can be used to calculate a definitive tour for the knight
        :param rows: int, the rows of the chessboard
        :param cols: int, the columns of the chessboard
        :return: None
        """
        count = 1  # a deep-level counter
        value = 1  # a counter to keep trace of the value of the knight during the "visit"
        # using a support list we will extend the fringe of the previous calculated moves in order
        # to accomplish an entire knight's tour, keeping trace of the level of the tour from the knight
        moves = [self.get_position()]
        start = 0
        stop = len(moves)
        # if no other move is possible the while ends
        while start != stop:
            for move in moves[start:stop]:
                new_moves = self.singleMove(move, rows, cols)
                for new in new_moves:
                    # edges are stored as (from, to, turn-depth) triples in self._moves
                    self._moves.append((move, new, count))
                moves += new_moves
            start = stop
            stop = len(moves)
            value += 1
            # after self._value expansion rounds a full "turn" is complete
            if value > self.get_value():
                value = 1
                count += 1
class Match:
    """A rows x cols chessboard holding the knights of a match, plus the
    graph machinery used to compute their minimum meeting cost."""

    def __init__(self, r, c):
        """
        This class builds up a r x c chessboard and it keeps a list of knight over it. It also keeps a max, that is the
        maximum k-value among all the k-values of the knights.
        @param r: int, the number of rows of the chessboard
        @param c: int, the number of columns of the chessboard
        """
        self._rows = r
        self._cols = c
        self._max = 0  # highest k-value (different from 1) seen among the knights
        self._num_pieces = 0
        self._knights = []
        self._knights_nodes = []  # graph nodes corresponding to the knights
        self._total_k = 0
        self._graph = GraphAdjacenceList()
        self._knights_found = -1  # counter of knights whose target distance is known
        self._turns = 0
        self._is_finished = False

    def add_knight(self, knight):
        """
        This method allows to add knight to chessboard.
        @param knight: Knight, the knight to add
        """
        # chained comparison: update _max only when value > _max AND value != 1
        if self._max < knight.get_value() != 1:
            self._max = knight.get_value()
        if knight not in self._knights:
            self._knights.append(knight)
            self._total_k += knight.get_value()
        self._num_pieces = len(self._knights)

    def get_knights(self):
        """
        This method returns the list of the knights in the match.
        @return: list, list of knights in the match
        """
        return self._knights

    def get_rows(self):
        """
        This method returns the number of rows of the chessboard.
        @return: int, the number of the rows of the chessboard
        """
        return self._rows

    def get_cols(self):
        """
        This method returns the number of columns of the chessboard.
        @return: int, the number of the columns of the chessboard
        """
        return self._cols

    def get_max(self):
        """
        This method returns maximum k-value among all the k-values of the knights in the chessboard.
        @return: int, the max value of the match
        """
        return self._max

    def view_knights(self):
        """
        This methods prints all the knights of the match.
        """
        for knight in self._knights:
            print(knight.get_knight())

    def view_specs(self):
        """
        This methods prints the specifications of the match.
        """
        print(self._rows, self._cols, self._knights)

    def is_finished(self):
        """
        This methods return true or false in case of the match is finished or not.
        @return: bool, true if match is finished
        """
        return self._is_finished

    def knight_found(self, knight, distance):
        """
        Record that *knight* can reach the target at *distance*; its required
        turns are accumulated into the match total.
        @param knight: Knight, the knight that reached the target
        @param distance: int, its distance from the target
        """
        assert len(self._knights) != self._knights_found + 1, "Sono gia' stati trovati tutti i cavalli"
        self._knights_found += 1
        self._knights[self._knights.index(knight)].set_distance(distance)
        turns = knight.get_turn()
        self._turns += turns

    def finish(self, force=False):
        """
        This methods close the match ONLY if is time to close and calculates the numbers of turns in order to complete itself.
        With force=True the match is closed anyway and its turn count becomes infinity.
        """
        self._knights_found += 1
        if len(self._knights) == self._knights_found:
            self._is_finished = True
        else:
            # not all knights found yet: roll back the counter
            self._knights_found -= 1
            if force:
                self._is_finished = True
                self._turns = float('inf')
            else:
                raise Exception

    def get_turns(self):
        """
        This method returns the numbers of turns in order to complete the match.
        @return: int, the turns to complete match
        """
        return self._turns

    def reset(self):
        """
        This method reset the match.
        """
        for knight in self._knights:
            # reach into Knight internals to restore the "not computed" sentinels
            knight._k_moves = 0
            knight._moves = 0
            knight._distance_from_target = -1
            knight._turn_from_target = -1
        self._knights_found = -1
        self._turns = 0
        self._is_finished = False

    def validate_positions(self, positions, debug=False):
        """
        Filter *positions*, keeping only those that fall inside the board.
        @param positions: list of (row, col) tuples to check
        @param debug: boolean, print the surviving positions when True
        @return: list, the positions inside the board
        """
        valid = []
        for pos in positions:
            if 0 <= pos[0] <= (self._rows - 1) and 0 <= pos[1] <= (self._cols - 1):
                valid.append(pos)
        if debug:
            print("Valide positions: " + str(valid))
        return valid

    def getKnights(self):
        """
        :return: list, the list of knights' nodes
        """
        return self._knights_nodes

    def setGraph(self):
        """
        this function is used as an interface to manipulate the graph
        :return: GraphAdjacenceList, the graph related to the current match
        """
        return self._graph

    def makeGraph(self):
        """
        this function can be used to create a graph by the knights stored
        :return: None
        """
        r = self.get_rows()
        c = self.get_cols()
        # first of all... initializing the knights and storing them as initial nodes of the graph
        for k in self._knights:
            kgt = self.setGraph().insertNode(k.get_position(), k)
            self._knights_nodes.append(kgt)  # storing the list of knights' nodes
            # node with a knight: knight_position + knight_weight
            k.completeTour(r, c)  # calculating the complete tour for every knight
        for knight in self._knights:
            for step in knight.getMoves():
                move_from = step[0]
                move_to = step[1]
                node = self.setGraph().insertNode(move_from)
                moveNode = self.setGraph().insertNode(move_to)
                self.setGraph().linkNode(node, moveNode)
            knight.refreshBuffer()  # just to free some memory...

    def makeGraphBFS(self):
        """
        this function can be used to create a graph by the knights stored
        :return: the minimum convergence distance found while building
                 (NOTE: unlike makeGraph, this variant returns a value)
        """
        r = self.get_rows()
        c = self.get_cols()
        for k in self._knights:
            kgt = self.setGraph().insertNode(k.get_position(), k)
            self._knights_nodes.append(kgt)
            kgt.set_distance(0)  # setting the 0-distance of the knight from its position
            k.completeTour(r, c)
        how_many = len(self.getKnights())
        minimum = float('inf')
        for knight in self._knights:
            for step in knight.getMoves():
                move_from = step[0]
                move_to = step[1]
                node = self.setGraph().insertNode(move_from)
                moveNode = self.setGraph().insertNode(move_to)
                # it is no longer necessary to link the nodes in order to accomplish the visit
                # but it is necessary to update the node deepness using the data from the moves' list
                moveNode.set_distance(step[2])
            knight.refreshBuffer()
        # Python 2 dict iteration; getNodes() returns a tuple whose first item
        # is the node mapping — presumably index -> node (verify in Graph.py)
        for node in self.setGraph().getNodes()[0].itervalues():
            if node.get_count() == how_many:
                if node.get_distance() < minimum:
                    minimum = node.get_distance()
        return minimum

    def minMovesBFS(self):
        """
        this function is the bulge of the problem. It is used to calculate the minimum number of moves
        to make all the knights converge using a previous result by the BFS forest previously build
        :return: int, the minimum moves number
        """
        # selecting knights' nodes and visiting...
        knights = self.getKnights()
        how_many = len(knights)
        forest = self.setGraph().visitNodesBFS(knights)
        # retrieving from the tuple the list of nodes
        nodes = self.setGraph().getNodes()[0]
        # finding the minimum moves number
        minimum = float('inf')
        # examining the forest generated by the visit
        for tree in forest:
            knight = tree.getRoot().getElem().get_weight()
            knight_val = knight.get_value()  # NOTE: unused
            leaves = tree.getLeaves()
            for leaf in leaves:
                dist = leaf.getDistance()
                weight = knight.calculateWeight(dist)
                node = nodes[leaf.getElem().get_index()]
                node.set_distance(weight)
                if node.get_count() == how_many:
                    if node.get_distance() < minimum:
                        minimum = node.get_distance()
        return minimum

    def minMovesDijkstra(self):
        """
        this function is the bulge of the problem. It is used to calculate the minimum number of moves
        to make all the knights converge using a previous result by the forest previously build using
        a Dijkstra algorithm for the shortest path
        :return: int, the minimum moves number
        """
        # selecting knights' nodes and visiting...
        knights = self.getKnights()
        how_many = len(knights)
        forest = self.setGraph().visitDijkstra(knights)
        # retrieving from the tuple the list of nodes
        nodes = self.setGraph().getNodes()[0]
        # finding the minimum moves number
        minimum = float('inf')
        # examining the shortest-paths-tree generated by Dijkstra
        for tree in forest:
            knight = tree.getRoot().getElem().get_weight()
            knight_val = knight.get_value()  # NOTE: unused
            leaves = tree.getLeaves()
            for leaf in leaves:
                dist = leaf.getDistance()
                weight = knight.calculateWeight(dist)
                node = nodes[leaf.getElem().get_index()]
                node.set_distance(weight)
                if node.get_count() == how_many:
                    if node.get_distance() < minimum:
                        minimum = node.get_distance()
        return minimum

    def minMovesFloydWarshall(self):
        """
        this function is the bulge of the problem. It is used to calculate the minimum number of moves
        to make all the knights converge using a previous result by the Floyd-Warshall algorithm
        :return: int, the minimum number of moves
        """
        INF = float('inf')
        FW = self.setGraph().FloydWarshall()
        knights = self.getKnights()
        how_many = len(knights)
        nodes = self.setGraph().getNodes()[0]  # retrieving from the tuple the list of nodes
        # finding the minimum moves number
        minimum = float('inf')
        # examining the knights' paths
        for k_node in knights:
            index = k_node.get_index()
            knight = k_node.get_weight()
            knight_val = knight.get_value()  # NOTE: unused
            for to_index in range(0, len(FW[index])):
                dist = FW[index][to_index]
                if dist != INF:
                    weight = knight.calculateWeight(dist)
                    node = nodes[to_index]
                    node.set_distance(weight)
                    if node.get_count() == how_many:
                        if node.get_distance() < minimum:
                            minimum = node.get_distance()
        return minimum
|
IA-MP/KnightTour
|
libs/structure.py
|
Python
|
mit
| 20,687
|
[
"VisIt"
] |
dac81dee13135fc75ed9a4ad12e72d1b4b5b2392e44c62eaf4980727c4101e04
|
#!/usr/bin/python
import os, sys, string, anydbm
from low import *
from orthomcl import OrthoMCLCluster
# =============================================================================
def usage():
    # Print the two supported invocation modes to stderr and abort
    # (Python 2 print-to-stream syntax).
    print >> sys.stderr, "add significant BLAST hits (e.g. in-paralogs) to an existing orthomcl cluster.\n"
    print >> sys.stderr, "usage: (1) " + sys.argv[0] + " noparalogs.orthomcl.out blastout.add.dbm"
    print >> sys.stderr, " or (2) " + sys.argv[0] + " noparalogs.orthomcl.out all.fasta all.gg all.blastout"
    sys.exit(1)
def plausi():
    # Accept either 2 arguments (mode 1: precomputed dbm file) or
    # 4 arguments (mode 2: build the dbm from fasta/gg/blast files).
    if len(sys.argv) != 3 and len(sys.argv) != 5: usage()
    return sys.argv[1:]
def read_gg(inGG):
    """Parse an orthomcl .gg file.

    Each line is "<species>: <gene1> <gene2> ...". Returns the pair
    (gene2species, species_list): a dict mapping every gene id to its
    species tag, and the species tags in order of first appearance.
    """
    gene2species = {}
    species_list = []
    handle = open(inGG)
    for raw in handle:
        fields = raw.rstrip().split()
        # first column carries a trailing ':' that is stripped off
        tag = str(fields[0])[:-1]
        if tag not in species_list:
            species_list.append(tag)
        for gene in fields[1:]:
            gene2species[gene] = tag
    handle.close()
    return gene2species, species_list
def get_seq_lengths(file):
    """Return {sequence_id: length} for a FASTA file.

    The id is the header text (after '>') up to the first space.
    Fix relative to the original: the file handle is now closed via a
    `with` block (it was opened and never closed — a resource leak).
    The `id` local was also renamed to avoid shadowing the builtin.
    """
    lengthHash = {}
    seq_id = ""
    with open(file) as fo:
        for line in fo:
            line = line.strip()
            if line.startswith(">"):
                seq_id = line[1:]
                if seq_id.count(" ") > 0:
                    seq_id = seq_id[:seq_id.index(" ")]
                lengthHash[seq_id] = 0
            else:
                lengthHash[seq_id] += len(line)
    return lengthHash
def main():
    # Mode 2 (4 args): scan a BLAST tab file for significant within-species
    # hits and cache them in a dbm; mode 1 (2 args): reuse an existing dbm.
    # Then extend every orthomcl cluster with the cached hits.
    args = plausi()
    in_orthomcl = args[0]
    EVALUE = float('1e-20')   # maximum accepted e-value
    IDENTITY = 30.0           # minimum accepted percent identity
    if len(args) == 4:
        in_fasta, in_gg, in_blast = args[1:4]
        gene2species, speciesArray = read_gg(in_gg)
        gene2length = get_seq_lengths(in_fasta)
        dbmfile = in_blast + ".add.dbm"
        dbm = anydbm.open(dbmfile, "c")
        fo = open(in_blast)
        for line in fo:
            line = line.rstrip()
            cols = line.split("\t")
            qid, hid, evalue, identity = cols[0], cols[1], float(cols[10]), float(cols[2])
            # ignore self-hits and between-species hits, check e-value threshold
            if qid == hid: continue
            if gene2species[qid] != gene2species[hid]: continue
            if evalue > EVALUE: continue
            if identity < IDENTITY: continue
            # check that blast alignment spans at least 80% of the longer sequence
            alnlength, qlength, hlength = int(cols[3]), gene2length[qid], gene2length[hid]
            lengthcutoff = 0.80 * max([qlength, hlength])
            if alnlength < lengthcutoff: continue
            # dbm maps qid -> space-separated list of accepted hit ids
            if not dbm.has_key(qid): dbm[qid] = ""
            else: dbm[qid] += " "
            dbm[qid] += hid
        fo.close()
        dbm.close()
    else: dbmfile = args[1]
    dbm = anydbm.open(dbmfile)
    fo = open(in_orthomcl)
    for line in fo:
        o = OrthoMCLCluster(line.rstrip())
        oldsize = o.get_count()
        additions = []
        for geneid, species in o.get_gene_hash().iteritems():
            if not dbm.has_key(geneid): continue
            # collect every cached hit of this cluster member
            [additions.append([x, species]) for x in dbm[geneid].split()]
        for x, species in additions: o.add_gene(x,species)
        o.to_s()
        newsize = o.get_count()
        # report cluster growth on stderr: name, old size, new size
        print >> sys.stderr, "%s\t%s\t%s" %(o.get_name(), oldsize, newsize)

main()
|
lotharwissler/bioinformatics
|
python/orthomcl/add-blasthits-to-cluster.py
|
Python
|
mit
| 2,938
|
[
"BLAST"
] |
c53dd5a392f196dff3dd772814c9bb7142190148d8051ec7791ecf9724d9c168
|
from toee import *
from utilities import *
from batch import *
from itt import *
from math import sqrt, atan2
import _include
from co8Util.PersistentData import *
## Contained in this script
# TS_* constants look like time-stamp variable slots — presumably read via
# tpsts() elsewhere; verify against callers.
TS_CRITTER_KILLED_FIRST_TIME = 504  # KOS monster on Temple Level 1
TS_EARTH_CRITTER_KILLED_FIRST_TIME = 505  # Robe-friendly monster on Temple Level 1
TS_EARTH_TROOP_KILLED_FIRST_TIME = 506  # Earth Temple human troop
TS_CRITTER_THRESHOLD_CROSSED = 509  # Time when you crossed the threshold from killing a monster
#########################################
# Persistent flags/vars/strs #
# Uses keys starting with #
# 'Flaggg', 'Varrr', 'Stringgg' #
#########################################
def get_f(flagkey):
    """Read persistent flag *flagkey* (stored under a 'Flaggg' prefix).

    Returns 0 when the flag was never set, otherwise a bool.
    """
    stored = Co8PersistentData.getData('Flaggg' + str(flagkey))
    if isNone(stored):
        return 0
    return int(stored) != 0
def set_f(flagkey, new_value = 1):
    """Persist flag *flagkey* (stored under a 'Flaggg' prefix); defaults to 1."""
    Co8PersistentData.setData('Flaggg' + str(flagkey), new_value)
def get_v(varkey):
    """Read persistent integer variable *varkey* ('Varrr' prefix); unset reads as 0."""
    stored = Co8PersistentData.getData('Varrr' + str(varkey))
    if isNone(stored):
        return 0
    return int(stored)
def set_v(varkey, new_value):
    """Persist integer variable *varkey* ('Varrr' prefix) and return the stored value."""
    Co8PersistentData.setData('Varrr' + str(varkey), new_value)
    return get_v(varkey)
def inc_v(varkey, inc_amount = 1):
    """Increment persistent variable *varkey* by *inc_amount* (default 1); return the new value."""
    Co8PersistentData.setData('Varrr' + str(varkey), get_v(varkey) + inc_amount)
    return get_v(varkey)
def get_s(strkey):
    """Read persistent string *strkey* ('Stringgg' prefix); unset reads as ''."""
    stored = Co8PersistentData.getData('Stringgg' + str(strkey))
    if isNone(stored):
        return ''
    return str(stored)
def set_s(strkey, new_value):
    """Persist *new_value* (stringized) under string key *strkey* ('Stringgg' prefix)."""
    Co8PersistentData.setData('Stringgg' + str(strkey), str(new_value))
#########################################
# Bitwise NPC internal flags #
# 1-31 #
# Uses obj_f_npc_pad_i_4 #
# obj_f_pad_i_3 is sometimes nonzero #
# pad_i_4, pad_i_5 tested clean on all #
# protos #
#########################################
def npc_set(attachee,flagno):
    # flagno is assumed to be from 1 to 31
    # Sets bit (flagno - 1) in the NPC's obj_f_npc_pad_i_4 integer field.
    exponent = flagno - 1
    if exponent > 30 or exponent < 0:
        print 'error!'
    else:
        abc = pow(2,exponent)
        tempp = attachee.obj_get_int(obj_f_npc_pad_i_4) | abc
        attachee.obj_set_int(obj_f_npc_pad_i_4, tempp)
    return
def npc_unset(attachee,flagno):
    # flagno is assumed to be from 1 to 31
    # Clears bit (flagno - 1): OR-ing first guarantees the bit is set
    # before subtracting it, so the subtraction is always safe.
    exponent = flagno - 1
    if exponent > 30 or exponent < 0:
        print 'error!'
    else:
        abc = pow(2,exponent)
        tempp = (attachee.obj_get_int(obj_f_npc_pad_i_4) | abc) - abc
        attachee.obj_set_int(obj_f_npc_pad_i_4, tempp)
    return
def npc_get(attachee,flagno):
    # flagno is assumed to be from 1 to 31
    # Returns True/False for bit (flagno - 1). NOTE: when flagno is out of
    # range it prints 'error!' and falls through, implicitly returning
    # None (falsy) — callers treating that as "flag unset" still work.
    exponent = flagno - 1
    if exponent > 30 or exponent < 0:
        print 'error!'
    else:
        abc = pow(2,exponent)
        return attachee.obj_get_int(obj_f_npc_pad_i_4) & abc != 0
################################################################
################################################################
################################################################
################################################################
def san_dying(attachee, triggerer):
    # in case the 'script bearer' dies, pass the curse to someone else:
    # move the daemon script hooks (slots 12, 38, 14 -> script 439) from the
    # dying bearer to the first living PC in the party.
    not_found = 1
    for pc in game.party:
        if pc.stat_level_get( stat_hp_current ) > 0 and not_found == 1 and pc.type == obj_t_pc:
            not_found = 0
            attachee.scripts[12] = 0
            attachee.scripts[38] = 0
            pc.scripts[12] = 439 #san_dying
            pc.scripts[38] = 439 #san_new_map
            pc.scripts[14] = 439 #san_exit_combat - executes when exiting combat mode
    return
def san_exit_combat( attachee, triggerer ):
    # On Temple Level 1: once all harpies (proto 14243) in the vicinity are
    # down and combat has ended, spawn the grate NPC (proto 14913) on top of
    # the grate portal (name 120). Bit 6 of global var 455 ensures this
    # happens only once.
    if attachee.map == 5066: # temple level 1
        grate_obj = OBJ_HANDLE_NULL
        for door_candidate in game.obj_list_vicinity( attachee.location, OLC_PORTAL ):
            if (door_candidate.name == 120):
                grate_obj = door_candidate
        if not game.combat_is_active():
            harpies_alive = 0
            # count harpies that are unled, conscious and not dead/dying
            for obj in game.obj_list_vicinity(attachee.location, OLC_NPC):
                if obj.name == 14243 and obj.leader_get() == OBJ_HANDLE_NULL and obj.is_unconscious() == 0 and obj.stat_level_get( stat_hp_current ) > -10:
                    harpies_alive += 1
            if harpies_alive == 0 and (not grate_obj == OBJ_HANDLE_NULL) and game.global_vars[455] & 2**6 == 0:
                game.global_vars[455] |= 2**6
                #grate_obj.object_flag_set(OF_OFF)
                grate_npc = game.obj_create(14913, grate_obj.location)
                grate_npc.move(grate_obj.location, 0, 11.0 )
                grate_npc.rotation = grate_obj.rotation
                #grate_npc.begin_dialog(game.leader, 1000)
    return
def san_dialog(attachee, triggerer):
    # Dialog hook: at the Welcome Wench upstairs (map 5008), a PC left
    # behind gets a different dialog branch depending on whether the
    # attachee is still in the party (line 150) or not (line 200).
    if (game.leader.map == 5008): # Welcome Wench upstairs - PC left behind
        if (attachee in game.party):
            triggerer.begin_dialog(attachee, 150)
        else:
            triggerer.begin_dialog(attachee, 200)
    return SKIP_DEFAULT
def san_new_map( attachee, triggerer ):
cur_map = attachee.map
###########################################
### PC Commentary (float lines/banter) ###
###########################################
if game.party[0].type == obj_t_npc: # leftmost portrait an NPC
daemon_float_comment(attachee, 1)
game.timevent_add( daemon_float_comment, (attachee, 2), 5000, 1)
#######################################
### Global Event Scheduling System ###
#######################################
## Skole Goons
if tpsts('s_skole_goons', 3*24*60*60) == 1 and get_f('s_skole_goons_scheduled') == 0 and get_f('skole_dead') == 0:
set_f('s_skole_goons_scheduled')
if game.quests[42].state != qs_completed and game.global_flags[281] == 0:
# ggf281 - have had Skole Goon encounter
game.quests[42].state = qs_botched
game.global_flags[202] = 1
game.encounter_queue.append(3004)
## Thrommel Reward Encounter - 2 weeks
if tpsts('s_thrommel_reward', 14*24*60*60) == 1 and get_f('s_thrommel_reward_scheduled') == 0:
set_f('s_thrommel_reward_scheduled')
if game.global_flags[278] == 0 and not (3001 in game.encounter_queue):
# ggf278 - have had Thrommel Reward encounter
game.encounter_queue.append(3001)
## Tillahi Reward Encounter - 10 days
if tpsts('s_tillahi_reward', 10*24*60*60) == 1 and get_f('s_tillahi_reward_scheduled') == 0:
set_f('s_tillahi_reward_scheduled')
if game.global_flags[279] == 0 and not (3002 in game.encounter_queue):
# ggf279 - have had Tillahi Reward encounter
game.encounter_queue.append(3002)
## Sargen Reward Encounter - 3 weeks
if tpsts('s_sargen_reward', 21*24*60*60) == 1 and get_f('s_sargen_reward_scheduled') == 0:
set_f('s_sargen_reward_scheduled')
if game.global_flags[280] == 0 and not (3003 in game.encounter_queue):
# ggf280 - have had Sargen Reward encounter
game.encounter_queue.append(3003)
## Ranth's Bandits Encounter 1 - random amount of days (normal distribution, average of 24 days, stdev = 8 days)
if tpsts('s_ranths_bandits_1', game.global_vars[923]*24*60*60) == 1 and get_f('s_ranths_bandits_scheduled') == 0:
set_f('s_ranths_bandits_scheduled')
if game.global_flags[711] == 0 and not (3434 in game.encounter_queue):
# ggf711 - have had Ranth's Bandits Encounter
game.encounter_queue.append(3434)
## Scarlet Brotherhood Retaliation for Snitch Encounter - 10 days
if tpsts('s_sb_retaliation_for_snitch', 10*24*60*60) == 1 and get_f('s_sb_retaliation_for_snitch_scheduled') == 0:
set_f('s_sb_retaliation_for_snitch_scheduled')
if game.global_flags[712] == 0 and not (3435 in game.encounter_queue):
# ggf712 - have had Scarlet Brotherhood Encounter
game.encounter_queue.append(3435)
## Scarlet Brotherhood Retaliation for Narc Encounter - 6 days
if tpsts('s_sb_retaliation_for_narc', 6*24*60*60) == 1 and get_f('s_sb_retaliation_for_narc_scheduled') == 0:
set_f('s_sb_retaliation_for_narc_scheduled')
if game.global_flags[712] == 0 and not (3435 in game.encounter_queue):
# ggf712 - have had Scarlet Brotherhood Encounter
game.encounter_queue.append(3435)
## Scarlet Brotherhood Retaliation for Whistelblower Encounter - 14 days
if tpsts('s_sb_retaliation_for_whistleblower', 14*24*60*60) == 1 and get_f('s_sb_retaliation_for_whistleblower_scheduled') == 0:
set_f('s_sb_retaliation_for_whistleblower_scheduled')
if game.global_flags[712] == 0 and not (3435 in game.encounter_queue):
# ggf712 - have had Scarlet Brotherhood Encounter
game.encounter_queue.append(3435)
## Gremlich Scream Encounter 1 - 1 day
if tpsts('s_gremlich_1', 1*24*60*60) == 1 and get_f('s_gremlich_1_scheduled') == 0:
set_f('s_gremlich_1_scheduled')
if game.global_flags[713] == 0 and not (3436 in game.encounter_queue):
# ggf713 - have had Gremlich Scream Encounter 1
game.encounter_queue.append(3436)
## Gremlich Reset Encounter - 5 days
if tpsts('s_gremlich_2', 5*24*60*60) == 1 and get_f('s_gremlich_2_scheduled') == 0:
set_f('s_gremlich_2_scheduled')
if game.global_flags[717] == 0 and not (3440 in game.encounter_queue):
# ggf717 - have had Gremlich Reset Encounter
game.encounter_queue.append(3440)
## Mona Sport Encounter 1 (pirates vs. brigands) - 3 days
if tpsts('s_sport_1', 3*24*60*60) == 1 and get_f('s_sport_1_scheduled') == 0:
set_f('s_sport_1_scheduled')
if game.global_flags[718] == 0 and not (3441 in game.encounter_queue):
# ggf718 - have had Mona Sport Encounter 1
game.encounter_queue.append(3441)
## Mona Sport Encounter 2 (bugbears vs. orcs melee) - 3 days
if tpsts('s_sport_2', 3*24*60*60) == 1 and get_f('s_sport_2_scheduled') == 0:
set_f('s_sport_2_scheduled')
if game.global_flags[719] == 0 and not (3442 in game.encounter_queue):
# ggf719 - have had Mona Sport Encounter 2
game.encounter_queue.append(3442)
## Mona Sport Encounter 3 (bugbears vs. orcs ranged) - 3 days
if tpsts('s_sport_3', 3*24*60*60) == 1 and get_f('s_sport_3_scheduled') == 0:
set_f('s_sport_3_scheduled')
if game.global_flags[720] == 0 and not (3443 in game.encounter_queue):
# ggf720 - have had Mona Sport Encounter 3
game.encounter_queue.append(3443)
## Mona Sport Encounter 4 (hill giants vs. ettins) - 3 days
if tpsts('s_sport_4', 3*24*60*60) == 1 and get_f('s_sport_4_scheduled') == 0:
set_f('s_sport_4_scheduled')
if game.global_flags[721] == 0 and not (3444 in game.encounter_queue):
# ggf721 - have had Mona Sport Encounter 4
game.encounter_queue.append(3444)
## Mona Sport Encounter 5 (female vs. male bugbears) - 3 days
if tpsts('s_sport_5', 3*24*60*60) == 1 and get_f('s_sport_5_scheduled') == 0:
set_f('s_sport_5_scheduled')
if game.global_flags[722] == 0 and not (3445 in game.encounter_queue):
# ggf722 - have had Mona Sport Encounter 5
game.encounter_queue.append(3445)
## Mona Sport Encounter 6 (zombies vs. lacedons) - 3 days
if tpsts('s_sport_6', 3*24*60*60) == 1 and get_f('s_sport_6_scheduled') == 0:
set_f('s_sport_6_scheduled')
if game.global_flags[723] == 0 and not (3446 in game.encounter_queue):
# ggf723 - have had Mona Sport Encounter 6
game.encounter_queue.append(3446)
## Bethany Encounter - 2 days
if tpsts('s_bethany', 2*24*60*60) == 1 and get_f('s_bethany_scheduled') == 0:
set_f('s_bethany_scheduled')
if game.global_flags[724] == 0 and not (3447 in game.encounter_queue):
# ggf724 - have had Bethany Encounter
game.encounter_queue.append(3447)
if tpsts('s_zuggtmoy_banishment_initiate', 4*24*60*60) == 1 and get_f('s_zuggtmoy_gone') == 0 and game.global_flags[326] == 1 and attachee.map != 5016 and attachee.map != 5019:
set_f('s_zuggtmoy_gone')
import py00262burne_apprentice
py00262burne_apprentice.return_Zuggtmoy( game.leader, game.leader )
##############################################
### End of Global Event Scheduling System ###
##############################################
if game.global_vars[449] & (2**0 + 2**1 + 2**2) != 0: # If set preference for speed
speedup(game.global_vars[449] & (2**0 + 2**1 + 2**2) , game.global_vars[449] & (2**0 + 2**1 + 2**2) )
if game.global_flags[403] == 1: # Test mode enabled; autokill critters!
#game.particles( "sp-summon monster I", game.leader)
#game.timevent_add( autokill, (cur_map, 1), 150 )
autokill(cur_map, autoloot = 1)
for pc in game.party:
pc.identify_all()
if (cur_map == 5004): # Moathouse Upper floor
if game.global_vars[455] & 2**7 != 0: # Secret Door Reveal
for obj in game.obj_list_vicinity( lfa(464, 470), OLC_PORTAL | OLC_SCENERY ):
if obj.obj_get_int( obj_f_secretdoor_flags ) & 2**16 != 0: # OSDF_SECRET_DOOR
obj.obj_set_int( obj_f_secretdoor_flags, obj.obj_get_int( obj_f_secretdoor_flags ) | 2**17 )
elif (cur_map == 5005):
## Moathouse Dungeon
ggv402 = game.global_vars[402]
ggv403 = game.global_vars[403]
if (ggv402 & (2**0) ) == 0:
print "modifying moathouse... \n"
modify_moathouse()
ggv402 |= 2**0
game.global_vars[402] = ggv402
if moathouse_alerted() and (ggv403 & (2**0)) == 0:
moathouse_reg()
ggv403 |= 2**0
game.global_vars[403] = ggv403
elif (cur_map == 5008):
print "Welcome Wench upstairs"
for dude in game.party:
if dude.type == obj_t_pc and dude.scripts[9] == 439 and get_f('pc_dropoff'):
print "Attempting to remove " + str(dude)
game.timevent_add(dude.obj_remove_from_all_groups,(dude), 150, 1)
set_f('pc_dropoff', 0)
elif (cur_map == 5110): ## Temple Ruined Building
game.global_vars[491] |= 2**0
elif (cur_map == 5111): ## Temple Broken Tower - Exterior
game.global_vars[491] |= 2**1
elif (cur_map == 5065): ## Temple Broken Tower - Interior
game.global_vars[491] |= 2**2
elif (cur_map == 5092): ## Temple Escape Tunnel
game.global_vars[491] |= 2**3
elif (cur_map == 5112): ## Temple Burnt Farmhouse
game.global_vars[491] |= 2**4
elif (cur_map == 5064): ## Temple entrance level
found_map_obj = 0
for pc in game.party:
if pc.item_find(11299):
found_map_obj = 1
if not found_map_obj:
map_obj = game.obj_create(11299, game.leader.location)
got_map_obj = 0
pc_index = 0
while got_map_obj == 0 and pc_index < len(game.party):
if game.party[pc_index].is_unconscious() == 0 and game.party[pc_index].type == obj_t_pc:
got_map_obj = game.party[pc_index].item_get(map_obj)
if not got_map_obj:
pc_index += 1
else:
pc_index += 1
if got_map_obj:
game.party[pc_index].scripts[9] = 435
game.party[pc_index].begin_dialog( game.party[pc_index], 1200 )
else:
map_obj.object_flag_set(OF_OFF)
if game.global_vars[455] & 2**7 != 0:
for obj in game.obj_list_vicinity( lfa(500, 500), OLC_SCENERY | OLC_PORTAL ):
if obj.obj_get_int( obj_f_secretdoor_flags) & 2**16: #OSDF_SECRET_DOOR
obj.obj_set_int( obj_f_secretdoor_flags, obj.obj_get_int( obj_f_secretdoor_flags) | 2**17 )
elif (cur_map == 5066): ## Temple Level 1 ##
if get_v(455) & 1 == 0:
record_time_stamp(460)
set_v(455, get_v(455) | 1)
modify_temple_level_1(attachee)
if earth_alerted() and (get_v(454) & 1 == 0) and (game.global_vars[450] & 2**0 == 0) and ( ( game.global_vars[450] & (2**13) ) == 0 ):
set_v(454, get_v(454) | 1)
earth_reg()
xx, yy = location_to_axis(game.leader.location)
if (xx - 421)**2 + (yy-589)**2 <= 400:
game.global_vars[491] |= 2**5
if (xx - 547)**2 + (yy-589)**2 <= 400:
game.global_vars[491] |= 2**6
elif (cur_map == 5067): ## Temple Level 2 ##
if get_v(455) & 2 == 0:
record_time_stamp(461)
set_v(455, get_v(455) | 2)
modify_temple_level_2(attachee)
if water_alerted() and ( get_v(454) & 2 == 0 or ( get_v(454)&(2**6+2**7)==2**6 ) ) and (game.global_vars[450] & 2**0 == 0) and ( ( game.global_vars[450] & (2**13) ) == 0 ):
set_v(454, get_v(454) | 2)
if get_v(454) & (2**6 + 2**7) == 2**6:
set_v(454, get_v(454) | 2**7) # indicate that Oohlgrist and co have been moved to Water
water_reg()
if air_alerted() and (get_v(454) & 4 == 0) and (game.global_vars[450] & 2**0 == 0) and ( ( game.global_vars[450] & (2**13) ) == 0 ):
set_v(454 , get_v(454) | 4)
air_reg()
if fire_alerted() and ( get_v(454) & 2**3 == 0 or ( get_v(454)&(2**4+2**5)==2**4 ) ) and (game.global_vars[450] & 2**0 == 0) and ( ( game.global_vars[450] & (2**13) ) == 0 ):
# Fire is on alert and haven't yet regrouped, or have already regrouped but Oohlgrist was recruited afterwards (2**5) and not transferred yet
set_v(454, get_v(454) | 2**3)
if get_v(454) & (2**4 + 2**5) == 2**4:
set_v(454, get_v(454) | 2**5) # indicate that Oohlgrist and co have been moved
game.global_flags[154] = 1 # Make the Werewolf mirror shut up
fire_reg()
xx, yy = location_to_axis(game.leader.location)
if (xx - 564)**2 + (yy-377)**2 <= 400:
game.global_vars[491] |= 2**7
elif (xx - 485)**2 + (yy-557)**2 <= 1600:
game.global_vars[491] |= 2**8
elif (xx - 485)**2 + (yy-503)**2 <= 400:
game.global_vars[491] |= 2**8
elif (cur_map == 5105): ## Temple Level 3 Lower (Thrommel, Scorpp, Falrinth etc.)
if get_v(455) & 4 == 0:
record_time_stamp(462)
set_v(455, get_v(455) | 4)
xx, yy = location_to_axis(game.leader.location)
if (xx - 406)**2 + (yy-436)**2 <= 400: # Fire Temple Access (near groaning spirit)
game.global_vars[491] |= 2**9
elif (xx - 517)**2 + (yy-518)**2 <= 400: # Air Temple Access (troll keys)
game.global_vars[491] |= 2**10
elif (xx - 552)**2 + (yy-489)**2 <= 400: # Air Temple Secret Door (Scorpp Area)
game.global_vars[491] |= 2**22
elif (xx - 616)**2 + (yy-606)**2 <= 400: # Water Temple Access (lamia)
game.global_vars[491] |= 2**11
elif (xx - 639)**2 + (yy-450)**2 <= 1600: # Falrinth area
game.global_vars[491] |= 2**12
if game.global_vars[455] & 2**7 != 0: # Secret Door Reveal
for obj in game.obj_list_vicinity( lfa(622,503), OLC_PORTAL | OLC_SCENERY ):
if obj.obj_get_int( obj_f_secretdoor_flags) & 2**16: # OSDF_SECRET_DOOR
obj.obj_set_int(obj_f_secretdoor_flags, obj.obj_get_int( obj_f_secretdoor_flags) | 2**17 )
elif (cur_map == 5080): ## Temple Level 4
if get_v(455) & 8 == 0:
record_time_stamp(463)
set_v(455, get_v(455) | 8)
xx, yy = location_to_axis(game.leader.location)
if (xx - 479)**2 + (yy-586)**2 <= 400:
game.global_vars[491] |= 2**13
elif (xx - 477)**2 + (yy-340)**2 <= 400:
game.global_vars[491] |= 2**14
elif (cur_map == 5106): ## secret spiral staircase
game.global_vars[491] |= 2**15
elif (cur_map == 5081): ## Air Node
game.global_vars[491] |= 2**16
elif (cur_map == 5082): ## Earth Node
game.global_vars[491] |= 2**17
elif (cur_map == 5083): ## Fire Node
game.global_vars[491] |= 2**18
elif (cur_map == 5084): ## Water Node
game.global_vars[491] |= 2**19
elif (cur_map == 5079): ## Zuggtmoy Level
game.global_vars[491] |= 2**20
return RUN_DEFAULT
def modify_temple_level_1(pc):
    """Attach san_dying scripts to Temple level 1 monsters and NPCs.

    This lets the game recognize the player slaughtering mobs (the scripts
    feed the temple alert/regroup bookkeeping). Script numbers used:
      441 - generic critter        442 - Earth faction critter
      443 - Temple troop           444 - Earth faction leader
    Some gnolls/goblins are destroyed and recreated as their non-damage-
    reduction proto equivalents (14631/14632/14636), keeping position,
    facing and the dying script.
    """
    def _mark(x, y, proto, radius, san):
        # Give every proto-name match within radius of (x, y) dying script 'san'.
        for critter in vlistxyr(x, y, proto, radius):
            critter.scripts[12] = san
    def _swap(x, y, proto, new_proto, radius, san):
        # Replace DR-bearing critters with the non-DR proto, preserving
        # location and facing, and mark the replacement with script 'san'.
        for critter in vlistxyr(x, y, proto, radius):
            spot = critter.location
            facing = critter.rotation
            critter.destroy()
            fresh = game.obj_create(new_proto, spot)
            fresh.rotation = facing
            fresh.scripts[12] = san
    # gnolls near southern entrance
    _mark(558, 600, 14080, 25, 441)
    _mark(558, 600, 14079, 25, 441)
    _mark(558, 600, 14078, 25, 441)
    # rats
    _mark(497, 573, 14068, 30, 441)  # vapor rats
    _mark(440, 571, 14056, 15, 441)  # dire rats
    _mark(534, 389, 14056, 15, 441)
    # undead near secret staircase
    _mark(462, 520, 14083, 100, 441)
    _mark(462, 520, 14082, 100, 441)
    _mark(462, 520, 14081, 100, 441)
    _mark(496, 515, 14107, 100, 441)
    # Gnoll Leader area
    _mark(509, 534, 14066, 100, 442)
    _mark(518, 531, 14067, 66, 442)
    _mark(518, 531, 14078, 66, 442)  # barbarian gnoll
    _swap(518, 531, 14079, 14631, 66, 442)
    _swap(518, 531, 14080, 14632, 66, 442)
    _swap(511, 549, 14079, 14631, 33, 442)
    _swap(511, 549, 14080, 14632, 33, 442)
    _mark(508, 536, 14249, 35, 442)  # ogre
    _mark(508, 536, 14164, 35, 442)  # bugbear
    # Earth critters near Ogre Chief
    _mark(445, 538, 14078, 50, 442)
    _swap(445, 538, 14079, 14631, 50, 442)
    _swap(445, 538, 14080, 14632, 50, 442)
    _mark(467, 535, 14248, 50, 444)  # Ogre Chief
    _mark(467, 535, 14188, 50, 442)  # hobgoblin
    _mark(467, 535, 14184, 27, 442)  # goblin
    _swap(467, 535, 14186, 14636, 27, 442)
    _mark(467, 535, 14164, 27, 442)  # bugbear
    # Temple Troops near Ogre Chief
    _mark(440, 500, 14337, 30, 443)
    _mark(440, 500, 14338, 30, 443)  # fighter
    # ghouls and ghasts near prisoners (Morgan etc.)
    _mark(545, 535, 14137, 50, 441)
    _mark(550, 545, 14136, 50, 441)
    _mark(545, 553, 14135, 50, 441)
    _mark(549, 554, 14095, 100, 441)
    _mark(549, 554, 14128, 100, 441)
    _mark(549, 554, 14129, 100, 441)
    # harpy area
    _mark(406, 564, 14243, 100, 441)
    _mark(407, 545, 14243, 100, 441)
    _mark(423, 541, 14135, 50, 441)
    _mark(420, 547, 14136, 50, 441)
    _mark(413, 566, 14129, 100, 441)
    _mark(413, 566, 14128, 100, 441)
    _mark(413, 566, 14095, 100, 441)
    _mark(410, 526, 14129, 100, 441)
    _mark(410, 526, 14128, 100, 441)
    _mark(410, 526, 14095, 100, 441)
    # Gray Ooze and Gelatinous Cube
    _mark(415, 599, 14139, 100, 441)
    _mark(415, 599, 14140, 100, 441)
    # spiders near wonnilon hideout
    _mark(438, 398, 14417, 50, 441)
    # ghouls near wonnilon hideout
    _mark(387, 398, 14128, 100, 441)
    # ghouls near northern entrance
    _mark(459, 600, 14129, 100, 441)
    # ogre near southern entrance
    _mark(511, 601, 14448, 100, 441)
    # Temple Troop and bugbear doormen near Earth Commander
    _mark(470, 483, 14337, 25, 443)
    _mark(470, 483, 14165, 25, 442)
    # Temple Troops and bugbears near Earth Commander
    _mark(450, 470, 14156, 35, 444)  # Earth Commander
    _mark(450, 470, 14339, 35, 443)  # lieutenant
    _mark(450, 470, 14337, 35, 443)
    _mark(450, 470, 14165, 35, 442)
    # Earth Altar
    _mark(482, 392, 14337, 50, 443)  # worshippers
    _mark(482, 392, 14381, 50, 442)  # earth elemental
    _mark(483, 420, 14296, 50, 442)  # large earth elemental
    # Hartsch and the bugbear guards around Romag (Romag himself excluded)
    _mark(445, 445, 14154, 11, 444)
    _mark(445, 445, 14162, 11, 442)
    _mark(445, 445, 14163, 11, 442)
    # Bugbears north of Romag
    _mark(427, 435, 14162, 15, 442)
    _mark(427, 435, 14164, 15, 442)
    _mark(427, 435, 14165, 15, 442)
    _mark(418, 443, 14163, 5, 442)
    _mark(435, 427, 14163, 5, 442)
    _mark(435, 427, 14164, 5, 442)
    # Bugbear "Checkpoint"
    _mark(504, 477, 14164, 15, 442)
    _mark(504, 477, 14162, 15, 442)
    _mark(504, 477, 14163, 15, 442)
    _mark(504, 477, 14165, 15, 442)
    # Bugbear "reservists"
    _mark(524, 416, 14164, 15, 442)
    _mark(524, 416, 14163, 15, 442)
    _mark(524, 416, 14162, 15, 442)
    # Wonnilon area
    _mark(546, 418, 14092, 100, 441)
    _mark(546, 418, 14123, 100, 441)
    _mark(546, 418, 14127, 100, 441)
    _mark(546, 418, 14164, 35, 442)
    _mark(546, 435, 14092, 100, 441)
    _mark(546, 435, 14124, 100, 441)
    _mark(546, 435, 14125, 100, 441)
    _mark(546, 435, 14126, 100, 441)
    _mark(546, 435, 14127, 100, 441)
    _mark(546, 435, 14164, 35, 442)
    # Turnkey
    _mark(570, 460, 14165, 15, 442)
    _mark(570, 460, 14229, 15, 443)
    # Ogre and Goblins
    _mark(563, 501, 14186, 50, 441)
    _mark(563, 501, 14187, 50, 441)
    _mark(563, 501, 14185, 50, 441)
    _mark(563, 501, 14448, 50, 441)
    # Stirges
    _mark(410, 491, 14182, 50, 441)
    return
def modify_temple_level_2(pc):
    # Placeholder: Temple level 2 currently needs no modifications.
    return
# Global-flag quick reference for the *_alerted() checks below:
# 104 - Romag (Earth High Priest) is dead
# 105 - Belsornig (Water High Priest) is dead
# 106 - Kelno (Air High Priest) is dead
# 107 - Alrrem (Fire High Priest) is dead
def earth_alerted():
    """Is the Earth Temple on alert? Returns 1/0."""
    if game.global_flags[104] == 1:
        return 0  # Romag is dead - nobody left to raise the alarm
    # An hour has passed since the Earth Altar was defiled, or the Troop
    # Commander was killed at any point.
    if tpsts(512, 1*60*60) == 1 or tpsts(507, 1) == 1:
        return 1
    if tpsts(TS_CRITTER_THRESHOLD_CROSSED, 1):
        slew_earth_member = (tpsts(TS_EARTH_TROOP_KILLED_FIRST_TIME, 3*60) == 1) or (tpsts(TS_EARTH_CRITTER_KILLED_FIRST_TIME, 6*60) == 1)
        finished_quest_1 = game.quests[43].state >= qs_completed
        if slew_earth_member or not finished_quest_1:
            # Either two hours since crossing the critter deathcount
            # threshold, or 48 hours since the first critter kill.
            if tpsts(TS_CRITTER_THRESHOLD_CROSSED, 2*60*60) or tpsts(TS_CRITTER_KILLED_FIRST_TIME, 48*60*60) == 1:
                return 1
    # Two hours since crossing the Earth critter deathcount threshold, or
    # threshold crossed and 24 hours since the first Earth critter kill
    # (covers killing, resting elsewhere, and returning later).
    if tpsts(510, 2*60*60) or (tpsts(510, 1) == 1 and tpsts(505, 24*60*60) == 1):
        return 1
    # Same idea for troops: 1 hour since the threshold, or 12 hours since
    # the first troop kill with the threshold crossed.
    if tpsts(511, 1*60*60) or (tpsts(511, 1) == 1 and tpsts(506, 12*60*60) == 1):
        return 1
    # Killed Belsornig, Kelno or Alrrem before completing the 2nd earth quest
    if tsc(457, 470) or tsc(458, 470) or tsc(459, 470):
        return 1
    return 0
def water_alerted():
    """Is the Water Temple on alert? Returns 1/0."""
    if game.global_flags[105] == 1:
        return 0  # Belsornig is dead
    # Killed Romag, Kelno or Alrrem before accepting the second water quest.
    for rival_killed in (456, 458, 459):
        if tsc(rival_killed, 475) == 1:
            return 1
    return 0
def air_alerted():
    """Is the Air Temple on alert? Returns 1/0."""
    if game.global_flags[106] == 1:
        return 0  # Kelno is dead
    # Any other faction leader dead before Kelno handed out his quest:
    # Kelno doesn't take any chances.
    for rival_killed in (456, 457, 459):
        if tsc(rival_killed, 483):
            return 1
    return 0
def fire_alerted():
    """Is the Fire Temple on alert? Returns 1/0."""
    if game.global_flags[107] == 1:
        return 0  # Alrrem is dead
    # Another High Priest killed before you even talked to Alrrem - enough
    # to alarm him, since he's kind of crazy.
    for rival_killed in (456, 457, 458):
        if tsc(rival_killed, 517):
            return 1
    return 0
################################################################
################################################################
################################################################
################################################################
def is_follower(name):
    # 1 if any party member bears the given proto name, else 0.
    for member in game.party:
        if member.name == name:
            return 1
    return 0
def destroy_weapons(npc, item1, item2, item3):
    """Destroy up to three inventory items (by proto name) carried by npc.

    A proto of 0 means "no item in this slot"; protos the NPC does not
    actually carry are skipped silently. (Replaces three copy-pasted
    find-and-destroy stanzas with one loop.)
    """
    for proto in (item1, item2, item3):
        if proto != 0:
            carried = npc.item_find(proto)
            if carried != OBJ_HANDLE_NULL:
                carried.destroy()
    return
def float_comment(attachee, line):
    # Float the given dialog line over attachee, addressed to the party leader.
    attachee.float_line(line, game.leader)
    return
def daemon_float_comment(attachee, line):
    # PCs normally carry no dialog script; temporarily attach the daemon
    # script (439) so float_line has a .dlg file to pull the line from,
    # then detach it again. NPCs are ignored.
    if attachee.type != obj_t_pc:
        return
    attachee.scripts[9] = 439
    attachee.float_line(line, game.leader)
    attachee.scripts[9] = 0
    return
def proactivity(npc,line_no):
    """Have npc proactively open dialog line_no with a suitable PC.

    Tries the leader (party slot 0) first; failing that, scans the party
    and talks to the first conscious, standing PC the npc can see.
    """
    npc.turn_towards(game.party[0])
    if (critter_is_unconscious(game.party[0]) != 1 and game.party[0].type == obj_t_pc and game.party[0].d20_query(Q_Prone) == 0 and npc.can_see(game.party[0])):
        game.party[0].begin_dialog(npc,line_no)
    else:
        for pc in game.party:
            npc.turn_towards(pc)
            if (critter_is_unconscious(pc) != 1 and pc.type == obj_t_pc and pc.d20_query(Q_Prone) == 0 and npc.can_see(pc)):
                pc.begin_dialog(npc,line_no)
                # BUGFIX: stop at the first valid PC; the original kept
                # looping and re-opened dialog for every later eligible PC.
                return
    return
def tsc( var1, var2 ):
    """Time stamp compare: 1 if the event stamped in var1 happened before var2.

    A zero stamp means 'never happened'. If both events landed in the same
    second the result is best-effort.
    """
    first = get_v(var1)
    second = get_v(var2)
    if first == 0:
        return 0
    if second == 0 or first < second:
        return 1
    return 0
def tpsts(time_var, time_elapsed):
    # type: (object, long) -> long
    # 1 if more than time_elapsed seconds of game time have passed since
    # the stamp stored in time_var; 0 if not, or if the event never
    # happened (stamp == 0).
    stamp = get_v(time_var)
    if stamp == 0:
        return 0
    if game.time.time_game_in_seconds(game.time) > stamp + time_elapsed:
        return 1
    return 0
def record_time_stamp(tvar, time_stamp_overwrite = 0):
    # Stamp the current game time into variable tvar. By default only the
    # first stamp sticks; pass time_stamp_overwrite = 1 to re-stamp.
    if time_stamp_overwrite == 1 or get_v(str(tvar)) == 0:
        set_v(str(tvar), game.time.time_game_in_seconds(game.time))
    return
def pop_up_box(message_id):
    # Tutorial-style popup without touching the real tutorial entries:
    # spawn a throwaway readable object (proto 11001), point it at the
    # message, show it, then destroy it.
    scroll = game.obj_create(11001, game.leader.location)
    scroll.obj_set_int(obj_f_written_text_start_line, message_id)
    game.written_ui_show(scroll)
    scroll.destroy()
    return
def paladin_fall():
    # Strip paladinhood from every party member (e.g. after a vile act).
    for member in game.party:
        member.condition_add('fallen_paladin')
def vlistxyr( xx, yy, name, radius ):
    # Return every NPC with the given proto name within 'radius' tiles of
    # map coordinates (xx, yy).
    matches = []
    for critter in game.obj_list_vicinity( lfa(xx, yy), OLC_NPC ):
        cx, cy = lta(critter.location)
        if critter.name == name and sqrt((cx - xx)**2 + (cy - yy)**2) <= radius:
            matches.append(critter)
    return matches
def can_see2(npc,pc):
    # Obstruction-only line-of-sight check (LOS regardless of facing):
    # temporarily spin the NPC toward the PC, run the engine's can_see,
    # then restore the original facing. Returns 1/0.
    saved_rotation = npc.rotation
    nx, ny = location_to_axis(npc.location)
    px, py = location_to_axis(pc.location)
    # (px-nx, py-ny) points from the NPC to the PC; convert its angle to
    # ToEE's rotation convention before testing sight.
    npc.rotation = 3.14159/2 - ( atan2(py - ny, px - nx) + 5*3.14159/4 )
    seen = npc.can_see(pc)
    npc.rotation = saved_rotation
    if seen:
        return 1
    return 0
def can_see_party(npc):
    # 1 if npc has unobstructed line of sight to anyone in the leader's group.
    for member in game.party[0].group_list():
        if can_see2(npc, member) == 1:
            return 1
    return 0
def is_far_from_party(npc, dist = 20):
    # 1 only if every member of the leader's group is at least 'dist' away.
    for member in game.party[0].group_list():
        if npc.distance_to(member) < dist:
            return 0
    return 1
def is_safe_to_talk_rfv(npc, pc, radius = 20, facing_required = 0, visibility_required = 1):
    """Is it sensible for npc to open dialog with pc? Returns 1/0.

    visibility_required - npc must be capable of seeing pc (pc not
        invisible / sneaking / behind a wall); uses can_see2 (ignores facing).
    facing_required - npc must additionally be looking at pc right now.
    """
    base_ok = pc.type == obj_t_pc and critter_is_unconscious(pc) != 1 and npc.distance_to(pc) <= radius
    if visibility_required == 0:
        if base_ok:
            return 1
    elif visibility_required == 1 and facing_required == 1:
        if base_ok and npc.can_see(pc) == 1:
            return 1
    elif visibility_required == 1:
        if base_ok and can_see2(npc, pc) == 1:
            return 1
    return 0
def within_rect_by_corners(obj, ulx, uly, brx, bry):
    # Point-in-rectangle test in "visual" coordinates: the rectangle's
    # edges run parallel to the screen, i.e. along the diagonals of ToEE's
    # native axes, so we compare (x - y) and (x + y) against the corners.
    xx, yy = location_to_axis(obj.location)
    if (brx - bry) <= (xx - yy) <= (ulx - uly) and (ulx + uly) <= (xx + yy) <= (brx + bry):
        return 1
    return 0
def encroach(a,b):
    # A primitive way of making distant AI combatants who don't close the distances by themselves move towards the player
    # Moves 'a' one tile toward 'b' (diagonal steps allowed). A throwaway
    # probe critter (proto 14631) is spawned at the candidate tile and
    # can_see2(probe, a) is used as a "no wall in between" test before
    # actually moving. Returns -1 if already close (< 30), 1 if a step was
    # taken, 0 if all candidate tiles were blocked.
    b.turn_towards(a)
    if a.distance_to(b) < 30:
        return -1
    ax,ay = location_to_axis(a.location)
    bx,by = location_to_axis(b.location)
    # dx/dy: unit step from a toward b on each axis (0 if already aligned)
    dx = 0
    dy = 0
    if bx > ax:
        dx = 1
    elif bx < ax:
        dx = -1
    if by > ay:
        dy = 1
    elif by < ay:
        dy = -1
    if (ax-bx)**2 > (ay-by)**2: # if X distance is greater than Y distance, starting trying to encroach on the x axis
        aprobe = game.obj_create( 14631, location_from_axis(ax+dx, ay) ) # probe to see if I'm not going into a wall
        aprobe.move(location_from_axis(ax+dx, ay) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
        if can_see2(aprobe,a):
            # X step is clear - take it (keeping a's sub-tile offsets)
            aprobe.destroy()
            a.move(location_from_axis(ax+dx, ay) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
            return 1
        else:
            # X step blocked - try the diagonal next
            aprobe.move(location_from_axis(ax+dx, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
            if can_see2(aprobe,a):
                aprobe.destroy()
                a.move(location_from_axis(ax+dx, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
                return 1
            else:
                # diagonal blocked too - fall back to a pure Y step
                aprobe.move(location_from_axis(ax, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
                if can_see2(aprobe,a):
                    aprobe.destroy()
                    a.move(location_from_axis(ax, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
                    return 1
                else:
                    aprobe.destroy()
                    return 0
    else:
        # Y distance dominates: try Y step first, then diagonal, then X step
        aprobe = game.obj_create( 14631, location_from_axis(ax+dx, ay) ) # probe to see if I'm not going into a wall
        aprobe.move(location_from_axis(ax, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
        if can_see2(aprobe,a):
            aprobe.destroy()
            a.move(location_from_axis(ax, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
            return 1
        else:
            aprobe.move(location_from_axis(ax+dx, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
            if can_see2(aprobe,a):
                aprobe.destroy()
                a.move(location_from_axis(ax+dx, ay+dy) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
                return 1
            else:
                aprobe.move(location_from_axis(ax+dx, ay) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
                if can_see2(aprobe,a):
                    aprobe.destroy()
                    a.move(location_from_axis(ax+dx, ay) , a.obj_get_int(obj_f_offset_x), a.obj_get_int(obj_f_offset_y) )
                    return 1
                else:
                    aprobe.destroy()
                    return 0
    return 0
def buffee( makom , det_range, buff_list, done_list ):
    # Find the first NPC near location 'makom' (within 'det_range' tiles) whose
    # proto name is listed in 'buff_list', who has no leader, and who is not
    # already recorded in 'done_list'.  Used by buffing scripts, e.g. Alrrem:
    #   buffee( attachee.location, 15, [14344], [handle_to_other_werewolf] )
    # Returns OBJ_HANDLE_NULL when nobody qualifies.
    cx, cy = location_to_axis(makom)
    range_sq = det_range**2  # compare squared distances; no sqrt needed
    for wanted in buff_list:
        for npc in game.obj_list_vicinity( makom, OLC_NPC ):
            if npc.name != wanted or (npc in done_list):
                continue
            if npc.leader_get() != OBJ_HANDLE_NULL:
                continue
            px, py = location_to_axis( npc.location )
            if (px - cx)**2 + (py - cy)**2 <= range_sq:
                return npc
    return OBJ_HANDLE_NULL
def modify_moathouse():
    # Rewire the moathouse guardsmen (protos 14074-14077) found around the two
    # patrol spots so their AI events run script 450, and pin the sergeant
    # (14077) in place as kill-on-sight.  The original did this twice with
    # copy-pasted code; here both spots share one loop.
    for spot_x, spot_y in ((490, 535), (512, 549)):
        for npc in game.obj_list_vicinity(location_from_axis(spot_x, spot_y), OLC_NPC):
            if npc.name in range(14074, 14078):
                # hook the standard AI script slots
                for slot in (12, 13, 14, 15, 16):
                    npc.scripts[slot] = 450
                if npc.name == 14077:
                    npc.npc_flag_set(ONF_KOS)
                    npc.scripts[22] = 450 #will kos
                npc.scripts[41] = 450
                # stop the guard from wandering off his post
                npc.npc_flag_unset(ONF_WAYPOINTS_DAY)
                npc.npc_flag_unset(ONF_WAYPOINTS_NIGHT)
    return
def moathouse_alerted():
    # Has the moathouse garrison noticed the party's activity?
    # Returns true either when the door Sergeant was bullied/attacked
    # (flag 363), or when at least two of the four kill-timestamps stored in
    # global_vars 404-407 are old enough for the bodies to have been found.
    if game.global_flags[363] == 1:
        # Bullied or attacked Sergeant at the door
        return 1
    ggv = game.global_vars
    acknowledged = 0
    # (timestamp var, hours before the kill is acknowledged):
    #   404 bugbear group, 405 gnoll group, 406 Lubash, 407 ground-floor brigands
    for var_idx, hours in ((404, 12), (405, 12), (406, 12), (407, 48)):
        if ggv[var_idx] != 0 and ( game.time.time_game_in_seconds(game.time) > ggv[var_idx] + hours*60*60):
            acknowledged += 1
    return ( acknowledged >= 2 )
def moathouse_reg():
    # Redeploy the moathouse ground-floor garrison after the party has been
    # noticed: reposition the corridor guardsmen into ambush spots, swap the
    # talkative Door Sergeant for a quiet one, and (conditionally) post a new
    # door guard.  Presumably called once the garrison is alerted — TODO confirm
    # against the caller.
    found_new_door_guy = 0  # NOTE(review): never updated or read below — appears vestigial
    for obj in game.obj_list_vicinity( location_from_axis(512, 549), OLC_NPC ):
        # skip recruited or downed NPCs
        if obj.leader_get() != OBJ_HANDLE_NULL or obj.is_unconscious() == 1:
            continue
        xx, yy = location_to_axis(obj.location)
        if obj.name in [14074, 14075] and xx > 496 and yy > 544:
            # Corridor guardsmen — identified by their exact standing spot
            if xx == 497 and yy == 549:
                # archer
                sps(obj, 639)
                # obj_f_speed_walk is set via a raw int (float bit pattern) — engine convention
                obj.obj_set_int(obj_f_speed_walk, 1085353216)
                obj.npc_flag_unset(ONF_WAYPOINTS_DAY)
                obj.npc_flag_unset(ONF_WAYPOINTS_NIGHT)
                obj.move(location_from_axis(481, 530), 0,0)
                obj.rotation = 2.35
            elif xx == 507 and yy == 549:
                # swordsman
                obj.destroy()
            elif xx == 515 and yy == 548:
                # spearbearer
                sps(obj, 637)
                obj.obj_set_int(obj_f_speed_walk, 1085353216)
                obj.npc_flag_unset(ONF_WAYPOINTS_DAY)
                obj.npc_flag_unset(ONF_WAYPOINTS_NIGHT)
                obj.move(location_from_axis(483, 541), 0,0)
                obj.rotation = 4
        elif obj.name == 14075:
            # Door Sergeant - replace with a quiet sergeant
            obj.destroy()
            # note: 'obj' is rebound to the freshly created replacement below
            obj = game.obj_create( 14076, location_from_axis (476L, 541L) )
            obj.move(location_from_axis(476, 541), 0,0)
            obj.rotation = 4
            obj.scripts[12] = 450
            obj.scripts[13] = 450
            obj.scripts[14] = 450
            obj.scripts[15] = 450
            obj.scripts[16] = 450
            obj.scripts[41] = 450
    # Create a new door guy instead of the Sergeant
    if game.global_flags[37] == 0 and game.leader.reputation_has(15) == 0: # killed Lareth or cleared Moathouse
        obj = game.obj_create( 14074, location_from_axis (521L, 547L) )
        obj.move(location_from_axis(521, 547), 0,0)
        obj.rotation = 4
        obj.scripts[9] = 450
        obj.scripts[12] = 450
        obj.scripts[13] = 450
        obj.scripts[14] = 450
        obj.scripts[15] = 450
        obj.scripts[16] = 450
        obj.scripts[19] = 450
        obj.scripts[41] = 450
    return
def lnk(loc_0 = -1, xx = -1, yy = -1, name_id = -1, stun_name_id = -1):
    # Locate n' Kill!
    # Quick-start helper: deals massive (50d50, multiple damage types) damage to
    # hostile, leaderless, visible NPCs near a location, simulating the party
    # having fought them.  NPCs whose proto is in 'stun_name_id' are knocked out
    # with subdual damage instead of killed.
    #   loc_0 / (xx, yy) - where to search; defaults to the leader's location
    #   name_id          - proto name(s) to target; -1 targets every hostile NPC
    #   stun_name_id     - proto name(s) to subdue rather than kill
    # normalize scalar arguments to lists
    if type(stun_name_id) == type(-1):
        stun_name_id = [stun_name_id]
    if type(name_id) == type(-1):
        name_id = [name_id]
    if loc_0 == -1 and xx == -1 and yy == -1:
        loc_0 = game.leader.location
    elif xx != -1 and yy != -1:
        loc_0 = location_from_axis(xx, yy) # Needs location_from_axis from utilities.py
    else:
        loc_0 = game.leader.location
    if name_id == [-1]:
        # no specific targets: hit every hostile, leaderless, drawn NPC nearby
        for obj in game.obj_list_vicinity(loc_0, OLC_NPC):
            if ( obj.reaction_get(game.party[0]) <= 0 or obj.is_friendly(game.party[0]) == 0 ) and ( obj.leader_get() == OBJ_HANDLE_NULL and obj.object_flags_get() & OF_DONTDRAW == 0):
                if not obj.name in stun_name_id:
                    # NOTE(review): this branch uses damage type 0 where the
                    # named-target branch uses D20DT_BLUDGEONING — presumably
                    # equivalent; confirm against the engine's damage-type enum
                    damage_dice = dice_new( '50d50' )
                    obj.damage( game.party[0], 0, damage_dice )
                    obj.damage( game.party[0], D20DT_FIRE, damage_dice )
                    obj.damage( game.party[0], D20DT_COLD, damage_dice )
                    obj.damage( game.party[0], D20DT_MAGIC, damage_dice )
                else:
                    damage_dice = dice_new( '50d50' )
                    obj.damage( OBJ_HANDLE_NULL, D20DT_SUBDUAL, damage_dice )
    else:
        # named targets: restrict to protos in name_id + stun_name_id
        for obj in game.obj_list_vicinity(loc_0, OLC_NPC):
            if obj.name in (name_id+stun_name_id) and ( obj.reaction_get(game.party[0]) <= 0 or obj.is_friendly(game.party[0]) == 0) and (obj.leader_get() == OBJ_HANDLE_NULL and obj.object_flags_get() & OF_DONTDRAW == 0):
                if not (obj.name in stun_name_id):
                    damage_dice = dice_new( '50d50' )
                    obj.damage( game.party[0], D20DT_BLUDGEONING, damage_dice )
                    obj.damage( game.party[0], D20DT_FIRE, damage_dice )
                    obj.damage( game.party[0], D20DT_COLD, damage_dice )
                    obj.damage( game.party[0], D20DT_MAGIC, damage_dice )
                else:
                    damage_dice = dice_new( '50d50' )
                    # only subdue if still conscious (check absent in the -1 branch above)
                    if is_unconscious(obj) == 0:
                        obj.damage( OBJ_HANDLE_NULL, D20DT_SUBDUAL, damage_dice )
            # clear the party from every nearby NPC's threat list so no
            # retaliation follows the scripted slaughter
            for pc in game.party:
                obj.ai_shitlist_remove( pc )
    return
def loot_items( loot_source = OBJ_HANDLE_NULL, pc=-1 , loot_source_name = -1, xx = -1, yy = -1, item_proto_list = [], loot_money_and_jewels_also = 1, autoloot = 1, autoconvert_jewels = 1, item_autoconvert_list = []):
    # Quick-start auto-looter.  Either strips one specific container/corpse
    # ('loot_source') or sweeps the vicinity of a location for lootable objects,
    # picking items up into the party or converting them straight to coin
    # (via autosell/autosell_item).  Gated on the 'qs_autoloot' option flag.
    #   loot_source           - a single object to loot; OBJ_HANDLE_NULL = sweep the area
    #   pc                    - who receives the loot; -1 = game.leader
    #   loot_source_name      - proto name(s) to restrict the area sweep to; -1 = any
    #   xx, yy                - sweep center; -1/-1 = pc's location
    #   item_proto_list       - specific item protos to pick up
    #   loot_money_and_jewels_also - also grab coins/gems/jewelry/potions
    #   autoconvert_jewels    - sell gems/jewelry for coin instead of keeping them
    #   item_autoconvert_list - protos to always sell rather than keep
    # NOTE(review): default args are mutable lists, but the function never
    # mutates them in place (only rebinds), so the shared-default pitfall
    # is not triggered here.
    if get_f('qs_autoloot') != 1:
        return
    if get_f('qs_autoconvert_jewels') != 1:
        autoconvert_jewels = 0
    money_protos = range(7000, 7004) # Note that the range actually extends from 7000 to 7003
    gem_protos = [12010] + range(12034, 12045)
    jewel_protos = range(6180, 6198)
    potion_protos = [8006, 8007]
    # placeholders — never populated below
    tank_armor_0 = []
    barbarian_armor_0 = []
    druid_armor_0 = []
    wizard_items_0 = []
    # protos considered vendor trash: mundane weapons, armor, clothing
    autosell_list = []
    autosell_list += range(4002, 4106 )
    autosell_list += range(4113, 4120)
    autosell_list += range(4155, 4191)
    autosell_list += range(6001, 6048)
    autosell_list += [6055, 6056] + [6059, 6060] + range(6062, 6073)
    autosell_list += range(6074, 6082)
    autosell_list += [6093, 6096, 6103, 6120, 6123, 6124]
    autosell_list += range(6142, 6153)
    autosell_list += range(6153, 6159)
    autosell_list += range(6163, 6180)
    autosell_list += range(6202, 6239 )
    # valuables inside the trash ranges that must be kept, not sold
    autosell_exclude_list = []
    autosell_exclude_list += [4016, 4017, 4025, 4028] # Frag, Scath, Excal, Flam Swo +1
    autosell_exclude_list += [4047, 4057, 4058] # Scimitar +1, Dagger +2, Dager +1
    autosell_exclude_list += [4078, 4079] # Warha +1, +2
    autosell_exclude_list += range(4081, 4087) # Longsword +1 ... +5, Unholy Orc ax+1
    autosell_exclude_list += [4098] # Battleaxe +1
    autosell_exclude_list += [4161] # Shortsword +2
    autosell_exclude_list += [5802] # Figurine name IDs - as per protos.tab
    autosell_exclude_list += [6015, 6017, 6031, 6039, 6058, 6073, 6214, 6215, 6219]
    autosell_exclude_list += [6239, 12602]
    autosell_exclude_list += [8006, 8007, 8008, 8101] # Potions of Cure mod, serious & Haste
    # 6015 - eye of flame cloak
    # 6017 - gnome ring
    # 6031 - eyeglasses
    # 6039 - Full Plate
    # 6048 - Prince Thrommel's Plate
    # 6058 - Cloak of Elvenkind
    # 6073 - Wooden Elvish Shield
    # 6214, 6215 - Green & Purple (resp.) Elven chain
    # 6219 - Senshock robes
    # 6239 - Darley's Necklace
    # 12602 - Hill Giant's Head
    for qqq in autosell_exclude_list:
        if qqq in autosell_list:
            autosell_list.remove(qqq)
    # fold money/gems/jewels/potions into the pickup list, normalizing scalars
    if loot_money_and_jewels_also == 1:
        if type(item_proto_list) == type([]):
            item_proto_list = item_proto_list + money_protos + gem_protos + jewel_protos + potion_protos
        else:
            item_proto_list = [item_proto_list] + money_protos + gem_protos + jewel_protos + potion_protos
    elif type(item_proto_list) == type(1):
        item_proto_list = [item_proto_list]
    # pc - Who will take the loot?
    if pc == -1:
        pc = game.leader
    # loc_0 - Where will the loot be sought?
    if xx == -1 or yy == -1:
        loc_0 = pc.location
    else:
        loc_0 = location_from_axis(xx, yy)
    if loot_source != OBJ_HANDLE_NULL:
        # single-source mode: pull each wanted proto out of 'loot_source'
        for pp in (item_proto_list + item_autoconvert_list):
            if type(pp) == type(1):
                if pp in item_autoconvert_list:
                    pp_1 = loot_source.item_find_by_proto(pp)
                    if pp_1 != OBJ_HANDLE_NULL:
                        if pp_1.item_flags_get() & (OIF_NO_DISPLAY + OIF_NO_LOOT) == 0:
                            autosell(pp_1)
                elif pc.item_get( loot_source.item_find_by_proto(pp) ) == 0:
                    # pc couldn't take it (full inventory, presumably) —
                    # try each other party member until someone can
                    for obj in game.party:
                        if obj.item_get( loot_source.item_find_by_proto(pp) ) == 1:
                            break
    else:
        # area-sweep mode
        if loot_source_name != -1:
            if type(loot_source_name) == type(1):
                loot_source_name = [loot_source_name]
        else:
            loot_source_name = [-1]
        for robee in game.obj_list_vicinity(loc_0, OLC_NPC | OLC_CONTAINER | OLC_ARMOR | OLC_WEAPON | OLC_GENERIC):
            if not robee in game.party[0].group_list() and (robee.name in loot_source_name or loot_source_name == [-1]):
                # loose ground items: sell trash, keep excluded valuables
                if (robee.type == obj_t_weapon) or (robee.type == obj_t_armor) or (robee.type == obj_t_generic):
                    if robee.item_flags_get() & (OIF_NO_DISPLAY + OIF_NO_LOOT) == 0:
                        if robee.name in autosell_list + item_autoconvert_list:
                            autosell_item(robee)
                        elif robee.name in autosell_exclude_list:
                            if pc.item_get(robee) == 0:
                                for obj in game.party:
                                    if obj.item_get(robee) == 1:
                                        break
                if robee.type == obj_t_npc:
                    # sell the trash a corpse is wearing (16 equip slots)
                    for qq in range(0, 16):
                        qq_item_worn = robee.item_worn_at(qq)
                        if qq_item_worn != OBJ_HANDLE_NULL and qq_item_worn.item_flags_get() & (OIF_NO_DISPLAY + OIF_NO_LOOT) == 0:
                            if qq_item_worn.name in (autosell_list + item_autoconvert_list):
                                autosell_item(qq_item_worn)
                # pull wanted protos out of the NPC/container inventory
                for item_proto in (item_proto_list + item_autoconvert_list):
                    item_sought = robee.item_find_by_proto(item_proto)
                    if item_sought != OBJ_HANDLE_NULL and item_sought.item_flags_get() & OIF_NO_DISPLAY == 0:
                        if ( (item_proto in ( gem_protos + jewel_protos ) ) and autoconvert_jewels == 1) or (item_proto in item_autoconvert_list):
                            autosell_item(item_sought, item_proto, pc)
                        elif pc.item_get(item_sought) == 0:
                            for obj in game.party:
                                if obj.item_get(item_sought) == 1:
                                    break
    return
def sell_modifier():
    # Fraction of an item's worth the party gets when auto-selling, driven by
    # the best Appraise skill in the party (plus class-based buff bonuses),
    # clamped to [0, 0.97].
    ranks = [member.skill_level_get(skill_appraise) for member in game.party]
    best = max(ranks + [-999])  # -999 sentinel matches original empty-party behavior
    # +2 if anyone can cast Heroism / Fox's Cunning (wizard level 2+)
    if any(member.stat_level_get(stat_level_wizard) > 1 for member in game.party):
        best = best + 2
    # +2 for Inspire Competence (bard level 2+)
    if any(member.stat_level_get(stat_level_bard) > 1 for member in game.party):
        best = best + 2
    if best > 19:
        return 0.97
    if best < -13:
        return 0
    return 0.4 + float(best)*0.03
def appraise_tool( obj ):
    # Returns what you'd get for selling it:
    # the item's worth (in copper) scaled by the party's sell modifier,
    # truncated to an integer.
    return int( sell_modifier() * obj.obj_get_int(obj_f_item_worth) )
def s_roundoff( app_sum ):
    # Round a sale sum down for display: sums up to 1000 are shown exactly,
    # larger sums are truncated to 2-3 leading digits of precision
    # (nearest 10 up to 10k, nearest 100 up to 100k, nearest 1000 beyond).
    #
    # Bug fix: sums above 1,000,000 previously matched no branch and the
    # function fell through, returning None (which then reached
    # pc.float_mesfile_line via autosell_item).  They now round to the
    # nearest 1000 like the 100k-1M bracket; behavior for all smaller
    # sums is unchanged.
    if app_sum <= 1000:
        return app_sum
    if app_sum <= 10000:
        return 10 * (int(app_sum) // 10)
    if app_sum <= 100000:
        return 100 * (int(app_sum) // 100)
    return 1000 * (int(app_sum) // 1000)
def autosell_item(item_sought = OBJ_HANDLE_NULL, item_proto = -1, pc = -1, item_quantity = 1, display_float = 1):
    # Convert an item directly into party coin: credit its appraised value
    # (times its stack quantity) to 'pc', then hide the item from sight and
    # looting instead of destroying it.  Optionally float a "sold" message.
    #   display_float: 1 = float only for sales over 5000 copper, 2 = always
    #   item_quantity: unused — quantity is read from the item itself
    if item_sought == OBJ_HANDLE_NULL:
        return
    if pc == -1:
        pc = game.leader
    if item_proto == -1:
        item_proto = item_sought.name
    coppers = appraise_tool(item_sought) * item_sought.obj_get_int(obj_f_item_quantity)
    pc.money_adj( coppers )
    # hide the item rather than deleting it
    item_sought.object_flag_set(OF_OFF)
    item_sought.item_flag_set( OIF_NO_DISPLAY )
    item_sought.item_flag_set( OIF_NO_LOOT )
    if (display_float == 1 and coppers > 5000) or display_float == 2:
        pc.float_mesfile_line( 'mes\\script_activated.mes', 10000, 2 )
        pc.float_mesfile_line( 'mes\\description.mes', item_proto, 2 )
        # coppers/100 = gold pieces, rounded off for display
        pc.float_mesfile_line( 'mes\\transaction_sum.mes', ( s_roundoff(coppers/100) ), 2 )
    return
def giv(pc, proto_id, in_group = 0):
    # Give 'pc' an item of proto 'proto_id' unless it is already owned.
    #   in_group = 0: check only pc's own inventory
    #   in_group = 1: check the whole party's inventories
    # Returns 1 if the item was created, 0 if it was already present.
    #
    # Fix: the in_group == 0 path previously fell through to a bare 'return'
    # (None) on both outcomes while the in_group path returned 1/0; both
    # paths now report consistently.  Visible callers ignore the return.
    if in_group == 0:
        if pc.item_find_by_proto(proto_id) == OBJ_HANDLE_NULL:
            create_item_in_inventory( proto_id, pc )
            return 1
        return 0
    # party-wide check; stop at the first member who already has one
    for member in game.party:
        if member.item_find_by_proto(proto_id) != OBJ_HANDLE_NULL:
            return 0
    create_item_in_inventory( proto_id, pc )
    return 1
def cnk(proto_id, do_not_destroy = 0, how_many = 1, timer = 0):
    # Create n' Kill
    # Meant to simulate actually killing the critter: spawn 'how_many' copies
    # of proto 'proto_id' at the leader's feet, deal each a lethal 50d50 hit
    # credited to the party, and (unless do_not_destroy == 1) clean up the
    # corpse.  The 'timer' parameter is currently unused (a staggered,
    # timevent-driven variant was commented out in the original).
    for _ in range(0, how_many):
        critter = game.obj_create(proto_id, game.leader.location)
        lethal_dice = dice_new( '50d50' )
        critter.damage( game.party[0], 0, lethal_dice )
        if do_not_destroy != 1:
            critter.destroy()
    return
################
################
### AUTOKILL ###
################
################
def autokill(cur_map, autoloot = 1, is_timed_autokill = 0):
#if (cur_map in range(5069, 5078) ): #random encounter maps
# ## Skole Goons
# flash_signal(0)
# if get_f('qs_autokill_nulb'):
# if get_v('qs_skole_goon_time') == 0:
# set_v('qs_skole_goon_time', 500)
# game.timevent_add( autokill, (cur_map), 100 )
# flash_signal(1)
# if get_v('qs_skole_goon_time') == 500:
# flash_signal(2)
# lnk(name_id = [14315])
# #14315 - Skole Goons
# loot_items(loot_source_name = [14315]) # Skole goons
#if get_f('qs_is_repeatable_encounter'):
# lnk()
# loot_items()
################
### HOMMLET #
################
if (cur_map == 5001): # Hommlet Exterior
if get_v('qs_emridy_time') == 1500:
game.quests[100].state = qs_completed
bro_smith = OBJ_HANDLE_NULL
for obj in game.obj_list_vicinity(location_from_axis(571, 434), OLC_NPC):
if obj.name == 20005:
bro_smith = obj
if bro_smith != OBJ_HANDLE_NULL:
party_transfer_to(bro_smith, 12602)
game.global_flags[979] = 1
set_v('qs_emridy_time', 2000)
if get_f('qs_arena_of_heroes_enable'):
if get_f('qs_lareth_dead'):
game.global_vars[974] = 2 # Simulate having talked about chest
game.global_vars[705] = 2 # Simulate having handled chest
if get_f('qs_book_of_heroes_given') == 0:
giv(game.leader, 11050, 1) # Book of Heroes
giv(game.leader, 12589, 1) # Horn of Fog
set_f('qs_book_of_heroes_given')
game.global_vars[702] = 1 # Make sure Kent doesn't pester
if game.global_vars[994] == 0:
game.global_vars[994] = 1 # Skip Master of the Arena chatter
if (cur_map == 5008): # Welcome Wench Upstairs
if get_f('qs_autokill_greater_temple'):
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
# Barbarian Elf
lnk(xx=482, yy=476, name_id = 8717)
loot_items(loot_source_name = 8717, item_autoconvert_list = [6396, 6045, 6046, 4204])
game.global_vars[961] = 4
##################
### MOATHOUSE #
##################
if (cur_map == 5002): # Moathouse Exterior
if get_f('qs_autokill_moathouse') == 1:
lnk(xx=469, yy=524, name_id = 14057) # giant frogs
lnk(xx=492, yy=523, name_id = 14057) # giant frogs
lnk(xx=475, yy=505, name_id = 14057) # giant frogs
loot_items(xx=475, yy=505, item_proto_list = [6270], loot_source_name = 14057, autoloot = autoloot) # Jay's Ring
lnk(xx=475, yy=460, name_id = 14070) # courtyard brigands
loot_items(xx=475, yy=460, autoloot = autoloot)
if get_v('qs_moathouse_ambush_time') == 0 and get_f('qs_lareth_dead') == 1:
game.timevent_add( autokill, (cur_map), 500 )
set_v('qs_moathouse_ambush_time', 500)
elif get_v('qs_moathouse_ambush_time') == 500:
lnk(xx = 478, yy = 460, name_id = [14078, 14079, 14080, 14313, 14314, 14642, 8010, 8004, 8005]) # Ambush
lnk(xx = 430, yy = 444, name_id = [14078, 14079, 14080, 14313, 14314, 14642, 8010, 8004, 8005]) # Ambush
loot_items(xx=478, yy=460)
loot_items(xx=430, yy=444)
set_v('qs_moathouse_ambush_time', 1000)
if get_f('qs_autokill_temple') == 1:
lnk(xx=503, yy=506, name_id = [14507, 14522] ) # Boars
lnk(xx=429, yy=437, name_id = [14052, 14053] ) # Bears
lnk(xx=478, yy=448, name_id = [14600, 14674, 14615, 14603, 14602, 14601] ) # Undead
lnk(xx=468, yy=470, name_id = [14674, 14615, 14603, 14602, 14601] ) # Undead
if (cur_map == 5003): # Moathouse Tower
if get_f('qs_autokill_moathouse') == 1:
lnk(name_id = 14047) # giant spider
if (cur_map == 5004): # Moathouse Upper floor
if get_f('qs_autokill_moathouse') == 1:
lnk(xx = 476, yy = 493, name_id = 14088) # Huge Viper
lnk(xx = 476, yy = 493, name_id = 14182) # Stirges
lnk(xx = 473, yy = 472, name_id = [14070, 14074, 14069]) # Backroom brigands
loot_items(xx=473, yy=472, autoloot = autoloot)
lnk(xx = 502, yy = 476, name_id = [14089, 14090]) # Giant Tick & Lizard
loot_items(xx=502, yy=472, autoloot = autoloot, item_proto_list = [6050])
if get_f('qs_autokill_temple') == 1 and game.global_vars[972] == 2:
if get_v('qs_moathouse_respawn__upper_time') == 0:
game.timevent_add( autokill, (cur_map), 500 )
set_v('qs_moathouse_respawn__upper_time', 500)
if get_v('qs_moathouse_respawn__upper_time') == 500:
lnk(xx=476, yy=493, name_id = [14138, 14344, 14391] ) # Lycanthropes
lnk(xx = 502, yy = 476, name_id = [14295, 14142]) # Basilisk & Ochre Jelly
if (cur_map == 5005): # Moathouse Dungeon
if get_f('qs_autokill_moathouse') == 1:
lnk(xx = 416, yy = 439, name_id = 14065) # Lubash
loot_items(xx=416, yy=439, item_proto_list = [6058], loot_source_name = 14065 , autoloot = autoloot)
game.global_flags[55] = 1 # Freed Gnomes
game.global_flags[991] = 1 # Flag For Verbobonc Gnomes
lnk(xx = 429, yy = 413, name_id = [14123, 14124, 14092, 14126, 14091]) # Zombies, Green Slime
lnk(xx = 448, yy = 417, name_id = [14123, 14124, 14092, 14126]) # Zombies
loot_items(xx=448, yy=417, item_proto_list = 12105, loot_source_name = -1 , autoloot = autoloot)
lnk(xx = 450, yy = 519, name_id = range(14170, 14174) + range(14213, 14217) ) # Bugbears
lnk(xx = 430, yy = 524, name_id = range(14170, 14174) + range(14213, 14217) ) # Bugbears
loot_items(xx=450, yy=519 , autoloot = autoloot)
loot_items(xx=430, yy=524 , autoloot = autoloot)
if len(game.party) < 4 and get_v('AK5005_Stage') < 1:
set_v('AK5005_Stage', get_v('AK5005_Stage') + 1)
return
# Gnolls and below
lnk(xx = 484, yy = 497, name_id = [14066, 14067, 14078, 14079, 14080]) # Gnolls
lnk(xx = 484, yy = 473, name_id = [14066, 14067, 14078, 14079, 14080]) # Gnolls
loot_items(xx=484, yy=497 , autoloot = autoloot)
loot_items(xx=484, yy=473 , autoloot = autoloot)
lnk(xx = 543, yy = 502, name_id = 14094) # Giant Crayfish
lnk(xx = 510, yy = 447, name_id = [14128, 14129, 14095]) # Ghouls
if len(game.party) < 4 and get_v('AK5005_Stage') < 2 or ( len(game.party) < 8 and get_v('AK5005_Stage') < 1 ):
set_v('AK5005_Stage', get_v('AK5005_Stage') + 1)
return
lnk(xx = 515, yy = 547, name_id = [14074, 14075]) # Front Guardsmen
loot_items(xx=515, yy=547 , autoloot = autoloot)
lnk(xx = 485, yy = 536, name_id = [14074, 14075, 14076, 14077]) # Back Guardsmen
loot_items(xx=485, yy=536 , loot_source_name = [14074, 14075, 14076, 14077], autoloot = autoloot) # Back guardsmen
from py00060lareth import create_spiders
if get_f('qs_lareth_spiders_spawned') == 0:
create_spiders(game.leader, game.leader)
set_f('qs_lareth_spiders_spawned', 1)
lnk(xx = 480, yy = 540, name_id = [8002, 14397, 14398, 14620]) # Lareth & Spiders
set_f('qs_lareth_dead')
lnk(xx = 530, yy = 550, name_id = [14417]) # More Spiders
loot_items(xx=480, yy=540 , item_proto_list = ([4120, 6097, 6098, 6099, 6100, 11003] + range(9001, 9688) ) , loot_source_name = [8002, 1045], autoloot = autoloot) # Lareth & Lareth's Dresser
loot_items(xx=480, yy=540, item_autoconvert_list = [4194])
### RESPAWN
if get_f('qs_autokill_temple') == 1 and game.global_vars[972] == 2:
if get_v('qs_moathouse_respawn_dungeon_time') == 0:
game.timevent_add( autokill, (cur_map), 500 )
set_v('qs_moathouse_respawn_dungeon_time', 500)
if get_v('qs_moathouse_respawn__upper_time') == 500:
lnk(xx = 416, yy = 439, name_id = 14141) # Crystal Oozes
# Bodaks, Shadows and Groaning Spirit
lnk(xx = 436, yy = 521, name_id = [14328, 14289, 14280])
# Skeleton Gnolls
lnk(xx = 486, yy = 480, name_id = [14616, 14081, 14082, 14083])
lnk(xx = 486, yy = 495, name_id = [14616, 14081, 14082, 14083]) # Skeleton Gnolls
# Witch
lnk(xx = 486, yy = 540, name_id = [14603, 14674, 14601, 14130, 14137, 14328, 14125, 14110, 14680])
loot_items(xx = 486, yy = 540, item_proto_list = [11098, 6273, 4057,6263, 4498], item_autoconvert_list = [4226, 6333, 5099])
if (cur_map == 5091): # Cave Exit
if get_f('qs_autokill_moathouse') == 1:
if get_v('qs_moathouse_ambush_time') == 0 and get_f('qs_lareth_dead') == 1:
game.timevent_add( autokill, (cur_map), 500 )
set_v('qs_moathouse_ambush_time', 500)
elif get_v('qs_moathouse_ambush_time') == 500:
lnk(xx = 500, yy = 490, name_id = [14078, 14079, 14080, 14313, 14314, 14642, 8010, 8004, 8005]) # Ambush
lnk(xx = 470, yy = 485, name_id = [14078, 14079, 14080, 14313, 14314, 14642, 8010, 8004, 8005]) # Ambush
loot_items(xx=500, yy=490)
loot_items(xx=470, yy=490)
set_v('qs_moathouse_ambush_time', 1000)
if (cur_map == 5094): # Emridy Meadows
if get_f('qs_autokill_moathouse') == 1:
if get_v('qs_emridy_time') == 0:
game.timevent_add( autokill, (cur_map), 500 )
set_v('qs_emridy_time', 500)
elif get_v('qs_emridy_time') == 500:
set_v('qs_emridy_time', 1000)
game.timevent_add( autokill, (cur_map), 500 )
lnk(xx = 467, yy = 383, name_id = [14603, 14600]) # NW Skeletons
loot_items(xx=467, yy=380)
lnk(xx = 507, yy = 443, name_id = [14603, 14600]) # W Skeletons
lnk(xx = 515, yy = 421, name_id = [14603, 14600]) # W Skeletons
loot_items(xx=507, yy=443)
loot_items(xx=515, yy=421)
lnk(xx = 484, yy = 487, name_id = [14603, 14600, 14616, 14615]) # Rainbow Rock 1
lnk(xx = 471, yy = 500, name_id = [14603, 14600, 14616, 14615]) # Rainbow Rock 1
loot_items(xx=484, yy=487)
loot_items(xx=484, yy=487, loot_source_name = [1031], item_proto_list = [12024])
if get_f('qs_rainbow_spawned') == 0:
set_f('qs_rainbow_spawned', 1)
#py00265rainbow_rock.san_use(game.leader, game.leader)
#san_use(game.leader, game.leader)
#game.particles( "sp-summon monster I", game.leader)
for qq in game.obj_list_vicinity( location_from_axis(484, 487), OLC_CONTAINER ):
if qq.name == 1031:
qq.object_script_execute( qq, 1 )
lnk(xx = 484, yy = 487, name_id = [14602, 14601]) # Rainbow Rock 2
loot_items(xx=484, yy=487)
#game.timevent_add( autokill, (cur_map), 1500 )
lnk(xx = 532, yy = 540, name_id = [14603, 14600]) # SE Skeletons
loot_items(xx=540, yy=540)
lnk(xx = 582, yy = 514, name_id = [14221, 14053]) # Hill Giant
elif get_v('qs_emridy_time') == 1000:
set_v('qs_emridy_time', 1500)
loot_items(xx=582, yy=514)
loot_items(xx=582, yy=514, item_proto_list = [12602])
if game.leader.item_find_by_proto(12602) == OBJ_HANDLE_NULL:
create_item_in_inventory(12602, game.leader)
##################
### NULB #
##################
if (cur_map == 5051): # Nulb Outdoors
if get_f('qs_autokill_temple') == 1:
game.global_vars[972] = 2 # Simulate Convo with Kent
if get_f('qs_autokill_nulb') == 1:
# Spawn assassin
game.global_flags[277] = 1 # Have met assassin
game.global_flags[292] = 1
if get_f('qs_assassin_spawned') == 0:
a = game.obj_create(14303, game.leader.location)
lnk(name_id = 14303)
loot_items(loot_source_name = 14303, item_proto_list = [6315, 6199, 4701, 4500, 8007, 11002], item_autoconvert_list = [6046])
set_f('qs_assassin_spawned')
game.global_flags[356] = 1 # Met Mickey
game.global_flags[357] = 1 # Mickey confessed to taking Orb
game.global_flags[321] = 1 # Met Mona
record_time_stamp('s_skole_goons')
game.quests[41].state = qs_completed # Preston's Tooth Ache
game.global_flags[94] = 1 # Nulb House is yours
game.global_flags[315] = 1 # Purchased Serena's Freedom
game.quests[60].state = qs_completed # Mona's Orb
game.quests[63].state = qs_completed # Bribery for justice
if get_f('qs_killed_gar') == 1:
game.quests[35].state = qs_completed # Grud's story
game.leader.reputation_add( 25 )
if (cur_map == 5068): # Imeryd's Run
if get_f('qs_autokill_nulb') == 1:
lnk(xx = 485, yy = 455, name_id = ([14279] + range(14084, 14088)) ) # Hag & Lizards
#lnk(xx = 468, yy = 467, name_id = ([14279] + range(14084, 14088)) ) # Hag & Lizards
loot_items(xx=485, yy = 455)
lnk(xx = 460, yy = 480, name_id = [14329]) # Gar
loot_items(xx=460, yy=480, item_proto_list = [12005]) # Gar Corpse + Lamia Figurine
loot_items(xx=460, yy=500, item_proto_list = [12005]) # Lamia Figurine - bulletproof
set_f('qs_killed_gar')
lnk(name_id = [14445, 14057]) # Kingfrog, Giant Frog
loot_items(xx=476, yy = 497, item_proto_list = [4082, 6199, 6082, 4191, 6215, 5006])
if (cur_map == 5052): # Boatmen's Tavern
if get_f('qs_autokill_nulb') == 1:
if game.global_flags[281] == 1: # Have had Skole Goons Encounter
lnk(name_id = [14315, 14134]) # Skole + Goon
loot_items(loot_source_name = [14315, 14134], item_proto_list = [6051, 4121])
for obj_1 in game.obj_list_vicinity(game.leader.location, OLC_NPC):
for pc_1 in game.party[0].group_list():
obj_1.ai_shitlist_remove( pc_1 )
obj_1.reaction_set( pc_1, 50 )
if (cur_map == 5057): # Snakepit Brothel
if get_f('qs_autokill_nulb') == 1:
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
lnk(xx = 508, yy= 485, name_id = 8718)
loot_items(xx = 508, yy = 485, loot_source_name = 8718, item_autoconvert_list = [4443, 6040, 6229])
game.global_vars[961] = 6
if (cur_map == 5060): # Waterside Hostel
if get_f('qs_autokill_nulb') == 1:
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
# Thieving Dala
game.quests[37].state = qs_completed
lnk(xx = 480, yy= 501, name_id = [14147, 14146, 14145, 8018, 14074], stun_name_id = [14372, 14373])
loot_items(xx=480, yy= 501, loot_source_name = [14147, 14146, 14145, 8018, 14074])
for obj_1 in game.obj_list_vicinity(location_from_axis(480, 501), OLC_NPC):
for pc_1 in game.party[0].group_list():
obj_1.ai_shitlist_remove( pc_1 )
obj_1.reaction_set( pc_1, 50 )
##########################
### HICKORY BRANCH #
##########################
if (cur_map == 5095): # Hickory Branch Exterior
if get_f('qs_autokill_nulb'):
# First party, near Noblig
lnk(xx = 433, yy = 538, name_id = [14467, 14469, 14470, 14468, 14185])
loot_items(xx=433, yy = 538, item_autoconvert_list = [4201, 4209, 4116, 6321]) # Shortbow, Spiked Chain, Short Spear, Marauder Armor
# NW of Noblig
lnk(xx = 421, yy = 492, name_id = [14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=421, yy = 492, item_autoconvert_list = [4201, 4209, 4116])
# Wolf Trainer Group
lnk(xx = 366, yy = 472, name_id = [14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=366, yy = 472, item_autoconvert_list = [4201, 4209, 4116])
# Ogre Shaman Group
lnk(xx = 449, yy = 455, name_id = [14249, 14482, 14093, 14067, 14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=449, yy = 455, item_autoconvert_list = [4201, 4209, 4116])
# Orc Shaman Group
lnk(xx = 494, yy = 436, name_id = [14743, 14747, 14749, 14745, 14746, 14482, 14093, 14067, 14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=494, yy = 436, item_autoconvert_list = [4201, 4209, 4116])
# Cave Entrance Group
lnk(xx = 527, yy = 380, name_id = [14465, 14249, 14743, 14747, 14749, 14745, 14746, 14482, 14093, 14067, 14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=527, yy = 380, item_autoconvert_list = [4201, 4209, 4116])
# Dire Bear
lnk(xx = 548, yy = 430, name_id = [14506])
# Cliff archers
lnk(xx = 502, yy = 479, name_id = [14465, 14249, 14743, 14747, 14749, 14745, 14746, 14482, 14093, 14067, 14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=502, yy = 479, item_autoconvert_list = [4201, 4209, 4116])
# Giant Snakes
lnk(xx = 547, yy = 500, name_id = [14449])
loot_items(xx=547, yy = 500, item_autoconvert_list = [4201, 4209, 4116])
# Owlbear
lnk(xx = 607, yy = 463, name_id = [14046])
# Dokolb area
lnk(xx = 450, yy = 519, name_id = [14640, 14465, 14249, 14743, 14747, 14749, 14745, 14746, 14482, 14093, 14067, 14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=450, yy = 519, item_autoconvert_list = [4201, 4209, 4116])
# South of Dokolb Area
lnk(xx = 469, yy = 548, name_id = [14188, 14465, 14249, 14743, 14747, 14749, 14745, 14746, 14482, 14093, 14067, 14466, 14352, 14467, 14469, 14470, 14468, 14185, 14050, 14391])
loot_items(xx=469, yy = 548, item_autoconvert_list = [4201, 4209, 4116])
if (cur_map == 5115): # Hickory Branch Cave
if get_f('qs_autokill_nulb'):
if get_v('qs_hickory_cave_timer') == 0:
set_v('qs_hickory_cave_timer', 500)
game.timevent_add(autokill, (cur_map), 500)
if get_v('qs_hickory_cave_timer') == 500:
lnk()
loot_items(item_proto_list = [4086, 6106, 10023], item_autoconvert_list = [6143, 4110, 4241, 4242, 4243, 6066, 4201, 4209, 4116])
loot_items(xx = 490, yy = 453, item_proto_list = [4078, 6252, 6339, 6091], item_autoconvert_list = [6304, 4240, 6161, 6160, 4087, 4204])
if (cur_map == 5191): # Minotaur Lair
if get_f('qs_autokill_nulb'):
lnk(xx = 492, yy = 486)
loot_items(492, 490, item_proto_list = [4238, 6486, 6487])
##########################
### ARENA OF HEROES #
##########################
if (cur_map == 5119): # AoH
if get_f('qs_autokill_temple'):
#game.global_vars[994] = 3
dummy = 1
##########################
### MOATHOUSE RESPAWN #
##########################
if (cur_map == 5120): # Forest Drow
#flash_signal(0)
if get_f('qs_autokill_temple'):
lnk(xx = 484, yy = 481, name_id = [14677, 14733, 14725, 14724, 14726])
loot_items(xx = 484, yy = 481, item_autoconvert_list = [4132, 6057, 4082, 4208, 6076])
##################################
### TEMPLE OF ELEMENTAL EVIL #
##################################
if (cur_map == 5111): # Tower Sentinel
if get_f('qs_autokill_temple'):
lnk(xx = 480, yy = 490, name_id = 14157)
loot_items(xx = 480, yy = 490)
if (cur_map == 5065): # Brigand Tower
if get_f('qs_autokill_temple'):
lnk(xx = 477, yy = 490, name_id = [14314, 14313, 14312, 14310, 14424, 14311, 14425])
lnk(xx = 490, yy = 480, name_id = [14314, 14313, 14312, 14310, 14424, 14311, 14425])
loot_items(item_proto_list = [10005, 6051], item_autoconvert_list = [4081, 6398, 4067])
loot_items(xx = 490, yy = 480, item_proto_list = [10005, 6051], item_autoconvert_list = [4081, 6398, 4067, 4070, 4117, 5011])
if (cur_map == 5066): # Temple Level 1 - Earth Floor
if get_f('qs_autokill_temple'):
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
#Stirges
lnk(xx = 415, yy = 490, name_id = [14182])
# Harpies & Ghouls
lnk(xx = 418, yy = 574, name_id = [14095, 14129, 14243, 14128, 14136, 14135])
lnk(xx = 401, yy = 554, name_id = [14095, 14129, 14243, 14128, 14136, 14135])
lnk(xx = 401, yy = 554, name_id = [14095, 14129, 14243, 14128, 14136, 14135])
lnk(xx = 421, yy = 544, name_id = [14095, 14129, 14243, 14128, 14136, 14135])
lnk(xx = 413, yy = 522, name_id = [14095, 14129, 14243, 14128, 14136, 14135])
loot_items(xx = 401, yy = 554)
# Gel Cube + Grey Ooze
lnk(xx = 407, yy = 594, name_id = [14095, 14129, 14139, 14140])
loot_items(xx = 407, yy = 600, loot_source_name = [14448, 1049], item_autoconvert_list = [4121, 4118, 4113, 4116, 5005, 5098])
# Corridor Ghouls
lnk(xx = 461, yy = 600, name_id = [14095, 14129])
# Corridor Gnolls
lnk(xx = 563, yy = 600, name_id = [14078, 14079, 14080])
loot_items(xx = 563, yy = 600, loot_source_name = [14078, 14079, 14080, 1049])
# Corridor Ogre
lnk(xx = 507, yy = 600, name_id = [14448])
loot_items(xx = 507, yy = 600, loot_source_name = [14448, 1049], item_autoconvert_list = [4121, 4118, 4113, 4116, 5005, 5098])
# Bone Corridor Undead
lnk(xx = 497, yy = 519, name_id = [14107, 14081, 14082])
lnk(xx = 467, yy = 519, name_id = [14083, 14107, 14081, 14082])
loot_items(xx = 507, yy = 600, loot_source_name = [14107, 14081, 14082])
# Wonnilon Undead
lnk(xx = 536, yy = 414, name_id = [14127, 14126, 14125, 14124, 14092, 14123])
lnk(xx = 536, yy = 444, name_id = [14127, 14126, 14125, 14124, 14092, 14123])
# Huge Viper
lnk(xx = 550, yy = 494, name_id = [14088])
# Ogre + Goblins
lnk(xx = 565, yy = 508, name_id = [14185, 14186, 14187, 14448])
lnk(xx = 565, yy = 494, name_id = [14185, 14186, 14187, 14448])
loot_items(xx = 565, yy = 508, loot_source_name = [14185, 14186, 14187, 14448])
# Ghasts near prisoners
lnk(xx = 545, yy = 553, name_id = [14128, 14129, 14136, 14095, 14137, 14135])
loot_items(xx = 545, yy = 553, loot_source_name = [1040])
# Black Widow Spiders
lnk(xx = 440, yy = 395, name_id = [14417])
# NW Ghast room near hideout
lnk(xx = 390, yy = 390, name_id = [14128, 14129, 14136, 14095, 14137, 14135])
if get_v('qs_autokill_temple_level_1_stage') == 0:
set_v('qs_autokill_temple_level_1_stage', 1)
elif get_v('qs_autokill_temple_level_1_stage') == 1:
set_v('qs_autokill_temple_level_1_stage', 2)
# Gnoll & Bugbear southern room
lnk(xx = 515, yy = 535, name_id = [14078, 14249, 14066, 14632, 14164])
lnk(xx = 515, yy = 549, name_id = [14067, 14631, 14078, 14249, 14066, 14632, 14164])
loot_items(xx = 515, yy = 540)
# Gnoll & Bugbear northern room
lnk(xx = 463, yy = 535, name_id = [14248, 14631, 14188, 14636, 14083, 14184, 14078, 14249, 14066, 14632, 14164])
loot_items(xx = 463, yy = 535)
# Earth Temple Fighter eastern room
lnk(xx = 438, yy = 505, name_id = [14337, 14338])
loot_items(xx = 438, yy = 505, item_autoconvert_list = [6074, 6077, 5005, 4123, 4134])
# Bugbear Central Outpost
lnk(xx = 505, yy = 476, name_id = [14165, 14163, 14164, 14162])
loot_items(xx = 505, yy = 476)
# Bugbears nea r Wonnilon
lnk(xx = 555, yy = 436, name_id = [14165, 14163, 14164, 14162])
lnk(xx = 555, yy = 410, name_id = [14165, 14163, 14164, 14162])
lnk(xx = 519, yy = 416, name_id = [14165, 14163, 14164, 14162])
loot_items(xx = 519, yy = 416, loot_source_name = range(14162, 14166), item_autoconvert_list = [6174])
loot_items(xx = 555, yy = 436, loot_source_name = [14164], item_autoconvert_list = [6174])
loot_items(xx = 555, yy = 410, loot_source_name = [14164], item_autoconvert_list = [6174])
# Bugbears North of Romag
lnk(xx = 416, yy = 430, name_id = range(14162, 14166) )
loot_items(xx = 416, yy = 430, loot_source_name = range(14162, 14166), item_autoconvert_list = [6174])
elif get_v('qs_autokill_temple_level_1_stage') == 2:
# Jailer room
lnk(xx = 568, yy = 462, name_id = [14165, 14164, 14229])
loot_items(xx = 568, yy = 462, item_autoconvert_list = [6174])
# Earth Altar
lnk(xx = 474, yy = 396, name_id = [14381, 14337])
lnk(xx = 494, yy = 396, name_id = [14381, 14337])
lnk(xx = 484, yy = 423, name_id = [14296])
loot_items(xx = 480, yy = 400, loot_source_name = range(1041, 1045), item_proto_list = [6082, 12228, 12031] , item_autoconvert_list = [4070, 4193, 6056, 8025])
loot_items(xx = 480, yy = 400, item_proto_list = [6082, 12228, 12031] , item_autoconvert_list = [4070, 4193, 6056, 8025])
# Troop Commander room
lnk(xx = 465, yy = 477, name_id = ( range(14162, 14166)+ [14337, 14156, 14339]) )
lnk(xx = 450, yy = 477, name_id = ( range(14162, 14166)+ [14337, 14156, 14339]) )
loot_items(xx = 450, yy = 476, item_autoconvert_list = [4098, 6074, 6077, 6174])
# Romag Room
lnk(xx = 441, yy = 442, name_id = ([8045, 14154] + range(14162, 14166)+ [14337, 14156, 14339]) )
loot_items(xx = 441, yy = 442, item_autoconvert_list = [6164, 9359, 8907, 9011], item_proto_list = [10006, 6094, 4109, 8008])
if (cur_map == 5067): # Temple Level 2 - Water, Fire & Air Floor
if get_f('qs_autokill_temple'):
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
# Kelno regroup
lnk(xx = 480, yy = 494, name_id = [8092, 14380, 14292, 14067, 14078, 14079, 14080, 14184, 14187, 14215, 14216, 14275, 14159, 14160, 14161, 14158])
lnk(xx = 490, yy = 494, name_id = [8092, 14380, 14292, 14067, 14078, 14079, 14080, 14184, 14187, 14215, 14216, 14275, 14159, 14160, 14161, 14158])
lnk(xx = 490, yy = 514, name_id = [8092, 14380, 14292, 14067, 14078, 14079, 14080, 14184, 14187, 14215, 14216, 14275, 14159, 14160, 14161, 14158])
loot_items(xx = 480, yy = 494, item_proto_list = [10009, 6085, 4219], item_autoconvert_list = [6049, 4109, 6166, 4112])
loot_items(xx = 480, yy = 514, item_proto_list = [10009, 6085, 4219], item_autoconvert_list = [6049, 4109, 6166, 4112])
loot_items(xx = 490, yy = 514, item_proto_list = [10009, 6085, 4219], item_autoconvert_list = [6049, 4109, 6166, 4112])
# Corridor Ogres
lnk(xx = 480, yy = 452, name_id = [14249, 14353])
loot_items(xx = 480, yy = 452, item_autoconvert_list = [4134])
# Minotaur
for m_stat in game.obj_list_vicinity(location_from_axis(566, 408), OLC_SCENERY):
if m_stat.name == 1615:
m_stat.destroy()
cnk(14241)
loot_items(xx = 566, yy = 408)
# Greater Temple Guards
lnk(xx = 533, yy = 398, name_id = [14349, 14348])
lnk(xx = 550, yy = 422, name_id = [14349, 14348])
loot_items(xx = 533, yy = 398)
# Littlest Troll
lnk(xx = 471, yy = 425, name_id = [14350])
# Carrion Crawler
lnk(xx = 451, yy = 424, name_id = [14190])
# Fire Temple Bugbears Outside
lnk(xx = 397, yy = 460, name_id = [14169])
loot_items(xx = 397, yy = 460, loot_source_name = [14169])
if get_v('qs_autokill_temple_level_2_stage') == 0:
set_v('qs_autokill_temple_level_2_stage', 1)
elif get_v('qs_autokill_temple_level_2_stage') == 1:
set_v('qs_autokill_temple_level_2_stage', 2)
# Feldrin
lnk(xx = 562, yy = 438, name_id = [14311, 14312, 14314, 8041, 14253])
loot_items(xx = 562, yy = 438, item_proto_list = [6083, 10010, 4082, 6086, 8010], item_autoconvert_list = [6091, 4070, 4117, 4114, 4062, 9426, 8014])
# Prisoner Guards - Ogre + Greater Temple Bugbear
lnk(xx = 410, yy = 440, name_id = [8065])
loot_items(xx = 410, yy = 440, loot_source_name = [8065])
elif get_v('qs_autokill_temple_level_2_stage') == 2:
set_v('qs_autokill_temple_level_2_stage', 3)
# Water Temple
lnk(xx = 541, yy = 573, name_id = [14375, 14231, 8091, 14247, 8028, 8027, 14181, 14046, 14239, 14225])
# Juggernaut
lnk(xx = 541, yy = 573, name_id = [14244])
loot_items(xx = 541, yy = 573, item_proto_list = [10008, 6104, 4124, 6105, 9327, 9178], item_autoconvert_list = [6039, 9508, 9400, 6178, 6170, 9546, 9038, 9536])
# Oohlgrist
lnk(xx = 483, yy = 614, name_id = [14262, 14195])
loot_items(xx = 483, yy = 614, item_proto_list = [6101, 6107], item_autoconvert_list = [6106, 12014, 6108])
# Salamanders
lnk(xx = 433, yy = 583, name_id = [8063, 14384, 14111])
lnk(xx = 423, yy = 583, name_id = [8063, 14384, 14111])
loot_items(xx = 433, yy = 583, item_proto_list = [4028, 12016, 6101, 4136], item_autoconvert_list = [6121, 8020])
elif get_v('qs_autokill_temple_level_2_stage') == 3:
set_v('qs_autokill_temple_level_2_stage', 4)
# Alrrem
lnk(xx = 415, yy = 499, name_id = [14169, 14211, 8047, 14168, 14212, 14167, 14166, 14344, 14224, 14343])
loot_items(xx = 415, yy = 499, item_proto_list = [10007, 4079, 6082], item_autoconvert_list = [6094, 6060, 6062, 6068, 6069, 6335, 6269, 6074, 6077, 6093, 6167, 6177, 6172, 8019, 6039, 4131, 6050, 4077, 6311])
elif get_v('qs_autokill_temple_level_2_stage') == 4:
set_v('qs_autokill_temple_level_2_stage', 5)
# Big Bugbear Room
lnk(xx = 430, yy = 361, name_id = (range(14174, 14178) +[14213, 14214, 14215, 14216]) )
lnk(xx = 430, yy = 391, name_id = (range(14174, 14178) +[14213, 14214, 14215, 14216]) )
loot_items(xx = 430, yy = 361, item_autoconvert_list = [6093, 6173, 6168, 6163, 6056])
loot_items(xx = 430, yy = 391, item_autoconvert_list = [6093, 6173, 6168, 6163, 6056])
if (cur_map == 5105): # Temple Level 3 - Thrommel Floor
if get_f('qs_autokill_greater_temple'):
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
# Northern Trolls
lnk(xx = 394, yy = 401, name_id = [14262])
# Shadows
lnk(xx = 369, yy = 431, name_id = [14289])
lnk(xx = 369, yy = 451, name_id = [14289])
# Ogres:
lnk(xx = 384, yy = 465, name_id = [14249])
loot_items(xx = 384, yy = 465)
# Ettin:
lnk(xx = 437, yy =524, name_id = [14238])
loot_items(xx = 437, yy = 524)
# Yellow Molds:
lnk(xx = 407, yy =564, name_id = [14276])
# Groaning Spirit:
lnk(xx = 441, yy = 459, name_id = [14280])
loot_items(xx = 441, yy = 459, item_proto_list = [4218, 6090], item_autoconvert_list = [9214, 4191, 6058, 9123, 6214, 9492, 9391, 4002])
# Key Trolls:
lnk(xx = 489, yy = 535, name_id = [14262])
lnk(xx = 489, yy = 504, name_id = [14262])
loot_items(xx = 489, yy = 504, item_proto_list = range(10016, 10020) )
loot_items(xx = 489, yy = 535, item_proto_list = range(10016, 10020) )
# Will o Wisps:
lnk(xx = 551, yy = 583, name_id = [14291])
# Lamia:
lnk(xx = 584, yy = 594, name_id = [14342, 14274])
loot_items(xx = 584, yy = 594, item_proto_list = [4083])
# Jackals, Werejackals & Gargoyles:
lnk(xx = 511, yy = 578, name_id = [14051, 14239, 14138])
lnk(xx = 528, yy = 556, name_id = [14051, 14239, 14138])
# UmberHulks
lnk(xx = 466, yy = 565, name_id = [14260])
if get_v('qs_autokill_temple_level_3_stage') == 0:
set_v('qs_autokill_temple_level_3_stage', 1)
elif get_v('qs_autokill_temple_level_3_stage') == 1:
set_v('qs_autokill_temple_level_3_stage', 2)
# Gel Cube
lnk(xx = 476, yy = 478, name_id = [14139])
# Black Pudding
lnk(xx = 442, yy = 384, name_id = [14143])
# Goblins:
lnk(xx = 491, yy = 389, name_id = (range(14183, 14188)+ [14219, 14217]) )
loot_items(xx = 491, yy = 389)
# Carrion Crawler:
lnk(xx = 524, yy = 401, name_id = [14190] )
# Ogres near thrommel:
lnk(xx = 569, yy = 412, name_id = [14249, 14353] )
loot_items(xx = 569, yy = 412, loot_source_name = [14249, 14353], item_autoconvert_list = [4134])
# Leucrottas:
lnk(xx = 405, yy = 590, name_id = [14351] )
elif get_v('qs_autokill_temple_level_3_stage') == 2:
set_v('qs_autokill_temple_level_3_stage', 3)
# Pleasure dome:
lnk(xx = 553, yy = 492, name_id = [14346, 14174, 14249, 14176, 14353, 14175, 14352, 14177] )
lnk(xx = 540, yy = 480, name_id = [14346, 14174, 14249, 14176, 14353, 14175, 14352, 14177] )
lnk(xx = 569, yy = 485, name_id = [8034, 14346, 14249, 14174, 14176, 14353, 14175, 14352, 14177] )
loot_items(xx = 540, yy = 480, loot_source_name = [8034, 14346, 14249, 14174, 14176, 14353, 14175, 14352, 14177], item_autoconvert_list = [6334])
loot_items(xx = 553, yy = 492, loot_source_name = [8034, 14346, 14249, 14174, 14176, 14353, 14175, 14352, 14177], item_autoconvert_list = [6334])
loot_items(xx = 569, yy = 485, loot_source_name = [8034, 14346, 14249, 14174, 14176, 14353, 14175, 14352, 14177], item_autoconvert_list = [6334])
game.global_flags[164] = 1 # Turns on Bugbears
elif get_v('qs_autokill_temple_level_3_stage') == 3:
set_v('qs_autokill_temple_level_3_stage', 4)
# Pleasure dome - make sure:
lnk(xx = 553, yy = 492, name_id = [14346, 14174, 14249, 14176, 14353, 14175, 14352, 14177] )
lnk(xx = 540, yy = 480, name_id = [14346, 14174, 14249, 14176, 14353, 14175, 14352, 14177] )
lnk(xx = 569, yy = 485, name_id = [8034, 14346, 14249, 14174, 14176, 14353, 14175, 14352, 14177] )
# Smigmal & Falrinth
ass1 = game.obj_create(14782, location_from_axis(614, 455) )
ass2 = game.obj_create(14783, location_from_axis(614, 455) )
lnk(xx = 614, yy = 455, name_id = [14232, 14782, 14783] )
loot_items(xx = 614, yy = 455, item_proto_list = [10011, 6125, 6088], item_autoconvert_list = [4126, 6073, 6335, 8025])
lnk(xx = 614, yy = 480, name_id = [14110, 14177, 14346, 20123] )
loot_items(xx = 619, yy = 480, item_proto_list = [12560, 10012, 6119], item_autoconvert_list = [4179, 9173])
loot_items(xx = 612, yy = 503, loot_source_name = [1033], item_proto_list = [12560, 10012, 6119], item_autoconvert_list = [4179, 9173])
if (cur_map == 5080): # Temple Level 4 - Greater Temple
if get_f('qs_autokill_greater_temple'):
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
game.global_flags[820] = 1 # Trap Disabled
game.global_flags[148] = 1 # Paida Sane
# Eastern Trolls
lnk(xx = 452, yy = 552, name_id = [14262])
# Western Trolls
lnk(xx = 513, yy = 552, name_id = [14262])
# Troll + Ettin
lnk(xx = 522, yy = 586, name_id = [14262, 14238])
loot_items(xx = 522, yy = 586)
# Hill Giants
lnk(xx = 570, yy = 610, name_id = [14218, 14217, 14219])
loot_items(xx = 570, yy = 610)
# Ettins
lnk(xx = 587, yy = 580, name_id = [14238])
loot_items(xx = 587, yy = 580)
# More Trolls
lnk(xx = 555, yy = 546, name_id = [14262])
if get_v('qs_autokill_temple_level_4_stage') == 0:
set_v('qs_autokill_temple_level_4_stage', 1)
elif get_v('qs_autokill_temple_level_4_stage') == 1:
set_v('qs_autokill_temple_level_4_stage', 2)
# Bugbear quarters
lnk(xx = 425, yy = 591, name_id = [14174, 14175, 14176, 14177, 14249, 14347, 14346 ])
lnk(xx = 435, yy = 591, name_id = [14174, 14175, 14176, 14177, 14249, 14347, 14346 ])
lnk(xx = 434, yy = 603, name_id = [14174, 14175, 14176, 14177, 14249, 14347, 14346 ])
lnk(xx = 405, yy = 603, name_id = [14174, 14175, 14176, 14177, 14249, 14347, 14346 ])
loot_items(xx = 435, yy = 590)
loot_items(xx = 425, yy = 590)
loot_items(xx = 435, yy = 603)
loot_items(xx = 405, yy = 603)
elif get_v('qs_autokill_temple_level_4_stage') == 2:
set_v('qs_autokill_temple_level_4_stage', 3)
# Insane Ogres
lnk(xx = 386, yy = 584, name_id = [14356, 14355, 14354])
loot_items(xx = 386, yy = 584)
# Senshock's Posse
lnk(xx = 386, yy = 528, name_id = [14296, 14298, 14174, 14110, 14302, 14292])
for obj_1 in game.obj_list_vicinity(location_from_axis(386, 528), OLC_NPC):
for pc_1 in game.party[0].group_list():
obj_1.ai_shitlist_remove( pc_1 )
obj_1.reaction_set( pc_1, 50 )
loot_items(xx = 386, yy = 528)
elif get_v('qs_autokill_temple_level_4_stage') == 3:
set_v('qs_autokill_temple_level_4_stage', 4)
# Hedrack's Posse
lnk(xx = 493, yy = 442, name_id = [14238, 14239, 14218, 14424, 14296, 14298, 14174, 14176, 14177, 14110, 14302, 14292])
for obj_1 in game.obj_list_vicinity(location_from_axis(493, 442), OLC_NPC):
for pc_1 in game.party[0].group_list():
obj_1.ai_shitlist_remove( pc_1 )
obj_1.reaction_set( pc_1, 50 )
loot_items(xx = 493, yy = 442)
lnk(xx = 465, yy = 442, name_id = [14238, 14239, 14218, 14424, 14296, 14298, 14174, 14176, 14177, 14110, 14302, 14292])
for obj_1 in game.obj_list_vicinity(location_from_axis(493, 442), OLC_NPC):
for pc_1 in game.party[0].group_list():
obj_1.ai_shitlist_remove( pc_1 )
obj_1.reaction_set( pc_1, 50 )
loot_items(xx = 493, yy = 442)
# Fungi
lnk(xx = 480, yy = 375, name_id = [14274, 14143, 14273, 14276, 14142, 14141, 14282])
loot_items(xx = 484, yy = 374)
loot_items(xx = 464, yy = 374)
lnk(xx = 480, yy = 353, name_id = [14277, 14140])
##################################
### NODES #
##################################
if (cur_map == 5083): # Fire Node
if get_f('qs_autokill_nodes'):
# Fire Toads
lnk(xx = 535, yy = 525, name_id = [14300])
# Bodaks
lnk(xx = 540, yy = 568, name_id = [14328])
# Salamanders
lnk(xx = 430, yy = 557, name_id = [14111])
# Salamanders near Balor
lnk(xx = 465, yy = 447, name_id = [14111])
# Efreeti
lnk(xx = 449, yy = 494, name_id = [14340])
# Fire Elementals + Snakes
lnk(xx = 473, yy = 525, name_id = [14298, 14626])
lnk(xx = 462, yy = 532, name_id = [14298, 14626])
##########################
### VERBOBONC #
##########################
if (cur_map == 5154): # Scarlett Bro bottom floor
if get_f('qs_autokill_greater_temple'):
game.global_flags[984] = 1 # Skip starter convo
game.global_flags[982] = 1
if (cur_map == 5152): # Prince Zook quarters
if get_f('qs_autokill_greater_temple'):
game.global_flags[969] = 1 # Met prince Zook
game.global_flags[985] = 1 # Mention Drow Problem
game.quests[69].state = qs_accepted
game.global_flags[981] = 1 # Zook said Lerrick mean
game.global_vars[977] = 1 # Zook said talk to Absalom abt Lerrick
if game.global_vars[999] >= 15:
game.quests[69].state = qs_completed
if (cur_map == 5126): # Drow Caves I - spidersfest
if get_f('qs_autokill_greater_temple'):
# Spidors 1
lnk(xx = 465, yy = 471, name_id = [14399, 14397])
lnk(xx = 451, yy = 491, name_id = [14399, 14397])
lnk(xx = 471, yy = 491, name_id = [14399, 14397])
lnk(xx = 437, yy = 485, name_id = [14741, 14397])
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
# Key
loot_items(item_proto_list = [10022], loot_money_and_jewels_also = 0)
return
if (cur_map == 5127): # Drow Caves II - 2nd spidersfest
if get_f('qs_autokill_greater_temple'):
# Spiders
lnk(xx = 488, yy = 477, name_id = [14741, 14397, 14620])
# Drow
lnk(xx = 455, yy = 485, name_id = [14708, 14737, 14736, 14735])
loot_items(xx = 455, yy = 481, item_autoconvert_list = [4132, 6057, 4082, 4208, 6076, 6046, 6045, 5011, 6040, 6041, 6120, 4193, 6160, 6161, 6334, 4081, 6223, 6073])
if (cur_map == 5128): # Drow Caves III - Drowfest I
if get_f('qs_autokill_greater_temple'):
# Garg. Spider
lnk(xx = 497, yy = 486, name_id = [14524])
# Drow
lnk(xx = 473, yy = 475, name_id = [14399, 14708, 14737, 14736, 14735])
lnk(xx = 463, yy = 485, name_id = [14399, 14708, 14737, 14736, 14735])
loot_items(xx = 475, yy = 471, item_autoconvert_list = [4132, 6057, 4082, 4208, 6076, 6046, 6045, 5011, 6040, 6041, 6120, 4193, 6160, 6161, 6334, 4081, 6223, 6073])
lnk(xx = 456, yy = 487, name_id = [14399, 14708, 14737, 14736, 14735, 14734])
lnk(xx = 427, yy = 487, name_id = [14399, 14708, 14737, 14736, 14735, 14734])
loot_items(xx = 465, yy = 486, item_autoconvert_list = [4132, 6057, 4082, 4208, 6076, 6046, 6045, 5011, 6040, 6041, 6120, 4193, 6160, 6161, 6334, 4081, 6223, 6073, 6058])
loot_items(xx = 425, yy = 481, item_autoconvert_list = [4132, 6057, 4082, 4208, 6076, 6046, 6045, 5011, 6040, 6041, 6120, 4193, 6160, 6161, 6334, 4081, 6223, 6073, 6058])
loot_items(xx = 475, yy = 471, item_autoconvert_list = [4132, 6057, 4082, 4208, 6076, 6046, 6045, 5011, 6040, 6041, 6120, 4193, 6160, 6161, 6334, 4081, 6223, 6073, 6058])
loot_items(xx = 425, yy = 481, item_proto_list = [6051, 4139, 4137] )
if (cur_map == 5129): # Drow Caves IV - Spiders cont'd
if get_f('qs_autokill_greater_temple'):
lnk(xx = 477, yy = 464, name_id = [14524, 14399, 14397])
lnk(xx = 497, yy = 454, name_id = [14524, 14399, 14397])
lnk(xx = 467, yy = 474, name_id = [14524, 14399, 14397, 14741])
lnk(xx = 469, yy = 485, name_id = [14524, 14399, 14397])
if (cur_map == 5130): # Drow Caves V - Young White Dragons
if get_f('qs_autokill_greater_temple'):
lnk(xx = 489, yy = 455, name_id = [14707])
if (cur_map == 5131): # Drow Caves VI - Adult White Dragon
if get_f('qs_autokill_greater_temple'):
lnk(xx = 480, yy = 535, name_id = [14999])
loot_items(xx = 480, yy = 535)
if (cur_map == 5148): # Verbobonc Jail
if get_f('qs_autokill_greater_temple'):
game.quests[79].state = qs_accepted
game.quests[80].state = qs_accepted
game.quests[81].state = qs_accepted
if game.global_vars[964] == 0:
game.global_vars[964] = 1
if game.global_flags[956] == 1:
game.quests[79].state = qs_completed
if game.global_flags[957] == 1:
game.quests[80].state = qs_completed
if game.global_flags[958] == 1:
game.quests[81].state = qs_completed
if (cur_map == 5151): # Verbobonc Great Hall
if get_f('qs_autokill_greater_temple'):
game.global_vars[979] = 2 # Allows meeting with Mayor
game.global_flags[980] = 1 # Got info about Verbobonc
if (cur_map == 5124): # Spruce Goose Inn
if get_f('qs_autokill_greater_temple'):
if is_timed_autokill == 0:
game.timevent_add(autokill, (cur_map, 1, 1), 100)
else:
lnk(xx=484, yy = 479, name_id = 8716) # Guntur Gladstone
game.global_vars[961] = 2 # Have discussed wreaking havoc
loot_items(loot_source_name = 8716, item_autoconvert_list = [6202, 6306, 4126, 4161])
return
#######################
#######################
### END OF AUTOKILL ###
#######################
#######################
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/co8fixes/scr/py00439script_daemon.py
|
Python
|
mit
| 95,140
|
[
"CRYSTAL"
] |
ecf650f2a991aef93f8db5ceea1854ae81ae5c0ca7dbfd1fba73169e44cdf484
|
import glob
import MDAnalysis as MDA
import numpy
#targets = glob.glob('../CSAR_FULL_RELEASE_29NOVEMBER2012/*/SETUP_DOCKING_FILES/PROTEIN_ALONE/*pdb')
targets = glob.glob('../CSAR_FULL_RELEASE_29NOVEMBER2012/*/SETUP_DOCKING_FILES/COMPLEX/*pdb')
for target in targets:
print target
for rotation in range(5):
u = MDA.Universe(target)
atom1 = u.atoms[numpy.random.randint(0,len(u.atoms))]
atom2 = u.atoms[numpy.random.randint(0,len(u.atoms))]
angle = numpy.random.randint(0,360)
u.atoms.rotateby(angle, (atom1, atom2))
rotFilename = target.replace('.pdb','_rot%i.pdb' %(rotation+1))
writer = MDA.Writer(rotFilename)
writer.write(u)
|
j-wags/POVME
|
POVME/tests/surf_area/rotateProteins.py
|
Python
|
gpl-3.0
| 706
|
[
"MDAnalysis"
] |
248d6d8abafe83cd9ff13570f079d6bdce81024d2965063bd5126230b1057966
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import _ni_support
import _nd_image
from scipy.misc import doccer
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
_input_doc = \
"""input : array-like
input array to filter"""
_axis_doc = \
"""axis : integer, optional
axis of ``input`` along which to calculate. Default is -1"""
_output_doc = \
"""output : array, optional
The ``output`` parameter passes an array in which to store the
filter output."""
_size_foot_doc = \
"""size : scalar or tuple, optional
See footprint, below
footprint : array, optional
Either ``size`` or ``footprint`` must be defined. ``size`` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
``footprint`` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and ``size`` is 2, then the actual size used is
(2,2,2).
"""
_mode_doc = \
"""mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'reflect'"""
_cval_doc = \
"""cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0"""
_origin_doc = \
"""origin : scalar, optional
The ``origin`` parameter controls the placement of the filter. Default 0"""
_extra_arguments_doc = \
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function"""
_extra_keywords_doc = \
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function"""
docdict = {
'input':_input_doc,
'axis':_axis_doc,
'output':_output_doc,
'size_foot':_size_foot_doc,
'mode':_mode_doc,
'cval':_cval_doc,
'origin':_origin_doc,
'extra_arguments':_extra_arguments_doc,
'extra_keywords':_extra_keywords_doc,
}
docfiller = doccer.filldoc(docdict)
@docfiller
def correlate1d(input, weights, axis = -1, output = None, mode = "reflect",
                cval = 0.0, origin = 0):
    """Calculate a one-dimensional correlation along the given axis.

    The lines of the array along the given axis are correlated with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : array
        one-dimensional sequence of numbers
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Raises
    ------
    TypeError
        If ``input`` is complex.
    RuntimeError
        If ``weights`` is not a non-empty one-dimensional sequence.
    ValueError
        If ``origin`` shifts the filter midpoint outside the kernel.
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    if weights.ndim != 1 or weights.shape[0] < 1:
        raise RuntimeError('no filter weights given')
    if not weights.flags.contiguous:
        # the C implementation requires a contiguous weights buffer
        weights = weights.copy()
    axis = _ni_support._check_axis(axis, input.ndim)
    # The shifted midpoint len(weights) // 2 + origin must be a valid index
    # into the kernel, i.e. in [0, len(weights) - 1].  The previous check
    # used '>' and therefore accepted an origin one position past the end.
    if ((len(weights) // 2 + origin < 0) or
        (len(weights) // 2 + origin >= len(weights))):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
                          origin)
    return return_value
@docfiller
def convolve1d(input, weights, axis = -1, output = None, mode = "reflect",
               cval = 0.0, origin = 0):
    """Calculate a one-dimensional convolution along the given axis.

    The lines of the array along the given axis are convolved with the
    given weights.

    Parameters
    ----------
    %(input)s
    weights : ndarray
        one-dimensional sequence of numbers
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # Convolution is correlation with a reversed kernel and a mirrored
    # origin, so transform the arguments and delegate to correlate1d.
    reversed_weights = weights[::-1]
    origin = -origin
    if len(reversed_weights) % 2 == 0:
        # Even-length kernels have no true centre tap; shift the mirrored
        # origin by one so the result matches the convolution convention.
        origin -= 1
    return correlate1d(input, reversed_weights, axis, output, mode, cval,
                       origin)
@docfiller
def gaussian_filter1d(input, sigma, axis = -1, order = 0, output = None,
                      mode = "reflect", cval = 0.0):
    """One-dimensional Gaussian filter.
    Parameters
    ----------
    %(input)s
    sigma : scalar
        standard deviation for Gaussian kernel
    %(axis)s
    order : {0, 1, 2, 3}, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. An order of 1, 2, or 3 corresponds to convolution with
        the first, second or third derivatives of a Gaussian. Higher
        order derivatives are not implemented
    %(output)s
    %(mode)s
    %(cval)s
    """
    if order not in range(4):
        raise ValueError('Order outside 0..3 not implemented')
    sd = float(sigma)
    # make the length of the filter equal to 4 times the standard
    # deviations:
    lw = int(4.0 * sd + 0.5)
    # Symmetric kernel of 2*lw + 1 taps, centred at index lw.
    weights = [0.0] * (2 * lw + 1)
    weights[lw] = 1.0
    sum = 1.0
    # NOTE: from here on ``sd`` holds sigma**2 (the variance), not sigma.
    sd = sd * sd
    # calculate the kernel:
    for ii in range(1, lw + 1):
        tmp = math.exp(-0.5 * float(ii * ii) / sd)
        weights[lw + ii] = tmp
        weights[lw - ii] = tmp
        sum += 2.0 * tmp
    # Normalise so the taps sum to 1.
    for ii in range(2 * lw + 1):
        weights[ii] /= sum
    # implement first, second and third order derivatives:
    # Each branch rewrites the normalised Gaussian taps in place with the
    # analytic derivative of the Gaussian, with signs arranged for use by
    # correlate1d (correlation, not convolution).
    if order == 1 : # first derivative
        weights[lw] = 0.0
        for ii in range(1, lw + 1):
            x = float(ii)
            tmp = -x / sd * weights[lw + ii]
            weights[lw + ii] = -tmp
            weights[lw - ii] = tmp
    elif order == 2: # second derivative
        weights[lw] *= -1.0 / sd
        for ii in range(1, lw + 1):
            x = float(ii)
            tmp = (x * x / sd - 1.0) * weights[lw + ii] / sd
            weights[lw + ii] = tmp
            weights[lw - ii] = tmp
    elif order == 3: # third derivative
        weights[lw] = 0.0
        sd2 = sd * sd
        for ii in range(1, lw + 1):
            x = float(ii)
            tmp = (3.0 - x * x / sd) * x * weights[lw + ii] / sd2
            weights[lw + ii] = -tmp
            weights[lw - ii] = tmp
    return correlate1d(input, weights, axis, output, mode, cval, 0)
@docfiller
def gaussian_filter(input, sigma, order = 0, output = None,
                    mode = "reflect", cval = 0.0):
    """Multi-dimensional Gaussian filter.
    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    order : {0, 1, 2, 3} or sequence from same set, optional
        The order of the filter along each axis is given as a sequence
        of integers, or as a single number. An order of 0 corresponds
        to convolution with a Gaussian kernel. An order of 1, 2, or 3
        corresponds to convolution with the first, second or third
        derivatives of a Gaussian. Higher order derivatives are not
        implemented
    %(output)s
    %(mode)s
    %(cval)s
    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.
    """
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    orders = _ni_support._normalize_sequence(order, input.ndim)
    if not set(orders).issubset(set(range(4))):
        # Valid orders are 0..3 (the check above tests membership of
        # range(4)).  The message previously said '0..4', contradicting
        # both this check and gaussian_filter1d's error text.
        raise ValueError('Order outside 0..3 not implemented')
    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
    axes = range(input.ndim)
    # Only filter axes with a non-negligible sigma; each pass reads from
    # ``input`` and writes into ``output``, which then feeds the next pass.
    axes = [(axes[ii], sigmas[ii], orders[ii])
            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
    if len(axes) > 0:
        for axis, sigma, order in axes:
            gaussian_filter1d(input, sigma, axis, order, output,
                              mode, cval)
            input = output
    else:
        # All sigmas negligible: the filter is the identity, just copy.
        output[...] = input[...]
    return return_value
@docfiller
def prewitt(input, axis = -1, output = None, mode = "reflect", cval = 0.0):
    """Calculate a Prewitt filter.

    Applies a [-1, 0, 1] derivative along ``axis`` and a [1, 1, 1]
    smoothing along every other axis.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    """
    input = numpy.asarray(input)
    axis = _ni_support._check_axis(axis, input.ndim)
    output, return_value = _ni_support._get_output(output, input)
    # Central-difference derivative along the requested axis.
    correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
    # Uniform smoothing along each remaining axis, in place on ``output``.
    for smooth_axis in range(input.ndim):
        if smooth_axis == axis:
            continue
        correlate1d(output, [1, 1, 1], smooth_axis, output, mode, cval, 0)
    return return_value
@docfiller
def sobel(input, axis = -1, output = None, mode = "reflect", cval = 0.0):
    """Calculate a Sobel filter.

    Applies a [-1, 0, 1] derivative along ``axis`` and a [1, 2, 1]
    smoothing along every other axis.

    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    """
    input = numpy.asarray(input)
    axis = _ni_support._check_axis(axis, input.ndim)
    output, return_value = _ni_support._get_output(output, input)
    # Central-difference derivative along the requested axis.
    correlate1d(input, [-1, 0, 1], axis, output, mode, cval, 0)
    # Triangular smoothing along each remaining axis, in place on ``output``.
    for smooth_axis in range(input.ndim):
        if smooth_axis == axis:
            continue
        correlate1d(output, [1, 2, 1], smooth_axis, output, mode, cval, 0)
    return return_value
@docfiller
def generic_laplace(input, derivative2, output = None, mode = "reflect",
                    cval = 0.0,
                    extra_arguments = (),
                    extra_keywords = None):
    """Calculate a multidimensional laplace filter using the provided
    second derivative function.
    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::
            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)
        See ``extra_arguments``, ``extra_keywords`` below
    %(output)s
    %(mode)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    axes = range(input.ndim)
    if len(axes) > 0:
        # The first axis writes its second derivative directly into
        # ``output``.
        derivative2(input, axes[0], output, mode, cval,
                    *extra_arguments, **extra_keywords)
        for ii in range(1, len(axes)):
            # Deliberately pass ``output.dtype`` (a dtype, not the array)
            # as the output argument: the filters treat a dtype as a
            # request to allocate and return a fresh array, which is then
            # accumulated into ``output``.  Passing ``output`` itself
            # would overwrite the running sum.
            tmp = derivative2(input, axes[ii], output.dtype, mode, cval,
                              *extra_arguments, **extra_keywords)
            output += tmp
    else:
        # Zero-dimensional input: nothing to differentiate, just copy.
        output[...] = input[...]
    return return_value
@docfiller
def laplace(input, output = None, mode = "reflect", cval = 0.0):
    """Calculate a multidimensional laplace filter using an estimation
    for the second derivative based on differences.

    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode)s
    %(cval)s
    """
    def second_derivative(input, axis, output, mode, cval):
        # [1, -2, 1] is the standard finite-difference second derivative.
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
    return generic_laplace(input, second_derivative, output, mode, cval)
@docfiller
def gaussian_laplace(input, sigma, output = None, mode = "reflect",
                     cval = 0.0):
    """Calculate a multidimensional laplace filter using gaussian
    second derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter, given per axis
        as a sequence, or as a single number applied to every axis.
    %(output)s
    %(mode)s
    %(cval)s
    """
    input = numpy.asarray(input)

    def second_derivative(input, axis, output, mode, cval, sigma):
        # Gaussian derivative of order 2 along ``axis``, order 0 elsewhere.
        orders = [0] * input.ndim
        orders[axis] = 2
        return gaussian_filter(input, sigma, orders, output, mode, cval)

    return generic_laplace(input, second_derivative, output, mode, cval,
                           extra_arguments = (sigma,))
@docfiller
def generic_gradient_magnitude(input, derivative, output = None,
                mode = "reflect", cval = 0.0,
                extra_arguments = (), extra_keywords = None):
    """Calculate a gradient magnitude using the provided function for
    the gradient.

    The result is sqrt(sum over axes of derivative(input, axis)**2).

    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::
            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)
        See ``extra_arguments``, ``extra_keywords`` below
        ``derivative`` can assume that ``input`` and ``output`` are
        ndarrays.
        Note that the output from ``derivative`` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    axes = range(input.ndim)
    if len(axes) > 0:
        # First axis writes its derivative straight into the output array.
        derivative(input, axes[0], output, mode, cval,
                   *extra_arguments, **extra_keywords)
        numpy.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # Passing output.dtype (a dtype, not an array) makes the
            # callable allocate a fresh array, so the squared per-axis
            # terms can be accumulated into ``output``.
            tmp = derivative(input, axes[ii], output.dtype, mode, cval,
                             *extra_arguments, **extra_keywords)
            numpy.multiply(tmp, tmp, tmp)
            output += tmp
        # BUG FIX: the previous check compared version *strings*
        # (numpy.version.short_version > '1.6.1'), which is lexicographic
        # and wrongly classifies e.g. '1.10.0' as older than '1.6.1'.
        # Compare numeric components instead.  Newer numpy requires
        # casting='unsafe' for an in-place sqrt into an integer output.
        np_version = tuple(int(part) for part in
                           numpy.version.short_version.split('.')[:3])
        if np_version > (1, 6, 1):
            numpy.sqrt(output, output, casting='unsafe')
        else:
            numpy.sqrt(output, output)
    else:
        # Zero-dimensional input: pass through unchanged.
        output[...] = input[...]
    return return_value
@docfiller
def gaussian_gradient_magnitude(input, sigma, output = None,
                                mode = "reflect", cval = 0.0):
    """Calculate a multidimensional gradient magnitude using gaussian
    derivatives.

    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter, given per axis
        as a sequence, or as a single number applied to every axis.
    %(output)s
    %(mode)s
    %(cval)s
    """
    input = numpy.asarray(input)

    def first_derivative(input, axis, output, mode, cval, sigma):
        # Gaussian derivative of order 1 along ``axis``, order 0 elsewhere.
        orders = [0] * input.ndim
        orders[axis] = 1
        return gaussian_filter(input, sigma, orders, output, mode, cval)

    return generic_gradient_magnitude(input, first_derivative, output,
                                      mode, cval, extra_arguments = (sigma,))
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    """Shared implementation of correlate() and convolve().

    When ``convolution`` is true the weights are reversed along every
    axis and the origins mirrored, which turns the correlation done by
    the C backend into a convolution.
    """
    input = numpy.asarray(input)
    # BUG FIX: this guard previously tested the builtin ``int``
    # (numpy.iscomplexobj(int)) which is always False, so complex input
    # arrays slipped through to the C code unchecked.
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        # Reverse the kernel along every axis and flip the origins; for
        # even-sized axes the centre shifts by one, hence the decrement.
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                origins[ii] -= 1
    for origin, lenw in zip(origins, wshape):
        if (lenw // 2 + origin < 0) or (lenw // 2 + origin > lenw):
            raise ValueError('invalid origin')
    if not weights.flags.contiguous:
        # The C backend requires a contiguous weights array.
        weights = weights.copy()
    output, return_value = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    return return_value
@docfiller
def correlate(input, weights, output = None, mode = 'reflect', cval = 0.0,
              origin = 0):
    """
    Multi-dimensional correlation.

    The array is correlated with the given kernel.

    Parameters
    ----------
    input : array-like
        input array to filter
    weights : ndarray
        array of weights, same number of dimensions as input
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    origin : scalar, optional
        The ``origin`` parameter controls the placement of the filter.
        Default 0

    See Also
    --------
    convolve : Convolve an image with a kernel.
    """
    # Correlation is the common implementation; convolution=False keeps
    # the kernel orientation as given.
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, convolution=False)
@docfiller
def convolve(input, weights, output = None, mode = 'reflect', cval = 0.0,
             origin = 0):
    """
    Multi-dimensional convolution.

    The array is convolved with the given kernel.

    Parameters
    ----------
    input : array_like
        Input array to filter.
    weights : array_like
        Array of weights, same number of dimensions as input
    output : ndarray, optional
        The `output` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        the `mode` parameter determines how the array borders are
        handled. For 'constant' mode, values beyond borders are set to be
        `cval`. Default is 'reflect'.
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    origin : scalar, optional
        The `origin` parameter controls the placement of the filter.
        Default is 0.

    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.

    See Also
    --------
    correlate : Correlate an image with a kernel.

    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+j-k} W_j}`, where
    W is the `weights` kernel,
    j is the n-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.

    Examples
    --------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.

    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10,  7,  4],
           [10,  3, 11, 11],
           [15, 12, 14,  7],
           [12,  3,  7,  0]])

    Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
    with 1.0's (and then extracting only the original region of the result).

    >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
    array([[13, 11,  8,  7],
           [11,  3, 11, 14],
           [16, 12, 14, 10],
           [15,  6, 10,  5]])

    With ``mode='reflect'`` (the default), outer values are reflected at the
    edge of `input` to fill in missing values.

    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0],[0,1,0],[0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
           [3, 0, 0],
           [1, 0, 0]])

    This includes diagonally at the corners.

    >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
    >>> ndimage.convolve(b, k)
    array([[4, 2, 0],
           [3, 2, 0],
           [1, 1, 0]])

    With ``mode='nearest'``, the single nearest value to an edge in
    `input` is repeated as many times as needed to match the overlapping
    `weights`.

    >>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
    >>> ndimage.convolve(c, k, mode='nearest')
    array([[7, 0, 3],
           [5, 0, 2],
           [3, 0, 1]])
    """
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, True)
@docfiller
def uniform_filter1d(input, size, axis = -1, output = None,
                     mode = "reflect", cval = 0.0, origin = 0):
    """Calculate a one-dimensional uniform filter along the given axis.

    The lines of the array along the given axis are filtered with a
    uniform filter of given size.

    Parameters
    ----------
    %(input)s
    size : integer
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output, return_value = _ni_support._get_output(output, input)
    # The (origin-shifted) filter centre must fall inside the window.
    if not (0 <= size // 2 + origin < size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
                               origin)
    return return_value
@docfiller
def uniform_filter(input, size = 3, output = None, mode = "reflect",
                   cval = 0.0, origin = 0):
    """Multi-dimensional uniform filter.

    Parameters
    ----------
    %(input)s
    size : int or sequence of ints
        The sizes of the uniform filter are given for each axis as a
        sequence, or as a single number, in which case the size is
        equal for all axes.
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s

    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional uniform filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.
    """
    input = numpy.asarray(input)
    output, return_value = _ni_support._get_output(output, input)
    sizes = _ni_support._normalize_sequence(size, input.ndim)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    # Only axes with a filter length > 1 actually change anything.
    filter_specs = [(axis, sizes[axis], origins[axis])
                    for axis in range(input.ndim) if sizes[axis] > 1]
    if not filter_specs:
        output[...] = input[...]
    else:
        for axis, axis_size, axis_origin in filter_specs:
            uniform_filter1d(input, int(axis_size), axis, output, mode,
                             cval, axis_origin)
            # Chain the passes: each 1D filter reads the previous result.
            input = output
    return return_value
@docfiller
def minimum_filter1d(input, size, axis = -1, output = None,
                     mode = "reflect", cval = 0.0, origin = 0):
    """Calculate a one-dimensional minimum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    minimum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output, return_value = _ni_support._get_output(output, input)
    # The (origin-shifted) filter centre must fall inside the window.
    if not (0 <= size // 2 + origin < size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # Final flag 1 selects the minimum variant in the C backend.
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 1)
    return return_value
@docfiller
def maximum_filter1d(input, size, axis = -1, output = None,
                     mode = "reflect", cval = 0.0, origin = 0):
    """Calculate a one-dimensional maximum filter along the given axis.

    The lines of the array along the given axis are filtered with a
    maximum filter of given size.

    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D maximum
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    axis = _ni_support._check_axis(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output, return_value = _ni_support._get_output(output, input)
    # The (origin-shifted) filter centre must fall inside the window.
    if not (0 <= size // 2 + origin < size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # Final flag 0 selects the maximum variant in the C backend.
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 0)
    return return_value
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum):
    """Shared implementation of minimum_filter and maximum_filter.

    Chooses between a separable sequence of 1D min/max passes (fast
    path, possible when no structuring element is given and the
    footprint is rectangular/full) and the general C implementation.
    ``minimum`` selects minimum (true) or maximum (false).
    """
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable= True
        else:
            footprint = numpy.asarray(footprint)
            footprint = footprint.astype(bool)
            # A footprint that is all True is equivalent to a plain
            # rectangular size, which allows the separable fast path.
            if numpy.alltrue(numpy.ravel(footprint),axis=0):
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        # A grey-value structuring element forces the general path.
        structure = numpy.asarray(structure, dtype=numpy.float64)
        separable = False
        if footprint is None:
            footprint = numpy.ones(structure.shape, bool)
        else:
            footprint = numpy.asarray(footprint)
            footprint = footprint.astype(bool)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if separable:
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        axes = range(input.ndim)
        # Only axes with a filter length > 1 change anything.
        axes = [(axes[ii], sizes[ii], origins[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin in axes:
                filter_(input, int(size), axis, output, mode, cval, origin)
                # Chain the passes: each 1D filter reads the prior result.
                input = output
        else:
            output[...] = input[...]
    else:
        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for origin, lenf in zip(origins, fshape):
            # The (origin-shifted) centre must fall inside the footprint.
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError('invalid origin')
        if not footprint.flags.contiguous:
            # The C backend requires contiguous arrays.
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError('structure array has incorrect shape')
            if not structure.flags.contiguous:
                structure = structure.copy()
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    return return_value
@docfiller
def minimum_filter(input, size = None, footprint = None, output = None,
                   mode = "reflect", cval = 0.0, origin = 0):
    """Calculates a multi-dimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # Plain minimum filter: no grey-value structuring element; the final
    # argument (1) selects the minimum variant.
    structure = None
    return _min_or_max_filter(input, size, footprint, structure, output,
                              mode, cval, origin, 1)
@docfiller
def maximum_filter(input, size = None, footprint = None, output = None,
                   mode = "reflect", cval = 0.0, origin = 0):
    """Calculates a multi-dimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # Plain maximum filter: no grey-value structuring element; the final
    # argument (0) selects the maximum variant.
    structure = None
    return _min_or_max_filter(input, size, footprint, structure, output,
                              mode, cval, origin, 0)
@docfiller
def _rank_filter(input, rank, size = None, footprint = None, output = None,
      mode = "reflect", cval = 0.0, origin = 0, operation = 'rank'):
    # NOTE(review): @docfiller on a private, undocumented helper has no
    # %(...)s placeholders to substitute -- presumably harmless, but
    # verify the decorator tolerates a missing docstring.
    #
    # Shared implementation of rank_filter / median_filter /
    # percentile_filter: translate ``operation`` into an absolute rank,
    # then dispatch (rank 0 -> minimum, last rank -> maximum, otherwise
    # the general C rank filter).
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        # The (origin-shifted) centre must fall inside the footprint.
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    # Number of active (True) footprint elements per window.
    filter_size = numpy.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        # Negative percentiles count from the top: -20 means 80.
        if percentile < 0.0:
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    # Negative ranks count from the top: -1 is the largest element.
    if rank < 0:
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    if rank == 0:
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origin)
    elif rank == filter_size - 1:
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origin)
    else:
        output, return_value = _ni_support._get_output(output, input)
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        return return_value
@docfiller
def rank_filter(input, rank, size = None, footprint = None, output = None,
                mode = "reflect", cval = 0.0, origin = 0):
    """Calculates a multi-dimensional rank filter.

    Parameters
    ----------
    %(input)s
    rank : integer
        The rank parameter may be less then zero, i.e., rank = -1
        indicates the largest element.
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # Delegate to the shared rank implementation in plain-rank mode.
    return _rank_filter(input, rank, size, footprint, output, mode, cval,
                        origin, operation = 'rank')
@docfiller
def median_filter(input, size = None, footprint = None, output = None,
                  mode = "reflect", cval = 0.0, origin = 0):
    """
    Calculates a multi-dimensional median filter.

    Parameters
    ----------
    input : array-like
        input array to filter
    size : scalar or tuple, optional
        See footprint, below
    footprint : array, optional
        Either ``size`` or ``footprint`` must be defined. ``size`` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        ``footprint`` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``size=(n,m)`` is equivalent
        to ``footprint=np.ones((n,m))``. We adjust ``size`` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and ``size`` is 2, then the actual size used is
        (2,2,2).
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    origin : scalar, optional
        The ``origin`` parameter controls the placement of the filter.
        Default 0
    """
    # The rank argument (0 here) is ignored for the 'median' operation:
    # _rank_filter recomputes it as filter_size // 2.
    return _rank_filter(input, 0, size, footprint, output, mode, cval,
                        origin, operation = 'median')
@docfiller
def percentile_filter(input, percentile, size = None, footprint = None,
                      output = None, mode = "reflect", cval = 0.0,
                      origin = 0):
    """Calculates a multi-dimensional percentile filter.

    Parameters
    ----------
    %(input)s
    percentile : scalar
        The percentile parameter may be less then zero, i.e.,
        percentile = -20 equals percentile = 80
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    """
    # The percentile rides in the ``rank`` slot; _rank_filter converts
    # it to an absolute rank for the 'percentile' operation.
    return _rank_filter(input, percentile, size, footprint, output, mode,
                        cval, origin, operation = 'percentile')
@docfiller
def generic_filter1d(input, function, filter_size, axis = -1,
                     output = None, mode = "reflect", cval = 0.0,
                     origin = 0, extra_arguments = (),
                     extra_keywords = None):
    """Calculate a one-dimensional filter along the given axis.

    generic_filter1d iterates over the lines of the array, calling the
    given function at each line. The arguments of the line are the
    input line, and the output line. The input and output lines are 1D
    double arrays. The input line is extended appropriately according
    to the filter size and origin. The output line must be modified
    in-place with the result.

    Parameters
    ----------
    %(input)s
    function : callable
        function to apply along given axis
    filter_size : scalar
        length of the filter
    %(axis)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s
    """
    extra_keywords = {} if extra_keywords is None else extra_keywords
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = _ni_support._check_axis(axis, input.ndim)
    # The (origin-shifted) filter centre must fall inside the filter.
    if not (0 <= filter_size // 2 + origin < filter_size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter1d(input, function, filter_size, axis, output,
                               mode, cval, origin, extra_arguments,
                               extra_keywords)
    return return_value
@docfiller
def generic_filter(input, function, size = None, footprint = None,
                   output = None, mode = "reflect", cval = 0.0,
                   origin = 0, extra_arguments = (), extra_keywords = None):
    """Calculates a multi-dimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1D array of double values.

    Parameters
    ----------
    %(input)s
    function : callable
        function to apply at each element
    %(size_foot)s
    %(output)s
    %(mode)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s
    """
    extra_keywords = {} if extra_keywords is None else extra_keywords
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is not None:
        footprint = numpy.asarray(footprint).astype(bool)
    else:
        # No footprint: build a full rectangular one from ``size``.
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        footprint = numpy.ones(sizes, dtype=bool)
    fshape = [dim for dim in footprint.shape if dim > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for shifted_origin, flen in zip(origins, fshape):
        # The (origin-shifted) centre must fall inside the footprint.
        if not (0 <= flen // 2 + shifted_origin < flen):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    output, return_value = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(input, function, footprint, output, mode,
                             cval, origins, extra_arguments, extra_keywords)
    return return_value
|
ygenc/onlineLDA
|
onlineldavb_new/build/scipy/scipy/ndimage/filters.py
|
Python
|
gpl-3.0
| 40,022
|
[
"Gaussian"
] |
213975213fff1dc4539db294aa976777fc87a5355388a7cc1ba3e1a447d385cc
|
# Animate Gibbs sampling of a sticky HDP-HMM over example data and
# write the result to an mp4 file.
from __future__ import division
from builtins import range
import os
import numpy as np
import numpy.random as npr
from matplotlib import pyplot as plt
plt.ion()
npr.seed(0)  # fixed seed so the animation is reproducible
import pyhsmm
###############
#  load data  #
###############
data = np.loadtxt(os.path.join(os.path.dirname(__file__),'example-data.txt'))[:1250]
data += 0.5*np.random.normal(size=data.shape) # some extra noise
##################
#  set up model  #
##################
# Set the weak limit truncation level
Nmax = 25
# and some hyperparameters
obs_dim = data.shape[1]
# Normal-inverse-Wishart hyperparameters for the Gaussian emissions.
obs_hypparams = {'mu_0':np.zeros(obs_dim),
                'sigma_0':np.eye(obs_dim),
                'kappa_0':0.25,
                'nu_0':obs_dim+2}
# instantiate a Sticky-HDP-HMM
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams)
        for state in range(Nmax)]
model = pyhsmm.models.WeakLimitStickyHDPHMM(
        kappa=50.,alpha=6.,gamma=6.,init_state_concentration=1.,
        obs_distns=obs_distns)
model.add_data(data)
##############
#  animate!  #
##############
from moviepy.video.io.bindings import mplfig_to_npimage
from moviepy.editor import VideoClip
fig = model.make_figure()
model.plot(fig=fig,draw=False)
def make_frame_mpl(t):
    # One Gibbs sweep per video frame; ``t`` (frame time) is unused.
    model.resample_model()
    model.plot(fig=fig,update=True,draw=False)
    return mplfig_to_npimage(fig)
animation = VideoClip(make_frame_mpl, duration=10)
animation.write_videofile('gibbs.mp4',fps=40)
|
mattjj/pyhsmm
|
examples/animation.py
|
Python
|
mit
| 1,424
|
[
"Gaussian"
] |
47eef1e81b52394d7b72443acfbe05984b3284457ad31bef090492ff09a67721
|
#
# Copyright (c) 2015, Daniel Guterding <guterding@itp.uni-frankfurt.de>
#
# This file is part of dhva.
#
# dhva is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dhva is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dhva. If not, see <http://www.gnu.org/licenses/>.
#
import os
import numpy as np
import sys
from mayavi import mlab
def main():
  """Render the Fermi surface(s) from one or more .bxsf files with mayavi.

  Each command-line argument is parsed as an XCrySDen BANDGRID_3D file;
  an isosurface at the Fermi energy is drawn for each.
  """
  if(2 <= len(sys.argv)):
    filenames = sys.argv[1:]
    mlab.figure(bgcolor=(1,1,1), fgcolor=(0,0,0))
    for filename in filenames:
      filehandle = open(str(filename), 'r')
      lines = filehandle.readlines()
      filehandle.close()
      # Locate the sections of the bxsf file by their marker strings.
      fermiindex = 0
      infoindex = 0
      bandstart = 0
      bandend = 0
      for i in range(len(lines)):
        if(lines[i].find('Fermi Energy:') != -1):
          fermiindex = i
        if(lines[i].find('BANDGRID_3D_BANDS') != -1):
          infoindex = i+2
        if(lines[i].find('BAND:') != -1):
          bandstart = i+1
        if(lines[i].find('END_BANDGRID_3D') != -1):
          bandend = i
      fermi = float(lines[fermiindex].split()[2])
      # Collect all band energy values between the BAND: and END markers.
      energies = []
      for i in range(bandstart, bandend):
        line = lines[i].split()
        for val in line:
          energies.append(float(val))
      nkpoints = np.array(lines[infoindex].split(), dtype='int')
      energies = np.array(energies, dtype='float')
      energies = np.reshape(energies, (nkpoints[0], nkpoints[1], nkpoints[2]))
      # Reciprocal lattice vectors follow two lines after the grid size.
      a = np.array(lines[infoindex+2].split(), dtype='float')
      b = np.array(lines[infoindex+3].split(), dtype='float')
      c = np.array(lines[infoindex+4].split(), dtype='float')
      #be careful, this only works for orthorhombic volumes
      x,y,z = np.mgrid[0:a[0]:nkpoints[0]*1j,0:b[1]:nkpoints[1]*1j,0:c[2]:nkpoints[2]*1j]
      src = mlab.pipeline.scalar_field(x,y,z,energies)
      # Isosurface at the Fermi energy = the Fermi surface.
      mlab.pipeline.iso_surface(src, contours=[fermi], color=(1,0,0))
      mlab.axes(ranges=[0,a[0],0,b[1],0,c[2]], x_axis_visibility=False, y_axis_visibility=False, z_axis_visibility=False, xlabel='k_x', ylabel='k_y', zlabel='k_z')
      mlab.outline() #draws box
    mlab.show()
  else:
    print 'Wrong number of input arguments.'
    print 'Usage: python fs3d.py input.bxsf'
main()
|
danielguterding/dhva
|
scripts/fs3d.py
|
Python
|
gpl-3.0
| 2,660
|
[
"Mayavi"
] |
6a2f9c41c0a63adce80918c203f801ce3b74cf26690d62a7bd98385b663c793f
|
# Computes the gaussian gradients on a boxm_alpha_scene
import bvpl_octree_batch;
import os;
import optparse;
bvpl_octree_batch.register_processes();
bvpl_octree_batch.register_datatypes();
class dbvalue:
    """Lightweight handle pairing a batch-database record id with its type."""
    def __init__(self, index, type):
        self.id = index    # unsigned integer identifier
        self.type = type   # string naming the stored value's type
print("Computing Gaussian Gradients on Alpha");
#Parse inputs
parser = optparse.OptionParser(description='Save gradients to binary and text');
parser.add_option('--model_dir', action="store", dest="model_dir");
options, args = parser.parse_args();
model_dir = options.model_dir;
#model_dir = "/Users/isa/Experiments/boxm_cit_only_filtered";
grad_dir = model_dir + "/gauss_grad_alpha";
# Load the alpha (occupancy) scene from the model directory.
print("Creating a Scene");
bvpl_octree_batch.init_process("boxmCreateSceneProcess");
bvpl_octree_batch.set_input_string(0, model_dir +"/alpha_scene.xml");
bvpl_octree_batch.run_process();
(scene_id, scene_type) = bvpl_octree_batch.commit_output(0);
alpha_scene= dbvalue(scene_id, scene_type);
# Load the (pre-existing) gradient scene that will be written out.
print("Creating a Scene");
bvpl_octree_batch.init_process("boxmCreateSceneProcess");
bvpl_octree_batch.set_input_string(0, grad_dir +"/float_gradient_scene.xml");
bvpl_octree_batch.run_process();
(scene_id, scene_type) = bvpl_octree_batch.commit_output(0);
grad_scene= dbvalue(scene_id, scene_type);
# Dump the gradients of the gradient scene to binary/text output.
print("Compute Gradients");
bvpl_octree_batch.init_process("bvplGradSceneToBinProcess");
bvpl_octree_batch.set_input_from_db(0, alpha_scene);
bvpl_octree_batch.set_input_from_db(1, grad_scene);
bvpl_octree_batch.set_input_string(2, grad_dir + "/scene_gradients.txt");
bvpl_octree_batch.run_process();
|
mirestrepo/voxels-at-lems
|
bvpl/bvpl_octree/gauss_gradient_to_binary.py
|
Python
|
bsd-2-clause
| 1,608
|
[
"Gaussian"
] |
e07582b93a692833caf8383a931dc059df75543e015588cd992a050845a601a6
|
#Translates DNA sequences in all 6 reading frames, ignoring start / stop codons.
from Bio import SeqIO
from Bio.Seq import Seq
from Bio import AlignIO
import sys
import tempfile
import subprocess
from collections import Counter
from scipy.spatial import distance
import numpy as np
import os
def translate_6frames(input_file, min_size):
    """Translate DNA records in all 6 reading frames into a temp FASTA file.

    Records shorter than ``min_size`` are skipped.  Frames are written in
    the order 1, 2, 3, -1, -2, -3 with headers ``>id_<frame>``; stop
    codons ('*') are stripped from the translations.

    Returns the (still-open) NamedTemporaryFile; its ``name`` is used by
    callers to hand the data to external tools.
    """
    input_handle = open(input_file, "rU")
    f = tempfile.NamedTemporaryFile(delete=False)
    for record in SeqIO.parse(input_handle, "fasta"):
        if len(record.seq) < min_size:
            continue
        forward = str(record.seq)
        reverse = str(record.seq.reverse_complement())
        # Three forward frames (offsets 0..2), then three reverse frames.
        for sign, strand in (("", forward), ("-", reverse)):
            for frame in (1, 2, 3):
                f.write(">" + str(record.id) + "_" + sign + str(frame) + "\n")
                translation = str(Seq(strand[frame - 1:]).translate())
                f.write(translation.replace("*", "") + "\n")
    # BUG FIX: the input handle was leaked and the temp file was returned
    # unflushed, so external tools reading f.name could see a truncated
    # (buffered) file.
    input_handle.close()
    f.flush()
    return f
def calc_tetra(seq_record):
    """Return normalized tetranucleotide frequencies of a sequence record.

    Parameters
    ----------
    seq_record : object with a ``seq`` attribute convertible to str
        e.g. a Bio.SeqRecord (only ``seq_record.seq`` is read).

    Returns
    -------
    dict
        Maps each of the 256 A/C/G/T 4-mers to its frequency among the
        length-4 windows of the sequence.  Windows containing any other
        character are ignored.  All values are 0.0-valued counts when the
        sequence is shorter than 4 (previously this raised
        ZeroDivisionError).
    """
    bases = ('A', 'C', 'G', 'T')
    tetramers = {}
    for a in bases:
        for b in bases:
            for c in bases:
                for d in bases:
                    tetramers[a + b + c + d] = 0
    seq = str(seq_record.seq)
    # Slide a length-4 window over the sequence; non-ACGT windows are
    # simply absent from the dict and skipped.
    for start in range(len(seq) - 3):
        window = seq[start:start + 4]
        if window in tetramers:
            tetramers[window] += 1
    total = sum(tetramers.values())
    # BUG FIX: guard against division by zero for sequences shorter than
    # 4 bases (or with no valid ACGT windows).
    if total:
        for k in tetramers:
            tetramers[k] = float(tetramers[k]) / float(total)
    return tetramers
def find_markers(assembly, blast_path, hmmsearch_path, output_dir):
    """Locate ribosomal marker proteins in an assembly and profile their contigs.

    Pipeline: translate contigs in all 6 frames -> hmmsearch against a
    ribosomal-protein HMM -> extract hits -> BLASTP hits against a marker
    reference DB -> assign each contig the most common genus among its
    BLAST hits -> compute tetranucleotide frequencies for the marked contigs.

    Parameters:
        assembly: path to the assembly FASTA file.
        blast_path: path to (or name of) the blastp executable.
        hmmsearch_path: path to (or name of) the hmmsearch executable.
        output_dir: directory where ``.markers`` and ``.blast`` files are written.

    Returns:
        [tetramers, names, sizes] where tetramers maps contig id ->
        tetramer-frequency dict (from calc_tetra), names maps contig id ->
        most common genus string, and sizes maps contig id -> contig length.

    Side effects: writes two files into output_dir, leaves hmmsearch's
    temp alignment file on disk, and calls sys.exit() on missing
    databases/executables or when no markers are found.
    """
    input_file = assembly
    #Keep only contigs bigger than X bp
    min_size = 10000
    #Translate contigs in all 6 frames
    print "[SEARCH] Translating DNA into reading frames, creating /tmp/ file"
    # translate_6frames returns an open temp-file handle; only .name is used below.
    tempfile = translate_6frames(input_file, min_size)
    #Search for markers using hmm file
    ## Check for required files and programs
    if not os.path.isfile('./marker_genes/ribosomal.hmm'):
        print "[ERROR] Could not find marker gene file ./marker_genes/ribosomal.hmm in local directory"
        sys.exit(1)
    if not os.path.isfile('./marker_genes/markers.pin'):
        print "[ERROR] Could not find BLAST marker gene DB ./marker_genes/markers.pin in local directory"
        sys.exit(1)
    # NOTE(review): `which` only raises OSError if `which` itself is missing;
    # a nonzero exit status (tool not found) is not caught here — verify.
    try:
        subprocess.call(["which", hmmsearch_path])
    except:
        print "[ERROR] HMMSEARCH is not installed and available with the specified path: " + hmmsearch_path
        sys.exit(1)
    try:
        subprocess.call(["which", blast_path])
    except:
        print "[ERROR] BLASTP is not installed and available with the specified path: " + blast_path
        sys.exit(1)
    #Run search
    print "[SEARCH] Searching for marker proteins with hmmsearch"
    # -A writes matched alignments (Stockholm) to <tempfile>.aa; parsed below.
    output = subprocess.Popen([hmmsearch_path, "-A", tempfile.name + ".aa", './marker_genes/ribosomal.hmm', tempfile.name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, stderr = output.communicate()
    #Write out markers
    i = 0
    # Output name: assembly basename (extension stripped) + ".markers" in output_dir.
    fname = output_dir.rstrip("/") + '/.'.join(input_file.split(".")[:-1]) + ".markers"
    print "[SEARCH] Writing marker proteins to file " + fname
    f = open(fname, 'w')
    for al in AlignIO.parse(open(tempfile.name + ".aa"), "stockholm"):
        for seq in al:
            i += 1
            # Drop the trailing "/start-end" coordinate suffix hmmsearch appends to ids.
            f.write(">" + '/'.join(str(seq.id).split("/")[:-1]) + "\n")
            # Strip alignment gap characters to recover the plain protein sequence.
            f.write(str(seq.seq).replace('-', '') + "\n")
    f.close()
    if i == 0:
        print "[ERROR] No marker proteins found on all contigs in the assembly. Try running the database matching -d program."
        sys.exit()
    #BLAST markers against blast db
    print "[SEARCH] Blasting marker proteins against reference DB"
    # Tabular output: query id, subject title, percent identity, e-value; best hit only.
    output = subprocess.check_output([blast_path, '-query', fname, '-db', './marker_genes/markers', '-outfmt', '6 qseqid stitle pident evalue', '-max_target_seqs', '1'])
    lines = str(output).splitlines()
    fname = output_dir.rstrip("/") + '/.'.join(input_file.split(".")[:-1]) + ".blast"
    print "[SEARCH] Writing marker protein BLAST matches to file " + fname
    f = open(fname, 'w')
    for line in lines:
        f.write(line + "\n")
    f.close()
    #Calculate most common species for a contig.
    contigs = {}
    for line in lines:
        # Query ids are "<contig>_<frame>"; drop the frame suffix to get the contig id.
        contig = '_'.join(line.split("\t")[0].split("_")[:-1])
        # Genus = first word inside the [...] organism tag of the subject title,
        # with any "Candidatus" prefix removed; empty if the title has no tag.
        try:
            genus = line.split("\t")[1].split("[")[1].split("]")[0].replace("Candidatus","").lstrip().strip().split()[0]
        except:
            genus = ''
        if contig not in contigs.keys():
            contigs[contig] = [genus]
        else:
            contigs[contig].append(genus)
    names = {}
    for contig in contigs:
        # Majority vote across all marker hits on this contig.
        count = Counter(contigs[contig])
        names[contig] = str(count.most_common(1)[0][0])
    #Calculate tetranucleotide frequencies for all marked contigs
    print "[SEARCH] Calculating tetranucleotide frequencies for marker contigs"
    tetramers = {}
    sizes = {}
    input_handle = open(input_file, "rU")
    for record in SeqIO.parse(input_handle, "fasta") :
        # Only contigs that both received a genus call and pass the size cutoff.
        if record.id in names and len(record.seq) >= min_size:
            tetramers[record.id] = calc_tetra(record)
            sizes[record.id] = len(record.seq)
    return [tetramers, names, sizes]
|
alexcritschristoph/VICA
|
marker_genes/meta_marker.py
|
Python
|
gpl-2.0
| 5,461
|
[
"BLAST"
] |
36ef5f3b0dbeb67e1c3c7100980bc5a71df8cf637b6b48b5f42be78910b88120
|
#! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# a fully functional basic pytest script.
#
# Pytest is a thing that tests packages, pytest itself is a package that some-
# one might want to install, especially if they're looking to run tests inside
# some package they want to install. Pytest has a lot of code to collect and
# execute tests, and other such sort of "tribal knowledge" that has been en-
# coded in its code base. Because of this we basically include a basic copy
# of pytest inside this blob. We do this because it lets you as a maintainer
# or application developer who wants people who don't deal with python much to
# easily run tests without installing the complete pytest package.
#
# If you're wondering how this is created: you can create it yourself if you
# have a complete pytest installation by using this command on the command-
# line: ``py.test --genscript=runtests.py``.
sources = """
eNrUvWuXG1lyINa7fgqrx+7alp97Tg64FAASlazijC0t3OgR1U2OuOrp7kNyZihXl9BZQKIqhwAS
RCZYVTNqHf8t/wZ/9vG/8Nmf4Hjde+M+EgX2jOR1a8RKZN5n3LhxI+LG43//59+//2T49k8++eST
7V0+m9eLMm/q/W5evv9nb/+Pf/fJJ8tdvc5ms+W+3e/K2Syr1tt612ZX5abcFW29a3ryprmzj9Wm
2Zbzdpy19btyU/2mNB+2dz1qr73blo1p6uf1Yr8q38Cr3nx7B0NYb6tVmU0zeer12t3dpJfBf1Jj
VjQt/ebBwS/z5Zu7z1/Mvv7qy7+dPXv9JiuaDP/OXnz57Ge98nZebtvsJRV8vtvVO27TloAeT/kN
NjjNvqo30HdvviqaJntNMBnWl7+GiY24Zr/fzwroeb1vi0sYMX/MruvVotpcwScGZIZAhaEWV+ty
046pKv63rZumulzdZYuy2izgE1aq2ty0zWMRIMzr/aYtd3aMi3IJq1JtqnY2GzblajnOHm2LXdvA
30fvbordVSOjxP+wQL6qNgD0aWb+nl/Y7wsEN9fKr8p2ODBDGoyzN7t9ObIld027q7ZBaX4ZlV3W
uwzHBPhAfxs3IFrMZbapW/rifyDgwNvEQKVe1QCKtcUGVgQLjmV5RofbwWeGglesXKVaHLb77aoc
A7SadnRPw+e3OYNg2P920x/RxG/NrC+O6Ay23uW+WgEG5LPLoimxsc3VMdNptqsKVuDbzWAUFYae
eFhxO/jfzTVuM9tiupC0Y0udn5xd5DzXUXcN/O9yVxbvOkvYBvNtDU0FMGrKw1MHrOO9JfAz6Dpd
lKPECsMEFve0aFoY2pf+mHis5W1bbha6jNqM5Xu7Fev2utwp+FgCZrdRCeR04+3LKdcKhi9U61kL
IL/ct6UiXOn9QI2MM1iixPqYbtsdDXRke00N7kUB66AnCFCv2nJtZ/muvFN9+MOAb2PYAe3onnmf
Q0G3Q+KVh1ahBGBcuSVqAZtqiIR5nJ2l5ldUTZm9hJW8JUAN+/Nig9WaVQWEuBA6kd1U7TXSZ2i1
P+ocIM2YasKUeRSEbfxYb73VX5UbAYwal7QG34ZuyqMApqYHBqr0AWim2tmUN3KWTM1JNIo/Wgqv
wEvNTaC1i3BQtpobDu/rYA54yrkq5kyTs44A2QLYobcrD5DFZgHzLugkvFwVm3dy7uzKdf2hXOS6
fXdO2cnjQTe2gHOTZaJF5bJPqSB2REscTLqDTHHVx9PsLGgT2/pMPifahM+dpA+rnugWuxdLr9T5
5OLYxQpXagssxw5YgoVgzWUJZ045HcAZXCyBT6AnoYmDbJA9yn6SXtICuJPtXVYvMyADicX15jng
bgYEnwF1NMhudsV2W8I60HgsAxMuLVd1IOHfDjDUmvtMP0fHbQB7DGdDnnL2mN7xQYxPSDYcnC8O
bB0elvx8nJm/PLyANndvJDlLeG1+78tQrFYyMG4agH95RxWvqg/lRt6eMBORXo2PJCjnAtjHx4G1
GzBI7toCDhEHHqy7qQFvm6bctVW9mdLBk4aT0G/bBu7d+TUAb9MWcPYgEDxQMUCoi/36EhBsyIz0
gkWH09ERdIhmqAe+KzZX5TAa96iXOET0nk5CgRv7AaAYqpMiI2ZVgNFsC4ZEtgb5YF2sQvonoNuV
V9CFD0Gk1g6ReDxpEAn3PjzNPp1KQSDIlmCH5350MEuVet8i1hMU1FEMEtjYX4UIZiil2UXwDwnL
5pj6DvSWzxNGbblsynaKHEXXxowPPdMG7ztuIvem+3Ipr4ElIjkSgbrJrvYliJKF2aEFLrEpiPjo
tYHLsKyAgQc4b07cEZpn2ev9ZVO+3+Mi8g7lNbwuPgCP47Ot9Q0gfaK7mwqIyCXKptsK5vGh3F3C
9zUAPhzEHVD2ElFyv4aCbY21SKqDZVvvQWZh1jiJJA+yt2/fZuviDursG4KC1QnssK3trt6WO5CB
r+FIAQy2DaomGGuRljVZ2c7z7fanP4iO8XHrIQB/MGgwOoayw+7aNSXK+4JCVvYg4TeJRPgFN4wM
FXDCNjLOrsv9DmTMag5k/c4Dvq8XQFyDERQAoTSwIyHDKF2wr10oUUR6EEsg7gBVbmfz63L+jk7k
VbG+XBTZ7cQoZIa3Yzj9mwWwGIPytpwr0TNm4KPmeDh5swdZotfzRDSeblDfLK8RWnK7gqOD/Yb1
Rt2AemAmxpUeozg9zvq3ffgHJxjICP6UdKVRCOW/Ke8u62K3eInKm91+2ybIYljnOf2BvTrplsq6
561QTssZAIVO8QSVFvmv66pDSLHAIXRfwuOmWJdTlsLW9QLYTsKCcSSOLVfF1dRpCnNpaTfDD3Hx
BZxBs2oD4mjVToH7B+Fos9yZvtJbS5pcsJKNCXSO2GTGaSiwvzM2H5B4AiUGQlYtq3lVrFwNpqYL
2Oqr4i6ih4JZT4gB2tZNJWSVvuEmBiynUR88N21nqCO7y7dFe52vaqg+NF9GOSEY/Z6ejiKx2ELH
zDAWh12RKapnc5Q26ffwbASE1Q3Yq7ncjOVwnroW8iXrhuf1zAxw7H3lKl5DqMmCNvqfPlycYGUo
nfWzhyJc+4pNf4Ox9E2a19ksLOoLbwmYxoCw0J7yoB5ng4fN5OHiswEMZwgTFg7mGD1Uqq1dppuz
8Ek1bOlSctdha0BHuknVHFdFqcmF+rh9yVtyTHsvIkeviXIliH55KygCBWELLuvh6PzM11w+gC13
siu3u7LBvcNEMCuxrYa5aaTseFZt79rremPObK+RdXOVUFJMytuc4RRpeuELH87xQpi2cpQ/QRLu
Z/1HtjgC8u8CnWhUYUhE46ZoHBkBhuSyQKU8zQhIUTmBtR0h2tqN6TUKPAIBT4GW1LG8tKbLUaKS
GenUTTJRyG5F+5wo1Ja30g4+JThvKnZYzYYIk/2Zuw7p1B7O61gzSjLiLQA9UICnpEPpD7EN2M4G
qCdh3PnpRfbZNPtx3C9zkdu7Hw8ac7WUWw0wrspwlBGtbKjjAnfAflUm2kG9RZFty+2PT5+izq4u
FnAazbI8z7ObEg+gQSts9IFGWj5sBGdOgG3GQwEb2JTMIi+LdyWybIg/Md4CsNyd17A/295he9Lc
bNuU+0U94+77sVZ/i8rHRW4AweWayzsExLnB0Qtzf9VRG4EvNc+xHJZfxyPNFYxCoist4eLOCwB/
Tv96IxiejTNmE4TPTm8hh1e9B70H2XZ/uarm2bNvXmbNNfCocxARl/vNHM/ZBkr0FFMyi+hfgi8h
1G6mvmKgiysJuBB11WcuJp18LLQcFryAfXajeRDH3SDOrUG0RvGaZT84wzbAbxC8hNmAU7i+aYDV
1kchwGVXlR+gqK8X4vtEwHQ6wF2fXpfF5g4amO9hg30ogZ7Nd2XR+nxS4181Ik9BV6kNnaYktmJL
SqtvgI3lciAUiivhTQoLtl8trJpmvyUJBHYUngtQ4ach73nsUdb4C8MI9HH8TeNkRO6JG6ET1WLB
sKNzx4vaJ66ukLcnup1lw4Qab4sV/rzSqtwMmT3iHIfI+1k+wa4rY5gwtD1RKmgk4IsNEKwXJSDN
GqovrGqoD5LLydnIW16PlSAsmOIeJq7uc/iHRttTzAJSp4BViNiR5Qa3eUCSfNqMzQKXm7gJjKvZ
ChHnkhiMlmBovk6aXDYIVBjackMoHHDYGxyQTx/tMXtypll1bsjvMwICoeLYNbEE+bSx9gIBTynS
99eJyfCdZMMck39G2wnRcuFzPHL6RBojxSewKlHvYaMyw8s5jcHchZkGk+LrcrVFcSakvsEMJzGC
8Vch+3aIwYqnACUA0jfisLXLWyW1WqGZy0byLJ8AspBp9VCo3ybFmtzhB/ptN5GLXnQj48Bl9Lp2
OpEhBrxTWw7KwpEx9zaeD8F21+zmUCPaVhGwXjr9XoBWtpH+t/2/2l9d3RnmXDgv1Ps2FRzw2X57
taPburEhLdCfzPJbISExMnH7fN2soWPorHy2kGA4WQ2Op37TUn4VKlAnnlWJWZyE4QJ9Yo00CDNb
2P1tcdkEFgbGDCZfyZJHzGm8My23jjpuugc5IW13zKH5lg8xs21bOg3mOoVX8eVxJXpu6QlY+cAw
BkEKXS7w9cwyNlLnaMDdVSWc3FQgEEL5fhQ1SqkagwHTXdTtdY4iRm9TFGvlGyO13GPu4FWazbDa
bNaLG8epAkWG/w3pLIbHETyX7nmGUDCq6FyGW87oTTM0/cRaF2ou+8wiQZOwQqD1B/7jOZw8NV4G
bvd424Ls3uJQe2ZlE00qrG7Ouc5JdnbRjeHSvIfkfCcPb84njHAXedUA1UeqmLblUd2eixh+ARP7
QvZvVMfMwEjYBMOezzHA0lQIfLn44iUpE9BmrvILkskMxaB7qmqzR3aSd0G9TFSy1wi8xE02rPIy
V69FNzE6agbNeXXh0dthSHCdpWP+Bh/opZqOPdUfZM8WzJvLxQ2wb9UCZ9iUMMDn+RUpL0FMKI0W
mPQTdOaJ0YQdolgj8RB9BLrwqLF5b0+pmb1Km9ElmWkG+YINHElk9DZxJpaX0PvcGlnyr9muurpu
ZVZywlNjDUrIpzLU29Ye/pbfYnpyNglYmBn2ytZ0/OLkzCkSzKgQ/UhIwatEAKDYa7RKOIXDCU6p
GZVAJZ0a7VD1FKvm0IJJ1/2MaX0AkGQ3nvUH2fjZK1Fb/1zXuDCKHdjGvDH9rvVBcLg3dTBLkzDu
IzpV12S37YblgXStXjf/7y1wsvbjM91rdAtlb4T9du2GIfSABTdtq27cBNw3wG3za2jmNQp176ZE
wjDI3DabuhYTx2qqCXuiKd61u9tofDdKrZAYLtkbc1r5YUlSNkvcckgT6uDVvq+D3QLZarM+72ug
YH19S540ZFN34LL5HYAQOGnD6pXjLkyfWLrP5GGc9Zd49jfyO5/xT3jPY4f30FEBp/hQytv3pyOL
B7i5m/0l39KAENeWTYtXbmgdjn8v68Ud/uW74R321q93yE/1aQSbYkVF3Dpij4wFqm/pglVRHjJQ
caXuOGRXiWXFGjhxPBtajKVGscHvddHQgLgRA4xkQ3LuuIaiNbf4C1BYaTK/clLIAUsJkVsCGxN8
QZgRcOHyNmbD8SDGA1kuWJVCpUO/i6Tw6fjPR+F9BjfzmC9EuimNGYh1CxhKVYAn925va8fZ2enT
n4zwXMIHwrNnr9+EtOeXxWqf0incY3ZSrxbdwEwqNlkeDfZp2MuhI1lvWoGDaEgeaK3zvN7t8JhG
LRFUn0iJE4TdibmcIV6QLjywr8aWwW3ljIOabFHTthBleFNCffiWKQqItdB2GO2M4GxFVVS53rbC
2hgEUrTNgZlnTeKTEsRHmhSJ4ny7K5fVLYkW+bpo59fDXf/b5lF/7FtSssXnKL/a1XsjzlWEm/D+
8ZliOyqmqz7zgSV1c9VFeGZQKaTNRvj7FJ94dCNDtM8n6uXFj6b85GOXLxzuyrXhr1XVyUUsmMo4
sAL0Bn/OTy+m08GDwSRosnL3owzmStMIQZ9x6mD4GET3LNN+N6u038UijW+3gF12jARDCqg+durN
CBUbiKVLNFT1dd+svW6vAVNX9VU1R5StN6s7tFoCIW4japOn+U/oxL4sV/WNVDzLSR3GutlWDKPk
B3fu+GhU99Rbg+LmDgP3YiFkgc7EVpxmfENDZEJOznyLe7seSSVIYosEV7pkZFbj5YOTqNDci67T
9ysAAB7K5WKcza8JhGTwPS+Cay0YyaDZb9EcWJQLbCKM13/BK+O0ZF8fY8W/qt7hHUaBZzlZBfQj
jU2fQdE3XgK2xz41572PuxSxstTnj72ojqA44SV5rG7IH2Tv99X8HdBL+IcM25BilvZO3Br9laJj
8ax6H4S4AML6kLeBXLADCgoH0EebSdTZod6nGUUjzpn7Hp6OswFu7NvbWxDvB15BqxQdfLvJ5Kra
1B8Fpnf2v7/LSFfgX2omDBT8idjeIpX+UF2Zj7OvgVdYAh7KT3c4JxgGWik1zKdqC5asdIk3oNld
dCD5e+vx2VgdRqPHoWuJhhg/pEz1BRFt4VzbCh5yxvH4AkZ+bU0AnHf2oVhVC6PLZmIm9vaETA8X
ZFhjSAUwB73e+3/+9k/Fq3O7q+dl0+Qw/XflArX57/+Tt6s//uSTnqF/L+jLC/iCFhAfALuADmQ3
xR3i8m6PdrDmEgCBV2TcVCYt98ytI+qMQRiv2sZM7wMuJB43CzT1xTLwCEiNWIx6sQKVIKuV6Jez
proCtr6hguVtRWfSvtk3OQ1V+ZHKU53yP10D2K/hhHH6jv3mcr9cAuIuZlU9XC7UVbSoOWBp6y3i
AHCz9htwlDdCavAyaJH9aJotc/pc6yWtm3yx3z4dum/Qgchu7D/6bN/WL1b7Rt0l4dhudlVrDUmL
tgjQZJlzAfoUfFlia4pBsP5EKDpYdyI1xQDvjJBkCmkuwY52OLIesA5FuLnnb1++ef3m2ZtfvJ49
f/v582/evPz6KwDij3u9LgdVwCDgIHZXjdzR852A/NigE9Sq/FCupqexaSAcP9VqMQNRgraL1LEv
EVNCI0E8HaH5tHUcffE8S9HR7Ka7gnybZr/93veqhUnhld9+47+W0vjH/2Ab4gf/I5Cr7aJCEdY9
eXeX+frdAj9pjyKs+Or5m18++9LVy8tNs9+VwwEsKOzAQVD89Zsvvv7Fm0Rx3qaJ4s9fvUoXh608
UCrebYUsZ820xmdf4dMEqTX7xSK/p6mH1yG3Av/6tyhSm5b9vsqR9YtYHWLdoUO2cYBZAVJpXz2u
Ktvq6Aa0hQTpIIERufGkNnTnceR1nF2ivQsZ/rtSqpF1sSUPoJdfwyEHtJaFr2vSG7hlA+FbSO40
sw8xJTwba4QYhfWRTk8z+xDXfzrWGOIZ0APaLY2ZH6NnTgS2f3OpuDdHRex9WNqWfalgnrica3L8
rBZ2dP+dPbTpr123LihNhhJ3LjxxM29Y1OEjSw/wHlTRgK7KhujLKQYny3qLV1L4bTQ6OAvEuI+Y
BBYfpuwT4nrwnpQ47gLbmqm/JHPRqAZjjcwFT1Vpgqx00I50vivwgElMyUMKgljqsHFcHWoCV3eh
FgeRPp+v6qYMb395ZKlPZgXCb4Bf/Oos8e6p946h6gasSMhNUbUw2AqmzWQEX5S7KdTCJyBc+s6o
Ai4F9iHQXGaCABZcfmjIHGpTA5WbLR1tH+jkVy9fvH75s6+effn8i6EuO0qtt+G9mJz/6s3zVz+H
yn697HF29vQvjjCajppz8PFbPHBt7rXhSAVzjGwB5mD1Z9np7Z8vQ+2JasK6+FL1Sa97E2viNdhd
Do6gLNzADNk2Ul3hrxwvlQN0S2Kua+AAiuKNg9AHtNA09GHmc4pJ9xCu7Z2P7qhwh0E4XnccOIIf
laEzll28AzZFOMtXZbNftUO3EGNZgbEMzYgKY+lQn8Cq4cCTBZFfMVDiOhG6fusS3NbQc8ZflKvY
ScY0jWyFJq14FpOKaL4qQbrfb5E1QUPPbs7Eh4xhrAUkXmiZBPf8ESALAr94G8f98Au5bUQP/keL
Nfzgf9RMRtCxYh967//Tt/9aRxmyqqv3/9nb//sPP/kkFuNAyuv9FRuBPTOFWcU31fZh8rfJ/UK9
3oxOmXpNihjGeGQgcT0BVtYDEi1EyZNT7uh2JZnobklbwNWrBl18tnKBPoMP66IFQr9dFRuyAhiq
Z6cUfYP6RC7c8J2+LcRo9xV+Qy0bMnTl+rJcLGAo1sIHx10282KLCrjr+gb4mp2o3I0Grr3elaUz
Fmgm2beb347hn++Jwn27+Yc8y95YV9L2pqZWcYabhZijzGtsd0OKPj1GRDRo3Ar+ZFZsLHO8glZ1
Wd4W6+0Khj7MP1QNYO/nMDOUhvmXteuBY1/GhZe7Ga5M5Vq5QWdZfKf6IE64QYYZNclsyoHaZTEh
K9FyAuvx9mHfLVS8LarlMjB03hU3M6Pc0wuHivXBYBTF1Hkgi+AWhtSjpDZlzAGIM7z/oee5RNie
zk8vLuz0Vqx/Mp/OJhcetVlxnJEGNYTDwW8HfL3gvfw+9fIfBqNJImaNMWQ5dCrY0D54dzD4FmaO
sQ/ErovIkzU+mojSE3qev8M5nqrfc7oQNK8ig7N4qmieFMw2NrmSlnF88UmJcB4g6LNscAQjQsVv
aAsNwlu/+TsLLDjqed6jUVxKxhL7oZmvppnT8OTGFs1HADFCeQDjfkQdUu3RyRlyVSb2BEJsFBuW
hMgw6bIGCQsmphzHX7LTiD/xFM6pBMKAgGAGegDBuob0D4PDIALYONBEMDGtuhJTsyTCbjhPLFlO
e+r6h8Uwec50H8aPggBryep5slKooDrCuJwO03VzJZftWAv2WNKw/IDddNrp+JDQp/rtf3p+uUPT
NoAEHquz2UX2kCy9Hp7eLj7rZw+HSVlTxuq8OcdZtbAzOESTlkrWfEFOHaGLxxGMuFXbL7khvNMS
l/6EHO388w8L0MmuYsdzvgXwQp4s8zjGhx80L+Us0GEklew16fXVTQ/T7vI8cuVon6fspB3K0SVV
/vyrr59/9ebAIiTH9oCvXfFutoC1yer5fL/LFns00LS82K5ka8sx8wZhmBdzhzbTXt9oUQ3sQv/T
3X7zWT/vJRf7INKr3p1z0licnGbLololFq/j3NFbaU5OUohj1ncnQ9UIMoowyc/6Ke9mq0bqaFzU
ykPT0xhoXcoM57Ns+GNUHHAzEcVir0Dy9+sbzlrYJgcNtA3AAvgnXCIabPEBgENBMSkI5N2P+z0H
F3YWNnLAzO4EbM1GEJUm8U4o6Dg9qylaF2X/C/FFRCmArWsRpPq86f8aBtYfjQ6PAyO+3DMONRv1
1kcZGmvv/X/+9l+B2DPb3uFNes7H0Kq+ev9fvP3LP/rkEzSdgF8gOM6vAQ1O7I0hXbzDyjbkpiFM
mEISvoaDSVabHrn/IupHt2Qkr3DXs2KxqElIGHIIDoECmcy4uBxArtiIps+YCWIf0Htojq0N+3YC
0Pf+qkKxiEQA0elS3dx1NTg5sTUwXoj5ecK/C5Ispv2mrXdl3937rAGZPhS7aR+vPvpjE/iE731s
KXRP4iKEZgaGqJQgEArUoKu8P/JgMa83y+oK7zD4aTRRvC5ODs3M8EPO83DrJhIBLDCFkECtEBMq
UxM931doNYV2NiAI3S7QZtDcIbpyRgFlbAO5PwAR1SanAc3YQZUlh8UlRZRtByrcDDAaolhhZ0Y9
ry31cCIzPTnWOXxZX9lupf1RWI1XeQ0i0lWJWpMrmE1pRqsa9eELIuP9EDY3kHbmM40qzlpTg037
gsirXIYeacsW5Sqe/OH57Td2huHMyEekms8Q3YYYB1NmNL9Gnc80w1c5mujRCxnGlbjtndNLZITI
N7rn+/SZb/zGfiS7YhfvBVGcbTE3UsUTp4Bq2RbJKFe1Zo4RV2SadG0Uj8dUQF6aiWXMJ4lws4kh
Hz4T/Sbzg2GE/LJPgu5t1xT2xkKEPYMJGPjsgQJ+Y8QBgOVgeD5IdeZLP94AfB92B3YDX08EEQGE
Ghj5Sj/cgPfp/cINGij3+LOlV0HIZ0s0zBNRLqAPFOPCpxc9zyhhBjVmaDl3J+PAfS0eomULG2RO
ZoSrenOF8oC+uHAiCJkuz4Z9kBQeNmyhouqaFkdsjTHVQx6l3dakN1bRkE4hVPImepe+2dMo0ZOd
OE653rfzem1sMvjou3fKZiaKqJmaA0S/ajFImJ/bWkmLA9UmtxVuaJpEuFb3rJKKIMqEeochdeAv
tMC9eBP31eAyDOLAfwTcH2pO+3SOyRf02igXKWsTTbXNFYIQ4Ou6fpeb0VA7M3xmHbWAcSrDCf3X
d6hT8mw4+MM0G9wGe9qAQEQcGbEDjKdjsa287W5lMPDrHICBqqUiQelaKEscqHX/iOVr867CYKwf
09D504tRRDXsNjBofB8ezevVCsjYvTjENsydkDoMEbXy/Rf92HniSIB1n0j2BkADs3MIrw8OoU8h
oSZCfILBxHdDPwToJpQIid/2mmgeECd7166ok5QC8mS/RhQqJmhcEIQA4cojitZNzaSx/vxmMWGI
+DEZRofJGpO0wY8G5Gpuhj8a9d7/l2//UIlXeP/2/g/e/oc5S1ZzDDlc4Y3EWgXAJNJKgar4om6S
4YE7NsLWOBOiCJCvtzldGzinhNgEkbseZzIGZ5lI1/dj7N9lsBAzcNosKDbZJBicQ+LnwGCQuXCT
yePPq1uQ8LqCNlJzvwCk/aJy/pn4TNWiZjj7hpVF9yDq7pw1Og1qBks3wzML024U8/ISdcH7NmGL
NjTtzEjtMpuNcihF7DGg6gO6aZyTDORi1ZBNMJ3EPbQAmH39N3SXT89vnr9+8/rFs5dfPv+Cgg3R
y5dfvXn+6tUvvnlDL5+ql2jG8OrV16/I5pBe/+L1s589N+9+0ushlzbb8Tlh47v0/+68OPnNs5P/
bXbx7c2jfxvIhR0yssjG8BUwBQ1yObJOCfMlrzT4C6Crd3eI57gn6aKq+FBXC5q8BOKpN0rERS+3
aR+1Nkq+PR/kj1BM/vyXr/HPbFHs5g0+/RYerr/Hp0d5eXU1uDDOYf7IeAFoBNIVGXH37bAwlES1
pJsxyT/iQtE0Zm8Qk+Q1wIPFdRPHMzvg/qNHTwh0j/L2ttV1bPRDV2JL/nzw+xGd8fhbnAxHh3UR
7KC/IgUEYC1b1C+QVvA+SmohZloNccvaB0RKugoNlQ+zFkQHNTH2igsJPGBzO+3bRvpRAVZJsB0W
uQ62wGfD+Pj6lQg1aqj4dCNg5/3RuGvQJ+viFovCYK1GZLNfx90GehRer2pDqTFwxNKOmt/poaFz
MHaJPrxf03D3yLrhFS85JOTdkKbA4/MuAKd7RRNz3oIUc4UakABMNwV5PTbZJYZIK+/rv3+CDoAW
WojJBh4NOl8yPGCzGFVx15DQrCYzWgw+Ooh6focVv6PVLQv2NdjRfoIdTzuppKtr8UHAw6eaV23Y
Em4wUkodRHuhyYj27szo1rRJGbRKYWyXFyfy5uB68KzZooWrEXpy4K0B2k4BEWsp/Nk6wFlvENs7
pGnH9QaQQ7ApLSvawu6u9uyDCMeXhKvZwilUXJXBsutVP6muNoz7pl+W1TUyMNsSjIHrMYMitw4O
1tmQDA1OCr7rH5n+H/A9BDlFsYdaRSQfQb5ZngCgToAUj62S6S77HJ5ouzdli+xGJWY0D8Swwcbf
8Twikou8WfKBPDDY7F51qkotDKhQatEtvgtlHqDCewXo+qFkqws8ODBWOFpP3Yu5i/Jyf8WzV931
PaU2lbkytNzbIN04jjFX0YDKTt68CKfeNWVv5hTBFWvXuwJw0Z3jzLLgouKIgTzRlDWrgLTDC55i
ds0Umpm3w5fQ7BT/GcPa877cTe3TOHuBigj8Z5y9ZoBM5a/nikFtSdNT+XuULluYs0A/JK5mAORL
VJLc9azcrLXd9oCbhOpSKSDnCfFpPdYZFduZLKxV6C7qSuVKe/0OTuuWYk45NhB5cED8tbHNMbgx
NUAxk9JffYs2YSR5Jhv+UFoTUe+uNuGtRrNa1AqKgWuhavEsVVfrL2R8bBwuP6aNXtVUs0/9qO8A
NAvBqK69c6XF/UUDRDFxMyx3gVFA39xzOTHaNbyWRGEhfbdoPQHElLtPHDbKtd9u8NqfrhoDA5LO
ZXIs+kdGSv84+/PE0ryT9lloxg6MQfpU/h47ByWS9A5ZUPygIW/qFtjzmTXoM4Mc+xvwo8ZqJKU4
tjIb5AM4VKCL12RG/dz3XenGBpS4UU6eZPNij54or7dwiNYwBNfQj77d9EeHow+bKbDur0v50z1R
JTz2kpbVbjXIDhTDc90yawZnH0fsIsV4ucHgmnfzlYrfbrqlfc3mxfDvMIgcZDb0Z7CjJ0dSCvEG
6JqooR3jAwb10xgo3QM76yJ/+sLunjtAdv6aNdfAcdQ35qbL5rwJB+OfVWuOD4dYE14MUv3kOULF
5dDTVR19dHEIhAeIDpm2hsOTo48x91X9RixKPBgb6ZLQUOm2MZOSNSBNrKdjGdN0P6XsZi0Tbp64
Ti/Wraq2fZgZmG/LHZoHmPJDv5VEdy7Mi2nCP+WVPBGFJfQTTGCEkvKW0gMOK8/njZioqtEOxFY1
IkEVVOl3ZUn6L31ykSEPs9uwpSkZyoZCLpO2zdWV1qiNxm+hRVtvinVgEiasymV7RHpGhgpOqzmv
tL//PaZZKvJLz17eVmO6JMazFtNRUcTBodeHl/KPwSlBrRxw3d7GC+duvJoZpKfL6mlFnKhpZ2oe
fP8hQ+fIwAlTHKayOJlS9qQuF8O4pkcVGFm05pqELouprN712VeSKtprpVOkUXJFfNM4Iw20CjSi
C32jUDlGZp5Z0RA/TbfJlryfsFjiGgzrvNpj0I7W9QadGbFTWrZ2Cq64uvVQLZs4Uee+JvVWhcx3
TfhB/Fi3vvGas77Zr/8aEOCbXX17d+99sonz60HbuTPbyMmJy8bwrrl3tN85nyDh1R/WoqA+sKml
xFCNwuHmberOkBadwnsPOb6XNBG6I81QhJrNzk1U+tuQlN32XEz5ouWUVO3dUE2BYh5jquPAN+gB
fNmCyEiR1U8oxZVw5p69QeBxby4M2IrAW2PTN3ZmbQW+At4R2OOberdohlqf373IKrSbBYSK8ed/
EG9ssZTgXwEM18XuXbkj/3drTjGhbf398RliD2bBNV0ECWGtcJBOfKvH35kGJzZ41h1z7fydAJj7
15mT4jmNOapFCGBvCgAqKhT4lx0AD1NXF/jEJq9doNxcYmnc/2agpBnoe/5r6BwaO7A1Zbmh/SM5
z8wwR71uQKbtn7GlfL9dFBKzIoRd5O0n3qrl5uNT5drZjFRd6KjpqogUn/N8qa7YVL0r79Wnemc5
+6qHzWdkq2J3zThzfgNfScRuPw086ZG4ADZj1TzEQaISiFPfoS5JKRfbXSlShqtgIhyVDcdeI2dt
gPKYmmGfMGuZSSaGOdl8dhECpoy0TiZqBtFQ+WEYzyB4xoMJ8Ff7TfV+X7IZFZrPVhsOqzMHImX0
24IyMVExxldIB3WzqtLcTppoSgdREnoUtXEn0KQjKUiAkTqtMs5Gj/iqjy/VpNE9Ectqoh5KqsPA
WdJqqcyTa1zeeK2Tup/EYT7G2QHJ9cMpiAgaJjXpUIKV+Xfs/hltDxTqeZwN+Iu9yddDMHv0iSHl
QX+oeaeVDeOVmO0x9c8htdUMwqCOPKObyCtED2B1doUjV8Cy4/nIhrp85oduqlB+JuVnVEaMhdqh
7oljsnGgtJr09V4St1vY2yWGxqIirNHFtQFEbHci9AV0G5H0KefdcfU51ovr1o0RN5gZ0V+aI9tl
iETmJJVEW5atAfm5Iq06FsQj/1bCzJlgIILa+LnRqeP0qWUYbkABLEet+LwT3y7giNyg1ageyLD6
vEy+UCwXgF/JHrAfy0WeoREC21OIaQy1dHmXNXg758gXi8tUgrMdSehOxWH1f66THH1O5DMu9LkK
+PZStFSJYuaTlHxhpM24pPlkSlbJseFr0yvLYVGP8No7gJEh3QNSrgkEaU54vmrUzlVFPCs2KPSj
aZphjMwd0QR6VmwrvCgd9p/mp3hdiFutXjJhfUgCuONU4XPEI/Ud70r5qFhwWhfvShI78zwfEX5S
FqGMZ5lqRp1udDjhQepPzyTMWTUfcVCjAemOvMzC3IC0eWezcadnE14jedAeYA1DI73oHLuNtbld
0AVSg/r1YBubXAEUH40D8lJQRBekkuzDKO0TN2Avk3zR3UsLGafWmEuYzp02NLZZg0IU6psvKbMs
VS/JJB9ql8lJ1GrCX083QsZvtPju9fnkqQolTutYab0F4LEAlOY+9RZhKn8PrrOKS05Gt1PLwVWL
Iyu64U7doyGmeKV4XQurCIdUuTPhmJuOw4D7Tp0GRTaZnJiwugtJSAB18AKA8v02IaeIR+sOw/gd
zvrrCVXcfyhOdaXXUCci1UNh2UjeTAsWw2SE41u1laVceidrqUH6eJz1J5M++jwbtlHTheuiue6k
C/hxqNZYbWZgGvZRlBCKKm+LtGWxIwX2wVKzdbmu8eTDo1k2Be4PyfApR8lIXzSYz7gPZuXtDCdn
3rm9zCY10YZz1VMbWSp1Smja/XdXkkQ5lErnpxdj08D5mXrW9r8qVrk/1cMxBIOyh5Cz4Wh5rM5O
3GsedF9O5UsOqJAFq3/5GeB4F8Sl+uhAr01yzmjtHMu/u1JhkvMNSpADJcXiuU1sYCAloY0IWWJg
x3HEcqQaxMnv6praCAVNj2wYryUdAZG5HGw+SP3DWusunKOmjGeMr1NWzdKRF+iWuCZ60u08xy3D
IuBnBz8gf6K6ENjzjwCQiztYDk6mjjUwkScVExERAYgnNUuctuZ333Gp774zQaILQ5LJf1WMi3fv
8keZOaHT+Z09C1usYS184fmLcl5TWqReOskAD2LsbQNULfBQgj0o85r6LUsbgXsABT+O+/FqjlLK
fq2OEgzohszPqdn+KC1BnnOvudHD8k/vDslf34DuMPPVRitKEHeytGT2A8g6MiQLni3qstEpXrkd
Yd+Klnr094mKA2bmgTLXsINAY/HOjXIQNV7SJX8aSWJEoahPQ6/02DYz6g42jMGbPIJE0t87T6j3
IS5ZKwu6hRPKFEj2kufXph3lXc56KUerlXDvpPpDtIeuzOS+TPIIW+fK0NJCN240lERxkkqFiNL4
DfgQIjuvDgbk/Jb1XGG+YzvOCw+99R1HB2907M2Ipopk6wAsitk3y2oTnS7s1BrcwrroVxJVQem9
TKMLRSUpnhXzwEQmyW7wUCNoivlB+V0KHsE5RvAZ65hRxvhS1BvIvo3SJNZTgMyoKBk45B4sltWG
g5OO/DUg5DRy3qpJEBgiF+hSr5Sb2bDa4L0c0T00/cd21Sk5J78XivIuGgpRkIpISDoMff7ud6Lf
TJy35qNxEddSIX8Kh+43KU/hcWuOVf6qWNztbr8prbeHcyQiq6hOxphEdmGYgKr51VByvVuVoV55
aXa5Wz/RuIl1Se9e+6TlOn/BVb4E4XG/TQWJt9uaG6CLmJxDJ+CoFeFpL5dwztIx6vkOmysR3yAC
w8cQnMJYozjVPjpq9Q9IyCxHBcBOGp+pUZEfAkcc0PWADmOYgzs60oBMxkGz7lbkwtgv9m3dT5hS
cYEsGLWXxtnATwK2ohsL0Ec6Aii0JeJnW5vbiOHIOFTBeH7aiwaTVD2k4dxeuvFTDvBDE+ACx8Rg
8afc68AWme0QKSUFKEfEuFd9AKO4ocv8ZpqYkPt6f0OETPTv2CLC1DzY+2K392BG4V40l1X2Wmn4
lbsQRjLn7pvMvmqsbk8unDC0YL2/us6cOZFVVlGuhX1D93tkIY7nwB6jchfqUsvqf73BMDNpTSFD
tchGnGQiU/yxyQiDtkSigBSlkFI18z0+D7hTzMIWjKRlp0v8QiOm6EbkGkXGshysyNdypo8pZqC/
qlulMBdOugD8hp3sXd/qJewkwTyHLciVdD7ogUhVn33tpKKEo3pFIpZqTsFCFQFNp5cjZ8w8Cknm
aaHCkyI4I2QHjwKly4FlNI6vJkG18CR0gQIMnsQGIX82YzNmsdgHkG+CoFU9g5m9IRuMs1WxvlwU
E3fNnNsGtQ752KM00rOYq7so84ih924x7Ds/E6Muap/z+Z6tq6ZJuxrjlK3qqsoxBfU62fi9iN0S
dRZ4jo5SdtF5R1tM54YjZ9vkqJh9ut+86aOvvO2tapAsnkEGB+Pt7S1zvEQGOFyBCUGzvftpGHaE
K+YovofBR+6zs9iVK9cA/GhrueLtWsBdKia+jAO+9RKvSdSFjblCD5a6AX5sO876T7Twjgmdhgr8
wk67sIbauMAFKol8FpKGZfcrjFdSQVUXYOgDFrVdHopxqHOYDsWoeNKPQGuDaqykyQMwsHq8FQ9a
UBIdiRVgug1BDH22FsV8187BuvjU4sJ4lxge0gW2Vs0zcYv8YO9w6PYIDpaStRbYEchV0MnKReuV
6MBeej4bNDfVYhAkV9mfOivW35+BCSMXu0f9DlhlIoY0xp2eNJqeytAvYQLvX6NFDZlySRpJnzjL
y0kyhrfXo1F8DhNN+qc7WZEv60PSvyFeDDrD08TXSuZiKmzr8GWQqfUx10HqTk8igdt5BKlpAcGA
G5DtRQbf7/eAQFmDViEeVlqBlc7qwCRUrDVRAyCm/MP+TDVNlfrj7LffHxH+04XYwlrn9ibSRGM+
yszQa8iTXQ2Jd6Mbqi7iCFnxKGJD2wDoQ4MSthZe3SDbZV88vYjcr/RqU/CnYOG13Zz5Zu3bfk5p
NkMGnThPykKO+8PopjIxURTH58J9ILWZR+KMQ19MOIUEKnvyyBssYABdmrCsctWcs6bH43lBLQcm
qOUAUFaCU/YOx95Sfaux58my5jzySeJhmU/oZUDppr6OSW+Sjph8jiADS82NoM07bFkgUEGkUsYR
5Wzl59QgLLe2/FFMN86kgCybs4qnnzR9UpDHDuv6dlniZ9iwFNq2PhFiI2jA+GFFzFrY0bJ5aq2j
lD3X/ffUWpz0vXCMb2cyR8R9jhNQxPea8Or8XqJzyWqG0SVtOLKbouGQEqnkGB5CROHNndOvRsYr
SgmxJ1dADjIRsaemooTsCPr5bGoKdES29RARwwlIjByyUXu4sNEpyGg3udWiTkfKRdqPXUU4lQa+
Wx4UpCuKsyYLQlx5hEXGZYO7Fys6cdxQKmnfgM+IMEcf6xrJz7nukQb03fXF8kP7lZiDSK39fbYg
oUOaXJm7pH9X5YZYTyZObnBkGRm7fBxID0a6G2OqEvbLybdMZ/6o0058eGJgmBwsP9QEedzroN5q
rOzm1UyD3pLJfjq6t1miUkeBM65fa318N7TV3D8uP6JegES2QqT4w37Qb59RhHseJWrwGcGhxX0i
Y06HljI0+9YJ8R6KLhGDIru2STRhMMX8Vd9Jjtp1+MKb9gTDKEzz7gpRK8V86UEYCYF+HCyMDmIg
snDJQJ22pTPSD9E1TLAIVQKj5OxwsRTvXZYTvSzmdLMrE6gKOUOxB0oFTlJ/OSqYbsMkIUECP9zU
rKR4uKOl2NyhHu/hbkT03bSYJ7MguPEYqPc5Vwv2SCELJA4pNUOJ3OMGHrx9+3bCvKRntelO8ij8
wvAR9xrlazcbL0m/YSUksPRh53gsl4qWGEYFdu1Nuo9BlvXFD5HemDEOyWssSdQDWtOhml2yF0Tb
JA49eBuRGFztyaTP8XHXxXZIEZp4nyQ2iqE3fMnEhv8cKqhP1KarSjfB6RYcvQt9fZCM0qC9q8rV
QrkWqrNXJKpJImPBTSnZmCifN0pP70pRMPAZ2NTpWsYzjLwbVnd0NWsvbCgNthHf0iyV3YZWfUEb
IgoYMlKOF/eRC5UpNTyBtF8zyoX30FETTplESAzDfBpFmZQMbwCA6Vk6/Q3Ch1oYZ/1qwwm0Eece
7tCMHKcbX+4Yho16oJRVmAt6ysr/7HaS3Uq/FEX4bHSUUTEs0dSojkikGWeXS75UhPUFghwwP4dx
EHseBvzhIUTszMWjoCiz6XX3TmI9GToPO0bDgmcqlaQZUYQfVDnJP1eaAfQ4ZF/9rVNjKJ48AZ5K
HHPybgf0qfaL1vxctzlVbLOYOn5nFkpT724i9FYU9PghIHFe8n7sj9/TlIMgzrJV9KEeCPQR8DW2
bQibsO9uyPr6hqPxIQV9G9SrcwkOhraYwQEBQPkAe4ZC28ny3Kqxg7AQZrcxcf/rhvUTxSWJTxTm
/gKN3e/4gwdTzlwDIpRJQZeHUvoDcf9Hrqi95shzRTswhpmsKOZ2UPUf1EVaQG5aXHVVvSszE7CP
8yHihTqaGUPPQV3YXXMKZ5iDbLIQjRpXKubtnuxtqekK6ftNvXsnBnyhlngPeIMjbvbbbS1ub2hD
VtMhuEPdRegvGTQhc+WQgNFdJgd9ZYrFPvwHPMONyHo/Q7AY4zJzJEnUIwaQsLcW4e1cV+Tee4eR
tmFBJ4DF/RmqlgubDSMEEE3g/OkFXo/hJL75m5/Nvnj56vnnb75+9bdxayEiw27CqQ5h2qOLIwZs
6kP5i3CjwTstvxpeIMU2KBt5q4ngAydr93ilxdogojC8LUrCIgyU1LSBzwyznpJKbXSPzRdvjxTv
KlqlgDwEnIxIjdKXbGzkekeJq04j+NlLyf4TYG35ZjJij/RY3V0ekjuT8W7FcALaE2h/5dxURHl0
jFlWChRE/zivFcGbjgbamZmSvI4xz9KtdFbtkr+w8uMshj3m+Jj6RMQq5+CzQ7+Awxnbm4yYsdHC
iKvWj6t8lPKD0wU5bys7nM5WMTLvlCImUMHRPQPMWKmEkUo+y2C0UB1YYx77RysEqO9pdprSc5sr
pGDgmDo0OmvZeU5r5Y9eBkFie+NEgrDw/ElxO3I7V6YQjUlaqiUIbbaB9+FGduEKOuMlS91mCQOd
khHPTf+TA4b9HG5C8PulyzIUnnLpiSaGY1PWoGyfUl621Saw7Yp9Ub1Rudu7AMhJFVXc7z0Kjeui
ETv9RTKXkCec3KfxgL7ERH/Ky9adAVHDTGtHFEae316MHSKMRp1t+XPwDIt9Q9tiztzBFw2bcnCo
ZXJP6Q9HfbYhodtRa6fZhQ66Twp8R1nCDHg4NSp9cADBLtIAsZOE/Y/IMDwNvdR/n6AjG8qPVFw6
DzvTtb5T2RjNfVcwJKGOpmRfSvV+2K6MJ4CtWvtBjsAW7wSWjL3YKZ3i+v9Hu3HJIWmO1TD6WgO3
DtzCqHv3pfRnH4MWvd773ts/0vnl8Z/3/+LtH7yzqeVdyg5MMk+WWADTbekyZ3z+9eyXz149e/Wz
12N5/pvnf/urr1998brXE7uBmaR3oSwuPfx3VV36CemRJeQGh5ReZSBpVqDgIJ2UE7mUz6bZj1VS
D2cdKWOzSedFsTdDW8TurJrbu6dHNmFtxVOBjTAeJ4qUxY6uZL7hcOwU4YDLNpazTthP7Iqbub/u
jljxtTQXoPj2NnlrP/JEvJGUOyaQLpro80vTwgGfY7asMolhp6a5XPWYKr9rWs7VG1ZxH07C/N8q
8JGq4vVwhM0Vc1FvQE4Tc3EWeBXQJ6LFNOAbBfwbvHdD0NYt5Xu7NjUaB3bYoVH9KRfBH7oJICX3
NGFUcraJLjM2e4GeFvQKlnTEzXMLMk0reRYkozBBBGiaMYRUyga2eSQhB+tRzNGN01SQVEge05gB
zpcOt5FZiYGJRpmRcplZF3eXpZzdlNKAfAREFI2ErrTExfwAt4RtXBcYX4J1NCjM3pTVboEZ6qp5
qBGSsFxwOCwKLAP06BwxlMzNxBuSni/IW+in/r2mtezz5xeJS12L6FJqH15K2bqveemM765kBsJG
zKqyGLmUjEHqbIzcrA21M/QtyOyNjY6zmUvFjfoaNVSccsRQYCUVRuJ3nZjatKjzkjJIdlOzemB3
oSsoTRWtCUXmA8UsjzcKNT1taILSu2xdTIxAvHXXtFirYiPbmJs2ub9JTC9SNA2gj4FJxI08MOf3
lh7IyZLakqWG0hVl7QU09poSt1yVF4QcTcUJpgOO19AKIxX6lsz3vCIAg1buDRvlp3Oj9oBWEs3h
zGuZkOLWMG98h+849D5Xe2zrLVfFVZP9meIuPq6G5UFC3pcLwxBEvjYtXVhDdYx6Fh3qv/LP9MKc
6hwc+bpekZfrcsYOayQzLGdXq/oSfkmWBXF5LW9xFSjlgYmbUKIxGIa0ORiWkLoKeXJ73NLXHAYQ
H7N8OJvRqLJ6fKqgzEG1SS9SxyWVCO0tPbaDWCTTEHMdHUSR/JHXxtE4NF41H/mgsDGpYYsHvlt2
ELmjsmltdpoM9PujDi9zr0nkp/wRy1oo8oHr6kXHevQIEC90nzarnw2w0MCcTQxab8BIHZoBXX+z
drFYUexAeaRlsjSh6SUm21DTkuNbiIj0r++9vVPD4YOHHwCQ7Z26zLPvJZQAzTTyF0WISJQuDyvH
tr6G3205nx0BQCj2Q4D3vx4C3j8eKJTUwxM8DI8s8Hc0fKQmUNFJNGiKJUAD+LzNiSTS+wB9wBY4
IQMeE46EBDOk8gM5b9NemXajVHWOLdM4ZATawpMyVnUNUFrRJ98POGONDyqdtU0md0gcW5jNp3Q6
rGMPWA5UUEaHqjlIw8Pn2EN1R4a+nSZzjrAYKEDnkTvjXfL2C9oNDFE0Sp7Dq9DH4V6PDbKIYV6A
2XGMVzEH7OGJ7sl+6Jvmbl7HKqWWwqTwEfrGCNHPKRFpJCBbxytKVMohLpzgbc9A9oE1Huj2qhgP
UJWWpHdAkKb2o5DT5gNzKfTYcaiaz3l7qQ9WHTFNDdJo333pXXRRa9EGDcVzdywu9QnnLDfntRVF
EzIE8SlpU3xzpL2wZdyscS7MSnS1vCvR/bc85HRlgcFIR8c7H5BO1P+IUJU+wmSUBdhFrlTNK3Nq
7uTx2Q/jJ+6RQCQsiOM6vFgqXGcaT95yBxHQjuIZjpf2SciXsFxasO9y0w5A6FFfJhgHXexiPtAe
hDa0q23JuOKJQ56UBJINjHO5WxV3HHWNlFb6aLMZDRMTfuW+8vleVOSh7haI086YSAULIKfkKIGE
AhV4gVVfcVnvMUEKsI84fEnliryBWGYLLx7ylZb2JBlKhxjio57b8Y1yPHG3gQPgrWKUZx4EqKWx
Av9YAoxQTIDgalppSpTK+5YjoXbE9te6MgdbhmCJCvbNifAIpDbLoLmUfakixrdJ3DEFPJSzVCKt
K3iA2V//PQtYzO4rBaLITCdnGZx+ZJGkFAP21vJ2eIAyjbNTX8hXwxgDwW5J8xO63yo2xKKf23p5
IEzflBS6m0O3N+QCuLFN49aVrO5owIHvMQRIo+tz8HirdyZMzhCWrVaQ+RpsI+2wakdTm12xuSpn
hUr+8zFUrDJanYOClGfMhZkXpjgxSaHhfUQ/CgOKTrscbiFJvgLEpywPh8x73DBMt+RvR47HQbB4
dFVTVnMhph7QmUsn42w2BsZmwVFK4wXQdH8sYD0y0G56VlP5GwUGfX23aYtUpiUenT7IHyt1QdKc
/hgQs6uuJPKQQaUPwnMC8wTGIY7nFhn1ccIvPQHjulosykPBQImlr5beKS46mgIPBuCvgR2xzCa0
V85mYbjVevVBlObOrNHZLTatKBhtRJV9k1a4HvRB8w/V80E0qsGRHmldAkLUk4iWh7s6SlDwDE51
3pc2weVF41tu9BHpm/+5mLbkHJInBkDVBz/96U8HoVFKSCs8nXw0DPGdSZ3Vq/CwPi7h5v2hdo2/
Dow+YtT6Elb/4Q64Xzwt0PUmszlDl5uA8TVxJEigd6uw6RYLQviIMtSCScDoZXMKuDkpHrJz2Fx/
FMl/Q5TWleBnP9iL0HIzL7bNfkWe8BtMDb4sd9l1dXWNFznlBxWIBe0+moZ3pWkGCWtVBmE4notw
50sVXWJiexkFSjKZG/h05WiEklS7DWYQhY4yt7Tt5TgbgKiF9huhRTKJbxiWMMGc2WiG9xus8v0/
LSoL2thisiQ2RyEOcxlRgJRNG3rmL2ECo0Oh61OVoIryd9q3yipfnFcZd+WH4sjkjY6H1M19FQoB
bOhIunRHVDQ3PsQ42oI+Eb+8QyT/wKnDN5i8fX1ZbTj0Q81Slxkrq/I17whHi0+P8KMcMnjPShwE
pS6qTy7LE8tSK9MCzCzBIQDLhT8yGjVlhWkQgq2zy8VOzNySEACeJo/cUVj6aVnXVzQo5Qx35br+
wOwrDHm/oXOs5LvdSwyiz2ZVZeEHQaJ7LbwjWkmyGs9n6omd3iitO9VWLmFGVWLNbhVpir6bmCRd
LOBwaGKoUCBjCiNkarEt6yi6JMP/dAguXVsbXJiWyCRl1daDURYEGhbTm9Z2KkGXVOOjjv4Fy1TX
t1b/NBUc7KiqpSKvflrqwfbUzwMRjx39vnV6o+StyPGJ7CQoNIUwsu27OEauMlbrMCkjfh1t8YfN
qoLfp6NwEir0tM13goGfo8F7QZ0lfJpQvo3yXcM1vc0t3zn6GIKE22UO5ygGjWfPadp44Y5fbsJU
r7j7kP11elKjcR7rrtCmLsiPzl2POWgExyPVfLEEG5bg9FLgkpJklD5+iWpJLRTHVKUWACgROTUp
hIl+8TyJjvnNcDo2vgdFZQuD1WsLU985Rn+EmpoP5ejQtYTDVllHwymNfCF/viuaa0LlQ8kEQG4h
5QcPQBFtVhnT4qzKwoFLQOUEdaLPLjZfJzWsyDgQxcLhCTBzJyaN34h+hR6aRmONJc6ri5Tuh9W7
Dnad+9tXeavtfH5ydqFVcnRxVG/Qnv32ANAIpbCMORWIAD3xll3yt9k2vbHVu+oKz18My7tEvQ5w
oLsKfo9NXHqYoKvLlxI7hbNemGgJkfXb731wj911g04l7E1KrIMWnrEG3XUjN1aWC/a9IlctNgUI
qrJVEa0qRtgsYCZXAIw1p2CmzwsTfZ6sjkSBs62ChniTmMRJjW8+SFhIFs+YzuUJ3X5l5fs9cK3t
XS/0y6eBN2HyZ6diYbyJtOzVYhh9QfMYgaOcUn5vpJ8CKcG4FCEY3bIVeO0BffqMpAk1psOXEaU/
v4hUnIlwN0t/BnGIhBqjsi5jMwaNHGRyhyVhjVadZvHoMMtXnMtcbrJnBPVu/c1sZqdPk5RBzM6m
8PDx1Z5OzUhHo0Nm+exX68PZ3BbqRXWX7ildXs/MTxK5oUvuoHNGyF90jnuQnOvgp2jni6AcWOHR
BlHDtAxp49omcLq3+8JKkG6VMdqtOIp+qK4Ky1AHBNoQkBlJ/pjiAZUMg06hcb+1Igvrt0N5paIr
6rS+FD8cTLZDIYj9mwaam+KFoA0Ka/eMrhcBXKmIxMF1qUk9ZOoO1s3VINSAHhhDGuNUB41t/HB7
OBfzgyOTNcguDQdyWToYHQjG4S+RP/9JNgjuUyVyLgb43W/D7KXu0CbcC9wdgNHkatrPKappVsSv
zCSZa58dqI1HSxycmPu/lPpPD9SnQW4iIyx8rZVi+Nvm/TvQmuV2huZaKuZsU35vEpI4qjSUmXxM
gmGPAGQP4bS7BNZoyjfCkmHYzG/skh87+dOzIJqjPajZtbs7unw/ZGHiA4R0xnwP5rO8xAgPTIMD
0RqXjVEajyXgu4cp0FiYeYyv/nwkHrMNK5mQkvG1aiNMrcs6Aw+jTKhzm7zQaiCGTZ3YMFkYroel
FJMjqAQJAZgzzDGYZsPpdgSP2JSnhFuusdoY6hbFUFr2v6W2oq/4J9+FOlmksAL/6LqCaijCEhCO
BHlRXZ27hNqqRuSJja8VpkWh4kmddf8thY8ffjIUjAKVkHjCmPQEU+7Oz6yKuEBiT7jZZF8Y8mC2
id0g/v2ylj3cPst9mcr3tbbqswR7GVjNd/CTMtdXMAVUi39pwvfq1lEJLmP3ZT1lKKeSTNC2H3up
R3y2RCJOmII2kwTvaJvc4gD5wBNwRNZjwT40Gkn/QtdX2LmhTujZWFmg4XEgiMa5VCYZTujvaf3+
flP//YaSTKh6VCqZMmWCgnhplOnZkEW16JYIEbQlxAjUDsC/c+pchuyUu3bwM/oImRylR6aQHIu0
dKxzsQy4sUHyIg2Rw+cX8Y09fb6iqnF4S6C7TbtQmBySrgMRMYmUdX/mrXiguukTIxZY3VpcYTQS
eulvYx1barlGgvWCFbzl4jkzOkOdVMVlUPFSpCRxXv4m8qcozDcPsQvIus3pXC/NMIJAPem7wTRl
ITWEL79FZIBpBo9Y0w+8yh77VG0Uqy45FrQ3vv2mQur0H80YZTwyTuOcEK52JB8J0WF9g7V9EoMn
Y7disnmzxPQzVjJgAEC7DTnOcxtetLHX5Kq+mdlMiv3PuAa2rd4+73ZkuIciW4xkqnssETZhb2+c
jbb9EYZCZutKnyAKp2qTSJnHwDtCupeMrHGATxk8WkBc+lHK+bMxewkiK1MM54XLsia2D9omalld
YeYiXEcJMEHuOXQ7GdjrxP6c5qI7YYdI3A53d3I2+v3feSftE/wB4WbqslU+1Hk8gK5BqI11Gu60
n8B+ZCiMshMWKKwFwCjgqGgbe+ZikY2xMWXhHe9bl9n1H6Vtq9IXvs5Ui5dvUQqujNLmNWrI1lrf
GuUnrYUDu/4wxZIy0A+naywjzW5IxsRNRzll5g8eWFiuNh6rqCziu6L94XdtA88NCudupmPaH6Wj
dBLRhrE/w774aPOyq3prbcwykYzPSOM9PTmzuYpM9o77RMylIeKc8B5J5gIDiMkykwyUJ0UsnXn4
sEmeM6gKvJvQ8mQU2aTwZLx49qhtdV8+DcPWqG+POaCOMdeyQ9jC4TGD82hZYct9goI3/lh047nL
knoNPDbIT0XOXfcXxrL2kLXCkrTtG0Iyr52JaigdXrdrOGx2012aNoU6KR8TBPoHJjI6bsiuxuOz
ye80aJfprJv2yFEzBSpplkxi4hnu1JxbwfK7ofhxc4nNJp7HpjTjFqb8h3NUFysxco5IHLXp71Ff
yxM2+xPXYrgpU/vKzLcP//dIfqpT2CQ73ZWeXkbzWnBCqwPR9GFzyiVlbRO98NvNYOTlu7fjYYRS
TNZjGZwXVEljjfR9YIepDlIIbaNFEfQSVy96fAGk0qtFp4lyOBiLiJvOC2GE4XgC8Znyrryjt8iK
ExDkMkckTsr/gyFDMMHVX/bjujmGlR3GW9DEl8QyMQSMmleMOClNdzLYkEQPGsxmJpfKbDZIK7m9
FerrCtDRp+bXZ/1EFO9kjDjGW8pDrKyB5nT5jzf0lyVb9cAhdHkXWTe5FmacBtYaKoxdDl/SJUm4
lhxPVIBYRyuLqrnaV8T0E9X5UO7uJJjqfoeKkzwtPIP0KFFkgvM9UCh6veGy48kklUdwjP35qTHv
8VODd0ntDw5ZYJN945h9EccZZmHqupbzF/XhydkpYisFVhczSzvIjrkcWlx720F+W6b5b78lhTk1
39WqDaPR/Vm0JVu625U/AjEcdFmsp0aURQJ3s6so52oHr/Ulb35R9PqEwYqbM2emIRynz2Sl2G17
HmkWPbBc+kdnmuJFeuDkgMiXwPmyjJU1zil6v/yazE27u/TUCSfk8NLdj/N36SCgRiLmJpQP4pEZ
ia1EbcXrRMWE7+MkVH+y70WQxzgM0rXTmTacfMJrbeNbO07Fcz2xwqfhRgyDH7H2Oi83HdT07yEG
Jzo9Exwu+RBwCl5kMsjMG4kAgz6OF9cR99O2YBg4vlKVLKHm9g8oXb8fpORwcVApnRN5GDpFVXAA
VqsSjjYhvB2qeI2KqJCXsY2i9GyrRhqKFkSv2SRlweHqqlUXPkL6D+wYQi0xERRarrHFobFqeqwn
KxqpFKc8+R0Y3J/E3Gw0Nm1tL/IkD8XPwtWdR0nHdxcV1BH6mE0cwWnkJenrjNL89evDTiIpa12K
v7rF41iOZ25+1BkKebNN5YnqOj46UwgflxrYcKD2eqQzvXAiA68bpjaeSzjApXNMv9pvMBp4ypgD
6mC6sGq9XyubqgWswTWtBRqn9Ula5ATTLgt10hIwGJ6bSmD256ZEtpGqpKcCN5dH3rkCG2NXiM+L
N392n2IiGxvluav8wAZSLDmGPBI5r0aa9iHJi8gLXcVpLX6S2zB8BuFMZHtZOfbNNhfRewHmNABv
yijHAab/ox/9COiAW9CWAmBmwwZJuAgwf5Zt64ZilYz6UWuXwIS9S1EWZ4chUxi7nu2VkD2zQ3ZM
X+WkdhMW0rshAVqDh15WJr7LUlswuOPq3XvL5/U8dm06DyRyrihWWHVy3yWUiyCtLoF8pjERe/JT
E3rSayYvN3RrM9i3y5O/GMQK2qOunB5kL/72Jd8XUxiO1crYY3BQz5NbEKMoOFq5I1cle8cc57B5
oHUTTriqaib4VZ2/AZx4+bV2bb1R3xiQvyKunrOeVHVoAlS3UmzYxtHhoBWb79C5wR9vD/TQBfLF
cE8PT29daAhr5E/mqsboW5AgxhuNFt3p0DuxK7hsCvHf+x0XNVhvn4Nbqc50zH4aZpYWJV0yaru3
0/5JP7oIC1Mtx9X01YZaQTGVujk4264lNxoTvycb6saMKjhRbzBW/5AyiccMMHwlqdYkiNar6+hb
YmVZDqSYvv1Z1n0dach5kjymoOCOOvWrF4RFcPTdPh+6hTxyJdjA3AyAA3aSHfiiQrNXjllyp+NX
4VEZx9C53yhezzRmRJjndoIaiXMJoypZu34/PlDvunDInKWGOwwHgyF4EgwsgHkWHfJS57x6fJbU
yyXngTzEtyl9SFiaxVPOG2u7vx8qHs4jsgiiKhndxRsR3Jkk94RfJr01xAjGe9dN+9pL1sxM0sja
Z2uc/qFNcW5lGunatHnRvUfE3NyRbVU9sbWj4XzkdIwKRD4fu/sA7KTdAnxmc0XX2GgUDT81cObm
jiBKSlI1N6v8y9jzJKTVjknyFKMFU5YM+mdc0NpTuB+J1ng4aBbhBvZ7IHj2NiDYdIlw1WoYh0iL
d69m4ZSSkBYCPG1I2n+Ol3ijTkrHR8dlvVqIvQo0M92Z5MmWjHcRxiBpmJm9XqCumcvnQyfzfdM+
fsrHT1dPIXWd80BTQrs9xlmfFcUd/YZwC7roAoIHT0aVdA6fDpy4t/sO/LtPAjG8LidbtQlX+99u
YkJzb6yYABbHl5fBe5TM0/cdxz5rA12nDQyIk9FDGtVPRwC+wCaYM2RY3ac8Hc882Yj2lOyDO6n3
7VZCHZcFxuf1DTIfSNjDYqNKgjDFPngYDSYrFxUaz2WY+3xHIcVdSPXGJr0yg7WohhNorijANq20
7/WMF5UnAZ/DrcG/5xPlAWqREsPmNRPRJ1soe1FDxljbP6vkQua4tT14mn7UWXoMPdJUxsdKY7Bz
3KjJrCcYtBx9dOR95FkUU2G+bFkWZPnYT94hS4xSO910VjAS/2Ahg7vBjjtOo0kFnvgxPdlx4Iun
2WcIQQzndVMt2utOt0Zbq9ujUK8Ed9B9yylwgLl8xAX1ccNw7T+eYjoutKZJdHO4q3CgQQOHR3IP
IPQBAf/BuWju68XgU4I6zq/ttf6wMA45coSKp6U2eiPD3n1rPhH58v14MttPW0ts8kFjXzprMoq8
JhUnhzMD2HI97aqrpqQddgO3ooHvvWt9T1XsY9tK4MGYipPcUZbCjMmL4JOZrEBsctQcpPDHDF6q
HB61WTZ3sSxvGB/2mx+GEeyodRRS0AJLyPpjkeIo+CtQnoc4cEF5qEedS3EYVF7LZmVMkz3j5yMZ
berLX5N739zajWkwEXOlHOiV4bQxZnHA8K7HXMxqVDXWNljcPRlpoLxyt6XBDar1DDsbsBHuwaJY
jno7qvDRJc0MwrLsSDenwNmoylChb7iin8JnEKuD/OLQD7QF/Yxse2JDIqmj8qqhs3zomyOb/255
5Gptc9OkW2Q25UsdiWYwt/eOPFjl28Sy93rv//Dtv5QcWav6yoa8ev9Hb/89vEYEg9dXyCI+++Zl
Nhxsd/ViPy8p1jzMfoBJelGxBr/hccN3Ox+qwoQWh8ObIl6//+O3fwgNziRH2aZuyvd/8vb/+gPu
Y7ffUPiGrNlXGA4C9RBtuZFciY2EH9W5unpRBi9umZN4STeG1u832Bx8tDtr1ryrts4jqjHhROg1
rZ5k3jRGczgSzus7MzZzw4FpF938zfNT/IFjHkTpoPGehdtoKECnam+UCrJG2YUHr2FIb7CT0J5E
DTUvFgssnpvC3J5x5UQDrqEuPxJISBK1HfL28Bev3iWTGsbQGdPdjHSLJA5+5doiQ78IbnqTMPak
lVXJOZ8vm7Zq921p3EWpdQovVJiMdthQppW72PFTiqwBA3ChkDiyUXoXqKboSt4bu5jQ84Rz9H4e
eX3lzs+euja/AYx/KQ2jGWkO2xc10inYAirttwRWB9CqmW1rVPtXxWqGOIMldZk4SBR+zDkwsE34
Zz2cAgy5KjGWpKqRskoxyAaFkbjhKOCRRpsiKESoTHoOrJQzVZQKKYrlYcUmHPxL43efXra4P65/
T7dY0JtLmHRTZu83T6DyGg6t3egYkUznFL8x49bZ23knXi7mIOYS4X19os+Dc6IgN3xbut6uUAt9
I3mgaDRtWewW9c2GnfZ5OBjBR8UPoj4aTsuTMyaSgxSSjSXKg9Vvyt3QbB/bIgGQ0RG2Bv1lspEo
8NE4fRD6poNwAe4DnavX01ayBse9zdON62w2mujLq9HRHQvbK73t/Ho+6UWSa5JYzoT2zm0CTwVV
u4fs14P73x+9rROgGA/l3iV7wLd8nMDOBIqCEWEcOMxyR4tJf9BzgnTZ1MMTAyBpBe0/qh3uDHNa
Smk5lnrpKAB8FslkjXMnc1/f9nR8wqiO6SbHM/HzoiltbZm6DyYCzsbloVqXIFIsQk7TxY0V5r9Z
VrctOi9O3dFNNUGEF05EShh+vG/FBulCOahYrtK1i2+UOIGDRlty6cXD3JdCiAYNhf4Edg0KriXo
ZhgPpZZg+YA8GJC2vS60YUWD6kSMBk3e7BJZyMXk485jexYis+//5ds/Ebayqh1X+a/e/hvhKqsN
SPxPROeIUYywfWTy3v/rt/9Cappa/9Xb//CnVKsHbwlrGCCr6nICY99WC2IeWY+5gN0HVLfeUt6E
fVutgFkk6ilke4+sHVTavrvKtxzqelX85u5kVRcUMRU4Eina9IjJERYry95QILiqhZonaA2PSLO7
y7LLEmGLvH+1xLtyRJCTz1ghtua4rY0EiOOXYhhdbCjcFSe9Q0+mnmoVnQg3hoElu8VebzgfZX9d
r66gzt/synclSzTUSDPOnp6e/uTk6enZT3oIqtnMmPSgY8XgLP9J/vTPB72eKENMyzOGRK/3AD0W
OKmgiU79yKWY5uQR1fwdIZ5mxBVTez4wVQcYI12azp+tqqL5ORUZ9k0JVG/hGpsfA64H+9JUE0gP
TVyfcYY7a/rbgRQYTEwP31OcKhgQ7Ohm+lshNi6gMp6SXi5Ms7wUpAskN2hqgNFQFhP8MU42gAZq
txgHBQStBiPnaJolo6dm6HHCL8asfB0AFQDCN8iowAwj55GxLb/l/gayXNjI9m6ilg8EigcZ3mvf
4PhRV1C01WVFwfBkqGI39TQ/lUDwy1XROrwam3wNBS6FN/0HOL93ZblFjQRsYTKzmFNEPu5HjN0H
JOdkNDgWedzrXI6Xzs9rDKpb+p+ld8zljKHma4kpvD0Rtkqt166QiCAofeKaYEu/tXQHJPJFPQc4
mRcEZSk6MR/dnccAxoOpyZLlc/k4MYVUvXfVajVQ3IhXDz/i84RKqVoviBfDg2cQ11rSRzwWJqoc
1/7eYI/guz9p2R4D0yIXmpjXagDPthXvvoFX0r0OuqvQbg89loIeX26qz+W9m4gtPHGfVd/fYAIQ
1pel6qjPwSCQ4h21zGgAnljj5sPmZj4I1wrJKH2ZvP6w+dXnn9frNaDZN9iXX3e/Uyvt1YUvWLmj
Kl1KJrulLxO6jgorQXPP9jjd7rHSdx9ED0zQdlE7YZRq1CidSFRNPOGfffOSwcnpAw+C03SMRZO7
RpSUqfKSmmUiZbx67GWkq8X1pIyq9TkNOEvXoiFSCb3P0K9+cKgGl1BVPPPJQaqKX0JVtcZGg67e
XAlVDf2XGlHXdQFDl/Gris5w0NGjKqHq+dr3QaJeUELVDXX3g6huVELVDgK/DbyenUo+vFtRDaik
UTM4nwbpBsJSHS0MQqAlWwhqb3d0EO7KA7W9Yrq6Dby0XRUcf3+QbiBRMNzsiEnI43BQG5PakzKP
GdjzVpdfxxBPKZra7dZccpAo7z6qGrsSOZZyMUj1YD9qMguzccWDCvJRFQdxJiYipjh+1GX9gzoo
65/PjcWM1DB8hADm+Tckxqeg4j6qGn8FQqclIoOghv9R1bIS/fPbqg1r+R81vgEvXt52AFQ+asKA
rq6zjuLy0d8MJC0m19d+1BXa3R0LCoNEBfdRYx1mjB10rAV/1B1cAvw4fOUg7kB99AZVY2TDjn0g
H3X5qrlEljU9a/PRr3CgA/moy2OMiTUaTaSg5D4GVZBlRGlxkKpiPwaV9OERVQrPDe/ECCukyD2u
z1JxCNHi0UfNUYgeI1nBftRD6lyJaBn0GnglFfwdcSVtxImvjRBmuL6fbTJsbZ0ipIv9dhmwTbZ8
Pi+2qOCZmEKawSC3lRQLpOpJIU1vEBBhvbCaKaS5py8+54+DA/VcIc3ftYu4alhTFUpWffHF4P6q
UMgDkPbdGfiVjaETu+tPgrLeqdJUMyJ2idEHraiyPl82MwVnZIo06GwhUVafRBL0YZBaO/NxYkuF
SNyszR33uiw22e169eS6Xa8yJw8wSsOHI3Ca+oWiUDuF1thygJxeFfquV6u4Cot75fG7ZiaKm4PF
8bsq/pXRdAzSxd13Ta8aQLHUxpRK8j0QTFd1IBc/+J1vyI9YCOgitQooy98Uu80gUT7HDzCsiS2k
5XIZZLIidmYL+JySmcwgWUkX8PgSCcg36OjMfg/Oq4OVrhKVSLJOoY2dVih6v/ni61+8GXRXkAJ+
leevXh2uggV0lbuG0Ka7ChdwqPb9qPf+v7YqdLWa7/+bt//PH3zyibV5GLM1BBuPfoGeKmzN/Csu
PoxfdRuSrpsr3745tCq1FrpJa+c4HGTK0PkYp0jKkH3Y1PexCTGKFr9HGqCb8nz1Y7bFkJwBRNsq
QIBX83ekiJw+HVsrKh2c5YFo/elegXJmfv71Ny+ff2H9WK/rzdP8J09k2Zp8ezdoaIzU50ga+VnZ
si/hbRslc/av/dyQwsTHnJuNzUuI+6O06GfKxI80qlOVBHWJuV3Qm0XSZIg8b4NWs9m263LE9tq2
Pqfxo9phsrOg1dDkYF4f7mca24VLTRpzR+IE2wDimpdWsduRvNuM1kDHMxhwgHysIakXwMzbc5CN
u/BGyy3xxkLL6CFfqwo6whYY4R4wSGnxlBeXUClEWPeIe8Ti7tLdfFosDKuepXEdgW/SdyUj6VnH
g2loCkfxelbl0PWubZQ3tTIaVTZxtnTuW7j1YoBG4T7InXwXbQeFY2EI2F/inVwiyocklDVNzVCi
CrLp2kkcyi/s2uGxuVy16bZsqcA9BJZhYC7DBmiAJq0EXhR8yznNVD5cqXNxYJC2Wv9TFpo/66eW
Vxol8zVoGL+gJaRFEympUGOzEq852u9AMr04Ilhns8rLzUK8sYBOzvuYgTB+X4cRldS4zOP5xIva
sYra/7eIbnhYHtEYug2tgGkcDmwtNO5DUj5KoCLe5gsU0f1xti6qzWzWPyI8jeobUQ1juuqkLEeY
5Kp4XOvLEhPeZVajCRQjW9SbgfhSmx7w/Cyzy/1V9uAvfvzvzv7ns0PDGpjpDEIbnnjJg6oMk54K
d+MH4jFFHUlDXnqaYmZiBqWnQrjZoxYfSJdazat2KK/Rtq8tr+rd3VSaG0cIPkXDQCnP8aLGAb2a
mq/icKRYjKsKat9Nw8EYuqFN8T1QAaaYwqYRwJpx9tvveXZAsN//6ds/MjYR5vL4/X/79s8/UYxg
jy6TI+hSHd6moxwKULQjYC7/u7d/rMxx2/UWvr3/79/+n2KQ2+y31C5aR2Au2YpMI9oSe0PrBDbj
qcknGu9u0SjDUG00lUALBBnaruwFJrpuzNpYN1/XGxCJ6DrBWCeoVz0X/IQG+9cU1HHXyczyDWPI
wfJbSviFD4lcP/Yb/yRK12f49HXqHRBzcG+ttyZ4MlFNDJO4m54FkZGHC8HlcjFy+VVCWN7x/kA6
FSXAkTjo1AcaH0iOL453j1lzAOYv0WQo2zd7OD/8DBqXZYtR32Gh0AgA6w54RgN3sJsknBz20o+q
RghQUv4WGHV7h3ZV7/flybbcnZDhQbX5IF6UbjZ+pGxK75Vd7YtdAXSJ8wxeltxcOk3Ggy2GPbvK
LZ/e/+wsP0O7EZoEjT8efj+ZyBVXEfXBtF6jnBdvqJcM/l+t7vqdWlmsyDZfmz0QV/QBDiJ2m6ZV
4D3XW0gwTSM+wcS6pk6+fgfDGZp+D0V+juKa5WRQaPqYYTscHnlqpxHReTT/mJ6Os11dtwgLMw4K
IPduhhG76n079ROpuR0z7DO4YGm2EfS3XixjC5XubJtYJLU5/JDh0TGqF3tmuul9xOkZrqEQAbYN
zKMWZUHN+/gkVu35FNl8SNpKWxQgU8thh1G0LcU+UsNRZ++CSscEXewacDdGiTHPySDhLj5TzbUa
tzGytBxCUSVBp015Y8sDUrXJsIetTnC8qeIkULpFLtEfeaa3JiwvBd9xZwUNWR08MlCchH/0mGo9
FnwJXWbzVVls9lsTNPEcZgzCJOZVz3kQ4noH7xsxKuWqaJDMRI3DFaOTYTsmA00gUtohSFXlmaD1
nD2OsFrufkYtWIcFMS5lg27qeLgrgao3Kt+KzQDdeVqxpXtPlAIcL5lPB6Tw5HLuMQeZOyqYEM0B
IShZAOWi21+qDiTdrk8ReNUdVSCrTJsDTgzvKeX0dx4afzdzLCiX8lNjCrcqIMg3KJLazIP2Yw4j
HPbPv/3VBR5EyKY5Cv3zZ29/+exLKPbj0572X8YC2Wfy2eGntMlSC39klv9WDcOglYcZuRxPqUOp
53l8vf8fiM0TM1y5UXn/P769PbGcY91oa87Iqwo7WpIvpyW5xLRVteHRXtMp+vLrnhDZl/RaUVgq
b0pFtXqJAHTZp9nwx+NTwUPD9lEYN1NPbXRSZ1DgGj6vF0VbxE58gX04FhqbLEQJMovfYR1MmiIu
bszAuZ/BjKLh0QVzNvjFmxcYEW+MhhAkNQZU0Qw8j4bac/SYJwn9OvB0gV3u8DqhzlCTUj8cbB8B
MrYlf3O35eMV6DiAvMjwapbtnifZwx1qs6ih0dHg6REZBhxBQQodzU8nZLVbbQDuZ/wM/An8eMo/
yt1u8L3NKWXuLJ3LrLzIANJt/YTgWgiHXTdif4o4DyzvHBMk1jvxP+wKwwTMZ9kuF2OkoxRDkEPb
buobcXE2M0jmTWlQKjdtZK5TTrJZb0skwHAm+kKBJcjYIdv8owxAqcRlGGHqS2OhvhCCjmXyN/Iw
HIHkwfHysctyEWbBIx31Gslikl3n49bMYmon5Pnh2nE5Twdb5UfTMAnLkjJWqnHShF9Qnuaby8fB
BjONTzO5TR9SdHXeodM+7c8gsA1weSv0YAq5WtuSPAVJd3HBaI51k0NfQ2/qHr9v1j0R1Qgt1RpK
6mCpyl3jMAVx/dy0eREIETeJ9kg7rCNP8ouAKYr4ZpjCEq/RhnpqkYo0GQWZ97vTng77WHsRbB0i
vLD9K/SGiWhGf1Etsrt6z65sMoesvanm5U/7cWQjh1+ALNZRRi9agEGySojRIIR/2OxXK9Yfwcuv
Z6+++PqrL/92FAIE1vTpELfzafSJ8WW5GHX5iJujgVc35UHZdC63N8WLcfZFvWlflcXiBVCol2ih
cjgLkBm5BkdOqq16aPItxlj6jzh+PRCFmItaYvsFdFBMSdmzGNnoDJ3smJdhjYe3O0lxKKxcOnWv
BxBB7U442MVNb4QIA2Ny5cG9Kct3w9OkW/dhEH8keKWVtNbDkK+ed97Xu6v0kc8J6KGEzQqdiV9G
vauu0GaFDxu3uZNgJ5K97abdo4PUKLFi3KBF5VF0EMJHZhuYYXBq3Q2qwiap8obws0SoTgw84eT8
vtxj1FE8PU4DMUodLOqeTGkx6MDms1uhKflXofcQdriqUCGIQs6ypwMF0PFovy8M70r8yRjN8mRQ
ZE7S7mp1jcTfsqb6TYk6R1I2yyoCV2BrsuvkQGY00Bwhxbq6uS7JLw3VfTp9MxH8BfsZUyrnwEfI
qNKVFLZFI+cFRUuAeUiP+MHkIAbk8k5YU7VFFLAciZHvRp7I5qEPkfqlRRIfolP+g7dK6KxWHhPl
xI33UEpIk/SOxcTyRvMFhn6kJJzPplrEsfFaGam685Wp2djbqf4lyqIBa2MXO7zHNsE/4Nxf0LFI
g2a8VyjvMHyc0V5ZagHTP3yWPOWD7Y0+bp4yyucUbnzxIuDm4vy8htvXFbzksQm2fcbpfVSzk5DR
owLoyshPQYRXM4upbSGgtMm0lL5QRR7FaZmKg9xgfBiJuu71OvJvOcMmfRMR1nw0zSHugXprWsmQ
mQKEEFk/gya9o3hhKogeJo7SoiYL04MBh3u0JfxeNAlXaybssV025Z+tMECL5rldWVbOCG6I2Bfj
BfKewzmFnd1v5uPsEcejffQIPS692HoehR/uMIYtSJ5jdAsdcZgV9vP1Ibtnh2Uo5NKm87FKBsdP
yFdUfng1F3vawVZ3hkbUez/QvTF5ULdEtjgHUYEZPOGJRDcyohDyxyGdAgVO9eeJfGStsGoOnei7
0iQTHoZgPXxOG7hSXtgcgZZICOutQM/CYsraD/FQpyA2OvkDNpW8gIAPRKlZofCEVQkEKRNliwG2
MMEyi0YYpSCjaYLbo9ZDZi8WoW5QD7oC7F3c8YD64V7kYQZUHcaDJx/BQWRXKs28NvBRYUQpBd4+
HR967NKaFbDkNzNNi2idkEpJCRx4oNn3KvvXYWawsjvo+UfTqHv5lOyep2BKJLr3KscYZJHHyc37
xqY4TW/9+X7Hseg3xba5pjgmhBYNxoNdA6/8m9LyvgFiQHeC0xyWtljwm+HouKVMjZ9GLzROG8sP
5Ulxp5QmkUtSQAi6vDdUgOgvvXnxxRkB/8UXT3uaXqyLO2PDDPTvq198+aVon7DKaTaksAzo6uVl
yiwWEqFPtla1GbGmCgNUiI7/dHw2fhpKF45gYeybDSkRgC4xV4iIcVnaHekr8xOnPd5gsjIO4CVP
6+q2XAhHr1KkzkKtHf806ryITeA7QoqB6Vt2QJ/9CaGY/x5GAO/h3+A9jQe+0N/gGwwLvsC/wXsz
SPhoHoMSMG74CP+6998nNFrDY5VMeFqOvCTcHyJ5HsZp084IeM4H8FLZoDHhCIqgHlfZb7mt4oqg
dtcVIVDFhei1KmZAE5c0XwZe8mkY6jFp3rEpnqpVMw9PU2rgAMd8vkywzDx8XB4yj61j2j3p0Ium
EtPZ2IZIBwdEAlJKH9dIHfAo3YDhBXaAOXOAkb8Hg3c7sN0HomgnCh6ZxYYfeajJ/SGgBcyL7kcM
c0cIdwhuh0eyUkuBdOGopTA8z+F7/QPr3712vPPc2j39J1w72uAGYvDjh6+drwtHuhWSqYhPQxo1
SlA+eG9V7N21Ee1StXHd76+NE0/VRhhE6n1gN/brsoOPhS9yMiIrT3KAVeGRUUaCNekk8FZfO6ar
qvAMVIzRMORdWNPUtPXWDSdiiBw2p/C0A8i+8l/tqy4mNdyGATvVsRp+NwoZu5hR3Q0Wv7ebBMIF
RANRL24GlyJ1rRQc5Y5N9BZG45HlQbs5XsvpcvyqGneUFpHs4t4nBxFLNEpJEEa4AW5ROhuGS3vw
8oMaOdg1cl2jlPTQ1bVa7kNdGykqhPm5kS1UvrO4l3GmYb6MLg5EgA5Fm3IzV1eHND1RDPV1bDqn
cEuJ5SqmnLi+s06jRC1IX/SLSgBFeypMxNKWeoSpMRvpPCGY/HCxxInmfy94B1zxCYlcd31PUCEJ
gYVrUmpgDvesNdbKCMpAIsnRehZjamq9MUVzi3v9e6pB0chIb8wRORZ1ScpMpwqp9/PrbL+BKqs7
JMOolMy+cNJNNkRDVi2xkbk13hGMfpA44yQXLdgckFtWC9l7dopxCdkiXEJzHLYEwMKVqDa9BHfk
Bd0mTpPc3ZOEIMrXqJhJPcIOhqyDEeNphMwr8V16cPfwXtxKeoRWE9FLiAfw7w+Trg5wLgYuQUt2
LdVpF2deCyHnFlidXlG1SCyyq56p0wqfE3fX/xFxFBalIi4idazHIJWtcw+PkVSDJS6GLfpEzMaB
0Xiy8WoRyQIRK5LUinWN5sBCT/XO/xg+45+KX/1H5WgYCaIjv3M7+vtQpYpNCwmp89WXIrrRp5O1
QUw61DHxOYc6JgEkfcY7/am4oIf73p7y1ggAIL2/5OMe9YCLEpX/Er2aMAytdyn2ZrmgG2JR7Bv0
gGXbrxao/DNGv0sgJIQ027ppqktoDW8B6NbJ6Ad7cmGBSXH4/oUtG+A0LssNmRFDt65pYxyOIWhw
EPX+6ppF3styXuDBjSzAvq3XdElPNs4Awgb1k9CQ87jhvLg2tC78vaJE2mgN3paru/ikJ06PqeOj
4P6JWZqXX8tlAZakCyLiZQh27Jkt1xOUXZnn3zeWwcVCklLio/eu0S+B3YAJUOxYeqeM7emCPfSw
jy4yoE8y0gbgvDarum3K/aJmAoFxSje1aW7Ud078FR7Fdx0+/MTTuJs7a7GjSrLob3UbyuxLrub5
Re/A/T+yLk3OFtrTbLBpB9puVrc3+OoXXw4Sd+JBqSfw+wm+GPTe/0+e6x2HCnj/b97+cvLPyPVO
8tgQRi2qZl5/KDFxOwd2p7uoPeXCDn3ulMvdcrMml7nImlvcspMW31H2DEppYMyNfw7PX5TzmgKP
jelnueNcRF56pVznWNL5y6wToLTem+/blMOi6TzltdjDcxwNjNG4CP4MWdD56us3r5+/4btymB0U
rBp7CTrNXFoW87InGVq8j/SmZ8JO+dKRedvDfHnsvIYh9rNiR6ECChPwQdvLoJXHCgMOwDtyC9mF
tp4D+2UwzkzwfZ6SyVNiAkGqXDIPstvbW8yeATgm69FkZTsX8947NN4qNmjssiDLi54L2OAlskG/
0x3mflrMZloudmYHqkAvVBdTGHmSD2eFZ8PmqpuvbL3epBz+velROQ6KqM0ZTJnzswsUcdoRGVRo
PwdTwkKtWtOFM/4jI0ulPcLPiGaYjuiovEddTmfUkCRA+hiXM1Xdyskv2CvHxKnnfdZpwgLUYUs6
1l2xTqXCxAMKENZIgndVuVrM21vzu1o02n7NcVXYLHIP+DeM9YJdUbQXfAiyDnJ3mLiMn/zPpnv4
bh4DWWlBW3LRaPsPTo1kdCUmMkRgT8MJlxJfk4dTUnXd5wUQtyg2TBI3aNjxQ7z7vKRTn2zW9qTC
0LoRk2orHfFfeLEUDhJFYtyVSkMC/LRv0wWYJZYbpWBZg2Ukj2NjpTJyVAKpFpH0wvQD3BB7WtnB
M+CJF3L1JFw9Ub6hMOf4P3xE/pbGRnEGRtgHmxHybnY92Uxk5LBFR6vc/YZFqL9VgfzTZSn7XMKS
L1gxROxX2yhfsqy4RqUOtAin44Z91Zu2mdCfngtw0eC4TX4n7Mc4ROssPvDOYMFQHpyX15p2JE7D
O3+pMeZ4gP3EVs20KAo7WopQkEEGizgr4ztxm5sXMhBTzdA/59tG56wq0tNoZ00lq82vieuSpZxA
X7yPJ8Lv4tZGPRP3S5kRbJ/IARc7ZLvRPaNexvb6FiWtgSnQY946aEbCUMYnSTfT1+NgJJ5QtlnJ
uZGhIRium0MiGRlNiNd6jXas2+AGiK+ZDQI0ndhE4XKhKflOeBE1xFlwq1aPVnbZBCkMq9XC9llS
aasPJAMgUDkhD/RAarxE9qQNRd2oWraAoA2cDbFdA03sZMOmtxzAIpG6W3YDmU9yMhZMUiAD0aPU
0wEqMbHQFvNuJLjkowlbHbB9W28WyurbJ/EmPQR8ueN0EMWu1VCF1nJxQMJ2qQhHDrBOQFTXYKrE
JUR3JJajqjlJghbPuX+Zg8rmaPOfEDKzRkjOJe1ZZE+jKcNZp0mRNDRC43QWUCNnpM7h4YH9YE9h
0+9IhtezvkBmhIlML5bbMY0McaXGbFM48oQbOX/xu5T2nD7TA/cZBTNEPjoqDEFIxw+dyrN/qkOI
ejvpOoqoleHzt988f/Xy58+/evPsy5E+ndriXSlOIZTx2lJUILST7d2EUip857s6f5ddCgGGvQXL
3yTOJxMko8i++44G+N13RIrldMHXPKvvvutJMK+2XItCQdDdNQu7/HXJZOu6bbeTJ09kRPXu6km5
eYKnXNM+oY5MFYrZyekG6RhY1vn/PzeA4jdJwav3gy8s/xDUvS/vO//njSBAdyS3OHrAHQ5owEc7
BYDiHWDjBYDAgSk7tiCa3A2VAbLIG6EGYsNW+iYWCGcBG14FTvvUTmB0tSjvtYknz3QjAS1qvAbs
SyaIh41zNKFmoDk0KJX8LIaHGApDR9s172cPM72uOk7X2CMtFgIwE75kNP/CMIKUksViwUc80igQ
6QQ6VyAwb1l6gJcIHHoz7PNhsBKGml7mro3BiaETFIELfvG66ZDPRuTh8HX9BqhJOWth2QEzFzAk
eHVd35hm6CWhQIdtFkaB5ipZ8aGoWClgaoPkxWLB5V22Xe2vUBcIgjIGt7EXpjJFjNS/qYZ9xVRi
TIs7JK04AzWQ8ws3Cu7eHNdSwgknHD2HOHJiyGm9YX1w8dP9M4qQUiUcgHYA4oFQ3p7ZI4x0BuB+
NBNN1SAaIMaDO+EcbnTdsC1Q74kpmYGAaU2WREazCq2DgxQGvRNOfeS9++NoNLyNmcnHkQnvHY6F
Bb3jhmIZ/O7BtL/TYDw+NXCQ7Ut2OD1UL4IJJ1ai0Gx+EBOKtKkD2GjcV9K+euuFM1G7/tTf2IZr
I5xoMAVdofQsDxgT11XTbMvVSnx85vV6DTM8EUm8MJ7oc76d5xRbqPbB8xyQeheqm0wvuYEWKp+M
wFD9xs+1y7E0f3v6PemY/XLjzI7L6fpNESrRD3TaStOJ8edyjtEajyg3YRaFpHvKIj5ZkG2L66nO
j1FC8aq4nMR8ZgEBkj4mOkEfd6V6GD4S+dX4YshP45JxRHwcwSveJ3SnRIEqh30ZgKInfd0zdEA7
YIzMGtVrRhP21yiCrWDkPbUf+hiFiv0JCalQ2F6SHNT64jRCJ3ddiJSBcoVqDKUmQ0QLKxT9v8y9
W3cbR5YuWM94mHmah3mZSYOjRsIGIVJyt30wBVfbslyl1bbkJcnHrkNzIBBIklkEAVRmQiT78hfm
f86fmDWxbxE7LpkAXTWnT61uCwTiftmxr9/mGpAvnsfqkvpRwlXgrFB1odoCqc82gfx6vbcxO0H8
Yaybe3k/vzW/TLJ/1os3MI2cmtN7djp6dj7M7vDoroApBVntboPraGVg1RwzeGKicvcHpiIBQqdT
8veXhITu+2dj1RZIrjEbyzysGm2ChQ2oW7+gWdbc/PDRJ6tFRwMItu6vZ6NsPB6bU4YSB8nlc5KZ
Md2ka04pCBxKhnQwzjpnrhn3IzWuTOamLxWrRQScAf+Q3OOSoJcbuJ2v51fIjzJj/AN9YavtTz5t
MwAKkhjStHpsk86gOp5kJMPjuO8126cSBfDIBhNZGwVy7gmPpoT3twftD0MADH365KUYozyCXnIC
TlI3yeiDofwvEIl1kuG/I0hMR8Kz+Uo+6swWfOTNr9+518Mm0TFf289e7qZytbJcJszX/MkbS9ET
/2GWPxCVRec1VHtAhzqG1lJaRz74wrvT7bPJJxeUaQ7jKkFtZu4MS4LWzU5jRR2Qknz7gDYPDHuk
zyrXL+qundLa/W5lDtBluG9nZY07TVkMkVvXeCOqtfxTXc2FAAbBxLzmft/yrdcwF/u3/1C45lWG
uq61N0TeJpSlhRqHqADU3Jn5zzkHLNq/W2bzqVTyhCQMy/OfUUrujAADBD5CAiEvE8DqEHg+gMbJ
CuO3ho0xXHc/ypvNaaWFYID6qGzQRoqgtQEGCkC4zhtcE6rHdBYMZYbTHWjJIOX0Rjg212O2KkMf
bbiEhwGsKxYG28ccoNNwVlfA+25ujFBz/0DzCjlTrDi2h1oWGvAKSdzAalO95lNe+p5/H1qqBjvF
3f6QKBBcOhgY2U6r9o7gbOps3gTn5gy9m50hHogRhxes5/x2+RdYIggHBUhhq2aEEkkVArGz75rN
9lWj88EcQdlr9LvyVAWwdRjMPAJjMyQv2IBziw1vVna3KMDd5RvHAjAzc7xMt3kQvQ3/e8GmeFcJ
01HtjAR/S+33XxBid+hZhYtgzveikGXAggyMxzvusqkLufEGCPc5HB9yK6GxnOyQffzRpR1HgHt+
rlFdBWshWjitcLvdLG6Aj7glV2bQTmS79QX6D5CYV2e5EZt2xedfPB+C166RM1YQSqpMAGRc97KR
98WC3aez04sw7yzF8reJOC5ZBkTYBbP8tP/iWR8CWevaMBvTWOG3mK+hXa6aPamcR1QjJ2/u7H0x
5tMTPuneUMM54X2BOQW+ofbkiSC6qRLzclfFvu1th+KACCpqCRXu3hldW+VA7u7uAYfU/MMEyJvG
HneFljwJlCNhNTdP4T9kz5+Zk2Zb9HXpabkSEpmQk4uyZSJOPCDGGLLlJfE4Av8McNaETPAEK9Mo
U44O2fX8tdi54UdQpL6gdCc+DINNPxDoWvtEbPnqL1Z1oogmDtZqGhcT1rA/9IbzQ3lfrnM9MqXb
NV/mAWTiZULf2xpL56ESC//0SEh/W5ncaqz/rRkHDq+tU90dDlxgD0Ykmw6ToHTYAf4enT9ZyUtU
/0Jzov7tqygHVkXT7vKu2MF7ccM8gbSfn4fnwZwBAZvAF4zroVTw5gAhAyRg7BvzFx0qGxyxWO2W
/LoncRjtPcMJoOdeVRgZr/xYiEUUEJfmJfoXUEO+c/Dieu4cpIFe4Bdqi/DvMSdDVd+DKRWO9tm5
x8qSPWBN1bqgOdcItiMnPEEMYeDlOkhnIwi0HtRtVwd0GRPNs9cWmg68nB39+Hh6uLfHz89T/Kbe
vzSriTHaakcnrfYgXNsx8GLrZe7jqXfzrh0V6ad4H2EP++M+wdxgoRiYzeE2jc/AUeKsH1wK6xTX
ejO8EmMfj8e0vqkaEHXC+kfZL7/8ki3mC2D2/hDwFNJSKn6OnP0I7GvmZ+QJ5QVgJmu4JwQMVq7K
xk8EcFmzSywEFLBPzhk5G9Jfs9m59f0MDyRVjrLFJBKqSC/0wc8K47J5sKNicmYdAX/U6Mg1kvRk
5GdNpsu2QKFTrkzs8ci5RdDZMXIY8/oeSQ/uSXshHErunreRKHnsj2bNnEe3zwF3oSzRI4FiYFHP
yMIx21SYz2gmtrrQcjKwOEzObdsXCv5eXbLdKO5wT30OqcM/Zoyq7Q/Gp/LF4gY1uw5zCYkad0Ka
HexIbD/HbBdj3zgFiguOwqWLNmgHW4L3YCOuxV6KApbj1fAT6NdAmVHpSDeHCg9bOZYo9dkRTZtc
gHC2KyPj1uYc3oAKne2A6FR0UVyCcAPyEgYnsDO635rIDGguwjgFczLJhwqo6gP1YWbK7kkilRKR
GAWtgbzG/aATm2G572Cg4OexrvkBhy7i6L988Ckm0+K1Nf0P/hB+caa+IOHw1+QbwkOwmhKWCh6z
1j7bbC8MzT8RBIjQkJpXOkSAkgDl8556HF5/M87uzF5+3JTLrDKT3NxKyxThui2KG/F/laxG4huj
2snhlZHordUDUDHKJcreB3NDpH58QCU8MGrgDAZW+j84UgdNIzcUT2wgHQ8wQZLPLkGiABAR8FLZ
/HO3lXsnzcuCNGI2C1YEu5S3ntuxqZt0oGex9jWOq5hrM5XIPmWajFVxVlLkMmNQA9V5Wu8mlwD6
TXM4ScZOhnoG9c9TmQydWEtJMUUd5eTY1HBYtxS7puwdTQzAj14UAU5gPLoz80/MIa4kvQZK0u5b
cMrIb4qHKQVTZDClCf53rNii4dnk2Xn0sK5U9h9ZDfcuhSCOR/xqk9s66iGF5+AllYc65fWtarVr
BtOKmqkb09QObOqzgL5qQk8CvvUnYgXvwFmJWX2PJTR3wowjLosC+FAXI1xAKExgzvARmgJq6sXi
NobQ1JdFNWMTX84jBNDResSjU072t9J7i7FMYQpalT+wgbdjp6izvLFdD+xtGBnOTcUf+KMbl2pk
pJ/hboc00/5UzWgaTizSHohK0FwBljST/hfnj4k3UW2ep/h8HGA/2U/IZHNTQjERBDI9wiBK3jv1
fkmMsjIXu5rn3PzIbsXU+pm4J+s7P7Qi1hpLgX6kFLWuEajVDag0attt677ekI+NOwNT9bkLJMTw
KMulNSvlQ7BDc1VzZVEnHFipnqmffeTO5XLGDc0oolIuwcyUlVzOPEMz2Fu1ZvIK4bTNC4mOI12L
gRu3u2Chvf+kPntSn0MSD1oWaWdcLuMXI7GQU27LX9B9vpzSy1Q+UMdA+eRe7muiZcv21pMk7dN/
U1OdwHv6H6zD3bMdysCj9sQjWr7vlB/RQQRyU91iaIHjg8nfAxynnN6WWyHBhIJQ2M3XGlXzclyM
5VvnsjF3uXkda34HzqPrhkNx0FBtAWHFd+aC0B+rAp1jesKNm4LmZSfQE5zptwhlUe0W5GzNkRLW
cWfccwqKuwLDJLbV5mKOodzIkOLSzFdXhrFsrm/xIpuCCNLykL0wjb0zW/NM1nHjQnEWc+4QE01h
dIOLj8CrBwIRsIyG5xSy1munFWcn59YaHQmr4Raid5Mgw5BaPb32PHAxqtj9ApdzDIOhswe8Le65
77IkQUXPbHQAc6jwnQTz8VcH3362pdNtFX8q1EbKRZBRpllY5pC4CWL/VOUgflBfc/7tjKsCAyv9
+z3YGc9wPabqG51u1c7i7HyY7rBcG0688HqE1F1BB8Ou3uUplJFGGTaChbBbk6C38PV6d8u5oHGE
M1ve/IBJSWVWhznCZ22dzOLU3HqGWEyvC35Rn0l1x0jExwJDm0Xlf8QpciGvgGHEL8tFOV8p8jCo
bYQRERwINfJojlzu8rYQIsF0BwKw0TqhyQ2knwMqhAdZwjwwin5s74YmA8o3c5Z6fr37gFuMh463
kk9ddBOOmFLh3Z9z+B3O0tCJ6/IK3O0AHEKR/QJlY9UChszcmMEjHFZBroigJeHkeUBw54uFeaZQ
32LHnamojiNYo6I6pgEYCbesx9nPMJRdzdZoQgRfXBcBhUTsrGttBES7BNpGKPTMBhY61ZVQnfgM
hVESETYP1vxkqkJL/CvC9cD5AcvO4Av9zqo4k0RwRVp6FXmOhj2VkEZOXGulVtXLDwEL75YHQxbx
V04mZmsR9YWWeSNyMvPcJcwkNEtbNYoTQYdNpiklh4Wg9u+ZwGzIBk4iuqXOtr7bZx2N2HJdWeTV
oZlm6jLmPsMzygaDUXYIAYNNphdzhjfMslbIR3XVjA9eZ3F7oUeSmiQATN+3dO7787aTlw7rkc3e
t+4uLsl827NYB+3LE/gcBt6CWN6iDpjivFuvDFfcijfgfLjxnxngr4B3J/6plyZEFLA+0FPbhl/A
a4/Ng/bvuGhAucOvaHX2Kxcs5w2MZ6MQgIxUeo3x59mCbbBA49CNmON9EKmPvuI2+Adu4baorgoJ
zimsVRnLjO2Dco3QXZgfJxhTUitAcjMOY8p1x+67x+gCIk2db+91jSaVdBJ5QI6WUjQ+1/CtrHpn
GmvbhivNR1O87ciA9R1iBykrl3KofeFRWRfpwwCNAVTOYW4IbH0qbjflvxZL9JgdgJPEQNJvEagN
jp7fg31K+y711RiDkCgUtuSMKrEFeWce8ly8obHIWPrSWsBwaLE9GNT95M8DnyrmLNgUswE3GuCp
qHaHqmpjAdjZQLt9oL5zykNcP+DXKn+QOqTvHtbN/L41vSA2yzvbAt4hjlHWef0VannRUarYVjmG
p2EoX9X0h9EA9MApi+oPZY32m8SgCrZcA3IPK5MDy/LBw+4LWBPEzd1yl5Nf1/22ktb6BT53ENqE
BEts5Q6PNN1Glj2pkz/YtM3wPLmA6trF9eMIRRuAHuk4r0f286dXr99PMspYbka9fUBe0wz8aQZm
fIL4g5v61NxeCsFOtMK5pSWZPV71h82uUiNlr4K4cvYkK8ZRIhx3II62lRGv1XKryMU+0mb/ErP9
lWi63GDzZV0u4dGhSwcUPbzB0JJD+ASRJCYS5suZVc7fm2k3riAb+cwXP209BzmLCKhqJ1neQ9u3
hbo7SHM2R+ZVnWiHEHNqFzewkGSQxV9o9QJdKTxGDcf4zwHVr2S0ELN7I9TUOPWQBFFQFGoIcX8E
UuVdgYFj5MLFyVt48Cp/uedaZM2HcF7M51xPeXh2cp5MjGlLKM+c/f6lXkUvLZJGDHM20MaI1d9u
7tYcGZPAe74kvL14SzrbXJo2eRgdjf733uvBf8Jmm6mm9xhTVRlWAKKyBLHNfPeo7dZtXAbLQczy
cmnL5PbT0GKEkZP7wUyQKDU9tqfdo8C8LMDzuykl/ETRW7z/4hT81wJ/cBVh7buGw4uVJOkiYwBP
SjriDaQJz5TRniNrW5wYwm/S1iDn/0uWiH4+7HtmiOH5QURZkAOTNHOgygwSJJMqt94ivwd3Te23
kDCAVHaDkS49fHQzEjZwYDuhqyEf5EQnMUmh1UhTlH0rkurDfuethv12+KgG1Dq0tBDfSXXzw9Wx
cosct8Q97RA7lDOmc3pOpHO4+Mt/LznDRlS2Sxrr4s4KjqlBdHqta0ELp+U0EggC49zz8aMidbfw
N0Pms5zOQAOQj0zDKtggEIek033ToXkzebS7EfAqdQPAQzqMem/WP8z2WH0slrSbg2SKT1qZoGhr
ek91OFpdvuNjpPcoBS3dtUWtyUL9Y9pLvKmlJGFsfU2AwrB1mYkn1RgE8ZlMS6So41eC0vFgU33I
Bh7aS1zetQPepAzU4b0KHK4gnSdeg6D2nmfBL51gOXzmDrv3ZvRYnq6T8Hkkz1K1bbVbQ3zSorgw
7B9fAyMso3vEpDMb+sVfwnQ7PoLJ5W61wpZDRdayUKCyL2yi3IgbI38s5eKN6nVTh37AjykHcPZT
oikB+DtNZ2y/860Tuqj9PF7sGh2NqvqZqs+x0UK1pppLaJB1t+uWfpNOfQd3sa8bsy4Qj4G9Ebhz
lMnOXze/CfYFH4biAYREPv+nzxnEAazhF7uGFhCPC0S3Z5cVapebTVAbFVoE4pQBcAiYZCCOnsMb
yd8YPIyxgXD1E6ewuSDRA0xEAEXWT/ptgvE4mu4w+yp7ll5WHIZZiwf0eA4rnp1C/EJ7hAvWBIP3
DPRdMxxgPkB912Do5R4yvwLOA1gItg/+7RxlrCdbbdZX/SAvOY+oqKpIRxwARyRczyGDKzeAVhbQ
WMf3WPMCkIhF12hhTTweQfiTaJYyP5+04Vzxv37sjK0arg5llAjhktlsyGldWeQ2QhH6QNOXOzxo
CKhrnnjIsbBUSSzoMHWcNO/V6Dp60hRtYFqR3bY2wXowD+ZiZ4OVTrO0LXzoEbFMLkQUNc3oSe3i
zeFruNYELrqqN9qAvv64uWHT/VN50MAovd1sd6t5JSYt7YNersnj/OKBmUPkC/sEa9YHexFBQAHY
LRlpKN46S/ksWiYah4CIj2OzlcD5BJzzESLFeKs1dsD9DAAjbczK2jF9s9PnnweplQKGsIPhCpzQ
Yy91oC/lKLtH8rLe3aLXpX0m82Fwa51vH5r8auXyi9CT4MF4P0xddItMiUm3EzccteMA4c9pIp5U
GcOHmPprW5/OzBPQAyiREkc0HPaSXvJtln1xXTx7sgTHxaw8QEVj6wye1AOslYqP7PbOj1NimLk6
zFtCI+Kjtz5mvTY26aYdgUTu9+xfiSMSLlvgxWqtuPWUAMDEjRK3q8snXm89h0uFoA+Bgz/uWI6A
WGxDTAopWE7fIMI9bviOttgnTaWzk/ORl8EObAIUgpQ4zhTqZ+okhgA/nE66/Bq4Bc9VxbseMp6w
KcHsDC4Um+9F1eaWEv4MefiBKMg8FQoKWWXj4+BD7U8kD8ZY6iWDfwinRmDnHbhRCKIPojD8jph+
6F8kvonoENUmDwcIBkigGVeH0PI00d5jr9bmeQDnmq+I68d4i5Kh2QjSl/yRyGVMeyxZO/ym+Et9
Y1aJDfEZ5P3bIRg8m4qxZAQ3RCFmd+wvhc5cNgdIdlE2BGG4MnwmDHPsByzYSe8PWkjGKmioQJT6
7Rd4Pnyhzq1wEAJRHrIdDmzsLX2Rx+6B4kEy846NvsIoBWLqK++w/KX2wdfXxZ1CewqeKyGul2UL
ZJOqbamhnY//S7w4ql/VThKaOD3ZXm+2hoyZZd3UOvOMqOjFI9mHuUi41AQAmvb5V0Eo8tEvkF46
9kpYskOsxxXo9B1RFYjgNFI+6DDczFrKzCxT0IRZELkM9tPVlHZnxeGw4xyFgNYYq3WlvKTND2bh
N7tqEXoOiYd+YlLs1ku/KD7Vyj6JxV9Acbd93l6oGKfaubnutkvLTcmXXkn2DdXlNHA7l5J5eOXk
S68kT8sryN955XyfYV3a+8WvY48O4t3o4+SXU+fFPxzJUjPNRvrfJsrL6YlOVPiuLnSmGAxQBlqx
aRino1g6vzWfV2GQOH0nHHSct5N7Usn0lztgV8ALl9g2sFoOfWwS7CAd5N6WUkgNLCBh/Fb+S/GQ
cE8RnYW3juAkYq/hAXxqjJHhyZB6O7DUPwsyjMvgtmxxp+ofMzbG7XybG14NFGGo9SGx2TttehEN
oUHIVIdfA1jM2h/xI3hrNoDwPrIxOyPr5J5w/SR/ScjEdz+Mgh5Gpjk4Cf9abvOwjyRuTvLswanr
Bd6sSufJsxgK5qD5O5V33kYmnEvWJRp1YhT6VksN+Ts8JzjD7pNBjRa3WzNMPIoWGTWQO/S+ifih
A8I8UiaEyJE2vc2S6nZkWQfaUxxAsFEozqbvaceu3A/T76iMKybhwFlb7Xzbgu1bA8kAsq8dHVjc
fo0TXfuk0yeXCheza726zs9MpapyJjq5HC/Q5wMYeCc1AAx6we7uNrbjBxsD51nofKaTUxusBw2G
QiBLD0paQs8giONL1XvfB5xxrnB+5oyYUOlGUjZA80iB14I0h+v44YNCGK4/fCCAzao4fjZ+7o+j
30+r4XR96+YqgcbpVW3nHx2isxeZzEHJGHhMxJUjj1P53hh/dsq1AkZUnMQCP1B7eTgG1wObcwXU
XJ0HO6p2O1y8Z3FwTlgzGR5Ls5H486BNCpmMeeIZpZ1TqgTia+2KayDu0A/eAo0DXA+54ocZcUZ+
UEy4A3DOvl4uQQ7x8llxXI2CVvNB0illFRQBkuM8CC4V+LkYxwmkRkZtpN4f/fBDTM5K9m4VebSk
fLukQEU1LRTdXs/rgtJXPWx29oKSmtMBrqS4dQgYBanQlG0IxZ1XDF4TjNpEiFF2vpcAMGoZwr7G
zveEMnSJYGiEcIRJPq5BK4v6NQajWRaGZsIHTGjGiOQSd9pmUeG9tcjuT1GLxUuLNbn5OjEiWvoJ
prmTME23I0vIRHCL2XbBKrW5bGBjIixi2BDSe7N6Oka759hP2glBpw/DVVCKBlxX93a7sUBErcsI
VgIgu9fo61RrdXZXVAolXV2B7HZXNz6k/utjQsT3GTCElSWSjj8fFyvKoeQQ8uecmBs110bEhuxk
cAA/JqcYboMcLJu7jTqzUaxRurP52q1WGcAEXZBDISxd7YfPQE1/cLYVnaMv1D7Yx7BsKNSZPU3w
bLobRJYCdQtjnwC6jJSYilJQ09XBiMJ5o69t6v60pITjky+6+LHX7yspaYrhomJ+j0T2OHcswsxx
EcpemEUu7FBGAu6gEl8DDqN4qe19yMXEa7smf4IAbhqPjcx2U8nLDFQZ09fRa4kz8NLSOdV9uWTC
GmT4syORDHegz4gmk0iPd3hePJ9Q6FUO95ZzPoJEaK9+2RA9lMxpGILIa+/yL/ojfm8Lmo22ChpM
0GTTbGIaJKlvMViDhl7xuoB1L4NsNhWmSls/yF05tveDUdIkvS0qVrUtACePT47ZyeUDWAoWPEh+
FRlVH+BcMeNnO76aZ2hclmY/dkYy8hRPjugXTS3ZUDk1Msb/mh/AjU41tbm0OV/8FJ938KJDXVx0
viAYyrndrh78kDJWPDeOVQbOT6tP07ov6gDyNrs7GKFUlUK4fZugys0SkOwSXf2tBYYKjvzE4AmL
nxklx6t5Jbk+wqU/LoydK6J2F/wiZB42h03LCFjXa8VQBRBQIphYa8Uz/mAHe05NeRvlD89CFtA4
Q/jJxA75+AEKHlJtYaJar8MC57jULjOcCs08ux8DXdzmQydfW6ax3hqZJu+P+kPoypaMol0ZOAEr
DcFZ4XTSskh8LnM4R9QjH0f783k4O8df9drbc2KyafZTf0DnvTjWPB0hwwHsLgq9l8BnIDSEMSpk
8iDuXDZEeJEoXkKeMgi+A6JICIvCs2+2GAtRIN34QxR+yeJ82jaitJwMNxEJYy2oaymjNdrH1hag
iBWdh11YT0okRaxbIdaAQc5vXiTKE0Tapj5QbpHzVThXubxcJ3ADrOsBJHgM1KJUAxKL+1/XiXbw
cQa/MnNsoCkwqPIRSlLGaMkGT8S6715e5OGfAOtQD/aund/byI5kGJ+tZT1JTKlcAl5ElafFVLMY
Q/2IJOTiECRGC9DmpzPfPjI8T2UcsYpKUZwe9tAoHRjMm3WuQEe8e5wi1nbECjfFGns6a4ytgrlT
tVyfqWmdH/hk6TcHcFu8lTHPTqeWOvU/rblunZTFrXaTHLboQ6SIU3jMlxTq7CtiWY9ULqeKwNJo
1DeBZiN3Xm8jjOpUnNUQ9R5z1HwgUlKk8vAy7zj5qUMxoWWhHdj3X2+awmfVRYSTWQ4V/qxqmXnI
oPltVQLxXWsIXzNFs9SGQQQGDhNx79iTzIHXmJH8ebMTio/iOsa4yhj0zfbUKkhCHlzGOJfszs3U
mrUDGcDqeh0Ly0cR0UkhzK564CyikHUcNQ4Ro23DgFt6KZcTJxgIzuYVIafAqgDWD2mZtrvKvGmi
XjDT9DFYUeFb75ARNkz2h3L5AQVzkYIydpIql7GsFA4KjxnohZxcotLKX6Ccvak5czojDoW5oH3K
alVzZkWM9HNF/iSQDlppBD58CJXPIY4z0zZrvZeEzcCSO9bNGT9go3yXfVWzJWZKu08k1f3age1x
DIJy+s3lbYZAP2gFDwFCZSgMtLF4z3XFk6R9GazdJZELOeJTDFnCdUQuEQTGYtlPmHDazC5oyIYX
RtFGVR3N3E0VWnTKpXsdo7e4y1ZsKiIXlUVmMmgHSBN21eWV4Jnj2XKWspmlqL7F7awlbbPp1rxz
yIo7lKtyec/8gs0qC391mK+J+7jEdzvy06zbPb5D87b1Jw9wQszt6cXegDjq/HK1mTeImg/+zNUo
u9hsVuQeBQ6nwwS6h9lRO1D1HS/A8DPa8vuhXiMwDuDCCF/gOAW1VHAHZnhszvYsbS8RC6DLih06
7I4lr9Cyzh1LgqF9nKC2RhDzeOamaf7xOLhorr143AexfOc9JSzSghr2Nvu9ZXW18x8kBM5u5w/k
dmd1ZvPsYr4k1p4o+20xX5NiS/RrSJTwzej5DPIZbuww+wxfNtbnlSP5yxs1DMdbahAjcHFT2ZNp
4Ah0PrMZ4SDIisA+QH6XHKC6Tf19Lra7me4AkzzLQYx+sHXSKUVZwzxzwZtIN3aVkbrI9IXwJyvz
wq1cKjmbXxBA7fzs9uz3o34HXEz6JeFFh0peVwEtmCUaq5ta++TapvuTSegpckbDHV+Ak2+x4nSH
VWPqD8/NTuLn08m5kpKoOZtwRbpnsJe62I6y/lNJv9Lc0UqUm/F7NNHMVz9XpQtJ+lhUFwBiIFZS
YObxOOd9/klaYpTqtK8nYQ/ajPJW7FLwgvhu30ZG0EkvhlmrQ4dR+M4BXtICRLlGdH3FRYTSZbLv
JHiUh/umEdK04sucLnJtXjEjlbuSY/IJoC1W45V1kgcrBwKhqoHPt5lgl+QUdKISzXTVShw2M+xD
O7Ib4L4TdyY3JUSrtzcRbHvW1G53Q9IawHPOqS8yHpN6TNTyg0pIevBTMnpdfCLm/GS4FiOOq3wh
fSFmqWw7zd0Y8zQPU7+YW5abx8ncNJsXWdT7SCWf1KBM4rkN4ybCpQm8EMCTiS/m76fZiUZFNIQB
Y6Vm/f3QZ9LGV9lJmjMltUL/SZ0dH/OY7fLLhhzC4VI7XLUXrqAqNcquqqJYB6BZv+EOLbFKfAvM
97MZqti8CHbzdSxNYMwk5KvYLETvK/rgX9ddR6JPmFCfYQtStWudvIpPaoCUgV7ZXGePNqy/WYJR
4kKaSfKiCUSiWyurh+RFYr6H3maOWad7t46eRQUTg6l8rWM8TceGFOuQFIiakUSNWHacSDIFMh0k
hVs1m9wblx1I+LPH+JlFerKE9bhcS1qoz07N5F1iR766lG6b3bJoceir/CXHTliue5R9SmLEp5+y
VcW5dfEbghbdOQViXJgVunnqo15zb/8ctU1UEOyyKB/NM45HZNYfc+6AEfvO/MpS/XsQKNmdbEvJ
ZMBD4MOHFug3I4VzCAzVf3XJdsEfCYXx2fgf0cR9sflori+oW4C9tOmI7ETA90sy06MpkR/xycRJ
Zl999RVpd3kt/1tRbb4tP5bw+KPYpzZzPB7DP6dPT6j+G8QuQ4ui6Hmc2Z2MqhRzM8+azfFFccxa
Kg7DD0bRNoCRRSoyHbu793tv0WBsX1F7m8SowNBwUTYV6IvsAClRueijwuGgf11+P5zIST19eq9X
4sCxX46yPYM+uJ376SHT/xoOQbUEJKpaHApLdMciUDomRQSAz5FGy8P3on+Znwz7B4zjR+LZMZsp
KC3oGB2H/6PC78rbEkJhMc3V7uq60bcJrwIaoun8jzjGrkR8fADpFp0fUjqzX+hqtMCAet49c+pa
rpu9bGgax2bgqqIgxvfNiORFta1QC2eOlGlrt0XHmStzqiA62qlZ+ca+4FGZVuBZViPCPI1w++13
2eJhwRxB/uFDOLbj46/iJYEvMeLf7CYAEcB2yRrAuP2SgiKma8D3tFofzUDhOgBnQIcxGsRQJFQE
OeNFuSmKLcIZyOrZCS2dDIlUFucHMc2Ar4DkwNQKO2eIWh4rt0DJrFfgTbQzTM8Ki6yBkkGb5SKx
AaQ2f1cU1uNoc8lg7TzwDx+a6sGsLAYxo1LZPNBIA0g9eiludsuiMaSdpwOOm9XtXO2xaEVnM4t0
cG0ke8OUKPdy8zhGLwhcx6/xFZJrFYCaWqBKTAg3DwqrsiwZZgAgWe8uyBXXxgrK+6lFI4rwvm6a
bT15+tSQxIvd4qZoxpvq6im9s8fL4iN/fIrgFU9Pv/gn/oLIgnvctZpgLoMc72CnmCn5hsbgzyBI
P2eeqfQre1uDU2/et8fZuehtVstjQgyQwMdN5WGz9c32VebgMJv+jRFCFHsAUsITm3TZU8YlmAn0
Pgic982Y4MxGpZMCiYMTIrgtU/mA4HKY/pMMzHlYQaV/T7UZjUTrC9tbjquxoOc8FuI8IW/xHEgm
7LiJXgr4CT1JIMi4bjRTscDUvNPMi6WKk4pSMaiqjh+FNQOgLSaehb/z01DawK/HlzOkODVZVL0y
NqIjcMARPFVmBZl8kl8aab/9ptvVygHIzzv0LMYpDTF7a7kKpU/FhcMbPeP5S4cULAEZ6DaLEHEG
PDOW5XI9eI8qxcyWBmlSxoo5JiFvzJXEJP8haEa0wAT9YRZ+82AuE3q+au7HKrw770GQAz7gdxMa
Nw4d5TPTvrIYc4DMPujOFMP/9x+gZzr69tW32es377O3X79699KlZPcvxr7Y2a4ri9aO+OmYtpBK
W4UhUUQJo7ougIvhvsOOXC0be78u7kzh5IKk0e+4Da/LezXbT5ut6rbz1WT602xBB5I0oHVuRmpq
Fk0gN8MIxw/v3IVCQrQQRuhKkd4KEFGNkEov7bw2zAhjuzpvR8NemDIBImAIRsMzeWUKj7LO6BmU
Xl3zUAMWh1yXDa+/IndziU5g1BY24QPbRXw5cUGBSdxjaK6KNWqxrRovcXZ1FjSG5CD/CtIuB5Ej
mUadwFRoXJjxO8zuvXv53oU+TiWgEpXQbY3ppGhhXJCHsGTBlWQO/uh5yIckQvRcw2hwYrKIQ4Dq
8FljNy/Mnsp6a5z4pA3RkEv3ukMRFdJsmPGVe4QlT3SS8DiKC4W9WaejOLjb9CZfTvY3FERQWkg/
B/vQggGtcSE6YUIDIIp0hKicAofTI2FY8VGwvkQW5G9W1gwAJJdJKwipSIhkYf/6u8SawZFmJw4/
f4L6PkXzW50WWueVjPSkzHAaDLEvJ6Q/HGWJxAECmGQzWmgvq2HGgMxoLbXek1061wiNUQ0g5rLV
mffuQJsXn8BCebnBWmKNhXCqXIspvxWbsNSmgySX0X5LLmEXTa/b9kNkBSCtBQaFPC46wkhtInnt
AKeC9zgBzMAiiTKnEIWIJpCUNeSzh8LcgSTMkEU2Y447oJwvx1yX9TLvn/WHgNiSSEcooh0Yi45P
k4BIaGaZlOedc3CAZQx/0LaMRhKGadrUy3olc4qsBcc5chVHkYBAf3h5QfZ+Nn7GQHiyk6xdSUbi
6sse3dY0goEfwt/HV6rPGDyeLdWBCu7WTYKEADPC2sPOOE8fYjiVORpxYdGL0K3fVJ3XVpTjI/Y4
Ly/D/Tc3DO2trOJdZwgC4Aeaxr4/mhokcQMek6DIuvq02NPqkTO5WNziy5q+i4RBZBHrm3Kb968A
jxKn41wD0bfFqv2fQMh3Zu0qvU6fdotOP1Jj0i9YkpGi/dDQvQpdiPeuh6LyM7gm8MYAZyIuDFN4
I9XPZ33O0nGO+h5ycACbq03e4RWm3HY2uj9q4zPThnm++6OgnigT4qrSItS0pUZZUN+GUkT1VcvQ
hC1opiBcTp8NelhPaAdB+ROzu9wsNBcLZZcUblRgnTyEWUK3a0fIwPDBtlHaUWdChSBw8jsK75By
yrT10CdQzY5YLPjivNUDDuXw2E+dFE/+PcnBCo2qJetFUsKhpZSCSzFWgX4lb8ktasEAaVgx8p9d
X572iMMi6b/aguwZI731dkAR/lO6V0b72hJvgpizfrmgiGSX503srjsW+41P+jH4vYYEmUVto+q5
SBgraG2TmKxinREU63wFto/QvZey2daFNRrIeCCWXocPSmzL6iEBPx9IhY6Ch9ymehxREyZ/OGo+
cSuyEW9nVEDJepiPFwU+llGof8g/h3ExpvF3GBBF1q8RhdNfZt5NRZJhaQ4mEEJqFTxfXZFVxO4L
Y53CKfN9nzRonCckuAXyxIe/CWYiVH26NtDpMTHa3yiR+FKYP5tOv7KYp1pvYpoFd0yxG8r2g5kq
c4pnAFxFa/qSI0Tkog3YBJ5gLGFZOKYvPZ9dBQuUW1cfU2PY5uaW3DEvE3IK3t5zdEukLaLMd2Do
INWsJKzB4AoVXczxKUFlYDnosJInoMyBQZ854sDQkvXcjAOiuIMGgIzfrAHPeI45wcEK+TSI2m/J
sOrWpFVoR2fA3p5gWvUYKBpj80YEHoStdyedp1THROuhU+pksym3t7sGnzDK3AS+rIDTBCtfQHbj
mnFCFN6VXLbg+vlH4WSYHWene84CvCL5MbX3Vea7FtbDZJwfv2XfGw58t6Xn2FvDVuqg18efj3AA
ai0pwKztTpOeLXWrOdMov2iA6cNvoHmGN4sSQQXYvOneBV/C8O6zOg8MDcRj8pmw4X5JGAbnxXDx
wMpL/R5zkihIVO3AMjCx9EFjtJq+YHzMnrphmpcqNUJiV3KO8CeZjrBKoiAovNV8UYulPzzTuvnF
JXZxAyQwQHOvBDz/hZ9uqLzkyknjCv2kZhkD77Wk4YHJyW/B/DbiPn/w9I7Ap74haJ7dFq3UlOsN
LONizZYi4/fmPy8Mm/RdGFTVCX2oF20G7VhO60DxUQFTBSK74lj890Kx8uJl0J6x2dm7tLNfeQud
KMzc7tuSzsuKyYdIW+TlrfstR7FtTaODyNnMW6+QuuIoXLZRH8zAWT/UwKYiPrOGduHpMByGjNjl
VTps7DSAtisg4mJqgPLbU8HmUFGQEnavGCPMyt06JPxVWmwbjYR0pLeY0K2whNgND1oBCSDRAbsu
SQ9H7dq8gV63pqT7yWVTsGeJ3NrYz3B+2SCYkLt5mGwFxw3PCPsctQhT0E9ZX3tqHxsN6xMUMLnv
6h3C3OBpcBmR6uv5EoL5wB1S+bmzTmkBgkiELZzMn8hx1lOPLbVcadcKck3P52LFgliK7/VhINp2
z0sn4XUf5q+jr6dqPNz/lP9VE0EYGTregnGMf4SAcxRkyz9SECyDJ/m0xYW9OpeF9/4OGXb5creS
wGWK4hUPLDRCzOXi8b1zWHVrdGVLdBbBGVFNCOidbB8m+FBPPrgIq+pm7MHLfEjl+gR+sqGTNLcB
5+DV6Bp5/fUPL/PxeDz88CEdvpvWenq04IzG6qHFBBM/4AGLokd5H5ViGUoU+BvvdH0VbLO4WMdc
q/NyI2xATogU0B+XyzoUMhJ8sMPwpaGo2xWrN1Vsir5ICb1GIEPLa16ivb+vf+2PYql7GGKgOlz5
tlBnAk7RsdH07LRHUuufNZ8vSVOSMqICJIdE2MuZ09Fz2jiXvk3M+UjAnILFENlqbo5dClLSMwm+
pYeEdGdApwWdkT3a8U58+IC9fviQ/YNt6cMHGYL5mkKH4UscCCjD1uAWLMMwX1h4QwIU0E+H15Tj
XTgcn18cgYqrdxc1vCprth1q0duN8w7BFKqCLja/RTQxM8yf4YRbu9BzpE6It6hCY3CHEWvhwwdv
GyCGwLBV4ggvGjyQSlcQWjqnzfBIWaSRZPi5KyPsFqTIlMy/DCGg5qRQKdxLgGTIJ4SyiBPXixla
UX6kOAGz5x/Lza6GtGeIEGgXJICHgx+BnK43xxYcwkVwwIJSg231yY3PusMTriKAexqG78MHaenD
hxGsLJBr+khn98MHPw1LhZuKz6JZd8S8pv7RpdnsC3xelZcFuUJvLv299ocmx3ECbBEhPKADOuhx
pS3zM7SSKwBgeeXTdJ+9ObWdfCwxtwM6M3h+Bil4cA4YVcUo5QExPnfF/KYqLv+gMsuYEjDCaZaH
BG3UznE4ejD0mwryi6lhtD9rhGGGhc5kPAeC0Cvs7RkZXlomwfB+Ab/keg+NZHZCbkQJ2HTQKAhb
2ynCUeYbe9XSrgPLYtW6Cj5oK3UZAx8czJf6y8C8P8xO5xLwnpI2xSnQ/29JcYksdQWmiAI5MSi5
TBAqJgGK2nyNCDOOfiL4pllQ0MkVCIFjWfaLommYiyQrCwZwhS+4oB2apwaYXlTsNRkZWsr1dqeg
bVmwCEFUnU1c0IcRSAcJtBEHloIvTMFe8zUOw3bPeMJ+BlfUsHbFgFEEDC1fiV7j2E/r2LKLzfIh
TUNCVflsjpixyqRhmYMxP0aGxO1WEOLuK9ATNdvOQreaJaXAPeyWR+1GceB2noG63zJAuib3FbOV
bcB2ZKWaZn1+OPvp+0sqvR/rYrfccOPfFpftuTS9dRcCPMrQ7z5WFx0IGJhY/nBE6WAGL1TldgNH
EIGm4FTCS48fSed7vVk/d3FLWu0OVtOVeWNIk84GUCAM6mDybJ0jhSYzKoi9xVKnmV5qrN1qlzYU
tGvDg5MvYkQj+YW1WM7WKppIW3pEgjI9nfRaT67IGNwe+Pm431v1hioIvy0X4WoMm1bVqVh5m3PP
r8DAB6k98KfMn4wMhYo9m2DNW77E86Fanng5vDDBJXB4u4vAon5RXEJ8F5whzAjXhpt1lOWrOT4N
SsxIzcTe5wSIgkdh9wmLcglVMz6fHpEtcQt0hg7xYLKZeyKCxcQq9+X3kZLWh+lOyFu514LmZwqc
dLg8EUAxxEDelhDOCJzNBcKEw/FzAV2SrUoB3IGGGNC5OnqO5y9ppgK7WjimSweyBv4OGiiNAJoZ
ZJXhnwX5OWgG8agbRHmj8gISYfUUnehs5GfmcFnj2fjJsNScoovst9XJQspxIxjYM6+mQppVN2ia
vdtdaKfSkaBcY+WRD5yp70rKbW+eXZujUFTHK0NWVhk7AFmGB5kxDJUHPLQqo1Lar8ONzOfYfbuw
z6jrgUCYLsaPmSsxQ9cLpg0CJehtXc5mjYvd1R90vkDwMm1qzhTtRyYTajmeHHjDWh4w6B2LK7oa
3fQj1vntO0skfCgKwm6aOa/U1C2aduBbAys8iW9HxGtfYlAsTIvuMepIRUtQKPV8IsvPXt2xxvlA
9fvhwNqqK3cqQLPZLZREkp5GOeNTMOMTxK2CliXQqB/E1AVI/gLgfFvWtxDpmx/eHROd6oH4ezRi
uog+iF+/3AUvMebssC4Xc8hZ/WBDsNIOpxRdhY5TP/AYJwgDCvLEUslLcCafVHJ/Y8ax76CX2SVX
Ffcf51GqOizMCmN5cdxlUf+6JhCbqGweLFrHbgL2C4F04eIMhwk0Fs71TjlxPJVwuIIBLyfrHaAS
+1BHTpTy2MJgo7mrGAWn3aNY+xJz9WCHU+b9AGBMg0ddBpDFHDgiQcgUG8/QP/mnCmtGvpNRDONT
KTwiQcFMaIGf1C0bvJVpjmRhlAszhn7GeaChF5+R9N28YguZBVefaokouoBoSUIqiAjomDsG0mFf
ekF6y6LeAhI6PBLO/zJKyd1iLvXg4Ghv6QTPrHOcLh4ppEryorNOQ6D5cHMj15JwYpfmMYBTDXf8
Usczwhy0bKLG1TpuG0ijuCZJLA3fKWdSQ8oSIaoCFfR73xMXL9ST6iubZJ2IvbjtKkbFr+eFU5q3
wnPXvTbLg1LBFYdRhv67vQjP+SkXcManhGcs93EouxR5z3pCEYqpLtVciwdshywSZvNrDUNM5+Nz
P1n+O5XZ0XOX9eWYUKxOC9NYSvMHwazUL13expZFTElckcuuS5kcBnulPHhVguWWfG4JL11bKfip
wzXXq5JY5sDMKMW9XH17vHTDqXiuuQfdT3fl4H1317PdDqAcxt3F1QwH2WbDCHlys49YU+BLFNSR
SzoWlUQfnh6rUxbX7LhHYgad2LwYX43Nd+/EwWVdsC1vrrwARZGCTvEkRoEOTbxi2GOI5mWfEcae
4+gSnZwyTMnhnp6hCx6x/CKrTJiirIs7/YrJa6Kbs0Wyr/wfdEvDMPBBG8nVZ7UVCzRkAiGpxFJr
2TBpJcvNuGtOWmVYMUigPOwkmZZxs7Tztr5KJoB0+peQ0OHPh1FNAkZS1yDmy4LMkghPY/7rbgjx
QXBLwjvSXCQ4QkPAEJpwKj8zK+T64TGd6XmEPqPnfnEj7zXATgHyLCGnZfeT7J7hFaMZK3aJJiRz
048F/NKqSZBB4r+Y04gVIuhtJVePfTnWWcGgbkUzqEFNV3dkMlSSaJNRCjbtrlVrRcwlxz+xKw4N
J4y6A2a2jU+2Xo977QS4V6Nspjhg4CnRmou/tTTGirdXb1pMA7g0M9qGProiPmGuHrEk2ecBQ0e9
AKpYZOKTlbsWDRH2p//ZacCMxyo73VI0nv4BTcq+lCOLwemQqUnqStsy+Fog7GbFuJvJgnZ4Dqcz
XRDQuqG1lQUAdfCwA3N3B8N2qwqCtvUSFyKBTmD1u23PqTf6GEg5zIIQwuMmkJUJwTplAia20WW5
T1EQL1Ii3r35AoMdphjzkEPv+PhYSVypbEa2v8QxAFUStZVe5wgrOVZu2othlQkI87zZrZeOyZBX
I6762RREfrXmQgYnjIgLMV+kELBlhu3tAI8x2D6MURY4PrbwwGfwBdyK8wFJFIAmiGk3zGKrhH38
VMaP7Ft4PvyLNZIHAt/AkffqDdufa2xJ0MDhj448zOZ2y3Gz+K6qWyQkEj0Xm2lxQO6N408BEo9r
whRRf4XP8cpJMPQxLGAHiGV8ONoOpsDOvdk0vCg8++ZOG48siG/CnMtHxduAkQLt1deXVsEqenhR
0oDB9KOldX5DXjNq4dpBjD0c4hSIcWrMMRC2bUaiyS0nv9JsvNoDfAL8A/kDo856IEPsPu6DapcE
B4KpdxzeIrpRG2IDHt3mF7bFaZBJSIAEPbL7x7eUBUnFoV0imasLR7V01iSbnAicKVB+wMI97fzh
J/fkgaI23xnBLRQmTYB8tujQzRt0NdstcLat4wTPMK1agPvO5rDj22JxPV+Xi5pDYRqIh6KIXwSF
4nEgDKuNuCg5wy26BUBgEqpcEOv4mDIPW4/oljbML5ReSNxI0ApqUaiguVoJUkDlzJmAkOUNSF9O
f0KiqUovxF0XbT1fb1bL2jsI5B5jzwwss1v9msWeVfERvLcp9hqyKpSLHWDLKu+brykVKYCCSvpg
22hJ7YDS6vYC0YrLG3KqYYzcY6h7LPY98B7nqvwr5Hsy3x6jw/NSjXa1iZN+mS0193q3lZMc2HSO
dfs2JetcQlGe4ql3yNVoIOOKvh+81WUQ3K51T+WcrfCzju5NbxnlSr7dAmIwLxLNh6KAxa/Rjhcc
U3V2dcxtwusuhUjlXo6zYhxVdNZ2hRWkDhja95FZ4PGwbc0bPWwwn7WftvQM49ocqwzUNfnxe5NG
I6C9Z8uRO//YFjrhr/Bh4HmXVXiQg3h5YNuMvGO6AS6G8U94Y2YzIosJT/dp4suWsvDEJ8vDD61B
+37OldBW53KB9BJOoSpTCEdY7tN9RRHxdVGst6vdlVltcpqMQtSBEBSVoQpQtKUMdQQWw1QfxNzO
DMGY8fW0OWPzvvjdu7wopp28r25ef6jyxPrzHdPQJa5A7KA6Wo9+6+so8gCYjCH1COKTsiAAuKdF
G8NnepLMogY0UHv2UiP99cZBZ0XhAabxPX4BIBqJ5vH0ACHRq+AhLyj3GA5/UD7LJCePVPUunC/V
jNp+TSCd6xO16+1hFF9PlEialNb1Kuu2WxcsaEfVweTJhu/SJXqeoIiBN22QCBqsykItIPP9rCOD
DpfLdae/xYgeWngiq1pA7FSOru4BsxX16Ijcx5F7A0QoSS7poilQeiqB9SsXGXoL1vQaM+2UtOiF
bVDiL+4KDuRC54QGcnAQUGex2txF1bF7ezcFmArv9cw5NgjECX7v30b6zll0FUnr9ACw+aK8KxP7
eEXJPKjx8QwikwqN9/i3I1MdQfwjsaqCbVrIQC0/SRzhjhNQogdFiEsB2+B4H3CfLh1GqWLLKNVw
PgcRAPIELIOGLh74cUWGJsRygMXHLErIzOqMP7C76MWwfUjh6tml346XZYU27iFnKdGPQbXZNF46
Gq/jutgCulz/aYv3ru0kyOS1Vcm7QpuaFlJom+M8WNEpw4yLfDIllleFBZoThSJaeMbtDeCz7SWB
w7Ni3+XkawcuhlQi9wVWdyGS14Dm2nbPvHmIwV4/2+q9NKsShAzaUDEAbVecPVElDhCGk+vHCoZs
ge+2ITms5KTV7q63sBYJHHg8AuqIcvKvpIc4/ZY+VSWn+uT66ZNX3DdGtqjs2TsrJ+Vnp+dtqlFb
HvkJ+YPjB/uTtgMuyWW8fFSpJRWjhF1A7T8HeSUylCC8XQCiAUnGSBLQTnzg2HiCaoGAM/W6xARl
3qgg1tCZRNI4MPfnkPVtLP1Ez57uIsXQyfvreSfzw0jqUXxNPRhDkgdDWUrJJHVmmaSrIPQ1Rqjh
1as9ecvwzGMMijFPozyKQJEx26xug52NfH3Mw938YYRYQ+DmYu9Ym/katnS7RQsNP8ji8lhUGO5S
FaA9hc7NcVtuijoTyFfVBrq2HS9LsxUfi8oDwJlfgdiMeRjm5kVSE9fO21IVBoJKa+XtXbOCgMKH
oeZT8ZIBVM51bb5XvqJKke42MdSZ6/UX9kdHJXg0zKrKPZi+26K6KnIM/AHBdRjnMsOU1ZxjKq1N
J19EubypQaUvc6qkKONNg8pOiINMhyl3CnxgEFyhCHd8GsQzyE+fTDWQkTeWYDFcY60VUouXCqBu
Xxfl11mus9aMlgfRwwT2VVviy/hwpJGgWkwpj4O28uwbemslBebY5uxtCXPpZvh7IdshWc8RGUde
c9FzBnlW1OrbwID22HdnrbKFuyDX4iCYNguV76lEpbzsoMn9IR+pPcK2xIbMuLATYuVfN3ELwzpQ
IQsDlmzNtyBwmj/PzuCL8/M0b2DEpJty62LCPbRHmw+tjVMIsAX0yFXyysTPZyfn7UyEnaIaTJ5I
EMor+kiBViAsUZEiAathjM1j21zWuolyWR9iTRfiABJSKWkGU1o/ABGwAZ69mHEXDTJkgy0vH8gC
TDcJP3sMRg2xUAAx4G01+eB7QBb12YRi4DbVsqhm1Cq1p8bgCymOGZ9tqhnBZOO7aLM6CJgWe1FP
ehFb3OWHaLV/zMtyLx0ibEuNsa4ViWdSKHjTAa7dNmeZfk8j2SniJ8qjsGa/GR4ogggVNCdZVfb7
pikL2bClJFtHMu5P8O3B2p3HTba+aEcJ3TzD5YSq7Yy13rn5F6EsMNArjMRCYl9kfZFcufk+mT18
tQDj/kwV382gQd6KirMGlW4Na+RVCFUIVj6g4Q8f+dzbMZo7M9+tRO9JjQWAOilcesotHwzhPKAx
kgzMJcqilkdCU2y2mRBGSfPqV5BKj5NEkXgxm/EpMrunwnwAfHwHHpB31UbZ7eMzYW1Ghx2mFtcj
l4Fh3960B8M6A6GnNpA7AdfjIMLPqEgshvJfj3mNEKB/0dxPua78fVhtS0PlwwhfIG4ren0cI5TG
nTW0n48l58I5O++ICB5fz+uZpCGetPkI7Yv1TW+yD6yKh2+9yaQzsvGR/HhpzlyTqI8yZolmcXNc
ap1HUh+FGA3ONVHcl+RRqMeCJlnQWyD0iqjCzKFONMBhg8r6iE0w/PgSwbZZIVmPe20anbNL8mJE
zol3UJLZe3uQ4OxkE2gV8nKUte2DpYhjfnEm3aqbpD8WcIKt6q5OzZi0l/PzayY8MDyrLjIcpjQr
KCIFrsGRDjDOELcfhfmR0BEs+3g6+/hscylUQzK5anWcq+1EtNYzKB6Gu9eJ2cchZq3CYlIlqa58
q3YS6ZYHcICu6RAsKBYszAagPmtst/Y4NVd+aLP5slM8+h+Jx6lOE46njdxio/yKUYM6rBEd+bL+
5Nf1r2vwjaoBFYngmAijIh8OoQD9KkPZHyBIgGgruxSzeC2sOzulKhzZl0HyuV/abyYhgFNZi/S8
qbwJhnJwx2bEnEp9NaWsTDPrrW3GWIKWjHbb5cotHdimMl6UjY7NoaSMUSZGTNzrIeBBPmnINHjf
zGb9BP451kiybem24Nu+VlLAlYcvc1/LZtGOhgeAwgT1FXl412y2rxrYjOSz6Bna2h/Ax+0V2jn8
vbJOPhDEckvP0JwCVgZYcuCtSQKnTdYjlYkTk7j+/3XofFZfSRKcBAy0xB8+4CQ+fBhnaUia/ivD
+xbzpWFKIcqSQOtIQw0mqHXhfL+e2nm0tQW1VwUrrM1IwFmdAu/RqQ93u0AUCPPHODps7TeAhwR+
Yr57pMXzoaAm4I/nJQ6asodECvnOoBnfh2BkLVdOl0IeFyoKMcW/WuY0EN6J60zF4MTxZC3uDFia
hmUK8QdkAvwimt9xBXWOKt8rmDLGenHRhwQHdcQnKkAOL1qKqsAZSpBCFxtZS1hk3Uv6yVgYcjhZ
p9BePKtHuM5MU1402IhspmlEPvoF7EjcoIKUXUuM9l7GMYQuMPPs/NGAzkEjToCQoiqyCn27oqiq
kGCTMSFod5LM46jyT0j/200qzAQv816oDPBPIakJW5tbrIwaDWcc8TZvUsgx7J7rAc3tTXGogL5S
Dg6A/McpzVRJt6CCCuKFJ3taQ3PghA9QemEfFBQyzyMAN22PtuqKmbKsvDWxJYg4BklwfI2/dwU6
AK+8kL12VLqAP4f1QMI4wyWa3RQPI8zr7gUZ+8vnt9CFPCO/MqUNtc3+YGgtOmDIAuX7J53AcS0h
0uK6DzulTJu3D27+ai1TAd0hulyQmsA/kwl+zq/fahWRrdHbUlWC55ncC5BpvXlMXe2kwg8a3GuW
0ekRqgJ19fmnpmZMI9Ksnf/uh2M+srZ4h3sL6gUXtxwaZ2yKLlb29SJFiGkR4YDBExqwjUrTGtxP
gTxzwLjr4i4LHTXVAQnoYFeyV48Q9aJMsvKipPZYaDCSWDl5MrtkNvc4k6LjuHRrCSob8cuwUuIR
dAFRZOJAbkjILoBlPcrC4VnfBozHvNtUN9Y5ow/D6JtmYXfroJ15zXk/i+U4PMNRH51H1J865u+w
efjGTuxNq/Ww1idqxTqdAoS7cn+NUUVs+M5wzAfEz9rNf6Sk/BgLncf5JMU3CyowSQPeRliahGKu
6czIAw/LE2GPPg5mul2hd37LPvX0ycihkAy5xUyB8CRMNinAS/CBeGr4BCgNMZaSF1DpY6k5Jn7I
Wg/AMbjdmIdwC3orzCzrwqCdhCPIL7vbC+AfLhVngf6kuy14e0FD6pcc3r71w9CFj7E7T+jzTdbx
vv3Ze4VYh2J/jJaL4Aux7ynuLIXZkM9AH77vj+Lvba4gLoAd2h6xteQ1FnQiUPxuyW0SPSZleK2n
XeYxtlYWBEBCzmw7BsIOsESm3/G3L7/7+qfv3597Iih0aDsZsvouJV1wxlcnYSi5j5y30V2EaB6k
PhtvHwa1YV8r5+p+pF4OiZUvaxSAgyB58zStFA6GFwdFbL28PLokapDuKpAclrNZ37sCXnv6z7Gq
IvukRLNI4+SJbW4WlEPETcMJcic9S9DVID5xs0q2/dm0+xLFYM3+tHpBlEQAw5WLftT8Xc3v4KNr
ewggzUxVwCyUvliY/nsmRQatBDlRc2arzWbisTLEDHY0LzNz1TWcUfnTXV5XJLpFpGeXyZ+5VZ0c
q2r+RWivY8r1zEu9utpU5mzf4uUEd1KEL1lzijePNbMZKchrw5DGsu4dMea4imcQncJ0epJJOuQh
BTHT/aXcprV0Z9pABo45E4VQY4rsFtfo57nJbg3TdgvwpY6u9gFbk4r2e0fs7YF9iLqvpquf8vaY
yFkyT1E9c5j/PZHSHMCoeZTm66siP3HOubOA7kdtnVmUUbCPxzIgQY0R5Fgg+EEjHAMG/IfeBMtJ
QJlcgaGBH29kA4NCCbH5DOrBsOB3X3Onlmk2J7AcWq4RhaSN/Eki2my8wmHV8mq9Ya85XdcO3BoF
7JJ/NY1XGm4SXBravuz32fPoiuBPPevqM1tu1gp3IoKVpkIkOLBT0awmIEX8jB6oCDtEMzBN/Roz
UqtyUeiT1TJfdyCGyQGETkl2ETtZwmAKyYWWjsWaJDpvO9+2zKsAz48IsRsZGuH+g1u6p0tU8pNa
OWk2ZTRU+/OZN4lgbaCEeNbrYsEawlPgpvOZHoUrKHto95MZg44dJADPmbeelj0wkui2BARBlLyA
vOGlphCiHcH6hvIsJ07oeQJXIXCFQLo4JS3JzRDghSGhD+jc/nNBg2W/9y0b6o94EciavsZcYRL1
TSux3qCpVR9y6EzXVqslNzGY+ATKLj0JHP4yYiH4L5gH4A9qU2hHKnGt074QFaIigCclzoazvfQC
NB0hkA5NpwydxXl0orwOhousK+GuxTomrNcperrG+dNYlBYLMxQ8H8MWZ2qkuzhXFLnT0wxpCMC2
Ul3VZVpdmyAd7NpYnncUrslGQOS/oxxd3giuR9OFO3UOwf4BPEUl0fFw9NKStvfutbvK/qZ9bdlj
xAL1l5fc3O2vv3YS2KCu5BvD/Z8cQJph0UXr3z3odgVbYnv2N6lfLfoQZkdPlkbXTX/Swy6tX/7Y
R9TjT5ncktrB/dcKbIczP5H4LQQQ9xm17qtViyM401wkkYSyAFj3lL9SaLMzSfJDZ3mV3ydYFbj7
WmTxdEQLea4s9n3vgMBWa2v3z8mRaY4SATSFhckCymZY1uXmNqNne3PppCbt3oVqwzUHGykwSWul
Fm4LQpoAnOWqsC2qRtxK1htSyBY1PF23nFsCuPhttbmYX6weUpYQDyUWPclqm9wgifwFmvbazxjg
TAufOObxQE9fzXpOp9nJRJzK/dg7xaCDUis1+GHsvapbPsVngXQrj2l2lKnEunv6eDYh4P/QN+Mx
XfAfi1XgdEm+MvCM0RW9B80QZQGzXti8VaA6UBJ20km7vIzdm6HeMOZI+VbDr06dE/lFR1TAKyIG
aOJ+7SlF10j4DTWZS3UZMc+p3HvvDnPz3uz6nO1UKXijNNbO7e5iY0TUVwChVO226jF06tR2Je4R
QuFbMCYKsSDULIIPEDzQHD08L81ZuMnKWwTRjxIjHDksAdLaIwANGw0iv5VeykeQYV7N7j4jxw8j
7mLBAf45mGSYRp2UKQM6/uZLSmnN30oP5nv2mQFk8VHvP+xboCDBCQNEg8EC4ofcezsOZBWUMdIh
g/iHy8NC51ufzGzgwVkEWVhVJtj+bn2zBp0G6Sa8J2/N+inO7o1X7K//2y//0+9+97vtA8BgCwTC
X//3X/6ff//d7+D44d+Urdvs4gr91DjlL/yMmzwzrw++jjPZaVBazqxfW4+KcernVXkhxfgbcWLh
b+sH8y5varBQGHnSFCjBH3sjP28fqDkZsmvs9tasjPfbmL6UImV9V66fP6MyMDz54d3s1bvvX//L
CD98++otfXj78o889E091l3NL5hUGa7ilolWbb4c0YUGKa5elhX8AxgS8O+qXN+MIL6EwPJg2w2T
DfmeIM4Is6N8leXPRycKRfB2vp2Bnw5iQ6LNBuSa2BCBBQCRVhUa9txjrdrBdMNbCwZtliCnq+yB
F9rwBPHq9/EIAwpEjh6bGhZUrC39upmBb2eQzTbwp6Kl0zVD/xVc9SkWDBxbdC1TQv/ZlnDdXIsi
wg5GFzw8Fqkcy683zSs59sWSrxgm5oQa2kmTD+edymi+bjCxhDmMSCDH6MpgmElMvGRK4l2824EH
GRlzdUwqLzI2YnP8xXO6qjacGDiN1YC/U+gLwjWsgjTOf9/pX1XbA6dvSsL0r6orO/2rvdOn/GkQ
pJW2yPHtTZzIsTmPt5TBwDUEq9HRkrn+h7YEtztsyQWIEGGFJvJhqiNDebo74kSFm7q8/xHAVIiq
jeHzN3Mb9Ife0dfgeUu3a1eD9IMHAGjpYnoSHBHhquFa1NflVvTgFLYP1VFKxRZ8a7r/G2bHvNBy
CwK4ibbcj8Sin0A0ZRvOAh5FiB2NxhO8gotdBcABq5YMmjt0eTRnCgZXLnP4xy33lfyKIzY/47+e
VtL0ECMC3FtXKQR0yGEdNfA3HmpAW74xy5uGGwkKtXrDJG6JeXdwR0foQn8/HGVILbybckhNPJq6
sksUX8yXqdOrSAinb4aMYvXD7cXGSOgQJHAzTiUwbR2NdCSm7qYigcK9djfwcyOAbZvVMpG5dgt3
gEIcs2vDxeJAMFkSJ3Slk6UH1joiHg3w0NyXWixvYGba3thwRUbABmxWRlyenoYXi3KOB+sVYIvw
uubbTbluWNetZzGMCLX0F0BOtc2PR02Tws6G4eK3K4AQmcrmhgZyw02EXlaYSB13xbGKdTZ4OoBr
PV/dzUHXxcwZthrc6pWNMCESOENiZ1gE6o5ho6Bi6GK4WhaOxrYVIzMuFjXz2K0psR5ARAWuNEYw
LWxr5mfCqs4H4/FglA0/XRtmJrejNd8MH7kJ1EF0+lnC0DSLbVI+QxHHohJ5N2dHHT6ky9PM8hbm
HFGbZ8/OtSQA39m+fYro9S7vedw7Pyt+9/QaTDP7uEP/1GwwAPyy1/vu3Td0zqh15CrwXbFvHQjH
wXMn7+H3cN7wPaRmlAjOILcQ2wWMC+EFX84XiMwjzDz0Q9eNTi6wA4Yp1wlszIbPqb7FyR17SMs0
kle4Wn5WFfouTPiJZBVlfTB1bx9onUFthgrsCg1i80yykADVFRy6QW3BTgEaDVheOKdqGC/w7FW1
8Afyt+ofmXDkRAJ63+FOphJnQU0/OfyBuHitzSvENWnbY5sCrino0l67l9+/efPj41tftTTfMmlv
GRNsaCsrSt2MFd/oXO9jHrSVD21vhuTOjoZ0A37V1Ot/AP+a4mGbccAZJ+S9Kenai/utuX9AhyLg
CrgjrwhZC5wudMgR31QkDEKKVFp6/B70xRcFXd2SsLh00DKBlUBmO1vt1SUrODiJGyjoxFsmqC2e
qOlW3KSEpzV0vzSPzzH+UjPeOruhaBdVI2gV5PDqzc2+oIt5ZYSp+dryADjioD7ih2EjoLGnlN52
zXCGAl0O6Cg16HMeQkgNmGxxPzeF0Ganao2znwCssrgz1MqsyhXm7wV/OLDtpFny0l/YRBwzP4St
YuIGFWmLu6XmVVBmtS8RKRHSIlFbX0RZKk+tkG5Xe6kDd0G5AWKbgNv6VKwi9srvztgVzWOdejBS
1jTl+9i1WA+4wChFdHKvazZwBWer3+3y2wdglM36uLjdNg8ZTRwvhH14+95Vv57X163OsvBj3sL5
z2bFXy2NwBdZi9GnlikLNqx+ppX8ZOvrc6G+NLRX0UHtn47R+Ss/wL8au62fJSsc+BIKyC8QvnCh
YDzQvl6eddG6PFa125BHMVTmmasGVs2+Brxz9/v9y6q1hY9r/KtHNA40yL6QcfNKSEUFgtnjAZYZ
mK/ZYYIgK5CYYZKluRFOoLVBmmSJB8JjzhUxr+aq5uEAdYtyleP63r4lX2+Y3X6VnTpVZF6tOMbB
ruNh0np0XoVySTO+FBNvW1Xcbj666LfF9FT8mmZkKkq++VQLPZdWGAFqH9ksx2hd93dTFQWrbIzw
PfaWRvfjnmH+mxzxsCf9jENmQ3gUbUOGgaCGlq0vG4nGqNkxzcAUkwqelGKJ3ZE2IPfyrDekikGK
jYIIyAdmk5ebuzqlVEqeAD8S4towY/nnn3/JWzAEGKhFA7zAyRcnJ73DNFDm27pZjuvrnWFkxtUt
rHyw/Wl/Hm+7vb8OAa5pVyPdomHlUDVG10r5q9S1PB1KLdi8VpUW5/Owb98In8HmYVtM+7fLfzTE
ZHG9W9/Uhs2d/uOzz599+WWauF0X98vyqiCvEmiCVEeYBglcImO1fvRyJZ8ylvKhRbDKIcLBMplU
zwmzSRWnTIusmNfz0xZIY1cOi8UMiOnf/GbEBuzQvCtSwz808K0i0qaWW9phHiG25/6DPMrSwnma
kfoW7dNgVM2uzf8DJDttK+RIg04hZZV0PtLJpYRxAcNwPqguBsN9sdWn8bJd7BAVE/WnuT0uwzac
NVO8M1QSBjq25ykh+mKB3RaCqXPT2J7QbHOBVpu6yNWhBwmBDjtEGLZpRhFZEUwFbAIlDz4Qjjy5
RkSSy83K8FvsyGiEMO1CRSCNEBmDQZ7IZAAGfj2Z9ILpzSdPwX3hKZR52myezp/i1SnuQ6/B+/sO
xnhZlR+LqELwP68Cm333hdWpuoLlfVAdIVO7qji4nlTGW9KkeSFEPGTpD7jQO2b0nJY24oNu7iLU
RCXOJPl4dTo1iCSusjWZO3j4kTfPEWHVWBjoiwewQgUsTJ/akqZs3bChvjedgfw0ADk6nBgU0PW5
EIJtmKaSddIEhtELxHzGoBxPKkBkMheo43XbXkAoxZ0GnPPH5K/VAcINrebN3RnO4Tz1FqRxux6B
yYPScmOT65jPXrrQcVuyUBqb+T37LAtvLIzYzhoCTMzSUCm9o/mAj4BslowBqstP50H33m8QUePd
/niKwX5IVXuSh72WknWxHYyy2DzhXyFxOfGPeP9JLs3XkEBxC//Yfa/Do6Sum5cIgm8Pp64yHwPy
XReoAULOA5OQDKDQgHVuoUEwAFpFmhwQAU6y6CatQ5oI82cFvvykv4DOuMZgNBhmCt4VfZIJ2iHM
86wBXqHVGKoNvdPN0cKtHSQc8snjGSdwdnKe8IJ0TfB2tzbi25eoSUjrfFC+YNlRAr9p6jaUbDsc
eydawMxpTGkC0e0ZXjJsEBpcKwiqwKvbhmxdwnCOT9u9zD1azHfd/a3wiw53XO9qEgIW3JtyVk7a
s3fIWnqUtSOEgA9LK+FNHxygt3sbNYWGv2Eh2t8cOEXJh8cftsbXUvYPpEPM6yGURFJ6Af7Nie5I
xuDsG94Pjcfz9QMbxMkqJ3zd2GdGRIl6GDeiNbxAs+lW5mmxkWI26jCTk1MV41X91AfLEA0KcDJ2
TbCgWhCFstGSSQeoKXYFfjG4yYggslox3jwIZR63BMKHIYa0QOQUMD0FPQcG+GPeebOl6LUI+ZUy
Cu53jwkFyiB1BVNjq0VBrz3ZG4VyKQ+niqn0mUhi5m9PWQapLXRmDSh/rhvoYgwhjhQXD51pB2ag
IWtgVk9GcN6WzKMqQMSAFGM03LSTDunvYLgtKhWnkq+u2krwWHhUyVKU7TwRA4PzZHzZE1q4XmI2
3EsUjUYD4wTEkXdDlxoEQrEhcdtubT1ryIcDQbecFmqcilPzuh08TdB/V0gSdJliwOYEQ1RngT99
hufuM2/BHyWStDBO0f1PcUMos3PyjM2ymA4qM+hiDTk4BKyvWC82cFtDxD59v9fYEKILrYrQAQga
VtZLMCBiB05ryeIyOWx72koE8IRErq36SWorYQdzKdmoCKkvh35dmlpSwZzWhpUb1HOEquFbdJe3
SyUfhgf6jbW0qO0Pl/O6UXQ3cBpbXJer5eFHBou3CKx4IIkEsmdzOAmsvc8vVOLP3I8pzSHqlZNq
lBZBWxxmnSNA8qGDMHbTHEUEhTSib05Cv1VwdZYmIxJh0fPs/1L+uHF/0iwc/kPbxbLUsLX8jNuu
bL3bGuHA2iPpqAxZK4/rxtmBtxgOtobEawJw0P/0D2fgom6NG0KRyCmibmRegC+3YrcFAI5I3Xf0
rXdMDgY2rCEQYLupITPjAwsynHGRCYD5LovwtBCoQmpRirbWOw4NiNcChptSlEMK05dhSzq8IXHG
bap1HQilwhIY5JGvIGcnDYyu1qJuRnu4QR3OA7Wu926Mnlw1pWeBBlMZNfjK+2PDwmldBN4Y7+YP
O3WoZ1j2vO0kh6yI2aOp+Cd89/oH8OsqKn84f+vuVF1JTRIAjy1LlE7bpM4YIqCvaK2GSegxiQ6m
ImFSTXM88dhAbnI8rN0iBmi6OxyT4WdhaHcQBr56oDQDKwlQS3ko8wqiJxW0oBx9IR1QR3+QFI0V
1ywrQQUZARKRfR3e+hmHMCyaaIz4hyK3kTKPQlkSFcAnCUuP202SsAieXzIX4YrKbpnYR+sMy6Xt
8zpOS7QSl2xKfDKlOv5xM0NH04VN9EuF4tQJZvKpwNntA/zSUjeWfckGvci3w9bnZus75R/k918V
i2SDK2YW7lWO22GKx78PlpSrpsvu5dPaQgxOW6gXNqqd2e+tU34KXK81ZNnqLu67zlpq7+8xGP9+
2N1c26m0c9i7HOlj5B0lOxTltGBVBPaMhWQASao1U6mL6Btf5fqIVCx3WwnG4SHeHz6hcQKFF5ZW
nFJmd7sVZxkIDL4o11F0AiDHOAqJoC04G8ED11MJbJh3nTbMTg8C6nUMA+SxXeLwHm9gvL1xnFlK
7cRC0z+k9U+e6glkbl/LJPNNqXxaNwdHNLLbvfV2ejuMd9lNBmH1Z2Yd5pW8BMt5M/dlzWCCBMVP
dbC028RxSo7kFXGgxCV5h8Zi5d9fmMR1Dk4Q6LQuw23GSeUwm2G4OhAjHKwNSZEHrBLUDdYo2+Hs
faQNaXLcLo7LwfpPXb5BSph+5IJ6a0najbvBAWv5P/hRQ5MtWvZi+i+onkr4KOsLCPanFTrETus8
aqvNoqjrDOv397gfxd3icW7p9fHjxJ2FjUHFGe/34eYBrqo7bDZ2hATIemWNlHLi8lY3F9LOtD4R
/rF8LOWf0cabfY9CHfiATYOj4ruiUxEq0xolEVdRfEno3ccF9MCGHU6CJ4dE3GAdeuSS3sUu6OXl
L6/evU9pdCFrEjBvS8xpMkGR6KlpkK8nBehyrGFzDVzgUz7U40RrYHJYAUwPgZcwdhn6Y6YO8J45
x2gj4Qa4NAprle2926DCZAijH+ZkZzjmpx5pPkn5WX7xILEdhmj6Qgyu0nCMKaI2O34bIFo5dDlC
Ky+mux2EjkprKwrCMqGH55LxTOaBESxNADsZkMAgYtobAA5mWkvbcigTXiSKsqbraMq0j+ff2pdq
aC9wanhKypdwtBGeCXjZYG2DDX7rNOo1S9OEMRuFyXIjcNGhnXRcleuzPXTTlAkULh2ULW5YxtnS
wn48nviWdIOre7g4VpOZivaLFnTVvqKPWLFVcsncRheNU7GMSN2SUqaacgk1C3m7FspWTAbQAbYz
iCA/c+T76a4P3d2EgzuoqXOoA52BTycHeMHXyibj4rIIbxBNpZh8Bvy8XM+GPjYQ02U6BPUXvAlt
/I3tOZWbo1sI3EHVyB4CX+4/mI9rND8+lZajE+ven1ev/+vX3/89eiMsfDwbQ9evsoYkgi0VgoUK
DNhQNIUKGtyslnHwX7ez9EYpR208esc6vH7z8vX7VBPegewAauhU7coseowBE6AmySLN69nibtmh
vuR6mSQRI+hBWsJa3wFgFJa7ioUkx8tjvWXWB6GkL82NszdriOAC8gUAUA13R3E3sAymuZZYG7fM
tMMdp5jg32AVOplGU8C2pTEuVtZVZa8jBi8LZYqj6BmN4lCP21S87qiIScnrOEUR54donOcLlDYe
rWue+7rmPdksBhgmmj+phoPsie+PoZqom0QLWiWP1pwMeipqgQqTUf+4Z9RBbxSa4WzwnSA226JC
WROQIz0YGzSyg6AKH8jlxedrzMG9KiiZD1IHI+ChmMORY5v6WPyjsIk6kVfGQdhwaO9+tJpEbnGc
og+o4ATR9w9bkUNxIk8A29IcDmAs1zIJzOGK7Qx/G7JNWsPdQbgw8kKwaXzZ75BajEsTmPS3D9ub
qz0XFs/TQ3MNKArzxc38qrBuVKvNBmO0hWGAS9RTrvj8Auy2hpVeCgJr3UBMtL3187WNXR/r6Oa3
jsVCz6qMR4oesBh/ATxHA4dx3Wa3lSrB84A5SkhElY1BH8+cHZhCptgXUckM36FRoGIoVQzU1AyX
zgbQFjEt7bDkji9G0swvVhAwjV1YE9Fj2nNrQq1EGlP6PVIF1A+1ckKk7+giBWZcq1iKdUM1q06S
vguuEsY2kXWzn1zmWiCaQWcCjbV4kvGvYik9KB4Om/9kaiufnZzvad25ktXe1WLEEUtWMW8RYzLo
JU3JYMrnEVSOcwFWBCGb7iIBbPpOTdyJsLwjvKD2dvKVIyUhXuSe9oSANLFNtVuQVL9YGDkcilI/
hPZky783zfEPiHBg7uJ21zyF7ij3Ee+NJBiKDwfP3ZJzKzWgKyXwRQgkjpOG/CFeiBKYnfNmjrRH
3TAO5yRw/AViEA2jYFPzXKLcgMoC2UOrb4EANArnna9AWfOQ0FLLtk3ljPrjlgWhH4uln46B++DW
bZrcpT7N3ctFUceGWfIEtw3CXGKZa+y57nwNleYo+QgGHDcbWvcR1jI6goH+w1Yjomtfn4gYSMlO
9H5TCFd9KsUT2sjQ7wM7hsA5w1BP+31BveKmhlGURIubOsYDIJ2S89cSh4mFWyD+Za1MK+P+2PqF
HBa2a+feooaNO6E115FrseY/IPbeXyPp1O9EhZByX/7vFGiqqMEZFzvvpdSZLvBCLa45Kv1WBwPo
QOEb46kGT2zKa2Ko5x1Ykq6umzav/SPKbMtoiWaui8KyOuPx2BCKTVYublDmxdbD+SFBmsKnsWBI
Jfwt4Iez488n59BXPjBzWgxGGfy7SQViee1i3UkYg4LGfP51bIgNx3X9H5AhCzGZD2z2v5xnn+FA
Bi3Ddo0rHz5Tx9ugNEvkb+oneze1derPzg8Is65rddYtpgW3skfHn9AteG36ICIebwYl2gxbOJQU
bhlfg5FMkn0qe+kj3qFW7kQT23fv9sYa6vyuCyMIbW4zO/IlZKHZ1sVuuTkmKtASZ86uCRC4XY8J
EhzkrDTF4Gr2LjHXmHb2SU2QDs7hsGjKJgf5pMkb1hOazFgg+4J2YPYXsMXmB9mr9+1AWgGdOAFO
vftQwziVueYjmGu2YA+YbbZN3aYyWG52DRnqMWyFkmdTIl3C42N/RTZRjYLA+aKibDAE68Lx3wCq
iZoAag0EFF9wB2ZRDLnmPbeyf7n+iOyihIY219Vmd3XtxlJfF6tVmo0hXPHdhW2X8BV+JDf2H1/9
+FKHN34kMG7nS9tUGPfxUcnQdu3OBrROFNzqf20oBX7tdQBjgO/QGnNmz805BVSEO6NCcqAXkM6X
lBYa2kEE0N0auLrChzCWAnfzsgmsrQkTNjUe4czh/idt0HY0e63QqOVogJKfxCJyeiiQazphfbUT
Tw7H/HbIcDyeVUyri9slnMYxEdoKPMLwP+5SPyoHr7dTo9g3Hn/3ridGaS5WtaTDIM1MxeIfRv10
BbN4GHKUYEqpW9hGQrfEN60q59wfv37/Jz/gEJVzKAnSaDR37e9kY6UYuaTmfrNf/S3nXKVZjFEw
mrOPlO+LOl+zngZnMGJdTs2zLHwCDfafiaEVJRIJSSSD4xMoQNBAoXWc0EtT84e844CRjQnLTPnF
w5Up3CoKUSxaQoeypWPpEjWkPbb32mitDbbTc4WiwJIi076YMgmgrN0ja1i0Yv2xrDbrswEoggfn
Esn9f7ZHDQ8GolOh1oAoAwCA/rIj/hePAqtZWoOTW99ffsfNbrJMo6fw7s/v3r/84e2bN+8H5y1o
BXs4mFbUhAODm3l5z6pibJ6cfPDkHY71rRnrk8FIjZx1YftpC2kgESKQmj9/hCNR13abO++2ezKI
cCjmy2Ucwtl1vrjOZ/6eQD8vf3lvu2KpIMZ6xtpyMPr9YS+tHG85XhiNuVwCvwIpFbGxljWJ7uv9
0EnUCCBOajZ4lLHFx5/QQ+57UJ4J7WTviWBJgssn3ekf5TXeSYM6paCvX7x4+e7AO6TdIPgOY87H
KcEkc2Jp/nboA2Bcb27B+QVz40zarZH3ATX405sfXio60Hn3k7sZNNiHBr99++q/vuyfU4ia1xVd
qMcJTOGq6LDFVZ2zU75ag2C91C+8ZkfybB3hez5fMQy1VdkC4B5clhBemy6W34ZkWjI/b4FoBTsQ
WH6onYFh8AtgsdEPWgw73sxzFg9MMVLDzs1fu3oHlmPrDKed0NMpDdQNFpmRWwQ+ED6j9YWa0dMI
FlH/5Hu0w7fEmsFv4F/WxYj9qBgxL3+AEUGK+hqN2QcsDad7lXXY7CqKn0wzJiTMyMQ9zpuG3OLU
wj/CSiDb7PZ4mDqJabuhXW9eK4TTmQLnzO0PefX592Dh+Vu95vObYkYpSkwffOdHoIi/LO+nA060
dTzwN2SU3RTFdvq8i1NfQYp5sL2TWHP6xbMvT06GE1RaNHebbDl/qFPbagSsv+60MwvFKEgelSvc
JTCazNcak9pX+83vy9vdbQa5i0nG5dpg+K7r3S1bGBAkwsq880tomKY+jlN5F1uo3lRqcASYqYe3
Qh8FGFtuBmG+PIaK/sMr7DvBJraHtf728+Qhm2/n5jzDHueJmE6MhIECuJaSq2bXUKITZINyso2g
aMHeALRGQ2/AyOGvBSskCcBmGrpYa5QrbucRIPkSNr5u8ov1GYRxSxvnrfD4znu9jWer617Pd/Td
NbQicpJ4ZfiwoYIEoEYsloGqTgOyX4Dd3bQSmmMI6TD2zKRTkwFKUK+NK+W9l3DYlM2a2gi2Pmkw
MQU7DTneoMwHPtcjqBmwjEdITugkGdKbJrjJrd3RUZaJkds3LSTgDQC4Jfb62enw7+UMnnb8tq8A
h0slGqIE4tltMV+jm6QhMBjrvqP3B1O5plbaHoQpr+fkEZpGd4qo7mFpVcnLQJ/NHXiswxt+Q5Za
noy3XeTpGVCpbN54Z5xWC7ISergKiuj704Nf2G4Am80uGDiSQBK6fdhilioCvYeULJFofz2vMcRM
Gh1lAxXRmbKsSEkv8hPPFfR2EOqYbYKjh1oqA8k1Z3xG6zaTWqlLekSGLXO1b/JhVpfNDlVBI4qv
Eacdu9j19Wa3WqaONoEnQwVcUIaiviuZrNNB52YMCa92a49mqZbK+gbN1UXBfo/mXnoslPn/GkTG
eWUO/neIjX+XNkuEo5Kzxt49OMe8HJsLdFfwq5xoyHodo/K7QriIdWmYO8687locjlO3Tp0iUIvi
jnU9LYc/RfY8MM73AelZ9rxBXhwGZvw0bV9BqFSVJ47U0LvZZlOto6kwJ72AgZn8ZzwpZEOAPN1T
YYqyYxxOiyANMPAKcSxJJA5SCjSQ1GB1KW79DDvQWhoyGthNPawOqwY8Xhf0Ihd13jw7bk6H2e87
aGIbDccNrW/Krcdokm8etFYsD1MX7Ne4YU909+imAaNnHrba3jsLnj9YXQ4evwXsAIwXhPDX96gJ
u6JA9j+TyuaHtMMMHb0L0LnIW5dR9jMBeuFf4ErQrVbpBUwOpv5TlaJVgMwm7NyhFRc/vXv5dnCu
SZxpaXc/yiCRzOpv0J109Pf6a9DLQF8pKP29OhPV8oAZ4IFbj7paZGwE3qFWxD2ElDupWpxNzH8E
HfN4gNY386/5rzTdEYRQj3drBGdYekmzedRv3iUG7RHTVIvMAuRmWKMs2W7ODY+yEDQ9kQx2mOg+
lOl3wkxGEncoo4e/c3o8B/9hB03d+rkJXDEwp1ULzuaI5VVLFockaMxCq5tRERa/OfDXc4glMYTh
CngDtBxi4UvYe9zhENzdW/RLPgmYffAQBIV9KPB4mv5GIPi0Ay4OlRm8A2DfKZ+iCu+LiuFYbQQv
waEpZ2Fl3RIUVHgt8fuzk/Ox4bVW2+u54V0Yych8iTipM83k8uUkDFqGF+zP+oAXO0ylyKE8yJwl
FLqA53zY+2v2y//CSdDLzZi8t+crXIzqr5/88v+eUC703p+K1dbwkzYjPeEgm2KsfpOaZKhCheO4
h3nSe0Fyc5XMfPvw/IZd0XQ6cJjvV9PsuU1ozsZop4e7b0Z8JHtoJpmZTmcLdGaxfkGLzcowsLdz
EcI577gNVwN3NsrlPImPr02sTm30FJVIJoSIjrM0gGMKNEHRgL0cN/vyThCl6YnafmmYDEyvZt3Y
7SqZHdnUo8vFulmNSC1N1x0gVs2Ngu/H5WbRrPLTEZcev3/15sUff371+t1/G/V/PTk56X/6JR0n
IxWYN3V0Vy4FqdK0Z8g0+OXlWf/a/A+SekDbw+xs4ue85MoZ1u7Z9JszOTUz/EEmQKVNJ9LZSbxB
XouQNyNYip7PHFl3BvTUURFAlqMIvYaOiFcCM7uZ04WZpeEnVkrucRTfnCwehueCcZR99/X333/z
9Yt/0d99mkEaBu4OHIXJO1ixd1BG0puY/ytu54va3i+O2TA3fgOQ4vnJ6ERDZtBigKbM8QMUMf3i
zfc//fD63WCUfXkicVhHGSQ0B2mKeYTMW0VJbn2xudqZq7sqmoHhSOdrCg2/YC2Anf3vs8/V7GUs
X57og8AHwN93gguKDkOvR+PbYYeodJsV9WK+BUxRQ4d4PzCYzAx4tq1g3kQcTDnELhT//nVxZx5/
CiQxP6x29bWH9kFgbwkvaVZfAOUgbxM6ioSojUGAFSHADqUd07X1oFIxTjgijDFWRBzKGgEJfovq
e8TqE0usLH0FIt40D1pq42HlgwFJUWeDX+9PL86e1LcQ2LbYLJFqw7+GmJp+zodZ9lks/EIr8dfU
1sntYMin9OvX714RhYQmi6wPcW9N3z5stOTB6D4z79iv60EvnG1EFTumaaqd8gyCzM0UVumRU7fM
uPj52b2zcsP3gL4Fy3t63pkTmlr2PU5NdXzRzQ//lsOqTLLv3rx9+ce3b356/e3s5z+9ev9ylPBt
BjRexfV7hqnnp6Oh18rbl9+Okh7SVYtAmD9/FjTxx7cvX75ODeSqKop1SyPPU438ezSwo+yhWK0S
2iBq5fOglW++/ymxJKaVi1WLzTx//o+JNuKBoJ6z2q7aWvmnPa3wIh1li4d525p8EbTRusOGRjdt
A/kvhzaCtynZyH84V0ojp5jTJwcRKT0SmrADz1sQDnOAog3t/PtUV3v1+v1Lc8Hf/9kWfPf+29mb
n97/+NP72Z++fv3t9y/RYHHq/f7y7ds3b/XPzwLMVgIesNR0EuS2Wi/xOv2xaN41yz/hn3nYbtc9
bW/BG/lQB2IjCaupzgvz7G1WxSvDhebU1nB8Z6XBuhcuWO7q/0N2cn9yqV7jd7a594by2Ua43RG2
MQxsQ2DcAjo5zL7Knj/74p++nISAqqKKhlJnEywTWMHU43RGbZzrTYDvO1s9fAZ28sxuedsRtWof
Wnh9g3L4nUhLpPOeLTez23l1s9uid497qEPO5sc/zwxz8+btuwFiOw9OB1GEt30RDqh+Eld3ZN+l
EkNbBNsh6IkCdLfw0cp+1SuNadeD7t+/fPvDANXUg+Xu9mIQ16AEp4ob8HIG/WX+cc49m6ZnNqnG
ujFyIKeLf8+c1c8o1uXkPcELOlOvGHjU5xdGlLyZPj8BD93l1DxI9E5MzbvCxH76vM0JAMj41FB9
psVTQ7yRoE4N/SWqODVUNF33G+z3c9PvW9Pv56bfP2K/n5t+/0z9fv68ta7p93PT74/U7+em3xfQ
7+em35+x38/b+gVqSCksjUAxNZ1dIJToP0JAupFKm+kXHre8hPh+8AbngP9yY8GR2nJ/Kz5U6uwD
0m/lRyWIh9tpgdCyQ5sKA2tkICPfwzV+9SY/xPLl6qGiiSg2eFV7sU5KtALRD/UbeFvdyfVb9m9O
H+lDn0oHPyFJ6A+HMa0yw8KT/F3JvXWi7CvTIbdMl7Ofuq4Ud0tCf7Jj+XH8tRE53m9+BraVZgyL
XsxvfdxnGQ480vLRcJ8Cl8n0Q34ywll/11wef9kP4aO5d8/7CH+53K1WnUKUV9osBJFUEGmSVDbo
F2yR5k1CGVwFoJMUJoCxInEFMecix3gdT1JP1d9FYHHySUi+G5sMjbyfcLZ67EGaARIXWiDFE8nw
8CcJgKegTaaph2Ar7taQYHPN45pQEiLykh1GRvCbuzOMV0qkr6sXY51ny46BKgzTuC2yk7QOJCHB
TnrQVbJY5hMg0ZtyZWMpmj2A/HecssEd0SQ10yfYP9I9bREusqvNHOHYmw1Fv6Gt3EiYGNtdZ6sN
BZdyAgHtnYNBGBJyT74gZOsCdguaGIKdMu53n8pQ2fQvwaCNGggxeaMZ35CL3a1OJYN276qAb+ZB
G+CPQfPZ+Va4uzlGuprHqLx8eLoudk01X4G6HlREQSM5xNbOKUwDE7fCUIr7+aKhtnGCw6BSvWHl
DkJ4XIL6B9bXvGwfN+UyK263zQNWr2Vy5iHY7pqWfTw20rTHccJxaXc4QseBu7lZuWfZZ9mzT2FT
DC1agWs48sNQvWWHePXBocD8z9Xnozr89PXBjcj/ogZ0nexYt3acPWtpBGvl7dWG2dOnWe535e/K
6+xvbACWEK8U/ph9mr32fXPgNJjz/KTO8P8QQAfq8PVGtmXVJWu5fetYsJadClbVzEW3kRqom0fe
Wm/Y87xsUNFqPWxqd6eqzeaWYhkga4y5TbZ1QbtD5bLfGiDSlIvdypSi2w7O3iURFnSdQpdQaWhV
3hRZf5b1A0dB01m5Ai6Aotr5UrB1yCwlDwAyOjJ0FMw/TGNDNOszPXmrh/QP+SRey8/sYjrdpf/s
QzHsg8l5GkT5tr5KkXvz9SQdlO3wnqBmjgYceoKHqbj4+oqlWCg/TGITOH4GOI1Uih36mdoidFl6
/t3gD+CFdSPmv745B3G6DT3MHac2cjXU2uGq8ks6HQyitcP6vMDh2KhtdAuHa6mRc1Q10Kx64G+q
S7WdkyT4SAuLlgSDhkrgCVsV2PExPy3NNYUzEwvaT45RH6u22eHxTk+wGrSzqPZmKFZVLZldBrUA
y/Ly8hm+1NOguWPVnFouV+GrMNxWjbNv7v2nrqwVxFFcYJUKC+O+bM5D+x/oqmkFWRyXevA9fLRi
7m9U0CnPTAhUnl3ADANl3cl3J3HOPtKoumopJ4ObOwSQGYDqwNzjEDb+YM1qV9tV8Ruafvvy23SC
ETtic40f3ywoy7vbRRXR4xtGrXt3y6Ry+o1N//vetWmL6ZQWwyNz8kW8a49T9HY8GolnzvY/eXS3
Sjsr5MdqTTwVYEJlhQMk56R2JVWbhiMg0KotsOm7v9pzM/hkzusnSHdEGP7wD5XgrHkuTUafnWMi
lYoaSJgqgrTRSbBQcGe5jKyVVMon5aClish9L+0bgo4ubPjkX00X9DvrHsUsyoiohPPJ7ozIim5Y
5O0y00QmGmWj8e771y/+BSc9pUN/giY68IFoJFI3IA+ZLn4KjC4oZcQ6zNCSQH3GYW28qLr2s5ba
SGOi6uZaZ17nn7dUryTqKrSN6cpfhCUssZYSX/rNlwhvBxIz5FKhDsDlo3MlzUJeVQgiQY3g0sZV
/VU9TVZNrK1qI1zbZ91tqBVWjYQr/Hl3I1ViGcJ1/uIkLBGu85fJTsLVpkP9pzdv34NqlsCMFrP6
WnCPiOy9ePPm7bc5//wOHZd2lSZkhv4Wq2UNuEZn+eAX89hgmy2gJPngz7bEuerm3Q9ff/+9Wa0X
7w/v6/vistnb3fvNdm+ZtyCx7i31zaZpNrfJ0b948/rdm+9fzt69gDMz++an7757+dZsy3dvDp/N
8u5d+a/AX+CKt45iefdiV9Wb6sdNjbq4vRUUgzcYWco4/rmrTl0RcYTJ2o3pGNIPFPRHlbxpnHMi
Vc25uuMGer3VanxTVOti9fzZWJeK643n1ZV4/Z3ZiXwLMzlPlIac5qYEusJxWSLc9qny2OkbiOqP
rJezuAxfnDQD0T63lgpdjaUnTJMItvK8s53EUnzz5s33bm+41rsFELFvdpeXRQVSgqngLKrte9ZS
e1/rndPbixjBw/nxDVC/t3n7FRye7x1I2/qog5KQnRSfRWvVQQYcA9UxDst98twuHqriMofG41xw
8K2PQRE7s/4m2ZHnkp6yUsa9211AUvkFBKYVxISRM+QSwDX9eLoFkizDqc63tWoC5F1W2AET/Os6
W8x3NfqvKb058GvLsjaMqMKvVaswJso5/vPI+/OX7Dg7JZ8IJzFgKmEQFSbKKg2qTgg3qrO++akP
DyWoD+FAsOzvSiD3gm6kPRf7vEKEM4r0tirTsbmOaCUEdWdDOSaKNab2Aa9MYLf/ELvmHiEu1MOz
L3Bx5hcQECie4qgf2mTOnMx603UsPByxhdJiViCEFGhtuevMTKaeQwIHiFanTWBZpNRbtJhv0b9h
AyHt2rt/xdoaK3exv/BP1P5LlCcCD2yZ3D9h1BT6KQsiVnZR4N4LrFtxvzUyVq1iJ5xlv0WcSQb5
eAMVKUe3kQ6P7pqFUmzLLCjS7HIOKY+j1SQsHFrH3t4wxlaHAuvmsycM88j6XE8yslBm4Fcu+w6q
a3Npi8p1a3Wzsj59LnxM9fvD8bKgH+b1oiz7nedAD/Wv/V/+59/97nezLexiM15uFvDvX49++b//
VwyNgHu9MEe8osQru3XGRWpOx7dbsTcrfMmRERAXgeRmNrvcAUsF6UNI7ptfYKKdgkF8JVoCyFQB
K+LCJ6A9yEjELfEAGY2cS31X3kPzb4u/7rD0d7v1gr/D500iLGawNmNcXa4pwuzbYluBS862AsXB
95wolYgSdTmbL5cb9GXPMVBUlJj0x9j8Wq5Lw2LRssyo6OVqfgXM3ID+zPBvvN2yfAP3fgIdm/Yh
+1d/JKq56Vn/5fffv/rx3at3fbZug6iwlXDVCtyr8Ju8b4SHlbmKfVVs7AbdPz7mPo95u/qu6zmG
u0z7dbMxuwRAIGoI5MSjYhJW22k/PAJwcgELPW4bIommfS4rP+8d49Vqc9E2QDW2PhT+dNzcN+ZL
Q/rnH+fVtL+dN/1wwHaw6GSi0QQh97xtcZKpFtNzwKHtnUB5tTZDZXjaYwzP/NtWnBqURdeRM62r
PaM6gpTNgxh6x5pPDSrncsbbpqBzDpszp7i8snkbxvS3uBliECy6twCauIesDDhkWHZMizP2j0Ay
q9S3VISQev3R9KzONLedEqC12SoEtK7qhl0EeagM8TMGQ0HZILI45ecB56Rfe14gLwOsrfFgTHng
5m59BJNQ7m99LHzwuIGXj9dR9JJIXczLw4VzTXvadZSQaUcSN5Nhpw6Vk7oIQI2oP2OLJ/Dw+K/t
ELBPaSTiLqQNMIjvBcyduP1g5eBBv3PG1GH72Maqp+bOrguvx6umuM2ZvMPn9iUhbL3/j713W3Lj
SBIF2/ZtsRc7a7t2bJ/WUqjhZCYJgFWk1OrGCNKwKaqnrCmSxsuIc4o16Cwgqyq7ACSIBOqibvUP
7f7H/sX+wb7sF6zf4pqRCVSJ6jlzzqGZVInMuHh4eHh4ePiFsdvDzWihQ5xOsXpNg7tZ5qvE6kZC
Xg90u3aL/gCoeUQrPbgfqTvUzOJf60pys6AZ9nSrVnv4JzFNONeZy9UYBRSTGBNEHTpa1Bz3hCod
ezQui/7g1v1YIiUHgIS38Pc7bn+XcLSq5rsFS3r5VCcn9C/ZpKgAj6pyAYYWkSe9ZehpjKhzKg3k
g2c/x0h2SzojV7aL4i9KEKvf/mUDfURSXZTNwU/le+A2sCGEiipvt/5AjdO8cEMfiWxvoQqFAKBL
hNuL5u+tcl9WSdRwewJLT7XuCqIqCK/G5+AlWTk9lRiQbunXz169fP12/O7bw+++s6rYr2tzoLiM
G1ZPgZeSfzOVSSaYPLZmulEFQ4WaCWsO6FRIHCdrDnr0Yl8hBY+a+7AB0Hnym2DkT8Xs9FCOiiFX
bsjIQ4V0sNF7+4+nytCoeHAgd/ENAXDRSuVgR+oygW6fvX/y/avnz6LnL58+eXv48kX07sUfXrz8
4UWPzRzOyyuK1gQSGUs8ZGKTrRUxBuKNFJTREqMgfP3113ErWhRFV+UGDsgciJVnM90BPfE333zD
llgxIYj6bceRBm0wGNQSRYTZXZjbpQ14xUmQVTFgi78x2jTksBmgRyOPt5lVemzpDOPx2utDJXfp
flh0d8oEhfvDWAbGy4iOLZrtkprHYa3ANSfkU542j/Co++7Fs/evnj19++zb6Nn7p89eIekMmVS3
BHJdrhIHKu41PW7uTR/kBmhSmq3H2hM4ub8L6CJX+SJTSBhqsd1TRsBN+7+z16pd1tmKKQ0UoDuc
UpENeSVLDQsg3SOhh2NhApz5h9LeKP3fGI+BYwyrDsc2153d3tGlE3JX+fbli7djjJH76u347et3
z8bfvXw9PhhpQg9+rtOsXe53z5+8+MPzwxfPgq3or/VGXrx8/f2T54f/6RlfeL159eSpaSP0sd6E
OtPqaupFvejh76HJZ2NNseNvn719cvhc12z4Xm/o6cvvXz15ffgGinz3/MnvTd/+h9R47lun+MQ5
FVkfMGWmfzzCkwOqA7oBdYAwAYsGxsozqU4cpmg2megYAciP8SXyYw8SS3BXtf4yqnd2hG+c4AWq
uCeV6yONs3xETP/OeKy1iL0NwupeVME6rk5voj+6+ps/2gGG7UBEYje/mKCOBN2RjYvo6Vxn35FD
HxxoqVE7i662fIA2krQlso6yV6EWhA+fznFWrXfJAtjySJyfoMUR/u9WGR7oWrLSLgY8MPHYtzR3
1ON4xdhB/2wHXV6CGq84Wd3M5KWd8h3ZXj7t4enPEuxoJ8Pp9jJ4cP4K4XWcmwZO6NDJDObwMvc1
Ft4SGflrKZAMgyyU8XxdjYjjGVyP/CHhJ0YW70O9uinnuFywukNiIVzmq5OyylHcdOlbqRqCBC26
kR0JWtnwMJLcjGLIFkRx6XlVSD5DlSaZ2cdYl+e+1E9nFm6bmsntSaDU+SKD2vXG8Cz2eH1lSdym
fYrTplxINFwMDZfEmwUxxHWpUC2g31vp1MU+Drbburlh1T4p9wmxC08HnWDe86PjHlS789JWeMVV
gjGIxJX1FivF9haqOGclRimbaa2iIBp1y5SFM8fsy0U1t6NFTZ3To5Kx6b1FRdZyF6e9GgMw4pTS
sehG85PN2Wt6m+il23NYivWcOsoqGged3xCiAYVm42EptqXP19uZtc2VXOzXE/US5HJcqIY60CHf
jyr1c61Hzntu68CoHT7JiwqQENGLWFP08e/e//cSy4qW1Md77/+7/4HjV3GE3wheL0qT1Jr4Xc7X
DocvUfdZVhTCSMWuciNX9bi+4pNsDv+MnbnR/9eOC7dD2vHuvWqAB74VHjUidPpzc6nDk2wm26ej
VnPHaazVg9nYoVo3kvSY82zJybuoIVz/6fZUTnt0bjI1ttJaujUPO66k7tG96liObcntB3bnMdVd
Kzud8RWcwJBYoDHkiFTk0ZAJSDIbcpuPQy8PvtRvKba1vDVlf/fuzb9gtBJeQpNpNF2BsNEj10Fo
6Ptn3x6++x4j+s6rCPaMS3Q60pe3j2xA3n57+Jqbf7Qffv3rL4Pvv9BvKUNJj60hJMnnCaaGWH3T
+clZLN9nF374AbxBnWU/FrMbjFV8WWDueX2edxZonk3OtXNn9Orlm8P3sh61GVdWIY1Q9t6lOHzG
VCQW/pYOoujJbAbn3glGwGWliCXcbk4EWm9RDxSs9Jejh474rwRRwF4ecWXaAC2CFWdvR0nv+eCY
IH9dP28t+9+4ASm93FesXlU+5QSK9NKxhGnt4XKGSdqhDEGbQGVbr89t2LcJmI0kkKzCNdyxGlT5
yMu2LDKuE7JB3hEqE3cLiAowiE6byZBYPumDKAoO4qD7jn2rn2GBe1PkC/jet5egioQg1DEnzMYT
ab8X8YteAw/4c2zYdDyMVcDZuIFjxMKJ4iHFJF2vqDTNwk9pKP2vhxseLsEbQqj+ZiR1O5WIOZJx
lkrxMyMe5+ZboUB+mY73yMYNRIrZQrZRE8IOSDhbkgkRBiFyMkSMx1rhdQ6Lm3KHOvG5mkiDTqI+
jD5lEI4CtOFKtLXwrg17tkVZcC5CsRAkixOJ/qa0cfXA9KhaVhEmpJZwnbTB/99L11zHUD2zLTUI
X0Q16PC9xnNN3R1M4LPDIKcth4LoBawAw+Y8hOqPDTlq0c8d+wcKkQ57KGNdoSs/piQAho9sWqal
E1AANPEsfNxFY9wYprq5bXvnPqKH5iQj7RnuGIP1Gz5+j/l+QAKkaA/o0ryYGKktYqFjK5nUAt11
Orm9N/F+C4Lx37//D5YNEwrfS9ilP8bv/6+/ZyOmarMkSRc3Wvz88JoswUy4V1z/6OKXrype4CIa
m3iusEjqxkpWsFfHbmkX+6FGc56zHGZI+5f6liZxvw/HAoLfZcINhiVoFaIqNFuaOOY9fHmUX+YL
sZW7IVon/Ewjbsm3JUFNACrT+clEs3ItQRQgQztAGV110MzRN7OpK5XILM8WiAO+SZpl85NpNtSb
ubIU66ohQpNp6qj9FuUyad4PakpAKG4uXQC699+RyqwmRtR6hooiywvsbBxGa2RMhhJdITLLcKeL
BFmcJjp2Rzrk8yMFBSlwFti0Tu1VePsFTVoVIrMhYZRKmF82zaIVJDwV5Dp8yKygrJzoSUJeWE2I
LoDPu+T5goFAQW6lEy5DBwDHfvCQDxw95EM3diDC4mx5yyf0UytIiARnffwo0tG/UKy3q5+v18vh
w4dCIuXq7OEMTXnXD9U6H5yv5zOukN4a+TR1BpVodpNVsIxYNQv0KnpE4moqhy4tBR6RGpWeHQvy
jM2kxXoiUuYRgfk7x6LObFnNvCQ6JBthPtpjeFoGkxgaH0b0pdWZstTkNY7dWY3dlJtoWi7iNa9v
im+hc2kHxkM5fCmviVYrWK2Zy70qpSlVg+1F2NEEho/ZN7DZOaXkICz2CEIxw+XeADWV3TDeb1CG
l6sMlROFnbsHh4nJexBllLpM3cDaDbwBIrod5dj8DDcqimChLgvsKzkimRH9XyuVhUUodTJ+MyzE
OgsSAqZsi8+0AWJlsWZ5FObBZoQJJyMjuPiF0Ga3a7XIOxm1pZKrE0pxpwOgl9YmpxJ7CVfh1gZK
nG3dgXlTV6Pkmmnn2h2nxSoFLd/DOhG2o8SIgFlXQVdL3rmRhBf8ggGf4Y/7wQoPzwz3H1naWt/o
Ts5hH8hXbRe42OzgIr+5KldTSqmT6LYdlcxJWc4a1Vz4ketxh5Jtc7woFz/mq5LQqJqwPIKzCtBS
TBsatWVuxlEMS80OflEsqD7Ni7EWS73Q31oKpFiM/CpO/fgY/D50TAnBZQd9vp4ozmiDhkr03B9Z
+xlZ6tROQJq2ArEooMjWw4sFrKpzdHDci97cLNbZddMBgc3tjzjMhV1xUJ6ewqqKHkSfY3ya7r92
e8eh2tooyOpnqOYMQMaXO9mESOiKRqOKMbLm5L4GcfjIE+cthpR0WbOTixwAnOLeClnQim9OPyy6
nWbtIfy7V7UWuVc1f7xnllZPzR4a3qNtjKglq7M2PSSMA5Gg7kgd7Ywo5n2KQ8Hyz3FZkS4CVhBQ
CjySsjtmEQF/akbAr6ybZYzBPrIKlCd/aqbm6WCznMLuoo46AtN4XCNpV9fV1gr+T986+EvRCi8w
Dq84Hb2E+JLXkX08VPhrCEJE1UlR3BCOk0XO4Lle3c3gbGtD4tYWreW9pODP8KcpibzDipCc7FQI
eA3LgUJbkltr0CfZ5Dyfjkkw82miF3Hr0/QO2dwVGpEDd3nj7KbN5qeulgF930S8O8mVrguDaMkF
NUX6a0vmVWMBKJYtcskVK9KkiBVv3r4+fPH7qLuzIUP36jzHlKGkEYcNjpJsZZWJQlgNuul21NMO
ihhOm+aZizYjy6XCmtrg1qSF/+rJdMLz3NK3rNQzR+scc1kd8aVjJ8mWMljcHNU9o3Z735SVxEda
IjGnqts4CpvZgm2GPVaBn1xRgUgiZuO2msCAxYdtekItsyxXQS2h3BB2d8CvKmulOe1K/lQ9lzXm
iAB2Op1/FNLH49oAWPZpsarWtpwvhktjkpSTwjgfIB+gQ/jIlWETllVjPgLHJquFlB8oQcYMxDZw
0MW86RB9BfEc4kEs1Td0zsofOesiTxjTGxA5cUA8DOc4s7yhrYR05PxsDTXQglVG0ms19GLSPKFg
aDhms77HHpw3XMebQb0MINQmRF2MqXaDJItrMEB0ztGqe/Ti5dvX714cEzE5zXjz4hLReVleoI80
HDVCdISJzviAKnOFKFeWg5v1pKRTC938y7ESKUy+UMZr5g4qwY+FLMVFhAYM2pxFisFgi/WaToDE
3lGBk1UkdOV4IGQ3ZZAZNtqto9pQ0B01lWoNq45qBcXXCmAf0AZAZk4wzq7tRnyV620GBLdFhSdv
tDop4Vx2JWhjF1w4pFw76jdqOKvUwGXT5GXPJslENzW4UqcFg+4uW7x1jSPZTpSqc0myuoQsZnI0
Djft4KgHymCbYmtbLzzTdJv8aqqBbcN22mUNkR2o0RsvqTecARtCUnHLsYqUo3dmCeijKU/zh/rS
DaxJDxm1xWrVtQ+u9XEFd4vG2dy2dTSjxS9nYb6ZG7jB4Qn4phXQDDiSFBG8Wvedu8HS2SMIYP5O
bnRQStGOUWbkMzzXGbWgw6/o5ZgU/cBWNlXCbwxDV1yA32MoewGq6846f1ekFDzhs44VRo6a8q5W
l7vIlHa4ZEMziDRu5j38L+m+f/XkzRt4+rOKojck3v+TjxvSMnr4Uevy4XK2OSsWNnJ0jPlqMwem
f5P4lVOVUY+8vtxv9oa4Xg34NXr0W0edPTvgusJOpFaj/FSjtZmq0zROHMt8fKreT72iDCM7/4sC
/J8OX7wdUmjTuL+KxayOjgJ5LqGCu/VGlB6f0cHhR7ITjoUKBNRN61Vc6VnFlgs4elEAfIwojNcQ
DfjCtYZFcJlde2sMnZ7GgsLaRAU8Rey23gfbYrzfpi2cxdPvQo1hXsxZ3tQW7K4MOCZG+O7J4XM0
7mrqoHoT7ICJ5rYjf3YnYMXyA6pjjD0DrDjpVbaybzBeXw0wtn53hAuWYpaxYtomo24adDcOeRpz
i5a3scqlsx1wXCm9iFVnskhkkdXXr7Ws8EkP0GdMCC5Kb2hvGuBZy7IKNS4yx+Rqqi3ox+hMUKBO
fTngx4BjpNIn8hiiewm030ttHGxbAgze9S7j1vxaD/66ZfTXv+zw5Z5r5GyMzQjqvlfrCM0OBEs1
bQIJVu2+lt1IHzPltsPB9hYmIdjmUtuwLYzeYJtfhLHtf/s3xjbuwDrAPuE7ctFl69V8bZpzelQC
hyrEJxyqbqsQlFGA/mgM8h2drLLk84sfYefHnVYrlz3L5EO/4+QXX39tkruhCEJt9udFVUlmk8jV
R+AvCjtj3DkxHBU62jAWMK/1qIvgWbywAWpo5FpkwkQ17GglZdDXkoL2FLVDU9ojqkR2CmW/bdBG
6tnLnDN71OS4i/yGukQXN0xBggcwE8sEaHHF+RGgHOUXewxYodI9rGqZYcBmsBa1FBZGn4hU0RFV
4LIzVzSgkvSZLBqmpJZ1PI5nqpEEweCiaQ9vZ3T/gpaZvWVs2Te3SXhKWBu5klgs72ManiLvGlL3
jBQHZA9SmKnoC3GuAFcXtLr3jORI0lhPq3X7K7YsnmP0GNptLX9f3QDlWBDa8AQ57w7wlA05RmG6
ciKA0ScvnYYeCUkFY5IKLMCVXOBpjElU3sxh+xZvW+XdL6wK999Abw6rtxPKdd/IZETdJstAwwPx
4ei3w+MtW8WbPxy+io7uTdHzd3hv2uZanbSMBdD/MXn/H8XFg3jFmFcYCOmL/Opj+v6Pkq8cnW5Y
IzHPpwW5fuDpec3WPbYJEK9QpNB8Tqtofb4qN2fnkUo6/uTN20HnLSZjlnh0ElA6Mn3DdGOEJ1pb
FKtuwJnP65ZxGZq+GZs4N/yWbk85d431baY+1/ai3zFcT1RhNp7s+PnNnTnFJIGS78xPth59FSWP
etEXveiRjiho2YacbM6qwZ8ophgZiAAj3+QHn//2S7F7By6LFJ90f1eWs5dLJNrfFQt+eLcAcuXH
52SThk+Hp8+u6dW3xcQOJCWr9XlRrZ/CBoAlfk92futyJTX+BdWC+IAFMop21X2KWoVaK+gdj19f
bOb4582afumLRXq3Oakmq2K5pnJwQAzDgl/fYlosWXbjaj1f84i/E7uRb/NTggQNOuT5NTEGGmU+
y7lDmLDibFHv5cnmTH2Kuq9wE8WH70oC+QfMCspoo58wm9Q+KofqTb1d3bDijKBe3XxXkFWU9A7k
Qi0RbZmn74AG6009u84nNAd09YlPMAkE0isYJk0znmB5NhZAjxuNIaQJkqPYuILc8EhyydAWkIxH
nIxqTEQWem9VmeYjta96YV3SklmRG3LIdgQZh3IN0hBwr7WGsP3dGzLgd4zKbUe4LF3oggSXjLzm
lLS0I1DBVrA8yJtidCRRP5KA/RUbQJxSyG3OSEtMdLnKSS+OvJA4W4OhksWojBGWvgTk6JiNFinq
tkq1wBeQ6peIbBqchMOswF6xYpsJTmdIpmd2qog5ZR1AJJAdsVTjXi8BWWsylya3ykPVdr5KqNW0
LjZLlQH9Re9JJ3KoYHbo+JJzxCd30EcHx4FbUBVwQ/4a+ciMrTbfSTcx+4ZyXUc3MFK8ol1ghjpO
tImmhVPX7nZJcqdAn/aGSdpmNi2cFqwTJz1rFL3ZnJ2BQIL318NQeyjAo62k7K3GgCY6yU9R1kKF
o3zEcK5AFP3+ouzzq1SZTuMlWnl6ypF1x5w3iKbFvvBFYWpFvMA3GODX5ljxHf6mPLpo0kFfk4PU
EYANcQX7NeEv/ImSO1GHdgObd2Jieel3QomIbJQcpQAtFUUknO818dX/SlFoNeYEHfIvpQNKI1dM
syRLDu+0f4wRa7rRV18phQH7jzjHN2vIxljJUq5Jjs/uvWqo08w5o9w/tjwfapiRjNWOOOPKPkNF
SzbDsZNgH/x66AT04HygHTSHROlibPtoIqP4XbF+uYqAtP8i+6K8fF/S23913z4BVglv/956+/zN
eXG6xrdffWW9fq1ff/219frJlBp4YL0C4QRf9a1X36P9BLy7b737trjEVw+tV9/NynKl3tsfvi+p
l3vWq2cf8c1oZL16Ua757Wf22+c8FufNM3pll/o9D815Q6W+tku9Kq9oGPY4Dit8VVTOKwCF3yLx
2l8W9HrhQs1vOQlrF91ONyh81qZWGsVy95zuKNUzfvqr8/6dmgn3rZoytOj7Se+rtU2Ee5zm/8yb
htlmdSHcUSMWd9ZldDbLsznyw9PNDLZXaO2M2TKzErI2b9t+mUt5u67ig/TXtncB4bqYjHkjE4dP
V6LYQwscCsPMm8lVLle9lH410/fnpF3GwxODaPOdNrHH3Z2f6V2Cy7keeaU2x1PaIb31tvp6absj
g4qBqNqCYT0azGmNhbeSnfzaNhu0+qLreAYiGMbLOdlxOfdGok3icxEo8VqOsNDxLujroXsCSPk7
oNHGHlQZf2L0CTLYpcQJKxmIXMb+0M4OQDkJiS4pUQDrWkn6TZ1cgDJ2ycEo8qOtYAyoR6WKFO5+
ZR3S1SKm6fu6y01ZHdLSGiNhG/F41WCoxssQC8g1v9+MTK/kAyynXiBapA8JOD44Kac3AXNwWel8
FHAbf0GiUcg7PUCgxg7O5iCuvznrEMSDZSHyGYW7tMLGEE5JNKAbPPoMcouKZKmtbklmwEYGxbRn
K4FrVG0L90Fipj62sIN2Wt6T5NMlSh0bHBha/AP1BftxDX6VIIYfg1f4apA+K2jhFt5MimpEJhPx
YkuM+SmZ8sLbAT47H1wtE71x55vnGT+4l5LlEqgGRKpxSTcwPxbLhHoolxVDQBr9jGQt362Q6jkd
05tQx9KFl51yOa5u5icl4tqW547KpTmZH7fwajf/cR0PuoPdw5f5Y/Lc+jXVj2lXxM19TBMDYFiv
KAUxAWVA2MrVffq/y77YizzARhYt3CKImz+WkTWzP2/zaMRtmxN8m4l0wJRZX32dlNlqSvLaarNc
7+LQz3XrJeu9bFt2ravD5KFc4fRbJ10KO8RrzuE99M7nO3iCpA8WndlgOH0GrooD9wruClvl1R34
GKuThY2hAXxpMzIQikhtYym7uMwAmRFKQy9XwUOqF0GZ2RewDbotkH54A/bG1bIPMnuSZhrlQn3M
bqRebceP14Q0wm00JE5+jA0cQxf1KfI/+t10UE/w7p52Gzqu23CS31bavcOcib5fJo0OYLaZN+eT
gN710eyIngZhXi0I9dkxvwzNgGrMmwd35AoK4qv1DlqlC6uuzbxzUoynaZsn327cFh9G7hB3FUt2
4JC3WHx4g6PWXrEofRliR1GBqg5cgYG2Arc+v2pugL5bauLgfs9Fw5Tk077a8tOGPf92G35tRGnn
7nt9baO/i5D7C2/utY3dnsB/E3p9amIbWf4EykfRnVzba5GpjMyU8bXpOludjZt3Dfr9Z3tOsXY3
GlLjP9mtbPhOubbpwCfccKjrupuf1z3FgwkuEHif1mrKrmDDp8XbRbJA4w9XBq+OVDW0oDF9+S3z
YNQ+puqkbaA7pYOzLEYs6GKuMaJczu+IFak+8CJ7/rL4kU7H1qm2Gt2ThhVEoflyseo1A3VV376h
XhOmwy04m1xac0tA7frPoEOnjZ0wjoW7n4IMu/cFx7fFk1NxC3rYf/DnIMcPYNaCmourafWJUHN3
3OyAHBwQf6OIizCIngqh6bfbeGF0r0rUPlxn1W4Hbsd+dzzyLVsvxZRT/VG4o19wo71/f1F9wt3Q
iM9dDDPw53uIAnz6yRbVlzuonxsFYihNWbUCt3C77seorhbDKSK4sFLRHsuKXSMXRt1HJxfzXqkF
u/W5tWDXmjN5+LkTa2WxuIuWUBpg9Z8nrWgzJMuZ2YsH6kYRwQKDyfqaT7bPy2yaNoPramWpbQ9x
nrDL74LSBfbrbaK19TsQRhFqmxoIgeCtS1J7K4aj6vyNhGA6czlI+ISLtoYts3AHvHYJdeEFe9uE
CZ+qmd0mci+ivFhKx48fUXGvAjcp4m1W9yvDLm8uehQitZisx+OYYsIH1r0l2nwiakGwxwrmX5Jo
/I6ceAhkzGJ9r3lL7ni5uMte8UvvBMLr6LZMMbqq0h6UbKNReSdv8b0MsCKsOuAw7xa2/BYcKxFi
vjB6jvI26vruuk1gDEACkiYwMdWw01ZY8trZr44Ovhj2HzWqH8RYRdhdDQc1sx0LJzuksmCmFLYR
/uQ69xAdWOAGiKE4WxhigB8WdjkOodMWv2qgB6i9ZXMaDAZE98ZyqQHTIoSTuQYKLXXtnt55kxZ+
ytbqIxs2lWuvsc6knI05ZpZbz7y3wMyvxlxIgBWESkUgfeCplYrb7UKzDY5meEKQBKwONGzHrZw4
aHdQo+SAvUGd/9rU8QurneyuOh/vv/+fxAehwCTj6Ir08cH7w/+NQ+meQB+L/jTPphH7XUoAW9J0
QIV+tb6Bl5wIvJM8TaPX5WJxE706zRaLanI+L6brXvRP5ewM6vxhlV/ks6jfj74/fAtTOskXVT4l
74LxWJnxYwS97v7g0WCaXz7qduALnDjp7VF8uCiecigv2FhfISDEC+LjTufpy++/f/bi7dN/evL6
Dbaw9w9dZclkCtaMhANmR66vBsYpM0jX1QdbK7lmS/idN6XzeqpeCoXND+5HnQd+a8YIk4RjqC0T
E91zz+4Lc1Wq1nVAyzccXeoHjnXiJjUI4Eh57YWCOOpE0lYG6eZAjtg4wlWehg9X9mFA/OSs4hJh
zgvhaCIrkT+ZHViJQEfjuFG1XrV34wSIlKZUZflr2q5HYKJMCei+ppGGDmqtPVY8CdWR7vb4CCod
OxZyINQEEoZIcLBRsD1/KEfHbrxkQagL387Id/z+bGeGigK3JgIFFRxxda8bTo3j0gQ7/nnDdFqH
T03NMIEfEQa1PaPmHVupm5fMNFtntQzT1lpGI2bKTQWHB6KHRYGJRG2BB5sIx3475RufhVmjO9z9
r8uLfGEivbMHQEH2/Z4m7JRdZepNwDzOysqJvFlPKhbqBodi22NT1KW046XG4kNEKGuWokUrn4gd
BgC5piYUnkI2Isc4EQTPsB6tUOJjN8XWY5A48ozqI16UuuI5bGqwKUmOlTiUr0LGc+SAdlzn1krA
DZruh2BeuFhpiYvoD2C6odQU61w3dm8V3zOrLJBe1+lKjeXYnaTmsDp6XIuGhu4EPDVJkPNs7wz2
kZoCTkVg1rAdGze8e7O4ZEkD1gbpbN2mUfGy0Y0S/3W4uEjz1o2YYcaO8adN6jrJ8mIzx2tfRpHf
uBLmmRe5C1JHxlCgp55dH4iwgWwNFpGq9Np0jlScqjnnNo/UeF+HFy22k4ZAkXIevW0Dp3EhYWms
eQva4+RSanKwchyiOTV3tYzyuyDB2PzWkDBhv8KsHQu3mJAdLMGCeDCB1ITF2pAFUAKb55oNn3Do
y3KZhMuM5aCGj0Kv+HzUfxTwqMYYMqpOe3TUO42hsT8ELNyPgByjYkmylusaQtdbyIFGO+wfHEcP
Itov7eslEaVqNwxmMRsmo5e07bxwAue3C/Zk6jgxceEsirdFJlRO3SkKz0Pdlh1fyuCfwQqD6C7D
/lbDHe3c92rLXWIGSVqw+Cj2mwJxxQBRCwkwQWZpn/DC4VzUGDgR+yRFZ6zaeGxwcLIwQNJxtzES
KZU7wGn1BllDgRrEwO9xz2PEvOIFpwhjUTnZB1qFQFs4coYbj+BQfJCGSL877CICmxkm74z/jI3y
zhg0aTXfb5GbqBFeACoI7w6d7cIXiF4lkyxHkGqyUJfpEiADsxdk3Y1Z6JkYHEIIn3SdrdM/aWi9
gCOI0kkucSq69/tSrXHjsIgaij2IDkKnZn9P3+H83HTnKKWTNlku3S1DngqEztC0HbTDKQnVDWyL
4M3LQHXvOYFI/56mxOpx+zE9eDq2oOEzstbVNByUd4MAiTYrFpXGClrr1NQPYrnlQCGu6U0by0Qt
45mQ99HwwHFIrTHrzsfe+//FiXMi5+2P/feDX7GecUlBOfCibclgPKQ4Spwci0SyJ68OyV3w46Ap
aEo5m358+P7/fvarX9XydFEaAGmc45SICnelopPQRkyacYwrjy1D39tCmgRvJnohZ+lOB0N5STSW
/Jrt23XwegAvv/YCGjSqJzy/OZUMQrR6khOPntvS/bEGh2NV4R/V+z8X+VUtqSm+jDDCPucwHTCh
HZ5GT5HVZCbVaHlKDfTwFhWIILlOI05WXHGGvOubiNsGysOwDOxTfZ6rt9eDKHqLF7ATDD0T6Uax
CFcX1vaUL10pdRrfA2CUgCy6r0C5j9WeAv44oJOJDbrazACak3xWXmFnZZRdlsWUtJWbSnH4q5xz
9FziwBkKrBuAJ3FH/xRtEBQaGNsozsvwAi1dCzK1HYVkp8thPUx5rBzAQhJ3ca+YfCnbrEv0o51Q
rqgVquiBMq/5jvolpVNio2LSu8yKCxMROrM6g5agFFJsPlU5uqgTXAxCgzYOi3VloUXmS00fkQPm
56XE7rR6dHvicEwNjMdQYzw2gAgWsC0L50CkRstTn8rxGMtCM2uMacOIg/ccf1vSaEkhYKxQjrEK
MP/uRm0kRKrSER4nTOdFpRublyrANmbEcuY7ujovKwsUZJmEcH+WZcWgAgrz9epG0MwAJ1gBkq0w
M/CqnOT51ErihO4vCzaRs4kpxxn4jlJuULq2Hiw60s1xBi0KgR/NyvKCDXV1t9wQwY89aPBHUTIY
DNhJpRfBI98zU0owCqUdTcscHQQp7zqmSOY4bqoHPL+GWywwSBw22FPztIjoAw+nB88KRznIDJJ6
mVOFGVw+RfIB1oVogyVbFdN8xXHHTzgSh0yrWlUzSnyPwRBnN4zhIHlh6jJogPJOL5C8sgUnJMt4
yxBs2ZyK9pM1rbpTb7J72EJ5iRHBMeqGTYI8RnQnpXO1zBrm+MAm8oW0BqOTHPY6VzNHFMKGvECI
NqYdEQBvMcfJqizXBBph2smUO628fQS10jS8Qa22t3OoBUwV/E+6EhXQv7wog1JYoybZdoVqghOp
yhobfE+ykxzZ0BTTgloeiXOrYd2DtKXC9kzf9mw2W+PjCxQRZtF5ARwaVvwNoYk5MG4ddivAoXF9
wcfNUlXnaYorOnVGLnoDiUjUfAmQ9igM/ltyp0l1gze7BdiEJuemCbkrU6jMK1cWFykMDr0G00Pf
+BzvCVRBGMd8VbozUj99SSWXEMQcH5GG3xts6o6eUkESKQzUCKvUfTpQa+w47OPWqD0Iez4f2Vd6
HuGF7gotoEaUYTFBxbF5m9TXXereyXljC2bXsGw0zYB7vLcGgieaMsh4rBqWxcR5WUxMVjqHUnwa
8fVNUrcliZw9XFe3gp4PUp+ilB6Ez6Zc4mj/uIWs8PQ/P0HdEq66OUxmgfxa6oaapazr8TexYE4D
QuZ/9rJBg7fGVRejLfkqjbVFpTNcK8SQvTxTCSvmUQfmvB/qGKv0gaJFwh+oaQoiC/biMGxOjJyy
8NvlloJnVKtix7ylVO76OlbFmwBJMSFpWR83nvBpDLaBPKfDCYsUWXQKWzfHrRf5WKezdI2lSFFn
ZShUVtwNkWXC9s06OVi9wh7IJbNZn4LJIJ42i4tFebWwg4OZ+HvBDU1yUnmxYtzJ9MsP0MpGLCji
rxC8r+PQ1saselthMmfSJ10LiqfwRger5ExdyIPxdS3SiXDQdqty5+S7PbV9m40TLRZX3NBAeLw2
EICv9gqjAxrjU6s5a6Fi4LbbUYEOv3oLIhDLM9axUdAsDJFzZM/ocbqNJADU9knmXnaf4KYgOb/U
xGqkLwrXaLuBS4ai0vmTbCJFkjWkw3ekRWsjU1aTDvcgp42mqFTKjDveEvcFWbk2m9mqOGXMQ9+U
esdaX2L8/YnnIsgAoXeGfseh//seavsW4YzV8+XxXHwcNCR1Bm1lRXU1zP85I6hlM6w2qIZ+oUeV
MnBWzFFLrONkm2pJ+U218W22DxMtgMQF2mExS9GdRiL82BVNAmkHifOGBqkjcPD9CZZ7pPXr5bIu
d3sZFCVZZEDa3osop8pks5bYppjdhXRhRuaommwEXAKyt7ndI8jgSHzU0Nv6pvQoiJuGuXVDFoWi
r6wG9aAZFn4Hny44kW56xxhyIflHYqvdKbIQhTCgEf/8+ESMm5YARS1M4naRgNokNGtVEUBqAT9Z
THdZvFBs14XbEhPHyUZMIllQDKvTdkDeaqLshtg4qxbqdCjImvXO1hVsFU63RdUJLLk4iaMHUUzb
VhwOmROnsZqql6tdZurl6r9N1C8ySYCWtjnq7KGCg2IV2bc9o1HnIs+X2ay4zBnPpP6vlCYYnpSD
HODbjSgL/4ZoKLv2fVTiQITZOPmrVyqVYj+Z5C18b0zURJA+WaE/Woiq6pTFOgTHXt2nL3s4I/OY
7kA8gc19KwUFJst0OpB8GkkcRt7t/rUT5u02JgPj3bYV8lKyifpvvql0WM8rZK0Wr6GoVFbD74rA
ctiN/jEUMtN/4ssMD2p7bBq7Ma4bKvZbK36/mTVVvN9aEcNiN1R82N5j2TjGe60VX5VX+aoB1GZY
w3yA5+jfhBFI3NAAI3CDe6myjYyAhhluyYvypUvfhqlYK3brgg2yHQQ+7smAm9nIzu3RCOKeGonV
3r8lX9Jxvz6B0Mwj+8+Lv1krxaiyMEYYZuzZ6QQsZV1tR1luV+tYN0IWqsTCCFtI45+rvLjdruhD
MbLPsv/GahCxpQowAzcCPOXZCbGBZtn4MqO0VH+2F+PpIh5yWzz8nwLz5xRPYjdmmxa068GgyK7c
BGxhhfQfOBJXQJZVAc+A3AZBt4mMPiEsO/gbmdZq5tZZDb+eUX7WyF9hkEpNbkdKm5J5cL5IEMVu
DXxzJNUoTlWD1K/DvTX6ofF8PBhpIEB278VtgWZNoLlmth3GnuksxohtVS+2Irb1FATpTp1zC14D
DXyfnNvQiHSdrcZ1itKvwytEf07DtW45rVgvbp1M03JgUi0c3sdDWPO0BbFGdSzQQxOo0DVtwNd0
C8KmDRib3hVlaAzUjrLpzji7E9Ko0nQL2sL6Qx0EzfWcIz5raw4xaEDgKO3OCo3DBE0B4H39tBu3
hzx+OgE0tO2N27SHIE+7DOmXvklVLnWIM+suhMkHdRC27p5kh5DqfqUHU79P3SLsxl5oH+I6K5A1
e1HgQo+FoN+LgdMOMpAU/dvcAgQ3YIkKpq/GEJz267GtRLLT4fxvcgdfm0sZaVJX3zuDt71srNBb
ZDOHhjd5ZWyIlTzSYwtk9g6vOG8J5m9bnNUnIInVBUstLldXxeWSSEpxQBCVe01/FnVEL38ubxGS
yxWL6376t5vtTzvdPyeq19+KA5Ci53Xed9PWoPHs6WY2szPgjkb60oHDd+1y70Ald7EBoVzGQWbh
hvfCX0FmscdxdTCDcyzBtDjE19U50LU8j9BSOrbnIOEGw8HCYi9YGGxGSOO10F+DYJywGJGfBiKF
DeqRwgYtkcL2Ik6iTk4XzMayKtKj/MqqalmtuFyP+thu2KPaRBnCB6vTepVidZB++oBkv6y6QFM1
hs3ajaqh5C5U/fO3wK13JqFZ5BBnOIn+vhGyuaIkoEJca2WEz1G75spVglWXlRthzFwv9UyUtJq+
Pm0JEOabaUEXx275NsOsHYyyMCVWwCYrsK20RxX7m4oEKnFgUU2y1U73u1L0P1+SrNGhipeF077D
ALHcLqOzk5413X7S9xoGKAGaX2yAPaksr5x+NkeHxbFK9RpO8ppTt4Oa8Z3OXWq9HDbFKUS/mGHU
5UXsrF9XY+FVYzNedFas1tNysx5crYp1nkiiVZQI6IiHxD6xM6wqh0dXLUFjnmBI16S6U55dP7Pu
lkS8Fq0JoUrkGAw8AhBKXA3ZpizlGgujrhkwlw3qE4kejT7R4QdBxWL7WjfrvCnl87Zkzx5VBeY5
uO1aikv/eM0UkK9WigL8hNDDSFFDA7Uize/d/R/Iluhr/DB6tgD8RssShJgKXt69wTvn+oZ9oUYC
duJvpC7g/XFq0cQeS13dM0A6N8G5HOGhU1fuCgxAzYD0AfouVEzSb+ExHe5O9g4piu+axYV+Do35
CcXvRNoWQbLv9nqVTXKUI6zdVd4MCIVAVJPkv7Ks5fPsrJik3dA6tsJb6Mzh8+oMWNIEJ81wVoe7
0be6E5F8sAhKh9Y8RApQjTJFC5FKiBfod31CUQnWJ3aBNurcq+VEdzOh0/Sf0IZPHQ80LRwp9Z7R
+61X0PNAHzFTJ2TINXmu+UsdygdWOznqdsIuWNe9yAtRSRIXLkvJEqt7kanJkUAWfQQG6OjeivbL
656bFP66lvX9jqIAYkE2312yuOOpMm7KyG6lZFf7eVMYZGxml6TsNTlhh0zscUMm9vhWmdi1oPc3
ljvat4vQTvELsmxfLOh0CvJIptmgEFDoDFgsxmOJBLWnXKFN2IvTpO7w8YUJVxP4+lh/PU+uAz51
C3Qs53MYy4Zd6CO6j20hTF8I35NvxG2TtP4yORWbf6wHzHPfK3PKzZ3pugVg63O7RIHfa23jPSS8
pMr77ieLMTx68PjB50BbszJbYwNMgTBtXWI9br1rNS5TSohaRgd0UZbLKpZqXAI2rx7FeTnoRY/C
Xxh4u6t5dp0cYYsw7mMaw+cuLPF5PpuV8RF+JxI4d3qNzzYXfB97TliAbx/33//PEuoFozQOqsvF
1eTjwftv/k8KGtOB3/2n5XyOc4HRSabRoZO8HZUCGWyVJxJbOvqhXF0Ac3xaLm+iV9gijuDN5eKH
p9IMvowk8kOEBKoDnpALvogOZdXjiDLrYk6BvyhTW76YZqtOPfTM5oSiOVSViSZDIWTlO4bZwYPl
Xv/u/0A6fZpNKGDCLCOVJC7BqliXK4wMglHmYfSIJnzfx+5Rok3OJB4IjHaOo65m5ZUJOTHZrDDm
RFQ4SAWeiyLy+/fvAUEXgKV1v8pO858HvxyryZef1y3gbImuzibOgQgM8gvAWRV2qFmsINFlubHX
MFQS4Rsj6WxWM5y9S55IWF9Ofmk6h0MRzPG5mvl6hEvyOLx0X+tmcItQzzsH78a5wQ3sH+5VkR3C
m8BU3fa8nlJ7uE8N/jCoEkZTwdccMALoOwOZESTJaoLBKfKpTSVMEypeDUx/sdKUgzFGDslzv4Az
aUZDgyM6Ewm2zTFzMOBIjvEz1H5IujkhPtHPoROziWlBu5A0Nooe7XPkwHIxraLsFHYxEKCLyTlG
4wGZFYgVmsjW+BMdLak9iqKhOumEJ9qfUxo1W9joCpNZnq12Lg2idTMJ+aHcKErAZdg+hDHlJAHR
JBSs4FMYEUKSuhGVczq4KlUSDaBm4EOEPeKiSGGBxMf0KUzS4bL1VaHVtnIorXJmTXAkpSpb7OLr
Ni48tJFZ3EloEYfuUQEJ2t4fK3oBWgVALiFLQ0MZCJIH3TrGX1fehNxhPmxZVM9L2hD59Sr6qjZD
DwxvgBUV9u1SA2XKoxPnXB03n61Xi5vGqbFYlYauZ2a+s71s/8Dneep1R3Mi5umamSWkzIlgp0c/
Q9kwMVrZk+fPX/7w7NuxTvgwjvoPP3wY/d3grw/udaO9bDpFFzk4xgEfqfDmaZHjJoyW4XQsx20y
n6JcioIsnA7W6ExM4UCvisXjRxIS1O0HjyvDrtv5+J9evnkLEHglo/gfhzEfB1CghxGIFJLA39HR
sUysI4QLVqCAikohAvgh0OS1F8zmkk8DIlwMJvMpSvhJF3HV/wjHeenPSk1yWYtueUn3LnKkulRn
tUGcHg0fWZEZoSm1eC5rcXEuZZR5NcmW+RjEPHQIxdOQGiMfmujgjG/VWUi0Tg7+PwN4CP9x7dCl
6xvr5L9Dm54PH/4uTsMHsTHJnugmPj7JpmMkiIra6EUZCKNXubwbOZNnRcugYIa6Y4epTwZFlc0W
m7kft1XihTp35tSO0+WWOjIUfffu2qNq9TaMigdlWAXIdefLi5Qk/I+bAiPFbzBc3InasuGkNQEJ
dbOCJQei69mmmJbR1eAbJUatS2RvBcs9Olrrw4cmXivOHZbD2eriTZcVP/e8xBCKUF/yJMCTIquH
boTaPQ4rRuMgZMNgaM1WJkjSok8dTcoZatTXTuzjJDi73H99haZesizoL1hfIPea8O/DaxFzuwp4
hF2UL8TdWB09/nn/9L3T5QKPKL+Dw07CB4eB+m3Cp0T405PaCb9S2xfoB1oWqygpG8yTJUmdIeHN
2oJDwfLQiW1YXkZZHA6ZN4nUtiOj96IuFlJhzvAMR4e7brpVUMaxqcClkei50M4kr/Qwk2IxmW2m
/OWyD6vzJF+leoBNoFs9n2fVeaOMjh8Tu6YdsyK/EtkAY6t5YHMUTAB7Xk6L0wIjDcqZVCGCkBA9
QSfwy1inv3XFhMWUsidgME0Mqq/k3oErIp6juIysBrGgzJNN+m6C4AbtO4tyw/3y2hoO/cDp5+v1
EhY+LqkBkNtD3KUfYoWHuOgHyGbdCn9pOOD9xTJRLVZBI/Gmf39RLeAJ/7Y1uV9gdfmta+sW7FHa
dFSe/KkeL8+NLpfahUU8vriiKM40zeZY55bMNsQ6VVH8qcris5W+h5HZ08jpOYNFvafONYdr8eQG
Y6p59mZd1Yqu5rQBTXQdS9FYfYopsNpVTZaN7fpSiBxZoalgnTBHLRaX2ayY6sCbmaQ7Qd56cdUW
DG55wtiD44bEg0xcmFw0+a4+63pthJywWQskj8XxyIzbra9QHoRCOXD78I2uyL2861dHBrtodQ0j
4VImeKILmMwdACdPaVNJYO6KiODRmVG3WxdkpEdhdqRjT1Q/FaonlvhH47HiqWmZmebWvKrCcDG6
pLMXCQVLpHJ49BgtagmEYIDJEl/TAXuJw0YxsMJY4hbTHuIASNov3Foz3A0zTM4ytXjkgJQq3CwG
ZlRXLx6fZhlrxmFqgf1iLF4GwiHmgctwb8pNNM9upMyNx8mZR7s1bsmgPzl7vitz/rmsuZUxr3LP
f4qna+Rs+SKT1peDE6QdJkIJrzVHACuX3aAhyQQXgcWuVmkwr446WSlY+DBGQJN3gR/3a2a3rJdt
a9PcGLa1gw+RapKtWLheXQkBX1XBwQqDARO7Cykr6LajfxDWR4S2K/M7DrhJBWHe1tTRsDg2u+RR
Ec5Jo7HqbBYNOTMs7DbuJfX5wo1ka4O1faZ94Lx1ohmj7JzflhihmZSkOIOyZwbzGUCvtvSbf9Ta
cQqAHJa+0YaHDoHIF/la4dKOdptnk/OIgn2GJO+kkohHdEuEP7irmo0y/EuMun3EENEPNJcLfUi9
AMiNg1EJQMStRTdiqtMiFHmeHK0aziEoiNOpKtEXJxVOthLOU1FiU8DqOR653UGqfQGhoHaY43OU
c9yrqpjxTLUXKMIPgmxPbAjqzmS18MXiBoYskWOUop+X8DDNENlLlPNDYGGLdSh+6pyE4KTLbl+V
neJXJUt0xOIQp0trVxyupKsmjJu0VPFw1DRaWS/1h02y55wlCi+0BOVnFOxbnVUHDjJVwhpJuSad
BFeQl4hOXVyFDq90oYV5BVSnBHDldq2gtDunBms9S0nTd1X8mIf6fW1wgEUUAlBG8JFC16KN52W+
6x9gI6bXOam827ulexo++4ocj5VsQLZ2St10OioaWoYXWKhrVLFgrZXPmkiVrHY+DUSOdJWVWKYj
bYvesJhN59k1kKM9sj2PqqBEMd/MzTUXKxxwXNRCFSU2q6IlKl+MUmKP4bpUM66U6Xu2ONLj+091
iTBGigCpgkwqGEEWhKZBvJWnJUVWTayFmI71gXPP2QkubQwAvfNdshpbGx6sW+e+xgWCbqk2nOGy
nqimPtpj9hLiGVqNJJCb8nt4I7jIgRyyaFaCpHyVzS4wJQClKFC3jn0ETm1Whc70QS0Ah4aVcOBh
UKvm91xu6uCVxuaQS5rWKy2xT9S2WDmg3QImQfQAphX/Jl47chWQHL6UNDOe5n9Ar9MAyOZSba/p
QnLPlSV0H90JmqvRxqLzbyATVrMdObPNxle00TRSlwSwQM1rvqqUFlP9tlYqUiGgwmcszb45Fssg
PArfuEAqECE8Dnrp3AxyHO2gId2YE3Vc1t1Vubqo7HtXWjPOx13hNgBL9XD2NQPli5fPXrxtBJNv
CLYLjvYFgzMKZMafDunYWvzzx+PG/bkGRFU/F8afA1SYFmj7R3u7cbZcjWlX5MtZtSoLfYm+8s9M
oXNSXSVmViNHSOcFpzrpGAuBlekJjz4dEdtA+KR9V+6qkfXaYMJ6v/cv/Xvz/r3p23v/NLz3/fDe
m657tYbV5hdUybSnjVBegazyHLDybTFZJ+gla91KZBG+xaQ8dAWLMvFpDkeEXLx5Ya9E2983lwtl
06Vs3mCvnGU/FrMbtYU0Ju2+yG9IorLZSEHqWafwEZrX0l5CbOtaEkRQ1eM0mO7bvyZoz+6uxEfp
PFTYltu1N3Pd4MMRRIl6lTDqNGKNtKp3JrkNm0RXXvXXFJNXZ6H0L8ZDF7PKsPL50/GT589HT8WL
Wr2O6eq+xNx7IP7hTd9mcUGyEfq2wAZdlbPL3JwiUSjgjIHaXu3jpmSz+KoCCukcPn/+7PdPnutb
//h+9JfoQ/QwGkZfRV9H30Qf1tGHRfThev8E/zeJPqxipcCJYKXBoMoKTx44405jPCjnFQhi8/Iy
T7hG2jl888Phi29f/vBGjF9tmwFBTQdEq7Mx3fOOp0V1QeYwA8k1l6zif4WjVv/H4w/DDx/Sb47+
dXj8AG+wochhat9X0/ZP10syF7NZfpahxOQAeCRajGqpRAdbloKxaoiti2tuSo0tHsa1KBDeGAZ0
kE+q5bYr0JgmEhWYlMCDExDCsp+UM7yZG6bSFd2ry01ptXTv1PG1StRIyln2xaB0YlY1GcU2gBTe
tCFIxB79CCj5l+OHXqpZ9/p8vC7Hp5XGfy/KptNszZb64uHnT1H7FFB9ImX6ioLXZy6Xp6rxveof
JWZCtezpssoRQzUUqPVPz558q+o5rLpa8rBgVY3JZd+nKh6nwF0bOLsiUUFchGJ7jvYa0OCsOBnQ
2xZKY/3PqIGcuC9L7aqA4Qdj4vHhA9p4PHTJlNoYnK3KzTI58OhStxQ/RMdBwo1bPtD49jhkNFwB
+whNtd0206Gtp62LXBoqu52Qn01LQdG4hYjIDNoQEr+rE1OwN4eUpKZDTiTJDR8+dBtPLcuEJxsg
Hj91pOIDsPZIoYQ3myh/Yd4GbZdgLLRbdvhNlctlJ7p8cTgqsj4dY6O0RHvsegNLvbjM7UVr7Hml
ETRMkUd/u+e2aVnwo1vAdAlFzA+3kAWG8gfiX5bWJLvI4eTGeQprwuxGgHSANnTbZbunbmwr5QDa
qZEUGPbWKug75FyUoB1iPUs0FNNxmvp9BcyoC8InkQJV6blxlBiatnYUhKYdruM1pAMPGVS3tboo
+1ikT6XjhvQDZjram1r0raJxTXqKVTgp+J3ubOX9lawUTX+jexWG9/ja2HsrQk/RLvJ6fDJjWnAk
iQ/V/eTD9EFKf988SKNkcB83WLMcHaeGFmuhZd0kCGS005zMa7IJqhgeupq7kuwxr9iZAhb4ssgt
nfThGs1kdAbTqpgXM9i1oRap+jYLTjOM+zBIVlr4c8tZ2lAag77ExZ4nswIgdc3I2XSJRbV6EsTJ
rOpFVxPsTPJYE8/wLLXDKRAxx1UnHBqRW+xF9TRYUI4/2qkGR1iw8UzJ5d3L6IlwZ2mLJHSnVNFg
44i3KlwpvU2IzZAZ5G4mbwKquN8pxKTeYUoPQGxX9aAUsG5x4bIOf63dZhhzAm12BgvmZJpF10O6
Xbo23aaeJZoYkeEnfc69DLd0zWoDWJzEW0b7Kd9VOO0pbZhrzNZyqWbrEyzkjHT2RqoAnFt/jJUW
dJuJXnHqpICENsZQSlUPnzbFx8ZSuwcSjLHulhTK+LgnZoWJeiPKZseqqR75QLfVc1Jh4z0qz0oa
zkSJvjh3MzX8AZPH5nNnbu54U6D1k5v5Us3syZ+8649lMbmY6YSxsJWUzGtxKpEFNt136KkeUPPY
sHXFc7kIzLWlildeW8rYU2vhf3jax+G79ywtEy7tqWUqHatJ3j2VYZfc9jCbYVdnM1TWk9Fe1DPz
uvMmqi6QrYVtVUXJCllGg2ilYm+yKNrslWMbH9jTg/UGnvRmd4/fYciGpijsauVvNnIBS39w/6Mr
PrpoxoOuJZRROEzbRKQ2wLRhqSAU1D9fYNtJR134/AtuXFhHMW61LO1DoWMfcMlNJwsIKYuVafik
74xNi2iLRBFtnZilMy1vKSnQadWhFdtwHP1pTotr8V3kTFx5dAKsGSMZUPZ2tIAg5nmF2xbpKs3m
wmx/ZGu9ogdRN+qKb3lLyKWgipkzGqMe7Ptnb948+f2zN3XDlfNyNmURJV9cFisQxoJaPLIJ0GUo
ny02/bTeIHvNBVxAfP5JB71igXnig95ECFnYsqQOCJa9hWnKNJ/5jXTqOvfgTZbn+NXqcS6Tmq8o
0xDq8AdoQ7Gqm2RxqQEr33VW5jj1D9Su1OPdC4SyHM28xrvPHu3Dv98Ouz+7bfR1cOCmi3u+BVGQ
R14EOoMRu042W+XZ9Oa2da8OvoChPBruWqHLy0+M3qfFCjbAcnWjMJFuR8Wz94dvQqhwI5MpI9GN
bQNxVZC6MuClh7skf8ZTBpt/vHv93N0RmQEpJh5zeZCajqCtY4uHktRd2o6gcrIAucfn9aQHkfIc
0RTlEoCBmI+S7h0wFJd1D+dQK7hjWdfptq+ye9OL+4oYTPnUiBTW7o0VMs6if55zG9l0xQeDxyHT
ZwQTXehI09Rt0ZYVp83ttjR7b4oihu+MGOZO9rYa91cctHuVsBFZqBAJKA1UMilj3uUtAtksp0D9
Qh5IFDHp11p1VEgoXM8+Xt/wpqppVvt9RBSJMOp/HWHTqUtBIB0QBeHgCIBjXxvSqAjBqmrY3Zom
pNuIhg1amGNlCw0cxUuEDbY1onhU+Si+ir2hU1ltkUQSM68QZF5AmIiFq0lgvRqRmbtzOrLldTGt
2GrXh1KF5ltslycKCCVftxtKebZulkgvEHDfooew7N2so8kC/fagfE2CxYSWxkDMNRfR+jcsMuDQ
G1ACjqueiwQXsHuph6dvrk6f5xeutULtaM7JyCsVIlIwzg5SAdxzUXbyz1iC7KM8pizwedeKkpOb
SLwa4FzpZYhASoGFACNAa3plSZ/V/KDIHISnlxZh7PtQLbTPAPJP4IYTAAPX4Am6cRnaoFkNGmQu
1QyRTHnflXxxjgS/ddWR+iLML586geGcaRxk02ktLTcf3FwPD2Jl5E6EdjC9yJ9RVauBJDTNLQME
10pMSxXIr2u/c8HWIBv1+IWy/QmvU5Et/j68YIVqOG08Dty3lG21krUnrEbk9d3EHLZiKhyn7ea3
ODwcfuAMj17r5WZFikhRs9ZlE+oJSsZ2lnK+zlWbzWR00GOaHR3UGByWlJUSUSYYgzeQ4fIBuiBO
YoACby8Kd4EVZ4sSDS/xxApMFgOC0M/ZVXZTsV14oo5h5akroyyg7OwG9zRy58/n2WJdTBqsmUVh
BJD0SIOAJzrcswR83JLgGwM5u+mGrwy8ReRttnyUJJvpKYZ8EIQn2eJmDoP8BrjznzaV6tLlno7u
krMeyxSkbQE+TmdZQKyjifKuC7GgdRlBReI0RAncL6zo+1TJFlFBdBCSWGcYP8VfQyhaEIdDpTuV
GDgMrCG6ANVTSXuNNX+PHeW5p9QhTrzOaIOESxhrUQugaBeIiCYES7eBDObvIrQOMU5JRl8xh8Fk
tkEyS6NSB5zmYICOuLUxJttaIMIW4rTmHiREWgvSsRcBm6agHBg1hC0w+WSUq/CIW1TymwV6fSy4
MllRXC54HHKRYms/N4um8W8WjAFltzq7IZRQQ1sHzc2Gh42RfW0PSagwjNNPgQUdQDKBPo4+Hzpn
tVmeLTbLsNaU2eHihkZnJZwIzjIHvuIgjigJnBbXKJ2QEnp289lnn23J5yEoT1uDay91pkM60K6z
9aZSx0w6HFSjfeby++TnhNeFs8qR0SxRFoTh2SwnyT2N3lBjSietdcN1A/w95VsIM3RSlhfA3qb9
E0Aj+RnSm/P1fLaH/vuT8/7jfgUN9j8fPB4cWG3Y/x492j/gh4PfPlIv/7SZR5xdwEVxx/Ww5RFu
u4/CqZFtAqaDDrCCvDQQI9X1gS4Xuh86bVXRTW77Pde3/b2DwSMVlKYaGihRW9fv80bZ1299G1ir
cOye1ye+XDJxytQBgRLcp7Mpxh2PaKdlXhHbwZMlsjJ0RKmM6YX8Nfv9nrCpAP73aoMIjdhRXTDh
emoLfkn1N21DtApazdaWGOwJWIQnPepfwpZwPZ9RZFsBL1Lp5uNgunmrrx7LHno47r4eYnx0NXQn
5WYA7r89xERKZbkWKEbRD0/fGNaTDpAxsmYZOSxf27StD6et998/v1VzymtAt2Gf4U9PLa1KQNWm
ffOwqH9uZ4ODswwvIo33AurFEjlU+i7hYrlALkzYWYPAGlLYifYNV1FdaWcrl7r9VaS1V2n7/oqj
0uqmNlUoGY60BlEhhzREELoBio0FHAFgNjl7jgtwotDVI2kC2OSKozKn4QAZFvAEOMHj2NFINgYL
aVRGhREmaQGtd+mFc14E5iWFvAtKpQRFgTyb+hXphqoHZxTVjQyAfPYXmznm3wjkY/oR5BgDWc/0
78cFmpOdjtjsiKmjrlj3D8cY7buEwgBELAGbFAH7HrmXoVzHhkGar4vD7XZzj25+vYTdGgQXtsOj
e1lGRuofXy574n6GKS3mbO5Y1Q45OI+KlJNisSY1qqpJQw85HmPyDbapRZQxSfl2ToP7T+n9OjeO
WxFbPg3Edvrbl2+fPH+eWscerCAsYl6djeJYzsS18w/1SFoCFV2O/O3sfVRKVQExsIjONsDZI7qt
pHOtlgunqJc9yTGvcYSJnL757JuOx+2l9/4cA1d31emlPyvP2GS1OgsZ7/Vqp4iaxIDtYwbCqP8i
7uzM/mubKV7dkakLGQbQdW/t7u4P+U1gOyP51RX666uEQTETL4sFygbVJ0hUc2Nt6/rbVo4DsHg7
hPVGeIxxvG+RvbOTBZ3uau6K1I9vroRaIRASl225eVD9N2VVUkwdeHohpRfzXDO8070ML1ZDQ10A
ulTxkXaZ6sv91oveqdFO7O45fVZDVTOGOOaGB/eZhju0+im2Bbry4ikRky8UM1xDi/wKGYYLJ9Bi
M5zwMV/nPw9UaOMTgap9v+WE1rT1zoFd0hn31PMGR4LUb9gzatA5pJMByhJs50wKakvO0Y5VqlkQ
7DnOM6m1NhSPgb5AY54uNHDkCGCIRIX+67ByEy1Sxpplkss6wqTxULUKik3deX2pDR8DZYSSCthl
jq6V9sF4eHHSgYPh8XFoCI7rGsMtKQYsPdYlJhBsm1wsYExS0DpycaYEK02NhTeZU/SE6ljqzJq6
OjRFLDo5ikDq3ZmjILYbasate/R/GSHu/luEu1sEUqJ7I5d6bANe72YTiYPIooX4mqtvuUXV5Vpu
TP2ILY33jP9OY7fsMAfm4soa/CdALVlcaTccNoA+aMDqIkI3XtwrN5M13ueyfH1JoVwvC7xpsRyA
guaoqg++ZtIy6ECJK2ndkuG03MFMT45RDu/DqnGTM/gOupvdjNOUKnNgjKpeydUyTbxr7qEv5Jrt
0trtx3rcU0MwKguKrgBAl3OaYaP5xKqcdT917/WEp3yQevPPL6KDwWPyG5E5KtHKF9PAkRc4nOTp
0EupAWG1UrwOzIoFZ1+vPSHD/c/w1qcEzJ5AOfI/lpxclPdqg07JpeqsUN16baHoxLnoBoOavRTX
0GIGmifFIcM4Q3jKJtGyPswifT2pLxzi3c3kbJxzH2nInl/86rVHUKLKeuN9SmZ7KyCS7AQjMwPX
wiCHmDkFIC6vKlrLOAXsF4QIIvMwOP7WbBh2DO5te9bgGifG9pnP2W5Pgt0m9A670YPWPbKL+tbP
RsqXRUPV82DydCX+WVl8JDq1gyy8MxeQEvJD0oIWyoiwKlfrVtVmlX/c5Jifu+Stp7JiSUqjnJFD
heEv0BYak3egqo/v/ZX2z+T+YLBQjUNHk4XvFzY5L4tJ3ryJ2ZkFi5mfwoyPp2ipKN5o3734Hg/9
sCbgdeppVzYLstxR9jog2iBMtJk8xyl4ZYVMccKDwMQjZ7c8nX0zE6ypoxwiUaLy0LpY4IOTo5fE
U4TZeIuaq4aaSO48rasCdt9160pCGg7nnErIZhmQCPDAH3gdsgoiglDRCWtWA0BbdEqlYkxrdVMc
oglNp3AcVbIV2+6t4sbtH8uibhF5tJar2HIR6waN5nT7JpycZ7q3Lfhr2IPGroXlgzGIXNVOQyii
QLyRkE0cObvYMUOCoX5UsIC6+ItdaZ61ewwhHUxLdd8S5CbxGGbPt7tObxNc6G8iKwXFpDj9bNQo
nDTB6zR+u/34Dj0FMrzvFoyJjVPOzD3XmKwmjIn5GJY2GnkBuCdwLqkZCAaveZ6XZ88kF41E1vGC
tHV0TyoJGv2QcPqifDfXZNqpt1jpuzGBTdV3EjT5ddlqGaOyyDA8BRc2EIAZhlpydFMJI4TWwaJr
mbrqLTEjs8zBUtpgKHUZCIYVbXArQQPIEsXSdi9QyBhFDmLI6hq5cZcM2cWwnr87tRERo8hCSUPN
3DKu52tD1fFIlZTIddzkKAq5k8DXcrk22bSbFEC6GF46VkORc3Snmr6c/C84PVJPTRaNo39Jo7C6
ZI8qaODS9bCy9ncKCnQCYiHKJFrUN7MG+6D01eDxZtrCbXfRk6NBTx0a2NFqidvK48QC6EGDcUr4
X8zWU2diR6Bu4iMdsuBWrSnM92x09vRge7drrME3ji+X7BHfYhQ7ABkFsl+QyIGoxvnS6Q4Hr2hT
RwfDIOPkGRvZFQ5fPWssC7O6Y1nMM8nhQPR3SwRy6WTEgKPubw4CJ6oeE78wX/5oD+V1ibmAEt3Q
DZlVC2MDkbxE2dn2yQS5tZiW896za8AZ7Yp4NKDsjzAfSauvYY7bpTQwICfGN2wzwd3X7E1MH9ti
Iy3kWkDx5i338iz/nulUZsBz17B38n5EZsO0CTzFQJgDCof5AuS3QFgE1QglosV0xsjN9Mtnz599
DyLJ+MXLb58FI5pbF81qZ0hU7XSrAvu/lgC5u6ay8URu94xix2HG6LgsNSszHm4QwwvwrdUoSmKl
+Y97MZlU4601oO90VkzwJjDeLGSTjikfMdspxfVlHPOVHhXDy6CxaRgbIRNXeiTDp3F2mRUzDBEW
aqpYoBoDm8MaGJdyXlR014y/xZ495ggLF/wk1+7Tustt2mmKTqQiXiiTJDq/mB+0ea1CAUcGXpCP
nRKNctvIGuihHjWDeAw/dMKpDaikmj3/IGOHjeB75yPbnjabzSw3KtJVsNTmXQtNTXrW2/Sv4uVL
IDhOHnNxdYQvj+tcAZtVp/KzGuhpg2PyEVZBJc2B4/Y+HVzkN74vFAzQu8cY4Lu6A8tMxadGBQar
HqsJXsuCsCtaRxR58rhSLhOP4ByboVB7kq+vcthCdYQq5XC5J7Etz+Gwcok5UfFITVo0TihHt73c
RsHV1T0y9kQq0kW8VnGzc3YkPOGLOvhelZhjB1jqqsSo/cPEWORo6z0v8tADtL/5Sz+lpzcP6O/g
wTfw98+Pej+pQESKWCxDP1itWY+M+u60XGp3N4oXaXtmtN3GTkDmicO5QYIGjh5EChgFh5lmYTi8
9hA6d39E8yyYA4TAvqEehuy+sLBSHtdJtJYPkKZPcneKDIQTz5kdyAqh5j5CF++4jePno+FvjvlG
++g3XvKLPTm/TcrZZu6a1k/2e5OD3uRRb/K4N/m8N/mid/3r3uRLlOuxB7cZzPx0P1Y37b5NP8qI
DD5V7fYodVvCPisUOqdaq5f47CmnMTjkPrYdf/P+MKA+Pl3IQAXxTEcHTcoFaAsV9t805OLQPNlQ
Bt+tncJRIzupRgdpWBmgyWsg25QSVvz4Rs6FjEDz/hbQGE1ioy7bKu3dEJpRNAeHIq2k1URdNxkY
tNrTbzPqw19uDmR396FpXm0uzSooker++llMAUg/J5jfxAHyljQs5Vpnoc+nYr+5yid5cYlKUSB3
WbSTfQ+SucWSBhYDFss4XhS7WZAi3F8SpPcbsEvrBZsM5i76lOvAk9G2kUYj91PnB1zjruLOt+Ye
tmkGHRbOpqrVWnFrqAkMQ6HkkwAnu43lXxKn0deN6kQWHciFke7O0Rca9utpSWakg8EAXVvOs2WF
F5lX2QK/NjRUrXl/n5MWb53bN6nk2CgjgX2khwmSV8XZ+bqhLVS2FWtSm7Feb10u+zOQR2bGbQbt
BcWT8qqY5A0tJSXeWkF3ql4vUm/gTLqaA34ifU4gV5y0oSXjZ0oQgThFF8mSD7Ty/HluN5d70UWe
o6nfje8NEDbQ9gOzi6W22pzTnXTANcGjx8u0wez6totzT5ShUlTUoZ3wzvh9gG+E6uPJFPcRzCI5
xdtjti13vIo5p57MqDpOIznXbdUtxqHOfG0Mw95HnjCDfkw/HsTRsK1xotNdW/42bm1LDqu7tva0
vTV1Xt61ub+2N2cfeHdt8rP2Js2JetcGX7c3qM7bW5ujuOL7zVKzI36p+4DWRoML8Wfu4zjug8ZF
ZMHoqDba4FQOfBTZrMRDIPrucRhU7bfHfgY1SB4RJM95cXxBP/7QDhYrQtrgaRcvbrH5h2OmYsuG
p20hHV8/EuYkQW1JiC94upPAHm8EiOGOsg93bn5sP+3V3d9IZtMnaVS0o0nGhKPsJuaLjgBse9yp
B1kyn/5UzptejC3FUQJdqxB72qBrzUaRFPlhnXJsnyyq9PE9eFpnusejtXZq7DlhWapzdHYncWNI
YoRVlXYdIwMYk8MeXVyR0EFlTjcz/o7QFqd2mMHznEMvXWVkkEziCbkH6YMOCGS2dyEKIaXdxDTP
ZtpuhS5aKZUFAg/ooAMK5bdYR33+TO5cKGdZjRhPW1w/2coWn8RbOUOBEMZhiVH2hZKRqMoFK4rk
ctfSnlSlAjA6hT5ImVIg/L+89kRdkUS3vyOZlpOGKxKkxp0vSLabJdSEPnTAsR3bNmhGT97QABPe
CT0TL8rf3bzNzjA9pz6quJHJpWKT+6zHRrgwJmXFPp6orJtkxe/f5NDSwauRfEaKqUa4qFBcixFF
4qU04PVGsYg9tyUCN5+5dQK9XU36XBaOW/sulomaFYmpBr2usUxc3zzsqiO8J9CmHIFNqlF4Fpus
8Nn29vqd4AmDhBkPXH063A3WLfqfZt2PPb6w9mc3zc8dtD4740LdyvwC09agEro7qOZ66ZeAdidR
u1mFJVklw8sowC/CK4nv8iIJ7mt9gJ0wnHgae4336/dnWhKLv6l/1LJX6CPFRBz5CaEDM9JVp4lu
yC6yqnY4AIieXfGwdeluQQbl5AO/nd1xwTqvU6K0tBMWJzXKVLHdZrGO1Ljl+5j3LbuL0OZFZRs6
wqBquq1ho1qZkj9RfGhdGNBiWQwMW22zZTgPAFYyIKDtOHzBFySYGhhbhoyt0MR9aAVrJ/gJIg/y
zu3ZRSegkrGWAJoz6+tyXLNhdU0SuHQvph9uqcCp7cIAh7rUxbvfeicimw4dY4B6MWfztu786yWV
voQLGmuDUNfIrRQ7cwv8RL5FBo09C6U+yteUXVHdNBtu3XOGn9YqtR5+yc97m+BDhers2q4rd+YJ
j7anEZ7eXRPx7+6QbuvmmAvgH9/ZBYhvOsspi20l4qiKe4LGkPOSNOanpefwrKam2sr27Zbrk2Ya
CuDOkqRNudD2sXLk5VW7wBxg2XZ9Ihttt6N5SDr8uVc8vgvoxS7Y4xhyNeuxfJFIC+kdlFifUsHi
O1UNm0yDxNnKjp6FRmRDP+zZu9fPh8ohGTNkVnDUvxgs8jXGYHuIzlTkmLxeATd8OC2qtfXObek1
Ul5BrPvdu8Nvh9HpdH/65cnpo/709OTX/f3HB/v930wfH/RPvswnp/lvf51l08ypLxdp0aODL+x4
brjDRX8oYLBmd7A+v4FNZrqZ5UNRlVifnqN921PZQp7QuoXBLi+aigAI2Pv+flOBb4HkoMT+/uM+
jObRl/A4/Pzx8ODz6ME+VIuS71HTA+9fwmaGxWz741ccX6HIK270HVHwVLV3ACiKDj4ffv7l8PPf
OO3B+xflpbTXZuekbEGUl+CntwYxeV1dy4d4GKPhg18WCsH/9eWkDi0T4WL3Fppqlf4GL4inKh/E
pSOANQQ9pOj006MY8w/tGEOGtS3OHduLBv+Mrqcs9xU1vaixqqjw63Z3nL8aYUZZDX/FxyqJuLjm
khaRgimjlOWU3IIPc/cMtbT8fpzuhhmrCdKhhdMVOwFqoRtS1/i5jcnW1c4tTPaxjm4qRsNUEdQo
bAOqkQIAcfSH6dgZm1f3uLFlOVk0NY4lx3rXdxuWqsdNTZME39TwXLJhc9buqwnu92Ss6/ZBbRwH
YvRIdaut+9HBPv27QwKw8RiDpnCmOCqn39i5xS0o3ezixqK4gvaAZ1D2PVRzw3YwgQPEu7dPjREx
apUz1C3cgYlylDNllxKjOWBf/ovgv6H8l0bJ0YP+MT0N7gOfcRKV161X6tfqUoEt3bxIZ02Zz7mb
H9HRpnZ1voeXaNiCCH+6JAWKx7hJPSc3thXRC5B3+yzqUTiLOjpnLKbZiujnbO5mUlfJQUPxdK4m
KLG0Z/TjHae9zCq/ds06u9aOWC6imIw4h920RlputCFxHu5/bUfPMZGGNLGZsDwmHE99Z0SSuJZc
9diJ2VXZ4Z9asbJUFQu19ZGrT7Jv+VxwHD4hUdcEI2w2td2mQ0Ja9cm6MWjcIczXEJ0tslt23jiJ
LYpA7EgZ1VsuGBYSJGMYxjPaP3YCKsM519fiS2seqoLbuu5ZOw/Li1omP10SqH2OxkTn2WXOyZRU
9Cqgpc+s0N04o0eMBBQcnHhL6vpIt+osF6ra4ZVh7oQ4CsnRsclXT29qrJXeavE+gqqDKd5sUUPq
4sj9TvO9QsU2gKVKmpujjnH3l6xmR4ELrGNvySMUcnRQniuNRwbt0TLsNEgO2mOmSRvoXgLNluS6
6Pnt6EZaHXawquutQ2/aXXWcii+I/FCTzJt1+ETpOi9wbcupqFndR1rgQG/z6qyhK13etN+st+Pd
vTq7HVDN6uVAuwEtZdOgSBZpsB2kjXz/y/6j376FjXz/i+HBweCL3/7m14+//E/BCrJh3X5gnHiG
dSsslWTL1diRSXYeEEUaaCMJcU/yuGHNAyRM4dRfI3n7irQaqS93IPVGgBUTxdM+e6pRc2m6a+rM
+KvnyuUOrTBAnhATjHsVqbTg79d1D07FKXr2iuqZOUNfro+P3v+HX/3qV+PlDaoOMF34Oj8pFh8f
v//r//irX1GgzM0Jhic9zYoZGqKifzR6E1ToeFAunBzuZMqhmoASq8tiIu5qwju5G/OL3CbVTwyk
iXpp4fBcdpxNJbsmS0dKyKWdVVHdCrkfxwTtwpzPMfQlKfTIZV1kLvo+sNrj5OMELMik83ydXWar
URfjTXSN2JSRkDOKyYoWygHxrEddVbHbUymI2DbG+N9yhJTRUYyYY1UayJXxsSmCmWRH3QodubnM
XzJlgIKeOdQFqn3qKMXkDf8oUzbPVhcDoAw8pth4Qw14cYYpl/jJ2h7p94CxoKec7swAAOu6jDiC
lF7ONmfFYp4tsjNGN79IYoVvRnduXwvtsYgVeVOSwyKlymhhIla6PZSvzslAhpJ2KkPnf3CscDD+
WoYui7ieQA7KJbinSiYeYcCJzZoNW7KommWUImbqmAOdk3MVpXsmTPWvUYfWE/MiOgrZIiIgoTFy
t+BmrHBIfqIjTciDtzlSNgjI36FsHV898PQr5WzKqc6gDhzy1lccCtYLvwsw5HDc4/xpwWB2fntJ
MOxbG9QSg5YypXj27jZkNDiBxVmlMAGN9HaeVXRDwu8x/7Tdc+woRNF2XJOL3DCxlCvRXYlBtI6k
yvML+wggnAod30fhGhgg3BKAg2Ums7KyVSIUFTlQ0LG1mmJkMXJKnBBti7lVbT18shWHUMl8KZXB
UUyzZSlG9iS/XrWZA/O4sTrnSR5XOfDREbC27huJAOVx+Vc44OgNcyM7nZcgghV9rAoaL/IrxlBi
ZiKtd4pnkqRrsToqi2Gw4YxVfViwfk63LwfkWh+KZsyBlf4+pXIqmigVFSZg8VnFXlllM1RNDaW8
+s1feWuGj5LCk80Rq7W5Ga0cCGSTW+VqWaCVmWgiydIs+ipKHrv52FSK0FlxourDLwxUQIlGKcgA
t+dKmFY9IO6PG9yw3frBony8NQWlfaXuyOaVc5cb49d4qIdrdrZ4Box4hXeswB7Oy8XjODRgOLLi
pvOYgFdFrfvYGI5dxeoGmzm4gkUtn36ysr7HeE1SDR8+NLMYy2mrWpaLKidDORpxQknh0RZwpIeW
8KjS1GYBotSq8mxFWq3zVX466j5cZVcPkw9XD9IuRZHn5rUGa16X2e5VD9Es9aFk7KDu7cjl9WlT
NU+yqe4BBh890L8cnqv4wFgWcuIzBsOC/S+DBhngM5ABWBbp+mB1NH/yG1OdKEmH2NwKtRF24oga
CD+T15xSyJwldeY3TV1zkkOBaZfgWXwEg0YGs3JxhgI53o6s1qtskp9kkwv6hVJ4kbPZJf5Gnj8r
J6FYWPo43hABi/tDdg2Qinx9LrkqEmjb24avWJ1flCBW8Hh/QAyuJM13UXJeO8/uaznAOCVcPllf
ebpr7P9qoBpAjJG609MGiaoH71w9cWIbw69JEjavh8NLv/+15GgDZPQc/p52Pn7unE02C8z+UK0/
fvH+//nf+WwCghu5QdxwVKTNYiGR1Kv1tA8cDUWvqKsqwllmfYPKXngWnSRxv/H4dIMBasdjxfuy
k6qcwcSN+bc+oChKUC/w8NJ0tIFTzB6Hk1pli+o0J+U1CuvAAaVfdegixheZTqj4WJV11vzyRrJ4
USgkvLRXab3waMchGsqTPwm571EkVTX+6ATjCnA3krqxIKNvFJU3J6ySKtGOroreQvmnWSV+JM5y
UaF2K1UngSY4/g0cnTYzWBwG5ccD1VIwBGPHsciWxG1BzrMX3eQUoxDIcB1jHGYaNQBra9feQb+q
w4SxwTlJRxpLJoKJU1jm4il+EVAXJZ4rJJ8Pri0OGIazQhN7Wlwj2czxaogENHkRiLjnGJygG2cO
IKgUtygikmYCAFX3EjoNiUDQ2JoJaQMYqGW2m8x0REDbtwKmUJktTdCVIh6P1YyNq4tiOR7DHhdK
NStoRqdSKLa0wmBT95Y5FLcLb98tCafQIPmLuLmFsU7j6YpHZImKsCNP0c/D7wU/fAsfGjvSNVv6
Qg3JdEpZM4ofgauqOtZBYrOE9zbVSLaqgYLUSj1D1OZPBwtbajnKgsfGnpfZ1DoGNE8bQu8Mvjse
07SNYQsN5DX18o6yWopF68HYEDAdMUj8O81wkcAOp8J8C7Te5jIjgHFdaOitmWI+oIaARxdahGqR
fU+fU2dsdGOAPsqsBRh5gRTtkDHcNzar5gF1iAzwwDA/9e/aohdVxE/xQV3gGRaYoSl9jYGB5mP8
AFR17W9mLp9OpH4vonlhFLhVbop8NtWs9TtRWrh8yg1rHsSMiQ5pEYVbyiOCzWLNEUDqeIjhI0JU
WzbKMEzqNq4cEsbX4jUjGwCJXt31VYHHoQFIF9lMb9/dUEfKAcZEVUTpTroGoXSz1vvIQAAO618b
UGyN0sGz3gtqNYRU1W+hqTHsVRJL29hcNHJfvszHdiZZZRlYUJJswD5Pg0uJXMelQ7cd0eC61Sw1
i186JtjGbMAWh2LV6sIDu2iiQWnsBtmPnC+7wYblI7Ka2UzYTWUzSsVlfdS1jUfV2W1IXml7VHZq
1RUxE5WbWCpbLbu5WFGLLKSgQrNmV/LC0WdtFlerbBktSzwhwzIwqWtZ0Qu180gWSkQLRW/9J3Cu
sOR104FtdKxf4iZufsUOQM1mSKZFOFngiXigpTA0a0xCjYi4hjcjgYNN8GQVfKkibZnzFZ/7xhpF
yf1Q/278rWKBZ5NkH427Xr59NowOyVUGjo8nMysZMJpCsqZZXD+b0+N14VSxnGWUoHyRrSkRz/DD
4sOiG4ZBOAUe4JJul6/zZykGU6aRSUTe+jWYDsKsqxvk99Dm66QEyj0EHrhabZbrtCX0fUPjw+3w
Pnv9+uXrIYjCnC66AXktyFo5eAU8kT2ETZjtqDAmbu5wh7vEUFYxpwMYHIZw0krrPg8W/SnyRLni
SeKxWV9Hx6m619MkagLjTSVSgMtPGtiEZOMzLMUne2nyO9YRfIJG7VbfgCRfbzLPqtIOX19Pa8dY
x3NAIqU74YnBIk0TU4PS9b31UCrpKrfjQQ1g1O3uMIZrWgt49yEDaRrJdRuN3Woo7xYq9+abDaXQ
aJgBdwDcxUbXrbguaa2wtDuvwYYbt7RqXS5vswWKdNYu8yQceFVJW3r7XELtXDN+lSymRsOeEDZo
r2fNsxRA5wL+OLB2GbYLcJ0RsvnJNIuuhyjjXg9OV5i19XR8NitPgGWxJtGckW2jGrohlKaDe+vA
hsZokGr3t2RzYat6BMGk6GHNJvkl9dBqcGZUu1aqDf7si7KuVIVlBoqNebeZ0O7AcEmnpG9X17ir
c7ZQaEhC7t5BL8p0thcWizzEnZflBUpYcDIP4W65KtclHMYJNbfBGWnHtK9fHD7KkKbbOvaYofgH
oiPdAiv7BqLutW7lhKkpA7CIDj3ycqDeWuYF+eRCL7OxTs9SjQnEsVKFOzeWpzgDZMdgVs6YFL4S
2AN/r0Gus3+eeIYNTClLlOT/OVtVfhQQwbBuWR3oArkq+IRgi7Uux9zBQ1d6QpibOxLS41KwBuFP
ouFLd4ErUdUtlCn07LAmvKn1kW/wrtoMG+3aWLeeg/JUg3R+J2isyBZ1imQWS0TVcQ7hWyp5UARu
w7gVuebdldwx37JjCzp1SMJSh5Eu7sdyiamaoOYpMGalkSOVgA76ZFzCIpcNFMyYpNLha/s6zGsi
8RlMz5QXEQ/gVPLkQdr5+Ov3/9G6++AbGNwCN+ti9vHL9//vfboBeQe/CjLWRgWZLgVr/WRzdoa2
T5btFXmzW/cVHU2sNDjRWVIIKSnzRpJLdYSqDum1RVeqANkUw/bV+R1Hx3+iIFEhXKzA+fK3GriF
OptgfH3cCN6e5xEZyKlEZJnaQTi9bx4hVpTWEfjApuIoQ/gJ7/s1YqAxmm04s6hMVujJhszeIG+V
0y3VihX1a7xl4MwobLR0lVXQDCkh5RIFYBBr/hl1iZtShfy2QB8jir6cVZR4VGIUQv1vcYYOFSz5
atBxRqj0S0T9+lwMx9IFgZ1Yz8bS4S3ZQlFhtJmJrEIse70gb7zZDYUNy+cnOUVplPTFUAU6phzm
+dSEmebo0Zks7/U5piLQJ/RqGH1Y/LkH//uJd8zFXwcRzReHEl5fldQqIn0xlesCjlwGHeLmbsGI
1zvQuDbmolnRM20X7OiYNmyvVUXJAO2d1+OnMLIETpz8yxhEp6nARfH1UcFYmFbIJAzfWX0AfCBf
U+a/JaByjslqlsrsHhMbng2oHi9eozGYFqenkhdNWX7YrY6iMa6zbAlc7BRPw85MOj4SY/KIaJx1
ZebAGRrGQiNUOZGExdaNzYY9K1g7wbVSoa42gDRpPZUyOJhswXHmgJZZSc+sQKJGkTsBRfuCCbyJ
unJ5TCWBVq6InOgXgD0YDIB0GE0n6KrroY5Lj6J9y/sD1d/WeUPy8qiSFvSS2VL1rPrs9riwI8NL
9Xp6pxM4WRnnVI50quBhT4n8cjxxOz4ioNycGUWP40zki808X2F6kVqF4XFdjpLmH0BltM+EYQTC
mTBUD+wQ9trQutbAT40N9P0GbL9ULBEWrVwMOTiZtKUfIj8bdw9IupsFHLhQMAf+jWelSjnY2Ojq
2Qd08s9kKngQFZaTjNCDm1HJRjpW7UcH7M8Jq8M/vtmr1pmtIc8v9FebwwcHXwyhXUwT35pc0oPj
wcHQcytk+HFKvrTXscPSefm2swm9gl/7ScAwxfZlMd2ACMMsR6wTa7sGbSt2DknTBlckCMhdDOgT
DZ2BysQtFjaEmJf0E2AG5Mxo9htq80TvOczwObCdwEKuXRlFGhbeCFslAIzThYFIgETW6KDjMA0Q
3ceKidq4wDsl4INweq+ljdd+abru0b6khyevGYTHfILJcs7UM3bDQns2KHcUEw5+wv/9Ff/3dewl
q2D3KxH2Zq15sqk/oiakURz1g2hmEwSVUKQQ2AT0/H9H3ziUoTfz9Wkmn3AqqwbD0/lXG+tIAGi7
gD5PClUY9gS3QcD52SY7sxI/sjNYFDNhA+Mni3hoFB+5dVky9AJlv160zjjLH2WfJZLVKUgzNNJS
AZ/FZLNG4wJRLk5yHp2ozZOxPJQ4f7COSVejfPbo92Sxtl7Zjv0NJMF+/MYf788Bh3/VMk5wOJcj
kCtiBp35djgNSw3GcD0m3+RC01yuhYBAKYGovqGor6qZfd/YzM7KBIBED+IU6HXD3oj3qVtqI+0f
4IcKiRkzUTjcj/2DfPz9VLvmowGhWio8hMAnBvCISuAIaYgKgJZVqOzfsKRa42pl1/sO445JF4lN
IyFIJ38FlJE9rFWu77VVRzSiV3rwUWob73FzIwWblxyrQ/EwMDN7hElxxd8aRitGvOa8aH0eWT/U
MRHNMaxDolMaHoRbMWDk9ySnHu0hUC57APApZiDEWPS1TUy8tIvKO0DQwqRA98hrsnW5qh7S02Ja
aZGymJL37W/2Ea9fwP8QNeUS0fwIr2zhHfK4yuY2veiAc+vAaYZcW8sl7xsAJJ3btG1mlZ3mlLuE
4Z9n1xhLYYSumtTzw0ey5GhkDXXpm6lMFfsU8kb1J42I5S+ve5XQDygI5CVdlNFpuhM9fFFV1uFd
6b+jxNKNXvegGfKNoWTRPX3cT9P24GyLqZgI2k0ZMpDqqEu5Xju9N9aQCuIN3lQBP6d6dG0lE/jc
Q73Hj/kCHu3bGTw5AsmczPLENSaq6fbI+taxj7BWFSOAUgQ6RdoVdE4WWfqikmxqv5CzfK0c2OSb
sHpXYtVxdX3jzXJJAu9oFNc2JIEXSScV+1B6Ya/CZhl5jGffsW5BaE4nkNxRn2vo0oFDv2yCJXjS
JraCYTecNtshM/EkkYY8INZ37n99u64pzIPdN724W+e6re29Fw792/3rl9tg8Dp3GtsOQP2c1m56
Fhi9csbHV+kd/LFdROIvVzaRBcQR9j75IoJmi8W2VdRsMu22duT0BntEorxqtbaTtZrDyDXkQOF5
mq+zYlaJc+oAhAwP9hjEhROY2xvK9s7Bw88pQjqZS9xot+ZBGnsxhjdJgxFGetyxo8JYw6n5yRgL
PHlxJNuhdzJXp6N29uRkzjaihnci1gmisTGdq4/2MfhwgshlkN4tZngz3+9bGa1JLb3Wxyy0iwAJ
IiOvFySU9QowrfLMWOcszm5NelSUSIpJRh5nnJlGwYKnsHk2k5PQ4amc9Jaoh4OaBBq+u9HHbhQ3
YO5gWwEY6aK1pJG4pyTOpQc9WE5oC/wd2HPELd+9YWVk2xcACIq1ELAASTBabkpBfAUp2kQnW+FR
nRQLcRroQmaypQ8qYTrhtbhLL0iEMoXWLooqQJQVKX0SRo/NSGI/JSrgvnAh/Jiv0F9hcWaFFSNl
IOsTFmd5MkdrMZHs0h7JogxcGojAAWWOimM0iKUy8FxnJ646DvEUfR19/shrjDRL+wHnBHUCICma
PPuu1y16sSPgKWhAtKTQTJVFoIqw41ZRMYotOsfgXEBYPVwoUf+SlO/n5RXJs4V70hKaYYx4eQzV
XAuOhk5IMI1rCmCisT3s+HEkrDnSddJwfBMEom/PSz80MWFdacME+ZO0w8T4k/OgbXYUq9k2PT9n
jvx5GvaLUOBSe7J0EX8gVlxEszxD4T+UWoZYFM0aK/tYJ5b2dhiuBsupmR5v0cPuKvM4G00Th1E7
GkzgO41oco7HuzVMVILjiwWovWh6s8jmxUQ7keG1VZ5PN0vll2a5IAsz73SsCeLL4cGSFWZM7c7w
O/ZkeaV5CdWL11nFdwryoQI9ILfZMTDdfKoM+MDMrVIQpLvPTetJwJmb4N52F/a9C+sOrNwna1JB
XOPKJZxh7Dto4F5T6HX1D07/RU912dMdpscNN1sOW/w6yBUD4D1HykFGhJlMOIcwBRHuyd0vzOYq
o1dDUkp0msENqE6ODBTHPUUrMwfSr3aC1JeAXxMN3wLu6J5nM1pX1RxpoDSsdVrEZfrAlkJDa66Z
P+15RnKhJZgG6X19R1InSC0GHvWt1JD00WHepEk7XWvDG1U7ODWW8vKZxre+gqLeAPChH76MkjAJ
J/Aab+jAmy6yA0wdGAn4OwDJg94Kpdf8rcHciak1nvC3zzImPqDfWtnBBlEV2wIlRuchKkcOIUg9
JhfCZi6OmVNfqFQKeGw/Vd8oRDytk4tjjftKcpuH978Av3k5L/huyRFkZOnWpZHGhUSacHSespgK
/m7t/KmMCXvTe1egrLcsqZ/6xkgHNsb4RRvePgvgjXaCNlC/hQK5JBPzodV9ua2EWvIo8s8XQwXW
T3h3E1OCifZL7ijQhhrPT3r/h4WluIzRt/UjQ3cdrRNSRbet1qatyVmpAcnDmz03DlMzwRvAUney
U2uIileacckgqZ47xp1YUuNG9klGqenOH6bwgsA4mzmVpdPCWBQ9ud0NCsQk8YxYEUEGPFiDO8Dg
GPLpaEgFmYZQTaXe0+sHuMap3lAxuRWG6x7LhQM19IDq2evR2pvtCh64DMsiv5JaR3z1grHJeC7E
5m9IZyJvCfDw1cXO54/S+oWyuzQDd4TWoS72j6S1CPHh+v3orjUf1GsKKrz7SHUR+ci92/UVr15l
E0dXXaPw987H37z/Xy2DWGCUF/kNBdb6+Nv3/9//wfFArLdKpzYvJ2RCoKwKM6DamwGrt5QpbFlx
HEJSdRklkjovja0bTtvJQGIzjMdWt4l4vNpGmbmMBejij3+0yv7xjyq8AxpH4OGeFHTi5IbRAfNV
xF6rZIhImThuRNFa9Uz8ZQrWvIJhDPLFZbEqF8Nhx/LF1x2iKxu5jWoPeEkc0CNrLBif5+Vv153m
M7/u9krQIcUokUjQTqeN3VCN23YDI0+cES1hvQFF+T6HXl+1ajt0dlNhVM+xdJC4edjskpPzabFK
rIwjT2YznkUQXfjCWuliMVYcmjqdrtnkNhI6UtpgcoTXlrGoWwTeWFTnqJdHCvvjHwXwP/7RhOtC
u2KyYMbwP2jgdRplOpkAUozriSPX4wwQmedNyb0JE4Tm64cUgI+uBuhuXcGB8R2BhHNPbTwnFFAa
H7M41Mpmp3An3AaXHyAenPXP7zuyk0xBsLlUYXkQsQk/jgnJQ1vLYes9Tv3raKtWz17fKWKlO+iq
9ElWOT88vb7ATTjuNeBMhQ3SIGCcb26Y4luyKWOjxORApfdUittwpDY6RLTOiDrW+SrYMpY/t1xY
c1iBseBvPE64BkdkUP/vjsfTcjIed2t31jXzf2ursJDmg6H+jcfa/e0cmB25gVAsi1YPZRN9XQVn
WtxguCKypx9y9KhWIdSmENdXRwYvQdGFildsGwjjqWVPWRHV8kblhl9otBla2Ql5Gh2EJKI7W2WR
DuYgrD+WnGMESN2+yZ3mM5vZ41O9sNVcKKnUlhZ2dOe727TLdeK9lWIyxhNADIINVEH7i/+fvXdr
ciO50gTL9mXMsLM327VZs3nYDYFGQ0QRGbyU+oYplFRFskrsVpEcFqtFWSobjAQikxCRAIgAmJnS
ap53f9z+hv0V+77n6n7cwwOJZEnqabOVdReREe4efjl+/Pi5fAffEBaWB/l4vto2SpgHoC73v1xS
ha9AUuBf5FaBP3KX/cmwN58i4QV3Hg10JIdAiyBQzgiZjOf0Ph5y9+H0uS/HiaTVM7kSWnDwrShb
aSvEw3avSAuReDW9nKXgSwyoh90YcjCOeQKi8zFM/v0DTJBfJY7+Wy1daxc1XFHmf5iL1+YKdjXn
FXKNfHOtYMrqHR6Qlh5G/huz+UysxHMgHC/zfItZHdGsuZyTg9P1akfAweQ3BiJUZdKSv33LHQRp
jOys3oNcTkIXMIQeymjcnq3Io9Qw+aF3c6X4EmRNrp3TWofsO44zC0LoU8m84soG8qET1/orCkGG
hQOe5D2a+vf7hRz31F9ilI3YCN6+5RoiZKpNn1+tGnjM4VJm1iI5QlYwlB9EGCCyk5woZ3qL0nDo
YKTivIHgjNXsWuGes1wWmgTbgBuwsfuirjAMbSsOyUgNRZnMDX8je5F1grMfg+YD4CQXobo0nKGF
c+ekBqVkKzB04FRY8QB1U7qUia3FMkeKkffjWlKeiY1lAaYuR5TpdhSukLrcT+f4wh6FNzPqohM5
2qIXvzLpz2BLQ2vm6AlHy5MchN0IsdFNTWp3rEaKIeT9zuMh+HRh+ngnqz6ugG0w94Yr13QzX6Oj
KNxW39dBsr/7VIZ/BwZ1pqRy3jD8oczDKIb25rkQscLBiSCWQHo+AqZugG2iaeSWC1uvm7Y8i9f7
W5vFH8LbnzD6tp/lt2+xKnIXvDR7JjrEuMuAi7t2Ym6+7eDmGD40X+2axXWLsT8jPHn3bQw3I5Y+
l+gRw8znjWPzfvG2t+Hqjql7TxVl7t1MPeir5abY15oss3QjxeU3YFDJa1fPymmOnV7MG2zyU7nh
meDo/RWYnpJcsBc7uV0EwhTUvT3PO5wB3p7PySQpQFkwvNZ0CgV0TVnE0W66VhzGIw5zctjLpSOh
OrmUgfxIWhtmLsDpQi7UlhNN7jDOa+L2NGwQFgsDug6kWjtu8y342eauZhzw/hjf+XSVljmm+38z
SwwZIY6rzF7R0jpNC8GuUNa65WCb4ml/Lj6B6T1v4hDKAOSyDh2+LcnquA4n1p+waIop4xYvIDpU
3/GaJXV/ETaJEp9oSSnD88dqM0dXnYAA376lht6+LWV5pEEjbdPx4vyiCK5pRufBdLdBfMf0R8Is
qRXlSKOWRTCXz2bVDAO1YYHdvcL0wX+2tAurghziWEUKVo4Dpu+QxVWAVI3CuDvta3bP1b1nashi
RMCyzAV8qT3CSLx0t91p5Jjr57i96bYuT6TRY/3Ftl5yuW/ai5yKSTXerVnTKZF/Lb+NdNA8kUYJ
qlP3UpYOyAdeKnUjzo787aKB+YTEPLGkmS5bTMOcePy5waSpPtbSlTQKpykg8D7489i4ZOozwx6Q
gGkoFlCZVOmdw+Rk0MH+u1xt3jMggyTq1SQdXmIM0v2yEqZakiJX9QMIqhLm0BXzSxdr9SqWJByR
1cC4S32exFdlvUKfBh6Dq1J3eEr28V/4QssEgZOJSvZYo4Sz+CMmrFHp2yukODKY0E2mQBm7i7oJ
SJ/y3HA4Zob4G+x6gJNIyWcpu5xe0+qzM9SM7cg73kP9rHYIfE4m7NiG5ywi9BmCFrNzjpbT2JxF
sFxWUGqdcPbin5SD7b0qbv6QQKaWtazjmgdbIdTR4Xi8cNQ9LPJP2zes5JA6gVjliJXPRsy9jdXZ
oepV9DZMMYSXKlH6cOqmGaH3X9acIZTiFuoGNueMLt31IZMadbCVdjyYndbUtsCL9zMwz6wUudkW
bwko7RKdfCEdzeR2qytbdPIO0t5+GL3pffbZZ8CeJtV6vn5//uE/vfl//gNZwnv8gHPzbFYLDWPh
zA+UWZWiWVdnxN8o/wQ8eF+d4w0WUXgwsc7o/v319XqumHWrzTn9fZ8b7/XyaZG9Wy3OYYO+39Tv
68UQc0P/Q3aUff/sNZwsU4yt7BkIqlVjM2fwMQ5XRoc3xcDweIvs9SYTzRWEd9jBw/ILOCk/DsSj
ZH09qU5pmnNzHChjkPhgTfROwzz1y0ZZF+g4X9RIi5xVkrrze56L32P8LyIM1GEOJ05zHARsTzAn
B+qB6ECeIBnFhgT34WS6n5UcgMF4JMntcr6Fmc7h/5kP8BLCq4ZNH2Py17HZmbHKnGyq2TncY92y
GqHpir3AEZ5qHp31q8XsYjVLgLlLD8R9zSdkPrOGJqpM2SQwKw4lkfBQ7zB3xq2F3G3MIgrm/ezY
V8ZtdxbnkDPfcORhdy414N9gG1ylNI/3NMq5BRJtuhdBk/p0T4ueLIL25DGxqICa2eGK8hq5z3Bh
F1w1EAvtQC9xnixIvk8sCJeX9Yi6wu+CcdEjXpFyt8Zkobm3/yXG2Vf1ppVUfGus+ZSGhK0xnX29
nvOmT9M4Av2BQHI2vxq790z34oxqIDalwAn5HMxslmiE2odHkmMbqhtNjUtVYazirqw1i5uZCoLD
tL3WvuaWXYAia4PloeqdELFDGyid4blwPmGUIJ0KBs2HWnZ+4BJUOFQHfKpmSj/PnscW1hIKQ8N9
HkmFLXlBGleEbJP46gBrcEC6TpiZXFTrySShbAg+hSi4NFrXQhH2vqkVujjWOfnOBgKDtIPhLJyn
/DrXaRi6Ji1ocQiYycTINIPnTUCsTDJEqdGOk85gbfp2YHaRlyDj0rvjK08E/kM4jVfkfTuZICLA
BkVvRHpG0oyborn17Nq88b2lAv7PTFKtWPGlLURrQhaabkq3SgpJ9D/NE/rhOxk5nZJJi3LyDsOZ
MIJupxweKXqKXrsvdpbsnKU7FiEBm6UkIInEjWB3Ks4ig7tNKYn9EuNIeFqs5zHDk8a6aCgBTGuY
nVRGZsdNHzRrXPSQ+wsaORrnaERTKJhZo0QkjzBMTse8bVJg0WrGkPnj9kkBhr8QVgftAv1UfKW0
LukiB2VHjnDfCUPL9/RxrwvZrs2//zwrHq76Yl418bpLz9JVb7fYyl1LJxJ0D+k2xLEfLMHyGHcj
S5y0vRs9bhY33tTS4h7VVZesvhQZ9ylj5lpvUr5qUez9hAieXe0PSIQeRI1zpbDlRfLQHHzpNj/6
NWnO9oia+pnLIVL09raQqh7MtTsz/TKHGp9F9Yc5IQBcrL3Ljku1haHsHW44O84XEmhiHIvXz6Z5
fC90v7NINV5GiM+1/dKCa8kWISaUaCrKcOUExM/FzJfvSZfTIu3om5HeZK++BPXH3G+jEfDq+fT5
njpYgTZQURQUzS4Eg+AjuZ5Qf5suCeu8xQyKm10vbrL9OHC9fSJ47yaepEi1N+qwvAYmuRIHaa9I
c4VJ65fssbcDrvGxPlKNxXy7E+TBbLbanS7qI851oPnoU1teodVY3pSJJnnL7067Y/mmFHNHyiOK
2GOCLy0Jl9WfS64w2xWqrgWXmiCNtT2kMNTEWUvollxfoCP+PmBShusvI1AK9JarWtJ4tLN2/1LJ
TjVXxHURzmBQJEVLs+lJIr6FOjO+NKSP6AMdWJU2Ym6M49SbBM9zeJWgZxqSayUAWLBAAvA7wt4W
WHI4NgZPmG94mBetXKzwtJ3zMLHxkhn+HC/bi7N0FeZFTG9hvO7LYXlVJO7D6GTs4fGDOQmupUHO
ii6ZQXulAl7HsFJDuTcmf/V7rlDHce37hwc20qWeum4Jr4q4tzJLRFHBbfGQHNQRS9YVT7Hlm5zy
k0hJvpuNMqO2Ab1lykt1JjTqGhbGlo99I1fjSDw6q74IJGYoKxOOaaG/pNQIqPYmAKnJ+vqRc3r6
MH7zf/c+++wO8Mg1mvgkpAuVukePyr8rvxg0JuHc+rp3J3v8q6+ff/f0hxH8PCJ0lQl8j1kmwQLN
hrhU5O61khTXmBWVEgewmbDp3XEpolGdHaH2iwEZDvPFdV5zZg07zyE4rzc4a8KAyvRYoXoRiPJ8
R7klMk4kbGtSphMUJViia3YIH9WIQvj02qnO0J9t4gq7J+J/4yPXvGHJQesKNC4Nf5jVFXzB5W+H
UoLxXIZ5B4YC6tyYuNUqQ9v2QufyP2nigSFOICPLX0PRK5Y9feaBIdoYXSsNVnFI1qTgzx08P8ml
9axQnH5BNkMjvsk1X50iZj2D55KdmD7LensOmJqSbXlmFuECZC1UriOiMhrOYfjsv+zXQmvR7C0u
q+vG+YmbOfSA185BgSIU+fj/GrNFsppCva7ZHP8RbdXoKw1NnO0Wsk4u+GuYAROSNig37BxVjugW
QXlQt/PpblFt4AZAuKncETvDQjjSwldffSWyIBd9OJQfBJs6Q/w4mANJ8nBHKj1eIJw3hTVSTgDG
2DiFkjsCRYO7LSXkcU2a1S6kDfZs377brC6BUBlE3E8wRXlWzXuOKtjMzzGmDA9iENEuSlXVWlxT
2YPfVE3t0OfQL9E498RlaV+Xz+QpHVO2Qm0zEOHFiZMMbakcJ0Vox5ccK143RcEhoyDk7pBFyIHZ
SPIieuduVL0wvM0lZJcW7FQe/PUm8fU72bMtW3RlV/CO+E8EtoBJOKZbICK/dWTyqQolJi5DlPWT
NjDpRXM+zHK8TPPhgmWXK7jWnJ2Rb/JpNaPYYFXhli4rejcWohPawuuJfgTtS/oTlU9fMg1+5a+0
AVD7IMu+xTCt/t2mz/3L7s4Qkf1uu9+hOkD63i1gxd/JsrsNtyxVFQ6pJbzx9OwX3RA9fcv23rFr
cGPAtI5H3MxJuZDvJK6ZSzhBuRHgcFuxH+foNA8MrTptikzDIN/jclA+osX8nDyi9ncpz6flvKG/
ckbMnOJ6DDCIHJuZMsKe1kh1Do9WyU1DZSisBK7CQHvcFR7gQ1QgrleNyd/RuQR3m3/hJRgMWB9j
elC0nO6AfHu9VjuHbLEigeZvwfz3MYfu5A7uVCXCOzqC5cMjmIEKcUKS0omqc6j1hiLLJioRWZ9C
62qiHI9gXKTeKNhBmHMF6Rmj9qj7iT3py40yLRuMtVHro5kpTYAR9nDUZi6aBIelNjuSbubRafpq
u1d2NxJwIXev2C1JIiEXwbuNeJTRvYKODW7dM/kPX7359xb6oNq8//CLNzvOAAZXhWW9mU9BCkCH
rXlzQcuLhTSwBuRwxPKBv8TTQwPCxPDvE4KptfB7SmnPih5/Qo6cXCJ6wCXB7qgPCUYqX1DF++7+
QwrCADTB+Z7oNVZm5Y8DrDsY0be/wzEhfHte/CmsXs1mAn4N8ktT67l2vlnt1mxWgIfosUBP8j7N
TrUQkyo9LE0jbqEGR+8H3m2+oukZ9ymtHTD7GXx63H9fX1+uNjP6m+JbxoMBhjVuq4/VZtx/+ubl
q6c//PDsxfO+bwlRHMZ9Yk+b3ZLC+BXP9YIC5PG4ZLkcTm8ngmGYHKqsyzi4of/10rxmiVxWtUa6
qZiqTJG4AT6lUZXEBmi8PbhPH1GnMCTmHMXqLeMOcMG4oYq9luebjDPa86UeHQY1rDE7eo/Z2jGH
KQVRkWCEDcZNkUg34BnBLqGeywIesKVI2ofpQzxdOrbjhtx9wH52QKcJPeAPJWZ1NpszOggslKwz
T41OCN795PvYF9+zji7wOiJoGM8ey/cwWQOG0ZGPTGTMg4yknNYcw2hrmA2Uo9wXmXxI+JclhJU+
R29x9nVdqKvrRcntyc26m/r7Rxf9m6iftjaQlSH/ft+Q//dfv/on3AI3Ef+FenYy1WOzAcG35qB2
1HSREZN46GDD8K9Hg9Qok4M8YvaE4lswxsl2s6tb/SbZVmpkuWDCDDULIJku680RiLoU673CZDth
T4QZQVeAHPK+/7j+ZDDpgNDh7QCPNjzHB0XI+6YXJLoByaA0QskAPIqoJAfgQZfyAX8QyevZasK/
MH22MXRcuhwUrynYt1r8hlIg5qGRXSGKfCaCYGQxLpCq8DXHjnq4jNrQCtvLkpIu5v0gvS+IAygJ
cEunq8UswmWRqiQZEXRC8k2RmAaY8MREyGH0oJeYcpekWUPqwsVx2TMnjNPDHgcCTxeslmx9JHqf
1UEWTt6xAphYQ6qU7kV7/bOt+v0hLSQzoN7RU5txyVfId4Hk+0d9VsHX55XDn7mDmgQoM6uu0YWX
cK99cVFdndawM5aEdoKCQR+60CfFhdoe7mDdtYLGb+qLFfAwamCx2K6AnPBm36xWSx2YGZR1uoQq
htbC+cSvZqjbNY9dfh8WheBSQIBhnGWj/anjo4cj8jUASg08d0zdIKgy7IFtCQHGVa95IceCB9is
uc16FuLLAi0phCQDutnLZOc6y4tcag9tyZYCVj+sNxWptM+ix17MOgXta2Y4CYFRVx052uQYGzKw
BFK3H4WrVHR5rt84lr0wZ8HatJpwqIvuKy2eitleS+ECvhjv/LF/YDQC+Ibdyt2HA9H7e0bLcre7
l4oOVmUcgyJwWk7S5yQBtQAAZmwDVvyg1eJj7WAWFKCBa6H8KIgMe3BHHCVZQP2La2yiEUDD8JiA
4iZkQSsf4oMl8Xs4CZgOgoKF06+fIKIKXhESZCF9w5M3h6/HoRe+6/LLWmDPOXigy3yhqHSBedI1
I2v4TzziWy2jmyUq/R3JRv46T3Ie7oW1wh9pgNIZJa5lS3CjAqEsdojI1eHD2LScE9Uxlf7dNzfi
l2QaaJtuI+dVZSTiVtXpqCTzHHDZMDcSHb5JfsFno0HgI4A8BNCjGxepjK91B8jMwo21UjnfX8mk
0dJnDGSES6DDXL8zzP74p6HdttqV0u2awvb2ADbd7jN3R0ra2x2iwmtXFxTnRixATAi/wRgHN2rK
hk3KzrPMfX2+nC52MwUz4KXHcMBtI9c6ocgXKMXrBc0UQ0Bkn5+kntMRXmUj2gmjt4/xn7cUR0fN
6HPNg/5WkoOa+9fQfWa1DC5P+65OapHCleWBmasTN+IuR+66RPlOrPQdQdjhYTBz20Fgn8QiM5uF
E2ZDD5kJU7DttV6OaaY68OksZrQSD+786TuU9Nvmfuu0SmMVkVnNExFLtOMgnkgfMCbXeDgVpQWH
ieMLrLsP4x1YhrjatAdmN78dBrWijdjhtDq2r0/RBtWOOHAjNA/tzt/JMssiO+WzcfdzpD/QuoMO
DqaD0HLOQeaG+TXDUBY/jg6F3FZyStV+5kD0DFewjkAa2UR6XCeyk/kOvV5Rmn5PGsc+ESD+9bD8
ot/yxeBeHJuvGDTsLqmbJGvW0Kd7evxzmwbVJMJLfA/LnrS4qinAjFVqxldhd3cL78Ep6Mfw6oTn
5tRERtrbpjTrrluBUOZ0kj7W6tuKA4tx+pWxBbLJWwVrzY4orKWhbcWMCkOy/bffvhWF/Xblbb0G
Icz4xPhR+kfBpbkBktgGuIM4cw6ylrQiRUJH7fE/yWUwG2hDg2B0KJi9tWHQcrS8fRt84q2WKb0l
O3CQ6RKxBKBC3L37k/4hQFDf+9Oc7VDPX7yWrOAEobBbzkBInqJCa68PMC98Opgd2M/0feRKJrQb
rHoeOz1zxeRg2zklz2LhMlblJG7yBzqzBU5sQeuooEmK8qrxkRk4SO/j1AA3q33Qf8sVhxXXojkV
jTEpF8Rdr4ouLJM9E9YJH0YhADCX53hQoZsGVyf7E6mdCo3FbJC8kbpz/E9ovVAywjcUDTclj0Vh
lr/rxZA7rhybeFAn+CUD+30FdX42Nn9aHuRIzPOgr4HE5GFCnchp5fCRU85nv0HXUOCqi7m4aigq
lvg2de11b7e4hjVjaW5Tn43eogPyvP4Ic3d6neFtGDXSLNA40eFLIE8ORcasd1+J2BeMybE9Vryv
zrYo1lKfZgzOhqnzAmhrUgaPAx7+/Ovvn8bWYTkag68FjTxKNEKr/3AskHt31KlmM/8DdCdsqqdg
LugMvMXpRQwJnmGcCrdCTVvktOP5JfWl9wlcm9e0Yz4zQW9AHyQBRaEsUiuUHQzOz8MSAVGkJHFN
Z0VmEqRVbdjYDoeZSOzOE03UUb6LlZPq3dXFea2R+5YIddQs7F0n29HXmhU87lm0qsZP6m67Qret
KRlsWA4SOkeUMnY64WtDJbSvCCyPukfpZMkDBuo7huCDBw+0croNTZjJY/bNaSeGmbEkqee3cfoT
bAUyYMwYzo7NV3PjiY51AqLQWfiiZJpRuuA7MbtW4RRSr9f1Bt0EsNeDM5DgCf5ju9mx0Ex1DdTY
vASBZb4VltiwE2AXSTpcI3XY8u0ENQaN83bn8GEDieQnAzeCXg3mJs6B0ELUBXFbj9ilEWOq0NsP
I/83wLeg7bCXyu/IeRQnmDQtSAx8iY3JBVW2RLLADuEMwfFVjdpGE+RDI5CNkiCdVbJWY9wC94ak
omOUBKC+v3R/xFoecYBqh59iDQw0w3+gJ3mkPeMmUct9qUX++Cfu2S/V+d31D/kZfqEDYNl3hCIc
MvRwhVZnR5TZaJvlj8qflw/hcJWsqRyCfmMw2cxH5Ujk+XS1vjYDkbHPOEQI/xoUbfDnkCbuku+3
FwuyWdATPu9lHT7HmYF/Puc5ilB2CESC6NWzV6hNdntd7JGeGTWfztt3bChFf9UyRA3BLXuJwg1Q
En/5vn4Yih+RCzMrkqS++Lu2AJ6ohloUuIEo5BfoXSgjlswkLxuNFqV2NtHmscxkXSb3/C+UpgYT
hp7E2K10As7u8qOuSMmoCh/+5IFSdEdNooiMFUpfvLNsW1WzGJI6d0/7OsmmfTQMLTiO7qQ7YfXe
dNZBDGMYrHN4I4lupfvU3cy71QLuYCaMhOfeMYFUeIpdMa6ehNVKfkYP2nzvzJjPO/Y3tKyus3Zx
Y3Rta4Dct0+Zf65J9590N4suFTp2whsqL5U3cq2YM76/VJSOuFXh+v6IuJcFsxOiNdBxKPywNMcS
d1uOpfeXRaBhwcXyF5vvxYNNxCS9CMAVo0PV4q4P5WFnpPbDBtqNnILcubDdfGxCrdQpj5t9mNUX
6+01c4OlwZ7rPHNtq7EsGQCDmYa35E/T0XZ0YEf3f3hENibYz7mdFCGM4qR3eLYEJ9nCWUkLDP/K
UuvR2fu0rWeOWt0BnWuItINHoZG0ncTIphJ30MlQE0CzMi3KNYOpKYrE4t2LVs/usWhHWcrc1okJ
xRFcz+vFLL4rNBxns8HwKYlHqtTT84hk3SyGxrMd9yoSGV7IbsJPxlvXjb734Zdv/nvjjso6oQ9f
v/nT/8UOqZjGEGeXVEcrF6yznB+hlzzfk6EOVV5jRoAVWqvR+G69UeFb5D/lAMPeLeorF2Wl8VBh
2JX8Af1cUlSW8Wy9kz158XywDTXElKbNx6Lg5XHXeDSy6fUUw2o3FOxrkcuGCGRGlwOZBK1Byg8E
H7kjAVlwcSEz3XqFkfEGciuoXk4pdx+38ZL8y76vltV5ven1nJMOKTN3694d4ZiPZQ45/u/bar5A
dbj11u3ggBwKWl9NURg0ZOeqlnsrhCTuYD9MACa9kOIYssK/nA10znKiXFHYnS64oyARuVyCFOQ1
QyIkIEi5krJrM8m3TD58+d/slnJLGmFWNRJhnQ09oMv4VkXlpS++ivj66e7jCGvUPRwZheFsF2DM
G6FFodhY3g5ub4He15kdCM+1wmQx+EC4jvQq8FdPLr5JGCkIbhg1gHOfF8cPTw7w9sM6zRYEjU1a
C+zjEOPwgfzzumyTlHXCs1EnBboFxr58WrD/9NWrF68QulDj3DCm3UQFENElmxCq+Xk32F7CZyfh
VjkWq5JKJ1JkhMpEvpvOT+eYSNBDKYq1b06ZdE0SiRyfuYZ+xGjBtl+9Un2tfvXOMQ+DC+nRRwHm
JVsK0glSl8SHiz/uRCgFQ2p43KSh515tZaVxjyxxIwno4+wUwyK2ojtbwySBlCLZJdUVt7+9WM/m
GwvNivgIyGfxKrutT+ED6DQrhLxE/2xOgIXM7xyYIGXZcO39Hj95dbEQfITF6jybrabYh74kFs0L
BXqE0w6WX4amGlDJroXEoXPBkfjyhx7ciFDAVWF5iaPmqs9PvXLetNpOi37cBwzE0R3yx6G08lu6
yovjxnwZ6NWr7AyG+87JqewjTD2QDsDCvaSFD84AEc+DkqVjGY+ZVQRvUV395s0bTu+1rOtZPfuF
s9ArOFZENMYiGnxIs6fRwxwrB2E4QWG/ZBET6+b1qndoXev0vgEcCX5+dB6cZKA2t2rhkRaiuWi1
coxBOyS9mEai6zk3lMMRu6jlpr7X80Kk1W3sgMaWpn9Gs4FYmdjEZBCl7Q1hpOnMqL2ifeFCuUf2
hL+RxWSTIub2OTM/y1qrrWQhR918mS7SJgw9AYX0Wkw4LJ1iuCTf5fs/Mw7+MrfIm8K3ws8DBwLG
Nmne7baz1eXSenxTihphz6ndF/zVLVWp6NeMj/XnidW57tbYVrt9vvAUXubyDblfkczlZr6FvOIh
5Am0dfDyt6+f/vB68uTpNz9+Fyu24KyR/cUnfvhyCUIXW3BVWwNFhtlAXwzg92579veDA7B5+Ess
cMx2a7wFcGva2Fh/JAEuOsLqWgZtm4aAsuCgmw18hwMLzL2r5UPCKxh4khj/Xo7fIDsgh2D6SI7g
232NDxixrrUimiAwuhq3O6MMOKMBxdaTeQqvCmHQS19240WFoTL15poPki2D2BMS+1xC6e+jmxbF
8MOP9QouUnBZKfvFTxkFYhP8tQaB38IxLOiITA8B+ZNc6vwdVm95rSzWOFp8meO9Ydx/9pCilAin
YSyVvN7pJcXoeAmM/xYv/NR9ge6v7jLLoallFru4BKyBpDg9Avm6AtfiNDBotVwtry8Qfn+cvSCq
/44DGae7Zgs3RrlP94cSXTQOeQA3QiFQTSr3o/88YxXJH1EpFjvH3PHoHYxMkKBakKJoqhRfSYta
5D+D9wSekpWVfAOIdF+65RcjHlwYjpYC6w9r5/KJAD2Jg0KtIlATwFEwWF9umam8MQjUliNVkDBX
ZBzXN5NOZbRKJrPIiNyanTaRLJ1ckuPTfEHTA7h4rjBjhX9ChHh0hLI1omesd1tTk/rqv8HGW2p+
yCE9FPi+wYRW0GrUSpD10eUn5zhaUbliyga4ZL9964LpMGskR3I6lVoTGBDn58uKrhBQdbS+HiGj
GL31gXDcjCv/ZahEKl9GBb96S2FHmnkFI/KW4gxsxA3Kxjf/WEvvxVmYB4oT4xztZA7evk1nY8Hp
ck3YndSiRF5D1gWP93uwU1Hv/yNxynZrtwixY2/Pod6DoLPzITRIva2XwJ3QXJjbfheJjvtuM/m0
+n0KBP4+yVA0Rcv83sMhj6Ql9PForbI22Pafw19kGCU8wmiHqViDeZXaKkS7t7CVke4nceCXzC1A
IhvyTqadJNzS7hn88Iio1qv41bOJvRih0xPpddHOnmrn660qKaGjp5tqc21ffikZFBBH2mZQeHRf
Ct/X2uW77cXiq7cTWxtDeNZOO0Uu8qT/CmYGa8/9MMltqvpYzdmWLG6ZokoQj1ZnNuYt/nGOfqih
hyw6P1EyXFSOvn0rf8IFbdfsyMEE5Cuf3PaahDDOx/f2LfJmTF+lM0sgTplE8Jost54w+kdHuFQu
zBg/B3+UZVl07dPopDSxvhF9mTOIrhrebmAdvq0iFjXjqOChfFNO2XxtH4fdgA/yZnU+B3BQ8JMQ
SNQ2kYdVi6QRzb0u6R+s3+R0kb0qPCw4Pj2xNgX/+ciqcOM4UaBp9KG/DtgRfn/NjOul+ULI2xo/
D/znPTYUm/U6STPcvaxWKDyF+9mgTwKXsgenNl224P3kcq2s2M8zbnyV87j7ObbYtqzSucqf0Y53
95AcTvg7uLuIXXXkba98MSLgrmLS+aDD+edLIvzCBAucVs079pxEnbUsrUoVszmGtbOeDLfv4L65
y6UnJf/22a+fTl68mjx59goFKLyLDz4fFKWjmnEHEWmqE2022pkIRhgcFKxcaUmJVFg2uxIab+3Q
WB2A03MUoq3qHYOSIYku75OcxsnUTxFQo21+mJl5ipnQhICTeTcn+dFhHMUqtlr8oHPay1YX6OMO
llPObHTutkIyCk0MgSa3GAWASMjJ/hRf+mtS+whvi8dYWBPn+XL40RHjGUo5LeOOfE5th7o0zD78
Fg9lTG8OM/L2rUIp2KNkJL0fuTTFQiSUWzjqM6cMbEgMhSPwww6EaYcAqOKzpLo/C8bQaP+8ezSe
thXffLfeTdbLyaF7PWfLjUXkx7bMVx0npKjHGZRumeW8an2dJ3Q3x1nCf3WG+kX6oudx3T0VOAIo
Om6Aqn6XQAjNQSJspNP+Qf++cukVYfq0SujLLK4gdAeZL2H25zP1LJsjrEODFLjcWiNfAKiYuKMT
/FyL39Co4JUAfMUnP4I9zGeScrN135wgJlXbuq83XVc/iTLVF/JjPKy+A3p3tYauc/tRuL1AQRBl
0VK4qefHpMm5mF/Mp+wivqzxNo1Jek/rdxWmBdzQTVP4ScnSgFs6hBJF+G1UDnhkpflyOxihg7JX
NA1YGQ6P4Yd//Ccxp9RLdoYkiRfRveCMRS/GNWy+1cyqq0m1J8jmr3/78unkN1+/eu6QHboW+3O5
MyTvIeRQjbz6ghwm1pv5x4qw8QWvhVwFlpk9FxPCKbYruX1ilxu6ouANI6mhQWG46+WMoVSoUVbw
4pNB0aItNxUHIA6TVkAaPR7gX4OTlJPl4K5segLjx3JpWUf1cqyD63RoE5dLYLmXGTlduNtQ1tcv
9RXs9LQedDckiSrJ/bt/N1fWBJtm0J0I+9sdqil+w33tLkZZLRfAvBfjL4rbo8l3h3nBTvGTjjx1
cGDagKDZ9oa/w57SDP/PpigK4GXLssREA/9eYSbqy2q5ZV3sVtS4e6AZoJNkZxMgpHJ/PniEmcAh
jrPB9N1qPq0Hna64+yj2EwiLaIIxjfXw2K6MJqJg0DQxx93dlIP9TWXfcqxzU6OLCM0u/B83hu5+
SwHbb3YSm3JTe46mEdgZ0VxvqpDr3QJOgoLgRmktOHR4f673A8l8P6l72nL6jvpqTR4xPAL2o9pi
hAiaJeCi2VEfzxQryzGcUs2mhfRdx+4Q2DEEkCgPmbCawQnIr7dJW/Nvkub+zVJNvIIqepQqLRzD
vycJTFdSYc8ZNo5txWi2OPrKPJgIs4ez0N42g0NTgJPTrLabUGxd+Kebk9/x2i6+LNQipmI3eQQi
s08SBhdztvNdxPVUT9tPORdcHlyUJCaC0p4brAHj0uAkixTakxVIOuZIhJKoMGzJ40ejk1Iw9PPB
EVqM4+w76anfn9Uj+J6XpfCDD0edOYufLWf11Z78HhIfHNxQukUO9KkhFB926BClM+23gRj0vfDJ
Gp89EVBWIrwXT6S5mJNKqJUahr2LKXzPKLQxgv/0OmXiU9FUCYz0riTPDZzrVUwEbfJgGdQElVCZ
UAND7keSGbA5QBI1ovMx/vfEeB+IqsTlIL1F1HtrdzjSNoI0jT+SYypHZt3CMZapPJU7GRmI3Yii
EdnfiWvBMp1zFfzVKp/oBDaQJia+cvirT8wFnAE2uvc4eBxSFOsVzwRbk0iJHOPUXgwp0JFlIh+F
6e0iFV7h2Gsx0p5SgtsWW5FAMnhRZF9mjxJmtUM3at9pBviS6g7fUdZPV1D8crgDL+oKwW8vV3i5
gLsnXjJpu+NNHBrUTR4yM9t5kHwfdcLcYRHFtzhipBZ88vCEQv+P+sWfgUm58VvudPMsBDOhYtpq
c5EdXQ2z/Ir4DELBz1C3jJNT6JT09oVYFR273bM+ZSY4ewccEsFMjh7JXJrJfPQXmUxjVrzNXBok
kqOjoYTeczSTTuZPmkZ3XASzeEjoDqcSHug85IM2m/Rr1FIuYe17mKnOlIkzJPpXBbxANtc7VMiw
X/BF4g+4N632TX06wqKadGQl+hSBwpDMWHT2jFXUYcuc+OPGlvW06G7c6arjnvPJkvgEVz4+esQQ
pvia/NYl9wwRcL0kqCwFo8j7bvn7UVijoxBpdnT06CQ1vcUgPo00/7RoHI3Hw2h/bF7skiPOELcK
YreWwLFtMaXDTananAkmtlelXBr2qRM5Fm2pPEMgCtQLKIi/Zk5dox+OZCxlbH3Lc/AmqHF+Ctzi
jfE1aSEQG1CuTu9qkuxK0+3B0RGca4TKgrRBfx3xn9a6hX75jWr6yPOGjIS2OEEiUIqhzLdJOB29
0I0BOgNzs5x9/jnjNih4B4me82WlIxP7ValtJW0bzvrnaDaxBLE6Vkc/ceosNdXRnMOkAYWR0thy
zVut9V+uXxzjkuiWr2W96hJjavmx+xJtT3Avm5GptIv7e6c8Tcx8NDCSTDlH9DAfVJE+fq2bPBUn
XDv6pO5NhKfdfKxn/fa1ZG2cNxO7tww9D9VAk+IA5thkI46GJYcOD05HrWv80iahSMbamffGTTHi
Lsaqm2q/TDqUyiDZWzMUG9A4QSD2tPxDSfaCwaY0rPGTzWr9AzGbza+BufwKin6rRVoG68hULSZg
F/iUMAIzKCFdJrcCUpsCNAkslmTwx0iPwK4fm6rNd4sIu+Jjm5JRecY85WOSdilGcEnx1Z6GO5KL
kxHQrc8kH+yWGOJyviQ8KDcm1LYNim7tBenhc2ztLiY44sxC2MEotZPee3nQ1p+gpKjSGVeKD12K
QRba3bfInpLDtXf2QDmKmPufRf50bCeczc/O6HJHmEOCFfruev2uXsr984hQaas1bOnPP8cG4AAI
mtDMheSOJ1d5dhizbXFTinxZCUSTU7phvl8VAekNUh8ck7sNo+Sia5oD2ZEJs6PR9nHEUzTz06UH
XekQhYwSS8E90PnYIYdziMsXotjlK5YgnsOwEexJUyBjl8SDkJ3uzJVZ4JfcjfNRRin62kA/Em/J
DU18OKBuzGlkuV4BIVD+pvRClwc2GASXcJM0AfQT98zPeM9oCi5vFtENkTJHc3UrbBhNTyXoV4OJ
cq1Zu5eDGCyEBN9m1JGDOTqk8XPcB9F4oSAUJnCv9bxgPJtHjGdjHqPNgd/g5jDPH8rzoqUm9iSG
ND44ejcUh2ROUHN0VJ1OSSSrQt1+JeCze+bDDeig6canE4V9OIkmRwz47fWI9tkA8VlDAvH1kygt
Qfs2eoAkDNqSwfPQzS7pYBcslV8P/vuYtQLAZhM58VJpAEQ4ckixDvkW1iaFC5RSIQwMa4kxvzD/
JeooRtnx3eZkcAgUEtptZHQpndPVlRM7dcyjFjKTFlL1Lu5bQcz0S5JQx7rbyNg3EerzEycdymau
ouJy+gWGxfldaty4ir7YsWviBNWA+FJ70CFOJuuajhuFOXocmB5hlMNpDUfd2Zx48WqnmHx0SuD9
mPQ2qKMxnOSOu9JcYsbDek01NqQkXakLqoRekEbVOozcQOKSPPUnE7nZ6bGAmwytORadgZ8dUo53
LH1xiw/6yqSQGA9soOtB7A2rsbRkPtKWf25uy8n2iqGQ87FswvBJJGFkNoZPI2FyKjAlEvQlgWBw
mFbTrQWA2K4Qw65xICeYoJwgBzUXNT4TSIkbMzSsluydnY4WQ4e+R/gd+ESTCsnS6sgf5GdUQntJ
TXHK+VYrWAaOLYQAGJtc4tjdhvwQEebCppQg+TxUgxCGgxT1U0PuwgrLk3HgOOou+q6Zfgh393UT
V0Y42mqmqegwQCgFQBM0YhKo+bg+ZPIX10d4+0WXM5yPQYGbmw0fGP+Fz8NLBl6v5G7zkeIAv8ZI
JfaQIQL5YjOjTMvXGlbNwUz47QgzGWYA5wjTjbsM2M2KBrNyrIFrI1vFWaDsizuUzc/n017o1LWp
066hips/DmPmY5jHcM2lEu8/tzSlLwMC42nDioo0f2/p7hONkKY/TByklOnSkVAVXPjJajOBOoaT
7pazajkF1hVkdnLx7RU5i/oWY/M54dg05Mq9fRfZ6zRV1XI1q4/QpOJTg9voLJrOszlwvf5o1G8x
2TlKy0cPU9ZJB6JzPJpHBkYdUrAGDMmDc/6w9Rn2WM65YjHipFsMiEu+lOzd7OHb29YEDFdBWnTM
QRtLRELYWQ+ytYgwZUqkMNXbn5JxWoVXu5BcUmSIIenCTtVywnlb3b8DB+p2gCC6tMOWzRwhA7HG
54jlgaERgXaBqpcM4w4vYdLbygZ2v+eSeDZBubxPTfbT/nFXexq8aUBXYVxr/NrjNhUHJLRtHyXH
+NeBjifT9kXC7znOELLknYF8sMnTk9FiOpK4NngIUsRiu8q52Y4ZSwr2Ghmum581b2439Q0eWT8p
0trKsmy4jTrXjQ9S6j+fup68TUsdGcqs+AQNpXwBEquFLAJrppaYXziK2SBcCAq1E2kisL9EZKPS
y7iTGMOhUE6yFZkfEFwZj/NcShYHOIFogpIVnIN6BaU/wrwLt/D9aFEEX9uUmuNsDdF6eWwIt2qH
bqlYtDq2Dw5163p/LgQbkOD6Wl7kLfavNTqBUicChsJn2mzSXDe8Pnn4CSiDLr7teU+uG1N81Eme
y/y2sB68QGmAO/uFYQRyltop+xcBAztWszA34nyTmnF53J5vLT9PctJRMniQO09Ce7paOjDNrjJ+
knLQYueYL0pPikzFHbjKybNuD1cJFcLlk9s6fr+zOHWuizc5Pq53jS4fL33fbsJvf8EL7KZW+Mck
UwmIclYviDSE7xxL0ZPe3v3G6RTkcngxY8tP074eIhavw4TziuHG+N+Ve650CVjwEK08gcB7EP6q
77NAledBu0OHQvd8tYV72+jwlp+/eP3D09eYi2RJVekauOVsMcY2gdd0I5sPzEV7ft6eR75RUwCC
QPHwZZ/v18MI5IqInaN+CBjpBtCVEBAtgPi1323dFKN1tPXyGcxSPcXYs4Iz3PrQvXMXwLovZM+h
WtikuJEtEjHdPPH53TFBp0Ez2V0mRTFVBruKDYb9u+pfmB0L1Z6QJlL+U5blCUWaTaohfC5yEjfI
NTHWytAA9dp5jlYwAL7zttoIyixElAvdsSkKbJyox6/KzWq1JZVVn6e+X3RoOwQ4j9U/qmIJeZMW
jZogOKRkF/BNOx6STUEp1cx6+0h8mdsKl0VdLSk6veWXksZgY5rvM92lxx7WRK9QrU3JCxDVU/iF
ex4ju9kkULsNKYbjQLuobrAZW8mvKGmC7OnklJoCqUq5tisDR88I4g39tBfdHwdB8xh+2PrmnxLr
rcB1grDnsV+lAwJSNzxIoa8HqeDbtXvgOwB8G8WQPQP2ajKyxkg2jkA6mUYWk1aKPBP2DgyHXwyx
YnHonDMzwS4MmA3pzApX+lPbdcNQUXSd3PMpV4c+Jana+FN/MhJ8kOQ+OtIMcGjcj31kHmg2WnQR
ZyMcRxEAUbr5dJdu051QvdXqj/1Y0KM9DKGNkOi6T7FPeg9CKGZBU2shZFGCRAxQUrA2Skj2TmJh
4KuUszaBt247v1idS3UGcAs+OZZ/D9tqZ81ClP7qtAJtzWesSw8UKBOFx51cChJyd7hGOG8IzsDU
2tc2QJgir4t+UU62l23cwQD+MiGttCAhDIWgTJ9Cgh1mqZZKxTFue0dFiBlmgK0Ot8nJ9O3yHaoT
RdLRgyvSjhF7j4oQgG4Rl7NkB3t0fnZtIKZ5gIIyrd5usWHEGbPUFw0p7my3WJCEEKkgt9cLPJv7
5Nm/J6rWFURfxY91397dUXT2KOdID+Smi2IhuUsh6wjJFZ0pSenejM8jqJGBfzcY8jaPxDDqypj+
mxK7Nl5TYzeVpgdj9yPp9Vj+vdW5lelIxy1AeNH4mhjhanmdb1p6nyCv5NZ1R5Gf+79bpjSmHqmU
AT3z/rPnr5++ev71rwkv/CsFCMeWi321zxa75p0ltOnlbMI5FT7WE2YRqhCjP4JwK340YLAtraUG
Y5Q/+W6PEDjonGRLwHda56C3S+KNCQ0EDPEKDcHfcf6lxULtFKaQWGW4q0EFfuYUkcGnylMM3q6p
wVxbLtq5e7EFSe9FtznG/3NzhzZNvF6Sh9504UB76M7ZYmScHkJyyDGWAiE9EC717lRuFHWUkuNg
sGNV8xIwdooRxqCoE7U4ip8dyzKNegJHpWPwILmnmwGHWtArziFt6yaxlaVIq8NklZCRQv9s4tMA
zd6I3/HlJVJfsj4kVjwQZ0T/W7Q+RmUZnToPHw6LvaKE63pL6S3fNx02gJ3Oh7kjGze8iUIl6UlH
6Eh4yXIoNlgFH/RSURkU2OWDMvQESUfHIquz4Ryl8nCtkeJhJpqxVV5/CbJOAOlGqe5BJFIYfJ6r
sybSR3eLKS43wL22fTa8Gqc0/PKhlmRARjkx6rsGtHeESjxpCQzR14wDga1QesffSeTyu/fbpcAX
q8zuZ5GKOmCriDGx28HEJrma7IXM8lNO8SUs+hGO2W6dm9aA685RJaamFf/CKMpifG/m60Nl29SA
/2t6huwtUo1E58hN1n0P7oZTMhC+hz427HqbcqHAt9jzwQ2NwURI+ArWgL/mFwQbuak/7OZ4fxEX
GC0UsoM6hmr0XJncC0ZtFQsta7gqcw3UibgC5S5EJ6cAV78DNv3rJ09evHz9g3g7xQnPoubENkZ5
seXT/VYdk0dcRp8fxkmDc+qwKkSK29Vq0UzqJewKSkHUHPg9mI5W4s/EhkSvjNtumJb1KHEDTHIV
yx7oPjvsQkfUPHMGRld7V8Rmtxsy6VDJvdl09DhoSsZX3uBPWd1RAtbiso7QdzQHLfsC3z860uAv
eHe5SqSYvIN+Qb/HsNJFvdXAa9aPbzDNlL9/Cy57vZylDTHRGeHQTsTA0x3UGuXmif26miy/2xSc
r0fS9RwQ0iupGBwzCPZJjKKWVGMx6/ECL/JK3tIBU2q5jvPbsEfcEL/Rm0kZeQdcXHMpRVWdyDdA
OOyogR+jSl9mH1Poy2z6lPZMtqB0xPrdZnR3NlLO2ki9o7sNOfDvYEXckwGKcd0+JjJXKoqycdW8
wINgdRbMYweIDJcYJuakOAyQ9w6/FK9ANbN5Rz0gcQmiFFmfDUtlSscXxdvy8TXM2hPRR7BGzvdN
HzefRXtQdrFCp80lg9CJRUs+3DIyYErrVEZHNZskWTln7PEWJjJyjfboHFWLpM1ZxhinSVG+mGbV
HnzVZGCMHYVF4m1nBSZhRp19CU0D7rl5MkmizEcENWpyUlg3FLrHp9I60rogyEQbYJTQOEX9Q3H3
5OOJvpXGNAzXWJRCMGzVY1pc1wR4gLZNhBUkB1GKRSQ/cPziaQ1CUd0EGOAew0r1GZg2ngYUXmGv
IhEhdKoQgjVga1eSgcg3oFNM0wLUAqI5BgEzTiMZmWactm6+xCKBdjPEc20BtpqrZGCCFb/1ZTba
1Gejt9AK+w5+KSJp89XbMnsWoqL74GS6tsH+w6OOvFtNyrztuw0FoKPL79zkhE6lDMDPftmVJoAw
UAMzRZYrTDn3XvQAsOkrE22Kpyax2lm4TvsdetScd+ydCW504EnVBGLgoHqH79umCfNxCqO3t8HO
9Wx1P8gqEEC3xqwgxHs9bGztGN7dkuS+FDVJoitOd1DskQeZ9oIz/DadQqOXDBEWGfd22g1KJlfK
xk0wdG6zv+5gkFouY7LVhhCbxIHvRiu0bl3WOg7kosMHaZF2vhTdHsff+LsOw1i3h+QyhM/WGmax
8F7GdPVKEueiF+DfuOESvnB6L7V609GEgyZONnPMYXTsslSt80V1cTqrsqsR8EqXZZJZsdErF7Qm
J3t0/QF2chO6o4ebMlAvGK3JRJd6n1tli+zJyREmHW///qB2epK2z6Zp9sDtyj0PRgQNqUNsQIPw
HEQ30lFMUoQXEZ0QHPadHT95GEmlmTnmpFx3/j7fdW57rL9cQFH/fj+qXjb1utjTggzZUTn3wFF5
L7krpFxLA7sIDtoALULRTBgqnR2nhpRbs4XKEJzDbZckpmKDno6B610Jhsos++1qx2Ex6N7OJ/J1
6MFJQg2GLi2yt2+Pjl68fI2I6RqIRr5J2mofdZN9m22lDDuyF05dQ+HOOOB7yYHeGKAQtYLzMsJK
uAjhNQhfGbvebFUzX+d2whDTDWctIiLXeUuFwQhyTKglpqsiL1t4GKfOqAicrrTZAlp3PiwvzIQ9
23G0XfGkgT/zfrHgFq7Q7XNxadwADz8Z013vdivxKltaSEyHiQniV5rfarecpcSCtHzhKYqzb0nu
TK1s9yItfosDp1IWGL8+duszznxFQDVWGvSFIgdy/fZqQ+P9iT0gfsHH74FdER4jqDHYI4kI4uN8
fr5cberxU1Z4usDllButZvL0QQ9B1k1uqVVcXMI0NOVM81iDRKP+5Hrd1ywWV2ECC4dSQ8ktbCA2
wrmd9Drvo9pepPw+6bURQcJarfMPnoXGPOy6VJVAFoluiNXSWsEMly6GZvSJCDDoAL7mQ8mUTUaj
6Hp2uYeD6OisoNel+7N8tpxruuCiy7FccNkH0mep2dTTPeltzOJjv4HOh77usbZ4kqzL+XzNXJGs
zPwCHve7v3iHkT4YcRLu7vXFent9cP/ETc7IREPz357PEc0BmBOUVzA1QG60VXdEPbSps0s83S5p
40reAk7KmczXpVdUzR7NyJyMpCVJQwJqdAnoBb+dFy/qmIbm7iV15N+MKlN4/MmWNToMl1nv3xwU
HRX2JHlvand3vUcKxxAHCWuIKnJ4A3Y9enEgyAMRXFRVA8uKVBvd3e5WZTfvKvbUW5fcQGsUaeAE
qrX32pruE9fsHboeiUElzIUuS3Y8W/MGgxuLfe3FVcIbhJF1TSHZdLE11RlAzc5jnoGPDW3fzPak
zh7BztlXPf8SbtQ/VP+gLQT3LLHpivdKkqf02uRkJnR/NfOJYeYmzHVFzuGgn8fazMkwO7Ycd5j1
t6sr/UlrUELt/kmItiMDSu51ug5qgaUbxs0HZ9i4RGdyFzA0s5STr+PgC1NX7jE0ucVwpGfxgrrm
kZ2rmUjZWRr4MntKC1W2hOBEIjIo3o7uClNtUFlqOHYi4Sg75zWIn+fmQ5OWWppWs24dlsREZf64
5TxDyXx5/F3UiTTl9+QU8hoTMWgIVtLdLg6+SsTaSdPlZIJe35O2Iss6zZjRtsLG4J02Euaeaju4
k56lpXdqeXy2PxrVOfizd/Dqevp79O3jiY6BofwF6ZAhcLo58dIPevbhmzf/7WeffQa8D52ogZWf
f3j85t/9r599hvcGEHzmU0SKOCfHbElqWi3m22sSimaEU4ZRPrPdtN7cR1cAzCmbNSD5452/h/Yp
lwecDXBfv3w2yvKL6holFoRO27J1rsHvoH3p+heFV1vAQ1jhX9OrvENyPVuNof/Y/R9eP3nx4+u0
jXFWn+7ODyko4tc43B5Yiwz5/XcgTa2QzV2uNgsLMYlFpHKiVHpI1Hv5jUgLW/TYGmMm0w4P24OH
EY0D19PxGgqCdXiV7B4fxtolouPe19eEsNoyvrKrh7yFoenPA6x5KG0h7GlHEGHWZ3aOulrM+uVb
CuzCm/psftXVBsWp0Y2/P5Lmgg4XN6dfC1zk+GOFZl3QEZj08LId2sGLNhgQCcDtHNwUYkCEGw1M
ooQsNHJNoa0xM2hBsOsGutsGIuoPfUbiJXt1NAw3iJtrO1vtTMwduzUPGQ5nmMFRX2Y/4oYmoMhm
/rGGe8jpdfby+uX10cPyYZQMT0gGs7vKL0zUs+LLlQD4c/L5+R8qB6WlM/7I8QkKY7uZ3uTXRUXw
tHzjwojY8DBUu72vNyAlfozCZigVZIRF7Soo1llsm99H2vDdoHfpszMsMlY12SR4nv6qqxOWPTwO
V1iFUmV2t8Ew3yzcDG6MwWYQmSHA2g7nfDChS387m/kBaklH/KpSZaBZ+ZqZ9ntZS9PXBCk+RFEm
7bX07frCDo1Cxlx0dcLlnOIFYJvJTlRvfZj9zQo2F8YsKRnnTahmw4PSDSpYNvGTlVrRxJtppRY6
r3T4lmsr6w6bEgbtQrf/iZ9/X3ns5tZ262LnXXsVEbu3mju+g1W22gBmub7OCwsE5hsZZvT7xo6U
GAy0aXkatoqJpz03GiPf+BXwfCZtW6ncMmfqUE2EwJ5GjnX0IrSLOfNuBHai4uQL1OzmW4pdcE0C
i72sFu8xZRbne1Dov5bpBWNth2KEQbeiM8MfvUtJu69VihGRKwOND1h+fgZ7AYviJ8i5kDlGK19r
nJBlTmY7zKNIIKduHofZg2F29PAWyC3pJTzWJwhwdXK7TIptAJe9hCmJMTUPj/JnfV8E4HUd9DN0
sx9REjoIGTIiRRs9NIsYupRgyNDmAgTtPzDjwWOq6bj4+c+D7HLTUQfnPUYq8hHaOvcio3riQHU8
Zs+h6pqdtE5Lpx1K9p+6WKTOEbw4ijUHiqMpSJijy4+HVn2sToKea7KIVI6yAoazkmUtikDDswG3
aZ5Yz0Qgh5aC6aFDoxv11Q+jwzVzY1L28RppX3CAKKwdYWp2GqT7bGQAMwz727kdxA0M0+22Ew68
pIeqXQu3A2KRR9CXaib0k+ElUHuGsiCK2mxKDUGFBv74bAXsoeYZv3aPPDGoO23JCTocnHF54RQu
rsuduzTZYnlDVde+nF/m5OpszxbtBWeo1AxtqG3JQQr3end6d9CdlLrSwF9yztNyty4deN10NHG5
gdboWCBqyomcCgUZ3wNAkwg+V/o/84TvGVjbPfGMbxukirfbB6uv1vWyFSJCfoPj7KxTbPNUeIPQ
hlPQzreM7UdUBqIm01niWuErQYcpHjTe6KZZFy8qV0JUY+9fGXJ1wHlgr3zqV8M3Pay9d21crBDr
gCSDQy+hO1hU1/VswsnQNdHD6Q4x+VHtELuYSMIRahSv7vQjMYvqGIGrKX/Gi+k+gkZL/R07Cof9
S8fm1UsiXHtPcQ8jWfRiNfP+GjIKnNtBxeDpl94V74zC/OplHg6KtIb1X48wEyGCw6xPH+wXN8zH
JxG3x7Rwi3IzTeOIWfMUHQQhMXsSFq5OUOYho4dH3YweP/L01avbfQSOjsNPE96dP1yj7vHmLxDE
M5XNZlV9IUnsO8Gk0EsZNaVs1Qnvze5lUl1gqtL8//rFd5Nnz799EYVh+VL6889PkSAWwQyWPGz5
Jw8+T3InzSpMKMq2E3yDV4P+0++fvvouw2D719njV89eZ7CaGSZPfvb8u+z5i9fPHj/NcFzZk6ff
/PhdX+VQ7ig3M876OPo+EC89aDucqCqAV3HIxTwOZDAAeVsELildDlAE7fbhyZv/SfTjZOvURf7w
9E32GWnJs92SndeJLcB3tvUFsMo5TeOHb9/8z1qdo5bL6cWsvqqnH7578x/+3WefiUoW1bH608W0
e31tj/zS/Ru1Gr3E/T/MXj57+ZR3jDSew79my4hgsUPz5Azhn7eoeF+hU0g93dGdcwA1SJWDN5N1
RZgs2mOmKhZh5QMle8Tjz1o8ANnzT30XziqYjRmrDSmcTBE5GI0amv44n6H0nQ1gew5Maha8uVXz
pR4PVBk/diTdpqkwnyrVuEvb00+RxKI7zz/3SfyFQmO9hNmAz9zXeRGfCsc6gIREFtOiecHdB/Ec
M00P+bOY1ybwLwQB4cfX3x79/SCEateejU03S1pCXC9MwVUvFgnsD+gfGj+rxWRZX6JTSBIghJjp
2LYMVDEUTW/8nDcAqoRxcomDCHXCpOJ8sKSpPgbXLjqRghnRDeTL7AvO4ALk+YV6sUQzOeTXj3Bm
9sUbsLyrkyyRk8nZv5P9/nr7DiN1qms5Kj9iAvnbODV2fE9OI/cUJQReRZ/sZIe+wDLInGZP5y1R
O/6OuV3TlGs7pIg37eBxdXM7eBnYNWbl1qvFwiwZvR5F+LVPeb+vlt/S9sy51DDTf4kMlSoCtwt4
qMdl3AgwN9qgpfUJTByI7mPEItOf9B12UK9hIrNIznGzwD+il+ZDvMjuz7Ag9AHvvBeRbMvLBP+N
IBeJCmhCDrMb9aMZG2V3ZxlBz5D1PDcjGboODV0f8MIHdE0MP2SluyVBkMMzksAH6JATnjK8JoNe
yLbH8Rr23KaMXsDkMx8lQ3viA4OOWtghriM9kI2Zhztz6HUiMm986v7qzX+EY1MxMfkyaWnhw7M3
OzFSY/ZKnBrCyMXEf0s60GZw/mNfyD6MiT0kXwoeTHQB1jbhfAtsoz1zJNNZo70AgQ+u5GtUrOr5
ax6FZV3rWhJmZyFuIDEGXh4k/KMkny7LH3JBepL3yfAbwDRyPtAgSyd/d9AR5T7l/KjovFL3u6zk
zXbc52bw7tNRbPpuNQcyGOf9TU2SJFq6NzVBRq3x93oBZ3h/WHR9hQMcfO10WDCs4cdqM+5//+LJ
044ylLIQFhDFhs1q4deVLeXkuUCIBmUG9Iu92p9DCSQDpBc8zVNtYSs6zv0NaSlgAo1DY0MNDkck
s8Mn7N1rkZX2NkYh/yK/uE4BT9gwPJ4l8nJvQwOZ8UGWG7Vdkclj7eneNswoJDmGA3z3BL9d7Z/m
YDRdQ0HRqZPWl6tbkPtkCxJTfz81dqlN3LZYrviDe0nxydOXr54+/vr10ycZhth/rBY1Yzbo7hzz
3tgzrovqfM5Z3XCQ8tdffIy37b7KA18rQf6wdVobWDf6S9AcLdVybG257+qs8ISkfInUUaLUwX/S
QL/iwBmD+7oOUM8ttGIap1C+M3U4zBwbYllj4X1no0KOUEjbmXhNa9pv66qEb/a8Mx47tyuf3BML
KPsOcYzIfvShioVihitJSMT6ccfA9zi53cm+WW3fZf/IYjhe9R6/5N+Pyr8tH7BA/vUPrzNgmJKx
86J6XwcB3+IH70iCR4ecmpCSL6qF+pqVseElR1EdJmmLTCJIt/d7OCoGON9pwIr4CjP6glzZ80fD
7G+H2YNUZrL0tOiy/Mytlq/JMDSKgBXqDi+Iap280IKf8yiXCpVfgnxnoBIvSu/J52xV8i/a5tw+
ZIkrtcnNmVSGxYuAaASw2nkj7ydFKS0vfMOvdFl/BQXyokWW5PAJQ2LNCgwCquUPhtQeDxvBZSbV
Kcjak4s5JVKbOKox06sTyO/oeMKc2gFfygOm0lVNobodLneqjGDfGXRdSvZHM4Rm3e1KY8zwEcWY
9Q6BRpVvd3VLV4KKxSZMfrgM53TUMdWcFyHneY6Y4WLBYTu5IO+6sBVy3tRvE+qJJKK9RH+zQArA
4BZpSeQagh/iLL0VuoNq3giyDlaI8OC0IyvtzNHVTFPT3Mly9m1zehZpPvhuYWdRgYNvO5shWRPm
uTTlZiScss1uSf9K4AFIRl7z9gM+o0Gq0E+9II8tRPvEiaLOUAUEIlnWl26L0sLKrnJzS/onDPfD
G0VJzl/a0vyMB6VRrkiIWJI98TKGGKZkhPPK8eP9HSuxVzA7aL7MaCgIoUPtaTwRlJ03FJXJJz3d
f1opCgkufr4kmNvVepgt6jO48lPqy1Ax/RjxOG6eMYGuqteaDpoTJ3s3HOo2jL4x3jGSXNlpEF1D
5DLJaNYhWMvZCj0aA6vI59kLOEiA/OG/p6sG7+NwDCwryegB3ZltVsDBZ3A9xwU7+vgR+Bj1ZVaa
Vp5enNYzBM9UpR7VrZtptebwNIIGo1WWvMvmQwGcA/lb2qYZX8WSDsES7MjBUz9792j6rsI8lHAq
CjAtRcfbiwhfO7a40dip866mhcerkJ1sl7laYtFcMk10ceocBBYz/oD1FnMeoksANZ/yO8Ilm8in
xhnutnJuIY7a5JIncFPHVFFPhNV6rCQ5NnQ5ZuoMcUXrSxpBJhlFpCtt/xApl4xUyZvdBXlMrQvW
WpALpdQ4fjg6KbKvsr9/8PnfdyfgwTSifgiKFKsU+WUrk7QLYTdfkaBXlSQmW1joq203gNvgSUql
AneOpQlC7gO598kc9251OSjakRluBuHrSPYelOF3mA2g/zu0zAVwz1rjJOE91nDcVjwGaOu/9AsB
OZbqyeg5O4lpGb9D5ml3A/7rx3IXh3L3br/o8njTBONtLj62vDJ91GzrakOA++a0STVkolKlBTnC
zuC61byLjvg/98HJ1/mgFwn52J2Vv67+MAeu6q4x/nKwIGwz1JW7FEnni9VptVAON7SCLcdQ7VXG
mdKBULpfzL2pTWa22h4P+GYBNhFCLzoRn7BBjRqBxB4ZKtu3tf2jMcBbU4K+gruQvyQ3EkuAdKSi
mpXw+nuVO301knG0v42RSURNhZ3oq4KWMSHRzsOCCwYy40jnCLvf60Z4F/vyKEObcdD4ve5O9xGq
rUIGltDUyRywsRQTDe9pB047PHpJM7+4xnHI7dhRHJyi+xrI8XPXq13GHnJrrn304hcFuyz07mRP
r9YrQplDGE/SpgwaH2VVCUbG5hC5s/v1mDlK+03vwz+++V8im/Z7WG/8/eGf3vyf/40zantd+qqR
iCMxJuoFnmgTxKsvHvVRUeJiFMnVFE0IjKNLycqX20Fir8gHphRm2LtJ2cGJVbC3+TrA6BdXh8i4
kfe3VfMei2f3v83uv3z2JLtLwCNQWd0uQ4Le+4GXr148fvrDD5PXT199/+z516+fwiQ/DFMww8FO
vkU8nhKmZrZYlO/rzbJefPGofLGuly+5j93ndOszkhRimK1jsP+Oz7zmpCTbWr/F/SKn7kPqP14A
ff6K6kjVoufnqXOOVkxINLvZw78RJ6CoIPozvBcXArovwo6bG0Q+03Lvw69blOoMSN+/ycV/4x2I
eUeL+iPm/9udHmm71HHcguTH8fzN/5Y0SK0WM7ezP7x484fvHPWTlgBd6nUTMImm7EwYLYycastG
JNxoIGdtXGwwmoKGqNlD1Co8B8/rZYe9qcRdqxXbIvcw+4ZFpfAs6fWQswrCYn0VCVXQw/rKOZla
tOOE7naZ0NnClhRzpkQps8L2tIWRHFZbsv4N/9Gv//O8vmw5UuJDxlqhQnIvgUvQY5RPKlxVrr06
owaGfN97nF8VGbsaNuwVcnWtnrDVBjGGnAOKPL0q+aojOLmuUWK0VF3kocd4YHJuowXr4RCmo8o+
1658jtUee9gCuibRnY2O19Mabp34sVVWfVzNZ5K5XQPpLmuGrfyIA+dekENMuz95OPrHlB5epoFn
m/IV8PASLV3JZDqrLXunckKPxifrVl0OfxXPj2q3XeEVYSo47hUFC1/xre4FlEU4p3oj12Z0Q0Yl
w5YRqP3HNrVa5Dja2H0E94PQoJ1DwlJ10yLrpctH5PARCJgTiKgagNrjyWiogckEakwmviOlV9CY
OQciZV0ZttteyskEy0IzHL6higb2vFlzJLUUel9fQzmeVejzN9dqr+HYHv4QtGw+PveKCo4tEkkn
XG8Q41BUcF1BAYEmPF5l2TFLuLjtMApVG4HP0AJrR6oNvCUWWc8a1nvSFFJm7J7Etztiwqws2bde
7TeETUcIDqzGEhhcvEhIUIx8lhui/uMXXPdBRi3LcpidrlaLYYZR0ky/qI2k2xwpCBE9lgKvqiUv
q/sCakvTLQIT5waHXiFELyS2Fn7rHCESxTXFaZPl1s7lYyQfzLjFQNOEi++whoy9VXfVgjIsbSnc
lmc4SV6iU5ltKOYMyKuCjYc7qOJTQ2bLcqphDMnnF3uILaxgEJs5uRgZEuQx/lDXGeXDUGWth4Tn
1ubqXReFBVNDUfJKO9NBcCneyyc5GuioazTTEorJ0b2zOMYcvW4YtrtVO3Z/lg1MFeJXrhIVcH/1
TCiTD0l1U5PflI3Aha+6ym42MLTmVtC/raaYFnR7YETKDWnY2sG7+EeAmG7YbIuPc/gZiEbAoWHH
X9M0MQfGo8O2sqlpf6G+c63VeZkGGGSo52TcXYNHwisqD6hHrtMDWUkEYKcB2BH6tdkThCoN+Dm1
LVCUpG+CQuKGjgsFScgIPYi2Wzlv/Cq0ouAawgHigpiEZ7MKV6t9AZZKIZEgt0JCnFOMaRODsor+
8/gxFSRxw/dagtLx4eNS918r+URKcxkqqCyQo8cp9tMXEaWbQbttfafGFLia18vdhX+at/dkEWpc
o7HdhA7rB5yOJ8T/+TLIlEwNY5Jl/ybPBwylxDQSx1NI3T2Y4Ha4YWwkaoWlPt16HyZbkRLHD072
GurhHnFKuPdoZIPFnCMvl7qpZvGCkg9+MZCZcx2hNLEHIw4M7jb53U0xQPtne7jONXAYbE+1qUXU
MV3oFpRsb/CAEqQt8CbnCyJ7DhOAwTsvwyzjdrmliDSu5/ViZiv2/FMo7fxdnul9j2JESZJ2V5Gv
JRXEdlPXdHFhcaPKzuBY5zRJIjs7Vaa5nTllqU71vJnILUxDvTYhBEMIn6nV6o+CCN6ucCfDaO8j
CvXGeVLwdeOAhd1uug87LInBYHDrfOoq5eFixuVLEyU2+BK799Ugdewxq76p8HRFF1S5CJtePIYn
33FGWcpYifkhgQfj47xIc1Can5ImbLpqYWIHt+JEYHJUPhW6LPfllPOy60TEa+XObMmi9QjDganr
ZFE1zZmNutktb0kFTut5CyJgaC8JKccHP2wvtvmxXdGT4iaSgK7uX2T+yuELLOt6VU8nf5WF9Rlo
gWUGRs40l0xYQ+M19h52z2EsecB2NEbKn2M481guYB4+N0CbBprVbkMOc4O7BLjHGWTzQnUYbGKB
B8jJWR9jUd66sl7wxMO30RkxN9uLv1f8mZciyf/g69z7A4f+b3uo+0+IYKxiynFLjVdd/ziYhrzN
nwUoWn8W/yYmaM9Z2OwQK+C5G1XBneNiyShZt6XipvaxbaIl2cmPxSfhgM0sRQ8aibDjUDLR/gT5
64DxpgZJyhFVxw2p3CMSvBmlvi12R0my+OxJIV/cySj96nTHuj0U3t5VpCbzIkeTMs3TnTQgIHvK
HQzmSSOJp4aets+kR8m56Vjb/t0mo//rt3aLn2qreY/nN+T1wdd0z/YneEJsUDk+Qc8U/KB5RA4q
+HnX9M2IO6Hk0dqTt8jrHfVtTCPmlj+tFfa64blRYSYB+rOHSbQZRRezuElAM7uKOuSc3ZezQzYv
FDt043rfsVZ2FOqA7kKSyJJSWJu2E+JWF2XbHqjncUy7ib3gKMiseu/GHWwKJ3ZwuHsTW26QD7J7
2YBBHNi7x3YfEQ4GxUCX6sXmkJV6sfn/F+ovskgYP7tnjXp3UL/x47LaXFtD0Hjce1/X62ox/1jz
PJNloFElMfxao1/hhoL5/ihWGxB9gdbgf6NsgFRnmAr+EF94LPcME01usVz+X6JShRT7k4daZh9X
oibq6dcbDDVIUVWbsliFEHgGxPRlhzP2P4sDiCdxuN9IQYnF8h91vmuD9OTd7n/7CfN2B5Pv46cd
K/jDHk5//UNF0E+FrHXzeooqZDd8M09sh8Po/+vZTOg/j2WGe60ztjAb4ofdaVfFo70Vv98tuip+
vrfik/nHror3939x1TnGu3srvlxd1puOrnb3Nc0HeI3+VRgBdTjJCPBN0SrbyQhomOmWIp9nV/o2
TMXs2Bs3bJLtYOcHQxlwNxs5uD0awWCoIzHt/WvyJedv/mcQmnlk/3XxN7NTvCoLwzu+RfzaQ27A
UjbUdqxWN6t1jEHITJU4H2ELxeCnKi9udyrGvRjbu+y/shpE3KwSzGAZBDDiX0k20C0bf6w2+OSP
djOeLQcjbouH/6fE+gXF80Ega1dO0G7nlQ6RUSvWRws0ZEKWVTRcILcymcKiolfYlwMyhPjWWvk/
q9b8VuEurTr5KwxSteRmVu7OUEeH1kKc4iLKM7hpjqXaCQ0gLfUbLOB0tg9Zj3tj1wmQ3YeDlKqj
dTOputl2R/Yj97HB3WZ8txkODJLrUHtQHPRxbiFqoIPvo0ey4K9sJm2Kco/TO8S9LtK1brmsWG+w
dzF9y4lFNXP4OV7CupctOWtUx3Q9tYA6XbOO+ZrdMGGzjhmbfeqUoZ/Q/imbHTxnnzRpVGl2w7Sl
9Yf53aZoaw+Zz1rNIXrBJ67S4arQOErMHsHx8MMWPKmyV/5xPDp6eNJLTMO+s/Em7SHI0yFD+ksb
UkXNRHNmbCFMPgo1q7p7kh1SqvuNG0zbnHqDsDtAHKc/3kVyx19/Iq6zAVlzmCUMeiwEfSe+TwfI
QFL0r2MFSB7AVJq5KZ+60J395rFP0jvf+sa+D+KNDQ/fU44hQh9x6IK0tZfkvMpemRfV8nxRz36R
DNR0xKODDvKKTib9bryJoJbblVSnw3jRlsFDW4B6zMVr4zzbyF2OvWs4bqYdZ8QlhY3+lEUgv2Oe
ugn2iowVGHFlvnAv66CYvXYOoTfGTzBfGBZ/IZr6q/h6tJiGbKm8bScK5sxAaivGpYXKvKgb78eu
gu+QveA5eVzDkWd1s7WAy27m84Fa8qK5QtzhCWaTIyonjXuCasWA3iZJrtliGntsxjiMiQ7BLafe
v1rfvuVq/3mXO+4r+rQH+Vvs+3+lo4Y0iq/qIxeeVGlW9bPdYmEDHMfjEMrpIAMXw4Qd4GtEcaPJ
UwnfFEG55Kl0hyEBDMoDxmlkA4nU/NIM2/gQhbyBGr/ZzUrbRJGO++Nf9/ZatswHDsp7QvwiHYv1
V9feuLWfny8PXHsoecja/3SJ5EYTVmoVy7LEf2ARY+6a8oA7gv4rcW01XKKiMV5oUIuEZQcT4HUk
rJcn96u2+cSwHs6IucdpDj5xEpbf5yZ3gIscJr1MeMglmK91l/tXPjiFIJ/Mm2m1OcjcLkX/6yXJ
Fh0q9Dwu+wEDxHKHjI5cn6HsPmM0vW/NADwsWsVK/JKMnx20ORZ/ciYLpt+ORkufLVuukA4wzzxM
mthRJMQIJg7ij/dvqECKqnWkF4Db9ghOTTw3SbJEYp8aZ/RaQ1NDLRGDqr+rp+/zRmZ7bND81Xbe
1qHxY45vxdAW+js34dNaYH1dErLFt1TA0JoQKvmT48cHSIySJKWdXYVFttApm8sm1btEj169G/CD
pJ53/173+1zKBVHC9LwV63tstA8tqkrDWbSPXaNHjrUdLWAID9lxpjDMQg0d1EpoC5/+P5DAMK3n
/ezpEuY3W68wHxI8/PQGiRqdPOfkXjEhcuoomsQgk4sA4cckIIQlbQwIzLkwNHGHpa7++UphP/pD
+dFr69qlD4wHQLSMwM7F6HB6D2hQwgsN+/kpxKUBZzF93YqmLfSmRPaXNEVANFMDvO5XoeX0nffz
mAiHFBlOEHrzLeGEbWoSSJCTt+/vfUXHYjK4qGeUiZJ4F8IIbLPZnPOZYfNllv2wOz/Hu99qCfwv
0R5iDeBVso3De1pDF2oD3oraNERICYFgi35qs8qAOZpFcl1dNOfAd6a4QJ59BiyM3rXjtuSFIR4H
Dv8MV1sbZbIVSiT0KEp7vj3F3/n21BbYR4l3pPd+p2EDetjyMUzgBfLh0hHEsapUva4Vbtbb09Ld
tooScyOshV6uKFgw3s9QPrGlKW66l456u4qT/7XS5nnAI5f3cblaHkn2PIGrvFJQE5mAK7d2P/G8
x1mQE9Z0klHvxrYxSQEz+J1NB0blMOfEPTjfs+zLL9XpVg/tokMYwGZYb05NCJ+pr7asfh/5diJh
IFbho+YFqgV4sxEak26SQXD1veLL59X2+OHfjk7s3OJDEalQmvsrCxf7z4TUcfAXZM/x2U8wQT6F
AKmjL4DVTCYDBSOVyHSPQnKWt2Ns/sZkfU28/sK/fpdfJQIZlxjpPxDnH5IB+/CZ7HNsDbv1N8L6
5B1x3bxoP8zPJNQC6wETfRCVOePmzl3dOUzYz22JOb5vtY3mX3hIlR+ErwxveHTvi3s/B/JarKpt
oXnoceX6xH3Celc6Ll9K6FpGh8n6VutmINW4BBxiwwxTHj8cZo/Sb7jz9lMX1VV+jC3CuE9oDD8P
+zLAdDirwTG+Jyp4F3x1cL57z2bwdzQL8O7Dyzf/g03WhMlsVssP//nN//uIc0WYRA8CSTUEuayZ
X2FxA80DUtr3q4+Y0wl1qxTyUUKrvXlDYFVCzx0oVnkSxoolMkWycpfcxzjWeiNXtMmsRqviZLWc
EIRrzerFAeO5olIfGNl7/Hc23+A/qB0YdKcgg1HFWDz4jK4VMGAPxzTftOLU6AR5vto+U2CxeiZY
QW7PJXLuHVBtttoma5oIaVo9RPSJbS6DcmBi7RDqUcANNudF66oAD+O6o9iiTOoiMft2d4X4OLlB
2I/jmnzC+HVcya53zkLr+/iCR3e7huyUYBWjVqgX29VhjXHRsPrZkoLkD2tAC4dNODvZvjY0FydR
tzethS1N8AK787nF319GWhIOZhMjyRLee2/PwArI/uJjg8boDsGadlLL2Sep8JXC56289Z9k20QC
h7oKYo8+74O0UdH1PzBR3NjdG7t9/MXoJO2NeeAQWuCVFrYSv9rplnRYLuUlgrDzUYJAOR71CdZe
xF0Tz0i0dIAzKKZuFJkK5mRTXVIGL+xvAU/RAWS62sGt+asYuSG2KmOVnAXiIvsXWabuWQqc+loz
nnSokq+RzwN/CL5Df9G3f9pnNZuTz/31/MXT568Rn84/eP3k2Sv75Jsff/htMkKQ3mRnNQyEkTbh
igsc5jqbrjYbRF9K1MFrM5o5388ZlWhZI2ofus+D6AOnJnz/+6dPnv34faKuJg+fYpU5UsV8KUib
bTuj8AqnUU2d0Z2zrzXfX3ZPMrriXVKSkmXbM+VWhEDaO7EVIEuAYy1pd79d53CqfmIH5SHxII1p
R+H6FW7lWe6u8zYHmEmd/A0cXe30yc27CsG1EsCnKF5IIk4Sd7iqh9xQoQvmSX9aGWo2/+hEKMLZ
6jjI6GaJxxEXKkRxj4pJagKal8bQhnn9xfsOGSDKzOpOfLiNkT2KDswABT1Ak4GFOb1GwKF8oFUH
hWLDePFB0l5ur3N9NpQc8/xfJx9An1dTgmQx4mFXZ+XVJ/RVavqualOmp/Io7Kg8TPRzDRfIfTOL
7xkokj6kSInY5UN6bJv33bZPbd/t83AA9k1iFCrVRp2Hx/WyERO+9jvLgdktdoTjhs9Qxj1oLNCa
HwKrKVzP4c+ww6j6SFIFdkE2igFuI+/OsPOq9EAIvDmy9BVqxHEAuIk0g0q1vM7OCeOOXkE7O/JQ
KLNOilrWl46ex4OBYL7HnbEybjWbnFJkQmqSCaeS027QN6rslABWSUtGl0EsobPdJnYahgTNw+18
sDkdFOitcJYOMSixtTzu3tbfa1zOz5u6+WOQefUTutrf9P3nxu67h3Q+6L0mnIbOjAebwV92fglB
/bbzS7pAzTS2GT+8sYuIW4abjvWVHjaUeofpvzYoUQjGMoJySkYPlyuY8+3MfFVBUqyr6TtqtUxl
gZ9GUjNmOERonrGaoGFog82PgyIJEcaFk/rUtrx4po0K1caNpn1Y7CTzpEYYPLDNFotEzbNyigjR
dmEwMUBqQ+YgbQG/wAwAhctFyxiXu+V6Pn2/0Hn1k1IEsxmP7XRwM3CJk1n5/jCjZOqaSpy/WmKP
h5nx80sONzFUyr3E5Ac3ceDJ0ZDxvYC7IxfExOxUrEUk8thf24uUb4WXv589/+evf51zrWHCFFQt
6WaCn6cvz5fwbQJl9Tx7dSYQp/3iRi9xOoZliC2nEderN0+e/vOIpPIlXFMQ63izapqjWf1xDmI8
6rvabU9X6+tWy+bLNMUHQNaZo6lS1oklKYus85dza5EUbNiNTy7HhfmouA3w6fh5oIDA79JrxJ3G
qWYQAdX4cW4jOo3ndWPy7OD/fgMsEFMQu9NxaMVrdRKlVllFJULsJTvawN6PGpRZV0XkqNferyBP
jx+6u1NFD9KlHoQ3LHqIfrycrbvGs5161dYdAI3xN/Qj8KBVCMkh6EmLPpjGcNxY7o78Dsf029WO
MHUZM/nagiOyugCWDxWpaDUd2rRlqclhTTOtNc/TUHpJCe+4h2z851S08fQlOH98IXt/SUF0ogXO
RtnDPyWFIb3MMCmWXgcGxNelp2uHGkOHVFwzBKVC1n30HiVZl1qR/FoIHXU0kLYGhsCYuDRk3Wec
imfyc+dTzo16kOWg2C+iYigxYp4MWjtpO6hw3NQfTqIKriTf6T+EFX7GNeIKCpKF5V2FZ26z8fDg
0K3my0Z29VGD6cnQD4/xpCkRGfrekvLdrbkk5qJMUNIQfZyg0BHFmqK5kAVtSPPAgNI2S5lCOPSC
08F0TFGxpYPJ/mVxyLam+eOZgH6c49i2ATWkk3SpDuD591R1kyuRxWB5VuMMvyN7RYo7c1oaBj/n
1EDzj/72qZzPH00rJk+6VAzkE4Ny3+YzpjMpP8yQvw+dKqJInrPG2H53MxL2J73mpVVFBGYQ0aaN
0hHKylPxG9AZCcK8fCGkDf8nuh5gTlIOS6zXsc+fq3bPl/HesC7rTdDc+MCij4Iv2wEJbLc7HoOh
3DIRDJnGw1QwprVVw4YFRK6eor5Ivl5Ye0fMvFOVdNaLTolXWj5G+6upMDoJsXylmP2+Ld37pJal
oE2ezHkLJ2rDk6t4rD2gQl40CK7dGK2pRy7Led0qHG7I37D/2IdK/REJHxYl+BRvddx/6Rbm0D50
d1d2b6N8pBJ+MGvoTbW7cAD7pv8FPsIPcjoDTEPclhNJVWD6ScRFlSi/nuD+Qf1efN47RH6f9EGG
MnPZL2P20k7YfCao9vzRTnIDIp3uNrE0RGqssYrDaOfOZ3UsV8kYsCyJ2NJtygeA9UG+xdw9q8tm
D60n2sWvPrI9YEaOT+Lb6GLG8R8zDpBIF5OpoCbb86BIivS6JANLrswG8aQeHhBejm08CBFSEKqA
GADO7QlIIMs2RPoivTILDf+VEtFtmS4mOmbopPggJS/qXLh3C+tZa2VuMkmb3ctyamERNVK2/1YN
Es6DajeZ8K1EbFW0xmC9QSGvqQNn1RZnQOzwI0kUInoYTH2D5zMmvXdbfh5hmq8Xu8azO76zpjcm
bK6N16v0AvrAJ575AnsCQTOy7FGmjLG2EmpsXMvyq1SdaYv8uRVX8CbctYAIpU4rKFrmN+puKY/z
VgaJhbm7MkdJ20AihS6XZRlMbDIuLwynKQlEMjp3N5y5msXUTc33RdtSeqlkrgOTO8rNV8OM0h38
Yb5W3wAks0L6rn8WLQZ8hYJLt24Lv5da7KteqqBRN1SzWacJyUzfsr60giHP24AqDLJq5kV9J3Af
pAenJ/rXPWuhMl2cXqwP6WKDLlHsJpEfwd32wTC797Ao9x9tqoS8WCtMxoYFaV4O+bP4BK5HTao0
SV6vOjZEmlVTvl93wh7yg15sO8fcNQg7gOzLcASfyLap89CW773v40e4f6j2H7jnmC0wwOPkF6fU
HBvz6TA7PRuL9hlXK8lPKY1BQ6wTOy4pxnj/CpdymQeMQsepe/C2nOMt390pYduhVpRilobKctzF
1V5PndRE+nASSGruEHZeUiKJcoTYQm5SLM1TqSrqWdGLHk8P7CrJgnQR3qwWmOa0xgWI0HqWkjEb
ztcp3bhT3Qx7wOtCnVhmzqDtcqpVWwrF5nyo5O6PPcLW4EigxCjhkQBVUfNg7rRNlp+avGC4krV+
xiXwhuVvzc3pGa4OKQdpASTPKZ0os+07Plsls1eYNCpEaOEvw79rqVRmT/jZyKJN+RBzjFCyH6Yn
QGl4VhOrw8QcaALx6mVKw5jm/cTnkcn/M+4QuGbDUtPG0D2B+4B3AIY8LlMacU7mcWVz2EBxmn4V
SRppYhSCq2y23RDY8JKgF3BFMVVR6jhpSixGZQ9BQ3IVDGdogJk7qWsvz1ZFMJ++m/oMfR3kIMFW
KPjbpTpsmMV13viU943HAcPShIy0GJ3pIPcsUphbyYQSYZUwnEG50djolODvoqMF+l67BbmpTINm
4O8ivLcjg9K1pZbM0ophcNrZ8KK6OJ2xem0U+twlMh/7avDf0E2XRwv/DR8LlxnLbIYvgw09hokO
X6/WW9qBY96IpD4ifkFOy9Lxq1F2ZWhO91HLl7idEUT2NbsWl7in8GbROuPNMFLnY89ydHe59JMD
bTbeoEcDyo/XrDzlRD/b1lXAOu8F9xmag1zODjp5Vrhdpvm6KE7SGQDsHEf0yVmHoGXsB/ZzlHQm
WztPMppcqtHhU8Xsat1LV9fhy5CLpHahlENcR6fPYIjtb8bf0zb+VQat+WZ1p472eLlHFgyVA+nw
HzvFthV+aT+nSdtXtO0EunVVaJeYZjIngiddBLDIo4dIVUaJzhED9llYWcMPuIWfYQtFnMnr1W65
JD3yMvsN62uGgX4f2BXen+QdC1tOud8Mo9YkwWVQW8jjArnLS+xS3EiZvWJk1agxbChZwV8C090q
Y/kwXC0H5BpMEC8WTVWwIPvWI5xMddOyLvF7eLQUx4OPVYR3pAdyKN7J3rx5kzUfl79IKd5Uvzxv
qtMmT6P6hqNnED4dAf8MrDLeQwF9EcSW59zp2dlMP9TrfXj15j+avNV4Ps6qBSz2tr5A3Xv94Yc3
o3//2Wd3fpbd3zWb+6fz5f16+VGdYXt3sl/NMe3tpv4Z/Ea7KZpyQXy/XGGGe9bZUnpQ4DHn8wqu
EacoeYP8yE5W2azaVpnc8YeU2P4CsWqhNTIGU1Mb4F8zFpIv6wEU3a1RXGlW6KmM31iCnLmZr3Yg
A5+vVmwrw6bw39MajYS4LJtquZrPfgaX09fYHboP4BL/7c+dYxOpj1BNQDbYIfdb/3T2ux7abdFS
dw3/XU4lVS80NZ9mPJNZM93M19uyh1HeL/kRfW8rtx+cE3iIRD99X52jWCk1fYbYSl9yeRzuETSH
W5HmKLvE+YSJIMFmAVJRTRZryqjMqpPrgaTflHsJQZnQd+eUfRaaw2bNh+rruNlSB4B8oILWSMlG
3mRb1MksFiQmAgNDa3o9xXBh+ganymUhk7MGr9gM18dbMMwYJhVb1LPzus8jxA+c1ugCsMSB4ico
4yj6cdP3SMGbfVPzFUVdLy5rnnseN/k71ry0sB7oCoKTdubmlwbONZEWy+w3dTZb8QP4PLc938Jd
AzOUIhnRuDHCEFcfLoN3kBVW6/UCvkm3uBleS9AfA/Mp0/zBwtYrdBnAB7MVGldnmOuUmB7vnuyC
TA8rnDboKmbBdqtzKU4csgTq8IbR2NCtWsciy8Zk9uwMO4sL7jffO7rDM7FzKvPZiIaEmn5+gEOF
J2yKm1Mb0Ni76iPOYfxB6Q+PGq6bu4a7RvmMLy442bLT+sGfuI7o+zXK3r4FnkSNHB3BIc8bZAxD
phGX6+u3b8tej2OcG0Lq6Pd++cOLH189fvrDLymFsMTfNdeN/uTdq3/9YTE/9eAwUwyxQvl1E7qG
JyQE+WgsIfi+yC8b2racTTjKVS8y4vbAJ5CN09VYXfWLQFNmtTmnWNo+i9nXTYk6V5hUCoDNvhpn
+aPh37XOeaD0S2SVnDp+q5rUL5mgHpV/l+VsIlvDxyixpX6o6LXDI6A1UguI8ACbkdQKcJjNYCbF
2PaOcp5TXnjcF7hPy5TeKtC52uGqYCdz2KlDS1a+lw1KXazB7ZsKYAasK2F67YIMiAJvYUqP+sPM
le1ZrDfGtciEDhloCe39e1zgVHyUsRxru1Ee3mayfn/eCi/bnyK6q+lwNjs+xDEYAYQUMgHSK1gq
lwjwKORbIl/5jwZElK2ohXJf00+Pe1jE7UA38cSlGIn+3ea+BOLrU7Nk7Yq4YvXGJccOdQA4ynC6
XD3ct1TLrIW/VyLQFdz8EPzK1WAQQ5T5lqsPVUs54iciblFQEya8QjbazxrL4oX0ITa9wAjZpGPh
9jQQUkLRdllsWjH2fY2x7zv0gSTb+mKYPQiwOWDS+hzW62dwsZoWoyz483dL4zGqkdbkSRuBUhp+
XJLYRtA90/m8jwshfgzkNZ6oaHxzmxzPi3JWI3kjqEnOpwk9mdXUQq4nQ5FEHOJeTl9SoyglRB2+
afC4vTjdYjD6T+kuO0jY/vZ8F2sEigpORC3nwLBAkq44KB6OeMR4fDB0laWtmmCKgBJ++fT561e/
/SVrA3Vk9HboUke6TfHh9Zv/0dwwfr8Dmru6WHz48c1/979T0L2opRkHkqGa6OT6xx+h5NGb738t
uVDZ0RLPIDrzfrWbNaQEn2UI6jwjnJRzzosDRL9BwgR5Av2xSCYhf01MNo9yJPHsVyuQE35dgQR6
XdrIf5AXHQaA/trUCeljO8fEjSTfozj3qHxD3fkC/sVFgq6czhfz7XUvsVcQruTL7AtJXo3dwY5N
3RmCzug9T24wa9N3uIrwX/eAgbtg5ekJ3NrOGS5GZaB/xLlGr3SYbwIfbEBerIMYOuj9bxjvGGV5
9EogHfDuFDi3GhfU71o/Cdeoc3R8nE5XGw7FQBEWF+ph+UCkBpQruNZ8Kf7RPEm0VmWW/YocmS/R
z2Qx3eFNs3eHC86ugek4Yd6ZG95Vm5nc0eZbaOA19hNuO9wdLEHfw2vDO8wqMW9G2WP0yxyNxtmd
q3/I/g/479f03yfw3+M7V48eHMHvv/v22xP+++mDB/jk22+/fXICzST+R8UePuByDx9AyW9PepNF
fV4tJvxVYIAPrh78A/DAqwdf039nhZaQeRsLnDsUfPQAi/zd02KoT/6enmCn/DPsFz7Fjvmn1A18
zP2AF+5DsNwTUgIfy5Ihut0RHKEFnqFMSvlidVkMha7yd3CdLFLh2mek+l1dDjMqQli+djS9pBYV
LYdfMluprqQPJ+newcevCm+7sZN5gpmpbJ3efBE1sUF3HxZOdKiD43+525wMYKh7w1BdcQ0JC74E
czGrF0Fv7AMZu3kiHSS8nlM4JfHvuplWION4kALBR1zkpIuBS4m9IMBoYDu5V+X5ZrVb5yGG/zz7
ckyEkJR83ZDuXN198OgNTkE236O6SlX7ua3mVUnIQDbVZR4uQAl8IscBDbWMGbKCIzLfR4eHFZki
GUNIlbs0SkUx3KB8wuPus98c8Gs+HWDbywFJ70vf3ODoSM8UBDyRv474z4p0M+M+egLV/UDPiUfm
uA/F8NBD5Do4AD9Wm3Ff/ha5la3qtuK7erEe9+Xu7D4GbPh6oSZW1hZVWxOsWN7QfTjSz+ZXdgTu
yZ5BuE7DIXBzn8U/POOWSXmD5wTKeHTi+sGsdtv1btsPl3C6Wp7Nz9G5k3/JEsoU0j0BH5c8slKe
9/Q2heo8PteQh8PbxYoU1s0CVQ2EQ5rlVzMX+4HeNxfek9la3fhDMFdUd76EvlqrqvQDqRR69evV
OZxNubQ1jHppJr+IG1gvdufz5UW1rM5rBHE9x/gb/To1H07Qbtk9RQYkw/V+wkRKqgIz5JFJBbew
g9nfv93S9ZB7Rl1jhPwJ9o/WmcR/1XTwygMnvnLa9H65vgZK6vcNUxYCEW+oQV7I3ZGeoyAzdj99
O/f7Q4o2tpyESql0Iutyk24GqIRVsbJIBh6O3yDjbBSGZw2kAiIieobbR5hzJJfyRQTV224Gvb3J
C08fVKcN/d3RguynsXQxfMn6vCDrlZiNGpRM9YL2fr5e058PIjsvoUBqMQqJa6iUny/cUvgVdlAX
N4V1aEHXlW6RA5ckHLf5TKJB+6NR34zRMAld6CA1jFoBefRRTKqra+4VpnSRmCx1WyTxtXQjS7c7
7pd9PsD9p4phy0Qzjg5lHgaMIiqLIv1YNyrPDYJp7fheMcC7rSsfuMsRhuwE2gZCr2cTZqBdq3FG
DkdrVY/lAyiOrB+WtwVBtVj4uN5+v5eCHhm60N/5Uj5WknahppOjyfuPpV+o2iNVyiekbM20ywn7
lOnlvbH2JjZ4+ULtBrbVueGQtPLI3gmj42hwT7+czvAh9AJt5LHs5T5Z2NWSCuInE8hhngyROrRp
LBNXn+AO7lphu8VhRh6GLzqoRVppfejMoJ8nvnXnojl3EUG0+HgVxLBTSiHh0WjrK/iNyvhAqPSu
LEzt/cuqucJP9lPuMTIhrYXgzSpsLL9AqJ1z2JzU0NFFtXmPN0w2WcCkNCB1AmcG+qxni+t+kQjS
VY4YzF5biI0de4LxDzO9FrC7T5HC3aKe0l0iqHuAE1ZnXZr36aZq3pVSppeuF5GrvIii5Cty9eEJ
VlLQCe6soQsVfSHqaGripSI2kngr59FPpmkxG07oRPtLULYdC88df+qGCbk5baejbRmCYjXDqPux
ZCDnNc1W1wzonvmrzIF+7CfPQnoepPnWPASbOZ6IvSTwl19GNteDgLr+pGW8Yfk+lcEeslzaTnFo
rlk3ZuW9zJITw05AgEieEUbOXq6GtNhwC24ISnkf90QB0ZUNMqT9wOMbZf0Eaw7aj2bAvzv+hxho
8BbnFII8G5znfWO8cZLRBAh3bHGkgXr9m2rocviPtCrs3UifxH/lrip29wlcabjIHgKWtWVxZtQO
gmNiRJ91tBP9f8S925IbSZYgVmsmaU1YSQ9jetmHNYsGhwuAhQxeqrenlVNgD5vFqqamiiwjWdNs
ZeWASCAyE00AASIAZmJmesz0JfoFmekL9B8yfYS+QOfqfvwSyGR112xZNzMQ4Xc/fvzcD0puu8cY
f4FtUelo3d9Wk82svlrlKbiQh9ExHyD2mPiKC5J5roxHcjh0fkJf0aR+4SZ1cESMl3LttSS0NXX1
Wj80Idn6nzQj25fCfhtkCDq/ESrYg6wVMj55rXMbFo89pBp+2lrH927bOlBE/NVkYa+n6npK18tx
+/3gZDdo15bgLW3gANvbSdFVngcOy/Awsdehx/M6h4K+dltwmGGmee4tmIuLaJvdQbpmDeeGIBQf
+1cw/tohkqLvY+S1Mfo9/Cnxn35rexhspbmMGzSCm7WYoZdOWtS76plwZb3d9vzo10bRreOo17lh
hKXw5XhWLWg/44pH+Xl5wctuqRKggCe01HQnnpJkpOl9+RuUHIracNR9WD7o+jl1aU7d3zy2mRCC
+h54aHj99HyymV4qLcnDAYP5yID8MOE7AItJCclf0omvdPksxy8Sv+B4RrpmGdFM9275xTne1fHW
+LKDUjUa89UMsM/owSBdoCTolgCcqh3GzW651BiAw0JfiwdV4BYYfeK1B8hd97tHKEn1tlq4mdi7
ozcsyMIgP/yDC08/r8tmcl4hrvjw+7f/13//2WeJchoV0R3JZ0zhq5gK23Q6+C8acFF+BylRjreb
Pdfs97AAypukoI82/xr6fIXxquRL+cpjOwqpujvjgjWh7Q19Z9vNxXyJZpo+CSNK+Zv5P1Va1pwr
Cp/AdpqoeGD9PgXTWlmfNYqbMitmu42zf0Rz1I71/BKdmiLo6xZvczLoRy1IFc6tZDGBVu4EbY4V
kOTzkN3PAiux19vNfLpd7FEXPSEr26tNzZb4qwmckSvWeDdGpi8D3qUOGd1eV8MI7Vp9joM9rSkW
ZLd3FzMuAEjtossQGu11ez+p0R4Qx91ertG8Q9otBue1Az04GZvujz1r9SLw278+OaY9W06u2aol
dLWh2CeD4nERFooWk2Td1yht7ofljr4Y3L//KCQa/uhLx4WP5oPEuE5HOUcN9TUFY7keHP0x4UMQ
50spTDmJhvsNh26h0gmgRtDnpFqt4NcG4RYrhGBtS0mESzIcv06jwiQmi3eKN5s9HcOPk9V8sZjQ
MOkwLyfv0WpkI5bqJhMrmbHzYiaLQ0ETXc+ZCIAOlADXfXrqyOmiARJgyKmYwvwycaxyzXjtVHSL
JkqWvVuhsfrqNnEvtRuR2uQj+N8wu3SGbbOMenQDzan6e1+e3G2Q6YXjKMh1LgEKB6dwKTmXz23x
4Pru9WNKIJztjblk7Rfgx2cZN3mS5jM4GIPD0tv0OMOdkQmsnzvQUDJ3nNuONBWPD/QnndTwtLLT
Ld/SqCIYFtLL6NEvH5hb00cxwhHQRYmqGKhJa69Jts4lzhobDfBW8GWHGZOwhIapdF4ndBGjvtPl
9m5MUFmJU1mxZxmHcsKUYZTHTR3QCW4ofbdOpSfm5WiAtYWDzXfXFE26tnVBSY8kpJm5qyWASsGu
NTo+Pv5XG2RtWCY02dQ7cS/D+/e+0irej2a+dW7t7I60mRWPyl8VeGUHV/8dDGAyr67MZC6h/EJR
TSNEkSNqBv61R/EMJ7gz0VckXVq+sUsLcHu/emDV242TyrLC6MPbt/8d0XNIWn74w9v/79999tmd
4vs/vPndyxfjJ6++efryu++/ffbm2fjl35P9IRc8Roscdr81zkVbTtDOu8gGh2y0OB6TqyJqmHto
oNs7PZQei7PEo58J+bSwEwZ6s5zti56Y6B0thRrudZzXASzVxUXF8MmRIbp4mruo65vN1RuEAhgC
e11fkZkeUZsda89LzXZ8TLzXpOB7dk1miiQ4w4FSZs07hC7UR0FMUYFVIltJtSZlQwhtHusOix+Q
9SWz+CGU21RoPMuWEoDTp0uKRBy0otXHYzXQHHc6Uc0+2rMC9K/rNRkrylTKe6wQR7tK9JDAm1Vm
MkC/tfeAJT78b0TV65A31fRqsll9OHn7f/8HtX9VQ0r8AH8bR/KSdFbPEvtWodFK0eIOo/U7gVkK
NgA3LCIP6hoI4A87H2sN2nqlCOr30sArGpJBK3yvrzf1x/mMQ3rC3qD3XT1rJFLnPfTwqdd9jMV8
UW/27ALz7t2xjYGjQwwCgRRapXQNTRfVBA4s1qZHF9vJTxGLvq6q4nK7XR/fvz+rp+hERAej3lzc
B3SxAQbuvlYoL7fIftUby3B0JJSGjkoGQkF1LappM3YPfXTqxYzDi+B94vqVVyfmGnHfGkpOwSX6
PTHfMjQG04JNtR1LK3H6oUwvKA9xAzGkIu05Wp5RHGrAacgJmaa51yv2q48Boa92PJlWsEqpPwN7
H/wSgKI7LDoPKfjPPR/Gm0hUwFTRmz9xQ9HbPgJ3W3IBPFwSjZKDuogn3bt3WCtJAvDunSQyJFSH
6PEr6Qz2XBYkBIuFt+uBJce9oPSW8+l8a4hJ3SMUStkiss84q+B9vz1VglPrhhRz3Hma38CUah9Z
NKCfPI6W9AoOVuOFCn6nRaVEmmVyQ0H0snvZyQZc/4QRHByFSL+NpR5mdRFF7CVgSLqPg+gefN+F
OUExEKzLxwu4dbYDRJvCHIaDJTAPk6/Cn47KbeSYzqRGqzGdCIaHDuMOM3o90iFHcltvshAbNHAw
P2mOmDp+TMKVaAIUfYys8KhzDJBCD+lH+aQzjhFUy5TjedA1EhjjERF/WV/JZvdvvUSjBzn1LXag
hyPalX6bAckttuMWbCfH/FjMxodnk5Xst8wwtCYw3KuLZZzL1vVloe6ov/qpg0wGFClMohbsdWte
Z+7boI4t6gXB9VolI4tmJFsX5wmq10zrUiSdjWyzdjOMuSPFFxx+s4yDVs2HxRWHptktSVLcd5A0
yBg7qQy2f1X69YKxHohH7AATJ2f43xuxltyewEQjqqIUr8S+wWgpRx4LSvxweRnvOLKFLZLDVDp3
gh2pgbjUUqVQfZli47E8atnx2JX2QSnpRduxZ9Lo5DRw4iaaJa7SBi4ZyOt8+PHtXwUpZSfTywqo
9MWH07f/7f/IWWWJJRb/WBcCnnIKbC8xcs4RiQKoZpStDD3YngA7/3RCsebRKZ+HDzd4vSHJcv99
tR+ykLna6AW+IG88/ATLI2Q6x4pH5qxQM3UWunNyeSL0t/PJ1hPlOB4CNeY02dEZlV7G/w35SMyh
Tq5wjJp/i3EXaMQ3GncDLy3RiUYPH/06uXTcV2bE49hNYke8W+HJYde3vqlzZOrc/3WEQch3F7Mm
/Ok28EOlU4DDhFm80pJEFNa7RTqLDZzAdwN+4/Uurj1kJ8uke5ok7BzQ0FcVhv7o56ZD7WO+dGzD
5OFScDDdIIZCJWIcCjK5WdAFIZ7BrbzQUczAnbRJNq0ntAKzGaKC9A0xxfZ6Mv1mENjfapBhA23n
KdqLne9G9yvec3rLyeftfie7mMtBh4XQASeIhFhwtYLrhbH4VpTmRY7JQjXhtF9hjDJf7vEoPmGx
qB+z62wDH0+eVK+RMYSbjalxEbuyG3DJRfhAtBpB4d3n19Dkz8SzJsl2T9NuNO5h8GE1q9ABw0zx
KEQPyT1KNR4XD/LxyMwEOAANdHxyTJVO82HJMD9R8d2boplvd4y7OZMXY3MJ2HNWFRdxOuAAuizq
jk+pw68Im9O62T6h/KSMaT3SNczvEy77BpDzfS58RNHRWdSRXjc+sAcOndeAIyJJHmSK8oIHYIZ4
n2KEEduCpcLonrDkR5w/tWqO6vOjyRE3cY9ujaNtfURH7AjaODLnBP97QxomeGVChAHm1Vj9PCyO
aEpQWs3itCY8N3MV4D0LtzZ6iTfQyrraTCug2Px8v0Y6LEoUugB6jR24OezZ9nKy4jhodKv5wBO6
tTIaVJ9M2J4kklfThenkeVuOqILgtoTpschPVmW+tNFmxrdAjmR7wrx9ZMUCa4mGSFzDBlxczbLl
BYf9nta5mj0ViHlGcAmN4YEFmgX7GzgONF/6pvuftnmIi8q7mlx8DDQjLhh+E/BU5O3aQPmXPpuw
ydhA9m6XWp8nDeUvcj8SHZvLi8lZlHVFnlzAMhw4nEQVMqQSkmoI6fMLD7cIS6R4qVG6kto+HCSo
WCFVoYwefj8oLcdKp6DvB8lk/MBFBTGU1CAhy6RN0Y/wj9uSReGdm1IV83O5PoFGuF7PN9Wsn2O0
QyI3oS7chR9+t/fzp52v9lPE06GVNGfEnSu19fIr1TYcAzefdHxomQhpYY8xfN/mCIUt2DQOugXR
ydkewhxbR1/IQfrwj0EMEP5TbT6M3/4//8A6kP4MQyCiZ4mPYT1AGGV3atSMigJMdDaUAk+CoLGK
homiVCnig3hwhI00pAfNWVVUknCIuR0Jm6gMj/x0nezO4OzjHddJY4eIroua8cp+pwnD4FLjWJnl
FF3F7+r6/VNg1qrNEOP8j5mHG6smNayIei+t+JotJofFs7fP36BisXOH04ehiRZdorDAXO+InK1l
+Tp/J42dz6/RgLzDFCu9SxVGhdMYsS2JRP2oz89ZcH6Bw63fb1R7gD8YTHgiUp7hBSvgfFOlk0Qg
gy6Y6YMbkyxNJipGhapnjDUXCDvYTaS5EZDkCIZPNhduLi5/uH5pPWXh7E1gZnpN9kD0FDA0wfS5
HVoE34gtAY3YFaD1Ak5jOch2GapfbDslW8qOnUoxOZu2tKgJ0VV0vTtbzNmso+kv/Ea/xARv1nub
Y9UAPcx54RYuUuEEwYtYhR0GH2wQmstoF06uvVP5ghzK0Xsc7evH3dPOnxUvYr1fzM/aYyysaiA5
6wWQoqsjSvEWyEGDCAuUn707lAARq5rq1asoLATnU0hiLPS7cKamNZpIUD1ULn7/5M3vbCSbi5pF
LDDKC4wFWYpSWkNE3Crqwh2mHxztKSfAZaokCrIsaP/QiMPLK/l8YP4DuNHGTke88NEaAj47W0KY
aowpFHHVcXHjVq9e9IJoNJZbuUFDDNjCemo9kDINxa+8OeFf4yricUaDW4xg4AnT7xFSZohLWw84
C5oTJRcTKBLkrdytZ5wpM1R5cSFRbZBa48akss6b37VN6WoHQdiH2YnktUtTu33pp1Tc3fTv3bu7
GTxGYawfC8CpXwGLWdqDZNtYEqn8yX7F42d/x6ogjDzpBa2uv1lNCltEQFO52UQymVn7sDlVqPiJ
m2bKYPsGaT6HaC6Anmq+SFmsm16vfXv7yrh9u2HUDazdXO626NvUz3QwCMTNAW7OSxqD2v0g1P6W
FsMAbdOaI4G+5nIcBHEcJN5DckmcYEcSpQAeVDLDW4sJzdEMe6zhC6gpI9fk23mskm6XqT6OQn+j
9mEehMMwMi7Mx5HwJtgOR58Tc09kQ843mGP64aA8H/O3KFdXVvolMRUo//JIi5Du5MEgCcAwX82G
bpkiFQ4t2Mn8+DSjmwmXccQ7khctMbHY77548t2z7568efq7ruKsEDKj5jG7b59mMTSLM5RuNXjl
cat8Trt9+rtnT//+2SvtmezbqNkBINujx91Dw8gbiycTe3m4j4NdSEAcIK53ecHaHHl72KYkE6CD
nCR1Wji4dN27SPVkR5WfsNLWk/mi353WuwXr/DGKMKBwATXSqBV9A32Bh1W9JvyZjRV641ESPeMh
OE11jbeBT6cfEFg/bVNC4ndvPzJfceCheDGmfKkNYZDHfjWMGJhqoq873gcn3UJcadDQ2saR80My
RvnBLvy4krgx1OIgRbT5lV7o5e3QcRiYRDSmKIZfUH6Bh0Pd0kWaUA/IX1GZRgawyvGKc2QTjE7e
8QA7LY56jTpxpa6+WU/Pbqq54rBQyoBfmxQdZurNwKB+Yo8Dz9E5lsH0eqOu8Sb7SQMckqYyjqwd
GFhvKVkr20Je1g3Gdd+iRFa5bqQjNXF3kDcwtAk5pzQvNieJW3MeeXz5thpliOafVKycGmgdORW7
1zknWq9cCL2oKYoKki8yrOJIotWLz/UWJ9+5Naa8RXY6FzI0mp2bmQmahOZk8NqlZIa54u+0wexg
NM22Ak5Bl6z+YH+FTLyqg0lY10mynkUuRfc/oIhMTLMixGQOo7ddxTze3cNO/sBABgeZlAOLxS8I
ublpDQeJi9MCfSIe3jDKpO8u22c8crla/IlownET3jQjAPR0AD8J5lEPz3bUI0e790lHu5fBPXj+
bjqHA3EfF1/TdKzVzAcnOZj31s6tlx+iCSAP9Ge9207rZZU06sK62XhvLribxTEa2O12eCdY625u
hK2RHbqDXMyGVnwzPw/PchfPbtekV28LhuCnnz14Np5BPpiBWaubW2gLF8Dvs/XVR5HGONSuhoX1
wubktrCEbRus8Mlef9HNGEDGIOGJ5JNLK4XjGD1wI8FHHktgWwjU6ULHjM9u3PgjjA8Y9h8TJQqb
bCXgm00KOoD1JeVVUlT79yX5za3MsphmdHZZWacFJM9Q6RwJoWUbvoWvT/HrIF8bv/tMfi0NSIF8
C/gLE9WFtbfLtXxASnu5fhOVsl34suiafT2WDQrDF3f7P84+HxT9H68+H3QHata7W70iN8oDsmnA
3NAgEbFD9NCXJw1ZmEqtWWK9jQwqpQkU18ljEuJCC+hjFNlhO0Mx8MiuaF/bGiRloZGorDYbldWJ
YLp0eQyyhTdV20nFo0mWw0QzUE5r1mO52aaIsScqOso1QnlrEgSj3dEyut2kzGLINaQ2s9K01ssj
zhnborXZyQC/RuaxOKzDDVFjJ1D0VCzioGqecRbwnKmc0IPxn68KCT4/ZS8t97XUmLbqMmr86TFb
2qZa1h+Bdt/5BEtsCLJc44EThZWX+wBtvFxnmufy5BNYbUrODIF5zfpdOY/G+1wkuNqEcmPOmTXk
pAmkMAh4/+GDB7fhA2ToIx1tuXyP6EJycSD3Oh+0+QlTqI3y2bO3z1+/uSUNHUo0ONSHjoAfIiNK
VqFmwtSiczKyTbInaYHpJaXxxLSBi6vJvinoxS30VuIXoG5DN4rNnfjbwykQso99IA2e2HBws4Fv
mHEynGIkg96L8oI/9tfZ0Jl8OHrjejGDEfQG2UhI9E2Xy0A8GjJsK7EPbjQR2seKM6FRMpuZJBkK
d86Hf4W6jK8wT7FJvyNGdCmagwIZysm6BkEJ8oBnHU0QpTpq65z5v/MgsptmMZdNGbSIGUl6ZNLk
RClyUNk7Rkn/OFKltmkrhCixscKDokDNklv0tNsmz+fPEuSBHgLFbNjxLaCcm7lZL8vlDNFEcBLB
LRqwjexxTsApSk98e8Bkr0FzCnHtTTbp6hpzipOTVaKsUetPgj/+6kDPjgw/RCOgNDRxoBqJG4I5
cVhElzrOTNDNNq7CAxygxU1beyGWlPSePo/wTcjfG6H6aNZ9bUbzyIV8NDQepEbzp1ZsKsW6NFyY
dbTPtAwkaSxX1VUf5jOC/w9aFxNNXcrXnF1KzNLCcweHa68r0wyTqFJ5hYVd1XkjGZJajnaT31Xb
lz4MWh1XMvMb+8w3XoZ74idD9FdA+3E9ciJrTge3hr+wu7CeD8ktjbMKe+ASUskqAnr/nIYZWlf7
6vKkOvBBrvhawnlJWeTIz7qZKHrbfBowD4frFOmgC6HFtvGJb3cBDYwiXdUUTYTtI2lGJkU28WJL
q1h+vadmtdpIKoSNAm69bXvUWg9O7xyutG19nTYIFyB8nZ5ftDXpjqbrWppIQyFhQ0yLPl/NmQoG
CuJEBEy96KaTuXK3n7rwvXK97x1c+u319s9qH+q3dyAklGBFnzY7kweThPBZSBUzD4P3OgkdZrII
aMrlwSGKVQVBprRflfd6x2aS7/lFEBTsKfagifW+tREPKaaqP9XMkPS7ymOx3UoMRRrogMz6MJoJ
P1l4XfkYZZpbxCcd4nWJaqstUUw49Y6PXbAwXDFsJpgOADVtA+mSw88C6WS6lg8HST9G8neQJDqk
1+W62qD9pco2+ye0c4NTtKhd0U0p7hAom765bxc6Mugcsdt8C+Pa7pqRWErmsyCaZcaJm6WOssJP
lcmVMI8bkiSI6RaV/YTtuDYLct5wLnIS5C/Ygipo7uda+Ov/+mvOnTvYXtDPLGDrR8xB05gjIvPE
UGSJqF3qILBr9SSDJlRUbbMulBuWVBrkZgH1TDi3HdVou1HEQpd4PEzGW23iyD9NQGsbFTQ3G11A
d8jYj7ROLNm5ovzqEkpHLAWpN85UjXFtOLILR26jIXRsTE5qxpnlRuSyvverwHMYpVVJo0FfU6Mj
eU8raizQV0i3oVIjyJXKP+D+EtvJ6A4L7mi9V6NVWijHYpsA+utkfZq9BvxI+vcWB0b40N6wljeF
n9plZEOoVChxTmhnNPhpM8lPQfjY3CRMIdIQKttrNU/ddr1SzupBqpGm6Iuha1k12Ah9aTBwzwRj
4ZOHp8nyRrjg3gGiZb4Sq3yaZrgwvaMjwW/1arHvnQb7aKtxN0ztiHRsFEhSWhY1rCF/BwmrfHJN
HLJXH8mis12FLjkWkeFiUoUELqk1XONpDhxvQelNQ6TI2OIp93iccIyJRfK1EhzZ2APT0uTZKTOy
nFwas0EsmMHVEpkCZi/uylvgrWDpzNjVC0T6lEn0E2bcxhnzsHFgywLYnrJNT3q64F97OcoxSrUt
LeIeQy8cRh8nsaUTvg+vNfc6EftxXSung/NAEulque5lNjGUI6e2bbQtsuJd39RIjLIM9Qz/J9Kl
57tL0j2HUd1Syir8jl7xaLqgy8ZgbsXZ9ka8QlugVW9bYHRMuBAlSildkhQxcVart+kcAy0szo84
J0g0mDsuRiZFMdyhO7QJfzrfIqWI6Y3cKZmJgyCls4Vqe9MWBnUv+hRO8ApNOzh7A7nnkvlJs5Vg
juih8LHiFMjV9QQ9VcMoYcxMBCkIU7FvmD+xjEWjcrJzwp9sxsFZnUt52LmxnozVmU0POrcQpKYT
TCgI2arcofLps+Jz1Uq396OwtDIVmLRZ4lsM3Nf7lLE72o4G7egdpAs5QrvTUnczklhLHjYJvUAq
LCF8M1QvWpLD21INP7XP1vgyWLolZAx1E8SNYVH/8Y+ru82PfKWrLVIu/bB07Qk+5gCSdWrChTJL
Av3BFWrWhAcA79oEN1JQ6I0TbuA07NM0Em4Q7yNCDpALQ0IpyN0ngTbeX6HC97DY+TiW5MqAVdr4
p05OcmIIRbzzAwm8DiejBzDVVCCBItY73QjG3Xm5keFF6tythtG0Stpy2Q7iqFVsETK26ncGJaxx
DqfvONvTKqnfM21SIoGJuDwuVY6X1bJWNjdj9swVysOGz+7cUuHATrtS0gs4C7mVyDiC/labDRNj
gUv0R/aTgof5BiAgcgiC1yc9jhuLzmS9U+9V1VRrFoFLEElK/VuchPu7pXyoSF5ezYAiQhH8R6Km
bKPDotcbnJpb+f3VSQ8K9jikzMfkmDjv1/J7mrPO9sZkVLwao3BRRsHaBLyzYm7poE1WSfS3ljGa
rIMtyJscZSWfDIn5MKeV6fI8rEbxUVs5mKAtJx4GyPlinD8DMN3pbgM1R91hJHIz+3P+kE4h+jSX
DHQoKXuIHhNX3Uil82vT7fmjXL1HN9ZLDBuQksonTqHZYdsOPQQQohBw/tDt/vmjFG4oScf4fAao
lOS/4ppNttNX89UXj+LEkULhY1/l1STwbEpiQfIKukQgwetHyesDa735iWu9+aS1ZtsqGDAGDOsP
2PKZVFpxNHsyrIIpHCr4Z66GhG7YLddjbpnPMewRb+yBknTEpSQ8J8jembz11bSNrNqGFsyOAPDs
ATfNS85otoE7Xx8KHmVVhFQh57bjtIJyVrEgRyccna+TIFM/sK7wGSkBM0b01IiYliMvQr9R/Ha3
ASJ/OsFwMPW5gwrOSbceDkJ0uN6fzRWjcQBvJpFiyhbdZAW3uQBZVBdf9n1NFIzgqyxWJaSqLUWh
z0xj0WDSTGEBpSMkMTTBYVkiL+tBHIwKjcKuMKI98FCYZZKjlm+qc4AMCU5FYX/qzQZZM7lFl3EO
YuICp5MVjgeXutvnrBAYVhuDTwy54UGkrfVbg3ZoK2BZOPSRDAPYxaq8KIsJggvxp9BiLmNA3Fvi
zzxEffGq/jA5wHGb/IIASDwXDOcOtDaxr6mrO3HifmciUMJAncH2UaJ1ZOhH6EAWqe74y3Ga5kTh
C3VvXKifaKgzB4y7FP12I8pvNIjj94zCBq2AGS+o7F8IpWZIkTmNJi7/aWCJ8+76cCN/m6oo724G
f6vOaRQgcm8lIQcTYzRhtu9MhhjdvPHUqUeWwGDPAtaW3rTtD6yzlLj1AnePpl3fUzwar9e/l5HH
B3QMiwNXu+VZhckqUT3KCeFHXdfWUZR+7H1VrUdM327qGu3/RtbELJaUWdnW6G6DuUzWwyJBdnda
RGZ31CIYJcarc1gCtKBSxej1ICqZSsvuhLt6J/GguhMN1nczKnvDZKDGRvM6cjvQT6EP+DX7fweR
laRkKsXjMax7ThB68uD0pouBL6KuoL2uS+TjrQLWk6vVOIAMTrpBMYuQi8MLHSnBKDjVn3cyAxyZ
IkREYx8rVHwJ4V2seTRHNF5WuHUHoYFxtYxNslip35W6pvx89bF+T/ZJhZgnobkaTHyYu4fd8g0M
QgBCGesDWRDAsOaN4x6GbmS6rHlMSbNC2jtZ9fBnvHGeib1pu2QNvHSd0WK94S3QJQL88UX5oJs3
od1jFPr1fr0f2ywLvQHnpvnVL+noKQsA6AM92RIZZ7Dz2NjRr35ZnM3ZIlSiSaEmJQA0y1qgexPw
KHDNd7MtX7PzsE54VlcNO3jWm/dIn8wBTxKNwo385hftfQWJkM83VXXWzFog+da9umYySTIzDCnu
ckm8KzMnoWEZrDDZm0q3Fo6k1ZHmLryFGDTIvBh1UgpccSQyA2iJlBRLu6hFwFUBnT1D7uMQoS91
oVTJNZx5Xgv9/lXVQr+rCfrzF2+evXrx5FvchCPk3I64Yb4kUcNNGSrkVJKss5tNpSVMhsn2SJMZ
+uAt6uB0y/j23OO85vsW2n1TXW+fv7RxRKLwHCnj9CjyrrYpNLgA2xFwAzDNPqvPF/uB46UelrmF
m7g0Li5ZfcOS4dL2ePuwBhyUL5i5vyQG+aUpdX9sNI+wRFNV7+1XnhLH0FOf48CaOHUm47ACGHiE
Y8bJMssC292VCq0+N0XsquRSIjSSE8GQhWp3n/GbcMayvn5EKR+EhJC24M+5ADMCIYkVss78cHth
pbam+cEZ3YbeaOq7TGWML4jdh/EGbpV62TZX15G7r8PdaxUhPEpFDNY5NCtkQH9vym8xgrJQQQaK
5FuUiSMXnYSjGcyOC42akkkX0R7npCWXZ+IMTwOMMwBwhnlEbOzzZvUjssyT863z2ThfRdORmCS6
gEn6g6yDHrdC/qb416yXiGZWLYsW5kKAtk/mnz+0STg/ddLSUwuEtYGWphtBdfVDMjB5lNz5sdDJ
FxXRU05+dluYdbjMrIRZhhVgZkm3ErgtGIQdmIzcKvLMoTOyqmnVaKrkyEyasjDJFYVq4oGnG2tG
LFdPJlJTdNC0TovrAu5Pt7qeTCXWw/FPOlzOXV0hVHs9eLC5c6lym465QsEJHl0F7esTR9sWrUmk
eOFmtTug8pik9G0mkYWEAI6yU0bm4DZT9tCr1iqu8KdGb9pUS8rfsVs51HuXOLtKVBlE6dE2Y0rv
d2//k4l86wKXAo2Ms9wAK7r9MHn7v/97l+DbRK5d78M4r742JhhRq5nfMrIIU0t1dlmnFkdWRomo
sm20xx2OxTvZ6mW20mF/sDsUL1OzB6G4FvAYmV5R5dHoEeKTY2NkANdkFP45aONvOOvJGfquov0r
u6gGjYYGXSInplyp8BXjk55VmhKFOiu2u/WiKnN5bLm9JGwLh65lYf8olKi0A11YwdqWtToZsx6/
QZ38ri/1s07FzzQz0Q1tZA9f98uTs039vlo5T11KHQzMwd0H17PH+Xg4cCRkQN7rm/ID6zAPSkPP
DTn5NUX0i+P73SY/llrZnnNDKHXnd7mbQkrPmdHOe1W1dpV0KV5iKIDdAoWDmom+5jwb+nDGLHLP
B9LjnXuOGShawjMlvb5oSzVxOCqfHzUyEzzyWYW5L1ZoT6lk92Fv9Rcvn714c2ATsmO7w9Z30wmw
irA3RT2d7jaaDNVgTIoJwOHA8Fxm2pnWY5dyDlM+VKRm634JrN/jbnJ2eVQHD4Ppve+MsYZ4E+0W
szFeDZnNy6+zabX7pQ8zNaug/SVTSi5BaXScrDItxZ6ucRXoak8oT8imMC36XwwLleQl6JuNnNgO
SC6SptvSEiVDLX5FVrmtUq4/Tj5OVMTVcrOtqiu30HrD+ReTxm5Ex69vS2v1Ynb71j6cvf33ksoL
bvIP07f/x78LbmW5Ol9vZ2FgfpSRoDoDHSnWR4vqY7WQDHjOqR/mPV+gdIjSLgN0a6S1YWQDKzfN
koR5lF2Ok6j4oOa3kwNpeE8JhSrjsJXF0T8IgnxIioZeJ9AljRIqhc5sip3oa05wxtkvg/hyJPmG
hT4u/BmQ1WZxWdiF8G/LTgfqoOvUFlWMH2ZBZgEKAL+uph+qt6/7nFkAXxX4bn4uwUQajv8QJA8Y
itRe/GKQxCvXeyIeNHK/SzPA23CnOPpL/QdtPYetnKOklAPc/CUbj6Kq0xL1cwEUYFriGzTRdaHw
E5y2BtAnJ4+DNhBK4aByvP3i43xSTDhy55YTd8YhFTBlX7XhnhkUBxQa/nBuYCihIWrmlGGIIOLo
sRhgw4DO0PR6Vkkw88XenDMJEEMpininXfuapJ4gg64bmvRZdV6TWJOVpixe4Oz2krm+Yotik4Ul
CNTObkRjKhOHnQizEqvdrG469C7WlDy3IXWkVKcAL+4LmtsXPlV82GdJCMM5uxH3lB0gmc033uoy
HFzfp1geIAKbn+/DRYHilGar0SXjVdIxl8nKtMTyp4Vg0MA26dtRs90v3LrT+YNVkreyRpwbqmRk
RjvpUiotdw1Fw3epnCoS2kyK4011fvxOgPpL/ktOKI/fcSe8xwIKmJSyEA+BMxgiawkpERbHctyt
pPtjGLgkIzgu3tR4OLIQNAzj3x6v98c4aBgSJzLwS1SW5aD4MnJ1+D4q9fidp2GkV1wm0vRI6iw6
j7l+0GUdO3ENtHcGRbEnKvlStgSJM7jbYc/ILYEyfM1cNgOlxrBjui6P38muxb2wOzwsv4I8AC6q
fuZwfe4lpfyRmYBXActCselSvllfDDrYajZhOHabOVzPtK8cfYP2NLNh5aEB4Are1DstHeIo7VTG
oKnU0LSF75rJyoA4qcgUsqsQKyCukhB+kw3JIxHiJ6s9kzToK8oFGSXjJN+9k5FJbnNHdxQU232F
e6gDdCnd3r3jSWnNQlLCYB3dcE6ABb0E2EJruk4PY0rywQocyPDCVv9UvKPZJVkRIeW5CVERG8zT
Wklmjo4QUzaVGzn4iFp+65YVp8Q3GIXgFfdG7GRR1+ssniW64AY0i1fmWFD7WANXNOKeRGqysTd3
D9GhHzfvBDYlmEcaLLRBghQY+SXAEWd3P8/BcTtSbskrYnaAhPdZbGbiU/GN6G1vOMesM1YRBJ4b
uglvlQeTjG9RMka5gZiUY2t0Msi7ngNAlmmbssu4werPfXvQk8oF1qY6fV0ijTErOR7gLATgY3rN
Qs9flpY0wyHw/tmoSd9RZi1lAeW8uRHBJm3R94IjnMO3C0D4KxeEIDh0rtaNlI0rOWaCRV2CJPCA
njcbPiAD6H7ZJgKeMgkMtrqc7Av2sSgoSu8Rp1BSIpM9r+arI8pIU6anzY0wjI7wk0ZDx4pmOkco
jzvjIOAukAP74iQnSIhQXEyOAEBGUViumcPMGPLpytleqiON7ocnvRwRDdVgd+T0UPoQRlAw0tkc
zXJryt6x3sxx42uareZop0jVyqPFWbyyk7olRIy1672sAvqLrrbteGQzIbEGzH3ihr2PZs8zy4Gq
7+624yN/q5aheQ8j7fgFuk7BWFB4Fh0gMq0sngAxgIwZuUyxcqyaUYAHwtghN/HuHXcJNzxKJWQA
pXC3i/oCExHKDRmuQGYmHGpEfevtnebecYrSxrWTu5UCj3eOBeFbuqqKPyKB7wooPU7uWkXLmZN0
BPwnOy5F2gdHNquaygyryV8bbjhN4Stgyr331f4KMEbaLhnCKjjIWHPLqPj03Tv3tXQ+bO/eSRoB
MbMR7/dX1FwAqZnu/g2uJFbH8Oo4Zm1TLchK6ue9pNZ7nS1OnaUPN5y4SfEdFTMgcuDQRbjwSjy2
DVBUk+mlD75Pi8DjCBqocriB23SnGNqlUDHz82Jf7yh8i7I3U+AotpVtnU8to2Ey3prVJGTjlJZ0
5E3pBOHmFu4mtBbWIQdmt4aaOwd4lAySg3PNeQWX983BxKtEZanKBOH+8GiGui3lgYETbrhx2AiU
Y8r6ws8R5iFmmvIaLvZuD52/bti5a+qmXi+qFdm7jJlBWFbbCdY13WqJoq95/QYItLCMwLKR1Igy
cqFoLhrSzyCipMHA9axU8M93fKP7gK8WfBrSxWYWyHCfQ3LpSWPVcJ7qso0TGCtBqh1U19to8yOm
TCtm4tkE1K0jDRlXuJsBRc3TxW7GSdg13kOlKlkmmSx1xFja3cVW8oTtHfumOXU9JU01I/WjQmpO
aUjbjE76WPya4C6E4zWjLO3VEX41ffQVF1Ky9XmT0yPOeakww/gSyCeg/FbIEjGaVPmhkWdQNvRc
Q0aAFW6bLrmKp44FkZzV9aKarI6ZriV9GZyKDUbYEFo1EA00osEjoSFrABJEGEPKTec6Br4+J6tB
q+mpjcmPsNUAWYsxvnDZKYF7fe68EiYFEqKL6hCNEwBjP4O1PG37Ll5CQndU5d279pZ9qaRhwDvs
HFT5WCrv3mHZQw3qzrUfuIARyg773bufDr4Kux4wcoDnK6D6WJtMYViEaHQr50FYCTe2J9PZo8ap
hItLDjoTttWKhKjOaD57tpqab3OddYGRrDyJQFcVgcOR3gZNXr7iJV7vKyE8eUsoq12GKjJCXZV7
ctC2EuOxM7Gp8tyO2v55DGiqS+3nCCqFOqC1N4+5OJ+vzut3rWfTz+ETTmcbZ6CCJLlUc4heUu7Q
HcgeqA7dezl2sb6ciEcvHRM+1DSxn0OHKIPliIj/NsR1Ls6kWUjBPCrcIXnvwKvcUhyRDSBpA0da
bvCyVtyIDrDCxAlf/pdeW8MFE6U6d9o5MUWTVLY/n+aWuiF7K0wZM9l49V29hqulOkcVCmaDTtSM
1fV6MVmxJJz5eKw/b/D2A2oabVcIMnkiUHojuyo4VnK9O/4HEKeQ66ZlGIaLYhV4e6hwmp0rmpJZ
HfnluCV0TGBGeVVdsZnxGSYVX/kX1NC9+eoeXoxU3dWuGiCkSN77oqbLiJOsURMu7zVLsLEKtUQX
/oxVhM1ifrG9XOyHLMqjMNu4WvPGoTDTBPNhMIjdcjnZ7A1y/blgbr46X+wq4Epcxj986gcmC+pM
zkZEk8XgZwNFHsEYNQ8uvNWQyRf0w81cHLJdvHCzeQNQs2d9EzdCFk8iP+HR+2kmolTpnpgmwgkp
Ag/CsB4he3BRb/ZkrbXZLqotaZyBNv1Ybc4wSyRKZjT5mum1rcObrhidxFggpK8vuKVA44OqW9S0
SrDXRiR985VfCmnFDC69y+E2u5pskGwEPrJpJhcUimpWMceExOd5kyE//U0ntY14EM4iMNra2ISa
U/ddViZK25z+wzVf9M8AFeCRZ04EBeew7c7gj9iGynUoFpbbOROUJIbczheTDbV+3w3652BpZ/WU
7o+f95KUXjRc4Vji+Gs8/xRuZVfYKoQSBbAghMkoac4AaEsH/wZyPTL2LCjDEl0hpO0TXfSsOtsZ
EfLPJ+AjVeNYTUmqmdjeoMomMm9CkK2uQtOv4gIjaLjKGZG0TIjm2gc+HS9hpEmmiLXyYmC3CFSp
SRt9X+3PaqAlufXNbr3tH2xQyxeufNqmEyFwo8BtEOfJ9LxG4rXCE/S13cyJF8bQabAysByPyl8O
tGc0sOWjvvICCji5DRv0zeSEIw+0runSJCusMznoMgqy6GAgcWKHQHdEDIx0iW4B+c6EwJ5sFWWQ
4YSus6uwmL+vii56gJfOzL2bZ4Dw8t+M17OzfrrouzUqK2dnJVD/7OLUH2D9D+dv/8oaHFaLNd+A
Hy7e/jd/xSaHYhlLt9qwwCKKRxv0Dp5MOcAn67YnLEKkwXm/D/eEveivuhmyIerNtlUXm3pHQTfY
luei2tKbfo9OJffdG/iixgCpd3QkE8B0D3QdjbpA7gFi2QIKiYJy4OxGXbnT9WQt5mduEQgnqOWt
s2suu7ZzM4vuEeZV7x4dYcPd/ABQtbIddblEZjToHhQsO4vU7ILTWNrGwHEwtGd2GHK9avzhoRqc
kD9cErwMpbkfJ5tRF4Xe3eQzD5SsQY7IxlIUCoySWNx7RPaWbEuQ98RA06+PNdzAxlDEGbSiOJTM
eVb18buCo6sMOVRRS3NYUu6Sd+EGBdBBp0FAaFjoiyN9E7fdsoO0dhwWs2V1qFmnjxZxHZm5sB0J
GuiyKrY7GLYNluD91oPCDaYatxwiVfdYSM82NWHBHRGrZH2hbyXQaz1c4k7n71xA6s17sl++2iDE
bdpNS+WI+zSi+3m1YPToYnJqVkp0xWUKQBw25ucayFYM3Wg8x3EkUYkuWU7OKPuERiR3g7ch7rA4
Ro9gpVrvqpfEzB1TPYlDce5rSuCnruCKRuZ7dBdDqO/pTwqqXdYHHWH82OnVbIR/Kd4qPmD4qCiS
rKzv2MUVGZObuf0dJlcuk6gtkbfDYBDWMLE1h27K9UYCnyarEVpH6/2CpLssyCBIe8NehbpU+IdC
1vj9CACNgsnRKkgw1RDAtpv9YgI3ym3MrJKA0ZhJzu1kL4mhbLY5jRiYmQiy6jdPIxdO2nQUOgnI
d8IcJQaJwuWlDjky6qBzS0PE5JjI/h8IadV3gMY5Ew9NnsifucK7uysR4PmuVK4+twj9LEQngfOJ
p6aEjvxbCuPrJFS3iwyFX9vDJLaVyU6SKnFEt8SL40HHOUeHy4xo9fi2IbfhmsfyyWyyEbf7mUF0
1BvfNiMJnq9MCBVmxH/PcDSQ7z4rGwejZ0oL5iFPDMxjanrgKi0oalHm1x342VTrftHFoK9dweSE
MwGG8cxrTCqt1z1hQDgl+2E1zZyTSxxpByLk2dXgTKv5v2zra/pLsu1yes49HXfjkXVsykJOlBNM
Fw1dybHCbxpOGMjc/bqyJFKmHnp1SMZPA4dYMZ8ejL6MgLYnGYSfG4osJFpW/24zIP9rNjXAGmHk
A4rKVdw9evRLCabVx9pMpJvoqzJ//OfkGH6hr+jVfLa91DDgboWKv23ZR9jGaLsAp6GXuPdRQYwM
h5KGeazRI4FLlvc31he77LQB/RC30EdYX6ERN2chRTTrBT6EtuoNGrGRefhug+YxaAYXA5K6IWrV
gXRE6meV66zy991YChiIceOTT8ca50x+DwfIvc6MgyWfYPRmF1pwrOA/ktjX/V4gHmiQUtXgQ0Jl
E9ziEiD13IM7/dRFlmrFmDzoKA5GgG/zkxYhBfBJWyzXOYB0gzAFXTqgFL7OCClcGL9uGIsGm9cA
fofwNdxXJtWviktstl8KqMAfBm2pL+HAHYnjN28Xdl+uNzXq/8Z8AumVrCMpwAdpRB83V4AsiknJ
YrFOHMznRiF0fnfyFC9BePDasDVtm7FrCDodlQpPwOoe0fRvvKJDmnNgkl58RGvbT7qspUYmPJIm
K5MS0kuyBreY7ER0mCmoaZaJPKjjxj/id5oFOJP81gNpPgNFmL7ZAmcutg/e1dyjFrtNmCNJd6Vw
fhAy6dJ44HJV8CQ0W1EIpZ0Pl2//ZyMjGgMbgDo3TPP9Yf72//yfPvus0+122ZvybNJcHslXVdCp
M5wLQ6G1KWgWaltRRNVhlb/5/Hj0oPwv5a/UvRWN5r4oH93/ovyi6NcL1LM4RguVfh1AfedzSmOD
d9HFfOqiczZ1MX7y6punL7/7/ttnb55RvoDi42RDorcKWkGaZEiae7IRdxEJcUx8L5DDxwydmewE
XEEJxqjjfFT+l6I/WcCzeJEV08vJ6qJSBdxk20H/yyV67Q2gza/ViBQYGru+KgUzZj0i0GPTbJJO
olK6Q32wz2rxRKw5xPON2hhTdE7s680l+2U12gktA6UQQiPMFdJ5KPa8oh4lUDQ5ntaN6HU63mcy
cEUeUkgpdCTAYWBYki6ME4HrflfsX+jFl2+e/PZxt4N7j35jNatQlLrSOkWX/eY8DTqZzcbadV+c
08bmnkebTGSde/d6N2VtGJRu+qNwNTqdl6j6BXBYrxcyMTdNihbM5Zyt1UWtVCo21JnVTpduhiu2
jeyTxvYtokA0/rBN4UfVcc5tVOuqWizKov/8PDxAjaQ3lUPEGl9nkozui95Hbg0D4/SIpGjGimLq
g2Dx+vtnz7764fvOiP8jMFEbSdulhAlHOI4Oe6d/v9pO7+PbsX9bzu6LhMO0UjaXxaADXTfeIBZu
2ovNZInj87EjSA27W2uvakFK6rz1fF12/gAgiwJ7WEUScxcG+TT1siK4hsJ0/pwTiDS3rcV4ESX8
dwrOHmIxxfjl33cajsYurtfRRI6ml9X0/VE1afZHsgtH0rjiho7Za29p5jAVwMZmz3ujB4FUkyjj
RfqY9IukJegQIT3jyFgtg+08f/H6zZNvv73/1bPf/vDNN89ffKMb6v/rvHHTltUQ5EX5vMjuIYB9
6h69dpi7Wu/9NnR4rnDfHZHql7z8XCCythVFP44J2yFs67XOmnzSBBbMokDLOlhzypyhmUVuNnZG
cEpYXh/h1mGEBbHyUHBoWJKt2cT6x3ZIV4Z4EbLD/owElxF6lXj+Hn96n/NegCSAtHhOVqhoSyLm
7RwUFy1hisnVZI8rjeFHppeIloO7yCkoCT76FHIGGUHg9ji2DGMmNnbpFAaTycpV15y37smrp2OC
n9FD6G2zEzHRJ0H/X/dZmQaA5GVa1fSyLv76N52CMRD9fKAadL6yiat1LmIUp3BYPJRIMTh3ONMY
wKHB3HQ4u8adJlI/4KqwlrJg8pY6thA4ehjMUEfYKUT9J1gdXcCvADv9fbXnCCBFj1p4/uJZr5DJ
ochLk+FhTB7AM+wxzhEcMngTMC2p7dLoaHXD0dGwqkIsPmuclq8nzfZrhKunum08tR5+kDPhII6q
9NrinRkHtjgrAkVbsQ5uI1vaBlzBg+JaVHVMJisnTNcqRGtMgLVSd16PrBsbKBd2WyX2TbUm1o86
OHl4fHpc3ImSkOAX4vJHFLVMawoVIYH4Mfi7afNQeK6gxQc2/YBeLEHISNykMwrwdxJIfoAI0dzj
3CKH/v5N9DZhFWSqRw9PMZSbGfOxpPxAyhEGsVvhinsztVVNF3JC+Mj4lJPCn7ImsCS98l4v4oj1
26iwZFR7K4NBmq6zIZlvXyqlYU91VvMGg85fZ3ggHsD9XhTJjjkYIuzXa6WzZMR90pXjGgxpiekn
7hpcVzHT76gTYYmuT/yuH59m0hk6IqfDw9fUZpR2zKIXZejuBKefORkh18hicTtHmRehas80lL86
5lC49/HDfZa5dky0dau3OTl+dFp8KYGqjgPxPFqY9h+KuMyGPhLEYkkxfw10bop95Jo+kraji26U
wVI2VncLdxNkhXXjmuzQPSEsaUJkHWrPJ6ZIBkh2Wx/++PZ/MBztFO71D+/ffvU12TuIGVLxPbHR
32nIG4AruJ7mQWAjzprK2i5G7B6Zpxgebki0pDZmEXckiWtgHkFmY6QGn8430x2ajvF3QL9i1BrK
XjSEd9kdEEg8BkzUe4gSwl/2TodFvytX80Q91gHwlsciBEUxM7q31HUBPDVKypYYXBIO8m4N5DhQ
Xb31vkfysFTis95/IRG50jhsD1xU8DeTizcon9ncMuj7eDu5eIRGY5hS8k/hN1Z+xQH3JG31jAV5
D4LQzfl4YOotrkN7vTuTgiwLsTmqRPMhxhdSDMbYBKGG0iDgFOgS0PiQAj5FOI5CuGo0TCiUy2BC
347tx/Sy0nZgoToZmWaSsiPS4bpFI2nlPbuSnVCQ5kTR7mK/2wDsnNxtTllLzbWG2vsQ2HfpHNfK
9HnayQjQOIwMxs3GCamk7fiAEAs6RzEWybBYQ+yGYJpM0y1FIebFOvF2u2th0MTS5DghSQBDXTeq
FUGR6SFNXioq3l6P149jhx/IphcemxNs+zTbgyB3R9OGFz8iTR9632maeUn4R3xW3Xnkh6C6rGy9
CRbVvW09N1wuDcWP7zFBI8arpUKK+I67B8OtCtJMeqCGBp1Di4iCWB1vgs4AZ7SGFEZlPc84XjH8
gpJa+BN+0OlNJJ5uhsyOAxW7BssMFJcxAMi2LPfxxuS2xDed7GQZb+St0a3EcpSAuX3Xx9CswOcG
A5OGBBlkdkcZiy1Rf7poECDpxzhyMoZdabbzrXrxOVkYNjorulILX7vExF1U+tCguBVovivCaitL
g0JYm31BVdaKz2gbQ6btzl/ZOppMkFSf1b4lb2UaSIQ2FYV1xbYJzuDVTiLHUyfAk/IyFJMLjNvE
Z82ugon+J7nfge1EQaoX10pMLG6ICUudFVk2YhYptrCeN+wFSI71WILssDfoir5x/sAqhTmwCsrX
cfrhYLxuA9gAbDGjRfWaPNpmLz1AYKCBoPsZpzPPcpwXlKfVdpQUjgGTC8/YOxTqD4vFZHk2mxzr
qJIWhD5o4qEO/RBDZYq0l6kgXQwY3Mk6mCCdArRHKz8slo0qBqe1TNN8Ly/m46lGS+Ewpa92KxSY
SJDSeCdR3bnBW/TurEgVf6r/m9ZAJIsOlJ81HLH/vWm2GgEaxyjTCVY2mUx0dn8P3z1M1fb4yXEU
yaPzC3d+ot5okBxTSRAksTEr/igyVpI8Aft4cYFyEgO/MlLXZUcDgLm08E2BnpsvxVBRDr9pW3rz
Y9yi1I2GyMe4Fre+os9b83pbr59vxRh04JUn6qWlQ1Jk43ij4GQF5AAaysTrPCB2lF2+7DoRLRCM
4TjMH3IADLuzOce4pQZFm0qXlTfkNKvVp51OhxuDb4OknW1mcPsRsYgaXs2CQR2ep+MUldM3fYfm
pnr/m1l5yFWgoACMDmjZU6Gas0jd+gFwJBCSHQrC5T19agDy3Tvf/7t3ivwJfmWw4p+L6Jq8vWiJ
3I3he3PuDdUqvBTRHcE6r8p6ifre8VgZAsec3HaS1DnrEBJOyNDfTpqqJXY/VfcjYcHDlNjLfsia
TSvdIqG86EdCyehI+KHT0g/NOEj8Y9rOsAJaM2vgFaSXDLvN8XHKKEibiVhw/0VbaqPq+uThaYkY
0mev6cPLR6eDtlQ8YwGW/r3q2oN2IPIIo4JnQECjUzeSxVQznopJSzfh640pRYa1H5uMoKfRJ7WL
OlCEPz3CQRFEb5pcH2qtlWmB7fNHnr7vD0i617UGId1BtltnCZUbWXO525KXffqNXIZGxe/gz6tq
Mdn33ZrigT45hUVdjqyIfSQy14DRcgbXhzi1rBGxFDU5XDeVjyaKw3rqb2MXApF760T8FV4GLv0l
DaTLBDtaN8bLph+cI59NKIUegEgucCviij4sEkKPEQ/vmltOjBSw59mmBXn+IvT4PE6Uwn5R1BKT
NjG1SI7W9XXgo2COKHw6jBF8J10mBLrC+XB33aOjx/AGmxEsdsMMjnAGrgxxGaMcv6QbidekLG60
IVhJ4bSPDRkIC+yVNTmzMVCTrOKAdK7QpU+l23Pgzfc32D4HUKwGgUQrEg6TzuSnlRO3g5y2orUS
0IsLoOWXPJoM0FJIOWQx28J98tiO0oqzP43z4uHfmasiMCRDvMJ7Tmb8qOg5epi7PTxdx+wT/QHU
kDF2ZFaqR5KT/nzmzB0HyVDmTca7NOLY8znk+IIo0PV7MtsbE04UwpHHCtuT3WB9Y23mhunymCHf
8bir78KrI0IZBo1Yhu7CcJCtQCELb5cGa7bmx4GP4UohyKMrOJHmmofAG40HtxAa6vYdgoILAXh/
uThSo9S22+yEX6FIyrxtv2HZFN1ZEtqZKqzmhIjRTavS1niZUwJGjKjlWLRkchnf0OwNWWZcdZeT
Ptn/OxxuCPg/jKFTNHiPkejGiz0atTgh52mG6C0WRlf9QDTtFxp5M7Ps4fj8h5K2VrpJXTT8VYc4
ZrfKYpkbqWq3CqyrSffmkJA3v8stLWXE8x6Wc5axqaaXK44UDNONnVWLNuBtOU5tJ4QSJeam8Gdv
o082bK/JLCMUknwK58z9umaqFcJl2JJpgxNEhi1FXkwo7RvFneH8B0m5/qCVzr41cW23erqoJoFW
N7hOWu7LzIXojMMl+cAB3OsPT/TOm12HU0OL4ehTNMtgTzmVCw+dHWhu4mUYkfuqtlJCoruZNi3J
jMPBf37TiDEawPx8OW8aT/RH17fNrAUUkl3o6IgGnvntyed5Ne+S2kt69kmN3Mh8V4fUABjUsJ+D
gdyatc9PkqG1AkygIm5Lo34T6jmITNvaYL5RzQvQC8QQex0xD3ny/XO2ogWCHkg25of4m+e4q9VH
h3ERC2+ideCM4ZE9ihTL0DhY/KZz5pOC94a95AqzB1992XGUMWTjzMiTzR1AnUr3+z+8efb6zfj7
b3/45vmL191B5oKSvFwKAetqmuvWOxONyWCUDZzjkSQ7T2Z26/cXKNuh1HeNi+iwpcmgWQm3NSy+
mjdsPz2vVy/q7ddokHj7PGG8ZneKt2/fwro3u8qHJgpWio3cku7V6+vhw9hNRNiBal2K7WJIiMG7
IFNdBo1lGIuTvzk+jZuSHvw2WiGOcaRMPqY9obB2vtpVN+e2dDIi6B292fLZGXNbc8tes7Ibvaz7
0Kt1PRsMcpJNodyCmy7Chg5QXYoqPsaRGAMBoF5vHw7hn0e4lv8EGJfVTWQB8vD4NGNBBxWQrOoe
rbst1LLvnsYIbfWxh+wAXQk3whDZwosAoFb1cTcPklDy5IsIjtx+5pB+IuiUi/tQqlJqx9DPOT6i
nSM6eniYj4lGiYvRLs/J4CuskFlkvc+NDAV/Sga5lEByswtLCqyFL52j2LCd9XYiisjmNwUYCSsf
jTAzJSkogrraWnoi/204cfg4VJftsQ860w/FE1g0y1Zas6ot2jH1F3RC2YYjg9akey49SEIA6NWU
dni7Oyj6qPOPbufU8kTKsHnLAZJY27rptj6Q4LJGk2QeZ9RohrihaFzPNRhXRgx06zsP5iL93PYG
slRUuK7SEN5LgxuHFOlTEq3RSWRk1075Csw5n00qMSx6SAr3KCVsBJIoh/LUsg/UNWjR2GTwlCom
vKc0tLR2vtFAeauvpoehqiRF+yGpTIhI6BAmUNB6/vFAe5DHA2eEavhoRXBNhs9TjiaLPGM29Cb2
x1nVhQwqxn8J34jUO2JkzwOeUN2Ab6ZFUAqQ8S7X6WcpkzAz7C1pEuMljH0CvJnwSTk3YZ2nggzW
ikVpaaMwSbIEyLWIa/nJrWGlbGNGSpdrLCflW+R7X6jbAfaVea0rkdp6mjsLFfsBvnZxFaGvQN+T
MKzfYfwyVKD0RSZEOrnSnQjfgp4FldYO0vuYOxnxn2FholryxVxKEPq+GsAFOFxMwfE2OrZ2Vf59
amTBV0HCj7rvJg8yHfkYMfuMyyfw9/RGNwHTnhnuoWZ9sVPVRbs190YWGp1fsvMCh1QXmklEnShc
kPj7slXlgTTTiXIx3I5YlcP5x0WGLGJ9+RnJ2Lg1KMMPuY8n3fGYBs8Wnt1TQYQ5u4WcTC6Mhmp+
2blSgO3Uyp7C/lLIi5kYRMDM77J9Ice+QEcqO4DBsHCvdMrJjna/dJtGsZ8E0u9uHlNkGup1aNfA
ikQF6KOh4tq4xTQ1c7JSGVdEBrHVzCgokpGUShcnvsMT+Ic5BglioKyuiFca2fvB6SE0TmSvQeQH
CKDAVI7rstnhYKiWIbRGESlEJuZB8Xg8WORmdkp2WjEw/BwkBZVQNeB2nGU4fMjqrEiytX5qM9MI
FnSrjidc4nOjGvza0h02TLc63tO2EZ8gcYcVRejpH4rBbc32gBiLSIxrn6MJV6NpdbHQkMwAu4pF
ugpfkjIQqSdNufI1tKfIArVNXZxUVxMeeQcC8W6eFbvVAoNXYxAGTs5NUlYNWLtbnVGSXGcGhj7g
hPO+UHdUNIBb1erOzbEmwmqypWrqNZ1MLytD3PDSwuzQYQVmNyz++U8Zoz1nKge1AY3pznSDmyGV
mDpTOw77gb5Q5byhZe3fZE4mCg+xGubF73wC1eWllwYJCgAVagcS0iTKebqRKtzwYMk0NCwghzCa
ixl+uM49te7viRYp4Af98LLUczB8uAn7+WEIht1MriioAMWAozgmMBR52WaaiQ1LETTw1V0+8T0f
m8/khrdbbQMYaNsRbFq2IlxzCQHS+K3WN2a7YXX0bbgm2Oz1yfER3lRaQrBzMLMUcLGiHfib/bpq
g14BpWtnNeest262lEN7roPKpVTcYeoiwTFIdZZsLwZ3l/uR2MYtceeXeXu39dLahpHmAhuK7WbE
yXYko08UrCpRcYPI6Z2dwkxL6WIYSmAcKeSSooY6mM3I8ZoINb+GjFOSc8GYRpsb5NTcis3pkjdl
8/5oGUl7MkTXR0CuOamYXEA98xmwAROeSRuXU7EVZFsuoxkL6VbzPLwpDo4SNs0oImuCK1YWbzDI
kA5WKMBjuZym5XSrAi2uM9ogn+eCwsNROFi0zCAgtP4f5nxQazcZK6mFOgWHwBbvCqQWJE/J2ilp
IAMHdFYnOcawuOnSC89nA8iysUFshxROxTMSekg9Qxkyk9qBXYUssWts2KtNCVMeE6pjkrLapMxK
UMFM1FootdimkPk1GuIvm4vUM81wpodM1XJye95ItjSD7ZzPyGJf9tRJw9TSTM5tcwFci3XMiQN9
YpyBZA4GouKDbHDe4JYCHDHmvciciFARG/Al8fJkdSKaWiBPvs+bscagyeEW/YaNtCMXd/NI0Ccs
LZYZjtLx/eSpfoKHLgVtAcLz/QqNnrGdYzxuuam1rqWwXDl26zg3dMqbx2E1yDRDsdqhcToO4S4L
dF0k0h9X3Xa0iYEEd5QP5JyCAdQrcUA+VMXFOMXBkaMwCnMPIGcoMxTHc+hIp34Qm2OYXfYXDlYg
wtnsroQlAvqFb5SDBAwZVmeum6G7Qpw4ZRRYgoWm2WK/SM9hEaOJvq2oI7RhlmEgKRQLV/IlVX7o
1irW3AjPJpDlOijuFM0E1gfDilXT920CIk2i5lY1xciBFCoVPEaXfWn2wdiNx4t0CEzcdR8shN85
d0XcKEVyQh8/yEJFPW5s1sTb2tTl4j5EF6I35g+uRtf2oDUaUcYnNLDUMe4FZexdEAqMKRhBLCr8
Cc0XnxftHd3W3aFNGO2El+3bbqnDGFpCgbN1BIpv31ivRSm3+EJGjAr03UdXw+TzyVwpx5aT4nM/
SrTDKJD5sRMRFDmlIrKKsABwzsXp1CNOw4srvGLYCpJxdix37nyRhyY2BnMO3Bqu8Br9jvU13rPy
TlhtiivZ6XxYvP0Pn332GYZLEXerD8u3+//42WcmJEzHMaQbjD/aEG2sjx3hQl9MlpYLZb8ELgLA
8gFT6FVhpK30c3H0WCuRfRH5pdYY8pQsc50ttFbw1JO6ryfVbYedjNhTmyJST0eURrp1Z4ZLl7Zs
P6H4s43DFiBjlmmbaEjHxZu7nhztZZEGxZID+2FOP2k3De7vRqXz94vLi8VHJi3m/OdC56bwZoW2
cqYY8Jrw4IecNhcjJbLvFMvkPwySLtCSLMHYEaYKK5HjcKbCXPGx6zcmTufF4+JBhipj96TkNeb4
HblZnsxP28TRfprzNpE5NpbZ+MDXN14bWLOL7eX4EjjOG5fITNqfWMziOcJ/D5xT+NonQDgLTHtc
gDT9lvAi163qgZz9Mws+zNAofgL8e2honAD704eGx7B9eKEMJnB4cMPjsHC4+/TQMkhBmuPpEpOS
4L93XLxcDEeMbyYN5pCjRJdwtA2RwYHndA5DLCwOW1BenuSojh6Y6UPD0mFgNgzThnrt2hP0poFa
rawZ+Xlw0/3rYbEfHFSXmJn3r08enEIF+Pc2ivNP6+dQ+2Re0YdJ9ytOLD8YFvrEtqr8w4LK6WF7
Olmhdv2TX3z4NxkN6VkdxNqt0XqtjS9KBIi+lDtkKCMlI48wgpO4oLwOyx6EEln2E106XMb+2Kwr
OtacZowX/MnB+OTnm/qfqhVGiRlFL1pOElk6w2dn4BzV6twhsTbFOt1dXHaqFVySGNgXTW/12Ywi
cNiHMsHvljHEdXx53+43GjHj2fUcJxf8bmmXb92gZD9neIVEKuUn8uEQXNR3jouAOj2KgXdZbSis
B3mOwbt13TQUpXFbG+MigESO0ByHcXz6vQ/+XmH6O9zV+gr6kS82QwJnpLYUlFMwBHMCsoiNJMZj
WJiem0TT63TGQEmS/qefWPANi9f7Bi5HbGJYfFct6w2r44Zh8wOzC6r8FIdYfDxwjWgRJHNSJkXJ
NfhICkXhwwcUPjMNGDjCiIHFAxeHuJoWQN2jUHhMGgeUDv8tvR+P8F+h0gSqBW026lmBMYJb/JEp
ZLD6TY7PADYlmfMILTI9S7att0gP0fCB1KsxvYzcHhxdOGPtZpQ2VO8MuZNU2ijRiVuvDKiLLh1n
fyxnFenntP/b3AOtlXXcFgemyiYadxp5jVtteD8TAhneGWZ23tC8g6CuSq35jq51eWxFWvIb65Hl
LCv1MCmU3zhqEH7T304otp8vkZzrqwo37cOqZtk4APgLrENyauO4oy1iQVSTf1KbVrWeadOpYz+h
PayTaw+PCZqE98+hHAbi1TAdi3qag14qkodK8ZLHY0szRwMGjS5r4wPT4kPZ8nzMAbCbOE2PKUFJ
5sIC6ApKbvgBiFK17LikQera07FrTeCIE+9uuofiJpL7D8oU1yV6tNvULuxFHKnx10kmwCknSVqu
cam5PbhhsWfCU0OM8IpJl+FumW9NyifFc+P+tJb94a2BLfRHWxDcWAUJcAlgaHTFd4TEGFXBKHar
uQv9FaI2/8OeG1tez46eJYUeJATksfWO+PSj74zfP+ngy3gHf/7Jni/paP/XP9htXomHTvrgJ9i9
ZNrDR2oyHTFfvAci7CFNtQS2eDnhA8sxjylG0Xu5rVFaS8RtGYiJMJw7kDFFL4iUDm97SC1xR7E4
ZO3sKMluj0qbRVghQ9n7cRU2Ca9bm+Qqtkksbc8+AXiP8oL6RkgPZmtx3lAUU0q2yHq3DXiCTN9i
Z+iVNSd4rV4PPP/NtXIOD1agNV+RrNVywRziUbPNhZaAk+2YBKexBt71mo5UVe1SM2fSsKgka2QQ
UT/+RvOLrC39cEK5gakHexJdZ2OmT+Ru8deZ/rgthBJ92QKgYx9C63I+Y6o7WDG5KqHP/KVEt+Nt
bktsYcTFWy5Mvvxae5IrMGwnulb1Bm1txDUQDwAX6VGw4LrWtyPiCfVplDGKdgkAOyy2Z4Pj2y+2
qOGUm6AWeqYWSn7DAIHAFzZxKDTt2FyswmLAxt80zoNjdF26Wh0F1pa1u0WjNDYkueFYRtUxxrzo
OKCFPZMH/XusvExMYDlZDkZ9bHZTjNWLeT/3PiWvuA46+NeoSthakik1UqcnN5cx+s86mbU6jLVG
+WvndDIeBN5r34a2m3LSwA+rt/+eNTKoL/pQv/1/Pwu0MXOUC7P6BxVRAGEo9QXgCqgw4ynmEiG4
ZNSAyQeDlHKDgsO4og65J+PpUZQeLtO5Q4u+xExDGCDAJEJqKB0U7mkPizY9Y4IA1WCLVLVQrved
D+u3/9GkOOCtRb3cpiL0+uHD2+m/UL6DV/yicEWKJ6/fULy3DQwUyGMguyrmHguJL93YHDaYf1se
odCq7hhnenLOdy+Wa31cTjbN5WSRpkvYVC5xwnaz88kSTBoFzA7bdEwWBU74FE9Ud2C3nS86JOni
7Aqkf0d1Dc56W2HKqWnDqwdP+BEWsGPUWdAOXNYYC3I7uVBbTYlh8ObJN+x7U8r3PuaZ6R7x566F
B6MfQ5jorvfr/dgi0MAMdLlGyScV6vrk0gg968VkS2H4UGP6x8nHSTet9kcSN3VzRs1SYro2RT6S
rVqM0NN5coZU+EemRyH3ocEhtkDiZPz78FRp+wX+phTgsAPf/+Hp+NnbN9gMwCf6IPXHnPgcmRqg
rLpT0vh2YSGo8Jsnz7+l0ljWjAN/UFOdzqtnv3/1/M2z8Ytnv8e8TK8zs6CMGL8YUZaUv2FTpuTm
+pIurkeDzpPXT58/Hz9/Pf7q2ddPfvj2zfjZi6cvv3r+4ptcww8w+coXqjF/onDHxwlYLTSLCGNo
wqn5/tn3Xzx4JMiQzbrYdl+OZSPHkEWDt8uXAbxdw7LVNCmG4JpcKEwXbo9uyyAeRhyZT3pwKkv6
FQ/jfLWebNl6g76XEvIPebTVnJT1AHHkpt7ExsV+CvJkQrzOV7PQz1yMIyfby7bQQtrcraPDYAWv
lOQRWG+pSkamcxrLLuGnsGDp4zvqsDUm+3mtPryRJZxaLZF9w8ZlcxkWhmpEz0djIBXmDFmTi2qs
uMIVatdJ3Cles33vhfUlKb4ovxi6mpNijJLfZg1T+h7eYcY7AteopemEUjlKaplGWQdYLDgxW9bc
oRanjPJN0LBJvYPPidUjarfpA+K6h5kwIVQfS8BhDGbOw8/kXM95+J7PWGIzq5qpYHMLdLryvCxZ
f99W2qYN3mSc57PDerHzWSJvolkgR4yjPXmU+J7hN57D938YY06q598++yprMhneb3zyx3iXsola
t0WFeL6SNUpq9M9XqXVnXkztGjpfhQlu3FUH8/iFm8frlz+8evosbeZO8RXlT0JLU85gS/ks5015
q11Ix0Zj0uRkxJ6v0c2ZDyaGByRqi1Mtwb3yOd9lRnKwAiJC4BqoO2qGKPdgbe4Uzxse6aQgmgRx
4m9SRznANygBm2+xHWwhPsG/x1SJHytJLgpnDeNRbipM+Ei6rLOKTqY6B3MCTXJVQ/XVZBU1p1Hx
J8V0P11UZc4sJXvVtB8tdLqfbF2AIL4j8vCA50GXDwouJ9vpJR7/FjhMMC5VQFyjK8o2/gXKgTOO
C064vakm7z8BbNuOc1Zg3HLBHQrYcHhKfYzY7jMGY4z55QzTLgyOebqt84yW4Q6lKtigJVPjeUFg
M9E/gjLnGdikDNpCoQBdX7JCdd6Y1ii8Zt1sOY3nZEHBNNW4TFLFHBcYzInNzURSro1y2kxtTG5X
Tq/q6niSnWuXCP+U5ZT9GIMypjW9hWtOITtxCb2B3qc2nBXYFaVWPNdsrY0qDetz0xzcatPdZoN6
fGEpZHqOq8A7VUdOOWqBw4A1k7RuneDATd/vC3LEwrwFhF0wtQ1nw91ONONtjcnVsSkU+23mk9X2
GPfPjorMirErurgXV5M9ohef7JbFhTTll2sxjWoQgOZbXQC7A9t6OYei3798/fxtr5HfMD+iQTDV
AWGTS5jmvoyU4yNGX0Avk+6DXo5RM+C0Dc4JT5JsBhjXIwHJ4EnCb8ebdQOzC2r8Fnc89LB8jz4g
rtvsRf7ydcslng0nUzK/m0FkZM9KX8tnz94+f/0mj0vuFM84NwRuspmjy7m6d6GWq2sgk5qibw9K
RITVy2XNDsskk55vYd/O4PZ5z+myJ5h49QgXfIU+m+jc3N4YMJmUR3aCsa2vqh6GW7hYoVRiTthc
dhXBKduCM2ZILnY6gyeyNi9ePnvxZli4X2++ev7qtG2tXq784cQrFmEbsDBKRq4mTEZN/MoNCYst
9i2N6dUIc9sIWvin+Zrlz9kqCtuhvLp9ak+ePn322k3t1cuvX7dMLMD4uNkF5ct1ExHEXiTnYvCT
xtl6rwWBiRgQheHwPaMikSIyHR99cSqcOPLpnhPZT+Pz7E7c0LRqL6IX9RYlTBSlme1mzu224JIc
hUsyLJ73lsVFbY1R71CCHHTHEDd/jxHlvsJw3ZL0Ht2YlpNFWZahIneMnSFUeyyE4juON6C7NAhj
abf6Z9td1WuhlSQRahqrDd1Y+JBxzDhsb+i3ImGWpgdyDtwpvgfWbHKGGcqL13u4WK4Jz2lGdZKc
fQrjkkG7JHpFy6mxu4NpIWXgwfxoRaf14LZ0EHuUyYYGkjva6+yiWvGHi/yHunao1Wm1hqZAV3Bt
pxKHINkBZWlHChvv2hUjZ9xeITVY6iMp53oodcdHVCWaNowoeEi7gP0CzqXLGFETH4iw6bLov9SM
z8OI/NpUHCtT7QM4VZDLIT+tN3h8FvtykIbRDYTptxS2m6MzlINvF510pPFZfw6Dai7hz5SSrP9x
RxJZvF2WHOpzLLYUM+JghjAXNH/g2IYU4MLY4iEWp7zrmoSdLjUnyHj0+VC4LGj/ivo7q4hkwebd
ctjm9kUwhjIKoZSsgbDC6NclABNOOBeVz4VqJHi0udQiduwF+p0AS6GzUDU/Ah3Orsy0LN5lZDVn
4NwXYCCjAkG0IUbeLj+QN03hamzcMDgYco8iuWdVMvk7ph2wbGzx8Xoyfe8zs0ZHMQ0UcliY82mq
qaxp+w1ym4zMJiuvUR2dl9f8/TdjIH+ePX3z8tUfeAX+jmTLJlmjKCvbBbfTRWSn4Z6fUcz5Qlaz
8LGPJRElAcaMTYYJ3hhSyuI7uDZdokp/IDTuFuYWRE6Ng9zUylrgAs1nFawsJgMus0NKNk91OnZK
cQJ4UmQi0jXnQcL0THB9gOCCaYiOk4LsrDGz5ioKbm67CPNQ3h4wOgZ1vJ6vppXL4I36OQriI4wY
yoCQy1Z+jmJWkYHuMGJ9i684cAlc2B8xmhDdMGtEcMD2bQ1tGs7AwYScb1SSsTY7LBe1riHmPDmv
9zUe/fjOdpnt31TTyxX51u6JWeUs5iqK4r+ae1Qu6eUENYVSv/90wFga+qB1g9Kwf11klLtFc0m4
mk20CcPDXQ+oWXCTtME7XBa/q68qUjPRhdxD4cV2u0AAnzQAhrPq45wmhAz58+ISANRYCSPwivRs
wobehcjLossIBdgih1jCHfy60lYoXytwXuIRYWgHaLH+WJWZyDreRJEIoe7VWZiq8PnLCOQApm/B
fwYkE+trleykTUCgw3SfzTFzJSOOoEBjgBeBcAg2Vfg99HZC784d28Y7cY0IXZxloFsOrLNBAcZq
L5uAUFhtKGsAcqDVao5G95bfPatIZhJkcYHRVlvDXibIOFpTMRtSZSzJoWx44SVtNgeFMZBd0ntD
N87/iZh9U4Je/efiwfXX8l/aK2urS8Su/e6Xi0V3yP0NqT07CtZ8l7Pdck3367lYTiXyw8jYNPCo
evUCNaM/bn5cdUuy8IYd323Pj34NgMSfMh8607p+P0f6nHxNS7Va3XT/8aT4cfvj+em9O+U9Nhc/
OR6d4svTeydHP16Vp59D/d++/G78w5uvf03mdtfV+Y/XZ2fw//NeR41ocvyK132+2ezZ/RLOEedV
ZWC6d766ZxOt8pGaKRldZvOcYvvEmqJDsPXb8Ta9K7bp7W6i4/WMUyXgkY3OmeF4hp7vgQu/VTFs
mQIqJI4kGqHKOQ8gcnD+IcXv4eaud1sS0M1CLnZWwaXPd+jQ1WBSnoR5KBMEzIKV4YCx3a65VkxL
UAYD0tEnHttmsroAFuL1ZOaoyrMK0PUcUMWsrpik4ZgIIZ2toEKCJQwSy+HsVR1IYXNQjFXj6ao2
8JMTLvHwJlZQvICprY4eAhJ9si0WFUbvhZJ7R8MLgQ7DWC/m0/lWVqhg4G3KQfHGjowu9w1dOOjj
Q1hP50R3EzMalCjUjlxRPS1MIHrm3Km6bXDvcBhbQlDhYMriB0DFm+1uBbDOKzrZWsEzC4V36wIl
9MVqtzzDcL3Fm8sdC3n1xmSxBQAu0LgfK9KfwubCjALIgPm1bKkCAFFBq9oAXePWMWB0GMgw6+vX
am2EYVhg64dkZcS5Ku48+tX/UhZ/APoPuWzlqyIN8B00LhZ11mZ+cWloPQCjhw6VEhneR8QU2O4+
yhQYcs3Prf4blqRPYmUua6IAKFoi445OGFlG8F3JyiSue/LgGJs/dTH9b1tPB4XVH/nqqYePM/Rh
HNgdYxAtMrUe5GQzuluNt8zbNZzknRQvTF0JtjwophmmwhomCtwIchG4sgLz7EuPX9W9qDtppvN5
N6P3ZVz7A9vsf0WlW2TrvArfVmgyRqQGgjOg7oXSeDfp4YafoI0jJjVck46KlxXE1T1PkBtSMwh6
j8q/YbVORS6TE8p/Un3AYEBwhr8oH3paHv0q4TBv8PTBFdYU91bzawz/gohAtNMAKLH90nF6i8ly
byrA/EDXvXoBk0UbsxfplbjdVJSAwyE5rhtmwfbCP3t1fVuJ6nxdrxFfEfApVsluRSifmyiRTB0f
5yVy+R1TqkF0fn2cRYZmDvxssMyQ7/4Ny2i7xivw8FSfo1rEe3Y6owG+OuCBnWbJhXB3IWuQ6CYp
q7ZKdVlK9VwOKhkjcB5wvNyA/6NU8dVicePqyfw+cf1YiiKCY6HFPlEQG9NoSJ0aQSthDnh9D8rf
c/RYZJAYRGa5mq+6gcT093Da6qumuIALa0vOuItdM4frTxh6aB05IqJh2NMCCRLHVrL+MchTLLGP
ahYiim4aQ4IzKYTB0EJtzi2Z3IzVJAwfU6cjEsCk6TwVlHk0u2WlStF547SmX+HyRWSsvchxlBOR
ArAuf0owQieQlcm0BNweZbMUBIDady8CgKJjFrziv5+LrST5aTaUTGQ+ixKs3GYNpNXYBxSFDDjR
vhaQ5VLiXxQnqtmnAgTho8VkeTabFNfHRRiq+Xs+hXthCZw6HeaC1IiE/wjBUCIzv9LYLAGIIkQ6
S3fEznS9sz1NGUQ+PsiPb27ix2PNCBnQna8PiCVT5pP5zn6sBrLcJzGeke3XbLKdGIfFh48SaWbM
3iBJW2WSZvd7ftPuNoPjtOLdpodYSHe0GgxuNKS8Uzx1Kjh1UcKAQzuyBZmhJaXKIsqAtqOYvTC3
AZp9PXyElfDnyfEvT9UQzHD1hYlC5HEqseC7lWXCqY1fHp9Ss/2AJb/NkrRPAVfG3q+HFiWBBrrM
VBJASpvzdbqPLl7BbTfQtij32U/YwtQrHFEEWd2XT+GUoRParRaPFfIGBX7KmmmkkloklDGZ4JNN
GRcGRLnixOLcu4g7RiWGHv3IUrva9AflZreSbDeMzZoJsLPVemOCG0D1byqcEH5DdLyhS36lyj5S
OjvaIHSXEKT15tJ6WbBQlPjKPjopSJyxMTJskxUJAAA7kInWXOPoKwUpMsuqmU4wR5ATou5Z6Ows
nKaXE9galIPTgRQkqIolP5iydQAsF2BRwU5myZ7AcOsCoUQLwfEzTLgMxd9F7W02dEw8aIlqwoG0
4C6t5DOMqvfj6p97dP/B0596NCW25ZBpi0Bf1an/6+uXL2gcus660zQ2MsSc12WwpUq/GI2XuiFj
Gatg4ga0FBbJEAhRKY5AEMQQW28cDb9lLhjOFDzgE4Jd3omF9kXVIun2IGYYZ17fgbP3YSKwLN+5
zWVzEcL01yyaJ2KFd9RDqIBvcQF02krWFuUF7EouUNGge4ejTxoPonQpwg7+aw+JNCcaaYc0knEZ
eU61RNHLlixizhlsVCDiOCkeUE1GVHwQm3i/iVHfYYQ98oWLYOQO7c1ifoZalgoQqNNgTIj+ZxZw
WU1WDTMLKJspjDP9HYV1NLeTceGBIKmIHFAMoEpaCpbsKMpAdfZEGildmM2B9OmadSsKrTopHTSv
rZfSxO+0D0A/bFZG8qAr+N+EI9/ZxdVmywNnASGFcikHn9jJPwz70nDUERODfSynz8vrwyPTHDiZ
YQtOYJEfZPOXO63kA5I/p6t/hXNqP92VL3fvdgcOn8igw1mGLUZNBjyc3j6kAGN4YAdQ0k+aJdfI
mSYwokbaYU1m10dtNjMNYvaE6AFzMtfrvjmRmOukGdcbF5TH45dR0e2TBxcVYO8t8t3if+j3gK2/
bIPkGzfoHthLU/ovjoNN22ZDend7tB29u/Cga0LRinD5KXbQBtZr3Qx9piRsCf9Mppdjvyd0w1Ip
LqGZXElCS/QttDIYxO0cMmaY4fXJ5rQ2k05MIGYrxU7rszSsi7N8x1BqiJbtnPMWDnJHjNLyOLuT
+alflugHSk5PQ9M5buumqGlcLN5JuDJ3q8lmDxwBcm//LA7M2/JFvT0uutiii7CNr5+TKxJ8+dfg
9Q+vd2fw8ih8+WQ2g5efw8vOnzqds/mqXif9/Ha+fbmBUv9iKsK7t+jk1f3H8OWTFbb3n83Lb19f
zs9xOF9+ad6+0rePH5u3MhrzRgZt3mBQYHh1z7z6av4R3tw3b75e1PVGXtv339XYASCyIUqjhaY8
dxe9oVRdlWcfoMZoZBqBdaeXv7Avv6UpBi+e4Rtb5huacPACyzy2Zb6vr3B2dnrPG3gzD7a44b1n
gAr2Ht+uwsHSSw7sTbvcUWdHstVAigTNpIekulnVKCdZjOvzcyjhiafXcAljqULrFKibx8Vi+eB0
t0EZ12LvmA9GufPrmxqXI9LlAl1ywEfpx9hZvURWevSVS5NPHT4ELfkubtuar0GiV/0RxA6ZXs4X
ZNaCq0pRZ+nNGBtoaJIRq0iTpzLZ2XdcmdYFCi/AWdVpccEFxo63elb9AzCzW3eTwV48SYW45KZM
HCNfEZ5lQ1eMwieYJw1vzVaLxDgOmNSdrPQwofGHMs8Mc0QpE327vSQhJwr3+FeGYWVd+JHjaZeq
jF2z2Q4rWZfVDC1bNI4zyQsBvrYsTY5YUSCU1WblcrtdH9+/v96fob9xebaoL5p1vS3PqvuPHjx8
eP/B39w/qy5hhEfNtAKy86g+P2K+pDkCmvHIMyeX2+XCXX1I1gKK/TivSPoNBK/Mut68twywiWhQ
qAmtLiPRvbRMHES54kjVslIdJzl3fH245rgEFKF0globxJzR0kKzamsxa2QHPyJs6B6ibxOOciXv
x9wCqosn7zFYNg1H1IWo8AS2FdO+nUlyuSlcqS6BDUr/zXDJ9IGFuDqZejO/IEF1DAPHSOsrCDTO
RlskDm4HGLbqG+HCC48dqHKKMe6NZZjsho4Wmk3QicgyuKIHMEsPCiNO7AZpz6+R01AX7HPkAAz7
SGARry85EGPdj3xW0cUDJrPC6dGrAAowQgWhGBUu+N0uxWutQgxDkMDVabcZbZisetoQLRNL7De8
qaQ/t5PkyxDg46sd3Yo4DQ+SONH3SJWRqyOACerMBAcUFs1SlWMPF8fFE0EEOBYDL+Y0GLiRfelY
RRJFMZKGyT3sDI241UkPdXq7BZkfnu1h3aWAgjwvkbzs+IC8VJrkNn5IwsezpAntRnFNxQYb9omV
6W5sbEcxc/If10481jFljquAcHrKD4Rf8SgRl4qB3z7sKjfIhqJBe1tUGKdpuyj+wZVDMpbbQZU6
bpqdvlH7O5NFiqwBA+7+3XovIpMHXR0vRi9gSzjyvjuwYSib46QKM3LK9hpEf6wYTqG9hnAO75VT
GpnGMLEkB0MxC6smeSqjoWPOh5dgSbshSb5OwIDzWJ1INwiBBdp3o1R+IVBiS1LGSq+GIKi5e2TE
lwScl/Vihkm35SDNN+FR6jjeRVFD46wtWc2MjBsdugjJ6RSdvkDXSoEUHQ2UjRUrFrbvoRYBZ7kG
3NmbvsdJ04OCZ35taGEa541JHeo4yvWuuUw7xgXID0nMCdCX0zFzfpgrlmK5lUW5LtPfnONWPQH9
Oq0qd6+iIE7REAp5SFFZkUc4X0XRRUg4sWI7PFxHCQLJNtw0DsHDVqbqs1DsNBbIMkjIhPJEJW2y
cvh7UP5eYPxHBrVRIgmkylFif1bP9seJUwSZRZGSui5zRtlG1b/iIaAuX6XP6govMs9tvVYAEKtd
lHJPzrd0zwcmU/VU5Z44/vH4fAfdofGqNFmapMVzDLhJ6YRRmos/+1Y0IfE7fc4Rxjf6vptP9OTb
6raGdepyU5QWZbLpmtzEbIkydtOIRQTrGgf8wAccUV7iQUDwU/oG0aik+4OavKQjXC6jTZqT1Tsx
kYALUqstLwAMqpREz3DF19vNIGNKBV2iitUVLpucb25XYgl99fLFm7FY4BBLBNXbjJPeePhAoe1s
3uAdMssZgxyyVkrD9/Eif86ZPGAAg+IokzMjs3epeye5oPYj1Z1fbPZl+HpTL8U7GFZpARhigdk7
cvpUTtUhrjmoPu16mM8ZsjmA4aZDDrQ92gLC3ec2UYieUDk9PO7+CUH+qXKEo5QxHD3IOsQSb4J1
iV/gk3lq/boIjE9gGMfwf/HowgFYnXYNF+N062IzdSyfTCOFdk6jnOT0LWWrSZ43q3I5yH0eUiJf
A7aa3mQ56jT6uBC/mXSxbiBwJwQp5eNhzIeetXeZALjhA/E4wmEIl+9J8wM15ZhtJquGSDBe6fJg
eZiEJnAjv0Pm5KjXA/E+2j2gbbuSBJ0bO1i6fc6v39wwYQdBt+hO86yyWIShBAY6yHui91No0CG1
4lsfTocFV2JNZQk2No4m0r+3VQVVa2PJaYlnvi3yw8QuB22Qa9eL4dHTJY64j8KkOd09wr0WCqiO
O8UP5EDmFeZq+jFHP2MyCwUOe7UVsnArwTwoDcVlgPbFad5wEGqU5VIslTHnE/stO1ZHZxr6LZrE
aX7ymC7+QukyWsJwAdDY7h6+v4cLgRGN7ALIqIPe+3GUNs9lybDkbGO/hKwxWpikZOV7ut6gidOp
DCiZQabOt+TDbDYVLlugzPetE8NUeJEmEYnfUA4RzNUmeOJ8EP2uVu1KF75/KWFjz91L42HTKCZS
2AVaEqftIM3HXsMquMlbii1cBB8vebuRGi7ad19aQq0fQli6hJnFpryB2BpfD32ayBBugqGxKzWT
F5I073xLljfOZUe9ziXaHQWlvRdMXYPY5+bvid8b5+BXwDY4LA7AUMrOtkGTy3qHqIId+Nt4XZY1
QguldwQlToxFDmgPcGPV2L5YIwuZas4xoyovyuLdO/TYezBo3r1jUaVt1sPLasbNowSECjjf07AH
bd2JduZbEwOEHIOJzZYDhRiMJWmAALNMrlmLIO67ctQc1fTTMWKePT9xj6eUqWC9SdKT3iVNte8f
VdCN4WVzTHx6fTxlaRSDRbo5AQRU8do6+xMUo6JweRJoo8k8IwhsIPYnuiYsyi28XEC2MhWRFMDz
enUywaLKZLkNsSab6eZyWnaSjbl6OeEPGtqwS0WLbEPm6DuvndevijGAz7bCDZVPNzIIL1jJj6AF
sNqBIxP0lCQ+enkdqGqTIaSzdbhjMU4RiDGoyolyUHiWP4IRDLnm1R8ad2ul7K+XLTvpoDjvm+3H
EQSyOZWFtkj4VA6JUkePSmYafU0hR5wBZPTNZO5mh0jBC0kDStP4dire93MQCUy8Wi1brss3snsa
clMuECyJ+tI4hq0A42sFAVbfV3vHkMLqY3o8zthAad9WOqQSy/WN0EUAh6SrI9HKT7dYvZELWGvy
aYBLaxBUllq/na9esnEOgcRQtV7ogWn6GGSJUC7w6SjX7+Snk3vY508j9yRX8Ji5OaE7Qq4XgOF3
zknUMSdhHAIxLiFghcEEdIiIJg3zwVyV4z3MuB1QCHlNQ0lTxDbDFLA8oVEqDbuhfOyeX7E6MJ4q
j23cSmVFOodQqWiVnjq3mDi5WatoNIomF/JP0ixGvqI/VbtIt1iA2No1jNkLwqxYIPYIma0D3/SM
sHiL49qX9C7DJFG2rLglgxfTj1lNgmFOwxj96kB0tltNL3H3rGrNX8DrsYspOYzM94y8RACO1Ka2
S6F4Ij2Tbx+lZtqSn5tHQNWFdkYmVWirBShMjG8QbwUDvBWn+fy8r80OqX9kXEL/Jp3NsrmI8u6o
+bNLL89sXS82j+4NbSNxlMjABvLH1WPZEESu5uOBuGZ2HHqbdLsHu7mhD1godMGrgqvCdTNUEzJc
cu3Qmlma0BR+cTJEj3Zj7qflNlrMbmrXjYEw7DLCcU/5v1Bb2cIBVtdTqURMrDYEMAADOU15WEsL
ZGL3Px4VX2SSho+lj1eUQgT6jJvLZCM5UC+ujVCrIM31gjO3qCYb2tB6A+fFaLxRx10xyyEGQdRw
mVA8Xv8fjDHAcQeu7nZ5pc0FE/YUSpGnNIdRYWgFV3Jo9hyHH+70IBNvOsED1Lxds6/n12F4hUBd
1WyXPvq0MbqIujJWf1jDowBVM+jv2A4tFif5LuJbnmbdEk7wKyYPhM5mzyJit2m9zzmU64RzAHmu
0Ohs7rQbjQ+QD1y9b6SRKdoMk/izY1JANfZk0URU8NPlxENdERK1HrKVbUUsghXdkLBxPsMm2Byz
jyTiCdYwFsGzWuz9Q7lcflrdYZRDgQgZwYBwOctV83KDaO+ERzeULk4tTlkrrD4/f3a97mMzQsop
zUb9eNypk8lKY1upwEimyCAhA2WgYAP8INdgM/442RwQyhI/gFxERKPSmULmAncre8DgMCXiRNfa
EK4cOmwtiJiN/tmX1dDQPIGyXnOtl5afUGIBKfNRclLcgQUaYmzLZOgKUlkioKHCVLpkejRUnd6K
rPpefK3JBopC161mQ+S3N9uj6Xwz3bFh6LkYHIWoZT4sPobqsXA4iWZ8nomPjzOer1ZEWGbUceRc
RMEG0aSDwuGuNxitZ1HXazGSRCrhrFrUV/nI83kuDkgpbHloRsDElPqO3dAWuiC7minePrzwAt3i
JhFQox/DUva6zDKfckgS/pMZrZiokiEdoHIshRQ1QGJPgyZhWLBkptkD6PGG3UBDiD62P0jC+s6L
LwXmU+gh2BgFriGBVnLcFm1FKrYQ5vg1JQQOQOlhkp3hzIDYTfUxYS3jixC60mKIKzo3QGhQhnbL
EMw54pVPcXdYGIRIq7Nbsr/TIMTAB6Ap6K5dGhIAsWDbQzfJIrlJdBf5KiHHGIN/1JR2VDiXmRN6
AmSNMWPRen489vsqmHBMZ1R/pGdVm6Dvh2QmFs600tD2MshLVHTcd4u+HcUwvUiJ55F7FL11rGfi
fnlW48idG88JPbXMfVGdb0XGpo/RtLk2fjSjxsBXUs09Z+vR1xCArFfd3aag/w0o/osbwVCmYVu/
acV5Ucx8dNqmkU3Lyls+M1prRyIOCSOalcYoyJhVGC4VzC2crgCWL/GbIUQ2F1Qwkr1gU5IpN3or
qXiT9xh8Bk1rtEicoobMgDYXhSgoyzTfbcuNBAVDjKVjc2xc1QziFL88Jy0R7paIjSlrr45Gft9u
RFKY6Yx0bJKh2CBhqeAqwvfcDZmM25RHtdlIJRChFx+OH1c/XVHdl9wkgmo3jKN7r73nXI5ltwqt
HUtG65u6zfQrDXbvNn09pQ7ah0UP/sd+r641s8g4rpiK8IdG4WpodnF4k6GSrK+rcVB0Ld0HZYJj
eivptbsldmiX5tfjx9U/38Uu8elPtDDa/LDwTzHm8jjHt5cIyB2PIjJy+B36yUW2RFignG6v/Y06
yMe0x9mFCgdqu+PFJmRzSn8yCI36iY5hHhn7KagZK1ZlQ4wsh/Znbc2aaNxgT0relhiOsWTLTvlJ
m9G2XxfJPSFCALkq4FecMPIwk0AXF7Xecg9jk/E1zISUl3RFnDPzbRRe0DllUzPkmW2IAIzirFkZ
PbPxMdgrauwjxyo03RrJPrIj7c0ot3JjO3PczNCT3A+al7YkB3V6YnfsCZruh1grVm/sl9Eb8UvH
l7r4pzHHi9QDahDHQpDhsOfbGP1ey87RU7pztoGEK8QhOKs3aSlipPbLgJzLE3I6R3sRvqbsu8sM
Z8jHhWmvDOllppLWTbrIMI9yrDaRhMwDOcwC+Ued8emthKGWJTbQdjI/PXUneRONJH+uMnsW2UZm
Q9NEEQPOyYYIxYwfgfUKpYzEC8ntF7BdcbCF7vBGo1mc95vdGg09YYdDvukTKvtj/pObkFAQP7G2
iwSRvQIkA6o768XjOAkq3zWRzPPJ/9/cu/W5bSV5gv3Mnd/s7ts8zcDQqAFaSChTvlQVW1S1y5ar
tW3LXsmuUv/SOSxewExUkgQFkJlJuzy33e+yH2j3aT/Nxu1ccUAyJXt63V1KAjj3S5yIOBH/WNFd
j+nc/kjqpEvTae1AqsdebncJvqLQeFu/+Z+tSOTT8Rp9CN42b/7bxxR/vAer4ITUt81mBkzAY/iD
gPeckELfFNOr8apslnmvhxkYK8l3wKE75JFxag2EDq8azosiOaEvyvvv1I3Ll4igbYUWdyO/c2Yy
NTU57zYvvultGeY2gFPSWyOEMLRBbEB+Oh1ECfSwXAGveMa/odPw8IQfoOuJgiNQZqGzWUVAHymj
yspBellX2zWd4fgS4fHoTRoTRzNeyK0ivcytQvQEJycnMht46TnFj8OY6Ii1BwXIfBjPZ7ENblxh
EPPZdh33aVlFMfTRyrYsNkAE6mHM0Uti9CeoymnRDM+T+Qz5ZEiPf1ZVcmGyIVEYxnpF2GsAixko
QLH57G+Q/2+rKj+il43fvREZTcUZG08N41UVU8QY+CkDEvtNIqXwVBDQ9LgNuQG9nljw5stxfZ1j
WO/bGml0bU8j7TIM51qOF9iCOfnUp8W4XuxGHFg6k8nMIsvCeIVb1E6VI8TsioQF3rUYn5kBAhbb
y3K1hE16Sco6J5fzURy21/AI6T7n7nzN39JVo7Zpv12sjjWScnZYBpJYEsSin3kQIQptRGFnyLbU
TcahWQT0nX0WxxsBbm2utrAtbleB2mGGR+qztCCn3CO9WJpAAxbV5SWuIw2gRbevAqXDzg0b5bFs
Yp9jYKZpMZLMI6BwumofqIOTxHtDadmhjnSOi5ya8ty0xPbu2tP5Pa3TAyA42Dq0r1p2OjYpujuy
v5LZaynBv//uo761SHIKQG+GWDh1IF0YDBSavENfDPUuk5AlkrfZNsivyPynGi5LcmNUE7yWD0MB
4aAxkZRYG/Cr73+F6uQrRjLRkCDuuh4YCBbah6OR8mIl4uLLJyN+jVCdHIHJZAdaqzrTUQL0TmUH
1hJoZ1AARdieUhqJ/RqiSyYNnvySb8Mvv/jc3pDa0ceqA+nv+1XyetccrAVI5XGV0BLmWuRnuRrx
z4PGFNGfUPYkY5A03q6I3AXOAkbJlqG3YoZ5K9Wz5hZzGhs7jecwNplizx7JfFHcq78E1GrpzMjw
2OY5tVrs0y+/yVPCnuICJRQbGbzaLQ6YvWC2TnQt+EicG6ld6tGmGiEaSdpvJWrIUKuj2VvgzaxR
aG0h031OHC7Iow88HWa5eIXOCjzMb4DPI80ZHoNWs3msLsVdrj2z9x0nHh+mZYS5zC9CA8W9sLqI
XYD/BdFuuRjLfavVJ277eocv0KfWXRCSjFhOlSKwOiRZR7dVIftWiaTJSdXoX+PqWTbNMQ+WD1cR
7l5XlzqW+i/XGRVZicPsdTFv2geAGTgC1Z+yLzBKrJW22pWXlaeWdDCUJUWmJGmUNfoB23R3V/X9
xdg6bq1v6thldUHo0DWLkDDP5fxFZG2WRdPWBTSkGQRijsByL4il1sqINBbyPxNBLiadbvtKG9r4
LiVCttgPVNY+P3hc7jWtNVr2wl+gwVt1i+jtt70T405FYOxHWJq44sdUS9x/tyZOzeXbfVrI2ujW
DqQiOrrxoJvUKg8/DpTRtcoOjQMW8q7DsAG5hlngX3myVEXBhm7qHSEK+628LnaTCvKNyGi+3q6N
xxCxuYGmFh5rfXxlVAfw+GRG/271tE9gHgMzuBm5rVnlHU1r6FRCqYXJ5Ui2eIrlwfgaKnFsDkMF
ej3qM3YIWN/5jB0JhriyVkj/EVqIv5GnAhL/mRLyKDIlBgOILdl9Xt5h60lk54wpOj0VjYX4WKz4
bsHwg5VEwyZfDCOrPDaCCVWPZ0ej5CmmaZBqjWAzSlca3ZTj6C9/4aodVuMvf1EsOO6axsZrU+6q
f/lLqiYFk29Qv+iGvCAQSByEmO8aqGe53tcD10tDJ6AnXl2B8VZojJyWQSVdbsBoF77k8U19MUPh
vHbOxHx274kg7R6G3K3LNV6FRGc0C0/uPxfz2a82FSixHZqLe8yC3IPaCrpktl0ntiUQj29zXa5T
XgyRmio0cWwwygvp9N5hWj0R1cyqK4tL8k5ZXKaE8vgUzP7GnKZ+tIRz5lP3iCPIPx0UirX4cVih
L/8pMbrV1LBMWOyTCJk/vacYWNxbuOGyHBHnvWVCvVe81rfQre1LeD0mQQlLAK/1JXp3qG67QwrT
dzzHmKR3mxGsbKQLKeOWFZnSbY9UBL5hjIEEY0NlzKam4FTkzU2kRRD81XVXBEUvyikhls6jubjL
6wZ++cUJQy0h6rlEyXApgo7YaERXlFvVa2di3dhFqJ+a5xRCvkrdOKdtfHAkPJOYzSVWTk04IAge
JsDxiog4jej7YGy3FJx5aYVMYCyBDcziEi8YcXAoPBfh/G1CMzYPQLmvilvqF1OjdD7rd/QAWz3w
o9wXiCcFicwYcVHzGcV4osLVCjjtR2TxHE2283mhgzpI255TBN3Zl7Jo9CRhcCJv6Rido52Jl4lC
0MdVi3QrRvfm6SamOIEqBhabmKOjz3r3Efutsh72k08+6XcRS261aZlP8Pg73pbTD89r3qw59dPs
YwlrTLW4oR5cuZbCDsj1lx8xbPJXjgqhIhE7tWKcLXYe9ZVt3Fal7KVIBU6rKBiGNA1/ezhXEiEr
EUMsncKthEuniFPW0TGSLaEH2HPVkXWhNo4EKNbZiDjpvDF3BF1n2FdFrRD37LGXCAaZGgpjDQeQ
MmLsmv6uc8tV63pBSnH+VqPAOcAVqnadOnuupQVga2tqrsrhRmttSfk8+9Q1leOJLYF4h6F3eij/
ts6m5756SuXobHpnjs6m6xwOOoR3WrbBQyiqpkItaFbAuV1hJEEQEsw9t6BXL7bNFcVChJKEjja5
48/si17BA3PfjPHSr+pL9yJl35SZLCR4tTWZ1KKWNOnNZlile8Q0cWn3m6hQHlzgOMqHFlIrr2GV
4Lt8LmYKQdPThx9ew91d5RLu1dNQFv9ew2513O/odqsg7tYi0HN71zqMYGDtYwpLSmPQALzsNe9k
nUceDKzbC4wuB9mS/qH7oltElBGsXKx8jX32OHBuQTtIyoHpwRgq95qcjgzda08yHMFOW9ypJinK
DIWxfggYIGhR45EUKSY1Ow4KREwfMyC2CCH2HgE7LbP93AKw8kABfXUgvqzkRODuqVMOSRwSXPgr
QW1kBbLT05YU4RJclCDZBnJccqlaIDXsvLyIXnwDRPYxKTDHHN4MGETh0ZGVNpqD3EU+9g5gaB0c
/chObtiiyD9sqfsqFbRW/eyWipwcyhtSGGHnW0s8+uZ1QC5SloM0ijxWg0CUcFL28hgHE7UV7Wjs
p/s1jE7bKn3rolVGJ9PKOUKMFyMswv4gk6i4HYZc0kh8VhyH4ma1XSyQfYy7XMl2jRH0jbYpbY9c
NyAodk8ZiTUBydmWLRzzsTTsQ88hYrvxQE1HO+XVYXw7eRTosz0TiFZpGZx1hHA/ZpD0kg5BjHZB
qLZL1ls7dJyaTsuvziQj3j38YKRda19K9DyPSioUuKeaGKCNb7WYzWfDh80zMvV1dlUW2IA+lxoi
xa9pi1nayJWZFA49sCyWwNX9iJG+pRsOEW4RApRY0ZoxDTXI2/qpqxrJFC04eFhiaTOP5NGOZfDT
8Wq3RHNAy+uDKNGT1Jscb9j8ayC9ItocNB0V3oDOFXPrLwsoqiiubdmEDViZDXZP2tq3vgLJ8xj1
is0QQw4PvZysYSm2XD90idm4JqAgFEhQ0YaE9KDQyx3b1Khc3TiCV7jLrm2uv9STxLp2R27iIE9G
puJoFqkYsUxz9io+jdrPRMStuS2uOXD9umoIxif6sagrb117R1lAfekkCaw153vnSoPUrDXt2Cr+
SvR4M3u9ueYBlkQT1Crbq7slOrj7ZW9HPDniUF0tfv3YrelqU1CqY2aGlCHuQpEYDZVZCd7FSu4z
7YGgjl65lo4G/yjlULzdzH8bI/7AmzdvSF9o1G1zUz8vUbvPRlPlj69od4QdNGdc593DHkZOMIvs
w/V8PvNgvEZwsNjGR4jy58KUUDIpi+Cbj+U38HKHspH6EHmlwR5m6YtqtXkFFBHh/l+s1lvfriN8
gpv8bNbesTus43r/udjY46B7njmFvetJoOg+jDVJO1Zbw7Q0dHRoGrqXYu7tBk65hWGjxeXKtzK4
P2k5WOtBgnGv4d9PDXS1jua0/ZU0WKnZda2VqIUxzTXD4tlOCEgdIyG9mBVksUxAubTScUMgbhGI
E7g/5V7FUh9ofFScT7zMnUP7ieHW2nX0OyKio258OXJXFc3HdUTBxDcVWwZM66JYKSRUUzTb0G9r
vBzGRlTbyys+CSfFdCxhjMbbTbUkhFWysYZV2Ehs9Emx0XGtxqjiuxqzcTv8Rad0mACkRBgq0g23
YynpXVUwMTw8VT4cObN5L74RHg9TUhhPstehEWWLb7n0Lhs9KvridzwjTK0h/XTeNfbL0YiCU4zU
O9064dB9brzFf0KdZV0QeO1rNdfrptjOKpYXMdrKqlLF9WPbzBe1Q7sOhp8N6Pdfp67x2vjthryT
4NgaAW2/yhuQLuvF2+2bH3/H3kkS/4QnCBVYsGEnAiaHx8GVugucjC2gbIxLgWY5vWm1XFIoeyg4
yXlViQtANC6XZKyAIRJFCr5ZRWf5R3wFV15eFXUPDfSXiNcAaxjWK9n7jKG62wKWtERwqxeIbC2N
anJykjI+T+jBCP+guQtydpZ7k/gzKWcm7E2GPhm417SzE42LSoLdqVbhb9D42yniOcOPULp8Op5e
FXZE+z9gKKtp1Ww+o+39OX7Pos/Qm4F+93pfPP/D938cymzKOX6z+pwH9VsoNNWV5fAB3/xhrPXL
pHuXFlqxPdkbpcLLGUVZcBpSTS1qENU2Rd+aadkCxOcCL1I0ll5otGjq4oY6B/sg1Kd0Ob7DuJeQ
b3j25Ld9lW1VWRlNt53kp6enIP6P75oCgXSa4aen+akj+KJb/yidLhgw5Qqn+GbIIERAj65CGim+
luOrK52933HDx4VOF03Lh/6GGaIrYEVvXCgFqFd9w99uwJ+x+QjcHP41jdOTCcMwvZ6MZxiQo6E2
OO7tuoQa71LXafI4aUGI1lY63xLuhijWjWchyc12WmzZFRzWNih9J5U/jE7OgjYKCdOY9GHdT6KH
Tmv3emp6mbNICkjtEjLdAnuR4NYEUgFvlZXNchYMY6HCFiJGLxIjIV9ZxCa5cGTUETpjQBNElY8u
3bClPDFAD0OQmXWMNbBtxFJYbdoHoCrwL8lJnZjOXkSPrIAW96zMHSfr4vuXGCV7YBbUcMzxsMGZ
g/Iveq2OxQ+bWC+MEcfjZltdJwIbprcgC1QoJHcAFsqW2i+VjRRwJfVtRLx5eScYo4w2WWj25raQ
sD8UHvAWD3LyVbTgDsQ/Jo5iuXp3SAqRcncu1zViCMY4mpiVXe+5HAdnRKsNkD4sZ5geN2nqJzUX
ku6cOnnUzB41pVGCgcNgshwA+iVKeYZWwaDxYKV9WIbwtbUTiLJ0+yVAeY+GUQL/98ikz9FMET6x
P6vt4uIaTnHfcEHbK1m9DnW3rWqkIZaDP3dK9BSNcJZ7yfLApQPpeXYN+vUR3m7aPz+7aAcKpKsi
jMWcJqg5j57fwQ5ukj4Gmzs5C8eksyrxs6tLv8IpJqSmIzYUusKhsZ8/f/Pi9XdMy9vq0oOLa403
E++wtDJjXLYu1xw/WrHqzKX//3bNUZf9FccvQ2OgRo5i3rUzWrQozMw7hNA+7OxSiJcagQBOAcaV
8xQmslz/+IA3w6OtAYKdRzsv5xaMG2LzbbmuEIkR8ynMd9k4/9bI0EVOUifhm1wMog5pRSXP7D3S
Yr6ixLwthRsZvIk5HMhTaMNXfx9nBzX/lO8hK/ub7Rr5cZD/kJ0gyzgbLFYiuZJZK7JlKSnIzlBX
R+t5vJH2E7Q/iPWbnVhJqqPi3VkDs7sSVZc6ydIjDXO7l08W9fuHWa59bTmp8T7pnVoEQ5DtaZsb
Ao3e2eI+/PlQUMyCKwlXDYvWVb0TOXXuBICid3hOF64fmcRx2Mmio2RQz9aLL+DzNXbj82a9kOAd
eVOsW5Ami370NPo4tEINUX7x8k+ffaUt6TCKkdAyupmKPQ9lKRWY7o+755CYEn/0jpx/EJNSbAWq
0IZJ0u8oTOEOVLNyvjO+yk2Uspi3RGPYSaGiC6KuyBh3Lq9hxo6aZAmO/vfh2eZJlYlELEBH5QHn
TYJxDjQIoZpccmApNhKDfLksN+2oD86RRGlGHDZAUAcvi02acBSFmLrDBq3Am0+wzwKybs0e90Qk
NW+WQqksFj6hChBD5GSZZFZzfL2kdzRYhamN1YbBNIksLVK13jmGHxTbYJjA+1KZ7no99ek8FsFb
CvWNVIjEhELCWupIXliwHxZbXSuouhPoVSI3l90HfXhfqTJiJcA1FCquXC63hA7fspjyuGgmgNiZ
kyWRP/MPEMIUA0/07keU+x4x5Bb2+wcmk5PZE2lpSFcGc9+esJi/zDrWpk9P6d6FgqkdO3FRx8yJ
Jdt7zJvQw/eZtWV1U+hZO8EQZdPCnb39M0dBRX6p6RMQw9Yu7EzszC92RYYEaN/wTPYjfzhuQ3Ja
oInMsNQWGU0RMtZ63tRFwYYFU2R+cPZ7R2xbm9/5HzzV9dLZnmpnBmbvvvNkR9Y0bvCbymO4Wfoh
XS9FLOIIFoaqaomUMqrrFhwlEuIE7B5vTcaLBSsmSm2eACPiahNANkWizpUo/WXw6JL6hm6eVNrf
rZJJA+s+gPjWkV6KzyykyaPWh6fyEkJCOj7Ue+HnIxaDzANrntLEAqpt+ln7AOTGWhO9agw2x37u
hJOyln3MiqoT4Ulo2FlAj1LYn+JGAzKG22Pajv08ejGPdtVWwnfu4H2LaUlgVZLDQ+KuKLw+RE6G
9f9NRHSOjFUwkKLHG4cZnH8dyi2Hyx5uCNkql+NiPujUBZm+ozihfD6ux/UG3ZnxOqOg4WrHp7gT
6S6gMZkAN3R9ELXHdPnlN89ffmeYDL7F25BUwYZlyEp/EDuCwR1y75ylbfoAmVXzcL7hf516HRwd
lOvt1nz3xYtX6R0J9Na8vOa3IZ7/ziIVwmyrxsG+W2wqO5+SCzCJpPYkoCw669vQighAyLPoEKAc
OFr8ZOldWro6yZrLflQVZpEal5aaZ0iXBurOLcSvdQiusWxk0cu6pIyr7YC7dNNKi2kpq7HqAsq8
85J1qao7aZ2jtew8z6zZE4i08CgLj9GmjrQUlLhHOv95W+qzlGN1tcad2uFV1tgabnVTk0imxDci
MoYr54OTswtUw+AlGKyVcUmnJAireFXfc+un+5GWhq27akxv3aqZy/9GVnfywyrw/fwu5zu5vqFC
9O38bHDRQhc1Gs1voUqMWkQRUo13X+Mge82rPX4QcNa8QBg5joeLK1ddqp9IPMsZFYFQpYhg6xB7
hJ41QWU1q+MYgGHmUVO8ZdBNDNtK8UEg8Uh9tnJMdKRFtZod8szGOIx7t3KK77T8mqxCNqfUd8zb
20+OhX02i6LV9j0uJggFIKF6MfkJtdsconJdTT4neMFsj6yKIz8rav9saVG3fWtyQXihJzeJD8HM
NxOHryWOu5pQtr/W9cKqWp0Q04IrpBT7AXWSq4uGsEV891RkXEXIvN6tPn7+5BT++90g/rVr4nsU
PGzn1XY1+9V7lmBVyJpQCFpjX7FL+h8Mf816vyf0EDJsXRerX6+2RXWLi15qFeAQ4oQW+Gl2n5o/
+/zz56/31+xnYbyQdtpDpDxA77ywT0jAFg3ZiCnyHvSh4TThDhIJGRLJNjY9KecIO9GUfATkHEEU
1maSJ4NIriHO8k+QCMy2QAbwA1KnPQE07P5pwDFTOlPmfveY5E1Vt4Oa6UiAKtkvdvd0nCbBvrEw
ZkkoJFQ10eC0w1pELq2OBjvRhxwT9qOvUZzLs+5mvVtjrOboU04OOWXhXS7ENgqnz7eNwgOP4ima
g40QzVeI0a6tw6AMtOWc7KyrOXhHSDquFItSiMrV0AVbp6BpGVxBWSTle2clVjEUA7z8y5dfoyU6
EBd43cmkyJB3syhyc2Jno0sUjyRZrMrQqcOWaTxexWF82IsGN9d1uSJ/wQQF/KRNGB6ItGfdNqJg
c8w1voh57j0+LicTCoRkC2KmiYKl1k73mTBHRwONHbSM2JjZNUwuv+PUIGD4vgG4AIgFp3S8AFs8
PX2zeLRFdakVnDeMbCTrF5+BZqHSE4T6SQVive9a7wDY0LqGY/ar6vI5ikBaQUaqEaPfznu6JqQz
ZBONDyhXaGuvVPQ2pJJjw2lCp0N49j6XUHDcSrIkGjebzrz/9PyzLyALDJt0A3MxgIXW4QTaTMaw
CAws6Fu4nJoI4wRc4i6zd1f7/rofPSBk8BLekmloLWNQkCmGNSlqJIaRMyrkxYxrOsbmxxGZbsh3
J7dyVtbj0ZETUcdd5zVd81AlFXs7LnPYjgVxM4LTA+2v4r3EWCeju+uBxDvRFeqVZUVB4omRfGqa
qBMnN9QFq8q75YLMWYZR58U5LOro5AQScrwVpYU4ktqn0oXMblcWuZfnRtJCC2SoKp/BX1GBLMtV
CY+WjqXA9srrnMzaUulH3wuP47JCxAZVl4UsTjkaUt6hWGxO2+LzKziVYO3Bvy+rWSg2rCqEsNa+
262JhOqXz796/jWwnaOX33zxPCgvQcMUD2MEarVrUlVOMNgfZO31HpyePfno408+/c1vf3fEr09/
03sAZTx58smnXNrV+loVfPbpJ7DGb6InH0dnvxl88ol2scrXux77djXrarPh65k/bmHEs+j1n16i
2Xt+Svb+sxIts1HUGi/KyxV5KJICspGr6VnxwQcfUBPOPjp7Ev21ulqtdtaAnH365DfR1+NddPpJ
dPbx4KMn5I4xmhXTigMjUVvEnNxlPwei0cGKktPfJyydlEyGluVsxv5ZYxS4ZyVfKRHy4FWBti4S
wbuptnipVjZS2qKaXhfkiFewLg5jSgBvzCrrRWMCdNYFBXBCr28zV8l/ij5Mf//tU1j4z36YPepH
j/AJ91NVP8sf/R5fnP6e0zTljwUl6v8+cjXiCX1Hk4NnP9w+ih79MPvpyc/Ro/MfZoMLVSZS0Wf5
h/3/mHRCThF3bgcvp5MDxyTa7NYlXr2QWEAbj7d7E11tNuvB48d5nps2PRjRXJ3BXNF/f90u1afT
6H/bLmByo7NPBk9+C5MPNP/qsYktgqyPYm/06OX0OnWlB3E0pE85xQhBz720ddvF2ltMfc6cSfuK
hhKdo74N2ZfHSQhJT8qx0qOCrp2QuSC6u0j2EGsnLTXLw+eF7UKqPQriQ860p8S/Jd8mHnIiaWsR
VPVGIndzX/FC48IbDTyuR7y69JjwY+LxNLjYdBJ8SC6E01Pl80uSZk7dvEv0R1HRa0b4MKIAPsuy
QXf50a4Y11IIrtlWKyW7VdaHETpLwH+2oX7x1qBl4Q7tsi4Uh2QcPEqn3wAz9mD0Dv8BgXkQcYwr
kgxIrnvHoghpat844WsjMSDfN16NF7sfCyJINDpEyATxBDMCIaN9CsQrll0Kh3lP7s5Iohb/MAyZ
gobfzRVwW/gNq2QQVaaTUjsvufFyUl5W28ZDPBX3oTGIG7MxG/ZuRsi+Dal5+SXNofKL3NB9lXyD
okVLwUsAzjYSpSkfnviUk7PIHsii5OEk0aq92Xh3OP0M0j/h9MSwDiMnCVA66jes5Csg7wPgFrab
og3IiACFA4J1xVIOWGFauNQFxa9124glYNP+JXFuj7B+5le2dC91aqM+mtIHTobMpA9X8k+Dh19D
PR8NPrlotQpnCltgWKaRZodSTJTxrKDb6C5z6sui04z+z5E6df5nXLgH4o7VngCL23uvupQplyw6
IhmXy1SVp+GH0GuMOKVOx/A1roSWL77xMMLPNquHXBxNhPJuT77/7suT3yYBSHYK/MwFXBYbEyc0
4Y9Jv7MIbegtpQDZ/yx0KqGZFm78kdNatzKV5gTT7KnTrtcpdxCG4tFpzMGzt3o8j9C85O3Nm39v
BY0zUQdXxS25QK5hct/evvmvT9hL88uSj52iXhazEq9rC3QLF38/FdkSvWZEGDRh5DdXNTrzRgJb
EH32+ru8Z3v/CmIHosboqoGDhBog/xbvDl2PSyvg3LjZ9Hx/y1afFHFEz8iOJHld6Kpt50lo72cq
De15OK1wenZAtRbjDV51MeJKg2dGGv91fDNWBm4PotfAjgpnNtleNvlfd3DIr/KqvnxMyKNnH//u
N+yqiHEkkRynMQcaRCtzii+OPyS0O/78irCj8NeL+fM7eoXXeS2osBgv+jAIJqb4I4WrA75ccvwL
QvTjj89VQEj4idECWqW8KtZk8P5yu8Q/rzf0pNcVvdtOGKuC0gFHFm4LfqWYjGKTALLwcsM9/lIO
7y+KObUESYb8fkUEhnpZLAqukONxtmv5bHupPkXxt+j4hD++rKjJf0Z/bB42eoTJovLxvGgX9V29
Y2xfajXCTtENttQOq4FKolVifqEDfruo53fFlOZgUU3GC/wFk0BN+ha6SdOMkhfPxgqW21aPEK6J
ERI6vq3bpMpWhs4To2Qjnhj2FS8ia3jvlZnmwxJKymaEbCqWma5c7FmlhSfUchUYVreAa20VhOUf
X5Bpfs9w7Ue2y9L0rgiDGIN2UqDW+zQqWAqmN9ALX47LBZqIaBxowx4SreBLPsEC0OQF1XJIAvfB
3lmBQIdx3EZpR39B/5rVwx42JSDgrXkS0G7dnJRl6QxYVwavIJo8mkPfHH3nkgK14SCw+oazca0U
dZiEmS8KIHQvVNmoPa+1ZYXDxUkWCVi8VHG9hMeSkbVgSvjF3rtlgx0sqVP5q9H7rb615juNU3Ng
YArUJ3DwvgJDMEcIe1kXJ/V2RRunDaUXk7dlSXAT1jlJ6CkNxvmIZiVfTmLxeRS93l5ewimEcssg
VB4qKsitjY9UXMJFQyreSTGvapY95CO0CBfFyQk/D2G+yxXBOZAl73aVojv+Cp3fRiiWyHzbFzSo
WK+JHPh+Q/yabKmQa8m/rMnuG2YCgZnpa3rWd3hCs76C9Uq7rLlSrXAXLsVAlti31he8czVPsggJ
kxPWOu8PtTJw2oHscRD5QcuYxiqm2+gGOHOCPI+ip09V/GoOM+9GADftxkIMPLbQVcJupzjYA1WK
auzpBSIFwMCMRs7u10sX89qcRuKyJQO1Dmzza6kP/5yffTq4sOeHY+dSjHDYhhjmG2PnsoCJ4Y7L
zTd1BKvyb3Kkycs3Fb39T+7bz4DKwdu/t95+9fqqnG/w7dOn1utX+vWzZ9brz2ZUwCPrFfAV+OrE
eoWw3vjuQ+vdF+UNvnpsvfpyUVW1em9/+LqiWh5ar56/xTfDofXqZbXhtx/Yb7/ivjhvntMrO9Uf
uWvOG0r1zE71LUjl2A27Hy8afFU2zitoCr9FqmF/WdHrldtqfsueinHv515vi3xja2qlUEz30Klu
dYMLCD79Z+f992om3LdqyuAt1qXQiHz6zzWCsMb03pyQOhEehhFzKpsqulwU4yWSsvl2AScjlHbJ
FNWzKutEGKtd0z86DBX9or8mK4VxLqcjPoPEctBlBh4g0M1iMp5e8zlwWyCCVbJR4RjIbnq88PBP
cluE3MexuAfrc03gOZ1raFppvadSYC8rzxKzy9qLZXszFDnKbum0Ctp5BaJauOoVxfb4uW36Z9WF
SB6pUggEzBasjJm6L3EsffYxa+4Afk24P+k5Jro4ZvgQohIZ9COG0R49yDL6hYdPBoMvtg4gA3M5
7gGA0YZ4Xa7wCoWOF2Zc+w5qgfSdF7ti/Rj8FhdF3GaEdRZJHD+1BGu1iWn6gO5RUVaFtLVGuLAN
Z9ulIuZtiAlyPq39YmR6tZ+3DWMH5xKuD4lZkk+qWQj2WXY6c/Fu4S+NK5tnPRxYoGotuRTENSJm
8b9kn+CV8FVshW6oE99jIU9Qk0YTP6NxkhggXZLUCC+IWcBC8nJmMR2BVW3z5cHFzN47+8nB/rX8
gKkfOhw1BBJOEGyw+oL1+EZW4j0kN/WChms1Bnb7pt4WKaXrD7rsv3AgfHKxh6J4sy2aDx1ddLm2
ecNivmFLoHWOv50PI6d0euOuCV4L+MH1FqnWsLIQdbpa41T/WK5TqqFaN9wCupgc8wWq12/K51RM
b0IVSxVeWNP1qNktJ9WCcb41z3derY3gfbGHnsdsSCBca3scdAXHm5T5ffKcCvTOGNHJSVZDNDHQ
DOtVjcht1CjThIOU398j73J2ZpHXsKG1Fu5hWOf3ZWjN7PsdMJ1jG+yqbRbmbkRpS9jG1HVcspr5
zxKk84WK0dnR3EDeQ+5Rx2zFvTvGsQQiEZPwxnj/FfusPxslk9oZrKVnt8KpsqXhr0MGve6mC4BP
H0HaWIGs4lfBQ2XTNphUUtRY6i1OkyN9Qibqmzoo1DZtwxyiJHQtIPXwue2jgncfn0yxpJhOdlKH
K+5c0DycVPCQe3hoCYkdAY8GGVqRlRX/Q88dgn2cxtEjPoBIvLfbiUBBcT9+hzkTDb9MGsltDo6m
sl7REt05/crD5FsG1KfQ/DI0A6owbx7cnqtWEKltV7CXKbHy2vS8IFV4v78vhOFxBBh/DN0uHsvN
HEE077H58M5G7b1yVflsxZHcA2XNXR6CTgc3P7/qLoC+W4rhIAvAScMryV/7igvod7AB9+MBWj3q
9979+G+d/e/CG//K533rrLcn8F9lvX5u4p0jAoAtUW1XU3dy8Y27yjBLjq8dj/5R96lBzz/Zc4q5
42hAhf9sl8JwSO1DB8PjlmRzsMjdUL6B6jN6Edog8L7fyimngt0+zfGu0hW68btseXOusl2gCkLX
5Zcs2E5yjqk8/X1Nd1IHZxlHQ3nrqxGR53cdFcmu5O//MeMjlY4sYbgZPpSCVYtC8+WOqlcM5FV1
Z/3jRjpcgnPIeZF/YcRRJ/8e69Ap46gRx8TxL7EM4w9ljO87Tk7GA8PDIArvMzg+jNieobm+nTW/
0NC8+9gcMTjYIf5GmB3QiUxBuvrldnFjsEdSdQ63SbVbQQtvxamOe37g6MUqdH2ETfIrHrQffuj2
+z1PQ8M+w6D9sPrpIQ4B/vrZZtXXR2itOxniGhEo6J93Po9Ryy0WUqNVpy7S7kutw0wrLSFJLua9
0ibG7bm12q6VafLjfSfWKHTfSbkYji/A3IoxaOPBQYMWFx2cem5EW0yQTzd3LNl+VY1n/e7muspc
KtsbOI/Z5XdB7gLr9Q7R1v7NhVCEyqYCQk3w9iVpyxXBUXnec2++lyaMBDFnZIJKrH2R5lnF/PV4
NxELB5WWpXaM490IzNByjHbWs993abP0kDhGeqNRTAbz5itQbuvbHnfx9rWQnhVlYOXPjharMMSq
3PEjzkLSb5t8HKmrpKRUqO0P8e6Twj4fNJQj7AMtMtRzWJU8isx47RmFjrVpl57136ffv8RCfO8j
pbWXzbGS88miscnax8l9va5/qWKOIzMPos/RhVNdXNFyQHQvcmxYaQfR7jusjn1Aq589PmD1//Rz
6FSyGO9fiJaRabRq86+5aPyK/Fsv97t1+xVQuDtpWwfW+sjr9WPYnl+bqZFjm+6L1ZndNI6lP3kO
OoXQm+Cpillz/Ny5E8UKSqhOq+x+7/B1h1X9UWA6RC3CtuK/+O1NaMyt9gYGvrxcmYGHB6tLxJ+4
Q8+vOsYech/gafI8pzVmDOQ6Rl+OLTIOQl63rRTWDFu6h9Chbd2qGtpty/ldd55ptRhV83lTbNx8
5r3VzOJ2xImksTKgkpExyiQEYea15lA7utsTaknAxkW37WIviQxauYSDYrvWLW3CaK+OX1lbaVfV
e3v35n+1fFWAT2ePg7e7N//XvyX3FDblQmtb5Xt3QnCz2K0TRBhhQVtFFaswjCZFkENTMh3HK+Bf
sr6+JK8Ry8dE/ZTm9MSgGhGIRptqTTHOU0tsI9OUXcMwmSt2HXH8eQh73UdKlRy2IeoEEmHax35c
S/i0B8lDhhcSeasJy2ItAAoj6x3uWlctydEP3JgEwcIhVc/M9ldVdb1ds1nSyhj7wliSYKsGCY6L
qtpITDQDa6s8E2kwGFazf476RZVavey7Fq1JLha356oCxG49v8vXsPI0TooFJgKFXJimwdzhdcga
Vo89e6ostB5sz3BPEbMnTY268Z9+1va6ks4bPIau31pxNlVu3WqnvRcEeSkfrBDMZBQGha3H0+vx
pYt7t95xlNOVyck0PPkQZjnxZhC6joMtc8OUzMwQl9UGaaIWQ6YLWrgUd9NqnWWmgwl5jJEgoXmj
HmX5q6zPeVdhxz/9GCoup9cYn/BHtW7F0Zzf57Ptct2oErLoSd9O8yNtCKmOXJyz6HdOCq5FnPA4
yJEVcNMOnouYBuinN26mZZk43cPvPa9vMh8NDaSy4pFmmvVhO95QwoHlek9p8+0anbLT0MJ0fS+7
RlUZ2DNdHDHFTAmmIlPLRjVQOtzuhU7He2G5XrQI1WhEYZBHfQPpyNswQf51Nl4An4tgnwsEbcXl
J1b+EuXWXdX0Fv7NxSUwTf7x9Tffv/r8+et/TFS44a6Ez19+9+pfMBlDcfSco6WQiWKaPRrPBAgs
JWcWpUQi/ALlMl+jCyW9SWMyROYIpXKK0ofclBOfnOhzCbmXKfvuUNhveBZIHoYWsoAbm80wtvMt
QXi6GdfDGEfS8iZDP/dhLEC5ZmDVMcb5QVgTtCyBOKZpUj4g0vXpkqKMwnqBOeIuqd6rdtBSwA8m
+K/VSO2SoF9ZzPwtr4+yyr8r6iW6zf0ZQ0nYjtizgtAUovMEVgNKhdww/Cm/XGgIOC/l3Cafn+hp
lD7JfuPHu4ZCtQoeOk8z6Jv13hJTRn2hTTFT43ZbLhboI4PBRdfko/kk//SEf32Uf/ToUbwPfk2X
++fPXr188fKPgyhcAcobfiUBtx+yj59tyb4yUV1JsIfQOaBYuzz6nmC5DhSBxvSGKzLJyWhpUyI8
o152dTFjzGojk6uV4FMQF/JEs0U4cf8g5//rXQM7/vkdHDnCssmiy2nR9ftJ1po888a0wV6QLtHR
XwKJVbxy73tg9tXmMdtJ1rOIMLrINhc/qRb+iAmtOXUpDZH3NVIoWbGS7KdkXhfFjwWs6eliC4Qg
GUTem58VZ+m+To0PBf19pbD6NQKZRNSlqtGVQgI1kOTM0Ko67DLvZS6Zkk3vRlyfCy+hRUICWOOg
xOPFYiSRdmGe9bFECR91ppTt7p5iSnzt8U1Iq3Q+hjB0bjEv7zA8kDsGLzY0pU1U3QhGEHceUU9h
56l4wNT36Rj958Q3U4CHiGoK9ylV1cV0C1TnhiJG06fnd2P08jCbv7tz0ckzZ8mcJ/sd7b0NoYli
LnGju79X7Y8ghTsvLlygkDnCCBVqTLWpOXBBzjXTlRptOg8pbT4iJILR6Pz0IrNesk4bmO1E0Idc
GqmjxBpWFRmgkZKbS81wEF/OEldOY6vG9RzLuHCveky2QQs5leJ3tqenYtGL+RPuI3FUemGJphv6
EbgE2KHbvBqUR9Fyz5HgJSWB7e2Pb/6tJcXCSQB06O1Pb/6fz1iEBZ60nKI6YIHBrgmLbruiLbuu
q00FHzwvo8aWXieziS2+krqWYGnkJf62pViHcJsw0uTeif+ofOoYRySA4wibHogEXTsTPSYDcns1
azVprsu1/Rmfrc/cgKrmZIPIfraSFXDM2KXgM38G+okQQULl14vtJSyKK5BNm/fiBTcyIoSmTLCN
FBLQehjPIc1QDppFB8OYnJzMtjVPpL2Dfc4RN+sQCIXHQxpO8aUNOsB8IgEJvYwahEDGAw5mZv2Y
RkFXGaUvh6dsULVY9IFJzJxBUZ0cNdvlcoxhoOQF91OPkikP/UzdJLkwkdzhXCdVZMjkbTkc81oS
r/FA0Vw3nXfaQIzDexKoPpF1uvPbNGKTnHrOQqhHL1cqQwsu8Grc0CUGqb8T1dIQVaBWKNazVrH4
RL08cwu3usXZCOL4utgNF4TrEd0Nojs9UH0roQQWSd3S1QDakWeZ/Rk1BSzVIa5MtQrc+Y/7AUq9
J/fDxi8AGCT9YHH3Min093ygUwi0kTX03tigr1rJWJ7rnB+0cBcPBjB/A2wP/Ntvt5c5u4enT/In
8yZ6ePJbcVXpebiWaz24dLORo09nJlX3XTmpYU9MujdO5UGmX57yEW0sQrqBdr/Gh9f4ALPULmhe
rsrm6nBJwKWN61l1u8KTK9UQSuhDi0iKnQhKC4m/5fu+qvfIu8lPp59yyIzUIUOBsNl1gUJii3YG
fuYlEs/cy7eoLnmMvJkcUhb+bVrHb9UTEy7hBLm8jmYMdXvsswYZcA1wEMgNbRsy0q0ux8I7gD1e
Y9QFWq9qw3PGeCRfjLRrpcazmQxNsC8qpcWb0GucHPmktWJ4nqBN24it1Cmwl1RICyCmJuvUFStC
z+GnxjLAVYtBJouZTVYoqSJCHTVMCeGHKnBqOJBNLUfOmvVs9y13ZvqCekTHH/HdKi+fuuw1OCkK
tlWVqKgPotvxakP2v2SdxiC8PNAk76McW0Xj2/GuPRX+oJv5ZPsh56uuwrqd1fIHDUVwY9DMpP5e
CO1c4PfIHYjXaags7LddlAtsh+VKytSBQwncrTyIXm8IAqRG5Cw1UBT1ACHRN0s8J5fGg97QTOAn
5H4FmItJN4wMfdtM8s1khFNMVQLrFX2IeKQfii+9cS1oGNZyQ9C6VEv7o4LooL+BvNgX8rXHii2F
0QIbA/X/sdhEdUmhgEoBjUXpRXGibniW4ASoFbmHyu2lycUd8Gd+ZrcmWkoj/I15txgUDN8YkBV+
zhlXBoFn1N43e82VdCQDg9L4rrh87weNvKxQswusX71ZFHiJr/GbT9CqOXRtExOwO4FS0T+vXn3z
Knad76VuZLrXfuWqFPlI7AJBgv3zi2+/ff5FfDhkQYzJ6X89ZtZfOBKOKNeDhImPbkPhMVTeTCm0
MYuitWrqkQY5eSUHnwXwEf3B9DFnUUg8HQIHH+JzWi0actFY5ZBcKXoGedoGigyeoFIO/xnKglEF
0A3SiHQFKBzc4KW60AMi2lnkrDC/Gp1YF0G4AH57VSlO7Q55lNm4X2sMROQCiQwRKnV+toOiWWx3
hUjp8e24ucN1TwZ7bbMxq1QMUmBZfuKazA0w1z1zgzSd/2E2+d+35UaxhUetpYFeTMo63ZtrNKrD
HDy3suqUxZRZfZkuw5kH9NZBJjBlcWHAJdjLT9rBzRriP5qDVJkNhO0r0ro91qPEx8hYo+laUV1N
KK8HA226pDNSULBq8tdiusnVuSXBLPRJG8JqgXq4pTbiyoBDgtyRKtO0YBBVCkxXkcupi1kIOW1u
JV4Wy0rUKbHLExPpHZqJsBBHORaBQOXuN5fQOlEyjm8ZRBxwgJb6qnW7ura1acgvWmDe1EArbC5n
UnxUOLc+a17QikzmxT177Fq8+ul0wzWuCYBSnOsIqmp3+WZDbSBuXRiPKpRUGxtKcbJr6drjp2pR
83p/iAEXnpl8vDe5bIM41iyAD8VmkdRoAco40+xgE+osFFand4QhM0qylFdntaM9aANwsY+dnSf2
HVdycT746MIRqPw2IN+GpZw/bC5QOD85ib6lKx4LpsG9ZDlPyllykeEPNC4R9FJ8c4O8ArxGyx+O
lmqDgSsTeUVH/jBuild8XvGWh9Hr2uFIjnyBVFmBqituSGHybyql6lGY4lv7XFtUKzwra41NL8+9
gOpGIsri4Pl6GyiUL4Naq4FKpSXRD5Wp6oNyTUv90lWi3OoMdmPfFWLIslC3UhUYtKT/flXivn9O
pgwd1vS6pPjpdkUohRSdTZX7LO7boFgo8jCaugJnJj22p0Bj3b1EbyLDJg7FOvU0UlbUItsIv1Vm
SHWuy+/J3QzKvUjravRg3uxSW20GnZxWHBop5pQivDPffEw2TinZhKs9Jp9igGUY/1Fl0IM6J2Ou
dmxQ43wiGi+GICQtF8GBB0QYj+90HHflYCPORp9uSuWlPuAhcKL9DtkTTvmNouYO4zecp3fZmRVf
lFgTleii7x3yNicleq2p5/0rKks7pRkIPZZ65kI7XrMS7v5pe/1I+Vn4TGzTAlW3TH9wM5O5ydSh
4xyGMMg/yh2Px4R2VqzWj8sBmCoJZpMAR+pxc+VxClYLUzx465yDoEe1skGFXyAhN3jbt4cGHRgI
5dDJS2wo3Fd7/1rNYYUGchYa5JP74xEysWUrLD6U7yyQyVQc3cGKRnZNo/VOL4N7+Uk0m92CZSPv
+mIzoU99o8Zm7gI2hUMGdVtQC9Amh+qNjogYf85XvDOoeqbO7pRKpRr6unT3yvw7WGCvDBnQmtaA
grX9n9rJmZr1LDJHG/Wr1+WLws3PInNLIXyB1SDDIlgBM/5A95t0iSByNbMPUUqhxshOgWxlaeZR
RtTqQ1xqGPhNBc/YEU3vuyYKAfbD1z5ngY63O+qNhO7zMO2bbg9PkbsBKaUeu8LLCnEqF+WP0BsR
PnBFr+hKdeZyQvrKg3/07GLGEdmMNmRKw1fVajfPKjSj4Zh6G4RU16GSMODbVeEUM0UAfT0AHAaW
5gDXSXSCerQlglCgSYSJC8XXx25R0hvoFwpiRX6ZI00YSyhuvMMrV1cFXsfMVBgUUyDfwOdHXE04
Y0BS9MkzwfbFkwnSjCVqJA3CJSma9fEFy8YuYTmu4bBs0PehmpZk9EMRq9UYGPnWbZl1HKqfTsso
t94648XteNdo+VSOsEzT0cxQeK8eQ3Pll1OLkrPHGv4al2XRtGFGHWaYhlP4Yrs4aV9CGwzN7XBb
4V+1zxJUIctaKoz1wRXaiXcL0HYVygIJ9t+UGUSy2Yxon9ggrhJUUPlyenPWXEHLkEX1pFfDVUio
9ancPjpzUzIi4QaVK9AhNLn765YsnwqaOLdQiztSP3v7BRbe9IelZy2kGsqIztlKUpUZh5/P2tKa
RR+yyJZlrVVjUV6eP+L+u0hwx/EuM2kUJ933i4oqtsle5/Lrmr7zi+OG2GJ/ke0dCQ1SmmB51LDC
ovbVajL9PR9ZyiBPNcSnaojPfBee1eWr7GAjyqJmVVmKHaQLRoFK8TBeM6nIEE0M0+Ykkbnp0jbQ
Rrca1Mr3DiyoNYY+g2XXC4OK51PczzWf+q/BpdKFhpJKdMsdHtRudQtGDudbCfu6LLyZ0aJ8yD3c
rvVzrlXtRauYTrZdp7CuiaWYV95C1zShzbdZlrCsTmZtfMLKs4RRhc3Vb27dkVCE76JQIzaChBR8
w7sD0CjfbuNapGYPJxZgNzsRtBy+q4Po+CzUgdP1fgRLK3jlBxzFPuHqPpKOPkLa2gLFDHUcKOJX
3jBXKMG2zav7nEzOROLhtChW3NkhiCJ7T6jWKYUuWtaw9TsOq9b+sC0MuxfQsrncc+DA12NViFoX
5pRhm8BLQy17Hq3nVLIMMCgssSGjJEILsd9kZzdGf6KIJBfNaje4evQ2bvJusaUdBY1vxFtn5hz7
CIIGBY766Wcz7ePZTH/TYdnlGaXJhXXlrXoEFGM8RcZYp1Rxp9mjRHI5yPbyTpnvZXwdT74O0AS3
rKvxxsIgFQMQdFjBKjDgcjWPXMuncCR3cfBWVWtTHHPkyaeMZaO+n3O9ywVmhyggcpmpbqcFg+BV
pBWcNBtIL5GybFclG9Y1EhaAhVdONHjcOWFANzZizmnae37RV1oBq0FmkayrNV2DaxsGb7Wopg6t
llJsRZ8Y6XGmOJdSF1MctThsyHgYJdNyjZjtLyJnNbb6i83QHaWDyL7b8vESOGKSye4ZeJcr9sa0
Cz8C4XiOvif39MnngMrfrBY7ib9ulFS0zifoYwzbY7JDBUadBQqgFT8rMIAYyara90LUIBhNPQBf
g8PSsov1Gt9tvMMFeJ6eZuXDuUAXjCmcqlN7svcsja6JF3dzd5XodRS4RNHrIGjvYq1kbXaTuqCH
/p4cda0Ufw8b1I3wtv7BHeiq3kdXzIFjky1vT/JKNlWEYhq39nUL4pHAvJi9cLvbD5W3Zw6hrBZB
1DAYVrmBzrHlEy+CoPEUGwUWs2I2MuecIHEbWi0POXZnekXuYCG+ynRiU93CryZtFR1ctiq1cJ2t
PMfOjLr5lsN32C7qfKAZHkrUvzgGtrxzxnVflBFh13Yj9oOV48SVNBG6rl1K5HZpXURDq05wPKBO
WE3nbg9bxwr13pTVtgEq5RSf26dvaH7VZrVm9F3nkgnm1MYfokjBviIVg9gT/ZohJFHhYK0IgXB3
dqd9/xQx25ORDDpKyiEb/yDlhKy5nbEfakJ72XirZnARNAvBo1vZ5VaLIw43bA1bqt73iPP70X2o
BMwrfYmxpZJRNk5GgjXmdY49uSRgW3KdemjKs4RjJ+NexZCfX4RelH+1TIbsg7KONqyJ6FVokxxl
82bbzRxjf6db1mGE129L3w+AEr3Hf5AfVZHRNyySPTdqJ+wkh1JvePWio7lcJwDPr0QiP2Mo8mUr
EQesBzrVbCdcjDpSG6NstvX2otsXa7QJCGwsRtFMGEoggmVz8BYIhEPxmYIZQRNjlvPMdOmm5gGh
06Vl8MYSN/VrKZjMsOjX8bZVUFYYklLZUDmN0JL7QzOMKJwLZ6CgBk0wQQkHO4LiRiNon2qOlnJZ
75f6s6ZjGCP/ewXkqGyu8Aoh+ugag6DNYdPh8bKAzaCcaIVENpIR3lb1jOVIsvyXqiwWWgfMJKvl
on68Kqfi3DUSp01qdKKKTuzQq92txmUo54PCrUN0oXJasjYXmyRX9KgVBBZELZ8D1ZJPe8uwL1Ar
so5LFQF0XVeX9XhJrolNBLs/0pbuzWN2sCuLpn9gCcfb1fUKT2rYm021akWFbS/NVkOD67uHN+/r
CpvNJEBu9MQbAhudGu0LdhJf0ZbEqYPOwU7GKzZc0q0qKWzqpi4vL4saJB1roPUYXIF0RONN7jwG
Rui5qrnXwxqNIQUkxW/cPlRcpzQ+sa2gQU8FmnY0aSNKhiSELv+MUkP0zXkEdH9TDIBKJegwTyHH
qagJGdLjcsHgqphRlg1eLJJ5B0Ik0SUjJpoVsEzqQl0vEkapY0u0XeGGgDVOBA4I3axkArwoYT8t
y2Y5Bh6KhlXs8xpluazxH2CpYOzYwm4P3Q+sOR+5ueICnBVQy8Kjj0eMuyIJOKg0/K4ZC7q6cQqB
J8AdxBMQpLC8Ynj3FQu+usepmW5rvBpe7E72T9LXMklMUgeI/y+1DAhYER19KDluAH1B2tCr+RY2
u7o8ta4fAwI1a5IEJ8NsUC3mu8L6PYZTSJWMphki+dvvUTRge3g5hwrUbLk9I5gbX6suy5UYjNoe
bUSBJP7ukpurwCBKcplBxnm8iRYFctWxKSSm7btppE/KFnVksHdhyb3A6xGrZpSnKf7lvIR6CCyU
FnpPdHwVak301lcMvbQHURhQ8pwUuqk8srgTGahBVcNRlsoGz3ws8raqrxteJQ15xOuUjOTEvaAd
FZ/lT/KPYorjjLXxcz4rbs4o1k8FLarvs0MsVD2eiAQv0BFKNOnDmQc7foo7GDho4GHuSDJp2rbF
QIapz0CIpSDH3YvjyXu2nDT/8ZR0R6TxXIrOHk9/1Ry53mF8N+TiBfngXBJoWzx3FkPO1xrT7Qah
NcgLW10sEaRgYi2TxGacJbo4fEwxAU1J++7hHJKkd5aFoU6rbCDzuK+bq9pgaXBMFYRfGz1Vb0zX
+v7oydp7WNNGsNf5Q7oBeLstaaU2Yn7u3XjoSZdK7W3oGmfh2L3925v/xcJ7UEfv25/f/Dt4/aBn
Vw+8xpP8N/lZ0nv7n51c6irj7X95U3zNOBFt/AHSnStq553LQDa/Q618iQAxlxUsDAHZxYFfgJhC
JjSshr+BXVZtG6tg8tvMexbOhABGtIAkLMxEhpr4V4NYGNkYCze4PU9OxBEuMcBcsJFWm7ilr2VE
Lklu4XadZgKtUK5QNmoK8a0rNzvCTuio/i1XD+sKMV6Oq5wSh6oG7uIeVdf7QCW4Jh7Q6RUMf9wJ
LyGfgxATbNbDIEqMEyFCm30wIKgQlhGl8z4dbFkLsSpOn/eJVmZR2vSFWYLfd/255Ejf9MX6O73t
345rtPxq8u55X/DAYys5CkLnaBDorx4Sk8EaEfIlDo8Ap2W8CcXSR+msbPBeifouhfT3NPaEJ+LY
CeueKygxMFXpDJkiFLNhIJGPPan7exqzmSRWkWT02l6q4faJjazVwGS83VRJK/v0qgJJrxmey/co
QR0m/iVJMCG/DXpdrgp6GqPaJblolcRdNOwaiZQUhThKsezHWPBjKvUxFvaYS3q8qvbOCJJSKpTX
ET6eyPPeoTCrKbx07EbzjSFwvsB77tz1w7mRaCNHsd2/eqbVoqrtOaMXx86ZJD5+xnZFY+aHEnfN
ChVtzqtqu1lDZ1MoAIb/MWbtt9AJyeaazHRsaELXFFvIc3Qy9D4Q6bQ8d8nRyrIvoFdSckb8UbOZ
adccKYslqOV4BUIHmg5dlg3mUiWSvaSLRpMYjaGLeIPO93zb5FiS40zzK8tfDMZgubtlsMTN+LLh
ID5+qHOS8WMVvWc5XqMxlST1A4oLCo+NkHJu1X0RoTOqo12SdlKiHOF2UaUsjASGGOdJGggQZqYb
bHzruFboqDt/+jX5rDH5hyeFB+COj3gby5CqZBaemNHG024fpfEXz7999fzzz757/sVAKBxpx4ox
2RUoAhtJBd7WIGxeWQxwCjl3lu3aNaSzWHygmksSKc41C9mIqfRD9StHdncdUK+TZlASW5Z5HVFJ
zMg+wsRtMEhyU7ELvBPDuaMKvIutCeRzPDxf9M2FOKBXrtcYvhLcpXYC7e8+1iGMTGtCIWnslmKu
ts86fn03eAYfZIUxFQjlXll67oFHMKmbdmofyMEk/jIO4D6Qie4HYY8bk3Uet3ufG3M7QYRw3+db
aHVtQIb+zIwVU8pOUyx0qcqU7kyZ9gkrMm800o+ll9A6SkKWG1IJnvKSS0MFJv+6h5mfqRPNQhrj
SqDNo90DYE/H+GAgghBuPibQG8D9qLlyZf8SPLQ82y7gIa+AShXao9WU8mwYnbZTs43fsamR+WGB
cF4F8vhZRqvt0rh5YIktU7RNY2CsXbf5WVm3cFodWsqo5CGTFvoytA5k/wL5VjWez5sukGEXq1zd
sLjzwEwJ0EFiZIL2E7f51bhBrS5Jp1r3c0yZwBUdUaQLTsQFsjZUT69jDyVmlTYJDp21bnq7wnYr
zAUVvMIyZAtA8bZJGZJimO9EzoxkECV3xKYzwcPnJvkZRfcU00oBvrJHUX2/H6YRzKRw7yXsg2c5
XBc2M+RuA5kN5FhgFTJvFHSoddcjl/HBMDADgUn0Z4h/uOvYaZXaFfkEIeaBI0YX4LkX1sFZI8Sk
dXxjVGOp4RHygC0zC5UIh8of2mLVkNU++VU7ft0Zi/Gkvkd/fY/t1Cve6f4HQ8k9uF9P/EHkQvb1
WBzFXcs2aO9gXyZKYXXncENOnpgRk7FaFbfaUPjQgLznMNBud2fMnEno+4l94Y3bsrzTnW4n9ReB
7g47EwYLFScI2+QFBaC7TYpZfKgDeMXEWCXWKTO2y2lIa4Eojv569UY5sJoZ/MBtqOlSXdjj1Nkf
aSJep1MHukYv/qFGeaizOgOMyRU2iAy6KTfCKnRP0OF+cqGquGDt4lcgdUs6yDMklM4gzEawYAds
w1oOdDPVVQylMyksWzXmqcmuBU5hmnApsLib1sagX7H/NBkI8uwtGJVakesfWiah5vQXUfbFy++e
v3r52VcEWvYsUnPXOnvOWs1dVJeiQXQYWsM5tphb22xQdI8Wkd845tyxShGTRbefEfI5XHY6ZVsg
twVDqzEH3delsUOfI5e2t5quDMvkuT2frAEZKdVHMVPnBb0PkcLD+g1Le/HtV9//8cXLyBSvYINS
rsCLD8kmKORaUNyQL/F4B0wL9gE9Nsl8RACOxZSRLiDaRtjscrrFW+DNdjWmy2jBV0dfl1opqK6g
TZ3ZL8f1ZOGmh1G4LUjt4C3SvQuYDRu8kZ8VkIy4b8vStgl4hLhrLjH5EvYjgE2FU8zZW9W0sFvD
7vK2Y7scinwzS4YHFM+qYYUrqrqLOeFR2k7j0ZnCHSa/c7l+YxQY9Mtd2cCU2uTWk1uCJw7TJtVQ
DeSErf9QN79r/F1eiCl+7IT4sNsSOuDnjSWN+qAtHbW6zC2WwLXumRyxZuTZaYH8sS2kpS2T+Ase
V+xADnZoP4y9oaBJGo0BOr+zC9yBRUi5LB8Wp0hlCAz1jurxKiBWIdMh2gzl1INV+3CX67qajCew
c+U6yN36AUO61voy8u9TkHAH+zxAGf2bEaSoVXsXxd75NqjWamgDICjh4gyP4mdsZ6K4CNplAQdQ
+Su0C+evWkZsgYSGm9SBQezQeVXkTwks4WKVDHCyfw5rJrGwELJpoLAaBc4DRQWBSgNl7QqEyg0V
101h7Dn8EB/ClGbPItq7YEJkiefIMIQt9yJvhexLv2eJdfKonZD4Xf0JcdaEUYcHvBrA/BJO39vx
zok235H70Aj4lQU4waMFP9cUPeCBa0eNNfoVTRnC11TPhtFZFzOLFj1cGbAxGD4VuEYrtlBHy/af
C/sRgzu4VoUE7BJwDQR7FB5wR9EGHnhP4ewjO4zO74xJkNQl3tcuabvLlCnmC/SPudinyMRgRMVK
MUP+eaDn0Ttq9pFybJaiBMnzpB8CB6WTVhqROnKr88Xyz+Xb4oP8tVpXT/3TSw68ngNLQK7pls8J
TA5q6xL6yNxi3zGTYsy7UBal9PMykWoXmh9k1GIzDfvwoL3UuBtiV4R/NDQm8c709kkbRpPr3J5w
7wPV4LVW9Dh6OJMkSJksSz018KEV7uVXKxshT8Uk9sjVFR4zt4ofVnFoXQkrQMdDIAxZAFvVkzyY
OHYTPTSBL+c7GtGgR7q3gi1S9Y+2efSm3qH3ll+RF1iDdSlNEwhd4SQlWB+Grs09vFybKpsblcE+
ZtAaExPrhA2ZuE6RUuLgGMMOlBuVOPev4v14hYjkam0WkYGVebcP3crYtRgiWBJkqrKgKy2kpLCJ
693IwY/1DuZD7W0V4DVaNRyX5TkmPnnYwP9fcJzvQkBSwiV9dOF2npY2dJkK4R/kn6hUALvcMozU
RN5619/D0T+LTiO6m28RTW2L4fj4KovWdgYcVli8xaRcKevWQXA8sAvIcjQ8HbkB0e339sr+9oY/
QmbjRZ16ftKYemjlzCJ16zB07iC86rx4Qr56bg6LbwMHAKXtvws1cRttX6/ax9uqlJu/OHZxJWgQ
5LNHOHUe79LHvmRxC2gP9XksuVDzlKkyRQ2VugXrzxaPwXoqDTMUMBtSOrRZyUoMu3smt3e4uCAf
alKwCKV7I91pR3byz2XUecySg6SMDrwjJ1r4HkRese45iTu4eikb/5z/ZnDRPrgUa+fGEDdrToVn
5Q40SuuHlJdp0qLf0t5Szn1nlQ6rFD5HOu9ryfVytdgFHfJRpaX5C3UOclQQj4n0fMUVpyS3pf09
Ej0dOx8QjL3xaRRXlSZuCy9W5Kzja1NYTx4mDjYg4NarFOaBl6fvc9g+IHe4jDSB6L4DXSiAg6OQ
nQ7p9a4eHwQV/wxKiMLbQ6U5TstHZ7r4voOZEpzKkGaVnDfYehQ9TlkeIQ/5q6Ks0UYddc/GkdvK
eFtEG/SHnKLvFzpFFujuiKW80J6u4+txZFHbB9GkmI5RZ4xxsWvlwQLpoIjLinyhMH6vjneDjrho
J6rCIL2DeHAo+clZyDl8u/JNPuwVqUA0aLj20g0LpFYUpslgAEfrWUtn6tZ9Tn4rRGfxkVY8e2Gc
Iud/FmyWAiffCjQ5+RelUgKvgv4h5YXElEM6RRx+ahXaP0azcuz4KNsma4T2JzwyOl53r/yboRCD
3EadKtH9cOOe1gf6GMKL8MEizs8GF4inhNaQER660co2D7OwOtpIDmpZH0Lp6ALoCIN06P4HkIUO
YzsEYR1MkV2oDkQVGFQr5y0DA53240EYUggv+8vVNnCyq1lKTWuiE9hm0YfApwaChbcWvHDiXBBB
kPRD4h3yqLc1dqfuEPGcs5mcnzkSRwhEk2D//fcEfSlaly77APvippxb1VBsrVMgMFn0JIs+DrGx
4rQyYuk/ZKihUqiDeV8adb8aNPho8/UHQ6wSB9/VOVgdT0K8izDe1+KHXSo/bK9RGFONM7RSElRq
L9hD63qyH05BXEnalgdULSPTIG0nULqg/PuaZUHiCyJ+qqL6dWhM4at2KWgrc71YIR21Jv17jrTh
PzoTeW0RAwh973BgVliLIJkM4moeNJ611BzIbzrCJ4xB3PLZj3HrtHApwuyDdpQJoKJK8w4yn+Hj
s927YEFmrJ27GvdGW4FluhDqfc/94hrkx7Rl8eHfCUn/p7cYkG5B3kQjzhQ8VzHQMdVGzCQWlAP9
Z5Gr1QyvPvxzPjhBQi5JA2IXynv0Ud1Dnwd4gAUwWVgg/NF8Q5KjJScwYX08U9gbSb5hBHSSNcf1
eNl06yjPteQWEhocG4gHGitIrl4IvEWQ5vGqfyYWFuSjHsWP4xMLSp9MLdvGm77YgfyFM40uk0SR
Bqm3xM43uoY2KACJOYHbfbR85LpN0T/8gAU/DgnO2CTSEj0VLdHRhqGhmIkc8Umt84vYl5apNjQN
tXYF0Ek5vFBSs/aGB1nkX/CpWnziF95Kgqmkbe8Dq4ETHhET0lbDxmz+83u3Q0QOOrvSwseyEHW4
kRKfSVOWVuy2rqhenfBbHVX0zwefeNLNETXYgyFT+UBAdTRajPggWXyWlc5xxtJgeKiWsewiXeZe
X7p5ygUWes4v+nsNJe4IRG02QYVAMEq6Vg7dtVU8lpWjz0l1mN6KyXp862+5sHUe9sOY5QUv69XX
ronoUoGaywNl3yd9iNtiwy0Ob3dFHlv7Z4oFo64Ebsl15pAt4G1uGzHeajz1fnuINUPbZd0cCn5D
XkirKh4EPA31mFsLT2umQkMuid59xL/87MVX3796/jow1KIo66xify9R8iIw+7DA6BiIONTIMXk6
rDRvG7E0HVivzO/pGn2Cfky9NG4jn/9rL0AQvaA4LZRQ0a3VIwLTr7p2+IL611k6ZDS8Z+EY4nEu
7QjI8+8yKUHbIHTw69LearZpz7rg7kT23Tl79nabSekoXhxMdXC49LECt8YQN0cXrxF+j6tBm/Du
qeToNX3Eeva/htiJg/pzPomnrYCIZI4WjIfoXgKcxJnK31rtUtz5CSrJcEB/CA2lqnSo0w9Ozi72
uKSoiGLtnc0CfOvWn/iMkRUrx7qHj046rusNww7iLMU2kZgd2nhTzE2MXqGtn1LuytGdZDIHubDm
Qbhk6/jHytM2/4IJxcsYU7RHFd8qtsUBTl6PmWR57JPU3C6MKxtQkA3HUF1RP4JHRLS65Xqz44hy
5eVKzKmXrYbdjBdtBodyucjqVv03IVMT3Rd9NfdwphgOlDkhU59CpvX7vjuNubSjAlr2DQRPGT3M
n5DrSgUCL5dacrAKdzlZ6L+WhSbaXSSDyDXQhK4opsKdZbzaF1LtfvBu7T1r0oiDx4cra5/HbcvW
YAH7LBRaNsehA0qcbXybx3ZxQBJOzgaHHA3CLkVtpV77PLc9C/aNa+Du+jrsBy3x1FobZI/MEJ9c
86q89kxSwjUQHuPduvarWO6tYikRp5ctGrwYHDyDiKl4OFOhIAw5m+xUIOswK+lY2Z3bo30BWy+2
bsZdu1AxJKxHjFco14fpje22jtu+7W99I47WtpFOZ/RrHaSaDX6wIzc2Ul363W7NEmwW/QkhMOh3
IMQt8Do30mxtXCLJtOS5cFaea+uZLsgKgk3Z+200jp1tttIdVni3Z29zirte7+1/fdP7u7/7O7RE
ulsuLovV2//25v/9DwS+1hMXFqySsceI3WID4LpEVRI+QzaGVN7AD+j7dkpORj2KEkFCIuMWCtik
Qlrv9dJpP7qqoE44Teriulhk6hGYo2VRw4be3kXFNo+enJ7+zsZlI0uwGo5cpBHe9CLRST/KTq3o
Otu0CcyS+cxBpXHkxYvTs4Cy1EUk+48kx2jkM7JS9l1upUn7oQVy1++ZWelspGqanDnyCItafmnY
h5fATzXr8bT4utiM6VW6gdXqhBgS+y8TpsrVkYiVzPngjJivZOQ59zPAp6vK8cxfhGRjdt2gUCFm
96SxThgJXnYTjUGIQuhwS2RBkKB1MdUiyEhejEZ25SoVlIHMDqEnkn8NXjEK+yNp7t03ap1gQ/70
c+uUQtDpcnq9YyRqL8KHynqewGbBFMmFGBdZxSOThXMmWqjUdFRwrjOMjquKsq+kNta1jjDmi6aN
S7DQUe2/G18SjVGAU/QSe+8Gf+pADgn4zRumuxXvnl3Su1BIPsTPwSKbLaK1QEs5tFbfwDm7Tu5U
rcwL/aZ+GGd4q2azJffEJldbjm+Eh6dtTHFdTK9FQsTahrM+6VZAviaSKNHs/wTUfwPrTZ3PqoB+
foNfUvdmVFEGvAvW9xEHAdjFLqUDOD2AuF7jRlFBix/OnllGIaXwTlCt2bzDEAlK9LsEFjQXlkWw
jH+iGhNrGyccATazPkgrkdGEVaA+2NsMPgnm3M8a1uef4BjC9Q3/s4+Ae04QltI1L5kguKP8Il4M
95gpBiAntOL1Ai8T6NxcqYGSXuDLVI+etNMeFWiu9FSvTzMutsehNcZd8e4Nu5aMs/FkUmfjaV2t
dstsPJvBUd5kMACLYpON62KcTbLJrMom5WU2gRP6OktM7smiml6/3VabIptUs10GJQE53VSrDF2P
UU87LfDeNUN07IyUvBiTxCoBHgn1D94vlzjWs1k2A85gNl9laJ86K2+yGTxusmIJ/z8pZnbuOXI1
QA+zOYj8GVmUz2scRHh1dZZdPcmuPsquPs6uPsmuPs1QeZXhQNtFlFlJWbJyeZmVq/V2A/822fVk
li3GE2jJorjEtbAoM+r9gmxeL+0iluN1Bmz5221RZNCHbYZYhdlyu4AZgt6uKhiWVcWNX1XcQDv/
qmqmdbneZLJhIA8w+zwsYq+9ztZ1kb3NmkySWtmZqc6aJYbYgOWzytDa6LrAPxW0lBSUWbOdwP/W
GRlv29k3NHObWYZYBTThm3lVbbLNFfw/jhjDK2zqbLPJttl2kd0t184iGMOGxH94Emgwr+oMWdxZ
cZfRLWvWjCHTzbjmfEq5kWRJH0/UuwshaRK2AVt89NEUYJiBa2Ntxa2yh2vzzHewO+7MdfGIsDCT
kKLNPm6x5L7mwurxbSt8IkVjHoM8c8fu5IgFroJ+jDVHx2jQnEJA2FmsWU0X2xlqjcoail3sVPwJ
dsffE0UBSoam+KYm/JYZSPihA2OEzqNWIEhowRYY7ZsiuuEkaLZJgqHqR74vNAml1MT3NENQfPNg
0VQvYomRP9FxF/96aFLj6VURAOqiRpJ/108/Rw9UlAAVUGWuulN5jjLapox/tJCr9Hf92wuPQsaz
ocDPpot4FJsH1JXhk6jEkF6jhgcYVnOwmwOGDT28IFmzEm+qYRzqSOJGJQ3vnse4FRs2gHeCSzLH
SR6qmiPovlam+jlWhlZE4LifQzEX/o3yPxe7wE0vqY5NWBxsANS8rCufX27Xd+lsOlWICfyyRxdo
lWOJBcdaR4ZvqQKDMRrxdglYw+lhgrRWzl6gNLLkENr3gPa5QsedAzUnzN8rEGgWBaHDk+5nhnbc
N2PZFGTYPb6pyhnN/lXRmAgTyNARH2i1lLcpv1ARZB2qYXmGIkEIufmkRQPHe5EqHguT2bewSBVD
RbslC2lyEHuacJsklh/wofheCXb2ng8SgnPJgPNw5iiz4cQko174GpBpeOthGqtxyGIKPtH4Mtg2
fL+3ba09BjmEeqhVJbTk3L7gUHET8DQNWWzYhbSB9ayxwAa6YwFvRFBQG26D8peWWqmzAfmhH8IL
Ezqp8C14lyAXQIpaqarf7Sv+wwovxbZpEiVozusW2/fE/jCEnTTh0dCm7F0VQk1P0R74GVQHso40
MDMCJqkGGhq2fsDqVeZKKVkxWZsREQ1Eh5KVV9rdnsLbNtpeDx6rDqgB3jcwJ+GBaRM9LElkbCn3
UWhYgiaUMOeoBgT5oVp1THtwJh7LRKi6s/7RCA92Mc/MkJii9OhYO9rqS2hjP9BKz8JKavY9vEKC
RTIW7ZvcVdOoBDn6Z1hzGDBNoj2I7KFkCVnGKd/keqQboysV1V6vhWTS7D8CFwavwFKwLBS+kjJa
IAGCt8GRWgmvnTy8+Nx0ayGfkPVhMvLVkApGW1Eou5heyOtPG6Amo5Ddge3p17pIdlXzVHWG51mg
HEiFhH/IDZTT7BiIGZXPO0WpmH7Q1DSJHjbD+GETJ5ZShoqxxlxPVGgxW7GT9LTQeQu8VUkiIXNr
UACCdzt8Y+vYompQscDUIadnrQm8j7WhCpdzsc/BH0uXnXZ+9whBWeGY2Imcx9fhqkFK2rsI1oJH
CyXlsUQKAa/+AY4bXsG6JshvmCWbmumh9RYxDBeKeSmefSjN9THyU3VT1HU5A0pLbRQetmjssbUV
kUZAcGovbZPvX75qRuu1dGlKGgyJiFAvR3yjEbHVS5GlX2ot+TSZ1KRfIfUCKwRQM3JVs6qEFCuk
RkiCbHrCehlSLSS27kC873mI7tGccYRar0i0XtEkUuqLaDKrokl5CZJBhDorQk+MZnO0XogoQaCF
SRlB5yJqZHQ9mUWkOIreRk2Eao+IFTQRKWgiVNBErKAJlsVKG5wz1IhHSikTbTbRFmMYJar7sGz7
F+9Fc+nWh1m796C5nLYzmLtntKYWPCn7reWmlP5eL5yK77cnpSbF4urrfcp4nx3WURDn0qqgEZPz
Dvj1FmwqJUZNhTNgQI6QLg3wx9+jXvUfkn6GD0/124V+90y/u6R3fkl/r7/DIpRMcRLrl+uqaWXz
NCqIY13MR3VxRybwuQoQBwX9TZ37Vn/EGKnv+Qihgk2J8hQCsuMmhgs5pyQ5B8s6dUjxCGMEGIVX
K/oannIgupzI6Jooi67W7cDxJuWaS9dU1eQfbUIcvyjw38Dx1lkSiFjbzfzkt4m+gU5knJJ+cGjM
RORAGVLto0VDa4ai19MLS9YjMJ5v//ub/8kKuraeTd7+H2/+73/D8dassM8RoYhcYoNJhfDtF3/I
aAwFv+UL+lzUPHzk2jIazbd4+T8aqYh940lTLWDnSgxAHUptNrFjqXXGW3uvwGoH46WdQDOODnGE
4YO2TQFZjox4RJaBNGD2oMrgzWTwUFvHU+4HB9I3QqkOK0Pz/1PSFBuO2gi8DyeGqUn7uX7/8zFR
hgz0CIwYEe1U9c/GxN8bJujb2eTF6qa6hjYihMxsUtJTIhu+WqB6LIX3pm2ZabLCDZFiTUDFOXpK
WyFwjssPVVXiVevk4DjVuNT1K07UXU6o13bUpNF0UYxXsJxEXoIG6/tHXazRm3/bFFvgI+AdK/ih
jxh7fFPR6oDTfEEtNkE5/fa4iOm6G/5RwhsYZ4DqMiOgdzKs3Lri2LkYhuvFN4IJ3KKCxFuJ8RTZ
e+MzrLH5CJWQFnjqekkAp46iSUv+bj86ZU9dSiAXrk5+gWHrCb9YvrR9W7icgMUwvQdK2eBsSSlp
uRp5SFdE/m+74m346UJ498rK+RmayLlzkL745kQPdoRbuUCsi3k/DsVAcMfA9qSmC1R0P7KqxtXz
Lb4xSzylGTNXU3qrDnynYR2hfKTolHXNkFEI8QDuop4zukAIkoljZq5j1o6dMWs0TGtz8ZrWrb4P
hnvISdtGi0LbrSMA3HWALaXN9BDcHzao3nzYdo2xcs4X2+bKmuXNBM9xYNM2S5IMTejeVLXZrAhI
NeJk6WbSl4O0NVz2SEkXGIH8FunSiYoD7gcVShod6I5j1PRcmBfCL28KBJXR1xENQh8UK2B7cP2T
MzEGUoZTGSHGpYA5X3tR5GFg5wm0HEhtLWHgJXgx7dGj1p2PMgBM6mhz22tvYGfj6nGNVZxbxylC
uUM4eWhksWew6yXbsfPlz5WqUvsYOmG2dcgrNad7K1Bz+lk0q6Z0CH6/Ku7WZHVronALZYapm28X
NAlWm3Ip4ntZDBTWfbHDvhamBA5AJ0auyAtKfYoRDFTba2vYFN6BaNoCmdqygpMH9z5D8T256LUV
SF6WkfzAxDKacJTPRojkOFoBRbwipcmITz8BO+Fmo+/1+A4hNzzkkZ7BcCnZVha/nZcX6I0+H3EM
VfbSbAXfhoXUQkVFJf2ZPfWl4ortZeNY8uFRIEeCZy6BECPUHoXEChy9T7rwc0YdlDLyYK4Wwme3
zI9lHR7YsBczN4b3CRTzrT731giVW2z0k2aygbXnGCXQxrf/5zb//wDWzunR
"""
import sys
import base64
import zlib
class DictImporter(object):
    """PEP 302 meta-path importer serving modules from an in-memory mapping.

    ``sources`` maps dotted module names (or ``"<pkg>.__init__"`` for
    packages) to their source text.
    """

    def __init__(self, sources):
        self.sources = sources

    def find_module(self, fullname, path=None):
        """Return self if we can supply ``fullname``, else None."""
        # We were generated with <python2.7 (which bundles argparse), but a
        # stdlib that already ships it should win.
        if fullname == "argparse" and sys.version_info >= (2, 7):
            return None
        known = (fullname in self.sources or
                 fullname + '.__init__' in self.sources)
        return self if known else None

    def load_module(self, fullname):
        """Compile and execute the stored source as a new module."""
        from types import ModuleType
        if fullname in self.sources:
            source, is_pkg = self.sources[fullname], False
        else:
            # Raises KeyError for unknown names, matching dict lookup.
            source, is_pkg = self.sources[fullname + '.__init__'], True
        code = compile(source, fullname, 'exec')
        mod = sys.modules.setdefault(fullname, ModuleType(fullname))
        mod.__file__ = "%s/%s" % (__file__, fullname)
        mod.__loader__ = self
        if is_pkg:
            mod.__path__ = [fullname]
        do_exec(code, mod.__dict__)  # noqa
        return sys.modules[fullname]

    def get_source(self, name):
        """Return the stored source for a module or package, or None."""
        source = self.sources.get(name)
        if source is None:
            source = self.sources.get(name + '.__init__')
        return source
if __name__ == "__main__":
    # Bootstrap: decode the embedded, zlib-compressed pickle of module
    # sources, install an in-memory importer for them, then hand control
    # to pytest.
    if sys.version_info >= (3, 0):
        # Python 3: exec() is a function; wrap it so both branches expose
        # the same do_exec(code, namespace) helper.
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        # Python 2: exec is a statement, so the wrapper uses py2 syntax.
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
    importer = DictImporter(sources)
    # Front of meta_path so the bundled modules win over sys.path copies.
    sys.meta_path.insert(0, importer)
    entry = "import pytest; raise SystemExit(pytest.cmdline.main())"
    do_exec(entry, locals()) # noqa
|
bonzanini/luigi-slack
|
runtests.py
|
Python
|
mit
| 237,463
|
[
"ASE"
] |
34a36f79008b10ef9f08cbf1e9efdd4df3285989c543cd4727eae3c30ebd4b55
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for DL_POLY Classic, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
from easybuild.tools.filetools import copytree
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_DL_underscore_POLY_underscore_Classic(ConfigureMake):
    """EasyBuild easyblock for building and installing DL_POLY Classic."""

    def configure_step(self):
        """Stage the parallel makefile into source/ and point make at MPIF90."""
        shutil.copy("build/MakePAR", "source/Makefile")
        os.chdir("source")
        self.cfg.update('makeopts', 'LD="$MPIF90 -o" FC="$MPIF90 -c" par')

    def install_step(self):
        """Copy the built executables into <installdir>/bin."""
        self.log.debug("copying %s/execute to %s, (from %s)", self.cfg['start_dir'], self.installdir, os.getcwd())
        # Installing under bin/ lets the generated module set PATH
        # correctly without extra configuration.
        exe_dir = os.path.join(self.cfg['start_dir'], 'execute')
        target_bin = os.path.join(self.installdir, "bin")
        copytree(exe_dir, target_bin)
|
hajgato/easybuild-easyblocks
|
easybuild/easyblocks/d/dl_poly_classic.py
|
Python
|
gpl-2.0
| 2,190
|
[
"DL_POLY"
] |
a636f4fd1ab5508bda7748d45171ad984463199f411684d44413148df44afe53
|
from math import pi, sqrt
import numpy as np
from ase.atoms import Atoms
from gpaw.aseinterface import GPAW
from gpaw.wavefunctions.base import WaveFunctions
from gpaw.atom.radialgd import EquidistantRadialGridDescriptor
from gpaw.utilities import unpack
from gpaw.utilities.lapack import general_diagonalize
from gpaw.occupations import OccupationNumbers
import gpaw.mpi as mpi
class MakeWaveFunctions:
    """Factory passed as GPAW ``mode``: builds radial AtomWaveFunctions."""

    def __init__(self, gd):
        # Radial grid descriptor captured at construction time.
        self.gd = gd

    def __call__(self, paw, gd, *args):
        # Deliberately ignore the gd GPAW passes in and use our own radial
        # descriptor (see the original XXX note about paw.gd).
        return AtomWaveFunctions(self.gd, *args)
class AtomWaveFunctions(WaveFunctions):
    """Wave functions for the spherically symmetric single-atom solver."""

    def initialize(self, density, hamiltonian, spos_ac):
        """Seed the density from atomic orbitals and refresh the Hamiltonian."""
        bf = AtomBasisFunctions(self.gd, self.setups[0].phit_j)
        density.initialize_from_atomic_densities(bf)
        hamiltonian.update(density)

    def add_to_density_from_k_point(self, nt_sG, kpt):
        """Accumulate occupation-weighted |psi|^2 into spin channel kpt.s."""
        occ = kpt.f_n / 4 / pi
        nt_sG[kpt.s] += np.dot(occ, kpt.psit_nG**2)

    def summary(self, fd):
        """Write a one-line mode description to the file-like object fd."""
        fd.write('Mode: Spherically symmetric atomic solver')
class AtomPoissonSolver:
description = 'Radial equidistant'
def set_grid_descriptor(self, gd):
self.gd = gd
self.relax_method = 0
self.nn = 1
def initialize(self):
pass
def get_stencil(self):
return 'Exact'
def solve(self, vHt_g, rhot_g, charge=0):
r = self.gd.r_g
dp = rhot_g * r * self.gd.dr_g
dq = dp * r
p = np.add.accumulate(dp[::-1])[::-1]
q = np.add.accumulate(dq[::-1])[::-1]
vHt_g[:] = 4 * pi * (p - 0.5 * dp - (q - 0.5 * dq - q[0]) / r)
return 1
class AtomEigensolver:
    """Dense direct eigensolver for the radial atomic problem.

    Builds one dense Hamiltonian per angular momentum l and diagonalizes it
    exactly, so a single iterate() call produces converged eigenstates.
    """

    def __init__(self, gd, f_sln):
        # gd: radial grid descriptor; f_sln[s][l][n]: occupation table.
        self.gd = gd
        self.f_sln = f_sln
        self.error = 0.0  # residual reported to the SCF loop; exact solver
        self.initialized = False

    def reset(self):
        # Force matrix setup to be redone on the next iterate().
        self.initialized = False

    def initialize(self, wfs):
        """Precompute kinetic (T_l) and overlap (S_l) matrices per l."""
        r = self.gd.r_g
        h = r[0]  # grid spacing (equidistant grid starting at h)
        N = len(r)
        lmax = len(self.f_sln[0]) - 1
        # Finite-difference second derivative: 1/h^2 on the diagonal,
        # -0.5/h^2 on both off-diagonals.
        self.T_l = [np.eye(N) * (1.0 / h**2)]
        self.T_l[0].flat[1::N + 1] = -0.5 / h**2
        self.T_l[0].flat[N::N + 1] = -0.5 / h**2
        for l in range(1, lmax + 1):
            # Centrifugal term l(l+1)/2r^2 added for higher l.
            self.T_l.append(self.T_l[0] + np.diag(l * (l + 1) / 2.0 / r**2))
        self.S_l = [np.eye(N) for l in range(lmax + 1)]
        setup = wfs.setups[0]
        # Projector functions sampled on the radial grid (times r^l).
        self.pt_j = np.array([[pt(x) * x**l for x in r]
                              for pt, l in zip(setup.pt_j, setup.l_j)])
        dS_ii = setup.dO_ii
        # PAW overlap corrections: S_l += |pt1> dS <pt2| for matching l.
        # i1/i2 advance by 2l+1 to skip the m-degenerate projector slots.
        i1 = 0
        for pt1, l1 in zip(self.pt_j, setup.l_j):
            i2 = 0
            for pt2, l2 in zip(self.pt_j, setup.l_j):
                if l1 == l2 and l1 <= lmax:
                    self.S_l[l1] += (np.outer(pt1 * r, pt2 * r) *
                                     h * dS_ii[i1, i2])
                i2 += 2 * l2 + 1
            i1 += 2 * l1 + 1
        # Allocate per-k-point (here: per-spin) result arrays.
        for kpt in wfs.kpt_u:
            kpt.eps_n = np.empty(wfs.bd.nbands)
            kpt.psit_nG = self.gd.empty(wfs.bd.nbands)
            kpt.P_ani = {0: np.zeros((wfs.bd.nbands, len(dS_ii)))}
        self.initialized = True

    def iterate(self, hamiltonian, wfs):
        """Diagonalize H_l for every spin and l; fill eps_n, psit_nG, P_ani."""
        if not self.initialized:
            self.initialize(wfs)
        r = self.gd.r_g
        h = r[0]
        N = len(r)
        lmax = len(self.f_sln[0]) - 1
        setup = wfs.setups[0]
        e_n = np.zeros(N)
        for s in range(wfs.nspins):
            dH_ii = unpack(hamiltonian.dH_asp[0][s])
            kpt = wfs.kpt_u[s]
            N1 = 0  # running band index across l channels
            for l in range(lmax + 1):
                # H = kinetic + local potential + nonlocal PAW correction.
                H = self.T_l[l] + np.diag(hamiltonian.vt_sg[s])
                i1 = 0
                for pt1, l1 in zip(self.pt_j, setup.l_j):
                    i2 = 0
                    for pt2, l2 in zip(self.pt_j, setup.l_j):
                        if l1 == l2 == l:
                            H += (h * dH_ii[i1, i2] *
                                  np.outer(pt1 * r, pt2 * r))
                        i2 += 2 * l2 + 1
                    i1 += 2 * l1 + 1
                # Generalized eigenproblem H c = e S c; eigenvectors are
                # returned in the rows of H (used as H[n] below).
                general_diagonalize(H, e_n, self.S_l[l].copy())
                for n in range(len(self.f_sln[s][l])):
                    # Replicate each (l, n) solution over its 2l+1
                    # m-degenerate bands.
                    N2 = N1 + 2 * l + 1
                    kpt.eps_n[N1:N2] = e_n[n]
                    # u(r)/r recovers the radial function; 1/sqrt(h) is the
                    # grid-weight normalization.
                    kpt.psit_nG[N1:N2] = H[n] / r / sqrt(h)
                    i1 = 0
                    for pt, ll in zip(self.pt_j, setup.l_j):
                        i2 = i1 + 2 * ll + 1
                        if ll == l:
                            # Projection <pt|psi> evaluated on the grid.
                            P = np.dot(kpt.psit_nG[N1], pt * r**2) * h
                            kpt.P_ani[0][N1:N2, i1:i2] = P * np.eye(2 * l + 1)
                        i1 = i2
                    N1 = N2
class AtomLocalizedFunctionsCollection:
    """Minimal LFC stand-in holding a single radial function.

    Only the first spline of the first atom is used; it is sampled on the
    radial grid and normalized by 1/sqrt(4*pi).
    """

    def __init__(self, gd, spline_aj):
        self.gd = gd
        radial = spline_aj[0][0]
        samples = [radial(r) for r in gd.r_g]
        self.b_g = np.array(samples) / sqrt(4 * pi)

    def set_positions(self, spos_ac):
        pass

    def add(self, a_xG, c_axi=1.0, q=-1):
        """Add coefficient * b_g into a_xG (scalar or nested coefficient)."""
        assert q == -1
        coeff = c_axi if isinstance(c_axi, float) else c_axi[0][0]
        a_xG += coeff * self.b_g

    def integrate(self, a_g, c_ai, q=-1):
        """Store <a_g|b_g> in c_ai[0][0]; zero the remaining entries."""
        assert a_g.ndim == 1
        assert q == -1
        c_ai[0][0] = self.gd.integrate(a_g, self.b_g)
        c_ai[0][1:] = 0.0
class AtomBasisFunctions:
    """Radial basis-function container mimicking GPAW's BasisFunctions API."""

    def __init__(self, gd, phit_j):
        self.gd = gd
        self.bl_j = []   # list of (radial samples, l) pairs
        self.Mmax = 0    # total orbital count including m-degeneracy
        for phit in phit_j:
            l = phit.get_angular_momentum_number()
            radial = np.array([phit(x) * x**l for x in gd.r_g])
            self.bl_j.append((radial, l))
            self.Mmax += 2 * l + 1
        # Single atom, always local.
        self.atom_indices = [0]
        self.my_atom_indices = [0]

    def set_positions(self, spos_ac):
        pass

    def add_to_density(self, nt_sG, f_asi):
        """Add occupation-weighted |phi|^2 of every basis function to nt_sG."""
        m = 0
        for b_g, l in self.bl_j:
            degeneracy = 2 * l + 1
            nt_sG += f_asi[0][:, m:m + 1] * degeneracy / 4 / pi * b_g**2
            m += degeneracy
class AtomGridDescriptor(EquidistantRadialGridDescriptor):
    """Radial grid dressed with enough 3-d attributes to satisfy GPAW."""

    def __init__(self, h, rcut):
        # Snap the cutoff to a whole number of grid spacings, excluding the
        # end point from the grid itself.
        ng = int(float(rcut) / h + 0.5) - 1
        rcut = ng * h
        EquidistantRadialGridDescriptor.__init__(self, h, ng, h0=h)
        # Serial, non-periodic fake 3-d metadata.
        self.comm = mpi.serial_comm
        self.sdisp_cd = np.empty((3, 2))
        self.pbc_c = np.zeros(3, bool)
        self.cell_cv = np.eye(3) * rcut
        self.N_c = np.ones(3, dtype=int) * 2 * ng
        self.h_cv = self.cell_cv / self.N_c
        self.dv = (rcut / 2 / ng)**3
        self.orthogonal = False

    def get_ranks_from_positions(self, spos_ac):
        # Serial run: everything belongs to rank 0.
        return np.array([0])

    def refine(self):
        return self

    def get_lfc(self, gd, spline_aj):
        return AtomLocalizedFunctionsCollection(gd, spline_aj)

    def integrate(self, a_xg, b_xg=None, global_integral=True):
        """Integrate function(s) in array over domain using dv_g weights."""
        if b_xg is None:
            return np.dot(a_xg, self.dv_g)
        return np.dot(a_xg * b_xg, self.dv_g)

    def calculate_dipole_moment(self, rhot_g):
        # Spherical symmetry: no dipole component is computed.
        return np.zeros(3)

    def symmetrize(self, a_g, op_scc):
        pass

    def get_grid_spacings(self):
        return self.h_cv.diagonal()

    def get_size_of_global_array(self):
        return np.array(len(self.N_c))
class AtomOccupations(OccupationNumbers):
    """Fixed occupation numbers taken from an f_sln table."""

    def __init__(self, f_sln):
        # f_sln[s][l][n]: occupation of state (spin, l, n).
        self.f_sln = f_sln
        OccupationNumbers.__init__(self, None)
        self.width = 0

    def calculate_occupation_numbers(self, wfs):
        """Spread each shell's occupation evenly over its 2l+1 bands."""
        for s in range(wfs.nspins):
            band = 0
            for l, f_n in enumerate(self.f_sln[s]):
                deg = 2 * l + 1
                for f in f_n:
                    wfs.kpt_u[s].f_n[band:band + deg] = f / float(deg)
                    band += deg
        if wfs.nspins == 2:
            # Net magnetic moment is the spin-channel occupation difference.
            self.magmom = wfs.kpt_u[0].f_n.sum() - wfs.kpt_u[1].f_n.sum()
        self.e_entropy = 0.0

    def get_fermi_level(self):
        # No smearing: a Fermi level is not defined here.
        raise ValueError
class AtomPAW(GPAW):
    """GPAW calculator specialized to a single atom on a radial grid.

    Plugs radial replacements for the wave functions, eigensolver and
    Poisson solver into the regular GPAW machinery and converges the
    calculation immediately in __init__.
    """

    def __init__(self, symbol, f_sln, h=0.05, rcut=10.0, **kwargs):
        # f_sln[s][l][n]: occupation table; one or two spin channels.
        assert len(f_sln) in [1, 2]
        self.symbol = symbol
        gd = AtomGridDescriptor(h, rcut)
        GPAW.__init__(self,
                      mode=MakeWaveFunctions(gd),
                      eigensolver=AtomEigensolver(gd, f_sln),
                      poissonsolver=AtomPoissonSolver(),
                      stencils=(1, 9),
                      # One band per (l, n) state, counting m-degeneracy.
                      nbands=sum([(2 * l + 1) * len(f_n)
                                  for l, f_n in enumerate(f_sln[0])]),
                      communicator=mpi.serial_comm,
                      **kwargs)
        self.occupations = AtomOccupations(f_sln)
        # Run the SCF cycle now; the calculator is converged after init.
        self.initialize(Atoms(symbol, calculator=self))
        self.calculate(converge=True)

    def dry_run(self):
        # Nothing to summarize for this mini-calculator.
        pass

    def state_iter(self):
        """Yield the tuples (l, n, f, eps, psit_G) of states.
        Skips degenerate states."""
        f_sln = self.occupations.f_sln
        assert len(f_sln) == 1, 'Not yet implemented with more spins'
        f_ln = f_sln[0]
        kpt = self.wfs.kpt_u[0]
        band = 0
        for l, f_n in enumerate(f_ln):
            for n, f in enumerate(f_n):
                # First band of each (l, n) shell; the remaining 2l
                # degenerate partners are skipped via the band increment.
                psit_G = kpt.psit_nG[band]
                eps = kpt.eps_n[band]
                yield l, n, f, eps, psit_G
                band += 2 * l + 1

    def extract_basis_functions(self, basis_name='atompaw.sz'):
        """Create BasisFunctions object with pseudo wave functions."""
        from gpaw.basis_data import Basis, BasisFunction
        assert self.wfs.nspins == 1
        basis = Basis(self.symbol, basis_name, readxml=False)
        basis.d = self.wfs.gd.r_g[0]
        # One extra point to include r=0, which the radial grid omits.
        basis.ng = self.wfs.gd.N + 1
        basis.generatorattrs = {} # attrs of the setup maybe
        basis.generatordata = 'AtomPAW' # version info too?
        bf_j = basis.bf_j
        for l, n, f, eps, psit_G in self.state_iter():
            phit_g = np.empty(basis.ng)
            phit_g[0] = 0.0  # prepend the r=0 point
            phit_g[1:] = psit_G
            phit_g *= np.sign(psit_G[-1])  # fix the overall sign convention
            # If there's no node at zero, we shouldn't set phit_g to zero
            # We'll make an ugly hack
            if abs(phit_g[1]) > 3.0 * abs(phit_g[2] - phit_g[1]):
                phit_g[0] = phit_g[1]
            bf = BasisFunction(l, self.wfs.gd.r_g[-1], phit_g,
                               '%s%d e=%.3f f=%.3f' % ('spdfgh'[l], n, eps, f))
            bf_j.append(bf)
        return basis
|
robwarm/gpaw-symm
|
gpaw/atom/atompaw.py
|
Python
|
gpl-3.0
| 10,523
|
[
"ASE",
"GPAW"
] |
bb5a0f48640dffd17c28f2acf01de35ffe2fae4d0505c87406c03f9dd9f398f7
|
import numpy as np
import tensorflow as tf
from backend.networks import Model
#import backend.visualizations as V
from backend.simulation_tools import Simulator
import matplotlib.pyplot as plt
# Builds a dictionary of parameters that specifies the information
# about an instance of this specific task
def set_params(n_in = 5, n_out = 5, n_fixed_points = 5, n_steps = 200, stim_noise = 0, rec_noise = 0, L1_rec = 0, L2_firing_rate = 0,
               sample_size = 128, epochs = 100, N_rec = 50, dale_ratio=0.8, tau=100.0, dt = 10.0, biases = False, task='n_back'):
    """Bundle task/model hyper-parameters into the dict the trainer expects.

    ``n_fixed_points`` is accepted for API compatibility but is not stored
    in the returned dict; ``sample_size`` is recorded under both
    'N_batch' and 'sample_size'.
    """
    return {
        'N_in': n_in,
        'N_out': n_out,
        'N_steps': n_steps,
        'N_batch': sample_size,
        'stim_noise': stim_noise,
        'rec_noise': rec_noise,
        'sample_size': sample_size,
        'epochs': epochs,
        'N_rec': N_rec,
        'dale_ratio': dale_ratio,
        'tau': tau,
        'dt': dt,
        'alpha': dt / tau,   # integration step as a fraction of tau
        'task': task,
        'L1_rec': L1_rec,
        'L2_firing_rate': L2_firing_rate,
        'biases': biases,
    }
# This generates the training data for our network
# It will be a set of input_times and output_times for when we expect input
# and when the corresponding output is expected
def build_train_trials(params):
    """Generate one batch of fixed-point training trials.

    Each trial picks one of ``N_in`` channels at random, drives it with a
    unit stimulus during time steps 10-79, and requires the matching output
    channel to hold 1 from step 60 onward (assumes N_steps >= 80).

    Args:
        params: dict with at least 'N_in', 'N_steps', 'stim_noise' and
            'sample_size'.  Zero-filled 'input_times'/'output_times'
            placeholders are written back into it for downstream reporting.

    Returns:
        Tuple (x_train, y_train, mask) of arrays, each shaped
        (sample_size, N_steps, N_in).
    """
    n_in = params['N_in']
    n_out = n_in  # one output channel per fixed point
    n_steps = params['N_steps']
    n_fixed_points = n_in
    stim_noise = params['stim_noise']
    batch_size = int(params['sample_size'])
    # Which fixed point each trial should converge to.
    fixed_pts = np.random.randint(low=0, high=n_fixed_points, size=batch_size)
    # BUGFIX: np.int was deprecated and removed in NumPy 1.24; the builtin
    # int is the exact equivalent dtype.
    input_times = np.zeros([batch_size, n_in], dtype=int)
    output_times = np.zeros([batch_size, n_out], dtype=int)
    x_train = np.zeros([batch_size, n_steps, n_in])
    y_train = np.zeros([batch_size, n_steps, n_out])
    mask = np.ones((batch_size, n_steps, n_in))
    stim_time = range(10, 80)      # stimulus window
    out_time = range(60, n_steps)  # target window (overlaps the stimulus)
    for ii in range(batch_size):
        x_train[ii, stim_time, fixed_pts[ii]] = 1.
        y_train[ii, out_time, fixed_pts[ii]] = 1.
    # Additive white noise on the stimulus only.
    x_train = x_train + stim_noise * np.random.randn(batch_size, n_steps, n_in)
    params['input_times'] = input_times
    params['output_times'] = output_times
    return x_train, y_train, mask
def generate_train_trials(params):
    """Endlessly yield freshly sampled (x, y, mask) training batches."""
    while True:
        yield build_train_trials(params)
def calc_norm(A):
    """Return the Euclidean norm of each column of A."""
    return np.sqrt((A ** 2).sum(axis=0))
def demean(s):
    """Subtract the per-column mean from s."""
    mu = np.mean(s, axis=0)
    return s - mu
def gen_angle(W, U):
    """Pairwise angles (radians) between the columns of W and of U."""
    cosines = W.T.dot(U) / np.outer(calc_norm(W), calc_norm(U))
    # Clip to guard against round-off outside [-1, 1] before arccos.
    return np.arccos(np.clip(cosines, -1., 1.))
def plot_params(params):
    """Render the parameter dict as a text-only figure for the PDF report."""
    # The stored trial-time arrays are bulky; blank them before printing.
    params['input_times'] = []
    params['output_times'] = []
    keys = sorted(params)
    fig = plt.figure(figsize=(8, 11), frameon=False)
    total = len(params)
    for row, key in enumerate(keys):
        label = key + ': ' + str(params[key])
        plt.text(.1, .9 - .9 / total * row, label)
    plt.gca().axis('off')
    return fig
def plot_fps_vs_activity(s, W, brec):
    """Overlay each trial's late activity with its linear-regime fixed point.

    s has shape (time, trial, unit); the first five trials are shown, one
    subplot each.
    """
    fig = plt.figure(figsize=(4, 8))
    n_units = s.shape[2]
    for trial in range(5):
        plt.subplot(5, 1, trial + 1)
        # Effective weights with columns of inactive (<= 0) units zeroed.
        Weff = W * (s[-1, trial, :] > 0)
        fp = np.linalg.inv(np.eye(n_units) - Weff).dot(brec)
        max_real = np.max(np.linalg.eig(Weff - np.eye(n_units))[0].real)
        plt.plot(s[60:, trial, :].T, c='c', alpha=.05)
        # Black dashes when the linearization is stable, red otherwise.
        style = 'k--' if max_real < 0 else 'r--'
        plt.plot(fp, style)
        plt.axhline(0, c='k')
    return fig
def plot_outputs_by_input(s, data, Z, n=5):
    """Plot the readout of final-state activity, colored by active input."""
    fig = plt.figure()
    palette = ['r', 'g', 'b', 'k', 'c']
    for ii in range(n):
        # Trials where input channel ii was on at time step 40.
        sel = data[0][:, 40, ii] > .2
        rectified = np.maximum(s[-1, sel, :], 0)
        out = rectified.dot(Z.T).T
        plt.plot(out, c=palette[np.mod(ii, 5)], alpha=.4)
    return fig
def analysis_and_write(params, weights_path, fig_directory, run_name, no_rec_noise=True):
    """Simulate a trained network and write diagnostic figures to a PDF.

    Args:
        params: task/model parameter dict (mutated: 'rec_noise' may be
            zeroed when no_rec_noise is True).
        weights_path: .npz file containing 'W_rec', 'W_in', 'W_out', 'b_rec'.
        fig_directory: output directory, created if missing.
        run_name: basename of the generated '<run_name>.pdf'.
        no_rec_noise: if True, analyze with recurrent noise disabled so the
            simulations are deterministic.
    """
    from matplotlib.backends.backend_pdf import PdfPages
    import os
    import copy
    # Keep a pristine copy for the parameter page; the working dict may be
    # modified below and by the generator.
    original_params = copy.deepcopy(params)
    if no_rec_noise:
        params['rec_noise'] = 0.0
    try:
        os.stat(fig_directory)
    except OSError:
        # BUGFIX: the bare except previously swallowed any error here; only
        # a missing directory should trigger creation.
        os.mkdir(fig_directory)
    pp = PdfPages(fig_directory + '/' + run_name + '.pdf')
    generator = generate_train_trials(params)
    weights = np.load(weights_path)
    W = weights['W_rec']
    Win = weights['W_in']
    Wout = weights['W_out']
    brec = weights['b_rec']
    # BUGFIX: generator.next() is Python-2 only; the next() builtin works on
    # both Python 2.6+ and 3.x.
    data = next(generator)
    sim = Simulator(params, weights_path=weights_path)
    # State trajectories for every trial, shaped (time, trial, unit).
    s = np.zeros([data[0].shape[1], data[0].shape[0], W.shape[0]])
    for ii in range(data[0].shape[0]):
        s[:, ii, :] = sim.run_trial(data[0][ii, :, :],
                                    t_connectivity=False)[1].reshape(
                                        [data[0].shape[1], W.shape[0]])
    # Figure 0: run parameters as a text page.
    pp.savefig(plot_params(original_params))
    # Figure 1: fixed points overlaid on activity.
    pp.savefig(plot_fps_vs_activity(s, W, brec))
    # Figure 2: readout outputs grouped by which input was active.
    pp.savefig(plot_outputs_by_input(s, data, Wout, n=Win.shape[1]))
    pp.close()
if __name__ == "__main__":
    # Command-line entry point: parse options, train the fixed-point
    # network, then run the post-hoc analysis and report the wall time.
    import time
    start_time = time.time()
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('run_name', help="task name", type=str)
    parser.add_argument('fig_directory',help="where to save figures")
    parser.add_argument('weights_path',help="where to save weights")
    parser.add_argument('-fp', '--n_fps', help="number of fixed points", type=int,default=5)
    parser.add_argument('-nr','--n_rec', help="number of hidden units", type=int,default=10)
    parser.add_argument('-i','--initialization', help ="initialization of Wrec", type=str,default='gauss')
    parser.add_argument('-r','--rec_noise', help ="recurrent noise", type=float,default=0.01)
    parser.add_argument('-t','--training_iters', help="training iterations", type=int,default=300000)
    parser.add_argument('-ts','--task',help="task type",default='fixed_point')
    args = parser.parse_args()
    #run params
    run_name = args.run_name
    fig_directory = args.fig_directory
    # One input/output channel per fixed point.
    n_in = n_out = args.n_fps
    n_rec = args.n_rec
    #model params
    #n_in = n_out = 5 #number of fixed points
    #n_rec = 10
    #n_steps = 80
    tau = 100.0 #As double
    dt = 20.0 #As double
    dale_ratio = 0
    rec_noise = args.rec_noise
    stim_noise = 0.1
    batch_size = 128 #256
    #var_delay_length = 50
    n_back = 0
    #train params
    learning_rate = .0001
    training_iters = args.training_iters
    display_step = 200
    #weights_path = '../weights/n_fps6by8_1.npz'
    save_weights_path = args.weights_path
    # NOTE(review): N_steps is fixed at 300 here, overriding set_params'
    # default; N_rec comes from the command line.
    params = set_params(n_in = n_in, n_out = n_out, n_steps = 300, stim_noise = stim_noise, rec_noise = rec_noise, L1_rec = 0, L2_firing_rate = 0,
                        sample_size = 128, epochs = 100, N_rec = n_rec, dale_ratio=dale_ratio, tau=tau, dt = dt, task='n_back')
    generator = generate_train_trials(params)
    #model = Model(n_in, n_hidden, n_out, n_steps, tau, dt, dale_ratio, rec_noise, batch_size)
    model = Model(params)
    sess = tf.Session()
    # Train and checkpoint weights to save_weights_path.
    model.train(sess, generator, learning_rate = learning_rate, training_iters = training_iters, save_weights_path = save_weights_path)
    #print('second training')
    #model.train(sess, generator, learning_rate = learning_rate, training_iters = training_iters, weights_path = weights_path, initialize_variables=False)
    # Post-training analysis writes a PDF report into fig_directory.
    analysis_and_write(params,save_weights_path,fig_directory,run_name)
    # data = generator.next()
    # inp = np.argmax(data[0][:,40,:],axis=1)
    # #output,states = model.test(sess, input, weights_path = weights_path)
    #
    #
    # W = model.W_rec.eval(session=sess)
    # U = model.W_in.eval(session=sess)
    # Z = model.W_out.eval(session=sess)
    # brec = model.b_rec.eval(session=sess)
    # bout = model.b_out.eval(session=sess)
    #
    # sim = Simulator(params, weights_path=weights_path)
    # output,states = sim.run_trial(data[0][0,:,:],t_connectivity=False)
    #
    # s = np.zeros([data[0].shape[1],data[0].shape[0],n_rec])
    # for ii in range(data[0].shape[0]):
    #     s[:,ii,:] = sim.run_trial(data[0][ii,:,:],t_connectivity=False)[1].reshape([data[0].shape[1],n_rec])
    dur = time.time()-start_time
    print('runtime: '+ str(int(dur/60)) + ' min, ' + str(int(np.mod(dur,60))) + ' sec')
    sess.close()
|
davidbrandfonbrener/Project-Sisyphus
|
tasks/n_fixed_points.py
|
Python
|
mit
| 9,304
|
[
"NEURON"
] |
7f00954b655d1d1fba029fcaedf8732264c8b2eacfe5ce6d01b789a5ea307fd5
|
#!/usr/bin/env python
# tests the support to pass generic vertex attributes to be used in Cg shaders.
# The material below embeds an inline Cg vertex shader: it transforms the
# position with the model-view-projection matrix and copies the (normalized)
# "Normal" attribute into the vertex colour, so the sphere is coloured by
# the per-point vector data mapped onto IN.Normal further down.
xmlMaterial = '<?xml version="1.0" encoding="UTF-8"?> \
<Material name="GenericAttributes1"> \
<Shader \
scope="Vertex" \
name="VertexShader" \
location="Inline" \
language="Cg" \
entry="main"> \
<MatrixUniform name="ModelViewProj" \
type="State" \
number_of_elements="2" \
value="CG_GL_MODELVIEW_PROJECTION_MATRIX CG_GL_MATRIX_IDENTITY" /> \
<MatrixUniform name="ModelViewIT" \
type="State" \
number_of_elements="2" \
value="CG_GL_MODELVIEW_MATRIX CG_GL_MATRIX_INVERSE_TRANSPOSE" /> \
\
struct appin \
{ \
float4 Position : POSITION; \
float3 Normal : NORMAL; \
}; \
\
// define outputs from vertex shader \
struct vertout \
{ \
float4 HPosition : POSITION; \
float4 Color0 : COLOR0; \
}; \
\
vertout main(appin IN, \
uniform float4x4 ModelViewProj, \
uniform float4x4 ModelViewIT) \
{ \
vertout OUT; \
\
// transform vertex position into homogenous clip-space \
OUT.HPosition = mul(ModelViewProj, IN.Position); \
\
OUT.Color0.xyz = normalize(IN.Normal); \
OUT.Color0.a = 1.0; \
return OUT; \
} \
</Shader> \
</Material> \
'
# Standard render-window / interactor / renderer setup.
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer = vtk.vtkRenderer()
renWin.AddRenderer(renderer)
# Sphere geometry whose points will carry the generic attribute.
src1 = vtk.vtkSphereSource()
src1.SetRadius(5)
src1.SetPhiResolution(20)
src1.SetThetaResolution(20)
# vtkBrownianPoints attaches a random vector ("BrownianVectors") to each
# point; these vectors are what the shader receives as IN.Normal.
randomVectors = vtk.vtkBrownianPoints()
randomVectors.SetMinimumSpeed(0)
randomVectors.SetMaximumSpeed(1)
randomVectors.SetInputConnection(src1.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(randomVectors.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Load the material. Here, we are loading a material
# defined in the Vtk Library. One can also specify
# a filename to a material description xml.
actor.GetProperty().LoadMaterialFromString(xmlMaterial)
# Set red color to show if shading fails.
actor.GetProperty().SetColor(1.0,0,0)
# Turn shading on. Otherwise, shaders are not used.
actor.GetProperty().ShadingOn()
# Map PointData.BrownianVectors (all 3 components) to IN.Normal
mapper.MapDataArrayToVertexAttribute("IN.Normal","BrownianVectors",0,-1)
renderer.AddActor(actor)
renderer.SetBackground(0.5,0.5,0.5)
renWin.Render()
# Rotate the camera so the regression image is not a degenerate view.
renderer.GetActiveCamera().Azimuth(-50)
renderer.GetActiveCamera().Roll(70)
renWin.Render()
# --- end of script --
|
collects/VTK
|
Rendering/Core/Testing/Python/TestGenericVertexAttributesCg.py
|
Python
|
bsd-3-clause
| 2,663
|
[
"VTK"
] |
d2ee702c4bd9d98f823448156daeeec989f1cc7421584d061dd86fe921dc4141
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
    """Principal component analysis via the covariance method.

    Parameters
    ----------
    X : 2-D data, one observation per row.
        NOTE(review): the centering below assumes matrix-style broadcasting
        (X.mean(axis=1) subtractable from X), which holds for np.matrix but
        not for a plain (m, n) ndarray with m != n -- confirm X's type.

    Returns
    -------
    vec, val, mean_X, M, Mcov :
        eigenvectors and eigenvalues of the covariance matrix, the per-row
        mean, the mean-centered data, and the covariance matrix itself.
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)
    ###### Sanity Check ######
    # NOTE(review): x != x is only true for NaN, so this loop counts and
    # reports NaN entries. The 82x90 bounds are hard-coded -- presumably
    # the known shape of the feature matrix; verify they match X.shape.
    i=0
    n=0
    while i < 82:
        j=0
        while j < 90:
            if X[i,j] != X[i,j]:
                print X[i,j]
                print i,j
                n=n+1
            j = j+1
        i=i+1
    print n
    ##########################
    print 'PCA - COV-Method used'
    val,vec = linalg.eig(Mcov)
    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
if __name__ == '__main__':

    # NOTE(review): stacking [0:41] and [41:82] reproduces Fmat_original[0:82]
    # unchanged -- presumably a hook left in place for re-ordering trials.
    Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[41:82,:]])

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    W = eigvec_total[:,0:20]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W

    #Projected Data:
    # NOTE(review): `*` is matrix multiplication only if W and B are
    # np.matrix; with plain ndarrays it would broadcast elementwise --
    # confirm the types coming out of pca().
    Y = (W.T)*B
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y

    #Using PYMVPA
    # 18 classes x 5 trials each, in the same row order as Fmat.
    PCA_data = np.array(Y.T)
    PCA_label_2 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5

    # Leave-one-out cross-validated 1-NN classification with a confusion matrix.
    clf = kNN(k=1)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
    print ds1.samples.shape
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True',numbers_alpha=2)

    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,30.3,0,1.2])
    grid('True')
    show()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/skin_related/BMED_8813_HAP/Features/multiple_features/results/cross_validate_objects_BMED_8813_HAP_scaled_method_II_area_shape.py
|
Python
|
mit
| 3,997
|
[
"Mayavi"
] |
53f8bc6c0eda35b0f7deb49ce75ad665585432fb6568b929f04f1f60c66907fd
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_allclose
import itertools
from itertools import combinations_with_replacement as comb
import MDAnalysis
from MDAnalysis.lib import distances
from MDAnalysis.lib import mdamath
from MDAnalysis.tests.datafiles import PSF, DCD, TRIC
class TestCheckResultArray(object):
    """Tests for distances._check_result_array (result shape/dtype checks)."""

    # Reference result buffer: shape (1,), float64 -- the dtype the helper
    # requires for result arrays.
    ref = np.zeros(1, dtype=np.float64)

    def test_check_result_array_pass(self):
        # Assert input array is returned if it has correct shape and dtype:
        res = distances._check_result_array(self.ref, self.ref.shape)
        assert res is self.ref
        # Assert correct array is returned if input is None:
        res = distances._check_result_array(None, self.ref.shape)
        assert_equal(res, self.ref)
        assert res.dtype == np.float64

    def test_check_result_array_wrong_shape(self):
        wrong_shape = (1,) + self.ref.shape
        with pytest.raises(ValueError) as err:
            res = distances._check_result_array(self.ref, wrong_shape)
            # NOTE(review): this assert sits inside the `with` block after the
            # call that raises, so it never executes (and ExceptionInfo has no
            # `.msg` attribute). Presumably it was meant to compare
            # str(err.value) after the block -- confirm before relying on it.
            assert err.msg == ("Result array has incorrect shape, should be "
                               "{0}, got {1}.".format(self.ref.shape,
                                                      wrong_shape))

    def test_check_result_array_wrong_dtype(self):
        wrong_dtype = np.int64
        ref_wrong_dtype = self.ref.astype(wrong_dtype)
        with pytest.raises(TypeError) as err:
            res = distances._check_result_array(ref_wrong_dtype, self.ref.shape)
            # NOTE(review): unreachable for the same reason as above.
            assert err.msg == ("Result array must be of type numpy.float64, "
                               "got {}.".format(wrong_dtype))
@pytest.mark.parametrize('coord_dtype', (np.float32, np.float64))
def test_transform_StoR_pass(coord_dtype):
    """transform_StoR maps fractional coordinates into real space for a
    triclinic box, regardless of the input coordinate dtype."""
    tri_box = np.array([10, 7, 3, 45, 60, 90], dtype=np.float32)
    frac = np.array([[0.5, -0.1, 0.5]], dtype=coord_dtype)
    # Real-space result precomputed for this particular box.
    expected = np.array([[ 5.75, 0.36066014, 0.75]], dtype=np.float32)
    assert_allclose(expected, distances.transform_StoR(frac, tri_box))
def test_capped_distance_noresults():
    """capped_distance reports zero pairs when the cutoff excludes the
    only candidate pair."""
    a = np.array([0.1, 0.1, 0.1], dtype=np.float32)
    b = np.array([0.95, 0.1, 0.1], dtype=np.float32)
    # The two points are 0.85 apart, beyond the 0.2 cutoff.
    found, _ = distances.capped_distance(a, b, max_cutoff=0.2)
    assert_equal(len(found), 0)
# Shared parametrize inputs for the capped-distance tests below.
npoints_1 = (1, 100)
# Boxes: orthorhombic, triclinic, and fully non-periodic.
boxes_1 = (np.array([10, 20, 30, 90, 90, 90], dtype=np.float32),  # ortho
           np.array([10, 20, 30, 30, 45, 60], dtype=np.float32),  # tri_box
           None,  # Non Periodic
           )
# Queries: a single (3,) vector and a (2, 3) array of vectors.
query_1 = (np.array([0.1, 0.1, 0.1], dtype=np.float32),
           np.array([[0.1, 0.1, 0.1],
                     [0.2, 0.1, 0.1]], dtype=np.float32))
# All capped-distance search backends to exercise.
method_1 = ('bruteforce', 'pkdtree', 'nsgrid')
min_cutoff_1 = (None, 0.1)
@pytest.mark.parametrize('npoints', npoints_1)
@pytest.mark.parametrize('box', boxes_1)
@pytest.mark.parametrize('query', query_1)
@pytest.mark.parametrize('method', method_1)
@pytest.mark.parametrize('min_cutoff', min_cutoff_1)
def test_capped_distance_checkbrute(npoints, box, query, method, min_cutoff):
    """capped_distance must agree with a brute-force distance_array search."""
    np.random.seed(90003)
    coords = (np.random.uniform(low=0, high=1.0,
                                size=(npoints, 3)) * (boxes_1[0][:3])).astype(np.float32)
    max_cutoff = 2.5
    # capped_distance should handle single vectors as well as arrays of them.
    pairs, dist = distances.capped_distance(query, coords, max_cutoff,
                                            min_cutoff=min_cutoff, box=box,
                                            method=method)
    found = pairs[:, 1] if pairs.shape != (0, ) else list()
    # Reference answer: full distance matrix filtered by the same cutoffs.
    if query.shape[0] == 3:
        query = query.reshape((1, 3))
    ref_dists = distances.distance_array(query, coords, box=box)
    lower = 0. if min_cutoff is None else min_cutoff
    hits = np.where((ref_dists <= max_cutoff) & (ref_dists > lower))
    assert_equal(np.sort(found, axis=0), np.sort(hits[1], axis=0))
# for coverage
@pytest.mark.parametrize('npoints', npoints_1)
@pytest.mark.parametrize('box', boxes_1)
@pytest.mark.parametrize('query', query_1)
@pytest.mark.parametrize('method', method_1)
@pytest.mark.parametrize('min_cutoff', min_cutoff_1)
def test_capped_distance_return(npoints, box, query, method, min_cutoff):
    """Same brute-force comparison, but with return_distances=False so only
    the pair indices come back."""
    np.random.seed(90003)
    coords = (np.random.uniform(low=0, high=1.0,
                                size=(npoints, 3)) * (boxes_1[0][:3])).astype(np.float32)
    max_cutoff = 0.3
    # capped_distance should handle single vectors as well as arrays of them.
    pairs = distances.capped_distance(query, coords, max_cutoff,
                                      min_cutoff=min_cutoff, box=box,
                                      method=method, return_distances=False)
    found = pairs[:, 1] if pairs.shape != (0, ) else list()
    # Reference answer: full distance matrix filtered by the same cutoffs.
    if query.shape[0] == 3:
        query = query.reshape((1, 3))
    ref_dists = distances.distance_array(query, coords, box=box)
    lower = 0. if min_cutoff is None else min_cutoff
    hits = np.where((ref_dists <= max_cutoff) & (ref_dists > lower))
    assert_equal(np.sort(found, axis=0), np.sort(hits[1], axis=0))
@pytest.mark.parametrize('npoints', npoints_1)
@pytest.mark.parametrize('box', boxes_1)
@pytest.mark.parametrize('method', method_1)
@pytest.mark.parametrize('min_cutoff', min_cutoff_1)
@pytest.mark.parametrize('ret_dist', (False, True))
def test_self_capped_distance(npoints, box, method, min_cutoff, ret_dist):
    """self_capped_distance must find exactly the pairs that
    self_distance_array finds, with matching distances when requested."""
    np.random.seed(90003)
    coords = (np.random.uniform(low=0, high=1.0,
                                size=(npoints, 3)) * (boxes_1[0][:3])).astype(np.float32)
    max_cutoff = 0.2
    result = distances.self_capped_distance(coords, max_cutoff,
                                            min_cutoff=min_cutoff, box=box,
                                            method=method,
                                            return_distances=ret_dist)
    pairs, cdists = result if ret_dist else (result, None)
    # Hit count must match the flat self-distance array after filtering.
    flat = distances.self_distance_array(coords, box)
    flat = flat[flat < 0.2]
    if min_cutoff is not None:
        flat = flat[flat > min_cutoff]
    assert len(flat) == len(pairs)
    # Every reported (i, j) must be a genuine hit in the full matrix,
    # and the reported distance (if any) must agree with it.
    full = distances.distance_array(coords, coords, box)
    for idx, (i, j) in enumerate(pairs):
        d_ref = full[i, j]
        assert d_ref < 0.2
        if min_cutoff is not None:
            assert d_ref > min_cutoff
        if ret_dist:
            assert_almost_equal(cdists[idx], d_ref, decimal=6)
@pytest.mark.parametrize('box', (None,
                                 np.array([1, 1, 1, 90, 90, 90], dtype=np.float32),
                                 np.array([1, 1, 1, 60, 75, 80], dtype=np.float32)))
@pytest.mark.parametrize('npoints,cutoff,meth',
                         [(1, 0.02, '_bruteforce_capped_self'),
                          (1, 0.2, '_bruteforce_capped_self'),
                          (600, 0.02, '_pkdtree_capped_self'),
                          (600, 0.2, '_nsgrid_capped_self')])
def test_method_selfselection(box, npoints, cutoff, meth):
    """_determine_method_self picks the expected backend for each
    point-count / cutoff regime."""
    np.random.seed(90003)
    coords = np.random.uniform(low=0, high=1.0,
                               size=(npoints, 3)).astype(np.float32)
    chosen = distances._determine_method_self(coords, cutoff, box=box)
    assert_equal(chosen.__name__, meth)
@pytest.mark.parametrize('box', (None,
                                 np.array([1, 1, 1, 90, 90, 90], dtype=np.float32),
                                 np.array([1, 1, 1, 60, 75, 80], dtype=np.float32)))
@pytest.mark.parametrize('npoints,cutoff,meth',
                         [(1, 0.02, '_bruteforce_capped'),
                          (1, 0.2, '_bruteforce_capped'),
                          (200, 0.02, '_nsgrid_capped'),
                          (200, 0.35, '_bruteforce_capped'),
                          (10000, 0.35, '_nsgrid_capped')])
def test_method_selection(box, npoints, cutoff, meth):
    """_determine_method picks the expected pairwise backend for each
    point-count / cutoff regime."""
    np.random.seed(90003)
    coords = np.random.uniform(low=0, high=1.0,
                               size=(npoints, 3)).astype(np.float32)
    chosen = distances._determine_method(coords, coords, cutoff, box=box)
    assert_equal(chosen.__name__, meth)
@pytest.fixture()
def ref_system():
    """Small periodic reference system.

    Returns (box, all points, reference point as a (1, 3) slice, remaining
    configuration points). The first three points coincide under PBC.
    """
    box = np.array([1., 1., 2., 90., 90., 90], dtype=np.float32)
    points = np.array([[0, 0, 0],
                       [1, 1, 2],        # identical to the origin under PBC
                       [1, 0, 2],        # identical under PBC
                       [0.5, 0.5, 1.5]], # box centre
                      dtype=np.float32)
    return box, points, points[0:1], points[1:]
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class TestDistanceArray(object):
    """distance_array on the tiny hand-built ref_system, with and without PBC."""

    @staticmethod
    def _dist(x, ref):
        # Plain Euclidean distance; serves as the reference implementation.
        ref = np.asarray(ref, dtype=np.float32)
        r = x - ref
        return np.sqrt(np.dot(r, r))

    def test_noPBC(self, backend, ref_system):
        box, points, ref, conf = ref_system
        d = distances.distance_array(ref, points, backend=backend)
        assert_almost_equal(d, np.array([[
            self._dist(points[0], ref[0]),
            self._dist(points[1], ref[0]),
            self._dist(points[2], ref[0]),
            self._dist(points[3], ref[0])]
        ]))

    def test_PBC(self, backend, ref_system):
        box, points, ref, conf = ref_system
        d = distances.distance_array(ref, points, box=box, backend=backend)
        # The first three points are periodic images of the reference, so
        # their distances are zero; the centre point is measured from the
        # nearest image of the reference at [1, 1, 2].
        assert_almost_equal(d, np.array([[0., 0., 0., self._dist(points[3], ref=[1, 1, 2])]]))

    def test_PBC2(self, backend):
        # Regression test for Issue 151: minimum-image distances for points
        # lying far outside the (small) orthorhombic box.
        a = np.array([7.90146923, -13.72858524, 3.75326586], dtype=np.float32)
        b = np.array([-1.36250901, 13.45423985, -0.36317623], dtype=np.float32)
        box = np.array([5.5457325, 5.5457325, 5.5457325, 90., 90., 90.], dtype=np.float32)

        def mindist(a, b, box):
            # Reference minimum-image distance for an orthorhombic box.
            x = a - b
            return np.linalg.norm(x - np.rint(x / box) * box)

        ref = mindist(a, b, box[:3])
        val = distances.distance_array(a, b, box=box, backend=backend)[0, 0]
        assert_almost_equal(val, ref, decimal=6,
                            err_msg="Issue 151 not correct (PBC in distance array)")
def test_distance_array_overflow_exception():
    """distance_array must refuse result sizes that would overflow."""
    class FakeArray(np.ndarray):
        # Fake shape/ndim so the overflow path is hit without allocating.
        shape = (4294967296, 3)  # upper limit is sqrt(UINT64_MAX)
        ndim = 2
    huge = FakeArray([1, 2, 3])
    box = np.array([100, 100, 100, 90., 90., 90.], dtype=np.float32)
    with pytest.raises(ValueError, match="Size of resulting array"):
        distances.distance_array.__wrapped__(huge, huge, box=box)
def test_self_distance_array_overflow_exception():
    """self_distance_array must refuse result sizes that would overflow."""
    class FakeArray(np.ndarray):
        # Fake shape/ndim so the overflow path is hit without allocating.
        shape = (6074001001, 3)  # solution of x**2 -x = 2*UINT64_MAX
        ndim = 2
    huge = FakeArray([1, 2, 3])
    box = np.array([100, 100, 100, 90., 90., 90.], dtype=np.float32)
    with pytest.raises(ValueError, match="Size of resulting array"):
        distances.self_distance_array.__wrapped__(huge, box=box)
@pytest.fixture()
def DCD_Universe():
    """(universe, trajectory) pair for the PSF/DCD test system."""
    u = MDAnalysis.Universe(PSF, DCD)
    return u, u.trajectory
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class TestDistanceArrayDCD(object):
    """distance_array between two frames of the PSF/DCD test trajectory."""

    # reasonable precision so that tests succeed on 32 and 64 bit machines
    # (the reference values were obtained on 64 bit)
    # Example:
    #   Items are not equal: wrong maximum distance value
    #   ACTUAL: 52.470254967456412
    #   DESIRED: 52.470257062419059
    prec = 5

    def test_simple(self, DCD_Universe, backend):
        U, trajectory = DCD_Universe
        trajectory.rewind()
        x0 = U.atoms.positions
        trajectory[10]  # indexing the trajectory advances to frame 10
        x1 = U.atoms.positions
        d = distances.distance_array(x0, x1, backend=backend)
        assert_equal(d.shape, (3341, 3341), "wrong shape (should be (Natoms,Natoms))")
        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
                            err_msg="wrong minimum distance value")
        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
                            err_msg="wrong maximum distance value")

    def test_outarray(self, DCD_Universe, backend):
        # Same as test_simple but writing into a caller-provided result array.
        U, trajectory = DCD_Universe
        trajectory.rewind()
        x0 = U.atoms.positions
        trajectory[10]
        x1 = U.atoms.positions
        natoms = len(U.atoms)
        d = np.zeros((natoms, natoms), np.float64)
        distances.distance_array(x0, x1, result=d, backend=backend)
        assert_equal(d.shape, (natoms, natoms), "wrong shape, shoud be  (Natoms,Natoms) entries")
        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
                            err_msg="wrong minimum distance value")
        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
                            err_msg="wrong maximum distance value")

    def test_periodic(self, DCD_Universe, backend):
        # boring with the current dcd as that has no PBC
        U, trajectory = DCD_Universe
        trajectory.rewind()
        x0 = U.atoms.positions
        trajectory[10]
        x1 = U.atoms.positions
        d = distances.distance_array(x0, x1, box=U.coord.dimensions,
                                     backend=backend)
        assert_equal(d.shape, (3341, 3341), "should be square matrix with Natoms entries")
        assert_almost_equal(d.min(), 0.11981228170520701, self.prec,
                            err_msg="wrong minimum distance value with PBC")
        assert_almost_equal(d.max(), 53.572192429459619, self.prec,
                            err_msg="wrong maximum distance value with PBC")
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class TestSelfDistanceArrayDCD(object):
    """self_distance_array on DCD coordinates, serial and OpenMP backends.

    Fix: the expected pair count N is now computed with floor division
    everywhere. Under Python 3 true division, ``natoms * (natoms - 1) / 2``
    yields a float; the shape comparison only passed because numpy's
    assert_equal compares 5578470.0 == 5578470 leniently. test_outarray
    already used ``//`` -- the other two methods are now consistent with it.
    """

    # reasonable precision so that tests succeed on 32 and 64 bit machines
    # (the reference values were obtained on 64 bit)
    prec = 5

    def test_simple(self, DCD_Universe, backend):
        U, trajectory = DCD_Universe
        trajectory.rewind()
        x0 = U.atoms.positions
        d = distances.self_distance_array(x0, backend=backend)
        N = 3341 * (3341 - 1) // 2  # was `/`: float under true division
        assert_equal(d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))")
        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
                            err_msg="wrong minimum distance value")
        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
                            err_msg="wrong maximum distance value")

    def test_outarray(self, DCD_Universe, backend):
        # Same as test_simple but writing into a caller-provided result array.
        U, trajectory = DCD_Universe
        trajectory.rewind()
        x0 = U.atoms.positions
        natoms = len(U.atoms)
        N = natoms * (natoms - 1) // 2
        d = np.zeros((N,), np.float64)
        distances.self_distance_array(x0, result=d, backend=backend)
        assert_equal(d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))")
        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
                            err_msg="wrong minimum distance value")
        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
                            err_msg="wrong maximum distance value")

    def test_periodic(self, DCD_Universe, backend):
        # boring with the current dcd as that has no PBC
        U, trajectory = DCD_Universe
        trajectory.rewind()
        x0 = U.atoms.positions
        natoms = len(U.atoms)
        N = natoms * (natoms - 1) // 2  # was `/`: float under true division
        d = distances.self_distance_array(x0, box=U.coord.dimensions,
                                          backend=backend)
        assert_equal(d.shape, (N,), "wrong shape (should be (Natoms*(Natoms-1)/2,))")
        assert_almost_equal(d.min(), 0.92905562402529318, self.prec,
                            err_msg="wrong minimum distance value with PBC")
        assert_almost_equal(d.max(), 52.4702570624190590, self.prec,
                            err_msg="wrong maximum distance value with PBC")
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class TestTriclinicDistances(object):
    """Unit tests for the Triclinic PBC functions.

    Tests:
      # transforming to and from S space (fractional coords)
      MDAnalysis.lib.distances.transform_StoR
      MDAnalysis.lib.distances.transform_RtoS
      # distance calculations with PBC
      MDAnalysis.lib.distances.self_distance_array
      MDAnalysis.lib.distances.distance_array
    """

    prec = 2

    @staticmethod
    @pytest.fixture()
    def TRIC():
        # The fixture shadows the module-level TRIC datafile name, but the
        # reference inside the body still resolves to the datafile (no local
        # binding here).
        return MDAnalysis.Universe(TRIC)

    @staticmethod
    @pytest.fixture()
    def tri_vec_box(TRIC):
        # Box as a (3, 3) triclinic vector matrix.
        return MDAnalysis.coordinates.core.triclinic_vectors(TRIC.dimensions)

    @staticmethod
    @pytest.fixture()
    def box(TRIC):
        # Box in [lx, ly, lz, alpha, beta, gamma] form.
        return TRIC.dimensions

    @staticmethod
    @pytest.fixture()
    def S_mol(TRIC):
        # Two single-atom coordinate sets, each shaped (1, 3).
        S_mol1 = np.array([TRIC.atoms[383].position])
        S_mol2 = np.array([TRIC.atoms[390].position])
        return S_mol1, S_mol2

    @staticmethod
    @pytest.fixture()
    def S_mol_single(TRIC):
        # Same two atoms as S_mol, but as bare (3,) vectors.
        S_mol1 = TRIC.atoms[383].position
        S_mol2 = TRIC.atoms[390].position
        return S_mol1, S_mol2

    # indirect=True: each listed fixture object is resolved as the S_mol
    # fixture, so the transform test runs on both (1, 3) and (3,) inputs.
    @pytest.mark.parametrize('S_mol', [S_mol, S_mol_single], indirect=True)
    def test_transforms(self, S_mol, tri_vec_box, box, backend):
        # To check the cython coordinate transform, the same operation is done in numpy
        # Is a matrix multiplication of Coords x tri_vec_box = NewCoords, so can use np.dot
        S_mol1, S_mol2 = S_mol
        # Test transformation
        R_mol1 = distances.transform_StoR(S_mol1, box, backend=backend)
        R_np1 = np.dot(S_mol1, tri_vec_box)
        R_mol2 = distances.transform_StoR(S_mol2, box, backend=backend)
        R_np2 = np.dot(S_mol2, tri_vec_box)
        assert_almost_equal(R_mol1, R_np1, self.prec, err_msg="StoR transform failed for S_mol1")
        assert_almost_equal(R_mol2, R_np2, self.prec, err_msg="StoR transform failed for S_mol2")
        # Round trip test
        S_test1 = distances.transform_RtoS(R_mol1, box, backend=backend)
        S_test2 = distances.transform_RtoS(R_mol2, box, backend=backend)
        assert_almost_equal(S_test1, S_mol1, self.prec, err_msg="Round trip 1 failed in transform")
        assert_almost_equal(S_test2, S_mol2, self.prec, err_msg="Round trip 2 failed in transform")

    def test_selfdist(self, S_mol, box, tri_vec_box, backend):
        S_mol1, S_mol2 = S_mol
        R_coords = distances.transform_StoR(S_mol1, box, backend=backend)
        # Transform functions are tested elsewhere so taken as working here
        dists = distances.self_distance_array(R_coords, box=box, backend=backend)
        # Manually calculate self_distance_array: apply the minimum-image
        # convention one triclinic box vector at a time (z, then y, then x).
        manual = np.zeros(len(dists), dtype=np.float64)
        distpos = 0
        for i, Ri in enumerate(R_coords):
            for Rj in R_coords[i + 1:]:
                Rij = Rj - Ri
                Rij -= round(Rij[2] / tri_vec_box[2][2]) * tri_vec_box[2]
                Rij -= round(Rij[1] / tri_vec_box[1][1]) * tri_vec_box[1]
                Rij -= round(Rij[0] / tri_vec_box[0][0]) * tri_vec_box[0]
                Rij = np.linalg.norm(Rij)  # find norm of Rij vector
                manual[distpos] = Rij  # and done, phew
                distpos += 1
        assert_almost_equal(dists, manual, self.prec,
                            err_msg="self_distance_array failed with input 1")
        # Do it again for input 2 (has wider separation in points)
        R_coords = distances.transform_StoR(S_mol2, box, backend=backend)
        # Transform functions are tested elsewhere so taken as working here
        dists = distances.self_distance_array(R_coords, box=box, backend=backend)
        # Manually calculate self_distance_array
        manual = np.zeros(len(dists), dtype=np.float64)
        distpos = 0
        for i, Ri in enumerate(R_coords):
            for Rj in R_coords[i + 1:]:
                Rij = Rj - Ri
                Rij -= round(Rij[2] / tri_vec_box[2][2]) * tri_vec_box[2]
                Rij -= round(Rij[1] / tri_vec_box[1][1]) * tri_vec_box[1]
                Rij -= round(Rij[0] / tri_vec_box[0][0]) * tri_vec_box[0]
                Rij = np.linalg.norm(Rij)  # find norm of Rij vector
                manual[distpos] = Rij  # and done, phew
                distpos += 1
        assert_almost_equal(dists, manual, self.prec,
                            err_msg="self_distance_array failed with input 2")

    def test_distarray(self, S_mol, tri_vec_box, box, backend):
        S_mol1, S_mol2 = S_mol
        R_mol1 = distances.transform_StoR(S_mol1, box, backend=backend)
        R_mol2 = distances.transform_StoR(S_mol2, box, backend=backend)
        # Try with box
        dists = distances.distance_array(R_mol1, R_mol2, box=box, backend=backend)
        # Manually calculate distance_array (same minimum-image scheme as
        # test_selfdist, but over the full cross product of positions).
        manual = np.zeros((len(R_mol1), len(R_mol2)))
        for i, Ri in enumerate(R_mol1):
            for j, Rj in enumerate(R_mol2):
                Rij = Rj - Ri
                Rij -= round(Rij[2] / tri_vec_box[2][2]) * tri_vec_box[2]
                Rij -= round(Rij[1] / tri_vec_box[1][1]) * tri_vec_box[1]
                Rij -= round(Rij[0] / tri_vec_box[0][0]) * tri_vec_box[0]
                Rij = np.linalg.norm(Rij)  # find norm of Rij vector
                manual[i][j] = Rij
        assert_almost_equal(dists, manual, self.prec,
                            err_msg="distance_array failed with box")

    def test_pbc_dist(self, S_mol, box, backend):
        # Known-good PBC distance for this atom pair in the TRIC system.
        S_mol1, S_mol2 = S_mol
        results = np.array([[37.629944]])
        dists = distances.distance_array(S_mol1, S_mol2, box=box, backend=backend)
        assert_almost_equal(dists, results, self.prec,
                            err_msg="distance_array failed to retrieve PBC distance")

    def test_pbc_wrong_wassenaar_distance(self, backend):
        # Construct a pair whose naive (Wassenaar) distance differs from the
        # true minimum-image distance in a strongly skewed box.
        box = [2, 2, 2, 60, 60, 60]
        tri_vec_box = mdamath.triclinic_vectors(box)
        a, b, c = tri_vec_box
        point_a = a + b
        point_b = .5 * point_a
        dist = distances.distance_array(point_a, point_b, box=box, backend=backend)
        assert_almost_equal(dist[0, 0], 1)
        # check that our distance is different from the wassenaar distance as
        # expected.
        assert np.linalg.norm(point_a - point_b) != dist[0, 0]
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class TestCythonFunctions(object):
# Unit tests for calc_bonds calc_angles and calc_dihedrals in lib.distances
# Tests both numerical results as well as input types as Cython will silently
# produce nonsensical results if given wrong data types otherwise.
prec = 5
@staticmethod
@pytest.fixture()
def box():
return np.array([10., 10., 10., 90., 90., 90.], dtype=np.float32)
@staticmethod
@pytest.fixture()
def triclinic_box():
box_vecs = np.array([[10., 0., 0.], [1., 10., 0., ], [1., 0., 10.]],
dtype=np.float32)
return mdamath.triclinic_box(box_vecs[0], box_vecs[1], box_vecs[2])
@staticmethod
@pytest.fixture()
def positions():
# dummy atom data
a = np.array([[0., 0., 0.], [0., 0., 0.], [0., 11., 0.], [1., 1., 1.]], dtype=np.float32)
b = np.array([[0., 0., 0.], [1., 1., 1.], [0., 0., 0.], [29., -21., 99.]], dtype=np.float32)
c = np.array([[0., 0., 0.], [2., 2., 2.], [11., 0., 0.], [1., 9., 9.]], dtype=np.float32)
d = np.array([[0., 0., 0.], [3., 3., 3.], [11., -11., 0.], [65., -65., 65.]], dtype=np.float32)
return a, b, c, d
@staticmethod
def convert_position_dtype(a, b, c, d, dtype):
return a.astype(dtype), b.astype(dtype), c.astype(dtype), d.astype(dtype)
@staticmethod
@pytest.fixture()
def wronglength():
# has a different length to other inputs and should raise ValueError
return np.array([[0., 0., 0.], [3., 3., 3.]],
dtype=np.float32)
# coordinate shifts for single coord tests
shifts = [((0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)), # no shifting
((1, 0, 0), (0, 1, 1), (0, 0, 1), (1, 1, 0)), # single box lengths
((-1, 0, 1), (0, -1, 0), (1, 0, 1), (-1, -1, -1)), # negative single
((4, 3, -2), (-2, 2, 2), (-5, 2, 2), (0, 2, 2))] # multiple boxlengths
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_bonds(self, positions, box, backend, dtype):
a, b, c, d = self.convert_position_dtype(*positions, dtype=dtype)
dists = distances.calc_bonds(a, b, backend=backend)
assert_equal(len(dists), 4, err_msg="calc_bonds results have wrong length")
dists_pbc = distances.calc_bonds(a, b, box=box, backend=backend)
#tests 0 length
assert_almost_equal(dists[0], 0.0, self.prec, err_msg="Zero length calc_bonds fail")
assert_almost_equal(dists[1], 1.7320508075688772, self.prec,
err_msg="Standard length calc_bonds fail") # arbitrary length check
# PBC checks, 2 without, 2 with
assert_almost_equal(dists[2], 11.0, self.prec,
err_msg="PBC check #1 w/o box") # pbc check 1, subtract single box length
assert_almost_equal(dists_pbc[2], 1.0, self.prec,
err_msg="PBC check #1 with box")
assert_almost_equal(dists[3], 104.26888318, self.prec, # pbc check 2, subtract multiple box
err_msg="PBC check #2 w/o box") # lengths in all directions
assert_almost_equal(dists_pbc[3], 3.46410072, self.prec,
err_msg="PBC check #w with box")
def test_bonds_badbox(self, positions, backend):
a, b, c, d = positions
badbox1 = np.array([10., 10., 10.], dtype=np.float64)
badbox2 = np.array([[10., 10.], [10., 10., ]], dtype=np.float32)
with pytest.raises(ValueError):
distances.calc_bonds(a, b, box=badbox1, backend=backend)
with pytest.raises(ValueError):
distances.calc_bonds(a, b, box=badbox2, backend=backend)
def test_bonds_badresult(self, positions, backend):
a, b, c, d = positions
badresult = np.zeros(len(a) - 1) # Bad result array
with pytest.raises(ValueError):
distances.calc_bonds(a, b, result=badresult, backend=backend)
def test_bonds_triclinic(self, positions, triclinic_box, backend):
a, b, c, d = positions
dists = distances.calc_bonds(a, b, box=triclinic_box, backend=backend)
reference = np.array([0.0, 1.7320508, 1.4142136, 2.82842712])
assert_almost_equal(dists, reference, self.prec, err_msg="calc_bonds with triclinic box failed")
@pytest.mark.parametrize('shift', shifts)
@pytest.mark.parametrize('periodic', [True, False])
def test_bonds_single_coords(self, shift, periodic, backend):
box = np.array([10, 20, 30, 90., 90., 90.], dtype=np.float32)
coords = np.array([[1, 1, 1], [3, 1, 1]], dtype=np.float32)
shift1, shift2, _, _ = shift
coords[0] += shift1 * box[:3]
coords[1] += shift2 * box[:3]
box = box if periodic else None
result = distances.calc_bonds(coords[0], coords[1], box, backend=backend)
reference = 2.0 if periodic else np.linalg.norm(coords[0] - coords[1])
assert_almost_equal(result, reference, decimal=self.prec)
    @pytest.mark.parametrize('dtype', (np.float32, np.float64))
    def test_angles(self, positions, backend, dtype):
        """Check calc_angles against known angles (180 deg, 90 deg, and a
        small arbitrary angle) for single and double precision input."""
        a, b, c, d = self.convert_position_dtype(*positions, dtype=dtype)
        angles = distances.calc_angles(a, b, c, backend=backend)
        # Check calculated values
        assert_equal(len(angles), 4, err_msg="calc_angles results have wrong length")
        # The zero-length (degenerate) case is deliberately unasserted:
        # assert_almost_equal(angles[0], 0.0, self.prec,
        # err_msg="Zero length angle calculation failed") # What should this be?
        assert_almost_equal(angles[1], np.pi, self.prec,
                            err_msg="180 degree angle calculation failed")
        assert_almost_equal(np.rad2deg(angles[2]), 90., self.prec,
                            err_msg="Ninety degree angle in calc_angles failed")
        assert_almost_equal(angles[3], 0.098174833, self.prec,
                            err_msg="Small angle failed in calc_angles")
def test_angles_bad_result(self, positions, backend):
a, b, c, d = positions
badresult = np.zeros(len(a) - 1) # Bad result array
with pytest.raises(ValueError):
distances.calc_angles(a, b, c, result=badresult, backend=backend)
@pytest.mark.parametrize('case', [
(np.array([[1, 1, 1], [1, 2, 1], [2, 2, 1]], dtype=np.float32), 0.5 * np.pi), # 90 degree angle
(np.array([[1, 1, 1], [1, 2, 1], [1, 3, 1]], dtype=np.float32), np.pi), # straight line / 180.
(np.array([[1, 1, 1], [1, 2, 1], [2, 1, 1]], dtype=np.float32), 0.25 * np.pi), # 45
])
@pytest.mark.parametrize('shift', shifts)
@pytest.mark.parametrize('periodic', [True, False])
def test_angles_single_coords(self, case, shift, periodic, backend):
def manual_angle(x, y, z):
return mdamath.angle(y - x, y - z)
box = np.array([10, 20, 30, 90., 90., 90.], dtype=np.float32)
(a, b, c), ref = case
shift1, shift2, shift3, _ = shift
a += shift1 * box[:3]
b += shift2 * box[:3]
c += shift3 * box[:3]
box = box if periodic else None
result = distances.calc_angles(a, b, c, box, backend=backend)
reference = ref if periodic else manual_angle(a, b, c)
assert_almost_equal(result, reference, decimal=4)
    @pytest.mark.parametrize('dtype', (np.float32, np.float64))
    def test_dihedrals(self, positions, backend, dtype):
        """calc_dihedrals: degenerate geometries yield NaN; a 180 degree and
        an arbitrary reference dihedral are reproduced in both precisions."""
        a, b, c, d = self.convert_position_dtype(*positions, dtype=dtype)
        dihedrals = distances.calc_dihedrals(a, b, c, d, backend=backend)
        # Check calculated values
        assert_equal(len(dihedrals), 4, err_msg="calc_dihedrals results have wrong length")
        # a zero-length bond / collinear atoms leave the dihedral undefined
        assert np.isnan(dihedrals[0]), "Zero length dihedral failed"
        assert np.isnan(dihedrals[1]), "Straight line dihedral failed"
        assert_almost_equal(dihedrals[2], np.pi, self.prec, err_msg="180 degree dihedral failed")
        assert_almost_equal(dihedrals[3], -0.50714064, self.prec,
                            err_msg="arbitrary dihedral angle failed")
def test_dihedrals_wronglength(self, positions, wronglength, backend):
a, b, c, d = positions
with pytest.raises(ValueError):
distances.calc_dihedrals(a, wronglength, c, d, backend=backend)
with pytest.raises(ValueError):
distances.calc_dihedrals(wronglength, b, c, d, backend=backend)
with pytest.raises(ValueError):
distances.calc_dihedrals(a, b, wronglength, d, backend=backend)
with pytest.raises(ValueError):
distances.calc_dihedrals(a, b, c, wronglength, backend=backend)
def test_dihedrals_bad_result(self, positions, backend):
a, b, c, d = positions
badresult = np.zeros(len(a) - 1) # Bad result array
with pytest.raises(ValueError):
distances.calc_dihedrals(a, b, c, d, result=badresult, backend=backend)
@pytest.mark.parametrize('case', [
(np.array([[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 2, 1]], dtype=np.float32), 0.), # 0 degree angle (cis)
(np.array([[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 0, 1]], dtype=np.float32), np.pi), # 180 degree (trans)
(np.array([[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 1, 2]], dtype=np.float32), 0.5 * np.pi), # 90 degree
(np.array([[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 1, 0]], dtype=np.float32), 0.5 * np.pi), # other 90 degree
(np.array([[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 2, 2]], dtype=np.float32), 0.25 * np.pi), # 45 degree
(np.array([[1, 2, 1], [1, 1, 1], [2, 1, 1], [2, 0, 2]], dtype=np.float32), 0.75 * np.pi), # 135
])
@pytest.mark.parametrize('shift', shifts)
@pytest.mark.parametrize('periodic', [True, False])
def test_dihedrals_single_coords(self, case, shift, periodic, backend):
def manual_dihedral(a, b, c, d):
return mdamath.dihedral(b - a, c - b, d - c)
box = np.array([10., 10., 10., 90., 90., 90.], dtype=np.float32)
(a, b, c, d), ref = case
shift1, shift2, shift3, shift4 = shift
a += shift1 * box[:3]
b += shift2 * box[:3]
c += shift3 * box[:3]
d += shift4 * box[:3]
box = box if periodic else None
result = distances.calc_dihedrals(a, b, c, d, box, backend=backend)
reference = ref if periodic else manual_dihedral(a, b, c, d)
assert_almost_equal(abs(result), abs(reference), decimal=4)
def test_numpy_compliance(self, positions, backend):
a, b, c, d = positions
# Checks that the cython functions give identical results to the numpy versions
bonds = distances.calc_bonds(a, b, backend=backend)
angles = distances.calc_angles(a, b, c, backend=backend)
dihedrals = distances.calc_dihedrals(a, b, c, d, backend=backend)
bonds_numpy = np.array([mdamath.norm(y - x) for x, y in zip(a, b)])
vec1 = a - b
vec2 = c - b
angles_numpy = np.array([mdamath.angle(x, y) for x, y in zip(vec1, vec2)])
ab = a - b
bc = b - c
cd = c - d
dihedrals_numpy = np.array([mdamath.dihedral(x, y, z) for x, y, z in zip(ab, bc, cd)])
assert_almost_equal(bonds, bonds_numpy, self.prec,
err_msg="Cython bonds didn't match numpy calculations")
# numpy 0 angle returns NaN rather than 0
assert_almost_equal(angles[1:], angles_numpy[1:], self.prec,
err_msg="Cython angles didn't match numpy calcuations")
assert_almost_equal(dihedrals, dihedrals_numpy, self.prec,
err_msg="Cython dihedrals didn't match numpy calculations")
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class Test_apply_PBC(object):
    """Tests for distances.apply_PBC with orthorhombic and triclinic boxes."""
    prec = 6

    def test_ortho_PBC(self, backend):
        """Orthorhombic wrapping matches the simple numpy modulo formula;
        a bare 3-vector box (missing the angles) must be rejected."""
        U = MDAnalysis.Universe(PSF, DCD)
        atoms = U.atoms.positions
        box = np.array([2.5, 2.5, 3.5, 90., 90., 90.], dtype=np.float32)
        # box[:3] lacks the three angles -> invalid box specification
        with pytest.raises(ValueError):
            cyth1 = distances.apply_PBC(atoms, box[:3], backend=backend)
        cyth2 = distances.apply_PBC(atoms, box, backend=backend)
        # reference: wrap each coordinate into [0, L) per dimension
        reference = atoms - np.floor(atoms / box[:3]) * box[:3]
        assert_almost_equal(cyth2, reference, self.prec,
                            err_msg="Ortho apply_PBC #2 failed comparison with np")

    def test_tric_PBC(self, backend):
        """Triclinic wrapping matches a fractional-coordinate numpy
        reference; plus one hand-computed spot check in a skewed box."""
        U = MDAnalysis.Universe(TRIC)
        atoms = U.atoms.positions
        box = U.dimensions

        def numpy_PBC(coords, box):
            # move to fractional coordinates
            fractional = distances.transform_RtoS(coords, box)
            # move fractional coordinates to central cell
            fractional -= np.floor(fractional)
            # move back to real coordinates
            return distances.transform_StoR(fractional, box)

        cyth1 = distances.apply_PBC(atoms, box, backend=backend)
        reference = numpy_PBC(atoms, box)
        assert_almost_equal(cyth1, reference, decimal=4,
                            err_msg="Triclinic apply_PBC failed comparison with np")
        # spot check: a known point in a skewed box wraps to a known position
        box = np.array([10, 7, 3, 45, 60, 90], dtype=np.float32)
        r = np.array([5.75, 0.36066014, 0.75], dtype=np.float32)
        r_in_cell = distances.apply_PBC(r, box)
        assert_almost_equal([5.75, 7.3606596, 0.75], r_in_cell, self.prec)

    def test_coords_strictly_in_central_image_ortho(self, backend):
        """Coordinates just below the lower or exactly on the upper box
        boundary must end up strictly inside the primary image."""
        box = np.array([10.1, 10.1, 10.1, 90.0, 90.0, 90.0], dtype=np.float32)
        # coordinates just below lower or exactly at the upper box boundaries:
        coords = np.array([[-1.0e-7, -1.0e-7, -1.0e-7],
                           [-1.0e-7, -1.0e-7, box[2]],
                           [-1.0e-7, box[1], -1.0e-7],
                           [ box[0], -1.0e-7, -1.0e-7],
                           [ box[0], box[1], -1.0e-7],
                           [ box[0], -1.0e-7, box[2]],
                           [-1.0e-7, box[1], box[2]],
                           [ box[0], box[1], box[2]]], dtype=np.float32)
        # Check that all test coordinates actually lie below the lower or
        # exactly at the upper box boundary:
        assert np.all((coords < 0.0) | (coords == box[:3]))
        res = distances.apply_PBC(coords, box, backend=backend)
        # Assert all result coordinates lie strictly within the primary image:
        assert np.all(res >= 0.0)
        assert np.all(res < box[:3])

    def test_coords_in_central_image_tric(self, backend):
        """Same strict-wrapping guarantee for a triclinic box, verified in
        fractional coordinates."""
        # Triclinic box corresponding to this box matrix:
        tbx = np.array([[10.1 , 0. , 0. ],
                        [ 1.0100002, 10.1 , 0. ],
                        [ 1.0100006, 1.0100021, 10.1 ]],
                       dtype=np.float32)
        box = mdamath.triclinic_box(*tbx)
        # coordinates just below lower or exactly at the upper box boundaries:
        coords = np.array([[ -1.0e-7, -1.0e-7, -1.0e-7],
                           [tbx[0, 0], -1.0e-7, -1.0e-7],
                           [ 1.01 , tbx[1, 1], -1.0e-7],
                           [ 1.01 , 1.01 , tbx[2, 2]],
                           [tbx[0, 0] + tbx[1, 0], tbx[1, 1], -1.0e-7],
                           [tbx[0, 0] + tbx[2, 0], 1.01, tbx[2, 2]],
                           [2.02, tbx[1, 1] + tbx[2, 1], tbx[2, 2]],
                           [tbx[0, 0] + tbx[1, 0] + tbx[2, 0],
                            tbx[1, 1] + tbx[2, 1], tbx[2, 2]]],
                          dtype=np.float32)
        relcoords = distances.transform_RtoS(coords, box)
        # Check that all test coordinates actually lie below the lower or
        # exactly at the upper box boundary:
        assert np.all((relcoords < 0.0) | (relcoords == 1.0))
        res = distances.apply_PBC(coords, box, backend=backend)
        relres = distances.transform_RtoS(res, box)
        # Assert all result coordinates lie strictly within the primary image:
        assert np.all(relres >= 0.0)
        assert np.all(relres < 1.0)
@pytest.mark.parametrize('backend', ['serial', 'openmp'])
class TestPeriodicAngles(object):
    """Test case for properly considering minimum image convention when calculating angles and dihedrals
    (Issue 172)
    """
    @staticmethod
    @pytest.fixture()
    def positions():
        # four single-atom coordinate arrays plus a cubic 10x10x10 box
        a = np.array([[0.0, 1.0, 0.0]], dtype=np.float32)
        b = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)
        c = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
        d = np.array([[1.0, 0.0, 1.0]], dtype=np.float32)
        box = np.array([10.0, 10.0, 10.0], dtype=np.float32)
        return a, b, c, d, box

    prec = 5

    def test_angles(self, positions, backend):
        """Angles must be invariant when any subset of atoms is shifted by
        whole box lengths and a box is supplied."""
        # Shift atom coordinates a few box lengths in random directions and see if we still get same results
        a, b, c, d, box = positions
        a2 = a + box * (-1, 0, 0)
        b2 = b + box * (1, 0, 1)
        c2 = c + box * (-2, 5, -7)
        ref = distances.calc_angles(a, b, c, backend=backend)
        # extend the 3-vector box to [lx, ly, lz, alpha, beta, gamma]
        box = np.append(box, [90, 90, 90])
        test1 = distances.calc_angles(a2, b, c, box=box, backend=backend)
        test2 = distances.calc_angles(a, b2, c, box=box, backend=backend)
        test3 = distances.calc_angles(a, b, c2, box=box, backend=backend)
        test4 = distances.calc_angles(a2, b2, c2, box=box, backend=backend)
        for val in [test1, test2, test3, test4]:
            assert_almost_equal(ref, val, self.prec, err_msg="Min image in angle calculation failed")

    def test_dihedrals(self, positions, backend):
        """Dihedrals must likewise be invariant under whole-box shifts of
        any subset of the four atoms."""
        a, b, c, d, box = positions
        a2 = a + box * (-1, 0, 0)
        b2 = b + box * (1, 0, 1)
        c2 = c + box * (-2, 5, -7)
        d2 = d + box * (0, -5, 0)
        ref = distances.calc_dihedrals(a, b, c, d, backend=backend)
        # extend the 3-vector box to [lx, ly, lz, alpha, beta, gamma]
        box = np.append(box, [90, 90, 90])
        test1 = distances.calc_dihedrals(a2, b, c, d, box=box, backend=backend)
        test2 = distances.calc_dihedrals(a, b2, c, d, box=box, backend=backend)
        test3 = distances.calc_dihedrals(a, b, c2, d, box=box, backend=backend)
        test4 = distances.calc_dihedrals(a, b, c, d2, box=box, backend=backend)
        test5 = distances.calc_dihedrals(a2, b2, c2, d2, box=box, backend=backend)
        for val in [test1, test2, test3, test4, test5]:
            assert_almost_equal(ref, val, self.prec, err_msg="Min image in dihedral calculation failed")
class TestInputUnchanged(object):
    """Tests ensuring that the following functions in MDAnalysis.lib.distances
    do not alter their input coordinate arrays:
    * distance_array
    * self_distance_array
    * capped_distance
    * self_capped_distance
    * transform_RtoS
    * transform_StoR
    * calc_bonds
    * calc_angles
    * calc_dihedrals
    * apply_PBC
    """
    # orthorhombic, triclinic, and no-PBC box variants:
    boxes = ([1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
             [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
             None)  # no PBC

    @staticmethod
    @pytest.fixture()
    def coords():
        # input coordinates, some outside the [1, 1, 1] box:
        return [np.array([[0.1, 0.1, 0.1], [-0.9, -0.9, -0.9]], dtype=np.float32),
                np.array([[0.1, 0.1, 1.9], [-0.9, -0.9, 0.9]], dtype=np.float32),
                np.array([[0.1, 1.9, 1.9], [-0.9, 0.9, 0.9]], dtype=np.float32),
                np.array([[0.1, 1.9, 0.1], [-0.9, 0.9, -0.9]], dtype=np.float32)]

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_distance_array(self, coords, box, backend):
        """distance_array must not modify its two input coordinate arrays."""
        crds = coords[:2]
        refs = [crd.copy() for crd in crds]
        res = distances.distance_array(crds[0], crds[1], box=box,
                                       backend=backend)
        assert_equal(crds, refs)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_self_distance_array(self, coords, box, backend):
        """self_distance_array must not modify its input coordinate array."""
        crd = coords[0]
        ref = crd.copy()
        res = distances.self_distance_array(crd, box=box, backend=backend)
        assert_equal(crd, ref)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
    def test_input_unchanged_capped_distance(self, coords, box, met):
        """capped_distance must not modify its inputs for any search method."""
        crds = coords[:2]
        refs = [crd.copy() for crd in crds]
        res = distances.capped_distance(crds[0], crds[1], max_cutoff=0.3,
                                        box=box, method=met)
        assert_equal(crds, refs)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
    def test_input_unchanged_self_capped_distance(self, coords, box, met):
        """self_capped_distance must not modify its input for any search method."""
        crd = coords[0]
        ref = crd.copy()
        r_cut = 0.25
        res = distances.self_capped_distance(crd, max_cutoff=r_cut, box=box,
                                             method=met)
        assert_equal(crd, ref)

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_transform_RtoS_and_StoR(self, coords, box, backend):
        """Both coordinate transforms must not modify their inputs
        (PBC boxes only: a box is required here)."""
        crd = coords[0]
        ref = crd.copy()
        res = distances.transform_RtoS(crd, box, backend=backend)
        assert_equal(crd, ref)
        # round-trip: feed the fractional coordinates back through StoR
        crd = res
        ref = crd.copy()
        res = distances.transform_StoR(crd, box, backend=backend)
        assert_equal(crd, ref)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_calc_bonds(self, coords, box, backend):
        """calc_bonds must not modify its two input coordinate arrays."""
        crds = coords[:2]
        refs = [crd.copy() for crd in crds]
        res = distances.calc_bonds(crds[0], crds[1], box=box, backend=backend)
        assert_equal(crds, refs)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_calc_angles(self, coords, box, backend):
        """calc_angles must not modify its three input coordinate arrays."""
        crds = coords[:3]
        refs = [crd.copy() for crd in crds]
        res = distances.calc_angles(crds[0], crds[1], crds[2], box=box,
                                    backend=backend)
        assert_equal(crds, refs)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_calc_dihedrals(self, coords, box, backend):
        """calc_dihedrals must not modify its four input coordinate arrays."""
        crds = coords
        refs = [crd.copy() for crd in crds]
        res = distances.calc_dihedrals(crds[0], crds[1], crds[2], crds[3],
                                       box=box, backend=backend)
        assert_equal(crds, refs)

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_input_unchanged_apply_PBC(self, coords, box, backend):
        """apply_PBC must not modify its input coordinate array."""
        crd = coords[0]
        ref = crd.copy()
        res = distances.apply_PBC(crd, box, backend=backend)
        assert_equal(crd, ref)
class TestEmptyInputCoordinates(object):
    """Tests ensuring that the following functions in MDAnalysis.lib.distances
    do not choke on empty input coordinate arrays:
    * distance_array
    * self_distance_array
    * capped_distance
    * self_capped_distance
    * transform_RtoS
    * transform_StoR
    * calc_bonds
    * calc_angles
    * calc_dihedrals
    * apply_PBC
    """
    max_cut = 0.25  # max_cutoff parameter for *capped_distance()
    min_cut = 0.0  # optional min_cutoff parameter for *capped_distance()

    # orthorhombic, triclinic, and no-PBC box variants:
    boxes = ([1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
             [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
             None)  # no PBC

    @staticmethod
    @pytest.fixture()
    def empty_coord():
        # empty coordinate array:
        return np.empty((0, 3), dtype=np.float32)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_distance_array(self, empty_coord, box, backend):
        """Empty input yields an empty (0, 0) float64 distance matrix."""
        res = distances.distance_array(empty_coord, empty_coord, box=box,
                                       backend=backend)
        assert_equal(res, np.empty((0, 0), dtype=np.float64))

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_self_distance_array(self, empty_coord, box, backend):
        """Empty input yields an empty (0,) float64 result."""
        res = distances.self_distance_array(empty_coord, box=box,
                                            backend=backend)
        assert_equal(res, np.empty((0,), dtype=np.float64))

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('min_cut', [min_cut, None])
    @pytest.mark.parametrize('ret_dist', [False, True])
    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
    def test_empty_input_capped_distance(self, empty_coord, min_cut, box, met,
                                         ret_dist):
        """Empty input yields empty pair (and optionally distance) arrays."""
        res = distances.capped_distance(empty_coord, empty_coord,
                                        max_cutoff=self.max_cut,
                                        min_cutoff=min_cut, box=box, method=met,
                                        return_distances=ret_dist)
        if ret_dist:
            assert_equal(res[0], np.empty((0, 2), dtype=np.int64))
            assert_equal(res[1], np.empty((0,), dtype=np.float64))
        else:
            assert_equal(res, np.empty((0, 2), dtype=np.int64))

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('min_cut', [min_cut, None])
    @pytest.mark.parametrize('ret_dist', [False, True])
    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
    def test_empty_input_self_capped_distance(self, empty_coord, min_cut, box,
                                              met, ret_dist):
        """Empty input yields empty pair (and optionally distance) arrays."""
        res = distances.self_capped_distance(empty_coord,
                                             max_cutoff=self.max_cut,
                                             min_cutoff=min_cut, box=box,
                                             method=met, return_distances=ret_dist)
        if ret_dist:
            assert_equal(res[0], np.empty((0, 2), dtype=np.int64))
            assert_equal(res[1], np.empty((0,), dtype=np.float64))
        else:
            assert_equal(res, np.empty((0, 2), dtype=np.int64))

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_transform_RtoS(self, empty_coord, box, backend):
        """Empty input passes through transform_RtoS unchanged."""
        res = distances.transform_RtoS(empty_coord, box, backend=backend)
        assert_equal(res, empty_coord)

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_transform_StoR(self, empty_coord, box, backend):
        """Empty input passes through transform_StoR unchanged."""
        res = distances.transform_StoR(empty_coord, box, backend=backend)
        assert_equal(res, empty_coord)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_calc_bonds(self, empty_coord, box, backend):
        """Empty input yields an empty (0,) float64 result."""
        res = distances.calc_bonds(empty_coord, empty_coord, box=box,
                                   backend=backend)
        assert_equal(res, np.empty((0,), dtype=np.float64))

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_calc_angles(self, empty_coord, box, backend):
        """Empty input yields an empty (0,) float64 result."""
        res = distances.calc_angles(empty_coord, empty_coord, empty_coord,
                                    box=box, backend=backend)
        assert_equal(res, np.empty((0,), dtype=np.float64))

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_calc_dihedrals(self, empty_coord, box, backend):
        """Empty input yields an empty (0,) float64 result."""
        res = distances.calc_dihedrals(empty_coord, empty_coord, empty_coord,
                                       empty_coord, box=box, backend=backend)
        assert_equal(res, np.empty((0,), dtype=np.float64))

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_empty_input_apply_PBC(self, empty_coord, box, backend):
        """Empty input passes through apply_PBC unchanged."""
        res = distances.apply_PBC(empty_coord, box, backend=backend)
        assert_equal(res, empty_coord)
class TestOutputTypes(object):
    """Tests ensuring that the following functions in MDAnalysis.lib.distances
    return results of the types stated in the docs:
    * distance_array:
    - numpy.ndarray (shape=(n, m), dtype=numpy.float64)
    * self_distance_array:
    - numpy.ndarray (shape=(n*(n-1)//2,), dtype=numpy.float64)
    * capped_distance:
    - numpy.ndarray (shape=(n, 2), dtype=numpy.int64)
    - numpy.ndarray (shape=(n,), dtype=numpy.float64) (optional)
    * self_capped_distance:
    - numpy.ndarray (shape=(n, 2), dtype=numpy.int64)
    - numpy.ndarray (shape=(n,), dtype=numpy.float64)
    * transform_RtoS:
    - numpy.ndarray (shape=input.shape, dtype=numpy.float32)
    * transform_StoR:
    - numpy.ndarray (shape=input.shape, dtype=numpy.float32)
    * calc_bonds:
    - numpy.ndarray (shape=(n,), dtype=numpy.float64) for at least one
    shape (n,3) input, or numpy.float64 if all inputs are of shape (3,)
    * calc_angles:
    - numpy.ndarray (shape=(n,), dtype=numpy.float64) for at least one
    shape (n,3) input, or numpy.float64 if all inputs are of shape (3,)
    * calc_dihedrals:
    - numpy.ndarray (shape=(n,), dtype=numpy.float64) for at least one
    shape (n,3) input, or numpy.float64 for if all inputs are of
    shape (3,)
    * apply_PBC:
    - numpy.ndarray (shape=input.shape, dtype=numpy.float32)
    """
    max_cut = 0.25  # max_cutoff parameter for *capped_distance()
    min_cut = 0.0  # optional min_cutoff parameter for *capped_distance()

    # orthorhombic, triclinic, and no-PBC box variants:
    boxes = ([1.0, 1.0, 1.0, 90.0, 90.0, 90.0],  # orthorhombic
             [1.0, 1.0, 1.0, 80.0, 80.0, 80.0],  # triclinic
             None)  # no PBC

    coords = [np.empty((0, 3), dtype=np.float32),  # empty coord array
              np.array([[0.1, 0.1, 0.1]], dtype=np.float32),  # coord array
              np.array([0.1, 0.1, 0.1], dtype=np.float32),  # single coord
              np.array([[-1.1, -1.1, -1.1]], dtype=np.float32)]  # outside box

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('incoords', list(comb(coords, 2)))
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_type_distance_array(self, incoords, box, backend):
        """distance_array returns an (n, m) float64 ndarray."""
        res = distances.distance_array(*incoords, box=box, backend=backend)
        assert type(res) == np.ndarray
        # n % 2 maps the fixture position counts {0, 1, 3} onto the expected
        # result dimensions {0, 1, 1}: a single (3,) coordinate counts as one
        # position -- NOTE(review): this shortcut relies on the fixture sizes
        assert res.shape == (incoords[0].shape[0] % 2, incoords[1].shape[0] % 2)
        assert res.dtype.type == np.float64

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('incoords', coords)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_type_self_distance_array(self, incoords, box, backend):
        """self_distance_array returns an (n*(n-1)//2,) float64 ndarray;
        all fixture inputs contain at most one position, so n*(n-1)//2 == 0."""
        res = distances.self_distance_array(incoords, box=box, backend=backend)
        assert type(res) == np.ndarray
        assert res.shape == (0,)
        assert res.dtype.type == np.float64

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('min_cut', [min_cut, None])
    @pytest.mark.parametrize('ret_dist', [False, True])
    @pytest.mark.parametrize('incoords', list(comb(coords, 2)))
    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
    def test_output_type_capped_distance(self, incoords, min_cut, box, met,
                                         ret_dist):
        """capped_distance returns an (n, 2) intp pair array and, optionally,
        a matching (n,) float64 distance array."""
        res = distances.capped_distance(*incoords, max_cutoff=self.max_cut,
                                        min_cutoff=min_cut, box=box, method=met,
                                        return_distances=ret_dist)
        if ret_dist:
            pairs, dist = res
        else:
            pairs = res
        assert type(pairs) == np.ndarray
        assert pairs.dtype.type == np.intp
        assert pairs.ndim == 2
        assert pairs.shape[1] == 2
        if ret_dist:
            assert type(dist) == np.ndarray
            assert dist.dtype.type == np.float64
            assert dist.shape == (pairs.shape[0],)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('min_cut', [min_cut, None])
    @pytest.mark.parametrize('ret_dist', [False, True])
    @pytest.mark.parametrize('incoords', coords)
    @pytest.mark.parametrize('met', ["bruteforce", "pkdtree", "nsgrid", None])
    def test_output_type_self_capped_distance(self, incoords, min_cut, box,
                                              met, ret_dist):
        """self_capped_distance returns an (n, 2) intp pair array and,
        optionally, a matching (n,) float64 distance array."""
        res = distances.self_capped_distance(incoords,
                                             max_cutoff=self.max_cut,
                                             min_cutoff=min_cut,
                                             box=box, method=met,
                                             return_distances=ret_dist)
        if ret_dist:
            pairs, dist = res
        else:
            pairs = res
        assert type(pairs) == np.ndarray
        assert pairs.dtype.type == np.intp
        assert pairs.ndim == 2
        assert pairs.shape[1] == 2
        if ret_dist:
            assert type(dist) == np.ndarray
            assert dist.dtype.type == np.float64
            assert dist.shape == (pairs.shape[0],)

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('incoords', coords)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_dtype_transform_RtoS(self, incoords, box, backend):
        """transform_RtoS returns a float32 ndarray of the input's shape."""
        res = distances.transform_RtoS(incoords, box, backend=backend)
        assert type(res) == np.ndarray
        assert res.dtype.type == np.float32
        assert res.shape == incoords.shape

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('incoords', coords)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_dtype_transform_StoR(self, incoords, box, backend):
        """transform_StoR returns a float32 ndarray of the input's shape.

        Bug fix: this test was a duplicate of the RtoS test above (same
        method name, same RtoS call), so it shadowed the first definition
        and transform_StoR was never actually tested.
        """
        res = distances.transform_StoR(incoords, box, backend=backend)
        assert type(res) == np.ndarray
        assert res.dtype.type == np.float32
        assert res.shape == incoords.shape

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('incoords',
                             [2 * [coords[0]]] + list(comb(coords[1:], 2)))
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_type_calc_bonds(self, incoords, box, backend):
        """calc_bonds returns float64: a scalar for all-(3,) inputs,
        otherwise an (n,) ndarray."""
        res = distances.calc_bonds(*incoords, box=box, backend=backend)
        maxdim = max([crd.ndim for crd in incoords])
        if maxdim == 1:
            assert type(res) == np.float64
        else:
            assert type(res) == np.ndarray
            assert res.dtype.type == np.float64
            coord = [crd for crd in incoords if crd.ndim == maxdim][0]
            assert res.shape == (coord.shape[0],)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('incoords',
                             [3 * [coords[0]]] + list(comb(coords[1:], 3)))
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_type_calc_angles(self, incoords, box, backend):
        """calc_angles returns float64: a scalar for all-(3,) inputs,
        otherwise an (n,) ndarray."""
        res = distances.calc_angles(*incoords, box=box, backend=backend)
        maxdim = max([crd.ndim for crd in incoords])
        if maxdim == 1:
            assert type(res) == np.float64
        else:
            assert type(res) == np.ndarray
            assert res.dtype.type == np.float64
            coord = [crd for crd in incoords if crd.ndim == maxdim][0]
            assert res.shape == (coord.shape[0],)

    @pytest.mark.parametrize('box', boxes)
    @pytest.mark.parametrize('incoords',
                             [4 * [coords[0]]] + list(comb(coords[1:], 4)))
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_type_calc_dihedrals(self, incoords, box, backend):
        """calc_dihedrals returns float64: a scalar for all-(3,) inputs,
        otherwise an (n,) ndarray."""
        res = distances.calc_dihedrals(*incoords, box=box, backend=backend)
        maxdim = max([crd.ndim for crd in incoords])
        if maxdim == 1:
            assert type(res) == np.float64
        else:
            assert type(res) == np.ndarray
            assert res.dtype.type == np.float64
            coord = [crd for crd in incoords if crd.ndim == maxdim][0]
            assert res.shape == (coord.shape[0],)

    @pytest.mark.parametrize('box', boxes[:2])
    @pytest.mark.parametrize('incoords', coords)
    @pytest.mark.parametrize('backend', ['serial', 'openmp'])
    def test_output_type_apply_PBC(self, incoords, box, backend):
        """apply_PBC returns a float32 ndarray of the input's shape."""
        res = distances.apply_PBC(incoords, box, backend=backend)
        assert type(res) == np.ndarray
        assert res.dtype.type == np.float32
        assert res.shape == incoords.shape
class TestDistanceBackendSelection(object):
    """Tests for backend selection via distances._run."""
    @staticmethod
    @pytest.fixture()
    def backend_selection_pos():
        # random positions plus a correctly sized self-distance result buffer
        positions = np.random.rand(10, 3)
        N = positions.shape[0]
        result = np.empty(N * (N - 1) // 2, dtype=np.float64)
        return positions, result

    @pytest.mark.parametrize('backend', [
        "serial", "Serial", "SeRiAL", "SERIAL",
        "openmp", "OpenMP", "oPENmP", "OPENMP",
    ])
    def test_case_insensitivity(self, backend, backend_selection_pos):
        """Backend names must be accepted regardless of letter case."""
        positions, result = backend_selection_pos
        try:
            distances._run("calc_self_distance_array", args=(positions, result),
                           backend=backend)
        except RuntimeError:
            pytest.fail("Failed to understand backend {0}".format(backend))

    def test_wrong_backend(self, backend_selection_pos):
        """An unknown backend name must raise ValueError.

        Renamed from the misspelled ``test_wront_backend``.
        """
        positions, result = backend_selection_pos
        with pytest.raises(ValueError):
            distances._run("calc_self_distance_array", args=(positions, result),
                           backend="not implemented stuff")
def test_used_openmpflag():
    """distances.USED_OPENMP must be exposed as a plain boolean flag."""
    flag = distances.USED_OPENMP
    assert isinstance(flag, bool)
# test both orthogonal and triclinic boxes
@pytest.mark.parametrize('box', (np.eye(3) * 10, np.array([[10, 0, 0], [2, 10, 0], [2, 2, 10]])))
# try shifts of -2 to +2 in each dimension, and all combinations of shifts
@pytest.mark.parametrize('shift', itertools.product(range(-2, 3), range(-2, 3), range(-2, 3)))
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_minimize_vectors(box, shift, dtype):
    """minimize_vectors must strip whole-box displacements and recover the
    original minimum-image vectors for both box shapes and precisions."""
    # all 27 unit-cube direction vectors; each is much smaller than the box,
    # so they already obey the minimum-image convention
    directions = itertools.product(range(-1, 2), range(-1, 2), range(-1, 2))
    vec = np.array(list(directions), dtype=dtype)
    box = box.astype(dtype)
    # displace every vector by an integer combination of box vectors (box is
    # the 3x3 representation); under PBC this must not change the minimized
    # result
    displacement = (box.T * shift).sum(axis=1)
    shifted_vec = (vec + displacement).astype(dtype)
    box2 = mdamath.triclinic_box(*box).astype(dtype)  # 6-float box form
    res = distances.minimize_vectors(shifted_vec, box2)
    assert_allclose(res, vec, atol=0.00001)
    assert res.dtype == dtype
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/lib/test_distances.py
|
Python
|
gpl-2.0
| 62,939
|
[
"MDAnalysis"
] |
eb5e922bec38e5fe33cbc5d3b74ccb310e007a14d0d855c27e11f47d12e7b0ed
|
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
import traceback
import re
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Walk *attr* as a dotted path starting from *obj* and return the final
    attribute.  Raises AttributeError if any path segment starts with an
    underscore (treated as private).

    If the optional *allow_dotted_names* argument is false, *attr* is taken
    as a single attribute name and no splitting on '.' occurs, making this
    behave like getattr(obj, attr).
    """
    segments = attr.split('.') if allow_dotted_names else [attr]
    for segment in segments:
        if segment.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % segment
                )
        obj = getattr(obj, segment)
    return obj
def list_public_methods(obj):
    """Return the names of *obj*'s public callable attributes.

    An attribute is public when its name does not start with an underscore;
    callability is detected via the presence of a __call__ attribute.
    """
    public = []
    for name in dir(obj):
        if name.startswith('_'):
            continue  # private by convention
        if hasattr(getattr(obj, name), '__call__'):
            public.append(name)
    return public
def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Return a copy of lst with duplicate entries collapsed.  Every item
    must be hashable; the order of the items in the result is not
    defined.
    """
    # A dict keyed by the items de-duplicates in a single pass; the
    # values are irrelevant, only the key set is returned.
    return dict.fromkeys(lst).keys()
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer.
    """

    def __init__(self, allow_none=False, encoding=None):
        # Maps exported method names to callables.
        self.funcs = {}
        # Optional fallback object searched when no registered function
        # matches; set via register_instance().
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

            *** SECURITY WARNING: ***

            Enabling the allow_dotted_names options allows intruders
            to access your module's global variables and may allow
            intruders to execute arbitrary code on your machine.  Only
            use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name = None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        # Default the exported name to the function's own __name__.
        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                           'system.methodSignature' : self.system_methodSignature,
                           'system.methodHelp' : self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = xmlrpclib.loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding)
        except Fault, fault:
            # An explicit XML-RPC Fault raised by the handler is
            # marshalled back to the client as-is.
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        return response

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""

        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""

        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""

        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """

        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                # Per the multicall spec, a success is a one-element list
                # and a failure is a fault struct.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """

        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU

    #Override from StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding (whitespace in the pattern
    # is insignificant because of re.VERBOSE)
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        # Parse the Accept-Encoding header into a {coding: q-value}
        # mapping; entries without an explicit q default to 1.0.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                chunk = self.rfile.read(chunk_size)
                if not chunk:
                    # Client closed early; stop rather than block.
                    break
                L.append(chunk)
                size_remaining -= len(L[-1])
            data = ''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None), self.path
                )
        except Exception, e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    # Compress only when the client advertises gzip
                    # support with a non-zero q-value.
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = xmlrpclib.gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        #support gzip encoding of request; returns None after having
        #sent an error response itself.
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return xmlrpclib.gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""

        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True):
        self.logRequests = logRequests

        # Initialize both bases: the dispatcher mix-in first, then the
        # TCP server (which binds and activates the socket).
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler,
                                        bind_and_activate)

        # [Bug #1222790] If possible, mark the listening socket
        # close-on-exec so subprocesses spawned by handler methods do
        # not inherit it.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            fd = self.fileno()
            old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths.  This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):

        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate)
        # Maps an HTTP request path to the dispatcher serving it.
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding

    def add_dispatcher(self, path, dispatcher):
        # Register a dispatcher for path; returns it for chaining.
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        # Route the request to the dispatcher registered for this path.
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
               data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        response = self._marshaled_dispatch(request_text)

        # Emit CGI-style headers followed by the marshalled response.
        print 'Content-Type: text/xml'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = \
                 BaseHTTPServer.BaseHTTPRequestHandler.responses[code]

        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        print 'Status: %d %s' % (code, message)
        print 'Content-Type: %s' % BaseHTTPServer.DEFAULT_ERROR_CONTENT_TYPE
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_request(self, request_text = None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (TypeError, ValueError):
                # Missing/garbled CONTENT_LENGTH: read to EOF.
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)
if __name__ == '__main__':
    # Demo: serve the builtin pow() and an anonymous add() on port 8000.
    print 'Running XML-RPC server on port 8000'
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
|
alanjw/GreenOpenERP-Win-X86
|
python/Lib/SimpleXMLRPCServer.py
|
Python
|
agpl-3.0
| 26,475
|
[
"Brian"
] |
2790f1e0a63974e4d777596e3cc0f5ac06a2bad3d35436ee640e5249c39e3938
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
from frappe.desk.notifications import clear_doctype_notifications
from erpnext.controllers.selling_controller import SellingController
# Jinja templates used by the desk UI to render this doctype's child
# table grids; key is the child-table fieldname.
form_grid_templates = {
    "items": "templates/form_grid/item_grid.html"
}
# Raised when a stock-item row on the order has no delivery warehouse set
# (see SalesOrder.validate_for_items).
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
    """Controller for the Sales Order doctype.

    Validates the order on save and, on submit/cancel, keeps downstream
    documents (Delivery Note, Sales Invoice, Maintenance Schedule/Visit,
    Production Order) and the reserved stock quantities in sync.
    """

    def validate(self):
        # Shared SellingController checks run first, then the Sales
        # Order specific validations.
        super(SalesOrder, self).validate()

        self.validate_order_type()
        self.validate_delivery_date()
        self.validate_mandatory()
        self.validate_proj_cust()
        self.validate_po()
        self.validate_uom_is_integer("stock_uom", "qty")
        self.validate_for_items()
        self.validate_warehouse()

        from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
        make_packing_list(self,'items')

        self.validate_with_previous_doc()
        self.set_status()

        # Default the fulfilment trackers when unset (new documents).
        if not self.billing_status: self.billing_status = 'Not Billed'
        if not self.delivery_status: self.delivery_status = 'Not Delivered'

    def validate_mandatory(self):
        """Ensure delivery date is not before the order (transaction) date."""
        # validate transaction date v/s delivery date
        if self.delivery_date:
            if getdate(self.transaction_date) > getdate(self.delivery_date):
                frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))

    def validate_po(self):
        """Check customer PO date vs delivery date, and warn when another
        submitted order already references the same customer PO number."""
        # validate p.o date v/s delivery date
        if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
            frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))

        if self.po_no and self.customer:
            so = frappe.db.sql("select name from `tabSales Order` \
                where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
                and customer = %s", (self.po_no, self.name, self.customer))
            # Warning only (not an error), and suppressed when Selling
            # Settings explicitly allows multiple orders per customer PO.
            if so and so[0][0] and not \
                cint(frappe.db.get_single_value("Selling Settings",
                    "allow_against_multiple_purchase_orders")):
                frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no))

    def validate_for_items(self):
        """Per-row checks: a warehouse is mandatory for stock items (and
        bundles containing stock items); also stamps each row with the
        transaction date and the Bin's projected qty, and warns when the
        same item is entered more than once."""
        check_list = []
        for d in self.get('items'):
            check_list.append(cstr(d.item_code))

            if (frappe.db.get_value("Item", d.item_code, "is_stock_item")==1 or
                (self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
                and not d.warehouse:
                frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
                    WarehouseRequired)

            # used for production plan
            d.transaction_date = self.transaction_date

            tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
                where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
            d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0

        # check for same entry multiple times
        unique_chk_list = set(check_list)
        if len(unique_chk_list) != len(check_list) and \
            not cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
            frappe.msgprint(_("Warning: Same item has been entered multiple times."))

    def product_bundle_has_stock_item(self, product_bundle):
        """Returns true if product bundle has stock item"""
        ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
            where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
        return ret

    def validate_sales_mntc_quotation(self):
        """Ensure each referenced Quotation has the same order type as
        this Sales Order."""
        for d in self.get('items'):
            if d.prevdoc_docname:
                res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s",
                    (d.prevdoc_docname, self.order_type))
                if not res:
                    frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))

    def validate_order_type(self):
        super(SalesOrder, self).validate_order_type()

    def validate_delivery_date(self):
        """Delivery date is mandatory for 'Sales' type orders."""
        if self.order_type == 'Sales' and not self.delivery_date:
            frappe.throw(_("Please enter 'Expected Delivery Date'"))

        self.validate_sales_mntc_quotation()

    def validate_proj_cust(self):
        """Ensure the selected project either belongs to this customer
        or has no customer set."""
        if self.project_name and self.customer_name:
            res = frappe.db.sql("""select name from `tabProject` where name = %s
                and (customer = %s or ifnull(customer,'')='')""",
                    (self.project_name, self.customer))
            if not res:
                frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))

    def validate_warehouse(self):
        """Each row warehouse must belong to this document's company."""
        from erpnext.stock.utils import validate_warehouse_company

        warehouses = list(set([d.warehouse for d in
            self.get("items") if d.warehouse]))

        for w in warehouses:
            validate_warehouse_company(w, self.company)

    def validate_with_previous_doc(self):
        # Company and currency must match the source Quotation.
        super(SalesOrder, self).validate_with_previous_doc({
            "Quotation": {
                "ref_dn_field": "prevdoc_docname",
                "compare_fields": [["company", "="], ["currency", "="]]
            }
        })

    def update_enquiry_status(self, prevdoc, flag):
        """Set the status of the Opportunity behind the given Quotation."""
        enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
        if enq:
            frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))

    def update_prevdoc_status(self, flag):
        """Refresh the status of every Quotation referenced by the rows.
        `flag` distinguishes submit vs cancel but the same refresh runs
        for both; a cancelled Quotation blocks the operation."""
        for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
            if quotation:
                doc = frappe.get_doc("Quotation", quotation)
                if doc.docstatus==2:
                    frappe.throw(_("Quotation {0} is cancelled").format(quotation))

                doc.set_status(update=True)
                doc.update_opportunity()

    def on_submit(self):
        super(SalesOrder, self).on_submit()
        self.check_credit_limit()
        self.update_reserved_qty()

        frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.base_grand_total, self)
        self.update_prevdoc_status('submit')

    def on_cancel(self):
        # Cannot cancel stopped SO
        if self.status == 'Stopped':
            frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))

        self.check_nextdoc_docstatus()
        self.update_reserved_qty()
        self.update_prevdoc_status('cancel')

        frappe.db.set(self, 'status', 'Cancelled')

    def check_credit_limit(self):
        from erpnext.selling.doctype.customer.customer import check_credit_limit
        check_credit_limit(self.customer, self.company)

    def check_nextdoc_docstatus(self):
        """Block cancellation while any submitted downstream document
        (Delivery Note, Sales Invoice, Maintenance Schedule/Visit,
        Production Order) still references this order."""
        # Checks Delivery Note
        submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
            where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
        if submit_dn:
            frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))

        # Checks Sales Invoice
        submit_rv = frappe.db.sql_list("""select t1.name
            from `tabSales Invoice` t1,`tabSales Invoice Item` t2
            where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
            self.name)
        if submit_rv:
            frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))

        #check maintenance schedule
        submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
            `tabMaintenance Schedule Item` t2
            where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
        if submit_ms:
            frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))

        # check maintenance visit
        submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
            where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
        if submit_mv:
            frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))

        # check production order
        pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
            where sales_order = %s and docstatus = 1""", self.name)
        if pro_order:
            frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))

    def check_modified_date(self):
        """Throw if the stored document was modified after this in-memory
        copy was loaded (optimistic concurrency check)."""
        mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
        # NOTE(review): values are %-interpolated into the SQL rather than
        # parameterized; both come from the database/document, but
        # parameterization would still be safer — confirm before changing.
        date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
            ( mod_db, cstr(self.modified)))

        if date_diff and date_diff[0][0]:
            frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))

    def stop_sales_order(self):
        """Mark the order Stopped and release its reserved stock."""
        self.check_modified_date()
        self.db_set('status', 'Stopped')
        self.update_reserved_qty()
        self.notify_update()
        clear_doctype_notifications(self)

    def unstop_sales_order(self):
        """Revert a Stopped order: recompute status and re-reserve stock."""
        self.check_modified_date()
        self.db_set('status', 'Draft')
        self.set_status(update=True)
        self.update_reserved_qty()
        clear_doctype_notifications(self)

    def update_reserved_qty(self, so_item_rows=None):
        """update requested qty (before ordered_qty is updated)"""
        item_wh_list = []

        def _valid_for_reserve(item_code, warehouse):
            # Collect each (item, warehouse) pair once, stock items only.
            if item_code and warehouse and [item_code, warehouse] not in item_wh_list \
                and frappe.db.get_value("Item", item_code, "is_stock_item"):
                item_wh_list.append([item_code, warehouse])

        for d in self.get("items"):
            if (not so_item_rows or d.name in so_item_rows):
                _valid_for_reserve(d.item_code, d.warehouse)

                # For bundles, reserve the packed components instead.
                if self.has_product_bundle(d.item_code):
                    for p in self.get("packed_items"):
                        if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
                            _valid_for_reserve(p.item_code, p.warehouse)

        for item_code, warehouse in item_wh_list:
            update_bin_qty(item_code, warehouse, {
                "reserved_qty": get_reserved_qty(item_code, warehouse)
            })

    def on_update(self):
        pass
def get_list_context(context=None):
    """Portal list-view context for Sales Orders ("My Orders" page)."""
    from erpnext.controllers.website_list_for_contact import get_list_context
    list_context = get_list_context(context)
    list_context["title"] = _("My Orders")
    return list_context
@frappe.whitelist()
def stop_or_unstop_sales_orders(names, status):
    """Bulk stop/unstop submitted Sales Orders.

    names: JSON-encoded list of Sales Order names.
    status: "Stop" stops orders not already Stopped/Cancelled and not
    fully delivered+billed; any other value unstops Stopped orders.
    """
    if not frappe.has_permission("Sales Order", "write"):
        frappe.throw(_("Not permitted"), frappe.PermissionError)

    names = json.loads(names)
    for name in names:
        so = frappe.get_doc("Sales Order", name)
        # Only submitted documents can be stopped/unstopped.
        if so.docstatus == 1:
            if status=="Stop":
                if so.status not in ("Stopped", "Cancelled") and (so.per_delivered < 100 or so.per_billed < 100):
                    so.stop_sales_order()
            else:
                if so.status == "Stopped":
                    so.unstop_sales_order()

    # Suppress accumulated messages so the bulk action stays quiet.
    frappe.local.message_log = []
# NOTE(review): module-level function that takes `self` and calls
# super(SalesOrder, self) — it reads like a SalesOrder method hooked in
# from the recurring-document machinery; confirm how it is bound/called.
def before_recurring(self):
    """Clear fulfilment tracking fields before creating a recurring copy."""
    super(SalesOrder, self).before_recurring()

    for field in ("delivery_status", "per_delivered", "billing_status", "per_billed"):
        self.set(field, None)

    for d in self.get("items"):
        for field in ("delivered_qty", "billed_amt", "planned_qty", "prevdoc_docname"):
            d.set(field, None)
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
    """Map a submitted Sales Order to a Purchase-type Material Request.

    Uses Packed Item rows when the order has product bundles, otherwise
    the plain Sales Order Item rows.
    """
    def postprocess(source, doc):
        doc.material_request_type = "Purchase"

    so = frappe.get_doc("Sales Order", source_name)

    item_table = "Packed Item" if so.packed_items else "Sales Order Item"

    doc = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Material Request",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        item_table: {
            "doctype": "Material Request Item",
            "field_map": {
                "parent": "sales_order_no",
                "stock_uom": "uom"
            }
        }
    }, target_doc, postprocess)

    return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
    """Map a submitted Sales Order to a Delivery Note for the
    undelivered quantity of each row."""
    def set_missing_values(source, target):
        # Merge customer PO numbers when appending to an existing draft.
        if source.po_no:
            if target.po_no:
                target_po_no = target.po_no.split(", ")
                target_po_no.append(source.po_no)
                target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
            else:
                target.po_no = source.po_no

        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def update_item(source, target, source_parent):
        # Only the quantity still pending delivery is carried over.
        target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
        target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
        target.qty = flt(source.qty) - flt(source.delivered_qty)

    target_doc = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Delivery Note",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Delivery Note Item",
            "field_map": {
                "rate": "rate",
                "name": "so_detail",
                "parent": "against_sales_order",
            },
            "postprocess": update_item,
            # Skip rows that are already fully delivered.
            "condition": lambda doc: doc.delivered_qty < doc.qty
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "add_if_empty": True
        }
    }, target_doc, set_missing_values)

    return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
    """Map a submitted Sales Order to a Sales Invoice for the unbilled
    amount of each row, pulling in advance payments."""
    def postprocess(source, target):
        set_missing_values(source, target)
        #Get the advance paid Journal Entries in Sales Invoice Advance
        target.get_advances()

    def set_missing_values(source, target):
        target.is_pos = 0
        target.ignore_pricing_rule = 1
        target.run_method("set_missing_values")
        target.run_method("calculate_taxes_and_totals")

    def update_item(source, target, source_parent):
        # Invoice only the amount not yet billed; derive qty from it
        # when the row has a rate and a partial billing history.
        target.amount = flt(source.amount) - flt(source.billed_amt)
        target.base_amount = target.amount * flt(source_parent.conversion_rate)
        target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty

    doclist = get_mapped_doc("Sales Order", source_name, {
        "Sales Order": {
            "doctype": "Sales Invoice",
            "validation": {
                "docstatus": ["=", 1]
            }
        },
        "Sales Order Item": {
            "doctype": "Sales Invoice Item",
            "field_map": {
                "name": "so_detail",
                "parent": "sales_order",
            },
            "postprocess": update_item,
            # Include zero-amount rows, otherwise only not-fully-billed.
            "condition": lambda doc: doc.qty and (doc.base_amount==0 or doc.billed_amt < doc.amount)
        },
        "Sales Taxes and Charges": {
            "doctype": "Sales Taxes and Charges",
            "add_if_empty": True
        },
        "Sales Team": {
            "doctype": "Sales Team",
            "add_if_empty": True
        }
    }, target_doc, postprocess)

    return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
    """Map a submitted Sales Order to a Maintenance Schedule.

    Returns None (implicitly) when a submitted schedule already exists
    for this order.
    """
    maint_schedule = frappe.db.sql("""select t1.name
        from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)

    if not maint_schedule:
        doclist = get_mapped_doc("Sales Order", source_name, {
            "Sales Order": {
                "doctype": "Maintenance Schedule",
                "field_map": {
                    "name": "sales_order_no"
                },
                "validation": {
                    "docstatus": ["=", 1]
                }
            },
            "Sales Order Item": {
                "doctype": "Maintenance Schedule Item",
                "field_map": {
                    "parent": "prevdoc_docname"
                },
                "add_if_empty": True
            }
        }, target_doc)

        return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
    """Map a submitted Sales Order to a Maintenance Visit.

    Returns None (implicitly) when a fully completed, submitted visit
    already exists for this order.
    """
    visit = frappe.db.sql("""select t1.name
        from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
        where t2.parent=t1.name and t2.prevdoc_docname=%s
        and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)

    if not visit:
        doclist = get_mapped_doc("Sales Order", source_name, {
            "Sales Order": {
                "doctype": "Maintenance Visit",
                "field_map": {
                    "name": "sales_order_no"
                },
                "validation": {
                    "docstatus": ["=", 1]
                }
            },
            "Sales Order Item": {
                "doctype": "Maintenance Visit Purpose",
                "field_map": {
                    "parent": "prevdoc_docname",
                    "parenttype": "prevdoc_doctype"
                },
                "add_if_empty": True
            }
        }, target_doc)

        return doclist
@frappe.whitelist()
def get_events(start, end, filters=None):
    """Returns events for Gantt / Calendar view rendering.

    :param start: Start date-time.
    :param end: End date-time.
    :param filters: Filters (JSON).
    """
    from frappe.desk.calendar import get_event_conditions
    # Extra WHERE conditions derived from the caller's filters.
    conditions = get_event_conditions("Sales Order", filters)

    data = frappe.db.sql("""select name, customer_name, delivery_status, billing_status, delivery_date
        from `tabSales Order`
        where (ifnull(delivery_date, '0000-00-00')!= '0000-00-00') \
            and (delivery_date between %(start)s and %(end)s) {conditions}
        """.format(conditions=conditions), {
            "start": start,
            "end": end
        }, as_dict=True, update={"allDay": 0})
    return data
|
gangadharkadam/saloon_erp_install
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 16,677
|
[
"VisIt"
] |
9f2b6f7dade695e31476ca129762fef1947ce1e9aac217a3254e55090a3715a7
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
GrassAlgorithm.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import time
import uuid
import importlib
import re
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsRasterLayer
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import (getParameterFromString,
ParameterVector,
ParameterMultipleInput,
ParameterExtent,
ParameterNumber,
ParameterSelection,
ParameterRaster,
ParameterTable,
ParameterBoolean,
ParameterString,
ParameterPoint)
from processing.core.outputs import (getOutputFromString,
OutputRaster,
OutputVector,
OutputFile,
OutputHTML)
from .GrassUtils import GrassUtils
from processing.tools import dataobjects, system
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class GrassAlgorithm(GeoAlgorithm):
    """Processing wrapper around a single GRASS GIS (6.x) module.

    Each instance is driven by a plain-text description file that names the
    GRASS module and lists its parameters and outputs.  At run time the
    algorithm exports the input layers into a GRASS mapset, assembles the
    shell commands for the module, runs them, and exports the results back
    into formats QGIS can read.
    """

    # Names of the synthetic parameters this wrapper adds on top of the
    # module's own parameters (region, cellsize, v.in.ogr tuning, ...).
    GRASS_OUTPUT_TYPE_PARAMETER = 'GRASS_OUTPUT_TYPE_PARAMETER'
    GRASS_MIN_AREA_PARAMETER = 'GRASS_MIN_AREA_PARAMETER'
    GRASS_SNAP_TOLERANCE_PARAMETER = 'GRASS_SNAP_TOLERANCE_PARAMETER'
    GRASS_REGION_EXTENT_PARAMETER = 'GRASS_REGION_PARAMETER'
    GRASS_REGION_CELLSIZE_PARAMETER = 'GRASS_REGION_CELLSIZE_PARAMETER'
    GRASS_REGION_ALIGN_TO_RESOLUTION = '-a_r.region'

    # Geometry types offered for v.out.ogr when the module has exactly one
    # vector output ('auto' lets v.out.ogr decide).
    OUTPUT_TYPES = ['auto', 'point', 'line', 'area']

    def __init__(self, descriptionfile):
        """Build the algorithm from its description file."""
        GeoAlgorithm.__init__(self)
        # Literal command fragments copied verbatim from 'Hardcoded|' lines
        # in the description file.
        self.hardcodedStrings = []
        self.descriptionFile = descriptionfile
        self.defineCharacteristicsFromFile()
        self.numExportedLayers = 0

    def getCopy(self):
        """Return a fresh instance bound to the same description file."""
        newone = GrassAlgorithm(self.descriptionFile)
        newone.provider = self.provider
        return newone

    def getIcon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'grass.svg'))

    def help(self):
        # First element False means "this is a URL, not inline help text".
        return False, 'http://grass.osgeo.org/grass64/manuals/' + self.grassName + '.html'

    def getParameterDescriptions(self):
        """Scrape per-parameter descriptions from the module's HTML manual.

        Returns a dict mapping parameter name -> one-line description.  Any
        failure (e.g. the help file is a URL that cannot be opened, as with
        the default help() above) silently yields an empty/partial dict.
        """
        descs = {}
        _, helpfile = self.help()
        try:
            infile = open(helpfile)
            lines = infile.readlines()
            for i in range(len(lines)):
                if lines[i].startswith('<DT><b>'):
                    for param in self.parameters:
                        searchLine = '<b>' + param.name + '</b>'
                        if searchLine in lines[i]:
                            # The description is on the line following the
                            # <DT> tag; strip the surrounding markup.
                            # NOTE(review): the inner `i += 1` does not
                            # advance the outer range() loop, so the <DD>
                            # line is revisited by the outer scan.
                            i += 1
                            descs[param.name] = (lines[i])[4:-6]
                            break
            infile.close()
        except Exception:
            pass
        return descs

    def defineCharacteristicsFromFile(self):
        """Parse the description file into name, group, parameters, outputs.

        File layout: line 1 = GRASS module name, line 2 = display name,
        line 3 = group; the remaining non-empty lines each declare a
        hardcoded string, a parameter ('*' prefix = advanced) or an output.
        Synthetic region/v.in.ogr/v.out.ogr parameters are appended at the
        end depending on what kinds of inputs/outputs were declared.
        """
        lines = open(self.descriptionFile)
        line = lines.readline().strip('\n').strip()
        self.grassName = line
        line = lines.readline().strip('\n').strip()
        self.name = line
        self.i18n_name = QCoreApplication.translate("GrassAlgorithm", line)
        if " - " not in self.name:
            # Prefix the display name with the module name for searchability.
            self.name = self.grassName + " - " + self.name
            self.i18n_name = self.grassName + " - " + self.i18n_name
        line = lines.readline().strip('\n').strip()
        self.group = line
        self.i18n_group = QCoreApplication.translate("GrassAlgorithm", line)
        hasRasterOutput = False
        hasVectorInput = False
        vectorOutputs = 0
        line = lines.readline().strip('\n').strip()
        while line != '':
            try:
                line = line.strip('\n').strip()
                if line.startswith('Hardcoded'):
                    self.hardcodedStrings.append(line[len('Hardcoded|'):])
                elif line.startswith('Parameter'):
                    parameter = getParameterFromString(line)
                    self.addParameter(parameter)
                    if isinstance(parameter, ParameterVector):
                        hasVectorInput = True
                    # datatype < 3 covers the vector multiple-input kinds
                    # (point/line/polygon/any) -- TODO confirm against
                    # ParameterMultipleInput's datatype constants.
                    if isinstance(parameter, ParameterMultipleInput) \
                            and parameter.datatype < 3:
                        hasVectorInput = True
                elif line.startswith('*Parameter'):
                    # Leading '*' marks an advanced parameter.
                    param = getParameterFromString(line[1:])
                    param.isAdvanced = True
                    self.addParameter(param)
                else:
                    output = getOutputFromString(line)
                    self.addOutput(output)
                    if isinstance(output, OutputRaster):
                        hasRasterOutput = True
                    elif isinstance(output, OutputVector):
                        vectorOutputs += 1
                    if isinstance(output, OutputHTML):
                        # HTML outputs get a companion raw-text file that
                        # captures the module's stdout (see processAlgorithm).
                        self.addOutput(OutputFile("rawoutput", output.description +
                                                  " (raw output)", "txt"))
                line = lines.readline().strip('\n').strip()
            except Exception as e:
                ProcessingLog.addToLog(
                    ProcessingLog.LOG_ERROR,
                    self.tr('Could not open GRASS algorithm: %s.\n%s' % (self.descriptionFile, line)))
                raise e
        lines.close()
        # Every GRASS run needs a computational region.
        self.addParameter(ParameterExtent(
            self.GRASS_REGION_EXTENT_PARAMETER,
            self.tr('GRASS region extent'))
        )
        if hasRasterOutput:
            self.addParameter(ParameterNumber(
                self.GRASS_REGION_CELLSIZE_PARAMETER,
                self.tr('GRASS region cellsize (leave 0 for default)'),
                0, None, 0.0))
        if hasVectorInput:
            # Advanced knobs forwarded to v.in.ogr during layer import.
            param = ParameterNumber(self.GRASS_SNAP_TOLERANCE_PARAMETER,
                                    'v.in.ogr snap tolerance (-1 = no snap)',
                                    -1, None, -1.0)
            param.isAdvanced = True
            self.addParameter(param)
            param = ParameterNumber(self.GRASS_MIN_AREA_PARAMETER,
                                    'v.in.ogr min area', 0, None, 0.0001)
            param.isAdvanced = True
            self.addParameter(param)
        if vectorOutputs == 1:
            # Only offered when there is exactly one vector output, so the
            # single choice unambiguously applies to it.
            param = ParameterSelection(self.GRASS_OUTPUT_TYPE_PARAMETER,
                                       'v.out.ogr output type',
                                       self.OUTPUT_TYPES)
            param.isAdvanced = True
            self.addParameter(param)

    def getDefaultCellsize(self):
        """Largest x-resolution among the raster inputs (fallback 100)."""
        cellsize = 0
        for param in self.parameters:
            if param.value:
                if isinstance(param, ParameterRaster):
                    if isinstance(param.value, QgsRasterLayer):
                        layer = param.value
                    else:
                        layer = dataobjects.getObjectFromUri(param.value)
                    cellsize = max(cellsize, (layer.extent().xMaximum()
                                   - layer.extent().xMinimum())
                                   / layer.width())
                elif isinstance(param, ParameterMultipleInput):
                    layers = param.value.split(';')
                    for layername in layers:
                        layer = dataobjects.getObjectFromUri(layername)
                        if isinstance(layer, QgsRasterLayer):
                            cellsize = max(cellsize, (
                                layer.extent().xMaximum()
                                - layer.extent().xMinimum())
                                / layer.width()
                            )
        if cellsize == 0:
            # No raster inputs to derive a resolution from.
            cellsize = 100
        return cellsize

    def processAlgorithm(self, progress):
        """Run the wrapped GRASS module.

        Four phases: (1) export input layers into the GRASS mapset,
        (2) set the region and build the module command line, (3) export
        result layers back to QGIS-readable files, (4) execute everything.
        """
        if system.isWindows():
            path = GrassUtils.grassPath()
            if path == '':
                raise GeoAlgorithmExecutionException(
                    self.tr('GRASS folder is not configured.\nPlease '
                            'configure it before running GRASS algorithms.'))
        commands = []
        self.exportedLayers = {}
        outputCommands = []

        # If GRASS session has been created outside of this algorithm then
        # get the list of layers loaded in GRASS otherwise start a new
        # session
        existingSession = GrassUtils.sessionRunning
        if existingSession:
            self.exportedLayers = GrassUtils.getSessionLayers()
        else:
            GrassUtils.startGrassSession()

        # 1: Export layer to grass mapset
        for param in self.parameters:
            if isinstance(param, ParameterRaster):
                if param.value is None:
                    continue
                value = param.value
                # Check if the layer hasn't already been exported in, for
                # example, previous GRASS calls in this session
                if value in self.exportedLayers.keys():
                    continue
                else:
                    self.setSessionProjectionFromLayer(value, commands)
                    commands.append(self.exportRasterLayer(value))
            if isinstance(param, ParameterVector):
                if param.value is None:
                    continue
                value = param.value
                if value in self.exportedLayers.keys():
                    continue
                else:
                    self.setSessionProjectionFromLayer(value, commands)
                    commands.append(self.exportVectorLayer(value))
            if isinstance(param, ParameterTable):
                # Tables need no export step.
                pass
            if isinstance(param, ParameterMultipleInput):
                if param.value is None:
                    continue
                layers = param.value.split(';')
                if layers is None or len(layers) == 0:
                    continue
                if param.datatype == ParameterMultipleInput.TYPE_RASTER:
                    for layer in layers:
                        if layer in self.exportedLayers.keys():
                            continue
                        else:
                            self.setSessionProjectionFromLayer(layer, commands)
                            commands.append(self.exportRasterLayer(layer))
                elif param.datatype in [ParameterMultipleInput.TYPE_VECTOR_ANY,
                                        ParameterMultipleInput.TYPE_VECTOR_LINE,
                                        ParameterMultipleInput.TYPE_VECTOR_POLYGON,
                                        ParameterMultipleInput.TYPE_VECTOR_POINT]:
                    for layer in layers:
                        if layer in self.exportedLayers.keys():
                            continue
                        else:
                            self.setSessionProjectionFromLayer(layer, commands)
                            commands.append(self.exportVectorLayer(layer))

        # Fall back to the project CRS if no layer set the projection above.
        self.setSessionProjectionFromProject(commands)

        # The extent parameter string is "xmin,xmax,ymin,ymax" -- hence the
        # index mapping below (n=ymax, s=ymin, e=xmax, w=xmin).
        region = \
            unicode(self.getParameterValue(self.GRASS_REGION_EXTENT_PARAMETER))
        regionCoords = region.split(',')
        command = 'g.region'
        command += ' n=' + unicode(regionCoords[3])
        command += ' s=' + unicode(regionCoords[2])
        command += ' e=' + unicode(regionCoords[1])
        command += ' w=' + unicode(regionCoords[0])
        cellsize = self.getParameterValue(self.GRASS_REGION_CELLSIZE_PARAMETER)
        if cellsize:
            command += ' res=' + unicode(cellsize)
        else:
            command += ' res=' + unicode(self.getDefaultCellsize())
        alignToResolution = \
            self.getParameterValue(self.GRASS_REGION_ALIGN_TO_RESOLUTION)
        if alignToResolution:
            command += ' -a'
        commands.append(command)

        # 2: Set parameters and outputs
        command = self.grassName
        command += ' ' + ' '.join(self.hardcodedStrings)

        for param in self.parameters:
            if param.value is None or param.value == '':
                continue
            # Skip the synthetic wrapper parameters: they are consumed above
            # or during import/export, not by the module itself.
            if param.name in [self.GRASS_REGION_CELLSIZE_PARAMETER, self.GRASS_REGION_EXTENT_PARAMETER,
                              self.GRASS_MIN_AREA_PARAMETER, self.GRASS_SNAP_TOLERANCE_PARAMETER,
                              self.GRASS_OUTPUT_TYPE_PARAMETER, self.GRASS_REGION_ALIGN_TO_RESOLUTION]:
                continue
            if isinstance(param, (ParameterRaster, ParameterVector)):
                value = param.value
                # Use the in-mapset name if the layer was exported.
                if value in self.exportedLayers.keys():
                    command += ' %s="%s"' % (param.name, self.exportedLayers[value])
                else:
                    command += ' %s="%s"' % (param.name, value)
            elif isinstance(param, ParameterMultipleInput):
                s = param.value
                for layer in self.exportedLayers.keys():
                    s = s.replace(layer, self.exportedLayers[layer])
                s = s.replace(';', ',')
                command += ' %s="%s"' % (param.name, s)
            elif isinstance(param, ParameterBoolean):
                # Boolean parameters are GRASS flags such as '-a'.
                if param.value:
                    command += ' ' + param.name
            elif isinstance(param, ParameterSelection):
                idx = int(param.value)
                command += ' ' + param.name + '=' + unicode(param.options[idx])
            elif isinstance(param, ParameterString):
                command += ' ' + param.name + '="' + unicode(param.value) + '"'
            elif isinstance(param, ParameterPoint):
                command += ' ' + param.name + '=' + unicode(param.value)
            else:
                command += ' ' + param.name + '="' + unicode(param.value) + '"'

        uniqueSufix = unicode(uuid.uuid4()).replace('-', '')
        for out in self.outputs:
            if isinstance(out, OutputFile):
                # Text outputs just capture the module's stdout.
                command += ' > ' + out.value
            elif not isinstance(out, OutputHTML):
                # We add an output name to make sure it is unique if the session
                # uses this algorithm several times.
                uniqueOutputName = out.name + uniqueSufix
                command += ' ' + out.name + '=' + uniqueOutputName

                # Add output file to exported layers, to indicate that
                # they are present in GRASS
                self.exportedLayers[out.value] = uniqueOutputName

        command += ' --overwrite'
        commands.append(command)

        # 3: Export resulting layers to a format that qgis can read
        for out in self.outputs:
            if isinstance(out, OutputRaster):
                filename = out.getCompatibleFileName(self)
                # Raster layer output: adjust region to layer before
                # exporting
                commands.append('g.region rast=' + out.name + uniqueSufix)
                outputCommands.append('g.region rast=' + out.name
                                      + uniqueSufix)
                if self.grassName == 'r.composite':
                    # r.composite results must go through r.out.tiff --
                    # presumably because r.out.gdal mishandles its composite
                    # color table; verify against GRASS docs.
                    command = 'r.out.tiff -t --verbose'
                    command += ' input='
                    command += out.name + uniqueSufix
                    command += ' output="' + filename + '"'
                    commands.append(command)
                    outputCommands.append(command)
                else:
                    command = 'r.out.gdal -c createopt="TFW=YES,COMPRESS=LZW"'
                    command += ' input='
                    if self.grassName == 'r.horizon':
                        # r.horizon writes maps suffixed with the angle step;
                        # export the first one ('_0').
                        command += out.name + uniqueSufix + '_0'
                    else:
                        command += out.name + uniqueSufix
                    command += ' output="' + filename + '"'
                    commands.append(command)
                    outputCommands.append(command)

            if isinstance(out, OutputVector):
                filename = out.getCompatibleFileName(self)
                command = 'v.out.ogr -s -c -e -z input=' + out.name + uniqueSufix
                command += ' dsn="' + os.path.dirname(filename) + '"'
                command += ' format=ESRI_Shapefile'
                command += ' olayer="%s"' % os.path.splitext(os.path.basename(filename))[0]
                typeidx = \
                    self.getParameterValue(self.GRASS_OUTPUT_TYPE_PARAMETER)
                outtype = ('auto' if typeidx
                           is None else self.OUTPUT_TYPES[typeidx])
                command += ' type=' + outtype
                commands.append(command)
                outputCommands.append(command)

        # 4: Run GRASS
        loglines = []
        loglines.append(self.tr('GRASS execution commands'))
        for line in commands:
            progress.setCommand(line)
            loglines.append(line)
        if ProcessingConfig.getSetting(GrassUtils.GRASS_LOG_COMMANDS):
            ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
        GrassUtils.executeGrass(commands, progress, outputCommands)

        # Wrap the captured raw stdout in <pre> for the HTML output.
        for out in self.outputs:
            if isinstance(out, OutputHTML):
                with open(self.getOutputFromName("rawoutput").value) as f:
                    rawOutput = "".join(f.readlines())
                with open(out.value, "w") as f:
                    f.write("<pre>%s</pre>" % rawOutput)

        # If the session has been created outside of this algorithm, add
        # the new GRASS layers to it otherwise finish the session
        if existingSession:
            GrassUtils.addSessionLayers(self.exportedLayers)
        else:
            GrassUtils.endGrassSession()

    def exportVectorLayer(self, orgFilename):
        """Build the v.in.ogr command importing a vector layer into GRASS.

        Non-shapefile sources (and sources with an active selection, when
        the 'use selected' setting is on) are first re-exported by QGIS.
        """
        # TODO: improve this. We are now exporting if it is not a shapefile,
        # but the functionality of v.in.ogr could be used for this.
        # We also export if there is a selection
        if not os.path.exists(orgFilename) or not orgFilename.endswith('shp'):
            layer = dataobjects.getObjectFromUri(orgFilename, False)
            if layer:
                filename = dataobjects.exportVectorLayer(layer)
        else:
            layer = dataobjects.getObjectFromUri(orgFilename, False)
            if layer:
                useSelection = \
                    ProcessingConfig.getSetting(ProcessingConfig.USE_SELECTED)
                if useSelection and layer.selectedFeatureCount() != 0:
                    filename = dataobjects.exportVectorLayer(layer)
                else:
                    filename = orgFilename
            else:
                filename = orgFilename
        destFilename = self.getTempFilename()
        self.exportedLayers[orgFilename] = destFilename
        command = 'v.in.ogr'
        min_area = self.getParameterValue(self.GRASS_MIN_AREA_PARAMETER)
        command += ' min_area=' + unicode(min_area)
        snap = self.getParameterValue(self.GRASS_SNAP_TOLERANCE_PARAMETER)
        command += ' snap=' + unicode(snap)
        command += ' dsn="%s"' % os.path.dirname(filename)
        command += ' layer="%s"' % os.path.basename(filename)[:-4]
        command += ' output=' + destFilename
        command += ' --overwrite -o'
        return command

    def setSessionProjectionFromProject(self, commands):
        """Set the GRASS location CRS from the project, once per session."""
        if not GrassUtils.projectionSet:
            proj4 = iface.mapCanvas().mapSettings().destinationCrs().toProj4()
            command = 'g.proj'
            command += ' -c'
            command += ' proj4="' + proj4 + '"'
            commands.append(command)
            GrassUtils.projectionSet = True

    def setSessionProjectionFromLayer(self, layer, commands):
        """Set the GRASS location CRS from a layer, once per session."""
        if not GrassUtils.projectionSet:
            qGisLayer = dataobjects.getObjectFromUri(layer)
            if qGisLayer:
                proj4 = unicode(qGisLayer.crs().toProj4())
                command = 'g.proj'
                command += ' -c'
                command += ' proj4="' + proj4 + '"'
                commands.append(command)
                GrassUtils.projectionSet = True

    def exportRasterLayer(self, layer):
        """Build the command importing a raster into GRASS.

        netCDF/HDF sources are copied in with r.in.gdal; everything else is
        linked with r.external to avoid a copy.
        """
        destFilename = self.getTempFilename()
        self.exportedLayers[layer] = destFilename
        if bool(re.match('netcdf', layer, re.I)) or bool(re.match('hdf', layer, re.I)):
            command = 'r.in.gdal'
        else:
            command = 'r.external -r'
        command += ' input="' + layer + '"'
        command += ' band=1'
        command += ' output=' + destFilename
        command += ' --overwrite -o'
        return command

    def getTempFilename(self):
        # Timestamp plus export counter keeps in-mapset names unique.
        filename = 'tmp' + unicode(time.time()).replace('.', '') \
            + unicode(system.getNumExportedLayers())
        return filename

    def commandLineName(self):
        return 'grass:' + self.name[:self.name.find(' ')]

    def checkBeforeOpeningParametersDialog(self):
        return GrassUtils.checkGrassIsInstalled()

    def checkParameterValuesBeforeExecuting(self):
        """Delegate extra validation to a per-module ext hook, if present."""
        name = self.commandLineName().replace('.', '_')[len('grass:'):]
        try:
            module = importlib.import_module('processing.algs.grass.ext.' + name)
        except ImportError:
            return
        if hasattr(module, 'checkParameterValuesBeforeExecuting'):
            func = getattr(module, 'checkParameterValuesBeforeExecuting')
            return func(self)
|
AsgerPetersen/QGIS
|
python/plugins/processing/algs/grass/GrassAlgorithm.py
|
Python
|
gpl-2.0
| 22,802
|
[
"NetCDF"
] |
3c01b62e53977f5b5f90a87b07e2cb580847713bfd3541b2b59a1f2a74f2d620
|
"""
@package medpy.filter.image
Filters for multi-dimensional images.
These filter rely heavily on and are modelled after the scipy.ndimage package.
@author Oskar Maier
@version d0.2.0
@since 2013-11-29
@status Development
"""
# build-in module
import itertools
# third-party modules
import numpy
from scipy.ndimage.filters import convolve, gaussian_filter
from scipy.ndimage._ni_support import _get_output
# own modules
from medpy.filter.utilities import pad, __make_footprint
# code
def sls(minuend, subtrahend, metric = "ssd", noise = "global", signed = True,
        sn_size = None, sn_footprint = None, sn_mode = "reflect", sn_cval = 0.0,
        pn_size = None, pn_footprint = None, pn_mode = "reflect", pn_cval = 0.0):
    """
    Computes the signed local similarity between two images.

    Compares a patch around each voxel of the minuend array to a number of patches
    centered at the points of a search neighbourhood in the subtrahend. Thus, creates
    a multi-dimensional measure of patch similarity between the minuend and a
    corresponding search area in the subtrahend.

    This filter can also be used to compute local self-similarity, obtaining a
    descriptor similar to the one described in [1].

    minuend : array_like
        Input array from which to subtract the subtrahend.
    subtrahend : array_like
        Input array to subtract from the minuend.
    metric : {'ssd', 'mi', 'nmi', 'ncc'}, optional
        The `metric` parameter determines the metric used to compute the
        filter output. Default is 'ssd'.
    noise : {'global', 'local'}, optional
        The `noise` parameter determines how the noise is handled. If set
        to 'global', the variance determining the noise is a scalar, if
        set to 'local', it is a Gaussian smoothed field of estimated local
        noise. Default is 'global'.
    signed : bool, optional
        Whether the filter output should be signed or not. If set to 'False',
        only the absolute values will be returned. Default is 'True'.
    sn_size : scalar or tuple, optional
        See sn_footprint, below
    sn_footprint : array, optional
        The search neighbourhood.
        Either `sn_size` or `sn_footprint` must be defined. `sn_size` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        `sn_footprint` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``sn_size=(n,m)`` is equivalent
        to ``sn_footprint=np.ones((n,m))``. We adjust `sn_size` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and `sn_size` is 2, then the actual size used is
        (2,2,2).
    sn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `sn_mode` parameter determines how the array borders are
        handled, where `sn_cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    sn_cval : scalar, optional
        Value to fill past edges of input if `sn_mode` is 'constant'. Default
        is 0.0
    pn_size : scalar or tuple, optional
        See pn_footprint, below
    pn_footprint : array, optional
        The patch over which the distance measure is applied.
        Either `pn_size` or `pn_footprint` must be defined. `pn_size` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        `pn_footprint` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``pn_size=(n,m)`` is equivalent
        to ``pn_footprint=np.ones((n,m))``. We adjust `pn_size` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and `pn_size` is 2, then the actual size used is
        (2,2,2).
    pn_mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `pn_mode` parameter determines how the array borders are
        handled, where `pn_cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    pn_cval : scalar, optional
        Value to fill past edges of input if `pn_mode` is 'constant'. Default
        is 0.0

    Returns
    -------
    sls : ndarray
        Array of shape ``minuend.shape + (k,)`` where k is the number of
        active search-neighbourhood positions; the similarity responses are
        stacked along the last axis.

    [1] Mattias P. Heinrich, Mark Jenkinson, Manav Bhushan, Tahreema Matin, Fergus V. Gleeson, Sir Michael Brady, Julia A. Schnabel
    MIND: Modality independent neighbourhood descriptor for multi-modal deformable registration
    Medical Image Analysis, Volume 16, Issue 7, October 2012, Pages 1423-1435, ISSN 1361-8415
    http://dx.doi.org/10.1016/j.media.2012.05.008
    """
    minuend = numpy.asarray(minuend)
    subtrahend = numpy.asarray(subtrahend)

    if numpy.iscomplexobj(minuend):
        raise TypeError('complex type not supported')
    if numpy.iscomplexobj(subtrahend):
        raise TypeError('complex type not supported')

    mshape = [ii for ii in minuend.shape if ii > 0]
    sshape = [ii for ii in subtrahend.shape if ii > 0]
    if not len(mshape) == len(sshape):
        raise RuntimeError("minuend and subtrahend must be of same shape")
    if not numpy.all([sm == ss for sm, ss in zip(mshape, sshape)]):
        raise RuntimeError("minuend and subtrahend must be of same shape")

    sn_footprint = __make_footprint(minuend, sn_size, sn_footprint)
    sn_fshape = [ii for ii in sn_footprint.shape if ii > 0]
    if len(sn_fshape) != minuend.ndim:
        raise RuntimeError('search neighbourhood footprint array has incorrect shape.')

    #!TODO: Is this required?
    if not sn_footprint.flags.contiguous:
        sn_footprint = sn_footprint.copy()

    # created a padded copy of the subtrahend, whereas the padding mode is always 'reflect'
    subtrahend = pad(subtrahend, footprint=sn_footprint, mode=sn_mode, cval=sn_cval)

    # compute slicers for position where the search neighbourhood sn_footprint is TRUE
    slicers = [[slice(x, (x + 1) - d if 0 != (x + 1) - d else None) for x in range(d)] for d in sn_fshape]
    slicers = [sl for sl, tv in zip(itertools.product(*slicers), sn_footprint.flat) if tv]

    # compute difference images and sign images for search neighbourhood elements
    ssds = [ssd(minuend, subtrahend[slicer], normalized=True, signed=signed, size=pn_size, footprint=pn_footprint, mode=pn_mode, cval=pn_cval) for slicer in slicers]
    distance = [x[0] for x in ssds]
    distance_sign = [x[1] for x in ssds]

    # compute local variance, which constitutes an approximation of local noise, out of patch-distances over the neighbourhood structure
    variance = numpy.average(distance, 0)
    variance = gaussian_filter(variance, sigma=3) #!TODO: Figure out if a fixed sigma is desirable here... I think that yes
    if 'global' == noise:
        # Bug fix: numpy.product was deprecated and removed (NumPy 2.0);
        # numpy.prod is the supported equivalent.
        variance = variance.sum() / float(numpy.prod(variance.shape))
    # variance[variance < variance_global / 10.] = variance_global / 10. #!TODO: Should I keep this i.e. regularizing the variance to be at least 10% of the global one?

    # compute sls
    sls = [dist_sign * numpy.exp(-1 * (dist / variance)) for dist_sign, dist in zip(distance_sign, distance)]

    # convert into sls image, swapping dimensions to have varying patches in the last dimension
    return numpy.rollaxis(numpy.asarray(sls), 0, minuend.ndim + 1)
def ssd(minuend, subtrahend, normalized=True, signed=False, size=None, footprint=None, mode="reflect", cval=0.0, origin=0):
    """
    Computes the SSD between patches of minuend and subtrahend.

    minuend : array_like
        Input array from which to subtract the subtrahend.
    subtrahend : array_like
        Input array to subtract from the minuend.
    normalized : bool, optional
        Whether the SSD of each patch should be divided through the filter size for
        normalization. Default is 'True'.
    signed : bool, optional
        Whether the accumulative sign of each patch should be returned as well. If
        'True', the second return value is a numpy.sign array, otherwise the scalar '1'.
        Default is 'False'.
    size : scalar or tuple, optional
        See footprint, below
    footprint : array, optional
        The patch over which to compute the SSD.
        Either `size` or `footprint` must be defined. `size` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        `footprint` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``size=(n,m)`` is equivalent
        to ``footprint=np.ones((n,m))``. We adjust `size` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and `size` is 2, then the actual size used is
        (2,2,2).
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0

    Returns
    -------
    (distance, distance_sign) : tuple
        Patch-wise (normalized) SSD and, when `signed`, the accumulated
        sign of the patch differences (otherwise the scalar 1).
    """
    convolution_filter = average_filter if normalized else sum_filter
    # Bug fix: numpy.float was removed in NumPy >= 1.24; it was always a
    # plain alias of the builtin float.
    output = float if normalized else minuend.dtype

    if signed:
        difference = minuend - subtrahend
        difference_squared = numpy.square(difference)
        distance_sign = numpy.sign(convolution_filter(numpy.sign(difference) * difference_squared, size=size, footprint=footprint, mode=mode, cval=cval, origin=origin, output=output))
        # Bug fix: this call previously dropped origin=origin, unlike the
        # sign computation above and the unsigned branch below, so the two
        # maps could be misaligned for non-zero origins.
        distance = convolution_filter(difference_squared, size=size, footprint=footprint, mode=mode, cval=cval, origin=origin, output=output)
    else:
        distance = convolution_filter(numpy.square(minuend - subtrahend), size=size, footprint=footprint, mode=mode, cval=cval, origin=origin, output=output)
        distance_sign = 1

    return distance, distance_sign
def average_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
    """
    Calculates a multi-dimensional average filter.

    Parameters
    ----------
    input : array-like
        input array to filter
    size : scalar or tuple, optional
        See footprint, below
    footprint : array, optional
        Either `size` or `footprint` must be defined. `size` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        `footprint` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``size=(n,m)`` is equivalent
        to ``footprint=np.ones((n,m))``. We adjust `size` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and `size` is 2, then the actual size used is
        (2,2,2).
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    origin : scalar, optional
        The ``origin`` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    average_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    Convenience implementation: a sum filter whose result is divided by the
    number of active footprint elements.

    See Also
    --------
    sum_filter : The underlying summing convolution.
    """
    # Resolve size/footprint into a concrete footprint array and count its
    # active elements -- that count is the normalisation divisor.
    fp = __make_footprint(input, size, footprint)
    n_active = fp.sum()

    out_array, return_value = _get_output(output, input)

    # Sum over the footprint, then normalise in place.
    sum_filter(input, footprint=fp, output=out_array, mode=mode, cval=cval, origin=origin)
    out_array /= n_active

    return return_value
def sum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0):
    """
    Calculates a multi-dimensional sum filter.

    Parameters
    ----------
    input : array-like
        input array to filter
    size : scalar or tuple, optional
        See footprint, below
    footprint : array, optional
        Either `size` or `footprint` must be defined. `size` gives
        the shape that is taken from the input array, at every element
        position, to define the input to the filter function.
        `footprint` is a boolean array that specifies (implicitly) a
        shape, but also which of the elements within this shape will get
        passed to the filter function. Thus ``size=(n,m)`` is equivalent
        to ``footprint=np.ones((n,m))``. We adjust `size` to the number
        of dimensions of the input array, so that, if the input array is
        shape (10,10,10), and `size` is 2, then the actual size used is
        (2,2,2).
    output : array, optional
        The ``output`` parameter passes an array in which to store the
        filter output.
    mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
        The ``mode`` parameter determines how the array borders are
        handled, where ``cval`` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` is 'constant'. Default
        is 0.0
    origin : scalar, optional
        The ``origin`` parameter controls the placement of the filter.
        Default 0

    Returns
    -------
    sum_filter : ndarray
        Returned array of same shape as `input`.

    Notes
    -----
    Convenience implementation employing convolve.

    See Also
    --------
    convolve : Convolve an image with a kernel.
    """
    footprint = __make_footprint(input, size, footprint)
    # Convolution flips the kernel; reversing the footprint along every axis
    # turns the convolution into the intended correlation-style sum.
    # Bug fix: index with a tuple of slices -- indexing with a plain list of
    # slices was deprecated in NumPy 1.15 and is an error in modern NumPy.
    slicer = tuple(slice(None, None, -1) for _ in range(footprint.ndim))
    return convolve(input, footprint[slicer], output, mode, cval, origin)
|
kleinfeld/medpy
|
medpy/filter/image.py
|
Python
|
gpl-3.0
| 14,384
|
[
"Gaussian"
] |
fbfd48c8d7bdb4b1c7a2a000cde82ddd57749fffc34219fcca411547d8e494f3
|
from django.test import TestCase
from django.contrib.auth.models import User
from txtalert.apps.googledoc.models import SpreadSheet, GoogleAccount
from txtalert.apps.googledoc.importer import Importer
from txtalert.apps.googledoc.reader.spreadsheetReader import SimpleCRUD
from txtalert.core.models import Patient, MSISDN, Visit, Clinic
from datetime import datetime, timedelta, date
import random
class ImporterTestCase(TestCase):
"""Testing the google spreadsheet import loop"""
fixtures = ['patient', 'visit', 'clinic']
    def setUp(self):
        """Wire up an Importer against a test Google account and seed data.

        Relies on the 'patient', 'visit' and 'clinic' fixtures being loaded;
        builds two fake enrolled-patient rows sharing one random MSISDN.
        """
        # dummy google login details
        self.email = 'txtalert@byteorbit.com'
        self.password = 'testtest'
        self.spreadsheet = 'Praekelt'
        self.empty_spreadsheet = 'Empty Spreadsheet'
        # default import window: the last two weeks
        self.start = date.today() - timedelta(days=14)
        self.until = date.today()
        self.user = User.objects.all()[0]
        self.importer = Importer(self.user, self.email, self.password)
        # make sure we're actually testing some data
        self.assertTrue(Patient.objects.count() > 0)
        self.assertTrue(Visit.objects.count() > 0)
        self.assertTrue(Clinic.objects.count() > 0)
        # a 9-digit number drawn from a stepped range, reused for both rows
        self.random_msisdn = random.choice(range(111111111, 999999999, 123456))
        # fake spreadsheet rows keyed by row number, mirroring the
        # worksheet's column names
        self.enrolled_patients = {
            1: {
                'appointmentdate1': date(2011, 8, 1),
                'fileno': 1111111,
                'appointmentstatus1': 'Missed',
                'phonenumber': self.random_msisdn
            },
            2: {
                'appointmentdate1': date(2011, 8, 10),
                'fileno': 9999999,
                'appointmentstatus1': 'Attended',
                'phonenumber': self.random_msisdn
            }
        }
    def tearDown(self):
        # No per-test cleanup needed; fixtures are rolled back by TestCase.
        pass
    def test_incorrect_spreadsheet_name(self):
        """Test for import with no existing spreadsheet names.

        import_spread_sheet is expected to echo back the (stringified)
        name and flag the import as unsuccessful.
        """
        #invalid spreadsheet names
        self.doc_names = [
            '##hgh', 'copy of appointment',
            '123456', 123456, '#Praekelt'
        ]
        #rondomly select an invalid spreadsheet name
        self.invalid_doc_name = random.choice(self.doc_names)
        self.test_doc_name, self.correct = self.importer.import_spread_sheet(
            self.invalid_doc_name, self.start, self.until
        )
        # the returned name is always a string, even for the int candidate
        self.str_invalid_doc_name = str(self.invalid_doc_name)
        self.assertEquals(self.test_doc_name, self.str_invalid_doc_name)
        self.assertIs(self.correct, False)
def test_empty_worksheets(self):
"""Test for a spreadsheet with no data to update."""
self.doc_name, self.data = self.importer.import_spread_sheet(
self.empty_spreadsheet, self.start, self.until
)
self.assertEquals(self.doc_name, self.empty_spreadsheet)
self.assertIs(self.data, False)
def test_import_worksheets(self):
"""Test for importing worksheets from a spreadsheet."""
self.from_date = date(2011, 7, 18)
self.to_date = date(2011, 9, 22)
self.enrolled, self.updates = self.importer.import_spread_sheet(
self.spreadsheet, self.from_date, self.to_date
)
self.assertEquals(self.enrolled, self.updates)
def test_check_file_no_format_fail(self):
"""Test invalid file number format."""
#invalid file number formats
self.file_numbers = ['+1234', '#ab789', 'abc8901@@', 'ab#12345']
#random selection of invalid file numbers
self.file_no_test = random.choice(self.file_numbers)
self.file_no, self.file_format = self.importer.check_file_no_format(
self.file_no_test
)
self.assertEqual(self.file_no, self.file_no_test)
self.assertEqual(self.file_format, False)
def test_check_file_no_pass(self):
"""Test file number format can only be alphanumeric."""
#invalid file number formats
self.file_numbers = [1234, 'ab789', 'abc8901', 'ab12345']
#random selection of invalid file numbers
self.file_no_test = random.choice(self.file_numbers)
self.file_no, self.file_format = self.importer.check_file_no_format(
self.file_no_test
)
self.test_file_no = str(self.file_no_test)
self.assertEqual(self.file_no, self.test_file_no)
self.assertEqual(self.file_format, True)
def test_check_msisdn_format_fail(self):
"""Test for an invalid msisdn format."""
#invalid phone number formats
self.phones = [
1234567, 123456789012, '+1234567',
'012456789', '-12345678901', '###12345',
'abcdefghi', '12345abcd'
]
#random selection of invalid phone numbers
self.phone_test = random.choice(self.phones)
#phone number and format correct flag
self.phone, self.phone_format = self.importer.check_msisdn_format(
self.phone_test
)
self.assertIs(self.phone_format, False)
self.assertEquals(self.phone_test, self.phone)
def test_check_msisdn_format_pass(self):
"""Test for valid msisdn formats. """
#valid phone numbers
self.valid_phones = [
123456789, '0123456789',
27123456789, '+27123456789'
]
#random selection of valid msisdn
self.valid_phone = random.choice(self.valid_phones)
#phone number and format correct flag
self.phone, self.phone_format = self.importer.check_msisdn_format(
self.valid_phone
)
self.assertIs(self.phone_format, True)
def test_create_patient_pass(self):
"""Test if the patient was created."""
self.random_patient = random.choice(range(11111, 99999, 1234))
self.random_patient = str(self.random_patient)
self.new_patient = {
'appointmentdate1': date(2011, 10, 1),
'fileno': self.random_patient,
'appointmentstatus1': 'Scheduled',
'phonenumber': self.random_msisdn
}
self.random_row = random.choice(range(1, 99, 1))
self.row = self.random_row
self.created = self.importer.create_patient(
self.new_patient, self.row, self.spreadsheet,
self.start, self.until
)
self.assertIs(self.created, True)
def test_create_patient_fail(self):
"""Test if the patient was not created. """
self.new_patient = {
'appointmentdate1': date(2011, 10, 1),
'fileno': '###s01011',
'appointmentstatus1': 'Scheduled',
'phonenumber': 190909090
}
self.row = 12
self.created = self.importer.create_patient(
self.new_patient, self.row, self.spreadsheet,
self.start, self.until
)
self.assertIs(self.created, False)
def test_set_cache_enrollment_status_fail(self):
"""Test caching of patient that have not enrolled."""
self.uncached_filenos = [111100, 323232, 666666, 'abc113', '123zxy']
self.cache_fileno = random.choice(self.uncached_filenos)
self.cached_enrolled = self.importer.set_cache_enrollement_status(
self.spreadsheet, self.cache_fileno, self.start, self.until
)
self.assertIs(self.cached_enrolled, False)
def test_set_cache_enrollment_status_pass(self):
"""Test caching of patient that have enrolled."""
self.uncached_filenos = [721003, 61201, 9999999, 118801]
self.cache_fileno = random.choice(self.uncached_filenos)
self.cached_enrolled = self.importer.set_cache_enrollement_status(
self.spreadsheet, self.cache_fileno, self.start, self.until
)
self.assertIs(self.cached_enrolled, True)
def test_get_cache_enrollment_status(self):
"""Test if cached enrollement status was found."""
self.uncached_filenos = [721003, 61201, 9999999, 118801]
self.cache_fileno = random.choice(self.uncached_filenos)
self.importer.set_cache_enrollement_status(
self.spreadsheet, self.cache_fileno, self.start, self.until
)
self.cached = self.importer.get_cache_enrollement_status(
self.cache_fileno
)
self.assertIs(self.cached, True)
def test_update_patients(self):
"""Test if a worksheet of patients is updated successfully."""
self.enrolled, self.updates = self.importer.update_patients(
self.enrolled_patients, self.spreadsheet,
self.start, self.until
)
self.assertEqual(self.enrolled, 2)
self.assertEqual(self.updates, 2)
def test_invalid_file_no_(self):
"""Test if the file no is invalid."""
#invalid phone number formats
self.files = ['+1234', '#ab789', 'abc8901@@', 'ab#12345']
#random selection of invalid file numbers
self.file_test = random.choice(self.files)
self.patient_row = {
'appointmentdate1': date(2011, 8, 10),
'fileno': self.file_test,
'appointmentstatus1': 'Missed',
'phonenumber': 987654321
}
self.row_no = 2
self.valid = self.importer.update_patient(
self.patient_row, self.row_no, self.spreadsheet,
self.start, self.until
)
self.assertIs(self.valid, False)
def test_invalid_patient_id(self):
"""Patient not on the database test if its created."""
self.patient_row = {
'appointmentdate1': date(2011, 8, 10),
'fileno': 555555,
'appointmentstatus1': 'Missed',
'phonenumber': 987654321
}
self.row_no = 2
self.created = self.importer.update_patient(
self.patient_row, self.row_no, self.spreadsheet,
self.start, self.until
)
self.assertEqual(self.created, True)
def test_successful_patient_update(self):
"""Test that a patient was successfully updated."""
self.msisdn = random.choice(range(111111111, 999999999, 123456))
self.patient_row = {
'appointmentdate1': date(2011, 8, 9),
'fileno': 9999999,
'appointmentstatus1': 'Attended',
'phonenumber': self.msisdn
}
self.row_no = 2
self.patient_updated = self.importer.update_patient(
self.patient_row, self.row_no, self.spreadsheet,
self.start, self.until
)
self.assertEqual(self.patient_updated, True)
def test_updated_msisdn(self):
"""Test that the phone number was updated."""
self.msisdn = random.choice(range(111111111, 999999999, 123456))
self.msisdn = '27' + str(self.msisdn)
self.curr_patient = Patient.objects.get(te_id='9999999')
self.assertTrue(self.curr_patient)
self.phone, self.created = self.importer.update_msisdn(
self.msisdn, self.curr_patient
)
self.assertIs(self.created, True)
self.assertEquals(self.msisdn, self.phone)
def test_msisdn_not_updated(self):
"""Test if incorrect phone number are not updated """
self.msisdn = random.choice(range(1111111, 9999999, 12345))
self.curr_patient = Patient.objects.get(te_id='9999999')
self.assertTrue(self.curr_patient)
self.phone, self.created = self.importer.update_msisdn(
self.msisdn, self.curr_patient
)
self.phone = int(self.phone)
self.assertIs(self.created, False)
self.assertEqual(self.msisdn, self.phone)
def test_invalid_visit_id(self):
"""Visit not on the database."""
(self.app_status, self.app_date, self.visit_id, self.curr_patient) = (
'Scheduled', date(2011, 8, 10), 'jjjjjjj',
Patient.objects.get(te_id='9999999')
)
original_count = Visit.objects.count()
status = self.importer.update_appointment_status(
self.app_status, self.curr_patient, self.app_date,
self.visit_id, self.spreadsheet
)
self.assertEqual(status, 's')
self.assertEqual(Visit.objects.count(), original_count + 1)
def test_update_not_needed(self):
"""Appointment status already updated."""
(self.app_status, self.app_date, self.visit_id, self.curr_patient) = (
'Scheduled', date(2011, 8, 10), '02-9999999',
Patient.objects.get(te_id='9999999')
)
self.updated = self.importer.update_appointment_status(
self.app_status, self.curr_patient, self.app_date,
self.visit_id, self.spreadsheet
)
self.status = 's'
self.assertEquals(self.status, 's')
def test_status_is_updated(self):
"""Checks that the status was updated"""
(self.app_status, self.app_date, self.visit_id, self.curr_patient) = (
'Missed', date(2011, 8, 10), '02-9999999',
Patient.objects.get(te_id='9999999')
)
self.status_updated = self.importer.update_appointment_status(
self.app_status, self.curr_patient, self.app_date,
self.visit_id, self.spreadsheet
)
self.assertEquals(self.status_updated, 'm')
def test_status_not_updated(self):
"""Test that the update failed."""
(self.app_status, self.app_date, self.visit_id, self.curr_patient) = (
'Missed', date(2011, 8, 1), '02-9999999',
Patient.objects.get(te_id='9999999')
)
self.status_updated = self.importer.update_appointment_status(
self.app_status, self.curr_patient, self.app_date,
self.visit_id, self.spreadsheet
)
self.assertEquals(self.status_updated, 'm')
class SpreadSheetReaderTestCase(TestCase):
    """Exercise the SimpleCRUD spreadsheet reader.

    Integration tests: they require live Google credentials and the
    'Praekelt' spreadsheet to exist on the account.
    """

    def setUp(self):
        self.email = 'txtalert@byteorbit.com'
        self.password = 'testtest'
        self.spreadsheet = 'Praekelt'
        self.reader = SimpleCRUD(self.email, self.password)
        self.assertTrue(self.reader)
        # default two-week window ending today
        self.start = date.today() - timedelta(days=14)
        self.until = date.today()
        # four worksheet rows spanning August/September 2011
        self.test_dict = {
            1: {
                'appointmentdate1': date(2011, 8, 1),
                'fileno': 9999999,
                'appointmentstatus1': 'Scheduled',
                'phonenumber': 123456789
            },
            2: {
                'appointmentdate1': date(2011, 8, 5),
                'fileno': 8888888,
                'appointmentstatus1': 'Scheduled',
                'phonenumber': 987654321
            },
            3: {
                'appointmentdate1': date(2011, 8, 11),
                'fileno': 7777777,
                'appointmentstatus1': 'Scheduled',
                'phonenumber': 741852963
            },
            4: {
                'appointmentdate1': date(2011, 9, 2),
                'fileno': 6666666,
                'appointmentstatus1': 'Scheduled',
                'phonenumber': 369258147
            }
        }

    def tearDown(self):
        pass

    def test_get_spreadsheet(self):
        """Test for getting a spreadsheet that exists."""
        self.found = self.reader.get_spreadsheet(self.spreadsheet)
        self.assertTrue(self.found)

    def test_get_spreadsheet_fail(self):
        """Test for getting a spreadsheet that does not exists."""
        self.fail_spreadsheet = '##########'
        self.not_found = self.reader.get_spreadsheet(self.fail_spreadsheet)
        self.assertEqual(self.not_found, False)

    def test_appointment_rows(self):
        """
        Test for getting the appointments in a worksheet
        that fall between the from_date to end_date.
        """
        self.from_date = date(2011, 8, 1)
        self.end_date = date(2011, 8, 14)
        retrieved_rows = self.reader.appointment_rows(
            self.test_dict, self.from_date, self.end_date
        )
        # only rows 1-3 fall inside the window; row 4 is in September
        self.assertEqual(len(retrieved_rows), 3)
        self.assertEqual(retrieved_rows[1], self.test_dict[1])
        self.assertEqual(retrieved_rows[2], self.test_dict[2])
        self.assertEqual(retrieved_rows[3], self.test_dict[3])

    def test_date_object_creator(self):
        """Convert date string to datetime object. """
        # exercise every valid dd/mm/yyyy variant, not just one literal
        # (the original built this list but never used it)
        self.valid_dates = ['21/08/2011', '31/8/2011', '1/8/2011']
        for valid_date in self.valid_dates:
            self.curr_date = self.reader.date_object_creator(valid_date)
            self.assertTrue(self.curr_date)

    def test_database_record(self):
        """Convert worksheet row contents to proper types."""
        self.test_row = {
            'appointmentdate1': '02/09/2011',
            'fileno': '63601',
            'appointmentstatus1': 'Scheduled',
            'phonenumber': '969577542',
        }
        self.modified_row = self.reader.database_record(self.test_row)
        self.app_date = self.modified_row['appointmentdate1']
        self.app_status = self.modified_row['appointmentstatus1']
        self.file_no = self.modified_row['fileno']
        self.phone = self.modified_row['phonenumber']
        # test if the fields were converted correctly
        self.assertEqual(self.app_date, date(2011, 9, 2))
        self.assertEqual(self.app_status, self.test_row['appointmentstatus1'])
        self.assertEqual(self.file_no, '63601')
        self.assertEqual(self.phone, 969577542)
        # test if the received fields round-trip back to the sent strings
        self.app_date = str(self.app_date)
        self.app_date = self.reader.date_format(self.app_date)
        self.assertEqual(self.app_date, self.test_row['appointmentdate1'])
        self.app_status = str(self.app_status)
        self.assertEqual(self.app_status, self.test_row['appointmentstatus1'])
        self.file_no = str(self.file_no)
        self.assertEqual(self.file_no, self.test_row['fileno'])
        self.phone = str(self.phone)
        self.assertEqual(self.phone, self.test_row['phonenumber'])

    def test_run_enrollment_check(self):
        """Tests if the patient has enrolled """
        self.enrol = self.reader.run_enrollment_check(
            self.spreadsheet, 63601, self.start, self.until
        )
        self.assertEqual(self.enrol, True)

    def test_not_enrolled(self):
        """Test for a patient that is not enrolled. """
        self.not_enrol = self.reader.run_enrollment_check(
            self.spreadsheet, 60001, self.start, self.until
        )
        self.assertEqual(self.not_enrol, False)

    def test_run_appointment_check(self):
        """Test if the appointments worksheets are retrieved."""
        self.month = self.reader.run_appointment(
            self.spreadsheet, self.start, self.until
        )
        self.assertTrue(self.month)
|
praekelt/txtalert
|
txtalert/apps/googledoc/_tests/importer.py
|
Python
|
gpl-3.0
| 20,734
|
[
"VisIt"
] |
e5c3b2f5fc9bb68ba285e5e996a4dc1841ce01dac438799341477951aab379e9
|
#!/usr/bin/env python
description = """
This produces a bam file corresponding to junctional regions in a given gtf file
"""

import sys
import pysam
from pythomics.genomics.parsers import GFFReader
from pythomics.templates import CustomParser

# Command-line interface: input BAM, output BAM and the GFF/GTF annotation
# are wired in by the project's shared CustomParser helper methods.
parser = CustomParser(description = description)
parser.add_bam()
parser.add_bam_out()
parser.add_gff()
def main():
    """Write reads that support exon-exon junctions to a new BAM file.

    For every GFF feature with more than one child, consecutive child
    parts (sorted by start coordinate) define junctions; reads found on
    both sides of a junction are written once to args.out_bam, which is
    then sorted and indexed.
    """
    args = parser.parse_args()
    samfile = pysam.Samfile(args.bam, 'rb')
    # output BAM reuses the input file's header
    junctionreads = pysam.Samfile(args.out_bam, 'wb', template=samfile)
    # NOTE(review): group_on/feature/cufflinks presumably come from
    # parser.add_gff() -- confirm against CustomParser.
    id_tag = args.group_on
    chosen_feature = args.feature  # NOTE(review): never used below
    if args.cufflinks:
        gff = GFFReader(args.gff, preset='cufflinks')
    else:
        gff = GFFReader(args.gff, tag_map={'ID': id_tag, 'Parent': 'Parent'})
    # qnames already emitted, so each read is written at most once
    written = set([])
    for feature_name, feature in gff.get_features():
        try:
            children = feature.children
        except AttributeError:
            # feature has no children attribute; nothing junctional to do
            continue
        if len(children) > 1:
            # collapse all child parts, keyed by start coordinate to
            # de-duplicate parts that share the same start across children
            starts = dict([(j.start, j) for i,v in children.iteritems() for j in v.parts()])
            if len(starts) > 1:
                # (seqid, start, end) tuples, ordered by start coordinate
                parts = [(v.seqid, v.start, v.end) for i,v in starts.iteritems()]
                parts.sort(key=lambda x: x[1])
                # walk consecutive part pairs; each boundary is a junction
                for ri, read in enumerate(parts[:-1]):
                    read2 = parts[ri+1]
                    reads = set([])   # qnames seen at the upstream part's end
                    reads2 = set([])  # qnames seen at the downstream part
                    read_dict = {}
                    try:
                        # reads overlapping the last base of the upstream part
                        for i in samfile.fetch(read[0], int(read[2])-1, read[2]):
                            if not i.overlap(int(read[2])-1, int(read[2])) or i.qname in written:
                                continue
                            reads.add(i.qname)
                            read_dict[i.qname] = i
                        # if not i.mate_is_unmapped:
                        #     mate = samfile.mate(i)
                        #     reads.add(mate.qname)
                        #     read_dict[mate.qname] = mate
                        # reads fetched at the first base of the downstream part.
                        # NOTE(review): the overlap() filter checks the *end*
                        # of read2 (read2[2]), not the fetched start window --
                        # verify this is intended and not a copy/paste slip.
                        for i in samfile.fetch(read2[0], read2[1], int(read2[1])+1):
                            if not i.overlap(int(read2[2])-1, int(read2[2])) or i.qname in written:
                                continue
                            reads2.add(i.qname)
                            read_dict[i.qname] = i
                        # if not i.mate_is_unmapped:
                        #     mate = samfile.mate(i)
                        #     reads2.add(mate.qname)
                        #     read_dict[mate.qname] = mate
                        # a junction-supporting read appears on both sides
                        for i in reads&reads2:
                            written.add(i)
                            junctionreads.write(read_dict[i])
                    except ValueError:
                        # fetch() raises ValueError for unknown references
                        # or invalid regions; skip this junction
                        continue
    # NOTE(review): junctionreads is never closed before sorting -- the old
    # pysam.sort(in, out_prefix) call below may read a partially flushed file.
    pysam.sort(args.out_bam, '%s_sort'%args.out_bam)
    pysam.index('%s_sort.bam'%args.out_bam)

if __name__ == "__main__":
    sys.exit(main())
|
pandeylab/pythomics
|
scripts/junctionalReads.py
|
Python
|
gpl-3.0
| 2,928
|
[
"pysam"
] |
c793193147a5e1bff7cf29782de379f98797e92cab3a49fc38ce6236c0b4a2f1
|
# Version: 0.11
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
[](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file. If your project uses `src/myproject/__init__.py`, this
should be `src/myproject/_version.py`. This file should be checked in to
your VCS as usual: the copy created below by `setup.py versioneer` will
include code that parses expanded VCS keywords in generated tarballs. The
'build' and 'sdist' commands will replace it with a copy that has just the
calculated version string.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and
will modify your `__init__.py` to define `__version__` (by calling a
function from `_version.py`). It will also modify your `MANIFEST.in` to
include both `versioneer.py` and the generated `_version.py` in sdist
tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py versioneer`. This will enable the use of additional version-control
systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os, sys, re, subprocess, errno
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = None   # project-relative path of the generated _version.py
versionfile_build = None    # same file, but relative to the build directory
tag_prefix = None           # prefix stripped from VCS tag names (may be "")
parentdir_prefix = None     # prefix of unpacked source-tarball directory names
VCS = 'git'                 # the version-control system to query

# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable executable from *commands* with *args*.

    Each candidate name in *commands* is tried in order until one can be
    spawned (useful on Windows where git may be "git.cmd" or "git.exe").
    Returns the stripped stdout as text, or None if no candidate could be
    started or the process exited non-zero.
    """
    assert isinstance(commands, list)
    proc = None
    for candidate in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            proc = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next one
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(err)
            return None
    else:
        # loop exhausted without a successful Popen
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    output = proc.communicate()[0].strip()
    if sys.version >= '3':
        output = output.decode()
    if proc.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return output
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.11 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
    """Scan a _version.py file for expanded git keyword assignments.

    Used from setup.py, where importing _version.py is undesirable, so the
    values of ``git_refnames`` and ``git_full`` are pulled out with a regexp
    instead.  Returns a dict containing whichever of the keys "refnames" and
    "full" were found; missing/unreadable files yield an empty dict.
    """
    found = {}
    wanted = (("git_refnames =", "refnames"),
              ("git_full =", "full"))
    try:
        fobj = open(versionfile_abs, "r")
        for text in fobj.readlines():
            for prefix, key in wanted:
                if text.strip().startswith(prefix):
                    match = re.search(r'=\s*"(.*)"', text)
                    if match:
                        found[key] = match.group(1)
        fobj.close()
    except EnvironmentError:
        # best-effort: absence of the file simply means no keywords
        pass
    return found
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Derive {'version', 'full'} from expanded git-archive keywords.

    Returns {} when the keywords are absent or unexpanded.  Prefers the
    alphabetically-first tag matching *tag_prefix*; otherwise falls back to
    the full revision id for both fields.
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(name[len(TAG):] for name in refs if name.startswith(TAG))
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. By ignoring
        # refnames without digits, we filter out many common branch names
        # like "release" and "stabilization", as well as "HEAD" and "master".
        tags = set(name for name in refs if re.search(r'\d', name))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    full = keywords["full"].strip()
    for candidate in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if candidate.startswith(tag_prefix):
            version = candidate[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full": full}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full,
            "full": full}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Extract {'version', 'full'} by running git inside a source checkout.

    Only called when the git-archive 'subst' keywords were *not* expanded
    and _version.py hasn't been rewritten with a short version string --
    i.e. we are inside a checked out source tree.  Returns {} when *root*
    is not a git checkout or any git invocation fails.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}

    # on Windows, plain "git" may not resolve; try the wrapper names
    git_cmds = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    described = run_command(git_cmds,
                            ["describe", "--tags", "--dirty", "--always"],
                            cwd=root)
    if described is None:
        return {}
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (described, tag_prefix))
        return {}
    tag = described[len(tag_prefix):]

    head = run_command(git_cmds, ["rev-parse", "HEAD"], cwd=root)
    if head is None:
        return {}
    full = head.strip()
    # propagate the working-tree-dirty marker onto the full revision too
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Register the versioneer-managed files with git.

    Ensures .gitattributes (in the current working directory) marks
    *versionfile_source* for export-subst keyword expansion, then runs
    'git add' on the manifest, version file, __init__.py, this script,
    and (if newly modified) .gitattributes.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source, ipy]
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # point git at the .py source, not a compiled artifact
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    # check whether .gitattributes already carries the export-subst marker
    present = False
    try:
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Infer the version from the name of the directory containing *root*.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string; strip *parentdir_prefix*
    off the basename to recover the version.  Returns None on mismatch.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):], "full": ""}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
              (root, dirname, parentdir_prefix))
    return None
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.11) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
    """Parse version_version/version_full assignments out of *filename*.

    Reads a previously-generated short _version.py; returns a dict with
    whichever of "version"/"full" were found, or {} if the file is
    missing/unreadable.
    """
    found = {}
    patterns = (("version", "version_version = '([^']+)'"),
                ("full", "version_full = '([^']+)'"))
    try:
        with open(filename) as fobj:
            for line in fobj.readlines():
                for key, pattern in patterns:
                    mo = re.match(pattern, line)
                    if mo:
                        found[key] = mo.group(1)
    except EnvironmentError:
        return {}
    return found
def write_to_version_file(filename, versions):
    """Render SHORT_VERSION_PY with *versions* and write it to *filename*."""
    contents = SHORT_VERSION_PY % versions
    with open(filename, "w") as fobj:
        fobj.write(contents)
    print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
    """Return the directory containing this script.

    Falls back to sys.argv[0] (normally setup.py) on interpreters that do
    not provide __file__.
    """
    try:
        here = __file__
    except NameError:
        here = sys.argv[0]
    return os.path.dirname(os.path.abspath(here))
def vcs_function(vcs, suffix):
    """Look up the '<vcs>_<suffix>' function in this module, or None."""
    this_module = sys.modules[__name__]
    return getattr(this_module, '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
    """Compute the project's version, trying several sources in order.

    Order: expanded VCS keywords in the version file, a previously-written
    static _version.py, a live VCS query, and the parent directory name;
    *default* is the last resort.
    """
    # returns dict with two keys: 'version' and 'full'
    # NOTE(review): default is the shared DEFAULT dict -- callers should not
    # mutate the returned value when it is the fallback.
    assert versionfile_source is not None, "please set versioneer.versionfile_source"
    assert tag_prefix is not None, "please set versioneer.tag_prefix"
    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
    assert VCS is not None, "please set versioneer.VCS"

    # I am in versioneer.py, which must live at the top of the source tree,
    # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
    # don't have __file__, in which case we fall back to sys.argv[0] (which
    # ought to be the setup.py script). We prefer __file__ since that's more
    # robust in cases where setup.py was invoked in some weird way (e.g. pip)
    root = get_root()
    versionfile_abs = os.path.join(root, versionfile_source)

    # extract version from first of _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.

    get_keywords_f = vcs_function(VCS, "get_keywords")
    versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
    if get_keywords_f and versions_from_keywords_f:
        vcs_keywords = get_keywords_f(versionfile_abs)
        ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
        if ver:
            if verbose: print("got version from expanded keyword %s" % ver)
            return ver

    ver = versions_from_file(versionfile_abs)
    if ver:
        if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
        return ver

    versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
    if versions_from_vcs_f:
        ver = versions_from_vcs_f(tag_prefix, root, verbose)
        if ver:
            if verbose: print("got version from VCS %s" % ver)
            return ver

    ver = versions_from_parentdir(parentdir_prefix, root, verbose)
    if ver:
        if verbose: print("got version from parentdir %s" % ver)
        return ver

    if verbose: print("got version from default %s" % default)
    return default
def get_version(verbose=False):
    """Convenience wrapper: just the 'version' string from get_versions()."""
    info = get_versions(verbose=verbose)
    return info["version"]
class cmd_version(Command):
    """setup.py 'version' command: print the computed version string."""
    description = "report generated version string"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        ver = get_version(verbose=True)
        print("Version is currently: %s" % ver)
class cmd_build(_build):
    """Standard 'build' command, plus rewriting of the built _version.py
    with a static (short-form) version file."""
    def run(self):
        versions = get_versions(verbose=True)
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        target_versionfile = os.path.join(self.build_lib, versionfile_build)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
    from cx_Freeze.dist import build_exe as _build_exe

    class cmd_build_exe(_build_exe):
        """cx_Freeze 'build_exe' command that freezes a static _version.py."""
        def run(self):
            versions = get_versions(verbose=True)
            target_versionfile = versionfile_source
            print("UPDATING %s" % target_versionfile)
            # temporarily replace the in-tree _version.py with the short
            # (static) form so the frozen app carries fixed version strings
            os.unlink(target_versionfile)
            with open(target_versionfile, "w") as f:
                f.write(SHORT_VERSION_PY % versions)
            _build_exe.run(self)
            # restore the long (VCS-aware) form for the source tree
            os.unlink(target_versionfile)
            with open(versionfile_source, "w") as f:
                assert VCS is not None, "please set versioneer.VCS"
                LONG = LONG_VERSION_PY[VCS]
                f.write(LONG % {"DOLLAR": "$",
                                "TAG_PREFIX": tag_prefix,
                                "PARENTDIR_PREFIX": parentdir_prefix,
                                "VERSIONFILE_SOURCE": versionfile_source,
                                })
class cmd_sdist(_sdist):
    """'sdist' command that stamps the static version into the tarball."""
    def run(self):
        versions = get_versions(verbose=True)
        # stash for make_release_tree, which runs later in the same command
        self._versioneer_generated_versions = versions
        # unless we update this, the command will keep using the old version
        self.distribution.metadata.version = versions["version"]
        return _sdist.run(self)

    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # now locate _version.py in the new base_dir directory (remembering
        # that it may be a hardlink) and replace it with an updated value
        target_versionfile = os.path.join(base_dir, versionfile_source)
        print("UPDATING %s" % target_versionfile)
        os.unlink(target_versionfile)
        with open(target_versionfile, "w") as f:
            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
# Snippet appended to the package __init__.py so that
# 'import package; package.__version__' works at runtime.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
    """setup.py 'versioneer' command: install/refresh the versioneer files.

    Writes SRC/_version.py from the long template, appends the version
    snippet to the package __init__.py, ensures MANIFEST.in ships both
    versioneer.py and the version file, and registers the git
    keyword-substitution attributes.
    """
    description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print(" creating %s" % versionfile_source)
        with open(versionfile_source, "w") as f:
            assert VCS is not None, "please set versioneer.VCS"
            LONG = LONG_VERSION_PY[VCS]
            f.write(LONG % {"DOLLAR": "$",
                            "TAG_PREFIX": tag_prefix,
                            "PARENTDIR_PREFIX": parentdir_prefix,
                            "VERSIONFILE_SOURCE": versionfile_source,
                            })

        # add the __version__ snippet to the package __init__.py (idempotent)
        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)

        # Make sure both the top-level "versioneer.py" and versionfile_source
        # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
        # they'll be copied into source distributions. Pip won't be able to
        # install the package without this.
        manifest_in = os.path.join(get_root(), "MANIFEST.in")
        simple_includes = set()
        try:
            with open(manifest_in, "r") as f:
                for line in f:
                    if line.startswith("include "):
                        for include in line.split()[1:]:
                            simple_includes.add(include)
        except EnvironmentError:
            pass
        # That doesn't cover everything MANIFEST.in can do
        # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
        # it might give some false negatives. Appending redundant 'include'
        # lines is safe, though.
        if "versioneer.py" not in simple_includes:
            print(" appending 'versioneer.py' to MANIFEST.in")
            with open(manifest_in, "a") as f:
                f.write("include versioneer.py\n")
        else:
            print(" 'versioneer.py' already in MANIFEST.in")
        if versionfile_source not in simple_includes:
            print(" appending versionfile_source ('%s') to MANIFEST.in" %
                  versionfile_source)
            with open(manifest_in, "a") as f:
                f.write("include %s\n" % versionfile_source)
        else:
            print(" versionfile_source already in MANIFEST.in")

        # Make VCS-specific changes. For git, this means creating/changing
        # .gitattributes to mark _version.py for export-time keyword
        # substitution.
        do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
    """Return the distutils command overrides provided by versioneer."""
    commands = {
        'version': cmd_version,
        'versioneer': cmd_update_files,
        'build': cmd_build,
        'sdist': cmd_sdist,
    }
    if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
        # cx_Freeze drives everything through build_exe; the plain build
        # command is replaced rather than augmented.
        commands['build_exe'] = cmd_build_exe
        del commands['build']
    return commands
|
ibest/grcScriptsPy
|
versioneer.py
|
Python
|
apache-2.0
| 35,320
|
[
"Brian"
] |
3750378e1371ae9f66e0c2393f462a6061e8351ed5ad145aed577fcb4740d862
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
import sys
import traceback
from zope.interface import implementer
from twisted.python.compat import _PY3
from twisted.python.failure import Failure
from twisted.trial import util
from twisted.trial.unittest import SynchronousTestCase, PyUnitResultAdapter
from twisted.trial.itrial import IReporter, ITestCase
import unittest as pyunit
class TestPyUnitTestCase(SynchronousTestCase):
    """
    Tests for trial's handling of a plain stdlib unittest.TestCase adapted
    to L{ITestCase}.
    """
    class PyUnitTest(pyunit.TestCase):
        # minimal stdlib test case used as the adaptation subject
        def test_pass(self):
            pass

    def setUp(self):
        self.original = self.PyUnitTest('test_pass')
        self.test = ITestCase(self.original)

    def test_visit(self):
        """
        Trial assumes that test cases implement visit().
        """
        log = []
        def visitor(test):
            log.append(test)
        self.test.visit(visitor)
        self.assertEqual(log, [self.test])
    # visit() is deprecated; suppress the warning this test provokes
    test_visit.suppress = [
        util.suppress(category=DeprecationWarning,
                      message="Test visitors deprecated in Twisted 8.0")]

    def test_callable(self):
        """
        Tests must be callable in order to be used with Python's unittest.py.
        """
        self.assertTrue(callable(self.test),
                        "%r is not callable." % (self.test,))
# Remove this when we port twisted.trial._synctest to Python 3:
if _PY3:
    # drop the adapter tests on Python 3; the pieces they exercise have not
    # been ported yet (per the note above)
    del TestPyUnitTestCase
class TestPyUnitResult(SynchronousTestCase):
    """
    Tests to show that PyUnitResultAdapter wraps TestResult objects from the
    standard library 'unittest' module in such a way as to make them usable and
    useful from Trial.
    """
    # Once erroneous is ported to Python 3 this can be replaced with
    # erroneous.ErrorTest:
    class ErrorTest(SynchronousTestCase):
        """
        A test case which has a L{test_foo} which will raise an error.

        @ivar ran: boolean indicating whether L{test_foo} has been run.
        """
        ran = False

        def test_foo(self):
            """
            Set C{self.ran} to True and raise a C{ZeroDivisionError}
            """
            self.ran = True
            1/0

    def test_dontUseAdapterWhenReporterProvidesIReporter(self):
        """
        The L{PyUnitResultAdapter} is only used when the result passed to
        C{run} does *not* provide L{IReporter}.
        """
        @implementer(IReporter)
        class StubReporter(object):
            """
            A reporter which records data about calls made to it.

            @ivar errors: Errors passed to L{addError}.
            @ivar failures: Failures passed to L{addFailure}.
            """
            def __init__(self):
                self.errors = []
                self.failures = []

            def startTest(self, test):
                """
                Do nothing.
                """

            def stopTest(self, test):
                """
                Do nothing.
                """

            def addError(self, test, error):
                """
                Record the error.
                """
                self.errors.append(error)

        test = self.ErrorTest("test_foo")
        result = StubReporter()
        test.run(result)
        # receiving a Failure (not a formatted string) proves the adapter
        # was bypassed for an IReporter provider
        self.assertIsInstance(result.errors[0], Failure)

    def test_success(self):
        # a passing trial test registers as a pyunit success
        class SuccessTest(SynchronousTestCase):
            ran = False
            def test_foo(s):
                s.ran = True
        test = SuccessTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)

        self.failUnless(test.ran)
        self.assertEqual(1, result.testsRun)
        self.failUnless(result.wasSuccessful())

    def test_failure(self):
        # an assertion failure registers as a pyunit failure, not an error
        class FailureTest(SynchronousTestCase):
            ran = False
            def test_foo(s):
                s.ran = True
                s.fail('boom!')
        test = FailureTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)

        self.failUnless(test.ran)
        self.assertEqual(1, result.testsRun)
        self.assertEqual(1, len(result.failures))
        self.failIf(result.wasSuccessful())

    def test_error(self):
        # an unexpected exception registers as a pyunit error
        test = self.ErrorTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)

        self.failUnless(test.ran)
        self.assertEqual(1, result.testsRun)
        self.assertEqual(1, len(result.errors))
        self.failIf(result.wasSuccessful())

    def test_setUpError(self):
        # a setUp failure is reported as an error and the test body is skipped
        class ErrorTest(SynchronousTestCase):
            ran = False
            def setUp(self):
                1/0
            def test_foo(s):
                s.ran = True
        test = ErrorTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)

        self.failIf(test.ran)
        self.assertEqual(1, result.testsRun)
        self.assertEqual(1, len(result.errors))
        self.failIf(result.wasSuccessful())

    def test_tracebackFromFailure(self):
        """
        Errors added through the L{PyUnitResultAdapter} have the same traceback
        information as if there were no adapter at all.
        """
        try:
            1/0
        except ZeroDivisionError:
            exc_info = sys.exc_info()
            f = Failure()
        pyresult = pyunit.TestResult()
        result = PyUnitResultAdapter(pyresult)
        result.addError(self, f)
        self.assertEqual(pyresult.errors[0][1],
                         ''.join(traceback.format_exception(*exc_info)))

    def test_traceback(self):
        """
        As test_tracebackFromFailure, but covering more code.
        """
        class ErrorTest(SynchronousTestCase):
            exc_info = None
            def test_foo(self):
                try:
                    1/0
                except ZeroDivisionError:
                    self.exc_info = sys.exc_info()
                    raise
        test = ErrorTest('test_foo')
        result = pyunit.TestResult()
        test.run(result)

        # We can't test that the tracebacks are equal, because Trial's
        # machinery inserts a few extra frames on the top and we don't really
        # want to trim them off without an extremely good reason.
        #
        # So, we just test that the result's stack ends with the the
        # exception's stack.

        expected_stack = ''.join(traceback.format_tb(test.exc_info[2]))
        observed_stack = '\n'.join(result.errors[0][1].splitlines()[:-1])

        self.assertEqual(expected_stack.strip(),
                         observed_stack[-len(expected_stack):].strip())

    def test_tracebackFromCleanFailure(self):
        """
        Errors added through the L{PyUnitResultAdapter} have the same
        traceback information as if there were no adapter at all, even
        if the Failure that held the information has been cleaned.
        """
        try:
            1/0
        except ZeroDivisionError:
            exc_info = sys.exc_info()
            f = Failure()
        f.cleanFailure()
        pyresult = pyunit.TestResult()
        result = PyUnitResultAdapter(pyresult)
        result.addError(self, f)
        self.assertEqual(pyresult.errors[0][1],
                         ''.join(traceback.format_exception(*exc_info)))

    def test_trialSkip(self):
        """
        Skips using trial's skipping functionality are reported as skips in
        the L{pyunit.TestResult}.
        """
        class SkipTest(SynchronousTestCase):
            def test_skip(self):
                1/0
            test_skip.skip = "Let's skip!"

        test = SkipTest('test_skip')
        result = pyunit.TestResult()
        test.run(result)
        self.assertEqual(result.skipped, [(test, "Let's skip!")])

    def test_pyunitSkip(self):
        """
        Skips using pyunit's skipping functionality are reported as skips in
        the L{pyunit.TestResult}.
        """
        class SkipTest(SynchronousTestCase):
            @pyunit.skip("skippy")
            def test_skip(self):
                1/0

        test = SkipTest('test_skip')
        result = pyunit.TestResult()
        test.run(result)
        self.assertEqual(result.skipped, [(test, "skippy")])

    def test_skip26(self):
        """
        On Python 2.6, pyunit doesn't support skipping, so it gets added as a
        failure to the L{pyunit.TestResult}.
        """
        class SkipTest(SynchronousTestCase):
            def test_skip(self):
                1/0
            test_skip.skip = "Let's skip!"

        test = SkipTest('test_skip')
        result = pyunit.TestResult()
        test.run(result)
        self.assertEqual(len(result.failures), 1)
        test2, reason = result.failures[0]
        self.assertIdentical(test, test2)
        self.assertIn("UnsupportedTrialFeature", reason)

    # choose which skip-related tests apply to the running Python version
    if sys.version_info[:2] < (2, 7):
        message = "pyunit doesn't support skipping in Python 2.6"
        test_trialSkip.skip = message
        test_pyunitSkip.skip = message
        del message
    else:
        test_skip26.skip = "This test is only relevant to Python 2.6"
|
geodrinx/gearthview
|
ext-libs/twisted/trial/test/test_pyunitcompat.py
|
Python
|
gpl-3.0
| 9,130
|
[
"VisIt"
] |
3121efdffa76f41059f39029fd0f807343f0bef2d271715abc2497acafee55c0
|
import datetime
import unittest
from decimal import Decimal
from pheme.longitudinal.tables import create_tables
from pheme.longitudinal.tables import AdmissionSource, AssignedLocation
from pheme.longitudinal.tables import AdmissionTemp, AdmissionO2sat
from pheme.longitudinal.tables import ChiefComplaint, FluVaccine, H1N1Vaccine
from pheme.longitudinal.tables import Disposition, Diagnosis, Location
from pheme.longitudinal.tables import Note, PerformingLab, SpecimenSource
from pheme.longitudinal.tables import Facility, Pregnancy, Race, ServiceArea
from pheme.longitudinal.tables import LabResult, Visit
from pheme.util.config import Config, configure_logging
from pheme.util.pg_access import AlchemyAccess, db_params
# Config-file section consulted for database connection parameters.
CONFIG_SECTION = 'longitudinal'


def setup_module():
    """Create a fresh db (once) for all tests in this module"""
    configure_logging(verbosity=2, logfile='unittest.log')
    c = Config()
    # hard stop: this module drops/recreates tables
    if c.get('general', 'in_production'):  # pragma: no cover
        raise RuntimeError("DO NOT run destructive test on production system")
    create_tables(enable_delete=True, **db_params(CONFIG_SECTION))
class TestLongitudinalAccess(unittest.TestCase):
    """Series of tests on longitudinal ORM classes.

    Each test persists one object via commit_test_obj(), queries it back,
    and relies on tearDown to delete whatever was added.
    """

    def setUp(self):
        # Fresh SQLAlchemy session per test, credentials from config.
        c = Config()
        cfg_value = lambda v: c.get('longitudinal', v)
        self.alchemy = AlchemyAccess(database=cfg_value('database'),
                                     host='localhost',
                                     user=cfg_value('database_user'),
                                     password=cfg_value('database_password'))
        self.session = self.alchemy.session
        self.remove_after_test = []

    def tearDown(self):
        # Explicit loop rather than map(): on Python 3 map() is lazy, so
        # the deletes would silently never execute.
        for obj in self.remove_after_test:
            self.session.delete(obj)
        self.session.commit()
        self.alchemy.disconnect()

    def commit_test_obj(self, obj):
        """Commit to db and bookkeep for safe removal on teardown"""
        self.session.add(obj)
        self.remove_after_test.append(obj)
        self.session.commit()

    def testAdmissionSource(self):
        self.commit_test_obj(AdmissionSource(pk='7',
                                             description='Emergency room'))
        query = self.session.query(AdmissionSource).\
            filter_by(description='Emergency room')
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().pk, '7')

    def testAdmissionTemp(self):
        self.commit_test_obj(AdmissionTemp(degree_fahrenheit=98.5))
        query = self.session.query(AdmissionTemp)
        self.assertEquals(1, query.count())
        # column round-trips as Decimal, not float
        self.assertEquals(query.first().degree_fahrenheit,
                          Decimal('98.5'))

    def testAdmissionO2sat(self):
        self.commit_test_obj(AdmissionO2sat(o2sat_percentage=98))
        query = self.session.query(AdmissionO2sat)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().o2sat_percentage, 98)

    def testAssignedLocation(self):
        self.commit_test_obj(AssignedLocation(location='PMCLAB'))
        query = self.session.query(AssignedLocation)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().location, 'PMCLAB')

    def testChiefComplaint(self):
        self.commit_test_obj(ChiefComplaint(chief_complaint='ABDOMINAL PAIN'))
        query = self.session.query(ChiefComplaint)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().chief_complaint,
                          'ABDOMINAL PAIN')

    def testLabResult(self):
        loinc_text = 'Bacteria identified:Prid:Pt:Sputum:Nom:Aerobic culture'
        loinc_code = '622-1'
        coding = 'LN'
        result = """Few Neutrophils Few Squamous Epithelial Cells Mixed Flora Squamous cells in the specimen indicate the presence of superficial material that may contain contaminating or colonizing bacteria unrelated to infection. Collection of another specimen is suggested, avoiding superficial sources of contamination. *****CULTURE RESULTS*****"""
        self.commit_test_obj(LabResult(test_code=loinc_code,
                                       test_text=loinc_text,
                                       coding=coding,
                                       result=result))
        query = self.session.query(LabResult)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().test_code, loinc_code)
        self.assertEquals(query.first().test_text, loinc_text)
        self.assertEquals(query.first().result, result)

    def testLocationCountry(self):
        self.commit_test_obj(Location(country='CAN'))
        query = self.session.query(Location)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().country, 'CAN')
        self.assertTrue(datetime.datetime.now() >= query.first().last_updated)

    def testLocationCounty(self):
        self.commit_test_obj(Location(county='SPO-WA'))
        query = self.session.query(Location)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().county, 'SPO-WA')
        self.assertTrue(datetime.datetime.now() >= query.first().last_updated)

    def testLocationZip(self):
        self.commit_test_obj(Location(zip="98101"))
        query = self.session.query(Location)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().zip, '98101')

    def testLocation(self):
        self.commit_test_obj(Location(county='SPO-WA', state='WA',
                                      country='USA', zip='95432'))
        query = self.session.query(Location)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().state, 'WA')
        self.assertEquals(query.first().county, 'SPO-WA')
        self.assertEquals(query.first().country, 'USA')
        self.assertEquals(query.first().zip, '95432')

    def testNote(self):
        self.commit_test_obj(Note(note="IS PT ALLERGIC TO PENICILLIN? N"))
        query = self.session.query(Note)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().note,
                          "IS PT ALLERGIC TO PENICILLIN? N")

    def testLongNote(self):
        # over-length notes are truncated by the column; only the prefix is
        # guaranteed to survive the round trip
        too_long_note = """ REFERENCE INTERVAL: INFLUENZA B VIRUS Ab, IgG 0.89 IV or less: Negative - No significant level of influenza B virus IgG antibody detected. 0.90 - 1.10 IV: Equivocal - Questionable presence of influenza B virus IgG antibody detected. Repeat testing in 10-14 days may be helpful. 1.11 IV or greater: Positive - IgG antibodies to influenza B virus detected, which may suggest current or past infection. Test performed at ARUP Laboratories, 500 Chipeta Way, Salt Lake City, Utah 84108 Performed at ARUP, 500 Chipeta Way, Salt Lake City, UT 84108"""
        self.commit_test_obj(Note(note=too_long_note))
        query = self.session.query(Note)
        self.assertEquals(1, query.count())
        self.assertTrue(query.first().note.startswith(too_long_note[:100]))

    def testDisposition(self):
        self.commit_test_obj(Disposition(code=20, description='Expired',
                                         gipse_mapping='Expired',
                                         odin_mapping='Died'))
        disposition = self.session.query(Disposition).\
            filter(Disposition.description == 'Expired').one()
        self.assertTrue(disposition)
        self.assertEquals(disposition.code, 20)
        self.assertEquals(disposition.odin_mapping, 'Died')
        self.assertEquals(disposition.gipse_mapping, 'Expired')
        self.assertTrue(datetime.datetime.now() > disposition.last_updated)

    def testDx(self):
        self.commit_test_obj(Diagnosis(status='W', icd9='569.3',
                                       description='HYPERTENSION NOS'))
        query = self.session.query(Diagnosis)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().description, 'HYPERTENSION NOS')

    def testFacility(self):
        self.commit_test_obj(Facility(county='NEAR', npi=123454321,
                                      zip='99999',
                                      organization_name='Nearby Medical '
                                      'Center', local_code='NMC'))
        sh = self.session.query(Facility).\
            filter_by(npi=123454321).one()
        self.assertEquals(sh.organization_name,
                          'Nearby Medical Center')
        self.assertEquals(sh.zip, '99999')
        self.assertEquals(sh.county, 'NEAR')

    def testFacilityUpdates(self):
        "Facilities are pre-loaded. Use to test update timestamps"
        self.commit_test_obj(Facility(county='NEAR', npi=123454321,
                                      zip='99999',
                                      organization_name='Nearby Medical '
                                      'Center', local_code='NMC'))
        facility = self.session.query(Facility).\
            filter_by(npi=123454321).one()
        b4 = facility.last_updated
        self.assertTrue(b4)
        # mutate and re-commit; last_updated should advance
        facility.local_code = 'FOO'
        self.session.commit()
        facility = self.session.query(Facility).\
            filter_by(npi=123454321).one()
        after = facility.last_updated
        self.assertTrue(after > b4)

    def testPerformingLab(self):
        self.commit_test_obj(PerformingLab(local_code='HFH'))
        query = self.session.query(PerformingLab)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().local_code,
                          'HFH')

    def testPrego(self):
        self.commit_test_obj(Pregnancy(result='Patient Currently Pregnant'))
        query = self.session.query(Pregnancy)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().result,
                          'Patient Currently Pregnant')

    def testRace(self):
        self.commit_test_obj(Race(race='Native Hawaiian or Other '
                                  'Pacific Islander'))
        query = self.session.query(Race)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().race,
                          'Native Hawaiian or Other Pacific Islander')

    def testServiceArea(self):
        self.commit_test_obj(ServiceArea(area='obstetrics'))
        query = self.session.query(ServiceArea)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().area,
                          'obstetrics')

    def testSpecimenSource(self):
        self.commit_test_obj(SpecimenSource(source='PLEFLD'))
        query = self.session.query(SpecimenSource)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().source, 'PLEFLD')

    def testFluVaccine(self):
        self.commit_test_obj(FluVaccine(status='Not Specified'))
        query = self.session.query(FluVaccine)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().status, 'Not Specified')

    def testH1N1Vaccine(self):
        self.commit_test_obj(H1N1Vaccine(status='Not Applicable (Age<18)'))
        query = self.session.query(H1N1Vaccine)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().status, 'Not Applicable (Age<18)')

    def testVisit(self):
        "Test with minimal required fields set"
        self.commit_test_obj(Facility(county='NEAR', npi=123454321,
                                      zip='99999',
                                      organization_name='Nearby Medical '
                                      'Center', local_code='NMC'))
        # NB: month/day written as plain 1, not the old 01 literals, which
        # are a SyntaxError on Python 3 (and identical in value on Python 2)
        kw = {
            'visit_id': '284999^^^&650903.98473.0179.6039.1.333.1&ISO',
            'patient_class': 'E',
            'patient_id': '156999^^^&650903.98473.0179.6039.1.333.1&ISO',
            'admit_datetime': datetime.datetime(2007, 1, 1),
            'first_message': datetime.datetime(2007, 1, 1),
            'last_message': datetime.datetime(2007, 1, 1),
            'dim_facility_pk': 123454321}
        self.commit_test_obj(Visit(**kw))
        query = self.session.query(Visit)
        self.assertEquals(1, query.count())
        self.assertEquals(query.first().ever_in_icu, False)
# Allow running this test module directly.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
|
pbugni/pheme.longitudinal
|
pheme/longitudinal/tests/test_tables.py
|
Python
|
bsd-3-clause
| 12,281
|
[
"VisIt"
] |
f007fcaa0d2536f9b3bad7f9de47ced8ad6cd79da31c85e5d0175c2fadd8450b
|
from neuron import h
from nrn import *

# h is NEURON's HOC interpreter object; keep the alias used throughout.
load_hoc_obj=h
#self.hoc_obj.execute('load_file("'+str(self.model)+'")')
#self.hoc_obj.execute('load_file("stdrun.hoc")')
# Load the model definition, then NEURON's standard run system.
load_hoc_obj.load_file(str("hh_pas.hoc"))
load_hoc_obj.load_file("stdrun.hoc")
#self.vec=h.Vector
# Module-level HOC Vector; presumably consumed by whichever module imports
# this loader -- TODO confirm against callers.
load_vec=load_hoc_obj.Vector()
|
KaliLab/optimizer
|
optimizer/new_test_files/hh_pas_surrogate/hoc_load.py
|
Python
|
lgpl-2.1
| 331
|
[
"NEURON"
] |
f907b49322b6bd02a2e4c3da5351dab8fbbd3c128cad48caafad2d6405818c55
|
#!/usr/bin/env python
import argparse
import json
import logging
import os
import eutils
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
    # BUG FIX: the description used to say 'EFetch' (copy/paste from the
    # efetch wrapper); this script drives NCBI ELink.
    parser = argparse.ArgumentParser(description='ELink', epilog='')
    parser.add_argument('db', help='Database to use, sometimes "none" (e.g. *check)')
    parser.add_argument('dbfrom', help='Database containing input UIDs')
    parser.add_argument('cmd', choices=['neighbor', 'neighbor_score',
                        'neighbor_history', 'acheck', 'ncheck', 'lcheck',
                        'llinks', 'llinkslib', 'prlinks'],
                        help='ELink command mode')
    parser.add_argument('--version', action='version', version=eutils.Client.getVersion(), help='Version (reports Biopython version)')
    parser.add_argument('--user_email', help="User email")
    parser.add_argument('--admin_email', help="Admin email")
    # ID Sources
    parser.add_argument('--id_xml', help='list of ids in an xml file as returned by esearch or elink')
    parser.add_argument('--id_json', help='list of ids in a json file as returned by esearch or elink')
    parser.add_argument('--id_list', help='list of ids')
    parser.add_argument('--id', help='Comma separated individual IDs')
    parser.add_argument('--history_file', help='Fetch results from previous query')
    parser.add_argument('--history_xml', help='Fetch results from previous query')
    # Optional
    parser.add_argument('--linkname', help='Restrict results to a specific link source')
    parser.add_argument('--retmode', choices=['xml', 'json', 'uilist'], help='Output format')
    # TODO: dates, linkname, term, holding
    # neighbor or neighbor_history and dbfrom is pubmed
    # parser.add_argument('--datetype', help='Date type')
    # parser.add_argument('--reldate', help='In past N days')
    # parser.add_argument('--mindate', help='Minimum date')
    # parser.add_argument('--maxdate', help='maximum date')
    # Output
    args = parser.parse_args()
    c = eutils.Client(history_file=args.history_file, user_email=args.user_email, admin_email=args.admin_email)
    # Base request shared by every ELink call below.
    payload = {
        'dbfrom': args.dbfrom,
        'cmd': args.cmd,
    }
    # DB can be 'none' in a few cases.
    if args.db != "none":
        payload['db'] = args.db
    if args.linkname is not None:
        payload['linkname'] = args.linkname
    results = []
    qkeys = []
    if args.history_file is not None or args.history_xml is not None:
        payload['retmode'] = args.retmode
        if args.history_file is not None:
            input_histories = c.get_histories()
        else:
            input_histories = c.extract_histories_from_xml_file(args.history_xml)
        for hist in input_histories:
            qkeys += [hist['query_key']]
            # BUG FIX: 'tmp_payload = payload' aliased the shared dict, so
            # history keys from one iteration leaked into the request of the
            # next.  Take a fresh copy per iteration instead.
            tmp_payload = dict(payload)
            tmp_payload.update(hist)
            results += [c.link(**tmp_payload)]
    else:
        # There is no uilist retmode in ELink; fetch xml and convert below.
        if args.retmode == "uilist":
            payload['retmode'] = 'xml'
        else:
            payload['retmode'] = args.retmode
        merged_ids = c.parse_ids(args.id_list, args.id, args.history_file, args.id_xml, args.id_json)
        payload['id'] = ','.join(merged_ids)
        qkeys += [1]
        results += [c.link(**payload)]
    # There could be multiple sets of results if a history was supplied
    if args.history_file is not None or args.history_xml is not None:
        # Multiple result sets can be returned.
        # Create a directory for the output files.
        current_directory = os.getcwd()
        final_directory = os.path.join(current_directory, r'downloads')
        if not os.path.exists(final_directory):
            os.makedirs(final_directory)
        logging.info("Writing files:")
        # When rettype is uilist, convert to text format (which elink does not do)
        count = 0
        if args.retmode == 'uilist':
            for result in results:
                qkey = qkeys[count]
                count += 1
                ids = c.xmlstring2UIlist(result)
                file_path = os.path.join('downloads', '%s-querykey%s.tabular' % (args.db, qkey))
                logging.info('%s.tabular' % (args.db))
                with open(file_path, 'w') as handle:
                    for id in ids:
                        handle.write(id)
                        handle.write(os.linesep)
        elif args.retmode == 'json':
            for result in results:
                qkey = qkeys[count]
                count += 1
                file_path = os.path.join('downloads', '%s-querykey%s.json' % (args.db, qkey))
                logging.info('%s-link%s.json' % (args.db, count))
                with open(file_path, 'w') as handle:
                    json_data = c.jsonstring2jsondata(result)
                    handle.write(json.dumps(json_data, indent=4))
        else:
            for result in results:
                qkey = qkeys[count]
                count += 1
                file_path = os.path.join('downloads', '%s-querykey%s.xml' % (args.db, qkey))
                logging.info('%s-link%s.xml' % (args.db, count))
                with open(file_path, 'w') as handle:
                    handle.write(result)
    else:
        # When rettype is uilist, convert to text format (which elink does not do)
        if args.retmode == 'uilist':
            ids = c.xmlstring2UIlist(results[0])
            for id in ids:
                print(id)
        elif args.retmode == 'json':
            json_data = c.jsonstring2jsondata(results[0])
            print(json.dumps(json_data, indent=4))
        else:
            print(results[0])
|
galaxyproject/tools-iuc
|
tools/ncbi_entrez_eutils/elink.py
|
Python
|
mit
| 5,682
|
[
"Biopython"
] |
fd642e451798238ec51bc1032dbd009ee6dceabfd0d6fd35dbed97f1974c4ef2
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division, with_statement
import datetime
import functools
import locale
import os
import platform
import sys
import time
import urllib2
import warnings
from vistrails.core import debug
from vistrails.core.utils import unimplemented, VistrailsDeprecation, Chdir
###############################################################################
from common import *
def with_c_locale(func):
    """Decorator forcing the C LC_TIME locale around *func*.

    The previous LC_TIME locale is restored afterwards, even when *func*
    raises.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        saved = locale.setlocale(locale.LC_TIME)
        locale.setlocale(locale.LC_TIME, 'C')
        try:
            return func(*args, **kwargs)
        finally:
            locale.setlocale(locale.LC_TIME, saved)
    return wrapper
@with_c_locale
def strptime(*args, **kwargs):
    """datetime.strptime pinned to the C locale.

    Date strings are used internally in the database and therefore must
    not be localized.
    """
    return datetime.datetime.strptime(*args, **kwargs)
@with_c_locale
def time_strptime(*args, **kwargs):
    """time.strptime pinned to the C locale.

    Date strings are used internally in the database and therefore must
    not be localized.
    """
    return time.strptime(*args, **kwargs)
@with_c_locale
def strftime(dt, *args, **kwargs):
    """strftime pinned to the C locale.

    Accepts either an object exposing a strftime method (datetime/date)
    or a format string plus time-tuple arguments for time.strftime.
    Internal date strings must not be localized.
    """
    if hasattr(dt, 'strftime'):
        return dt.strftime(*args, **kwargs)
    return time.strftime(dt, *args, **kwargs)
##############################################################################
# Pull in the platform-specific helpers (e.g. home_directory and
# execute_cmdline, used further down in this module); only Windows, Linux
# and macOS (Darwin) are supported -- anything else aborts.
systemType = platform.system()
if systemType in ['Windows', 'Microsoft']:
    from vistrails.core.system.windows import *
elif systemType in ['Linux']:
    from vistrails.core.system.linux import *
elif systemType in ['Darwin']:
    from vistrails.core.system.osx import *
else:
    debug.critical("VisTrails could not detect your operating system.")
    sys.exit(1)
###############################################################################
# Makes sure root directory is sensible.
# When executed as a script, derive the location from argv[0]; otherwise
# from this module's __file__.
if __name__ == '__main__':
    _thisDir = sys.argv[0]
else:
    _thisDir = sys.modules[__name__].__file__
_thisDir = os.path.split(_thisDir)[0]
# Root is two levels above this file (…/core/system -> package root).
__rootDir = os.path.realpath(os.path.join(_thisDir,
                                          '..',
                                          '..'))
__dataDir = os.path.realpath(os.path.join(__rootDir,
                                          'data'))
__fileDir = os.path.realpath(os.path.join(__rootDir,
                                          '..','examples'))
if systemType in ['Darwin'] and not os.path.exists(__fileDir):
    # Assume we are running from py2app; the examples live several
    # directories up in the app bundle.
    __fileDir = os.path.realpath(os.path.join(__rootDir,
                                              '/'.join(['..']*6),'examples'))
__examplesDir = __fileDir
# Default file extension used when the user does not provide one.
__defaultFileType = '.vt'
# Namespace prefix for identifiers of builtin packages.
_defaultPkgPrefix = 'org.vistrails.vistrails'
def get_vistrails_default_pkg_prefix():
    """Return the namespace under which builtin package identifiers live.

    Deprecated: only useful to expand short names of builtin packages in
    parse_descriptor_string; do not use elsewhere.
    """
    warnings.warn("get_vistrails_default_pkg_prefix() is deprecated",
                  category=VistrailsDeprecation)
    return _defaultPkgPrefix
def get_vistrails_basic_pkg_id():
    """Return the identifier of the builtin 'basic' package."""
    return _defaultPkgPrefix + ".basic"
def get_vistrails_directory(config_key, conf=None):
    """Resolve a configured directory, or None when *config_key* is unset.

    Relative paths are interpreted relative to the per-user .vistrails
    directory.
    """
    if conf is None:
        from vistrails.core.configuration import get_vistrails_configuration
        conf = get_vistrails_configuration()
    if not conf.has_deep_value(config_key):
        return None
    d = conf.get_deep_value(config_key)
    if os.path.isabs(d):
        return d
    return os.path.join(current_dot_vistrails(conf), d)
def set_vistrails_data_directory(d):
    """Set the VisTrails data directory, expanding ~ and env variables."""
    global __dataDir
    expanded = os.path.expanduser(d)
    # Expand environment variables to a fixed point (values may nest).
    while True:
        candidate = os.path.expandvars(expanded)
        if candidate == expanded:
            break
        expanded = candidate
    __dataDir = os.path.realpath(expanded)
def set_vistrails_file_directory(d):
    """Set the VisTrails file directory, expanding ~ and env variables."""
    global __fileDir
    expanded = os.path.expanduser(d)
    # Expand environment variables to a fixed point (values may nest).
    while True:
        candidate = os.path.expandvars(expanded)
        if candidate == expanded:
            break
        expanded = candidate
    __fileDir = os.path.realpath(expanded)
def set_vistrails_root_directory(d):
    """Set the VisTrails root directory, expanding ~ and env variables."""
    global __rootDir
    expanded = os.path.expanduser(d)
    # Expand environment variables to a fixed point (values may nest).
    while True:
        candidate = os.path.expandvars(expanded)
        if candidate == expanded:
            break
        expanded = candidate
    __rootDir = os.path.realpath(expanded)
def set_vistrails_default_file_type(t):
    """Set the extension used when the user gives none.

    Only '.vt' and '.xml' are accepted (case-insensitive); anything else
    falls back to '.vt'.
    """
    global __defaultFileType
    t = t.lower()
    __defaultFileType = t if t in ('.vt', '.xml') else '.vt'
def vistrails_root_directory():
    """Return the VisTrails root directory (realpath)."""
    return __rootDir
def vistrails_file_directory():
    """Return the current VisTrails file directory (realpath)."""
    return __fileDir
def vistrails_examples_directory():
    """Return the VisTrails examples directory (realpath)."""
    return __examplesDir
def vistrails_data_directory():
    """Return the VisTrails data directory (realpath)."""
    return __dataDir
def vistrails_default_file_type():
    """Return the default file extension ('.vt' or '.xml')."""
    return __defaultFileType
def packages_directory():
    """Return the directory holding the builtin VisTrails packages."""
    root = vistrails_root_directory()
    return os.path.join(root, 'packages')
def blank_vistrail_file():
    # Placeholder: delegates to unimplemented() (from core.utils), which
    # flags this function as not implemented yet.
    unimplemented()
def resource_directory():
    """Return the directory containing the GUI resources."""
    root = vistrails_root_directory()
    return os.path.join(root, 'gui', 'resources')
def default_options_file():
    """Return the path of the default per-user options file (.vistrailsrc)."""
    return os.path.join(home_directory(), ".vistrailsrc")
def default_dot_vistrails():
    """Return the default per-user VisTrails directory (.vistrails)."""
    return os.path.join(home_directory(), '.vistrails')
def current_dot_vistrails(conf=None):
    """Return the per-user VisTrails directory from the configuration.

    Loads the global configuration when *conf* is not supplied.
    """
    if conf is None:
        from vistrails.core.configuration import get_vistrails_configuration
        conf = get_vistrails_configuration()
    return conf.dotVistrails
def default_connections_file():
    """Return the per-user database connections file (connections.xml)."""
    return os.path.join(current_dot_vistrails(), 'connections.xml')
VERSION = '2.x'


def vistrails_version():
    """Return the current VisTrails version string.

    Historical versions: 0.1 Vis2005, 0.2 SIGMOD demo, 0.3 plugin/vtk,
    0.4 cleaned-up GUI version, 1.0 new schema.
    """
    return VERSION
def get_latest_vistrails_version():
    """Return the latest released VisTrails version queried from
    vistrails.org, or '' when the query fails."""
    version = ''
    version_url = \
        "http://www.vistrails.org/download/download.php?id=release_version.txt"
    try:
        request = urllib2.Request(version_url)
        get_latest_version = urllib2.urlopen(request)
        version = get_latest_version.read().strip()
    # BUG FIX: only HTTPError was caught before, so plain network failures
    # (DNS, refused connection) crashed the update check.  URLError is the
    # parent of HTTPError and covers both.  Also replaces the Python-2-only
    # 'except X, err' syntax with 'except X as err'.
    except urllib2.URLError as err:
        debug.warning("Unable to check for updates: %s" % str(err))
        return version
    return version
def new_vistrails_release_exists():
    """ new_vistrail_release_exists() -> (bool, str)
    Returns (True, new_version_str) if newer version exists
    """
    # NOTE(review): vistrails_version() currently returns '2.x', for which
    # int('x') raises ValueError -- confirm the intended VERSION format.
    local_version = [int(x) for x in vistrails_version().split('.')]
    remote_str = get_latest_vistrails_version()
    if remote_str:
        remote_version = [int(x) for x in remote_str.split('.')]
    else:
        remote_version = [0]
    # BUG FIX: the original used "cmp(a, b) is -1" -- cmp() is Python-2-only
    # and identity comparison with an int literal relies on CPython int
    # interning.  Lexicographic list comparison is equivalent.
    if local_version < remote_version:
        return (True, remote_str)
    return (False, None)
def vistrails_revision():
    """vistrails_revision() -> str

    When run from a git working copy, return the current revision from
    'git describe'; otherwise fall back to the release version string.
    """
    git_dir = os.path.join(vistrails_root_directory(), '..')
    with Chdir(git_dir):
        release = vistrails_version()
        import vistrails.core.requirements
        if vistrails.core.requirements.executable_file_exists('git'):
            lines = []
            result = execute_cmdline(['git', 'describe', '--always'], lines)
            # Use the git output only when the command succeeded and
            # produced exactly one line.
            if len(lines) == 1 and result == 0:
                release = lines[0].strip(" \n")
        return release
_registry = None


def get_module_registry():
    """Return the global module registry, importing and caching it lazily."""
    global _registry
    if _registry is None:
        from vistrails.core.modules.module_registry import \
            get_module_registry as _load_registry
        _registry = _load_registry()
    return _registry
def short_about_string():
    """Return the one-line VisTrails version/contact string."""
    version = vistrails_version()
    revision = vistrails_revision()
    return """VisTrails version %s (%s) -- contact@vistrails.org""" % \
        (version, revision)
def about_string():
    """about_string() -> string - Returns the about string for VisTrails."""
    # The two %s slots receive the version and the git revision; the rest
    # is the BSD license text displayed verbatim in the About dialog.
    return """VisTrails version %s (%s) -- contact@vistrails.org
Copyright (C) 2014-2016 New York University. Copyright (C) 2011-2014 NYU-Poly.
Copyright (C) 2006-2011 University of Utah.
All rights reserved.
http://www.vistrails.org
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright
 notice, this list of conditions and the following disclaimer in the
 documentation and/or other materials provided with the distribution.
 * Neither the name of the New York University nor the
 names of its contributors may be used to endorse or promote products
 derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" % (vistrails_version(),
                                                         vistrails_revision())
###############################################################################

import unittest


class TestSystem(unittest.TestCase):

    def test_vistrails_revision(self):
        """vistrails_revision() must not depend on the current directory."""
        r = vistrails_root_directory()
        with Chdir(r):
            v1 = vistrails_revision()
        try:
            with Chdir(os.path.join(r, '..')):
                self.assertEqual(v1, vistrails_revision())
        except AssertionError:
            raise
        except Exception:
            # Directory may not exist / not be a git checkout; best effort.
            pass
        try:
            with Chdir(os.path.join(r, '..', '..')):
                self.assertEqual(v1, vistrails_revision())
        except AssertionError:
            raise
        except Exception:
            pass


# BUG FIX: unittest.main() used to be invoked *before* TestSystem was
# defined, so running this module as a script collected no tests.  The
# guard must come after the class definition.
if __name__ == '__main__':
    unittest.main()
|
minesense/VisTrails
|
vistrails/core/system/__init__.py
|
Python
|
bsd-3-clause
| 14,635
|
[
"VTK"
] |
f8047cdfa55a7575fffe8b7d090b30223d18a40fc7b3d51ad61a188214ecd6f1
|
"""Unit tests for the VTK io library"""
# Copyright (C) 2011 Garth N. Wells
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2011-05-18
# Last changed:
import unittest
from dolfin import *
# VTK file options
file_options = ["ascii", "base64", "compressed"]
# All MeshFunction factory classes exercised by the tests, and the value
# types each of them supports.
mesh_functions = [CellFunction, FacetFunction, FaceFunction, EdgeFunction, VertexFunction]
mesh_function_types = ["size_t", "int", "double", "bool"]
# Map from type-name string to the Python constructor used to build the
# fill value passed to the mesh-function factory.
type_conv = dict(size_t=int, int=int, double=float, bool=bool)
class VTK_MeshFunction_Output(unittest.TestCase):
    """Test output of MeshFunctions to VTK files"""

    def _write_meshfunctions(self, mesh, skip=()):
        # Write every mesh-function kind/value-type over `mesh` to a .pvd
        # file, without and with a time stamp, and once per encoding option.
        for F in mesh_functions:
            if F in skip:
                continue
            for t in mesh_function_types:
                mf = F(t, mesh, type_conv[t](1))
                File("mf.pvd") << mf
                f = File("mf.pvd")
                f << (mf, 0.)
                f << (mf, 1.)
                for file_option in file_options:
                    File("mf.pvd", file_option) << mf

    def test_save_1d_meshfunctions(self):
        # Face/edge functions are not meaningful on interval meshes.
        self._write_meshfunctions(UnitIntervalMesh(32),
                                  skip=(FaceFunction, EdgeFunction))

    def test_save_2d_meshfunctions(self):
        self._write_meshfunctions(UnitSquareMesh(32, 32))

    def test_save_3d_meshfunctions(self):
        self._write_meshfunctions(UnitCubeMesh(8, 8, 8))
class VTK_Mesh_Output(unittest.TestCase):
    """Test output of Meshes to VTK files"""

    def _write_mesh(self, mesh):
        # Write `mesh` to a .pvd file, without and with a time stamp, and
        # once per encoding option.
        File("mesh.pvd") << mesh
        f = File("mesh.pvd")
        f << (mesh, 0.)
        f << (mesh, 1.)
        for file_option in file_options:
            File("mesh.pvd", file_option) << mesh

    def test_save_1d_mesh(self):
        self._write_mesh(UnitIntervalMesh(32))

    def test_save_2d_mesh(self):
        self._write_mesh(UnitSquareMesh(32, 32))

    def test_save_3d_mesh(self):
        self._write_mesh(UnitCubeMesh(8, 8, 8))
class VTK_Point_Function_Output(unittest.TestCase):
    """Test output of point-based Functions to VTK files"""

    def _write_function(self, u):
        # Fill the function with ones, then write it to a .pvd file without
        # and with a time stamp, and once per encoding option.
        u.vector()[:] = 1.0
        File("u.pvd") << u
        f = File("u.pvd")
        f << (u, 0.)
        f << (u, 1.)
        for file_option in file_options:
            File("u.pvd", file_option) << u

    def test_save_1d_scalar(self):
        mesh = UnitIntervalMesh(32)
        self._write_function(Function(FunctionSpace(mesh, "Lagrange", 2)))

    def test_save_2d_scalar(self):
        mesh = UnitSquareMesh(16, 16)
        self._write_function(Function(FunctionSpace(mesh, "Lagrange", 2)))

    def test_save_3d_scalar(self):
        mesh = UnitCubeMesh(8, 8, 8)
        self._write_function(Function(FunctionSpace(mesh, "Lagrange", 2)))

    # FFC fails for vector spaces in 1D
    #def test_save_1d_vector(self):
    #    if MPI.size() == 1:
    #        mesh = UnitIntervalMesh(32)
    #        u = Function(VectorFunctionSpace(mesh, "Lagrange", 2))
    #        u.vector()[:] = 1.0
    #        File("u.pvd") << u
    #        for file_option in file_options:
    #            File("u.pvd", file_option) << u

    def test_save_2d_vector(self):
        mesh = UnitSquareMesh(16, 16)
        self._write_function(Function(VectorFunctionSpace(mesh, "Lagrange", 2)))

    def test_save_3d_vector(self):
        mesh = UnitCubeMesh(8, 8, 8)
        self._write_function(Function(VectorFunctionSpace(mesh, "Lagrange", 2)))

    # FFC fails for tensor spaces in 1D
    #def test_save_1d_tensor(self):
    #    if MPI.size() == 1:
    #        mesh = UnitIntervalMesh(32)
    #        u = Function(TensorFunctionSpace(mesh, "Lagrange", 2))
    #        u.vector()[:] = 1.0
    #        File("u.pvd") << u
    #        for file_option in file_options:
    #            File("u.pvd", file_option) << u

    def test_save_2d_tensor(self):
        mesh = UnitSquareMesh(16, 16)
        self._write_function(Function(TensorFunctionSpace(mesh, "Lagrange", 2)))

    def test_save_3d_tensor(self):
        mesh = UnitCubeMesh(8, 8, 8)
        self._write_function(Function(TensorFunctionSpace(mesh, "Lagrange", 2)))
# Run the VTK io unit tests when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
akshmakov/Dolfin-Fijee-Fork
|
test/unit/io/python/vtk.py
|
Python
|
lgpl-3.0
| 6,797
|
[
"VTK"
] |
5be5135dcf328b11ed04894c3c4e48f6e817aff35724f5d18ad88ed72316dea3
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to identify optimal substrates for film growth
"""
import warnings
# Re-exported for backward compatibility; the implementations now live in
# pymatgen.analysis.interfaces.
from pymatgen.analysis.interfaces import SubstrateAnalyzer, ZSLGenerator # noqa
__author__ = "Shyam Dwaraknath"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Production"
__date__ = "Feb, 2016"
# Importing this module immediately warns callers that the whole module is
# deprecated; stacklevel=2 points the warning at the importing code.
warnings.warn(
    "The substrate_analyzer module is being moved to the interfaces submodule in analysis."
    " These imports will break in Pymatgen 2023",
    category=FutureWarning,
    stacklevel=2,
)
|
materialsproject/pymatgen
|
pymatgen/analysis/substrate_analyzer.py
|
Python
|
mit
| 707
|
[
"pymatgen"
] |
2f1daf6029c67a1cae685976d2e04bd52f23885ed9790af11ea06e3915dfbc60
|
from __future__ import print_function
import logging
from datetime import datetime
import barotropic
import interpolation as interp
import numpy as np
from netCDF4 import Dataset, date2num
import IOinitial
import IOsubset
import IOwrite
import datetimeFunctions
import forcingFilenames as fc
import interp2D
try:
import ESMF
except ImportError:
print("Could not find module ESMF")
pass
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@niva.no'
__created__ = datetime(2008, 8, 15)
__modified__ = datetime(2021, 3, 23)
__version__ = "1.8"
__status__ = "Development, modified on 15.08.2008,01.10.2009,07.01.2010, " \
"15.07.2014, 01.12.2014, 07.08.2015, " \
"08.02.2018, 04.03.2019, 13.03.2019, 23.03.2021"
def vertical_interpolation(myvar, array1, array2, grdROMS, grdMODEL):
    """Vertically interpolate fields from the source model levels to ROMS s-levels.

    For tracer variables only *array1* is used and a single masked cube is
    returned.  For 'vvel', *array1*/*array2* are the u/v components and the
    tuple (outdataU, outdataV, outdataUBAR, outdataVBAR) is returned, where
    the BAR fields are the vertically averaged (barotropic) components.
    """
    outINDEX_ST = (grdROMS.nlevels, grdROMS.eta_rho, grdROMS.xi_rho)
    outINDEX_U = (grdROMS.nlevels, grdROMS.eta_u, grdROMS.xi_u)
    outINDEX_UBAR = (grdROMS.eta_u, grdROMS.xi_u)
    outINDEX_V = (grdROMS.nlevels, grdROMS.eta_v, grdROMS.xi_v)
    outINDEX_VBAR = (grdROMS.eta_v, grdROMS.xi_v)
    if myvar in ['salinity', 'temperature', 'O3_c', 'O3_TA', 'N1_p', 'N3_n', 'N5_s', 'O2_o']:
        logging.info(
            'Start vertical interpolation for {} (dimensions={} x {})'.format(myvar, grdROMS.xi_rho, grdROMS.eta_rho))
        # np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24; use float directly.
        outdata = np.empty((outINDEX_ST), dtype=float, order='F')
        outdata = interp.interpolation.dovertinter(np.asarray(outdata, order='F'),
                                                   np.asarray(array1, order='F'),
                                                   np.asarray(grdROMS.h, order='F'),
                                                   np.asarray(grdROMS.z_r, order='F'),
                                                   np.asarray(grdMODEL.z_r, order='F'),
                                                   int(grdROMS.nlevels),
                                                   int(grdMODEL.nlevels),
                                                   int(grdROMS.xi_rho),
                                                   int(grdROMS.eta_rho),
                                                   int(grdROMS.xi_rho),
                                                   int(grdROMS.eta_rho))
        outdata = np.ma.masked_where(abs(outdata) > 1000, outdata)
        # The BGC fields have to be capped at 0.
        # NOTE(review): 'N3_p' here is not in the tracer list above
        # ('N1_p'/'N3_n' are) -- possibly a typo; confirm against config.
        if myvar in ['O3_c', 'O3_TA', 'N1_p', 'N3_p', 'N3_n', 'N5_s', 'O2_o']:
            # BUG FIX: the original masked where abs(outdata) < 0, which is
            # never true, so negative concentrations were never masked.
            outdata = np.ma.masked_where(outdata < 0, outdata)
        return outdata
    if myvar == 'vvel':
        logging.info('Start vertical interpolation for uvel (dimensions={} x {})'.format(grdROMS.xi_u, grdROMS.eta_u))
        outdataU = np.zeros((outINDEX_U), dtype=float)
        outdataUBAR = np.zeros((outINDEX_UBAR), dtype=float)
        outdataU = interp.interpolation.dovertinter(np.asarray(outdataU, order='F'),
                                                    np.asarray(array1, order='F'),
                                                    np.asarray(grdROMS.h, order='F'),
                                                    np.asarray(grdROMS.z_r, order='F'),
                                                    np.asarray(grdMODEL.z_r, order='F'),
                                                    int(grdROMS.nlevels),
                                                    int(grdMODEL.nlevels),
                                                    int(grdROMS.xi_u),
                                                    int(grdROMS.eta_u),
                                                    int(grdROMS.xi_rho),
                                                    int(grdROMS.eta_rho))
        outdataU = np.ma.masked_where(abs(outdataU) > 1000, outdataU)
        logging.info('Start vertical interpolation for vvel (dimensions={} x {})'.format(grdROMS.xi_v, grdROMS.eta_v))
        outdataV = np.zeros((outINDEX_V), dtype=float)
        outdataVBAR = np.zeros((outINDEX_VBAR), dtype=float)
        outdataV = interp.interpolation.dovertinter(np.asarray(outdataV, order='F'),
                                                    np.asarray(array2, order='F'),
                                                    np.asarray(grdROMS.h, order='F'),
                                                    np.asarray(grdROMS.z_r, order='F'),
                                                    np.asarray(grdMODEL.z_r, order='F'),
                                                    int(grdROMS.nlevels),
                                                    int(grdMODEL.nlevels),
                                                    int(grdROMS.xi_v),
                                                    int(grdROMS.eta_v),
                                                    int(grdROMS.xi_rho),
                                                    int(grdROMS.eta_rho))
        outdataV = np.ma.masked_where(abs(outdataV) > 1000, outdataV)
        # Scratch arrays for the layer interfaces used by the barotropic
        # (depth-averaged) velocity helpers.
        z_wu = np.zeros((grdROMS.nlevels + 1, grdROMS.eta_u, grdROMS.xi_u), dtype=float)
        z_wv = np.zeros((grdROMS.nlevels + 1, grdROMS.eta_v, grdROMS.xi_v), dtype=float)
        outdataUBAR = barotropic.velocity.ubar(np.asarray(outdataU, order='F'),
                                               np.asarray(outdataUBAR, order='F'),
                                               np.asarray(grdROMS.z_w, order='F'),
                                               np.asarray(z_wu, order='F'),
                                               grdROMS.nlevels,
                                               grdROMS.xi_u,
                                               grdROMS.eta_u,
                                               grdROMS.xi_rho,
                                               grdROMS.eta_rho)
        outdataUBAR = np.ma.masked_where(abs(outdataUBAR) > 1000, outdataUBAR)
        outdataVBAR = barotropic.velocity.vbar(np.asarray(outdataV, order='F'),
                                               np.asarray(outdataVBAR, order='F'),
                                               np.asarray(grdROMS.z_w, order='F'),
                                               np.asarray(z_wv, order='F'),
                                               grdROMS.nlevels,
                                               grdROMS.xi_v,
                                               grdROMS.eta_v,
                                               grdROMS.xi_rho,
                                               grdROMS.eta_rho)
        outdataVBAR = np.ma.masked_where(abs(outdataVBAR) > 1000, outdataVBAR)
        return outdataU, outdataV, outdataUBAR, outdataVBAR
def rotate(grdROMS, grdMODEL, data, u, v):
    """
    First rotate the values of U, V at rho points with the angle, and then interpolate
    the rho point values to U and V points and save the result.

    Note: 'data' is accepted for signature compatibility but unused here.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    urot = np.zeros((int(grdMODEL.nlevels), int(grdROMS.eta_rho), int(grdROMS.xi_rho)), float)
    vrot = np.zeros((int(grdMODEL.nlevels), int(grdROMS.eta_rho), int(grdROMS.xi_rho)), float)
    urot, vrot = interp.interpolation.rotate(np.asarray(urot, order='F'),
                                             np.asarray(vrot, order='F'),
                                             np.asarray(u, order='F'),
                                             np.asarray(v, order='F'),
                                             np.asarray(grdROMS.angle, order='F'),
                                             int(grdROMS.xi_rho),
                                             int(grdROMS.eta_rho),
                                             int(grdMODEL.nlevels))
    return urot, vrot
def interpolate2uv(grdROMS, grdMODEL, urot, vrot):
    """Move the rotated rho-point velocities onto the staggered U and V grids."""
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    Zu = np.zeros((int(grdMODEL.nlevels), int(grdROMS.eta_u), int(grdROMS.xi_u)), float)
    Zv = np.zeros((int(grdMODEL.nlevels), int(grdROMS.eta_v), int(grdROMS.xi_v)), float)
    # Interpolate from RHO points to U and V points for velocities
    Zu = interp.interpolation.rho2u(np.asarray(Zu, order='F'),
                                    np.asarray(urot, order='F'),
                                    int(grdROMS.xi_rho),
                                    int(grdROMS.eta_rho),
                                    int(grdMODEL.nlevels))
    Zv = interp.interpolation.rho2v(np.asarray(Zv, order='F'),
                                    np.asarray(vrot, order='F'),
                                    int(grdROMS.xi_rho),
                                    int(grdROMS.eta_rho),
                                    int(grdMODEL.nlevels))
    return Zu, Zv
def get_time(confM2R, year, month, day, ntime):
    """
    Create a date object to keep track of Julian dates etc.
    Also create a reference date starting at 1948/01/01.
    Go here to check results:http://lena.gsfc.nasa.gov/lenaDEV/html/doy_conv.html
    """
    # NOTE: 'ntime' is accepted but not used in this function body.
    # Pick the input file matching the configured source dataset.
    # NOTE(review): if ocean_indata_type matches none of these branches,
    # 'filename' is unbound and Dataset(filename) raises NameError.
    if confM2R.ocean_indata_type == 'SODA3':
        filename = fc.getSODA3filename(confM2R, year, month, day, None)
    if confM2R.ocean_indata_type == 'SODA3_5DAY':
        filename = fc.getSODA3_5DAYfilename(confM2R, year, month, day, None)
    if confM2R.ocean_indata_type == 'SODAMONTHLY':
        filename = fc.getSODAMONTHLYfilename(confM2R, year, month, None)
    if confM2R.ocean_indata_type == 'GLORYS':
        filename = fc.get_GLORYS_filename(confM2R, year, month, "So")
    if confM2R.ocean_indata_type == 'NORESM':
        filename = fc.getNORESMfilename(confM2R, year, month, "salnlvl")
    # Now open the input file and get the time
    cdf = Dataset(filename)
    # Reference time: days since 1948-01-01 in the standard calendar.
    jdref = date2num(datetime(1948, 1, 1),
                     units="days since 1948-01-01 00:00:00",
                     calendar="standard")
    if confM2R.ocean_indata_type == 'SODA3_5DAY':
        currentdate = datetime(year, month, day)
        units = confM2R.time_object.units
        jd = date2num(currentdate, units=confM2R.time_object.units, calendar=confM2R.time_object.calendar)
    else:
        # Find the day and month that the GLORYS file represents based on the year and ID number.
        # Each file represents a 1 month average.
        # calendar = cdf.variables["time"].calendar
        units = cdf.variables["time"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate, units="days since 1948-01-01 00:00:00", calendar="standard")
    # Store the time (in days relative to the 1948 reference) on the ROMS
    # grid object for later use by the writers.
    confM2R.grdROMS.time = (jd - jdref)
    confM2R.grdROMS.reftime = jdref
    confM2R.grdROMS.timeunits = "days since 1948-01-01 00:00:00"
    cdf.close()
    logging.info("-------------------------------")
    logging.info('Current time of {} file : {}'.format(confM2R.ocean_indata_type,
                                                       currentdate))
    logging.info("-------------------------------")
def get_3d_data(confM2R, varname, year, month, day, timecounter):
    """Read one 3D (depth, lat, lon) field for the given date from the input file.

    Arguments
    ---------
    confM2R : configuration object with grids, variable name mappings and settings
    varname : str
        Global (ROMS) variable name to read
    year, month, day : int
        Date of the current time step
    timecounter : int
        Index into the time dimension (used for NORESM multi-step files)

    Returns
    -------
    data : np.ndarray or None
        The extracted field with input fill values applied, or None if the
        input file could not be opened (original best-effort behavior).
    """
    varN = confM2R.global_varnames.index(varname)

    # The variable splitExtract is defined in IOsubset.py and depends on the orientation
    # and ocean_indata_type of grid (-180-180 or 0-360). Assumes regular grid.
    filename = fc.get_filename(confM2R, year, month, day, confM2R.input_varnames[varN])

    try:
        cdf = Dataset(filename)
    except Exception as err:
        # Keep the original log-and-return-None behavior, but avoid a bare
        # except and include the underlying error in the log message.
        logging.error("[M2R_model2roms] Unable to open input file {}: {}".format(filename, err))
        return

    if confM2R.ocean_indata_type == "SODA3":
        # SODA3: one file per year; select the slice for this month.
        data = cdf.variables[confM2R.input_varnames[varN]][month - 1, :, :, :]
        data = np.where(data.mask, confM2R.fillvaluein, data)
    elif confM2R.ocean_indata_type == "NORESM":
        # For NorESM data - all data is in one big file so we need the timecounter
        # to access the correct time step.
        myunits = cdf.variables[str(confM2R.input_varnames[varN])].units
        data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][timecounter, :, :, :])
        data = np.where(data.mask, confM2R.fillvaluein, data)
    elif confM2R.ocean_indata_type == "GLORYS":
        myunits = cdf.variables[str(confM2R.input_varnames[varN])].units
        data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][0, :, :, :])
        data = np.where(data.mask, confM2R.fillvaluein, data)
    else:
        # The original code fell through with 'data' unbound and crashed with
        # an opaque UnboundLocalError; fail with a clear message instead.
        cdf.close()
        raise ValueError(
            "get_3d_data does not support ocean_indata_type {}".format(confM2R.ocean_indata_type))
    cdf.close()

    # Convert Kelvin to Celsius for products that store temperature in K.
    if varname == 'temperature' and confM2R.ocean_indata_type in ["GLORYS", "NORESM"]:
        if myunits == "degree_Kelvin" or myunits == "K":
            if confM2R.ocean_indata_type in ["GLORYS"]:
                data = np.where(data <= -32767, confM2R.grdROMS.fillval, data)
            data = data - 273.15

    if confM2R.ocean_indata_type == "GLORYS":
        data = np.where(data <= -32767, confM2R.grdROMS.fillval, data)
        data = np.ma.masked_where(data <= confM2R.grdROMS.fillval, data)

    logging.debug('Data range of {} just after extracting from netcdf file: {:3.3f}-{:3.3f}'.format(
        str(confM2R.input_varnames[varN]),
        float(data.min()), float(data.max())))

    return data
def get_2d_data(confM2R, myvar, year, month, day, timecounter):
    """Read one 2D (lat, lon) field for the given date from the input file.

    Arguments
    ---------
    confM2R : configuration object with grids, variable name mappings and settings
    myvar : str
        Global (ROMS) variable name to read
    year, month, day : int
        Date of the current time step
    timecounter : int
        Index into the time dimension (used for NORESM multi-step files)

    Returns
    -------
    data : np.ndarray or None
        The 2D field; an array of zeros for ice variables when
        confM2R.set_2d_vars_to_zero is set; or None if the input file
        could not be opened (original best-effort behavior).
    """
    varN = confM2R.global_varnames.index(myvar)

    # Ice variables can be forced to zero (e.g. ice-free setup) without
    # reading any input file.
    if confM2R.set_2d_vars_to_zero and confM2R.input_varnames[varN] in ['ageice', 'uice',
                                                                       'vice',
                                                                       'aice',
                                                                       'hice',
                                                                       'hs']:
        return np.zeros((np.shape(confM2R.grdMODEL.lon)))

    filename = fc.get_filename(confM2R, year, month, day, confM2R.input_varnames[varN])
    try:
        cdf = Dataset(filename)
    except Exception as err:
        # Keep the original log-and-return-None behavior, but avoid a bare
        # except and include the underlying error in the log message.
        logging.error("[M2R_model2roms] Unable to open input file {}: {}".format(filename, err))
        return

    if confM2R.ocean_indata_type in ["SODA", "SODA3_5DAY"]:
        data = cdf.variables[confM2R.input_varnames[varN]][0, :, :]
    elif confM2R.ocean_indata_type == "SODA3":
        if myvar == 'aice':
            # We only extract the first thickness concentration. Need to fix this so all 5 classes can be extracted.
            # http://www.atmos.umd.edu/~ocean/index_files/soda3_readme.htm
            # hi: sea ice thickness [m ice]
            # mi: sea ice mass [kg/m^2]
            # hs: snow thickness [m snow]
            # {cn1,cn2,cn3,cn4,cn5}: sea ice concentration [0:1] in five ice thickness classes
            data = cdf.variables[confM2R.input_varnames[varN]][int(month - 1), 0, :, :]
        else:
            data = cdf.variables[confM2R.input_varnames[varN]][int(month - 1), :, :]
    elif confM2R.ocean_indata_type == "NORESM" and confM2R.set_2d_vars_to_zero is False:
        # For NORESM, 12 months of data are stored in the ice files;
        # use the time counter as month indicator to get the data.
        data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][timecounter, :, :])
        data = np.where(data.mask, confM2R.grdROMS.fillval, data)
    elif confM2R.ocean_indata_type == "GLORYS":
        data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][0, :, :])
        data = np.where(data.mask, confM2R.grdROMS.fillval, data)

    # BUGFIX: always close the dataset. The original only closed it when
    # set_2d_vars_to_zero was False, leaking the file handle for non-ice
    # variables whenever the flag was set.
    cdf.close()

    if __debug__ and not confM2R.set_2d_vars_to_zero:
        logging.info("[M2R_model2roms] Data range of {} just after extracting from netcdf "
                     "file: {:3.3f}-{:3.3f}".format(str(confM2R.input_varnames[varN]),
                                                    float(data.min()), float(data.max())))

    return data
def convert_MODEL2ROMS(confM2R):
    """Main driver: convert input model data to ROMS climatology/initial files.

    Loops over all configured years/months/days, reads the 2D and 3D input
    fields, interpolates them horizontally (ESMF weights) and vertically to
    the ROMS grid, masks land/out-of-range points, and writes climatology
    (and, at the configured init time, initial) files.

    Arguments
    ---------
    confM2R : configuration object
        Holds the grdROMS/grdMODEL grid objects, variable name mappings,
        time range and all other run settings.
    """
    # First opening of input file is just for initialization of grid
    filenamein = fc.get_filename(confM2R, confM2R.start_year, confM2R.start_month, confM2R.start_day, None)
    # Finalize creating the model grd object now that we know the filename for input data
    confM2R.grdMODEL.create_object(confM2R, filenamein)
    confM2R.grdMODEL.getdims()
    # Create the ESMF weights used to do all of the horizontal interpolation
    interp2D.setup_ESMF_interpolation_weights(confM2R)
    # Now we want to subset the data to avoid storing more information than we need.
    # We do this by finding the indices of maximum and minimum latitude and longitude in the matrixes
    if confM2R.subset_indata:
        IOsubset.find_subset_indices(confM2R.grdMODEL, min_lat=confM2R.subset[0], max_lat=confM2R.subset[1],
                                     min_lon=confM2R.subset[2], max_lon=confM2R.subset[3])
    logging.info("[M2R_model2roms] ==> Initializing done")
    logging.info("[M2R_model2roms] --------------------------")
    logging.info("[M2R_model2roms] ==> Starting loop over time")
    time_counter = 0
    first_run = True
    for year in confM2R.years:
        months = datetimeFunctions.create_list_of_months(confM2R, year)
        for month in months:
            days = datetimeFunctions.create_list_of_days(confM2R, year, month, first_run)
            for day in days:
                # Get the current date for given time-step
                get_time(confM2R, year, month, day, time_counter)
                # Each MODEL file consist only of one time step. Get the subset data selected, and
                # store that time step in a new array:
                if first_run:
                    logging.info("[M2R_model2roms] => NOTE! Make sure that these two arrays are in sequential order:")
                    logging.info("[M2R_model2roms] ==> myvars: {}".format(confM2R.input_varnames))
                    logging.info("[M2R_model2roms] ==> varNames {}".format(confM2R.global_varnames))
                    first_run = False
                    if confM2R.subset_indata:
                        # The first iteration we want to organize the subset indices we want to extract
                        # from the input data to get the interpolation correct and to function fast
                        IOsubset.organize_split(confM2R.grdMODEL, confM2R.grdROMS)
                for myvar in confM2R.global_varnames:
                    # 3D fields: ocean state and biogeochemistry
                    if myvar in ['temperature', 'salinity', 'uvel', 'vvel', 'O3_c', 'O3_TA', 'N1_p', 'N3_n', 'N5_s',
                                 'O2_o']:
                        data = get_3d_data(confM2R, myvar, year, month, day, time_counter)
                    # 2D fields: sea surface height and ice variables
                    if myvar in ['ssh', 'ageice', 'uice', 'vice', 'aice', 'hice', 'snow_thick']:
                        data = get_2d_data(confM2R, myvar, year, month, day, time_counter)
                    # Take the input data and horizontally interpolate to your grid
                    array1 = interp2D.do_hor_interpolation_regular_grid(confM2R, data, myvar)
                    if myvar in ['temperature', 'salinity', 'O3_c', 'O3_TA', 'N1_p', 'N3_n', 'N5_s', 'O2_o']:
                        # NOTE(review): array1 is passed twice here; vertical_interpolation
                        # presumably ignores the second field for scalar variables -- confirm.
                        STdata = vertical_interpolation(myvar, array1, array1, confM2R.grdROMS, confM2R.grdMODEL)
                        # Mask land points (mask_rho == 0) and out-of-range values with fillval
                        for dd in range(len(STdata[:, 0, 0])):
                            STdata[dd, :, :] = np.where(confM2R.grdROMS.mask_rho == 0, confM2R.grdROMS.fillval,
                                                        STdata[dd, :, :])
                        STdata = np.where(abs(STdata) > 1000, confM2R.grdROMS.fillval, STdata)
                        IOwrite.write_clim_file(confM2R, time_counter, myvar, STdata)
                        if time_counter == confM2R.grdROMS.inittime and confM2R.grdROMS.write_init is True:
                            IOinitial.create_init_file(confM2R, time_counter, myvar, STdata)
                    if myvar in ['ssh', 'ageice', 'aice', 'hice', 'snow_thick']:
                        SSHdata = array1[0, :, :]
                        SSHdata = np.where(confM2R.grdROMS.mask_rho == 0, confM2R.grdROMS.fillval, SSHdata)
                        SSHdata = np.where((abs(SSHdata) > 100) | (SSHdata == 0), confM2R.grdROMS.fillval, SSHdata)
                        # Specific for ROMS - we set 0 where we should have fillvalue for ice otherwise ROMS blows up.
                        SSHdata = np.where(abs(SSHdata) == confM2R.grdROMS.fillval, 0, SSHdata)
                        IOwrite.write_clim_file(confM2R, time_counter, myvar, SSHdata)
                        if time_counter == confM2R.grdROMS.inittime:
                            IOinitial.create_init_file(confM2R, time_counter, myvar, SSHdata)
                    # The following are special routines used to calculate the u and v velocity
                    # of ice based on the transport, which is divided by snow and ice thickenss
                    # and then multiplied by grid size in dx or dy direction (opposite of transport).
                    if myvar in ['uice', 'vice']:
                        SSHdata = array1[0, :, :]
                        if myvar == "uice":
                            mymask = confM2R.grdROMS.mask_u
                        if myvar == "vice":
                            mymask = confM2R.grdROMS.mask_v
                        SSHdata = np.where(mymask == 0, confM2R.grdROMS.fillval, SSHdata)
                        SSHdata = np.where((abs(SSHdata) > 100) | (SSHdata == 0), confM2R.grdROMS.fillval, SSHdata)
                        SSHdata = np.where(abs(SSHdata) == confM2R.grdROMS.fillval, 0, SSHdata)
                        IOwrite.write_clim_file(confM2R, time_counter, myvar, SSHdata)
                        if time_counter == confM2R.grdROMS.inittime:
                            if myvar in ['uice', 'vice']:
                                IOinitial.create_init_file(confM2R, time_counter, myvar, SSHdata)
                    if myvar == 'uvel':
                        # Stash the horizontally interpolated u field until v arrives,
                        # so the (u, v) pair can be rotated together below.
                        array2 = array1
                    if myvar == 'vvel':
                        # Rotate the (u, v) pair to the ROMS grid orientation and
                        # move values onto the staggered u/v points.
                        urot, vrot = rotate(confM2R.grdROMS, confM2R.grdMODEL, data, array2, array1)
                        u, v = interpolate2uv(confM2R.grdROMS, confM2R.grdMODEL, urot, vrot)
                        Udata, Vdata, UBARdata, VBARdata = vertical_interpolation(myvar, u, v, confM2R.grdROMS,
                                                                                  confM2R.grdMODEL)
                    if myvar == 'vvel':
                        # Mask land and out-of-range values for all velocity fields
                        Udata = np.where(confM2R.grdROMS.mask_u == 0, confM2R.grdROMS.fillval, Udata)
                        Udata = np.where(abs(Udata) > 1000, confM2R.grdROMS.fillval, Udata)
                        Vdata = np.where(confM2R.grdROMS.mask_v == 0, confM2R.grdROMS.fillval, Vdata)
                        Vdata = np.where(abs(Vdata) > 1000, confM2R.grdROMS.fillval, Vdata)
                        UBARdata = np.where(confM2R.grdROMS.mask_u == 0, confM2R.grdROMS.fillval, UBARdata)
                        UBARdata = np.where(abs(UBARdata) > 1000, confM2R.grdROMS.fillval, UBARdata)
                        VBARdata = np.where(confM2R.grdROMS.mask_v == 0, confM2R.grdROMS.fillval, VBARdata)
                        VBARdata = np.where(abs(VBARdata) > 1000, confM2R.grdROMS.fillval, VBARdata)
                        IOwrite.write_clim_file(confM2R, time_counter, myvar, Udata, Vdata, UBARdata, VBARdata)
                        if time_counter == confM2R.grdROMS.inittime:
                            IOinitial.create_init_file(confM2R, time_counter, myvar, Udata, Vdata, UBARdata, VBARdata)
                time_counter += 1
|
trondkr/model2roms
|
model2roms.py
|
Python
|
mit
| 23,882
|
[
"NetCDF"
] |
a4995442fc60bfb77e7fd60de771fdfd3d8539c80c1a4d2f1d7b54d445297fd9
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005,2006 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""
Database exceptions
This is just a layer on top of the Python DBAPI we're using to access the
database
"""
from storm.exceptions import StormError
from psycopg2 import (Error, IntegrityError, InterfaceError, OperationalError,
ProgrammingError)
# Re-exported psycopg2 exception classes. Binding them here lets the rest of
# the application import database exceptions from this module instead of
# depending on the psycopg2 driver directly.
PostgreSQLError = Error
IntegrityError = IntegrityError
OperationalError = OperationalError
ProgrammingError = ProgrammingError
InterfaceError = InterfaceError
class SQLError(Exception):
    """Generic SQL error raised by the database layer."""
    pass
class ORMObjectNotFound(StormError):
    """Raised by ORMObject.get when no matching object is found."""
    pass
class ORMTestError(Exception):
    """Exception used for ORM test errors."""
    pass
|
andrebellafronte/stoq
|
stoqlib/database/exceptions.py
|
Python
|
gpl-2.0
| 1,470
|
[
"VisIt"
] |
cd7526f2ffd89c44d752ce28c2aaedc3e0d5c3ee6ec2619dcf449ae6556abb75
|
"""Reading raw data
This file holds all functions necessary to read in information
and data to run the energy demand model.
"""
import os
import csv
import math
import logging
from collections import defaultdict
import fiona
import pandas as pd
from shapely.geometry import shape, mapping
import numpy as np
from ruamel.yaml import YAML
from energy_demand.technologies import tech_related
from energy_demand.profiles import load_profile
from energy_demand.basic import lookup_tables
def read_yaml(file_path):
    """Parse yaml config file into plain data (lists, dicts and simple values)

    Parameters
    ----------
    file_path : str
        The path of the configuration file to parse

    Returns
    -------
    The parsed YAML content as plain Python objects.

    Notes
    -----
    NOTE(review): ``typ='unsafe'`` allows arbitrary Python object tags to be
    instantiated during loading -- only use on trusted configuration files.
    """
    with open(file_path, 'r') as file_handle:
        return YAML(typ='unsafe').load(file_handle)
class TechnologyData(object):
    """Class to store technology related data

    Arguments
    ---------
    name : str
        Technology name
    fueltype : str
        Fueltype of technology
    eff_by : float, default=1
        Efficiency of technology in base year
    eff_ey : float, default=1
        Efficiency of technology in future year
    year_eff_ey : int
        Future year when eff_ey is fully realised
    eff_achieved : float
        Factor of how much of the future
        efficiency is achieved
    diff_method : str
        Differentiation method (e.g. sigmoid or linear)
    market_entry : int, default=2015
        Year when technology comes on the market
    tech_type : str
        Technology type
    tech_max_share : float
        Maximum theoretical fraction of how much
        this individual technology can contribute
        to total energy service of its enduse
    description : str
        Optional technology description
    """
    def __init__(
            self,
            name=None,
            fueltype=None,
            eff_by=None,
            eff_ey=None,
            year_eff_ey=None,
            eff_achieved=None,
            diff_method=None,
            market_entry=2015,
            tech_type=None,
            tech_max_share=None,
            description=None
    ):
        self.name = name
        self.fueltype_str = fueltype
        # Integer fueltype id resolved from the fueltype name
        self.fueltype_int = tech_related.get_fueltype_int(fueltype)
        self.eff_by = eff_by
        self.eff_ey = eff_ey
        self.year_eff_ey = year_eff_ey
        self.eff_achieved = eff_achieved
        self.diff_method = diff_method
        self.market_entry = market_entry
        self.tech_type = tech_type
        self.tech_max_share = tech_max_share
        self.description = description

    def set_tech_attr(self, attribute_to_set, value_to_set):
        """Set a technology attribute

        Arguments
        ----------
        attribute_to_set : str
            Attribute to set
        value_to_set : any
            Value to set
        """
        setattr(self, attribute_to_set, value_to_set)
class CapacitySwitch(object):
    """Definition of a single capacity-based switch.

    Arguments
    ---------
    enduse : str
        Enduse of affected switch
    technology_install : str
        Installed technology
    switch_yr : int
        Year until capacity installation is fully realised
    installed_capacity : float
        Installed capacity in GWh
    sector : str, optional
        Sector the switch applies to; empty/NaN values become None
    """
    def __init__(
            self,
            enduse,
            technology_install,
            switch_yr,
            installed_capacity,
            sector=None
    ):
        """Store the switch definition and normalise the optional sector."""
        self.enduse = enduse
        self.technology_install = technology_install
        self.switch_yr = switch_yr
        self.installed_capacity = installed_capacity
        self.sector = self._clean_sector(sector)

    @staticmethod
    def _clean_sector(sector):
        """Normalise the sector: falsy values and float NaN (e.g. empty
        spreadsheet cells) become None; everything else is kept as-is."""
        if not sector:
            return None
        if isinstance(sector, str):
            return sector
        if math.isnan(sector):
            return None
        return sector

    def update(self, name, value):
        """Set attribute *name* of this switch to *value*.

        Arguments
        ---------
        name : str
            name of attribute
        value : any
            Type of value
        """
        setattr(self, name, value)
class FuelSwitch(object):
    """Definition of a single fuel switch.

    Arguments
    ---------
    enduse : str
        Enduse of affected switch
    fueltype_replace : str
        Fueltype which is being switched from
    technology_install : str
        Installed technology
    switch_yr : int
        Year until switch is fully realised
    fuel_share_switched_ey : float
        Switched fuel share
    sector : str, optional
        Sector the switch applies to; empty/NaN values become None
    """
    def __init__(
            self,
            enduse=None,
            fueltype_replace=None,
            technology_install=None,
            switch_yr=None,
            fuel_share_switched_ey=None,
            sector=None
    ):
        """Store the switch definition and normalise the optional sector."""
        self.enduse = enduse
        self.fueltype_replace = fueltype_replace
        self.technology_install = technology_install
        self.switch_yr = switch_yr
        self.fuel_share_switched_ey = fuel_share_switched_ey
        self.sector = self._clean_sector(sector)

    @staticmethod
    def _clean_sector(sector):
        """Normalise the sector: falsy values and float NaN (e.g. empty
        spreadsheet cells) become None; everything else is kept as-is."""
        if not sector:
            return None
        if isinstance(sector, str):
            return sector
        if math.isnan(sector):
            return None
        return sector

    def update(self, name, value):
        """Set attribute *name* of this switch to *value*.

        Arguments
        ---------
        name : str
            name of attribute
        value : any
            Type of value
        """
        setattr(self, name, value)
class ServiceSwitch(object):
    """Definition of a single service switch.

    Arguments
    ---------
    enduse : str
        Enduse of affected switch
    sector : str, optional
        Sector the switch applies to; empty/NaN values become None
    technology_install : str
        Installed technology
    service_share_ey : float
        Service share of installed technology in future year
    switch_yr : int
        Year until switch is fully realised
    """
    def __init__(
            self,
            enduse=None,
            sector=None,
            technology_install=None,
            service_share_ey=None,
            switch_yr=None
    ):
        """Store the switch definition and normalise the optional sector."""
        self.enduse = enduse
        self.technology_install = technology_install
        self.service_share_ey = service_share_ey
        self.switch_yr = switch_yr
        self.sector = self._clean_sector(sector)

    @staticmethod
    def _clean_sector(sector):
        """Normalise the sector: falsy values and float NaN (e.g. empty
        spreadsheet cells) become None; everything else is kept as-is."""
        if not sector:
            return None
        if isinstance(sector, str):
            return sector
        if math.isnan(sector):
            return None
        return sector

    def update(self, name, value):
        """Set attribute *name* of this switch to *value*.

        Arguments
        ---------
        name : str
            name of attribute
        value : any
            Type of value
        """
        setattr(self, name, value)
def read_in_results(
        path_result,
        seasons,
        model_yeardays_daytype
):
    """Read and post-process model results from result files into a container.

    Loading is best-effort: each result type is read in its own try block and
    silently skipped when the corresponding files are missing or malformed.

    Arguments
    ---------
    path_result : str
        Path to the folder holding result files
    seasons : dict
        Definition of which yeardays belong to which season
    model_yeardays_daytype : dict
        Daytype of modelled yeardays

    Returns
    -------
    results_container : dict
        All results that could be read, plus post-calculated totals,
        peaks, load factors and season/daytype averages.
    """
    logging.info("... Reading in results")

    lookups = lookup_tables.basic_lookups()
    results_container = {}

    # -----------------
    # Read in demands (each block uses `except Exception` instead of a bare
    # except so SystemExit/KeyboardInterrupt are never swallowed)
    # -----------------
    try:
        results_container['results_enduse_every_year'] = read_enduse_specific_results(
            path_result)
    except Exception:
        pass
    try:
        logging.debug("path_result %s", path_result)
        results_container['ed_fueltype_regs_yh'] = read_results_yh(
            path_result, 'ed_fueltype_regs_yh')
    except Exception:
        pass

    # Read in residential demands
    try:
        results_container['residential_results'] = read_results_yh(
            path_result, 'residential_results')
    except Exception:
        pass

    # Calculate total demand per fueltype for every hour
    try:
        tot_fueltype_yh = {}
        for year in results_container['ed_fueltype_regs_yh']:
            nr_of_fueltypes = results_container['ed_fueltype_regs_yh'][year].shape[0]
            tot_fueltype_yh[year] = np.zeros((nr_of_fueltypes, 8760))
        for year, ed_regs_yh in results_container['ed_fueltype_regs_yh'].items():
            fuel_yh = np.sum(ed_regs_yh, axis=1)  # Sum across all regions
            tot_fueltype_yh[year] += fuel_yh
        results_container['tot_fueltype_yh'] = tot_fueltype_yh
    except Exception:
        pass

    # -----------------
    # Peak calculations
    # -----------------
    try:
        results_container['ed_peak_h'] = {}
        results_container['ed_peak_regs_h'] = {}
        for year, ed_fueltype_reg_yh in results_container['ed_fueltype_regs_yh'].items():
            results_container['ed_peak_h'][year] = {}
            results_container['ed_peak_regs_h'][year] = {}
            for fueltype_int, ed_reg_yh in enumerate(ed_fueltype_reg_yh):
                fueltype_str = tech_related.get_fueltype_str(lookups['fueltypes'], fueltype_int)

                # Calculate peak per fueltype for all regions (ed_reg_yh = np.array(reg, yh))
                all_regs_yh = np.sum(ed_reg_yh, axis=0)  # sum regs
                peak_h = np.max(all_regs_yh)             # select max of 8760 h
                results_container['ed_peak_h'][year][fueltype_str] = peak_h
                results_container['ed_peak_regs_h'][year][fueltype_str] = np.max(ed_reg_yh, axis=1)

        # -------------
        # Load factors
        # -------------
        results_container['reg_load_factor_y'] = read_lf_y(
            os.path.join(path_result, "result_reg_load_factor_y"))
        results_container['reg_load_factor_yd'] = read_lf_y(
            os.path.join(path_result, "result_reg_load_factor_yd"))

        # -------------
        # Post-calculations: average per season and daytype for every fueltype
        # -------------
        results_container['av_season_daytype_cy'], results_container['season_daytype_cy'] = calc_av_per_season_fueltype(
            results_container['ed_fueltype_regs_yh'],
            seasons,
            model_yeardays_daytype)
    except Exception:
        pass

    logging.info("... Reading in results finished")
    return results_container
def calc_av_per_season_fueltype(results_every_year, seasons, model_yeardays_daytype):
    """Calculate average demand per season and daytype for every fueltype.

    Arguments
    ---------
    results_every_year : dict
        Results per simulation year as np.array(fueltype, region, 8760 h)
    seasons : dict
        Seasons
    model_yeardays_daytype : list
        Daytype of modelled days

    Returns
    -------
    av_season_daytype_cy : dict
        Average demand per season and daytype, keyed [year][fueltype]
    season_daytype_cy : dict
        Demand per season and daytype, keyed [year][fueltype]
    """
    averages = {}
    profiles = {}

    for sim_yr, per_fueltype in results_every_year.items():
        averages[sim_yr] = {}
        profiles[sim_yr] = {}

        for fueltype_idx, regional_fuel in enumerate(per_fueltype):
            # Collapse the region axis, then view the year as 365 days x 24 h
            national_8760 = np.sum(regional_fuel, axis=0)
            national_days = national_8760.reshape((365, 24))

            day_av, day_lp = load_profile.calc_av_lp(
                national_days,
                seasons,
                model_yeardays_daytype)

            averages[sim_yr][fueltype_idx] = day_av
            profiles[sim_yr][fueltype_idx] = day_lp

    return averages, profiles
def read_results_yh(path_to_folder, name_of_folder):
    """Read yearly-hourly (yh) results stored as one numpy file per year.

    Arguments
    ---------
    path_to_folder : str
        Path to the results folder
    name_of_folder : str
        Subfolder holding files named ``<prefix>__<year>.<ext>`` where the
        extension is 4 characters long (e.g. ``.npy``)

    Returns
    -------
    results : dict
        Loaded arrays keyed by int year
    """
    results = {}

    path_to_folder = os.path.join(path_to_folder, name_of_folder)
    all_txt_files_in_folder = os.listdir(path_to_folder)

    for file_path in all_txt_files_in_folder:
        try:
            path_file_to_read = os.path.join(path_to_folder, file_path)
            file_path_split = file_path.split("__")
            # Strip the 4-character file extension before parsing the year
            year = int(file_path_split[1][:-4])
            results[year] = np.load(path_file_to_read)
        except IndexError:
            pass  # path is a folder and not a file
    return results
def read_max_results(path):
    """Read max results, one numpy file per year, named ``<prefix>__<year>``.

    Arguments
    ---------
    path : str
        Path to folder

    Returns
    -------
    results : dict
        Loaded arrays keyed by int year
    """
    results = {}

    all_txt_files_in_folder = os.listdir(path)

    # Iterate files
    for file_path in all_txt_files_in_folder:
        path_file_to_read = os.path.join(path, file_path)
        file_path_split = file_path.split("__")
        # NOTE(review): unlike read_results_yh, no file extension is stripped
        # here -- assumes filenames end directly with the year digits. Confirm
        # against the code that writes these files.
        year = int(file_path_split[1])

        # Add year if not already exists
        results[year] = np.load(path_file_to_read)
    return results
def read_enduse_specific_results(path_to_folder):
    """Read enduse specific results stored as one numpy file per
    (enduse, year), named ``<prefix>__<enduse>__<year>``.

    Arguments
    ---------
    path_to_folder : str
        Folder path containing an ``enduse_specific_results`` subfolder

    Returns
    -------
    results : dict
        Loaded arrays keyed as results[year][enduse]
    """
    results = defaultdict(dict)
    path_results = os.path.join(
        path_to_folder,
        "enduse_specific_results")
    all_txt_files_in_folder = os.listdir(path_results)

    for file_path in all_txt_files_in_folder:
        path_file_to_read = os.path.join(path_results, file_path)
        file_path_split = file_path.split("__")
        # NOTE(review): this only skips files whose last "__"-separated part
        # is exactly '.txt'; it does not catch names merely ending in .txt.
        if file_path_split[-1] == '.txt':
            pass
        else:
            enduse = file_path_split[1]
            year = int(file_path_split[2])
            results[year][enduse] = np.load(path_file_to_read)
    return dict(results)
def read_fuel_ss(path_to_csv, fueltypes_nr):
    """Read service-sector base-year fuel data from csv.

    CSV layout: first column is the fueltype name, the first header row holds
    the enduse per column and the second row the sector per column.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file
    fueltypes_nr : int
        Nr of fueltypes

    Returns
    -------
    fuels : dict
        Fuels per enduse and sector as np.array(fueltypes_nr)
    sectors : list
        Sorted service sectors
    enduses : list
        Sorted service enduses

    Info of categories
    ------------------
    https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/565748/BEES_overarching_report_FINAL.pdf
    """
    lookups = lookup_tables.basic_lookups()
    fueltypes_lu = lookups['fueltypes']

    fuels = {}
    try:
        with open(path_to_csv, 'r') as csvfile:
            rows = csv.reader(csvfile, delimiter=',')
            headings = next(rows)     # first row: enduse per column
            _secondline = next(rows)  # second row: sector per column

            # Unique sectors and enduses (skip the fuel ID column)
            sectors = set(_secondline[1:])
            enduses = set(headings[1:])

            # Initialise fuels[enduse][sector] with zero arrays
            for enduse in enduses:
                fuels[enduse] = {
                    sector: np.zeros((fueltypes_nr), dtype="float") for sector in sectors}

            # Accumulate fuel per (enduse, sector, fueltype); no need to
            # buffer all rows in memory first.
            for row in rows:
                fueltype_int = fueltypes_lu[row[0]]
                for cnt, entry in enumerate(row[1:], 1):
                    fuels[headings[cnt]][_secondline[cnt]][fueltype_int] += float(entry)
    except ValueError as err:
        # Chain the original error so the offending cell can be located.
        raise Exception(
            "The service sector fuel could not be loaded. Check if empty cells.") from err

    return fuels, sorted(sectors), sorted(enduses)
def read_load_shapes_tech(path_to_csv):
    """Read 24-hour technology load shapes from a csv file.

    The header row holds the hour index for each value column; every
    following row holds a technology name in the first column and the
    shape value for each hour in the remaining columns.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file

    Returns
    -------
    load_shapes_dh : dict
        24-element np.array per technology name
    """
    load_shapes_dh = {}

    with open(path_to_csv, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        hour_headings = reader.__next__() if False else next(reader)  # header: hour index per column

        for record in reader:
            shape = np.zeros((24), dtype="float")
            for col, value in enumerate(record[1:], 1):
                # The header tells us which hour slot this column fills
                shape[int(hour_headings[col])] = float(value)
            load_shapes_dh[str(record[0])] = shape

    return load_shapes_dh
def service_switch(df_service_switches):
    """Read service switch assumptions from a dataframe and validate them.

    Rows whose service share equals the default parameter (999.0) are
    treated as "no switch defined" and skipped. After reading, the switch
    shares per enduse and switch year must sum to exactly 1.0 (100%).

    Arguments
    ----------
    df_service_switches : pandas.DataFrame
        Dataframe with columns 'enduses_service_switch', 'tech',
        'switches_service', 'end_yr' and 'sector'

    Returns
    -------
    service_switches : list
        List of ServiceSwitch objects

    Raises
    ------
    Exception
        If the shares per enduse and switch year do not sum to 1.0

    Info
    -----
    The following attributes need to be defined for a service switch.

        Attribute            Description
        ==========           =========================
        enduse               [str] Enduse affected by switch
        tech                 [str] Technology
        switch_yr            [int] Year until switch is fully realised
        service_share_ey     [str] Service share of 'tech' in 'switch_yr'
        sector               [str] Optional sector specific info where switch applies
    """
    test_enduses = set()
    service_switches = []
    default_parameter = 999.0  # marker value meaning "no switch defined"

    for i in df_service_switches.index:
        enduse = df_service_switches.at[i, 'enduses_service_switch']
        test_enduses.add(enduse)
        tech = df_service_switches.at[i, 'tech']
        service_share_ey = df_service_switches.at[i, 'switches_service']
        switch_yr = df_service_switches.at[i, 'end_yr']
        sector = df_service_switches.at[i, 'sector']
        if sector == 'None':
            sector = None

        # Skip rows marked with the default parameter (no switch defined)
        if float(service_share_ey) != default_parameter:
            service_switches.append(
                ServiceSwitch(
                    enduse=str(enduse),
                    technology_install=str(tech),
                    service_share_ey=float(service_share_ey),
                    switch_yr=float(switch_yr),
                    sector=sector))

    # --------------------------------------------
    # Test that defined shares sum to 100% per enduse and switch year
    # --------------------------------------------
    for enduse in test_enduses:
        switch_yrs = {}
        for switch in service_switches:
            if switch.enduse == enduse:
                switch_yrs[switch.switch_yr] = (
                    switch_yrs.get(switch.switch_yr, 0) + switch.service_share_ey)

        for year, value in switch_yrs.items():
            # NOTE(review): exact float comparison -- shares must sum to
            # exactly 1.0; consider a tolerance if inputs are computed values.
            if value != 1.0:
                raise Exception(
                    "Wrong service switch input: shares do not sum to 1.0 "
                    "(100%) {} {} {}".format(value, year, enduse))

    return service_switches
def read_fuel_switches(
        path_to_csv,
        enduses,
        fueltypes,
        technologies,
        base_yr=2015
):
    """Read fuel switch assumptions from a CSV file and validate them.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file; if it does not exist, no switches are read
    enduses : dict
        Enduses per submodel ('service', 'residential', 'industry')
    fueltypes : dict
        Fueltype look-up (name -> int)
    technologies : dict
        Technologies, used to validate maximum switchable shares
    base_yr : int, default=2015
        Base year; every switch year must lie after it

    Returns
    -------
    fuel_switches : list
        All assumptions about fuel switches provided as input

    Raises
    ------
    Exception
        If a switch share is 0, exceeds 1.0 or the technology maximum,
        the switch year is not in the future, or the enduse is unknown.

    Info
    -----
    The following attributes need to be defined for a fuel switch.

        Attribute                  Description
        ==========                 =========================
        enduse                     [str] Enduse affected by switch
        fueltype_replace           [str] Fueltype to be switched from
        technology_install         [str] Technology which is installed
        switch_yr                  [int] Year until switch is fully realised
        fuel_share_switched_ey     [float] Share of fuel which is switched until switch_yr
        sector                     [str] Optional sector specific info where switch applies
                                   If field is empty the switch is across all sectors
    """
    fuel_switches = []

    if os.path.isfile(path_to_csv):
        raw_csv_file = pd.read_csv(path_to_csv)
        for _, row in raw_csv_file.iterrows():
            fuel_switches.append(
                FuelSwitch(
                    enduse=str(row['enduse']),
                    fueltype_replace=fueltypes[str(row['fueltype_replace'])],
                    technology_install=str(row['technology_install']),
                    switch_yr=float(row['switch_yr']),
                    fuel_share_switched_ey=float(row['fuel_share_switched_ey']),
                    sector=row['sector']))

        # -------
        # Testing whether the provided inputs make sense
        # -------
        for obj in fuel_switches:
            if obj.fuel_share_switched_ey == 0:
                raise Exception(
                    "Input error: The share of switched fuel must be > 0. Delete {} from input".format(
                        obj.technology_install))

        # BUGFIX: the original second loop referenced the stale variable `obj`
        # (left over from the loop above) for the > 1.0 and switch_yr checks,
        # so only the last switch was ever validated for those conditions.
        for switch in fuel_switches:
            # Test if larger than maximum defined technology diffusion (L)
            if switch.fuel_share_switched_ey > technologies[switch.technology_install].tech_max_share:
                # BUGFIX: format arguments were swapped relative to the message
                raise Exception(
                    "Configuration Error: More service provided for tech '{}' in enduse '{}' than max possible".format(
                        switch.technology_install, switch.enduse))

            if switch.fuel_share_switched_ey > 1.0:
                raise Exception(
                    "Configuration Error: The fuel switches are > 1.0 for enduse {} and fueltype {}".format(
                        switch.enduse, switch.fueltype_replace))

            if switch.switch_yr <= base_yr:
                raise Exception("Configuration Error of fuel switch: switch_yr must be in the future")

        # Test whether the defined enduse exists in any submodel
        for obj in fuel_switches:
            if not (obj.enduse in enduses['service']
                    or obj.enduse in enduses['residential']
                    or obj.enduse in enduses['industry']):
                raise Exception(
                    "Input Error: The defined enduse '{}' to switch fuel from is not defined...".format(
                        obj.enduse))

    return fuel_switches
def read_technologies(path_to_csv):
    """Read the technology definition csv file.

    Every technology row becomes a TechnologyData object. Technologies are
    additionally grouped into lists by their technology type, and a
    'placeholder_tech' entry is appended to every list so that a generic
    technology type can be added for every enduse.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file

    Returns
    -------
    dict_technologies : dict
        TechnologyData objects keyed by technology name
    dict_tech_lists : dict
        Technology names grouped by technology type

    Info
    -----
    The following csv columns must be present for every technology:
    technology, fueltype, efficiency in base year, efficiency in future year,
    year when efficiency is fully realised, diffusion method (sigmoid or
    linear), market_entry, technology type, maximum theoretical service share
    of technology, description.
    """
    dict_technologies = {}
    dict_tech_lists = {}

    raw_csv_file = pd.read_csv(path_to_csv)

    for _, row in raw_csv_file.iterrows():
        tech_name = str(row['technology'])

        dict_technologies[tech_name] = TechnologyData(
            name=tech_name,
            fueltype=str(row['fueltype']),
            eff_by=float(row['efficiency in base year']),
            eff_ey=float(row['efficiency in future year']),
            year_eff_ey=float(row['year when efficiency is fully realised']),
            eff_achieved=1.0,  # default for initial class generation; overwritten by scenario input
            diff_method=str(row['diffusion method (sigmoid or linear)']),
            market_entry=float(row['market_entry']),
            tech_type=str(row['technology type']),
            tech_max_share=float(row['maximum theoretical service share of technology']),
            description=str(row['description']))

        # Group the technology under its type, creating the list on first use
        dict_tech_lists.setdefault(row['technology type'], []).append(row['technology'])

    # Add placeholder technology to all tech_lists
    for tech_list in dict_tech_lists.values():
        tech_list.append('placeholder_tech')

    return dict_technologies, dict_tech_lists
def read_fuel_rs(path_to_csv):
    """Read residential fuels per enduse from a csv file.

    The first column holds the fueltype identifier; every remaining
    column header is an enduse.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file

    Returns
    -------
    fuels : dict
        Residential fuels, {enduse: {None: np.array(fueltypes)}}
    sectors : list
        Single dummy sector (residential data is not sector-resolved)
    enduses : list
        Residential end uses
    """
    dummy_sector = None

    # Read csv and treat missing entries as zero demand
    table = pd.read_csv(path_to_csv).fillna(0)

    # Every column except the fuel identifier is an enduse
    enduses = list(table.columns[1:].values)

    # Replace textual fueltypes with integer fueltype indices
    table['fuel_id'] = table['fuel_id'].apply(tech_related.get_fueltype_int)

    fuels = {
        enduse: {dummy_sector: table[enduse].values}
        for enduse in table.columns[1:]}

    return fuels, [dummy_sector], list(enduses)
def read_fuel_is(path_to_csv, fueltypes_nr):
    """Read industry fuel demands (per enduse and sector) from csv.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file
    fueltypes_nr : int
        Number of fueltypes

    Returns
    -------
    fuels : dict
        Industry fuels, {enduse: {sector: np.array(fueltypes)}}
    sectors : list
        Industrial sectors
    enduses : list
        Industrial enduses

    Info
    ----
    Source: User Guide Energy Consumption in the UK
    https://www.gov.uk/government/uploads/system/uploads/attach
    ment_data/file/573271/ECUK_user_guide_November_2016_final.pdf
    https://unstats.un.org/unsd/cr/registry/regcst.asp?Cl=27
    http://ec.europa.eu/eurostat/ramon/nomenclatures/
    index.cfm?TargetUrl=LST_NOM_DTL&StrNom=NACE_REV2&StrLanguageCode=EN&IntPcKey=&StrLayoutCode=

    High temperature processes: dominate energy consumption in the iron and
    steel, non-ferrous metal, bricks, cement, glass and potteries industries
    (coke ovens, blast/other furnaces, kilns, glass tanks).

    Low temperature processes: largest end use for food, drink and tobacco
    (process heating/distillation in chemicals; baking and separation in
    food and drink; pressing and drying in paper; washing, scouring,
    dyeing and drying in textiles).

    Drying/separation: important in paper-making.
    Motors: pumping, fans and machinery drives.
    Compressed air: mainly publishing, printing and recorded media.
    Lighting: with space heating a main end use in engineering.
    Refrigeration: mainly chemicals and food and drink.
    Space heating: with lighting a main end use in engineering.

    Industry classes from BEIS (SIC 2007):
    08 Other mining and quarrying
    10 Manufacture of food products
    11 Manufacture of beverages
    12 Manufacture of tobacco products
    13 Manufacture of textiles
    14 Manufacture of wearing apparel
    15 Manufacture of leather and related products
    16 Manufacture of wood and of products of wood and cork, except furniture; manufacture of articles of straw and plaiting materials
    17 Manufacture of paper and paper products
    18 Printing and publishing of recorded media and other publishing activities
    20 Manufacture of chemicals and chemical products
    21 Manufacture of basic pharmaceutical products and pharmaceutical preparations
    22 Manufacture of rubber and plastic products
    23 Manufacture of other non-metallic mineral products
    24 Manufacture of basic metals
    25 Manufacture of fabricated metal products, except machinery and equipment
    26 Manufacture of computer, electronic and optical products
    27 Manufacture of electrical equipment
    28 Manufacture of machinery and equipment n.e.c.
    29 Manufacture of motor vehicles, trailers and semi-trailers
    30 Manufacture of other transport equipment
    31 Manufacture of furniture
    32 Other manufacturing
    36 Water collection, treatment and supply
    38 Waste collection, treatment and disposal activities; materials recovery
    """
    fuels = {}
    rows_list = []

    with open(path_to_csv, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        headings = next(rows)        # enduse per column
        fueltype_row = next(rows)    # fueltype per column

        # Unique enduses (skip first column, which holds the sector name).
        # Bug fix: the original used `if enduse is not ''`, an identity
        # comparison on strings, instead of inequality.
        enduses = set()
        for enduse in headings[1:]:
            if enduse != '':
                enduses.add(enduse)

        # Materialise rows and collect unique sectors
        sectors = set()
        for row in rows:
            rows_list.append(row)
            sectors.add(row[0])

    # Initialise one zero fuel array per (enduse, sector)
    for enduse in enduses:
        fuels[str(enduse)] = {}
        for sector in sectors:
            fuels[str(enduse)][str(sector)] = np.zeros(
                (fueltypes_nr), dtype="float")

    # Accumulate fuel values
    for row in rows_list:
        sector = row[0]
        for position, entry in enumerate(row[1:], 1):  # start at column 1
            if entry != '':
                enduse = str(headings[position])
                fueltype = fueltype_row[position]
                fueltype_int = tech_related.get_fueltype_int(fueltype)
                fuels[enduse][sector][fueltype_int] += float(entry)

    return fuels, list(sectors), list(enduses)
def read_lf_y(result_path):
    """Read annual load factors stored as .npy files in a folder.

    Arguments
    ----------
    result_path : str
        Folder with one file per year, named ``<prefix>__<year>__<...>``

    Returns
    -------
    results : dict
        Annual results, {year: np.array}
    """
    results = {}
    for filename in os.listdir(result_path):
        # Year is the second '__'-separated token of the file name
        year = int(filename.split("__")[1])
        results[year] = np.load(os.path.join(result_path, filename))
    return results
def read_scenaric_population_data(result_path):
    """Read scenario population data from .npy files in a folder.

    Arguments
    ---------
    result_path : str
        Folder with one file per year, named ``<prefix>__<year>__<...>``

    Returns
    -------
    results : dict
        Population, {year: np.array(regions)}
    """
    results = {}
    for entry in os.listdir(result_path):
        full_path = os.path.join(result_path, entry)
        # Second '__'-separated token encodes the year
        results[int(entry.split("__")[1])] = np.load(full_path)
    return results
def read_capacity_switch(path_to_csv, base_yr=2015):
    """Read capacity switch assumptions from a csv file.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file; a missing file simply means no switches
    base_yr : int, default=2015
        Base year; every switch year must lie strictly after it

    Returns
    -------
    service_switches : list
        CapacitySwitch objects implementing the defined capacity installation

    Raises
    ------
    Exception
        If any switch year is not in the future relative to ``base_yr``

    Info
    -----
    Attributes per switch: enduse, technology_install, switch_yr,
    installed_capacity (GWh), sector (empty = across all sectors).
    NOTE: the column key 'swich_yr' mirrors the (misspelt) header of the
    input file and must not be "corrected" here.
    """
    service_switches = []

    if os.path.isfile(path_to_csv):
        raw_csv_file = pd.read_csv(path_to_csv)

        for _, row in raw_csv_file.iterrows():
            service_switches.append(
                CapacitySwitch(
                    enduse=str(row['enduse']),
                    technology_install=str(row['technology_install']),
                    switch_yr=float(row['swich_yr']),
                    installed_capacity=float(row['installed_capacity']),
                    sector=row['sector']))

        # Validate: all switches must be realised after the base year
        for obj in service_switches:
            if obj.switch_yr <= base_yr:
                raise Exception("Input Error capacity switch: switch_yr must be in the future")

    return service_switches
def read_floor_area_virtual_stock(path_to_csv, f_mixed_floorarea=0.5):
    """Read per-LAD floor areas (from Newcastle) for the virtual building stock.

    Arguments
    ---------
    path_to_csv : str
        Path to csv file
    f_mixed_floorarea : float
        Share of mixed-use floor area assigned to residential use

    Returns
    -------
    res_floorarea : dict
        Residential floor area per region
    non_res_floorarea : dict
        Non-residential floor area per region
    building_count_service : dict
        {category 1..14: {region: count}}; category 14 is the total

    Info
    -----
    * Mixed (residential + non-residential) floor area is split according
      to ``f_mixed_floorarea``.
    * Service building categories from the Newcastle data:
      (1) Commercial_General (2) Primary_Industry (3) Public_Services
      (4) Education (5) Hospitality (6) Community_Arts_Leisure
      (7) Industrial (8) Healthcare (9) Office (10) Retail
      (11) Transport_and_Storage (12) Residential (13) Military
    """
    p_mixed_no_resid = 1 - f_mixed_floorarea

    res_floorarea, non_res_floorarea, floorarea_mixed = {}, {}, {}

    building_count_service = {}
    for i in range(1, 15):
        building_count_service[i] = {}

    with open(path_to_csv, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        headings = next(rows)

        # Hoist column lookups out of the row loop (was re-scanned per row)
        pos_lad = headings.index('lad')
        pos_res = headings.index('res_bld_floor_area')
        pos_nonres = headings.index('nonres_bld_floor_area')
        pos_mixed = headings.index('mixeduse_bld_floor_area')
        pos_counts = [
            headings.index('building_type_count_%s' % i)
            for i in range(1, 14)]

        for row in rows:
            geo_name = row[pos_lad].strip()

            # 'null' entries mean missing or faulty data and are skipped
            if row[pos_res] != 'null':
                res_floorarea[geo_name] = float(row[pos_res])
            if row[pos_nonres] != 'null':
                non_res_floorarea[geo_name] = float(row[pos_nonres])
            if row[pos_mixed] != 'null':
                floorarea_mixed[geo_name] = float(row[pos_mixed])

                # Distribute the mixed floor area between the two stocks.
                # NOTE(review): as in the original, a region with mixed
                # floor area but a 'null' residential or non-residential
                # entry raises KeyError here — confirm inputs always
                # provide both when mixed floor area is present.
                res_floorarea[geo_name] += \
                    floorarea_mixed[geo_name] * f_mixed_floorarea
                non_res_floorarea[geo_name] += \
                    floorarea_mixed[geo_name] * p_mixed_no_resid

            # Building counts for the 13 service building categories
            counts = [float(row[pos]) for pos in pos_counts]
            for category, count in enumerate(counts, 1):
                building_count_service[category][geo_name] = count

            # Category 14 ("other") stores the total building count
            building_count_service[14][geo_name] = int(sum(counts))

    return res_floorarea, non_res_floorarea, building_count_service
def get_position(headings, name):
    """Get position of an entry in a list.

    Arguments
    ---------
    headings : list
        List with names
    name : str
        Name of entry to find

    Returns
    -------
    position : int
        Zero-based position in list (raises ValueError if absent,
        per list.index semantics)
    """
    return headings.index(name)
def read_np_array_from_txt(path_file_to_read):
    """Load a comma-separated text file into a numpy array.

    Arguments
    ---------
    path_file_to_read : str
        Path of the text file holding the stored array

    Returns
    -------
    array
        Array parsed from the file
    """
    return np.loadtxt(path_file_to_read, delimiter=',')
def get_region_selection(path_to_csv):
    """Read region names from the first column of a csv file.

    Arguments
    ----------
    path_to_csv : str
        Path to csv file (first row is a header and is skipped)

    Returns
    -------
    list
        Region names
    """
    with open(path_to_csv, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip header row
        return [row[0] for row in reader]
def get_region_names(path):
    """Return the 'name' property of every shape in a shapefile."""
    with fiona.open(path, 'r') as source:
        names = [record['properties']['name'] for record in source]
    return names
def get_region_centroids(path):
    '''Returns records of a shapefile with each geometry replaced by
    its centroid (as a GeoJSON-style mapping).
    '''
    with fiona.open(path, 'r') as source:
        geoms = [elem for elem in source]
    # Replace each polygon geometry in-place by its centroid point
    # (shape/mapping are shapely helpers imported at module level)
    for geom in geoms:
        my_shape = shape(geom['geometry'])
        geom['geometry'] = mapping(my_shape.centroid)
    return geoms
def get_region_objects(path):
    '''Return every record contained in a shapefile.'''
    with fiona.open(path, 'r') as source:
        return list(source)
def load_full_paramter_values(file_path):
    """Read a parameter-value csv file ("region", "year", "value", "interval").

    NOTE: the function name keeps its historical misspelling ('paramter')
    because callers depend on it.

    Returns
    -------
    pandas.DataFrame
        The parsed table
    """
    return pd.read_csv(file_path)
|
nismod/energy_demand
|
energy_demand/read_write/read_data.py
|
Python
|
mit
| 44,047
|
[
"BLAST"
] |
ff12ea2af6d8b2d8b42ab4cd38e6288e7fae2218335317e350bb5061d9476396
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************************************
**PressureTensorMultiLayer** - Analysis
*****************************************
This class computes the pressure tensor of the system in `n` layers.
Layers are perpendicular to the Z direction and are equidistant (the distance is Lz/n).
It can be used as standalone class in python as well as
in combination with the integrator extension ExtAnalyze.
Standalone Usage:
-----------------
>>> pt = espressopp.analysis.PressureTensorMultiLayer(system, n, dh)
>>> for i in range(n):
>>> print "pressure tensor in layer %d: %s" % ( i, pt.compute())
or
>>> pt = espressopp.analysis.PressureTensorMultiLayer(system, n, dh)
>>> for k in range(100):
>>> integrator.run(100)
>>> pt.performMeasurement()
>>> for i in range(n):
>>> print "average pressure tensor in layer %d: %s" % ( i, pt.compute())
Usage in integrator with ExtAnalyze:
------------------------------------
>>> pt = espressopp.analysis.PressureTensorMultiLayer(system, n, dh)
>>> extension_pt = espressopp.integrator.ExtAnalyze(pt , interval=100)
>>> integrator.addExtension(extension_pt)
>>> integrator.run(10000)
>>> pt_ave = pt.getAverageValue()
>>> for i in range(n):
>>> print "average Pressure Tensor = ", pt_ave[i][:6]
>>> print " std deviation = ", pt_ave[i][6:]
>>> print "number of measurements = ", pt.getNumberOfMeasurements()
The following methods are supported:
* performMeasurement()
computes the pressure tensor and updates average and standard deviation
* reset()
resets average and standard deviation to 0
* compute()
computes the instant pressure tensor in `n` layers, return value: [xx, yy, zz, xy, xz, yz]
* getAverageValue()
returns the average pressure tensor and the standard deviation,
return value: [xx, yy, zz, xy, xz, yz, +-xx, +-yy, +-zz, +-xy, +-xz, +-yz]
* getNumberOfMeasurements()
counts the number of measurements that have been computed (standalone or in integrator)
does _not_ include measurements that have been done using "compute()"
.. function:: espressopp.analysis.PressureTensorMultiLayer(system, n, dh)
:param system:
:param n:
:param dh:
:type system:
:type n:
:type dh:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.AnalysisBase import *
from _espressopp import analysis_PressureTensorMultiLayer
class PressureTensorMultiLayerLocal(AnalysisBaseLocal, analysis_PressureTensorMultiLayer):
    """Local (per-MPI-task) part of the multi-layer pressure tensor analysis."""

    def __init__(self, system, n, dh):
        # Construct the C++ backend only on workers that belong to the
        # active PMI CPU group (or always, when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_PressureTensorMultiLayer, system, n, dh)
if pmi.isController:
    class PressureTensorMultiLayer(AnalysisBase):
        """Controller-side PMI proxy forwarding calls to the Local class."""
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.PressureTensorMultiLayerLocal',
            pmiproperty = [ 'n', 'dh' ]
        )
|
junghans/espressopp
|
src/analysis/PressureTensorMultiLayer.py
|
Python
|
gpl-3.0
| 3,826
|
[
"ESPResSo"
] |
39d88df03615d31f290c6866108a5d1f449b98214e26406b83be5962182a8b30
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from struct import unpack
from androguard.decompiler.dad.util import get_type
from androguard.decompiler.dad.opcode_ins import Op
from androguard.decompiler.dad.instruction import (Constant, ThisParam,
BinaryExpression,
BaseClass,
InstanceExpression,
NewInstance,
Variable,
BinaryCompExpression)
logger = logging.getLogger('dad.writer')
class Writer(object):
    def __init__(self, graph, method):
        """Create a source writer rendering *method* from its control-flow *graph*."""
        self.graph = graph
        self.method = method
        self.visited_nodes = set()
        # Output state: indentation width, plain text buffer, and the
        # annotated (token-type, text, ...) buffer.
        self.ind = 4
        self.buffer = []
        self.buffer2 = []
        # Stacks of "follow" nodes delimiting structured control-flow regions.
        self.loop_follow = [None]
        self.if_follow = [None]
        self.switch_follow = [None]
        self.latch_node = [None]
        self.try_follow = [None]
        self.next_case = None
        self.skip = False       # one-shot flag: suppress next indentation/statement end
        self.need_break = True  # whether the current switch case needs a `break`
def __str__(self):
return ''.join(self.buffer)
    def str_ext(self):
        """Return the annotated token buffer (list of tuples)."""
        return self.buffer2
def inc_ind(self, i=1):
self.ind += (4 * i)
def dec_ind(self, i=1):
self.ind -= (4 * i)
def space(self):
if self.skip:
self.skip = False
return ''
return ' ' * self.ind
def write_ind(self):
if self.skip:
self.skip = False
else:
self.write(self.space())
self.write_ext(('INDENTATION', self.space()))
def write(self, s, data=None):
self.buffer.append(s)
# old method, still used
# TODO: clean?
if data:
self.buffer2.append((data, s))
# at minimum, we have t as a tuple of the form:
# (TYPE_STR, MY_STR) such as ('THIS', 'this')
# where the 2nd field is the actual generated source code
# We can have more fields, for example:
# ('METHOD', 'sendToServer', 'this -> sendToServer', <androguard.decompiler.dad.instruction.ThisParam>)
def write_ext(self, t):
if not isinstance(t, tuple):
raise "Error in write_ext: %s not a tuple" % str(t)
self.buffer2.append(t)
def end_ins(self):
self.write(';\n')
self.write_ext(('END_INSTRUCTION', ';\n'))
    def write_ind_visit_end(self, lhs, s, rhs=None, data=None):
        """Emit an indented `lhs <s> [rhs];` statement line."""
        self.write_ind()
        lhs.visit(self)
        self.write(s)
        self.write_ext(('TODO_4343', s, data))
        if rhs is not None:
            rhs.visit(self)
        self.end_ins()
    # TODO: prefer this method over write_ind_visit_end, which should
    # eventually be deprecated.
    def write_ind_visit_end_ext(self, lhs, before, s, after, rhs=None,
                                data=None, subsection='UNKNOWN_SUBSECTION'):
        """Like write_ind_visit_end, but annotates before/s/after separately."""
        self.write_ind()
        lhs.visit(self)
        self.write(before + s + after)
        self.write_ext(('BEFORE', before))
        self.write_ext((subsection, s, data))
        self.write_ext(('AFTER', after))
        if rhs is not None:
            rhs.visit(self)
        self.end_ins()
    def write_inplace_if_possible(self, lhs, rhs):
        """Emit `x++`/`x--`/`x op= e` when rhs has the form `x op e`."""
        if isinstance(rhs, BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
            exp_rhs = rhs.var_map[rhs.arg2]
            # `x = x + 1` / `x = x - 1`  ->  `x++` / `x--`
            if rhs.op in '+-' and isinstance(exp_rhs, Constant) and\
                            exp_rhs.get_int_value() == 1:
                return self.write_ind_visit_end(lhs, rhs.op * 2, data=rhs)
            # `x = x op e`  ->  `x op= e`
            return self.write_ind_visit_end(
                lhs, ' %s= ' % rhs.op, exp_rhs, data=rhs)
        return self.write_ind_visit_end(lhs, ' = ', rhs, data=rhs)
    def visit_ins(self, ins):
        """Dispatch rendering to the instruction's own visit method."""
        ins.visit(self)
    def write_method(self):
        """Render the full method: modifiers, prototype, parameters, body."""
        acc = []
        access = self.method.access
        self.constructor = False
        for modifier in access:
            # 'constructor' is a flag, not a printable modifier
            if modifier == 'constructor':
                self.constructor = True
                continue
            acc.append(modifier)
        self.write('\n%s' % self.space())
        self.write_ext(('NEWLINE', '\n%s' % (self.space())))
        if acc:
            self.write('%s ' % ' '.join(acc))
            self.write_ext(('PROTOTYPE_ACCESS', '%s ' % ' '.join(acc)))
        if self.constructor:
            # Constructors print the simple (unqualified) class name
            name = get_type(self.method.cls_name).split('.')[-1]
            self.write(name)
            self.write_ext(('NAME_METHOD_PROTOTYPE', '%s' % name, self.method))
        else:
            self.write(
                '%s %s' % (get_type(self.method.type), self.method.name))
            self.write_ext(
                ('PROTOTYPE_TYPE', '%s' % get_type(self.method.type)))
            self.write_ext(('SPACE', ' '))
            self.write_ext(
                ('NAME_METHOD_PROTOTYPE',
                 '%s' % self.method.name, self.method))
        params = self.method.lparams
        if 'static' not in access:
            # Drop the implicit `this` parameter of instance methods
            params = params[1:]
        proto = ''
        self.write_ext(('PARENTHESIS_START', '('))
        if self.method.params_type:
            proto = ', '.join(['%s p%s' % (get_type(p_type), param) for
                               p_type, param in zip(self.method.params_type, params)])
            first = True
            for p_type, param in zip(self.method.params_type, params):
                if not first:
                    self.write_ext(('COMMA', ', '))
                else:
                    first = False
                self.write_ext(('ARG_TYPE', '%s' % get_type(p_type)))
                self.write_ext(('SPACE', ' '))
                self.write_ext(
                    ('NAME_ARG', 'p%s' % param, p_type, self.method))
        self.write_ext(('PARENTHESIS_END', ')'))
        self.write('(%s)' % proto)
        if self.graph is None:
            # No graph means no decompiled body (abstract/native method)
            self.write(';\n')
            self.write_ext(('METHOD_END_NO_CONTENT', ';\n'))
            return
        self.write('\n%s{\n' % self.space())
        self.write_ext(('METHOD_START', '\n%s{\n' % self.space()))
        self.inc_ind()
        self.visit_node(self.graph.entry)
        self.dec_ind()
        self.write('%s}\n' % self.space())
        self.write_ext(('METHOD_END', '%s}\n' % self.space()))
    def visit_node(self, node):
        """Visit *node* unless it is a pending follow node or already rendered."""
        if node in (self.if_follow[-1], self.switch_follow[-1],
                    self.loop_follow[-1], self.latch_node[-1],
                    self.try_follow[-1]):
            return
        # Return nodes may legitimately be rendered more than once
        if not node.type.is_return and node in self.visited_nodes:
            return
        self.visited_nodes.add(node)
        # Declare the variables owned by this node before emitting it
        for var in node.var_to_declare:
            var.visit_decl(self)
            var.declared = True
        node.visit(self)
    def visit_loop_node(self, loop):
        """Emit a while / do-while / while(true) loop from a loop region."""
        follow = loop.follow['loop']
        if follow is None and not loop.looptype.is_endless:
            logger.error('Loop has no follow !')
        if loop.looptype.is_pretest:
            # Normalise so the loop body sits on the true branch
            if loop.true is follow:
                loop.neg()
                loop.true, loop.false = loop.false, loop.true
            self.write('%swhile (' % self.space())
            self.write_ext(('WHILE', '%swhile (' % self.space()))
            loop.visit_cond(self)
            self.write(') {\n')
            self.write_ext(('WHILE_START', ') {\n'))
        elif loop.looptype.is_posttest:
            self.write('%sdo {\n' % self.space())
            self.write_ext(('DO', '%sdo {\n' % self.space()))
            self.latch_node.append(loop.latch)
        elif loop.looptype.is_endless:
            self.write('%swhile(true) {\n' % self.space())
            self.write_ext(('WHILE_TRUE', '%swhile(true) {\n' % self.space()))
        self.inc_ind()
        self.loop_follow.append(follow)
        if loop.looptype.is_pretest:
            self.visit_node(loop.true)
        else:
            self.visit_node(loop.cond)
        self.loop_follow.pop()
        self.dec_ind()
        if loop.looptype.is_pretest:
            self.write('%s}\n' % self.space())
            self.write_ext(('END_PRETEST', '%s}\n' % self.space()))
        elif loop.looptype.is_posttest:
            self.latch_node.pop()
            self.write('%s} while(' % self.space())
            self.write_ext(('WHILE_POSTTEST', '%s} while(' % self.space()))
            loop.latch.visit_cond(self)
            self.write(');\n')
            self.write_ext(('POSTTEST_END', ');\n'))
        else:
            # Endless loop: the latch node is still emitted inside the body
            self.inc_ind()
            self.visit_node(loop.latch)
            self.dec_ind()
            self.write('%s}\n' % self.space())
            self.write_ext(('END_LOOP', '%s}\n' % self.space()))
        if follow is not None:
            self.visit_node(follow)
    def visit_cond_node(self, cond):
        """Emit an if / if-else construct from a condition node."""
        follow = cond.follow['if']
        if cond.false is cond.true:
            # Degenerate condition (both branches identical): emit commented out
            self.write('%s// Both branches of the condition point to the same'
                       ' code.\n' % self.space())
            self.write_ext(
                ('COMMENT_ERROR_MSG',
                 '%s// Both branches of the condition point to the same'
                 ' code.\n' % self.space()))
            self.write('%s// if (' % self.space())
            self.write_ext(('COMMENT_IF', '%s// if (' % self.space()))
            cond.visit_cond(self)
            self.write(') {\n')
            self.write_ext(('COMMENT_COND_END', ') {\n'))
            self.inc_ind()
            self.visit_node(cond.true)
            self.dec_ind()
            self.write('%s// }\n' % self.space(), data="COMMENT_IF_COND_END")
            return
        if cond.false is self.loop_follow[-1]:
            # Normalise so the loop exit sits on the true branch
            cond.neg()
            cond.true, cond.false = cond.false, cond.true
        if self.loop_follow[-1] in (cond.true, cond.false):
            # Condition guards a loop exit: emit `if (c) { break; }`
            self.write('%sif (' % self.space(), data="IF_2")
            cond.visit_cond(self)
            self.write(') {\n', data="IF_TRUE_2")
            self.inc_ind()
            self.write('%sbreak;\n' % self.space(), data="BREAK")
            self.dec_ind()
            self.write('%s}\n' % self.space(), data="IF_END_2")
            self.visit_node(cond.false)
        elif follow is not None:
            if cond.true in (follow, self.next_case) or\
               cond.num > cond.true.num:
                # or cond.true.num > cond.false.num:
                cond.neg()
                cond.true, cond.false = cond.false, cond.true
            self.if_follow.append(follow)
            if cond.true:  # in self.visited_nodes:
                self.write('%sif (' % self.space(), data="IF")
                cond.visit_cond(self)
                self.write(') {\n', data="IF_TRUE")
                self.inc_ind()
                self.visit_node(cond.true)
                self.dec_ind()
            # An else branch is needed only if the follow node is not a branch
            is_else = not (follow in (cond.true, cond.false))
            if is_else and not cond.false in self.visited_nodes:
                self.write('%s} else {\n' % self.space(), data="IF_FALSE")
                self.inc_ind()
                self.visit_node(cond.false)
                self.dec_ind()
            self.if_follow.pop()
            self.write('%s}\n' % self.space(), data="IF_END")
            self.visit_node(follow)
        else:
            # No follow node: both branches fully nested in the if/else
            self.write('%sif (' % self.space(), data="IF_3")
            cond.visit_cond(self)
            self.write(') {\n', data="IF_COND_3")
            self.inc_ind()
            self.visit_node(cond.true)
            self.dec_ind()
            self.write('%s} else {\n' % self.space(), data="ELSE_3")
            self.inc_ind()
            self.visit_node(cond.false)
            self.dec_ind()
            self.write('%s}\n' % self.space(), data="IF_END_3")
    def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
        """Emit `(c1) && (c2)` or `(c1) || (c2)`, negating c1 when *nnot*."""
        if nnot:
            cond1.neg()
        self.write('(', data="TODO24")
        cond1.visit_cond(self)
        # aand selects the operator: 0 -> '||', 1 -> '&&'
        self.write(') %s (' % ['||', '&&'][aand], data="TODO25")
        cond2.visit_cond(self)
        self.write(')', data="TODO26")
    def visit_switch_node(self, switch):
        """Emit a switch statement with its cases and default clause."""
        lins = switch.get_ins()
        # All instructions but the last are ordinary; the last one is the
        # switch discriminant.
        for ins in lins[:-1]:
            self.visit_ins(ins)
        switch_ins = switch.get_ins()[-1]
        self.write('%sswitch (' % self.space(), data="SWITCH")
        self.visit_ins(switch_ins)
        self.write(') {\n', data="SWITCH_END")
        follow = switch.follow['switch']
        cases = switch.cases
        self.switch_follow.append(follow)
        default = switch.default
        for i, node in enumerate(cases):
            if node in self.visited_nodes:
                continue
            self.inc_ind()
            # Several case labels may map to the same target node
            for case in switch.node_to_case[node]:
                self.write(
                    '%scase %d:\n' % (self.space(), case), data="CASE_XX")
            if i + 1 < len(cases):
                self.next_case = cases[i + 1]
            else:
                self.next_case = None
            if node is default:
                self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT")
                default = None
            self.inc_ind()
            self.visit_node(node)
            if self.need_break:
                self.write('%sbreak;\n' % self.space(), data="CASE_BREAK")
            else:
                # Fall-through case: the flag is one-shot, reset it
                self.need_break = True
            self.dec_ind(2)
        # Default clause not shared with a case and not the follow node
        if default not in (None, follow):
            self.inc_ind()
            self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT_2")
            self.inc_ind()
            self.visit_node(default)
            self.dec_ind(2)
        self.write('%s}\n' % self.space(), data="CASE_END")
        self.switch_follow.pop()
        self.visit_node(follow)
    def visit_statement_node(self, stmt):
        """Emit a basic block and continue with its single successor."""
        sucs = self.graph.sucs(stmt)
        for ins in stmt.get_ins():
            self.visit_ins(ins)
        if len(sucs) == 1:
            if sucs[0] is self.loop_follow[-1]:
                # Jump to the loop follow is a `break`
                self.write('%sbreak;\n' % self.space(), data="BREAK_2")
            elif sucs[0] is self.next_case:
                # Falls through into the next switch case: no `break`
                self.need_break = False
            else:
                self.visit_node(sucs[0])
    def visit_try_node(self, try_node):
        """Emit a try block followed by its catch clauses and follow node."""
        self.write('%stry {\n' % self.space(), data="TRY_START")
        self.inc_ind()
        self.try_follow.append(try_node.follow)
        self.visit_node(try_node.try_start)
        self.dec_ind()
        self.write('%s}' % self.space(), data="TRY_START_END")
        for catch in try_node.catch:
            self.visit_node(catch)
        self.write('\n', data="NEWLINE_END_TRY")
        self.visit_node(self.try_follow.pop())
    def visit_catch_node(self, catch_node):
        """Emit a catch clause attached to the preceding try block."""
        self.write(' catch (', data="CATCH")
        catch_node.visit_exception(self)
        self.write(') {\n', data="CATCH_START")
        self.inc_ind()
        self.visit_node(catch_node.catch_start)
        self.dec_ind()
        self.write('%s}' % self.space(), data="CATCH_END")
    def visit_return_node(self, ret):
        """Emit a return block; no `break` is needed after a return."""
        self.need_break = False
        for ins in ret.get_ins():
            self.visit_ins(ins)
    def visit_throw_node(self, throw):
        """Emit a throw block."""
        for ins in throw.get_ins():
            self.visit_ins(ins)
    def visit_decl(self, var):
        """Emit a declaration statement for *var* unless already declared."""
        if not var.declared:
            # Fall back to a placeholder when type inference produced nothing
            var_type = var.get_type() or 'unknownType'
            self.write('%s%s v%s' % (
                self.space(), get_type(var_type),
                var.value()), data="DECLARATION")
            self.end_ins()
    def visit_constant(self, cst):
        """Emit a literal constant (string or numeric)."""
        # NOTE(review): `unicode` and the `string(...)` helper are
        # Python-2-era names resolved at module level — confirm before
        # porting to Python 3.
        if isinstance(cst, str) or isinstance(cst, unicode):
            return self.write(string(cst), data="CONSTANT_STRING")
        self.write('%r' % cst, data="CONSTANT_INTEGER")  # INTEGER or also others?
    def visit_base_class(self, cls, data=None):
        """Emit a (base) class name."""
        self.write(cls)
        self.write_ext(('NAME_BASE_CLASS', cls, data))
    def visit_variable(self, var):
        """Emit a variable reference, declaring it inline on first use."""
        var_type = var.get_type() or 'unknownType'
        if not var.declared:
            # First use: prefix with the declared type
            self.write('%s ' % get_type(var_type))
            self.write_ext(
                ('VARIABLE_TYPE', '%s' % get_type(var_type), var_type))
            self.write_ext(('SPACE', ' '))
            var.declared = True
        self.write('v%s' % var.name)
        self.write_ext(('NAME_VARIABLE', 'v%s' % var.name, var, var_type))
def visit_param(self, param, data=None):
self.write('p%s' % param)
self.write_ext(('NAME_PARAM', 'p%s' % param, data))
    def visit_this(self):
        """Emit the implicit `this` reference."""
        self.write('this', data="THIS")
    def visit_assign(self, lhs, rhs):
        """Emit `lhs = rhs`, or a bare rhs expression when lhs is None."""
        if lhs is not None:
            return self.write_inplace_if_possible(lhs, rhs)
        self.write_ind()
        rhs.visit(self)
        # Visiting rhs may set `skip` (e.g. a suppressed `this.<init>()`
        # call in visit_invoke); then nothing was emitted, so no ';'.
        if not self.skip:
            self.end_ins()
    def visit_move_result(self, lhs, rhs):
        """Emit `lhs = rhs;` for a move-result pseudo instruction."""
        self.write_ind_visit_end(lhs, ' = ', rhs)
    def visit_move(self, lhs, rhs):
        """Emit `lhs = rhs;` unless both operands are the same object."""
        if lhs is not rhs:
            self.write_inplace_if_possible(lhs, rhs)
    def visit_astore(self, array, index, rhs, data=None):
        """Emit an array store: `array[index] = rhs;`."""
        self.write_ind()
        array.visit(self)
        self.write('[', data=("ASTORE_START", data))
        index.visit(self)
        self.write('] = ', data="ASTORE_END")
        rhs.visit(self)
        self.end_ins()
    def visit_put_static(self, cls, name, rhs):
        """Emit a static field store: `Cls.name = rhs;`."""
        self.write_ind()
        self.write('%s.%s = ' % (cls, name), data="STATIC_PUT")
        rhs.visit(self)
        self.end_ins()
    def visit_put_instance(self, lhs, name, rhs, data=None):
        """Emit an instance field store: `lhs.name = rhs;`."""
        self.write_ind_visit_end_ext(
            lhs, '.', '%s' % name, ' = ', rhs,
            data=data, subsection='NAME_CLASS_ASSIGNMENT')
    def visit_new(self, atype, data=None):
        """Emit `new <Type>` for an object allocation."""
        self.write('new %s' % get_type(atype))
        self.write_ext(('NEW', 'new '))
        self.write_ext(
            ('NAME_CLASS_NEW', '%s' % get_type(atype), data.type, data))
    def visit_invoke(self, name, base, ptype, rtype, args, invokeInstr=None):
        """Emit a method invocation: ``base.name(arg, ...)``.

        An implicit no-arg super constructor call on ``this`` inside a
        constructor is suppressed entirely (self.skip tells visit_assign
        not to terminate the statement). For non-constructor calls, a
        ``NAME_METHOD_INVOKE`` ext record is emitted whose ``call_name``
        tries to resolve the receiver's declaring type.
        """
        if isinstance(base, ThisParam):
            if name == '<init>' and self.constructor and len(args) == 0:
                self.skip = True
                return
        base.visit(self)
        if name != '<init>':
            if isinstance(base, BaseClass):
                call_name = "%s -> %s" % (base.cls, name)
            elif isinstance(base, InstanceExpression):
                call_name = "%s -> %s" % (base.ftype, name)
            elif hasattr(base, "base") and hasattr(base, "var_map"):
                # Chase the var_map chain back to the NewInstance that
                # produced the receiver.
                # NOTE(review): a cyclic var_map chain would loop forever
                # here — presumably the IR guarantees acyclicity; confirm.
                base2base = base
                while True:
                    base2base = base2base.var_map[base2base.base]
                    if isinstance(base2base, NewInstance):
                        call_name = "%s -> %s" % (base2base.type, name)
                        break
                    elif (hasattr(base2base, "base") and
                          hasattr(base2base, "var_map")):
                        continue
                    else:
                        call_name = "UNKNOWN_TODO"
                        break
            elif isinstance(base, ThisParam):
                call_name = "this -> %s" % name
            elif isinstance(base, Variable):
                call_name = "%s -> %s" % (base.type, name)
            else:
                call_name = "UNKNOWN_TODO2"
            self.write('.%s' % name)
            self.write_ext(('INVOKE', '.'))
            self.write_ext(
                ('NAME_METHOD_INVOKE',
                 '%s' % name, call_name, ptype, rtype, base, invokeInstr))
        self.write('(', data="PARAM_START")
        comma = False
        for arg in args:
            if comma:
                self.write(', ', data="PARAM_SEPARATOR")
            comma = True
            arg.visit(self)
        self.write(')', data="PARAM_END")
    def visit_return_void(self):
        """Emit a bare ``return;`` statement."""
        self.write_ind()
        self.write('return', data="RETURN")
        self.end_ins()
    def visit_return(self, arg):
        """Emit ``return <arg>;``."""
        self.write_ind()
        self.write('return ', data="RETURN")
        arg.visit(self)
        self.end_ins()
    def visit_nop(self):
        """No-op instruction: emits nothing."""
        pass
    def visit_switch(self, arg):
        """Emit only the switch discriminant expression; the surrounding
        switch structure is handled elsewhere."""
        arg.visit(self)
    def visit_check_cast(self, arg, atype):
        """Emit a checked cast: ``((Type) expr)``."""
        self.write('((%s) ' % atype, data="CHECKCAST")
        arg.visit(self)
        self.write(')')
    def visit_aload(self, array, index):
        """Emit an array element read: ``array[index]``."""
        array.visit(self)
        self.write('[', data="ALOAD_START")
        index.visit(self)
        self.write(']', data="ALOAD_END")
    def visit_alength(self, array):
        """Emit an array length read: ``array.length``."""
        array.visit(self)
        self.write('.length', data="ARRAY_LENGTH")
    def visit_new_array(self, atype, size):
        """Emit an array allocation: ``new Type[size]``.

        atype[1:] drops the leading character of the type descriptor —
        presumably the '[' array marker; confirm against get_type().
        """
        self.write('new %s[' % get_type(atype[1:]), data="NEW_ARRAY")
        size.visit(self)
        self.write(']', data="NEW_ARRAY_END")
def visit_filled_new_array(self, atype, size, args):
self.write('new %s {' % get_type(atype), data="NEW_ARRAY_FILLED")
for idx, arg in enumerate(args):
arg.visit(self)
if idx + 1 < len(args):
self.write(', ', data="COMMA")
self.write('})', data="NEW_ARRAY_FILLED_END")
def visit_fill_array(self, array, value):
self.write_ind()
array.visit(self)
self.write(' = {', data="ARRAY_FILLED")
data = value.get_data()
tab = []
elem_size = value.element_width
if elem_size == 4:
for i in range(0, value.size * 4, 4):
tab.append('%s' % unpack('i', data[i:i + 4])[0])
else: # FIXME: other cases
for i in range(value.size):
tab.append('%s' % unpack('b', data[i])[0])
self.write(', '.join(tab), data="COMMA")
self.write('}', data="ARRAY_FILLED_END")
self.end_ins()
    def visit_move_exception(self, var, data=None):
        """Emit the declaration of a caught-exception variable.

        NOTE(review): reads ``data.type``, so callers apparently always
        pass the instruction despite the None default. Also note the text
        output uses ``var.name`` while the ext record uses ``var.value()``
        — looks inconsistent; confirm both yield the same identifier.
        """
        var.declared = True
        var_type = var.get_type() or 'unknownType'
        self.write('%s v%s' % (get_type(var_type), var.name))
        self.write_ext(
            ('EXCEPTION_TYPE', '%s' % get_type(var_type), data.type))
        self.write_ext(('SPACE', ' '))
        self.write_ext(
            ('NAME_CLASS_EXCEPTION', 'v%s' % var.value(), data.type, data))
    def visit_monitor_enter(self, ref):
        """Open a ``synchronized(ref) {`` block and increase the indent;
        the matching close is emitted by visit_monitor_exit."""
        self.write_ind()
        self.write('synchronized(', data="SYNCHRONIZED")
        ref.visit(self)
        self.write(') {\n', data="SYNCHRONIZED_END")
        self.inc_ind()
    def visit_monitor_exit(self, ref):
        """Close the synchronized block opened by visit_monitor_enter."""
        self.dec_ind()
        self.write_ind()
        self.write('}\n', data="MONITOR_EXIT")
    def visit_throw(self, ref):
        """Emit ``throw <ref>;``."""
        self.write_ind()
        self.write('throw ', data="THROW")
        ref.visit(self)
        self.end_ins()
    def visit_binary_expression(self, op, arg1, arg2):
        """Emit a parenthesized binary expression: ``(arg1 op arg2)``."""
        self.write('(', data="BINARY_EXPRESSION_START")
        arg1.visit(self)
        self.write(' %s ' % op, data="TODO58")
        arg2.visit(self)
        self.write(')', data="BINARY_EXPRESSION_END")
    def visit_unary_expression(self, op, arg):
        """Emit a parenthesized unary expression: ``(op arg)``."""
        self.write('(%s ' % op, data="UNARY_EXPRESSION_START")
        arg.visit(self)
        self.write(')', data="UNARY_EXPRESSION_END")
    def visit_cast(self, op, arg):
        """Emit a primitive cast: ``(op arg)`` where op is the cast text."""
        self.write('(%s ' % op, data="CAST_START")
        arg.visit(self)
        self.write(')', data="CAST_END")
    def visit_cond_expression(self, op, arg1, arg2):
        """Emit a two-operand comparison: ``arg1 op arg2`` (no parens)."""
        arg1.visit(self)
        self.write(' %s ' % op, data="COND_EXPRESSION")
        arg2.visit(self)
def visit_condz_expression(self, op, arg):
if isinstance(arg, BinaryCompExpression):
arg.op = op
return arg.visit(self)
atype = arg.get_type()
if atype == 'Z':
if op == Op.EQUAL:
self.write('!', data="NEGATE")
arg.visit(self)
else:
arg.visit(self)
if atype in 'VBSCIJFD':
self.write(' %s 0' % op, data="TODO64")
else:
self.write(' %s null' % op, data="TODO65")
    def visit_get_instance(self, arg, name, data=None):
        """Emit an instance field read: ``arg.name``."""
        arg.visit(self)
        self.write('.%s' % name)
        self.write_ext(('GET_INSTANCE', '.'))
        self.write_ext(('NAME_CLASS_INSTANCE', '%s' % name, data))
    def visit_get_static(self, cls, name):
        """Emit a static field read: ``Class.field``."""
        self.write('%s.%s' % (cls, name), data="GET_STATIC")
def string(s):
    """Return *s* as a double-quoted Java-style string literal.

    Printable ASCII is copied through (quotes and backslash get a
    backslash escape); \r, \n and \t use their short escapes; everything
    else — other control characters and all non-ASCII — becomes \\uXXXX.
    """
    pieces = ['"']
    for ch in s:
        if ' ' <= ch < '\x7f':
            # Printable ASCII: escape quote characters and the backslash.
            if ch in ('\'', '"', '\\'):
                pieces.append('\\')
            pieces.append(ch)
        elif ch in ('\r', '\n', '\t'):
            # Short escapes (Python 2: str.encode returns an escaped str).
            pieces.append(ch.encode('unicode-escape'))
        else:
            code = ord(ch)
            pieces.append('\\u%x%x%x%x' % (code >> 12, (code >> 8) & 0x0f,
                                           (code >> 4) & 0x0f, code & 0x0f))
    pieces.append('"')
    return ''.join(pieces)
|
0x0mar/androguard
|
androguard/decompiler/dad/writer.py
|
Python
|
apache-2.0
| 25,568
|
[
"VisIt"
] |
498fa6ced781605fc751daac5457c3a06f96b3c1bf3f18551631712c66cc66a0
|
from compiler import *
####################################################################################################################
# Each quest record contains the following fields:
# 1) Quest id: used for referencing quests in other files. The prefix qst_ is automatically added before each quest-id.
# 2) Quest Name: Name displayed in the quest screen.
# 3) Quest flags. See header_quests.py for a list of available flags
# 4) Quest Description: Description displayed in the quest screen.
#
# Note that you may call the opcode setup_quest_text for setting up the name and description
####################################################################################################################
quests = [
# Note : This is defined as the first governer quest in module_constants.py:
("deliver_message", "Deliver Message to {s13}", qf_random_quest,
"{!}{s9} asked you to take a message to {s13}. {s13} was at {s4} when you were given this quest."
),
("deliver_message_to_enemy_lord", "Deliver Message to {s13}", qf_random_quest,
"{!}{s9} asked you to take a message to {s13} of {s15}. {s13} was at {s4} when you were given this quest."
),
("raise_troops", "Raise {reg1} {s14}", qf_random_quest,
"{!}{s9} asked you to raise {reg1} {s14} and bring them to him."
),
("escort_lady", "Escort {s13} to {s14}", qf_random_quest,
"{!}None"
),
## ("rescue_lady_under_siege", "Rescue {s3} from {s4}", qf_random_quest,
## "{s1} asked you to rescue his {s7} {s3} from {s4} and return her back to him."
## ),
## ("deliver_message_to_lover", "Deliver Message to {s3}", qf_random_quest,
## "{s1} asked you to take a message to his lover {s3} at {s4}."
## ),
## ("bring_prisoners_to_enemy", "Bring Prisoners to {s4}", qf_random_quest,
## "{s1} asked you to bring {reg1} {s3} as prisoners to the guards at {s4}."
## ),
## ("bring_reinforcements_to_siege", "Bring Reinforcements to the Siege of {s5}", qf_random_quest,
## "{s1} asked you to bring {reg1} {s3} to {s4} at the siege of {s5}."
## ),
## ("deliver_supply_to_center_under_siege", "Deliver Supplies to {s5}", qf_random_quest,
## "TODO: Take {reg1} cartloads of supplies from constable {s3} and deliver them to constable {s4} at {s5}."
## ),
("deal_with_bandits_at_lords_village", "Save the Village of {s15} from Marauding Bandits", qf_random_quest,
"{!}{s13} asked you to deal with the bandits who took refuge in his village of {s15} and then report back to him."
),
("collect_taxes", "Collect Taxes from {s3}", qf_random_quest,
"{!}{s9} asked you to collect taxes from {s3}. He offered to leave you one-fifth of all the money you collect there."
),
("hunt_down_fugitive", "Hunt Down {s4}", qf_random_quest,
"{!}{s9} asked you to hunt down the fugitive named {s4}. He is currently believed to be at {s3}."
),
## ("capture_messenger", "Capture {s3}", qf_random_quest,
## "{s1} asked you to capture a {s3} and bring him back."
## ),
## ("bring_back_deserters", "Bring {reg1} {s3}", qf_random_quest,
## "{s1} asked you to bring {reg1} {s3}."
## ),
("kill_local_merchant", "Assassinate Local Merchant at {s3}", qf_random_quest,
"{!}{s9} asked you to assassinate a local merchant at {s3}."
),
("bring_back_runaway_serfs", "Bring Back Runaway Serfs", qf_random_quest,
"{!}{s9} asked you to bring back the three groups of runaway serfs back to {s2}. He said all three groups must be running away in the direction of {s3}."
),
("follow_spy", "Follow the Spy to Meeting", qf_random_quest,
"{!}{s11} asked you to follow the spy that will leave {s12}. You must be careful not to be seen by the spy during his travel, or else he may get suspicious and turn back. Once the spy meets with his accomplice, you are to ambush and capture them and bring them both back to {s11}."
),
("capture_enemy_hero", "Capture a Lord from {s13}", qf_random_quest,
"{!}TODO: {s11} asked you to capture a lord from {s13}."
),
("lend_companion", "Lend Your Companion {s3} to {s9}", qf_random_quest,
"{!}{s9} asked you to lend your companion {s3} to him for a week."
),
("collect_debt", "Collect the Debt {s3} Owes to {s9}", qf_random_quest,
"{!}{s9} asked you to collect the debt of {reg4} denars {s3} owes to him."
),
## ("capture_conspirators", "Capture Conspirators", qf_random_quest,
## "TODO: {s1} asked you to capture all troops in {reg1} conspirator parties that plan to rebel against him and join {s3}."
## ),
## ("defend_nobles_against_peasants", "Defend Nobles Against Peasants", qf_random_quest,
## "TODO: {s1} asked you to defend {reg1} noble parties against peasants."l
## ),
("incriminate_loyal_commander", "Incriminate the Loyal Commander of {s13}, {s16}", qf_random_quest,
"{!}None"
),
# ("raid_caravan_to_start_war", "Raid {reg13} Caravans of {s13}", qf_random_quest, #This is now a dynamic quest, integrated into the provocation system
# "None"
# ),
("meet_spy_in_enemy_town", "Meet Spy in {s13}", qf_random_quest,
"{!}None"
),
("capture_prisoners", "Bring {reg1} {s3} Prisoners", qf_random_quest,
"{!}{s9} wanted you to bring him {reg1} {s3} as prisoners."
),
## ("hunt_down_raiders", "Hunt Down Raiders",qf_random_quest,
## "{s1} asked you to hunt down and punish the raiders that attacked a village near {s3} before they reach the safety of their base at {s4}."
## ),
##################
# Enemy Kingdom Lord quests
##################
# Note : This is defined as the first enemy lord quest in module_constants.py:
("lend_surgeon", "Lend Your Surgeon {s3} to {s1}", qf_random_quest,
"{!}Lend your experienced surgeon {s3} to {s1}."
),
##################
# Kingdom Army quests
##################
# Note : This is defined as lord quests end in module_constants.py:
("follow_army", "Follow {s9}'s Army", qf_random_quest,
"{!}None"
),
("report_to_army", "Report to {s13}, the Marshall", qf_random_quest,
"{!}None"
),
# Note : This is defined as the first army quest in module_constants.py:
# maybe disable these army quests, except as volunteer quests that add to the capacity of the army
("deliver_cattle_to_army", "Deliver {reg3} Heads of Cattle to {s13}", qf_random_quest,
"{!}None"
),
("join_siege_with_army", "Join the Siege of {s14}", qf_random_quest,
"{!}None"
),
("screen_army", "Screen the Advance of {s13}'s Army", qf_random_quest,
"{!}None"
),
("scout_waypoints", "Scout {s13}, {s14} and {s15}", qf_random_quest,
"{!}None"
),
##################
# Kingdom Lady quests
##################
# Note : This is defined as the first kingdom lady quest in module_constants.py:
#Rescue lord by replace will become a
("rescue_lord_by_replace", "Rescue {s13} from {s14}", qf_random_quest,
"{!}None"
),
("deliver_message_to_prisoner_lord", "Deliver Message to {s13} at {s14}", qf_random_quest,
"{!}None"
),
#Courtship quests
("duel_for_lady", "Challenge {s13} to a Trial of Arms", qf_random_quest,
"{!}None"
),
("duel_courtship_rival", "Challenge {s13} to a Trial of Arms (optional)", qf_random_quest,
"{!}None"
),
#Other duel quests
("duel_avenge_insult", "Challenge {s13} to a Trial of Arms", qf_random_quest,
"{!}None"
),
##################
# Mayor quests
##################
# Note : This is defined as the first mayor quest in module_constants.py:
("move_cattle_herd", "Move Cattle Herd to {s13}", qf_random_quest,
"{!}Guildmaster of {s10} asked you to move a cattle herd to {s13}."
),
("escort_merchant_caravan", "Escort Merchant Caravan to {s8}", qf_random_quest, #make this a non-random quest?
"{!}Escort the merchant caravan to the town of {s8}."
),
("deliver_wine", "Deliver {reg5} Units of {s6} to {s4}", qf_random_quest,
"{!}{s9} of {s3} asked you to deliver {reg5} units of {s6} to the tavern in {s4} in 7 days."
),
("troublesome_bandits", "Hunt Down Troublesome Bandits", qf_random_quest,
"{!}{s9} of {s4} asked you to hunt down the troublesome bandits in the vicinity of the town."
),
("kidnapped_girl", "Ransom Girl from Bandits", qf_random_quest,
"{!}Guildmaster of {s4} gave you {reg12} denars to pay the ransom of a girl kidnapped by bandits.\
You are to meet the bandits near {s3} and pay them the ransom fee.\
After that you are to bring the girl back to {s4}."
),
("persuade_lords_to_make_peace", "Make Sure Two Lords Do Not Object to Peace", qf_random_quest, #possibly deprecate., or change effects
"{!}Guildmaster of {s4} promised you {reg12} denars if you can make sure that\
{s12} and {s13} no longer pose a threat to a peace settlement between {s15} and {s14}.\
In order to do that, you must either convince them or make sure they fall captive and remain so until a peace agreement is made."
),
("deal_with_looters", "Deal with Looters", qf_random_quest,
"{!}The Guildmaster of {s4} has asked you to deal with several bands of looters around {s4}, and bring back any goods you recover."
),
("deal_with_night_bandits", "Deal with Night Bandits", qf_random_quest,
"{!}TODO: The Guildmaster of {s14} has asked you to deal with night bandits at {s14}."
),
############
# Village Elder quests
############
# Note : This is defined as the first village elder quest in module_constants.py:
("deliver_grain", "Bring wheat to {s3}", qf_random_quest,
"{!}The elder of the village of {s3} asked you to bring them {reg5} packs of wheat.."
),
("deliver_cattle", "Deliver {reg5} Heads of Cattle to {s3}", qf_random_quest,
"{!}The elder of the village of {s3} asked you to bring {reg5} heads of cattle."
),
("train_peasants_against_bandits", "Train the Peasants of {s13} Against Bandits.", qf_random_quest,
"{!}None"
),
# Deliver horses, Deliver food, Escort_Caravan, Hunt bandits, Ransom Merchant.
## ("capture_nobleman", "Capture Nobleman",qf_random_quest,
## "{s1} wanted you to capture an enemy nobleman on his way from {s3} to {s4}. He said the nobleman would leave {s3} in {reg1} days."
## ),
# Bandit quests: Capture rich merchant, capture banker, kill manhunters?..
# Note : This is defined as the last village elder quest in module_constants.py:
("eliminate_bandits_infesting_village", "Save the Village of {s7} from Marauding Bandits", qf_random_quest,
"{!}A villager from {s7} begged you to save their village from the bandits that took refuge there."
),
# Tutorial quest
## ("destroy_dummies", "Destroy Dummies", qf_show_progression,
## "Trainer ordered you to destroy 10 dummies in the training camp."
## ),
#Courtship and marriage quests begin here
("visit_lady", "Visit Lady", qf_random_quest,
"{!}None"
),
("formal_marriage_proposal", "Formal Marriage Proposal", qf_random_quest,
"{!}None"
), #Make a formal proposal to a bride's father or brother
("obtain_liege_blessing", "Formal Marriage Proposal", qf_random_quest,
"{!}None"
), #The equivalent of the above -- ask permission of a groom's liege. Is currently not used
("wed_betrothed", "Wed Your Betrothed", qf_random_quest,
"{!}None"
), #in this case, the giver troop is the father or guardian of the bride, object troop is the bride
("wed_betrothed_female", "Wed Your Betrothed", qf_random_quest,
"{!}None"
), #in this case, the giver troop is the spouse
# Join Kingdom quest
("join_faction", "Give Oath of Homage to {s1}", qf_random_quest,
"{!}Find {s1} and give him your oath of homage."
),
# Rebel against Kingdom quest
("rebel_against_kingdom", "Help {s13} Claim the Throne of {s14}", qf_random_quest,
"{!}None"
),
#Political quests begin here
("consult_with_minister", "Consult With Minister", qf_random_quest, "{!}Consult your minister, {s11}, currently at {s12}"),
("organize_feast", "Organize Feast", qf_random_quest, "{!}Bring goods for a feast to your spouse {s11}, currently at {s12}"),
("resolve_dispute", "Resolve Dispute", qf_random_quest, "{!}Resolve the dispute between {s11} and {s12}"),
("offer_gift", "Procure Gift", qf_random_quest, "{!}Give {s10} a gift to provide to {reg4?her:his} {s11}, {s12}"),
("denounce_lord", "Denounce Lord", qf_random_quest, "{!}Denounce {s11} in Public"),
("intrigue_against_lord", "Intrigue against Lord", qf_random_quest, "{!}Criticize {s11} in Private"),
#Dynamic quests begin here
#These quests are determined dynamically by external conditions -- bandits who have carried out a raid, an impending war, etc...
("track_down_bandits", "Track Down Bandits", qf_random_quest,
"{!}{s9} of {s4} asked you to track down {s6}, who attacked travellers on the roads near town."
), #this is a fairly simple quest for the early game to make the town guildmaster's description of the economy a little more relevant, and also to give the player a reason to talk to other neutral parties on the map
("track_down_provocateurs", "Track Down Provocateurs", qf_random_quest,
"{!}{s9} of {s4} asked you to track down a group of thugs, hired to create a border incident between {s5} and {s6}."
),
("retaliate_for_border_incident", "Retaliate for a Border Incident", qf_random_quest,
"{!}{s9} of {s4} asked you to defeat {s5} of the {s7} in battle, defusing tension in the {s8} to go to war."
), #perhaps replaces persuade_lords_to_make_peace
("raid_caravan_to_start_war", "Attack a Neutral Caravan to Provoke War", qf_random_quest,
"{!}placeholder",
),
("cause_provocation", "Give a Kingdom Provocation to Attack Another", qf_random_quest,
"{!}placeholder",
), #replaces raid_caravan_to_start_war
("rescue_prisoner", "Rescue or Ransom a Prisoner", qf_random_quest,
"{!}placeholder"
), #possibly replaces rescue lord
("destroy_bandit_lair", "Destroy Bandit Lair", qf_random_quest,
"{!}{s9} of {s4} asked you to discover a {s6} and destroy it."
),
("blank_quest_2", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_3", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_4", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_5", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_6", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_7", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_8", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_9", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_10", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_11", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_12", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_13", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_14", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_15", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_16", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_17", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_18", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_19", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_20", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_21", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_22", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_23", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_24", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_25", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_26", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("blank_quest_27", "{!}blank_quest", qf_random_quest,
"{!}placeholder"
),
("collect_men", "Collect Five Men", 0,
"{!}{s9} asked you to collect at least 5 men before you move against the bandits threatening the townsmen. You can recruit soldiers from villages as well as town taverns. You can find {s9} at the tavern in {s4} when you have think you have enough men."
),
("learn_where_merchant_brother_is", "Learn Where the Hostages are Held.", 0,
"{!}placeholder."
),
("save_relative_of_merchant", "Attack the Bandit Lair", 0,
"{!}placeholder."
),
("save_town_from_bandits", "Save Town from Bandits", 0,
"{!}placeholder."
),
("quests_end", "Quests End", 0, "{!}."),
]
#LWBR WarForge 2.0 --- BEGIN
if not IS_CLIENT:
    # Non-client build: blank out every quest's name/flags/description,
    # keeping only the id (index positions must stay unchanged).
    for slot, quest in enumerate(quests):
        quests[slot] = (quest[0], "_", 0, "_")
#LWBR WarForge 2.0 --- END
|
Ikaguia/LWBR-WarForge
|
module_quests.py
|
Python
|
unlicense
| 16,618
|
[
"VisIt"
] |
604308b2a3231f4defc550600762fef6292c6a7911f154621c2a18281417d7bb
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2011 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# HTML scaffold for the shownotes view. The four '%s' slots are filled,
# in order, with: the episode link URL, the link text (shown bold), a
# small-print line, and the description body inside <p>.
SHOWNOTES_HTML_TEMPLATE = """
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
</head>
<body>
<a href="%s" style="color:black;font-size: big; font-weight: bold;">%s</a>
<br>
<span style="font-size: small;">%s</span>
<hr style="border: 1px #eeeeee solid;">
<p>%s</p>
</body>
</html>
"""
import os
import platform
import gtk
import gtk.gdk
import gobject
import pango
import random
import sys
import shutil
import subprocess
import glob
import time
import tempfile
import collections
import threading
import Queue
import urllib
from xml.sax import saxutils
import gpodder
try:
    import dbus
    import dbus.service
    import dbus.mainloop
    import dbus.glib
except ImportError:
    # Mock the required D-Bus interfaces with no-ops (ugly? maybe.)
    # Stand-in namespace mirroring only the pieces of python-dbus that
    # this module touches, so the GUI still runs without D-Bus support.
    class dbus:
        class SessionBus:
            def __init__(self, *args, **kwargs):
                pass
            def add_signal_receiver(self, *args, **kwargs):
                pass
        class glib:
            class DBusGMainLoop:
                def __init__(self, *args, **kwargs):
                    pass
        class service:
            # Decorator factory: the real dbus.service.method wraps the
            # function; the mock returns it unchanged.
            @staticmethod
            def method(*args, **kwargs):
                return lambda x: x
            class BusName:
                def __init__(self, *args, **kwargs):
                    pass
            class Object:
                def __init__(self, *args, **kwargs):
                    pass
from gpodder import feedcore
from gpodder import util
from gpodder import opml
from gpodder import download
from gpodder import my
from gpodder import youtube
from gpodder import player
from gpodder.liblogger import log
_ = gpodder.gettext
N_ = gpodder.ngettext
from gpodder.model import PodcastChannel
from gpodder.model import PodcastEpisode
from gpodder.dbsqlite import Database
from gpodder.gtkui.model import PodcastListModel
from gpodder.gtkui.model import EpisodeListModel
from gpodder.gtkui.config import UIConfig
from gpodder.gtkui.services import CoverDownloader
from gpodder.gtkui.widgets import SimpleMessageArea
from gpodder.gtkui.desktopfile import UserAppsReader
from gpodder.gtkui.draw import draw_text_box_centered
from gpodder.gtkui.interface.common import BuilderWidget
from gpodder.gtkui.interface.common import TreeViewHelper
from gpodder.gtkui.interface.addpodcast import gPodderAddPodcast
if gpodder.ui.desktop:
from gpodder.gtkui.download import DownloadStatusModel
from gpodder.gtkui.desktop.sync import gPodderSyncUI
from gpodder.gtkui.desktop.channel import gPodderChannel
from gpodder.gtkui.desktop.preferences import gPodderPreferences
from gpodder.gtkui.desktop.shownotes import gPodderShownotes
from gpodder.gtkui.desktop.episodeselector import gPodderEpisodeSelector
from gpodder.gtkui.desktop.podcastdirectory import gPodderPodcastDirectory
from gpodder.gtkui.desktop.dependencymanager import gPodderDependencyManager
from gpodder.gtkui.interface.progress import ProgressIndicator
try:
from gpodder.gtkui.desktop.trayicon import GPodderStatusIcon
have_trayicon = True
except Exception, exc:
log('Warning: Could not import gpodder.trayicon.', traceback=True)
log('Warning: This probably means your PyGTK installation is too old!')
have_trayicon = False
elif gpodder.ui.diablo:
from gpodder.gtkui.download import DownloadStatusModel
from gpodder.gtkui.maemo.channel import gPodderChannel
from gpodder.gtkui.maemo.preferences import gPodderPreferences
from gpodder.gtkui.maemo.shownotes import gPodderShownotes
from gpodder.gtkui.maemo.episodeselector import gPodderEpisodeSelector
from gpodder.gtkui.maemo.podcastdirectory import gPodderPodcastDirectory
from gpodder.gtkui.maemo.mygpodder import MygPodderSettings
from gpodder.gtkui.interface.progress import ProgressIndicator
have_trayicon = False
elif gpodder.ui.fremantle:
from gpodder.gtkui.frmntl.model import DownloadStatusModel
from gpodder.gtkui.frmntl.model import EpisodeListModel
from gpodder.gtkui.frmntl.model import PodcastListModel
from gpodder.gtkui.maemo.channel import gPodderChannel
from gpodder.gtkui.frmntl.preferences import gPodderPreferences
from gpodder.gtkui.frmntl.shownotes import gPodderShownotes
from gpodder.gtkui.frmntl.episodeselector import gPodderEpisodeSelector
from gpodder.gtkui.frmntl.podcastdirectory import gPodderPodcastDirectory
from gpodder.gtkui.frmntl.episodes import gPodderEpisodes
from gpodder.gtkui.frmntl.downloads import gPodderDownloads
from gpodder.gtkui.frmntl.progress import ProgressIndicator
from gpodder.gtkui.frmntl.widgets import FancyProgressBar
have_trayicon = False
from gpodder.gtkui.frmntl.portrait import FremantleRotation
from gpodder.gtkui.frmntl.mafw import MafwPlaybackMonitor
from gpodder.gtkui.frmntl.hints import HINT_STRINGS
from gpodder.gtkui.frmntl.network import NetworkManager
from gpodder.gtkui.interface.common import Orientation
from gpodder.gtkui.interface.welcome import gPodderWelcome
if gpodder.ui.maemo:
import hildon
from gpodder.dbusproxy import DBusPodcastsProxy
from gpodder import hooks
class gPodder(BuilderWidget, dbus.service.Object):
finger_friendly_widgets = ['btnCleanUpDownloads', 'button_search_episodes_clear', 'label2', 'labelDownloads', 'btnUpdateFeeds']
ICON_GENERAL_ADD = 'general_add'
ICON_GENERAL_REFRESH = 'general_refresh'
# Delay until live search is started after typing stop
LIVE_SEARCH_DELAY = 200
    def __init__(self, bus_name, config):
        """Register the GUI on D-Bus, expose the podcast proxy API, open
        the database, store the config and build the widgets.

        bus_name -- dbus.service.BusName the GUI object is exported under
        config   -- UIConfig instance shared by the whole application
        """
        dbus.service.Object.__init__(self, object_path=gpodder.dbus_gui_object_path, bus_name=bus_name)
        self.podcasts_proxy = DBusPodcastsProxy(lambda: self.channels, \
                self.on_itemUpdate_activate, \
                self.playback_episodes, \
                self.download_episode_list, \
                self.episode_object_by_uri, \
                bus_name)
        self.db = Database(gpodder.database_file)
        self.config = config
        # BuilderWidget.__init__ loads the UI and eventually calls new().
        BuilderWidget.__init__(self, None)
def new(self):
if gpodder.ui.diablo:
import hildon
self.app = hildon.Program()
self.app.add_window(self.main_window)
self.main_window.add_toolbar(self.toolbar)
menu = gtk.Menu()
for child in self.main_menu.get_children():
child.reparent(menu)
self.main_window.set_menu(self.set_finger_friendly(menu))
self._last_orientation = Orientation.LANDSCAPE
elif gpodder.ui.fremantle:
import hildon
self.app = hildon.Program()
self.app.add_window(self.main_window)
appmenu = hildon.AppMenu()
for filter in (self.item_view_podcasts_all, \
self.item_view_podcasts_downloaded, \
self.item_view_podcasts_unplayed):
button = gtk.ToggleButton()
filter.connect_proxy(button)
appmenu.add_filter(button)
for action in (self.itemPreferences, \
self.item_downloads, \
self.itemRemoveOldEpisodes, \
self.item_unsubscribe, \
self.itemAbout):
button = hildon.Button(gtk.HILDON_SIZE_AUTO,\
hildon.BUTTON_ARRANGEMENT_HORIZONTAL)
action.connect_proxy(button)
if action == self.item_downloads:
button.set_title(_('Downloads'))
button.set_value(_('Idle'))
self.button_downloads = button
appmenu.append(button)
def show_hint(button):
self.show_message(random.choice(HINT_STRINGS), important=True)
button = hildon.Button(gtk.HILDON_SIZE_AUTO,\
hildon.BUTTON_ARRANGEMENT_HORIZONTAL)
button.set_title(_('Hint of the day'))
button.connect('clicked', show_hint)
appmenu.append(button)
appmenu.show_all()
self.main_window.set_app_menu(appmenu)
# Initialize portrait mode / rotation manager
self._fremantle_rotation = FremantleRotation('gPodder', \
self.main_window, \
gpodder.__version__, \
self.config.rotation_mode)
# Initialize the Fremantle network manager
self.network_manager = NetworkManager()
if self.config.rotation_mode == FremantleRotation.ALWAYS:
util.idle_add(self.on_window_orientation_changed, \
Orientation.PORTRAIT)
self._last_orientation = Orientation.PORTRAIT
else:
self._last_orientation = Orientation.LANDSCAPE
# Flag set when a notification is being shown (Maemo bug 11235)
self._fremantle_notification_visible = False
else:
self._last_orientation = Orientation.LANDSCAPE
self.toolbar.set_property('visible', self.config.show_toolbar)
self.bluetooth_available = util.bluetooth_available()
self.config.connect_gtk_window(self.gPodder, 'main_window')
if not gpodder.ui.fremantle:
self.config.connect_gtk_paned('paned_position', self.channelPaned)
self.main_window.show()
self.player_receiver = player.MediaPlayerDBusReceiver(self.on_played)
if gpodder.ui.fremantle:
# Create a D-Bus monitoring object that takes care of
# tracking MAFW (Nokia Media Player) playback events
# and sends episode playback status events via D-Bus
self.mafw_monitor = MafwPlaybackMonitor(gpodder.dbus_session_bus)
self.gPodder.connect('key-press-event', self.on_key_press)
self.preferences_dialog = None
self.config.add_observer(self.on_config_changed)
self.tray_icon = None
self.episode_shownotes_window = None
self.new_episodes_window = None
if gpodder.ui.desktop:
# Mac OS X-specific UI tweaks: Native main menu integration
# http://sourceforge.net/apps/trac/gtk-osx/wiki/Integrate
if getattr(gtk.gdk, 'WINDOWING', 'x11') == 'quartz':
try:
import igemacintegration as igemi
# Move the menu bar from the window to the Mac menu bar
self.mainMenu.hide()
igemi.ige_mac_menu_set_menu_bar(self.mainMenu)
# Reparent some items to the "Application" menu
for widget in ('/mainMenu/menuHelp/itemAbout', \
'/mainMenu/menuPodcasts/itemPreferences'):
item = self.uimanager1.get_widget(widget)
group = igemi.ige_mac_menu_add_app_menu_group()
igemi.ige_mac_menu_add_app_menu_item(group, item, None)
quit_widget = '/mainMenu/menuPodcasts/itemQuit'
quit_item = self.uimanager1.get_widget(quit_widget)
igemi.ige_mac_menu_set_quit_menu_item(quit_item)
except ImportError:
print >>sys.stderr, """
Warning: ige-mac-integration not found - no native menus.
"""
self.sync_ui = gPodderSyncUI(self.config, self.notification, \
self.main_window, self.show_confirmation, \
self.update_episode_list_icons, \
self.update_podcast_list_model, self.toolPreferences, \
gPodderEpisodeSelector, \
self.commit_changes_to_database)
else:
self.sync_ui = None
self.download_status_model = DownloadStatusModel()
self.download_queue_manager = download.DownloadQueueManager(self.config)
if gpodder.ui.desktop:
self.show_hide_tray_icon()
self.itemShowAllEpisodes.set_active(self.config.podcast_list_view_all)
self.itemShowNewEpisodes.set_active(self.config.podcast_list_view_new)
self.itemShowToolbar.set_active(self.config.show_toolbar)
self.itemShowDescription.set_active(self.config.episode_list_descriptions)
if not gpodder.ui.fremantle:
self.config.connect_gtk_spinbutton('max_downloads', self.spinMaxDownloads)
self.config.connect_gtk_togglebutton('max_downloads_enabled', self.cbMaxDownloads)
self.config.connect_gtk_spinbutton('limit_rate_value', self.spinLimitDownloads)
self.config.connect_gtk_togglebutton('limit_rate', self.cbLimitDownloads)
# When the amount of maximum downloads changes, notify the queue manager
changed_cb = lambda spinbutton: self.download_queue_manager.spawn_threads()
self.spinMaxDownloads.connect('value-changed', changed_cb)
self.default_title = 'gPodder'
if gpodder.__version__.rfind('git') != -1:
self.set_title('gPodder %s' % gpodder.__version__)
else:
title = self.gPodder.get_title()
if title is not None:
self.set_title(title)
else:
self.set_title(_('gPodder'))
self.cover_downloader = CoverDownloader()
# Generate list models for podcasts and their episodes
self.podcast_list_model = PodcastListModel(self.cover_downloader)
self.cover_downloader.register('cover-available', self.cover_download_finished)
self.cover_downloader.register('cover-removed', self.cover_file_removed)
if gpodder.ui.fremantle:
# Work around Maemo bug #4718
self.button_refresh.set_name('HildonButton-finger')
self.button_subscribe.set_name('HildonButton-finger')
self.button_refresh.set_sensitive(False)
self.button_subscribe.set_sensitive(False)
self.button_subscribe.set_image(gtk.image_new_from_icon_name(\
self.ICON_GENERAL_ADD, gtk.ICON_SIZE_BUTTON))
self.button_refresh.set_image(gtk.image_new_from_icon_name(\
self.ICON_GENERAL_REFRESH, gtk.ICON_SIZE_BUTTON))
# Make the button scroll together with the TreeView contents
action_area_box = self.treeChannels.get_action_area_box()
for child in self.buttonbox:
child.reparent(action_area_box)
self.vbox.remove(self.buttonbox)
self.treeChannels.set_action_area_visible(True)
# Set up a very nice progress bar setup
self.fancy_progress_bar = FancyProgressBar(self.main_window, \
self.on_btnCancelFeedUpdate_clicked)
self.pbFeedUpdate = self.fancy_progress_bar.progress_bar
self.pbFeedUpdate.set_ellipsize(pango.ELLIPSIZE_MIDDLE)
self.vbox.pack_start(self.fancy_progress_bar.event_box, False)
from gpodder.gtkui.frmntl import style
sub_font = style.get_font_desc('SmallSystemFont')
sub_color = style.get_color('SecondaryTextColor')
sub = (sub_font.to_string(), sub_color.to_string())
sub = '<span font_desc="%s" foreground="%s">%%s</span>' % sub
self.label_footer.set_markup(sub % gpodder.__copyright__)
hildon.hildon_gtk_window_set_progress_indicator(self.main_window, True)
while gtk.events_pending():
gtk.main_iteration(False)
try:
# Try to get the real package version from dpkg
p = subprocess.Popen(['dpkg-query', '-W', '-f=${Version}', 'gpodder'], stdout=subprocess.PIPE)
version, _stderr = p.communicate()
del _stderr
del p
except:
version = gpodder.__version__
self.label_footer.set_markup(sub % ('v %s' % version))
self.label_footer.hide()
self.episodes_window = gPodderEpisodes(self.main_window, \
on_treeview_expose_event=self.on_treeview_expose_event, \
show_episode_shownotes=self.show_episode_shownotes, \
update_podcast_list_model=self.update_podcast_list_model, \
on_itemRemoveChannel_activate=self.on_itemRemoveChannel_activate, \
item_view_episodes_all=self.item_view_episodes_all, \
item_view_episodes_unplayed=self.item_view_episodes_unplayed, \
item_view_episodes_downloaded=self.item_view_episodes_downloaded, \
item_view_episodes_undeleted=self.item_view_episodes_undeleted, \
on_entry_search_episodes_changed=self.on_entry_search_episodes_changed, \
on_entry_search_episodes_key_press=self.on_entry_search_episodes_key_press, \
hide_episode_search=self.hide_episode_search, \
on_itemUpdateChannel_activate=self.on_itemUpdateChannel_activate, \
playback_episodes=self.playback_episodes, \
delete_episode_list=self.delete_episode_list, \
episode_list_status_changed=self.episode_list_status_changed, \
download_episode_list=self.download_episode_list, \
episode_is_downloading=self.episode_is_downloading, \
show_episode_in_download_manager=self.show_episode_in_download_manager, \
add_download_task_monitor=self.add_download_task_monitor, \
remove_download_task_monitor=self.remove_download_task_monitor, \
for_each_episode_set_task_status=self.for_each_episode_set_task_status, \
on_itemUpdate_activate=self.on_itemUpdate_activate, \
show_delete_episodes_window=self.show_delete_episodes_window, \
cover_downloader=self.cover_downloader)
# Expose objects for episode list type-ahead find
self.hbox_search_episodes = self.episodes_window.hbox_search_episodes
self.entry_search_episodes = self.episodes_window.entry_search_episodes
self.button_search_episodes_clear = self.episodes_window.button_search_episodes_clear
self.downloads_window = gPodderDownloads(self.main_window, \
on_treeview_expose_event=self.on_treeview_expose_event, \
cleanup_downloads=self.cleanup_downloads, \
_for_each_task_set_status=self._for_each_task_set_status, \
downloads_list_get_selection=self.downloads_list_get_selection, \
_config=self.config)
self.treeAvailable = self.episodes_window.treeview
self.treeDownloads = self.downloads_window.treeview
# Source IDs for timeouts for search-as-you-type
self._podcast_list_search_timeout = None
self._episode_list_search_timeout = None
# Init the treeviews that we use
self.init_podcast_list_treeview()
self.init_episode_list_treeview()
self.init_download_list_treeview()
if self.config.podcast_list_hide_boring:
self.item_view_hide_boring_podcasts.set_active(True)
self.currently_updating = False
if gpodder.ui.maemo or self.config.enable_fingerscroll:
self.context_menu_mouse_button = 1
else:
self.context_menu_mouse_button = 3
if self.config.start_iconified:
self.iconify_main_window()
self.download_tasks_seen = set()
self.download_list_update_enabled = False
self.download_task_monitors = set()
# Subscribed channels
self.active_channel = None
self.channels = PodcastChannel.load_from_db(self.db, self.config.download_dir)
self.channel_list_changed = True
self.update_podcasts_tab()
# load list of user applications for audio playback
self.user_apps_reader = UserAppsReader(['audio', 'video'])
threading.Thread(target=self.user_apps_reader.read).start()
# Set the "Device" menu item for the first time
if gpodder.ui.desktop:
self.update_item_device()
# Set up the first instance of MygPoClient
self.mygpo_client = my.MygPoClient(self.config)
# Now, update the feed cache, when everything's in place
if not gpodder.ui.fremantle:
self.btnUpdateFeeds.show()
self.updating_feed_cache = False
self.feed_cache_update_cancelled = False
# Always load the podcast list, even when updating later (bug 1337)
self.update_feed_cache(force_update=False)
if self.config.update_on_startup:
self.update_feed_cache(force_update=True)
self.message_area = None
def find_partial_downloads():
# Look for partial file downloads
partial_files = glob.glob(os.path.join(self.config.download_dir, '*', '*.partial'))
count = len(partial_files)
resumable_episodes = []
if count:
if not gpodder.ui.fremantle:
util.idle_add(self.wNotebook.set_current_page, 1)
indicator = ProgressIndicator(_('Loading incomplete downloads'), \
_('Some episodes have not finished downloading in a previous session.'), \
False, self.get_dialog_parent())
indicator.on_message(N_('%(count)d partial file', '%(count)d partial files', count) % {'count':count})
candidates = [f[:-len('.partial')] for f in partial_files]
found = 0
for c in self.channels:
for e in c.get_all_episodes():
filename = e.local_filename(create=False, check_only=True)
if filename in candidates:
log('Found episode: %s', e.title, sender=self)
found += 1
indicator.on_message(e.title)
indicator.on_progress(float(found)/count)
candidates.remove(filename)
partial_files.remove(filename+'.partial')
if os.path.exists(filename):
# The file has already been downloaded;
# remove the leftover partial file
util.delete_file(filename+'.partial')
else:
resumable_episodes.append(e)
if not candidates:
break
if not candidates:
break
for f in partial_files:
log('Partial file without episode: %s', f, sender=self)
util.delete_file(f)
util.idle_add(indicator.on_finished)
if len(resumable_episodes):
def offer_resuming():
self.download_episode_list_paused(resumable_episodes)
if not gpodder.ui.fremantle:
resume_all = gtk.Button(_('Resume all'))
#resume_all.set_border_width(0)
def on_resume_all(button):
selection = self.treeDownloads.get_selection()
selection.select_all()
selected_tasks, can_queue, can_cancel, can_pause, can_remove, can_force = self.downloads_list_get_selection()
selection.unselect_all()
self._for_each_task_set_status(selected_tasks, download.DownloadTask.QUEUED)
self.message_area.hide()
resume_all.connect('clicked', on_resume_all)
self.message_area = SimpleMessageArea(_('Incomplete downloads from a previous session were found.'), (resume_all,))
self.vboxDownloadStatusWidgets.pack_start(self.message_area, expand=False)
self.vboxDownloadStatusWidgets.reorder_child(self.message_area, 0)
self.message_area.show_all()
self.clean_up_downloads(delete_partial=False)
util.idle_add(offer_resuming)
elif not gpodder.ui.fremantle:
util.idle_add(self.wNotebook.set_current_page, 0)
else:
util.idle_add(self.clean_up_downloads, True)
threading.Thread(target=find_partial_downloads).start()
# Start the auto-update procedure
self._auto_update_timer_source_id = None
if self.config.auto_update_feeds:
self.restart_auto_update_timer()
# Delete old episodes if the user wishes to
if self.config.auto_remove_played_episodes and \
self.config.episode_old_age > 0:
old_episodes = list(self.get_expired_episodes())
if len(old_episodes) > 0:
self.delete_episode_list(old_episodes, confirm=False)
self.update_podcast_list_model(set(e.channel.url for e in old_episodes))
if gpodder.ui.fremantle:
hildon.hildon_gtk_window_set_progress_indicator(self.main_window, False)
self.button_refresh.set_sensitive(True)
self.button_subscribe.set_sensitive(True)
self.main_window.set_title(_('gPodder'))
hildon.hildon_gtk_window_take_screenshot(self.main_window, True)
# Do the initial sync with the web service
util.idle_add(self.mygpo_client.flush, True)
# First-time users should be asked if they want to see the OPML
if not self.channels and not gpodder.ui.fremantle:
util.idle_add(self.on_itemUpdate_activate)
# initialise the html notes
self._read_timer_source_id = None
# initialise text view
self.textview.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('#ffffff'))
self.hovering_over_link = False
self.hand_cursor = gtk.gdk.Cursor(gtk.gdk.HAND2)
self.regular_cursor = gtk.gdk.Cursor(gtk.gdk.XTERM)
if self.config.enable_html_shownotes:
try:
import webkit
webview_signals = gobject.signal_list_names(webkit.WebView)
if 'navigation-policy-decision-requested' in webview_signals:
setattr(self, 'have_webkit', True)
setattr(self, 'htmlview', webkit.WebView())
else:
log('Your WebKit is too old (see bug 1001).', sender=self)
setattr(self, 'have_webkit', False)
def navigation_policy_decision(wv, fr, req, action, decision):
REASON_LINK_CLICKED, REASON_OTHER = 0, 5
if action.get_reason() == REASON_LINK_CLICKED:
util.open_website(req.get_uri())
decision.ignore()
elif action.get_reason() == REASON_OTHER:
decision.use()
else:
decision.ignore()
self.htmlview.connect('navigation-policy-decision-requested', \
navigation_policy_decision)
self.scrolled_window.remove(self.scrolled_window.get_child())
self.scrolled_window.add(self.htmlview)
self.textview = None
self.htmlview.load_html_string('', '')
self.htmlview.show()
except ImportError:
setattr(self, 'have_webkit', False)
else:
setattr(self, 'have_webkit', False)
# Links can be activated by pressing Enter.
def on_textview_key_press_event(self, text_view, event):
    """Follow a hyperlink at the text cursor when Return/Enter is hit.

    Always returns False so GTK continues its normal key handling.
    """
    activation_keys = (gtk.keysyms.Return, gtk.keysyms.KP_Enter)
    if event.keyval in activation_keys:
        text_buffer = text_view.get_buffer()
        cursor_iter = text_buffer.get_iter_at_mark(text_buffer.get_insert())
        self.textview_follow_if_link(cursor_iter)
    return False
# Links can also be activated by clicking.
def on_textview_event_after(self, text_view, event):
    """Follow a hyperlink under the pointer after a left-button release.

    Does nothing when the user has a non-empty text selection, so that
    selecting link text does not open it. Always returns False.
    """
    if event.type != gtk.gdk.BUTTON_RELEASE or event.button != 1:
        return False
    text_buffer = text_view.get_buffer()
    # We shouldn't follow a link if the user has selected something
    try:
        bound_start, bound_end = text_buffer.get_selection_bounds()
    except ValueError:
        # Nothing is selected - fall through and follow the link
        pass
    else:
        if bound_start.get_offset() != bound_end.get_offset():
            return False
    buf_x, buf_y = text_view.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET,
            int(event.x), int(event.y))
    self.textview_follow_if_link(text_view.get_iter_at_location(buf_x, buf_y))
    return False
# Looks at all tags covering the position (x, y) in the text view,
# and if one of them is a link, change the cursor to the "hands" cursor
# typically used by web browsers.
def textview_set_cursor_if_appropriate(self, text_view, x, y):
    """Switch between hand and I-beam cursor over hyperlink text.

    Only touches the GDK window cursor when the hover state actually
    changed, to avoid needless cursor updates on every motion event.
    """
    # Fix: the original also fetched text_view.get_buffer() into a local
    # that was never used; the dead lookup has been removed.
    hovering = False
    iter = text_view.get_iter_at_location(x, y)
    for tag in iter.get_tags():
        # Tags created by textview_insert_link() carry a "page" payload
        if tag.get_data("page") is not None:
            hovering = True
            break
    if hovering != self.hovering_over_link:
        self.hovering_over_link = hovering
        if self.hovering_over_link:
            text_view.get_window(gtk.TEXT_WINDOW_TEXT).set_cursor(self.hand_cursor)
        else:
            text_view.get_window(gtk.TEXT_WINDOW_TEXT).set_cursor(self.regular_cursor)
# Update the cursor image if the pointer moved.
def on_textview_motion_notify_event(self, text_view, event):
    """Keep the link-hover cursor in sync while the pointer moves."""
    buf_x, buf_y = text_view.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET,
            int(event.x), int(event.y))
    self.textview_set_cursor_if_appropriate(text_view, buf_x, buf_y)
    # Query the pointer so GDK keeps delivering motion hint events
    text_view.window.get_pointer()
    return False
# Also update the cursor image if the window becomes visible
# (e.g. when a window covering it got iconified).
def on_textview_visibility_notify_event(self, text_view, event):
    """Re-check the link-hover cursor when the widget becomes visible."""
    pointer_x, pointer_y, _modifiers = text_view.window.get_pointer()
    buf_x, buf_y = text_view.window_to_buffer_coords(\
            gtk.TEXT_WINDOW_WIDGET, pointer_x, pointer_y)
    self.textview_set_cursor_if_appropriate(text_view, buf_x, buf_y)
    return False
def textview_insert_link(self, buffer, iter, text, page):
    ''' Inserts a piece of text into the buffer, giving it the usual
    appearance of a hyperlink in a web browser: blue and underlined.
    Additionally, attaches some data on the tag, to make it recognizable
    as a link.
    '''
    link_tag = buffer.create_tag(None, foreground="blue",
            underline=pango.UNDERLINE_SINGLE)
    # The "page" payload is what textview_follow_if_link() opens later
    link_tag.set_data("page", page)
    buffer.insert_with_tags(iter, text, link_tag)
def textview_follow_if_link(self, iter):
    ''' Looks at all tags covering the position of iter in the text view,
    and if one of them is a link, follow it by showing the page identified
    by the data attached to it.
    '''
    for tag in iter.get_tags():
        target = tag.get_data("page")
        if target is not None:
            # Open only the first link tag found at this position
            util.open_website(target)
            return
def display_embedded_notes(self, episode):
    """Show the given episode's shownotes in the embedded notes view."""
    self.pre_display_notes()
    # Pumping the main loop here would break the selection, so we don't:
    #while gtk.events_pending():
    #    gtk.main_iteration(False)
    # Load the shownotes into the UI
    self.clear_embedded_notes()
    self.load_embedded_notes(episode)
def pre_display_notes(self):
    """Show a placeholder in the shownotes widget before loading."""
    if self.have_webkit:
        self.htmlview.load_html_string('<html><head></head><body><em>%s</em></body></html>' % _('Loading shownotes...'), '')
    else:
        # Installing a fresh buffer discards any previously-shown notes
        self.b = gtk.TextBuffer()
        self.textview.set_buffer(self.b)
def clear_embedded_notes(self):
    """Empty the shownotes widget and reset the read timer."""
    if self.have_webkit:
        self.htmlview.load_html_string('', '')
    else:
        self.textview.get_buffer().set_text('')
    # No episode is shown any more - restart the timer without one
    self.restart_read_timer(None)
def load_embedded_notes(self,episode):
    """Render an episode's shownotes into the embedded view.

    Uses the WebKit HTML view when available, otherwise falls back to
    formatting heading/subheading/description into the gtk.TextBuffer
    created by pre_display_notes(). No-op when episode is None.
    """
    if episode is None:
        return
    # Now do the stuff that takes a bit longer...
    heading = episode.title
    subheading = _('from %s') % (episode.channel.title)
    description = episode.description
    if self.have_webkit:
        global SHOWNOTES_HTML_TEMPLATE
        # Get the description - if it looks like plaintext, replace the
        # newline characters with line breaks for the HTML view
        description = episode.description
        if '<' not in description:
            description = description.replace('\n', '<br>')
        args = (
            episode.link,
            saxutils.escape(heading),
            saxutils.escape(subheading),
            description
        )
        # Base URL for resolving relative links in the shownotes HTML
        url = os.path.dirname(episode.channel.url)
        self.htmlview.load_html_string(SHOWNOTES_HTML_TEMPLATE % args, url)
    else:
        # Plain-text fallback: heading styled large/bold/underlined and
        # tagged with the episode link so it acts like a hyperlink
        tag = self.b.create_tag('heading', scale=pango.SCALE_LARGE, \
            weight=pango.WEIGHT_BOLD, underline=pango.UNDERLINE_SINGLE)
        tag.set_data('page',episode.link)
        self.b.create_tag('subheading', scale=pango.SCALE_SMALL)
        self.b.insert_with_tags_by_name(self.b.get_end_iter(), heading, 'heading')
        self.b.insert_at_cursor('\n')
        self.b.insert_with_tags_by_name(self.b.get_end_iter(), subheading, 'subheading')
        self.b.insert_at_cursor('\n\n')
        self.b.insert(self.b.get_end_iter(), util.remove_html_tags(description))
        # Scroll/caret back to the top after inserting everything
        self.b.place_cursor(self.b.get_start_iter())
    self.restart_read_timer(episode)
def on_visit_website_button_clicked(self, widget=None):
    """Open the current episode's website, if the episode has a link."""
    episode = self.episode
    if episode and episode.link:
        util.open_website(episode.link)
def episode_object_by_uri(self, uri):
    """Get an episode object given a local or remote URI

    This can be used to quickly access an episode object
    when all we have is its download filename or episode
    URL (e.g. from external D-Bus calls / signals, etc..)

    Returns None when no unambiguous match is found.
    """
    if uri.startswith('/'):
        uri = 'file://' + urllib.quote(uri)
    prefix = 'file://' + urllib.quote(self.config.download_dir)
    if not uri.startswith(prefix):
        # Possibly remote file - search the database for a podcast
        channel_id = self.db.get_channel_id_from_episode_url(uri)
        if channel_id is not None:
            matches = [c for c in self.channels if c.id == channel_id]
            if len(matches) == 1:
                return matches[0].get_episode_by_url(uri)
        return None
    # File is on the local filesystem in the download folder;
    # expect exactly "<channel folder>/<episode file>" below it
    relative = urllib.unquote(uri[len(prefix):])
    parts = [part for part in relative.split(os.sep) if part]
    if len(parts) == 2:
        dir_name, filename = parts
        matches = [c for c in self.channels if c.foldername == dir_name]
        if len(matches) == 1:
            return matches[0].get_episode_by_filename(filename)
    return None
def on_played(self, start, end, total, file_uri):
    """Handle the "played" signal from a media player

    start/end are the played segment's positions, total the track
    length (all presumably in seconds - TODO confirm against the
    MediaPlayerDBusReceiver caller); file_uri identifies the episode.
    Updates the episode's playback position, marks it played, commits
    to the database, refreshes the UI and reports the playback to the
    gpodder.net web service.
    """
    if start == 0 and end == 0 and total == 0:
        # Ignore bogus play event
        return
    elif end < start + 5:
        # Ignore "less than five seconds" segments,
        # as they can happen with seeking, etc...
        return
    log('Received play action: %s (%d, %d, %d)', file_uri, start, end, total, sender=self)
    episode = self.episode_object_by_uri(file_uri)
    if episode is not None:
        # NOTE(review): file_type is never used below; the call looks
        # like a leftover, but it's kept in case it has side effects.
        file_type = episode.file_type()
        now = time.time()
        if total > 0:
            episode.total_time = total
        elif total == 0:
            # Assume the episode's total time for the action
            total = episode.total_time
        # Playback updates must never move backwards in wall-clock time
        assert episode.current_position_updated is None or \
        now >= episode.current_position_updated
        episode.current_position = end
        episode.current_position_updated = now
        episode.mark(is_played=True)
        episode.save()
        self.db.commit()
        # Refresh icons/counters for the affected episode and podcast
        self.update_episode_list_icons([episode.url])
        self.update_podcast_list_model([episode.channel.url])
        # Submit this action to the webservice
        self.mygpo_client.on_playback_full(episode, \
        start, end, total)
def on_add_remove_podcasts_mygpo(self):
    """Apply subscription changes received from gpodder.net.

    Splits received actions into applicable changes (adds for unknown
    URLs, removes for subscribed URLs) and already-satisfied ones. The
    latter are confirmed to the server immediately; the former are
    offered to the user in a selector dialog. Returns True when a
    dialog will be shown, False when there is nothing to decide.
    """
    actions = self.mygpo_client.get_received_actions()
    if not actions:
        return False
    existing_urls = [c.url for c in self.channels]
    # Columns for the episode selector window - just one...
    columns = (
        ('description', None, None, _('Action')),
    )
    # A list of actions that have to be chosen from
    changes = []
    # Actions that are ignored (already carried out)
    ignored = []
    for action in actions:
        if action.is_add and action.url not in existing_urls:
            changes.append(my.Change(action))
        elif action.is_remove and action.url in existing_urls:
            # Find the channel object so the removal can act on it
            podcast_object = None
            for podcast in self.channels:
                if podcast.url == action.url:
                    podcast_object = podcast
                    break
            changes.append(my.Change(action, podcast_object))
        else:
            log('Ignoring action: %s', action, sender=self)
            ignored.append(action)
    # Confirm all ignored changes
    self.mygpo_client.confirm_received_actions(ignored)
    def execute_podcast_actions(selected):
        # Selector callback: apply what the user accepted, reject the rest
        add_list = [c.action.url for c in selected if c.action.is_add]
        remove_list = [c.podcast for c in selected if c.action.is_remove]
        # Apply the accepted changes locally
        self.add_podcast_list(add_list)
        self.remove_podcast_list(remove_list, confirm=False)
        # All selected items are now confirmed
        self.mygpo_client.confirm_received_actions(c.action for c in selected)
        # Revert the changes on the server
        rejected = [c.action for c in changes if c not in selected]
        self.mygpo_client.reject_received_actions(rejected)
    def ask():
        # We're abusing the Episode Selector again ;) -- thp
        gPodderEpisodeSelector(self.main_window, \
        title=_('Confirm changes from gpodder.net'), \
        instructions=_('Select the actions you want to carry out.'), \
        episodes=changes, \
        columns=columns, \
        size_attribute=None, \
        stock_ok_button=gtk.STOCK_APPLY, \
        callback=execute_podcast_actions, \
        _config=self.config)
    # There are some actions that need the user's attention
    if changes:
        util.idle_add(ask)
        return True
    # We have no remaining actions - no selection happens
    return False
def rewrite_urls_mygpo(self):
    """Apply podcast URL rewrites received from the gpodder.net client."""
    # Check if we have to rewrite URLs since the last add
    for rewritten_url in self.mygpo_client.get_rewritten_urls():
        if not rewritten_url.new_url:
            continue
        for channel in self.channels:
            if channel.url != rewritten_url.old_url:
                continue
            log('Updating URL of %s to %s', channel, \
                    rewritten_url.new_url, sender=self)
            channel.url = rewritten_url.new_url
            channel.save()
            self.channel_list_changed = True
            util.idle_add(self.update_episode_list_model)
            # Only the first matching channel gets rewritten
            break
def on_send_full_subscriptions(self):
# Send the full subscription list to the gpodder.net client
# (this will overwrite the subscription list on the server)
indicator = ProgressIndicator(_('Uploading subscriptions'), \
_('Your subscriptions are being uploaded to the server.'), \
False, self.get_dialog_parent())
try:
self.mygpo_client.set_subscriptions([c.url for c in self.channels])
util.idle_add(self.show_message, _('List uploaded successfully.'))
except Exception, e:
def show_error(e):
message = str(e)
if not message:
message = e.__class__.__name__
self.show_message(message, \
_('Error while uploading'), \
important=True)
util.idle_add(show_error, e)
util.idle_add(indicator.on_finished)
def on_podcast_selected(self, treeview, path, column):
    # for Maemo 5's UI
    """Activate a podcast row: open its episode list window."""
    model = treeview.get_model()
    self.active_channel = model.get_value(model.get_iter(path), \
            PodcastListModel.C_CHANNEL)
    self.update_episode_list_model()
    self.episodes_window.channel = self.active_channel
    self.episodes_window.show()
def on_button_subscribe_clicked(self, button):
    """Forward the subscribe button to the channel import handler."""
    self.on_itemImportChannels_activate(button)
def on_button_downloads_clicked(self, widget):
    """Open the downloads window."""
    self.downloads_window.show()
def show_episode_in_download_manager(self, episode):
    """Open the downloads window and select the episode's download task."""
    self.downloads_window.show()
    model = self.treeDownloads.get_model()
    selection = self.treeDownloads.get_selection()
    selection.unselect_all()
    row_iter = model.get_iter_first()
    while row_iter is not None:
        task = model.get_value(row_iter, DownloadStatusModel.C_TASK)
        if task.episode.url == episode.url:
            selection.select_iter(row_iter)
            # FIXME: Scroll to selection in pannable area
            break
        row_iter = model.iter_next(row_iter)
def for_each_episode_set_task_status(self, episodes, status):
    """Apply *status* to every download task belonging to *episodes*."""
    episode_urls = set(episode.url for episode in episodes)
    model = self.treeDownloads.get_model()
    matching_tasks = []
    for row in model:
        task = model.get_value(row.iter, DownloadStatusModel.C_TASK)
        if task.url in episode_urls:
            # Row references stay valid even if the model changes below
            matching_tasks.append((gtk.TreeRowReference(model, row.path), task))
    self._for_each_task_set_status(matching_tasks, status)
def on_window_orientation_changed(self, orientation):
    """React to a device orientation change (Fremantle portrait mode)."""
    self._last_orientation = orientation
    if self.preferences_dialog is not None:
        self.preferences_dialog.on_window_orientation_changed(orientation)
    treeview = self.treeChannels
    if orientation == Orientation.PORTRAIT:
        treeview.set_action_area_orientation(gtk.ORIENTATION_VERTICAL)
        button_style = 'HildonButton-thumb'
    else:
        treeview.set_action_area_orientation(gtk.ORIENTATION_HORIZONTAL)
        button_style = 'HildonButton-finger'
    # Work around Maemo bug #4718
    self.button_subscribe.set_name(button_style)
    self.button_refresh.set_name(button_style)
    if gpodder.ui.fremantle:
        self.fancy_progress_bar.relayout()
def on_treeview_podcasts_selection_changed(self, selection):
    """Clear the episode list when no podcast row is selected."""
    model, selected_iter = selection.get_selected()
    if selected_iter is None:
        self.active_channel = None
        self.episode_list_model.clear()
def on_treeview_button_pressed(self, treeview, event):
    """Record button presses on tree views for later gesture handling.

    Returning True swallows the press: for the podcast list this blocks
    input while an update is running; elsewhere it reserves the context
    menu button on the desktop UI for the release handler.
    """
    if event.window != treeview.get_bin_window():
        return False
    TreeViewHelper.save_button_press_event(treeview, event)
    role = getattr(treeview, TreeViewHelper.ROLE)
    if role == TreeViewHelper.ROLE_PODCASTS:
        return self.currently_updating
    return event.button == self.context_menu_mouse_button and \
            gpodder.ui.desktop
def on_treeview_podcasts_button_released(self, treeview, event):
    """Dispatch a podcast-list button release to gestures or context menu."""
    if event.window != treeview.get_bin_window():
        return False
    if gpodder.ui.maemo:
        return self.treeview_channels_handle_gestures(treeview, event)
    return self.treeview_channels_show_context_menu(treeview, event)
def on_treeview_episodes_button_released(self, treeview, event):
    """Dispatch an episode-list button release to gestures or context menu."""
    if event.window != treeview.get_bin_window():
        return False
    if self.config.enable_fingerscroll or self.config.maemo_enable_gestures:
        return self.treeview_available_handle_gestures(treeview, event)
    return self.treeview_available_show_context_menu(treeview, event)
def on_treeview_downloads_button_released(self, treeview, event):
    """Show the downloads context menu on button release."""
    if event.window != treeview.get_bin_window():
        return False
    return self.treeview_downloads_show_context_menu(treeview, event)
def on_entry_search_podcasts_changed(self, editable):
    """Debounce typing in the podcast search entry (live search)."""
    if not self.hbox_search_podcasts.get_property('visible'):
        return
    def set_search_term(self, text):
        self.podcast_list_model.set_search_term(text)
        self._podcast_list_search_timeout = None
        # Returning False removes the one-shot timeout source
        return False
    # Cancel a pending search before scheduling a new one
    if self._podcast_list_search_timeout is not None:
        gobject.source_remove(self._podcast_list_search_timeout)
    self._podcast_list_search_timeout = gobject.timeout_add(\
            self.LIVE_SEARCH_DELAY, \
            set_search_term, self, editable.get_chars(0, -1))
def on_entry_search_podcasts_key_press(self, editable, event):
    """Close the podcast search box when Escape is pressed."""
    if event.keyval == gtk.keysyms.Escape:
        self.hide_podcast_search()
        return True
def hide_podcast_search(self, *args):
    """Hide and fully reset the podcast type-ahead search box."""
    # Cancel any still-pending debounced search
    if self._podcast_list_search_timeout is not None:
        gobject.source_remove(self._podcast_list_search_timeout)
        self._podcast_list_search_timeout = None
    self.hbox_search_podcasts.hide()
    self.entry_search_podcasts.set_text('')
    self.podcast_list_model.set_search_term(None)
    self.treeChannels.grab_focus()
def show_podcast_search(self, input_char):
    """Reveal the podcast search box, seeded with the typed character."""
    self.hbox_search_podcasts.show()
    self.entry_search_podcasts.insert_text(input_char, -1)
    self.entry_search_podcasts.grab_focus()
    # Place the caret after the seeded character
    self.entry_search_podcasts.set_position(-1)
def init_podcast_list_treeview(self):
    """Set up the podcast channel tree view widget.

    Builds the cover and name columns (with a Fremantle-specific
    download-count renderer or a desktop "pill" renderer), attaches the
    filtered podcast list model, pre-selects the second row, connects
    the selection handler and type-ahead search, and enables the row
    separator used by the special "all episodes" entry.

    Fix: removed two leftover debug statements (print "None 1st line" /
    print "None 2nd line") that wrote to stdout whenever the list was
    empty or had a single row; also use "is not None" instead of
    "!= None" for the iterator checks.
    """
    if gpodder.ui.fremantle:
        if self.config.podcast_list_view_mode == EpisodeListModel.VIEW_DOWNLOADED:
            self.item_view_podcasts_downloaded.set_active(True)
        elif self.config.podcast_list_view_mode == EpisodeListModel.VIEW_UNPLAYED:
            self.item_view_podcasts_unplayed.set_active(True)
        else:
            self.item_view_podcasts_all.set_active(True)
        self.podcast_list_model.set_view_mode(self.config.podcast_list_view_mode)
    iconcolumn = gtk.TreeViewColumn('')
    iconcell = gtk.CellRendererPixbuf()
    iconcolumn.pack_start(iconcell, False)
    iconcolumn.add_attribute(iconcell, 'pixbuf', PodcastListModel.C_COVER)
    self.treeChannels.append_column(iconcolumn)
    namecolumn = gtk.TreeViewColumn('')
    namecell = gtk.CellRendererText()
    namecell.set_property('ellipsize', pango.ELLIPSIZE_END)
    namecolumn.pack_start(namecell, True)
    namecolumn.add_attribute(namecell, 'markup', PodcastListModel.C_DESCRIPTION)
    if gpodder.ui.fremantle:
        # Right-aligned download counter styled like Hildon secondary text
        countcell = gtk.CellRendererText()
        from gpodder.gtkui.frmntl import style
        countcell.set_property('font-desc', style.get_font_desc('EmpSystemFont'))
        countcell.set_property('foreground-gdk', style.get_color('SecondaryTextColor'))
        countcell.set_property('alignment', pango.ALIGN_RIGHT)
        countcell.set_property('xalign', 1.)
        countcell.set_property('xpad', 5)
        namecolumn.pack_start(countcell, False)
        namecolumn.add_attribute(countcell, 'text', PodcastListModel.C_DOWNLOADS)
        namecolumn.add_attribute(countcell, 'visible', PodcastListModel.C_DOWNLOADS)
    else:
        iconcell = gtk.CellRendererPixbuf()
        iconcell.set_property('xalign', 1.0)
        namecolumn.pack_start(iconcell, False)
        namecolumn.add_attribute(iconcell, 'pixbuf', PodcastListModel.C_PILL)
        namecolumn.add_attribute(iconcell, 'visible', PodcastListModel.C_PILL_VISIBLE)
    self.treeChannels.append_column(namecolumn)
    self.treeChannels.set_model(self.podcast_list_model.get_filtered_model())
    # Pre-select the second row if it exists (the first row is
    # presumably the special "all episodes" entry - see the row
    # separator setup below; TODO confirm against PodcastListModel)
    iter = self.treeChannels.get_model().get_iter_root()
    if iter is not None:
        iter = self.treeChannels.get_model().iter_next(iter)
        if iter is not None:
            self.treeChannels.get_selection().select_iter(iter)
    # When no podcast is selected, clear the episode list model
    selection = self.treeChannels.get_selection()
    selection.connect('changed', self.on_treeview_podcasts_selection_changed)
    # Set up type-ahead find for the podcast list
    def on_key_press(treeview, event):
        if event.keyval == gtk.keysyms.Escape:
            self.hide_podcast_search()
        elif gpodder.ui.fremantle and event.keyval == gtk.keysyms.BackSpace:
            self.hide_podcast_search()
        elif event.state & gtk.gdk.CONTROL_MASK:
            # Don't handle type-ahead when control is pressed (so shortcuts
            # with the Ctrl key still work, e.g. Ctrl+A, ...)
            return True
        else:
            unicode_char_id = gtk.gdk.keyval_to_unicode(event.keyval)
            if unicode_char_id == 0:
                return False
            input_char = unichr(unicode_char_id)
            self.show_podcast_search(input_char)
        return True
    self.treeChannels.connect('key-press-event', on_key_press)
    # Enable separators to the podcast list to separate special podcasts
    # from others (this is used for the "all episodes" view)
    self.treeChannels.set_row_separator_func(PodcastListModel.row_separator_func)
    TreeViewHelper.set(self.treeChannels, TreeViewHelper.ROLE_PODCASTS)
def on_entry_search_episodes_changed(self, editable):
    """Debounce typing in the episode search entry (live search)."""
    if not self.hbox_search_episodes.get_property('visible'):
        return
    def set_search_term(self, text):
        self.episode_list_model.set_search_term(text)
        self._episode_list_search_timeout = None
        # Returning False removes the one-shot timeout source
        return False
    # Cancel a pending search before scheduling a new one
    if self._episode_list_search_timeout is not None:
        gobject.source_remove(self._episode_list_search_timeout)
    self._episode_list_search_timeout = gobject.timeout_add(\
            self.LIVE_SEARCH_DELAY, \
            set_search_term, self, editable.get_chars(0, -1))
def on_entry_search_episodes_key_press(self, editable, event):
    """Close the episode search box when Escape is pressed."""
    if event.keyval == gtk.keysyms.Escape:
        self.hide_episode_search()
        return True
def hide_episode_search(self, *args):
    """Dismiss the episode search box and clear any active filter."""
    timeout_id = self._episode_list_search_timeout
    if timeout_id is not None:
        # Kill a pending debounce timer so it cannot fire later
        gobject.source_remove(timeout_id)
        self._episode_list_search_timeout = None
    self.hbox_search_episodes.hide()
    self.entry_search_episodes.set_text('')
    self.episode_list_model.set_search_term(None)
    # Hand keyboard focus back to the episode list
    self.treeAvailable.grab_focus()
def show_episode_search(self, input_char):
    """Reveal the episode search box, seeded with *input_char*."""
    entry = self.entry_search_episodes
    self.hbox_search_episodes.show()
    entry.insert_text(input_char, -1)
    entry.grab_focus()
    # Place the cursor after the inserted character
    entry.set_position(-1)
def init_episode_list_treeview(self):
    """Set up the episode list view (self.treeAvailable): model,
    cell renderers, columns, type-ahead search, drag-and-drop and
    selection handling. Called once during window construction."""
    # For loading the list model
    self.episode_list_model = EpisodeListModel(self.on_episode_list_filter_changed)
    # Restore the persisted view-mode filter into the "View" menu items
    if self.config.episode_list_view_mode == EpisodeListModel.VIEW_UNDELETED:
        self.item_view_episodes_undeleted.set_active(True)
    elif self.config.episode_list_view_mode == EpisodeListModel.VIEW_DOWNLOADED:
        self.item_view_episodes_downloaded.set_active(True)
    elif self.config.episode_list_view_mode == EpisodeListModel.VIEW_UNPLAYED:
        self.item_view_episodes_unplayed.set_active(True)
    else:
        self.item_view_episodes_all.set_active(True)
    self.episode_list_model.set_view_mode(self.config.episode_list_view_mode)
    self.treeAvailable.set_model(self.episode_list_model.get_filtered_model())
    TreeViewHelper.set(self.treeAvailable, TreeViewHelper.ROLE_EPISODES)
    # Status icon renderer (first cell of the "Episode" column);
    # bigger fixed size on Maemo for finger-friendly rows
    iconcell = gtk.CellRendererPixbuf()
    iconcell.set_property('stock-size', gtk.ICON_SIZE_BUTTON)
    if gpodder.ui.maemo:
        iconcell.set_fixed_size(50, 50)
    else:
        iconcell.set_fixed_size(40, -1)
    namecell = gtk.CellRendererText()
    namecell.set_property('ellipsize', pango.ELLIPSIZE_END)
    namecolumn = gtk.TreeViewColumn(_('Episode'))
    namecolumn.pack_start(iconcell, False)
    namecolumn.add_attribute(iconcell, 'icon-name', EpisodeListModel.C_STATUS_ICON)
    namecolumn.pack_start(namecell, True)
    namecolumn.add_attribute(namecell, 'markup', EpisodeListModel.C_DESCRIPTION)
    if gpodder.ui.fremantle:
        namecolumn.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
    else:
        namecolumn.set_sort_column_id(EpisodeListModel.C_DESCRIPTION)
        namecolumn.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
    namecolumn.set_resizable(True)
    namecolumn.set_expand(True)
    if gpodder.ui.fremantle:
        # Fremantle: show the episode duration inline in the name column
        from gpodder.gtkui.frmntl import style
        timecell = gtk.CellRendererText()
        timecell.set_property('font-desc', style.get_font_desc('SmallSystemFont'))
        timecell.set_property('foreground-gdk', style.get_color('SecondaryTextColor'))
        timecell.set_property('alignment', pango.ALIGN_RIGHT)
        timecell.set_property('xalign', 1.)
        timecell.set_property('xpad', 5)
        timecell.set_property('yalign', .85)
        namecolumn.pack_start(timecell, False)
        namecolumn.add_attribute(timecell, 'text', EpisodeListModel.C_TIME)
        namecolumn.add_attribute(timecell, 'visible', EpisodeListModel.C_TIME_VISIBLE)
    else:
        # Other UIs: show a "locked" emblem for episodes kept from cleanup
        lockcell = gtk.CellRendererPixbuf()
        lockcell.set_fixed_size(40, -1)
        lockcell.set_property('stock-size', gtk.ICON_SIZE_MENU)
        lockcell.set_property('icon-name', 'emblem-readonly')
        namecolumn.pack_start(lockcell, False)
        namecolumn.add_attribute(lockcell, 'visible', EpisodeListModel.C_LOCKED)
    sizecell = gtk.CellRendererText()
    sizecell.set_property('xalign', 1)
    sizecolumn = gtk.TreeViewColumn(_('Size'), sizecell, text=EpisodeListModel.C_FILESIZE_TEXT)
    sizecolumn.set_sort_column_id(EpisodeListModel.C_FILESIZE)
    releasecell = gtk.CellRendererText()
    releasecolumn = gtk.TreeViewColumn(_('Released'), releasecell, text=EpisodeListModel.C_PUBLISHED_TEXT)
    releasecolumn.set_sort_column_id(EpisodeListModel.C_PUBLISHED)
    namecolumn.set_reorderable(True)
    self.treeAvailable.append_column(namecolumn)
    # Size/release columns only fit on non-Maemo (desktop) screens
    if not gpodder.ui.maemo:
        for itemcolumn in (sizecolumn, releasecolumn):
            itemcolumn.set_reorderable(True)
            self.treeAvailable.append_column(itemcolumn)
    # Set up type-ahead find for the episode list
    def on_key_press(treeview, event):
        if event.keyval == gtk.keysyms.Escape:
            self.hide_episode_search()
        elif gpodder.ui.fremantle and event.keyval == gtk.keysyms.BackSpace:
            self.hide_episode_search()
        elif event.state & gtk.gdk.CONTROL_MASK:
            # Don't handle type-ahead when control is pressed (so shortcuts
            # with the Ctrl key still work, e.g. Ctrl+A, ...)
            return False
        else:
            unicode_char_id = gtk.gdk.keyval_to_unicode(event.keyval)
            if unicode_char_id == 0:
                return False
            input_char = unichr(unicode_char_id)
            self.show_episode_search(input_char)
        return True
    self.treeAvailable.connect('key-press-event', on_key_press)
    if gpodder.ui.desktop and not self.config.enable_fingerscroll:
        # Allow dragging downloaded files out of the list as file:// URIs
        self.treeAvailable.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, \
                (('text/uri-list', 0, 0),), gtk.gdk.ACTION_COPY)
        def drag_data_get(tree, context, selection_data, info, timestamp):
            if self.config.on_drag_mark_played:
                for episode in self.get_selected_episodes():
                    episode.mark(is_played=True)
                self.on_selected_episodes_status_changed()
            uris = ['file://'+e.local_filename(create=False) \
                    for e in self.get_selected_episodes() \
                    if e.was_downloaded(and_exists=True)]
            uris.append('') # for the trailing '\r\n'
            selection_data.set(selection_data.target, 8, '\r\n'.join(uris))
        self.treeAvailable.connect('drag-data-get', drag_data_get)
    selection = self.treeAvailable.get_selection()
    if self.config.maemo_enable_gestures or self.config.enable_fingerscroll:
        selection.set_mode(gtk.SELECTION_SINGLE)
    elif gpodder.ui.fremantle:
        selection.set_mode(gtk.SELECTION_SINGLE)
    else:
        selection.set_mode(gtk.SELECTION_MULTIPLE)
    # Update the sensitivity of the toolbar buttons on the Desktop
    selection.connect('changed', lambda s: self.play_or_download())
    # show embedded notes
    selection.connect('changed', self.on_treeAvailable_selection_changed)
    if gpodder.ui.diablo:
        # Set up the tap-and-hold context menu for podcasts
        # NOTE(review): this attaches to self.treeChannels (the podcast
        # list) even though we are in the episode-list setup — confirm
        # this placement is intentional before moving it.
        menu = gtk.Menu()
        menu.append(self.itemUpdateChannel.create_menu_item())
        menu.append(self.itemEditChannel.create_menu_item())
        menu.append(gtk.SeparatorMenuItem())
        menu.append(self.itemRemoveChannel.create_menu_item())
        menu.append(gtk.SeparatorMenuItem())
        item = gtk.ImageMenuItem(_('Close this menu'))
        item.set_image(gtk.image_new_from_stock(gtk.STOCK_CLOSE, \
                gtk.ICON_SIZE_MENU))
        menu.append(item)
        menu.show_all()
        menu = self.set_finger_friendly(menu)
        self.treeChannels.tap_and_hold_setup(menu)
def init_download_list_treeview(self):
    """Set up the downloads tab list view (self.treeDownloads):
    episode column (icon + name) and progress column."""
    # enable multiple selection support
    self.treeDownloads.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
    self.treeDownloads.set_search_equal_func(TreeViewHelper.make_search_equal_func(DownloadStatusModel))
    # columns and renderers for "download progress" tab
    # First column: [ICON] Episodename
    column = gtk.TreeViewColumn(_('Episode'))
    cell = gtk.CellRendererPixbuf()
    if gpodder.ui.maemo:
        # Larger icon for finger-friendly rows on Maemo
        cell.set_fixed_size(50, 50)
    cell.set_property('stock-size', gtk.ICON_SIZE_BUTTON)
    column.pack_start(cell, expand=False)
    column.add_attribute(cell, 'icon-name', \
            DownloadStatusModel.C_ICON_NAME)
    cell = gtk.CellRendererText()
    cell.set_property('ellipsize', pango.ELLIPSIZE_END)
    column.pack_start(cell, expand=True)
    column.add_attribute(cell, 'markup', DownloadStatusModel.C_NAME)
    column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
    column.set_expand(True)
    self.treeDownloads.append_column(column)
    # Second column: Progress
    cell = gtk.CellRendererProgress()
    cell.set_property('yalign', .5)
    cell.set_property('ypad', 6)
    column = gtk.TreeViewColumn(_('Progress'), cell,
            value=DownloadStatusModel.C_PROGRESS, \
            text=DownloadStatusModel.C_PROGRESS_TEXT)
    column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
    column.set_expand(False)
    self.treeDownloads.append_column(column)
    # Pin the progress column width (wider on Maemo screens)
    if gpodder.ui.maemo:
        column.set_property('min-width', 200)
        column.set_property('max-width', 200)
    else:
        column.set_property('min-width', 150)
        column.set_property('max-width', 150)
    self.treeDownloads.set_model(self.download_status_model)
    TreeViewHelper.set(self.treeDownloads, TreeViewHelper.ROLE_DOWNLOADS)
def on_treeview_expose_event(self, treeview, event):
    """Expose handler that paints a centered placeholder message
    ('No episodes available', 'No active downloads', ...) onto a
    tree view whose model is empty. Returns False so normal GTK
    drawing continues."""
    if event.window == treeview.get_bin_window():
        model = treeview.get_model()
        # Nothing to paint if the model already has at least one row
        if (model is not None and model.get_iter_first() is not None):
            return False
        role = getattr(treeview, TreeViewHelper.ROLE, None)
        if role is None:
            return False
        ctx = event.window.cairo_create()
        # Restrict drawing to the exposed area
        ctx.rectangle(event.area.x, event.area.y,
                event.area.width, event.area.height)
        ctx.clip()
        x, y, width, height, depth = event.window.get_geometry()
        progress = None
        # Pick the message based on which list this view represents
        if role == TreeViewHelper.ROLE_EPISODES:
            if self.currently_updating:
                text = _('Loading episodes')
            elif self.config.episode_list_view_mode != \
                    EpisodeListModel.VIEW_ALL:
                text = _('No episodes in current view')
            else:
                text = _('No episodes available')
        elif role == TreeViewHelper.ROLE_PODCASTS:
            if self.config.episode_list_view_mode != \
                    EpisodeListModel.VIEW_ALL and \
                    self.config.podcast_list_hide_boring and \
                    len(self.channels) > 0:
                text = _('No podcasts in this view')
            else:
                text = _('No subscriptions')
        elif role == TreeViewHelper.ROLE_DOWNLOADS:
            text = _('No active downloads')
        else:
            raise Exception('on_treeview_expose_event: unknown role')
        if gpodder.ui.fremantle:
            from gpodder.gtkui.frmntl import style
            font_desc = style.get_font_desc('LargeSystemFont')
        else:
            font_desc = None
        draw_text_box_centered(ctx, treeview, width, height, text, font_desc, progress)
    return False
def enable_download_list_update(self):
    """Start the periodic (1.5 s) refresh of the downloads list,
    unless it is already running."""
    if self.download_list_update_enabled:
        return
    # Refresh immediately, then keep refreshing from a glib timer
    self.update_downloads_list()
    gobject.timeout_add(1500, self.update_downloads_list)
    self.download_list_update_enabled = True
def cleanup_downloads(self):
    """Remove finished (DONE) and cancelled tasks from the downloads
    list model and update dependent UI (episode icons, shownotes)."""
    model = self.download_status_model
    # Take TreeRowReferences first: they stay valid while rows are
    # removed below, unlike plain paths
    all_tasks = [(gtk.TreeRowReference(model, row.path), row[0]) for row in model]
    changed_episode_urls = set()
    for row_reference, task in all_tasks:
        if task.status in (task.DONE, task.CANCELLED):
            model.remove(model.get_iter(row_reference.get_path()))
            try:
                # We don't "see" this task anymore - remove it;
                # this is needed, so update_episode_list_icons()
                # below gets the correct list of "seen" tasks
                self.download_tasks_seen.remove(task)
            except KeyError, key_error:
                log('Cannot remove task from "seen" list: %s', task, sender=self)
            changed_episode_urls.add(task.url)
            # Tell the task that it has been removed (so it can clean up)
            task.removed_from_list()
    # Tell the podcasts tab to update icons for our removed podcasts
    self.update_episode_list_icons(changed_episode_urls)
    # Tell the shownotes window that we have removed the episode
    if self.episode_shownotes_window is not None and \
            self.episode_shownotes_window.episode is not None and \
            self.episode_shownotes_window.episode.url in changed_episode_urls:
        self.episode_shownotes_window._download_status_changed(None)
    # Update the downloads list one more time (can_call_cleanup=False
    # prevents update_downloads_list() from re-entering this method)
    self.update_downloads_list(can_call_cleanup=False)
def on_tool_downloads_toggled(self, toolbutton):
    """Switch the notebook page: 1 = downloads tab, 0 = episodes."""
    page = 1 if toolbutton.get_active() else 0
    self.wNotebook.set_current_page(page)
def add_download_task_monitor(self, monitor):
    """Register *monitor* and replay the current task list to it."""
    self.download_task_monitors.add(monitor)
    # The status model may not exist yet; treat that as "no tasks"
    rows = self.download_status_model
    if rows is None:
        rows = ()
    for row in rows:
        monitor.task_updated(row[self.download_status_model.C_TASK])
def remove_download_task_monitor(self, monitor):
    """Unregister a monitor added via add_download_task_monitor().
    Raises KeyError if the monitor was never registered."""
    self.download_task_monitors.remove(monitor)
def update_downloads_list(self, can_call_cleanup=True):
    """Refresh the downloads tab, window title, tray icon and task
    monitors from the download status model.

    Runs periodically from the glib timer installed by
    enable_download_list_update(); returning False from here stops
    that timer. *can_call_cleanup* guards against re-entering
    cleanup_downloads() when called from it.
    """
    try:
        model = self.download_status_model
        # Per-status counters for the summary labels below
        downloading, failed, finished, queued, paused, others = 0, 0, 0, 0, 0, 0
        total_speed, total_size, done_size = 0, 0, 0
        # Keep a list of all download tasks that we've seen
        download_tasks_seen = set()
        # Remember the DownloadTask object for the episode that
        # has been opened in the episode shownotes dialog (if any)
        if self.episode_shownotes_window is not None:
            shownotes_episode = self.episode_shownotes_window.episode
            shownotes_task = None
        else:
            shownotes_episode = None
            shownotes_task = None
        # Do not go through the list of the model is not (yet) available
        if model is None:
            model = ()
        for row in model:
            self.download_status_model.request_update(row.iter)
            task = row[self.download_status_model.C_TASK]
            speed, size, status, progress = task.speed, task.total_size, task.status, task.progress
            # Let the download task monitors know of changes
            for monitor in self.download_task_monitors:
                monitor.task_updated(task)
            total_size += size
            done_size += size*progress
            if shownotes_episode is not None and \
                    shownotes_episode.url == task.episode.url:
                shownotes_task = task
            download_tasks_seen.add(task)
            # Tally the task into the matching status counter
            if status == download.DownloadTask.DOWNLOADING:
                downloading += 1
                total_speed += speed
            elif status == download.DownloadTask.FAILED:
                failed += 1
            elif status == download.DownloadTask.DONE:
                finished += 1
            elif status == download.DownloadTask.QUEUED:
                queued += 1
            elif status == download.DownloadTask.PAUSED:
                paused += 1
            else:
                others += 1
        # Remember which tasks we have seen after this run
        self.download_tasks_seen = download_tasks_seen
        # Per-UI summary label ("Downloads (2 active, 1 failed)", ...)
        if gpodder.ui.desktop:
            text = [_('Downloads')]
            if downloading + failed + queued > 0:
                s = []
                if downloading > 0:
                    s.append(N_('%(count)d active', '%(count)d active', downloading) % {'count':downloading})
                if failed > 0:
                    s.append(N_('%(count)d failed', '%(count)d failed', failed) % {'count':failed})
                if queued > 0:
                    s.append(N_('%(count)d queued', '%(count)d queued', queued) % {'count':queued})
                text.append(' (' + ', '.join(s)+')')
            self.labelDownloads.set_text(''.join(text))
        elif gpodder.ui.diablo:
            # NOTE: "sum" shadows the builtin here (pre-existing)
            sum = downloading + failed + finished + queued + paused + others
            if sum:
                self.tool_downloads.set_label(_('Downloads (%d)') % sum)
            else:
                self.tool_downloads.set_label(_('Downloads'))
        elif gpodder.ui.fremantle:
            if downloading + queued > 0:
                self.button_downloads.set_value(N_('%(count)d active', '%(count)d active', downloading+queued) % {'count':(downloading+queued)})
            elif failed > 0:
                self.button_downloads.set_value(N_('%(count)d failed', '%(count)d failed', failed) % {'count':failed})
            elif paused > 0:
                self.button_downloads.set_value(N_('%(count)d paused', '%(count)d paused', paused) % {'count':paused})
            else:
                self.button_downloads.set_value(_('Idle'))
        title = [self.default_title]
        # We have to update all episodes/channels for which the status has
        # changed. Accessing task.status_changed has the side effect of
        # re-setting the changed flag, so we need to get the "changed" list
        # of tuples first and split it into two lists afterwards
        changed = [(task.url, task.podcast_url) for task in \
                self.download_tasks_seen if task.status_changed]
        episode_urls = [episode_url for episode_url, channel_url in changed]
        channel_urls = [channel_url for episode_url, channel_url in changed]
        count = downloading + queued
        if count > 0:
            # Downloads in flight: show progress in the window title
            title.append(N_('downloading %(count)d file', 'downloading %(count)d files', count) % {'count':count})
            if total_size > 0:
                percentage = 100.0*done_size/total_size
            else:
                percentage = 0.0
            total_speed = util.format_filesize(total_speed)
            title[1] += ' (%d%%, %s/s)' % (percentage, total_speed)
            if self.tray_icon is not None:
                # Update the tray icon status and progress bar
                self.tray_icon.set_status(self.tray_icon.STATUS_DOWNLOAD_IN_PROGRESS, title[1])
                self.tray_icon.draw_progress_bar(percentage/100.)
        else:
            # Nothing downloading or queued anymore: notify, run the
            # post-download hook, and stop this update timer
            if self.tray_icon is not None:
                # Update the tray icon status
                self.tray_icon.set_status()
            if gpodder.ui.desktop:
                self.downloads_finished(self.download_tasks_seen)
            if gpodder.ui.diablo:
                hildon.hildon_banner_show_information(self.gPodder, '', 'gPodder: %s' % _('All downloads finished'))
            log('All downloads have finished.', sender=self)
            if self.config.cmd_all_downloads_complete:
                util.run_external_command(self.config.cmd_all_downloads_complete)
            if gpodder.ui.fremantle:
                message = '\n'.join(['%s: %s' % (str(task), \
                        task.error_message) for task in self.download_tasks_seen if task.notify_as_failed()])
                if message:
                    self.show_message(message, _('Downloads failed'), important=True)
            # Remove finished episodes
            if self.config.auto_cleanup_downloads and can_call_cleanup:
                self.cleanup_downloads()
            # Stop updating the download list here
            self.download_list_update_enabled = False
        if not gpodder.ui.fremantle:
            self.gPodder.set_title(' - '.join(title))
        self.update_episode_list_icons(episode_urls)
        if self.episode_shownotes_window is not None:
            if (shownotes_task and shownotes_task.url in episode_urls) or \
                    shownotes_task != self.episode_shownotes_window.task:
                self.episode_shownotes_window._download_status_changed(shownotes_task)
            self.episode_shownotes_window._download_status_progress()
        self.play_or_download()
        if channel_urls:
            self.update_podcast_list_model(channel_urls)
        return self.download_list_update_enabled
    except Exception, e:
        log('Exception happened while updating download list.', sender=self, traceback=True)
        self.show_message('%s\n\n%s' % (_('Please report this problem and restart gPodder:'), str(e)), _('Unhandled exception'), important=True)
        # We return False here, so the update loop won't be called again,
        # that's why we require the restart of gPodder in the message.
        return False
def on_config_changed(self, *args):
    """Forward a config-change notification to the GTK main loop,
    so _on_config_changed() always runs on the UI thread."""
    util.idle_add(self._on_config_changed, *args)
def _on_config_changed(self, name, old_value, new_value):
    """React to a single configuration value change (main thread).

    Dispatches on the config key *name*; unknown keys are ignored.
    """
    if name == 'show_toolbar' and gpodder.ui.desktop:
        self.toolbar.set_property('visible', new_value)
        return
    if name == 'episode_list_descriptions':
        self.update_episode_list_model()
        return
    if name == 'episode_list_thumbnails':
        self.update_episode_list_icons(all=True)
        return
    if name == 'rotation_mode':
        self._fremantle_rotation.set_mode(new_value)
        return
    if name in ('auto_update_feeds', 'auto_update_frequency'):
        self.restart_auto_update_timer()
        return
    if name in ('podcast_list_view_all', 'podcast_list_view_new'):
        # Force a update of the podcast list model
        self.channel_list_changed = True
        if gpodder.ui.fremantle:
            # Show a busy indicator and let pending events drain so
            # the indicator is actually painted before the rebuild
            hildon.hildon_gtk_window_set_progress_indicator(self.main_window, True)
            while gtk.events_pending():
                gtk.main_iteration(False)
        self.update_podcast_list_model()
        if gpodder.ui.fremantle:
            hildon.hildon_gtk_window_set_progress_indicator(self.main_window, False)
def on_treeview_query_tooltip(self, treeview, x, y, keyboard_tooltip, tooltip):
    """Build the hover tooltip for the podcast and episode lists.

    For episodes this is plain text (the episode tooltip column);
    for podcasts it is a custom table with title, URL, disk usage,
    description and (if present) the last feed error.

    Returns True to show the tooltip, False to suppress it. The
    LAST_TOOLTIP attribute on the view prevents the tooltip from
    re-appearing for the same row immediately after it was dismissed.
    """
    # With get_bin_window, we get the window that contains the rows without
    # the header. The offset of this window from the widget origin is what
    # we must subtract from the event coordinates before get_path_at_pos.
    (x_bin, y_bin) = treeview.get_bin_window().get_position()
    # BUG FIX: the original code subtracted x_bin from y (and never
    # adjusted x). Subtract each axis offset from its own coordinate.
    x -= x_bin
    y -= y_bin
    (path, column, rx, ry) = treeview.get_path_at_pos(x, y) or (None,)*4
    # Only the first column (and its left edge) produces tooltips
    if not getattr(treeview, TreeViewHelper.CAN_TOOLTIP) or x > 50 or (column is not None and column != treeview.get_columns()[0]):
        setattr(treeview, TreeViewHelper.LAST_TOOLTIP, None)
        return False
    if path is not None:
        model = treeview.get_model()
        it = model.get_iter(path)
        role = getattr(treeview, TreeViewHelper.ROLE)
        # The row's URL identifies "which tooltip" this is
        if role == TreeViewHelper.ROLE_EPISODES:
            item_id = model.get_value(it, EpisodeListModel.C_URL)
        elif role == TreeViewHelper.ROLE_PODCASTS:
            item_id = model.get_value(it, PodcastListModel.C_URL)
        last_tooltip = getattr(treeview, TreeViewHelper.LAST_TOOLTIP)
        if last_tooltip is not None and last_tooltip != item_id:
            setattr(treeview, TreeViewHelper.LAST_TOOLTIP, None)
            return False
        setattr(treeview, TreeViewHelper.LAST_TOOLTIP, item_id)
        if role == TreeViewHelper.ROLE_EPISODES:
            description = model.get_value(it, EpisodeListModel.C_TOOLTIP)
            if description:
                tooltip.set_text(description)
            else:
                return False
        elif role == TreeViewHelper.ROLE_PODCASTS:
            channel = model.get_value(it, PodcastListModel.C_CHANNEL)
            if channel is None:
                return False
            channel.request_save_dir_size()
            diskspace_str = util.format_filesize(channel.save_dir_size, 0)
            error_str = model.get_value(it, PodcastListModel.C_ERROR)
            if error_str:
                error_str = _('Feedparser error: %s') % saxutils.escape(error_str.strip())
                error_str = '<span foreground="#ff0000">%s</span>' % error_str
            table = gtk.Table(rows=3, columns=3)
            table.set_row_spacings(5)
            table.set_col_spacings(5)
            table.set_border_width(5)
            heading = gtk.Label()
            heading.set_alignment(0, 1)
            heading.set_markup('<b><big>%s</big></b>\n<small>%s</small>' % (saxutils.escape(channel.title), saxutils.escape(channel.url)))
            table.attach(heading, 0, 1, 0, 1)
            size_info = gtk.Label()
            size_info.set_alignment(1, 1)
            size_info.set_justify(gtk.JUSTIFY_RIGHT)
            size_info.set_markup('<b>%s</b>\n<small>%s</small>' % (diskspace_str, _('disk usage')))
            table.attach(size_info, 2, 3, 0, 1)
            table.attach(gtk.HSeparator(), 0, 3, 1, 2)
            # Shorten very long descriptions: prefer cutting at the
            # first paragraph break, otherwise hard-truncate
            if len(channel.description) < 500:
                description = channel.description
            else:
                pos = channel.description.find('\n\n')
                if pos == -1 or pos > 500:
                    description = channel.description[:498]+'[...]'
                else:
                    description = channel.description[:pos]
            description = gtk.Label(description)
            if error_str:
                description.set_markup(error_str)
            description.set_alignment(0, 0)
            description.set_line_wrap(True)
            table.attach(description, 0, 3, 2, 3)
            table.show_all()
            tooltip.set_custom(table)
        return True
    setattr(treeview, TreeViewHelper.LAST_TOOLTIP, None)
    return False
def treeview_allow_tooltips(self, treeview, allow):
    """Enable/disable tooltips on *treeview* by setting the flag
    that on_treeview_query_tooltip() checks."""
    setattr(treeview, TreeViewHelper.CAN_TOOLTIP, allow)
def update_m3u_playlist_clicked(self, widget):
    """Regenerate the M3U playlist for the selected podcast, if any."""
    channel = self.active_channel
    if channel is None:
        return
    channel.update_m3u_playlist()
    self.show_message(_('Updated M3U playlist in download folder.'), _('Updated playlist'), widget=self.treeChannels)
def treeview_handle_context_menu_click(self, treeview, event):
    """Resolve which rows a context-menu click applies to.

    Returns (model, paths): the clicked row when the click landed
    outside the current selection, the whole selection when it
    landed inside it, or an empty list (and a cleared selection)
    when it hit empty space.
    """
    x, y = int(event.x), int(event.y)
    path, column, rx, ry = treeview.get_path_at_pos(x, y) or (None,)*4
    selection = treeview.get_selection()
    model, paths = selection.get_selected_rows()
    if path is None or (path not in paths and \
            event.button == self.context_menu_mouse_button):
        # We have right-clicked, but not into the selection,
        # assume we don't want to operate on the selection
        paths = []
    if path is not None and not paths and \
            event.button == self.context_menu_mouse_button:
        # No selection or clicked outside selection;
        # select the single item where we clicked
        treeview.grab_focus()
        treeview.set_cursor(path, column, 0)
        paths = [path]
    if not paths:
        # Unselect any remaining items (clicked elsewhere)
        if hasattr(treeview, 'is_rubber_banding_active'):
            if not treeview.is_rubber_banding_active():
                selection.unselect_all()
        else:
            selection.unselect_all()
    return model, paths
def downloads_list_get_selection(self, model=None, paths=None):
    """Inspect the downloads-list selection.

    Returns (selected_tasks, can_queue, can_cancel, can_pause,
    can_remove, can_force) where each can_* flag is True only when
    the action is valid for EVERY selected task.
    """
    if model is None and paths is None:
        selection = self.treeDownloads.get_selection()
        model, paths = selection.get_selected_rows()
    selected_tasks = [(gtk.TreeRowReference(model, path),
            model.get_value(model.get_iter(path),
                DownloadStatusModel.C_TASK)) for path in paths]
    statuses = [task.status for reference, task in selected_tasks]
    # Force-start only when every selected task is queued
    can_force = all(status == download.DownloadTask.QUEUED
            for status in statuses)
    # (Re-)queueing requires paused/failed/cancelled tasks
    can_queue = all(status in (download.DownloadTask.PAUSED,
            download.DownloadTask.FAILED,
            download.DownloadTask.CANCELLED) for status in statuses)
    # Cancelling applies to anything still "in flight" or failed
    can_cancel = all(status in (download.DownloadTask.PAUSED,
            download.DownloadTask.QUEUED,
            download.DownloadTask.DOWNLOADING,
            download.DownloadTask.FAILED) for status in statuses)
    # Pausing only makes sense for queued/downloading tasks
    can_pause = all(status in (download.DownloadTask.QUEUED,
            download.DownloadTask.DOWNLOADING) for status in statuses)
    # Removal is for tasks that are no longer active
    can_remove = all(status in (download.DownloadTask.CANCELLED,
            download.DownloadTask.FAILED,
            download.DownloadTask.DONE) for status in statuses)
    return selected_tasks, can_queue, can_cancel, can_pause, can_remove, can_force
def downloads_finished(self, download_tasks_seen):
    """Notify the user about finished and failed downloads, and
    auto-open completed torrent files (bug 1029)."""
    finished = [str(task) for task in download_tasks_seen
            if task.notify_as_finished()]
    failed = [str(task)+' ('+task.error_message+')'
            for task in download_tasks_seen if task.notify_as_failed()]
    if finished and failed:
        # Mixed outcome: one combined notification
        message = self.format_episode_list(finished, 5)
        message += '\n\n<i>%s</i>\n' % _('These downloads failed:')
        message += self.format_episode_list(failed, 5)
        self.show_message(message, _('Downloads finished'), True, widget=self.labelDownloads)
    elif finished:
        self.show_message(self.format_episode_list(finished),
                _('Downloads finished'), widget=self.labelDownloads)
    elif failed:
        self.show_message(self.format_episode_list(failed),
                _('Downloads failed'), True, widget=self.labelDownloads)
    # Open torrent files right after download (bug 1029)
    if self.config.open_torrent_after_download:
        torrent_episodes = [task.episode for task in download_tasks_seen
                if task.status == task.DONE
                and task.episode.mimetype == 'application/x-bittorrent']
        for episode in torrent_episodes:
            self.playback_episodes([episode])
def format_episode_list(self, episode_list, max_episodes=10):
    """
    Format a list of episode names for notifications

    Will truncate long episode names and limit the amount of
    episodes displayed (max_episodes=10).

    The episode_list parameter should be a list of strings.
    """
    MAX_TITLE_LENGTH = 100
    result = []
    # A slice never over-runs, so min(len(...), max_episodes) is redundant
    for title in episode_list[:max_episodes]:
        if len(title) > MAX_TITLE_LENGTH:
            # Keep head and tail of over-long titles; // instead of /
            # so the index stays an int under true division (Python 3
            # or "from __future__ import division")
            middle = (MAX_TITLE_LENGTH//2)-2
            title = '%s...%s' % (title[0:middle], title[-middle:])
        # Escape for the Pango-markup notification body
        result.append(saxutils.escape(title))
        result.append('\n')
    more_episodes = len(episode_list) - max_episodes
    if more_episodes > 0:
        result.append('(...')
        result.append(N_('%(count)d more episode', '%(count)d more episodes', more_episodes) % {'count':more_episodes})
        result.append('...)')
    return (''.join(result)).strip()
def _for_each_task_set_status(self, tasks, status, force_start=False):
    """Apply *status* to each (row_reference, task) pair in *tasks*.

    status is a download.DownloadTask state constant, or None to
    remove the task from the downloads list entirely. Transitions
    that are invalid for a task's current state are skipped.
    """
    episode_urls = set()
    model = self.treeDownloads.get_model()
    for row_reference, task in tasks:
        if status == download.DownloadTask.QUEUED:
            # Only queue task when its paused/failed/cancelled (or forced)
            if task.status in (task.PAUSED, task.FAILED, task.CANCELLED) or force_start:
                self.download_queue_manager.add_task(task, force_start)
                self.enable_download_list_update()
        elif status == download.DownloadTask.CANCELLED:
            # Cancelling a download allowed when downloading/queued
            if task.status in (task.QUEUED, task.DOWNLOADING):
                task.status = status
            # Cancelling paused/failed downloads requires a call to .run()
            elif task.status in (task.PAUSED, task.FAILED):
                task.status = status
                # Call run, so the partial file gets deleted
                task.run()
        elif status == download.DownloadTask.PAUSED:
            # Pausing a download only when queued/downloading
            if task.status in (task.DOWNLOADING, task.QUEUED):
                task.status = status
        elif status is None:
            # Remove the selected task - cancel downloading/queued tasks
            if task.status in (task.QUEUED, task.DOWNLOADING):
                task.status = task.CANCELLED
            model.remove(model.get_iter(row_reference.get_path()))
            # Remember the URL, so we can tell the UI to update
            try:
                # We don't "see" this task anymore - remove it;
                # this is needed, so update_episode_list_icons()
                # below gets the correct list of "seen" tasks
                self.download_tasks_seen.remove(task)
            except KeyError, key_error:
                log('Cannot remove task from "seen" list: %s', task, sender=self)
            episode_urls.add(task.url)
            # Tell the task that it has been removed (so it can clean up)
            task.removed_from_list()
        else:
            # We can (hopefully) simply set the task status here
            task.status = status
    # Tell the podcasts tab to update icons for our removed podcasts
    self.update_episode_list_icons(episode_urls)
    # Update the tab title and downloads list
    self.update_downloads_list()
def treeview_downloads_show_context_menu(self, treeview, event):
    """Build and pop up the context menu for the downloads list.

    Menu item sensitivity follows the can_* flags computed by
    downloads_list_get_selection() for the current selection.
    """
    model, paths = self.treeview_handle_context_menu_click(treeview, event)
    if not paths:
        if not hasattr(treeview, 'is_rubber_banding_active'):
            return True
        else:
            return not treeview.is_rubber_banding_active()
    if event.button == self.context_menu_mouse_button:
        selected_tasks, can_queue, can_cancel, can_pause, can_remove, can_force = \
                self.downloads_list_get_selection(model, paths)
        def make_menu_item(label, stock_id, tasks, status, sensitive, force_start=False):
            # This creates a menu item for selection-wide actions
            item = gtk.ImageMenuItem(label)
            item.set_image(gtk.image_new_from_stock(stock_id, gtk.ICON_SIZE_MENU))
            item.connect('activate', lambda item: self._for_each_task_set_status(tasks, status, force_start))
            item.set_sensitive(sensitive)
            return self.set_finger_friendly(item)
        menu = gtk.Menu()
        # "Episode details" only works for a single selected task
        item = gtk.ImageMenuItem(_('Episode details'))
        item.set_image(gtk.image_new_from_stock(gtk.STOCK_INFO, gtk.ICON_SIZE_MENU))
        if len(selected_tasks) == 1:
            row_reference, task = selected_tasks[0]
            episode = task.episode
            item.connect('activate', lambda item: self.show_episode_shownotes(episode))
        else:
            item.set_sensitive(False)
        menu.append(self.set_finger_friendly(item))
        menu.append(gtk.SeparatorMenuItem())
        if can_force:
            menu.append(make_menu_item(_('Start download now'), gtk.STOCK_GO_DOWN, selected_tasks, download.DownloadTask.QUEUED, True, True))
        else:
            menu.append(make_menu_item(_('Download'), gtk.STOCK_GO_DOWN, selected_tasks, download.DownloadTask.QUEUED, can_queue, False))
        menu.append(make_menu_item(_('Cancel'), gtk.STOCK_CANCEL, selected_tasks, download.DownloadTask.CANCELLED, can_cancel))
        menu.append(make_menu_item(_('Pause'), gtk.STOCK_MEDIA_PAUSE, selected_tasks, download.DownloadTask.PAUSED, can_pause))
        menu.append(gtk.SeparatorMenuItem())
        menu.append(make_menu_item(_('Remove from list'), gtk.STOCK_REMOVE, selected_tasks, None, can_remove))
        if gpodder.ui.maemo or self.config.enable_fingerscroll:
            # Because we open the popup on left-click for Maemo,
            # we also include a non-action to close the menu
            menu.append(gtk.SeparatorMenuItem())
            item = gtk.ImageMenuItem(_('Close this menu'))
            item.set_image(gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU))
            menu.append(self.set_finger_friendly(item))
        menu.show_all()
        menu.popup(None, None, None, event.button, event.time)
        return True
def treeview_channels_show_context_menu(self, treeview, event):
    """Build and pop up the right-click context menu for the
    podcast (channel) list.

    Returns True so the click is not further propagated.
    """
    model, paths = self.treeview_handle_context_menu_click(treeview, event)
    if not paths:
        return True
    # Check for valid channel id, if there's no id then
    # assume that it is a proxy channel or equivalent
    # and cannot be operated with right click
    # (guard against no channel being active at all)
    if self.active_channel is None or self.active_channel.id is None:
        return True
    if event.button == 3:
        menu = gtk.Menu()
        # (removed a dead "ICON = lambda x: x" local that was never used)
        item = gtk.ImageMenuItem( _('Update podcast'))
        item.set_image(gtk.image_new_from_stock(gtk.STOCK_REFRESH, gtk.ICON_SIZE_MENU))
        item.connect('activate', self.on_itemUpdateChannel_activate)
        # No manual refresh while a feed update is already running
        item.set_sensitive(not self.updating_feed_cache)
        menu.append(item)
        menu.append(gtk.SeparatorMenuItem())
        item = gtk.CheckMenuItem(_('Keep episodes'))
        item.set_active(self.active_channel.channel_is_locked)
        item.connect('activate', self.on_channel_toggle_lock_activate)
        menu.append(self.set_finger_friendly(item))
        item = gtk.ImageMenuItem(_('Remove podcast'))
        item.set_image(gtk.image_new_from_stock(gtk.STOCK_DELETE, gtk.ICON_SIZE_MENU))
        item.connect( 'activate', self.on_itemRemoveChannel_activate)
        menu.append( item)
        if self.config.device_type != 'none':
            item = gtk.MenuItem(_('Synchronize to device'))
            item.connect('activate', lambda item: self.on_sync_to_ipod_activate(item, self.active_channel.get_downloaded_episodes(), force_played=False))
            menu.append(item)
        menu.append( gtk.SeparatorMenuItem())
        item = gtk.ImageMenuItem(_('Podcast details'))
        item.set_image(gtk.image_new_from_stock(gtk.STOCK_INFO, gtk.ICON_SIZE_MENU))
        item.connect('activate', self.on_itemEditChannel_activate)
        menu.append(item)
        menu.show_all()
        # Disable tooltips while we are showing the menu, so
        # the tooltip will not appear over the menu
        self.treeview_allow_tooltips(self.treeChannels, False)
        menu.connect('deactivate', lambda menushell: self.treeview_allow_tooltips(self.treeChannels, True))
        menu.popup( None, None, None, event.button, event.time)
    return True
def on_itemClose_activate(self, widget):
    """Close action: minimize to the tray when a tray icon exists,
    otherwise run the normal window-close path."""
    if self.tray_icon is None:
        self.on_gPodder_delete_event(widget)
    else:
        self.iconify_main_window()
def cover_file_removed(self, channel_url):
    """
    The Cover Downloader calls this when a previously-
    available cover has been removed from the disk. We
    have to update our model to reflect this change.
    """
    self.podcast_list_model.delete_cover_by_url(channel_url)
def cover_download_finished(self, channel, pixbuf):
    """
    The Cover Downloader calls this when it has finished
    downloading (or registering, if already downloaded)
    a new channel cover, which is ready for displaying.
    """
    self.podcast_list_model.add_cover_by_channel(channel, pixbuf)
def save_episodes_as_file(self, episodes):
    """Run the save-to-folder dialog flow for each given episode."""
    for episode in episodes:
        self.save_episode_as_file(episode)
def save_episode_as_file(self, episode):
    """Ask the user where to copy one downloaded episode and perform
    the copy.  The last-used target folder is remembered on self so
    subsequent saves start in the same directory.  Episodes that are
    not downloaded (or whose file is missing) are ignored."""
    PRIVATE_FOLDER_ATTRIBUTE = '_save_episodes_as_file_folder'
    if not episode.was_downloaded(and_exists=True):
        return

    remembered_folder = getattr(self, PRIVATE_FOLDER_ATTRIBUTE, None)
    source_path = episode.local_filename(create=False)
    assert source_path is not None

    suggested_name = util.sanitize_filename(episode.sync_filename(
            self.config.custom_sync_name_enabled,
            self.config.custom_sync_name))

    result, remembered_folder = self.show_copy_dialog(
            src_filename=source_path,
            dst_filename=suggested_name,
            dst_directory=remembered_folder)
    setattr(self, PRIVATE_FOLDER_ATTRIBUTE, remembered_folder)
def copy_episodes_bluetooth(self, episodes):
    """Send the downloaded episodes in "episodes" to a Bluetooth device.

    On Maemo the files are handed to the platform Bluetooth service
    directly.  Elsewhere a background thread copies each file to a
    nicely-named temporary file, sends it and deletes the copy again.
    """
    episodes_to_copy = [e for e in episodes if e.was_downloaded(and_exists=True)]

    if gpodder.ui.maemo:
        util.bluetooth_send_files_maemo([e.local_filename(create=False) \
                for e in episodes_to_copy])
        return True

    def convert_and_send_thread(episodes):
        # Iterate over the filtered list handed in via "args" below.
        # (Previously the parameter was unused and the loop walked the
        # outer, unfiltered "episodes", so episodes without a local
        # file could slip through despite the filter above.)
        for episode in episodes:
            filename = episode.local_filename(create=False)
            assert filename is not None
            destfile = os.path.join(tempfile.gettempdir(), \
                    util.sanitize_filename(episode.sync_filename(self.config.custom_sync_name_enabled, self.config.custom_sync_name)))
            (base, ext) = os.path.splitext(filename)
            if not destfile.endswith(ext):
                destfile += ext

            try:
                shutil.copyfile(filename, destfile)
                util.bluetooth_send_file(destfile)
            except Exception:
                # Best-effort: log, notify the user and keep going
                # with the remaining episodes.
                log('Cannot copy "%s" to "%s".', filename, destfile, sender=self)
                self.notification(_('Error converting file.'), _('Bluetooth file transfer'), important=True)

            # Always remove the temporary copy, even after a failure
            util.delete_file(destfile)

    threading.Thread(target=convert_and_send_thread, args=[episodes_to_copy]).start()
def get_device_name(self):
    """Return a human-readable name for the configured sync device."""
    device_type = self.config.device_type
    if device_type == 'ipod':
        return _('iPod')
    if device_type in ('filesystem', 'mtp'):
        return _('MP3 player')
    return '(unknown device)'
def _treeview_button_released(self, treeview, event):
    """Shared button-release helper: compute the drag delta since the
    press event and, if the release landed on a row (and was not a
    vertical drag), select and focus that row.

    Returns a (selected, dx, dy) tuple."""
    press_x, press_y = TreeViewHelper.get_button_press_event(treeview)
    delta_y = int(abs(event.y - press_y))
    delta_x = int(event.x - press_x)

    hit = treeview.get_path_at_pos(int(event.x), int(event.y))
    # More than 30 pixels of vertical movement is treated as a
    # scroll gesture rather than a click on a row
    if hit is None or delta_y > 30:
        return (False, delta_x, delta_y)

    row_path = hit[0]
    treeview.get_selection().select_path(row_path)
    treeview.set_cursor(row_path)
    treeview.grab_focus()

    return (True, delta_x, delta_y)
def treeview_channels_handle_gestures(self, treeview, event):
    """Interpret horizontal swipes on the podcast list (Maemo):
    swipe right updates the channel, swipe left opens the channel
    editor.  Always returns False so other handlers still run."""
    if self.currently_updating:
        return False

    selected, dx, dy = self._treeview_button_released(treeview, event)

    if selected and self.config.maemo_enable_gestures:
        if dx > 70:
            self.on_itemUpdateChannel_activate()
        elif dx < -70:
            self.on_itemEditChannel_activate(treeview)

    return False
def treeview_available_handle_gestures(self, treeview, event):
    """Interpret horizontal swipes on the episode list (Maemo):
    swipe right plays the selection, swipe left opens the shownotes.
    Everything else is forwarded to the context menu handler."""
    selected, dx, dy = self._treeview_button_released(treeview, event)

    if selected and self.config.maemo_enable_gestures:
        if dx > 70:
            self.on_playback_selected_episodes(None)
            return True
        if dx < -70:
            self.on_shownotes_selected_episodes(None)
            return True

    # Not a recognized gesture - show the context menu instead
    self.treeview_available_show_context_menu(treeview, event)
    return True
def treeview_available_show_context_menu(self, treeview, event):
    """Build and show the context menu for the episode list.

    The menu content depends on the aggregate state of the selected
    episodes (downloaded, downloading, locked, played, new).  Returns
    True when the event was handled.
    """
    model, paths = self.treeview_handle_context_menu_click(treeview, event)
    if not paths:
        # Nothing under the cursor: let rubber-band selection through
        # if the treeview supports it (PyGTK >= 2.12), else eat the event
        if not hasattr(treeview, 'is_rubber_banding_active'):
            return True
        else:
            return not treeview.is_rubber_banding_active()

    if event.button == self.context_menu_mouse_button:
        episodes = self.get_selected_episodes()
        # Aggregate flags over the whole selection
        any_locked = any(e.is_locked for e in episodes)
        any_played = any(e.is_played for e in episodes)
        one_is_new = any(e.state == gpodder.STATE_NORMAL and not e.is_played for e in episodes)
        downloaded = all(e.was_downloaded(and_exists=True) for e in episodes)
        downloading = any(self.episode_is_downloading(e) for e in episodes)

        menu = gtk.Menu()

        (can_play, can_download, can_transfer, can_cancel, can_delete, open_instead_of_play) = self.play_or_download()

        # First entry: Open / Play / Stream, depending on state
        if open_instead_of_play:
            item = gtk.ImageMenuItem(gtk.STOCK_OPEN)
        elif downloaded:
            item = gtk.ImageMenuItem(gtk.STOCK_MEDIA_PLAY)
        else:
            item = gtk.ImageMenuItem(_('Stream'))
            item.set_image(gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, gtk.ICON_SIZE_MENU))

        item.set_sensitive(can_play and not downloading)
        item.connect('activate', self.on_playback_selected_episodes)
        menu.append(self.set_finger_friendly(item))

        # Download or Cancel, mutually exclusive
        if not can_cancel:
            item = gtk.ImageMenuItem(_('Download'))
            item.set_image(gtk.image_new_from_stock(gtk.STOCK_GO_DOWN, gtk.ICON_SIZE_MENU))
            item.set_sensitive(can_download)
            item.connect('activate', self.on_download_selected_episodes)
            menu.append(self.set_finger_friendly(item))
        else:
            item = gtk.ImageMenuItem(gtk.STOCK_CANCEL)
            item.connect('activate', self.on_item_cancel_download_activate)
            menu.append(self.set_finger_friendly(item))

        item = gtk.ImageMenuItem(gtk.STOCK_DELETE)
        item.set_sensitive(can_delete)
        item.connect('activate', self.on_btnDownloadedDelete_clicked)
        menu.append(self.set_finger_friendly(item))

        # Identity helper kept for readable icon-name call sites below
        ICON = lambda x: x

        # Ok, this probably makes sense to only display for downloaded files
        if downloaded:
            menu.append(gtk.SeparatorMenuItem())
            share_item = gtk.MenuItem(_('Send to'))
            menu.append(self.set_finger_friendly(share_item))
            share_menu = gtk.Menu()

            item = gtk.ImageMenuItem(_('Local folder'))
            item.set_image(gtk.image_new_from_stock(gtk.STOCK_DIRECTORY, gtk.ICON_SIZE_MENU))
            item.connect('button-press-event', lambda w, ee: self.save_episodes_as_file(episodes))
            share_menu.append(self.set_finger_friendly(item))
            if self.bluetooth_available:
                item = gtk.ImageMenuItem(_('Bluetooth device'))
                if gpodder.ui.maemo:
                    icon_name = ICON('qgn_list_filesys_bluetooth')
                else:
                    icon_name = ICON('bluetooth')
                item.set_image(gtk.image_new_from_icon_name(icon_name, gtk.ICON_SIZE_MENU))
                item.connect('button-press-event', lambda w, ee: self.copy_episodes_bluetooth(episodes))
                share_menu.append(self.set_finger_friendly(item))
            if can_transfer:
                item = gtk.ImageMenuItem(self.get_device_name())
                item.set_image(gtk.image_new_from_icon_name(ICON('multimedia-player'), gtk.ICON_SIZE_MENU))
                item.connect('button-press-event', lambda w, ee: self.on_sync_to_ipod_activate(w, episodes))
                share_menu.append(self.set_finger_friendly(item))

            share_item.set_submenu(share_menu)

        # State toggles only make sense while nothing is downloading
        if not downloading:
            menu.append(gtk.SeparatorMenuItem())
            if one_is_new:
                item = gtk.CheckMenuItem(_('New'))
                item.set_active(True)
                item.connect('activate', lambda w: self.mark_selected_episodes_old())
                menu.append(self.set_finger_friendly(item))
            else:
                item = gtk.CheckMenuItem(_('New'))
                item.set_active(False)
                item.connect('activate', lambda w: self.mark_selected_episodes_new())
                menu.append(self.set_finger_friendly(item))

            if downloaded:
                item = gtk.CheckMenuItem(_('Played'))
                item.set_active(any_played)
                item.connect( 'activate', lambda w: self.on_item_toggle_played_activate( w, False, not any_played))
                menu.append(self.set_finger_friendly(item))

                item = gtk.CheckMenuItem(_('Keep episode'))
                item.set_active(any_locked)
                item.connect('activate', lambda w: self.on_item_toggle_lock_activate( w, False, not any_locked))
                menu.append(self.set_finger_friendly(item))

        menu.append(gtk.SeparatorMenuItem())
        # Single item, add episode information menu item
        item = gtk.ImageMenuItem(_('Episode details'))
        item.set_image(gtk.image_new_from_stock( gtk.STOCK_INFO, gtk.ICON_SIZE_MENU))
        item.connect('activate', lambda w: self.show_episode_shownotes(episodes[0]))
        menu.append(self.set_finger_friendly(item))

        if gpodder.ui.maemo or self.config.enable_fingerscroll:
            # Because we open the popup on left-click for Maemo,
            # we also include a non-action to close the menu
            menu.append(gtk.SeparatorMenuItem())
            item = gtk.ImageMenuItem(_('Close this menu'))
            item.set_image(gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU))
            menu.append(self.set_finger_friendly(item))

        menu.show_all()
        # Disable tooltips while we are showing the menu, so
        # the tooltip will not appear over the menu
        self.treeview_allow_tooltips(self.treeAvailable, False)
        menu.connect('deactivate', lambda menushell: self.treeview_allow_tooltips(self.treeAvailable, True))
        menu.popup( None, None, None, event.button, event.time)

        return True
def set_title(self, new_title):
    """Set the main window title (no-op on Fremantle)."""
    if gpodder.ui.fremantle:
        return
    self.default_title = new_title
    self.gPodder.set_title(new_title)
def update_episode_list_icons(self, urls=None, selected=False, all=False):
    """Refresh the status icons in the episode list.

    Exactly one mode must be chosen per call:
    - urls: update only episodes with these URLs
    - selected=True: update the currently-selected episodes
    - all=True: update every episode (even filter-hidden ones)

    Raises ValueError if no valid mode was specified.
    NOTE: "all" shadows the builtin, but it is part of the public
    signature and therefore kept as-is.
    """
    shared_args = (
        self.episode_is_downloading,
        self.config.episode_list_descriptions and gpodder.ui.desktop,
        self.config.episode_list_thumbnails and gpodder.ui.desktop,
    )

    if urls is not None:
        # We have a list of URLs to walk through
        self.episode_list_model.update_by_urls(urls, *shared_args)
    elif selected and not all:
        # Update just the rows that are currently selected
        model, paths = self.treeAvailable.get_selection().get_selected_rows()
        for path in reversed(paths):
            row_iter = model.get_iter(path)
            self.episode_list_model.update_by_filter_iter(row_iter,
                    *shared_args)
    elif all and not selected:
        # We update all (even the filter-hidden) episodes
        self.episode_list_model.update_all(*shared_args)
    else:
        # Wrong/invalid call - have to specify at least one parameter
        raise ValueError('Invalid call to update_episode_list_icons')
def episode_list_status_changed(self, episodes):
    """Refresh icons and podcast counters for the given episodes
    after their status changed, then persist the changes."""
    episode_urls = set(e.url for e in episodes)
    channel_urls = set(e.channel.url for e in episodes)
    self.update_episode_list_icons(episode_urls)
    self.update_podcast_list_model(channel_urls)
    self.db.commit()
def clean_up_downloads(self, delete_partial=False):
    """Remove temporary files left behind by old gPodder versions and,
    when delete_partial is True, also *.partial files of unfinished
    downloads.  Afterwards, remove stale/abandoned download folders.
    """
    # Clean up temporary files left behind by old gPodder versions
    temporary_files = glob.glob('%s/*/.tmp-*' % self.config.download_dir)

    if delete_partial:
        temporary_files += glob.glob('%s/*/*.partial' % self.config.download_dir)

    # Loop variable renamed: the old name "tempfile" shadowed the
    # imported tempfile module.
    for temporary_file in temporary_files:
        util.delete_file(temporary_file)

    # Clean up empty download folders and abandoned download folders
    download_dirs = glob.glob(os.path.join(self.config.download_dir, '*'))
    for ddir in download_dirs:
        # The "and False" keeps this branch disabled on purpose until
        # the FIXME below is resolved - do not "fix" by removing it.
        if os.path.isdir(ddir) and False: # FIXME not db.channel_foldername_exists(os.path.basename(ddir)):
            globr = glob.glob(os.path.join(ddir, '*'))
            if len(globr) == 0 or (len(globr) == 1 and globr[0].endswith('/cover')):
                log('Stale download directory found: %s', os.path.basename(ddir), sender=self)
                shutil.rmtree(ddir, ignore_errors=True)
def streaming_possible(self):
    """Return a truthy value if episodes can be streamed directly
    instead of being downloaded first."""
    if gpodder.ui.desktop:
        # On the desktop a real media player must be configured;
        # otherwise xdg-open would likely hand the URL to a browser
        return (self.config.player and self.config.player != 'default')
    # Maemo's built-in media player copes with HTTP URLs natively,
    # so streaming is unconditionally possible there; other UIs: no
    return bool(gpodder.ui.maemo)
def playback_episodes_for_real(self, episodes):
    """Play the given episodes with the configured player(s).

    Episodes are grouped by player command; special players
    (Panucci, MediaBox) are driven via D-Bus on Maemo, the
    'default' group is handed to the system opener, and all
    remaining groups are launched via desktop commands.
    Marks every episode as played and syncs state to gpodder.net.
    """
    # Map player command -> list of filenames/URLs to open with it
    groups = collections.defaultdict(list)
    for episode in episodes:
        file_type = episode.file_type()
        # Pick the player: explicit video player, explicit audio
        # player, or fall back to the system default
        if file_type == 'video' and self.config.videoplayer and \
                self.config.videoplayer != 'default':
            player = self.config.videoplayer
            if gpodder.ui.diablo:
                # Use the wrapper script if it's installed to crop 3GP YouTube
                # videos to fit the screen (looks much nicer than w/ black border)
                if player == 'mplayer' and util.find_command('gpodder-mplayer'):
                    player = 'gpodder-mplayer'
            elif gpodder.ui.fremantle and player == 'mplayer':
                player = 'mplayer -fs %F'
        elif file_type == 'audio' and self.config.player and \
                self.config.player != 'default':
            player = self.config.player
        else:
            player = 'default'

        # Mark episode as played in the database
        episode.mark(is_played=True)
        self.mygpo_client.on_playback([episode])

        filename = episode.local_filename(create=False)
        if filename is None or not os.path.exists(filename):
            # No local file: stream from the episode URL (resolving
            # YouTube links to a real media URL first)
            filename = episode.url
            if youtube.is_video_link(filename):
                fmt_id = self.config.youtube_preferred_fmt_id
                if gpodder.ui.fremantle:
                    fmt_id = 5
                filename = youtube.get_real_download_url(filename, fmt_id)

        # Determine the playback resume position - if the file
        # was played 100%, we simply start from the beginning
        resume_position = episode.current_position
        if resume_position == episode.total_time:
            resume_position = 0

        # Only on Maemo 5, and only if the episode isn't finished yet
        if gpodder.ui.fremantle and not episode.is_finished():
            self.mafw_monitor.set_resume_point(filename, resume_position)

        # If Panucci is configured, use D-Bus on Maemo to call it
        if player == 'panucci':
            try:
                PANUCCI_NAME = 'org.panucci.panucciInterface'
                PANUCCI_PATH = '/panucciInterface'
                PANUCCI_INTF = 'org.panucci.panucciInterface'
                o = gpodder.dbus_session_bus.get_object(PANUCCI_NAME, PANUCCI_PATH)
                i = dbus.Interface(o, PANUCCI_INTF)
                def on_reply(*args):
                    pass
                def error_handler(filename, err):
                    log('Exception in D-Bus call: %s', str(err), \
                            sender=self)
                    # Fallback: use the command line client
                    for command in util.format_desktop_command('panucci', \
                            [filename]):
                        log('Executing: %s', repr(command), sender=self)
                        subprocess.Popen(command)
                on_error = lambda err: error_handler(filename, err)
                # This method only exists in Panucci > 0.9 ('new Panucci')
                i.playback_from(filename, resume_position, \
                        reply_handler=on_reply, error_handler=on_error)
                continue # This file was handled by the D-Bus call
            except Exception, e:
                log('Error calling Panucci using D-Bus', sender=self, traceback=True)
        elif player == 'MediaBox' and gpodder.ui.maemo:
            try:
                MEDIABOX_NAME = 'de.pycage.mediabox'
                MEDIABOX_PATH = '/de/pycage/mediabox/control'
                MEDIABOX_INTF = 'de.pycage.mediabox.control'
                o = gpodder.dbus_session_bus.get_object(MEDIABOX_NAME, MEDIABOX_PATH)
                i = dbus.Interface(o, MEDIABOX_INTF)
                def on_reply(*args):
                    pass
                def on_error(err):
                    log('Exception in D-Bus call: %s', str(err), \
                            sender=self)
                i.load(filename, '%s/x-unknown' % file_type, \
                        reply_handler=on_reply, error_handler=on_error)
                continue # This file was handled by the D-Bus call
            except Exception, e:
                log('Error calling MediaBox using D-Bus', sender=self, traceback=True)

        groups[player].append(filename)

    # Open episodes with system default player
    if 'default' in groups:
        # Special-casing for a single episode when the object is a PDF
        # file - this is needed on Maemo 5, so we only use gui_open()
        # for single PDF files, but still use the built-in media player
        # with an M3U file for single audio/video files. (The Maemo 5
        # media player behaves differently when opening a single-file
        # M3U playlist compared to opening the single file directly.)
        if len(groups['default']) == 1:
            fn = groups['default'][0]
            # The list of extensions is taken from gui_open in util.py
            # where all special-cases of Maemo apps are listed
            for extension in ('.pdf', '.jpg', '.jpeg', '.png'):
                if fn.lower().endswith(extension):
                    util.gui_open(fn)
                    groups['default'] = []
                    break

        if gpodder.ui.maemo and groups['default']:
            # The Nokia Media Player app does not support receiving multiple
            # file names via D-Bus, so we simply place all file names into a
            # temporary M3U playlist and open that with the Media Player.
            m3u_filename = os.path.join(gpodder.home, 'gpodder_open_with.m3u')

            def to_url(x):
                # Diablo's Player hates file:// URLs (Maemo bug 11647)
                if gpodder.ui.diablo:
                    return x
                if '://' not in x:
                    return 'file://' + urllib.quote(os.path.abspath(x))
                return x

            util.write_m3u_playlist(m3u_filename, \
                    map(to_url, groups['default']), \
                    extm3u=False)
            util.gui_open(m3u_filename)
        else:
            for filename in groups['default']:
                log('Opening with system default: %s', filename, sender=self)
                util.gui_open(filename)
        del groups['default']
    elif gpodder.ui.maemo and groups:
        # When on Maemo and not opening with default, show a notification
        # (no startup notification for Panucci / MPlayer yet...)
        if len(episodes) == 1:
            text = _('Opening %s') % episodes[0].title
        else:
            count = len(episodes)
            text = N_('Opening %(count)d episode', 'Opening %(count)d episodes', count) % {'count':count}

        banner = hildon.hildon_banner_show_animation(self.gPodder, '', text)

        def destroy_banner_later(banner):
            banner.destroy()
            return False
        gobject.timeout_add(5000, destroy_banner_later, banner)

    # For each type now, go and create play commands
    # NOTE(review): resume_position here holds the value computed for
    # the LAST episode of the loop above, even for multi-file groups -
    # looks intentional only for single-episode playback; confirm.
    for group in groups:
        for command in util.format_desktop_command(group, groups[group], resume_position):
            log('Executing: %s', repr(command), sender=self)
            subprocess.Popen(command)

    # Persist episode status changes to the database
    self.db.commit()

    # Flush updated episode status
    self.mygpo_client.flush()
def playback_episodes(self, episodes):
    """Filter the given episodes down to the playable ones (downloaded,
    or streamable), play them, and refresh the affected list rows.
    Shows an error message if playback fails."""
    # We need to create a list, because we run through it more than once
    episodes = list(PodcastEpisode.sort_by_pubdate(e for e in episodes if \
            e.was_downloaded(and_exists=True) or self.streaming_possible()))

    try:
        self.playback_episodes_for_real(episodes)
    except Exception, e:
        log('Error in playback!', sender=self, traceback=True)
        if gpodder.ui.desktop:
            self.show_message(_('Please check your media player settings in the preferences dialog.'), \
                    _('Error opening player'), widget=self.toolPreferences)
        else:
            self.show_message(_('Please check your media player settings in the preferences dialog.'))

    # Refresh icons/counters for everything we (attempted to) play
    channel_urls = set()
    episode_urls = set()
    for episode in episodes:
        channel_urls.add(episode.channel.url)
        episode_urls.add(episode.url)
    self.update_episode_list_icons(episode_urls)
    self.update_podcast_list_model(channel_urls)
def play_or_download(self):
    """Recompute which episode actions are currently possible and
    update the toolbar/menu sensitivity accordingly.

    Returns a (can_play, can_download, can_transfer, can_cancel,
    can_delete, open_instead_of_play) tuple.
    NOTE(review): the early branch below returns None instead of a
    tuple when the downloads tab is active - callers appear to
    tolerate this; confirm before relying on the return value.
    """
    if not gpodder.ui.fremantle:
        if self.wNotebook.get_current_page() > 0:
            if gpodder.ui.desktop:
                self.toolCancel.set_sensitive(True)
            return

    if self.currently_updating:
        return (False, False, False, False, False, False)

    ( can_play, can_download, can_transfer, can_cancel, can_delete ) = (False,)*5
    ( is_played, is_locked ) = (False,)*2

    open_instead_of_play = False

    selection = self.treeAvailable.get_selection()
    if selection.count_selected_rows() > 0:
        (model, paths) = selection.get_selected_rows()

        for path in paths:
            try:
                episode = model.get_value(model.get_iter(path), EpisodeListModel.C_EPISODE)
            except TypeError, te:
                log('Invalid episode at path %s', str(path), sender=self)
                continue

            # Non-AV content gets "Open" instead of "Play"
            if episode.file_type() not in ('audio', 'video'):
                open_instead_of_play = True

            if episode.was_downloaded():
                can_play = episode.was_downloaded(and_exists=True)
                is_played = episode.is_played
                is_locked = episode.is_locked
                if not can_play:
                    # Downloaded before, but file is gone: re-download
                    can_download = episode.url != ''
            else:
                if self.episode_is_downloading(episode):
                    can_cancel = True
                else:
                    can_download = episode.url != ''

    # Derive the final flags; cancel takes precedence over the rest
    can_download = can_download and not can_cancel
    can_play = self.streaming_possible() or (can_play and not can_cancel and not can_download)
    can_transfer = can_play and self.config.device_type != 'none' and not can_cancel and not can_download and not open_instead_of_play
    can_delete = not can_cancel

    if gpodder.ui.desktop:
        if open_instead_of_play:
            self.toolPlay.set_stock_id(gtk.STOCK_OPEN)
        else:
            self.toolPlay.set_stock_id(gtk.STOCK_MEDIA_PLAY)
        self.toolPlay.set_sensitive( can_play)
        self.toolDownload.set_sensitive( can_download)
        self.toolTransfer.set_sensitive( can_transfer)
        self.toolCancel.set_sensitive( can_cancel)

    if not gpodder.ui.fremantle:
        self.item_cancel_download.set_sensitive(can_cancel)
        self.itemDownloadSelected.set_sensitive(can_download)
        self.itemOpenSelected.set_sensitive(can_play)
        self.itemPlaySelected.set_sensitive(can_play)
        self.itemDeleteSelected.set_sensitive(can_delete)
        self.item_toggle_played.set_sensitive(can_play)
        self.item_toggle_lock.set_sensitive(can_play)
        self.itemOpenSelected.set_visible(open_instead_of_play)
        self.itemPlaySelected.set_visible(not open_instead_of_play)

    return (can_play, can_download, can_transfer, can_cancel, can_delete, open_instead_of_play)
def on_cbMaxDownloads_toggled(self, widget, *args):
    """Enable the max-downloads spin button only while its checkbox is active."""
    is_enabled = self.cbMaxDownloads.get_active()
    self.spinMaxDownloads.set_sensitive(is_enabled)
def on_cbLimitDownloads_toggled(self, widget, *args):
    """Enable the download-limit spin button only while its checkbox is active."""
    is_enabled = self.cbLimitDownloads.get_active()
    self.spinLimitDownloads.set_sensitive(is_enabled)
def episode_new_status_changed(self, urls):
    """The new/old flag of the episodes with the given URLs changed:
    refresh the podcast list counters and the affected episode icons."""
    self.update_podcast_list_model()
    self.update_episode_list_icons(urls)
def update_podcast_list_model(self, urls=None, selected=False, select_url=None):
    """Update the podcast list treeview model

    If urls is given, it should list the URLs of each
    podcast that has to be updated in the list.

    If selected is True, only update the model contents
    for the currently-selected podcast - nothing more.

    The caller can optionally specify "select_url",
    which is the URL of the podcast that is to be
    selected in the list after the update is complete.
    This only works if the podcast list has to be
    reloaded; i.e. something has been added or removed
    since the last update of the podcast list).
    """
    selection = self.treeChannels.get_selection()
    # "cur_iter" (not "iter") to avoid shadowing the builtin
    model, cur_iter = selection.get_selected()

    if not self.channel_list_changed:
        # Update "all episodes" view in any case (if enabled)
        self.podcast_list_model.update_channel_proxies(self.config)

    if selected:
        # very cheap! only update selected channel
        if cur_iter is not None:
            # If we have selected the "all episodes" view, we have
            # to update all channels for selected episodes:
            if self.podcast_list_model.iter_is_proxy_row(self.config,cur_iter):
                urls = self.get_podcast_urls_from_selected_episodes()
                self.podcast_list_model.update_by_urls(urls)
            else:
                # Otherwise just update the selected row (a podcast)
                self.podcast_list_model.update_by_filter_iter(cur_iter)
    elif not self.channel_list_changed:
        # we can keep the model, but have to update some
        if urls is None:
            # still cheaper than reloading the whole list
            self.podcast_list_model.update_all()
        else:
            # ok, we got a bunch of urls to update
            self.podcast_list_model.update_by_urls(urls)
    else:
        # The channel list changed: rebuild the model from scratch
        if model and cur_iter and select_url is None:
            # Get the URL of the currently-selected podcast
            select_url = model.get_value(cur_iter, PodcastListModel.C_URL)

        # Update the podcast list model with new channels
        self.podcast_list_model.set_channels(self.db, self.config, self.channels)

        try:
            selected_iter = model.get_iter_first()
            # Find the previously-selected URL in the new
            # model if we have an URL (else select first)
            if select_url is not None:
                pos = model.get_iter_first()
                while pos is not None:
                    url = model.get_value(pos, PodcastListModel.C_URL)
                    if url == select_url:
                        selected_iter = pos
                        break
                    pos = model.iter_next(pos)

            if not gpodder.ui.maemo:
                if selected_iter is not None:
                    selection.select_iter(selected_iter)
                self.on_treeChannels_cursor_changed(self.treeChannels)
        except Exception:
            # Selecting may fail on an empty/changed model; log and go on
            log('Cannot select podcast in list', traceback=True, sender=self)
        self.channel_list_changed = False
def episode_is_downloading(self, episode):
    """Return True while the given episode has an active (downloading,
    queued or paused) download task; False otherwise or for None."""
    if episode is None:
        return False
    return any(task.url == episode.url
               for task in self.download_tasks_seen
               if task.status in (task.DOWNLOADING, task.QUEUED, task.PAUSED))
def on_episode_list_filter_changed(self, has_episodes):
    """On Fremantle, toggle between the episode list and an
    explanatory placeholder label when the filter result changes."""
    if not gpodder.ui.fremantle:
        return

    window = self.episodes_window
    if has_episodes:
        window.empty_label.hide()
        window.pannablearea.show()
        return

    # Explain WHY the list is empty (filtered vs. really empty)
    if self.config.episode_list_view_mode != EpisodeListModel.VIEW_ALL:
        text = _('No episodes in current view')
    else:
        text = _('No episodes available')
    window.empty_label.set_text(text)
    window.pannablearea.hide()
    window.empty_label.show()
def update_episode_list_model(self):
    """Reload the episode list model from the active channel.

    The heavy model rebuild happens in an idle callback so the UI
    stays responsive; self.currently_updating guards against
    concurrent interaction while the rebuild is pending.
    """
    if self.channels and self.active_channel is not None:
        if gpodder.ui.fremantle:
            hildon.hildon_gtk_window_set_progress_indicator(self.episodes_window.main_window, True)

        self.currently_updating = True
        self.episode_list_model.clear()
        if gpodder.ui.fremantle:
            self.episodes_window.pannablearea.hide()
            self.episodes_window.empty_label.set_text(_('Loading episodes'))
            self.episodes_window.empty_label.show()

        def update():
            # Runs later on the main loop: fill the model, reset the
            # selection/scroll position and release the update guard
            additional_args = (self.episode_is_downloading, \
                    self.config.episode_list_descriptions and gpodder.ui.desktop, \
                    self.config.episode_list_thumbnails and gpodder.ui.desktop)
            self.episode_list_model.replace_from_channel(self.active_channel, *additional_args)

            self.treeAvailable.get_selection().unselect_all()
            self.treeAvailable.scroll_to_point(0, 0)

            self.currently_updating = False
            self.play_or_download()

            if gpodder.ui.fremantle:
                hildon.hildon_gtk_window_set_progress_indicator(\
                        self.episodes_window.main_window, False)
        util.idle_add(update)
    else:
        # No channels / no active channel: just empty the list
        self.episode_list_model.clear()
@dbus.service.method(gpodder.dbus_interface)
def offer_new_episodes(self, channels=None):
    """D-Bus method: look for new episodes (optionally limited to the
    given channels) and show the "new episodes" dialog when any are
    found.  Returns True when episodes were offered."""
    if gpodder.ui.fremantle:
        # Assume that when this function is called that the
        # notification is not shown anymore (Maemo bug 11345)
        self._fremantle_notification_visible = False

    new_episodes = self.get_new_episodes(channels)
    if not new_episodes:
        return False
    self.new_episodes_show(new_episodes)
    return True
def add_podcast_list(self, urls, auth_tokens=None):
    """Subscribe to a list of podcast given their URLs

    If auth_tokens is given, it should be a dictionary
    mapping URLs to (username, password) tuples.

    The actual feed fetching runs in a background thread
    (thread_proc); results are reported back on the main
    loop via on_after_update.
    """
    if auth_tokens is None:
        auth_tokens = {}

    # Sort and split the URL list into five buckets
    queued, failed, existing, worked, authreq = [], [], [], [], []
    for input_url in urls:
        url = util.normalize_feed_url(input_url)
        if url is None:
            # Fail this one because the URL is not valid
            failed.append(input_url)
        elif self.podcast_list_model.get_filter_path_from_url(url) is not None:
            # A podcast already exists in the list for this URL
            existing.append(url)
        else:
            # This URL has survived the first round - queue for add
            queued.append(url)
            if url != input_url and input_url in auth_tokens:
                # Keep credentials reachable under the normalized URL
                auth_tokens[url] = auth_tokens[input_url]

    error_messages = {}
    redirections = {}

    progress = ProgressIndicator(_('Adding podcasts'), \
            _('Please wait while episode information is downloaded.'), \
            parent=self.get_dialog_parent())

    def on_after_update():
        # Runs on the main loop after thread_proc finished
        progress.on_finished()
        # Report already-existing subscriptions to the user
        if existing:
            title = _('Existing subscriptions skipped')
            message = _('You are already subscribed to these podcasts:') \
                 + '\n\n' + '\n'.join(saxutils.escape(url) for url in existing)
            self.show_message(message, title, widget=self.treeChannels)

        # Report subscriptions that require authentication
        if authreq:
            retry_podcasts = {}
            for url in authreq:
                title = _('Podcast requires authentication')
                message = _('Please login to %s:') % (saxutils.escape(url),)
                success, auth_tokens = self.show_login_dialog(title, message)
                if success:
                    retry_podcasts[url] = auth_tokens
                else:
                    # Stop asking the user for more login data
                    retry_podcasts = {}
                    for url in authreq:
                        error_messages[url] = _('Authentication failed')
                        failed.append(url)
                    break

            # If we have authentication data to retry, do so here
            if retry_podcasts:
                self.add_podcast_list(retry_podcasts.keys(), retry_podcasts)

        # Report website redirections
        for url in redirections:
            title = _('Website redirection detected')
            message = _('The URL %(url)s redirects to %(target)s.') \
                    + '\n\n' + _('Do you want to visit the website now?')
            message = message % {'url': url, 'target': redirections[url]}
            if self.show_confirmation(message, title):
                util.open_website(url)
            else:
                break

        # Report failed subscriptions to the user
        if failed:
            title = _('Could not add some podcasts')
            message = _('Some podcasts could not be added to your list:') \
                 + '\n\n' + '\n'.join(saxutils.escape('%s: %s' % (url, \
                    error_messages.get(url, _('Unknown')))) for url in failed)
            self.show_message(message, title, important=True)

        # Upload subscription changes to gpodder.net
        self.mygpo_client.on_subscribe(worked)

        # If at least one podcast has been added, save and update all
        if self.channel_list_changed:
            # Fix URLs if mygpo has rewritten them
            self.rewrite_urls_mygpo()

            self.save_channels_opml()

            # If only one podcast was added, select it after the update
            if len(worked) == 1:
                url = worked[0]
            else:
                url = None

            # Update the list of subscribed podcasts
            self.update_feed_cache(force_update=False, select_url_afterwards=url)
            self.update_podcasts_tab()

            # Offer to download new episodes
            episodes = []
            for podcast in self.channels:
                if podcast.url in worked:
                    episodes.extend(podcast.get_all_episodes())

            #omit episodes without downloads
            episodes = [e for e in episodes if e.url != '']

            if episodes:
                episodes = list(PodcastEpisode.sort_by_pubdate(episodes, \
                        reverse=True))
                self.new_episodes_show(episodes, \
                        selected=[e.check_is_new() for e in episodes])

    def thread_proc():
        # After the initial sorting and splitting, try all queued podcasts
        length = len(queued)
        for index, url in enumerate(queued):
            progress.on_progress(float(index)/float(length))
            progress.on_message(url)
            log('QUEUE RUNNER: %s', url, sender=self)
            try:
                # The URL is valid and does not exist already - subscribe!
                channel = PodcastChannel.load(self.db, url=url, create=True, \
                        authentication_tokens=auth_tokens.get(url, None), \
                        max_episodes=self.config.max_episodes_per_feed, \
                        download_dir=self.config.download_dir, \
                        allow_empty_feeds=self.config.allow_empty_feeds, \
                        mimetype_prefs=self.config.mimetype_prefs)

                try:
                    username, password = util.username_password_from_url(url)
                except ValueError, ve:
                    username, password = (None, None)

                # Credentials embedded in the URL win only if the
                # channel does not have any of its own yet
                if username is not None and channel.username is None and \
                        password is not None and channel.password is None:
                    channel.username = username
                    channel.password = password
                    channel.save()

                self._update_cover(channel)
            except feedcore.AuthenticationRequired:
                if url in auth_tokens:
                    # Fail for wrong authentication data
                    error_messages[url] = _('Authentication failed')
                    failed.append(url)
                else:
                    # Queue for login dialog later
                    authreq.append(url)
                continue
            except feedcore.WifiLogin, error:
                redirections[url] = error.data
                failed.append(url)
                error_messages[url] = _('Redirection detected')
                continue
            except Exception, e:
                log('Subscription error: %s', e, traceback=True, sender=self)
                error_messages[url] = str(e)
                failed.append(url)
                continue

            assert channel is not None
            worked.append(channel.url)
            self.channels.append(channel)
            self.channel_list_changed = True

        util.idle_add(on_after_update)

    threading.Thread(target=thread_proc).start()
def save_channels_opml(self):
    """Write the current subscriptions to the OPML file; returns
    the exporter's success value."""
    return opml.Exporter(gpodder.subscription_file).write(self.channels)
def find_episode(self, podcast_url, episode_url):
    """Find an episode given its podcast and episode URL

    Returns the matching PodcastEpisode object, or None when
    either the podcast or the episode cannot be found.
    """
    for channel in self.channels:
        if channel.url != podcast_url:
            continue
        for candidate in channel.get_all_episodes():
            if candidate.url == episode_url:
                return candidate
    return None
def process_received_episode_actions(self, updated_urls):
    """Process/merge episode actions from gpodder.net

    This function will merge all changes received from
    the server to the local database and update the
    status of the affected episodes as necessary.
    """
    indicator = ProgressIndicator(_('Merging episode actions'), \
            _('Episode actions from gpodder.net are merged.'), \
            False, self.get_dialog_parent())

    for idx, action in enumerate(self.mygpo_client.get_episode_actions(updated_urls)):
        if action.action == 'play':
            episode = self.find_episode(action.podcast_url, \
                    action.episode_url)

            if episode is not None:
                log('Play action for %s', episode.url, sender=self)
                episode.mark(is_played=True)

                # Only accept positions newer than what we know locally
                if action.timestamp > episode.current_position_updated and \
                        action.position is not None:
                    log('Updating position for %s', episode.url, sender=self)
                    episode.current_position = action.position
                    episode.current_position_updated = action.timestamp

                if action.total:
                    log('Updating total time for %s', episode.url, sender=self)
                    episode.total_time = action.total

                episode.save()
        elif action.action == 'delete':
            episode = self.find_episode(action.podcast_url, \
                    action.episode_url)

            if episode is not None:
                # Only mark deleted if there is no local file to lose
                if not episode.was_downloaded(and_exists=True):
                    # Set the episode to a "deleted" state
                    log('Marking as deleted: %s', episode.url, sender=self)
                    episode.delete_from_disk()
                    episode.save()

        indicator.on_message(N_('%(count)d action processed', '%(count)d actions processed', idx) % {'count':idx})
        # Keep the GUI responsive while merging many actions
        gtk.main_iteration(False)

    indicator.on_finished()
    self.db.commit()
def update_feed_cache_finish_callback(self, updated_urls=None, select_url_afterwards=None):
print("update_feed_cache_finish_callback(%s,%s)" % (updated_urls,select_url_afterwards))
self.db.commit()
self.updating_feed_cache = False
self.channels = PodcastChannel.load_from_db(self.db, self.config.download_dir)
# Process received episode actions for all updated URLs
self.process_received_episode_actions(updated_urls)
self.channel_list_changed = True
self.update_podcast_list_model(select_url=select_url_afterwards)
# Only search for new episodes in podcasts that have been
# updated, not in other podcasts (for single-feed updates)
episodes = self.get_new_episodes([c for c in self.channels if c.url in updated_urls])
real_new_episode_count = len(episodes)
print("there are %i new episodes" % real_new_episode_count)
#only consider episodes with downloads
episodes = [e for e in episodes if e.url != '']
if gpodder.ui.fremantle:
self.fancy_progress_bar.hide()
self.button_subscribe.set_sensitive(True)
self.button_refresh.set_sensitive(True)
hildon.hildon_gtk_window_set_progress_indicator(self.main_window, False)
hildon.hildon_gtk_window_set_progress_indicator(self.episodes_window.main_window, False)
self.update_podcasts_tab()
self.update_episode_list_model()
if self.feed_cache_update_cancelled:
return
def application_in_foreground():
try:
return any(w.get_property('is-topmost') for w in hildon.WindowStack.get_default().get_windows())
except Exception, e:
log('Could not determine is-topmost', traceback=True)
# When in doubt, assume not in foreground
return False
if episodes:
if self.config.auto_download == 'quiet' and not self.config.auto_update_feeds:
# New episodes found, but we should do nothing
self.show_message(_('New episodes are available.'))
elif self.config.auto_download == 'always' or \
(self.config.auto_download == 'wifi' and \
self.network_manager.connection_is_wlan()):
count = len(episodes)
title = N_('Downloading %(count)d new episode.', 'Downloading %(count)d new episodes.', count) % {'count':count}
self.show_message(title)
self.download_episode_list(episodes)
elif self.config.auto_download == 'queue':
self.show_message(_('New episodes have been added to the download list.'))
self.download_episode_list_paused(episodes)
elif application_in_foreground():
if not self._fremantle_notification_visible:
self.new_episodes_show(episodes)
elif not self._fremantle_notification_visible:
try:
import pynotify
pynotify.init('gPodder')
n = pynotify.Notification('gPodder', _('New episodes available'), 'gpodder')
n.set_urgency(pynotify.URGENCY_CRITICAL)
n.set_hint('dbus-callback-default', ' '.join([
gpodder.dbus_bus_name,
gpodder.dbus_gui_object_path,
gpodder.dbus_interface,
'offer_new_episodes',
]))
n.set_category('gpodder-new-episodes')
n.show()
self._fremantle_notification_visible = True
except Exception, e:
log('Error: %s', str(e), sender=self, traceback=True)
self.new_episodes_show(episodes)
self._fremantle_notification_visible = False
elif not self.config.auto_update_feeds:
self.show_message(_('No new episodes. Please check for new episodes later.'))
return
if self.tray_icon:
self.tray_icon.set_status()
if self.feed_cache_update_cancelled:
# The user decided to abort the feed update
self.show_update_feeds_buttons()
elif not episodes:
# Nothing new here - but inform the user
self.pbFeedUpdate.set_fraction(1.0)
if real_new_episode_count == 0:
self.pbFeedUpdate.set_text(_('No new episodes'))
else:
message = N_('%(count)d new episode available', '%(count)d new episodes available', real_new_episode_count) % {'count':real_new_episode_count}
self.pbFeedUpdate.set_text(message)
self.feed_cache_update_cancelled = True
self.btnCancelFeedUpdate.show()
self.btnCancelFeedUpdate.set_sensitive(True)
self.itemUpdate.set_sensitive(True)
if gpodder.ui.maemo:
# btnCancelFeedUpdate is a ToolButton on Maemo
self.btnCancelFeedUpdate.set_stock_id(gtk.STOCK_APPLY)
else:
# btnCancelFeedUpdate is a normal gtk.Button
self.btnCancelFeedUpdate.set_image(gtk.image_new_from_stock(gtk.STOCK_APPLY, gtk.ICON_SIZE_BUTTON))
else:
count = len(episodes)
# New episodes are available
self.pbFeedUpdate.set_fraction(1.0)
# Are we minimized and should we auto download?
if (self.is_iconified() and (self.config.auto_download == 'minimized')) or (self.config.auto_download == 'always'):
self.download_episode_list(episodes)
title = N_('Downloading %(count)d new episode.', 'Downloading %(count)d new episodes.', count) % {'count':count}
self.show_message(title, _('New episodes available'), widget=self.labelDownloads)
self.show_update_feeds_buttons()
elif self.config.auto_download == 'queue':
self.download_episode_list_paused(episodes)
title = N_('%(count)d new episode added to download list.', '%(count)d new episodes added to download list.', count) % {'count':count}
self.show_message(title, _('New episodes available'), widget=self.labelDownloads)
self.show_update_feeds_buttons()
else:
self.show_update_feeds_buttons()
# New episodes are available and we are not minimized
if not self.config.do_not_show_new_episodes_dialog:
self.new_episodes_show(episodes, notification=True)
else:
message = N_('%(count)d new episode available', '%(count)d new episodes available', count) % {'count':count}
self.pbFeedUpdate.set_text(message)
def _update_cover(self, channel):
if channel is not None and not os.path.exists(channel.cover_file) and channel.image:
self.cover_downloader.request_cover(channel)
def update_one_feed_cache_proc(self, queue,total):
    """worker thread for updating feeds.

    It will grab channels from the queue and
    acknowledge them using Queue.task_done()
    so that Queue.join() will succeed when the Queue
    is empty and all the feeds are refreshed.
    total is used to update the status
    """
    while not queue.empty():
        try:
            # get a channel to update
            # get() will fail after 1s if the queue is empty
            # (another thread stole the last channel since
            # empty() was called).
            # the Empty exception is caught at the end
            channel = queue.get(True,1)
            # when update is cancelled, the channels are still
            # dequeued to allow all threads to end
            if not self.feed_cache_update_cancelled:
                try:
                    channel.update(max_episodes=self.config.max_episodes_per_feed, \
                            mimetype_prefs=self.config.mimetype_prefs)
                    self._update_cover(channel)
                except Exception, e:
                    # Report the failing feed to the user, but keep this
                    # worker alive so the remaining feeds still update
                    d = {'url': saxutils.escape(channel.url), 'message': saxutils.escape(str(e))}
                    if d['message']:
                        message = _('Error while updating %(url)s: %(message)s')
                    else:
                        message = _('The feed at %(url)s could not be updated.')
                    self.notification(message % d, _('Error while updating feed'), widget=self.treeChannels)
                    log('Error: %s', str(e), sender=self, traceback=True)

            # By the time we get here the update may have already been cancelled
            if not self.feed_cache_update_cancelled:
                log("Updated %s", channel.title, sender=self)

            # must keep track somehow of the number
            # of feeds already updated.
            # this is stored in self.update_feed_cache_count
            # and concurrent access is protected by
            # self.update_feed_cache_lock
            self.update_feed_cache_lock.acquire()
            updated = self.update_feed_cache_count + 1
            self.update_feed_cache_count = updated
            self.update_feed_cache_lock.release()

            # will update the ui later on
            self.update_feed_cache_status(updated,total,channel)

            # notify main thread of completion of the task
            # (even if feed_cache_update_cancelled)
            queue.task_done()
        except Queue.Empty:
            # Lost the race for the last item; loop condition will exit
            pass
    log("Worker thread done: %s", threading.current_thread().name)
def update_feed_cache_proc(self, channels, select_url_afterwards):
    """update given channels in parallel.

    will exit once all channels are updated or update is cancelled

    Runs on a background thread: fills a queue with the channels,
    starts worker threads, blocks on queue.join() and then schedules
    the finish callback on the GTK main thread via util.idle_add().
    """
    total = len(channels)
    print("update_feed_cache_proc(%i)" % total)
    # Lock + counter shared with the worker threads for progress tracking
    self.update_feed_cache_lock = threading.Lock()
    self.update_feed_cache_count = 0

    queue = Queue.Queue(len(channels))
    for channel in channels:
        queue.put(channel)

    # 4 concurrent feed updates
    for i in range(4):
        args = (queue,total)
        t = threading.Thread(target=self.update_one_feed_cache_proc, args=args)
        t.start()

    # wait for all threads to be done
    queue.join()

    updated_urls = [c.url for c in channels]
    util.idle_add(self.update_feed_cache_finish_callback, updated_urls, select_url_afterwards)
def update_feed_cache_status(self, updated, total, channel):
    """ display some progress information ("updated XXX (N/total)")

    Called from worker threads; the actual GUI work is wrapped in a
    closure and scheduled on the GTK main thread with util.idle_add().
    """
    # By the time we get here the update may have already been cancelled
    if not self.feed_cache_update_cancelled:
        def update_progress():
            d = {'podcast': channel.title, 'position': updated+1, 'total': total}
            progression = _('Updated %(podcast)s (%(position)d/%(total)d)') % d
            self.pbFeedUpdate.set_text(progression)
            if self.tray_icon:
                self.tray_icon.set_status(self.tray_icon.STATUS_UPDATING_FEED_CACHE, progression)
            self.pbFeedUpdate.set_fraction(float(updated+1)/float(total))
        util.idle_add(update_progress)
def show_update_feeds_buttons(self):
    # Make sure that the buttons for updating feeds
    # appear - this should happen after a feed update
    if gpodder.ui.maemo:
        # Maemo: restore the toolbar to its idle layout
        self.btnUpdateSelectedFeed.show()
        self.toolFeedUpdateProgress.hide()
        self.btnCancelFeedUpdate.hide()
        self.btnCancelFeedUpdate.set_is_important(False)
        self.btnCancelFeedUpdate.set_stock_id(gtk.STOCK_CLOSE)
        self.toolbarSpacer.set_expand(True)
        self.toolbarSpacer.set_draw(False)
    else:
        # Desktop: swap the progress box back for the update button
        self.hboxUpdateFeeds.hide()
        self.btnUpdateFeeds.show()
    self.itemUpdate.set_sensitive(True)
    self.itemUpdateChannel.set_sensitive(True)
def on_btnCancelFeedUpdate_clicked(self, widget):
    """Cancel a running feed update, or dismiss the finished state.

    First click while updating: request cancellation (workers poll
    self.feed_cache_update_cancelled). Later clicks (update already
    cancelled/finished): restore the idle buttons on non-Fremantle UIs.
    """
    if not self.feed_cache_update_cancelled:
        self.pbFeedUpdate.set_text(_('Cancelling...'))
        self.feed_cache_update_cancelled = True
        if not gpodder.ui.fremantle:
            self.btnCancelFeedUpdate.set_sensitive(False)
    elif not gpodder.ui.fremantle:
        self.show_update_feeds_buttons()
def update_feed_cache(self, channels=None, force_update=True, select_url_afterwards=None):
    """Start a (possibly partial) feed cache update.

    channels: list of channels to refresh, or None for all channels
    with feed updates enabled
    force_update: when False, only re-load the channel list from the
    database without hitting the network
    select_url_afterwards: podcast URL to select when done
    """
    print("update_feed_cache(%i,%s,%s)" % (channels is not None,force_update,select_url_afterwards))
    if self.updating_feed_cache:
        # An update is already running; on Fremantle a second request
        # is interpreted as a cancellation
        if gpodder.ui.fremantle:
            self.feed_cache_update_cancelled = True
        return

    if not force_update:
        # Database-only refresh of the podcast list (no network)
        self.channels = PodcastChannel.load_from_db(self.db, self.config.download_dir)
        self.channel_list_changed = True
        self.update_podcast_list_model(select_url=select_url_afterwards)
        return

    # Fix URLs if mygpo has rewritten them
    self.rewrite_urls_mygpo()

    self.updating_feed_cache = True

    if channels is None:
        # Only update podcasts for which updates are enabled
        channels = [c for c in self.channels if c.feed_update_enabled]

    if gpodder.ui.fremantle:
        # Fremantle: spinner in the window title bar + fancy progress bar
        hildon.hildon_gtk_window_set_progress_indicator(self.main_window, True)
        hildon.hildon_gtk_window_set_progress_indicator(self.episodes_window.main_window, True)
        self.fancy_progress_bar.show()
        self.button_subscribe.set_sensitive(False)
        self.button_refresh.set_sensitive(False)
        self.feed_cache_update_cancelled = False
    else:
        # Desktop/Diablo: disable update menu items and show the
        # cancel button + progress bar
        self.itemUpdate.set_sensitive(False)
        self.itemUpdateChannel.set_sensitive(False)

        if self.tray_icon:
            self.tray_icon.set_status(self.tray_icon.STATUS_UPDATING_FEED_CACHE)

        self.feed_cache_update_cancelled = False
        self.btnCancelFeedUpdate.show()
        self.btnCancelFeedUpdate.set_sensitive(True)
        if gpodder.ui.maemo:
            self.toolbarSpacer.set_expand(False)
            self.toolbarSpacer.set_draw(True)
            self.btnUpdateSelectedFeed.hide()
            self.toolFeedUpdateProgress.show_all()
        else:
            self.btnCancelFeedUpdate.set_image(gtk.image_new_from_stock(gtk.STOCK_STOP, gtk.ICON_SIZE_BUTTON))
            self.hboxUpdateFeeds.show_all()
            self.btnUpdateFeeds.hide()

        if len(channels) == 1:
            text = _('Updating "%s"...') % channels[0].title
        else:
            count = len(channels)
            text = N_('Updating %(count)d feed...', 'Updating %(count)d feeds...', count) % {'count':count}
        self.pbFeedUpdate.set_text(text)
        self.pbFeedUpdate.set_fraction(0)

    # The actual network work happens on a background thread
    args = (channels, select_url_afterwards)
    threading.Thread(target=self.update_feed_cache_proc, args=args).start()
def on_gPodder_delete_event(self, widget, *args):
    """Called when the GUI wants to close the window

    Displays a confirmation dialog (and closes/hides gPodder)
    Returns True to stop GTK from destroying the window itself.
    """
    downloading = self.download_status_model.are_downloads_in_progress()

    if downloading:
        if gpodder.ui.fremantle:
            # Fremantle closes without asking
            self.close_gpodder()
        elif gpodder.ui.diablo:
            result = self.show_confirmation(_('Do you really want to quit gPodder now?'))
            if result:
                self.close_gpodder()
            else:
                return True
        # NOTE(review): on Fremantle/Diablo, execution appears to fall
        # through to the desktop dialog below after close_gpodder();
        # presumably close_gpodder()/self.quit() ends the process first
        # - confirm against the enclosing class.
        dialog = gtk.MessageDialog(self.gPodder, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_NONE)
        dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        quit_button = dialog.add_button(gtk.STOCK_QUIT, gtk.RESPONSE_CLOSE)

        title = _('Quit gPodder')
        message = _('You are downloading episodes. You can resume downloads the next time you start gPodder. Do you want to quit now?')

        dialog.set_title(title)
        dialog.set_markup('<span weight="bold" size="larger">%s</span>\n\n%s'%(title, message))

        quit_button.grab_focus()
        result = dialog.run()
        dialog.destroy()

        if result == gtk.RESPONSE_CLOSE:
            self.close_gpodder()
    else:
        # No downloads running - quit immediately
        self.close_gpodder()

    return True
def close_gpodder(self):
    """ clean everything and exit properly

    Saves the subscription list, hides the UI, asks all download
    tasks to quit, drains pending GTK events, closes the database
    and finally calls self.quit().
    """
    if self.channels:
        if self.save_channels_opml():
            pass # FIXME: Add mygpo synchronization here
        else:
            self.show_message(_('Please check your permissions and free disk space.'), _('Error saving podcast list'), important=True)

    self.gPodder.hide()

    if self.tray_icon is not None:
        self.tray_icon.set_visible(False)

    # Notify all tasks to carry out any clean-up actions
    self.download_status_model.tell_all_tasks_to_quit()

    # Let queued GTK events (e.g. task status updates) run before exiting
    while gtk.events_pending():
        gtk.main_iteration(False)

    self.db.close()

    self.quit()
    #:sys.exit(0)
def get_expired_episodes(self):
    """Yield downloaded episodes that qualify for automatic cleanup.

    An episode is expired when it is not locked, is older than the
    configured age threshold, and its played/unplayed state is
    enabled for automatic removal in the configuration.
    """
    config = self.config

    def is_expired(episode):
        # Locked episodes are never considered old
        if episode.is_locked:
            return False
        # Fresh episodes are never considered old
        if episode.age_in_days() < config.episode_old_age:
            return False
        # Played/unplayed episodes are only removed when configured
        if episode.is_played:
            return config.auto_remove_played_episodes
        return config.auto_remove_unplayed_episodes

    for channel in self.channels:
        for episode in channel.get_downloaded_episodes():
            if is_expired(episode):
                yield episode
def delete_episode_list(self, episodes, confirm=True, skip_locked=True):
    """Delete the given episodes' downloaded files on a worker thread.

    confirm: ask the user first (only when downloaded files exist)
    skip_locked: silently skip locked episodes instead of deleting them
    Returns False when nothing was deleted, True when deletion started.
    """
    if not episodes:
        return False

    # Episodes that actually have a downloadable URL (and thus files)
    download_episodes = [e for e in episodes if e.url != '']

    if skip_locked:
        episodes = [e for e in episodes if not e.is_locked or e.url == '']

        if not episodes:
            title = _('Episodes are locked')
            message = _('The selected episodes are locked. Please unlock the episodes that you want to delete before trying to delete them.')
            self.notification(message, title, widget=self.treeAvailable)
            return False

    count = len(episodes)
    title = N_('Delete %(count)d episode?', 'Delete %(count)d episodes?', count) % {'count':count}
    message = _('Deleting episodes removes downloaded files.')

    if gpodder.ui.fremantle:
        message = '\n'.join([title, message])

    #ask for confirmation only if episodes contain downloaded files
    if confirm and download_episodes:
        if not self.show_confirmation(message, title):
            return False

    progress = ProgressIndicator(_('Deleting episodes'), \
            _('Please wait while episodes are deleted'), \
            parent=self.get_dialog_parent())

    def finish_deletion(episode_urls, channel_urls):
        # Runs on the GTK main thread after the worker is done
        progress.on_finished()

        # Episodes have been deleted - persist the database
        self.db.commit()

        self.update_episode_list_icons(episode_urls)
        self.update_podcast_list_model(channel_urls)
        self.play_or_download()

    def thread_proc():
        episode_urls = set()
        channel_urls = set()

        episodes_status_update = []
        for idx, episode in enumerate(episodes):
            progress.on_progress(float(idx)/float(len(episodes)))
            if episode.is_locked and episode.url != '' and skip_locked:
                log('Not deleting episode (is locked): %s', episode.title)
            else:
                log('Deleting episode: %s', episode.title)
                progress.on_message(episode.title)
                episode.delete_from_disk()
                episode_urls.add(episode.url)
                channel_urls.add(episode.channel.url)
                episodes_status_update.append(episode)

                # Tell the shownotes window that we have removed the episode
                if self.episode_shownotes_window is not None and \
                        self.episode_shownotes_window.episode is not None and \
                        self.episode_shownotes_window.episode.url == episode.url:
                    util.idle_add(self.episode_shownotes_window._download_status_changed, None)

        # Notify the web service about the status update + upload
        self.mygpo_client.on_delete(episodes_status_update)
        self.mygpo_client.flush()

        util.idle_add(finish_deletion, episode_urls, channel_urls)

    threading.Thread(target=thread_proc).start()

    return True
def on_itemRemoveOldEpisodes_activate(self, widget):
    # Menu handler: open the episode cleanup dialog for all podcasts
    self.show_delete_episodes_window()
def show_delete_episodes_window(self, channel=None):
    """Offer deletion of episodes

    If channel is None, offer deletion of all episodes.
    Otherwise only offer deletion of episodes in the channel.
    """
    if gpodder.ui.maemo:
        columns = (
            ('maemo_remove_markup', None, None, _('Episode')),
        )
    else:
        columns = (
            ('title_markup', None, None, _('Episode')),
            ('filesize_prop', 'length', gobject.TYPE_INT, _('Size')),
            ('pubdate_prop', 'pubDate', gobject.TYPE_INT, _('Released')),
            ('played_prop', None, None, _('Status')),
            ('age_prop', 'age_int_prop', gobject.TYPE_INT, _('Downloaded')),
        )

    # Quick-selection buttons offered by the episode selector dialog
    msg_older_than = N_('Select older than %(count)d day', 'Select older than %(count)d days', self.config.episode_old_age)
    selection_buttons = {
            _('Select played'): lambda episode: episode.is_played,
            _('Select finished'): lambda episode: episode.is_finished(),
            msg_older_than % {'count':self.config.episode_old_age}: lambda episode: episode.age_in_days() > self.config.episode_old_age,
    }

    instructions = _('Select the episodes you want to delete:')

    if channel is None:
        channels = self.channels
    else:
        channels = [channel]

    episodes = []
    for channel in channels:
        for episode in channel.get_downloaded_episodes():
            # Disallow deletion of locked episodes that still exist
            if not episode.is_locked or not episode.file_exists():
                episodes.append(episode)

    # Pre-select episodes that are played or whose file is already gone
    selected = [e.is_played or not e.file_exists() for e in episodes]

    gPodderEpisodeSelector(self.gPodder, title = _('Delete episodes'), instructions = instructions, \
            episodes = episodes, selected = selected, columns = columns, \
            stock_ok_button = gtk.STOCK_DELETE, callback = self.delete_episode_list, \
            selection_buttons = selection_buttons, _config=self.config, \
            show_episode_shownotes=self.show_episode_shownotes)
def on_selected_episodes_status_changed(self):
    """Refresh the UI and persist after selected episodes changed state."""
    # The order of the updates here is important! When "All episodes" is
    # selected, the update of the podcast list model depends on the episode
    # list selection to determine which podcasts are affected. Updating
    # the episode list could remove the selection if a filter is active.
    self.update_podcast_list_model(selected=True)
    self.update_episode_list_icons(selected=True)
    self.db.commit()
def mark_selected_episodes_new(self):
    """Mark every currently selected episode as new, then refresh UI state."""
    selection = self.get_selected_episodes()
    for item in selection:
        item.mark_new()
    self.on_selected_episodes_status_changed()
def mark_selected_episodes_old(self):
    """Mark every currently selected episode as old, then refresh UI state."""
    selection = self.get_selected_episodes()
    for item in selection:
        item.mark_old()
    self.on_selected_episodes_status_changed()
def on_item_toggle_played_activate( self, widget, toggle = True, new_value = False):
    """Toggle or explicitly set the 'played' flag on the selected episodes.

    toggle=True inverts each episode's flag; toggle=False assigns
    new_value to every selected episode.
    """
    for episode in self.get_selected_episodes():
        target = (not episode.is_played) if toggle else new_value
        episode.mark(is_played=target)
    self.on_selected_episodes_status_changed()
def on_item_toggle_lock_activate(self, widget, toggle=True, new_value=False):
    """Toggle or explicitly set the 'locked' (keep) flag on the selected
    episodes, then refresh the UI and persist the change."""
    for episode in self.get_selected_episodes():
        target = (not episode.is_locked) if toggle else new_value
        episode.mark(is_locked=target)
    self.on_selected_episodes_status_changed()
def on_channel_toggle_lock_activate(self, widget, toggle=True, new_value=False):
    """Invert the keep/lock flag of the active podcast, propagate the
    new state to all its episodes and refresh the affected views.

    NOTE(review): toggle/new_value are accepted for signature
    compatibility with the other toggle handlers but are unused here;
    the channel lock is always inverted.
    """
    channel = self.active_channel
    if channel is None:
        return

    channel.channel_is_locked = not channel.channel_is_locked
    channel.update_channel_lock()

    locked = channel.channel_is_locked
    for episode in channel.get_all_episodes():
        episode.mark(is_locked=locked)

    self.update_podcast_list_model(selected=True)
    self.update_episode_list_icons(all=True)
def on_itemUpdateChannel_activate(self, widget=None):
    """Menu handler: update only the currently selected podcast."""
    if self.active_channel is None:
        title = _('No podcast selected')
        message = _('Please select a podcast in the podcasts list to update.')
        self.show_message( message, title, widget=self.treeChannels)
        return

    # Dirty hack to check for "All episodes" (see gpodder.gtkui.model)
    if getattr(self.active_channel, 'ALL_EPISODES_PROXY', False):
        # "All episodes" pseudo-channel selected: update everything
        self.update_feed_cache()
    else:
        self.update_feed_cache(channels=[self.active_channel])
def on_itemUpdate_activate(self, widget=None):
    """Menu handler: update all podcasts, or show the welcome dialog
    when there are no subscriptions yet."""
    # Check if we have outstanding subscribe/unsubscribe actions
    if self.on_add_remove_podcasts_mygpo():
        log('Update cancelled (received server changes)', sender=self)
        return

    if self.channels:
        self.update_feed_cache()
    else:
        # No subscriptions: offer example podcasts or gpodder.net import
        welcome_window = gPodderWelcome(self.main_window,
                center_on_widget=self.main_window,
                show_example_podcasts_callback=self.on_itemImportChannels_activate,
                setup_my_gpodder_callback=self.on_download_subscriptions_from_mygpo)
        result = welcome_window.main_window.run()
        welcome_window.main_window.destroy()
        if result == gPodderWelcome.RESPONSE_OPML:
            self.on_itemImportChannels_activate(None)
        elif result == gPodderWelcome.RESPONSE_MYGPO:
            self.on_download_subscriptions_from_mygpo(None)
def download_episode_list_paused(self, episodes):
    """Queue the given episodes for download in the paused state."""
    self.download_episode_list(episodes, add_paused=True)
def download_episode_list(self, episodes, add_paused=False, force_start=False):
    """Create (or re-queue) download tasks for the given episodes.

    add_paused: create new tasks in the PAUSED state instead of queueing
    force_start: passed through to the download queue manager
    """
    enable_update = False

    for episode in episodes:
        log('Downloading episode: %s', episode.title, sender = self)
        if not episode.was_downloaded(and_exists=True):
            # Re-use an existing (non-active) task for this URL if one exists
            task_exists = False
            for task in self.download_tasks_seen:
                if episode.url == task.url and task.status not in (task.DOWNLOADING, task.QUEUED):
                    self.download_queue_manager.add_task(task, force_start)
                    enable_update = True
                    task_exists = True
                    # NOTE(review): this 'continue' only advances the inner
                    # task scan; the task_exists flag below skips the episode
                    continue

            if task_exists:
                continue

            try:
                task = download.DownloadTask(episode, self.config)
            except Exception, e:
                d = {'episode': episode.title, 'message': str(e)}
                message = _('Download error while downloading %(episode)s: %(message)s')
                self.show_message(message % d, _('Download error'), important=True)
                log('Download error while downloading %s', episode.title, sender=self, traceback=True)
                continue

            if add_paused:
                task.status = task.PAUSED
            else:
                # Report the download to gpodder.net and queue it
                self.mygpo_client.on_download([task.episode])
                self.download_queue_manager.add_task(task, force_start)

            self.download_status_model.register_task(task)
            enable_update = True

    if enable_update:
        self.enable_download_list_update()

    # Flush updated episode status
    self.mygpo_client.flush()
def cancel_task_list(self, tasks):
    """Cancel every queued, running or paused task in the list and
    refresh the episode icons, play/download state and downloads tab."""
    if not tasks:
        return

    for task in tasks:
        was_paused = (task.status == task.PAUSED)
        if was_paused or task.status in (task.QUEUED, task.DOWNLOADING):
            task.status = task.CANCELLED
            if was_paused:
                # Call run, so the partial file gets deleted
                task.run()

    self.update_episode_list_icons([task.url for task in tasks])
    self.play_or_download()

    # Update the tab title and downloads list
    self.update_downloads_list()
def new_episodes_show(self, episodes, notification=False, selected=None):
    """Open the 'New episodes available' selector dialog.

    notification: show as notification (Maemo only; ignored elsewhere)
    selected: optional per-episode pre-selection list; defaults to all
    """
    if gpodder.ui.maemo:
        columns = (
            ('maemo_markup', None, None, _('Episode')),
        )
        show_notification = notification
    else:
        columns = (
            ('title_markup', None, None, _('Episode')),
            ('filesize_prop', 'length', gobject.TYPE_INT, _('Size')),
            ('pubdate_prop', 'pubDate', gobject.TYPE_INT, _('Released')),
        )
        show_notification = False

    instructions = _('Select the episodes you want to download:')

    # Only one "new episodes" window at a time
    if self.new_episodes_window is not None:
        self.new_episodes_window.main_window.destroy()
        self.new_episodes_window = None

    def download_episodes_callback(episodes):
        self.new_episodes_window = None
        self.download_episode_list(episodes)

    if selected is None:
        # Select all by default
        selected = [True]*len(episodes)

    self.new_episodes_window = gPodderEpisodeSelector(self.gPodder, \
            title=_('New episodes available'), \
            instructions=instructions, \
            episodes=episodes, \
            columns=columns, \
            selected=selected, \
            stock_ok_button = 'gpodder-download', \
            callback=download_episodes_callback, \
            remove_callback=lambda e: e.mark_old(), \
            remove_action=_('Mark as old'), \
            remove_finished=self.episode_new_status_changed, \
            _config=self.config, \
            show_notification=show_notification, \
            show_episode_shownotes=self.show_episode_shownotes)
    pass
def on_itemDownloadAllNew_activate(self, widget, *args):
    """Menu handler: offer all new episodes, or tell the user there
    are none right now."""
    if not self.offer_new_episodes():
        self.show_message(_('Please check for new episodes later.'), \
                _('No new episodes available'), widget=self.btnUpdateFeeds)
def get_new_episodes(self, channels=None):
    """Return a flat list of all new episodes in the given channels
    (defaults to every subscribed channel)."""
    if channels is None:
        channels = self.channels
    is_downloading = self.episode_is_downloading
    return [episode
            for channel in channels
            for episode in channel.get_new_episodes(downloading=is_downloading)]
@dbus.service.method(gpodder.dbus_interface)
def start_device_synchronization(self):
    """Public D-Bus API for starting Device sync (Desktop only)

    This method can be called to initiate a synchronization with
    a configured portable media player. This only works for the
    Desktop version of gPodder and does nothing on Maemo.

    Returns True when a sync was started, False otherwise.
    """
    if gpodder.ui.desktop:
        self.on_sync_to_ipod_activate(None)
        return True

    return False
def on_sync_to_ipod_activate(self, widget, episodes=None, force_played=True):
    # Delegate device synchronization to the sync UI helper
    self.sync_ui.on_synchronize_episodes(self.channels, episodes, force_played)

def commit_changes_to_database(self):
    """This will be called after the sync process is finished"""
    self.db.commit()

def on_cleanup_ipod_activate(self, widget, *args):
    # Delegate device cleanup to the sync UI helper
    self.sync_ui.on_cleanup_device()

def on_manage_device_playlist(self, widget):
    # Delegate playlist management to the sync UI helper
    self.sync_ui.on_manage_device_playlist()
def show_hide_tray_icon(self):
    """Create, show or hide the tray icon to match the current
    configuration (requires the file-level have_trayicon flag)."""
    if self.config.display_tray_icon and have_trayicon and self.tray_icon is None:
        self.tray_icon = GPodderStatusIcon(self, gpodder.icon_file, self.config)
    elif not self.config.display_tray_icon and self.tray_icon:
        # Icon disabled in config: hide and drop the existing instance
        self.tray_icon.set_visible(False)
        del self.tray_icon
        self.tray_icon = None

    if self.tray_icon:
        self.tray_icon.set_visible(True)
def on_itemShowAllEpisodes_activate(self, widget):
    # Persist the "show all podcasts" view toggle
    self.config.podcast_list_view_all = widget.get_active()

def on_itemShowNewEpisodes_activate(self, widget):
    # Persist the "only show podcasts with new episodes" toggle
    self.config.podcast_list_view_new = widget.get_active()

def on_itemShowToolbar_activate(self, widget):
    # Persist the toolbar visibility toggle (reads the menu item state)
    self.config.show_toolbar = self.itemShowToolbar.get_active()

def on_itemShowDescription_activate(self, widget):
    # Persist the episode description visibility toggle
    self.config.episode_list_descriptions = self.itemShowDescription.get_active()
def on_item_view_hide_boring_podcasts_toggled(self, toggleaction):
    """Apply the 'hide boring podcasts' filter to the podcast list."""
    self.config.podcast_list_hide_boring = toggleaction.get_active()
    if self.config.podcast_list_hide_boring:
        # Filter the podcast list with the current episode view mode
        self.podcast_list_model.set_view_mode(self.config.episode_list_view_mode)
    else:
        # -1 disables filtering (show all podcasts)
        self.podcast_list_model.set_view_mode(-1)
def on_item_view_podcasts_changed(self, radioaction, current):
    # Only on Fremantle
    # Map the selected radio action to a podcast list view mode
    if current == self.item_view_podcasts_all:
        self.podcast_list_model.set_view_mode(-1)
    elif current == self.item_view_podcasts_downloaded:
        self.podcast_list_model.set_view_mode(EpisodeListModel.VIEW_DOWNLOADED)
    elif current == self.item_view_podcasts_unplayed:
        self.podcast_list_model.set_view_mode(EpisodeListModel.VIEW_UNPLAYED)

    # Persist whatever mode the model ended up in
    self.config.podcast_list_view_mode = self.podcast_list_model.get_view_mode()
def on_item_view_episodes_changed(self, radioaction, current):
    """Switch the episode list filter and persist the chosen mode."""
    if current == self.item_view_episodes_all:
        self.config.episode_list_view_mode = EpisodeListModel.VIEW_ALL
    elif current == self.item_view_episodes_undeleted:
        self.config.episode_list_view_mode = EpisodeListModel.VIEW_UNDELETED
    elif current == self.item_view_episodes_downloaded:
        self.config.episode_list_view_mode = EpisodeListModel.VIEW_DOWNLOADED
    elif current == self.item_view_episodes_unplayed:
        self.config.episode_list_view_mode = EpisodeListModel.VIEW_UNPLAYED

    self.episode_list_model.set_view_mode(self.config.episode_list_view_mode)

    # Keep the podcast list filter in sync when "hide boring" is active
    if self.config.podcast_list_hide_boring and not gpodder.ui.fremantle:
        self.podcast_list_model.set_view_mode(self.config.episode_list_view_mode)
def update_item_device( self):
    """Show/hide the device menu item according to the configured
    device type (not available on Fremantle)."""
    if not gpodder.ui.fremantle:
        if self.config.device_type != 'none':
            self.itemDevice.set_visible(True)
            self.itemDevice.label = self.get_device_name()
        else:
            self.itemDevice.set_visible(False)
def properties_closed( self):
    """Callback invoked when the preferences dialog is closed;
    re-applies settings that the dialog may have changed."""
    self.preferences_dialog = None
    self.show_hide_tray_icon()
    self.update_item_device()
    if gpodder.ui.maemo:
        selection = self.treeAvailable.get_selection()
        # Gesture/finger-scroll modes conflict with multiple selection
        if self.config.maemo_enable_gestures or \
                self.config.enable_fingerscroll:
            selection.set_mode(gtk.SELECTION_SINGLE)
        else:
            selection.set_mode(gtk.SELECTION_MULTIPLE)
def on_itemPreferences_activate(self, widget, *args):
    """Open the preferences dialog; properties_closed() runs on close."""
    self.preferences_dialog = gPodderPreferences(self.main_window, \
            _config=self.config, \
            callback_finished=self.properties_closed, \
            user_apps_reader=self.user_apps_reader, \
            parent_window=self.main_window, \
            mygpo_client=self.mygpo_client, \
            on_send_full_subscriptions=self.on_send_full_subscriptions, \
            on_itemExportChannels_activate=self.on_itemExportChannels_activate)

    # Initial message to relayout window (in case it's opened in portrait mode)
    self.preferences_dialog.on_window_orientation_changed(self._last_orientation)
def on_itemDependencies_activate(self, widget):
    # Open the dialog that lists optional dependencies and their status
    gPodderDependencyManager(self.gPodder)

def on_goto_mygpo(self, widget):
    # Open the user's gpodder.net page in the web browser
    self.mygpo_client.open_website()
def on_download_subscriptions_from_mygpo(self, action=None):
    """Ask for gpodder.net credentials and import the subscription
    list from the web service via its OPML export."""
    title = _('Login to gpodder.net')
    message = _('Please login to download your subscriptions.')
    success, (username, password) = self.show_login_dialog(title, message, \
            self.config.mygpo_username, self.config.mygpo_password)
    if not success:
        return

    self.config.mygpo_username = username
    self.config.mygpo_password = password

    dir = gPodderPodcastDirectory(self.gPodder, _config=self.config, \
            custom_title=_('Subscriptions on gpodder.net'), \
            add_urls_callback=self.add_podcast_list, \
            hide_url_entry=True)

    # TODO: Refactor this into "gpodder.my" or mygpoclient, so that
    # we do not have to hardcode the URL here
    OPML_URL = 'http://gpodder.net/subscriptions/%s.opml' % self.config.mygpo_username
    url = util.url_add_authentication(OPML_URL, \
            self.config.mygpo_username, \
            self.config.mygpo_password)
    dir.download_opml_file(url)
def on_mygpo_settings_activate(self, action=None):
    """Open the gpodder.net settings dialog (Maemo 4 / Diablo only)."""
    # This dialog is only used for Maemo 4
    if not gpodder.ui.diablo:
        return

    # The dialog shows itself on construction; the reference is not
    # used afterwards
    settings = MygPodderSettings(self.main_window, \
            config=self.config, \
            mygpo_client=self.mygpo_client, \
            on_send_full_subscriptions=self.on_send_full_subscriptions)
def on_itemAddChannel_activate(self, widget=None):
    # Open the "Add podcast" dialog; new URLs are added via callback
    gPodderAddPodcast(self.gPodder, \
            add_urls_callback=self.add_podcast_list)
def on_itemEditChannel_activate(self, widget, *args):
    """Open the channel editor for the currently selected podcast."""
    if self.active_channel is None:
        title = _('No podcast selected')
        message = _('Please select a podcast in the podcasts list to edit.')
        self.show_message( message, title, widget=self.treeChannels)
        return

    # Refresh the podcast list when the editor is closed
    callback_closed = lambda: self.update_podcast_list_model(selected=True)
    gPodderChannel(self.main_window, \
            channel=self.active_channel, \
            callback_closed=callback_closed, \
            cover_downloader=self.cover_downloader)
def on_itemMassUnsubscribe_activate(self, item=None):
    """Let the user pick several podcasts to unsubscribe from at once."""
    columns = (
        ('title', None, None, _('Podcast')),
    )

    # We're abusing the Episode Selector for selecting Podcasts here,
    # but it works and looks good, so why not? -- thp
    gPodderEpisodeSelector(self.main_window, \
            title=_('Remove podcasts'), \
            instructions=_('Select the podcast you want to remove.'), \
            episodes=self.channels, \
            columns=columns, \
            size_attribute=None, \
            stock_ok_button=_('Remove'), \
            callback=self.remove_podcast_list, \
            _config=self.config)
def remove_podcast_list(self, channels, confirm=True):
    """Remove the given podcasts (and their episodes) on a worker thread.

    confirm: ask the user before removing anything.
    """
    if not channels:
        log('No podcasts selected for deletion', sender=self)
        return

    if len(channels) == 1:
        title = _('Removing podcast')
        info = _('Please wait while the podcast is removed')
        message = _('Do you really want to remove this podcast and its episodes?')
    else:
        title = _('Removing podcasts')
        info = _('Please wait while the podcasts are removed')
        message = _('Do you really want to remove the selected podcasts and their episodes?')

    if confirm and not self.show_confirmation(message, title):
        return

    progress = ProgressIndicator(title, info, parent=self.get_dialog_parent())

    def finish_deletion(select_url):
        # Runs on the GTK main thread when the worker is done

        # Upload subscription list changes to the web service
        self.mygpo_client.on_unsubscribe([c.url for c in channels])

        # Re-load the channels and select the desired new channel
        self.update_feed_cache(force_update=False, select_url_afterwards=select_url)
        progress.on_finished()
        self.update_podcasts_tab()

    def thread_proc():
        select_url = None

        for idx, channel in enumerate(channels):
            # Update the UI for correct status messages
            progress.on_progress(float(idx)/float(len(channels)))
            progress.on_message(channel.title)

            # Delete downloaded episodes
            channel.remove_downloaded()

            # cancel any active downloads from this channel
            for episode in channel.get_all_episodes():
                util.idle_add(self.download_status_model.cancel_by_url,
                        episode.url)

            if len(channels) == 1:
                # get the URL of the podcast we want to select next
                if channel in self.channels:
                    position = self.channels.index(channel)
                else:
                    position = -1

                if position == len(self.channels)-1:
                    # this is the last podcast, so select the URL
                    # of the item before this one (i.e. the "new last")
                    # NOTE(review): when this is the ONLY podcast,
                    # position-1 is -1 and selects the channel being
                    # removed - confirm downstream handles a stale URL
                    select_url = self.channels[position-1].url
                else:
                    # there is a podcast after the deleted one, so
                    # we simply select the one that comes after it
                    select_url = self.channels[position+1].url

            # Remove the channel and clean the database entries
            channel.delete()
            self.channels.remove(channel)

        # Clean up downloads and download directories
        self.clean_up_downloads()

        self.channel_list_changed = True
        self.save_channels_opml()

        # The remaining stuff is to be done in the GTK main thread
        util.idle_add(finish_deletion, select_url)

    threading.Thread(target=thread_proc).start()
def on_itemRemoveChannel_activate(self, widget, *args):
    """Remove the currently selected podcast (with confirmation)."""
    if self.active_channel is None:
        title = _('No podcast selected')
        message = _('Please select a podcast in the podcasts list to remove.')
        self.show_message( message, title, widget=self.treeChannels)
        return
    self.remove_podcast_list([self.active_channel])
def get_opml_filter(self):
    """Build a gtk.FileFilter matching OPML/XML subscription files."""
    # Renamed from 'filter' to avoid shadowing the builtin of that name
    opml_filter = gtk.FileFilter()
    opml_filter.add_pattern('*.opml')
    opml_filter.add_pattern('*.xml')
    opml_filter.set_name(_('OPML files')+' (*.opml, *.xml)')
    return opml_filter
def on_item_import_from_file_activate(self, widget, filename=None):
    """Import subscriptions from an OPML file, asking for one if needed."""
    if filename is None:
        if gpodder.ui.desktop or gpodder.ui.fremantle:
            dlg = gtk.FileChooserDialog(title=_('Import from OPML'), \
                    parent=None, action=gtk.FILE_CHOOSER_ACTION_OPEN)
            dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
            dlg.add_button(gtk.STOCK_OPEN, gtk.RESPONSE_OK)
        elif gpodder.ui.diablo:
            dlg = hildon.FileChooserDialog(self.gPodder, gtk.FILE_CHOOSER_ACTION_OPEN)
        # NOTE(review): on any other UI flavour 'dlg' is unbound here
        # (NameError); presumably one of the branches always applies
        dlg.set_filter(self.get_opml_filter())
        response = dlg.run()
        filename = None
        if response == gtk.RESPONSE_OK:
            filename = dlg.get_filename()
        dlg.destroy()
    if filename is not None:
        dir = gPodderPodcastDirectory(self.gPodder, _config=self.config, \
                custom_title=_('Import podcasts from OPML file'), \
                add_urls_callback=self.add_podcast_list, \
                hide_url_entry=True)
        dir.download_opml_file(filename)
def on_itemExportChannels_activate(self, widget, *args):
    """Export the subscription list to an OPML file chosen by the user."""
    if not self.channels:
        title = _('Nothing to export')
        message = _('Your list of podcast subscriptions is empty. Please subscribe to some podcasts first before trying to export your subscription list.')
        self.show_message(message, title, widget=self.treeChannels)
        return
    if gpodder.ui.desktop:
        dlg = gtk.FileChooserDialog(title=_('Export to OPML'), parent=self.gPodder, action=gtk.FILE_CHOOSER_ACTION_SAVE)
        dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
        dlg.add_button(gtk.STOCK_SAVE, gtk.RESPONSE_OK)
    elif gpodder.ui.fremantle:
        dlg = gobject.new(hildon.FileChooserDialog, \
                action=gtk.FILE_CHOOSER_ACTION_SAVE)
        dlg.set_title(_('Export to OPML'))
    elif gpodder.ui.diablo:
        dlg = hildon.FileChooserDialog(self.gPodder, gtk.FILE_CHOOSER_ACTION_SAVE)
    # NOTE(review): on any other UI flavour 'dlg' is unbound below;
    # presumably one of the branches always applies
    dlg.set_filter(self.get_opml_filter())
    response = dlg.run()
    if response == gtk.RESPONSE_OK:
        filename = dlg.get_filename()
        dlg.destroy()
        exporter = opml.Exporter( filename)
        # Exporter.write() is expected to report success; a None filename
        # skips the write and falls through to the error message
        if filename is not None and exporter.write(self.channels):
            count = len(self.channels)
            title = N_('%(count)d subscription exported', '%(count)d subscriptions exported', count) % {'count':count}
            self.show_message(_('Your podcast list has been successfully exported.'), title, widget=self.treeChannels)
        else:
            self.show_message( _('Could not export OPML to file. Please check your permissions.'), _('OPML export failed'), important=True)
    else:
        dlg.destroy()
def on_itemImportChannels_activate(self, widget, *args):
    """Open the podcast directory / OPML import UI for new subscriptions."""
    if gpodder.ui.fremantle:
        gPodderPodcastDirectory.show_add_podcast_picker(self.main_window, \
                self.config.toplist_url, \
                self.config.opml_url, \
                self.add_podcast_list, \
                self.on_itemAddChannel_activate, \
                self.on_download_subscriptions_from_mygpo, \
                self.show_text_edit_dialog)
    else:
        dir = gPodderPodcastDirectory(self.main_window, _config=self.config, \
                add_urls_callback=self.add_podcast_list)
        # Defer the (blocking) OPML download until the UI is idle
        util.idle_add(dir.download_opml_file, self.config.opml_url)
def on_homepage_activate(self, widget, *args):
    """Open the gPodder homepage in the web browser."""
    util.open_website(gpodder.__url__)

def on_wiki_activate(self, widget, *args):
    """Open the user manual wiki page."""
    util.open_website('http://gpodder.org/wiki/User_Manual')

def on_bug_tracker_activate(self, widget, *args):
    """Open the bug tracker appropriate for this platform."""
    if gpodder.ui.maemo:
        util.open_website('http://bugs.maemo.org/enter_bug.cgi?product=gPodder')
    else:
        util.open_website('https://bugs.gpodder.org/enter_bug.cgi?product=gPodder')

def on_item_support_activate(self, widget):
    """Open the donations page."""
    util.open_website('http://gpodder.org/donate')
def on_itemAbout_activate(self, widget, *args):
    """Show the about dialog (Fremantle uses its own HeAboutDialog)."""
    if gpodder.ui.fremantle:
        from gpodder.gtkui.frmntl.about import HeAboutDialog
        HeAboutDialog.present(self.main_window,
                'gPodder',
                'gpodder',
                gpodder.__version__,
                _('A podcast client with focus on usability'),
                gpodder.__copyright__,
                gpodder.__url__,
                'http://bugs.maemo.org/enter_bug.cgi?product=gPodder',
                'http://gpodder.org/donate')
        return
    dlg = gtk.AboutDialog()
    dlg.set_transient_for(self.main_window)
    dlg.set_name('gPodder')
    dlg.set_version(gpodder.__version__)
    dlg.set_copyright(gpodder.__copyright__)
    dlg.set_comments(_('A podcast client with focus on usability'))
    dlg.set_website(gpodder.__url__)
    dlg.set_translator_credits( _('translator-credits'))
    dlg.connect( 'response', lambda dlg, response: dlg.destroy())
    if gpodder.ui.desktop:
        # For the "GUI" version, we add some more
        # items to the about dialog (credits and logo)
        app_authors = [
            _('Maintainer:'),
            'Thomas Perl <thp.io>',
        ]
        if os.path.exists(gpodder.credits_file):
            # Close the credits file promptly instead of leaking the handle
            with open(gpodder.credits_file) as credits_fp:
                credits = credits_fp.read().strip().split('\n')
            app_authors += ['', _('Patches, bug reports and donations by:')]
            app_authors += credits
        dlg.set_authors(app_authors)
        try:
            dlg.set_logo(gtk.gdk.pixbuf_new_from_file(gpodder.icon_file))
        except Exception:
            # A bare 'except:' would also swallow KeyboardInterrupt and
            # SystemExit; fall back to the themed icon on any pixbuf error
            dlg.set_logo_icon_name('gpodder')
    dlg.run()
def on_wNotebook_switch_page(self, widget, *args):
    """Keep title, menus and toolbar in sync with the active notebook tab."""
    # GTK passes (page, page_num); we only need the page number here
    page_num = args[1]
    if gpodder.ui.maemo:
        self.tool_downloads.set_active(page_num == 1)
    page = self.wNotebook.get_nth_page(page_num)
    tab_label = self.wNotebook.get_tab_label(page).get_text()
    # On the episode tab, prefer the selected podcast's title
    if page_num == 0 and self.active_channel is not None:
        self.set_title(self.active_channel.title)
    else:
        self.set_title(tab_label)
    if page_num == 0:
        self.play_or_download()
        self.menuChannels.set_sensitive(True)
        self.menuSubscriptions.set_sensitive(True)
        # The message area in the downloads tab should be hidden
        # when the user switches away from the downloads tab
        if self.message_area is not None:
            self.message_area.hide()
            self.message_area = None
    else:
        self.menuChannels.set_sensitive(False)
        self.menuSubscriptions.set_sensitive(False)
        if gpodder.ui.desktop:
            self.toolDownload.set_sensitive(False)
            self.toolPlay.set_sensitive(False)
            self.toolTransfer.set_sensitive(False)
            self.toolCancel.set_sensitive(False)
def on_treeChannels_row_activated(self, widget, path, *args):
    """Double-click/enter on the podcast list: move the cursor to that row."""
    # double-click action of the podcast list or enter
    self.treeChannels.set_cursor(path)
def on_treeChannels_cursor_changed(self, widget, *args):
    """Track podcast-list selection changes and refresh the episode list."""
    ( model, iter ) = self.treeChannels.get_selection().get_selected()
    if model is not None and iter is not None:
        old_active_channel = self.active_channel
        self.active_channel = model.get_value(iter, PodcastListModel.C_CHANNEL)
        # Nothing to do when the selection did not actually change
        if self.active_channel == old_active_channel:
            return
        if gpodder.ui.maemo:
            self.set_title(self.active_channel.title)
        # Dirty hack to check for "All episodes" (see gpodder.gtkui.model)
        if getattr(self.active_channel, 'ALL_EPISODES_PROXY', False):
            self.itemEditChannel.set_visible(False)
            self.itemRemoveChannel.set_visible(False)
        else:
            self.itemEditChannel.set_visible(True)
            self.itemRemoveChannel.set_visible(True)
    else:
        # No selection: disable per-channel menu items
        self.active_channel = None
        self.itemEditChannel.set_visible(False)
        self.itemRemoveChannel.set_visible(False)
    self.update_episode_list_model()
def on_btnEditChannel_clicked(self, widget, *args):
    """Button shortcut for editing the selected channel."""
    # NOTE(review): 'args' is passed as one tuple argument; harmless since
    # the target accepts *args, but '*args' was probably intended -- confirm
    self.on_itemEditChannel_activate( widget, args)
def get_podcast_urls_from_selected_episodes(self):
    """Get a set of podcast URLs based on the selected episodes"""
    return set(episode.channel.url for episode in \
            self.get_selected_episodes())

def get_selected_episodes(self):
    """Get a list of selected episodes from treeAvailable"""
    selection = self.treeAvailable.get_selection()
    model, paths = selection.get_selected_rows()
    # Map each selected tree path to its episode model object
    episodes = [model.get_value(model.get_iter(path), EpisodeListModel.C_EPISODE) for path in paths]
    return episodes
def on_transfer_selected_episodes(self, widget):
    """Sync the currently selected episodes to the configured device."""
    self.on_sync_to_ipod_activate(widget, self.get_selected_episodes())

def on_playback_selected_episodes(self, widget):
    """Play the currently selected episodes."""
    self.playback_episodes(self.get_selected_episodes())

def on_shownotes_selected_episodes(self, widget):
    """Show the shownotes of the first selected episode."""
    episodes = self.get_selected_episodes()
    if episodes:
        episode = episodes.pop(0)
        self.show_episode_shownotes(episode)
    else:
        self.show_message(_('Please select an episode from the episode list to display shownotes.'), _('No episode selected'), widget=self.treeAvailable)

def on_download_selected_episodes(self, widget):
    """Queue the selected episodes for download and refresh the UI."""
    episodes = self.get_selected_episodes()
    self.download_episode_list(episodes)
    self.update_episode_list_icons([episode.url for episode in episodes])
    self.play_or_download()
def on_treeAvailable_selection_changed(self, widget):
    """selection changed handler for treeAvailable"""
    # Only display the first episode
    if widget.count_selected_rows() == 0:
        self.clear_embedded_notes()
    else:
        e = self.get_selected_episodes()[0]
        self.display_embedded_notes(e)
def on_treeAvailable_row_activated(self, widget, path, view_column):
    """Double-click/enter action handler for treeAvailable

    Dispatches on config.double_click_episode_action:
    'download', 'stream', or (default) show notes.
    """
    # We should only have one one selected as it was double clicked!
    e = self.get_selected_episodes()[0]
    if (self.config.double_click_episode_action == 'download'):
        # If the episode has already been downloaded and exists then play it
        if e.was_downloaded(and_exists=True):
            self.playback_episodes(self.get_selected_episodes())
        # else download it if it is not already downloading
        elif not self.episode_is_downloading(e):
            self.download_episode_list([e])
            self.update_episode_list_icons([e.url])
            self.play_or_download()
    elif (self.config.double_click_episode_action == 'stream'):
        # If we happen to have downloaded this episode simple play it
        if e.was_downloaded(and_exists=True):
            self.playback_episodes(self.get_selected_episodes())
        # else if streaming is possible stream it
        elif self.streaming_possible():
            self.playback_episodes(self.get_selected_episodes())
        else:
            log('Unable to stream episode - default media player selected!', sender=self, traceback=True)
            self.show_message(_('Please check your media player settings in the preferences dialog.'), _('Unable to stream episode'), widget=self.toolPreferences)
    else:
        # default action is to display show notes
        self.on_shownotes_selected_episodes(widget)
def show_episode_shownotes(self, episode):
    """Open (lazily creating) the shownotes window for the given episode."""
    if self.episode_shownotes_window is None:
        log('First-time use of episode window --- creating', sender=self)
        # NOTE(review): unlike the other _-prefixed callbacks (passed as
        # bound methods), _streaming_possible receives the *result* of
        # streaming_possible(), evaluated once here -- confirm intended
        self.episode_shownotes_window = gPodderShownotes(self.gPodder, _config=self.config, \
                _download_episode_list=self.download_episode_list, \
                _playback_episodes=self.playback_episodes, \
                _delete_episode_list=self.delete_episode_list, \
                _episode_list_status_changed=self.episode_list_status_changed, \
                _cancel_task_list=self.cancel_task_list, \
                _episode_is_downloading=self.episode_is_downloading, \
                _streaming_possible=self.streaming_possible())
    self.episode_shownotes_window.show(episode)
    if self.episode_is_downloading(episode):
        self.update_downloads_list()
def restart_auto_update_timer(self):
    """(Re)arm the periodic feed-update timer from the current config."""
    if self._auto_update_timer_source_id is not None:
        log('Removing existing auto update timer.', sender=self)
        gobject.source_remove(self._auto_update_timer_source_id)
        self._auto_update_timer_source_id = None
    if self.config.auto_update_feeds and \
            self.config.auto_update_frequency:
        # Frequency is in minutes; gobject.timeout_add expects milliseconds
        interval = 60*1000*self.config.auto_update_frequency
        log('Setting up auto update timer with interval %d.', \
                self.config.auto_update_frequency, sender=self)
        self._auto_update_timer_source_id = gobject.timeout_add(\
                interval, self._on_auto_update_timer)
def _on_auto_update_timer(self):
    """Timer callback: refresh all feeds; returning True keeps the timer."""
    log('Auto update timer fired.', sender=self)
    self.update_feed_cache(force_update=True)
    # Ask web service for sub changes (if enabled)
    self.mygpo_client.flush()
    return True
def restart_read_timer(self,episode):
    """(Re)arm the timer that marks an enclosure-less episode as old.

    Passing episode=None only clears any pending timer.
    """
    if self._read_timer_source_id is not None:
        log('Removing existing read timer.', sender=self)
        gobject.source_remove(self._read_timer_source_id)
        self._read_timer_source_id = None
    # was called only to clear timeout
    if episode is None:
        return
    if episode.url == '':
        if episode.state == gpodder.STATE_NORMAL\
                and not episode.is_played:
            print("new episode without enclosure, prepare mark as old")
            def mark_old():
                print("marking %s as old" % episode.title)
                episode.mark_old()
                self.on_selected_episodes_status_changed()
                # One-shot: returning False removes the timeout source
                return False
            # Two seconds, in milliseconds
            interval = 1000*2
            log('Setting up mark read timer with interval %ds.', \
                    interval, sender=self)
            self._read_timer_source_id = gobject.timeout_add(\
                    interval, mark_old)
    else:
        print("episode %s with enclosure %s" % (episode.title,episode.url))
def on_treeDownloads_row_activated(self, widget, *args):
    """Act on the selected download tasks: pause, re-queue, or clear."""
    # Use the standard way of working on the treeview
    selection = self.treeDownloads.get_selection()
    (model, paths) = selection.get_selected_rows()
    # Keep TreeRowReferences so row removals don't invalidate other paths
    selected_tasks = [(gtk.TreeRowReference(model, path), model.get_value(model.get_iter(path), 0)) for path in paths]
    for tree_row_reference, task in selected_tasks:
        if task.status in (task.DOWNLOADING, task.QUEUED):
            task.status = task.PAUSED
        elif task.status in (task.CANCELLED, task.PAUSED, task.FAILED):
            self.download_queue_manager.add_task(task)
            self.enable_download_list_update()
        elif task.status == task.DONE:
            model.remove(model.get_iter(tree_row_reference.get_path()))
    self.play_or_download()
    # Update the tab title and downloads list
    self.update_downloads_list()
def on_item_cancel_download_activate(self, widget):
    """Cancel the downloads selected in whichever tab is active."""
    if self.wNotebook.get_current_page() == 0:
        # Episode-list tab: map the selected episode URLs to their tasks
        selection = self.treeAvailable.get_selection()
        (model, paths) = selection.get_selected_rows()
        urls = [model.get_value(model.get_iter(path), \
                self.episode_list_model.C_URL) for path in paths]
        selected_tasks = [task for task in self.download_tasks_seen \
                if task.url in urls]
    else:
        # Downloads tab: tasks are selected directly
        selection = self.treeDownloads.get_selection()
        (model, paths) = selection.get_selected_rows()
        selected_tasks = [model.get_value(model.get_iter(path), \
                self.download_status_model.C_TASK) for path in paths]
    self.cancel_task_list(selected_tasks)
def on_btnCancelAll_clicked(self, widget, *args):
    """Cancel every download task known to the UI."""
    self.cancel_task_list(self.download_tasks_seen)

def on_btnDownloadedDelete_clicked(self, widget, *args):
    """Delete selected episodes; a single selection may override the lock."""
    episodes = self.get_selected_episodes()
    if len(episodes) == 1:
        self.delete_episode_list(episodes, skip_locked=False)
    else:
        self.delete_episode_list(episodes)
def on_key_press(self, widget, event):
    """Keyboard handler: Ctrl+PgUp/PgDown switches tabs; Maemo +/- keys
    (F7/F8) move the podcast selection up/down with wrap-around."""
    # Allow tab switching with Ctrl + PgUp/PgDown
    if event.state & gtk.gdk.CONTROL_MASK:
        if event.keyval == gtk.keysyms.Page_Up:
            self.wNotebook.prev_page()
            return True
        elif event.keyval == gtk.keysyms.Page_Down:
            self.wNotebook.next_page()
            return True
    # After this code we only handle Maemo hardware keys,
    # so if we are not a Maemo app, we don't do anything
    if not gpodder.ui.maemo:
        return False
    diff = 0
    if event.keyval == gtk.keysyms.F7: #plus
        diff = 1
    elif event.keyval == gtk.keysyms.F8: #minus
        diff = -1
    if diff != 0 and not self.currently_updating:
        selection = self.treeChannels.get_selection()
        (model, iter) = selection.get_selected()
        # Wrap around at both ends of the channel list
        new_path = ((model.get_path(iter)[0]+diff)%len(model),)
        selection.select_path(new_path)
        self.treeChannels.set_cursor(new_path)
        return True
    return False
def on_iconify(self):
    """Window was iconified; update the taskbar hint."""
    # NOTE(review): both branches here (and in on_uniconify below) are
    # identical; upstream presumably hid the window from the taskbar when
    # a tray icon exists -- confirm this is intentional in this fork
    if self.tray_icon:
        self.gPodder.set_skip_taskbar_hint(False)
    else:
        self.gPodder.set_skip_taskbar_hint(False)

def on_uniconify(self):
    """Window was restored; make sure it shows up in the taskbar."""
    if self.tray_icon:
        self.gPodder.set_skip_taskbar_hint(False)
    else:
        self.gPodder.set_skip_taskbar_hint(False)
def uniconify_main_window(self):
    """Bring the main window to the current workspace and focus it."""
    # We need to hide and then show the window in WMs like Metacity
    # or KWin4 to move the window to the active workspace
    # (see http://gpodder.org/bug/1125)
    self.gPodder.hide()
    self.gPodder.show()
    self.gPodder.present()

def iconify_main_window(self):
    """Hide the main window unless it is already iconified."""
    if not self.is_iconified():
        self.gPodder.hide()
def update_podcasts_tab(self):
    """Refresh the 'Podcasts' tab label with the subscription count."""
    if gpodder.ui.fremantle:
        # Fremantle has no tab label to update
        return
    caption = _('Podcasts')
    channel_count = len(self.channels)
    if channel_count:
        caption += ' (%d)' % channel_count
    self.label2.set_text(caption)
@dbus.service.method(gpodder.dbus_interface)
def show_gui_window(self):
    """D-Bus method: raise the main window (or its current dialog parent)."""
    parent = self.get_dialog_parent()
    parent.present()
@dbus.service.method(gpodder.dbus_interface)
def subscribe_to_url(self, url):
    """D-Bus method: open the 'Add podcast' dialog pre-filled with url."""
    gPodderAddPodcast(self.gPodder,
            add_urls_callback=self.add_podcast_list,
            preset_url=url)
@dbus.service.method(gpodder.dbus_interface)
def mark_episode_played(self, filename):
    """D-Bus method: mark the episode downloaded to *filename* as played.

    Returns True if a matching episode was found, False otherwise.
    """
    if filename is None:
        return False
    # Linear scan over all episodes of all channels for a matching file
    for channel in self.channels:
        for episode in channel.get_all_episodes():
            fn = episode.local_filename(create=False, check_only=True)
            if fn == filename:
                episode.mark(is_played=True)
                self.db.commit()
                self.update_episode_list_icons([episode.url])
                self.update_podcast_list_model([episode.channel.url])
                return True
    return False
def main(options=None):
    """Program entry point: set up GTK/D-Bus and run the gPodder UI.

    options -- parsed command-line options object
    """
    # NOTE(review): the options=None default would crash below at
    # options.subscribe -- callers presumably always pass options
    gobject.threads_init()
    gobject.set_application_name('gPodder')
    if gpodder.ui.maemo:
        # Try to enable the custom icon theme for gPodder on Maemo
        settings = gtk.settings_get_default()
        settings.set_string_property('gtk-icon-theme-name', \
                'gpodder', __file__)
        # Extend the search path for the optified icon theme (Maemo 5)
        icon_theme = gtk.icon_theme_get_default()
        icon_theme.prepend_search_path('/opt/gpodder-icon-theme/')
        # Add custom icons for the new Maemo 5 look :)
        for id in ('audio', 'video', 'download', 'audio-locked', 'video-locked'):
            filename = os.path.join(gpodder.images_folder, '%s.png' % id)
            pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
            gtk.icon_theme_add_builtin_icon('gpodder-%s' % id, 40, pixbuf)
    gtk.window_set_default_icon_name('gpodder')
    gtk.about_dialog_set_url_hook(lambda dlg, link, data: util.open_website(link), None)
    try:
        dbus_main_loop = dbus.glib.DBusGMainLoop(set_as_default=True)
        gpodder.dbus_session_bus = dbus.SessionBus(dbus_main_loop)
        bus_name = dbus.service.BusName(gpodder.dbus_bus_name, bus=gpodder.dbus_session_bus)
    except dbus.exceptions.DBusException, dbe:
        # Without a bus name gPodder cannot run; tell the user and bail out
        log('Warning: Cannot get "on the bus".', traceback=True)
        dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, \
                gtk.BUTTONS_CLOSE, _('Cannot start gPodder'))
        dlg.format_secondary_markup(_('D-Bus error: %s') % (str(dbe),))
        dlg.set_title('gPodder')
        dlg.run()
        dlg.destroy()
        sys.exit(0)
    util.make_directory(gpodder.home)
    gpodder.load_plugins()
    config = UIConfig(gpodder.config_file)
    # Load hook modules and install the hook manager globally
    # if modules have been found an instantiated by the manager
    user_hooks = hooks.HookManager()
    if user_hooks.has_modules():
        gpodder.user_hooks = user_hooks
    if gpodder.ui.diablo:
        # Detect changing of SD cards between mmc1/mmc2 if a gpodder
        # folder exists there (allow moving "gpodder" between SD cards or USB)
        # Also allow moving "gpodder" to home folder (e.g. rootfs on SD)
        if not os.path.exists(config.download_dir):
            log('Downloads might have been moved. Trying to locate them...')
            for basedir in ['/media/mmc1', '/media/mmc2']+glob.glob('/media/usb/*')+['/home/user/MyDocs']:
                dir = os.path.join(basedir, 'gpodder')
                if os.path.exists(dir):
                    log('Downloads found in: %s', dir)
                    config.download_dir = dir
                    break
                else:
                    log('Downloads NOT FOUND in %s', dir)
        if config.enable_fingerscroll:
            BuilderWidget.use_fingerscroll = True
    # Tell the web service what kind of device this is
    config.mygpo_device_type = util.detect_device_type()
    gp = gPodder(bus_name, config)
    # Handle options
    if options.subscribe:
        util.idle_add(gp.subscribe_to_url, options.subscribe)
    # mac OS X stuff :
    # handle "subscribe to podcast" events from firefox
    if platform.system() == 'Darwin':
        from gpodder import gpodderosx
        gpodderosx.register_handlers(gp)
    # end mac OS X stuff
    gp.run()
|
elelay/gPodderAsRSSReader
|
src/gpodder/gui.py
|
Python
|
gpl-3.0
| 207,780
|
[
"VisIt"
] |
573b6f37bf03de8af9ea222ef53f42884904ccf8011d5b353145c6a5ba8e9323
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import time
import itertools
import numpy as np
import mdtraj as md
from mdtraj.testing import eq, skipif, get_fn, assert_allclose
from mdtraj.geometry.distance import compute_distances, compute_displacements, find_closest_contact
from mdtraj.geometry.distance import _displacement_mic, _displacement
# Shared fixtures: random coordinates for N_FRAMES frames of N_ATOMS atoms,
# every unique atom pair, and a topology-less trajectory whose unitcell
# vectors are random perturbations of 2*identity (identity-dominated,
# presumably to keep the cells from degenerating -- see triclinic tests)
N_FRAMES = 20
N_ATOMS = 20
xyz = np.asarray(np.random.randn(N_FRAMES, N_ATOMS, 3), dtype=np.float32)
pairs = np.array(list(itertools.combinations(range(N_ATOMS), 2)), dtype=np.int32)
ptraj = md.Trajectory(xyz=xyz, topology=None)
ptraj.unitcell_vectors = np.ascontiguousarray(np.random.randn(N_FRAMES, 3, 3) + 2*np.eye(3,3), dtype=np.float32)
def test_generator():
    """compute_distances accepts a generator of pairs as well as an array."""
    pairs2 = itertools.combinations(range(N_ATOMS), 2)
    a = compute_distances(ptraj, pairs)
    b = compute_distances(ptraj, pairs2)
    eq(a, b)
def test_0():
    """Optimized and reference distances agree (no PBC)."""
    a = compute_distances(ptraj, pairs, periodic=False, opt=True)
    b = compute_distances(ptraj, pairs, periodic=False, opt=False)
    eq(a, b)

def test_1():
    """Optimized and reference displacements agree (no PBC)."""
    a = compute_displacements(ptraj, pairs, periodic=False, opt=True)
    b = compute_displacements(ptraj, pairs, periodic=False, opt=False)
    eq(a, b)

def test_2():
    """Distances equal the norms of displacements (reference path, no PBC)."""
    a = compute_distances(ptraj, pairs, periodic=False, opt=False)
    b = compute_displacements(ptraj, pairs, periodic=False, opt=False)
    eq(a, np.sqrt(np.sum(np.square(b), axis=2)))

def test_3():
    """Distances equal the norms of displacements (optimized path, no PBC)."""
    a = compute_distances(ptraj, pairs, periodic=False, opt=True)
    b = compute_displacements(ptraj, pairs, periodic=False, opt=True)
    eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_0p():
    """Optimized and reference distances agree under PBC (loose tolerance)."""
    a = compute_distances(ptraj, pairs, periodic=True, opt=True)
    b = compute_distances(ptraj, pairs, periodic=True, opt=False)
    eq(a, b, decimal=3)

def test_1p():
    """Optimized and reference displacements agree under PBC."""
    a = compute_displacements(ptraj, pairs, periodic=True, opt=True)
    b = compute_displacements(ptraj, pairs, periodic=True, opt=False)
    eq(a, b, decimal=3)

def test_2p():
    """Distances equal the norms of displacements (reference path, PBC)."""
    a = compute_distances(ptraj, pairs, periodic=True, opt=False)
    b = compute_displacements(ptraj, pairs, periodic=True, opt=False)
    # Also pin the expected output shapes
    assert a.shape == (len(ptraj), len(pairs))
    assert b.shape == (len(ptraj), len(pairs), 3), str(b.shape)
    b = np.sqrt(np.sum(np.square(b), axis=2))
    eq(a, b, decimal=5)

def test_3p():
    """Distances equal the norms of displacements (optimized path, PBC)."""
    a = compute_distances(ptraj, pairs, periodic=True, opt=True)
    b = compute_displacements(ptraj, pairs, periodic=True, opt=True)
    eq(a, np.sqrt(np.sum(np.square(b), axis=2)))
def test_4():
    """With a huge box, minimum-image displacements match plain ones."""
    # using a really big box, we should get the same results with and without
    # pbcs
    box = np.array([[100, 0, 0], [0, 200, 0], [0, 0, 300]])
    box = np.zeros((N_FRAMES, 3, 3)) + box #broadcast it out
    a = _displacement_mic(xyz, pairs, box, False)
    b = _displacement(xyz, pairs)
    eq(a, b, decimal=3)
def test_5():
    """Minimum-image convention wraps a 2.2 z-separation in a unit box to 0.2."""
    # simple wrap around along the z axis.
    xyz = np.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 2.2]]])
    box = np.eye(3,3).reshape(1,3,3)
    result = _displacement_mic(xyz, np.array([[0,1]]), box, True)
    eq(result, np.array([[[0, 0, 0.2]]]))
def test_6():
    """Amber trajectory with well-behaved box vs. external reference."""
    # reference values divided by 10: presumably Angstrom -> nm
    ext_ref = np.array([17.4835, 22.2418, 24.2910, 22.5505, 12.8686, 22.1090,
                        7.4472, 22.4253, 19.8283, 20.6935]) / 10
    _run_amber_traj('test_good.nc', ext_ref)

def test_7():
    """Amber trajectory with a problematic (triclinic) box vs. reference."""
    ext_ref = np.array([30.9184, 23.9040, 25.3869, 28.0060, 25.9704, 24.6836,
                        23.0508, 27.1983, 24.4954, 26.7448]) / 10
    _run_amber_traj('test_bad.nc', ext_ref)
def _run_amber_traj(trajname, ext_ref):
    """Load an Amber NetCDF trajectory and cross-check all distance paths.

    trajname -- NetCDF trajectory file name (resolved via get_fn)
    ext_ref -- externally computed reference distances (nm) for pair (0, 9999)
    """
    # Test triclinic case where simple approach in Tuckerman text does not
    # always work
    traj = md.load(get_fn(trajname), top=get_fn('test.parm7'))
    distopt = md.compute_distances(traj, [[0, 9999]], opt=True)
    distslw = md.compute_distances(traj, [[0, 9999]], opt=False)
    dispopt = md.compute_displacements(traj, [[0, 9999]], opt=True)
    dispslw = md.compute_displacements(traj, [[0, 9999]], opt=False)
    eq(distopt, distslw, decimal=5)
    eq(dispopt, dispslw, decimal=5)
    assert_allclose(distopt.flatten(), ext_ref, atol=2e-5)
    # Make sure distances from displacements are the same
    eq(np.sqrt((dispopt.squeeze()**2).sum(axis=1)), distopt.squeeze())
    eq(np.sqrt((dispslw.squeeze()**2).sum(axis=1)), distslw.squeeze())
    # (a duplicated eq(dispopt, dispslw) assertion was removed; it is
    # already checked above)
def test_closest_contact():
    """find_closest_contact is verified without a box, then with an
    orthorhombic box, then with a triclinic box."""
    box_size = np.array([3.0, 4.0, 5.0])
    traj = md.Trajectory(xyz=xyz*box_size, topology=None)
    _verify_closest_contact(traj)
    # orthorhombic box
    traj.unitcell_lengths = np.array([box_size for i in range(N_FRAMES)])
    traj.unitcell_angles = np.array([[90.0, 90.0, 90.0] for i in range(N_FRAMES)])
    _verify_closest_contact(traj)
    # triclinic box
    traj.unitcell_angles = np.array([[80.0, 90.0, 100.0] for i in range(N_FRAMES)])
    _verify_closest_contact(traj)
def _verify_closest_contact(traj):
    """Cross-check find_closest_contact against an exhaustive pair scan.

    Splits the atoms into two halves, brute-forces all inter-group
    distances, and asserts that find_closest_contact reports the same
    closest pair and distance (to 5 decimals).
    """
    # np.int was a deprecated alias of the builtin int and was removed in
    # NumPy 1.24; use int directly (identical dtype semantics).
    group1 = np.array([i for i in range(N_ATOMS//2)], dtype=int)
    group2 = np.array([i for i in range(N_ATOMS//2, N_ATOMS)], dtype=int)
    contact = find_closest_contact(traj, group1, group2)
    pairs = np.array([(i,j) for i in group1 for j in group2], dtype=int)
    dists = md.compute_distances(traj, pairs, True)[0]
    # also exercise the non-optimized code path (result intentionally unused)
    dists2 = md.compute_distances(traj, pairs, False)[0]
    nearest = np.argmin(dists)
    eq(float(dists[nearest]), contact[2], decimal=5)
    # the reported pair may come back in either order
    assert((pairs[nearest,0] == contact[0] and pairs[nearest,1] == contact[1]) or (pairs[nearest,0] == contact[1] and pairs[nearest,1] == contact[0]))
def test_distance_nan():
    """A NaN atom must not poison distances between other, finite atoms."""
    xyz = np.array([[1,1,1], [2,1,1], [np.nan, np.nan, np.nan]]).reshape(1,3,3)
    dists = md.compute_distances(md.Trajectory(xyz=xyz, topology=None), [[0,1]])
    assert np.isfinite(dists).all()
def test_closest_contact_nan_pos():
    """NaNs in frames that were sliced away must not affect the result."""
    box_size = np.array([3.0, 4.0, 5.0])
    xyz = np.asarray(np.random.randn(2, 20, 3), dtype=np.float32)
    xyz *= box_size
    # Set the last frame to nan
    xyz[-1] = np.nan
    # Slice of the last frame, so nans should not cause troubles.
    xyz = xyz[:-1]
    traj = md.Trajectory(xyz=xyz, topology=None)
    _verify_closest_contact(traj)
|
msultan/mdtraj
|
mdtraj/geometry/tests/test_distance.py
|
Python
|
lgpl-2.1
| 7,022
|
[
"MDTraj"
] |
6a981dc3b747cc3d8b0588f69c71a943f492cb6b70fc302c990e528641668593
|
#!/usr/bin/env python
# file exclude_seqs_by_blast.py
from __future__ import division
"""
A lightweight script for BLASTing one or more sequences against a number of BLAST databases, and returning FASTA files a) of the results that did match b) of the results that didn't match c) raw blast results and also d) returning a report containing the parameters used, which sequences were excluded and why.
"""
from os.path import join
from time import strftime, time
from skbio.parse.sequences import parse_fasta
from bfillings.blast import blast_seqs, Blastall, BlastResult
__author__ = "Jesse Zaneveld"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jesse Zaneveld", "Rob Knight", "Adam Robbins-Pianka"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jesse Zaneveld"
__email__ = "zaneveld@gmail.com"
FORMAT_BAR = """------------------------------""" * 2
def blast_genome(seqs, blast_db, e_value, max_hits, word_size, working_dir,
                 blast_mat_root, extra_params=[], DEBUG=True):
    """Blast sequences against all genes in a genome

    seqs -- input sequences as strings
    blast_db -- path to blast database
    e_value -- e_value (float)
    max_hits -- maximum sequences detected by BLAST to show
    word_size -- word size for initial BLAST screen.
    working_dir -- working directory handed to the blastall controller
    blast_mat_root -- location of BLAST matrix files
    extra_params -- additional paramters to pass to BLAST
    DEBUG -- display verbose debugging outout

    Returns the raw blastall stdout lines (tabular, -m 9 format).
    """
    # NOTE(review): extra_params=[] is a mutable default (harmless here,
    # it is never mutated), and params.update() is called with it below --
    # callers presumably pass a dict, as find_homologs' default suggests
    # set up params to use with blastp or
    params = {
        # matrix
        "-M": "BLOSUM62",
        # max procs
        "-a": "1",
        # expectation
        "-e": e_value,
        # max seqs to show
        "-b": max_hits,
        # Word size
        "-W": word_size,
        # max one line descriptions
        "-v": max_hits,
        # tabular output
        "-m": "9",
        # program
        "-p": "blastn"
    }
    params.update(extra_params)
    output = blast_seqs(seqs,
                        Blastall,
                        blast_db=blast_db,
                        params=params,
                        WorkingDir=working_dir,
                        add_seq_names=False,
                        blast_mat_root=blast_mat_root)
    raw_output = [x for x in output['StdOut']]
    return raw_output
def find_homologs(query_file, subject_genome, e_value, max_hits,
                  working_dir, blast_mat_root, wordsize,
                  percent_aligned, extra_params={},
                  require_hit=False, DEBUG=True):
    """BLAST query_file against subject_genome

    query_file -- .nuc file or other FASTA file to BLAST against all files in file_list
    subject_genome -- path to a KEGG .nuc file or other FASTA formated file.
    e-value -- e-value threshold for blasts
    percent_aligned -- minumum percent alignment, between 0.0 and 1.0
    max_hits,blast_mat_root,extra_params -- these are passed along to blastn
    DEBUG -- if True, display debugging output

    Returns (raw_blast_lines, kept_query_ids, removed_query_ids).
    """
    # NOTE(review): start_time, raw_blast_output and require_hit are unused
    # in this function body -- presumably leftovers; confirm before removal
    start_time = time()
    raw_blast_output = []
    # Raw lines (universal newlines) are handed to blast_genome as-is
    seqs = open(query_file, "U").readlines()
    if DEBUG:
        print "BLASTING %s vs. %s" % (query_file, subject_genome)
    blast_db = subject_genome
    raw_output_data = blast_genome(seqs,
                                   blast_db, e_value,
                                   max_hits, wordsize, working_dir,
                                   blast_mat_root, extra_params,
                                   DEBUG=DEBUG)
    if DEBUG:
        print "Length of raw BLAST results:", len(raw_output_data)
    curr_blast_result = BlastResult(raw_output_data)
    # Keep only hits at or above the %-identity cutoff
    align_filter = make_percent_align_filter(percent_aligned)
    # should a mismatch filter be added?
    filtered_ids, removed_ids = query_ids_from_blast_result(curr_blast_result,
                                                            align_filter, DEBUG=DEBUG)
    return raw_output_data, filtered_ids, removed_ids
def sequences_to_file(results, outfile_name):
    """Write an iterable of (label, seq) tuples to a FASTA file.

    results -- iterable of (label, sequence) string pairs
    outfile_name -- path of the FASTA file to (over)write
    """
    # 'with' guarantees the handle is closed even if iteration raises;
    # the original opened with 'w+' but never read, so plain 'w' suffices.
    with open(outfile_name, 'w') as f:
        for label, seq in results:
            f.writelines([">%s\n" % label, "%s\n" % seq])
def no_filter(blast_subject_entry):
    """Identity filter: accept every BLAST subject entry unconditionally."""
    return True
def make_percent_align_filter(min_percent):
    """Create a predicate keeping BLAST hits at/above a %-identity cutoff.

    min_percent -- minimum fractional identity between 0 and 1
    """
    # BLAST tabular output reports identity on a 0-100 scale
    threshold = float(min_percent) * 100

    def align_filter(blast_result):
        # keep the hit only when its identity reaches the threshold
        return float(blast_result['% IDENTITY']) >= threshold

    return align_filter
def check_align_percent_field(d):
    """Report whether the '% IDENTITY' field of *d* is non-empty."""
    return bool(d['% IDENTITY'])
def query_ids_from_blast_result(
        blast_result, filter_fn=no_filter, DEBUG=False):
    """Return (ok_ids, removed_ids) sets of query ids from a BLAST result.

    blast_result -- BLAST result from the BLAST app controller
    filter_fn -- predicate applied to each result dict; entries failing it
        contribute their query id to the removed set instead
    DEBUG -- accepted for interface compatibility; unused in this body
    """
    kept = []
    dropped = []
    for query_id in blast_result:
        for entry in blast_result[query_id]:
            for fields in entry:
                # Skip rows whose percent-identity field is empty.
                if not check_align_percent_field(fields):
                    continue
                if filter_fn(fields):
                    kept.append(fields['QUERY ID'])
                else:
                    dropped.append(query_id)
    ok_ids = set(kept)
    # A query with at least one passing hit must not be reported as removed.
    removed_ids = set(dropped) - ok_ids
    return ok_ids, removed_ids
def ids_from_fasta_lines(lines):
    """Extract sequence ids from the label lines of a FASTA file.

    lines -- iterable of FASTA file lines; non-label lines are ignored.

    Idiom fix: manual append loop replaced with a list comprehension.
    """
    return [id_from_fasta_label_line(line)
            for line in lines
            if line.startswith(">")]
def id_from_fasta_label_line(line):
    """Return the sequence id (first whitespace field, '>' stripped)."""
    first_field = line.split()[0]
    return first_field.strip(">")
def seqs_from_file(ids, file_lines):
    """Yield (label, seq) pairs for FASTA records whose id is in ids.

    ids -- collection of wanted sequence ids
    file_lines -- FASTA file content as a sequence of lines
    """
    for rec_label, rec_seq in parse_fasta(file_lines):
        if id_from_fasta_label_line(rec_label) in ids:
            yield rec_label, rec_seq
def compose_logfile_lines(start_time, db_format_time, blast_time, option_lines,
                          formatdb_cmd, blast_results, options, all_ids,
                          hit_ids, removed_hit_ids,
                          included_ids, DEBUG):
    """Compose lines for a logfile from data on analysis

    start_time -- time() value at which the overall analysis started
    db_format_time -- seconds spent formatting the subject database
    blast_time -- seconds spent in the BLAST search
    option_lines -- preformatted lines describing the run options
    formatdb_cmd -- the command used to format the subject database
    blast_results -- raw BLAST output lines ('#' lines are comments)
    options -- parsed options object; only options.outputdir is read here
    all_ids -- ids of all input query sequences
    hit_ids -- query ids passing the e-value and percent alignment filters
    removed_hit_ids -- query ids removed by the percent alignment filter
    included_ids -- ids written to the screened output file
    DEBUG -- if True, also print the (unterminated) log lines to stdout
    """
    log_lines = []
    log_lines.append("Sequence exclusion analysis run on %s" % strftime("%c"))
    # NOTE(review): "%2.f" is likely a typo for "%.2f" (prints no decimals);
    # left as-is since the format string is runtime output.
    log_lines.append(
        "Formatting subject database took %2.f seconds" %
        (db_format_time))
    log_lines.append(
        "BLAST search took %2.f minute(s)" %
        ((blast_time) / 60.0))
    log_lines.append(
        "Total analysis completed in %2.f minute(s)" %
        ((time() - start_time) / 60.0))
    log_lines.append(FORMAT_BAR)
    log_lines.append(
        "| Options |")
    log_lines.append(FORMAT_BAR)
    log_lines.extend(option_lines)
    log_lines.append("Subject database formatted with command: %s"
                     % formatdb_cmd)
    log_lines.append(FORMAT_BAR)
    log_lines.append(
        "| Results |")
    log_lines.append(FORMAT_BAR)
    log_lines.append("BLAST results above e-value threshold:")
    # Tab-separated header matching the standard BLAST -m 9 column layout.
    log_lines.append(
        "\t".join(["Query id", "Subject id", "percent identity", "alignment length",
                   "mismatches", "gap openings", "q. start", "q. end", "s. start", "s. end", "e-value", "bit score"]))
    # Copy the raw BLAST hit lines, skipping '#' comment lines.
    for line in blast_results:
        if line.startswith("#"):
            continue
        else:
            log_lines.append(line)
    log_lines.append(
        "Hits matching e-value and percent alignment filter: %s" %
        ','.join(sorted(hit_ids)))
    log_lines.append(FORMAT_BAR)
    log_lines.append(
        "| Summary |")
    log_lines.append(FORMAT_BAR)
    log_lines.append("Input query sequences: %i" % len(all_ids))
    log_lines.append(
        "Query hits from BLAST: %i" %
        (len(hit_ids) + len(removed_hit_ids)))
    log_lines.append(
        "Query hits from BLAST lacking minimal percent alignment: %i" %
        len(removed_hit_ids))
    log_lines.append("Final hits: %i" % len(hit_ids))
    log_lines.append("Output screened sequences: %i" % len(included_ids))
    log_lines.append(FORMAT_BAR)
    log_lines.append(
        "| Output |")
    log_lines.append(FORMAT_BAR)
    log_lines.append(
        "Writing excluded sequences (hits matching filters) to: %s" %
        join(options.outputdir, "matching.fna"))
    log_lines.append(
        "Writing screened sequences (excluding hits matching filters) to: %s" %
        join(options.outputdir, "non-matching.fna"))
    log_lines.append(
        "Writing raw BLAST results to: %s" %
        join(options.outputdir, 'raw_blast_results.txt'))
    # format for printing
    revised_log_lines = []
    for line in log_lines:
        line = line + "\n"
        revised_log_lines.append(line)
    if DEBUG:
        for line in log_lines:
            print line
    return revised_log_lines
def check_options(parser, options):
    """Check to insure required options have been supplied

    parser -- the option parser; parser.error() (which exits) is invoked
        when a required option is missing or invalid
    options -- parsed options; percent_aligned, querydb, subjectdb and
        outputdir are validated here
    """
    # percent_aligned is a fraction; a value above 1.0 suggests the user
    # supplied a percentage (e.g. 97) instead of 0.97.
    if options.percent_aligned > 1.0:
        parser.error(
            "Please check -p option: should be between 0.0(0%) and 1.0(100%)")
    if options.querydb is None:
        parser.error(
            "Please check -i option: must specify path to a FASTA file")
    # Verify the query FASTA file is readable.
    try:
        f = open(options.querydb, 'r')
        f.close()
    except IOError:
        parser.error(
            "Please check -i option: cannot read from query FASTA filepath")
    if options.subjectdb is None:
        parser.error(
            "Please check -d option: must specify path to a FASTA file")
    # Verify the subject FASTA file is readable.
    try:
        f = open(options.subjectdb, 'r')
        f.close()
    except IOError:
        parser.error(
            "Please check -d option: cannot read from subject FASTA filepath")
    if options.outputdir is None:
        parser.error(
            "Please check -o option: must specify the output directory path")
def format_options_as_lines(options):
    """Format the options object as one log line per option.

    options -- any object; its str() representation (e.g. an optparse
        Values dict-style repr) is split on commas and the surrounding
        braces stripped.

    Idiom fix: split() already yields strings, so the redundant per-field
    str() call and the manual append loop were replaced by a comprehension.
    """
    return [field.strip("{").strip("}")
            for field in str(options).split(",")]
def ids_to_seq_file(ids, infile, outfile, suffix=''):
    """Write the FASTA records from infile whose ids appear in ids.

    ids -- collection of sequence ids to keep
    infile -- path to the source FASTA file
    outfile -- base output path
    suffix -- appended to outfile to form the final output path
    """
    fasta_lines = open(infile).readlines()
    selected = seqs_from_file(ids, fasta_lines)
    sequences_to_file(selected, outfile + suffix)
|
wasade/qiime
|
qiime/exclude_seqs_by_blast.py
|
Python
|
gpl-2.0
| 11,970
|
[
"BLAST"
] |
93f49b030712d14251a9919cf86b1b070dfb5cd5b23626e163705473f4e997bd
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Canonicalizes functions with multiple returns to use just one."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# TODO(mdan): Move this logic into transformer_base.
class BodyVisitor(converter.Base):
  """Walks breadth- or depth-first the list-of-nodes bodies of AST nodes.

  Each compound-statement visitor runs visit_nodelist over the node's
  statement lists (body, orelse, finalbody, handler bodies) either before
  or after visiting the node's own children, depending on ``depth_first``.
  """

  def __init__(self, ctx, depth_first=False):
    super(BodyVisitor, self).__init__(ctx)
    # When True, children are visited before the statement lists.
    self.depth_first = depth_first
    # Set by subclasses (e.g. FoldElse) when a rewrite occurred.
    self.changes_made = False

  def visit_nodelist(self, nodelist):
    # NOTE(review): the per-node visit results are discarded and the same
    # list object is returned; this presumably relies on visits mutating
    # nodes in place — confirm against converter.Base semantics.
    for node in nodelist:
      if isinstance(node, list):
        node = self.visit_nodelist(node)
      else:
        node = self.generic_visit(node)
    return nodelist

  def visit_If(self, node):
    # Children first when depth_first, otherwise bodies first.
    if self.depth_first:
      node = self.generic_visit(node)
    node.body = self.visit_nodelist(node.body)
    node.orelse = self.visit_nodelist(node.orelse)
    if not self.depth_first:
      node = self.generic_visit(node)
    return node

  def visit_For(self, node):
    if self.depth_first:
      node = self.generic_visit(node)
    node.body = self.visit_nodelist(node.body)
    node.orelse = self.visit_nodelist(node.orelse)
    if not self.depth_first:
      node = self.generic_visit(node)
    return node

  def visit_While(self, node):
    if self.depth_first:
      node = self.generic_visit(node)
    node.body = self.visit_nodelist(node.body)
    node.orelse = self.visit_nodelist(node.orelse)
    if not self.depth_first:
      node = self.generic_visit(node)
    return node

  def visit_Try(self, node):
    if self.depth_first:
      node = self.generic_visit(node)
    node.body = self.visit_nodelist(node.body)
    node.orelse = self.visit_nodelist(node.orelse)
    node.finalbody = self.visit_nodelist(node.finalbody)
    # Each exception handler carries its own statement list.
    for i in range(len(node.handlers)):
      node.handlers[i].body = self.visit_nodelist(node.handlers[i].body)
    if not self.depth_first:
      node = self.generic_visit(node)
    return node

  def visit_With(self, node):
    if self.depth_first:
      node = self.generic_visit(node)
    node.body = self.visit_nodelist(node.body)
    if not self.depth_first:
      node = self.generic_visit(node)
    return node

  def visit_FunctionDef(self, node):
    if self.depth_first:
      node = self.generic_visit(node)
    node.body = self.visit_nodelist(node.body)
    # NOTE(review): generic_visit is called here unconditionally and then
    # again below when not depth_first, so children may be visited twice —
    # verify this is intentional.
    self.generic_visit(node)
    if not self.depth_first:
      node = self.generic_visit(node)
    return node
class FoldElse(BodyVisitor):
  """Folds statements that follow a returning conditional into its branches.

  If exactly one branch of a conditional ends in a return, every statement
  after the conditional logically belongs to the other branch; this visitor
  moves those statements there so the conditional becomes the last
  statement of its block.
  """

  def visit_nodelist(self, nodelist):
    for i in range(len(nodelist)):
      node = nodelist[i]
      if isinstance(node, gast.If):
        true_branch_returns = isinstance(node.body[-1], gast.Return)
        false_branch_returns = len(node.orelse) and isinstance(
            node.orelse[-1], gast.Return)
        # If the last node in the if body is a return,
        # then every line after this if statement effectively
        # belongs in the else.
        if true_branch_returns and not false_branch_returns:
          # Copy the trailing statements into the else branch and
          # truncate the block at the conditional.
          for j in range(i + 1, len(nodelist)):
            nodelist[i].orelse.append(ast_util.copy_clean(nodelist[j]))
          if nodelist[i + 1:]:
            self.changes_made = True
          return nodelist[:i + 1]
        elif not true_branch_returns and false_branch_returns:
          # Symmetric case: fold the trailing statements into the if body.
          for j in range(i + 1, len(nodelist)):
            nodelist[i].body.append(ast_util.copy_clean(nodelist[j]))
          if nodelist[i + 1:]:
            self.changes_made = True
          return nodelist[:i + 1]
        elif true_branch_returns and false_branch_returns:
          # Both branches return: anything after the conditional is dead.
          if nodelist[i + 1:]:
            raise ValueError(
                'Unreachable code after conditional where both branches return.'
            )
          return nodelist
      elif isinstance(node, gast.Return) and nodelist[i + 1:]:
        raise ValueError(
            'Cannot have statements after a return in the same basic block')
    return nodelist
def contains_return(node):
  """Returns True if any node in the subtree rooted at node is a Return.

  Idiom fix: the manual walk-and-flag loop is equivalent to any() over
  gast.walk, which also short-circuits on the first match.
  """
  return any(isinstance(n, gast.Return) for n in gast.walk(node))
class LiftReturn(converter.Base):
  """Move return statements out of If and With blocks."""

  def __init__(self, ctx):
    super(LiftReturn, self).__init__(ctx)
    # True when at least one return was lifted during the current pass.
    self.changes_made = False
    # Name of the synthetic variable holding the function's return value;
    # assigned per-function in visit_FunctionDef.
    self.common_return_name = None

  def visit_If(self, node):
    # Depth-first traversal of if statements
    node = self.generic_visit(node)
    # We check if both branches return, and if so, lift the return out of the
    # conditional. We don't enforce that the true and false branches either
    # both return or both do not, because FoldElse might move a return
    # into a branch after this transform completes. FoldElse and LiftReturn
    # are alternately run until the code reaches a fixed point.
    true_branch_returns = isinstance(node.body[-1], gast.Return)
    false_branch_returns = len(node.orelse) and isinstance(
        node.orelse[-1], gast.Return)
    if true_branch_returns and false_branch_returns:
      # Replace 'return x' in each branch with an assignment to the common
      # return variable, then emit a single return of it after the if.
      node.body[-1] = templates.replace(
          'a = b', a=self.common_return_name, b=node.body[-1].value)[0]
      node.orelse[-1] = templates.replace(
          'a = b', a=self.common_return_name, b=node.orelse[-1].value)[0]
      return_node = templates.replace('return a', a=self.common_return_name)[0]
      self.changes_made = True
      return [node, return_node]
    else:
      return node

  def visit_With(self, node):
    # Depth-first traversal of syntax
    node = self.generic_visit(node)
    # If the with statement returns, lift the return
    if isinstance(node.body[-1], gast.Return):
      node.body[-1] = templates.replace(
          'a = b', a=self.common_return_name, b=node.body[-1].value)[0]
      return_node = templates.replace('return a', a=self.common_return_name)[0]
      node = self.generic_visit(node)
      self.changes_made = True
      return [node, return_node]
    else:
      return node

  def visit_FunctionDef(self, node):
    # Ensure we're doing depth-first traversal
    last_return_name = self.common_return_name
    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    referenced_names = body_scope.referenced
    # Pick a fresh symbol that cannot collide with names used in the body.
    self.common_return_name = self.ctx.namer.new_symbol('return_',
                                                        referenced_names)
    node = self.generic_visit(node)
    # Restore the enclosing function's return variable (handles nesting).
    self.common_return_name = last_return_name
    return node
class DetectReturnInUnsupportedControlFlow(gast.NodeVisitor):
  """Throws an error if code returns inside loops or try/except."""

  # First, throw an error if we detect a return statement in a loop.
  # TODO(alexbw): we need to learn to handle returns inside a loop,
  # but don't currently have the TF constructs to do so (need something
  # that looks vaguely like a goto).

  def __init__(self):
    # True while the visitor is inside a construct that may not contain
    # a return (loop body or try statement).
    self.cant_return = False
    super(DetectReturnInUnsupportedControlFlow, self).__init__()

  def _visit_no_return_region(self, node):
    # Bug fix: the previous code reset cant_return to False unconditionally
    # after visiting, so a `return` appearing in an *outer* loop after a
    # nested loop/try went undetected. Saving and restoring the enclosing
    # value keeps the flag correct under nesting.
    enclosing = self.cant_return
    self.cant_return = True
    self.generic_visit(node)
    self.cant_return = enclosing

  def visit_While(self, node):
    self._visit_no_return_region(node)

  def visit_For(self, node):
    self._visit_no_return_region(node)

  def visit_Try(self, node):
    self._visit_no_return_region(node)

  def visit_Return(self, node):
    if self.cant_return:
      raise ValueError(
          '`return` statements are not supported in loops. '
          'Try assigning to a variable in the while loop, and returning '
          'outside of the loop')
class DetectReturnInConditional(gast.NodeVisitor):
  """Assert that no return statements are present in conditionals."""

  def __init__(self):
    # True while the visitor is inside an if statement.
    self.cant_return = False
    super(DetectReturnInConditional, self).__init__()

  def visit_If(self, node):
    # Bug fix: save and restore the enclosing flag instead of resetting it
    # to False, so a return that appears after a *nested* conditional but
    # still inside an outer one is also detected.
    enclosing = self.cant_return
    self.cant_return = True
    self.generic_visit(node)
    self.cant_return = enclosing

  def visit_Return(self, node):
    if self.cant_return:
      raise ValueError(
          'After transforms, a conditional contained a `return `statement, '
          'which is not allowed. This is a bug, and should not happen.')
class DetectReturnInFunctionDef(gast.NodeVisitor):
  """Raises an error for function definitions containing no return."""

  def visit_FunctionDef(self, node):
    self.generic_visit(node)
    if contains_return(node):
      return
    raise ValueError(
        'Each function definition should contain at least one return.')
def transform(node, ctx):
  """Ensure a function has only a single return.

  This transforms an AST node with multiple returns successively into
  containing only a single return node.

  There are a few restrictions on what we can handle:
  - An AST being transformed must contain at least one return.
  - No returns allowed in loops. We have to know the type of the return value,
    and we currently don't have either a type inference system to discover it,
    nor do we have a mechanism for late type binding in TensorFlow.
  - After all transformations are finished, a Return node is not allowed inside
    control flow. If we were unable to move a return outside of control flow,
    this is an error.

  Args:
    node: ast.AST
    ctx: converter.EntityContext

  Returns:
    new_node: an AST with a single return value

  Raises:
    ValueError: if the AST is structured so that we can't perform the
      transform.
  """
  # Make sure that the function has at least one return statement
  # TODO(alexbw): turning off this assertion for now --
  # we need to not require this in e.g. class constructors.
  # DetectReturnInFunctionDef().visit(node)

  # Make sure there's no returns in unsupported locations (loops, try/except)
  DetectReturnInUnsupportedControlFlow().visit(node)

  while True:
    # Try to lift all returns out of if statements and with blocks
    lr = LiftReturn(ctx)
    node = lr.visit(node)
    changes_made = lr.changes_made
    fe = FoldElse(ctx)
    node = fe.visit(node)
    changes_made = changes_made or fe.changes_made
    # Alternate the two passes until a fixed point: neither changed the tree.
    if not changes_made:
      break

  # Make sure we've scrubbed all returns from conditionals
  DetectReturnInConditional().visit(node)

  return node
|
xodus7/tensorflow
|
tensorflow/python/autograph/converters/return_statements.py
|
Python
|
apache-2.0
| 11,029
|
[
"VisIt"
] |
e8ea2abdafcade9ffff580f2dd9f20746c3f297bf161c390be0f1ac3b103a1d0
|
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io.netcdf import from_netcdf, to_netcdf
@pytest.mark.parametrize("include", ((), [], set(), None))
def test_include_keyword_is_empty(tmpdir, format, include):
    """An empty (or None) ``include`` loads no fields from the file."""
    # ``format`` is a fixture presumably defined in conftest.py — not
    # visible in this file; verify there.
    grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
    grid.add_ones("elev", at="node")
    grid.add_zeros("elev", at="link")
    grid.add_empty("temp", at="node")
    with tmpdir.as_cwd():
        to_netcdf(grid, "test.nc", format=format)
        actual = from_netcdf("test.nc", include=include)
    assert len(actual.at_node) == 0
    assert len(actual.at_link) == 0
@pytest.mark.parametrize("include", ("*", ("*",), ("at_node:*", "at_link:*")))
@pytest.mark.parametrize("exclude", (None, ()))
def test_include_everything(tmpdir, format, include, exclude):
    """A wildcard ``include`` with an empty ``exclude`` loads all fields."""
    grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
    grid.add_ones("elev", at="node")
    grid.add_zeros("elev", at="link")
    grid.add_empty("temp", at="node")
    with tmpdir.as_cwd():
        to_netcdf(grid, "test.nc", format=format)
        # Bug fix: the parametrized ``exclude`` was accepted but never
        # forwarded to from_netcdf, so the (None, ()) cases were not
        # actually exercised.
        actual = from_netcdf("test.nc", include=include, exclude=exclude)
    assert set(actual.at_node) == set(["elev", "temp"])
    assert set(actual.at_link) == set(["elev"])
@pytest.mark.parametrize(
    "include,exclude", [(("*", "*")), ((None, None)), (([], None))]
)
def test_exclude_everything(tmpdir, format, include, exclude):
    """Excluding all included patterns (or including nothing) loads no fields."""
    # NOTE(review): the extra parentheses in the parametrize list are
    # redundant — each entry is a plain 2-tuple.
    grid = RasterModelGrid((4, 3), xy_spacing=(2, 5), xy_of_lower_left=(-2.0, 10.0))
    grid.add_ones("elev", at="node")
    grid.add_zeros("elev", at="link")
    grid.add_empty("temp", at="node")
    with tmpdir.as_cwd():
        to_netcdf(grid, "test.nc", format=format)
        actual = from_netcdf("test.nc", include=include, exclude=exclude)
    assert len(actual.at_node) == 0
    assert len(actual.at_link) == 0
@pytest.mark.parametrize(
    "grid_type", ["HexModelGrid", "RadialModelGrid", "RasterModelGrid"]
)
def test_from_grid(datadir, grid_type):
    """Reading a fixture file restores the grid class and field values."""
    # ``datadir`` is a fixture (presumably from conftest.py) pointing at
    # pre-built test netCDF files — confirm there.
    grid = from_netcdf(datadir / "test-{0}.nc".format(grid_type))
    assert grid.__class__.__name__ == grid_type
    assert_array_equal(grid.at_node["elev"], 1.0)
    assert_array_equal(grid.at_node["temp"], 1.0)
    assert_array_equal(grid.at_link["elev"], 0.0)
|
cmshobe/landlab
|
tests/io/netcdf/test_from_netcdf.py
|
Python
|
mit
| 2,314
|
[
"NetCDF"
] |
29f1beeb0292b9fc4d13cde88f857719b88b4fba64909aa9ac0e43ada819c472
|
r"""
File I/O (:mod:`skbio.io`)
==========================
.. currentmodule:: skbio.io
This package provides I/O functionality for skbio.
Supported file formats
----------------------
For details on what objects are supported by each format,
see the associated documentation.
.. currentmodule:: skbio.io.format
.. autosummary::
:toctree: generated/
clustal
fasta
fastq
lsmat
newick
ordination
phylip
qseq
.. currentmodule:: skbio.io.registry
User functions
--------------
.. autosummary::
:toctree: generated/
write
read
sniff
.. currentmodule:: skbio.io
User exceptions and warnings
----------------------------
.. autosummary::
:toctree: generated/
FormatIdentificationWarning
ArgumentOverrideWarning
UnrecognizedFormatError
IOSourceError
FileFormatError
ClustalFormatError
FASTAFormatError
FASTQFormatError
LSMatFormatError
NewickFormatError
OrdinationFormatError
PhylipFormatError
QSeqFormatError
QUALFormatError
Subpackages
-----------
.. autosummary::
:toctree: generated/
registry
util
For developer documentation on extending I/O, see :mod:`skbio.io.registry`.
Introduction to I/O
-------------------
Reading and writing files (I/O) can be a complicated task:
* A file format can sometimes be read into more than one in-memory
representation (i.e., object). For example, a FASTA file can be read into an
:mod:`skbio.alignment.SequenceCollection` or :mod:`skbio.alignment.Alignment`
depending on the file's contents and what operations you'd like to perform on
your data.
* A single object might be writeable to more than one file format. For example,
an :mod:`skbio.alignment.Alignment` object could be written to FASTA, FASTQ,
QSEQ, or PHYLIP formats, just to name a few.
* You might not know the exact file format of your file, but you want to read
it into an appropriate object.
* You might want to read multiple files into a single object, or write an
object to multiple files.
* Instead of reading a file into an object, you might want to stream the file
using a generator (e.g., if the file cannot be fully loaded into memory).
To address these issues (and others), scikit-bio provides a simple, powerful
interface for dealing with I/O. We accomplish this by using a single I/O
registry.
What kinds of files scikit-bio can use
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To see a complete list of file-like inputs that can be used for reading,
writing, and sniffing, see the documentation for :func:`skbio.io.util.open`.
Reading files into scikit-bio
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two ways to read files. The first way is to use the
procedural interface:
.. code-block:: python
my_obj = skbio.io.read(file, format='someformat', into=SomeSkbioClass)
The second is to use the object-oriented (OO) interface which is automatically
constructed from the procedural interface:
.. code-block:: python
my_obj = SomeSkbioClass.read(file, format='someformat')
For example, to read a `newick` file using both interfaces you would type:
>>> from skbio import read
>>> from skbio import TreeNode
>>> from io import StringIO
>>> open_filehandle = StringIO(u'(a, b);')
>>> tree = read(open_filehandle, format='newick', into=TreeNode)
>>> tree
<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
For the OO interface:
>>> open_filehandle = StringIO(u'(a, b);')
>>> tree = TreeNode.read(open_filehandle, format='newick')
>>> tree
<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
In the case of :func:`skbio.io.registry.read` if `into` is not provided, then a
generator will be returned. What the generator yields will depend on what
format is being read.
When `into` is provided, format may be omitted and the registry will use its
knowledge of the available formats for the requested class to infer the correct
format. This format inference is also available in the OO interface, meaning
that `format` may be omitted there as well.
As an example:
>>> open_filehandle = StringIO(u'(a, b);')
>>> tree = TreeNode.read(open_filehandle)
>>> tree
<TreeNode, name: unnamed, internal node count: 0, tips count: 2>
We call format inference `sniffing`, much like the :class:`csv.Sniffer`
class of Python's standard library. The goal of a `sniffer` is twofold: to
identify if a file is a specific format, and if it is, to provide `**kwargs`
which can be used to better parse the file.
.. note:: There is a built-in `sniffer` which results in a useful error message
if an empty file is provided as input and the format was omitted.
Writing files from scikit-bio
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Just as when reading files, there are two ways to write files.
Procedural Interface:
.. code-block:: python
skbio.io.write(my_obj, format='someformat', into=file)
OO Interface:
.. code-block:: python
my_obj.write(file, format='someformat')
In the procedural interface, `format` is required. Without it, scikit-bio does
not know how you want to serialize an object. OO interfaces define a default
`format`, so it may not be necessary to include it.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from importlib import import_module
from skbio.util import TestRunner
from ._warning import FormatIdentificationWarning, ArgumentOverrideWarning
from ._exception import (UnrecognizedFormatError, FileFormatError,
ClustalFormatError, FASTAFormatError,
IOSourceError, FASTQFormatError, LSMatFormatError,
NewickFormatError, OrdinationFormatError,
PhylipFormatError, QSeqFormatError, QUALFormatError)
from .registry import write, read, sniff, create_format, io_registry
from .util import open
__all__ = ['write', 'read', 'sniff', 'open', 'io_registry', 'create_format',
'FormatIdentificationWarning', 'ArgumentOverrideWarning',
'UnrecognizedFormatError', 'IOSourceError',
'FileFormatError',
'ClustalFormatError',
'FASTAFormatError',
'FASTQFormatError',
'LSMatFormatError',
'NewickFormatError',
'OrdinationFormatError',
'PhylipFormatError',
'QSeqFormatError',
'QUALFormatError']
# Necessary to import each file format module to have them added to the I/O
# registry. We use import_module instead of a typical import to avoid flake8
# unused import errors.
import_module('skbio.io.format.clustal')
import_module('skbio.io.format.fasta')
import_module('skbio.io.format.fastq')
import_module('skbio.io.format.lsmat')
import_module('skbio.io.format.newick')
import_module('skbio.io.format.ordination')
import_module('skbio.io.format.phylip')
import_module('skbio.io.format.qseq')
# This is meant to be a handy indicator to the user that they have done
# something wrong.
import_module('skbio.io.format.emptyfile')
# Now that all of our I/O has loaded, we can add the object oriented methods
# (read and write) to each class which has registered I/O operations.
io_registry.monkey_patch()
test = TestRunner(__file__).test
|
demis001/scikit-bio
|
skbio/io/__init__.py
|
Python
|
bsd-3-clause
| 7,513
|
[
"scikit-bio"
] |
fd8f6a5d14513c92bd631845700f4ceb1e1d140302332528b5c287425537e6c6
|
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from outputparser import OutputParser, ValuesMatcher
import re, mx, mx_graal, os, sys, StringIO, subprocess
from os.path import isfile, join, exists
# Garbage collector flag (minus the -XX:+ prefix) used for benchmark VM runs.
gc = 'UseSerialGC'

# Iteration counts ('-n') per DaCapo benchmark, indexed by SanityCheckLevel
# (Fast, Gate, Normal, Extensive, Benchmark). A zero disables the benchmark
# at that level.
dacapoSanityWarmup = {
    'avrora': [0, 0, 3, 6, 13],
    'batik': [0, 0, 5, 5, 20],
    'eclipse': [0, 0, 0, 0, 0],
    'fop': [4, 8, 10, 20, 30],
    'h2': [0, 0, 5, 5, 8],
    'jython': [0, 0, 5, 10, 13],
    'luindex': [0, 0, 5, 10, 10],
    'lusearch': [0, 4, 5, 5, 8],
    'pmd': [0, 0, 5, 10, 13],
    'sunflow': [0, 2, 5, 10, 15],
    'tomcat': [0, 0, 5, 10, 15],
    'tradebeans': [0, 0, 5, 10, 13],
    'tradesoap': [0, 0, 5, 10, 15],
    'xalan': [0, 0, 5, 10, 18],
}

# Per-level iteration counts for the Scala DaCapo suite (same index scheme).
dacapoScalaSanityWarmup = {
    'actors': [0, 0, 2, 5, 5],
    'apparat': [0, 0, 2, 5, 5],
    'factorie': [0, 0, 2, 5, 5],
    'kiama': [0, 4, 3, 13, 15],
    'scalac': [0, 0, 5, 15, 20],
    'scaladoc': [0, 0, 5, 15, 15],
    'scalap': [0, 0, 5, 15, 20],
    'scalariform':[0, 0, 6, 15, 20],
    'scalatest': [0, 0, 2, 10, 12],
    'scalaxb': [0, 0, 5, 15, 25],
    # (gdub) specs sometimes returns a non-zero value event though there is no apparent failure
    'specs': [0, 0, 0, 0, 0],
    'tmt': [0, 0, 3, 10, 12]
}

# HotSpot build levels on which each DaCapo benchmark runs in the gate;
# an empty list disables the benchmark in the gate entirely.
dacapoGateBuildLevels = {
    'avrora': ['product', 'fastdebug', 'debug'],
    'batik': ['product', 'fastdebug', 'debug'],
    # (lewurm): does not work with JDK8
    'eclipse': [],
    'fop': ['fastdebug', 'debug'],
    'h2': ['product', 'fastdebug', 'debug'],
    'jython': ['product', 'fastdebug', 'debug'],
    'luindex': ['product', 'fastdebug', 'debug'],
    'lusearch': ['product'],
    'pmd': ['product', 'fastdebug', 'debug'],
    'sunflow': ['fastdebug', 'debug'],
    'tomcat': ['product', 'fastdebug', 'debug'],
    'tradebeans': ['product', 'fastdebug', 'debug'],
    # tradesoap is too unreliable for the gate, often crashing with concurrency problems:
    # http://sourceforge.net/p/dacapobench/bugs/99/
    'tradesoap': [],
    'xalan': ['product', 'fastdebug', 'debug'],
}

# Gate build levels for the Scala DaCapo benchmarks.
dacapoScalaGateBuildLevels = {
    'actors': ['product', 'fastdebug', 'debug'],
    'apparat': ['product', 'fastdebug', 'debug'],
    'factorie': ['product', 'fastdebug', 'debug'],
    'kiama': ['fastdebug', 'debug'],
    'scalac': ['product', 'fastdebug', 'debug'],
    'scaladoc': ['product', 'fastdebug', 'debug'],
    'scalap': ['product', 'fastdebug', 'debug'],
    'scalariform':['product', 'fastdebug', 'debug'],
    'scalatest': ['product', 'fastdebug', 'debug'],
    'scalaxb': ['product', 'fastdebug', 'debug'],
    'specs': ['product', 'fastdebug', 'debug'],
    'tmt': ['product', 'fastdebug', 'debug'],
}

# Names of the individual SPECjvm2008 sub-benchmarks.
specjvm2008Names = [
    'startup.helloworld',
    'startup.compiler.compiler',
    'startup.compiler.sunflow',
    'startup.compress',
    'startup.crypto.aes',
    'startup.crypto.rsa',
    'startup.crypto.signverify',
    'startup.mpegaudio',
    'startup.scimark.fft',
    'startup.scimark.lu',
    'startup.scimark.monte_carlo',
    'startup.scimark.sor',
    'startup.scimark.sparse',
    'startup.serial',
    'startup.sunflow',
    'startup.xml.transform',
    'startup.xml.validation',
    'compiler.compiler',
    'compiler.sunflow',
    'compress',
    'crypto.aes',
    'crypto.rsa',
    'crypto.signverify',
    'derby',
    'mpegaudio',
    'scimark.fft.large',
    'scimark.lu.large',
    'scimark.sor.large',
    'scimark.sparse.large',
    'scimark.fft.small',
    'scimark.lu.small',
    'scimark.sor.small',
    'scimark.sparse.small',
    'scimark.monte_carlo',
    'serial',
    'sunflow',
    'xml.transform',
    'xml.validation'
]
def _noneAsEmptyList(a):
    """Return a unchanged, or an empty list when a is None."""
    return [] if a is None else a
class SanityCheckLevel:
    """Sanity-check intensity levels, in ascending order of effort."""
    Fast = 0
    Gate = 1
    Normal = 2
    Extensive = 3
    Benchmark = 4
def getSPECjbb2005(benchArgs=None):
    """Build a Test descriptor that runs SPECjbb2005 and scrapes its score.

    benchArgs -- extra arguments appended to the benchmark command line.
    Aborts via mx.abort when the SPECJBB2005 environment variable does not
    point at a directory containing jbb.jar.
    """
    benchArgs = [] if benchArgs is None else benchArgs
    specjbb2005 = mx.get_env('SPECJBB2005')
    if specjbb2005 is None or not exists(join(specjbb2005, 'jbb.jar')):
        mx.abort('Please set the SPECJBB2005 environment variable to a SPECjbb2005 directory')
    # Patterns matched against the benchmark's stdout for scoring/validation.
    score = re.compile(r"^Valid run, Score is (?P<score>[0-9]+)$", re.MULTILINE)
    error = re.compile(r"VALIDATION ERROR")
    success = re.compile(r"^Valid run, Score is [0-9]+$", re.MULTILINE)
    matcher = ValuesMatcher(score, {'group' : 'SPECjbb2005', 'name' : 'score', 'score' : '<score>'})
    classpath = ['jbb.jar', 'check.jar']
    # NOTE(review): Test is presumably defined later in this file (outside
    # this view) — confirm its signature there.
    return Test("SPECjbb2005", ['spec.jbb.JBBmain', '-propfile', 'SPECjbb.props'] + benchArgs, [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops', '-cp', os.pathsep.join(classpath)], defaultCwd=specjbb2005)
def getSPECjbb2013(benchArgs=None):
    """Build a Test descriptor that runs the SPECjbb2013 composite benchmark.

    benchArgs -- extra arguments appended to the benchmark command line.
    Aborts via mx.abort when the SPECJBB2013 environment variable does not
    point at a directory containing specjbb2013.jar.
    """
    specjbb2013 = mx.get_env('SPECJBB2013')
    if specjbb2013 is None or not exists(join(specjbb2013, 'specjbb2013.jar')):
        mx.abort('Please set the SPECJBB2013 environment variable to a SPECjbb2013 directory')
    # Captures both the max-jOPS and critical-jOPS scores from one line.
    jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)
    # error?
    success = re.compile(r"org.spec.jbb.controller: Run finished", re.MULTILINE)
    matcherMax = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'max', 'score' : '<max>'})
    matcherCritical = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'critical', 'score' : '<critical>'})
    return Test("SPECjbb2013", ['-jar', 'specjbb2013.jar', '-m', 'composite'] +
                _noneAsEmptyList(benchArgs), [success], [], [matcherCritical, matcherMax],
                vmOpts=['-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC', '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking', '-XX:-UseCompressedOops'], defaultCwd=specjbb2013)
def getSPECjbb2015(benchArgs=None):
    """Build a Test descriptor that runs the SPECjbb2015 composite benchmark.

    benchArgs -- extra arguments appended to the benchmark command line.
    Aborts via mx.abort when the SPECJBB2015 environment variable does not
    point at a directory containing specjbb2015.jar.

    NOTE(review): this is a near-verbatim copy of getSPECjbb2013 (including
    the 'org.spec.jbb.controller' success pattern) — confirm the output
    format really is identical for the 2015 release.
    """
    specjbb2015 = mx.get_env('SPECJBB2015')
    if specjbb2015 is None or not exists(join(specjbb2015, 'specjbb2015.jar')):
        mx.abort('Please set the SPECJBB2015 environment variable to a SPECjbb2015 directory')
    jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)
    # error?
    success = re.compile(r"org.spec.jbb.controller: Run finished", re.MULTILINE)
    matcherMax = ValuesMatcher(jops, {'group' : 'SPECjbb2015', 'name' : 'max', 'score' : '<max>'})
    matcherCritical = ValuesMatcher(jops, {'group' : 'SPECjbb2015', 'name' : 'critical', 'score' : '<critical>'})
    return Test("SPECjbb2015", ['-jar', 'specjbb2015.jar', '-m', 'composite'] +
                _noneAsEmptyList(benchArgs), [success], [], [matcherCritical, matcherMax],
                vmOpts=['-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC', '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking', '-XX:-UseCompressedOops'], defaultCwd=specjbb2015)
def getSPECjvm2008(benchArgs=None):
    """Build a Test running the SPECjvm2008 harness.

    Aborts unless the SPECJVM2008 environment variable points at a
    directory containing SPECjvm2008.jar.
    """
    specjvm2008 = mx.get_env('SPECJVM2008')
    if specjvm2008 is None or not exists(join(specjvm2008, 'SPECjvm2008.jar')):
        mx.abort('Please set the SPECJVM2008 environment variable to a SPECjvm2008 directory')
    # Per-benchmark score line; also matches 'Noncompliant ... result' lines.
    score = re.compile(r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$", re.MULTILINE)
    error = re.compile(r"^Errors in benchmark: ", re.MULTILINE)
    # The ' ops/m' at the end of the success string is important : it's how you can tell valid and invalid runs apart
    success = re.compile(r"^(Noncompliant c|C)omposite result: [0-9]+((,|\.)[0-9]+)?( SPECjvm2008 (Base|Peak))? ops/m$", re.MULTILINE)
    matcher = ValuesMatcher(score, {'group' : 'SPECjvm2008', 'name' : '<benchmark>', 'score' : '<score>'})
    # NOTE(review): 'gc' is a module-level GC flag name defined elsewhere in this file.
    return Test("SPECjvm2008", ['-jar', 'SPECjvm2008.jar'] + _noneAsEmptyList(benchArgs), [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops'], defaultCwd=specjvm2008)
def getDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=None, extraVmArguments=None):
    """Collect DaCapo Test objects for every benchmark enabled at *level*.

    A benchmark is included when its warmup count for *level* is positive and,
    if *gateBuildLevel* is given, that build level is listed for the benchmark.
    """
    def _enabled(bench, warmups):
        # Zero warmups means the benchmark is disabled at this sanity level.
        if warmups[level] <= 0:
            return False
        return gateBuildLevel is None or gateBuildLevel in dacapoGateBuildLevels[bench]

    return [getDacapo(bench, ['-n', str(warmups[level])] + _noneAsEmptyList(dacapoArgs), extraVmArguments=extraVmArguments)
            for (bench, warmups) in dacapoSanityWarmup.items() if _enabled(bench, warmups)]
def getDacapo(name, dacapoArgs=None, extraVmArguments=None):
    """Build a Test for a single DaCapo 9.12 benchmark *name*.

    The jar is located via the DACAPO_CP environment variable or, failing
    that, the 'DACAPO' mx library; aborts if neither yields a jar file.
    """
    dacapo = mx.get_env('DACAPO_CP')
    if dacapo is None:
        l = mx.library('DACAPO', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort('DaCapo 9.12 jar file must be specified with DACAPO_CP environment variable or as DACAPO library')
    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort('Specified DaCapo jar file does not exist or is not a jar file: ' + dacapo)
    # PASSED/FAILED banners printed by the DaCapo harness itself.
    dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", re.MULTILINE)
    dacapoFail = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====", re.MULTILINE)
    # Final-run time and first-warmup time are reported to separate groups.
    dacapoTime = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
    dacapoTime1 = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup 1 in (?P<time>[0-9]+) msec =====")
    dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : 'DaCapo', 'name' : '<benchmark>', 'score' : '<time>'})
    dacapoMatcher1 = ValuesMatcher(dacapoTime1, {'group' : 'DaCapo-1stRun', 'name' : '<benchmark>', 'score' : '<time>'})
    # Use ipv4 stack for dacapos; tomcat+solaris+ipv6_interface fails (see also: JDK-8072384)
    return Test("DaCapo-" + name, ['-jar', mx._cygpathU2W(dacapo), name] + _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail],
                [dacapoMatcher, dacapoMatcher1],
                ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops', "-Djava.net.preferIPv4Stack=true", '-G:+ExitVMOnException'] +
                _noneAsEmptyList(extraVmArguments))
def getScalaDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=None, extraVmArguments=None):
    """Collect Scala-DaCapo Test objects for every benchmark enabled at *level*.

    Mirrors getDacapos() but draws from the Scala-DaCapo warmup/gate tables.
    """
    def _enabled(bench, warmups):
        # Zero warmups means the benchmark is disabled at this sanity level.
        if warmups[level] <= 0:
            return False
        return gateBuildLevel is None or gateBuildLevel in dacapoScalaGateBuildLevels[bench]

    return [getScalaDacapo(bench, ['-n', str(warmups[level])] + _noneAsEmptyList(dacapoArgs), extraVmArguments=extraVmArguments)
            for (bench, warmups) in dacapoScalaSanityWarmup.items() if _enabled(bench, warmups)]
def getScalaDacapo(name, dacapoArgs=None, extraVmArguments=None):
    """Build a Test for a single Scala DaCapo 0.1.0 benchmark *name*.

    The jar is located via the DACAPO_SCALA_CP environment variable or,
    failing that, the 'DACAPO_SCALA' mx library.
    """
    dacapo = mx.get_env('DACAPO_SCALA_CP')
    if dacapo is None:
        l = mx.library('DACAPO_SCALA', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort('Scala DaCapo 0.1.0 jar file must be specified with DACAPO_SCALA_CP environment variable or as DACAPO_SCALA library')
    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort('Specified Scala DaCapo jar file does not exist or is not a jar file: ' + dacapo)
    # Harness banners; the version may carry a -SNAPSHOT suffix.
    dacapoSuccess = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", re.MULTILINE)
    dacapoFail = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) FAILED (warmup|) =====", re.MULTILINE)
    dacapoTime = re.compile(r"===== DaCapo 0\.1\.0(-SNAPSHOT)? (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
    dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : "Scala-DaCapo", 'name' : '<benchmark>', 'score' : '<time>'})
    return Test("Scala-DaCapo-" + name, ['-jar', mx._cygpathU2W(dacapo), name] + _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail], [dacapoMatcher], ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops'] + _noneAsEmptyList(extraVmArguments))
def getBootstraps():
    """Return Test objects that time a Graal bootstrap: default heap and a 2g-heap variant."""
    # One pattern feeds all matchers; the '(compiled N methods)' group is optional output.
    time = re.compile(r"Bootstrapping Graal\.+ in (?P<time>[0-9]+) ms( \(compiled (?P<methods>[0-9]+) methods\))?")
    scoreMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapTime', 'score' : '<time>'})
    methodMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapMethods', 'score' : '<methods>'})
    scoreMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapTime', 'score' : '<time>'})
    methodMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapMethods', 'score' : '<methods>'})
    tests = []
    # The plain 'client'/'server' VMs never bootstrap Graal, so they are skipped.
    tests.append(Test("Bootstrap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcher, methodMatcher], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
    tests.append(Test("Bootstrap-bigHeap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcherBig, methodMatcherBig], vmOpts=['-Xms2g'], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
    return tests
class CTWMode:
    """Enumerates CompileTheWorld modes: Full (with inlining) and NoInline."""
    Full = 0
    NoInline = 1
def getCTW(vm, mode):
    """Build the CompileTheWorld Test for *vm*, compiling every method in rt.jar.

    *mode* is a CTWMode value; CTWMode.NoInline (and above) disables inlining
    via the appropriate flag for the selected VM.
    """
    time = re.compile(r"CompileTheWorld : Done \([0-9]+ classes, [0-9]+ methods, (?P<time>[0-9]+) ms\)")
    scoreMatcher = ValuesMatcher(time, {'group' : 'CompileTheWorld', 'name' : 'CompileTime', 'score' : '<time>'})
    # NOTE(review): if JAVA_HOME is unset this is None and join() below fails — verify callers guarantee it.
    jre = os.environ.get('JAVA_HOME')
    if exists(join(jre, 'jre')):
        jre = join(jre, 'jre')
    rtjar = join(jre, 'lib', 'rt.jar')
    args = ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + rtjar]
    if vm == 'jvmci':
        args += ['-XX:+BootstrapGraal']
    if mode >= CTWMode.NoInline:
        if not mx_graal.isJVMCIEnabled(vm):
            args.append('-XX:-Inline')
        else:
            # NOTE(review): 'CompileTheWordConfig' looks like a typo for 'CompileTheWorldConfig' —
            # confirm against the Graal option name before changing, as the flag string is load-bearing.
            args.append('-G:CompileTheWordConfig=-Inline')
    return Test("CompileTheWorld", args, successREs=[time], scoreMatchers=[scoreMatcher], benchmarkCompilationRate=False)
class Tee:
    """Duplicates streamed output: buffers it for later parsing while echoing to stdout."""
    def __init__(self):
        # Python 2 style StringIO accumulator holding everything fed to eat().
        self.output = StringIO.StringIO()
    def eat(self, line):
        # Record the line for the output parser and echo it for live progress.
        self.output.write(line)
        sys.stdout.write(line)
"""
Encapsulates a single program that is a sanity test and/or a benchmark.
"""
class Test:
    """
    Encapsulates a single program that is a sanity test and/or a benchmark.

    The program's output is scanned with regular expressions: successREs mark
    a passing run, failureREs a failing one, and scoreMatchers extract
    benchmark scores grouped by (group, name).
    """
    def __init__(self, name, cmd, successREs=None, failureREs=None, scoreMatchers=None, vmOpts=None, defaultCwd=None, ignoredVMs=None, benchmarkCompilationRate=False):
        self.name = name
        self.successREs = _noneAsEmptyList(successREs)
        # A compiler-scope exception in the output always counts as a failure.
        self.failureREs = _noneAsEmptyList(failureREs) + [re.compile(r"Exception occurred in scope: ")]
        self.scoreMatchers = _noneAsEmptyList(scoreMatchers)
        self.vmOpts = _noneAsEmptyList(vmOpts)
        self.cmd = cmd
        self.defaultCwd = defaultCwd
        self.ignoredVMs = _noneAsEmptyList(ignoredVMs)
        self.benchmarkCompilationRate = benchmarkCompilationRate
        if benchmarkCompilationRate:
            # -XX:+CITime makes the VM report compilation-time statistics.
            self.vmOpts = self.vmOpts + ['-XX:+CITime']

    def __str__(self):
        return self.name

    def test(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a sanity test.

        Returns True when the run exits with code 0, at least one successRE
        matched, no failureRE matched and no hs_err log was produced.
        VMs listed in ignoredVMs trivially pass.
        """
        if vm in self.ignoredVMs:
            return True
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()
        # A crash dump path in the output identifies a JVM error.
        jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)")
        parser.addMatcher(ValuesMatcher(jvmError, {'jvmError' : '<jvmerror>'}))

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))

        tee = Tee()
        retcode = mx_graal.run_vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, debugLevel=vmbuild)
        output = tee.output.getvalue()
        valueMaps = parser.parse(output)

        if len(valueMaps) == 0:
            return False

        # Merge all matcher results, insisting that repeated keys agree.
        record = {}
        for valueMap in valueMaps:
            for key, value in valueMap.items():
                # 'key in record' replaces the Python-2-only dict.has_key().
                if key in record and record[key] != value:
                    mx.abort('Inconsistent values returned by test matchers : ' + str(valueMaps))
                record[key] = value

        jvmErrorFile = record.get('jvmError')
        if jvmErrorFile:
            mx.log('/!\\JVM Error : dumping error log...')
            with open(jvmErrorFile, 'rb') as fp:
                mx.log(fp.read())
            os.unlink(jvmErrorFile)
            return False

        if record.get('failed') == '1':
            return False

        return retcode == 0 and record.get('passed') == '1'

    def bench(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a benchmark.

        Returns a dict mapping score-group names to {name: score} dicts.
        Aborts on a failed run; VMs listed in ignoredVMs return {}.
        """
        if vm in self.ignoredVMs:
            return {}
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))
        for scoreMatcher in self.scoreMatchers:
            parser.addMatcher(scoreMatcher)

        if self.benchmarkCompilationRate:
            if vm == 'jvmci':
                bps = re.compile(r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                ibps = re.compile(r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                parser.addMatcher(ValuesMatcher(bps, {'group' : 'ParsedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
            else:
                # HotSpot's -XX:+CITime output format.
                ibps = re.compile(r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard")
                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : '<compiler>:' + self.name, 'score' : '<rate>'}))

        startDelim = 'START: ' + self.name
        endDelim = 'END: ' + self.name

        outputfile = os.environ.get('BENCH_OUTPUT', None)
        if outputfile:
            # Used only to debug output parsing: replay a previously captured log.
            with open(outputfile) as fp:
                output = fp.read()
            start = output.find(startDelim)
            end = output.find(endDelim, start)
            if start == -1 and end == -1:
                return {}
            output = output[start + len(startDelim + os.linesep): end]
            mx.log(startDelim)
            mx.log(output)
            mx.log(endDelim)
        else:
            tee = Tee()
            mx.log(startDelim)
            if mx_graal.run_vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, debugLevel=vmbuild) != 0:
                mx.abort("Benchmark failed (non-zero retcode)")
            mx.log(endDelim)
            output = tee.output.getvalue()

        groups = {}
        passed = False
        for valueMap in parser.parse(output):
            # Every match must be either a score triple or a pass/fail flag.
            assert ('name' in valueMap and 'score' in valueMap and 'group' in valueMap) or 'passed' in valueMap or 'failed' in valueMap, valueMap
            if valueMap.get('failed') == '1':
                mx.abort("Benchmark failed")
            if valueMap.get('passed') == '1':
                passed = True
            groupName = valueMap.get('group')
            if groupName:
                group = groups.setdefault(groupName, {})
                name = valueMap.get('name')
                score = valueMap.get('score')
                if name and score:
                    group[name] = score

        if not passed:
            mx.abort("Benchmark failed (not passed)")

        return groups
|
md-5/jdk10
|
src/jdk.internal.vm.compiler/.mx.graal/sanitycheck.py
|
Python
|
gpl-2.0
| 21,104
|
[
"VisIt"
] |
9ed7cbeeb8dc23ab1420e0f160c790f247ce9fa360fb934b016c54436663e56c
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
FCI solver for Singlet state
Different FCI solvers are implemented to support different type of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
direct_spin0 solver is specified for singlet state. However, calling this
solver sometimes ends up with the error "State not singlet x.xxxxxxe-06" due
to numerical issues. Calling direct_spin1 for singlet state is slightly
slower but more robust than direct_spin0 especially when combining to energy
penalty method (:func:`fix_spin_`)
'''
import sys
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.fci import cistring
from pyscf.fci import rdm
from pyscf.fci import direct_spin1
from pyscf.fci.spin_op import contract_ss
libfci = lib.load_library('libfci')
@lib.with_doc(direct_spin1.contract_1e.__doc__)
def contract_1e(f1e, fcivec, norb, nelec, link_index=None):
    fcivec = numpy.asarray(fcivec, order='C')
    link_index = _unpack(norb, nelec, link_index)
    na, nlink = link_index.shape[:2]
    # spin0 solver: alpha and beta strings are identical, CI matrix is na x na.
    assert(fcivec.size == na**2)
    ci1 = numpy.empty_like(fcivec)
    # The C kernel takes the lower-triangular packed 1e integrals.
    f1e_tril = lib.pack_tril(f1e)
    libfci.FCIcontract_1e_spin0(f1e_tril.ctypes.data_as(ctypes.c_void_p),
                                fcivec.ctypes.data_as(ctypes.c_void_p),
                                ci1.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(norb), ctypes.c_int(na),
                                ctypes.c_int(nlink),
                                link_index.ctypes.data_as(ctypes.c_void_p))
    # no *.5 because the spin0 C kernel only computes half of the contraction;
    # symmetrizing with transpose_sum supplies the other half.
    return lib.transpose_sum(ci1, inplace=True).reshape(fcivec.shape)
# Note eri is NOT the 2e hamiltonian matrix, the 2e hamiltonian is
# h2e = eri_{pq,rs} p^+ q r^+ s
# = (pq|rs) p^+ r^+ s q - (pq|rs) \delta_{qr} p^+ s
# so eri is defined as
# eri_{pq,rs} = (pq|rs) - (1/Nelec) \sum_q (pq|qs)
# to restore the symmetry between pq and rs,
# eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\sum_q (pq|qs) + \sum_p (pq|rp)]
# Please refer to the treatment in direct_spin1.absorb_h1e
# the input fcivec should be symmetrized
@lib.with_doc(direct_spin1.contract_2e.__doc__)
def contract_2e(eri, fcivec, norb, nelec, link_index=None):
    fcivec = numpy.asarray(fcivec, order='C')
    # Restore 4-fold symmetry packing expected by the C kernel.
    eri = ao2mo.restore(4, eri, norb)
    # Symmetrize between the (pq| and |rs) indices; *.5 keeps the total weight.
    lib.transpose_sum(eri, inplace=True)
    eri *= .5
    link_index = _unpack(norb, nelec, link_index)
    na, nlink = link_index.shape[:2]
    # spin0 solver: alpha and beta strings are identical, CI matrix is na x na.
    assert(fcivec.size == na**2)
    ci1 = numpy.empty((na,na))
    libfci.FCIcontract_2e_spin0(eri.ctypes.data_as(ctypes.c_void_p),
                                fcivec.ctypes.data_as(ctypes.c_void_p),
                                ci1.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(norb), ctypes.c_int(na),
                                ctypes.c_int(nlink),
                                link_index.ctypes.data_as(ctypes.c_void_p))
    # no *.5 because FCIcontract_2e_spin0 only compute half of the contraction
    return lib.transpose_sum(ci1, inplace=True).reshape(fcivec.shape)
absorb_h1e = direct_spin1.absorb_h1e
@lib.with_doc(direct_spin1.make_hdiag.__doc__)
def make_hdiag(h1e, eri, norb, nelec):
    # Delegate to the general spin1 implementation, then enforce the
    # alpha/beta exchange symmetry of the singlet solver.
    hdiag = direct_spin1.make_hdiag(h1e, eri, norb, nelec)
    na = int(numpy.sqrt(hdiag.size))
    # symmetrize hdiag to reduce numerical error
    hdiag = lib.transpose_sum(hdiag.reshape(na,na), inplace=True) * .5
    return hdiag.ravel()
pspace = direct_spin1.pspace
# be careful with single determinant initial guess. It may lead to the
# eigvalue of first davidson iter being equal to hdiag
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           ecore=0, **kwargs):
    """Module-level convenience driver: solve the singlet FCI problem.

    Constructs (or reuses, via the factory cache) a FCISolver and returns
    (energy, ci_vector).  orbsym/wfnsym are accepted for interface
    compatibility but not used by this symmetry-free solver.
    """
    e, c = direct_spin1._kfactory(FCISolver, h1e, eri, norb, nelec, ci0, level_shift,
                                  tol, lindep, max_cycle, max_space, nroots,
                                  davidson_only, pspace_size, ecore=ecore, **kwargs)
    return e, c
# dm[p,q] = <|q^+ p|>
@lib.with_doc(direct_spin1.make_rdm1.__doc__)
def make_rdm1(fcivec, norb, nelec, link_index=None):
    # Alpha and beta densities are identical for the singlet CI vector,
    # so the spin-traced 1-RDM is twice the alpha component.
    rdm1 = rdm.make_rdm1('FCImake_rdm1a', fcivec, fcivec,
                         norb, nelec, link_index)
    return rdm1 * 2
# alpha and beta 1pdm
@lib.with_doc(direct_spin1.make_rdm1s.__doc__)
def make_rdm1s(fcivec, norb, nelec, link_index=None):
    # Singlet state: the alpha and beta 1-RDMs are the same object.
    rdm1 = rdm.make_rdm1('FCImake_rdm1a', fcivec, fcivec,
                         norb, nelec, link_index)
    return rdm1, rdm1
# Chemist notation
@lib.with_doc(direct_spin1.make_rdm12.__doc__)
def make_rdm12(fcivec, norb, nelec, link_index=None, reorder=True):
    #dm1, dm2 = rdm.make_rdm12('FCIrdm12kern_spin0', fcivec, fcivec,
    #                          norb, nelec, link_index, 1)
    # NOT use FCIrdm12kern_spin0 because for small system, the kernel may call
    # direct diagonalization, which may not fulfil fcivec = fcivec.T
    dm1, dm2 = rdm.make_rdm12('FCIrdm12kern_sf', fcivec, fcivec,
                              norb, nelec, link_index, 1)
    if reorder:
        # Convert from <p^+ q r^+ s> ordering to the true 2-RDM convention.
        dm1, dm2 = rdm.reorder_rdm(dm1, dm2, True)
    return dm1, dm2
# dm[p,q] = <I|q^+ p|J>
@lib.with_doc(direct_spin1.trans_rdm1s.__doc__)
def trans_rdm1s(cibra, ciket, norb, nelec, link_index=None):
    if link_index is None:
        # Build the default link table; the singlet solver requires
        # equal alpha and beta electron counts.
        if isinstance(nelec, (int, numpy.number)):
            neleca = nelec//2
        else:
            neleca, nelecb = nelec
            assert(neleca == nelecb)
        link_index = cistring.gen_linkstr_index(range(norb), neleca)
    rdm1a = rdm.make_rdm1('FCItrans_rdm1a', cibra, ciket,
                          norb, nelec, link_index)
    rdm1b = rdm.make_rdm1('FCItrans_rdm1b', cibra, ciket,
                          norb, nelec, link_index)
    return rdm1a, rdm1b
@lib.with_doc(direct_spin1.trans_rdm1.__doc__)
def trans_rdm1(cibra, ciket, norb, nelec, link_index=None):
    # Spin-traced transition 1-RDM: sum of the alpha and beta components.
    dm_a, dm_b = trans_rdm1s(cibra, ciket, norb, nelec, link_index)
    return dm_a + dm_b
# dm[p,q,r,s] = <I|p^+ q r^+ s|J>
@lib.with_doc(direct_spin1.trans_rdm12.__doc__)
def trans_rdm12(cibra, ciket, norb, nelec, link_index=None, reorder=True):
    dm1, dm2 = rdm.make_rdm12('FCItdm12kern_sf', cibra, ciket,
                              norb, nelec, link_index, 2)
    if reorder:
        # Convert from <p^+ q r^+ s> ordering to the true 2-RDM convention.
        dm1, dm2 = rdm.reorder_rdm(dm1, dm2, True)
    return dm1, dm2
def energy(h1e, eri, fcivec, norb, nelec, link_index=None):
    """Compute <fcivec|H|fcivec> by absorbing h1e into the 2e operator
    and contracting once with the CI vector."""
    h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec, .5)
    ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index)
    return numpy.dot(fcivec.ravel(), ci1.ravel())
def get_init_guess(norb, nelec, nroots, hdiag):
    '''Initial-guess CI vectors for the singlet solver.

    Selects the nroots determinant pairs with the lowest diagonal
    Hamiltonian elements and returns symmetrized, (almost) normalized
    vectors of length na*nb.
    '''
    if isinstance(nelec, (int, numpy.number)):
        nelecb = nelec//2
        neleca = nelec - nelecb
    else:
        neleca, nelecb = nelec
    na = cistring.num_strings(norb, neleca)
    nb = cistring.num_strings(norb, nelecb)
    init_strs = []
    iroot = 0
    for addr in numpy.argsort(hdiag):
        # Decode flat address into (alpha string, beta string) indices.
        addra = addr // nb
        addrb = addr % nb
        if (addrb,addra) not in init_strs:  # avoid initial guess linear dependency
            init_strs.append((addra,addrb))
            iroot += 1
            if iroot >= nroots:
                break
    ci0 = []
    for addra,addrb in init_strs:
        x = numpy.zeros((na,nb))
        if addra == addrb:
            x[addra,addrb] = 1
        else:
            # Symmetric combination keeps the guess in the singlet subspace.
            x[addra,addrb] = x[addrb,addra] = numpy.sqrt(.5)
        ci0.append(x.ravel())

    # Add noise
    ci0[0][0 ] += 1e-5
    ci0[0][-1] -= 1e-5
    return ci0
###############################################################
# direct-CI driver
###############################################################
def kernel_ms0(fci, h1e, eri, norb, nelec, ci0=None, link_index=None,
               tol=None, lindep=None, max_cycle=None, max_space=None,
               nroots=None, davidson_only=None, pspace_size=None,
               max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Ms=0 (singlet) direct-CI driver.

    Tries a pspace (small-subspace) diagonalization shortcut first; when
    that does not apply, runs the Davidson iteration with fci.eig.
    Returns (energy(+ecore), ci) — lists of each when nroots > 1.
    '''
    # Fall back to solver-object defaults for any unspecified control knob.
    if nroots is None: nroots = fci.nroots
    if davidson_only is None: davidson_only = fci.davidson_only
    if pspace_size is None: pspace_size = fci.pspace_size
    if max_memory is None:
        max_memory = fci.max_memory - lib.current_memory()[0]
    log = logger.new_logger(fci, verbose)

    # This solver only handles spin-singlet (Ms = 0) states.
    assert(fci.spin is None or fci.spin == 0)
    assert(0 <= numpy.sum(nelec) <= norb*2)

    link_index = _unpack(norb, nelec, link_index)
    h1e = numpy.ascontiguousarray(h1e)
    eri = numpy.ascontiguousarray(eri)
    na = link_index.shape[0]

    if max_memory < na**2*6*8e-6:
        log.warn('Not enough memory for FCI solver. '
                 'The minimal requirement is %.0f MB', na**2*60e-6)

    hdiag = fci.make_hdiag(h1e, eri, norb, nelec)
    nroots = min(hdiag.size, nroots)

    try:
        # Small-subspace (pspace) Hamiltonian for preconditioning and,
        # when it spans the full CI space, for direct diagonalization.
        addr, h0 = fci.pspace(h1e, eri, norb, nelec, hdiag, max(pspace_size,nroots))
        if pspace_size > 0:
            pw, pv = fci.eig(h0)
        else:
            pw = pv = None

        if pspace_size >= na*na and ci0 is None and not davidson_only:
            # The degenerated wfn can break symmetry. The davidson iteration with proper
            # initial guess doesn't have this issue.
            if na*na == 1:
                return pw[0]+ecore, pv[:,0].reshape(1,1)
            elif nroots > 1:
                civec = numpy.empty((nroots,na*na))
                civec[:,addr] = pv[:,:nroots].T
                civec = civec.reshape(nroots,na,na)
                try:
                    return pw[:nroots]+ecore, [_check_(ci) for ci in civec]
                except ValueError:
                    # Not a singlet; fall through to the Davidson solver.
                    pass
            elif abs(pw[0]-pw[1]) > 1e-12:
                civec = numpy.empty((na*na))
                civec[addr] = pv[:,0]
                civec = civec.reshape(na,na)
                civec = lib.transpose_sum(civec) * .5
                # direct diagonalization may lead to triplet ground state
                ##TODO: optimize initial guess. Using pspace vector as initial guess may have
                ## spin problems. The 'ground state' of pspace vector may have different spin
                ## state to the true ground state.
                try:
                    return pw[0]+ecore, _check_(civec.reshape(na,na))
                except ValueError:
                    pass
    except NotImplementedError:
        # Solver has no pspace implementation; use a trivial preconditioner seed.
        addr = [0]
        pw = pv = None

    precond = fci.make_precond(hdiag, pw, pv, addr)

    h2e = fci.absorb_h1e(h1e, eri, norb, nelec, .5)
    def hop(c):
        # Matrix-vector product H|c> for the Davidson iteration.
        hc = fci.contract_2e(h2e, c.reshape(na,na), norb, nelec, link_index)
        return hc.ravel()

    #TODO: check spin of initial guess
    if ci0 is None:
        if callable(getattr(fci, 'get_init_guess', None)):
            ci0 = lambda: fci.get_init_guess(norb, nelec, nroots, hdiag)
        else:
            def ci0():
                # Default guess: symmetrized unit vectors at the lowest pspace addresses.
                x0 = []
                for i in range(nroots):
                    x = numpy.zeros((na,na))
                    addra = addr[i] // na
                    addrb = addr[i] % na
                    if addra == addrb:
                        x[addra,addrb] = 1
                    else:
                        x[addra,addrb] = x[addrb,addra] = numpy.sqrt(.5)
                    x0.append(x.ravel())
                return x0
    elif not callable(ci0):
        # Normalize user-supplied guess(es) to a list of flat vectors.
        if isinstance(ci0, numpy.ndarray) and ci0.size == na*na:
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]

    if tol is None: tol = fci.conv_tol
    if lindep is None: lindep = fci.lindep
    if max_cycle is None: max_cycle = fci.max_cycle
    if max_space is None: max_space = fci.max_space
    tol_residual = getattr(fci, 'conv_tol_residual', None)

    with lib.with_omp_threads(fci.threads):
        #e, c = lib.davidson(hop, ci0, precond, tol=fci.conv_tol, lindep=fci.lindep)
        e, c = fci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                       max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                       max_memory=max_memory, verbose=log, follow_state=True,
                       tol_residual=tol_residual, **kwargs)
    if nroots > 1:
        return e+ecore, [_check_(ci.reshape(na,na)) for ci in c]
    else:
        return e+ecore, _check_(c.reshape(na,na))
def _check_(c):
    '''Project the CI matrix onto its symmetric (singlet) component and renormalize.

    Raises ValueError when the symmetrized vector lost significant norm,
    i.e. the state was not a singlet.  Note transpose_sum mutates c in place.
    '''
    c = lib.transpose_sum(c, inplace=True)
    c *= .5
    norm = numpy.linalg.norm(c)
    if abs(norm-1) > 1e-6:
        raise ValueError('State not singlet %g' % abs(numpy.linalg.norm(c)-1))
    return c/norm
class FCISolver(direct_spin1.FCISolver):
    '''FCI solver specialized for spin-singlet (Ms=0) states.

    Thin object-oriented wrapper delegating to the module-level spin0
    functions; inherits configuration and the Davidson machinery from
    direct_spin1.FCISolver.
    '''

    def make_hdiag(self, h1e, eri, norb, nelec):
        return make_hdiag(h1e, eri, norb, nelec)

    def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
        return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)

    def contract_2e(self, eri, fcivec, norb, nelec, link_index=None, **kwargs):
        return contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)

    def get_init_guess(self, norb, nelec, nroots, hdiag):
        return get_init_guess(norb, nelec, nroots, hdiag)

    def kernel(self, h1e, eri, norb, nelec, ci0=None,
               tol=None, lindep=None, max_cycle=None, max_space=None,
               nroots=None, davidson_only=None, pspace_size=None,
               orbsym=None, wfnsym=None, ecore=0, **kwargs):
        # Solve and cache the result on the solver instance.
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.norb = norb
        self.nelec = nelec
        self.eci, self.ci = \
                kernel_ms0(self, h1e, eri, norb, nelec, ci0, None,
                           tol, lindep, max_cycle, max_space, nroots,
                           davidson_only, pspace_size, ecore=ecore, **kwargs)
        return self.eci, self.ci

    def energy(self, h1e, eri, fcivec, norb, nelec, link_index=None):
        # <fcivec|H|fcivec> via one contraction with the absorbed Hamiltonian.
        h2e = self.absorb_h1e(h1e, eri, norb, nelec, .5)
        ci1 = self.contract_2e(h2e, fcivec, norb, nelec, link_index)
        return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))

    def make_rdm1s(self, fcivec, norb, nelec, link_index=None):
        return make_rdm1s(fcivec, norb, nelec, link_index)

    def make_rdm1(self, fcivec, norb, nelec, link_index=None):
        return make_rdm1(fcivec, norb, nelec, link_index)

    @lib.with_doc(make_rdm12.__doc__)
    def make_rdm12(self, fcivec, norb, nelec, link_index=None, reorder=True):
        return make_rdm12(fcivec, norb, nelec, link_index, reorder)

    def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
        return trans_rdm1s(cibra, ciket, norb, nelec, link_index)

    def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)

    @lib.with_doc(trans_rdm12.__doc__)
    def trans_rdm12(self, cibra, ciket, norb, nelec, link_index=None,
                    reorder=True):
        return trans_rdm12(cibra, ciket, norb, nelec, link_index, reorder)

    def gen_linkstr(self, norb, nelec, tril=True, spin=None):
        # Singlet solver: only the (shared) alpha link table is needed.
        if isinstance(nelec, (int, numpy.number)):
            neleca = nelec//2
        else:
            neleca, nelecb = nelec
            assert(neleca == nelecb)
        if tril:
            link_index = cistring.gen_linkstr_index_trilidx(range(norb), neleca)
        else:
            link_index = cistring.gen_linkstr_index(range(norb), neleca)
        return link_index
FCI = FCISolver
def _unpack(norb, nelec, link_index):
    '''Return link_index unchanged if given; otherwise build the default
    tril-packed alpha link table (alpha == beta for the singlet solver).'''
    if link_index is None:
        if isinstance(nelec, (int, numpy.number)):
            neleca = nelec//2
        else:
            neleca, nelecb = nelec
            assert(neleca == nelecb)
        return cistring.gen_linkstr_index_trilidx(range(norb), neleca)
    else:
        return link_index
if __name__ == '__main__':
    # Demo / sanity check: 8 hydrogen atoms in STO-3G, RHF reference
    # followed by a singlet FCI calculation compared against a stored energy.
    import time
    from functools import reduce
    from pyscf import gto
    from pyscf import scf

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None#"out_h2o"
    mol.atom = [
        ['H', ( 1.,-1.    , 0.   )],
        ['H', ( 0.,-1.    ,-1.   )],
        ['H', ( 1.,-0.5   ,-1.   )],
        ['H', ( 0.,-0.5   ,-1.   )],
        ['H', ( 0.,-0.5   ,-0.   )],
        ['H', ( 0.,-0.    ,-1.   )],
        ['H', ( 1.,-0.5   , 0.   )],
        ['H', ( 0., 1.    , 1.   )],
    ]
    mol.basis = {'H': 'sto-3g'}
    mol.build()

    m = scf.RHF(mol)
    ehf = m.scf()

    cis = FCISolver(mol)
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron
    # Transform integrals to the MO basis.
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False)
    e, c = cis.kernel(h1e, eri, norb, nelec)
    # Should print ~0 if the solver reproduces the reference energy.
    print(e - -15.9977886375)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # portable replacement for timing this demo.
    print('t', time.perf_counter())
|
gkc1000/pyscf
|
pyscf/fci/direct_spin0.py
|
Python
|
apache-2.0
| 17,952
|
[
"PySCF"
] |
fc29905e8145713cb701089a7bfc56c3934c58ecafb12dc4e606cfa52ac42d5b
|
import lxml.objectify
import httplib
import urlparse
from utils.dates import *
from feeds import InvalidFeed
__all__ = ('ParseError', 'InvalidFeed', 'from_string', 'from_url', 'from_file', 'parse_date')
# TODO: change the feeds to a registration model
from feeds.atom10 import Atom10Feed
from feeds.rss20 import RSS20Feed
feeds = (RSS20Feed, Atom10Feed)
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
USER_AGENT = 'py-feedreader'
class ParseError(Exception):
    """Raised when a feed cannot be fetched or its HTTP response is invalid."""
    pass
def _from_parsed(parsed):
    """Wrap an lxml-objectified document in the first feed class that accepts it.

    Each registered feed class is probed in order; the first constructor
    that does not raise InvalidFeed wins.  If none accept the document,
    InvalidFeed is raised with the root tag name.
    """
    for feed_cls in feeds:
        try:
            return feed_cls(parsed)
        except InvalidFeed:
            continue
    raise InvalidFeed(parsed.tag)
def from_string(data, *args, **kwargs):
    """Parse a feed from raw XML text; extra arguments go to lxml.objectify.fromstring."""
    document = lxml.objectify.fromstring(data, *args, **kwargs)
    return _from_parsed(document)
def from_file(fp, *args, **kwargs):
    """Parse a feed from an open file-like object.

    Extra positional and keyword arguments are forwarded to
    lxml.objectify.parse (e.g. a custom parser instance).  Previously
    *args was accepted but silently discarded; it is now forwarded for
    consistency with from_string.
    """
    parsed = lxml.objectify.parse(fp, *args, **kwargs).getroot()
    return _from_parsed(parsed)
def from_url(url, **kwargs):
    """Fetch *url* over HTTP(S) and parse the response body as a feed.

    Follows 301/302 redirects by recursing on the Location header
    (NOTE(review): no redirect-depth limit, so a redirect loop recurses
    until the interpreter's recursion limit).  Raises ParseError for a
    bad status line or non-200 response, NotImplementedError for schemes
    other than http/https.
    """
    url = urlparse.urlparse(url)
    if url.scheme == 'https':
        conn = httplib.HTTPSConnection
    elif url.scheme == 'http':
        conn = httplib.HTTPConnection
    else:
        raise NotImplementedError
    # base_url lets the feed parser resolve relative links in the document.
    base_url = '%s://%s' % (url.scheme, url.hostname)
    headers = {
        'User-Agent': USER_AGENT,
        'Accept': ACCEPT_HEADER,
    }
    connection = conn(url.hostname)
    method = kwargs.pop('method', 'GET').upper()
    if method == 'GET':
        # For GET, the query string must be carried in the request path.
        path, query = url.path, ''
        if url.query:
            path += '?' + url.query
    else:
        path, query = url.path, url.query
    connection.request(method, path, query, headers)
    try:
        response = connection.getresponse()
    except httplib.BadStatusLine, exc:
        raise ParseError('Bad status line: %s' % (exc,))
    if response.status != 200:
        if response.status in (301, 302):
            return from_url(response.getheader('location'), **kwargs)
        raise ParseError('%s %s' % (response.status, response.reason))
    return from_file(response, base_url=base_url)
|
dcramer/feedreader
|
feedreader/parser.py
|
Python
|
bsd-2-clause
| 2,172
|
[
"NetCDF"
] |
07357ed5c4a9f115ed5474cf5395f31254568c344556e465e102eb96938e9262
|
from django.conf import settings
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.PDB import *
from Bio.PDB.PDBIO import Select
from common.definitions import *
from protein.models import Protein, ProteinSegment
from residue.models import Residue
from structure.functions import BlastSearch, MappedResidue, StructureSeqNumOverwrite
from structure.sequence_parser import *
import Bio.PDB.Polypeptide as polypeptide
import os,logging
from collections import OrderedDict
logger = logging.getLogger("protwis")
#==============================================================================
#Class for annotating the pdb structures with generic numbers
class GenericNumbering(object):
residue_list = ["ARG","ASP","GLU","HIS","ASN","GLN","LYS","SER","THR","HID","PHE","LEU","ILE","TYR","TRP","VAL","MET","PRO","CYS","ALA","GLY"]
exceptions = {'6GDG':[255, 10]}
def __init__ (self, pdb_file=None, pdb_filename=None, structure=None, pdb_code=None, blast_path='blastp',
    blastdb=os.sep.join([settings.STATICFILES_DIRS[0], 'blast', 'protwis_blastdb']),top_results=1, sequence_parser=False, signprot=False):
    """Set up residue bookkeeping and load the PDB structure.

    Exactly one of pdb_file (name/path or open handle), pdb_filename, or a
    pre-parsed Bio.PDB structure is expected.  With sequence_parser=True the
    SequenceParser path is used instead of local parsing; signprot, when
    given, selects the signalling-protein reference sequence.
    """
    # pdb_file can be either a name/path or a handle to an open file
    self.pdb_file = pdb_file
    self.pdb_filename = pdb_filename
    # if pdb 4 letter code is specified
    self.pdb_code = pdb_code
    # dictionary of 'MappedResidue' object storing information about alignments and bw numbers
    self.residues = {}
    self.pdb_seq = {} #Seq('')
    # list of uniprot ids returned from blast
    self.prot_id_list = []
    #setup for local blast search
    self.blast = BlastSearch(blast_path=blast_path, blastdb=blastdb,top_results=top_results)

    # calling sequence parser
    if sequence_parser:
        if pdb_code:
            struct = Structure.objects.get(pdb_code__index=self.pdb_code)
        if not signprot:
            if pdb_code:
                # Map against the wild-type parent protein of this structure.
                s = SequenceParser(pdb_file=self.pdb_file, wt_protein_id=struct.protein_conformation.protein.parent.id)
            else:
                s = SequenceParser(pdb_file=self.pdb_file)#, wt_protein_id=struct.protein_conformation.protein.parent.id)
        else:
            s = SequenceParser(pdb_file=self.pdb_file, wt_protein_id=signprot.id)
        self.pdb_structure = s.pdb_struct
        self.mapping = s.mapping
        self.wt = s.wt
    else:
        # Parse locally with Bio.PDB; model 0 is used.
        if self.pdb_file:
            self.pdb_structure = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', self.pdb_file)[0]
        elif self.pdb_filename:
            self.pdb_structure = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', self.pdb_filename)[0]
        else:
            self.pdb_structure = structure

        self.parse_structure(self.pdb_structure)
def parse_structure(self, pdb_struct):
    """
    extracting sequence and preparing dictionary of residues
    bio.pdb reads pdb in the following cascade: model->chain->residue->atom
    """
    for chain in pdb_struct:
        self.residues[chain.id] = {}
        self.pdb_seq[chain.id] = Seq('')

        for res in chain:
            #in bio.pdb the residue's id is a tuple of (hetatm flag, residue number, insertion code)
            if res.resname == "HID":
                # NOTE(review): 'resname' is assigned but never used, and the
                # MappedResidue line below still passes res.resname ('HID') to
                # three_to_one — presumably intended to remap HID->HIS; confirm.
                resname = polypeptide.three_to_one('HIS')
            else:
                if res.resname not in self.residue_list:
                    continue
            self.residues[chain.id][res.id[1]] = MappedResidue(res.id[1], polypeptide.three_to_one(res.resname))

        # One-letter sequence in residue-number order for this chain.
        self.pdb_seq[chain.id] = ''.join([self.residues[chain.id][x].name for x in sorted(self.residues[chain.id].keys())])

        # 1-based alignment positions parallel to the sequence just built.
        for pos, res in enumerate(sorted(self.residues[chain.id].keys()), start=1):
            self.residues[chain.id][res].pos_in_aln = pos
def locate_res_by_pos (self, chain, pos):
    """Return the PDB residue number whose alignment position equals *pos*, or 0 if none."""
    chain_residues = self.residues[chain]
    return next((number for number, mapped in chain_residues.items()
                 if mapped.pos_in_aln == pos), 0)
def map_blast_seq (self, prot_id, hsps, chain):
    """Walk a BLAST HSP alignment and copy database annotations onto mapped residues.

    For every aligned, identical position the matching database Residue's
    segment and generic numbers (BW / GPCRdb / display) are attached to the
    corresponding MappedResidue of *chain*.
    """
    #find uniprot residue numbers corresponding to those in pdb file
    q_seq = list(hsps.query)
    tmp_seq = list(hsps.sbjct)
    subj_counter = hsps.sbjct_start
    q_counter = hsps.query_start

    logger.info("{}\n{}".format(hsps.query, hsps.sbjct))
    logger.info("{:d}\t{:d}".format(hsps.query_start, hsps.sbjct_start))

    # Preload all database residues of the hit protein, keyed by sequence number.
    rs = Residue.objects.prefetch_related('display_generic_number', 'protein_segment').filter(
        protein_conformation__protein=prot_id)
    residues = {}
    for r in rs:
        residues[r.sequence_number] = r

    # Consume both aligned sequences position by position.
    while tmp_seq:
        #skipping position if there is a gap in either of sequences
        if q_seq[0] == '-' or q_seq[0] == 'X' or q_seq[0] == ' ':
            subj_counter += 1
            tmp_seq.pop(0)
            q_seq.pop(0)
            continue
        if tmp_seq[0] == '-' or tmp_seq[0] == 'X' or tmp_seq[0] == ' ':
            q_counter += 1
            tmp_seq.pop(0)
            q_seq.pop(0)
            continue
        if tmp_seq[0] == q_seq[0]:
            # Identical residue: transfer annotations from the database record.
            resn = self.locate_res_by_pos(chain, q_counter)
            if resn != 0:
                if subj_counter in residues:
                    db_res = residues[subj_counter]

                    if db_res.protein_segment:
                        segment = db_res.protein_segment.slug
                        self.residues[chain][resn].add_segment(segment)

                    if db_res.display_generic_number:
                        num = db_res.display_generic_number.label
                        bw, gpcrdb = num.split('x')
                        # GPCRdb number shares the helix prefix with the BW number.
                        gpcrdb = "{}.{}".format(bw.split('.')[0], gpcrdb)
                        self.residues[chain][resn].add_bw_number(bw)
                        self.residues[chain][resn].add_gpcrdb_number(gpcrdb)
                        self.residues[chain][resn].add_gpcrdb_number_id(db_res.display_generic_number.id)
                        self.residues[chain][resn].add_display_number(num)
                    self.residues[chain][resn].add_residue_record(db_res)
                else:
                    logger.warning("Could not find residue {} {} in the database.".format(resn, subj_counter))

                if prot_id not in self.prot_id_list:
                    self.prot_id_list.append(prot_id)
        q_counter += 1
        subj_counter += 1
        tmp_seq.pop(0)
        q_seq.pop(0)
def get_substructure_mapping_dict(self):
mapping_dict = {}
for chain in self.residues.keys():
for res in self.residues[chain].keys():
if self.residues[chain][res].segment in mapping_dict.keys():
mapping_dict[self.residues[chain][res].segment].append(self.residues[chain][res].number)
else:
mapping_dict[self.residues[chain][res].segment] = [self.residues[chain][res].number,]
return mapping_dict
    def get_annotated_structure(self):
        """Return ``self.pdb_structure`` with generic numbers in the b-factors.

        For every mapped residue the GPCRdb number is written into the CA
        atom's b-factor and the Ballesteros-Weinstein number into the N
        atom's b-factor; residues whose numbers are unset (0) stay untouched.
        NOTE(review): assumes every annotated residue has CA and N atoms --
        a missing atom would raise KeyError here; confirm inputs.
        """
        for chain in self.pdb_structure:
            for residue in chain:
                if residue.id[1] in self.residues[chain.id].keys():
                    if self.residues[chain.id][residue.id[1]].gpcrdb != 0.:
                        residue["CA"].set_bfactor(float(self.residues[chain.id][residue.id[1]].gpcrdb))
                    if self.residues[chain.id][residue.id[1]].bw != 0.:
                        residue["N"].set_bfactor(float(self.residues[chain.id][residue.id[1]].bw))
        return self.pdb_structure
def save_gn_to_pdb(self):
#replace bfactor field of CA atoms with b-w numbers and return filehandle with the structure written
for chain in self.pdb_structure:
for residue in chain:
if residue.id[1] in self.residues[chain.id].keys():
if self.residues[chain.id][residue.id[1]].gpcrdb != 0.:
residue["CA"].set_bfactor(float(self.residues[chain.id][residue.id[1]].gpcrdb))
if self.residues[chain.id][residue.id[1]].bw != 0.:
residue["N"].set_bfactor(float(self.residues[chain.id][residue.id[1]].bw))
r = self.residues[chain.id][residue.id[1]]
#get the basename, extension and export the pdb structure with b-w numbers
root, ext = os.path.splitext(self.pdb_filename)
io=PDBIO()
io.set_structure(self.pdb_structure)
io.save("%s_GPCRDB%s" %(root, ext))
    def assign_generic_numbers(self):
        """Assign generic numbers via BLAST and return the annotated structure.

        For every chain, the pdb-derived sequence is BLASTed against the
        reference database; each resulting hsp is mapped back onto the pdb
        residues with map_blast_seq().
        """
        alignments = {}
        #blast search goes first, looping through all the chains
        for chain in self.pdb_seq.keys():
            alignments[chain] = self.blast.run(self.pdb_seq[chain])
        #map the results onto pdb sequence for every sequence pair from blast
        for chain in self.pdb_seq.keys():
            for alignment in alignments[chain]:
                if alignment == []:
                    continue
                for hsps in alignment[1].hsps:
                    # alignment is (protein id, blast record)
                    self.map_blast_seq(alignment[0], hsps, chain)
        return self.get_annotated_structure()
def assign_generic_numbers_with_sequence_parser(self):
for chain in self.pdb_structure:
for residue in chain:
if chain.id in self.mapping:
if residue.id[1] in self.mapping[chain.id].keys():
gpcrdb_num = self.mapping[chain.id][residue.id[1]].gpcrdb
if gpcrdb_num != '' and len(gpcrdb_num.split('x'))==2:
bw, gn = gpcrdb_num.split('x')
gn = "{}.{}".format(bw.split('.')[0], gn)
if len(gn.split('.')[1])==3:
gn = '-'+gn[:-1]
try:
residue["CA"].set_bfactor(float(gn))
residue["N"].set_bfactor(float(bw))
except:
pass
return self.pdb_structure
    def assign_cgn_with_sequence_parser(self, target_chain):
        """Build an ordered {segment: {CGN label: atoms}} array for a G protein chain.

        target_chain -- chain id of the G protein subunit in ``self.pdb_structure``.

        Returns an OrderedDict keyed by the segments in
        ``G_PROTEIN_SEGMENTS['Full']``; each value maps a common G protein
        numbering (CGN) label to the residue's atom list, or to 'x' when the
        residue is skipped via ``self.exceptions`` or cannot be fetched.
        """
        pdb_array = OrderedDict()
        for s in G_PROTEIN_SEGMENTS['Full']:
            pdb_array[s] = OrderedDict()
        # i counts residues skipped through the per-structure exception rules
        # (used below to shift the residue lookup); j tracks the position in
        # key_list for the current mapping entry.
        i, j = 0, 0
        key_list = [i.gpcrdb for i in list(self.mapping[target_chain].values())]
        for key, vals in self.mapping[target_chain].items():
            # CGN labels have the form "<category>.<segment>.<number>".
            category, segment, num = vals.gpcrdb.split('.')
            if self.pdb_code in self.exceptions:
                try:
                    if self.pdb_structure[target_chain][key].get_id()[1]>=self.exceptions[self.pdb_code][0]:
                        if i<self.exceptions[self.pdb_code][1]:
                            pdb_array[segment][vals.gpcrdb] = 'x'
                            i+=1
                            continue
                except:
                    pass
            # NOTE(review): this_cat/this_seg/this_num are computed but never
            # used -- possibly leftovers from an earlier revision.
            this_cat, this_seg, this_num = key_list[j].split('.')
            try:
                pdb_array[segment][vals.gpcrdb] = self.pdb_structure[target_chain][key-i].get_list()
            except:
                pdb_array[segment][vals.gpcrdb] = 'x'
            j+=1
        return pdb_array
|
cmunk/protwis
|
structure/assign_generic_numbers_gpcr.py
|
Python
|
apache-2.0
| 11,967
|
[
"BLAST"
] |
2739927a73889126f66d77f73d66f192834ada318ba50acf26714ca2cc5ff1f5
|
"""
This file implements a brew resolver for Galaxy requirements. In order for Galaxy
to pick up on recursively defined and versioned brew dependencies recipes should
be installed using the experimental `brew-vinstall` external command.
More information here:
https://github.com/jmchilton/brew-tests
https://github.com/Homebrew/homebrew-science/issues/1191
This is still an experimental module and there will almost certainly be backward
incompatible changes coming.
"""
from .resolver_mixins import UsesHomebrewMixin
from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY
# TODO: Implement prefer version linked...
PREFER_VERSION_LINKED = 'linked'
PREFER_VERSION_LATEST = 'latest'
# Error text shown when an unsupported prefer_version is configured.
# NOTE(review): only "latest" is listed because "linked" is not implemented yet.
UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE = "HomebrewDependencyResolver prefer_version must be %s"
UNKNOWN_PREFER_VERSION_MESSAGE = UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE % (PREFER_VERSION_LATEST)
DEFAULT_PREFER_VERSION = PREFER_VERSION_LATEST
class HomebrewDependencyResolver(DependencyResolver, UsesHomebrewMixin):
    """Resolve Galaxy package requirements against a Homebrew cellar."""
    resolver_type = "homebrew"

    def __init__(self, dependency_manager, **kwds):
        # Keyword options arrive as strings from the resolver configuration.
        self.versionless = _string_as_bool(kwds.get('versionless', 'false'))
        prefer = kwds.get('prefer_version', None)
        if prefer is None:
            prefer = DEFAULT_PREFER_VERSION
        self.prefer_version = prefer
        # Versionless resolution only makes sense with the "latest" policy.
        if self.versionless and self.prefer_version not in [PREFER_VERSION_LATEST]:
            raise Exception(UNKNOWN_PREFER_VERSION_MESSAGE)
        self._init_homebrew(**kwds)

    def resolve(self, name, version, type, **kwds):
        """Return a dependency for name/version or INDETERMINATE_DEPENDENCY."""
        # This resolver only handles plain package requirements.
        if type != "package":
            return INDETERMINATE_DEPENDENCY
        if version is None or self.versionless:
            return self._find_dep_default(name, version)
        return self._find_dep_versioned(name, version)
def _string_as_bool( value ):
return str( value ).lower() == "true"
__all__ = ['HomebrewDependencyResolver']
|
ssorgatem/pulsar
|
galaxy/tools/deps/resolvers/homebrew.py
|
Python
|
apache-2.0
| 1,947
|
[
"Galaxy"
] |
f5edfdedb55f01c32131dd9339ac363a62afe21cad23b2f0fdcf87b66edfffcc
|
'''
Created on 2012-11-10
@author: Andre R. Erler
'''
## imports
from numpy import array, arange, zeros, diff
import os
import re
# netcdf stuff
from netcdf import Dataset, add_coord, copy_dims, copy_ncatts, copy_vars
# data root folder
from socket import gethostname
hostname = gethostname()
# Pick experiment name and data folder based on the machine we run on.
if hostname=='komputer':
  # local workstation: fixed downscaling archive
  WRFroot = '/media/data/DATA/WRF/Downscaling/'
  exp = 'ctrl-2'
  folder = WRFroot + exp + '/'
elif hostname[0:3] == 'gpc': # i.e. on scinet...
  exproot = os.getcwd()
  exp = exproot.split('/')[-1] # root folder name
  folder = exproot + '/wrfout/' # output folder
else:
  folder = os.getcwd() # just operate in the current directory
  exp = '' # need to define experiment name...
## definitions
# input files and folders
maxdom = 2 # number of nested WRF domains to process
wrfpfx = 'wrfsrfc_d%02i_' # %02i is for the domain number
wrfext = '-01_00:00:00.nc'
# Date pattern used both to select input files and to infer the month.
# BUGFIX: the previous pattern '19(79|80))-\d\d' contained an unbalanced
# parenthesis, so re.compile() raised an error before any file was read.
# NOTE(review): output file names advertise 1979-1981; '80' may need to
# become '80|81' -- confirm against the available wrfout files.
wrfdate = r'19(79|80)-\d\d' # use '\d' for any number and [1-3,45] for ranges
# output files and folders
meanfile = 'wrfsrfc_d%02i_monthly_1979-1981.nc' # %02i is for the domain number
climfile = 'wrfsrfc_d%02i_clim_1979-1981.nc' # %02i is for the domain number
# variables
tax = 0 # time axis (to average over)
dimlist = ['x', 'y'] # copy these dimensions
dimmap = dict(time='Time', x='west_east', y='south_north') # original names of dimensions
varlist = ['ps','T2','Ts','rainnc','rainc','snownc','graupelnc','snow'] # include these variables in monthly means
varmap = dict(ps='PSFC',T2='T2',Ts='TSK',snow='SNOW',snowh='SNOWH', # original (WRF) names of variables
              rainnc='RAINNC',rainc='RAINC',rainsh='RAINSH',snownc='SNOWNC',graupelnc='GRAUPELNC')
acclist = dict(rainnc=100,rainc=100,rainsh=0,snownc=0,graupelnc=0) # dictionary of accumulated variables
# N.B.: keys = variables and values = bucket sizes; value = None or 0 means no bucket
bktpfx = 'I_' # prefix for bucket variables
months = ['January ', 'February ', 'March ', 'April ', 'May ', 'June ', #
'July ', 'August ', 'September', 'October ', 'November ', 'December ']
days = array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]) # no leap year
mons = arange(1,13); nmons = len(mons)
if __name__ == '__main__':
  ## loop over domains
  # NOTE: this script is Python 2 (xrange, dict.has_key).
  for ndom in xrange(1,maxdom+1):
    # announcement
    print('\n\n *** Processing Domain #%02i (of %02i) *** '%(ndom,maxdom))
    ## setup files and folders
    wrffiles = wrfpfx%ndom + wrfdate + wrfext
    # N.B.: wrfpfx must contain something like %02i to accommodate the domain number
    # assemble input filelist
    wrfrgx = re.compile(wrffiles) # compile regular expression
    filelist = [wrfrgx.match(filename) for filename in os.listdir(folder)] # list folder and match
    filelist = [match.group() for match in filelist if match is not None] # assemble valid file list
    if len(filelist) == 0:
      print('\nWARNING: no matching files found for domain %02i'%(ndom,))
      # NOTE(review): 'break' aborts all remaining domains, although the
      # message says "skip and go to next domain" -- 'continue' may have
      # been intended; confirm.
      break # skip and go to next domain
    filelist.sort() # sort alphabetically, so that files are in sequence (temporally)
    datergx = re.compile(wrfdate) # compile regular expression, also used to infer month (later)
    begindate = datergx.search(filelist[0]).group()
    enddate = datergx.search(filelist[-1]).group()
    # load first file to copy some meta data
    wrfout = Dataset(folder+filelist[0], 'r', format='NETCDF4')
    # create monthly mean output file
    mean = Dataset(folder+meanfile%ndom, 'w', format='NETCDF4')
    add_coord(mean, 'time', values=None, dtype='i4', atts=dict(units='month since '+begindate)) # unlimited time dimension
    copy_dims(mean, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
    # global attributes
    copy_ncatts(mean, wrfout, prefix='WRF_') # copy all attributes and save with prefix WRF
    mean.description = 'WRF monthly means'
    mean.begin_date = begindate; mean.end_date = enddate
    mean.experiment = exp
    mean.creator = 'Andre R. Erler'
    # create climatology output file
    clim = Dataset(folder+climfile%ndom, 'w', format='NETCDF4')
    add_coord(clim, 'time', values=mons, dtype='i4', atts=dict(units='month of the year')) # month of the year
    copy_dims(clim, wrfout, dimlist=dimlist, namemap=dimmap, copy_coords=False) # don't have coordinate variables
    # variable with proper names of the months
    clim.createDimension('tstrlen', size=9)
    coord = clim.createVariable('month','S1',('time','tstrlen'))
    # store month names character by character (fixed width of 9)
    for m in xrange(nmons):
      for n in xrange(9): coord[m,n] = months[m][n]
    # global attributes
    copy_ncatts(clim, wrfout, prefix='WRF_') # copy all attributes and save with prefix WRF
    clim.description = 'climatology of WRF monthly means'
    clim.begin_date = begindate; clim.end_date = enddate
    clim.experiment = exp
    clim.creator = 'Andre R. Erler'
    # check variable list
    for var in varlist:
      if not wrfout.variables.has_key(varmap.get(var,var)):
        print('\nWARNING: variable %s not found in source file!\n'%(var,))
        # NOTE(review): 'del var' only unbinds the loop variable; it does
        # not remove the entry from varlist -- confirm intent.
        del var # remove variable if not present in soruce file
    # copy variables to new datasets
    copy_vars(mean, wrfout, varlist=varlist, namemap=varmap, dimmap=dimmap, copy_data=False)
    copy_vars(clim, wrfout, varlist=varlist, namemap=varmap, dimmap=dimmap, copy_data=False)
    # length of time, x, and y dimensions
    nvar = len(varlist)
    nx = len(wrfout.dimensions[dimmap['x']])
    ny = len(wrfout.dimensions[dimmap['y']])
    nfiles = len(filelist) # number of files
    # close sample input file
    wrfout.close()
    ## compute monthly means and climatology
    # allocate arrays
    print('\n Computing monthly means from %s to %s (incl);'%(begindate,enddate))
    print ('%3i fields of shape (%i,%i):\n'%(nvar,nx,ny))
    for var in varlist:
      print(' %s (%s)'%(var,varmap.get(var,var)))
      assert (ny,nx) == mean.variables[var].shape[1:], \
        '\nWARNING: variable %s does not conform to assumed shape (%i,%i)!\n'%(var,nx,ny)
    # monthly means
    meandata = dict()
    climdata = dict()
    for var in varlist:
      meandata[var] = zeros((nfiles,ny,nx))
      climdata[var] = zeros((nmons,ny,nx))
    xtime = zeros((nfiles,)) # number of month
    xmon = zeros((nmons,)) # counter for number of contributions
    # loop over input files
    print('\n Starting computation: %i iterations (files)\n'%nfiles)
    for n in xrange(nfiles):
      wrfout = Dataset(folder+filelist[n], 'r', format='NETCDF4')
      ntime = len(wrfout.dimensions[dimmap['time']]) # length of month
      print(' processing file #%i of %3i (%i time-steps):'%(n+1,nfiles,ntime))
      print(' %s\n'%filelist[n])
      # compute monthly averages
      m = int(datergx.search(filelist[n]).group()[-2:])-1 # infer month from filename (for climatology)
      xtime[n] = n+1 # month since start
      xmon[m] += 1 # one more item
      for var in varlist:
        ncvar = varmap.get(var,var)
        tmp = wrfout.variables[ncvar]
        if acclist.has_key(var): # special treatment for accumulated variables
          # accumulated fields: take last minus first time step of the month
          mtmp = diff(tmp[:].take([0,ntime-1],axis=tax), n=1, axis=tax).squeeze()
          if acclist[var]:
            # add the overflow "bucket" contribution, scaled by bucket size
            bktvar = bktpfx + ncvar # guess name of bucket variable
            if wrfout.variables.has_key(bktvar):
              bkt = wrfout.variables[bktvar]
              mtmp = mtmp + acclist[var] * diff(bkt[:].take([0,ntime-1],axis=tax), n=1, axis=tax).squeeze()
          mtmp /= (days[m]-1) # transform to daily instead of monthly rate
          # N.B.: technically the difference should be taken w.r.t. the last day of the previous month,
          # not the first day of the current month, hence we loose one day in the accumulation
        else:
          mtmp = tmp[:].mean(axis=tax) # normal variables, normal mean...
        meandata[var][n,:] = mtmp # save monthly mean
        climdata[var][m,:] += mtmp # accumulate climatology
      # close file
      wrfout.close()
    # normalize climatology
    # guard against months that received no contribution (short record)
    if n < nmons: xmon[xmon==0] = 1 # avoid division by zero
    for var in varlist:
      climdata[var][:,:,:] = climdata[var][:,:,:] / xmon[:,None,None] # 'None" indicates a singleton dimension
    ## finish
    # save to files
    print(' Done. Writing output to:\n %s'%(folder,))
    for var in varlist:
      mean.variables[var][:] = meandata[var]
      # NOTE(review): the time coordinate is rewritten once per variable;
      # harmless but could sit outside this loop.
      mean.variables['time'][:] = xtime
      clim.variables[var][:] = climdata[var]
    # close files
    mean.close()
    print(' %s'%(meanfile%ndom,))
    clim.close()
    print(' %s'%(climfile%ndom,))
|
aerler/WRF-Tools
|
Python/archive/avgWRF_1979-1981.py
|
Python
|
gpl-3.0
| 8,576
|
[
"NetCDF"
] |
7d154a2bacd0dcc112fe9ec8be8615391c300203db857f1ce74f3a460188fd89
|
"""
DIRAC Logger client
"""
import sys
import traceback
import inspect
import DIRAC
from DIRAC.FrameworkSystem.private.logging.LogLevels import LogLevels
from DIRAC.FrameworkSystem.private.logging.Message import Message
from DIRAC.Core.Utilities import Time, List
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure, reprReturnErrorStructure
from DIRAC.FrameworkSystem.private.logging.backends.BackendIndex import gBackendIndex
from DIRAC.Core.Utilities import ExitCallback
__RCSID__ = "$Id$"
DEBUG = 1
class Logger( object ):
  """Central DIRAC logger.

  Dispatches messages at the configured severity levels to a set of
  registered backends (stdout by default).  The instance is first
  pre-initialized with defaults and later configured from the CS via
  initialize(); per-subsystem children are created with getSubLogger().
  NOTE: this module is Python 2 (print statement, dict.iteritems).
  """
  defaultLogLevel = 'NOTICE'
  def __init__( self ):
    self._minLevel = 0
    self._showCallingFrame = False
    self._systemName = False
    self._outputList = []
    self._subLoggersDict = {}
    self._logLevels = LogLevels()
    self.__backendOptions = { 'showHeaders' : True, 'showThreads' : False, 'Color' : False }
    self.__preinitialize()
    self.__initialized = False
  def initialized( self ):
    return self.__initialized
  def showHeaders( self, yesno = True ):
    self.__backendOptions[ 'showHeaders' ] = yesno
  def showThreadIDs( self, yesno = True ):
    self.__backendOptions[ 'showThreads' ] = yesno
  def registerBackends( self, desiredBackends ):
    # Replace the active backends with the named ones; unknown backend
    # names are reported through the logger itself and skipped.
    self._backendsDict = {}
    for backend in desiredBackends:
      backend = backend.lower()
      if not backend in gBackendIndex:
        self.warn( "Unexistant method for showing messages",
                   "Unexistant %s logging method" % backend )
      else:
        self._backendsDict[ backend ] = gBackendIndex[ backend ]( self.__backendOptions )
  def __preinitialize ( self ):
    """ This sets some defaults
    """
    self._systemName = "Framework"
    self.registerBackends( [ 'stdout' ] )
    self._minLevel = self._logLevels.getLevelValue( "NOTICE" )
    # HACK to take into account dev levels before the command line is fully parsed
    debLevs = 0
    for arg in sys.argv:
      if arg.find( "-d" ) == 0:
        debLevs += arg.count( "d" )
    if debLevs == 1:
      self.setLevel( "VERBOSE" )
    elif debLevs == 2:
      self.setLevel( "VERBOSE" )
      self.showHeaders( True )
    elif debLevs >= 3:
      self.setLevel( "DEBUG" )
      self.showHeaders( True )
      self.showThreadIDs()
  def initialize( self, systemName, cfgPath ):
    """Configure backends, level and framing from the configuration system.

    systemName -- component name shown in message headers
    cfgPath    -- CS path holding LogBackends/LogLevel/... options
    Only the first call has any effect.
    """
    if self.__initialized:
      return
    self.__initialized = True
    from DIRAC.ConfigurationSystem.Client.Config import gConfig
    from os import getpid
    # self.__printDebug( "The configuration path is %s" % cfgPath )
    # Get the options for the different output backends
    retDict = gConfig.getOptionsDict( "%s/BackendsOptions" % cfgPath )
    # self.__printDebug( retDict )
    if not retDict[ 'OK' ]:
      cfgBackOptsDict = { 'FileName': 'Dirac-log_%s.log' % getpid(), 'Interactive': True, 'SleepTime': 150 }
    else:
      cfgBackOptsDict = retDict[ 'Value' ]
    self.__backendOptions.update( cfgBackOptsDict )
    if 'FileName' not in self.__backendOptions:
      self.__backendOptions[ 'FileName' ] = 'Dirac-log_%s.log' % getpid()
    sleepTime = 150
    try:
      sleepTime = int ( self.__backendOptions[ 'SleepTime' ] )
    except:
      pass
    self.__backendOptions[ 'SleepTime' ] = sleepTime
    self.__backendOptions[ 'Interactive' ] = gConfig.getValue( "%s/BackendsOptions/Interactive" % cfgPath, True )
    self.__backendOptions[ 'Site' ] = DIRAC.siteName()
    self.__backendOptions[ 'Color' ] = gConfig.getValue( "%s/LogColor" % cfgPath, False )
    # Configure outputs
    desiredBackends = gConfig.getValue( "%s/LogBackends" % cfgPath, 'stdout' )
    self.registerBackends( List.fromChar( desiredBackends ) )
    # Configure verbosity
    defaultLevel = Logger.defaultLogLevel
    if "Scripts" in cfgPath:
      defaultLevel = gConfig.getValue( '/Systems/Scripts/LogLevel', Logger.defaultLogLevel )
    self.setLevel( gConfig.getValue( "%s/LogLevel" % cfgPath, defaultLevel ) )
    # Configure framing
    self._showCallingFrame = gConfig.getValue( "%s/LogShowLine" % cfgPath, self._showCallingFrame )
    # Get system name
    self._systemName = str( systemName )
    if not self.__backendOptions['Interactive']:
      # Make sure buffered backends are flushed when the process exits
      ExitCallback.registerExitCallback( self.flushAllMessages )
  def setLevel( self, levelName ):
    """Set the minimum level shown; returns False for unknown level names."""
    levelName = levelName.upper()
    if levelName in self._logLevels.getLevels():
      self._minLevel = abs( self._logLevels.getLevelValue( levelName ) )
      return True
    return False
  def getLevel( self ):
    """
    Return the level name of the logger
    """
    return self._logLevels.getLevel( self._minLevel )
  def getAllPossibleLevels( self ):
    """
    Return a list of all the levels available
    """
    return self._logLevels.getLevels()
  def shown( self, levelName ):
    """Return whether messages at *levelName* would currently be displayed."""
    levelName = levelName.upper()
    if levelName in self._logLevels.getLevels():
      return self._minLevel <= abs(self._logLevels.getLevelValue( levelName ))
    return False
  def getName( self ):
    """
    Return the system/component name
    """
    return self._systemName
  def always( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.always,
                              sMsg,
                              sVarMsg )
  def notice( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.notice,
                              sMsg,
                              sVarMsg )
  def info( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.info,
                              sMsg,
                              sVarMsg )
  def verbose( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.verbose,
                              sMsg,
                              sVarMsg )
  def debug( self, sMsg, sVarMsg = '' ):
    # In case of S_ERROR structure make full string representation
    if self.__testLevel( self._logLevels.debug ):
      if isReturnStructure( sMsg ):
        sMsg = reprReturnErrorStructure( sMsg, full = True )
      if isReturnStructure( sVarMsg ):
        sVarMsg = reprReturnErrorStructure( sVarMsg, full = True )
      return self._sendMessage( self._logLevels.debug,
                                sMsg,
                                sVarMsg )
    return False
  def warn( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.warn,
                              sMsg,
                              sVarMsg )
  def error( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.error,
                              sMsg,
                              sVarMsg )
  def exception( self, sMsg = "", sVarMsg = '', lException = False, lExcInfo = False ):
    # Append the formatted exception/traceback to the variable part.
    if self.__testLevel( self._logLevels.exception ):
      if sVarMsg:
        sVarMsg += "\n%s" % self.__getExceptionString( lException, lExcInfo )
      else:
        sVarMsg = "\n%s" % self.__getExceptionString( lException, lExcInfo )
      return self._sendMessage( self._logLevels.exception,
                                sMsg,
                                sVarMsg )
    return False
  def fatal( self, sMsg, sVarMsg = '' ):
    return self._sendMessage( self._logLevels.fatal,
                              sMsg,
                              sVarMsg )
  def showStack( self ):
    return self._sendMessage( self._logLevels.debug, '', '' )
  def _sendMessage( self, level, msgText, variableText ):
    """Build a Message object and hand it to processMessage if the level passes."""
    if self.__testLevel( level ):
      messageObject = Message( self._systemName,
                               level,
                               Time.dateTime(),
                               msgText,
                               variableText,
                               self.__discoverCallingFrame() )
      return self.processMessage( messageObject )
    return False
  def processMessage( self, messageObject ):
    if self.__testLevel( messageObject.getLevel() ):
      if not messageObject.getName():
        messageObject.setName( self._systemName )
      self._processMessage( messageObject )
      return True
    return False
  def __testLevel( self, sLevel ):
    # True when sLevel is at or above the configured minimum level.
    return abs( self._logLevels.getLevelValue( sLevel ) ) >= self._minLevel
  def _processMessage( self, messageObject ):
    # Fan the message out to every registered backend.
    for backend in self._backendsDict:
      self._backendsDict[ backend ].doMessage( messageObject )
  def __getExceptionString( self, lException = False, lExcInfo = False ):
    """
    Return a formated string with exception and traceback information
    If lExcInfo is present: full traceback
    Elif lException is present: only last call traceback
    Else: no traceback
    """
    if lExcInfo:
      if isinstance( lExcInfo, bool ):
        lExcInfo = sys.exc_info()
      # Get full traceback
      stack = "".join( traceback.format_tb( lExcInfo[2] ) )
      exceptType = lExcInfo[0].__name__
      value = lExcInfo[1]
    elif lException:
      # This is useless but makes pylint happy
      if not lException:
        lException = Exception()
      lExcInfo = sys.exc_info()
      try:
        args = lException.args
      except:
        return "Passed exception to the logger is not a valid Exception: %s" % str( lException )
      exceptType = lException.__class__.__name__
      value = ','.join( [str( arg ) for arg in args] )
      # Only print out last part of the traceback
      stack = traceback.format_tb( lExcInfo[2] )[-1]
    else:
      lExcInfo = sys.exc_info()
      stack = ""
      exceptType = lExcInfo[0].__name__
      value = lExcInfo[1]
    return "== EXCEPTION == %s\n%s\n%s: %s\n===============" % ( exceptType, stack, exceptType, value )
  def __discoverCallingFrame( self ):
    # Only pay the inspect cost when frames are both enabled and shown.
    if self.__testLevel( self._logLevels.debug ) and self._showCallingFrame:
      oActualFrame = inspect.currentframe()
      lOuterFrames = inspect.getouterframes( oActualFrame )
      lCallingFrame = lOuterFrames[2]
      return "%s:%s" % ( lCallingFrame[1].replace( sys.path[0], "" )[1:], lCallingFrame[2] )
    else:
      return ""
  def __getExtendedExceptionString( self, lException = None ):
    """
    Print the usual traceback information, followed by a listing of all the
    local variables in each frame.
    """
    if lException:
      tb = lException[2]
    else:
      tb = sys.exc_info()[2]
    if not tb:
      return
    while 1:
      if not tb.tb_next:
        break
      tb = tb.tb_next
    stack = []
    f = tb.tb_frame
    while f:
      stack.append( f )
      f = f.f_back
    stack.reverse()
    # traceback.print_exc()
    sExtendedException = "Locals by frame, innermost last\n"
    for frame in stack:
      sExtendedException += "\n"
      sExtendedException += "Frame %s in %s at line %s\n" % ( frame.f_code.co_name,
                                                             frame.f_code.co_filename,
                                                             frame.f_lineno )
      for key, value in frame.f_locals.iteritems():
        # We have to be careful not to cause a new error in our error
        # printer! Calling str() on an unknown object could cause an
        # error we don't want.
        try:
          sExtendedException += "\t%20s = %s\n" % ( key, value )
        except:
          sExtendedException += "\t%20s = <ERROR WHILE PRINTING VALUE>\n" % key
    return sExtendedException
  def __getStackString( self ):
    """ This function returns the stack as a string to be printed via
    a debug message, the upper 3 levels are skipped since they correspond
    to gLogger.showStack, self.__getStackString, traceback.print_stack
    """
    stack_list = traceback.extract_stack()
    return ''.join( traceback.format_list( stack_list[:-2] ) )
  def flushAllMessages( self, exitCode = 0 ):
    for backend in self._backendsDict:
      self._backendsDict[ backend ].flush()
  def getSubLogger( self, subName, child = True ):
    """Return (and cache) a SubSystemLogger named *subName*."""
    from DIRAC.FrameworkSystem.private.logging.SubSystemLogger import SubSystemLogger
    if not subName in self._subLoggersDict.keys():
      self._subLoggersDict[ subName ] = SubSystemLogger( subName, self, child )
    return self._subLoggersDict[ subName ]
  def __printDebug( self, debugString ):
    """ This function is implemented to debug problems with initialization
    of the logger. We have to use it because the Logger is obviously unusable
    during its initialization.
    """
    if DEBUG:
      print debugString
|
Andrew-McNab-UK/DIRAC
|
FrameworkSystem/private/logging/Logger.py
|
Python
|
gpl-3.0
| 12,267
|
[
"DIRAC"
] |
72475ca84d2ba0ee05790e6fbad0855f5f5feb6c1431be67f874788cde38f94b
|
from setuptools import setup
# PyPI trove classifiers describing supported Python versions and audience.
pypi_classifiers = [
    'Programming Language :: Python :: 3',
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Operating System :: OS Independent",
    'Intended Audience :: Science/Research',
    'Natural Language :: English',
    'Topic :: Scientific/Engineering :: Bio-Informatics',
    "Topic :: Software Development :: Libraries :: Python Modules",
    'License :: OSI Approved :: MIT License',
]
# Runtime dependencies (minimum tested versions).
install_requires = [
    "pandas>=0.20.3",
    'biopython>=1.70',
]
# Long one-line description for PyPI (backslashes join the literal).
desc = """Scan genomes for internally repeated sequences, elements which are \
repetitive in another species, or high-identity HGT candidate regions between \
species."""
setup(name='mimeo',
      version='1.1.1',
      description=desc,
      url='https://github.com/Adamtaranto/mimeo',
      author='Adam Taranto',
      author_email='adam.taranto@anu.edu.au',
      license='MIT',
      packages=['mimeo'],
      classifiers=pypi_classifiers,
      keywords=["Transposon", "TE", "WGA", "LASTZ", "Whole genome alignment",
                "repeat", "transposition"],
      install_requires=install_requires,
      include_package_data=True,
      zip_safe=False,
      # Command-line entry points exposed on install.
      entry_points={
          'console_scripts': [
              'mimeo-self=mimeo.run_self:main',
              'mimeo-x=mimeo.run_interspecies:main',
              'mimeo-map=mimeo.run_map:main',
              'mimeo-filter=mimeo.run_filter:main',
          ],
      },
      )
|
Adamtaranto/mimeo
|
setup.py
|
Python
|
mit
| 1,464
|
[
"Biopython"
] |
8bca96f31dff1742edef4fdd436aef5b016e7f6a180d4a597102ef16c3466801
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions."""
import collections
import os
from os import path
from absl import flags
import flax
import jax
import jax.numpy as jnp
import jax.scipy as jsp
import numpy as np
from PIL import Image
import yaml
from jaxnerf.nerf import datasets
BASE_DIR = "jaxnerf"
INTERNAL = False
@flax.struct.dataclass
class TrainState:
  """Immutable (pytree) training state holding the flax optimizer."""
  optimizer: flax.optim.Optimizer
@flax.struct.dataclass
class Stats:
  """Scalar per-step training statistics.

  loss/psnr and loss_c/psnr_c presumably refer to the fine and coarse
  models respectively (confirm against the training loop); weight_l2 is
  the weight-decay penalty term.
  """
  loss: float
  psnr: float
  loss_c: float
  psnr_c: float
  weight_l2: float
Rays = collections.namedtuple("Rays", ("origins", "directions", "viewdirs"))
def namedtuple_map(fn, tup):
  """Apply `fn` to every field of `tup`, returning the same namedtuple type."""
  mapped = (fn(field) for field in tup)
  return type(tup)(*mapped)
def define_flags():
  """Define flags for both training and evaluation modes.

  Flags are grouped into dataset, model, train and eval sections; the same
  set is registered for every entry point, and any of them may also be set
  through a YAML config file (see update_flags).
  """
  flags.DEFINE_string("train_dir", None, "where to store ckpts and logs")
  flags.DEFINE_string("data_dir", None, "input data directory.")
  flags.DEFINE_string("config", None,
                      "using config files to set hyperparameters.")
  # Dataset Flags
  # TODO(pratuls): rename to dataset_loader and consider cleaning up
  flags.DEFINE_enum("dataset", "blender",
                    list(k for k in datasets.dataset_dict.keys()),
                    "The type of dataset feed to nerf.")
  flags.DEFINE_enum(
      "batching", "single_image", ["single_image", "all_images"],
      "source of ray sampling when collecting training batch,"
      "single_image for sampling from only one image in a batch,"
      "all_images for sampling from all the training images.")
  flags.DEFINE_bool(
      "white_bkgd", True, "using white color as default background."
      "(used in the blender dataset only)")
  flags.DEFINE_integer("batch_size", 1024,
                       "the number of rays in a mini-batch (for training).")
  flags.DEFINE_integer("factor", 4,
                       "the downsample factor of images, 0 for no downsample.")
  flags.DEFINE_bool("spherify", False, "set for spherical 360 scenes.")
  flags.DEFINE_bool(
      "render_path", False, "render generated path if set true."
      "(used in the llff dataset only)")
  flags.DEFINE_integer(
      "llffhold", 8, "will take every 1/N images as LLFF test set."
      "(used in the llff dataset only)")
  flags.DEFINE_bool(
      "use_pixel_centers", False,
      "If True, generate rays through the center of each pixel. Note: While "
      "this is the correct way to handle rays, it is not the way rays are "
      "handled in the original NeRF paper. Setting this TRUE yields ~ +1 PSNR "
      "compared to Vanilla NeRF.")
  # Model Flags
  flags.DEFINE_string("model", "nerf", "name of model to use.")
  flags.DEFINE_float("near", 2., "near clip of volumetric rendering.")
  flags.DEFINE_float("far", 6., "far clip of volumentric rendering.")
  flags.DEFINE_integer("net_depth", 8, "depth of the first part of MLP.")
  flags.DEFINE_integer("net_width", 256, "width of the first part of MLP.")
  flags.DEFINE_integer("net_depth_condition", 1,
                       "depth of the second part of MLP.")
  flags.DEFINE_integer("net_width_condition", 128,
                       "width of the second part of MLP.")
  flags.DEFINE_float("weight_decay_mult", 0, "The multiplier on weight decay")
  flags.DEFINE_integer(
      "skip_layer", 4, "add a skip connection to the output vector of every"
      "skip_layer layers.")
  flags.DEFINE_integer("num_rgb_channels", 3, "the number of RGB channels.")
  flags.DEFINE_integer("num_sigma_channels", 1,
                       "the number of density channels.")
  flags.DEFINE_bool("randomized", True, "use randomized stratified sampling.")
  flags.DEFINE_integer("min_deg_point", 0,
                       "Minimum degree of positional encoding for points.")
  flags.DEFINE_integer("max_deg_point", 10,
                       "Maximum degree of positional encoding for points.")
  flags.DEFINE_integer("deg_view", 4,
                       "Degree of positional encoding for viewdirs.")
  flags.DEFINE_integer(
      "num_coarse_samples", 64,
      "the number of samples on each ray for the coarse model.")
  flags.DEFINE_integer("num_fine_samples", 128,
                       "the number of samples on each ray for the fine model.")
  flags.DEFINE_bool("use_viewdirs", True, "use view directions as a condition.")
  flags.DEFINE_float(
      "noise_std", None, "std dev of noise added to regularize sigma output."
      "(used in the llff dataset only)")
  flags.DEFINE_bool("lindisp", False,
                    "sampling linearly in disparity rather than depth.")
  flags.DEFINE_string("net_activation", "relu",
                      "activation function used within the MLP.")
  flags.DEFINE_string("rgb_activation", "sigmoid",
                      "activation function used to produce RGB.")
  flags.DEFINE_string("sigma_activation", "relu",
                      "activation function used to produce density.")
  flags.DEFINE_bool(
      "legacy_posenc_order", False,
      "If True, revert the positional encoding feature order to an older version of this codebase."
  )
  # Train Flags
  flags.DEFINE_float("lr_init", 5e-4, "The initial learning rate.")
  flags.DEFINE_float("lr_final", 5e-6, "The final learning rate.")
  flags.DEFINE_integer(
      "lr_delay_steps", 0, "The number of steps at the beginning of "
      "training to reduce the learning rate by lr_delay_mult")
  flags.DEFINE_float(
      "lr_delay_mult", 1., "A multiplier on the learning rate when the step "
      "is < lr_delay_steps")
  flags.DEFINE_float("grad_max_norm", 0.,
                     "The gradient clipping magnitude (disabled if == 0).")
  flags.DEFINE_float("grad_max_val", 0.,
                     "The gradient clipping value (disabled if == 0).")
  flags.DEFINE_integer("max_steps", 1000000,
                       "the number of optimization steps.")
  flags.DEFINE_integer("save_every", 10000,
                       "the number of steps to save a checkpoint.")
  flags.DEFINE_integer("print_every", 100,
                       "the number of steps between reports to tensorboard.")
  flags.DEFINE_integer(
      "render_every", 5000, "the number of steps to render a test image,"
      "better to be x00 for accurate step time record.")
  flags.DEFINE_integer("gc_every", 10000,
                       "the number of steps to run python garbage collection.")
  # Eval Flags
  flags.DEFINE_bool(
      "eval_once", True,
      "evaluate the model only once if true, otherwise keeping evaluating new"
      "checkpoints if there's any.")
  flags.DEFINE_bool("save_output", True,
                    "save predicted images to disk if True.")
  flags.DEFINE_integer(
      "chunk", 8192,
      "the size of chunks for evaluation inferences, set to the value that"
      "fits your GPU/TPU memory.")
def update_flags(args):
  """Overwrite fields of `args` with the contents of its config YAML file.

  Raises:
    ValueError: if the YAML file contains a key that is not already an
      attribute of `args` (guards against typos in config files).
  """
  config_path = path.join(BASE_DIR, args.config + ".yaml")
  with open_file(config_path, "r") as fin:
    loaded = yaml.load(fin, Loader=yaml.FullLoader)
  # Only allow keys that already exist as attributes on `args`.
  invalid_args = list(set(loaded.keys()) - set(dir(args)))
  if invalid_args:
    raise ValueError(f"Invalid args {invalid_args} in {config_path}.")
  args.__dict__.update(loaded)
def open_file(pth, mode="r"):
if not INTERNAL:
return open(pth, mode=mode)
def file_exists(pth):
  """Return whether `pth` exists (external build only)."""
  if INTERNAL:
    return None
  return path.exists(pth)
def listdir(pth):
  """List the entries of directory `pth` (external build only)."""
  if INTERNAL:
    return None
  return os.listdir(pth)
def isdir(pth):
  """Return whether `pth` is a directory (external build only)."""
  if INTERNAL:
    return None
  return path.isdir(pth)
def makedirs(pth):
  """Create the directory tree `pth` (external build only)."""
  if INTERNAL:
    return
  os.makedirs(pth)
def render_image(render_fn, rays, rng, normalize_disp, chunk=8192):
  """Render all the pixels of an image (in test mode).

  The flattened rays are processed in `chunk`-sized slices; each slice is
  padded up to a multiple of the device count, split across hosts and sharded
  across local devices before being handed to `render_fn`.

  Args:
    render_fn: function, jit-ed render function.
    rays: a `Rays` namedtuple, the rays to be rendered.
    rng: jnp.ndarray, random number generator (used in training mode only).
    normalize_disp: bool, if true then normalize `disp` to [0, 1].
    chunk: int, the size of chunks to render sequentially.

  Returns:
    rgb: jnp.ndarray, rendered color image.
    disp: jnp.ndarray, rendered disparity image.
    acc: jnp.ndarray, rendered accumulated weights per pixel.
  """
  height, width = rays[0].shape[:2]
  num_rays = height * width
  # Flatten the (height, width, ...) ray fields into one batch dimension.
  rays = namedtuple_map(lambda r: r.reshape((num_rays, -1)), rays)
  unused_rng, key_0, key_1 = jax.random.split(rng, 3)
  host_id = jax.host_id()
  results = []
  for i in range(0, num_rays, chunk):
    # pylint: disable=cell-var-from-loop
    chunk_rays = namedtuple_map(lambda r: r[i:i + chunk], rays)
    chunk_size = chunk_rays[0].shape[0]
    # Edge-pad the final chunk so its size divides the global device count.
    rays_remaining = chunk_size % jax.device_count()
    if rays_remaining != 0:
      padding = jax.device_count() - rays_remaining
      chunk_rays = namedtuple_map(
          lambda r: jnp.pad(r, ((0, padding), (0, 0)), mode="edge"), chunk_rays)
    else:
      padding = 0
    # After padding the number of chunk_rays is always divisible by
    # host_count.
    rays_per_host = chunk_rays[0].shape[0] // jax.host_count()
    # Each host renders only its own slice of the chunk.
    start, stop = host_id * rays_per_host, (host_id + 1) * rays_per_host
    chunk_rays = namedtuple_map(lambda r: shard(r[start:stop]), chunk_rays)
    # [-1]: keep only the fine-level output; x[0]: presumably drops a
    # replicated leading axis from pmap — TODO confirm against render_fn.
    chunk_results = render_fn(key_0, key_1, chunk_rays)[-1]
    results.append([unshard(x[0], padding) for x in chunk_results])
    # pylint: enable=cell-var-from-loop
  rgb, disp, acc = [jnp.concatenate(r, axis=0) for r in zip(*results)]
  # Normalize disp for visualization for ndc_rays in llff front-facing scenes.
  if normalize_disp:
    disp = (disp - disp.min()) / (disp.max() - disp.min())
  return (rgb.reshape((height, width, -1)), disp.reshape(
      (height, width, -1)), acc.reshape((height, width, -1)))
def compute_psnr(mse):
  """Peak signal-to-noise ratio from an MSE (maximum pixel value assumed 1).

  Args:
    mse: float, mean square error of pixels.

  Returns:
    psnr: float, the psnr value in dB.
  """
  return -10. * jnp.log10(mse)
def compute_ssim(img0,
                 img1,
                 max_val,
                 filter_size=11,
                 filter_sigma=1.5,
                 k1=0.01,
                 k2=0.03,
                 return_map=False):
  """Computes SSIM from two images.

  This function was modeled after tf.image.ssim, and should produce comparable
  output.

  Args:
    img0: array. An image of size [..., width, height, num_channels].
    img1: array. An image of size [..., width, height, num_channels].
    max_val: float > 0. The maximum magnitude that `img0` or `img1` can have.
    filter_size: int >= 1. Window size.
    filter_sigma: float > 0. The bandwidth of the Gaussian used for filtering.
    k1: float > 0. One of the SSIM dampening parameters.
    k2: float > 0. One of the SSIM dampening parameters.
    return_map: Bool. If True, will cause the per-pixel SSIM "map" to returned

  Returns:
    Each image's mean SSIM, or a tensor of individual values if `return_map`.
  """
  # Construct a 1D Gaussian blur filter.
  hw = filter_size // 2
  shift = (2 * hw - filter_size + 1) / 2  # 0 for odd sizes, 0.5 for even.
  f_i = ((jnp.arange(filter_size) - hw + shift) / filter_sigma)**2
  filt = jnp.exp(-0.5 * f_i)
  filt /= jnp.sum(filt)
  # Blur in x and y (faster than the 2D convolution).
  filt_fn1 = lambda z: jsp.signal.convolve2d(z, filt[:, None], mode="valid")
  filt_fn2 = lambda z: jsp.signal.convolve2d(z, filt[None, :], mode="valid")
  # Vmap the blurs to the tensor size, and then compose them.
  # map_axes = all leading batch axes plus the trailing channel axis, so only
  # the two spatial axes are convolved over.
  num_dims = len(img0.shape)
  map_axes = tuple(list(range(num_dims - 3)) + [num_dims - 1])
  for d in map_axes:
    filt_fn1 = jax.vmap(filt_fn1, in_axes=d, out_axes=d)
    filt_fn2 = jax.vmap(filt_fn2, in_axes=d, out_axes=d)
  filt_fn = lambda z: filt_fn1(filt_fn2(z))
  # Gaussian-weighted local means, variances and covariance.
  mu0 = filt_fn(img0)
  mu1 = filt_fn(img1)
  mu00 = mu0 * mu0
  mu11 = mu1 * mu1
  mu01 = mu0 * mu1
  sigma00 = filt_fn(img0**2) - mu00
  sigma11 = filt_fn(img1**2) - mu11
  sigma01 = filt_fn(img0 * img1) - mu01
  # Clip the variances and covariances to valid values.
  # Variance must be non-negative:
  sigma00 = jnp.maximum(0., sigma00)
  sigma11 = jnp.maximum(0., sigma11)
  # |covariance| is bounded by sqrt(var0 * var1); keep the original sign.
  sigma01 = jnp.sign(sigma01) * jnp.minimum(
      jnp.sqrt(sigma00 * sigma11), jnp.abs(sigma01))
  c1 = (k1 * max_val)**2
  c2 = (k2 * max_val)**2
  numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
  denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
  ssim_map = numer / denom
  # Mean over the spatial axes and channels of every image in the batch.
  ssim = jnp.mean(ssim_map, list(range(num_dims - 3, num_dims)))
  return ssim_map if return_map else ssim
def save_img(img, pth):
  """Save an image to disk as PNG.

  Args:
    img: jnp.ndarry, [height, width, channels], img will be clipped to [0, 1]
      before saved to pth.
    pth: string, path to save the image to.
  """
  # Quantize on the host with numpy. Use np.uint8 (not jnp.uint8): the buffer
  # handed to PIL is a plain numpy array, so there is no reason to involve jax
  # dtypes here, and the original double np.array(...) wrapping was redundant.
  quantized = (np.clip(np.asarray(img), 0., 1.) * 255.).astype(np.uint8)
  with open_file(pth, "wb") as imgout:
    Image.fromarray(quantized).save(imgout, "PNG")
def learning_rate_decay(step,
                        lr_init,
                        lr_final,
                        max_steps,
                        lr_delay_steps=0,
                        lr_delay_mult=1):
  """Continuous learning rate decay function.

  The returned rate is lr_init when step=0 and lr_final when step=max_steps,
  and is log-linearly interpolated elsewhere (equivalent to exponential
  decay). If lr_delay_steps>0, the rate is additionally scaled by a smooth
  function of lr_delay_mult so that it starts at lr_init*lr_delay_mult and
  eases back to the normal schedule once step >= lr_delay_steps.

  Args:
    step: int, the current optimization step.
    lr_init: float, the initial learning rate.
    lr_final: float, the final learning rate.
    max_steps: int, the number of steps during optimization.
    lr_delay_steps: int, the number of steps to delay the full learning rate.
    lr_delay_mult: float, the multiplier on the rate when delaying it.

  Returns:
    lr: the learning rate for the current step.
  """
  if lr_delay_steps > 0:
    # A kind of reverse cosine decay: ramps from lr_delay_mult up to 1.
    warmup = np.clip(step / lr_delay_steps, 0, 1)
    delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
        0.5 * np.pi * warmup)
  else:
    delay_rate = 1.
  # Log-linear interpolation between lr_init and lr_final.
  t = np.clip(step / max_steps, 0, 1)
  log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
  return delay_rate * log_lerp
def shard(xs):
  """Split data into shards for multiple devices along the first dimension."""
  n_local = jax.local_device_count()
  # jax.tree_util.tree_map is the stable spelling of jax.tree_map.
  return jax.tree_util.tree_map(
      lambda x: x.reshape((n_local, -1) + x.shape[1:]), xs)
def to_device(xs):
  """Transfer every array leaf of `xs` to the default device (GPU/TPU)."""
  return jax.tree_util.tree_map(jnp.array, xs)
def unshard(x, padding=0):
  """Merge the leading device axis back and drop `padding` trailing rows."""
  merged = x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
  return merged[:-padding] if padding > 0 else merged
|
google-research/google-research
|
jaxnerf/nerf/utils.py
|
Python
|
apache-2.0
| 15,504
|
[
"Gaussian"
] |
83fd35577594eb0e49e0d167ac82ee1cc5500314cb3c6784c5188b28a495100e
|
#!/usr/bin python3.5
"""
Provide code and solution for Application 4
"""
import math
import random
import matplotlib.pyplot as plt
# import alg_project4_solution as student
import matrix_and_alignment_func as student
# Names of the local data files (not URLs): PAM50 scoring matrix, the human
# and fruitfly eyeless protein sequences, the PAX-domain consensus sequence,
# and a scrabble word list.
PAM50 = "alg_PAM50.txt"
HUMAN_EYELESS = "alg_HumanEyelessProtein.txt"
FRUITFLY_EYELESS = "alg_FruitflyEyelessProtein.txt"
CONSENSUS_PAX = "alg_ConsensusPAXDomain.txt"
WORD_LIST = "assets_scrabble_words3.txt"
###############################################
# provided code
def read_scoring_matrix(filename):
    """
    Read a scoring matrix from the file named filename.

    Argument:
    filename -- name of file containing a scoring matrix

    Returns:
    A dictionary of dictionaries mapping X and Y characters to scores
    """
    scoring_dict = {}
    # Context manager guarantees the file handle is closed (the original
    # leaked it).
    with open(filename) as scoring_file:
        ykeychars = scoring_file.readline().split()
        for line in scoring_file:
            vals = line.split()
            xkey = vals.pop(0)
            scoring_dict[xkey] = {ykey: int(val)
                                  for ykey, val in zip(ykeychars, vals)}
    return scoring_dict
def read_protein(filename):
    """
    Read a protein sequence from the file named filename.

    Arguments:
    filename -- name of file containing a protein sequence

    Returns:
    A string representing the protein
    """
    # Context manager guarantees the file handle is closed (the original
    # leaked it).
    with open(filename) as protein_file:
        return protein_file.read().rstrip()
def read_words(filename):
    """
    Load word list from the file named filename.

    Returns a list of strings.
    """
    # Context manager guarantees the file handle is closed (the original
    # leaked it).
    with open(filename) as word_file:
        words = word_file.read()
    # Keep split('\n') rather than splitlines(): a trailing newline still
    # yields a final empty string, exactly as callers saw before.
    word_list = words.split('\n')
    print("Loaded a dictionary with", len(word_list), "words")
    return word_list
def delete_all(string, key='-'):
    """Remove every occurrence of key from string, one character at a time.

    Note: at each found occurrence only the single character at the match
    position is dropped (so for multi-character keys the remainder of the
    match is re-scanned), exactly as the original recursive version did.

    The original implementation recursed once per occurrence and hit Python's
    recursion limit on inputs with ~1000+ matches; this version is iterative.
    """
    new_string = ''
    rest = string
    while True:
        idx = rest.find(key)
        if idx == -1:
            return new_string + rest
        new_string += rest[:idx]
        rest = rest[idx + 1:]
def similarity_percentage(seq, consensus):
    """Fraction of agreeing positions in the global alignment of seq
    (with dashes removed) against the consensus sequence."""
    stripped = delete_all(seq, '-')
    align_matrix = student.compute_alignment_matrix(stripped,
                                                    consensus,
                                                    scoring_matrix,
                                                    True)
    _, seq_align, con_align = student.compute_global_alignment(stripped,
                                                               consensus,
                                                               scoring_matrix,
                                                               align_matrix)
    matches = sum(1 for pair in zip(seq_align, con_align)
                  if pair[0] == pair[1])
    return float(matches) / len(seq_align)
def shuffle_sequence(seq):
    """Return a string with the characters of seq in random order."""
    chars = list(seq)
    random.shuffle(chars)
    return ''.join(chars)
def generate_null_distribution(seq_x,
                               seq_y,
                               scoring_matrix,
                               num_trials):
    """Histogram of local-alignment scores of seq_x against num_trials
    random shuffles of seq_y (maps score -> count)."""
    scoring_distribution = {}
    for _ in range(num_trials):
        rand_y = shuffle_sequence(seq_y)
        align_matrix = student.compute_alignment_matrix(seq_x,
                                                        rand_y,
                                                        scoring_matrix,
                                                        False)
        score = student.compute_local_alignment(seq_x,
                                                rand_y,
                                                scoring_matrix,
                                                align_matrix)[0]
        # dict.get replaces the original try/except KeyError counting.
        scoring_distribution[score] = scoring_distribution.get(score, 0) + 1
    return scoring_distribution
# Load the two eyeless proteins and the PAM50 scoring matrix, then compute
# their best local alignment (Application question 1).
human_eyeless_protein = read_protein(HUMAN_EYELESS)
fruitfly_eyeless_protein = read_protein(FRUITFLY_EYELESS)
scoring_matrix = read_scoring_matrix(PAM50)
score, align_x, align_y = student.compute_local_alignment(human_eyeless_protein,
                                                          fruitfly_eyeless_protein,
                                                          scoring_matrix,
                                                          student.compute_alignment_matrix(human_eyeless_protein,
                                                                                           fruitfly_eyeless_protein,
                                                                                           scoring_matrix,
                                                                                           False))
consensus_PAX_domain = read_protein(CONSENSUS_PAX)
# Null-distribution experiment (question 4); left commented out because it
# performs 1000 alignment trials and is slow.
# dist = generate_null_distribution(human_eyeless_protein,
#                                   fruitfly_eyeless_protein,
#                                   scoring_matrix,
#                                   1000)
def edit_dist(seq_x, seq_y, scoring_matrix):
    """Edit distance computed as |x| + |y| - score(x, y), where score is the
    global alignment score under the given scoring matrix."""
    align_matrix = student.compute_alignment_matrix(seq_x,
                                                    seq_y,
                                                    scoring_matrix,
                                                    True)
    score = student.compute_global_alignment(seq_x,
                                             seq_y,
                                             scoring_matrix,
                                             align_matrix)[0]
    return len(seq_x) + len(seq_y) - score
def check_spelling(checked_word, dist, word_list):
    """Set of words in word_list within edit distance dist of checked_word."""
    # Scoring matrix with diag_score=2, off_diag_score=1, dash_score=0 makes
    # the |x|+|y|-score formula equal the edit distance.
    scoring_matrix = student.build_scoring_matrix(set('abcdefghijklmnopqrstuvwxyz'), 2, 1, 0)
    return {word for word in word_list
            if edit_dist(checked_word, word, scoring_matrix) <= dist}
# Spell-check demo (Application question 8): words within edit distance 1 of
# 'humble' and within edit distance 2 of 'firefly'.
word_list = read_words(WORD_LIST)
humble = check_spelling('humble', 1, word_list)
firefly = check_spelling('firefly', 2, word_list)
|
MohamedAbdultawab/FOC_RiceUniv
|
algorithmic-thinking-2/module-4-project-and-application/02_application-4-applications-to-genomics-and-beyond/alg_application4_provided.py
|
Python
|
gpl-3.0
| 6,270
|
[
"Firefly"
] |
9f33510bb5b6c3456fdf4c53cc8b49c8a017851975962ff67ec2098a1a7b2ade
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import argparse as arg
from datetime import datetime
from collections import OrderedDict
import QM_parser.parser as parser
def checkfile(filename):
    """Terminate the program if filename does not name an existing file."""
    if os.path.isfile(filename):
        return
    print(" File %s not found!" % filename)
    sys.exit()
def fill_dict(filename):
    '''Fills the dictionary of options for chromophores contained in filename.

    Each non-comment, non-empty line is "<chrom> <field> <field> ...".
    Fields are stored as ints if they all parse as ints, else floats, else
    strings. Returns an empty OrderedDict when filename is None.
    '''
    opts = OrderedDict()
    # Handle the possibility of a None object (os.path.isfile(None) inside
    # checkfile raises TypeError, which we swallow).
    try:
        checkfile(filename)
        with open(filename) as f:
            for line in f:
                #
                # Ignore comments and empty lines
                #
                if line.startswith('#'):
                    continue
                if not line.strip():
                    continue
                tokens = line.split()
                chrom, data = tokens[0], tokens[1:]
                #
                # Try to understand whether data should be stored as int,
                # float or string. List comprehensions (not bare map()) make
                # the conversion eager, so the ValueError fires *here* and the
                # fallback actually triggers on Python 3 too; on Python 2
                # map() already returned a list, so behaviour is unchanged.
                #
                try:
                    data = [int(d) for d in data]
                except ValueError:
                    try:
                        data = [float(d) for d in data]
                    except ValueError:
                        data = [str(d) for d in data]
                opts[chrom] = data
    except TypeError:
        pass
    return opts
def options():
    '''Defines the options of the script.

    Parses the command line, merges it with the optional settings file
    (command-line values win), prepares the output directory and loads every
    auxiliary data file via fill_dict.

    Returns the tuple (Opts, TempDict, StatesDict, EnergiesDict, DipolesDict,
    CentersDict, ChgsDict, CubsDict, VibsDict, VibLvlDict, HRDict).
    '''
    # NOTE: this local `parser` (argparse) shadows the module-level
    # QM_parser.parser import for the duration of this function.
    parser = arg.ArgumentParser(description='Excitonic Calculations',
                                formatter_class=arg.ArgumentDefaultsHelpFormatter)
    #
    # Input files
    #
    inp = parser.add_argument_group("Input Data")
    inp.add_argument('-s', '--settings',
                     default=None, type=str, dest="SettingsFile",
                     help='''Settings file''')
    inp.add_argument('-t', '--templates',
                     default=None, type=str, dest="TempFile",
                     help=arg.SUPPRESS)
    # help='''Chomophores list and templates file''')
    inp.add_argument('--states',
                     default=None, type=str, dest="StatesFile",
                     help=arg.SUPPRESS)
    # help='''States modification file''')
    inp.add_argument('-e', '--energies',
                     default=None, type=str, dest="EnergiesFile",
                     help=arg.SUPPRESS)
    # help='''Energies file''')
    inp.add_argument('-d', '--dipoles',
                     default=None, type=str, dest="DipolesFile",
                     help=arg.SUPPRESS)
    # help='''Dipoles file''')
    inp.add_argument('--centers',
                     default=None, type=str, dest="CentersFile",
                     help=arg.SUPPRESS)
    # help='''Centers file''')
    inp.add_argument('--centersmode',
                     default=None, type=str, choices=["idx", "coor"], dest="CentersMode",
                     help=arg.SUPPRESS)
    # help='''How to read data in Centers File''')
    inp.add_argument('-c', '--coups',
                     default=None, type=str, dest="CoupsFile",
                     help='''Electronic Couplings file''')
    inp.add_argument('--chgs',
                     default=None, type=str, dest="ChgsFile",
                     help=arg.SUPPRESS)
    # help='''Transition Charges file''')
    inp.add_argument('--cubs',
                     default=None, type=str, dest="CubsFile",
                     help=arg.SUPPRESS)
    # help='''Transition Densities Cubes file''')
    inp.add_argument('--vibs',
                     default=None, type=str, dest="VibsFile",
                     help=arg.SUPPRESS)
    # help='''Vibrational Frequencies File''')
    inp.add_argument('--vibq',
                     default=None, type=str, dest="VibLvlFile",
                     help=arg.SUPPRESS)
    # help='''Vibrational Levels File''')
    inp.add_argument('--hr',
                     default=None, type=str, dest="HRFile",
                     help=arg.SUPPRESS)
    # help='''Huang-Rhys factors File''')
    #
    # Calculations Options
    #
    calc = parser.add_argument_group("Calculation Options")
    calc.add_argument('--select',
                      default=False, action="store_true", dest="Select",
                      help='''Select states according to the State
                      Modification file''')
    calc.add_argument('--coupcalc',
                      default="pda", choices=["pda", "chgs", "tdc"],
                      type=lambda s : s.lower(), dest="CoupCalc",
                      help='''Coupling Calculation Method''')
    #
    # Spectra Options
    #
    spec = parser.add_argument_group("Spectra Convolution Options")
    spec.add_argument('--ls',
                      default="gau", type=str, choices=["gau", "lor"],
                      dest="LineShape", help='''Spectral LineShape.''')
    spec.add_argument('--lw',
                      default=[1500], type=float, nargs='+', dest="LineWidth",
                      help='''Spectral LineWidth in wavenumbers (gamma for Lorentzian,
                      sigma for Gaussian LineShape.''')
    spec.add_argument('--unit',
                      default="eV", type=str, choices=["eV", "wn", "nm"], dest="SpecUnit",
                      help='''X axis unit for plotting Spectra.''')
    #
    # Output Options
    #
    out = parser.add_argument_group("Output Options")
    out.add_argument('-o', '--out',
                     default=None, type=str, dest="OutPref",
                     help='''Output Prefix for files''')
    out.add_argument('--outdir',
                     default="output", type=str, dest="OutDir",
                     help='''Output Directory''')
    out.add_argument('--savesite',
                     default=False, action="store_true", dest="SaveSite",
                     help='''Save properties in also site basis''')
    out.add_argument('--figext',
                     default=None, type=str, choices=["svg", "png", "eps"],
                     dest="FigExt", help='''Format for image output''')
    out.add_argument('--savefigs',
                     default=False, action="store_true", dest="SaveFigs",
                     help='''Save figures''')
    out.add_argument('-v', '--verbosity',
                     default=0, action="count", dest="Verb",
                     help='''Verbosity level''')
    #
    # Parse and create the Options Dictionary
    #
    args = parser.parse_args()
    Opts = vars(args)
    #
    # Set Default Folders for some options
    #
    Opts['WorkDir'] = os.getcwd()
    Opts['TempPath'] = "templates"
    Opts['DipolesPath'] = "dipoles"
    Opts['ChgsPath'] = "chgs"
    Opts['CubsPath'] = "cubs"
    # Keep SaveFigs and FigExt consistent with each other.
    if Opts['SaveFigs'] and not Opts['FigExt']:
        Opts['FigExt'] = "svg"
    if Opts['FigExt'] and not Opts['SaveFigs']:
        Opts['SaveFigs'] = True
    #
    # Process Settings file
    #
    # Handle the possibility of a None object
    try:
        checkfile(Opts['SettingsFile'])
        with open(Opts['SettingsFile']) as f:
            for line in f:
                # NOTE(review): lowercasing the whole line also lowercases any
                # file names given in the settings file — confirm intended.
                line = line.lower()
                #
                # Ignore comments
                #
                if line.startswith("#"):
                    continue
                #
                # Each of the following options is used only if the related
                # command line argument has not been passed by the user
                # If an option is present both in the settings file and in
                # the command line options, the command line option is given
                # priority
                #
                if line.startswith("temp") and not Opts['TempFile']:
                    Opts['TempFile'] = line.split()[1]
                    try:
                        Opts['TempPath'] = line.split()[2]
                    except IndexError:
                        pass
                if line.startswith("stat") and not Opts['StatesFile']:
                    Opts['StatesFile'] = line.split()[1]
                    try:
                        if line.split()[2] == "select":
                            Opts['Select'] = True
                    except IndexError:
                        pass
                if line.startswith("ene") and not Opts['EnergiesFile']:
                    Opts['EnergiesFile'] = line.split()[1]
                if line.startswith("dip") and not Opts['DipolesFile']:
                    Opts['DipolesFile'] = line.split()[1]
                    try:
                        Opts['DipolesPath'] = line.split()[2]
                    except IndexError:
                        pass
                if line.startswith("cent") and not Opts['CentersFile']:
                    Opts['CentersFile'] = line.split()[1]
                    try:
                        Opts['CentersMode'] = line.split()[2]
                    except IndexError:
                        Opts['CentersMode'] = "idx"
                # "coup" lines may carry either a couplings file name or a
                # coupling-calculation method keyword.
                if line.startswith("coup") and not Opts['CoupsFile']:
                    opt = line.split()[1]
                    if opt.lower() not in ["pda", "chgs", "tdc"]:
                        Opts['CoupsFile'] = line.split()[1]
                    else:
                        Opts['CoupCalc'] = opt
                if line.startswith("charges") and not Opts['ChgsFile']:
                    Opts['ChgsFile'] = line.split()[1]
                    try:
                        Opts['ChgsPath'] = line.split()[2]
                    except IndexError:
                        pass
                if line.startswith("cub") and not Opts['CubsFile']:
                    Opts['CubsFile'] = line.split()[1]
                    try:
                        Opts['CubsPath'] = line.split()[2]
                    except IndexError:
                        pass
                # NOTE(review): the guard below re-checks CubsFile — this
                # looks like a copy-paste slip (a spectra-related guard seems
                # intended); confirm before relying on "spec" settings lines.
                if line.startswith("spec") and not Opts['CubsFile']:
                    Opts['LineShape'] = line.split()[1]
                    try:
                        Opts['LineWidth'] = map(float, line.split()[2:])
                    except IndexError:
                        pass
                if line.startswith("vib") and not Opts['VibsFile']:
                    Opts['VibsFile'] = line.split()[1]
                if line.startswith("lvl") and not Opts['VibLvlFile']:
                    Opts['VibLvlFile'] = line.split()[1]
                if line.startswith("hr") and not Opts['HRFile']:
                    Opts['HRFile'] = line.split()[1]
    except TypeError:
        pass
    #
    # Sort Opts Dict for later printing
    #
    Opts = OrderedDict(sorted(Opts.items()))
    # Build the output directory path; with a prefix, the directory becomes
    # "<prefix>.<outdir>" and file names gain a "<prefix>." prefix.
    if Opts['OutPref']:
        Opts['OutDir'] = Opts['OutPref'] + '.' + Opts['OutDir']
        Opts['OutDir'] = os.path.join(Opts['WorkDir'], Opts['OutDir'])
        Opts['OutPref'] = os.path.join(Opts['OutDir'], Opts['OutPref'] + '.')
    else:
        Opts['OutPref'] = os.path.join(Opts['OutDir'], '')
    # Create the output folder; an existing one is renamed with a timestamp
    # suffix instead of being overwritten.
    if not os.path.exists(Opts['OutDir']):
        os.makedirs(Opts['OutDir'])
    else:
        add = datetime.now().strftime('%d%b%y_%H%M%S')
        os.rename(Opts['OutDir'], Opts['OutDir'] + "_" + add)
        os.makedirs(Opts['OutDir'])
        pass
    # Load every auxiliary dictionary (fill_dict returns {} for None inputs).
    TempDict = fill_dict(Opts["TempFile"])
    StatesDict = fill_dict(Opts["StatesFile"])
    EnergiesDict = fill_dict(Opts["EnergiesFile"])
    DipolesDict = fill_dict(Opts["DipolesFile"])
    CentersDict = fill_dict(Opts["CentersFile"])
    ChgsDict = fill_dict(Opts["ChgsFile"])
    CubsDict = fill_dict(Opts["CubsFile"])
    VibsDict = fill_dict(Opts["VibsFile"])
    VibLvlDict = fill_dict(Opts["VibLvlFile"])
    HRDict = fill_dict(Opts["HRFile"])
    # Set one vibrational quantum if Vibrations are specified and quanta aren't
    # NOTE(review): .iteritems() is Python 2 only; this module predates Py3.
    if VibsDict and not VibLvlDict:
        VibLvlDict = OrderedDict((k, [1] * len(v)) for k, v in VibsDict.iteritems())
    return Opts, TempDict, StatesDict, EnergiesDict, DipolesDict, CentersDict, ChgsDict, CubsDict, VibsDict, VibLvlDict, HRDict
def fill_chrom_dict(TempDict, WorkDir=None, path=None):
    '''Creates a dictionary linking Chromophores names with their object
    instance.

    For each chromophore, the file <WorkDir>/<chrom>/<chrom>.{out,log,xyz}
    is tried in order; the first readable one is parsed. If the chromophore
    has a template entry, the template is projected onto the parsed
    structure's coordinates instead. Chromophores with no readable file get
    an empty parser.Chrom() placeholder.
    '''
    ChromDict = OrderedDict()
    #
    # Possible file extensions for chromophores
    #
    exts = [".out", ".log", ".xyz"]
    for chrom in TempDict.keys():
        basename = os.path.join(WorkDir, chrom, chrom)
        for ext in exts:
            filename = basename + ext
            #
            # Find out the chromophore class
            # Try to open a file
            #
            try:
                chromobj = parser.guess(filename)
                # if a template is required, project the template on the real
                # structure
                try:
                    temp_name = TempDict[chrom][0]
                    template = os.path.join(path, temp_name)
                    tempobj = parser.guess(template)
                    tempobj.transform(chromobj.coords)
                    ChromDict[chrom] = tempobj
                except IndexError:
                    # No template entry for this chromophore: keep the parsed
                    # structure itself.
                    ChromDict[chrom] = chromobj
                    pass
                # This break is necessary to avoid loading an xyz file in case
                # in the folder there are both an out (or log) and an xyz file
                break
            except IOError:
                pass
        # If no object was assigned to the chromophore, create an empty one
        # This is for dummy chromophores to be used in runs where the user
        # gives input data without needing a structure
        if chrom not in ChromDict:
            ChromDict[chrom] = parser.Chrom()
    return ChromDict
# This module is meant to be imported by the driver script; running it
# directly does nothing.
if __name__ == '__main__':
    pass
|
dpadula85/ExSPy
|
dev/Opts.py
|
Python
|
gpl-3.0
| 14,029
|
[
"Gaussian"
] |
34b53b96712030b701ed4de3b9b813470fc32bf1a07b83f52252b947b856cc05
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
import sphinx
from docutils.parsers.rst import directives
from pkg_resources import parse_version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Docutils alone does not know the Sphinx-specific directives used in scipy
# docstrings; register or strip them depending on the installed Sphinx.
if parse_version(sphinx.__version__) >= parse_version('1.5'):
    # Enable specific Sphinx directives
    from sphinx.directives.other import SeeAlso, Only
    directives.register_directive('seealso', SeeAlso)
    directives.register_directive('only', Only)
else:
    # Remove sphinx directives that don't run without Sphinx environment.
    # Sphinx < 1.5 installs all directives on import...
    directives._directives.pop('versionadded', None)
    directives._directives.pop('versionchanged', None)
    directives._directives.pop('moduleauthor', None)
    directives._directives.pop('sectionauthor', None)
    directives._directives.pop('codeauthor', None)
    directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fft',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'signal.windows',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'spatial.transform',
'special',
'stats',
'stats.mstats',
'stats.contingency',
'stats.qmc',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'scipy.optimize.show_options',
'scipy.integrate.quad_explain',
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.special\.jn', # alias for jv
r'scipy\.ndimage\.sum', # alias for sum_labels
r'scipy\.integrate\.simps', # alias for simpson
r'scipy\.integrate\.trapz', # alias for trapezoid
r'scipy\.integrate\.cumtrapz', # alias for cumulative_trapezoid
r'scipy\.linalg\.solve_lyapunov', # deprecated name
r'scipy\.stats\.contingency\.chi2_contingency',
r'scipy\.stats\.contingency\.expected_freq',
r'scipy\.stats\.contingency\.margins',
r'scipy\.stats\.reciprocal',
r'scipy\.stats\.trapz', # alias for trapezoid
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
    """
    Return relative or absolute path name, whichever is shortest.

    Non-string inputs are returned unchanged.
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    return absolute if len(absolute) <= len(relative) else relative
def find_names(module, names_dict):
    """Collect names listed in the refguide portion of `module.__doc__` into
    `names_dict`, keyed by the module they are documented under."""
    # Refguide entries:
    #
    # - 3 spaces followed by function name, and maybe some spaces, some
    #   dashes, and an explanation; only function names listed in
    #   refguide are formatted like this (mostly, there may be some false
    #   positives)
    #
    # - special directives, such as data and function
    #
    # - (scipy.constants only): quoted list
    #
    raw_patterns = [
        r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
        r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
    ]
    if module.__name__ == 'scipy.constants':
        raw_patterns += ["^``([a-z_0-9A-Z]+)``"]
    entry_patterns = [re.compile(p) for p in raw_patterns]
    switch_re = re.compile(
        r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$")
    current_module = module.__name__
    for line in module.__doc__.splitlines():
        switch = switch_re.search(line)
        if switch:
            # A currentmodule/module directive changes the attribution of all
            # subsequent entries.
            current_module = switch.group(1)
            continue
        for pattern in entry_patterns:
            hit = pattern.match(line)
            if hit is not None:
                names_dict.setdefault(current_module, set()).add(hit.group(1))
                break
def get_all_dict(module):
    """Return a copy of the __all__ dict with irrelevant items removed.

    Returns (not_deprecated, deprecated, others) where `others` are the
    remaining attributes of the module.
    """
    if hasattr(module, "__all__"):
        candidates = copy.deepcopy(module.__all__)
    else:
        candidates = copy.deepcopy(dir(module))
    # Drop private names and the __future__ re-exports.
    candidates = [name for name in candidates if not name.startswith("_")]
    for leftover in ('absolute_import', 'division', 'print_function'):
        if leftover in candidates:
            candidates.remove(leftover)
    # Modules are almost always private; real submodules need a separate
    # run of refguide_check.
    candidates = [name for name in candidates
                  if not inspect.ismodule(getattr(module, name, None))]
    deprecated = []
    not_deprecated = []
    for name in candidates:
        obj = getattr(module, name, None)
        if callable(obj) and is_deprecated(obj):
            deprecated.append(name)
        else:
            not_deprecated.append(name)
    others = set(dir(module)) - set(deprecated) - set(not_deprecated)
    return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
    """Return sets of objects only in __all__, refguide, or completely missing."""
    only_all = set()
    for name in all_dict:
        if name in names:
            continue
        full_name = module_name + '.' + name
        # Skip names that are deliberately left out of the autosummary.
        if not any(re.match(pat, full_name)
                   for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST):
            only_all.add(name)
    only_ref = set()
    missing = set()
    for name in names:
        if name in all_dict:
            continue
        full_name = module_name + '.' + name
        if any(re.match(pat, full_name) for pat in REFGUIDE_ALL_SKIPLIST):
            # Not required in __all__, but must at least exist on the module.
            if name not in others:
                missing.add(name)
        else:
            only_ref.add(name)
    return only_all, only_ref, missing
def is_deprecated(f):
    """Return True if calling `f` raises DeprecationWarning.

    The probe calls f with a bogus keyword argument; deprecated wrappers warn
    before validating arguments, so the warning (escalated to an error) fires
    first. Any other exception means "not deprecated".
    """
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            return True
        except Exception:
            pass
        return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
    """Compare `__all__` against the refguide listing and build a report.

    Returns a single-element list [(None, success_flag, report_text)] in the
    same shape as the other check_* helpers.
    """
    num_all = len(all_dict)
    num_ref = len(names)
    output = ""
    output += "Non-deprecated objects in __all__: %i\n" % num_all
    output += "Objects in refguide: %i\n\n" % num_ref
    only_all, only_ref, missing = compare(all_dict, others, names, module_name)
    # Deprecated objects that still appear in the refguide are reported
    # separately and excluded from the "only in refguide" error below.
    dep_in_ref = only_ref.intersection(deprecated)
    only_ref = only_ref.difference(deprecated)
    if len(dep_in_ref) > 0:
        output += "Deprecated objects in refguide::\n\n"
        for name in sorted(deprecated):
            output += " " + name + "\n"
    if len(only_all) == len(only_ref) == len(missing) == 0:
        if dots:
            output_dot('.')
        return [(None, True, output)]
    else:
        if len(only_all) > 0:
            output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
            for name in sorted(only_all):
                output += " " + name + "\n"
            output += "\nThis issue can be fixed by adding these objects to\n"
            output += "the function listing in __init__.py for this module\n"
        if len(only_ref) > 0:
            output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
            for name in sorted(only_ref):
                output += " " + name + "\n"
            output += "\nThis issue should likely be fixed by removing these objects\n"
            output += "from the function listing in __init__.py for this module\n"
            output += "or adding them to __all__.\n"
        if len(missing) > 0:
            output += "ERROR: missing objects::\n\n"
            for name in sorted(missing):
                output += " " + name + "\n"
        if dots:
            output_dot('F')
        return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
    """Run `text` through docutils and report reStructuredText problems.

    Returns (success, output) where `output` holds the filtered error report
    (plus a numbered dump of `text` when there were real errors).
    """
    if text is None:
        if dots:
            output_dot('E')
        return False, "ERROR: %s: no documentation" % (name,)
    # Roles/directives docutils does not know but which are valid in the
    # Sphinx build; "unknown" errors about them are discarded below.
    ok_unknown_items = set([
        'mod', 'currentmodule', 'autosummary', 'data',
        'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',
        'ref', 'func', 'toctree', 'moduleauthor', 'deprecated',
        'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
    ])
    # Run through docutils
    error_stream = io.StringIO()
    def resolve(name, is_label=False):
        # Dummy name resolver so unresolved references do not error out.
        return ("http://foo", name)
    token = '<RST-VALIDATE-SYNTAX-CHECK>'
    docutils.core.publish_doctree(
        text, token,
        settings_overrides = dict(halt_level=5,
                                  traceback=True,
                                  default_reference_context='title-reference',
                                  default_role='emphasis',
                                  link_base='',
                                  resolve_name=resolve,
                                  stylesheet_path='',
                                  raw_enabled=0,
                                  file_insertion_enabled=0,
                                  warning_stream=error_stream))
    # Print errors, disregarding unimportant ones
    error_msg = error_stream.getvalue()
    errors = error_msg.split(token)
    success = True
    output = ""
    for error in errors:
        lines = error.splitlines()
        if not lines:
            continue
        m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
        if m:
            if m.group(1) in ok_unknown_items:
                continue
        # docutils' "math" directive does not support :label:, Sphinx's does.
        m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
        if m:
            continue
        output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
        success = False
    if not success:
        # Append the source with line numbers so errors can be located.
        output += " " + "-"*72 + "\n"
        for lineno, line in enumerate(text.splitlines()):
            output += " %-4d %s\n" % (lineno+1, line)
        output += " " + "-"*72 + "\n\n"
    if dots:
        output_dot('.' if success else 'F')
    return success, output
def output_dot(msg='.', stream=sys.stderr):
    """Emit a single progress-marker character (e.g. '.', 'F', 'E') to
    *stream*, flushing immediately so it appears on the console at once."""
    print(msg, end='', file=stream, flush=True)
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Returns: [(name, success_flag, output), ...]
    """
    # `unicode` only exists on Python 2; fall back to the Python-3 tuple.
    try:
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # python 3
        skip_types = (dict, str, float, int)

    results = []

    # [6:] strips a fixed-length package prefix before the lookup —
    # presumably "scipy." (len 6); TODO confirm against BASE_MODULE.
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)

        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            # Plain data objects are not expected to carry their own docs.
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                # get_doc_object renders the docstring through numpydoc.
                text = str(get_doc_object(obj))
            except Exception:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue

        # Control characters (except \n, \t handled below) usually mean a
        # forgotten raw-string prefix on a docstring with escapes.
        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        # getsourcefile raises TypeError for builtins/extension objects.
        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
    return results
### Doctest helpers ####

# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}

# the namespace to do checks in: names that may appear in expected doctest
# *output* so that the output strings can be eval-ed back into objects for
# numerical comparison (see Checker.check_output).
CHECK_NAMESPACE = {
      'np': np,
      'assert_allclose': np.testing.assert_allclose,
      'assert_equal': np.testing.assert_equal,
      # recognize numpy repr's
      'array': np.array,
      'matrix': np.matrix,  # NOTE(review): np.matrix is deprecated in numpy — confirm still required
      'int64': np.int64,
      'uint64': np.uint64,
      'int8': np.int8,
      'int32': np.int32,
      'float32': np.float32,
      'float64': np.float64,
      'dtype': np.dtype,
      'nan': np.nan,
      'NaN': np.nan,
      'inf': np.inf,
      'Inf': np.inf,}
def try_convert_namedtuple(got):
    """Convert a namedtuple repr string into a plain tuple string.

    E.g. "MoodResult(statistic=10, pvalue=0.1)" becomes "(10, 0.1)", so the
    caller can later compare tuples instead of exact reprs.

    Parameters
    ----------
    got : str
        Doctest output, possibly the repr of a namedtuple.

    Returns
    -------
    str
        The tuple-like string, or `got` unchanged when it does not look
        like a namedtuple repr.
    """
    # suppose that "got" is smth like MoodResult(statistic=10, pvalue=0.1).
    # Then convert it to the tuple (10, 0.1), so that can later compare tuples.
    num = got.count('=')
    if num == 0:
        # not a namedtuple, bail out
        return got
    regex = (r'[\w\d_]+\(' +
             ', '.join([r'[\w\d_]+=(.+)']*num) +
             r'\)')
    grp = re.findall(regex, got.replace('\n', ' '))
    if not grp:
        # BUGFIX: '=' signs are present but the string does not match the
        # namedtuple shape; previously grp[0] raised IndexError here.
        return got
    # With a single group re.findall returns strings, not 1-tuples; wrap it
    # so the join below does not iterate over individual characters.
    fields = grp[0] if isinstance(grp[0], tuple) else (grp[0],)
    # fold it back to a tuple
    got_again = '(' + ', '.join(fields) + ')'
    return got_again
class DTRunner(doctest.DocTestRunner):
    """DocTestRunner that tags reports with an item name and suppresses
    cascading NameErrors after the first unexpected exception in a test."""

    DIVIDER = "\n"

    def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
        # item_name: label for the object whose doctests are being run.
        self._item_name = item_name
        self._had_unexpected_error = False
        doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
                                       optionflags=optionflags)

    def _report_item_name(self, out, new_line=False):
        # Emit a separating newline before the first report for this item,
        # then clear the name so this only happens once per item.
        if self._item_name is not None:
            if new_line:
                out("\n")
            self._item_name = None

    def report_start(self, out, test, example):
        # Expose the example's source to the Checker, which consults it for
        # "stopwords" (e.g. plotting calls whose output is not checked).
        self._checker._source = example.source
        return doctest.DocTestRunner.report_start(self, out, test, example)

    def report_success(self, out, test, example, got):
        if self._verbose:
            self._report_item_name(out, new_line=True)
        return doctest.DocTestRunner.report_success(self, out, test, example, got)

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Ignore name errors after failing due to an unexpected exception
        # (a failed import/assignment earlier makes later names undefined).
        exception_type = exc_info[0]
        if self._had_unexpected_error and exception_type is NameError:
            return
        self._had_unexpected_error = True
        self._report_item_name(out)
        return super().report_unexpected_exception(
            out, test, example, exc_info)

    def report_failure(self, out, test, example, got):
        self._report_item_name(out)
        return doctest.DocTestRunner.report_failure(self, out, test,
                                                    example, got)
class Checker(doctest.OutputChecker):
    """OutputChecker with relaxed comparisons for numpy/scipy doctests.

    Accepts outputs that differ textually from the expected value when:
    the source line contains a plotting "stopword"; the expected output is
    marked random; the output is an object address; or the two sides are
    numerically close after eval-ing them in CHECK_NAMESPACE.  Namedtuple
    reprs are compared as plain tuples.
    """

    obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
    vanilla = doctest.OutputChecker()
    # Markers in the *expected* output flagging it as non-deterministic.
    rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
    # Fragments of example *source* (mostly matplotlib calls) whose output
    # is not meaningful and therefore not checked.
    stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
                 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
                 '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
                 '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
                 '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}

    def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
        # atol/rtol: tolerances for the numeric comparison in _do_check.
        self.parse_namedtuples = parse_namedtuples
        self.atol, self.rtol = atol, rtol
        if ns is None:
            self.ns = dict(CHECK_NAMESPACE)
        else:
            self.ns = ns

    def check_output(self, want, got, optionflags):
        # NOTE: self._source is injected by DTRunner.report_start before
        # each example is checked.

        # cut it short if they are equal
        if want == got:
            return True

        # skip stopwords in source
        if any(word in self._source for word in self.stopwords):
            return True

        # skip random stuff
        if any(word in want for word in self.rndm_markers):
            return True

        # skip function/object addresses
        if self.obj_pattern.search(got):
            return True

        # ignore comments (e.g. signal.freqresp)
        if want.lstrip().startswith("#"):
            return True

        # try the standard doctest
        try:
            if self.vanilla.check_output(want, got, optionflags):
                return True
        except Exception:
            pass

        # OK then, convert strings to objects
        try:
            a_want = eval(want, dict(self.ns))
            a_got = eval(got, dict(self.ns))
        except Exception:
            # Maybe we're printing a numpy array? This produces invalid python
            # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
            # values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
            # (2) n-dim arrays with n > 1
            s_want = want.strip()
            s_got = got.strip()
            cond = (s_want.startswith("[") and s_want.endswith("]") and
                    s_got.startswith("[") and s_got.endswith("]"))
            if cond:
                s_want = ", ".join(s_want[1:-1].split())
                s_got = ", ".join(s_got[1:-1].split())
                return self.check_output(s_want, s_got, optionflags)

            if "=" not in want and "=" not in got:
                # if we're here, want and got cannot be eval-ed (hence cannot
                # be converted to numpy objects), they are not namedtuples
                # (those must have at least one '=' sign).
                # Thus they should have compared equal with vanilla doctest.
                # Since they did not, it's an error.
                return False

            if not self.parse_namedtuples:
                return False

            # suppose that "want" is a tuple, and "got" is smth like
            # MoodResult(statistic=10, pvalue=0.1).
            # Then convert the latter to the tuple (10, 0.1),
            # and then compare the tuples.
            try:
                got_again = try_convert_namedtuple(got)
                want_again = try_convert_namedtuple(want)
            except Exception:
                return False
            else:
                return self.check_output(want_again, got_again, optionflags)

        # ... and defer to numpy
        try:
            return self._do_check(a_want, a_got)
        except Exception:
            # heterog tuple, eg (1, np.array([1., 2.]))
            try:
                return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
            except (TypeError, ValueError):
                return False

    def _do_check(self, want, got):
        # This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
        try:
            if want == got:
                return True
        except Exception:
            pass
        return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
    """Run modified doctests for the set of `tests`.

    Returns: (success_flag, output_text)
    """
    flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
    runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
                      verbose=verbose)

    output = io.StringIO(newline='')
    success = True

    # Redirect stderr to the stdout or output
    tmp_stderr = sys.stdout if doctest_warnings else output

    from scipy._lib._util import _fixed_default_rng

    @contextmanager
    def temp_cwd():
        # Run examples in a throwaway temp directory so files they create
        # do not pollute the working tree; restore cwd afterward.
        cwd = os.getcwd()
        tmpdir = tempfile.mkdtemp()
        try:
            os.chdir(tmpdir)
            yield tmpdir
        finally:
            os.chdir(cwd)
            shutil.rmtree(tmpdir)

    # Run tests, trying to restore global state afterward
    cwd = os.getcwd()
    with np.errstate(), np.printoptions(), temp_cwd(), \
            redirect_stderr(tmp_stderr), \
            _fixed_default_rng():
        # try to ensure random seed is NOT reproducible
        np.random.seed(None)

        for t in tests:
            # Shorten filenames in reports to paths relative to cwd.
            t.filename = short_path(t.filename, cwd)
            fails, successes = runner.run(t, out=output.write)
            if fails > 0:
                success = False

    output.seek(0)
    return success, output.read()
def check_doctests(module, verbose, ns=None,
                   dots=True, doctest_warnings=False):
    """Check code in docstrings of the module's public symbols.

    Returns: list of [(item_name, success_flag, output), ...]
    """
    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    # Loop over non-deprecated items
    results = []

    for name in get_all_dict(module)[0]:
        full_name = module.__name__ + '.' + name

        if full_name in DOCTEST_SKIPLIST:
            continue

        try:
            obj = getattr(module, name)
        except AttributeError:
            # Listed in __all__ but not actually present on the module.
            import traceback
            results.append((full_name, False,
                            "Missing item!\n" +
                            traceback.format_exc()))
            continue

        finder = doctest.DocTestFinder()
        try:
            tests = finder.find(obj, name, globs=dict(ns))
        except Exception:
            import traceback
            results.append((full_name, False,
                            "Failed to get doctests!\n" +
                            traceback.format_exc()))
            continue

        success, output = _run_doctests(tests, full_name, verbose,
                                        doctest_warnings)

        if dots:
            output_dot('.' if success else 'F')

        results.append((full_name, success, output))

    # Close any figures the examples may have opened.
    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def check_doctests_testfile(fname, verbose, ns=None,
                            dots=True, doctest_warnings=False):
    """Check code in a text file.

    Mimic `check_doctests` above, differing mostly in test discovery.
    (which is borrowed from stdlib's doctest.testfile here,
    https://github.com/python-git/python/blob/master/Lib/doctest.py)

    Returns: list of [(item_name, success_flag, output), ...]

    Notes
    -----
    refguide can be signalled to skip testing code by adding
    ``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. for example

    >>> plt.plot(...)  # doctest: +SKIP
    >>> random.randint(0,10)
    5 # random

    We also try to weed out pseudocode:
    * We maintain a list of exceptions which signal pseudocode,
    * We split the text file into "blocks" of code separated by empty lines
      and/or intervening text.
    * If a block contains a marker, the whole block is then assumed to be
      pseudocode. It is then not being doctested.

    The rationale is that typically, the text looks like this:

    blah
    <BLANKLINE>
    >>> from numpy import some_module   # pseudocode!
    >>> func = some_module.some_function
    >>> func(42)                  # still pseudocode
    146
    <BLANKLINE>
    blah
    <BLANKLINE>
    >>> 2 + 3        # real code, doctest it
    5
    """
    results = []

    if ns is None:
        ns = dict(DEFAULT_NAMESPACE)

    _, short_name = os.path.split(fname)
    if short_name in DOCTEST_SKIPLIST:
        return results

    full_name = fname
    with open(fname, encoding='utf-8') as f:
        text = f.read()

    # Markers that identify a code block as non-runnable pseudocode.
    PSEUDOCODE = set(['some_function', 'some_module', 'import example',
                      'ctypes.CDLL',     # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
                      ])

    # split the text into "blocks" and try to detect and omit pseudocode blocks.
    parser = doctest.DocTestParser()
    good_parts = []
    for part in text.split('\n\n'):
        tests = parser.get_doctest(part, ns, fname, fname, 0)
        if any(word in ex.source for word in PSEUDOCODE
               for ex in tests.examples):
            # omit it
            pass
        else:
            # `part` looks like a good code, let's doctest it
            good_parts += [part]

    # Reassemble the good bits and doctest them:
    good_text = '\n\n'.join(good_parts)
    tests = parser.get_doctest(good_text, ns, fname, fname, 0)
    success, output = _run_doctests([tests], full_name, verbose,
                                    doctest_warnings)

    if dots:
        output_dot('.' if success else 'F')

    results.append((full_name, success, output))

    # Close any figures the examples may have opened.
    if HAVE_MATPLOTLIB:
        import matplotlib.pyplot as plt
        plt.close('all')

    return results
def init_matplotlib():
    """Set the module-level HAVE_MATPLOTLIB flag.

    Attempts to import matplotlib and, when available, selects the
    non-interactive 'Agg' backend so that plotting examples do not try
    to open windows.
    """
    global HAVE_MATPLOTLIB
    try:
        import matplotlib
    except ImportError:
        HAVE_MATPLOTLIB = False
    else:
        matplotlib.use('Agg')
        HAVE_MATPLOTLIB = True
def main(argv):
    """Command-line entry point: run refguide (and optionally doctest)
    checks over the requested submodules and report a pass/fail summary.

    Exits with status 0 on success, 1 on any failure.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("module_names", metavar="SUBMODULES", default=[],
                        nargs='*', help="Submodules to check (default: all public)")
    parser.add_argument("--doctests", action="store_true", help="Run also doctests")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument("--doctest-warnings", action="store_true",
                        help="Enforce warning checking for doctests")
    parser.add_argument("--skip-tutorial", action="store_true",
                        help="Skip running doctests in the tutorial.")
    args = parser.parse_args(argv)

    modules = []
    names_dict = {}

    if args.module_names:
        # An explicit submodule list implies a targeted run; skip the tutorial.
        args.skip_tutorial = True
    else:
        args.module_names = list(PUBLIC_SUBMODULES)

    os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'

    module_names = list(args.module_names)
    # Pull in modules whose documentation lives elsewhere (OTHER_MODULE_DOCS
    # maps a submodule to the module that actually documents it).
    for name in list(module_names):
        if name in OTHER_MODULE_DOCS:
            name = OTHER_MODULE_DOCS[name]
            if name not in module_names:
                module_names.append(name)

    for submodule_name in module_names:
        module_name = BASE_MODULE + '.' + submodule_name
        __import__(module_name)
        module = sys.modules[module_name]

        if submodule_name not in OTHER_MODULE_DOCS:
            find_names(module, names_dict)

        if submodule_name in args.module_names:
            modules.append(module)

    dots = True
    success = True
    results = []

    print("Running checks for %d modules:" % (len(modules),))

    if args.doctests or not args.skip_tutorial:
        init_matplotlib()

    for module in modules:
        if dots:
            if module is not modules[0]:
                sys.stderr.write(' ')
            sys.stderr.write(module.__name__ + ' ')
            sys.stderr.flush()

        all_dict, deprecated, others = get_all_dict(module)
        names = names_dict.get(module.__name__, set())

        mod_results = []
        mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
        mod_results += check_rest(module, set(names).difference(deprecated),
                                  dots=dots)
        if args.doctests:
            mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
                                          doctest_warnings=args.doctest_warnings)

        for v in mod_results:
            assert isinstance(v, tuple), v

        results.append((module, mod_results))

    if dots:
        sys.stderr.write("\n")
        sys.stderr.flush()

    if not args.skip_tutorial:
        base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
        tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
        print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
        for filename in sorted(glob.glob(tut_path)):
            if dots:
                sys.stderr.write('\n')
                sys.stderr.write(os.path.split(filename)[1] + ' ')
                sys.stderr.flush()

            tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
                    dots=dots, doctest_warnings=args.doctest_warnings)

            # The reporting loop below expects a module-like object with a
            # __name__; fabricate one for the tutorial file.
            def scratch():
                pass        # stub out a "module", see below
            scratch.__name__ = filename
            results.append((scratch, tut_results))

        if dots:
            sys.stderr.write("\n")
            sys.stderr.flush()

    # Report results
    all_success = True

    for module, mod_results in results:
        success = all(x[1] for x in mod_results)
        all_success = all_success and success

        if success and args.verbose == 0:
            continue

        print("")
        print("=" * len(module.__name__))
        print(module.__name__)
        print("=" * len(module.__name__))
        print("")

        for name, success, output in mod_results:
            if name is None:
                # Module-level result (e.g. check_items output).
                if not success or args.verbose >= 1:
                    print(output.strip())
                    print("")
            elif not success or (args.verbose >= 2 and output.strip()):
                print(name)
                print("-"*len(name))
                print("")
                print(output.strip())
                print("")

    if all_success:
        print("\nOK: refguide and doctests checks passed!")
        sys.exit(0)
    else:
        print("\nERROR: refguide or doctests have errors")
        sys.exit(1)


if __name__ == '__main__':
    main(argv=sys.argv[1:])
|
e-q/scipy
|
tools/refguide_check.py
|
Python
|
bsd-3-clause
| 32,497
|
[
"Gaussian"
] |
2dff2f49a0e300881713380beb79ef2ce784f610ace56f43fd6b1ccd5b30dd1c
|
# -*- coding: utf-8 -*-
#
# CampbellSiegert.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# CampbellSiegert.py
#
# Example script that applies Campbell's theorem and Siegert's rate approximation.
#
# This script calculates the firing rate of an integrate-and-fire neuron
# in response to a series of Poisson generators, each specified with
# a rate and a synaptic weight.
# The calculated rate is compared with a simulation using the iaf_psc_alpha model
#
#
#
# Sven Schrader, Nov 2008, Siegert implementation by Tom Tetzlaff
from scipy.special import erf
from scipy.optimize import fmin
import numpy
from numpy import sqrt, exp
import pylab
import nest
# ---------------------------------------------------------------------------
# Model and simulation parameters.
# ---------------------------------------------------------------------------

# example 1
weights = [0.1]    # mV psp amplitudes
rates = [8000.]    # Hz

# example 2, should have same result as example 1
#weights = [0.1, 0.1]
#rates = [4000., 4000.]

Cm = 250.          # pF, capacitance
tau_syn_ex = 0.5   # ms, synaptic time constants
tau_syn_in = 2.0   #
tau_m = 20.        # ms, membrane time constant
tref = 2.0         # ms, refractory period
V0 = 0.0           # mV, resting potential
Vth = 20.0         # mV, firing threshold

simtime = 20000    # ms
n_neurons = 10     # number of simulated neurons

# constants and SI unit conversion factors
pi = numpy.pi
e = exp(1)
pF = 1e-12
ms = 1e-3
pA = 1e-12
mV = 1e-3

mu = 0.0       # mean membrane potential (V), accumulated below
sigma2 = 0.0   # membrane potential variance (V^2), accumulated below
J = []         # PSC amplitudes, one per Poisson source

# one weight per Poisson generator is required
assert(len(weights) == len(rates))
########################################################################################
# Analytical section: Campbell's theorem for the membrane-potential mean and
# variance, then Siegert's approximation for the firing rate.

for rate, weight in zip(rates, weights):

    # pick the synaptic time constant matching the sign of the weight
    if weight >0:
        tau_s = tau_syn_ex
    else:
        tau_s = tau_syn_in

    # NOTE(review): t_psp appears unused below — candidate for removal.
    t_psp = numpy.arange(0, 10 * (tau_m*ms + tau_s*ms),0.0001 )

    # calculation of a single PSP (alpha-shaped synapse response)
    psp = lambda x: -(Cm*pF) / (tau_s*ms) * (1/(Cm*pF)) * (e/(tau_s*ms)) * \
        (((-x * exp(-x/(tau_s*ms))) / (1/(tau_s*ms )-1 / (tau_m*ms))) +\
        (exp(-x/(tau_m*ms)) - exp(-x/(tau_s*ms))) / ((1/(tau_s*ms) - 1/(tau_m*ms))**2) )

    # minimize the (negative) PSP to find its peak amplitude
    min_result = fmin(psp, [0], full_output=1, disp=0)
    fudge = -1./min_result[1] # fudge is used here to scale psC amplitude from psP amplitude
    J.append( Cm*weight/tau_s*fudge) # <-------|

    # Campbell's Theorem
    # the mean membrane potential mu and variance sigma adds up for each Poisson source
    # NOTE(review): V0 is added once per source here; harmless while V0 == 0,
    # but verify if a nonzero resting potential is ever used.
    mu += ((V0*mV) + rate * \
        (J[-1]*pA) * (tau_s*ms) * e * (tau_m*ms) / (Cm*pF))

    sigma2 += rate * \
        (2* tau_m*ms + tau_s*ms ) * \
        (J[-1]*pA * tau_s*ms *e * tau_m*ms/ ( 2 * (Cm*pF) * (tau_m*ms + tau_s*ms) ) ) ** 2

sigma = sqrt(sigma2)

# Siegert's rate approximation: numerically integrate exp(u^2)(1+erf(u))
# between the (normalized) reset and threshold potentials.
num_iterations = 100
ul = (Vth*mV - mu) / (sigma)/sqrt(2)
ll = (V0*mV - mu) / (sigma)/sqrt(2)
interval = (ul-ll)/num_iterations
tmpsum = 0.0
for cu in range(0,num_iterations+1):
    u = ll + cu * interval
    f = exp(u**2)*(1+erf(u))
    tmpsum += interval * sqrt(pi) * f
r = 1. / (tref*ms + tau_m*ms * tmpsum)   # predicted firing rate (Hz)
########################################################################################
# Simulation section: verify the analytical prediction with NEST.

nest.ResetKernel()
nest.sr('20 setverbosity')   # reduce NEST console output

neurondict = {'V_th':Vth, 'tau_m':tau_m, 'tau_syn_ex':tau_syn_ex,'tau_syn_in':tau_syn_in, 'C_m':Cm, 'E_L':V0, 't_ref':tref, 'V_m': V0, 'V_reset': V0}

# start the membrane at its predicted mean, if that is subthreshold
# (mu is in V here, the NEST parameter in mV)
if (mu*1000) < Vth:
    neurondict['V_m'] = mu*1000.

nest.SetDefaults('iaf_psc_alpha', neurondict)
n = nest.Create('iaf_psc_alpha', n_neurons)
# high threshold as we want free membrane potential
n_free = nest.Create('iaf_psc_alpha', 1 ,[{'V_th':999999.}])
pg = nest.Create('poisson_generator', len(rates), [ {'rate':float(rate_i)} for rate_i in rates] )
vm = nest.Create('voltmeter', 1, [{'record_to':['memory'], 'withtime':True, 'withgid':True, 'interval':.1}])
sd = nest.Create('spike_detector',1, [{'record_to':['memory'], 'withtime':True, 'withgid':True}])

# each Poisson generator drives all neurons with its computed PSC amplitude
for i, currentpg in enumerate(pg):
    nest.Connect([currentpg], n, syn_spec={'weight': float(J[i]), 'delay': 0.1})
    nest.Connect([currentpg], n_free, syn_spec={'weight':J[i]})

nest.Connect(vm, n_free)
nest.Connect(n, sd)

nest.Simulate(simtime)

# free membrane potential (first 100 steps are omitted)
v_free = nest.GetStatus(vm,'events')[0]['V_m'][100:-1]

# compare simulated statistics against the analytical predictions
print('mean membrane potential (actual / calculated): {0} / {1}'.format(numpy.mean(v_free), mu * 1000))
print('variance (actual / calculated): {0} / {1}'.format(numpy.var(v_free), sigma2 * 1e6))
print('firing rate (actual / calculated): {0} / {1}'.format(nest.GetStatus(sd, 'n_events')[0] / (n_neurons * simtime * ms), r))
|
INM-6/nest-git-migration
|
pynest/examples/CampbellSiegert.py
|
Python
|
gpl-2.0
| 5,481
|
[
"NEURON"
] |
07bdb7c828eea20746dd2c925711ea7fc3edda2f316cbbfc343247a91f524df0
|
import numpy as np, os, glob
from astropy.table import Table, vstack, join
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.stats import sigma_clipped_stats
from scipy.interpolate import interp1d, interp2d, RegularGridInterpolator
from scipy.sparse import lil_matrix, save_npz
from frb.halos.models import ModifiedNFW, halomass_from_stellarmass
from frb.frb import FRB
from frb.galaxies import cigale as frbcig
from frb.galaxies import eazy as frb_ez
from frb.surveys import des
from frb import defs
try:
from pathos.multiprocessing import ProcessingPool as Pool
except ImportError:
print("You will need to run 'pip install pathos' to use some functions in this module.")
try:
import progressbar
except ImportError:
print("You will need to run 'pip install progressbar2' to use some functions in this module.")
try:
from threedhst import eazyPy as ez
except ImportError:
print("You will need to run 'pip install threedhst' to read EAZY output.")
DEFAULT_DATA_FOLDER = "data"
def get_des_data(coords:SkyCoord, radius:u.Quantity=15.*u.arcmin, starbright:float=17,
                 starflagval:float=0.9, gaiacat:str=None, write:bool=False, outfile:str=None)->Table:
    """
    Download photometry for galaxies within an FRB field.

    Args:
        coords (SkyCoord): Coordinates of the center of a cone search.
        radius (Quantity, optional): Radius of cone search.
        starbright (float, optional): Lower limit of r band mag. Objects brighter
            than this will be removed.
        starflagval (float, optional): Upper limit for a morphology-based classifier
            flag. Objects more point-like (i.e. higher value)
            will be filtered out.
        gaiacat (str, optional): Optional file with gaia catalog of stars within the same search
            radius. These stars will be removed. Must contain at least two
            columns: "ra" and "dec". The values must be in decimal degrees
            and the column names are case sensitive.
        write (bool, optional): Write output table to file?
        outfile (str, optional): Path to the output file. If not given and write is True,
            the table will be written to "photom_cat_J{coords}_{radius}arcmin.fits"
            in the current working directory.

    Returns:
        photom_cat (Table): Table of DES galaxies within the search radius.
    """
    # Download catalog
    survey = des.DES_Survey(coords, radius)
    cat = survey.get_catalog()

    # Add separation info (arcmin from the search center) and sort by it
    des_coords = SkyCoord(cat['ra'],cat['dec'], unit="deg")
    dessep = coords.separation(des_coords).to('arcmin').value
    cat['separation'] = dessep
    cat.sort("separation")
    cat_colnames = cat.colnames

    # Add a convenient unique ID and move it to the first column
    cat['ID'] = np.arange(len(cat))+1
    cat = cat[['ID']+cat_colnames]

    # Make brightness and morphology cuts
    photom_cat = cat[(cat['star_flag_r']<starflagval)&(cat['DES_r']>starbright)]

    # Remove GAIA stars if given
    if gaiacat:
        gaia_tab = Table.read(gaiacat)
        gaia_coords = SkyCoord(gaia_tab['ra'], gaia_tab['dec'], unit="deg")
        # Cross-match Gaia stars against the full DES catalog (1" tolerance)
        idx, d2d, _ = gaia_coords.match_to_catalog_sky(des_coords)
        matched_des = cat[idx][d2d<1*u.arcsec]
        # Drop the matched (stellar) entries from the photometry catalog
        photom_cat = Table(np.setdiff1d(photom_cat, matched_des))

    if write:
        if outfile is None:
            coordstr = coords.to_string(style='hmsdms', sep="", precision=2).replace(" ", "")
            outfile = "photom_cat_J{:s}_{:0.1f}_arcmin.fits".format(coordstr,radius.to('arcmin').value)
        photom_cat.write(outfile, overwrite=True)

    return photom_cat
def _gen_eazy_tab(photom_cat:Table, input_dir:str="eazy_in", name:str="FRB180924", out_dir:str="eazy_out", output_tab:str="no_stars_eazy.fits")->Table:
    """
    Run EAZY on the photometry and produce p(z) estimates.

    Args:
        photom_cat (Table): Photometry catalog.
        input_dir (str, optional): Folder where EAZY config files are written.
        name (str, optional): frb name. Will be passed onto frb.galaxies.eazy.eazy_input_files
            to generate input files.
        out_dir (str, optional): Folder where EAZY output is stored.
        output_tab (str, optional): Name of the output summary table fits file.

    Returns:
        joined_tab (Table): EAZY results table joined (type:inner) with photom_cat based
            on the id/ID columns.
    """
    # Prepare EAZY
    frb_ez.eazy_input_files(photom_cat, input_dir, name, out_dir,
                            prior_filter="r", zmin=0.01)
    # Run it
    logfile = os.path.join(out_dir, "eazy_run.log")
    frb_ez.run_eazy(input_dir, name, logfile)

    # read EAZY output
    photz_file = os.path.join(out_dir, "photz.zout")
    eazy_tab = Table.read(photz_file, format="ascii")
    # Rename so the join key matches the photometry catalog's column
    eazy_tab.rename_column('id','ID')

    # Combine the input catalog with EAZY output
    joined_tab = join(photom_cat, eazy_tab, 'ID')
    joined_tab.write(output_tab, overwrite=True)
    return joined_tab
def _create_cigale_in(photom_cat:Table, zmin:float = 0.01, zmax:float=0.35, n_z:int = 35, cigale_input:str = "cigin_minz_zfrb.fits")->Table:
    """
    Take the photometry table and create a new table with redshifts.

    For each galaxy, create multiple entries with different redshifts
    uniformly spaced between `zmin` and `zmax`.

    Args:
        photom_cat (Table): Photometry catalog
        zmin (float, optional): Minimum redshift for analysis.
        zmax (float, optional): Maximum redshift for analysis.
        n_z (int, optional): Number of redshift grid points.
        cigale_input (str, optional): Name of input file to be produced.

    Returns:
        stacked_photom (Table): A table with multiple groups, one for each galaxy.
            Each entry in a group has the same photometry but different redshift values.
            This way, CIGALE can be run on the same galaxy at multiple redshift guesses
            in one go.
    """
    # Define z values
    z_range = np.linspace(zmin, zmax, n_z)
    photom_cat['redshift'] = z_range[0]  # Set up initial redshift value
    photom_cat['ID'] = photom_cat['ID'].astype(str)  # Convert from int to str
    photom_cat.sort("separation")
    # Tag each zero-padded ID with the first redshift, e.g. "00001_0.01"
    z0_tag = "_{:0.2f}".format(z_range[0])
    photom_cat['ID'] = [ID.zfill(5)+z0_tag for ID in photom_cat['ID']]

    # Create new table: one copy of the catalog per redshift grid point
    stacked_photom = photom_cat.copy()
    for z in z_range[1:]:
        newphotom = photom_cat.copy()
        newphotom['redshift'] = z
        for entry in newphotom:
            # BUGFIX: the suffix being replaced was previously hard-coded as
            # "_0.01", which silently left the IDs unchanged (and hence the
            # grouping wrong) whenever zmin != 0.01.
            entry['ID'] = entry['ID'].replace(z0_tag, "_{:0.2f}".format(z))
        stacked_photom = vstack([stacked_photom, newphotom])

    # Sort table by ID so each galaxy's redshift variants form one group
    stacked_photom = stacked_photom.group_by('ID')

    # Write to disk
    stacked_photom.write(cigale_input, overwrite=True)
    print("Wrote to disk {:s}".format(cigale_input))
    return stacked_photom
def _gen_cigale_tab(stacked_photom:Table, n_chunks:int=10, n_cores:int=25, outdir:str=DEFAULT_DATA_FOLDER)->Table:
    """
    Run CIGALE and produce a table of results.

    Args:
        stacked_photom (Table): Table with a group for each galaxy. Output of _create_cigale_in.
        n_chunks (int, optional): How many chunks do you want to split stacked_photom.groups into?
            Just so that galaxies are not redone in case of a crash.
        n_cores (int, optional): Number of CPU threads to be used.
        outdir (str, optional): Path to the output directory.

    Returns:
        full_results (Table): CIGALE output with stellar mass and error for all entries
            in stacked_photom.
    """
    chunk_size = int(len(stacked_photom.groups)/n_chunks)

    # Only compute SFH and Stellar mass.
    compute_variables = ['stellar.m_star']

    for num in range(n_chunks):
        cigale_outdir = os.path.join(outdir,"out_minz_zfrb_chunk{}".format(num))
        # Check if a chunk has already been computed (allows resuming after
        # a crash without redoing finished chunks).
        if os.path.isdir(cigale_outdir):
            print("Chunk {} has already been analyzed.".format(num))
            continue
        else:
            cig_photom = stacked_photom.groups[num*chunk_size:(num+1)*chunk_size]
            # Run cigale on each chunk of galaxies.
            frbcig.run(cig_photom, 'redshift', plot=False,
                       outdir=cigale_outdir, cores=n_cores, variables=compute_variables, save_sed=False)

    # Read and combine the CIGALE results
    cigfolders = glob.glob(os.path.join(outdir, "out_minz_zfrb_chunk*"))
    relevant_cols = ['id', 'bayes.stellar.m_star', 'bayes.stellar.m_star_err']
    all_results = []
    for folder in cigfolders:
        results = Table.read(os.path.join(folder, "results.fits"))
        all_results.append(results[relevant_cols])
    full_results = vstack(all_results)
    full_results.write(os.path.join(outdir, "cigale_full_output.fits"), overwrite=True)
    return full_results
def _load_cigale_results(cigale_input:str, cigale_output:str)->Table:
    """
    Load the CIGALE stellar mass data.

    Args:
        cigale_input (str): cigale input file path.
        cigale_output (str): cigale_output file path.

    Returns:
        trim_tab (Table): Summary table with CIGALE results
            (columns gal_ID, redshift, sep_ang, log_mstar, log_mstar_err).
    """
    cigin = Table.read(cigale_input)
    cigtab = Table.read(cigale_output)

    # Trim the output table
    trim_tab = cigtab[['id', 'bayes.stellar.m_star', 'bayes.stellar.m_star_err']]

    # produce some extra columns by parsing the composite id "NNNNN_z.zz":
    # everything up to the last 5 chars is the galaxy number, the last 4
    # chars are the redshift tag.
    trim_tab['redshift'] = 0.0
    trim_tab['gal_ID'] = 1
    for entry in trim_tab:
        entry['gal_ID'] = int(entry['id'][:-5])
        entry['redshift'] = float(entry['id'][-4:])

    # produce a column for angular separation, copied per-galaxy from the
    # input catalog
    trim_tab.sort('id')
    trim_tab = trim_tab.group_by('gal_ID')
    trim_tab['sep_ang'] = 99.0
    for group in trim_tab.groups:
        group['sep_ang'] = cigin['separation'][cigin['ID'] == group['gal_ID'][0]][0]

    # A similar column for separation in kpc
    #trim_tab['sep_kpc'] = p15.angular_diameter_distance(trim_tab['redshift']).to('kpc').value*trim_tab['sep_ang']*u.arcmin.to('rad')

    # Rename the stellar mass columns
    trim_tab.rename_columns(['bayes.stellar.m_star', 'bayes.stellar.m_star_err'],['log_mstar', 'log_mstar_err'])

    # Convert to logarithmic values. The error is computed first (from the
    # still-linear mass) as half the symmetric log interval.
    trim_tab['log_mstar_err'] = (np.log10(trim_tab['log_mstar']+trim_tab['log_mstar_err']) -
                                 np.log10(np.abs(trim_tab['log_mstar']-trim_tab['log_mstar_err'])))/2
    trim_tab['log_mstar'] = np.log10(trim_tab['log_mstar'])
    return trim_tab
def _sample_eazy_redshifts(gal_ID:int, eazy_outdir:str, ndraws:int = 1000)->np.ndarray:
    """
    Returns a sample of redshifts drawn from the
    EAZY photo-z PDF of galaxy <gal_ID>.

    Args:
        gal_ID(int): ID number of the galaxy in the EAZY table (1-based;
            converted to EAZY's 0-based index internally).
        eazy_outdir(str): Path to the EAZY results folder
        ndraws(int, optional): Number of redshift samples desired.

    Returns:
        sample_z (np.ndarray): Redshift sample array of length ndraws.
            NOTE(review): returns the scalar -99 (not an array) when the
            PDF grid is degenerate — callers must handle both cases.
    """
    # Get posterior
    zgrid, pz = ez.getEazyPz(gal_ID-1,OUTPUT_DIRECTORY=eazy_outdir)

    # Force a value of 0 at z = 0
    zgrid = np.hstack([[0],zgrid])
    pz = np.hstack([[0],pz])

    # Degenerate grid (all points identical): no usable PDF
    if np.all(np.diff(zgrid) == 0):
        return -99

    # make a CDF
    cdf_z = np.cumsum(pz)
    cdf_z /= np.max(cdf_z)
    # Invert the CDF by interpolation for inverse-transform sampling
    cdf_interp = interp1d(cdf_z, zgrid, kind="linear", fill_value=0, bounds_error=False)

    # Use uniform distribution to produce random draws from the CDF
    sample_u = np.random.rand(ndraws)
    sample_z = cdf_interp(sample_u)
    return sample_z
def _mhalo_lookup_table(z:float, npz_out:str = "m_halo_realizations", n_cores:int = 8):
    """
    For a given z, produce realizations of m_halo for relevant
    m_star values using only the uncertainty in the SHMR relation.
    Internal function. Use directly if you know what you're doing.

    Args:
        z (float): redshift
        npz_out(str, optional): output .npz file path.
        n_cores(int, optional): Number of CPU threads used for parallel processing.

    Side effects:
        Writes a compressed .npz file with arrays MSTAR (n_star,) and
        MHALO (n_star, n_halo); returns None.
    """
    # Define a range of stellar masses (log10 M_sun, fixed grid 6..11)
    n_star = 1000
    log_mstar_array = np.linspace(6, 11, n_star)

    # Instantiate a 2D array: one row of halo-mass draws per stellar mass
    n_halo = 10000
    log_mhalo_array = np.zeros((n_star, n_halo))

    def mhalo_factory(log_mstar:float, z:float, n_cores = n_cores)->np.ndarray:
        """
        Parallelize m_halo computations for a given log_mstar array.
        """
        p = Pool(n_cores)
        # randomize=True samples the SHMR's intrinsic scatter on each call
        func = lambda x: halomass_from_stellarmass(x, z = z, randomize=True)
        log_mhalo_array = p.map(func, log_mstar)
        return log_mhalo_array

    # Loop over log_mstar: draw n_halo realizations at each grid point
    for idx, log_mstar in enumerate(log_mstar_array):
        temp_log_mstar = np.full(n_halo, log_mstar)
        log_mhalo_array[idx] = mhalo_factory(temp_log_mstar, z = z, n_cores = n_cores)

    # Store this in an .npz file
    np.savez_compressed(npz_out, MSTAR=log_mstar_array, MHALO=log_mhalo_array)
    return
def mhalo_lookup_tables(z_grid:list, datafolder:str=DEFAULT_DATA_FOLDER, n_cores:int=8):
    """
    Generate one halo-mass realization file per redshift in ``z_grid``.
    Each file holds m_halo values for a fixed grid of m_star values,
    sampled from the Moster+13 SHMR relation. The files can later be
    turned into interpolators for the moments (mean, std.dev.) of the
    m_halo distribution as a function of redshift and log_mstar.
    Args:
        z_grid (list or np.ndarray): List of redshift values to be sampled.
        datafolder (str, optional): Path to the directory where the results will be stored.
        n_cores (int, optional): Number of CPU threads used for parallel processing.
    """
    for redshift in z_grid:
        outfile = os.path.join(datafolder, "mhalo_realization_z_{:0.2f}".format(redshift))
        _mhalo_lookup_table(redshift, outfile, n_cores)
    return
def _mhalo_realizations(log_mstar:float, log_mstar_err:float, z:float,
mean_interp:interp2d, stddev_interp:interp2d,
n_mstar:int=100, n_norm:int=10, max_log_mhalo:float=12.8)->np.ndarray:
"""
Using the lookup tables generated (see function mhalo_lookup_tables), produce
realiztions of mhalo. This takes into account both the stellar mass uncertainty
and the uncertainty in the SMHR relation from Moster+13.
Args:
log_mstar (float): log stellar mass in M_sun.
log_mstar_err (float): log error in log_mstar
z (float): redshift
mean_interp (interp2d): <log_mhalo(log_mstar, z)> (based on SHMR)
stddev_interp (interp2d): std.dev. log_mhalo(log_mstar, z) (based on SHMR)
n_mstrar (int, optional): Number of m_star samples to be produced.
n_norm (int, optional): Number of m_halo samples for each m_star sample.
max_log_mhalo (float, optional): Maximum allowed log halo mass. log halo masses
are capped artificially to this value if any exceed.
Returns:
mhalo_reals (np.ndarray): log_mhalo realizations.
"""
# First produce realizations of mstar from a normal distribution.
mstar_reals = np.random.normal(log_mstar, log_mstar_err, n_mstar)
# Then get mean values of halo masses for each stellar mass.
mean_mhalo_reals = mean_interp(mstar_reals, z)
mean_mhalo_reals = np.minimum(mean_mhalo_reals, max_log_mhalo) # Set a cutoff for the mean halo mass
# Then get the std. dev of the halo masses for each stellar mass.
stddev_mhalo_reals = stddev_interp(mstar_reals, z)
# Finally, produce mhalo realizations assuming a normal distribution
# with the means and std.devs from above.
dummy_normal = np.random.normal(0,1, (n_norm,n_mstar))
mhalo_reals = np.ravel(stddev_mhalo_reals*dummy_normal+mean_mhalo_reals)
return mhalo_reals
def _dm_pdf(cigale_tab:Table, eazy_outdir:str,
            mean_interp:interp2d, stddev_interp:interp2d,
            ang_dia_interp:interp1d, dm_interpolator:RegularGridInterpolator,
            n_cores:int = 8):
    """
    For a given galaxy, compute its PDF of
    DM from the CIGALE and EAZY inputs.
    Args:
        cigale_tab (Table): One of the groups
            from the full cigale result. This
            group contains data on only one galaxy
            at various assumed redshifts.
        eazy_outdir (str): Path to the directory with EAZY output
        mean_interp (interp2d): <log_mhalo(log_mstar, z)> (based on SHMR)
        stddev_interp (interp2d): std.dev. log_mhalo(log_mstar, z) (based on SHMR)
        ang_dia_interp (interp1d): angular_diameter_distance(z) (default Repo cosmology)
        dm_interpolator (RegularGridInterpolator): DM(z, offset_kpc, log_mhalo)
        n_cores (int, optional): Number of CPU threads to use.
    Returns:
        dm_values (np.ndarray): Array containing DM realizations for the galaxy.
        z_draws (np.ndarray): Array containing redshift draws from which dm_values were produced.
        Returns the scalar -99. instead when the EAZY photo-z posterior is degenerate.
    """
    # Prepare interpolation functions from the CIGALE table.
    # NOTE(review): fill_value=1 silently substitutes log_mstar = 1 (and
    # error = 1) outside the tabulated redshift range -- confirm this is
    # intended rather than e.g. np.nan.
    log_mstar_interp = interp1d(cigale_tab['redshift'], cigale_tab['log_mstar'], bounds_error=False, fill_value=1)
    log_mstar_err_interp = interp1d(cigale_tab['redshift'], cigale_tab['log_mstar_err'], bounds_error=False, fill_value=1)
    # Get random redshift draws from the EAZY photo-z posterior.
    z_draws = _sample_eazy_redshifts(cigale_tab['gal_ID'][0], eazy_outdir)
    if np.isscalar(z_draws):
        # _sample_eazy_redshifts returned its -99 sentinel (flat posterior).
        return -99.
    # Convert the photo-z draws to mean stellar masses and errors
    log_mstar_array = log_mstar_interp(z_draws)
    log_mstar_err_array = log_mstar_err_interp(z_draws)
    func = lambda idx: _mhalo_realizations(log_mstar_array[idx], log_mstar_err_array[idx], z_draws[idx], mean_interp, stddev_interp)
    # Draw stellar mass values from a normal distribution and produce halo
    # masses in parallel. Bug fix: the original never closed the Pool; the
    # context manager releases the workers deterministically.
    with Pool(n_cores) as p:
        log_mhalos = p.map(func, np.arange(len(z_draws)))
    # Repeat each z draw once per halo-mass realization it produced.
    zz_draws = np.repeat(z_draws, len(log_mhalos[0]))
    # Convert the angular separation (arcmin) to a physical offset (kpc).
    offsets = ang_dia_interp(z_draws)*cigale_tab['sep_ang'][0]*u.arcmin.to('rad')
    oo_draws = np.repeat(offsets, len(log_mhalos[0]))
    dm_values = dm_interpolator((zz_draws, oo_draws, np.concatenate(log_mhalos)))
    return dm_values, z_draws.astype('float32')  # Save memory by switching to a 32 bit representation.
def dm_grid(frb_z:float, n_z:int = 100, n_o:int = 100, n_m:int =100, max_log_mhalo:float=12.8,
            outdir:str=DEFAULT_DATA_FOLDER, outfile:str=None, n_cores:int=8)->None:
    """
    Produce DM estimates for a 3D grid of
    redshift, offsets and log_halo_masses and write
    them to disk.
    Args:
        frb_z(float): frb redshift
        n_z(int, optional): size of the redshift grid. i.e. np.linspace(0, frb_z, n_z)
        n_o(int, optional): size of the offset grid. i.e. np.linspace(0, 600, n_o)
        n_m(int, optional): size of the log_halo_mass grid. i.e. np.linspace(8, 16, n_m)
        max_log_mhalo (float, optional): DM for halo masses larger than this are currently
            set to -99.0 to prevent weirdly large DM contributions from galactic halos.
        outdir(str, optional): data directory to store results
        outfile(str, optional): name of results .npz file (within outdir).
        n_cores(int, optional): Number of CPU threads used for the halo DM
            computation (previously hard-coded to 8).
    """
    # Redshift grid
    redshifts = np.linspace(0, frb_z, n_z)
    # Offset grid (kpc)
    offsets = np.linspace(0, 600, n_o)
    # Mass grid (log10 M_sun)
    log_halo_masses = np.linspace(8, 16, n_m)
    # Flatten the full 3D grid so each point can be processed independently.
    ZZ, OO, MM = np.meshgrid(redshifts, offsets, log_halo_masses, indexing='ij')
    raveled_z = ZZ.ravel()
    raveled_o = OO.ravel()
    raveled_m = MM.ravel()
    def halo_dm(idx):
        # DM through a single modified-NFW halo at one grid point.
        if raveled_m[idx] > max_log_mhalo:  # Not necessary but just in case.
            return -99.0
        else:
            mnfw = ModifiedNFW(raveled_m[idx], alpha = 2, y0 = 2, z = raveled_z[idx])
            # Divide by (1+z) to convert to the observer-frame DM contribution.
            return mnfw.Ne_Rperp(raveled_o[idx]*u.kpc).to('pc/cm**3').value/(1+raveled_z[idx])
    # Bug fix: the original leaked the Pool (never closed) and ignored any
    # desire for a different worker count.
    with Pool(n_cores) as p:
        raveled_dm = np.array(p.map(halo_dm, np.arange(n_z*n_o*n_m)))
    # Reshape the flat results back onto the (z, offset, mass) grid.
    dm_grid = raveled_dm.reshape((n_z, n_o, n_m))
    if not outfile:
        outfile = os.path.join(outdir, "halo_dm_data.npz")
    np.savez_compressed(outfile, redshifts=redshifts, offsets=offsets, m_halo=log_halo_masses, dm=dm_grid)
    return
def _instantiate_intepolators(datafolder:str=DEFAULT_DATA_FOLDER, dmfilename:str=None, frb_name:str="FRB180924")->list:
    """
    Produce interpolator functions
    for key quantities required
    for the analysis.
    Args:
        datafolder(str, optional): Folder where the interpolation data files exist
        dmfilename(str, optional): path to the DM interpolation data file. If not
            given, defaults to "halo_dm_data.npz" inside datafolder.
        frb_name(str, optional): Assumes "FRB180924" by default.
    Returns:
        dm_interpolator (RegularGridInterpolator): DM(z, offset_kpc, log_mhalo)
        mean_interp (interp2d): <log_mhalo(log_mstar, z)> (based on SHMR)
        stddev_interp (interp2d): std.dev. log_mhalo(log_mstar, z) (based on SHMR)
        ang_dia_interp (interp1d): angular_diameter_distance(z) (default Repo cosmology)
    """
    # DM for a variety of halo parameters.
    if not dmfilename:
        # Bug fix: the default file was previously loaded from the current
        # working directory even though dm_grid() writes it into the data
        # folder by default.
        dmfilename = os.path.join(datafolder, "halo_dm_data.npz")
    dmdata = np.load(dmfilename)
    redshifts = dmdata['redshifts']
    offsets = dmdata['offsets']
    log_mhalos = dmdata['m_halo']
    dm_grid = dmdata['dm']
    dm_interpolator = RegularGridInterpolator((redshifts, offsets, log_mhalos), dm_grid, bounds_error=False, fill_value=0.)
    # Halo mass mean and variance from stellar mass
    frb = FRB.by_name(frb_name)
    realization_files = glob.glob(os.path.join(datafolder, "mhalo_realization_z*.npz"))
    realization_files.sort()
    # Define redshift grid.
    # NOTE(review): assumes exactly 10 sorted realization files matching
    # np.linspace(0, frb.z, 10) -- confirm against how mhalo_lookup_tables
    # was invoked.
    zgrid = np.linspace(0, frb.z, 10)
    # Now initialize arrays to store mean and std.dev.
    mean_arrays = []
    stddev_arrays = []
    # Loop through files, compute mean & std.dev of log_mhalo for log_mstar
    for file in realization_files:
        loaded = np.load(file)
        log_mhalo = loaded['MHALO']
        mean_mhalo, _, stddev_mhalo = sigma_clipped_stats(log_mhalo, sigma = 20, axis=1)
        mean_arrays.append(mean_mhalo)
        stddev_arrays.append(stddev_mhalo)
    # `loaded` refers to the last file in the loop; every file stores the
    # same stellar mass grid under MSTAR.
    log_mstar = loaded['MSTAR']
    mean_interp = interp2d(log_mstar, zgrid, np.array(mean_arrays), bounds_error=False)
    stddev_interp = interp2d(log_mstar, zgrid, np.array(stddev_arrays), bounds_error=False)
    # Angular diameter distance
    z = np.linspace(0,7, 10000)
    ang_dia_dist = defs.frb_cosmo.angular_diameter_distance(z).to('kpc').value
    ang_dia_interp = interp1d(z, ang_dia_dist, bounds_error=False, fill_value='extrapolate')
    # Return interpolators
    return dm_interpolator, mean_interp, stddev_interp, ang_dia_interp
def dm_for_all_galaxies(frb:FRB, input_catfile:str, datafolder:str,
                        n_cores:int=8, n_gals:int = None):
    """
    Produce DM estimates for all the galaxies provided by the user. Creates
    two files : "DM_halos_zdraws.npz" which contains all the redshift draws
    used for the DM realizations and "DM_halos_final.npz" which contains the
    DM realizations themselves. Each row in each of these files corresponds to
    one galaxy and each z draw corresponds to 1000 DM realizations for a galaxy.
    Args:
        frb (FRB): The FRB object of interest.
        input_catfile (str): Path to the input catalog of photometry. Assumed
            to be from DES for now.
        datafolder (str): Path to the folder in which results will be saved.
        n_cores (int, optional): Number of CPU threads to be used for computation.
        n_gals (int, optional): Limit analysis to n_gals galaxies for testing purposes.
    """
    # Load the input catalog
    master_cat = Table.read(input_catfile)
    # First run EAZY on that master_cat
    print("Running EAZY on the input catalog first ...")
    eazy_outdir = os.path.join(datafolder, "eazy_output")
    eazy_tab = _gen_eazy_tab(master_cat, datafolder, frb.frb_name, eazy_outdir)
    print("Done")
    # Create a CIGALE input file
    print("Creating a CIGALE input file...")
    stacked_photom = _create_cigale_in(master_cat, zmax = frb.z+0.03)
    print("Running CIGALE ...")
    # Run CIGALE for its side effect of writing cigale_full_output.fits;
    # the original bound the return value and immediately overwrote it.
    _gen_cigale_tab(stacked_photom, outdir=datafolder, n_cores=n_cores)
    # Load CIGALE results
    cigale_input = input_catfile
    cigale_output = os.path.join(datafolder,"cigale_full_output.fits")
    cigale_tab = _load_cigale_results(cigale_input, cigale_output)
    print("CIGALE results loaded.")
    # Prepare interpolator functions
    dm_interpolator, mean_interp, stddev_interp, ang_dia_interp = _instantiate_intepolators(datafolder)
    print("Interpolators created.")
    # Reduce the sample size for testing purposes.
    if n_gals is not None and isinstance(n_gals, int):
        eazy_tab = eazy_tab[:n_gals]
    # Loop through galaxies
    print("Computing DM realizations for all galaxies ...")
    # Storage for the DM realizations (sparse: skipped galaxies stay empty)
    # and the redshift draws at which these are computed.
    dm_realizations = lil_matrix((len(eazy_tab), 1000000))
    z_draws = np.zeros((len(eazy_tab),1000), dtype='float32')
    # Begin calculating
    with progressbar.ProgressBar(max_value=len(eazy_tab)-1) as bar:
        for idx, ez_entry in enumerate(eazy_tab):
            cigale_galaxy = cigale_tab[cigale_tab['gal_ID']==ez_entry['ID']]
            # Skip galaxies for which CIGALE produced no stellar mass.
            if np.any(np.isnan(cigale_galaxy['log_mstar'])):
                continue
            else:
                # Bug fix: the user-supplied n_cores was previously ignored
                # here (hard-coded to 20).
                dm_realizations[idx], z_draws[idx] = _dm_pdf(cigale_galaxy, eazy_outdir, mean_interp,
                                                            stddev_interp, ang_dia_interp, dm_interpolator,
                                                            n_cores = n_cores)
            bar.update(idx)
    # Save results to file
    np.savez_compressed(os.path.join(datafolder, "DM_halos_zdraws.npz"), z_draws=z_draws)
    save_npz(os.path.join(datafolder,"DM_halos_final.npz"), dm_realizations.tocsr())
    print("Done calculating")
    return
|
FRBs/FRB
|
frb/halos/photoz.py
|
Python
|
bsd-3-clause
| 26,428
|
[
"Galaxy"
] |
74f083d90c5200688a09f87bbc3906ee2f3e32cada60ad97c073fc6e83b5f2b2
|
import numpy as np
from ase.io import read as aseread
import networkx as nx
import itertools
import pandas as pd
from bokeh import palettes
import matplotlib.pyplot as plt
from fundef import atoms_to_nxgraph, minimal_cycles, cycle_dual_graph
########################################################################
# Ring-statistics analysis of a 2D glass layer: build the Si-Si bond
# graph, extract its minimal rings (cycles), and analyse the ring
# adjacency via the dual graph.
# NOTE(review): this script uses Python-2-era idioms (dict .values()
# passed straight to np.array, string.join, np.float) and the old
# networkx 1.x nx.set_node_attributes(G, name, values) argument order.
# It will not run unmodified under Python 3 / networkx >= 2 -- confirm
# the intended environment before porting.
########################################################################
at = aseread('../data/reduced_1ayer.xyz', format='extxyz')
do_plots = True
# Keep only the top layer (z > -0.5), then only silicon atoms (Z == 14).
top = at[np.where(at.positions[:,2] > -.5)[0]]
at = top[np.where(top.get_atomic_numbers() == 14)[0]]
cutoff = 3.8 # 2 * 1.6 + some extra for elongation. visual inspection first!
# Bond graph of the Si atoms and its minimal rings (up to 9 members).
graph = atoms_to_nxgraph(at, cutoff)
all_cycles = minimal_cycles(graph, cutoff=9)
# Dual graph: one node per ring, edges between adjacent rings.
graph_dual = cycle_dual_graph(all_cycles)
# Ring size (number of member atoms) keyed by ring index.
cycle_n_nodes = {}
for i, c in enumerate(all_cycles):
    cycle_n_nodes[i] = len(c)
nx.set_node_attributes(graph_dual, 'cycle_size', cycle_n_nodes)
if do_plots:
    # print out the graphs corresponding to the glass network and its dual.
    plt.rcParams['savefig.dpi'] = 300
    plt.rcParams['figure.figsize'] = [24., 18.]
    plt.rcParams['savefig.transparent'] = True
    # 2D positions: atoms for the glass network, ring centroids for the dual.
    positions = at.get_positions()[:,:2]
    positions_dual = np.array([positions[np.array(list(cycle))].mean(axis=0) for cycle in all_cycles])
    lengths = np.array(cycle_n_nodes.values())
    lmin = lengths.min()
    # Colour each dual node by its ring size.
    colours = [palettes.RdYlBu6[i - lmin] for i in lengths]
    plt.clf()
    nx.draw(graph_dual, positions_dual, node_color=colours, node_size=plt.rcParams['figure.figsize'][0]*lengths**2, alpha=0.7, width=2)
    plt.savefig('../figs/onlydual.eps')
    plt.clf()
    # Dual graph superimposed on the atomic bond network.
    nx.draw(graph_dual, positions_dual, node_color=colours, node_size=plt.rcParams['figure.figsize'][0]*lengths**2, alpha=0.7, width=2, style='dotted', linewidth=0)
    nx.draw(graph, positions, node_color='#000000', node_size=plt.rcParams['figure.figsize'][0]*3**2, width=3)
    plt.savefig('../figs/superimposed.eps')
    plt.clf()
lengths = np.array(cycle_n_nodes.values())
smallest, largest = lengths.min(), lengths.max()
allsizes = np.arange(smallest, largest + 1)
# Sizes of the rings neighbouring each ring, and the neighbour counts.
neighbours = [[lengths[u] for u in graph_dual.neighbors(i)] for i in range(len(lengths))]
nneighs = [len(n) for n in neighbours]
# Rings whose neighbour count equals their size -- presumably interior
# (non-boundary) rings; note inner_indices is not used below.
inner_indices = []
for i, (length, nn) in enumerate(zip(lengths, nneighs)):
    if length == nn:
        inner_indices.append(i)
# frequency of ring of given size
freqs = {}
for i in allsizes:
    freqs[i] = (lengths == i).sum()
n_rings = np.float(np.sum(freqs.values()))
for i in allsizes:
    freqs[i] /= n_rings
# indices of neighbours of each ring
n_indices = {}
for i in allsizes:
    n_indices[i] = np.where(lengths == i)[0]
import string
# For each ring size N, build the probability distribution of the sizes of
# neighbouring rings, printed as a LaTeX-style " & "-separated table row.
table = []
for size in allsizes:
    all_neigh_size = [neighbours[i] for i in n_indices[size]]
    all_neigh_size = np.array(list(itertools.chain.from_iterable(all_neigh_size)))
    result = np.array([(all_neigh_size == i).sum() for i in allsizes.astype('float')])
    result = result / float(result.sum())
    table.append(result)
    # print out probability of neighbour size
    print(string.join(['%.2f' % res for res in result], sep=' & '))
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
rcParams['figure.figsize'] = 8, 6
plt.clf()
sns.set_context('paper', font_scale=2, rc={"lines.linewidth": 2})
# One curve per ring size N: frequency of neighbouring ring sizes.
[plt.plot(allsizes, v, 'o-', label=k) for k, v in zip(allsizes, table)]
sns.despine()
plt.xlabel("Neighbouring ring size")
plt.ylabel("Frequency")
plt.legend()
plt.savefig("../figs/neighbour_sizes.pdf")
plt.clf()
# Same data as a heatmap: P(neighbour size M | ring size N).
df = pd.DataFrame(data=np.array(table), index=[4,5,6,7,8,9], columns=[4,5,6,7,8,9])
sns.set_context('paper', font_scale=2, rc={"lines.linewidth": 2, 'figure.figsize': [8, 6]})
cbar_kws = { 'label': 'Frequency'}
ax = sns.heatmap(df, cmap="YlGnBu", cbar_kws=cbar_kws)
plt.ylabel(r"Ring size $N$")
plt.xlabel(r"Neighbouring ring size $M$")
rcParams['figure.figsize'] = 8, 6
plt.clf()
# Histogram of ring sizes.
plt.hist(lengths, bins=[4,5,6,7,8,9,10], align='left')
plt.xlabel("Ring size")
plt.ylabel("Count")
plt.savefig("../figs/size_count.pdf")
|
marcocaccin/Glass_Cycle_Network
|
src/at_cycles.py
|
Python
|
gpl-2.0
| 4,063
|
[
"ASE"
] |
b8931d772b881aee108eb74b87aa91bfe4df6717220686a5c099e45dff61b8c1
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module is used for visualization of segmentation stored in pkl file.
"""
from loguru import logger
# logger = logging.getLogger()
# from PyQt4.QtCore import Qt
import argparse
import numpy as np
from dicom2fem import seg2fem
import io3d
import dicom2fem
from imtools.image_manipulation import select_labels
def seg2stl(
    segmentation,
    voxelsize_mm=np.ones([3, 1]),
    degrad=4,
    labels=[1],
    smoothing=True,
    outputfile="output.stl",
    tempfile="mesh_geom.vtk",
):
    """
    Export selected labels of a 3D segmentation as an STL surface mesh.

    (Translated from the original Czech docstring:) The function works with
    three-dimensional data like data['segmentation']; data['slab'] describes
    the meaning of each label value.

    Args:
        segmentation: 3D label array.
        voxelsize_mm: voxel size in mm (3-element array).
            NOTE(review): mutable default argument -- harmless here only
            because it is never modified in place.
        degrad: integer downsampling factor applied along every axis.
        labels: label values to extract from the segmentation.
        smoothing: if True, build a tetrahedral mesh and smooth it;
            otherwise use the marching-cubes mesh.
        outputfile: output STL file path.
        tempfile: intermediate VTK file path.
    """
    print(np.unique(segmentation))
    # Binarize: keep only the requested labels.
    segmentation = select_labels(segmentation, labels)
    # print 'labels: ', np.unique(data['segmentation'])
    # print np.sum(data['segmentation'] == 0)
    # print args.labels
    # for i in range(0, len(args.label)):
    # Downsample to keep the mesh size manageable.
    segmentation = segmentation[::degrad, ::degrad, ::degrad]
    print(np.unique(segmentation))
    # import pdb; pdb.set_trace()
    if smoothing:
        mesh_data = seg2fem.gen_mesh_from_voxels(segmentation, voxelsize_mm*degrad*1e-3, etype='t', mtype='s')
        mesh_data.coors = dicom2fem.seg2fem.smooth_mesh(mesh_data)
    else:
        # NOTE(review): the voxel-size scale factor differs between branches
        # (1e-3 above vs 1e-2 here) -- confirm which unit conversion is intended.
        mesh_data = dicom2fem.seg2fem.gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * degrad * 1.0e-2)
    # mesh_data.coors +=
    # Write the intermediate VTK mesh, then convert it to STL.
    mesh_data.write(tempfile)
    dicom2fem.vtk2stl.vtk2stl(tempfile, outputfile)
    # QApplication(sys.argv)
    # view = viewer.QVTKViewer(vtk_file)
    # view.exec_()
if __name__ == "__main__":
    # Bug fix: the original called logger.setLevel(logging.WARNING) and
    # logging.StreamHandler() here, but the `logging` module is not imported
    # (its import is commented out at the top of the file) and the loguru
    # logger provides neither setLevel() nor addHandler(), so the script
    # crashed at startup. loguru logs to stderr by default, so no handler
    # setup is required.
    # logger.debug('input params')
    # input parser
    parser = argparse.ArgumentParser(
        description='\
convert segmentation stored in pklz file into stl\n\
\npython convert.py -i resection.pkl -l 2 3 4 -d 4')
    parser.add_argument(
        '-i', '--inputfile',
        default='organ.pkl',
        help='input file')
    parser.add_argument(
        '-o', '--outputfile',
        default='output.stl',
        help='output file')
    parser.add_argument(
        '-t', '--tempfile',
        default='mesh_geom.vtk',
        help='temp file used in processing')
    parser.add_argument(
        '-d', '--degrad', type=int,
        default=4,
        help='data degradation, default 4')
    parser.add_argument(
        '-l', '--labels', type=int, metavar='N', nargs='+',
        default=[1],
        help='segmentation labels, default 1')
    parser.add_argument(
        '-s', '--show', action='store_true',
        help='Show mode')
    args = parser.parse_args()
    # Load the data volume (dataplus format includes the segmentation).
    dr = io3d.DataReader()
    data = dr.Get3DData(args.inputfile, dataplus_format=True)
    # args.label = np.array(eval(args.label))
    # print args.label
    # import pdb; pdb.set_trace()
    ds = data['segmentation']
    if args.show:
        # Preview the selected labels before meshing.
        dsel = select_labels(ds, args.labels)
        import sed3
        ed = sed3.sed3(dsel.astype(np.double))
        ed.show()
    seg2stl(ds, labels=args.labels, degrad=args.degrad, outputfile=args.outputfile, tempfile=args.tempfile)
|
mjirik/lisa
|
lisa/convert.py
|
Python
|
bsd-3-clause
| 3,272
|
[
"VTK"
] |
6dde82f6ae482a7519eed65140440cc5afdc825b0a1aba69e8ec318ee98a47ed
|
"""
Two-dimensional pattern generators drawing from various random distributions.
$Id$
"""
__version__='$Revision$'
import numpy
from numpy.oldnumeric import zeros,floor,where,choose,less,greater,Int,random_array
import param
from param.parameterized import ParamOverrides
from patterngenerator import PatternGenerator
from . import Composite, Gaussian
from sheetcoords import SheetCoordinateSystem
def seed(seed=None):
    """
    Set the seed on the shared RandomState instance.
    Convenience function: shortcut to RandomGenerator.random_generator.seed().
    """
    # All RandomGenerator instances share one RandomState by default, so this
    # reseeds every generator that has not been given its own RandomState.
    RandomGenerator.random_generator.seed(seed)
class RandomGenerator(PatternGenerator):
    """2D random noise pattern generator abstract class."""

    __abstract = True

    # The orientation is ignored, so we don't show it in
    # auto-generated lists of parameters (e.g. in the GUI)
    orientation = param.Number(precedence=-1)

    random_generator = param.Parameter(
        default=numpy.random.RandomState(seed=(500,500)),precedence=-1,doc=
        """
        numpy's RandomState provides methods for generating random
        numbers (see RandomState's help for more information).
        Note that all instances will share this RandomState object,
        and hence its state. To create a RandomGenerator that has its
        own state, set this parameter to a new RandomState instance.
        """)

    def _distrib(self,shape,p):
        """Method for subclasses to override with a particular random distribution."""
        raise NotImplementedError

    # Optimization: We use a simpler __call__ method here to skip the
    # coordinate transformations (which would have no effect anyway)
    def __call__(self,**params_to_override):
        # Resolve parameter overrides, compute the sheet's array shape,
        # fill it from the subclass distribution, then apply the mask and
        # any configured output functions in place.
        p = ParamOverrides(self,params_to_override)
        shape = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        result = self._distrib(shape,p)
        self._apply_mask(p,result)
        for of in p.output_fns:
            of(result)
        return result
class UniformRandom(RandomGenerator):
    """2D uniform random noise pattern generator."""

    def _distrib(self,shape,p):
        # Uniform draws over [offset, offset + scale).
        return p.random_generator.uniform(p.offset, p.offset+p.scale, shape)
class BinaryUniformRandom(RandomGenerator):
    """
    2D binary uniform random noise pattern generator.
    Generates an array of random numbers that are 1.0 with the given
    on_probability, or else 0.0, then scales it and adds the offset as
    for other patterns. For the default scale and offset, the result
    is a binary mask where some elements are on at random.
    """

    on_probability = param.Number(default=0.5,bounds=[0.0,1.0],doc="""
        Probability (in the range 0.0 to 1.0) that the binary value
        (before scaling) is on rather than off (1.0 rather than 0.0).""")

    def _distrib(self,shape,p):
        # Uniform draws over [p-0.5, p+0.5) round to 1.0 exactly with
        # probability p (the fraction of the interval at or above 0.5),
        # and to 0.0 otherwise; then apply the usual scale and offset.
        rmin = p.on_probability-0.5
        return p.offset+p.scale*(p.random_generator.uniform(rmin,rmin+1.0,shape).round())
class GaussianRandom(RandomGenerator):
    """
    2D Gaussian random noise pattern generator.
    Each pixel is chosen independently from a Gaussian distribution
    of zero mean and unit variance, then multiplied by the given
    scale and adjusted by the given offset.
    """

    # Defaults chosen so the typical output stays in roughly [0, 1].
    scale  = param.Number(default=0.25,softbounds=(0.0,2.0))
    offset = param.Number(default=0.50,softbounds=(-2.0,2.0))

    def _distrib(self,shape,p):
        # Standard-normal draws rescaled to the requested scale/offset.
        return p.offset+p.scale*p.random_generator.standard_normal(shape)
# CEBALERT: in e.g. script_repr, an instance of this class appears to
# have only pattern.Constant() in its list of generators, which might
# be confusing. The Constant pattern has no effect because the
# generators list is overridden in __call__. Shouldn't the generators
# parameter be hidden for this class (and possibly for others based on
# pattern.Composite)? For that to be safe, we'd at least have to have
# a warning if someone ever sets a hidden parameter, so that having it
# revert to the default value would always be ok.
class GaussianCloud(Composite):
    """Uniform random noise masked by a circular Gaussian."""

    # Combine the sub-patterns by multiplication (noise * Gaussian envelope).
    operator = param.Parameter(numpy.multiply)

    gaussian_size = param.Number(default=1.0,doc="Size of the Gaussian pattern.")

    aspect_ratio  = param.Number(default=1.0,bounds=(0.0,None),softbounds=(0.0,2.0),
        precedence=0.31,doc="""
        Ratio of gaussian width to height; width is gaussian_size*aspect_ratio.""")

    def __call__(self,**params_to_override):
        # The generators list is rebuilt on every call from the current
        # parameter values, overriding whatever was set on the instance.
        p = ParamOverrides(self,params_to_override)
        p.generators=[Gaussian(aspect_ratio=p.aspect_ratio,size=p.gaussian_size),
                      UniformRandom()]
        return super(GaussianCloud,self).__call__(**p)
### JABHACKALERT: This code seems to work fine when the input regions
### are all the same size and shape, but for
### e.g. examples/hierarchical.ty the resulting images in the Test
### Pattern preview window are square (instead of the actual
### rectangular shapes), matching between the eyes (instead of the
### actual two different rectangles), and with dot sizes that don't
### match between the eyes. It's not clear why this happens.
class RandomDotStereogram(PatternGenerator):
    """
    Random dot stereogram using rectangular black and white patches.
    Based on Matlab code originally from Jenny Read, reimplemented
    in Python by Tikesh Ramtohul (2006).
    """

    # Suppress unused parameters
    x = param.Number(precedence=-1)
    y = param.Number(precedence=-1)
    size = param.Number(precedence=-1)
    orientation = param.Number(precedence=-1)

    # Override defaults to make them appropriate
    scale  = param.Number(default=0.5)
    offset = param.Number(default=0.5)

    # New parameters for this pattern
    #JABALERT: Should rename xdisparity and ydisparity to x and y, and simply
    #set them to different values for each pattern to get disparity
    xdisparity = param.Number(default=0.0,bounds=(-1.0,1.0),softbounds=(-0.5,0.5),
        precedence=0.50,doc="Disparity in the horizontal direction.")

    ydisparity = param.Number(default=0.0,bounds=(-1.0,1.0),softbounds=(-0.5,0.5),
        precedence=0.51,doc="Disparity in the vertical direction.")

    dotdensity = param.Number(default=0.5,bounds=(0.0,None),softbounds=(0.1,0.9),
        precedence=0.52,doc="Number of dots per unit area; 0.5=50% coverage.")

    dotsize    = param.Number(default=0.1,bounds=(0.0,None),softbounds=(0.05,0.15),
        precedence=0.53,doc="Edge length of each square dot.")

    random_seed=param.Integer(default=500,bounds=(0,1000),
        precedence=0.54,doc="Seed value for the random position of the dots.")

    def __call__(self,**params_to_override):
        # NOTE(review): this method uses numpy.oldnumeric helpers
        # (random_array, choose, Int, ...) and relies on Python 2 integer
        # division in the slice arithmetic below (e.g. ysize/2); under
        # Python 3 those would be floats -- confirm the target environment
        # before porting.
        p = ParamOverrides(self,params_to_override)
        # Convert all fractional parameters to pixel units of the sheet.
        xsize,ysize = SheetCoordinateSystem(p.bounds,p.xdensity,p.ydensity).shape
        xsize,ysize = int(round(xsize)),int(round(ysize))
        xdisparity  = int(round(xsize*p.xdisparity))
        ydisparity  = int(round(xsize*p.ydisparity))
        dotsize     = int(round(xsize*p.dotsize))
        # Dots are drawn onto a double-size canvas so that the final image
        # can be cut out with the requested disparity shift.
        bigxsize = 2*xsize
        bigysize = 2*ysize
        ndots=int(round(p.dotdensity * (bigxsize+2*dotsize) * (bigysize+2*dotsize) /
                        min(dotsize,xsize) / min(dotsize,ysize)))
        halfdot = floor(dotsize/2)

        # Choose random colors and locations of square dots
        random_seed = p.random_seed

        random_array.seed(random_seed*12,random_seed*99)
        col=where(random_array.random((ndots))>=0.5, 1.0, -1.0)

        random_array.seed(random_seed*122,random_seed*799)
        xpos=floor(random_array.random((ndots))*(bigxsize+2*dotsize)) - halfdot

        random_array.seed(random_seed*1243,random_seed*9349)
        ypos=floor(random_array.random((ndots))*(bigysize+2*dotsize)) - halfdot

        # Construct arrays of points specifying the boundaries of each
        # dot, cropping them by the big image size (0,0) to (bigxsize,bigysize)
        x1=xpos.astype(Int) ; x1=choose(less(x1,0),(x1,0))
        y1=ypos.astype(Int) ; y1=choose(less(y1,0),(y1,0))
        x2=(xpos+(dotsize-1)).astype(Int) ; x2=choose(greater(x2,bigxsize),(x2,bigxsize))
        y2=(ypos+(dotsize-1)).astype(Int) ; y2=choose(greater(y2,bigysize),(y2,bigysize))

        # Draw each dot in the big image, on a blank background
        bigimage = zeros((bigysize,bigxsize))
        for i in range(ndots):
            bigimage[y1[i]:y2[i]+1,x1[i]:x2[i]+1] = col[i]

        # Cut the central window out of the big canvas, shifted by the
        # requested disparity, and rescale to the configured range.
        result = p.offset + p.scale*bigimage[ (ysize/2)+ydisparity:(3*ysize/2)+ydisparity ,
                                              (xsize/2)+xdisparity:(3*xsize/2)+xdisparity ]
        for of in p.output_fns:
            of(result)

        return result
|
ioam/svn-history
|
imagen/random.py
|
Python
|
bsd-3-clause
| 8,904
|
[
"Gaussian"
] |
9d38a6d2f465729f0d51ea95a4b26d6e458522f93876402ba846bfb4e018f9d1
|
# $Id: test_MurckoScaffold.py 3672 2010-06-14 17:10:00Z landrgr1 $
#
# Created by Peter Gedeck, June 2008
#
from rdkit.Chem.Scaffolds.MurckoScaffold import *
import unittest
import random
from rdkit import Chem
class TestCase(unittest.TestCase):
  """Regression tests for MurckoScaffold.GetScaffoldForMol.

  Each entry in the data lists below is a pair of SMILES strings:
  (input molecule, expected Murcko scaffold).  The expected scaffold is
  itself round-tripped through RDKit so both sides are compared in
  canonical SMILES form.
  """

  # Small hand-crafted cases covering individual scaffold rules
  # (spiro systems, exocyclic double bonds, S=O retention, etc.).
  testMolecules = [
    ("CC1CCC1", "C1CCC1"),
    ("NCNCC2CC2C1CC1O", "C1CC1C1CC1"),
    ("OC2C(C)C21C(N)C1C", "C2CC12CC1"),  # Spiro
    ("C1CC1C(=O)OC", "C1CC1"),  # Carbonyl outside scaffold
    ("C1CC1C=C", "C1CC1"),  # Double bond outside scaffold
    ("C1CC1C=CC1CC1C=CNNCO", "C1CC1C=CC1CC1"),  # Double bond in scaffold
    ("CC1CC1C(N)C1C(N)C1", "C1CC1CC1CC1"),
    ("C1CC1S(=O)C1CC1C=CNNCO", "C1CC1S(=O)C1CC1"),  # S=O group in scaffold
    ("O=SCNC1CC1S(=O)C1CC1C=CNNCO", "C1CC1S(=O)C1CC1"),  # S=O group outside scaffold
    ("C1CC1S(=O)(=O)C1CC1C=CNNCO", "C1CC1S(=O)(=O)C1CC1"),  # SO2 group in scaffold
    ("O=S(CNCNC)(=O)CNC1CC1S(=O)(=O)C1CC1C=CNNCO", "C1CC1S(=O)(=O)C1CC1"),  # SO2 group outside scaffold
    ("C1CC1C=NO", "C1CC1"),  # Hydroxamide
    ("C1CC1C(C(C)C)=NC1CC1", "C1CC1C=NC1CC1"),  # Hydroxamide
    ("C1CC1C#N", "C1CC1"),  # Cyano group
    ("C1CC1C#CNC", "C1CC1"),  # Acetylene group
    ("O=C1N(C)C(=O)N1C#CNC", "O=C1NC(=O)N1"),  # Acetylene group
    ("[O-][N+](=O)c1cc(ccc1Cl)NS(=O)(=O)Cc2ccccc2", "c1ccccc1NS(=O)(=O)Cc2ccccc2"),
    ("Cn1cccc1", "c1ccc[nH]1"),
    ("C1CC1[CH](C)C1CC1", "C1CC1CC1CC1"),
  ]

  # Larger corpus of drug-like molecules with their expected scaffolds.
  testMolecules2 = [
    ("CCOc1ccccc1N(S(C)(=O)=O)CC(NC1CCCCC1)=O", "O=C(NC1CCCCC1)CNc1ccccc1"),
    ("c1ccc(-c2c(C)n(-c3c(C(O)=O)cccc3)c(C)nc2=O)cc1", "O=c1c(cn(cn1)-c1ccccc1)-c1ccccc1"),
    ("Cc1ccc(Cl)c2c1NC(=O)C2=C1NC(=S)NC1=O", "c1cc2c(cc1)C(=C1C(NC(N1)=S)=O)C(=O)N2"),
    ("CNC(=O)CCc1[nH]c2c(c1Sc1ccccc1)cccc2", "c1cc(Sc2c3c([nH]c2)cccc3)ccc1"),
    ("CC(=O)OCC(=O)C1(O)CCC2C1(C)CC(=O)C1C3(C)CCC(=O)C=C3CCC21", "O=C1C=C2CCC3C4CCCC4CC(=O)C3C2CC1"),
    ("CC(C)CC(Nc1nc(Cl)ccc1[N+]([O-])=O)C(O)=O", "c1ccncc1"),
    ("COc1ccc(C(Nc2ccc(S(N3C(C)CCCC3)(=O)=O)cc2)=O)c(OC)c1OC", "O=C(Nc1ccc(S(=O)(=O)N2CCCCC2)cc1)c1ccccc1"),
    ("CC(C)CCNc1nc(N)c([N+](=O)[O-])c(NCCO)n1", "c1cncnc1"),
    ("c1ccc(Oc2c(NC(COC(c3c(C)noc3C)=O)=O)cccc2)cc1", "O=C(COC(=O)c1cnoc1)Nc1ccccc1Oc1ccccc1"),
    ("COC(CCCCC1SCC(NC(OC)=O)C1NC(OC)=O)=O", "C1CCCS1"),
    ("CSc1ccc(-c2c(C#N)c(N)nc3n(-c4ccccc4)nc(C)c32)cc1", "c1ccc(cc1)-c1c2c(n(nc2)-c2ccccc2)ncc1"),
    ("O=C1Cc2ccccc2Sc2c1cc(Cl)cc2", "O=C1Cc2ccccc2Sc2ccccc21"),
    ("COC(c1n(CC(N(C)c2ccccc2)=O)c2ccsc2c1)=O", "O=C(Cn1c2ccsc2cc1)Nc1ccccc1"),
    ("N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12", "N=C1C(=Cc2coc3ccccc3c2=O)C(=O)N=C2SC(c3ccncc3)=NN12"),
    ("CCOC(c1ccc(NC(CCc2c(C)nc3ncnn3c2C)=O)cc1)=O", "O=C(Nc1ccccc1)CCc1cnc2n(ncn2)c1"),
    ("COC(=O)C1=C(C)NC(C)=C(C(OC)=O)C1c1oc(-c2c(Cl)c(Cl)ccc2)cc1", "c1ccc(-c2oc(C3C=CNC=C3)cc2)cc1"),
    ("CCN(S(c1cc(NC(COC(CCc2nc3ccccc3s2)=O)=O)ccc1)(=O)=O)CC", "c1cc(NC(COC(=O)CCc2nc3c(s2)cccc3)=O)ccc1"),
    ("CCOC(c1cc(OC(c2ccccc2)=O)n(-c2ccccc2)n1)=O", "O=C(Oc1n(ncc1)-c1ccccc1)c1ccccc1"),
    ("CCOC(=O)c1nc2c(c(NCc3ccccc3F)n1)cccc2", "c1ccc(CNc2ncnc3c2cccc3)cc1"),
    ("Cc1nc(C)n(CC(N2CCCC(C(c3c(C)cc(Cl)cc3)=O)C2)=O)n1", "c1ccc(cc1)C(=O)C1CCCN(C(=O)Cn2cncn2)C1"),
    ("COc1cc(NC(=O)c2nnn(CCc3ccccc3)c2N)c(OC)cc1", "O=C(c1nnn(c1)CCc1ccccc1)Nc1ccccc1"),
    ("Cc1cc(C(=O)CN2C(=O)c3ccccc3C2=O)c(C)n1Cc1cccs1", "O=C(CN1C(c2c(cccc2)C1=O)=O)c1cn(Cc2cccs2)cc1"),
    ("c1cnc2c(c1)cccc2S(N1CCC(C(=O)N2CCN(c3ccc(Cl)cc3)CC2)CC1)(=O)=O", "c1ccc(cc1)N1CCN(C(=O)C2CCN(S(=O)(=O)c3c4ncccc4ccc3)CC2)CC1"),
    ("CCOC(c1c(C)[nH]c(C(NNC(c2ccc(C(C)(C)C)cc2)=O)=O)c1C)=O", "c1ccc(cc1)C(NNC(c1ccc[nH]1)=O)=O"),
    ("CCOC(c1cc(C(C)C)sc1NC(=O)COC(CCS(c1ccccc1)(=O)=O)=O)=O", "c1ccc(S(CCC(=O)OCC(Nc2cccs2)=O)(=O)=O)cc1"),
    ("CCC1CCCCN1CCCNC(=O)Cn1nc(-c2ccccc2)ccc1=O", "O=C(NCCCN1CCCCC1)Cn1nc(ccc1=O)-c1ccccc1"),
    ("CCc1cc(OCCn2nc(C(O)=O)c3ccccc3c2=O)ccc1", "O=c1n(CCOc2ccccc2)ncc2ccccc21"),
    ("Fc1ccc(CN2CCN3C(CCC3)C2C2CCCCC2)cc1F", "c1ccc(cc1)CN1CCN2CCCC2C1C1CCCCC1"),
    ("O=[N+]([O-])c1cc(-c2nnc(N3CCOCC3)c3ccccc23)ccc1N1CCOCC1", "c1cc2c(nnc(c2cc1)N1CCOCC1)-c1ccc(cc1)N1CCOCC1"),
    ("Cc1ccnc(NC(=O)COc2ccc3oc4c(c3c2)CCCC4)c1", "O=C(COc1ccc2oc3c(c2c1)CCCC3)Nc1ccccn1"),
    ("Cc1cc(=O)oc(C)c1C(=O)NCCCN1CCN(c2ccc(F)cc2)CC1", "c1ccc(N2CCN(CCCNC(c3ccc(oc3)=O)=O)CC2)cc1"),
    ("Cc1cc(C(=O)CSc2nc(=O)cc(N)[nH]2)c(C)n1-c1cccc(F)c1", "O=C(CSc1nc(cc[nH]1)=O)c1cn(cc1)-c1ccccc1"),
    ("CCN(S(c1cccc(C(=O)N2CCCCC2)c1)(=O)=O)CC", "O=C(N1CCCCC1)c1ccccc1"),
    ("CNC(=S)N1CCC(NC(=O)C23CC4CC(C2)CC(C3)C4)CC1", "O=C(NC1CCNCC1)C12CC3CC(C1)CC(C3)C2"),
    ("Cc1cc2c(cc1)N=C(C)C(N=O)=C(C)N2", "c1cc2NC=CC=Nc2cc1"),
    ("COc1ccc(Sc2cc(C(F)(F)F)nc(-c3ncccc3)n2)cc1", "c1ccc(cc1)Sc1nc(ncc1)-c1ncccc1"),
    ("c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1", "c1coc(CNC(Cn2cc(C(c3ccccc3)=O)c3c2cccc3)=O)c1"),
    ("O=C(NCc1ccc(Cl)cc1)c1noc(-c2ccco2)c1", "O=C(c1noc(c1)-c1ccco1)NCc1ccccc1"),
    ("CN(C)c1ccc(C(c2n(CCOC(=O)Nc3ccc(Cl)cc3)nnn2)N2CCOCC2)cc1", "O=C(Nc1ccccc1)OCCn1nnnc1C(c1ccccc1)N1CCOCC1"),
    ("NC(=NOC(=O)c1cc(Cn2cc(C(F)(F)F)ccc2=O)ccc1)c1ccccc1", "c1ccc(C=NOC(c2cc(Cn3ccccc3=O)ccc2)=O)cc1"),
    ("CCc1nnc(NC(=O)Cc2c(-c3ccc(C)cc3)nc(C)s2)s1", "O=C(Cc1c(-c2ccccc2)ncs1)Nc1nncs1"),
    ("COCCCNC(=O)CN1C(=O)N(Cc2ccccc2Cl)CC1", "O=C1NCCN1Cc1ccccc1"),
    ("Cc1cc([N+]([O-])=O)nn1CC(=O)NCCCn1ccnc1", "O=C(Cn1nccc1)NCCCn1ccnc1"),
    ("c1cc(F)c(N2CCN(C(=O)c3ccc(S(NCC4OCCC4)(=O)=O)cc3)CC2)cc1", "c1ccc(cc1)N1CCN(C(c2ccc(cc2)S(=O)(=O)NCC2OCCC2)=O)CC1"),
    ("CC(NCc1cccnc1)=C1C(=O)NC(=O)N(c2ccc(C)cc2)C1=O", "c1cc(ccc1)N1C(=O)NC(C(=CNCc2cccnc2)C1=O)=O"),
    ("Cc1ccn(C)c(=N)c1", "N=c1[nH]cccc1"),
    ("Cc1cc(C)nc(N2CCC(CNC(=O)CCc3ccccc3)CC2)n1", "O=C(CCc1ccccc1)NCC1CCN(c2ncccn2)CC1"),
    ("CCOC1=CC(=CNNC(CCCC(NC2CCCCC2)=O)=O)C=CC1=O", "C1=CC(C=CC1=O)=CNNC(=O)CCCC(=O)NC1CCCCC1"),
    ("CC(=O)N1CCN(c2ccc([N+]([O-])=O)cc2)CC1", "c1ccc(cc1)N1CCNCC1"),
    ("CS(N(CC(=O)N1CCCCC1)Cc1ccc(Cl)cc1)(=O)=O", "O=C(N1CCCCC1)CNCc1ccccc1"),
    ("c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1", "c1coc(C(=O)N2CCN(C(COc3cc(C(NCc4ccccc4)=O)ccc3)=O)CC2)c1"),
    ("Cc1cccc2sc(NNC(=O)C3=COCCO3)nc12", "O=C(NNc1nc2ccccc2s1)C1=COCCO1"),
    ("c1ccc2c(c1)N(C)C1(C=Nc3c(cc(N4CCOCC4)c4ccccc34)O1)C2(C)C", "C1=Nc2c(cc(c3ccccc23)N2CCOCC2)OC11Nc2ccccc2C1"),
    ("COc1cccc(C2N(CCN3CCOCC3)C(=O)C(O)=C2C(=O)c2sc(C)nc2C)c1", "O=C(C1=CC(=O)N(C1c1ccccc1)CCN1CCOCC1)c1scnc1"),
    ("COc1cc(OC)c(NC(CSc2nc3c(c(=O)n2-c2ccc(F)cc2)SCC3)=O)cc1", "c1ccc(cc1)NC(=O)CSc1n(c(=O)c2c(n1)CCS2)-c1ccccc1"),
    ("Cc1ccccc1CN1c2ccccc2C2(C1=O)OCCCO2", "O=C1C2(OCCCO2)c2c(N1Cc1ccccc1)cccc2"),
    ("O=C(N1C2(OCC1)CCN(c1ncc(C(F)(F)F)cc1Cl)CC2)c1ccccc1", "O=C(c1ccccc1)N1C2(OCC1)CCN(c1ccccn1)CC2"),
    ("CC=CC=CC(=O)Nc1nccs1", "c1ncsc1"),
    ("CC(C)(C)c1ccc(C(c2c[nH]c(C(NCc3cccnc3)=O)c2)=O)cc1", "c1ccc(cc1)C(=O)c1c[nH]c(c1)C(=O)NCc1cccnc1"),
    ("CCC(=O)Nc1c(C)nn(-c2cc(C)c(C)cc2)c1C", "c1ccc(cc1)-n1nccc1"),
    ("Cc1ccc(SCCC(=O)NCCSCc2c(C)cccc2)cc1", "O=C(NCCSCc1ccccc1)CCSc1ccccc1"),
    ("CC1=NN(Cc2ccccc2)C(=O)C1=Cc1ccc(N(C)C)cc1", "O=C1C(C=NN1Cc1ccccc1)=Cc1ccccc1"),
    ("COCC(=O)Nc1ccc(S(NCCc2ccccc2)(=O)=O)cc1", "c1ccc(CCNS(=O)(=O)c2ccccc2)cc1"),
    ("CCOC(=O)N(C)c1ccc(C(O)(C(F)(F)F)C(F)(F)F)cc1", "c1ccccc1"),
    ("Fc1ccc(COC2=C(C(O)=O)CCNC2=O)cc1F", "O=C1NCCC=C1OCc1ccccc1"),
    ("O=C1N2C(Nc3ccccc31)CCCCC2", "O=C1N2C(Nc3ccccc31)CCCCC2"),
    ("Cl.COc1ccc(-c2nc3n(ccc4ccccc43)c2CN2CCOCC2)cc1OC", "c1cccc(c1)-c1nc2c3c(ccn2c1CN1CCOCC1)cccc3"),
    ("ClCc1oc(-c2ccccc2)nn1", "c1oc(nn1)-c1ccccc1"),
    ("Cl.Cc1ccc(OCC(O)Cn2c(=N)n(CCN3CCCCC3)c3ccccc32)cc1", "N=c1n(CCCOc2ccccc2)c2ccccc2n1CCN1CCCCC1"),
    ("COc1ccc(C(=O)C=C(C)Nc2ccc3c(c2)OCO3)cc1", "O=C(C=CNc1ccc2c(c1)OCO2)c1ccccc1"),
    ("c1csc(CN(C(c2ccc(F)cc2)C(NC2CCCCC2)=O)C(=O)CN2S(=O)(=O)c3ccccc3C2=O)c1", "c1cc(CN(C(=O)CN2S(=O)(c3ccccc3C2=O)=O)C(C(=O)NC2CCCCC2)c2ccccc2)sc1"),
    ("c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1", "c1csc(S(NCCSc2n(-c3ccccc3)nnn2)(=O)=O)c1"),
    ("Cc1cccc(C=NNC(=O)Cn2c(N)nnn2)n1", "O=C(Cn1cnnn1)NN=Cc1ccccn1"),
    ("CCOC(C1(Cc2ccc(Cl)cc2)CCN(C(c2cc(C)nc(C)n2)=O)CC1)=O", "O=C(N1CCC(CC1)Cc1ccccc1)c1ccncn1"),
    ("c1ccc(C(N(CC2OCCC2)C(Cn2nnc3ccccc23)=O)C(NCc2ccc(F)cc2)=O)cc1", "O=C(N(C(c1ccccc1)C(=O)NCc1ccccc1)CC1OCCC1)Cn1nnc2c1cccc2"),
    ("O=C1CSC(c2ccncc2)N1Cc1occc1", "O=C1CSC(c2ccncc2)N1Cc1occc1"),
    ("COc1c(OCc2ccccc2)c(Br)cc(C=NNC(=O)Cn2nc([N+]([O-])=O)cc2C)c1", "O=C(Cn1nccc1)NN=Cc1ccc(cc1)OCc1ccccc1"),
    ("Cc1c(Cn2nnc(-c3cc(C(=O)O)ccc3)n2)cccc1", "c1cccc(-c2nn(nn2)Cc2ccccc2)c1"),
    ("O=C(c1ccc2snnc2c1)N1CCCC1", "O=C(c1ccc2snnc2c1)N1CCCC1"),
    ("c1ccc(CC(NN2C(=O)C(=Cc3c(C(O)=O)cccc3)SC2=S)=O)cc1", "O=C1C(=Cc2ccccc2)SC(=S)N1NC(Cc1ccccc1)=O"),
    ("Cc1ccccc1OCC(=O)NN=Cc1ccncc1", "O=C(COc1ccccc1)NN=Cc1ccncc1"),
    ("O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1", "O=C(C=Cc1ccccc1)NC(=S)Nc1ccc(CN2CCOCC2)cc1"),
    ("COc1ccc(NC(=S)N(Cc2cnccc2)Cc2c(=O)[nH]c3c(c2)cc(OC)c(OC)c3)cc1", "O=c1c(CN(C(=S)Nc2ccccc2)Cc2cnccc2)cc2ccccc2[nH]1"),
    ("Nc1ccc2nc3c([nH]c(=O)n(C4CCCCC4)c3=O)nc2c1", "c1ccc2nc3[nH]c(n(c(c3nc2c1)=O)C1CCCCC1)=O"),
    ("Cc1cc(NC(=O)c2ccc(S(Nc3ccccc3)(=O)=O)cc2)no1", "c1cc(no1)NC(=O)c1ccc(S(=O)(=O)Nc2ccccc2)cc1"),
    ("Nn1c(Cc2c3c(cccc3)ccc2)nnc1SCc1ccccc1", "c1ccc(CSc2nnc([nH]2)Cc2c3c(cccc3)ccc2)cc1"),
    ("Cc1[nH]nc(Nc2cc(C)ccc2)c1[N+](=O)[O-]", "c1ccc(cc1)Nc1n[nH]cc1"),
    ("CC1Cn2c(nc3n(C)c(=O)[nH]c(=O)c23)O1", "O=c1[nH]c2nc3n(c2c([nH]1)=O)CCO3"),
    ("c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1", "c1csc(C(OCC(NC23CC4CC(C2)CC(C3)C4)=O)=O)c1"),
    ("c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1", "c1ccc(S(NC2=NC(=O)C(=Cc3cnccc3)S2)(=O)=O)cc1"),
    ("CCCn1c(N2CCN(C)CC2)nc2n(C)c(=O)[nH]c(=O)c12", "O=c1[nH]c([nH]c2nc([nH]c12)N1CCNCC1)=O"),
    ("CCn1c(SCC(Nc2cc(S(N3CCOCC3)(=O)=O)ccc2OC)=O)nnc1-c1ccncc1", "c1cc(S(=O)(=O)N2CCOCC2)cc(NC(=O)CSc2nnc(-c3ccncc3)[nH]2)c1"),
    ("C#CCNC(=O)C1=CC(c2ccc(Br)cc2)CC(OCc2ccc(CO)cc2)O1", "c1cccc(c1)C1C=COC(OCc2ccccc2)C1"),
    ("CCc1c(SCC(=O)Nc2cc(C)on2)nc2ccc(C)cc2c1", "O=C(Nc1ccon1)CSc1ccc2c(cccc2)n1"),
    ("CCOCCCN(C(C(NC1CCCC1)=O)c1cccc(OC)c1OC)C(c1ccco1)=O", "c1cc(ccc1)C(NC(c1occc1)=O)C(=O)NC1CCCC1"),
    ("Cc1ccc(C(=O)NC(=S)NNS(c2ccccc2)(=O)=O)cc1", "c1cccc(c1)C(NC(=S)NNS(=O)(=O)c1ccccc1)=O"),
    ("COc1ccc(CC(N)=NOC(=O)c2sccc2)cc1", "O=C(ON=CCc1ccccc1)c1sccc1"),
    ("c1ccc(C(O)=C2C(c3ncccc3)N(CC(OC)OC)C(=O)C2=O)cc1", "c1cc(C=C2C(=O)C(=O)NC2c2ncccc2)ccc1"),
    ("COC(=O)CSc1nc(C)cc(Oc2ccccc2)n1", "c1ccc(Oc2ccncn2)cc1"),
    ("COc1ccc(Cn2c(C)ccc2C)cc1", "c1ccc(cc1)Cn1cccc1"),
    ("COc1cccc(N2CCN(C3CC(=O)N(c4ccc(C)c(Cl)c4)C3=O)CC2)c1", "O=C1N(c2ccccc2)C(=O)C(C1)N1CCN(c2ccccc2)CC1"),
    ("COc1cccc(OC)c1OCCN(C)C.OC(=O)C(O)=O", "c1ccccc1"),
    ("C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1", "C1CCC(NC(=O)c2ccc(S(N3CCCC3)(=O)=O)cc2)C1"),
    ("CCCN(C(=O)Cn1ncc2c(=O)oc3c(c12)cccc3)c1cc(C)ccc1", "O=C(Cn1ncc2c(oc3c(cccc3)c12)=O)Nc1ccccc1"),
    ("CNC(NC(CSc1nnc(C(F)(F)F)n1C)=O)=O", "n1nc[nH]c1"),
    ("CCOCCCN1C(=O)CC(C(NCCc2ccc(C)cc2)=O)C1", "O=C1NCC(C1)C(NCCc1ccccc1)=O"),
    ("COc1c([N+](=O)[O-])cc(CSc2n[nH]c(C)n2)cc1", "c1ccc(CSc2nc[nH]n2)cc1"),
    ("CN(C)CC(=O)c1ccc(-c2ccccc2)cc1", "c1cccc(c1)-c1ccccc1"),
    ("CC1(O)C(=O)c2c(cccc2)N(c2ccccc2)C1=O", "O=C1CC(=O)N(c2c1cccc2)c1ccccc1"),
    ("CN(S(c1ccccc1)(=O)=O)CC(=O)NCCc1ccccc1", "c1ccc(CCNC(=O)CNS(=O)(=O)c2ccccc2)cc1"),
    ("CCNc1ccccc1C(=O)O", "c1ccccc1"),
    ("CC1(C)C(CSc2nc3ccccc3[nH]2)C1(Cl)Cl", "c1ccc2c(nc([nH]2)SCC2CC2)c1"),
    ("CC(C)c1ccc(OCC(=O)NC(=S)Nc2c3cccc4c3c(cc2)CC4)cc1", "O=C(NC(=S)Nc1c2cccc3c2c(cc1)CC3)COc1ccccc1"),
    ("CN(C)c1ccc(NC(CN2CCC(C(c3ccc(F)cc3)=O)CC2)=O)cc1", "c1cccc(c1)NC(CN1CCC(CC1)C(=O)c1ccccc1)=O"),
    ("CCCCN(C)C(=O)Cc1c(OC)ccc2cc(Br)ccc21", "c1c2ccccc2ccc1"),
    ("Cc1ccc(NC(CSc2sc(NC(CN3CCOCC3)=O)nn2)=O)cc1", "O=C(Nc1ccccc1)CSc1sc(nn1)NC(=O)CN1CCOCC1"),
    ("COCCNC(=S)NNc1cccc(C(=O)O)c1", "c1ccccc1"),
    ("O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2", "O=C(CNc1ccccc1)NN=Cc1ccc2c(c1)OCCO2"),
    ("COc1cc2ccccc2cc1C(=O)NCC(c1sccc1)N(C)C", "O=C(NCCc1sccc1)c1cc2c(cc1)cccc2"),
    ("COc1ccc(C(N(C)C)CNC(=O)CCOc2ccccc2)cc1", "O=C(NCCc1ccccc1)CCOc1ccccc1"),
    ("Cl.CCN(CC)CCCN1C(=O)CSC1c1ccc([N+]([O-])=O)cc1", "O=C1CSC(c2ccccc2)N1"),
    ("CCC(Nc1ccc(OC)cc1OC)=C1C(=O)NC(=O)NC1=O", "c1cc(NC=C2C(=O)NC(=O)NC2=O)ccc1"),
    ("c1coc(-c2cc(C(F)(F)F)nc(NCc3ccc(F)cc3)n2)c1", "c1ccc(CNc2nccc(n2)-c2occc2)cc1"),
    ("CCOC(Nc1sc(C)c(C)c1C(OCC)=O)=O", "c1ccsc1"),
    ("O=CN1CCN(C(C(=O)NC2CCCCC2)c2cc3c(cc2[N+]([O-])=O)OCO3)CC1", "O=C(C(N1CCNCC1)c1ccc2c(c1)OCO2)NC1CCCCC1"),
    ("COc1cc(C2N(c3ccc(Br)cc3)C(=O)c3n[nH]c(C)c32)ccc1O", "O=C1c2n[nH]cc2C(N1c1ccccc1)c1ccccc1"),
    ("c1cc(NC(=O)c2ccccc2[N+]([O-])=O)c(N2CCOCC2)cc1", "O=C(Nc1c(cccc1)N1CCOCC1)c1ccccc1"),
    ("N#Cc1cc2c(nc1SCC(=O)N1CCCCC1)CCCCC2", "O=C(N1CCCCC1)CSc1ccc2c(n1)CCCCC2"),
    ("CCN(CC)c1ccc(CN(C(=O)c2cc(OC)c(OC)c(OC)c2)C2CCS(=O)(=O)C2)cc1", "O=S1(=O)CCC(N(Cc2ccccc2)C(=O)c2ccccc2)C1"),
    ("COc1cc(NC(=S)N2CCN(Cc3ccccc3)CC2)cc(OC)c1", "S=C(N1CCN(CC1)Cc1ccccc1)Nc1ccccc1"),
    ("CC(=O)C(=CNc1ccc(OCc2ccccc2)cc1)c1ccccc1", "c1cccc(c1)COc1ccc(NC=Cc2ccccc2)cc1"),
    ("CC(C)C(C(NC(C)C(N)=O)=O)NC(C1CCCN1C(OC(C)(C)C)=O)=O", "C1CCNC1"),
    ("CCOc1ccc(N2CC(C(=O)Nc3cccc(S(NC4=NCCC4)(=O)=O)c3)CC2=O)cc1", "c1cccc(c1)N1CC(C(=O)Nc2cccc(S(=O)(=O)NC3=NCCC3)c2)CC1=O"),
    ("O=C(NCc1ccccc1Cl)CSc1ccc(-c2cccs2)nn1", "O=C(NCc1ccccc1)CSc1ccc(nn1)-c1sccc1"),
    ("COc1ccc(OC)c(N=c2ssnc2Cl)c1", "c1cccc(c1)N=c1ssnc1"),
    ("CC(=O)C1=C(C)NC(=O)CC1c1c(Cl)cccc1", "O=C1CC(C=CN1)c1ccccc1"),
    ("CCC(=O)N=C(N)Nc1nc(C)c2cc(C)c(C)cc2n1", "c1cc2c(cc1)ncnc2"),
    ("Cc1ccccc1C(OC1OC(=O)C(Cl)=C1Nc1ccc(C(O)=O)cc1)=O", "O=C(OC1OC(C=C1Nc1ccccc1)=O)c1ccccc1"),
    ("CCOc1cc(CN2CCC(CO)(Cc3cccc(C(F)(F)F)c3)CC2)ccc1OC", "c1ccc(cc1)CC1CCN(Cc2ccccc2)CC1"),
    ("Cc1cc2c([nH]c(=O)c(CCNC(c3cccs3)=O)c2)cc1C", "O=C(NCCc1cc2ccccc2[nH]c1=O)c1cccs1"),
    ("Cc1ccc(Nc2cc(=O)[nH]c(=O)[nH]2)cc1C", "c1cccc(c1)Nc1cc([nH]c([nH]1)=O)=O"),
    ("Cc1cc(OCC(=O)NC2CCS(=O)(=O)C2)c2c(oc(=O)c3c2CCC3)c1", "O=C(NC1CCS(=O)(C1)=O)COc1c2c(ccc1)oc(c1c2CCC1)=O"),
    ("CCc1sc(NC(CCC(NCCc2ccc(OC)c(OC)c2)=O)=O)nn1", "c1cc(ccc1)CCNC(=O)CCC(=O)Nc1scnn1"),
    ("N#CC1=C(SCc2ccccc2)NC(=O)CC1c1ccc(O)cc1", "O=C1NC(=CC(C1)c1ccccc1)SCc1ccccc1"),
    ("O=C(NCCN1CCOCC1)c1csc2c1CCCC2", "O=C(NCCN1CCOCC1)c1csc2c1CCCC2"),
    ("CCCCC(=O)Nc1cc(OC)c(NC(C2CCCCC2)=O)cc1OC", "O=C(Nc1ccccc1)C1CCCCC1"),
    ("Cc1ccc(C(C(C)OC(C2CC(=O)N(C3CCCCC3)C2)=O)=O)cc1", "c1cc(C(=O)COC(C2CC(=O)N(C2)C2CCCCC2)=O)ccc1"),
    ("Cc1ccc(S(C(C#N)c2c(N3CCCC3)nc3ccccc3n2)(=O)=O)cc1C", "c1ccc(cc1)S(=O)(=O)Cc1c(nc2ccccc2n1)N1CCCC1"),
    ("CC1(C)OC(=O)C(=Cc2[nH]ccc2)C(=O)O1", "O=C1OCOC(=O)C1=Cc1[nH]ccc1"),
    ("Cc1cc(C)cc(Oc2nc3n(cccc3C)c(=O)c2C=C(C#N)C(=O)NC2CCS(=O)(=O)C2)c1", "c1ccc(cc1)Oc1c(c(=O)n2ccccc2n1)C=CC(=O)NC1CCS(=O)(=O)C1"),
    ("COc1cc(NC(=O)NCc2c(C)onc2-c2ccccc2)ccc1", "O=C(NCc1conc1-c1ccccc1)Nc1ccccc1"),
    ("c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1", "c1ccc(C(Oc2cc3c(cc2)C(=O)CO3)=O)cc1"),
    ("CCN1C(=O)C2C(c3cccs3)N3C4C(=O)N(CC)C(=O)C4C(c4cccs4)N3C2C1=O", "c1cc(sc1)C1C2C(NC(=O)C2N2N1C1C(=O)NC(=O)C1C2c1cccs1)=O"),
    ("Cc1cc(C(N2CCCC(C(c3cc(F)ccc3F)=O)C2)=O)c(C)o1", "O=C(N1CCCC(C(=O)c2ccccc2)C1)c1cocc1"),
    ("COc1cc(C=NO)ccc1Oc1c([N+]([O-])=O)cc([N+]([O-])=O)cc1", "c1cccc(Oc2ccccc2)c1"),
    ("Cc1ccc(N(Cc2c(=O)[nH]c3ccc(C)cc3c2)C(c2cccs2)=O)cc1", "O=C(N(c1ccccc1)Cc1c([nH]c2c(cccc2)c1)=O)c1cccs1"),
    ("COc1ccc(C(=O)Nn2c(C)nnc2-n2c(C)cc(C)n2)cc1OC", "O=C(c1ccccc1)Nn1cnnc1-n1nccc1"),
    ("Cc1c(NC(=O)c2c(C)c(Cl)c(C)nc2Cl)cccc1", "O=C(c1cccnc1)Nc1ccccc1"),
    ("c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1", "c1ccc(CNC(CC(C(=O)NCc2ccccc2)c2nc(=O)c3ccccc3[nH]2)=O)cc1"),
    ("CNc1n(-c2ccccc2)ncc1[N+](=O)[O-]", "c1n(ncc1)-c1ccccc1"),
    ("CC1SC2(NC1=O)C1CC3CC(C1)CC2C3", "O=C1CSC2(N1)C1CC3CC(C1)CC2C3"),
    ("CCc1ccccc1NC(=S)N(C(C)c1occc1)CCOC", "S=C(NCc1occc1)Nc1ccccc1"),
    ("CCC(C)NC(=O)C1CCCN(S(c2ccc(-n3cnnn3)cc2)(=O)=O)C1", "C1CCN(CC1)S(=O)(=O)c1ccc(cc1)-n1nnnc1"),
    ("COc1c2c(ccc1)C1CC(C)(O2)N(Cc2ccccc2)C(=O)N1", "O=C1NC2CC(Oc3ccccc32)N1Cc1ccccc1"),
    ("COc1ccc(C2NC(=O)c3c(cccc3)O2)c(OC)c1OC", "O=C1NC(Oc2c1cccc2)c1ccccc1"),
    ("O=C(NNC=C1C=Nc2ccccc21)c1ccn(Cc2c(Cl)cc(Cl)cc2)n1", "O=C(NNC=C1c2c(cccc2)N=C1)c1nn(cc1)Cc1ccccc1"),
    ("c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1", "c1ccc(NS(c2ccc(OCC(=O)NCc3cnccc3)cc2)(=O)=O)cc1"),
    ("COC1=CC(=O)C(=C2NNC(C(F)(F)F)=C2c2cc3ccccc3o2)C=C1", "O=C1C=CC=CC1=C1NNC=C1c1cc2ccccc2o1"),
    ("CCOC(=O)c1c(C(COC(C=Cc2ccc(Cl)cc2)=O)=O)c(C)[nH]c1C", "c1ccc(C=CC(OCC(=O)c2cc[nH]c2)=O)cc1"),
    ("Cc1nc2ncnn2c(N2CCN(c3nnnn3-c3ccccc3)CC2)c1", "c1nc2ncnn2c(c1)N1CCN(c2nnnn2-c2ccccc2)CC1"),
    ("CC(C)Oc1ccc(C(=O)Nc2ccc(NC(c3ccco3)=O)c(Cl)c2)cc1", "O=C(Nc1ccc(cc1)NC(=O)c1ccccc1)c1occc1"),
    ("CC(c1ccccc1)NC(C(NCC1OCCC1)=O)=O", "O=C(NCc1ccccc1)C(=O)NCC1OCCC1"),
    ("CCCCOc1ccc(NC(=O)CCSc2nccn2C)cc1", "O=C(Nc1ccccc1)CCSc1ncc[nH]1"),
    ("O=C(OCc1ncccc1)c1oc(COc2c(Cl)cccc2)cc1", "O=C(OCc1ncccc1)c1ccc(o1)COc1ccccc1"),
    ("COc1ccc(C=NNC(=O)OC(C)(C)C)cc1OC", "c1ccccc1"),
    ("CC1CCCCC1NC(COC(c1ccc(S(NCc2ccco2)(=O)=O)cc1)=O)=O", "c1coc(c1)CNS(=O)(=O)c1ccc(cc1)C(=O)OCC(=O)NC1CCCCC1"),
    ("Nn1c(SCC(=O)Nc2cccc(F)c2)nnc1C1CCCCC1", "O=C(CSc1[nH]c(nn1)C1CCCCC1)Nc1ccccc1"),
    ("Cc1n[nH]c(NC2CCCCC2)nc1=O", "O=c1cn[nH]c(n1)NC1CCCCC1"),
    ("CCCCCCCCC(=O)NC(C(Cl)(Cl)Cl)NC(=S)N1CCOCC1", "C1NCCOC1"),
    ("CCCc1ccc(Oc2coc3cc(OCC(Nc4c(C)cccc4)=O)ccc3c2=O)cc1", "c1cccc(c1)Oc1c(c2ccc(cc2oc1)OCC(=O)Nc1ccccc1)=O"),
    ("Cc1ccc(C(=O)NN=C2CCSC2)cc1[N+]([O-])=O", "O=C(NN=C1CCSC1)c1ccccc1"),
    ("N#CC1=C2SCN(c3ccc(F)cc3)CN2C(=O)CC1c1cc(F)ccc1", "O=C1N2CN(c3ccccc3)CSC2=CC(c2ccccc2)C1"),
    ("c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1", "c1ccc(CN2C(=O)CC(Nc3cc4c(cc3)cccc4)C2=O)cc1"),
    ("COc1ccc(NC(C)=O)cc1NC(=O)CN1CCN(CC(=O)Nc2ccc(Cl)cc2)CC1", "O=C(Nc1ccccc1)CN1CCN(CC1)CC(=O)Nc1ccccc1"),
    ("Clc1c(Cl)c(C2NC(=O)CCC2[N+]([O-])=O)ccc1", "O=C1NC(CCC1)c1ccccc1"),
    ("CCN(C(=O)CSc1n(-c2ccccc2)c(-c2ccccc2)nn1)CC", "c1ccc(cc1)-n1cnnc1-c1ccccc1"),
    ("CC(=O)CCCCn1cnc2n(C)c(=O)n(C)c(=O)c12", "O=c1[nH]c(c2c(nc[nH]2)[nH]1)=O"),
    ("CC1=NN(c2ccccc2)C(=N)C1=NNc1ccc(Cl)cc1", "N=C1C(=NNc2ccccc2)C=NN1c1ccccc1"),
    ("CCc1ccc(OCC(=O)N(CC)CC)cc1", "c1ccccc1"),
    ("CN(CC(=O)N1CCCCC1)S(c1ccc(Cl)cc1)(=O)=O", "O=C(CNS(=O)(=O)c1ccccc1)N1CCCCC1"),
    ("CSc1ncc(C=C2C(=O)NC(=O)N(c3ccc(C)cc3)C2=O)cn1", "c1ccc(N2C(NC(=O)C(=Cc3cncnc3)C2=O)=O)cc1"),
    ("COCCNC(=S)Nc1c(Cc2ccccc2)cccc1", "c1ccc(Cc2ccccc2)cc1"),
    ("COc1cc(C(=O)Nc2nnc(C(C)(C)C)s2)c([N+]([O-])=O)cc1OC", "O=C(Nc1nncs1)c1ccccc1"),
    ("CCOC(=O)c1ccc(NC(=O)c2cc(OC)c(OC(C)C)cc2)cc1", "O=C(Nc1ccccc1)c1ccccc1"),
    ("COc1ccc(C(=O)C=C2Sc3cc4c(cc3N2C)OCO4)cc1", "O=C(C=C1Sc2cc3c(cc2N1)OCO3)c1ccccc1"),
    ("CCCC1=NN(c2sc3c(n2)cccc3)C(=O)C1=CNCCCN(CC)CC", "C=C1C=NN(C1=O)c1sc2ccccc2n1"),
    ("COc1ccc(C(COC(CN2C(=O)NC(C)(C)C2=O)=O)=O)cc1OC", "c1ccc(C(=O)COC(=O)CN2C(=O)CNC2=O)cc1"),
    ("O=C(Oc1ccc(Br)cc1)C1CC(=O)N(c2ccc(F)cc2)C1", "O=C(C1CC(N(C1)c1ccccc1)=O)Oc1ccccc1"),
    ("O=c1nc(-c2ccccn2)[nH]c(C(F)(F)F)c1Br", "O=c1cc[nH]c(-c2ncccc2)n1"),
    ("CCOC(c1oc2ccccc2c1NC(CN1CCN(C)CC1)=O)=O", "O=C(CN1CCNCC1)Nc1coc2ccccc21"),
    ("CSc1nsc(NN=Cc2ccc3c(c2)OCO3)c1C#N", "c1cc(sn1)NN=Cc1ccc2OCOc2c1"),
    ("CC(C)(C)NC(NC(CSc1nc(C)c(C)c(C)n1)=O)=O", "c1cncnc1"),
    ("Cc1cccnc1CN1CCN(Cc2onc(C(c3ccccc3)c3ccccc3)n2)CC1", "c1cccnc1CN1CCN(CC1)Cc1onc(n1)C(c1ccccc1)c1ccccc1"),
    ("COc1ccc(Nc2oc3cc(=O)ccc-3cc2C(=O)Nc2ncccc2)cc1OC", "c1ccc(cc1)Nc1oc2-c(ccc(c2)=O)cc1C(Nc1ncccc1)=O"),
    ("c1cc(C)c(OCC(NS(c2ccc(C)cc2)(=O)=O)=O)cc1", "O=C(COc1ccccc1)NS(=O)(=O)c1ccccc1"),
    ("CCOc1ccc(-c2scc(CSc3sc(N)nn3)n2)cc1OC", "c1cccc(c1)-c1nc(cs1)CSc1scnn1"),
    ("c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1", "c1ccc(C(=O)COC(=O)CN2C(=O)C3C4CC(C3C2=O)C=C4)cc1"),
    ("Cc1occc1C(=O)NC(C)c1ccc2c(c1)OCO2", "O=C(NCc1ccc2c(c1)OCO2)c1ccoc1"),
    ("CCn1c(SCC(=O)Nc2c(Cl)nccc2)nnc1-c1ccccc1", "O=C(Nc1cnccc1)CSc1[nH]c(nn1)-c1ccccc1"),
    ("CCC(C)N(C)C1CCN(C(=S)Nc2cc(OC)ccc2)CC1", "S=C(Nc1ccccc1)N1CCCCC1"),
    ("Brc1oc(C(=O)N2CC(=O)Nc3c(cc(Br)cc3)C2c2ccccc2)cc1", "O=C(N1CC(Nc2ccccc2C1c1ccccc1)=O)c1occc1"),
    ("CN(C(=O)CCSc1nc(-c2cc3c(cc2)OCO3)cc(C(F)(F)F)n1)Cc1ccccc1", "O=C(NCc1ccccc1)CCSc1nc(ccn1)-c1cc2c(cc1)OCO2"),
    ("[Br-].COc1c(OC)c(OC)cc(-c2nc3c[n+](CC(=O)c4ccccc4)ccc3n2C)c1", "O=C(C[n+]1cc2nc([nH]c2cc1)-c1ccccc1)c1ccccc1"),
    ("CCOC(CSc1n(-c2c(OC)cccc2)c(CNC(Cc2ccccc2)=O)nn1)=O", "O=C(Cc1ccccc1)NCc1n(cnn1)-c1ccccc1"),
    ("CS(N(Cc1ccccc1)c1ccc(C(Nc2c(Sc3ccccc3)cccc2)=O)cc1)(=O)=O", "O=C(c1ccc(NCc2ccccc2)cc1)Nc1c(cccc1)Sc1ccccc1"),
    ("Cc1nc(C2N(C(=O)c3cn(C)c4c(c3=O)cccc4)CCc3c4c([nH]c32)cccc4)ccc1", "O=C(c1c[nH]c2c(cccc2)c1=O)N1C(c2ncccc2)c2[nH]c3ccccc3c2CC1"),
    ("CCCCc1nc(N2CCOCC2)c(C#N)c2c1CCCC2", "c1nc(cc2c1CCCC2)N1CCOCC1"),
    ("O=C(NN=Cc1cc([N+]([O-])=O)ccc1Cl)c1nccnc1", "O=C(NN=Cc1ccccc1)c1nccnc1"),
    ("COc1ccc(-n2c(SCC(=O)c3ccc4c(c3)OCCO4)nnn2)cc1", "O=C(c1ccc2c(c1)OCCO2)CSc1n(nnn1)-c1ccccc1"),
    ("COc1c(C=CC(=O)Nc2cc(S(NC3=NCCCCC3)(=O)=O)ccc2)cccc1", "O=C(Nc1cc(ccc1)S(=O)(=O)NC1=NCCCCC1)C=Cc1ccccc1"),
    ("Cc1nn(-c2ccc(F)cc2)c(Cl)c1C=C(CC(=O)O)c1sc2ccccc2n1", "c1cc2sc(nc2cc1)C=Cc1cn(nc1)-c1ccccc1"),
    ("COc1c(OC)c(OC)cc(C2N(c3ccccc3)OC3C2C(=O)N(Cc2ccccc2)C3=O)c1", "c1cccc(c1)CN1C(=O)C2C(N(OC2C1=O)c1ccccc1)c1ccccc1"),
    ("COCCNC(=S)Nc1cc(OC)c(NC(=O)c2ccco2)cc1OC", "O=C(Nc1ccccc1)c1occc1"),
    ("N#Cc1c(SCC(=O)c2cc3c(oc2=O)cccc3)nc(-c2ccccc2)cc1", "O=C(c1cc2c(cccc2)oc1=O)CSc1cccc(n1)-c1ccccc1"),
    ("O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1", "O=C(N1CCCC1)c1nc2ccccn2c1CN1CCCC(OCc2ccccc2)C1"),
    ("Brc1cccc(OCCSc2ncccn2)c1", "c1cccc(c1)OCCSc1ncccn1"),
    ("CC(C)(C)NC(=O)C12CCC(C)(C1(C)C)c1nc3ccccc3nc12", "c1cccc2nc3C4CC(CC4)c3nc12"),
    ("[I-].CC(C)C1C(OCC(O)C[N+]2(C)CCCCC2)CC(C)CC1", "C1CC[NH+](CC1)CCCOC1CCCCC1"),
    ("Cc1ccccc1NS(=O)(=O)c1ccc(OCC(=O)N2CCCCC2)cc1", "c1cc(ccc1)NS(=O)(=O)c1ccc(cc1)OCC(=O)N1CCCCC1"),
    ("Cc1cc(NC(=O)CSc2nc3c(c(=O)n2-c2ccc(Br)cc2)SCC3)no1", "O=C(CSc1nc2c(c(n1-c1ccccc1)=O)SCC2)Nc1ccon1"),
    ("Cc1ccccc1C(NC(C(C)C)C(OCC(c1[nH]ccc1)=O)=O)=O", "c1cc([nH]c1)C(COC(CNC(=O)c1ccccc1)=O)=O"),
    ("Cc1ccnc(NS(c2ccc(NS(C)(=O)=O)cc2)(=O)=O)n1", "c1ccc(S(=O)(=O)Nc2ncccn2)cc1"),
    ("Cn1c(-c2ccc(Cl)cc2)cnc1NCc1cc2c(cc1[N+]([O-])=O)OCO2.OC(=O)C(O)=O", "c1cc(ccc1)-c1[nH]c(nc1)NCc1cc2c(cc1)OCO2"),
    ("CC1Cc2ccccc2N1C(=O)CON=Cc1ccc(OC(F)F)cc1", "O=C(CON=Cc1ccccc1)N1CCc2c1cccc2"),
    ("C=C1C(=O)OC2C(O)C(C)=CC(=O)C=C(C)CC(OC(C(C)=CC)=O)C12", "C=C1C2CCC=CC(C=CCC2OC1=O)=O"),
    ("O=C1C2N(CSC2)c2c(cc(C(F)(F)F)cc2)N1Cc1cccc(F)c1", "O=C1C2N(CSC2)c2ccccc2N1Cc1ccccc1"),
    ("Cc1ccc(OCC(=O)Nc2c[nH]c(=O)[nH]c2=O)cc1C", "O=C(COc1ccccc1)Nc1c[nH]c([nH]c1=O)=O"),
    ("Cn1c(CN2CCOCC2)nc2cc(NC(=O)c3ccccc3Cl)ccc12", "O=C(c1ccccc1)Nc1ccc2[nH]c(nc2c1)CN1CCOCC1"),
    ("O=c1oc2ccc(O)cc2c(CN2CCN(CC=Cc3ccccc3)CC2)c1", "O=c1oc2ccccc2c(c1)CN1CCN(CC1)CC=Cc1ccccc1"),
    ("Cn1c(Cc2ccccc2)nnc1SCCC(=O)Nc1ccccc1", "O=C(CCSc1nnc([nH]1)Cc1ccccc1)Nc1ccccc1"),
    ("c1cc2nc(CC(=O)c3cc([N+]([O-])=O)ccc3)[nH]c2cc1", "O=C(Cc1nc2ccccc2[nH]1)c1ccccc1"),
    ("c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1", "c1cc2cc(C(=O)N3CCN(c4ccc(N5CCOCC5)nn4)CC3)c(=O)oc2cc1"),
    ("COc1ccccc1-n1c(=S)[nH]nc1CCn1nc(C)c(Br)c1C", "S=c1[nH]nc(n1-c1ccccc1)CCn1cccn1"),
    ("CCC(=O)NC(=S)Nc1ccc(N2CCOCC2)cc1", "c1cccc(c1)N1CCOCC1"),
    ("CCCCCC(=O)N1CCN(CCNC=C2C(=O)CC(c3ccc(OC)c(OC)c3)CC2=O)CC1", "c1ccc(cc1)C1CC(=O)C(C(=O)C1)=CNCCN1CCNCC1"),
    ("CN1CCN(C(=O)CN(S(C)(=O)=O)Cc2ccc(Cl)cc2)CC1", "O=C(CNCc1ccccc1)N1CCNCC1"),
    ("COc1cc(OC)cc(C(=O)NCc2cccnc2)c1", "O=C(NCc1cccnc1)c1ccccc1"),
    ("c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1", "c1cncc(NC(=O)C2CCCN(S(c3cccc4c3nsn4)(=O)=O)C2)c1"),
    ("CC(NC1=NN(C(C)=O)C(C)(c2cccs2)S1)=O", "c1cc(sc1)C1SC=NN1"),
    ("CCCC(=O)Nc1ccc(-c2nc3cc(C)c(C)cc3o2)cc1", "c1cccc(c1)-c1nc2ccccc2o1"),
    ("Cc1c(C)n(CC(O)CN2CCOCC2)c2ccccc12.OC(=O)C(O)=O", "c1cn(c2ccccc12)CCCN1CCOCC1"),
    ("Cc1occc1-c1n(CCc2ccccc2)c(SCC(=O)Nc2sccn2)nn1", "O=C(Nc1sccn1)CSc1n(c(nn1)-c1cocc1)CCc1ccccc1"),
    ("Cc1oc(-c2cc(F)ccc2)nc1CN1C(CCc2ncccc2)CCCC1", "c1ccc(cc1)-c1nc(co1)CN1C(CCCC1)CCc1ncccc1"),
    ("COc1c(OC)c(C(O)=O)c(C=NNC(c2cc(NC(c3ccc(F)cc3)=O)ccc2)=O)cc1", "O=C(Nc1cc(ccc1)C(=O)NN=Cc1ccccc1)c1ccccc1"),
    ("CCn1c(Cc2ccccc2)nnc1SCC(=O)Nc1ccc(S(N)(=O)=O)cc1", "O=C(CSc1[nH]c(nn1)Cc1ccccc1)Nc1ccccc1"),
    ("CCn1c(COc2nn(-c3ccccc3)c(=O)cc2)nnc1SCc1ccc(OC)cc1", "O=c1ccc(nn1-c1ccccc1)OCc1[nH]c(nn1)SCc1ccccc1"),
    ("CC1=NC(=O)C(=C2CC(O)(C(F)(F)F)ON2)C(C)=C1", "O=C1C(=C2NOCC2)C=CC=N1"),
    ("COc1ccc(NC(=S)Nc2ccccc2C(F)(F)F)cc1", "S=C(Nc1ccccc1)Nc1ccccc1"),
    ("CCCc1cc(=O)nc(SCC(=O)c2cc(C)n(CCOC)c2C)[nH]1", "O=C(c1c[nH]cc1)CSc1[nH]ccc(=O)n1"),
    ("CC(=O)Nc1ccc2c(c1)C(C)(C)C(C)N2C", "c1ccc2c(c1)NCC2"),
    ("CCN1CCN(C(c2ccc(OCC(Nc3ccc(F)cc3)=O)c(OC)c2)=O)CC1", "c1cc(ccc1)NC(=O)COc1ccc(C(N2CCNCC2)=O)cc1"),
    ("CCCCN1C2CCCC1CC(NC(=O)c1ccc(OC)c(OC)c1)C2", "O=C(NC1CC2NC(CCC2)C1)c1ccccc1"),
    ("c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1", "c1ccc(N(CC(=O)N2CCOCC2)S(c2ccccc2)(=O)=O)cc1"),
    ("CCn1c(C)nc2cc(C(=O)NN=Cc3ccc(OC)c(O)c3)ccc12", "O=C(NN=Cc1ccccc1)c1ccc2[nH]cnc2c1"),
    ("[Cl-].NC(=O)CN1C=CC(=C[NH+]=O)C=C1", "C=C1C=CNC=C1"),
    ("Cn1cnnc1SC1C(NS(c2ccccc2)(=O)=O)c2c3c(ccc2)cccc31", "O=S(=O)(NC1C(Sc2[nH]cnn2)c2cccc3c2c1ccc3)c1ccccc1"),
    ("COc1ccc(Nc2nc(NCc3ccco3)nc(NN=Cc3ccccc3F)n2)cc1", "c1ccc(Nc2nc(nc(n2)NN=Cc2ccccc2)NCc2ccco2)cc1"),
    ("CC1=CC(=O)C(=C2C=C(c3ccccc3[N+]([O-])=O)NN2)C=C1", "O=C1C(=C2NNC(=C2)c2ccccc2)C=CC=C1"),
    ("COc1ccc(CC2[N+]([O-])(C)CCc3cc(OC)c(O)cc32)cc1O", "c1ccc(cc1)CC1c2c(cccc2)CC[NH2+]1"),
    ("Cl.NC(N)=Nc1nc(=O)c2cc(Br)ccc2[nH]1", "O=c1nc[nH]c2ccccc21"),
    ("CC(=O)N1CCC(=NNc2ccc(S(=O)(=O)N3CCOCC3)cc2[N+]([O-])=O)CC1", "c1cc(ccc1NN=C1CCNCC1)S(=O)(=O)N1CCOCC1"),
    ("Cc1cc(S(N(Cc2ccc(F)cc2)CC2OCCC2)(=O)=O)ccc1-n1cnnn1", "c1cc(ccc1)CN(CC1OCCC1)S(c1ccc(cc1)-n1cnnn1)(=O)=O"),
    ("CC1(C)OCc2c(c3c(sc4c(NCCCO)ncnc43)nc2-c2ccco2)C1", "c1ncnc2c1sc1nc(c3c(c12)CCOC3)-c1ccco1"),
    ("COc1ccc(CCNC(=O)CSc2n(-c3ccc(OC)c(OC)c3)nnn2)cc1OC", "O=C(CSc1n(-c2ccccc2)nnn1)NCCc1ccccc1"),
    ("CC(C)(CC(O)=O)CC(NCc1c(Cl)cccc1Sc1ccc(Cl)cc1)=O", "c1ccc(Sc2ccccc2)cc1"),
    ("COc1ccc(-c2cc(CCCC(=O)NCCc3cc(OC)ccc3OC)no2)cc1", "O=C(NCCc1ccccc1)CCCc1noc(c1)-c1ccccc1"),
    ("Cc1ccc(-c2ncns2)cc1", "c1ccc(cc1)-c1sncn1"),
    ("C(O)CCn1c(=O)c2c(nc1C=Cc1ccc([N+]([O-])=O)o1)cccc2", "O=c1[nH]c(C=Cc2ccco2)nc2c1cccc2"),
    ("COC(CC(O)CC(O)C(C)OCc1ccccc1)OC", "c1ccccc1"),
    ("Cl.CCCC(N1CCN(C(=O)c2occc2)CC1)c1n(C(C)(C)C)nnn1", "O=C(N1CCN(Cc2nnn[nH]2)CC1)c1ccco1"),
    ("O=C(NC(CO)c1ccccc1)c1occc1", "O=C(NCc1ccccc1)c1occc1"),
    ("O=C(Nc1ccc(N2CCOCC2)cc1)c1c(Cl)cc(F)c(F)c1", "O=C(Nc1ccc(N2CCOCC2)cc1)c1ccccc1"),
    ("CCc1sc(N2C(=O)c3ccc(Oc4ccc([N+]([O-])=O)cc4)cc3C2=O)nn1", "O=C1N(C(=O)c2cc(Oc3ccccc3)ccc21)c1scnn1"),
    ("CC(C)Cc1ccc(C(C)C(=O)O)cc1", "c1ccccc1"),
    ("Cl.N=c1sccn1CC(=O)Nc1cc(S(N2CCCC2)(=O)=O)ccc1Cl", "N=c1n(CC(=O)Nc2cccc(S(=O)(N3CCCC3)=O)c2)ccs1"),
    ("c1ccc(-c2ccc(C(=O)OC3CC4OC(=O)CC4C3CO)cc2)cc1", "c1ccc(cc1)-c1ccc(C(=O)OC2CC3CC(=O)OC3C2)cc1"),
    ("CN(CCC#N)CC(=O)Nc1ccc(S(N)(=O)=O)cc1", "c1ccccc1"),
    ("Cc1nc(-c2ccc([N+]([O-])=O)cc2)sc1C(=O)O", "c1cc(-c2sccn2)ccc1"),
    ("c1coc(C(=O)N2CCN(C(Cn3nnc(-c4ccc(NC(c5ccc(F)cc5)=O)cc4)n3)=O)CC2)c1", "O=C(N1CCN(C(=O)Cn2nc(nn2)-c2ccc(NC(=O)c3ccccc3)cc2)CC1)c1ccco1"),
    ("Cc1onc(-c2c(Cl)cccc2Cl)c1C(N)=S", "c1ccc(cc1)-c1nocc1"),
    ("CCOC(=O)c1cnc2ccccc2c1NCCO", "c1cnc2ccccc2c1"),
    ("Cc1ccc(C)c(NC(=O)Cn2nnc(-c3ccc(N4CCOCC4)cc3)n2)c1", "O=C(Cn1nnc(n1)-c1ccc(cc1)N1CCOCC1)Nc1ccccc1"),
    ("CC(C)(C)c1cc(C(=O)NNc2ccc(OC(F)(F)F)cc2)n(Cc2ccccc2)n1", "O=C(NNc1ccccc1)c1ccnn1Cc1ccccc1"),
    ("CCCCCOC(=O)C1=C(C)N=C2N(NN=N2)C1c1ccc(OC)c(OC)c1OC", "c1cccc(c1)C1N2NN=NC2=NC=C1"),
    ("Cc1cc2cc(CNC(=O)C3CC3)ccc2n1C", "O=C(NCc1ccc2c(cc[nH]2)c1)C1CC1"),
    ("Cc1ccccc1C(NC(CC(C)C)C(Nc1cc(S(N(C)C)(=O)=O)ccc1)=O)=O", "c1ccc(cc1)NC(CNC(=O)c1ccccc1)=O"),
    ("COCCCNC(=S)N1CCC(NC(=O)c2ccco2)CC1", "O=C(NC1CCNCC1)c1ccco1"),
    ("Cn1c(C=Cc2oc([N+]([O-])=O)cc2)nc2ccccc2c1=O", "O=c1[nH]c(C=Cc2occc2)nc2ccccc12"),
    ("c1cc2nc(SCc3cc(=O)n4ccsc4n3)n(CCCO)c(=O)c2cc1", "c1ccc2nc(SCc3cc(=O)n4ccsc4n3)[nH]c(=O)c2c1"),
    ("c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O", "c1ccc2c(c1)cccc2NC(=O)CC1SC(NCC2OCCC2)=NC1=O"),
  ]

  def _checkScaffolds(self, molecules):
    """Assert that each (molecule, scaffold) pair canonicalizes consistently.

    For each pair, computes the Murcko scaffold of the input molecule and
    compares it, in canonical SMILES form, with the expected scaffold.
    """
    for inputSmiles, expectedSmiles in molecules:
      mol = Chem.MolFromSmiles(inputSmiles)
      calcScaffold = Chem.MolToSmiles(GetScaffoldForMol(mol))
      actualScaffold = Chem.MolToSmiles(Chem.MolFromSmiles(expectedSmiles))
      self.assertEqual(calcScaffold, actualScaffold)

  def test1MurckoScaffold(self):
    """Hand-crafted cases covering individual scaffold rules."""
    self._checkScaffolds(self.testMolecules)

  def test2MurckoScaffold(self):
    """Larger corpus of drug-like molecules."""
    self._checkScaffolds(self.testMolecules2)
# Allow running this test module directly from the command line.
if __name__ == '__main__':  #pragma: no cover
  unittest.main()
|
adalke/rdkit
|
rdkit/Chem/Scaffolds/test_MurckoScaffold.py
|
Python
|
bsd-3-clause
| 27,412
|
[
"RDKit"
] |
670a4b621447c891faf1b23403e4b0f5a202fcebaf259f0481c58452c06d65f8
|
import math
import sys
def validate_image_equality(image_1_path, image_2_path, max_delta):
    """Check that two casa images are equal within max_delta.

    A difference image (image_1 - image_2) is written to "difference.IM2"
    and its statistics are validated by compare_image_statistics.  On a
    mismatch, the statistics of both inputs and of the difference image are
    printed for diagnosis.  Returns True when the images match.
    """
    import pyrap.images as pim

    print("comparing images from paths:")
    print(image_1_path)
    print(image_2_path)

    # Let pyrap evaluate the subtraction expression to get the difference.
    difference = pim.image('"{0}" - "{1}"'.format(image_1_path, image_2_path))
    difference.saveas("difference.IM2")

    diff_stats = difference.statistics()
    images_match = compare_image_statistics(diff_stats, max_delta)
    if images_match:
        return images_match

    # Mismatch: dump everything useful for a post-mortem.
    print("\n\n\n")
    print("*" * 30)
    print("Statistics of the produced image:")
    produced = pim.image("{0}".format(image_1_path))
    print(produced.statistics())
    print("\n\n\n")
    print("Statistics of the compare image:")
    baseline = pim.image("{0}".format(image_2_path))
    print(baseline.statistics())
    print("\n\n\n")
    print("difference between produced image and the baseline image:")
    print("maximum delta: {0}".format(max_delta))
    print(diff_stats)
    print("*" * 30)
    return images_match
def _test_against_maxdelta(value, max_delta, name):
if math.fabs(value) > max_delta:
print("Dif found: '{0}' difference >{2}<is larger then " \
"the maximum accepted delta: {1}".format(name, max_delta, value))
return True
return False
def compare_image_statistics(stats_dict, max_delta=0.0001):
    """Validate difference-image statistics against scaled tolerances.

    Each statistic in stats_dict is compared against max_delta multiplied by
    a statistic-specific factor (the factors were tuned empirically).
    Returns True when every tested statistic is within tolerance.
    """
    # Tolerance multiplier applied to max_delta per statistic.  Statistics
    # deliberately NOT tested: "minpos"/"maxpos" (the extremum location may
    # move many pixels while the image is effectively unchanged), "npts"
    # (cannot be tested) and "sumsq" (already covered by "sum").
    tolerance_factors = {
        "rms": 300,
        "medabsdevmed": 200,
        "min": 2000,
        "max": 1500,
        "sum": 200000,
        "quartile": 4000,
        "median": 1,
        "sigma": 300,
        "mean": 3,
    }
    found_incorrect_datapoint = False
    for name, value in list(stats_dict.items()):
        if name in tolerance_factors:
            # Accumulate failures: one bad statistic fails the whole check.
            found_incorrect_datapoint = found_incorrect_datapoint or \
                _test_against_maxdelta(
                    float(value[0]), max_delta * tolerance_factors[name], name)
    # True means "all tested statistics were within tolerance".
    return not found_incorrect_datapoint
# from here sourcelist compare functions
def validate_source_list_files(source_list_1_path, source_list_2_path, max_delta):
    """Compare two sourcelist files within max_delta.

    Reads both files, converts them to per-column data arrays and delegates
    the actual comparison to compare_sourcelist_data_arrays.
    """
    # 'with' guarantees the file handles are closed even if read() raises.
    with open(source_list_1_path) as fp:
        sourcelist1 = fp.read()
    with open(source_list_2_path) as fp:
        sourcelist2 = fp.read()
    # convert to dataarrays
    sourcelist_data_1 = convert_sourcelist_as_string_to_data_array(sourcelist1)
    sourcelist_data_2 = convert_sourcelist_as_string_to_data_array(sourcelist2)
    return compare_sourcelist_data_arrays(sourcelist_data_1, sourcelist_data_2, max_delta)
def convert_sourcelist_as_string_to_data_array(source_list_as_string):
    """Parse a sourcelist string into column-major data arrays.

    The first line is the format header (e.g. "format = Name, Type, ...");
    each returned inner list starts with the column name followed by the
    (whitespace-stripped) values of that column, with data rows sorted.
    """
    lines = source_list_as_string.split("\n")

    # Header line: the first comma-separated field still carries the
    # "<something> =" prefix, so keep only the part after the "=".
    header_fields = lines[0].split(",")
    column_arrays = [[header_fields[0].split("=")[1].strip()]]
    column_arrays.extend([field.strip()] for field in header_fields[1:])

    # Data begins at the third line; sorting gives a name-ordered,
    # comparison-friendly layout.
    for data_line in sorted(lines[2:]):
        if not data_line:
            continue
        for column_index, field in enumerate(data_line.split(",")):
            column_arrays[column_index].append(field.strip())
    return column_arrays
def easyprint_data_arrays(data_array1, data_array2):
    """Print the column arrays of two sourcelists, pairwise, for debugging."""
    print("All data as red from the sourcelists:")
    for column_pair in zip(data_array1, data_array2):
        print(column_pair[0])
        print(column_pair[1])
def compare_sourcelist_data_arrays(data_array1, data_array2, max_delta=0.0001):
    """
    Compare two column-oriented sourcelist data arrays.

    The first element of each column is its header (taken from the
    format line). ``Name`` and ``Type`` must match exactly; ``Ra`` and
    ``Dec`` are parsed from their sexagesimal-style notation and all
    other numeric columns are compared as floats, each within a
    per-column tolerance derived from ``max_delta``. Diagnostics are
    printed for every mismatch and, on failure, both arrays are dumped.

    :param data_array1: first data array (list of columns)
    :param data_array2: second data array (list of columns)
    :param max_delta: base tolerance, scaled per column
    :return: True when all datapoints matched within their tolerances
    """
    # Tolerance multiplier (applied to max_delta) for plain float columns.
    # Values are taken from the original per-column branches; the printed
    # delta now always matches the tested delta (the original printed the
    # wrong multiplier for I, MajorAxis and Orientation).
    float_tolerances = {
        "I": 2000,
        "Q": 1000,
        "U": 1000,
        "V": 1000,
        "MajorAxis": 60000,
        "MinorAxis": 30000,
        "Orientation": 70000,
    }

    def _ra_as_float(entry):
        # "hh:mm:ss.sss" -> a single comparable float
        hours, minutes, seconds = entry.split(":")
        return float(hours) * 3600 + float(minutes) * 60 + float(seconds)

    def _dec_as_float(entry):
        # "+dd.mm.ss.sss" -> a single comparable float
        parts = entry.strip("+").split(".")
        return (float(parts[0]) * 3600 + float(parts[1]) * 60 +
                float("{0}.{1}".format(parts[2], parts[3])))

    def _floats_differ(name, value1, value2, delta):
        # Print a diagnostic and return True when the values differ by
        # at least delta.
        if math.fabs(value1 - value2) < delta:
            return False
        print("{0}'s are not the same within max_delta {1} != {2} "
              "max_delta_{0} = {3} ".format(name, value1, value2, delta))
        return True

    print("######################################################")
    found_incorrect_datapoint = False
    for (first_array, second_array) in zip(data_array1, data_array2):
        header = first_array[0]
        # A differing header is a major failure, but keep comparing the
        # remaining entries so all problems are reported in one run.
        if header != second_array[0]:
            print("******************* problem:")
            print("format strings not equal: {0} != {1}".format(
                header, second_array[0]))
            found_incorrect_datapoint = True
        entry_pairs = list(zip(first_array[1:], second_array[1:]))
        base_header = header.split("=")[0].strip()
        if header == "Name":
            # Hard check on equality of the names of the found sources
            for (entrie1, entrie2) in entry_pairs:
                if entrie1 != entrie2:
                    print("The sourcelist entrie names are not the same: "
                          "\n{0} !=\n {1}".format(entrie1, entrie2))
                    found_incorrect_datapoint = True
        elif header == "Type":
            # Hard check on equality of the types of the found sources
            for (entrie1, entrie2) in entry_pairs:
                if entrie1 != entrie2:
                    print("The sourcelist entrie types are not the same: "
                          "{0} != {1}".format(entrie1, entrie2))
                    found_incorrect_datapoint = True
        elif header == "Ra":
            # soft check: convert to float and compare within tolerance
            for (entrie1, entrie2) in entry_pairs:
                if _floats_differ("Ra", _ra_as_float(entrie1),
                                  _ra_as_float(entrie2), max_delta * 10000):
                    found_incorrect_datapoint = True
        elif header == "Dec":
            for (entrie1, entrie2) in entry_pairs:
                if _floats_differ("Dec", _dec_as_float(entrie1),
                                  _dec_as_float(entrie2), max_delta * 10000):
                    found_incorrect_datapoint = True
        elif header in float_tolerances:
            # fixed: the U branch previously reported mismatches as "Q's"
            delta = max_delta * float_tolerances[header]
            for (entrie1, entrie2) in entry_pairs:
                if _floats_differ(header, float(entrie1), float(entrie2),
                                  delta):
                    found_incorrect_datapoint = True
        elif base_header == "ReferenceFrequency":
            # fixed: this branch previously reported mismatches as
            # "Orientation's"
            for (entrie1, entrie2) in entry_pairs:
                if _floats_differ("ReferenceFrequency", float(entrie1),
                                  float(entrie2), max_delta * 10000000):
                    found_incorrect_datapoint = True
        elif base_header == "SpectralIndex":
            # Not known yet what will be in the spectral index: do not test
            pass
        else:
            print("unknown format line entrie found: delta fails")
            print(header)
            found_incorrect_datapoint = True
    if found_incorrect_datapoint:
        print("######################################################")
        print("compared the following data arrays:")
        easyprint_data_arrays(data_array1, data_array2)
        print("######################################################")
    # return inverse of found_incorrect_datapoint to signal delta test success
    return not found_incorrect_datapoint
# Test data:
# Two inline sourcelist fixtures for exercising the comparison functions
# by hand. The second differs from the first only in the I and
# ReferenceFrequency values of the first GAUSSIAN entry.
source_list_as_string = """
format = Name, Type, Ra, Dec, I, Q, U, V, MajorAxis, MinorAxis, Orientation, ReferenceFrequency='6.82495e+07', SpectralIndex='[]'
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i3_s3_g3, GAUSSIAN, 14:58:34.711, +71.42.19.636, 3.145e+01, 0.0, 0.0, 0.0, 1.79857e+02, 1.49783e+02, 1.24446e+02, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i2_s2_g2, GAUSSIAN, 15:09:52.818, +70.48.01.625, 2.321e+01, 0.0, 0.0, 0.0, 2.23966e+02, 1.09786e+02, 1.32842e+02, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i4_s4_g4, GAUSSIAN, 14:53:10.634, +69.29.31.920, 1.566e+01, 0.0, 0.0, 0.0, 1.25136e+02, 4.72783e+01, 6.49083e+01, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i0_s0_g0, POINT, 15:20:15.370, +72.27.35.077, 1.151e+01, 0.0, 0.0, 0.0, 0.00000e+00, 0.00000e+00, 0.00000e+00, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i1_s1_g1, POINT, 15:15:15.623, +66.54.31.670, 4.138e+00, 0.0, 0.0, 0.0, 0.00000e+00, 0.00000e+00, 0.00000e+00, 6.82495e+07, [0.000e+00]
"""
source_list_as_string2 = """
format = Name, Type, Ra, Dec, I, Q, U, V, MajorAxis, MinorAxis, Orientation, ReferenceFrequency='6.82495e+07', SpectralIndex='[]'
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i3_s3_g3, GAUSSIAN, 14:58:34.711, +71.42.19.636, 3.146e+01, 0.0, 0.0, 0.0, 1.79857e+02, 1.49783e+02, 1.24446e+02, 6.82496e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i2_s2_g2, GAUSSIAN, 15:09:52.818, +70.48.01.625, 2.321e+01, 0.0, 0.0, 0.0, 2.23966e+02, 1.09786e+02, 1.32842e+02, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i4_s4_g4, GAUSSIAN, 14:53:10.634, +69.29.31.920, 1.566e+01, 0.0, 0.0, 0.0, 1.25136e+02, 4.72783e+01, 6.49083e+01, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i0_s0_g0, POINT, 15:20:15.370, +72.27.35.077, 1.151e+01, 0.0, 0.0, 0.0, 0.00000e+00, 0.00000e+00, 0.00000e+00, 6.82495e+07, [0.000e+00]
/data/scratch/klijn/out/awimage_cycle_0/image.restored_w0_i1_s1_g1, POINT, 15:15:15.623, +66.54.31.670, 4.138e+00, 0.0, 0.0, 0.0, 0.00000e+00, 0.00000e+00, 0.00000e+00, 6.82495e+07, [0.000e+00]
"""
# Manual smoke test, kept disabled. NOTE(review): the last line uses the
# Python 2 print statement; these strings also start with a newline, so
# convert_sourcelist_as_string_to_data_array would treat an empty line as
# the format line — confirm before re-enabling.
# entries_array = convert_sourcelist_as_string_to_data_array(source_list_as_string)
# entries_array2 = convert_sourcelist_as_string_to_data_array(source_list_as_string2)
# print compare_sourcelist_data_arrays(entries_array, entries_array2, 0.0001)
# NOTE(review): this first, all-zero image_data is immediately overwritten
# by the populated version below; it is dead code kept only as a template.
image_data = {'rms': [ 0.], 'medabsdevmed':[ 0.], 'minpos': [0, 0, 0, 0]
    , 'min':[ 0.], 'max': [ 0.],
    'quartile': [ 0.], 'sumsq': [ 0.], 'median': [ 0.], 'npts':[ 65536.],
    'maxpos': [0, 0, 0, 0], 'sigma': [ 0.], 'mean': [ 0.]}
# Raw statistics output the fixture below was transcribed from:
# {'rms': array([ 0.52093363]), 'medabsdevmed': array([ 0.27387491]), 'minpos': array([156, 221, 0, 0],
# dtype=int32), 'min': array([-2.26162958]), 'max': array([ 24.01361465]), 'sum': array([ 1355.46549538]),
# 'quartile': array([ 0.54873329]), 'sumsq': array([ 17784.62525496]), 'median': array([ 0.00240479]),
# 'npts': array([ 65536.]), 'maxpos': array([148, 199, 0, 0], dtype=int32),
# 'sigma': array([ 0.52052685]), 'mean': array([ 0.02068276])}
image_data = {'rms': [ 0.52093363], 'medabsdevmed': [ 0.27387491], 'minpos': [[156, 221, 0, 0], "int32"],
    'min': [-2.26162958], 'max': [ 24.01361465], 'sum': [ 1355.46549538],
    'quartile' : [ 0.54873329], 'sumsq': [ 17784.62525496], 'median': [ 0.00240479],
    'npts': [ 65536.], 'maxpos':[ [148, 199, 0, 0], "int32"],
    'sigma': [ 0.52052685], 'mean': [ 0.02068276]}
# Manual smoke test, disabled (Python 2 print syntax):
# print compare_image_statistics(image_data)
if __name__ == "__main__":
    # Command line entry point of the regression test: compare a newly
    # produced image (and, when enabled, sourcelist) with stored reference
    # output; exit status 0 on success, 1 on failure.
    source_list_1, image_1, source_list_2, image_2, max_delta = None, None, None, None, None
    # Parse parameters from command line
    error = False
    print(sys.argv[1:5])
    try:
        # the fits paths are accepted for call compatibility but unused here
        # (was: "fist_1" — typo fixed)
        image_1, source_list_1, fits_1, image_2, source_list_2, fits_2 = sys.argv[1:7]
    except ValueError:
        # wrong number of command line arguments supplied
        print("Sourcelist comparison has been disabled! Arguments must still be provided")
        print("usage: python3 {0} source_list_1_path "
              " image_1_path source_list_2_path image_2_path (max_delta type=float)".format(sys.argv[0]))
        sys.exit(1)
    max_delta = None
    try:
        # NOTE(review): sys.argv[5] is the second sourcelist path in the
        # unpacking above, so this conversion normally fails and the
        # default is used; the delta was presumably meant to be the 7th
        # argument — confirm before changing the index.
        max_delta = float(sys.argv[5])
    except (IndexError, ValueError):
        max_delta = 0.0001
    print("using max delta: {0}".format(max_delta))
    if not error:
        image_equality = validate_image_equality(image_1, image_2, max_delta)
        # sourcelist comparison is still unstable default to true
        sourcelist_equality = True  # validate_source_list_files(source_list_1, source_list_2, max_delta)
        if not (image_equality and sourcelist_equality):
            print("Regression test failed: exiting with exitstatus 1")
            print(" image_equality: {0}".format(image_equality))
            print(" sourcelist_equality: {0}".format(sourcelist_equality))
            sys.exit(1)
        print("Regression test Succeed!!")
        sys.exit(0)
|
kernsuite-debian/lofar
|
CEP/Pipeline/test/regression_tests/selfcal_imager_pipeline_test.py
|
Python
|
gpl-3.0
| 19,176
|
[
"Gaussian"
] |
487a143301d6f451c8044c5ffa0885ef06edcb3e96382e8e733f6ace67889d2e
|
#!/usr/bin/env python3
# -*- coding:utf8 -*-
# randomQuizGenerator.py - Creates quizzes with questions and answers in
# random order, along with the answer key
import random
# The quiz data. Keys are states and values are their capitals.
# NOTE(review): 'NewMexico' is missing a space ("New Mexico") — kept as-is
# because changing it would alter the generated quiz text.
capitals = {'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',
    'Arkansas': 'Little Rock', 'California': 'Sacramento', 'Colorado': 'Denver',
    'Connecticut': 'Hartford', 'Delaware': 'Dover', 'Florida': 'Tallahassee',
    'Georgia': 'Atlanta', 'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois':
    'Springfield', 'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas':
    'Topeka', 'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge', 'Maine':
    'Augusta', 'Maryland': 'Annapolis', 'Massachusetts': 'Boston', 'Michigan':
    'Lansing', 'Minnesota': 'Saint Paul', 'Mississippi': 'Jackson', 'Missouri':
    'Jefferson City', 'Montana': 'Helena', 'Nebraska': 'Lincoln', 'Nevada':
    'Carson City', 'New Hampshire': 'Concord', 'New Jersey': 'Trenton',
    'NewMexico': 'Santa Fe', 'New York': 'Albany', 'North Carolina': 'Raleigh',
    'North Dakota': 'Bismarck', 'Ohio': 'Columbus', 'Oklahoma': 'Oklahoma City',
    'Oregon': 'Salem', 'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',
    'South Carolina': 'Columbia', 'South Dakota': 'Pierre', 'Tennessee':
    'Nashville', 'Texas': 'Austin', 'Utah': 'Salt Lake City', 'Vermont':
    'Montpelier', 'Virginia': 'Richmond', 'Washington': 'Olympia', 'West Virginia': 'Charleston', 'Wisconsin': 'Madison', 'Wyoming': 'Cheyenne'}
# Generate 35 quiz files.
for quizNum in range(35):
    # Create the quiz and answer key files; 'with' guarantees both are
    # flushed and closed even when a write fails part way.
    with open('capitalsquiz%s.txt' % (quizNum + 1), 'w') as quizFile, \
         open('capitalsquiz_answers%s.txt' % (quizNum + 1), 'w') as answerKeyFile:
        # Write out the header for the quiz.
        quizFile.write('Name:\n\nDate:\n\nPeriod:\n\n')
        quizFile.write((' ' * 20) + 'State Capitals Quiz (Form %s)' % (quizNum + 1))
        quizFile.write('\n\n')
        # Shuffle the order of the states.
        states = list(capitals.keys())
        random.shuffle(states)
        # Loop through all 50 states, making a question for each.
        for questionNum in range(50):
            correctAnswer = capitals[states[questionNum]]
            wrongAnswers = list(capitals.values())
            del wrongAnswers[wrongAnswers.index(correctAnswer)]
            # BUG FIX: only three wrong answers plus the correct one are
            # offered. Previously all 49 wrong answers were kept, so the
            # correct answer was usually missing from the four printed
            # options and 'ABCD'[answerOptions.index(correctAnswer)]
            # raised IndexError whenever its index exceeded 3.
            answerOptions = random.sample(wrongAnswers, 3) + [correctAnswer]
            random.shuffle(answerOptions)
            # Write the question and answer options to the quiz file.
            quizFile.write('%s. What is the capital of %s?\n' % (questionNum + 1, states[questionNum]))
            for i in range(4):
                quizFile.write(' %s. %s\n' % ('ABCD'[i], answerOptions[i]))
            quizFile.write('\n')
            # Write the answer key to a file.
            answerKeyFile.write('%s. %s\n' % (questionNum + 1, 'ABCD'[
                answerOptions.index(correctAnswer)]))
|
liuyepiaoxiang/es6-learning
|
032-python/chap2/project-8.0/randomQuizGenerator.py
|
Python
|
mit
| 2,905
|
[
"COLUMBUS"
] |
ed97e1080eb0abe63d06782fe03a68beb6acb2e0599068a5933dbd1dd2b51396
|
"""A component that allows one to place colored and scaled glyphs at
input point data.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# KK Rai (kk.rai [at] iitb.ac.in)
# R. Ambareesha (ambareesha [at] iitb.ac.in)
# Copyright (c) 2005-2007, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Trait, Bool
from traits.api import Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from tvtk.tvtk_base import TraitRevPrefixMap
import tvtk.common as tvtk_common
# Local imports.
from mayavi.core.component import Component
from mayavi.core.module import Module
from mayavi.components import glyph_source
######################################################################
# `Glyph` class.
######################################################################
class Glyph(Component):
    """Component that places colored and scaled glyphs (vector or
    tensor) at its input point data."""

    # The version of this class. Used for persistence.
    __version__ = 0

    # Type of Glyph: 'tensor' or 'vector'
    glyph_type = Enum('vector', 'tensor',
                      desc = 'if the glyph is vector or tensor')

    # The scaling mode to use when scaling the glyphs. We could have
    # used the glyph's own scale mode but it allows users to set the
    # mode to use vector components for the scaling which I'd like to
    # disallow.
    scale_mode = Trait('scale_by_scalar',
                       TraitRevPrefixMap({'scale_by_vector': 1,
                                          'scale_by_vector_components': 2,
                                          'data_scaling_off': 3,
                                          'scale_by_scalar': 0}),
                       desc="if scaling is done using scalar or vector/normal magnitude"
                       )

    # The color mode to use when coloring the glyphs. We could have
    # used the glyph's own color_mode trait but it allows users to set
    # the mode to use vector components for the scaling which I'd
    # like to disallow.
    color_mode = Trait('color_by_scalar',
                       TraitRevPrefixMap({'color_by_vector': 2,
                                          'color_by_scalar': 1,
                                          'no_coloring': 0}),
                       desc="if coloring is done by scalar or vector/normal magnitude"
                       )

    # Color mode used when glyph_type is 'tensor'.
    color_mode_tensor = Trait('scalar',
                              TraitRevPrefixMap({'scalars': 1,
                                                 'eigenvalues':2,
                                                 'no_coloring': 0}),
                              desc="if coloring is done by scalar or eigenvalues"
                              )

    # Specify if the input points must be masked. By mask we mean
    # that only a subset of the input points must be displayed.
    mask_input_points = Bool(False, desc="if input points are masked")

    # The MaskPoints filter.
    mask_points = Instance(tvtk.MaskPoints, args=(),
                           kw={'random_mode': True}, record=True)

    # The Glyph3D instance.
    glyph = Instance(tvtk.Object, allow_none=False, record=True)

    # The Source to use for the glyph. This is chosen from
    # `self._glyph_list` or `self.glyph_dict`.
    glyph_source = Instance(glyph_source.GlyphSource,
                            allow_none=False, record=True)

    # The module associated with this component. This is used to get
    # the data range of the glyph when the scale mode changes. This
    # *must* be set if this module is to work correctly.
    module = Instance(Module)

    # Should we show the GUI option for changing the scalar mode or
    # not? This is useful for vector glyphing modules where there it
    # does not make sense to scale the data based on scalars.
    show_scale_mode = Bool(True)

    ########################################
    # Private traits.

    # Used for optimization. While True, render() is suppressed to avoid
    # redundant redraws during batched trait updates.
    _updating = Bool(False)

    ########################################
    # View related traits.

    view = View(Group(Item(name='mask_input_points'),
                      Group(Item(name='mask_points',
                                 enabled_when='object.mask_input_points',
                                 style='custom', resizable=True),
                            show_labels=False,
                            ),
                      label='Masking',
                      ),
                Group(Group(Item(name='scale_mode',
                                 enabled_when='show_scale_mode',
                                 visible_when='show_scale_mode'),
                            Item(name='color_mode',
                                 enabled_when= 'glyph_type == "vector"',
                                 visible_when= 'glyph_type == "vector"'),
                            Item(name='color_mode_tensor',
                                 enabled_when= 'glyph_type == "tensor"',
                                 visible_when= 'glyph_type == "tensor"'),
                            ),
                      Group(Item(name='glyph', style='custom',
                                 resizable=True),
                            show_labels=False),
                      label='Glyph',
                      selected=True,
                      ),
                Group(Item(name='glyph_source',
                           style='custom', resizable=True),
                      show_labels=False,
                      label='Glyph Source',
                      ),
                resizable=True
                )

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        # Drop transient state (the back-reference to the module and the
        # render-suppression flag) from the persisted state.
        d = super(Glyph, self).__get_pure_state__()
        for attr in ('module', '_updating'):
            d.pop(attr, None)
        return d

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters. You should also
        set the `actors` attribute up at this point.
        """
        self._glyph_type_changed(self.glyph_type)
        self.glyph_source = glyph_source.GlyphSource()
        # Handlers to setup our source when the sources pipeline changes.
        self.glyph_source.on_trait_change(self._update_source, 'pipeline_changed')
        self.mask_points.on_trait_change(self.render)

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        if ((len(self.inputs) == 0) or (len(self.inputs[0].outputs) == 0)):
            return
        self._mask_input_points_changed(self.mask_input_points)
        if self.glyph_type == 'vector':
            self._color_mode_changed(self.color_mode)
        else:
            self._color_mode_tensor_changed(self.color_mode_tensor)
        self._scale_mode_changed(self.scale_mode)
        # Set our output.
        tvtk_common.configure_outputs(self, self.glyph)
        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self._scale_mode_changed(self.scale_mode)
        self.data_changed = True

    def render(self):
        # Suppress rendering while a batched trait update is in progress.
        if not self._updating:
            super(Glyph, self).render()

    def start(self):
        """Overridden method.
        """
        if self.running:
            return
        self.glyph_source.start()
        super(Glyph, self).start()

    def stop(self):
        if not self.running:
            return
        self.glyph_source.stop()
        super(Glyph, self).stop()

    def has_output_port(self):
        """ The filter has an output port."""
        return True

    def get_output_object(self):
        """ Returns the output port."""
        return self.glyph.output_port

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _update_source(self):
        # Re-wire the glyph's source geometry when the glyph source's
        # pipeline changes.
        self.configure_source_data(self.glyph, self.glyph_source.outputs[0])

    def _glyph_source_changed(self, value):
        self.configure_source_data(self.glyph, value.outputs[0])

    def _color_mode_changed(self, value):
        # Vector-glyph coloring; 'no_coloring' leaves the mode untouched.
        if len(self.inputs) == 0:
            return
        if value != 'no_coloring':
            self.glyph.color_mode = value

    def _color_mode_tensor_changed(self, value):
        # Tensor-glyph coloring; toggles color_glyphs off for 'no_coloring'.
        if len(self.inputs) == 0:
            return
        self._updating = True
        if value != 'no_coloring':
            self.glyph.color_mode = value
            self.glyph.color_glyphs = True
        else:
            self.glyph.color_glyphs = False
        self._updating = False
        self.render()

    def _scale_mode_changed(self, value):
        # Tensor glyphs are not scaled this way (see _glyph_type_changed),
        # and a module is required to obtain the data range.
        if (self.module is None) or (len(self.inputs) == 0)\
               or self.glyph_type == 'tensor':
            return
        self._updating = True
        try:
            glyph = self.glyph
            glyph.scale_mode = value
            mm = self.module.module_manager
            if glyph.scale_mode == 'scale_by_scalar':
                glyph.range = tuple(mm.scalar_lut_manager.data_range)
            else:
                glyph.range = tuple(mm.vector_lut_manager.data_range)
        finally:
            # always re-enable rendering, even if the update failed
            self._updating = False
        self.render()

    def _mask_input_points_changed(self, value):
        # Insert or remove the MaskPoints filter in front of the glyph.
        inputs = self.inputs
        if len(inputs) == 0:
            return
        if value:
            mask = self.mask_points
            tvtk_common.configure_input(mask, inputs[0].outputs[0])
            self.configure_connection(self.glyph, mask)
        else:
            self.configure_connection(self.glyph, inputs[0])
        self.glyph.update()

    def _glyph_type_changed(self, value):
        if self.glyph_type == 'vector':
            self.glyph = tvtk.Glyph3D(clamping=True)
        else:
            # tensor glyphs have no scalar scaling, so hide that option
            self.glyph = tvtk.TensorGlyph(scale_factor=0.1)
            self.show_scale_mode = False
        self.glyph.on_trait_change(self.render)

    def _scene_changed(self, old, new):
        super(Glyph, self)._scene_changed(old, new)
        self.glyph_source.scene = new
|
dmsurti/mayavi
|
mayavi/components/glyph.py
|
Python
|
bsd-3-clause
| 11,129
|
[
"Mayavi",
"VTK"
] |
91108bc1ed9b9dc223065926d2bc9e806c2f7952e6bb396ac7900693f255c25f
|
# Import our awesome modules.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import glob
# Image processing modules.
import skimage.io
import skimage.filters
import skimage.measure
import skimage.segmentation
# Here's what we've done so far.
im = skimage.io.imread('data/lacI_titration/O2_delta_phase_pos_16.tif')
yfp_im = skimage.io.imread('data/lacI_titration/O2_delta_yfp_pos_16.tif')
# Normalize the image to the range [0, 1].
im_norm = (im - im.min()) / (im.max() - im.min())
# Do the background subtraction using a heavily blurred copy.
im_blur = skimage.filters.gaussian(im_norm, 50.0)
im_sub = im_norm - im_blur
# Threshold the image; pixels below -0.2 (dark in phase contrast,
# presumably cell material — confirm) are kept.
im_thresh = im_sub < -0.2
# Label our image.
im_label = skimage.measure.label(im_thresh)
props = skimage.measure.regionprops(im_label)
# We want to keep the cells with a given area (0.5-5 square microns).
approved_objects = np.zeros_like(im_label)
ip_dist = 0.160 # in units of microns per pixel
for prop in props:
    obj_area = prop.area * ip_dist**2
    if (obj_area > 0.5) & (obj_area < 5):
        approved_objects += (im_label == prop.label)
# Extract the mean fluorescence intensity of each approved object.
mean_int = []
im_relab = skimage.measure.label(approved_objects)
props = skimage.measure.regionprops(im_relab, intensity_image=yfp_im)
for prop in props:
    mean_int.append(prop.mean_intensity)
plt.figure()
plt.hist(mean_int, bins=10)
plt.xlabel('mean pixel intensity')
plt.ylabel('count')
plt.show()
def phase_segmentation(image, threshold):
    """
    Segment single cells from a phase-contrast image.

    Parameters
    ----------
    image : 2d array
        Phase contrast image to segment.
    threshold : float
        Threshold applied to the background-subtracted, normalized
        image; pixels *below* this value are kept (e.g. -0.2).
        Previously this parameter was ignored and -0.2 was hard coded.

    Returns
    -------
    2d array of int
        Relabeled segmentation mask keeping only objects with an area
        between 0.5 and 5 square microns.
    """
    # Normalize the image to the range [0, 1].
    im_norm = (image - image.min()) / (image.max() - image.min())
    # Background subtraction with a heavily blurred copy.
    # NOTE(review): the blur is computed on the raw image here while the
    # interactive version above blurs the normalized image — confirm
    # which is intended.
    im_blur = skimage.filters.gaussian(image, 50.0)
    im_sub = im_norm - im_blur
    # Threshold the image using the caller-supplied threshold.
    im_thresh = im_sub < threshold
    # Label the image.
    im_label = skimage.measure.label(im_thresh)
    # Get the properties and apply an area threshold.
    props = skimage.measure.regionprops(im_label)
    # Make an empty image to store the approved cells.
    approved_objects = np.zeros_like(im_label)
    # Apply the area filters (0.160 microns per pixel interpixel distance).
    for prop in props:
        obj_area = prop.area * 0.160**2
        if (obj_area > 0.5) & (obj_area < 5):
            approved_objects += (im_label == prop.label)
    # Relabel the image. BUG FIX: im_relab was previously returned
    # without ever being assigned, raising NameError on every call.
    im_relab = skimage.measure.label(approved_objects)
    return im_relab
def extract_intensity(mask, yfp_image):
    """
    Extract the mean fluorescence intensity of each labeled object.

    Given a labeled segmentation mask and the matching fluorescence
    image, returns a list with one mean pixel intensity per region.
    """
    region_props = skimage.measure.regionprops(mask,
                                               intensity_image=yfp_image)
    return [region.mean_intensity for region in region_props]
# With these functions in hand, let's loop over autofluorescence and delta.
delta_phase = glob.glob('data/lacI_titration/O2_delta_phase*.tif')
delta_yfp = glob.glob('data/lacI_titration/O2_delta_yfp_pos*.tif')
delta_mean_int = []
for i in range(len(delta_phase)):
    # paired phase/fluorescence images; presumably glob returns them in
    # matching position order — confirm (glob order is filesystem-dependent)
    im = skimage.io.imread(delta_phase[i])
    yfp_im = skimage.io.imread(delta_yfp[i])
    # Put it through our functions.
    mask = phase_segmentation(im, -0.2)
    ints = extract_intensity(mask, yfp_im)
    #Loop through the intensity and add it.
    for value in ints:
        delta_mean_int.append(value)
# Now do the same for the autoflurescent samples.
auto_phase = glob.glob('data/lacI_titration/O2_auto_phase_*.tif')
auto_yfp = glob.glob('data/lacI_titration/O2_auto_yfp_*.tif')
auto_mean_int = []
for i in range(len(auto_phase)):
    im = skimage.io.imread(auto_phase[i])
    yfp_im = skimage.io.imread(auto_yfp[i])
    mask = phase_segmentation(im, -0.2)
    ints = extract_intensity(mask, yfp_im)
    for value in ints:
        auto_mean_int.append(value)
# Now generate the histograms of each.
plt.figure()
plt.hist(delta_mean_int, bins=100)
plt.xlabel('mean pixel intensity')
plt.ylabel('counts')
plt.title('delta sample')
plt.figure()
plt.hist(auto_mean_int, bins=100)
plt.xlabel('mean pixel intensity')
plt.ylabel('counts')
plt.title('autofluorescent sample')
plt.show()
|
RPGroup-PBoC/gist_pboc_2017
|
code/inclass/project_part3_in_class.py
|
Python
|
mit
| 4,078
|
[
"Gaussian"
] |
d7189efbe1207d6fe1a8523ec91fd0cfe5de5136423668500ed514884eea1a36
|
""" core implementation of testing process: init, session, runtest loop. """
import re
import py
import pytest, _pytest
import os, sys, imp
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.runner import collect_one_node
# directory of the installed _pytest package; presumably the point below
# which tracebacks are cut in failure reports — name suggests so
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()

# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4

# matches a valid python identifier
name_re = re.compile("^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
    """Register ini-values and command line options for collection and
    session control (-x, --maxfail, --strict, -c, --collect-only,
    --pyargs, --ignore, --confcutdir, --basetemp)."""
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
        type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
    #parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
    #            "**/test_*.py", "**/*_test.py"]
    #)
    group = parser.getgroup("general", "running and selection options")
    # NOTE(review): the trailing comma after this call makes the statement
    # a one-element tuple — harmless, but presumably unintended.
    group._addoption('-x', '--exitfirst', action="store_true", default=False,
               dest="exitfirst",
               help="exit instantly on first error or failed test."),
    group._addoption('--maxfail', metavar="num",
               action="store", type=int, dest="maxfail", default=0,
               help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
               help="run pytest in strict mode, warnings become errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
               help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
    group = parser.getgroup("collect", "collection")
    # NOTE(review): stray trailing comma here as well.
    group.addoption('--collectonly', '--collect-only', action="store_true",
        help="only collect tests, don't execute them."),
    group.addoption('--pyargs', action="store_true",
        help="try to interpret all arguments as python packages.")
    group.addoption("--ignore", action="append", metavar="path",
        help="ignore path during collection (multi-allowed).")
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption('--confcutdir', dest="confcutdir", default=None,
        metavar="dir",
        help="only load conftest.py's relative to specified dir.")
    group = parser.getgroup("debugconfig",
        "test session debugging and configuration")
    group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
        help="base temporary directory for this test run.")
def pytest_namespace():
    """Expose the collection node classes as the ``pytest.collect``
    namespace."""
    collect_ns = {"Item": Item, "Collector": Collector,
                  "File": File, "Session": Session}
    return {"collect": collect_ns}
def pytest_configure(config):
    """Store the config for compatibility and translate -x/--exitfirst
    into maxfail=1."""
    # compatibility: legacy code reaches the config via pytest.config
    pytest.config = config
    option = config.option
    if option.exitfirst:
        option.maxfail = 1
def wrap_session(config, doit):
    """Skeleton command line program"""
    session = Session(config)
    session.exitstatus = EXIT_OK
    # initstate records how far initialization got, so the finally block
    # only runs the teardown hooks matching the stages that actually ran
    initstate = 0
    try:
        try:
            config.do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            doit(config, session)
        except pytest.UsageError:
            args = sys.exc_info()[1].args
            for msg in args:
                sys.stderr.write("ERROR: %s\n" %(msg,))
            session.exitstatus = EXIT_USAGEERROR
        except KeyboardInterrupt:
            excinfo = py.code.ExceptionInfo()
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = EXIT_INTERRUPTED
        except:
            # any other exception (including SystemExit) is treated as an
            # internal error and reported to plugins
            excinfo = py.code.ExceptionInfo()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
        else:
            # normal completion: failed tests determine the exit status
            if session._testsfailed:
                session.exitstatus = EXIT_TESTSFAILED
    finally:
        excinfo = None # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session,
                exitstatus=session.exitstatus)
        if initstate >= 1:
            config.do_unconfigure()
        config.pluginmanager.ensure_shutdown()
    return session.exitstatus
def pytest_cmdline_main(config):
    """Default command line entry point: run _main inside a wrapped
    session and return its exit status."""
    return wrap_session(config, _main)
def _main(config, session):
    """ default command line protocol for initialization, session,
    running tests and reporting. """
    # collection first, then the runtest loop over the collected items
    config.hook.pytest_collection(session=session)
    config.hook.pytest_runtestloop(session=session)
def pytest_collection(session):
    """Perform the collection phase for the given session."""
    return session.perform_collect()
def pytest_runtestloop(session):
    """Run the collected items in order; return True when done (also for
    --collect-only, where nothing is executed)."""
    if session.config.option.collectonly:
        return True

    def getnextitem(i):
        # this is a function to avoid python2
        # keeping sys.exc_info set when calling into a test
        # python2 keeps sys.exc_info till the frame is left
        try:
            return session.items[i+1]
        except IndexError:
            # last item: no successor
            return None

    for i, item in enumerate(session.items):
        nextitem = getnextitem(i)
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
def pytest_ignore_collect(path, config):
    """Return True if *path* should be skipped during collection.

    A path is ignored when it appears in the conftest ``collect_ignore``
    list for its directory or was passed via the ``--ignore`` option.
    """
    p = path.dirpath()
    ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
    # Bugfix: copy before extending.  _getconftest_pathlist may hand back a
    # list that is shared/cached by the config; extending it in place would
    # leak --ignore entries into subsequent lookups.
    ignore_paths = list(ignore_paths or [])
    excludeopt = config.getoption("ignore")
    if excludeopt:
        ignore_paths.extend([py.path.local(x) for x in excludeopt])
    return path in ignore_paths
class FSHookProxy(object):
    """Hook caller proxy bound to a filesystem path.

    Hook lookups consider only the plugins matching ``fspath`` (e.g. the
    conftest plugins along that path).
    """
    def __init__(self, fspath, config):
        self.fspath = fspath
        self.config = config

    def __getattr__(self, name):
        plugins = self.config._getmatchingplugins(self.fspath)
        x = self.config.hook._getcaller(name, plugins)
        # cache the resolved hook caller so __getattr__ runs once per name
        self.__dict__[name] = x
        return x
def compatproperty(name):
    """Return a read-only property that resolves *name* on the pytest module.

    Deprecated accessor: callers should use ``pytest.<name>`` directly.
    """
    def _resolve(self):
        # late lookup so the property always reflects the current
        # attribute on the pytest namespace
        return getattr(pytest, name)
    return property(_resolve)
class NodeKeywords(MappingMixin):
    """Mapping of keywords/markers for a collection node.

    Lookups fall back to the parent node's keywords, so a marker set on a
    class or module is visible on every item below it.  Deletion is not
    supported.
    """
    def __init__(self, node):
        self.node = node
        self.parent = node.parent
        # the node's own name always counts as a matching keyword
        self._markers = {node.name: True}

    def __getitem__(self, key):
        try:
            return self._markers[key]
        except KeyError:
            if self.parent is None:
                raise
            # delegate to the parent chain
            return self.parent.keywords[key]

    def __setitem__(self, key, value):
        self._markers[key] = value

    def __delitem__(self, key):
        raise ValueError("cannot delete key in keywords dict")

    def __iter__(self):
        # own keys plus everything inherited from the parent chain
        seen = set(self._markers)
        if self.parent is not None:
            seen.update(self.parent.keywords)
        return iter(seen)

    def __len__(self):
        # Bugfix: the original returned len(self.__iter__()), but len() of a
        # plain iterator raises TypeError; count the materialized key list.
        return len(self.keys())

    def keys(self):
        return list(self)

    def __repr__(self):
        return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
    """ base class for Collector and Item the test collection tree.
    Collector subclasses have children, Items are terminal nodes."""

    def __init__(self, name, parent=None, config=None, session=None):
        #: a unique name within the scope of the parent node
        self.name = name
        #: the parent collector node.
        self.parent = parent
        #: the pytest config object
        self.config = config or parent.config
        #: the session this node is part of
        self.session = session or parent.session
        #: filesystem path where this node was collected from (can be None)
        self.fspath = getattr(parent, 'fspath', None)
        #: keywords/markers collected from all scopes
        self.keywords = NodeKeywords(self)
        #: allow adding of extra keywords to use for matching
        self.extra_keyword_matches = set()
        # used for storing artificial fixturedefs for direct parametrization
        self._name2pseudofixturedef = {}
        #self.extrainit()

    @property
    def ihook(self):
        """ fspath sensitive hook proxy used to call pytest hooks"""
        return self.session.gethookproxy(self.fspath)

    #def extrainit(self):
    #    """"extra initialization after Node is initialized. Implemented
    #    by some subclasses. """

    # Deprecated class aliases; they resolve to the classes on the pytest
    # namespace (see compatproperty and _getcustomclass).
    Module = compatproperty("Module")
    Class = compatproperty("Class")
    Instance = compatproperty("Instance")
    Function = compatproperty("Function")
    File = compatproperty("File")
    Item = compatproperty("Item")

    def _getcustomclass(self, name):
        # warn when a subclass overrides one of the compat properties above
        cls = getattr(self, name)
        if cls != getattr(pytest, name):
            py.log._apiwarn("2.0", "use of node.%s is deprecated, "
                "use pytest_pycollect_makeitem(...) to create custom "
                "collection nodes" % name)
        return cls

    def __repr__(self):
        return "<%s %r>" %(self.__class__.__name__,
                           getattr(self, 'name', None))

    def warn(self, code, message):
        """ generate a warning with the given code and message for this
        item. """
        assert isinstance(code, str)
        # prefer the precise (path, lineno) location when available
        fslocation = getattr(self, "location", None)
        if fslocation is None:
            fslocation = getattr(self, "fspath", None)
        else:
            fslocation = "%s:%s" % fslocation[:2]
        self.ihook.pytest_logwarning(code=code, message=message,
                                     nodeid=self.nodeid,
                                     fslocation=fslocation)

    # methods for ordering nodes
    @property
    def nodeid(self):
        """ a ::-separated string denoting its collection tree address. """
        # computed once, then cached on the instance
        try:
            return self._nodeid
        except AttributeError:
            self._nodeid = x = self._makeid()
            return x

    def _makeid(self):
        return self.parent.nodeid + "::" + self.name

    def __hash__(self):
        return hash(self.nodeid)

    def setup(self):
        # overridden by subclasses needing per-node setup
        pass

    def teardown(self):
        # overridden by subclasses needing per-node teardown
        pass

    def _memoizedcall(self, attrname, function):
        """Call ``function`` once, caching its result under ``attrname``.

        A raised exception is also cached (under ``_ex_<attrname>``) and
        re-raised on subsequent calls.
        """
        exattrname = "_ex_" + attrname
        failure = getattr(self, exattrname, None)
        if failure is not None:
            # replay the previously recorded exception
            py.builtin._reraise(failure[0], failure[1], failure[2])
        if hasattr(self, attrname):
            return getattr(self, attrname)
        try:
            res = function()
        except py.builtin._sysex:
            # never swallow SystemExit/KeyboardInterrupt-type exceptions
            raise
        except:
            failure = sys.exc_info()
            setattr(self, exattrname, failure)
            raise
        setattr(self, attrname, res)
        return res

    def listchain(self):
        """ return list of all parent collectors up to self,
        starting from root of collection tree. """
        chain = []
        item = self
        while item is not None:
            chain.append(item)
            item = item.parent
        chain.reverse()
        return chain

    def add_marker(self, marker):
        """ dynamically add a marker object to the node.
        ``marker`` can be a string or pytest.mark.* instance.
        """
        from _pytest.mark import MarkDecorator
        if isinstance(marker, py.builtin._basestring):
            marker = MarkDecorator(marker)
        elif not isinstance(marker, MarkDecorator):
            raise ValueError("is not a string or pytest.mark.* Marker")
        self.keywords[marker.name] = marker

    def get_marker(self, name):
        """ get a marker object from this node or None if
        the node doesn't have a marker with that name. """
        val = self.keywords.get(name, None)
        if val is not None:
            from _pytest.mark import MarkInfo, MarkDecorator
            # only genuine marker objects qualify; plain keywords do not
            if isinstance(val, (MarkDecorator, MarkInfo)):
                return val

    def listextrakeywords(self):
        """ Return a set of all extra keywords in self and any parents."""
        extra_keywords = set()
        item = self
        for item in self.listchain():
            extra_keywords.update(item.extra_keyword_matches)
        return extra_keywords

    def listnames(self):
        # names of all nodes from the root down to (and including) self
        return [x.name for x in self.listchain()]

    def getplugins(self):
        # plugins applicable for this node's filesystem path
        return self.config._getmatchingplugins(self.fspath)

    def addfinalizer(self, fin):
        """ register a function to be called when this node is finalized.
        This method can only be called when this node is active
        in a setup chain, for example during self.setup().
        """
        self.session._setupstate.addfinalizer(fin, self)

    def getparent(self, cls):
        """ get the next parent node (including ourself)
        which is an instance of the given class"""
        current = self
        while current and not isinstance(current, cls):
            current = current.parent
        return current

    def _prunetraceback(self, excinfo):
        # no-op here; subclasses trim tracebacks to the interesting frames
        pass

    def _repr_failure_py(self, excinfo, style=None):
        """Build a failure representation for ``excinfo``.

        Fixture lookup errors get their own compact repr; otherwise the
        traceback is pruned (unless --fulltrace) and rendered in the
        requested or configured style.
        """
        fm = self.session._fixturemanager
        if excinfo.errisinstance(fm.FixtureLookupError):
            return excinfo.value.formatrepr()
        tbfilter = True
        if self.config.option.fulltrace:
            style="long"
        else:
            self._prunetraceback(excinfo)
            tbfilter = False # prunetraceback already does it
        if style == "auto":
            style = "long"
        # XXX should excinfo.getrepr record all data and toterminal() process it?
        if style is None:
            if self.config.option.tbstyle == "short":
                style = "short"
            else:
                style = "long"
        return excinfo.getrepr(funcargs=True,
                               showlocals=self.config.option.showlocals,
                               style=style, tbfilter=tbfilter)

    # default failure representation; Collector overrides repr_failure
    repr_failure = _repr_failure_py
class Collector(Node):
    """ Collector instances create children through collect()
    and thus iteratively build a tree.
    """

    class CollectError(Exception):
        """ an error during collection, contains a custom message. """

    def collect(self):
        """ returns a list of children (items and collectors)
        for this collection node.
        """
        raise NotImplementedError("abstract")

    def repr_failure(self, excinfo):
        """ represent a collection failure. """
        if excinfo.errisinstance(self.CollectError):
            # CollectError carries a user-facing message; show it verbatim
            exc = excinfo.value
            return str(exc.args[0])
        # otherwise fall back to a short-style traceback representation
        return self._repr_failure_py(excinfo, style="short")

    def _memocollect(self):
        """ internal helper method to cache results of calling collect(). """
        return self._memoizedcall('_collected', lambda: list(self.collect()))

    def _prunetraceback(self, excinfo):
        # cut the traceback down to entries inside the collected file; if
        # that removes everything, cut at the pytest internals instead
        if hasattr(self, 'fspath'):
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=self.fspath)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
            excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
    """Base class for collectors addressed by a filesystem path."""

    def __init__(self, fspath, parent=None, config=None, session=None):
        fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
        name = fspath.basename
        if parent is not None:
            # use the path relative to the parent collector as node name
            rel = fspath.relto(parent.fspath)
            if rel:
                name = rel
            # normalize to "/" so node names are platform independent
            name = name.replace(os.sep, "/")
        super(FSCollector, self).__init__(name, parent, config, session)
        self.fspath = fspath

    def _makeid(self):
        # node id is the path relative to rootdir, with "/" separators
        relpath = self.fspath.relto(self.config.rootdir)
        if os.sep != "/":
            relpath = relpath.replace(os.sep, "/")
        return relpath
class File(FSCollector):
    """ base class for collecting tests from a file. """
class Item(Node):
    """ a basic test invocation item. Note that for a single function
    there might be multiple test invocation items.
    """
    # the item scheduled to run after this one (set by the run loop)
    nextitem = None

    def __init__(self, name, parent=None, config=None, session=None):
        super(Item, self).__init__(name, parent, config, session)
        # captured output sections as (when, key, content) tuples
        self._report_sections = []

    def add_report_section(self, when, key, content):
        # empty content is dropped so reports stay compact
        if content:
            self._report_sections.append((when, key, content))

    def reportinfo(self):
        """Return a (fspath, lineno, domaininfo) tuple used for reporting."""
        return self.fspath, None, ""

    @property
    def location(self):
        """(relative fspath, lineno, testname) — computed once and cached."""
        try:
            return self._location
        except AttributeError:
            location = self.reportinfo()
            # bestrelpath is a quite slow function
            cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
            try:
                fspath = cache[location[0]]
            except KeyError:
                fspath = self.session.fspath.bestrelpath(location[0])
                cache[location[0]] = fspath
            location = (fspath, location[1], str(location[2]))
            self._location = location
            return location
class NoMatch(Exception):
    """ raised if matching cannot locate matching names. """
class Session(FSCollector):
    """Root of the collection tree; drives collection and tracks run state."""

    class Interrupted(KeyboardInterrupt):
        """ signals an interrupted test run. """
        __module__ = 'builtins' # for py3

    def __init__(self, config):
        FSCollector.__init__(self, config.rootdir, parent=None,
                             config=config, session=self)
        self.config.pluginmanager.register(self, name="session", prepend=True)
        # number of failed tests so far (drives --maxfail)
        self._testsfailed = 0
        # falsy, or a string explaining why the run should stop
        self.shouldstop = False
        self.trace = config.trace.root.get("collection")
        self._norecursepatterns = config.getini("norecursedirs")
        self.startdir = py.path.local()
        # cache of per-path FSHookProxy objects
        self._fs2hookproxy = {}

    def _makeid(self):
        # the session is the tree root: empty node id
        return ""

    def pytest_collectstart(self):
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)

    def pytest_runtest_logreport(self, report):
        # count genuine failures (xfail does not count) and honour --maxfail
        if report.failed and not hasattr(report, 'wasxfail'):
            self._testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self._testsfailed >= maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self._testsfailed)
    # collection failures count toward --maxfail as well
    pytest_collectreport = pytest_runtest_logreport

    def isinitpath(self, path):
        return path in self._initialpaths

    def gethookproxy(self, fspath):
        """Return the (cached) path-sensitive hook proxy for ``fspath``."""
        try:
            return self._fs2hookproxy[fspath]
        except KeyError:
            self._fs2hookproxy[fspath] = x = FSHookProxy(fspath, self.config)
            return x

    def perform_collect(self, args=None, genitems=True):
        """Collect test items, run the modifyitems hook, and always fire
        the collection-finish hook."""
        hook = self.config.hook
        try:
            items = self._perform_collect(args, genitems)
            hook.pytest_collection_modifyitems(session=self,
                config=self.config, items=items)
        finally:
            hook.pytest_collection_finish(session=self)
        return items

    def _perform_collect(self, args, genitems):
        """Parse the command line args, collect, and (optionally) flatten
        the collected tree into self.items."""
        if args is None:
            args = self.config.args
        self.trace("perform_collect", self, args)
        self.trace.root.indent += 1
        # arguments that could not be matched during collect()
        self._notfound = []
        self._initialpaths = set()
        self._initialparts = []
        self.items = items = []
        for arg in args:
            parts = self._parsearg(arg)
            self._initialparts.append(parts)
            self._initialpaths.add(parts[0])
        rep = collect_one_node(self)
        self.ihook.pytest_collectreport(report=rep)
        self.trace.root.indent -= 1
        if self._notfound:
            errors = []
            for arg, exc in self._notfound:
                line = "(no name %r in any of %r)" % (arg, exc.args[0])
                errors.append("not found: %s\n%s" % (arg, line))
                #XXX: test this
            raise pytest.UsageError(*errors)
        if not genitems:
            return rep.result
        else:
            if rep.passed:
                for node in rep.result:
                    self.items.extend(self.genitems(node))
            return items

    def collect(self):
        # yield top-level collected nodes for each initial argument
        for parts in self._initialparts:
            arg = "::".join(map(str, parts))
            self.trace("processing argument", arg)
            self.trace.root.indent += 1
            try:
                for x in self._collect(arg):
                    yield x
            except NoMatch:
                # we are inside a make_report hook so
                # we cannot directly pass through the exception
                self._notfound.append((arg, sys.exc_info()[1]))
            self.trace.root.indent -= 1

    def _collect(self, arg):
        names = self._parsearg(arg)
        path = names.pop(0)
        if path.check(dir=1):
            # directories cannot carry ::name selectors
            assert not names, "invalid arg %r" %(arg,)
            for path in path.visit(fil=lambda x: x.check(file=1),
                                   rec=self._recurse, bf=True, sort=True):
                for x in self._collectfile(path):
                    yield x
        else:
            assert path.check(file=1)
            for x in self.matchnodes(self._collectfile(path), names):
                yield x

    def _collectfile(self, path):
        ihook = self.gethookproxy(path)
        # explicitly given paths bypass the ignore-collect hook
        if not self.isinitpath(path):
            if ihook.pytest_ignore_collect(path=path, config=self.config):
                return ()
        return ihook.pytest_collect_file(path=path, parent=self)

    def _recurse(self, path):
        """Decide whether to descend into directory ``path``."""
        ihook = self.gethookproxy(path.dirpath())
        if ihook.pytest_ignore_collect(path=path, config=self.config):
            return
        # honour the norecursedirs ini patterns
        for pat in self._norecursepatterns:
            if path.check(fnmatch=pat):
                return False
        ihook = self.gethookproxy(path)
        ihook.pytest_collect_directory(path=path, parent=self)
        return True

    def _tryconvertpyarg(self, x):
        """Resolve a dotted module/package name (--pyargs) to its
        filesystem location; return ``x`` unchanged if it does not
        resolve to an importable name."""
        mod = None
        path = [os.path.abspath('.')] + sys.path
        for name in x.split('.'):
            # ignore anything that's not a proper name here
            # else something like --pyargs will mess up '.'
            # since imp.find_module will actually sometimes work for it
            # but it's supposed to be considered a filesystem path
            # not a package
            if name_re.match(name) is None:
                return x
            try:
                fd, mod, type_ = imp.find_module(name, path)
            except ImportError:
                return x
            else:
                if fd is not None:
                    fd.close()
            # descend into the package directory (or the module's dir)
            if type_[2] != imp.PKG_DIRECTORY:
                path = [os.path.dirname(mod)]
            else:
                path = [mod]
        return mod

    def _parsearg(self, arg):
        """ return (fspath, names) tuple after checking the file exists. """
        arg = str(arg)
        if self.config.option.pyargs:
            arg = self._tryconvertpyarg(arg)
        parts = str(arg).split("::")
        relpath = parts[0].replace("/", os.sep)
        path = self.config.invocation_dir.join(relpath, abs=True)
        if not path.check():
            if self.config.option.pyargs:
                msg = "file or package not found: "
            else:
                msg = "file not found: "
            raise pytest.UsageError(msg + arg)
        parts[0] = path
        return parts

    def matchnodes(self, matching, names):
        """Match the ``::``-separated ``names`` against collected nodes;
        raise NoMatch if nothing matched."""
        self.trace("matchnodes", matching, names)
        self.trace.root.indent += 1
        nodes = self._matchnodes(matching, names)
        num = len(nodes)
        self.trace("matchnodes finished -> ", num, "nodes")
        self.trace.root.indent -= 1
        if num == 0:
            raise NoMatch(matching, names[:1])
        return nodes

    def _matchnodes(self, matching, names):
        if not matching or not names:
            return matching
        name = names[0]
        assert name
        nextnames = names[1:]
        resultnodes = []
        for node in matching:
            if isinstance(node, pytest.Item):
                if not names:
                    resultnodes.append(node)
                continue
            assert isinstance(node, pytest.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                has_matched = False
                for x in rep.result:
                    if x.name == name:
                        # recurse with the remaining name components
                        resultnodes.extend(self.matchnodes([x], nextnames))
                        has_matched = True
                # XXX accept IDs that don't have "()" for class instances
                if not has_matched and len(rep.result) == 1 and x.name == "()":
                    nextnames.insert(0, name)
                    resultnodes.extend(self.matchnodes([x], nextnames))
            node.ihook.pytest_collectreport(report=rep)
        return resultnodes

    def genitems(self, node):
        """Recursively yield all Items below ``node``."""
        self.trace("genitems", node)
        if isinstance(node, pytest.Item):
            node.ihook.pytest_itemcollected(item=node)
            yield node
        else:
            assert isinstance(node, pytest.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                for subnode in rep.result:
                    for x in self.genitems(subnode):
                        yield x
            node.ihook.pytest_collectreport(report=rep)
|
jessekl/flixr
|
venv/lib/python2.7/site-packages/_pytest/main.py
|
Python
|
mit
| 25,584
|
[
"VisIt"
] |
adb076fa259e8f6a56c7f6321638655ed90c0ed918a2971392a53cca9210d0c8
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Testmodule for the Widom Insertion.
"""
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd # pylint: disable=import-error
from espressomd import reaction_ensemble
from tests_common import lj_potential
@utx.skipIfMissingFeatures(["LENNARD_JONES"])
class WidomInsertionTest(ut.TestCase):
    """Test the implementation of the widom insertion.
    The excess chemical potential is calculated for identical particles in
    a 20 cubed box with a single particle, interacting via a LJ-potential
    (cut-off at 5 sigma)."""

    # --- model parameters ---
    N0 = 1                 # number of particles present in the box
    TEMPERATURE = 0.5
    TYPE_HA = 0            # particle type used for the insertion reaction
    CHARGE_HA = 0
    LJ_EPS = 1.0
    LJ_SIG = 1.0
    LJ_CUT = 5
    BOX_L = 2 * LJ_CUT
    # potential shift at the cut-off (computed via tests_common.lj_potential;
    # presumably chosen so V(LJ_CUT) -> 0 — confirm against lj_potential)
    LJ_SHIFT = lj_potential(LJ_CUT, LJ_EPS, LJ_SIG, LJ_CUT + 1, 0.0)

    # --- reference value: numerically integrate exp(-beta*V) ---
    radius = np.linspace(1e-10, LJ_CUT, 1000)
    # numerical integration for radii smaller than the cut-off in spherical
    # coordinates
    integrateUpToCutOff = 4 * np.pi * np.trapz(
        radius**2 * np.exp(-lj_potential(radius,
                                         LJ_EPS,
                                         LJ_SIG,
                                         LJ_CUT,
                                         LJ_SHIFT) / TEMPERATURE),
        x=radius)
    # numerical solution for V_lj=0 => corresponds to the volume (as exp(0)=1)
    integreateRest = (BOX_L**3 - 4.0 / 3.0 * np.pi * LJ_CUT**3)

    # calculate excess chemical potential of the system, see Frenkel Smith,
    # p 174. Note: He uses scaled coordinates, which is why we need to divide
    # by the box volume
    target_mu_ex = -TEMPERATURE * \
        np.log((integrateUpToCutOff + integreateRest) / BOX_L**3)

    # --- simulation system (created once at class level) ---
    system = espressomd.System(box_l=np.ones(3) * BOX_L)
    system.cell_system.set_n_square()
    system.seed = system.cell_system.get_state()['n_nodes'] * [2]
    np.random.seed(69) # make reaction code fully deterministic
    system.cell_system.skin = 0.4
    volume = np.prod(system.box_l) # cuboid box

    Widom = reaction_ensemble.WidomInsertion(
        temperature=TEMPERATURE, seed=1)

    def setUp(self):
        """Place one LJ particle at the box center, set up the LJ
        interaction and register the insertion reaction."""
        self.system.part.add(id=0, pos=0.5 * self.system.box_l,
                             type=self.TYPE_HA)
        self.system.non_bonded_inter[self.TYPE_HA, self.TYPE_HA].lennard_jones.set_params(
            epsilon=self.LJ_EPS, sigma=self.LJ_SIG, cutoff=self.LJ_CUT,
            shift="auto")
        self.Widom.add_reaction(
            reactant_types=[],
            reactant_coefficients=[],
            product_types=[self.TYPE_HA],
            product_coefficients=[1],
            default_charges={self.TYPE_HA: self.CHARGE_HA})

    def test_widom_insertion(self):
        """Sample mu_ex via Widom insertion and compare against the
        numerically integrated reference value."""
        TYPE_HA = WidomInsertionTest.TYPE_HA
        system = WidomInsertionTest.system
        Widom = WidomInsertionTest.Widom
        target_mu_ex = WidomInsertionTest.target_mu_ex

        system.seed = system.cell_system.get_state()[
            'n_nodes'] * [np.random.randint(5)]
        num_samples = 100000
        for i in range(num_samples):
            # 0 for insertion reaction
            Widom.measure_excess_chemical_potential(0)
        # final call returns the accumulated (mean, std_err) estimate
        mu_ex = Widom.measure_excess_chemical_potential(0)
        deviation_mu_ex = abs(mu_ex[0] - target_mu_ex)
        # error
        self.assertLess(
            deviation_mu_ex - 1e-3,
            0.0,
            msg="\nExcess chemical potential for single LJ-particle computed via widom insertion gives a wrong value.\n"
            + " average mu_ex: " + str(mu_ex[0])
            + " mu_ex_std_err: " + str(mu_ex[1])
            + " target_mu_ex: " + str(target_mu_ex)
        )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    ut.main()
|
mkuron/espresso
|
testsuite/python/widom_insertion.py
|
Python
|
gpl-3.0
| 4,391
|
[
"ESPResSo"
] |
a41482d3fcdc418a38f1cfc6617fb7e2ce16d3da404bc625493f4580e213bc1a
|
""" Accounting reporter
"""
import hashlib
import re
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities.ObjectLoader import loadObjects
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceSection
from DIRAC.AccountingSystem.private.Policies import gPoliciesList
from DIRAC.AccountingSystem.private.Plotters.BaseReporter import BaseReporter as myBaseReporter
class PlottersList(object):
    """Registry of accounting plotter classes.

    Scans the AccountingSystem/private/Plotters package for modules ending
    in ``Plotter`` and indexes the loaded classes by their bare type name
    (i.e. the class name with the trailing "Plotter" suffix stripped).
    """

    def __init__(self):
        loaded = loadObjects(
            "AccountingSystem/private/Plotters", re.compile(r".*[a-z1-9]Plotter\.py$"), myBaseReporter
        )
        # strip the 7-character "Plotter" suffix for lookup keys
        self.__plotters = {objName[:-7]: loaded[objName] for objName in loaded}

    def getPlotterClass(self, typeName):
        """Return the plotter class registered for *typeName*, or None."""
        return self.__plotters.get(typeName)
class MainReporter(object):
    """Facade for generating accounting reports: resolves the plotter class
    for a type, enforces per-type access policies, and attaches a cache
    hash to each request."""

    def __init__(self, db, setup):
        self._db = db
        self.setup = setup
        self.csSection = getServiceSection("Accounting/ReportGenerator", setup=setup)
        self.plotterList = PlottersList()

    def __calculateReportHash(self, reportRequest):
        """Return an md5 hex digest identifying *reportRequest* for caching.

        Start/end times are rounded down to the configured cache time
        granularity so near-identical requests share a cache entry.
        """
        requestToHash = dict(reportRequest)
        granularity = gConfig.getValue("%s/CacheTimeGranularity" % self.csSection, 300)
        for key in ("startTime", "endTime"):
            epoch = requestToHash[key]
            requestToHash[key] = epoch - epoch % granularity
        md5Hash = hashlib.md5()
        md5Hash.update(repr(requestToHash).encode())
        # include the setup so identical requests in different setups differ
        md5Hash.update(self.setup.encode())
        return md5Hash.hexdigest()

    def generate(self, reportRequest, credDict):
        """Generate the report described by *reportRequest*.

        Returns S_OK/S_ERROR; if a policy is registered for the type it is
        checked against *credDict* before generating anything.
        """
        typeName = reportRequest["typeName"]
        plotterClass = self.plotterList.getPlotterClass(typeName)
        if not plotterClass:
            return S_ERROR("There's no reporter registered for type %s" % typeName)
        if typeName in gPoliciesList:
            retVal = gPoliciesList[typeName].checkRequest(
                reportRequest["reportName"], credDict, reportRequest["condDict"], reportRequest["grouping"]
            )
            if not retVal["OK"]:
                return retVal
        reportRequest["hash"] = self.__calculateReportHash(reportRequest)
        plotter = plotterClass(self._db, self.setup, reportRequest["extraArgs"])
        return plotter.generate(reportRequest)

    def list(self, typeName):
        """Return S_OK with the available plots for *typeName*, or S_ERROR
        when no plotter is registered."""
        plotterClass = self.plotterList.getPlotterClass(typeName)
        if not plotterClass:
            return S_ERROR("There's no plotter registered for type %s" % typeName)
        plotter = plotterClass(self._db, self.setup)
        return S_OK(plotter.plotsList())
|
DIRACGrid/DIRAC
|
src/DIRAC/AccountingSystem/private/MainReporter.py
|
Python
|
gpl-3.0
| 2,673
|
[
"DIRAC"
] |
7a7a2373b871054514b3f73f714946324694b8d04e5b7ad32ce755fa29a5f760
|
# -*- coding: UTF-8 -*-
from __future__ import division
import numpy as np
import pandas as pd
import sys
import math
import re
from scipy import stats
class ConfidenceInterval(object):
    """Helpers relating confidence-interval bounds and coverage probability
    for a sample mean (normal approximation; the population std is
    approximated by the sample std).

    Reference: https://www.youtube.com/watch?v=bekNKJoxYbQ
    """

    def getPercentageForConfidenceInterval(self,  # three things we can measure
                                           N_sample=36, sample_mean=112, sample_std=40,
                                           min_mean=100, max_mean=124  # we are asking for these boundaries
                                           ):
        """Probability that the true mean lies in [min_mean, max_mean]."""
        assert min_mean < sample_mean < max_mean
        # The population is assumed very large; by the law of large numbers
        # the sampling distribution of the mean is Gaussian, centered on the
        # sample mean, with std = (population std) / sqrt(N).  The unknown
        # population std is approximated by the sample std.
        std_error = sample_std / math.sqrt(N_sample)
        below = self.getPercentageOfSide(
            distance=abs(min_mean - sample_mean), means_std=std_error)
        above = self.getPercentageOfSide(
            distance=abs(max_mean - sample_mean), means_std=std_error)
        return below + above

    @staticmethod
    def getPercentageOfSide(distance, means_std):
        """One-sided probability mass between the mean and a point
        *distance* away, given the std of the mean distribution."""
        # convert the distance into standard deviations (z-score)
        z_score = distance / means_std
        # cdf(z) is the mass from -inf to z; cdf(0) = 0.5, so subtracting
        # it leaves only the mass between the mean and the boundary
        return stats.norm.cdf(z_score) - 0.5

    @staticmethod
    def getConfidenceInterval(percentage=0.928139, sample_mean=112, sample_std=40, N_sample=36):
        """Inverse operation: interval endpoints for a given coverage."""
        return stats.norm.interval(percentage, loc=sample_mean,
                                   scale=sample_std / math.sqrt(N_sample))
if __name__ == "__main__":
N_sample = 36
sample_mean = 112
sample_std = 40
cf = ConfidenceInterval()
# if you know the end points
min_mean = 100
max_mean = 124
# and you want to know the percentage
percentage = cf.getPercentageForConfidenceInterval(N_sample=N_sample, sample_mean=sample_mean,
sample_std=sample_std, min_mean=min_mean, max_mean=max_mean)
print percentage
# if you know the percentage
# but you want to get the end points
bounds = cf.getConfidenceInterval(percentage=percentage, sample_mean=sample_mean,
sample_std=sample_std, N_sample=N_sample)
print bounds
assert np.allclose(bounds, (min_mean, max_mean))
|
pligor/predicting-future-product-prices
|
00_skroutz_import/confidence_interval.py
|
Python
|
agpl-3.0
| 3,044
|
[
"Gaussian"
] |
a9eccbec0c44014b1c2cdc9bee21d3c84d149f17c346bf998611790f60e37cb1
|
#!/usr/bin/env python
"""
Time decoding algorithms that make use of the integrate-and-fire
neuron model and the trigonometric polynomial approximation.
- iaf_decode - IAF time decoding machine.
- iaf_decode_pop - MISO IAF time decoding machine.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
__all__ = ['iaf_decode', 'iaf_decode_pop']
import numpy as np
# Pseudoinverse singular value cutoff:
__pinv_rcond__ = 1e-8
def iaf_decode(s, dur, dt, bw, b, d, R=np.inf, C=1.0, M=5, smoothing=0.0):
    """
    IAF time decoding machine using trigonometric polynomials.
    Decode a finite length signal encoded with an Integrate-and-Fire
    neuron assuming that the encoded signal is representable in terms
    of trigonometric polynomials.
    Parameters
    ----------
    s : ndarray of floats
        Encoded signal. The values represent the time between spikes (in s).
    dur : float
        Duration of signal (in s).
    dt : float
        Sampling resolution of original signal; the sampling frequency
        is 1/dt Hz.
    bw : float
        Signal bandwidth (in rad/s).
    b : float
        Encoder bias.
    d : float
        Encoder threshold.
    R : float
        Neuron resistance.
    C : float
        Neuron capacitance.
    M : int
        2*M+1 coefficients are used for reconstructing the signal.
    smoothing : float
        Smoothing parameter.
    Returns
    -------
    u_rec : ndarray of floats
        Recovered signal.
    """
    N = len(s)
    T = 2*np.pi*M/bw
    if T < dur:
        raise ValueError('2*pi*M/bw must exceed the signal length')
    bwM = bw/M
    em = lambda m, t: np.exp(1j*m*bwM*t)
    RC = R*C
    # spike times from the interspike intervals
    ts = np.cumsum(s)
    # Build the measurement matrix F and measurement vector q relating the
    # trig-polynomial coefficients to the interspike intervals.
    # NOTE: range replaces py2-only xrange (identical iteration behavior,
    # works on both Python 2 and 3).
    F = np.empty((N-1, 2*M+1), complex)
    if np.isinf(R):
        # ideal (non-leaky) integrate-and-fire neuron
        for k in range(N-1):
            for m in range(-M, M+1):
                if m == 0:
                    F[k, m+M] = s[k+1]
                else:
                    F[k, m+M] = np.conj((em(-m, ts[k+1])-em(-m, ts[k]))/(-1j*m*bwM))
        q = C*d-b*s[1:]
    else:
        # leaky integrate-and-fire neuron (finite R)
        for k in range(N-1):
            for m in range(-M, M+1):
                yk = RC*(1-np.exp(-s[k+1]/RC))
                F[k, m+M] = np.conj((RC*em(-m, ts[k+1])+(yk-RC)*em(-m, ts[k]))/(1-1j*m*bwM*RC))
        q = C*(d+b*R*(np.exp(-s[1:]/RC)-1))
    # Solve for the coefficients via a (possibly smoothed) pseudoinverse:
    FH = F.conj().T
    c = np.dot(np.dot(np.linalg.pinv(np.dot(FH,
               F)+(N-1)*smoothing*np.eye(2*M+1),
               __pinv_rcond__), FH), q)
    # Reconstruct the signal on the requested time grid:
    t = np.arange(0, dur, dt)
    u_rec = np.zeros(len(t), complex)
    for m in range(-M, M+1):
        u_rec += c[m+M]*em(m, t)
    return np.real(u_rec)
def iaf_decode_pop(s_list, dur, dt, bw, b_list, d_list, R_list,
                   C_list, M=5, smoothing=0.0):
    """
    Multi-input single-output IAF time decoding machine.
    Decode a signal encoded with an ensemble of Integrate-and-Fire
    neurons assuming that the encoded signal is representable in terms
    of trigonometric polynomials.
    Parameters
    ----------
    s_list : list of ndarrays of floats
        Signal encoded by an ensemble of encoders. The values represent the
        time between spikes (in s). The number of arrays in the list
        corresponds to the number of encoders in the ensemble.
    dur : float
        Duration of signal (in s).
    dt : float
        Sampling resolution of original signal; the sampling frequency
        is 1/dt Hz.
    bw : float
        Signal bandwidth (in rad/s).
    b_list : list of floats
        List of encoder biases.
    d_list : list of floats
        List of encoder thresholds.
    R_list : list of floats
        List of encoder neuron resistances.
    C_list : list of floats.
        List of encoder neuron capacitances.
    M : int
        2*M+1 coefficients are used for reconstructing the signal.
    smoothing : float
        Smoothing parameter.
    Returns
    -------
    u_rec : ndarray of floats
        Recovered signal.
    Notes
    -----
    The number of spikes contributed by each neuron may differ from the
    number contributed by other neurons.
    """
    # Number of neurons:
    N = len(s_list)
    if not N:
        raise ValueError('no spike data given')
    T = 2*np.pi*M/bw
    if T < dur:
        raise ValueError('2*pi*M/bw must exceed the signal length')
    bwM = bw/M
    em = lambda m, t: np.exp(1j*m*bwM*t)
    # Number of interspike intervals per neuron.  List comprehensions
    # replace map(): on Python 3, map() returns a non-subscriptable
    # iterator, which broke both np.array(map(...)) and ts_list[i].
    ns = np.array([len(s) for s in s_list])
    # Compute the spike times:
    ts_list = [np.cumsum(s) for s in s_list]
    # Indices for accessing subblocks of the reconstruction matrix:
    Fi = np.cumsum(np.hstack([0, ns-1]))
    # Compute the values of the matrix that must be inverted to obtain
    # the reconstruction coefficients.  NOTE: the np.float alias was removed
    # in numpy >= 1.20; the builtin float is the documented replacement.
    Nq = np.sum(ns)-np.sum(ns>1)
    F = np.empty((Nq, 2*M+1), complex)
    q = np.empty((Nq, 1), float)
    if all(np.isinf(R_list)):
        # ideal (non-leaky) integrate-and-fire ensemble
        for i in range(N):
            ts = ts_list[i]
            F_temp = np.empty((ns[i]-1, 2*M+1), complex)
            for k in range(ns[i]-1):
                for m in range(-M, M+1):
                    if m == 0:
                        F_temp[k, m+M] = s_list[i][k+1]
                    else:
                        F_temp[k, m+M] = (em(m, ts[k+1])- \
                                          em(m, ts[k]))/(1j*m*bwM)
            F[Fi[i]:Fi[i+1], :] = F_temp
            q[Fi[i]:Fi[i+1], 0] = \
                C_list[i]*d_list[i]-b_list[i]*s_list[i][1:]
    else:
        # leaky integrate-and-fire ensemble (finite resistances)
        for i in range(N):
            ts = ts_list[i]
            F_temp = np.empty((ns[i]-1, 2*M+1), complex)
            RC = R_list[i]*C_list[i]
            for k in range(ns[i]-1):
                for m in range(-M, M+1):
                    if m == 0:
                        F_temp[k, m+M] = (np.exp(ts[k+1]/RC)-np.exp(ts[k]/RC))* \
                                         np.exp(-ts[k+1]/RC)*RC
                    else:
                        x = 1j*m*bwM+1/RC
                        F_temp[k, m+M] = (np.exp(ts[k+1]*x)-np.exp(ts[k]*x))* \
                                         np.exp(-ts[k+1]/RC)/x
            F[Fi[i]:Fi[i+1], :] = F_temp
            q[Fi[i]:Fi[i+1], 0] = \
                C_list[i]*d_list[i]-b_list[i]*RC*(1-np.exp(-s_list[i][1:]/RC))
    # (the unused q_temp scratch arrays from the original were dropped)
    # Solve for the coefficients via a (possibly smoothed) pseudoinverse:
    FH = F.conj().T
    c = np.dot(np.dot(np.linalg.pinv(np.dot(FH, F)+(N-1)*smoothing*np.eye(2*M+1), __pinv_rcond__), FH), q)
    # Reconstruct the signal on the requested time grid:
    t = np.arange(0, dur, dt)
    u_rec = np.zeros(len(t), complex)
    for m in range(-M, M+1):
        u_rec += c[m+M]*em(m, t)
    return np.real(u_rec)
|
bionet/ted.python
|
bionet/ted/iaf_trig.py
|
Python
|
bsd-3-clause
| 6,764
|
[
"NEURON"
] |
27784c72d90ef73466ff63c71784c0ed9e1fe117bae6d9c7278e0e7a0e4a6cd0
|
# Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors:
# Copyright (c) 2014, Stanford University and the Authors
# All rights reserved.
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import print_function, absolute_import, division
from glob import glob
from os.path import join
import mdtraj as md
from .base import Bunch, _MDDataset
DATA_URL = "https://ndownloader.figshare.com/articles/1026324/versions/1"
TARGET_DIRECTORY = "met_enkephalin"
class MetEnkephalin(_MDDataset):
    """Loader for the met-enkephalin dataset
    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all MSMBuilder data is stored in '~/msmbuilder_data' subfolders.
    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.
    Notes
    -----
    The dataset consists of ten ~50 ns molecular dynamics (MD) simulation
    trajectories of the 5 residue Met-enkaphalin peptide. The aggregate
    sampling is 499.58 ns. Simulations were performed starting from the 1st
    model in the 1PLX PDB file, solvated with 832 TIP3P water molecules using
    OpenMM 6.0. The coordinates (protein only -- the water was stripped)
    are saved every 5 picoseconds. Each of the ten trajectories is roughly
    50 ns long and contains about 10,000 snapshots.
    Forcefield: amber99sb-ildn; water: tip3p; nonbonded method: PME; cutoffs:
    1nm; bonds to hydrogen were constrained; integrator: langevin dynamics;
    temperature: 300K; friction coefficient: 1.0/ps; pressure control: Monte
    Carlo barostat (interval of 25 steps); timestep 2 fs.
    The dataset is available on figshare at
    http://dx.doi.org/10.6084/m9.figshare.1026324
    """
    # download location and cache subfolder used by the _MDDataset base class
    data_url = DATA_URL
    target_directory = TARGET_DIRECTORY

    def get_cached(self):
        """Load the locally cached trajectories.

        Returns a Bunch with ``trajectories`` (list of loaded trajectories)
        and ``DESCR`` (the dataset description).
        """
        # the PDB provides the topology; each DCD is loaded against it
        top = md.load(join(self.data_dir, '1plx.pdb'))
        trajectories = []
        for fn in glob(join(self.data_dir, 'trajectory*.dcd')):
            trajectories.append(md.load(fn, top=top))
        return Bunch(trajectories=trajectories, DESCR=self.description())
def fetch_met_enkephalin(data_home=None):
    # Thin functional wrapper around the loader class.
    loader = MetEnkephalin(data_home)
    return loader.get()

# Reuse the class docstring so help() on the function shows the full
# dataset description.
fetch_met_enkephalin.__doc__ = MetEnkephalin.__doc__
|
msultan/msmbuilder
|
msmbuilder/example_datasets/met_enkephalin.py
|
Python
|
lgpl-2.1
| 2,514
|
[
"MDTraj",
"OpenMM"
] |
e9f793a2c1b597039ea11811f6b31c05a6ea8b85e96410f38a5be7dcaee920dd
|
# Python 2 / legacy-NEST script: probes the dopamine-modulated BCPNN
# synapse while varying one neuron parameter across the post population.
import nest
import nest.raster_plot
import numpy as np
import pylab as pl

# Start from a clean simulator state; let recorders overwrite files left
# over from a previous run.
nest.ResetKernel()
nest.SetKernelStatus({"overwrite_files": True})

sim_time = 0.   # accumulated simulated time (ms)
weight = []     # mean BCPNN weight, sampled after every simulation phase

# Load the module providing 'bcpnn_dopamine_synapse' if it is not already
# registered with the kernel (hard-coded build path on the author's machine).
if (not 'bcpnn_dopamine_synapse' in nest.Models()):
    #nest.Install('ml_module')
    nest.Install('/media/backup/temp_milner/save/17.10.14/modules/from.git/bcpnndopa_module/lib/nest/ml_module')

# Dopaminergic population feeding a volume transmitter, which broadcasts
# its spikes to all dopamine-modulated synapses.
dopa = nest.Create('iaf_neuron', 200)
vt_dopa = nest.Create('volume_transmitter', 1)
nest.ConvergentConnect(dopa, vt_dopa, weight= 5., delay = 1.)

# Pre and post populations of BCPNN-capable conductance neurons.
sample_size = 20
pre = nest.Create('iaf_cond_alpha_bias', sample_size)
post = nest.Create('iaf_cond_alpha_bias', sample_size)

# One Poisson drive per population plus a shared background noise source.
poisson_pre = nest.Create('poisson_generator',1)
poisson_post = nest.Create('poisson_generator',1)
poisson_dopa = nest.Create('poisson_generator',1)
poisson_noise = nest.Create('poisson_generator',1)
nest.DivergentConnect(poisson_noise,pre , weight=1., delay=1.)
nest.DivergentConnect(poisson_noise,post , weight=1., delay=1.)
nest.DivergentConnect(poisson_noise,dopa , weight=1., delay=1.)
nest.SetStatus(poisson_noise, {'rate':1800.})

# Spike and membrane-potential recording devices.
recorder = nest.Create('spike_detector',1)
voltmeter = nest.Create('multimeter', 1, params={'record_from': ['V_m'], 'interval' :0.1} )
nest.SetStatus(voltmeter, [{"to_file": True, "withtime": True, 'label' : 'volt'}])

time = 300.     # NOTE(review): appears unused below -- confirm before removing
key = 'C_m'     # neuron parameter varied across the post population
spread = .2     # +/- 20 % around the model default

# Parameters for the dopamine-modulated BCPNN synapse model.
params = {
    'b': 1.,
    'delay':1.,
    'dopamine_modulated':True,
    'complementary':False,
    'fmax': 20.,
    'gain': 2.,
    'gain_dopa': 1.,
    'n': 0.07,
    'p_i': .01,
    'p_j': .01,
    'p_ij': .00012,
    'tau_i': 5.,
    'tau_j': 6.,
    'tau_e': 40.,
    'tau_p': 200.,
    'tau_n': 100.,
    'value': 1.,
    'k_pow':3.,
    'reverse': 1.
    }

# Bind the synapse model to the dopamine volume transmitter.
nest.SetDefaults('bcpnn_dopamine_synapse', {'vt':vt_dopa[0]})

# Spread `key` linearly over the post population around its default value.
default = nest.GetStatus([post[0]], key)[0]
print 'Default value for ', key, 'is ', default
start = (1-spread)*default
end= (1+spread)*default
value = np.arange(start, end, (end-start)/sample_size)
for i in xrange(sample_size):
    nest.SetStatus([post[i]], {key:value[i]})

# Wire drives and recorders, then create the plastic pre->post projection.
nest.DivergentConnect(poisson_pre, pre, weight=4., delay=1.)
nest.DivergentConnect(poisson_post, post, weight=4., delay=1.)
nest.DivergentConnect(poisson_dopa, dopa, weight=4., delay=1.)
nest.ConvergentConnect(post, recorder)
nest.ConvergentConnect(voltmeter, post)
nest.SetStatus(poisson_pre, {'rate': 0.})
nest.CopyModel('bcpnn_dopamine_synapse', 'test', params)
nest.DivergentConnect(pre, post, model='test' )
conn = nest.GetConnections(pre, post)
def simul(pre_rate, post_rate, dopa_rate, duration):
    """Advance the network by `duration` ms with the given Poisson rates.

    Sets the rates (spikes/s) of the pre, post and dopamine Poisson
    generators, runs the simulation, accumulates the global `sim_time`,
    and appends the mean BCPNN weight log(p_ij / (p_i * p_j)) over all
    pre->post connections to the global `weight` list.
    """
    nest.SetStatus(poisson_pre, {'rate': pre_rate})
    nest.SetStatus(poisson_post, {'rate': post_rate})
    nest.SetStatus(poisson_dopa, {'rate': dopa_rate})
    global sim_time
    global weight
    sim_time+= duration
    nest.Simulate(duration)
    # BCPNN weight definition: w_ij = log(p_ij / (p_i * p_j)), averaged
    # over every connection in `conn`.
    weight.append(np.mean([(np.log(a['p_ij']/(a['p_i']*a['p_j']))) for a in nest.GetStatus(conn)]))
# Stimulation protocol: five 250 ms phases with different combinations of
# pre, post and dopamine drive rates (spikes/s).
step=250.
simul(1000.,1000.,1000.,step)
simul(2000.,1000.,1000.,step)
simul(2000.,1000.,1000.,step)
simul(3000.,0.,1500.,step)
simul(3000.,0.,1000.,step)

# Plot the recorded membrane potential (top) and the evolution of the
# mean BCPNN weight across phases (bottom), then a spike raster.
events = nest.GetStatus(voltmeter)[0]['events']
t = events['times']
pl.subplot(211)
pl.plot(t, events['V_m'])
pl.ylabel('Membrane potential [mV]')
pl.subplot(212)
pl.plot(weight)
pl.show()
nest.raster_plot.from_device(recorder, hist=True)
nest.raster_plot.show()

# NOTE(review): reference copy of a GetStatus() result for an
# 'iaf_cond_alpha_bias' neuron, kept for documentation purposes; it is
# never read by this script.
param = [{'C_m': 250.0,
    'E_L': -70.0,
    'E_ex': 0.0,
    'E_in': -85.0,
    'I_e': 0.0,
    'V_m': -70.0,
    'V_reset': -60.0,
    'V_th': -55.0,
    'archiver_length': 0,
    'bias': 0.0,
    'epsilon': 0.001,
    'fmax': 20.0,
    'frozen': False,
    'g_L': 16.6667,
    'gain': 1.0,
    'global_id': 204,
    'kappa': 1.0,
    'local': True,
    'local_id': 204,
    'model': 'iaf_cond_alpha_bias',
    'parent': 0,
    'recordables': ['V_m',
    't_ref_remaining',
    'g_ex',
    'g_in',
    'z_j',
    'e_j',
    'p_j',
    'bias',
    'epsilon',
    'kappa'],
    'state': 0,
    't_ref': 2.0,
    't_spike': -1.0,
    'tau_e': 100.0,
    'tau_j': 10.0,
    'tau_minus': 20.0,
    'tau_minus_triplet': 110.0,
    'tau_p': 1000.0,
    'tau_syn_ex': 0.2,
    'tau_syn_in': 2.0,
    'thread': 0,
    'type': 'neuron',
    'vp': 0}]
|
pierreberthet/local-scripts
|
reduce_dopa.py
|
Python
|
gpl-2.0
| 4,102
|
[
"NEURON"
] |
eff669daa805400b7b992294a65cd8bf9fe9db9d3b96dee806103f12bb115322
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import chigger

# Render-test script: read the sample Exodus result, display the
# 'convected' variable on every block (block=None), write the reference
# image used by the image-diff harness, then start the interactive window
# (a no-op under test=True).
reader = chigger.exodus.ExodusReader('../../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, block=None, variable='convected')
window = chigger.RenderWindow(mug, size=[300,300], test=True)
window.write('none.png')
window.start()
|
Chuban/moose
|
python/chigger/tests/exodus/blocks/none.py
|
Python
|
lgpl-2.1
| 1,177
|
[
"MOOSE"
] |
6a3c9ab2eccba905e693492c47560fc066a5e8d3d23181ea2366cabc84fb6ad1
|
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""Unittest for ipaddress module."""
import unittest
import re
import contextlib
import operator
import ipaddress
class BaseTestCase(unittest.TestCase):
    """Common machinery for the systematic error-reporting tests.

    ipaddress (unlike the original ipaddr module) tries to produce error
    messages for users who *don't know the rules* of RFC-compliant
    addresses, so these helpers check both the exception type and the
    message text, and that implicit exception context is suppressed.
    Subclasses bind ``factory`` to the constructor under test.
    """

    @property
    def factory(self):
        # Overridden as a plain class attribute in concrete subclasses.
        raise NotImplementedError

    @contextlib.contextmanager
    def assertCleanError(self, exc_type, details, *args):
        """
        Ensure exception does not display a context by default

        Wraps unittest.TestCase.assertRaisesRegex
        """
        message = details % args if args else details
        with self.assertRaisesRegex(exc_type, message) as caught:
            yield caught
        # Ensure we produce clean tracebacks on failure
        raised = caught.exception
        if raised.__context__ is not None:
            self.assertTrue(raised.__suppress_context__)

    def assertAddressError(self, details, *args):
        """Ensure a clean AddressValueError"""
        return self.assertCleanError(ipaddress.AddressValueError,
                                     details, *args)

    def assertNetmaskError(self, details, *args):
        """Ensure a clean NetmaskValueError"""
        return self.assertCleanError(ipaddress.NetmaskValueError,
                                     details, *args)

    def assertInstancesEqual(self, lhs, rhs):
        """Check constructor arguments produce equivalent instances"""
        self.assertEqual(self.factory(lhs), self.factory(rhs))
class CommonTestMixin:
    """Checks shared by every address, interface and network factory."""

    def test_empty_address(self):
        # The empty string is never a valid address of any version.
        with self.assertAddressError("Address cannot be empty"):
            self.factory("")

    def test_floats_rejected(self):
        # Floats are rejected, and the repr of the offending value must
        # appear in the error message.
        with self.assertAddressError(re.escape(repr("1.0"))):
            self.factory(1.0)

    def test_not_an_index_issue15559(self):
        # Addresses deliberately do not implement __index__: doing so
        # makes for a very nasty interaction with the bytes constructor,
        # so implicit use as an integer is disallowed.
        for converter in (operator.index, hex, bytes):
            self.assertRaises(TypeError, converter, self.factory(1))
class CommonTestMixin_v4(CommonTestMixin):
    """Checks shared by IPv4 addresses, interfaces and networks."""

    def test_leading_zeros(self):
        # Leading zeros in an octet are tolerated and ignored.
        for padded, canonical in (("000.000.000.000", "0.0.0.0"),
                                  ("192.168.000.001", "192.168.0.1")):
            self.assertInstancesEqual(padded, canonical)

    def test_int(self):
        # Plain integers construct the equivalent dotted-quad address.
        for number, dotted in ((0, "0.0.0.0"), (3232235521, "192.168.0.1")):
            self.assertInstancesEqual(number, dotted)

    def test_packed(self):
        # Four packed big-endian bytes construct the equivalent address.
        for raw, dotted in ((bytes.fromhex("00000000"), "0.0.0.0"),
                            (bytes.fromhex("c0a80001"), "192.168.0.1")):
            self.assertInstancesEqual(raw, dotted)

    def test_negative_ints_rejected(self):
        msg = "-1 (< 0) is not permitted as an IPv4 address"
        with self.assertAddressError(re.escape(msg)):
            self.factory(-1)

    def test_large_ints_rejected(self):
        msg = "%d (>= 2**32) is not permitted as an IPv4 address"
        with self.assertAddressError(re.escape(msg % 2**32)):
            self.factory(2**32)

    def test_bad_packed_length(self):
        def check(length):
            # Only exactly four packed bytes are acceptable.
            raw = bytes(length)
            msg = "%r (len %d != 4) is not permitted as an IPv4 address"
            with self.assertAddressError(re.escape(msg % (raw, length))):
                self.factory(raw)
        check(3)
        check(5)
class CommonTestMixin_v6(CommonTestMixin):
    """Checks shared by IPv6 addresses, interfaces and networks."""

    def test_leading_zeros(self):
        # Leading zeros within a group are tolerated and ignored.
        self.assertInstancesEqual("0000::0000", "::")
        self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")

    def test_int(self):
        # Plain integers construct the equivalent IPv6 address.
        self.assertInstancesEqual(0, "::")
        self.assertInstancesEqual(3232235521, "::c0a8:1")

    def test_packed(self):
        # Sixteen packed big-endian bytes construct the equivalent address.
        addr = bytes(12) + bytes.fromhex("00000000")
        self.assertInstancesEqual(addr, "::")
        addr = bytes(12) + bytes.fromhex("c0a80001")
        self.assertInstancesEqual(addr, "::c0a8:1")
        addr = bytes.fromhex("c0a80001") + bytes(12)
        self.assertInstancesEqual(addr, "c0a8:1::")

    def test_negative_ints_rejected(self):
        msg = "-1 (< 0) is not permitted as an IPv6 address"
        with self.assertAddressError(re.escape(msg)):
            self.factory(-1)

    def test_large_ints_rejected(self):
        msg = "%d (>= 2**128) is not permitted as an IPv6 address"
        with self.assertAddressError(re.escape(msg % 2**128)):
            self.factory(2**128)

    def test_bad_packed_length(self):
        def assertBadLength(length):
            addr = bytes(length)
            msg = "%r (len %d != 16) is not permitted as an IPv6 address"
            with self.assertAddressError(re.escape(msg % (addr, length))):
                # Fixed: a duplicated second self.factory(addr) call here
                # was dead code -- the first call raises, so control never
                # reached it (the v4 counterpart has a single call).
                self.factory(addr)
        assertBadLength(15)
        assertBadLength(17)
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
    """Error reporting of the IPv4Address constructor."""
    factory = ipaddress.IPv4Address

    def test_network_passed_as_address(self):
        # A prefix length is only meaningful for interfaces and networks.
        addr = "127.0.0.1/24"
        with self.assertAddressError("Unexpected '/' in %r", addr):
            ipaddress.IPv4Address(addr)

    def test_bad_address_split(self):
        def rejected(addr):
            with self.assertAddressError("Expected 4 octets in %r", addr):
                ipaddress.IPv4Address(addr)
        # Anything that does not split into exactly four octets.
        for candidate in ("127.0.1", "42.42.42.42.42", "42.42.42", "42.42",
                          "42", "42..42.42.42", "42.42.42.42.",
                          "42.42.42.42...", ".42.42.42.42", "...42.42.42.42",
                          "016.016.016", "016.016", "016", "000",
                          "0x0a.0x0a.0x0a", "0x0a.0x0a", "0x0a", ".",
                          "bogus", "bogus.com", "1000", "1000000000000000",
                          "192.168.0.1.com"):
            rejected(candidate)

    def test_empty_octet(self):
        def rejected(addr):
            with self.assertAddressError("Empty octet not permitted in %r",
                                         addr):
                ipaddress.IPv4Address(addr)
        rejected("42..42.42")
        rejected("...")

    def test_invalid_characters(self):
        def rejected(addr, octet):
            msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv4Address(addr)
        for candidate, octet in (("0x0a.0x0a.0x0a.0x0a", "0x0a"),
                                 ("0xa.0x0a.0x0a.0x0a", "0xa"),
                                 ("42.42.42.-0", "-0"),
                                 ("42.42.42.+0", "+0"),
                                 ("42.42.42.-42", "-42"),
                                 ("+1.+2.+3.4", "+1"),
                                 ("1.2.3.4e0", "4e0"),
                                 ("1.2.3.4::", "4::"),
                                 ("1.a.2.3", "a")):
            rejected(candidate, octet)

    def test_octal_decimal_ambiguity(self):
        def rejected(addr, octet):
            msg = "Ambiguous (octal/decimal) value in %r not permitted in %r"
            with self.assertAddressError(re.escape(msg % (octet, addr))):
                ipaddress.IPv4Address(addr)
        rejected("016.016.016.016", "016")
        rejected("001.000.008.016", "008")

    def test_octet_length(self):
        def rejected(addr, octet):
            msg = "At most 3 characters permitted in %r in %r"
            with self.assertAddressError(re.escape(msg % (octet, addr))):
                ipaddress.IPv4Address(addr)
        rejected("0000.000.000.000", "0000")
        rejected("12345.67899.-54321.-98765", "12345")

    def test_octet_limit(self):
        def rejected(addr, octet):
            msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv4Address(addr)
        rejected("257.0.0.0", 257)
        rejected("192.168.0.999", 999)
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
    """Error reporting of the IPv6Address constructor."""
    factory = ipaddress.IPv6Address

    def test_network_passed_as_address(self):
        # A prefix length is only meaningful for interfaces and networks.
        addr = "::1/24"
        with self.assertAddressError("Unexpected '/' in %r", addr):
            ipaddress.IPv6Address(addr)

    def test_bad_address_split_v6_not_enough_parts(self):
        def rejected(addr):
            msg = "At least 3 parts expected in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        for candidate in (":", ":1", "FEDC:9878"):
            rejected(candidate)

    def test_bad_address_split_v6_too_many_colons(self):
        def rejected(addr):
            msg = "At most 8 colons permitted in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        for candidate in ("9:8:7:6:5:4:3::2:1", "10:9:8:7:6:5:4:3:2:1",
                          "::8:7:6:5:4:3:2:1", "8:7:6:5:4:3:2:1::",
                          # A trailing IPv4 address counts as two parts
                          "10:9:8:7:6:5:4:3:42.42.42.42"):
            rejected(candidate)

    def test_bad_address_split_v6_too_many_parts(self):
        def rejected(addr):
            msg = "Exactly 8 parts expected without '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        for candidate in ("3ffe:0:0:0:0:0:0:0:1", "9:8:7:6:5:4:3:2:1",
                          "7:6:5:4:3:2:1",
                          # A trailing IPv4 address counts as two parts
                          "9:8:7:6:5:4:3:42.42.42.42",
                          "7:6:5:4:3:42.42.42.42"):
            rejected(candidate)

    def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
        def rejected(addr):
            msg = "Expected at most 7 other parts with '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        rejected("1:2:3:4::5:6:7:8")

    def test_bad_address_split_v6_repeated_double_colon(self):
        def rejected(addr):
            msg = "At most one '::' permitted in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        for candidate in ("3ffe::1::1", "1::2::3::4:5", "2001::db:::1",
                          "3ffe::1::", "::3ffe::1", ":3ffe::1::1",
                          "3ffe::1::1:", ":3ffe::1::1:", ":::",
                          "2001:db8:::1"):
            rejected(candidate)

    def test_bad_address_split_v6_leading_colon(self):
        def rejected(addr):
            msg = "Leading ':' only permitted as part of '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        for candidate in (":2001:db8::1", ":1:2:3:4:5:6:7",
                          ":1:2:3:4:5:6:", ":6:5:4:3:2:1::"):
            rejected(candidate)

    def test_bad_address_split_v6_trailing_colon(self):
        def rejected(addr):
            msg = "Trailing ':' only permitted as part of '::' in %r"
            with self.assertAddressError(msg, addr):
                ipaddress.IPv6Address(addr)
        for candidate in ("2001:db8::1:", "1:2:3:4:5:6:7:",
                          "::1.2.3.4:", "::7:6:5:4:3:2:"):
            rejected(candidate)

    def test_bad_v4_part_in(self):
        # The embedded dotted-quad error is propagated into the v6 message.
        def rejected(addr, v4_error):
            with self.assertAddressError("%s in %r", v4_error, addr):
                ipaddress.IPv6Address(addr)
        for candidate, embedded in (
                ("3ffe::1.net", "Expected 4 octets in '1.net'"),
                ("3ffe::127.0.1", "Expected 4 octets in '127.0.1'"),
                ("::1.2.3", "Expected 4 octets in '1.2.3'"),
                ("::1.2.3.4.5", "Expected 4 octets in '1.2.3.4.5'"),
                ("3ffe::1.1.1.net",
                 "Only decimal digits permitted in 'net' in '1.1.1.net'")):
            rejected(candidate, embedded)

    def test_invalid_characters(self):
        def rejected(addr, part):
            msg = "Only hex digits permitted in %r in %r" % (part, addr)
            with self.assertAddressError(re.escape(msg)):
                ipaddress.IPv6Address(addr)
        for candidate, part in (("3ffe::goog", "goog"), ("3ffe::-0", "-0"),
                                ("3ffe::+0", "+0"), ("3ffe::-1", "-1"),
                                ("1.2.3.4::", "1.2.3.4"),
                                ("1234:axy::b", "axy")):
            rejected(candidate, part)

    def test_part_length(self):
        def rejected(addr, part):
            msg = "At most 4 characters permitted in %r in %r"
            with self.assertAddressError(msg, part, addr):
                ipaddress.IPv6Address(addr)
        for candidate, part in (("::00000", "00000"),
                                ("3ffe::10000", "10000"),
                                ("02001:db8::", "02001"),
                                ("2001:888888::1", "888888")):
            rejected(candidate, part)
class NetmaskTestMixin_v4(CommonTestMixin_v4):
    """Input validation on interfaces and networks is very similar"""

    def test_split_netmask(self):
        addr = "1.2.3.4/32/24"
        with self.assertAddressError("Only one '/' permitted in %r" % addr):
            self.factory(addr)

    def test_address_errors(self):
        def rejected(addr, details):
            with self.assertAddressError(details):
                self.factory(addr)
        for candidate, details in (
                ("/", "Address cannot be empty"),
                ("/8", "Address cannot be empty"),
                ("bogus", "Expected 4 octets"),
                ("google.com", "Expected 4 octets"),
                ("10/8", "Expected 4 octets"),
                ("::1.2.3.4", "Only decimal digits"),
                ("1.2.3.256", re.escape("256 (> 255)"))):
            rejected(candidate, details)

    def test_valid_netmask(self):
        # Expanded dotted-quad netmasks are accepted and normalised to CIDR.
        self.assertEqual(str(self.factory('192.0.2.0/255.255.255.0')),
                         '192.0.2.0/24')
        for prefixlen in range(0, 33):
            # Generate and re-parse the CIDR format (trivial).
            net_str = '0.0.0.0/%d' % prefixlen
            net = self.factory(net_str)
            self.assertEqual(str(net), net_str)
            # Generate and re-parse the expanded netmask.
            self.assertEqual(
                str(self.factory('0.0.0.0/%s' % net.netmask)), net_str)
            # Zero prefix is treated as decimal.
            self.assertEqual(str(self.factory('0.0.0.0/0%d' % prefixlen)),
                             net_str)
            # Generate and re-parse the expanded hostmask.  The ambiguous
            # cases (/0 and /32) are treated as netmasks.
            if prefixlen in (32, 0):
                net_str = '0.0.0.0/%d' % (32 - prefixlen)
            self.assertEqual(
                str(self.factory('0.0.0.0/%s' % net.hostmask)), net_str)

    def test_netmask_errors(self):
        def rejected(addr, netmask):
            msg = "%r is not a valid netmask" % netmask
            with self.assertNetmaskError(re.escape(msg)):
                self.factory("%s/%s" % (addr, netmask))
        for candidate, netmask in (
                ("1.2.3.4", ""), ("1.2.3.4", "-1"), ("1.2.3.4", "+1"),
                ("1.2.3.4", " 1 "), ("1.2.3.4", "0x1"), ("1.2.3.4", "33"),
                ("1.2.3.4", "254.254.255.256"), ("1.2.3.4", "1.a.2.3"),
                ("1.1.1.1", "254.xyz.2.3"), ("1.1.1.1", "240.255.0.0"),
                ("1.1.1.1", "255.254.128.0"), ("1.1.1.1", "0.1.127.255"),
                ("1.1.1.1", "pudding"), ("1.1.1.1", "::")):
            rejected(candidate, netmask)
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
    # Run the shared v4 address/netmask validation suite against the
    # interface constructor.
    factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
    # Run the shared v4 address/netmask validation suite against the
    # network constructor.
    factory = ipaddress.IPv4Network
class NetmaskTestMixin_v6(CommonTestMixin_v6):
    """Input validation on interfaces and networks is very similar"""

    def test_split_netmask(self):
        addr = "cafe:cafe::/128/190"
        with self.assertAddressError("Only one '/' permitted in %r" % addr):
            self.factory(addr)

    def test_address_errors(self):
        def rejected(addr, details):
            with self.assertAddressError(details):
                self.factory(addr)
        for candidate, details in (
                ("/", "Address cannot be empty"),
                ("/8", "Address cannot be empty"),
                ("google.com", "At least 3 parts"),
                ("1.2.3.4", "At least 3 parts"),
                ("10/8", "At least 3 parts"),
                ("1234:axy::b", "Only hex digits")):
            rejected(candidate, details)

    def test_valid_netmask(self):
        # We only support CIDR for IPv6, because expanded netmasks are not
        # standard notation.
        self.assertEqual(str(self.factory('2001:db8::/32')), '2001:db8::/32')
        for prefixlen in range(0, 129):
            # Generate and re-parse the CIDR format (trivial).
            net_str = '::/%d' % prefixlen
            self.assertEqual(str(self.factory(net_str)), net_str)
            # Zero prefix is treated as decimal.
            self.assertEqual(str(self.factory('::/0%d' % prefixlen)), net_str)

    def test_netmask_errors(self):
        def rejected(addr, netmask):
            msg = "%r is not a valid netmask" % netmask
            with self.assertNetmaskError(re.escape(msg)):
                self.factory("%s/%s" % (addr, netmask))
        for candidate, netmask in (
                ("::1", ""), ("::1", "::1"), ("::1", "1::"), ("::1", "-1"),
                ("::1", "+1"), ("::1", " 1 "), ("::1", "0x1"), ("::1", "129"),
                ("::1", "1.2.3.4"), ("::1", "pudding"), ("::", "::")):
            rejected(candidate, netmask)
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
    # Run the shared v6 address/netmask validation suite against the
    # interface constructor.
    factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
    # Run the shared v6 address/netmask validation suite against the
    # network constructor.
    factory = ipaddress.IPv6Network
class FactoryFunctionErrors(BaseTestCase):
    """The version-agnostic ip_* factories report clean ValueErrors."""

    def assertFactoryError(self, factory, kind):
        """Ensure a clean ValueError with the expected message"""
        bogus = "camelot"
        template = '%r does not appear to be an IPv4 or IPv6 %s'
        with self.assertCleanError(ValueError, template, bogus, kind):
            factory(bogus)

    def test_ip_address(self):
        self.assertFactoryError(ipaddress.ip_address, "address")

    def test_ip_interface(self):
        self.assertFactoryError(ipaddress.ip_interface, "interface")

    def test_ip_network(self):
        self.assertFactoryError(ipaddress.ip_network, "network")
class ComparisonTests(unittest.TestCase):
    """Equality, ordering and containment across the six object types."""

    v4addr = ipaddress.IPv4Address(1)
    v4net = ipaddress.IPv4Network(1)
    v4intf = ipaddress.IPv4Interface(1)
    v6addr = ipaddress.IPv6Address(1)
    v6net = ipaddress.IPv6Network(1)
    v6intf = ipaddress.IPv6Interface(1)

    v4_addresses = [v4addr, v4intf]
    v4_objects = v4_addresses + [v4net]
    v6_addresses = [v6addr, v6intf]
    v6_objects = v6_addresses + [v6net]
    objects = v4_objects + v6_objects

    def test_foreign_type_equality(self):
        # __eq__ should never raise TypeError directly
        other = object()
        for obj in self.objects:
            self.assertNotEqual(obj, other)
            self.assertFalse(obj == other)
            self.assertEqual(obj.__eq__(other), NotImplemented)
            self.assertEqual(obj.__ne__(other), NotImplemented)

    def test_mixed_type_equality(self):
        # Ensure none of the internal objects accidentally
        # expose the right set of attributes to become "equal"
        for lhs in self.objects:
            for rhs in self.objects:
                if lhs is rhs:
                    continue
                self.assertNotEqual(lhs, rhs)

    def test_containment(self):
        for obj in self.v4_addresses:
            self.assertIn(obj, self.v4net)
        for obj in self.v6_addresses:
            self.assertIn(obj, self.v6net)
        for obj in self.v4_objects + [self.v6net]:
            self.assertNotIn(obj, self.v6net)
        for obj in self.v6_objects + [self.v4net]:
            self.assertNotIn(obj, self.v4net)

    def test_mixed_type_ordering(self):
        # Ordering between unrelated types must raise, not guess.
        for lhs in self.objects:
            for rhs in self.objects:
                if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
                    continue
                self.assertRaises(TypeError, lambda: lhs < rhs)
                self.assertRaises(TypeError, lambda: lhs > rhs)
                self.assertRaises(TypeError, lambda: lhs <= rhs)
                self.assertRaises(TypeError, lambda: lhs >= rhs)

    def test_mixed_type_key(self):
        # with get_mixed_type_key, you can sort addresses and network.
        v4_ordered = [self.v4addr, self.v4net, self.v4intf]
        v6_ordered = [self.v6addr, self.v6net, self.v6intf]
        self.assertEqual(v4_ordered,
                         sorted(self.v4_objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(v6_ordered,
                         sorted(self.v6_objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(v4_ordered + v6_ordered,
                         sorted(self.objects,
                                key=ipaddress.get_mixed_type_key))
        self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))

    def test_incompatible_versions(self):
        # These should always raise TypeError
        v4addr = ipaddress.ip_address('1.1.1.1')
        v4net = ipaddress.ip_network('1.1.1.1')
        v6addr = ipaddress.ip_address('::1')
        # Bug fix: this previously called ip_address('::1'), so the
        # network-vs-network comparisons below were actually comparing a
        # network against an address and passed for the wrong reason.
        v6net = ipaddress.ip_network('::1')
        self.assertRaises(TypeError, v4addr.__lt__, v6addr)
        self.assertRaises(TypeError, v4addr.__gt__, v6addr)
        self.assertRaises(TypeError, v4net.__lt__, v6net)
        self.assertRaises(TypeError, v4net.__gt__, v6net)
        self.assertRaises(TypeError, v6addr.__lt__, v4addr)
        self.assertRaises(TypeError, v6addr.__gt__, v4addr)
        self.assertRaises(TypeError, v6net.__lt__, v4net)
        self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
#self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255')
self.ipv6_address = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1')
self.ipv6_interface = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1/64')
self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertEqual("IPv4Interface('1.2.3.4/32')",
repr(ipaddress.IPv4Interface('1.2.3.4')))
self.assertEqual("IPv6Interface('::1/128')",
repr(ipaddress.IPv6Interface('::1')))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
first, last = ipaddress._find_address_range([
ipaddress.IPv4Address('10.10.10.10'),
ipaddress.IPv4Address('10.10.10.12')])
self.assertEqual(first, last)
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertEqual("IPv4Network('1.2.3.0/24')", repr(self.ipv4_network))
def testMissingAddressVersion(self):
class Broken(ipaddress._BaseAddress):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingNetworkVersion(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingAddressClass(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*address"):
broken._address_class
def testGetNetwork(self):
self.assertEqual(int(self.ipv4_network.network_address), 16909056)
self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0')
self.assertEqual(int(self.ipv6_network.network_address),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6_network.network_address),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6_network.hostmask),
'::ffff:ffff:ffff:ffff')
def testIpFromInt(self):
self.assertEqual(self.ipv4_interface._ip,
ipaddress.IPv4Interface(16909060)._ip)
ipv4 = ipaddress.ip_network('1.2.3.4')
ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6_interface._ip,
ipaddress.IPv6Interface(v6_int)._ip)
self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
4)
self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
6)
def testIpFromPacked(self):
address = ipaddress.ip_address
self.assertEqual(self.ipv4_interface._ip,
ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
self.assertEqual(address('255.254.253.252'),
address(b'\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6_interface.ip,
ipaddress.ip_interface(
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
self.assertEqual(address('ffff:2:3:4:ffff::'),
address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
b'\xff\xff' + b'\x00' * 6))
self.assertEqual(address('::'),
address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0')
self.assertEqual(int(self.ipv6_network.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6_network.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
self.assertEqual(ipv4_zero_netmask._prefix_from_prefix_string('0'), 0)
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0'))
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0.0.0.0'))
self.assertFalse(ipv4_zero_netmask._is_valid_netmask('invalid'))
ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
self.assertEqual(ipv6_zero_netmask._prefix_from_prefix_string('0'), 0)
def testIPv4NetAndHostmasks(self):
net = self.ipv4_network
self.assertFalse(net._is_valid_netmask('invalid'))
self.assertTrue(net._is_valid_netmask('128.128.128.128'))
self.assertFalse(net._is_valid_netmask('128.128.128.127'))
self.assertFalse(net._is_valid_netmask('128.128.128.255'))
self.assertTrue(net._is_valid_netmask('255.128.128.128'))
self.assertFalse(net._is_hostmask('invalid'))
self.assertTrue(net._is_hostmask('128.255.255.255'))
self.assertFalse(net._is_hostmask('255.255.255.255'))
self.assertFalse(net._is_hostmask('1.2.3.4'))
net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
self.assertEqual(net.prefixlen, 24)
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255')
self.assertEqual(int(self.ipv6_network.broadcast_address),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6_network.broadcast_address),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4_network.supernet().network_address),
'1.2.2.0')
self.assertEqual(
ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
ipaddress.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6_network.supernet().network_address),
'2001:658:22a:cafe::')
self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(),
ipaddress.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4_network.supernet(3).network_address),
'1.2.0.0')
self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6_network.supernet(3).network_address),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv4_network.supernet,
new_prefix=25)
self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
self.ipv4_network.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv6_network.supernet,
new_prefix=65)
self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
self.ipv6_network.supernet(new_prefix=62))
    def testHosts(self):
        # hosts() excludes the network and broadcast addresses of a /24,
        # but a /31 yields both of its addresses.
        hosts = list(self.ipv4_network.hosts())
        self.assertEqual(254, len(hosts))
        self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
        self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
        # special case where only 1 bit is left for address
        self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
                          ipaddress.IPv4Address('2.0.0.1')],
                         list(ipaddress.ip_network('2.0.0.0/31').hosts()))
    def testFancySubnetting(self):
        # subnets() accepts either prefixlen_diff or new_prefix (not both),
        # and new_prefix must be longer than the current prefix.
        self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
                         sorted(self.ipv4_network.subnets(new_prefix=27)))
        self.assertRaises(ValueError, list,
                          self.ipv4_network.subnets(new_prefix=23))
        self.assertRaises(ValueError, list,
                          self.ipv4_network.subnets(prefixlen_diff=3,
                                                    new_prefix=27))
        self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
                         sorted(self.ipv6_network.subnets(new_prefix=68)))
        self.assertRaises(ValueError, list,
                          self.ipv6_network.subnets(new_prefix=63))
        self.assertRaises(ValueError, list,
                          self.ipv6_network.subnets(prefixlen_diff=4,
                                                    new_prefix=68))
    def testGetSubnets(self):
        # Default subnets() halves the network (prefix length + 1).
        self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
        self.assertEqual(str(list(
                    self.ipv4_network.subnets())[0].network_address),
                         '1.2.3.0')
        self.assertEqual(str(list(
                    self.ipv4_network.subnets())[1].network_address),
                         '1.2.3.128')
        self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddress.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddress.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
    def testSubnet2(self):
        # prefixlen_diff=2 splits each network into four equal subnets.
        ips = [str(x) for x in self.ipv4_network.subnets(2)]
        self.assertEqual(
            ips,
            ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
        ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
        self.assertEqual(
            ipsv6,
            ['2001:658:22a:cafe::/66',
             '2001:658:22a:cafe:4000::/66',
             '2001:658:22a:cafe:8000::/66',
             '2001:658:22a:cafe:c000::/66'])
    def testSubnetFailsForLargeCidrDiff(self):
        # Splitting past the maximum prefix length raises ValueError
        # (lazily, when the generator is consumed).
        self.assertRaises(ValueError, list,
                          self.ipv4_interface.network.subnets(9))
        self.assertRaises(ValueError, list,
                          self.ipv4_network.subnets(9))
        self.assertRaises(ValueError, list,
                          self.ipv6_interface.network.subnets(65))
        self.assertRaises(ValueError, list,
                          self.ipv6_network.subnets(65))
    def testSupernetFailsForLargeCidrDiff(self):
        # Widening past prefix length zero raises ValueError.
        self.assertRaises(ValueError,
                          self.ipv4_interface.network.supernet, 25)
        self.assertRaises(ValueError,
                          self.ipv6_interface.network.supernet, 65)
    def testSubnetFailsForNegativeCidrDiff(self):
        # A negative prefixlen_diff raises ValueError when consumed.
        self.assertRaises(ValueError, list,
                          self.ipv4_interface.network.subnets(-1))
        self.assertRaises(ValueError, list,
                          self.ipv4_network.subnets(-1))
        self.assertRaises(ValueError, list,
                          self.ipv6_interface.network.subnets(-1))
        self.assertRaises(ValueError, list,
                          self.ipv6_network.subnets(-1))
    def testGetNum_Addresses(self):
        # num_addresses counts every address in the network, including
        # network and broadcast addresses.
        self.assertEqual(self.ipv4_network.num_addresses, 256)
        self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
                         128)
        self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
        self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
        self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
                         9223372036854775808)
        self.assertEqual(self.ipv6_network.supernet().num_addresses,
                         36893488147419103232)
    def testContains(self):
        # 'in' accepts same-version interfaces and addresses.
        self.assertIn(ipaddress.IPv4Interface('1.2.3.128/25'),
                      self.ipv4_network)
        self.assertNotIn(ipaddress.IPv4Interface('1.2.4.1/24'),
                         self.ipv4_network)
        # We can test addresses and string as well.
        addr1 = ipaddress.IPv4Address('1.2.3.37')
        self.assertIn(addr1, self.ipv4_network)
        # issue 61, bad network comparison on like-ip'd network objects
        # with identical broadcast addresses.
        self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
                ipaddress.IPv4Network('1.0.0.0/15')))
    def testNth(self):
        # Indexing a network yields its nth address; out-of-range indices
        # raise IndexError.
        self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5')
        self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
        self.assertEqual(str(self.ipv6_network[5]),
                         '2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
    def testEqual(self):
        # Equality requires same version, same address and same prefix;
        # comparison with unrelated types is simply False.
        self.assertTrue(self.ipv4_interface ==
                        ipaddress.IPv4Interface('1.2.3.4/24'))
        self.assertFalse(self.ipv4_interface ==
                         ipaddress.IPv4Interface('1.2.3.4/23'))
        self.assertFalse(self.ipv4_interface ==
                         ipaddress.IPv6Interface('::1.2.3.4/24'))
        self.assertFalse(self.ipv4_interface == '')
        self.assertFalse(self.ipv4_interface == [])
        self.assertFalse(self.ipv4_interface == 2)
        self.assertTrue(self.ipv6_interface ==
            ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
        self.assertFalse(self.ipv6_interface ==
            ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
        self.assertFalse(self.ipv6_interface ==
                         ipaddress.IPv4Interface('1.2.3.4/23'))
        self.assertFalse(self.ipv6_interface == '')
        self.assertFalse(self.ipv6_interface == [])
        self.assertFalse(self.ipv6_interface == 2)
    def testNotEqual(self):
        # Mirror of testEqual via the != operator, plus plain addresses.
        self.assertFalse(self.ipv4_interface !=
                         ipaddress.IPv4Interface('1.2.3.4/24'))
        self.assertTrue(self.ipv4_interface !=
                        ipaddress.IPv4Interface('1.2.3.4/23'))
        self.assertTrue(self.ipv4_interface !=
                        ipaddress.IPv6Interface('::1.2.3.4/24'))
        self.assertTrue(self.ipv4_interface != '')
        self.assertTrue(self.ipv4_interface != [])
        self.assertTrue(self.ipv4_interface != 2)
        self.assertTrue(self.ipv4_address !=
                        ipaddress.IPv4Address('1.2.3.5'))
        self.assertTrue(self.ipv4_address != '')
        self.assertTrue(self.ipv4_address != [])
        self.assertTrue(self.ipv4_address != 2)
        self.assertFalse(self.ipv6_interface !=
            ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
        self.assertTrue(self.ipv6_interface !=
            ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
        self.assertTrue(self.ipv6_interface !=
                        ipaddress.IPv4Interface('1.2.3.4/23'))
        self.assertTrue(self.ipv6_interface != '')
        self.assertTrue(self.ipv6_interface != [])
        self.assertTrue(self.ipv6_interface != 2)
        self.assertTrue(self.ipv6_address !=
                        ipaddress.IPv4Address('1.2.3.4'))
        self.assertTrue(self.ipv6_address != '')
        self.assertTrue(self.ipv6_address != [])
        self.assertTrue(self.ipv6_address != 2)
def testSlash32Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface(
'1.2.3.4/255.255.255.255')), '1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEqual(str(ipaddress.IPv6Interface('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
    def testCollapsing(self):
        # collapse_addresses merges adjacent/overlapping networks and
        # deduplicates; mixing v4 and v6 raises TypeError.
        # test only IP addresses including some duplicates
        ip1 = ipaddress.IPv4Address('1.1.1.0')
        ip2 = ipaddress.IPv4Address('1.1.1.1')
        ip3 = ipaddress.IPv4Address('1.1.1.2')
        ip4 = ipaddress.IPv4Address('1.1.1.3')
        ip5 = ipaddress.IPv4Address('1.1.1.4')
        ip6 = ipaddress.IPv4Address('1.1.1.0')
        # check that addresses are subsumed properly.
        collapsed = ipaddress.collapse_addresses(
            [ip1, ip2, ip3, ip4, ip5, ip6])
        self.assertEqual(list(collapsed),
                [ipaddress.IPv4Network('1.1.1.0/30'),
                 ipaddress.IPv4Network('1.1.1.4/32')])
        # test a mix of IP addresses and networks including some duplicates
        ip1 = ipaddress.IPv4Address('1.1.1.0')
        ip2 = ipaddress.IPv4Address('1.1.1.1')
        ip3 = ipaddress.IPv4Address('1.1.1.2')
        ip4 = ipaddress.IPv4Address('1.1.1.3')
        #ip5 = ipaddress.IPv4Interface('1.1.1.4/30')
        #ip6 = ipaddress.IPv4Interface('1.1.1.4/30')
        # check that addresses are subsumed properly.
        collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
        self.assertEqual(list(collapsed),
                         [ipaddress.IPv4Network('1.1.1.0/30')])
        # test only IP networks
        ip1 = ipaddress.IPv4Network('1.1.0.0/24')
        ip2 = ipaddress.IPv4Network('1.1.1.0/24')
        ip3 = ipaddress.IPv4Network('1.1.2.0/24')
        ip4 = ipaddress.IPv4Network('1.1.3.0/24')
        ip5 = ipaddress.IPv4Network('1.1.4.0/24')
        # stored in no particular order b/c we want CollapseAddr to call
        # [].sort
        ip6 = ipaddress.IPv4Network('1.1.0.0/22')
        # check that addresses are subsumed properly.
        collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5,
                                                  ip6])
        self.assertEqual(list(collapsed),
                         [ipaddress.IPv4Network('1.1.0.0/22'),
                          ipaddress.IPv4Network('1.1.4.0/24')])
        # test that two addresses are supernet'ed properly
        collapsed = ipaddress.collapse_addresses([ip1, ip2])
        self.assertEqual(list(collapsed),
                         [ipaddress.IPv4Network('1.1.0.0/23')])
        # test same IP networks
        ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
        self.assertEqual(list(ipaddress.collapse_addresses(
                    [ip_same1, ip_same2])),
                         [ip_same1])
        # test same IP addresses
        ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
        self.assertEqual(list(ipaddress.collapse_addresses(
                    [ip_same1, ip_same2])),
                         [ipaddress.ip_network('1.1.1.1/32')])
        ip1 = ipaddress.IPv6Network('2001::/100')
        ip2 = ipaddress.IPv6Network('2001::/120')
        ip3 = ipaddress.IPv6Network('2001::/96')
        # test that ipv6 addresses are subsumed properly.
        collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
        self.assertEqual(list(collapsed), [ip3])
        # the toejam test
        addr_tuples = [
                (ipaddress.ip_address('1.1.1.1'),
                 ipaddress.ip_address('::1')),
                (ipaddress.IPv4Network('1.1.0.0/24'),
                 ipaddress.IPv6Network('2001::/120')),
                (ipaddress.IPv4Network('1.1.0.0/32'),
                 ipaddress.IPv6Network('2001::/128')),
        ]
        for ip1, ip2 in addr_tuples:
            self.assertRaises(TypeError, ipaddress.collapse_addresses,
                              [ip1, ip2])
    def testSummarizing(self):
        # summarize_address_range covers the inclusive range [first, last]
        # with the minimal list of networks; only v4/v6 are supported, the
        # endpoints must be the same version, plain addresses, and ordered.
        #ip = ipaddress.ip_address
        #ipnet = ipaddress.ip_network
        summarize = ipaddress.summarize_address_range
        ip1 = ipaddress.ip_address('1.1.1.0')
        ip2 = ipaddress.ip_address('1.1.1.255')
        # summarize works only for IPv4 & IPv6
        class IPv7Address(ipaddress.IPv6Address):
            @property
            def version(self):
                return 7
        ip_invalid1 = IPv7Address('::1')
        ip_invalid2 = IPv7Address('::1')
        self.assertRaises(ValueError, list,
                          summarize(ip_invalid1, ip_invalid2))
        # test that a summary over ip4 & ip6 fails
        self.assertRaises(TypeError, list,
                          summarize(ip1, ipaddress.IPv6Address('::1')))
        # test a /24 is summarized properly
        self.assertEqual(list(summarize(ip1, ip2))[0],
                         ipaddress.ip_network('1.1.1.0/24'))
        # test an IPv4 range that isn't on a network byte boundary
        ip2 = ipaddress.ip_address('1.1.1.8')
        self.assertEqual(list(summarize(ip1, ip2)),
                         [ipaddress.ip_network('1.1.1.0/29'),
                          ipaddress.ip_network('1.1.1.8')])
        # all!
        ip1 = ipaddress.IPv4Address(0)
        ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
        self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
                         list(summarize(ip1, ip2)))
        ip1 = ipaddress.ip_address('1::')
        ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test a IPv6 is summarized properly
        self.assertEqual(list(summarize(ip1, ip2))[0],
                         ipaddress.ip_network('1::/16'))
        # test an IPv6 range that isn't on a network byte boundary
        ip2 = ipaddress.ip_address('2::')
        self.assertEqual(list(summarize(ip1, ip2)),
                         [ipaddress.ip_network('1::/16'),
                          ipaddress.ip_network('2::/128')])
        # test exception raised when first is greater than last
        self.assertRaises(ValueError, list,
                          summarize(ipaddress.ip_address('1.1.1.0'),
                                    ipaddress.ip_address('1.1.0.0')))
        # test exception raised when first and last aren't IP addresses
        self.assertRaises(TypeError, list,
                          summarize(ipaddress.ip_network('1.1.1.0'),
                                    ipaddress.ip_network('1.1.0.0')))
        # NOTE(review): the next assertion is an exact duplicate of the
        # one above.
        self.assertRaises(TypeError, list,
                          summarize(ipaddress.ip_network('1.1.1.0'),
                                    ipaddress.ip_network('1.1.0.0')))
        # test exception raised when first and last are not same version
        self.assertRaises(TypeError, list,
                          summarize(ipaddress.ip_address('::'),
                                    ipaddress.ip_network('1.1.0.0')))
def testAddressComparison(self):
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.1'))
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.2'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::1'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::2'))
def testInterfaceComparison(self):
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.1'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.2'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::1'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddress.IPv4Network('1.1.1.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/32')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip1), 0)
# if addresses are the same, sort by netmask
self.assertEqual(ip1.compare_networks(ip2), -1)
self.assertEqual(ip2.compare_networks(ip1), 1)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertEqual(ip3.compare_networks(ip1), 1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddress.IPv6Network('2001:2000::/96')
ip2 = ipaddress.IPv6Network('2001:2001::/96')
ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
self.assertRaises(TypeError,
self.ipv4_network.compare_networks,
self.ipv6_network)
ipv6 = ipaddress.IPv6Interface('::/0')
ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddress.ip_network('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddress.ip_network('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddress.ip_network('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddress.ip_network('10.10.10.0/31')
ip2 = ipaddress.ip_network('10.10.10.0')
ip3 = ipaddress.ip_network('10.10.10.2/31')
ip4 = ipaddress.ip_network('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertRaises(TypeError, ip1.__lt__,
ipaddress.ip_address('10.10.10.0'))
self.assertRaises(TypeError, ip2.__lt__,
ipaddress.ip_address('10.10.10.0'))
# <=, >=
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.2'))
self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::2'))
self.assertFalse(ipaddress.ip_network('::2') <=
ipaddress.ip_network('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24')
self.assertRaises(ValueError, ipaddress.ip_network, '::1/120')
def testOverlaps(self):
other = ipaddress.IPv4Network('1.2.3.0/30')
other2 = ipaddress.IPv4Network('1.2.2.0/24')
other3 = ipaddress.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4_network.overlaps(other))
self.assertFalse(self.ipv4_network.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
    def testEmbeddedIpv4(self):
        # An IPv4-compatible address (::a.b.c.d) has the same integer
        # value as the v4 address; the v4-mapped form (::ffff:a.b.c.d)
        # does not compare equal to it.  Malformed mixed notation raises
        # AddressValueError.
        ipv4_string = '192.168.0.1'
        ipv4 = ipaddress.IPv4Interface(ipv4_string)
        v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
        self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
        v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
        self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
        self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
                          '2001:1.1.1.1:1.1.1.1')
    # Issue 67: IPv6 with embedded IPv4 address not recognized.
    def testIPv6AddressTooLarge(self):
        # NOTE(review): despite the name, this checks that RFC 4291
        # embedded-IPv4 forms parse to the equivalent hex addresses.
        # RFC4291 2.5.5.2
        self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
                         ipaddress.ip_address('::FFFF:c000:201'))
        # RFC4291 2.2 (part 3) x::d.d.d.d
        self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
                         ipaddress.ip_address('FFFF::c000:201'))
def testIPVersion(self):
self.assertEqual(self.ipv4_address.version, 4)
self.assertEqual(self.ipv6_address.version, 6)
def testMaxPrefixLength(self):
self.assertEqual(self.ipv4_interface.max_prefixlen, 32)
self.assertEqual(self.ipv6_interface.max_prefixlen, 128)
    def testPacked(self):
        # packed is the big-endian bytes representation of the address.
        self.assertEqual(self.ipv4_address.packed,
                         b'\x01\x02\x03\x04')
        self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
                         b'\xff\xfe\xfd\xfc')
        self.assertEqual(self.ipv6_address.packed,
                         b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
                         b'\x02\x00\x00\x00\x00\x00\x00\x01')
        self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
                         b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
                         + b'\x00' * 6)
        self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
                         b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)
def testIpType(self):
ipv4net = ipaddress.ip_network('1.2.3.4')
ipv4addr = ipaddress.ip_address('1.2.3.4')
ipv6net = ipaddress.ip_network('::1.2.3.4')
ipv6addr = ipaddress.ip_address('::1.2.3.4')
self.assertEqual(ipaddress.IPv4Network, type(ipv4net))
self.assertEqual(ipaddress.IPv4Address, type(ipv4addr))
self.assertEqual(ipaddress.IPv6Network, type(ipv6net))
self.assertEqual(ipaddress.IPv6Address, type(ipv6addr))
    def testReservedIpv4(self):
        # Classification properties (is_multicast, is_private, is_reserved,
        # is_link_local, is_loopback, is_global, is_unspecified) on v4
        # networks/interfaces, then on plain addresses.
        # test networks
        self.assertEqual(True, ipaddress.ip_interface(
                '224.1.1.1/31').is_multicast)
        self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
        self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
        self.assertEqual(True, ipaddress.ip_interface(
                '192.168.1.1/17').is_private)
        self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
        self.assertEqual(True, ipaddress.ip_network(
                '10.255.255.255').is_private)
        self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
        self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
        self.assertEqual(True, ipaddress.ip_network(
                '172.31.255.255').is_private)
        self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
        self.assertEqual(True,
                         ipaddress.ip_network('169.254.1.0/24').is_link_local)
        self.assertEqual(True,
                         ipaddress.ip_interface(
                             '169.254.100.200/24').is_link_local)
        self.assertEqual(False,
                         ipaddress.ip_interface(
                             '169.255.100.200/24').is_link_local)
        self.assertEqual(True,
                         ipaddress.ip_network(
                             '127.100.200.254/32').is_loopback)
        self.assertEqual(True, ipaddress.ip_network(
                '127.42.0.0/16').is_loopback)
        self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
        self.assertEqual(False,
                         ipaddress.ip_network('100.64.0.0/10').is_private)
        self.assertEqual(False, ipaddress.ip_network('100.64.0.0/10').is_global)
        self.assertEqual(True,
                         ipaddress.ip_network('192.0.2.128/25').is_private)
        self.assertEqual(True,
                         ipaddress.ip_network('192.0.3.0/24').is_global)
        # test addresses
        self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
        self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
        self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
        self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
        self.assertEqual(False,
                         ipaddress.ip_address('239.255.255.255').is_reserved)
        self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
        self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
        self.assertEqual(True, ipaddress.ip_address(
                '10.255.255.255').is_private)
        self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
        self.assertEqual(True, ipaddress.ip_address(
                '172.31.255.255').is_private)
        self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
        self.assertEqual(True,
                         ipaddress.ip_address('169.254.100.200').is_link_local)
        self.assertEqual(False,
                         ipaddress.ip_address('169.255.100.200').is_link_local)
        self.assertEqual(True,
                         ipaddress.ip_address('127.100.200.254').is_loopback)
        self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
        self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
        self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
    def testReservedIpv6(self):
        # Classification properties on v6 networks first, then on plain
        # addresses (is_multicast, is_site_local, is_private,
        # is_link_local, is_loopback, is_unspecified, is_reserved,
        # is_global).
        self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
        self.assertEqual(True, ipaddress.ip_network(2**128 - 1).is_multicast)
        self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
        self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
        self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
        self.assertEqual(True, ipaddress.ip_network(
                'feff:ffff:ffff:ffff::').is_site_local)
        self.assertEqual(False, ipaddress.ip_network(
                'fbf:ffff::').is_site_local)
        self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
        self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
        self.assertEqual(True, ipaddress.ip_network(
                'fc00:ffff:ffff:ffff::').is_private)
        self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
        self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
        self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
        self.assertEqual(True, ipaddress.ip_network(
                'febf:ffff::').is_link_local)
        self.assertEqual(False, ipaddress.ip_network(
                'fe7f:ffff::').is_link_local)
        self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
        self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
        self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
        self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
        self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
        self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
        self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
        self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
        self.assertEqual(True,
                         ipaddress.ip_network('2001::1/128').is_private)
        self.assertEqual(True,
                         ipaddress.ip_network('200::1/128').is_global)
        # test addresses
        self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
        self.assertEqual(True, ipaddress.ip_address(2**128 - 1).is_multicast)
        self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
        self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
        self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
        self.assertEqual(True, ipaddress.ip_address(
                'feff:ffff:ffff:ffff::').is_site_local)
        self.assertEqual(False, ipaddress.ip_address(
                'fbf:ffff::').is_site_local)
        self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
        self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
        self.assertEqual(True, ipaddress.ip_address(
                'fc00:ffff:ffff:ffff::').is_private)
        self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
        self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
        self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
        self.assertEqual(True, ipaddress.ip_address(
                'febf:ffff::').is_link_local)
        self.assertEqual(False, ipaddress.ip_address(
                'fe7f:ffff::').is_link_local)
        self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
        self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
        self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
        self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
        self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
        self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
        # some generic IETF reserved addresses
        self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
        self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(
ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
    def testAddrExclude(self):
        # address_exclude removes a subnet, yielding the remainder;
        # a non-contained network raises ValueError, a plain address or a
        # different IP version raises TypeError, and excluding the whole
        # network yields nothing.
        addr1 = ipaddress.ip_network('10.1.1.0/24')
        addr2 = ipaddress.ip_network('10.1.1.0/26')
        addr3 = ipaddress.ip_network('10.2.1.0/24')
        addr4 = ipaddress.ip_address('10.1.1.0')
        addr5 = ipaddress.ip_network('2001:db8::0/32')
        self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
                         [ipaddress.ip_network('10.1.1.64/26'),
                          ipaddress.ip_network('10.1.1.128/25')])
        self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
        self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
        self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
        self.assertEqual(list(addr1.address_exclude(addr1)), [])
    def testHash(self):
        # Equal objects hash equal, and addresses work as dict keys.
        self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
                         hash(ipaddress.ip_interface('10.1.1.0/24')))
        self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
                         hash(ipaddress.ip_network('10.1.1.0/24')))
        self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
                         hash(ipaddress.ip_address('10.1.1.0')))
        # i70
        self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')),
                         hash(ipaddress.ip_address(
                    int(ipaddress.ip_address('1.2.3.4')._ip))))
        ip1 = ipaddress.ip_address('10.1.1.0')
        ip2 = ipaddress.ip_address('1::')
        dummy = {}
        dummy[self.ipv4_address] = None
        dummy[self.ipv6_address] = None
        dummy[ip1] = None
        dummy[ip2] = None
        self.assertIn(self.ipv4_address, dummy)
        self.assertIn(ip2, dummy)
    def testIPBases(self):
        # compressed string form, and the range guard in the private
        # _string_from_ip_int helper (value above 2**128 - 1 rejected).
        net = self.ipv4_network
        self.assertEqual('1.2.3.0/24', net.compressed)
        net = self.ipv6_network
        self.assertRaises(ValueError, net._string_from_ip_int, 2**128 + 1)
    def testIPv6NetworkHelpers(self):
        # with_prefixlen / with_netmask / with_hostmask formatting for v6.
        net = self.ipv6_network
        self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
        self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
                         net.with_netmask)
        self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
                         net.with_hostmask)
        self.assertEqual('2001:658:22a:cafe::/64', str(net))
def testIPv4NetworkHelpers(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.with_prefixlen)
self.assertEqual('1.2.3.0/255.255.255.0', net.with_netmask)
self.assertEqual('1.2.3.0/0.0.0.255', net.with_hostmask)
self.assertEqual('1.2.3.0/24', str(net))
    def testCopyConstructor(self):
        # Constructors accept an existing object of the same kind and
        # produce an equal one.
        addr1 = ipaddress.ip_network('10.1.1.0/24')
        addr2 = ipaddress.ip_network(addr1)
        addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
        addr4 = ipaddress.ip_interface(addr3)
        addr5 = ipaddress.IPv4Address('1.1.1.1')
        addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
        self.assertEqual(addr1, addr2)
        self.assertEqual(addr3, addr4)
        self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
        self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
uncompressed)))
    def testExplodeShortHandIpStr(self):
        # exploded restores the full zero-padded group notation; IPv4
        # addresses are returned unchanged.
        addr1 = ipaddress.IPv6Interface('2001::1')
        addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
        addr3 = ipaddress.IPv6Network('2001::/96')
        addr4 = ipaddress.IPv4Address('192.168.178.1')
        self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
                         addr1.exploded)
        self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
                         ipaddress.IPv6Interface('::1/128').exploded)
        # issue 77
        self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
                         addr2.exploded)
        self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
                         addr3.exploded)
        self.assertEqual('192.168.178.1', addr4.exploded)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4_address))
self.assertEqual(42540616829182469433547762482097946625,
int(self.ipv6_address))
def testForceVersion(self):
self.assertEqual(ipaddress.ip_network(1).version, 4)
self.assertEqual(ipaddress.IPv6Network(1).version, 6)
    def testWithStar(self):
        # with_prefixlen / with_netmask / with_hostmask on interfaces.
        self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
        self.assertEqual(self.ipv4_interface.with_netmask,
                         "1.2.3.4/255.255.255.0")
        self.assertEqual(self.ipv4_interface.with_hostmask,
                         "1.2.3.4/0.0.0.255")
        self.assertEqual(self.ipv6_interface.with_prefixlen,
                         '2001:658:22a:cafe:200::1/64')
        self.assertEqual(self.ipv6_interface.with_netmask,
                         '2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
        # this probably don't make much sense, but it's included for
        # compatibility with ipv4
        self.assertEqual(self.ipv6_interface.with_hostmask,
                         '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
    def testNetworkElementCaching(self):
        # network_address / broadcast_address / hostmask are computed
        # lazily and memoized in the network's _cache dict (private API).
        # V4 - make sure we're empty
        self.assertNotIn('network_address', self.ipv4_network._cache)
        self.assertNotIn('broadcast_address', self.ipv4_network._cache)
        self.assertNotIn('hostmask', self.ipv4_network._cache)
        # V4 - populate and test
        self.assertEqual(self.ipv4_network.network_address,
                         ipaddress.IPv4Address('1.2.3.0'))
        self.assertEqual(self.ipv4_network.broadcast_address,
                         ipaddress.IPv4Address('1.2.3.255'))
        self.assertEqual(self.ipv4_network.hostmask,
                         ipaddress.IPv4Address('0.0.0.255'))
        # V4 - check we're cached
        self.assertIn('broadcast_address', self.ipv4_network._cache)
        self.assertIn('hostmask', self.ipv4_network._cache)
        # V6 - make sure we're empty
        self.assertNotIn('broadcast_address', self.ipv6_network._cache)
        self.assertNotIn('hostmask', self.ipv6_network._cache)
        # V6 - populate and test
        self.assertEqual(self.ipv6_network.network_address,
                         ipaddress.IPv6Address('2001:658:22a:cafe::'))
        self.assertEqual(self.ipv6_interface.network.network_address,
                         ipaddress.IPv6Address('2001:658:22a:cafe::'))
        self.assertEqual(
            self.ipv6_network.broadcast_address,
            ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
        self.assertEqual(self.ipv6_network.hostmask,
                         ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
        self.assertEqual(
            self.ipv6_interface.network.broadcast_address,
            ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
        self.assertEqual(self.ipv6_interface.network.hostmask,
                         ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
        # V6 - check we're cached
        self.assertIn('broadcast_address', self.ipv6_network._cache)
        self.assertIn('hostmask', self.ipv6_network._cache)
        self.assertIn('broadcast_address', self.ipv6_interface.network._cache)
        self.assertIn('hostmask', self.ipv6_interface.network._cache)
    def testTeredo(self):
        # teredo unpacks (server, client) from a 2001::/32 address and is
        # falsy for anything outside that prefix.
        # stolen from wikipedia
        server = ipaddress.IPv4Address('65.54.227.120')
        client = ipaddress.IPv4Address('192.0.2.45')
        teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
        self.assertEqual((server, client),
                         ipaddress.ip_address(teredo_addr).teredo)
        bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
        self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
        bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
        self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
        # i77
        teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
        self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
                          ipaddress.IPv4Address('95.26.244.94')),
                         teredo_addr.teredo)
def testsixtofour(self):
    """Check the .sixtofour property for 2002::/16 and non-6to4 addresses."""
    addr = ipaddress.ip_address('2002:ac1d:2d64::1')
    # 0xac1d2d64 is the embedded IPv4 address 172.29.45.100.
    self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
                     addr.sixtofour)
    # Addresses outside the 2002::/16 prefix carry no 6to4 mapping.
    self.assertFalse(ipaddress.ip_address('2000:ac1d:2d64::1').sixtofour)


if __name__ == '__main__':
    unittest.main()
|
PennartLoettring/Poettrix
|
rootfs/usr/lib/python3.4/test/test_ipaddress.py
|
Python
|
gpl-2.0
| 74,848
|
[
"FEFF"
] |
d778f3e7b798c7a8349e9bb8d2bc4466158d3d2943eae852b86ce3f441c1a1c1
|
"""Code for converting notebooks to and from the v2 format.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .nbbase import (
new_code_cell, new_text_cell, new_worksheet, new_notebook, new_output,
nbformat, nbformat_minor
)
from IPython.nbformat import v2
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def convert_to_this_nbformat(nb, orig_version=2, orig_minor=0):
    """Convert a notebook to the v3 format.

    Parameters
    ----------
    nb : NotebookNode
        The Python representation of the notebook to convert.
    orig_version : int
        The original version of the notebook to convert.
    orig_minor : int
        The original minor version of the notebook to convert (only relevant for v >= 3).

    Returns
    -------
    NotebookNode
        The notebook, upgraded in place to the v3 format.

    Raises
    ------
    ValueError
        If `orig_version` is not 1, 2 or 3.
    """
    # v1 notebooks are first upgraded to v2, then fall through to the
    # v2 -> v3 branch below.
    if orig_version == 1:
        nb = v2.convert_to_this_nbformat(nb)
        orig_version = 2
    if orig_version == 2:
        # Mark the original nbformat so consumers know it has been converted.
        nb.nbformat = nbformat
        nb.nbformat_minor = nbformat_minor
        nb.orig_nbformat = 2
        return nb
    if orig_version == 3:
        # Already v3: only the minor version may need reconciling.
        if orig_minor != nbformat_minor:
            nb.orig_nbformat_minor = orig_minor
        nb.nbformat_minor = nbformat_minor
        return nb
    raise ValueError('Cannot convert a notebook from v%s to v3' % orig_version)
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/IPython/nbformat/v3/convert.py
|
Python
|
apache-2.0
| 1,966
|
[
"Brian"
] |
c92c616d309b814bb46ff4b8d9fc7fd5168d1c0ee37b4013511f4de52c61335d
|
#-*- coding: iso-8859-15 -*-
# SADR METEOLLSKY -- configuration file for the all-sky camera station.
# http://www.sadr.fr
# SEBASTIEN LECLERC 2017
# Inspired by :
# NACHO MAS 2013
# http://induino.wordpress.com

##### INDI RELATED #####
# Use 'localhost' to start a local indiserver; any other value means an
# already-running remote indiserver is connected to instead.
#INDISERVER="localhost"
INDISERVER="allsky.sadr"
# TCP port of the indiserver (7624 is the INDI default).
INDIPORT="7624"
# INDI device name of the all-sky camera.
INDIDEVICE="QHY CCD QHY5LII-C-6127d"

##### ARDUINO RELATED ####
# Serial port where the Arduino sensor board is attached.
DEVICEPORT="/dev/ttyACM0"

##### SITE RELATED ####
OWNERNAME="SADR"
SITENAME="HACIENDA DES ETOILES"
# Site elevation -- presumably metres above sea level; confirm with users.
ALTITUDE=1540
# Visit http://weather.uwyo.edu/upperair/sounding.html and pick the
# sounding station closest to the site.
SOUNDINGSTATION="07510"

##### RRD RELATED #####
# Output directory for the generated charts.
#CHARTPATH="/var/www/html/CHART/"
CHARTPATH="/media/freebox/Projets/Astronomie/SADR/Allsky/2_Travail/SADR/raspberry/allskySCRIPT/"
# EUMETSAT latest-image URL. Choose one from:
# http://oiswww.eumetsat.org/IPPS/html/latestImages.html
# Natural colour -- nice, but only works in daylight:
#EUMETSAT_LAST="http://oiswww.eumetsat.org/IPPS/html/latestImages/EUMETSAT_MSG_RGB-naturalcolor-westernEurope.jpg"
# Rain (MPE):
#EUMETSAT_LAST="http://oiswww.eumetsat.org/IPPS/html/latestImages/EUMETSAT_MSG_MPE-westernEurope.jpg"
# Cloud cover at IR 3.9 -- also works at night (currently selected):
EUMETSAT_LAST="http://oiswww.eumetsat.org/IPPS/html/latestImages/EUMETSAT_MSG_IR039E-westernEurope.jpg"

##### ALLSKY PICTURE RELATED #####
# Watermark overlaid on each all-sky picture.
WATERMARK="image/watermark.png"

##### PUSHETTA RELATED #####
# Pushetta push-notification API key and target channel.
API_KEY="57a4fc6d834526367da533545287aea54468b311"
CHANNEL_NAME="SADR Meteollsky"
|
broadcastyourseb/SADR
|
raspberry/allskySCRIPT/dev/meteollskyconfig.py
|
Python
|
apache-2.0
| 1,549
|
[
"VisIt"
] |
6db2a662bec179d318d8b914d3f50490cce89aa15d895c3236993a385808ed4e
|
# Copyright (c) 2001 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.

# CVS keyword-expanded revision id of this package marker module.
__revision__ = "$Id: __init__.py,v 1.2 2002/12/02 19:58:54 myers_carpenter Exp $"
|
zooko/egtp_new
|
egtp/crypto/__init__.py
|
Python
|
lgpl-2.1
| 280
|
[
"VisIt"
] |
8363443d689a0d249f8cc0e69fa4fac53a1b3c6512583356698b7500536b3613
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbapplicationpersistenceprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of GslbApplicationPersistenceProfile Avi RESTful Object
description:
- This module is used to configure GslbApplicationPersistenceProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- Field introduced in 17.1.1.
name:
description:
- A user-friendly name for the persistence profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the persistence profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbApplicationPersistenceProfile object
avi_gslbapplicationpersistenceprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbapplicationpersistenceprofile
"""
RETURN = '''
obj:
description: GslbApplicationPersistenceProfile (api/gslbapplicationpersistenceprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: build the argument spec, construct the module
    and delegate to the generic Avi REST API handler."""
    spec = {
        'state': dict(default='present', choices=['absent', 'present']),
        'description': dict(type='str',),
        'name': dict(type='str', required=True),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    # Merge in the connection options shared by every Avi module
    # (controller, username, password, ...).
    spec.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=spec, supports_check_mode=True)
    if not HAS_AVI:
        # The avisdk import at module load time failed; abort with a hint.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'gslbapplicationpersistenceprofile',
                           set([]))


if __name__ == '__main__':
    main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_gslbapplicationpersistenceprofile.py
|
Python
|
bsd-3-clause
| 3,659
|
[
"VisIt"
] |
7489335b8957769e7e2b5c0b159a09a5686d3024de695fe45cd4773e76f81607
|
import tornado.ioloop, tornado.web
from tornado.httpserver import HTTPServer
from drenaj.client.config.config import *
from drenaj.client.frontend.routes_config import routes_config
from jinja2 import Environment, FileSystemLoader
import drenaj.utils.drnj_time as drnj_time
# Tornado application wired to the drenaj route table; cookie_secret signs
# the secure session cookies.
application = tornado.web.Application(routes_config,
        cookie_secret = 'vospRVBgTF6HTnghpd/za+UgiZ/NXUDUkTnYGx1d4hY=')
# Jinja2 environment rendering the templates shipped with the client.
# (Leftover 'PATHPATHPATH' debug prints removed.)
# NOTE(review): `os` is not imported until further down this file, so it is
# expected to come from the star import of drenaj.client.config.config --
# confirm that module re-exports it.
application.settings['env'] = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__),'client/templates')))
# Expose the drnj_time helpers inside every template.
application.settings['env'].globals['drnj_time'] = drnj_time
def bind_server(environment):
    """Bind the global Tornado application to the port configured for
    *environment* in DRENAJ_VIS_PORT.

    xheaders=True makes Tornado honour X-Real-Ip / X-Forwarded-For headers
    when running behind a reverse proxy.
    """
    http_server = HTTPServer(application, xheaders=True)
    http_server.listen(DRENAJ_VIS_PORT[environment])
import signal, os, sys
def start(environment, n_child_processes=4):
    """Start the Celery worker pool and the Tornado web frontend.

    Blocks inside the Tornado IOLoop; the final return is only reached
    once the loop is stopped.
    """
    # run the worker pool servicing client_endpoint tasks
    os.system("celery multi start worker -l debug -f worker.log -c %s -A drenaj.client.celery_app.client_endpoint" % n_child_processes)
    # Make Ctrl-C stop the Celery workers before this process exits.
    signal.signal(signal.SIGINT, stop_all_workers)
    # run the web service
    print "Direnaj Local Visualization and Interaction Manager Starting on port %s" % DRENAJ_VIS_PORT[environment]
    bind_server(environment)
    tornado.ioloop.IOLoop.instance().start()
    return application
def stop_all_workers(signal_no, frame):
    """SIGINT handler: shut down the Celery worker pool and exit cleanly."""
    os.system("celery multi stop worker")
    print("How dare you? Bye bye!")
    sys.exit(0)
def get_access_token():
    """Interactively obtain Twitter OAuth access tokens via the PIN flow.

    In a loop: request a temporary token, direct the user to Twitter's
    authorization URL, exchange the entered pincode for an access token.
    The loop ends when the user enters an empty pincode; every token pair
    collected is then stored in the KeyStore.

    Exits the process (sys.exit(1)) if no consumer key/secret is
    configured in the KeyStore.
    """
    import sys
    # parse_qsl moved to urlparse module in v2.6
    try:
        from urlparse import parse_qsl
    except:
        from cgi import parse_qsl
    import oauth2 as oauth
    import certifi
    REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
    ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
    AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
    SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
    # NOTE(review): KeyStore is presumably re-exported by the star import
    # of drenaj.client.config.config at the top of this file -- confirm.
    keystore = KeyStore()
    access_tokens = []
    consumer_key = keystore.app_consumer_key
    consumer_secret = keystore.app_consumer_secret
    if consumer_key is None or consumer_secret is None:
        print 'You need to edit this script and provide values for the'
        print 'consumer_key and also consumer_secret.'
        print ''
        print 'The values you need come from Twitter - you need to register'
        print 'as a developer your "application". This is needed only until'
        print 'Twitter finishes the idea they have of a way to allow open-source'
        print 'based libraries to have a token that can be used to generate a'
        print 'one-time use key that will allow the library to make the request'
        print 'on your behalf.'
        print ''
        sys.exit(1)
    while True:
        # Step 1: obtain a temporary request token from Twitter.
        signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
        oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
        oauth_client = oauth.Client(oauth_consumer)
        oauth_client.ca_certs = certifi.where()
        print 'Requesting temp token from Twitter'
        resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'GET')
        if resp['status'] != '200':
            print 'Invalid respond from Twitter requesting temp token: %s' % resp['status']
        else:
            request_token = dict(parse_qsl(content))
            # Step 2: send the user to the authorization page to get a pincode.
            print ''
            print 'Please visit this Twitter page and retrieve the pincode to be used'
            print 'in the next step to obtaining an Authentication Token:'
            print ''
            print '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token'])
            print ''
            pincode = raw_input('Pincode? ')
            # An empty pincode ends the collection loop.
            if not pincode:
                print('You did not enter any pincode, finishing setup.')
                break
            # Step 3: exchange the verified request token for an access token.
            token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
            token.set_verifier(pincode)
            print ''
            print 'Generating and signing request for an access token'
            print ''
            oauth_client = oauth.Client(oauth_consumer, token)
            oauth_client.ca_certs = certifi.where()
            resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_callback=oob&oauth_verifier=%s' % pincode)
            access_token = dict(parse_qsl(content))
            if resp['status'] != '200':
                print 'The request for a Token did not succeed: %s' % resp['status']
                print access_token
            else:
                print 'Your Twitter Access Token key: %s' % access_token['oauth_token']
                print ' Access Token secret: %s' % access_token['oauth_token_secret']
                print ''
                access_tokens.append([access_token['oauth_token'], access_token['oauth_token_secret']])
    # Persist every token pair collected during the loop.
    for access_token in access_tokens:
        keystore.insert_access_token(access_token[0], access_token[1])
def _setup_then_start(keystore, n_children):
    """Run the interactive token setup, then start the server if it produced
    at least one access token; otherwise report the failure and exit."""
    get_access_token()
    if not keystore.no_access_tokens():
        start(DRENAJ_VIS_ENVIRONMENT, n_children)
    else:
        print("Please complete the setup process correctly to configure your access token key and secret.")
        sys.exit(1)


def main():
    """Command-line entry point.

    Supported commands: 'runserver' (start, running setup first if no
    tokens exist), 'setup' (interactive token setup, then start) and
    'release_access_tokens'.
    """
    import argparse
    parser = argparse.ArgumentParser(description='drenaj client')
    parser.add_argument('command', help='used for starting or setup')
    parser.add_argument('-c', '--n_children', default=1, help='number of child processes')
    args = parser.parse_args()
    keystore = KeyStore()
    if args.command == 'runserver':
        # Recycle any tokens left locked by a previous run.
        keystore.release_access_tokens()
        if keystore.no_access_tokens():
            _setup_then_start(keystore, args.n_children)
        else:
            start(DRENAJ_VIS_ENVIRONMENT, args.n_children)
    elif args.command == 'setup':
        _setup_then_start(keystore, args.n_children)
    elif args.command == 'release_access_tokens':
        keystore.release_access_tokens()
        print("Access tokens released.")
    # NOTE(review): unrecognized commands currently fall through silently;
    # consider parser.error() for unknown values.


if __name__ == "__main__":
    main()
|
boun-cmpe-soslab/drenaj
|
drenaj/client_startup.py
|
Python
|
mit
| 6,386
|
[
"VisIt"
] |
2f9587cd96a5d015e010a215ccd4c5005c153eb6e1adaf5939d67116e2e24ff3
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
import unittest
from stoqlib.domain.test.domaintest import DomainTest
from stoqlib.lib.formatters import (format_phone_number,
format_sellable_description)
class TestFormatters(DomainTest):

    def test_format_sellable_description(self):
        """format_sellable_description, with and without a storable batch."""
        sellable = self.create_sellable()
        sellable.description = u"Cellphone"
        self.assertEqual(format_sellable_description(sellable),
                         u"Cellphone")
        # With a batch, the batch number is appended to the description.
        storable = self.create_storable(product=sellable.product)
        batch = self.create_storable_batch(storable=storable,
                                           batch_number=u'666')
        self.assertEqual(format_sellable_description(sellable, batch=batch),
                         u"Cellphone [Batch: 666]")

    def test_format_phone_number(self):
        """format_phone_number across service, 0X00, landline and mobile forms."""
        cases = [
            ("190", "190"),
            ("1052", "1052"),
            ("10325", "103 25"),
            ("991236789", "99123-6789"),
            ("0300123456", "0300 123-456"),
            ("03001234567", "0300 123-4567"),
            ("0500700600", "0500 700-600"),
            ("05007006005", "0500 700-6005"),
            ("0800197878", "0800 197-878"),
            ("08001234567", "0800 123-4567"),
            ("0900197878", "0900 197-878"),
            ("09001234567", "0900 123-4567"),
            ("1312345678", "(13) 1234-5678"),
            ("1512345678", "(15) 1234-5678"),
            ("1812345678", "(18) 1234-5678"),
            ("1912345678", "(19) 1234-5678"),
            ("12345678", "1234-5678"),
            ("1612345678", "(16) 1234-5678"),
            ("01612345678", "(16) 1234-5678"),
            ("(16)12345678", "(16) 1234-5678"),
            ("(016)12345678", "(16) 1234-5678"),
            ("11123456789", "(11) 12345-6789"),
            ("011123456789", "(11) 12345-6789"),
        ]
        for raw, formatted in cases:
            self.assertEquals(format_phone_number(raw), formatted)


if __name__ == '__main__':
    unittest.main()
|
andrebellafronte/stoq
|
stoqlib/lib/test/test_formatters.py
|
Python
|
gpl-2.0
| 3,551
|
[
"VisIt"
] |
aae6e8708e562a9f938fbb196ffaefd866bf6f5f2dc7154ce6a864a5a0729abc
|
# -*- coding: utf-8 -*-
"""Test sequences for graphiness.
"""
# Copyright (C) 2004-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from collections import defaultdict
import heapq
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult (dschult@colgate.edu)'
'Joel Miller (joel.c.miller.research@gmail.com)'
'Ben Edwards'
'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['is_graphical',
'is_multigraphical',
'is_pseudographical',
'is_digraphical',
'is_valid_degree_sequence_erdos_gallai',
'is_valid_degree_sequence_havel_hakimi',
'is_valid_degree_sequence', # deprecated
]
def is_graphical(sequence, method='eg'):
    """Returns True if sequence is a valid degree sequence.

    A degree sequence is valid if some graph can realize it.

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    method : "eg" | "hh"
        The method used to validate the degree sequence.
        "eg" corresponds to the Erdős-Gallai algorithm, and
        "hh" to the Havel-Hakimi algorithm.

    Returns
    -------
    valid : bool
        True if the sequence is a valid degree sequence and False if not.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> sequence = G.degree().values()
    >>> nx.is_valid_degree_sequence(sequence)
    True

    References
    ----------
    Erdős-Gallai
        [EG1960]_, [choudum1986]_
    Havel-Hakimi
        [havel1955]_, [hakimi1962]_, [CL1996]_
    """
    # Dispatch table instead of an if/elif chain; both validators expect a
    # concrete list, so the (possibly one-shot) iterable is materialized at
    # the call site.
    validators = {
        'eg': is_valid_degree_sequence_erdos_gallai,
        'hh': is_valid_degree_sequence_havel_hakimi,
    }
    if method not in validators:
        raise nx.NetworkXException("`method` must be 'eg' or 'hh'")
    return validators[method](list(sequence))


# Deprecated alias kept for backwards compatibility.
is_valid_degree_sequence = is_graphical
def _basic_graphical_tests(deg_sequence):
    # Perform cheap sanity checks shared by the graphicality validators.
    #
    # Returns (dmax, dmin, dsum, n, num_degs) computed over the *non-zero*
    # entries only, where num_degs[d] counts occurrences of degree d.
    # Raises nx.NetworkXUnfeasible as soon as the sequence is provably not
    # graphical: non-integer entries, negative or out-of-range degrees,
    # odd degree sum, or more edge endpoints than n*(n-1) allows.
    if not nx.utils.is_list_of_ints(deg_sequence):
        raise nx.NetworkXUnfeasible
    p = len(deg_sequence)
    num_degs = [0]*p
    # dmin starts at p, a valid upper bound since every accepted d is < p.
    dmax, dmin, dsum, n = 0, p, 0, 0
    for d in deg_sequence:
        # Reject if degree is negative or larger than the sequence length
        if d<0 or d>=p:
            raise nx.NetworkXUnfeasible
        # Process only the non-zero integers
        elif d>0:
            dmax, dmin, dsum, n = max(dmax,d), min(dmin,d), dsum+d, n+1
            num_degs[d] += 1
    # Reject sequence if it has odd sum or is oversaturated
    if dsum%2 or dsum>n*(n-1):
        raise nx.NetworkXUnfeasible
    return dmax,dmin,dsum,n,num_degs
def is_valid_degree_sequence_havel_hakimi(deg_sequence):
    r"""Returns True if deg_sequence can be realized by a simple graph.

    The validation proceeds using the Havel-Hakimi theorem.
    Worst-case run time is: O(s) where s is the sum of the sequence.

    Parameters
    ----------
    deg_sequence : list
        A list of integers where each element specifies the degree of a node
        in a graph.

    Returns
    -------
    valid : bool
        True if deg_sequence is graphical and False if not.

    Notes
    -----
    The ZZ condition says that for the sequence d if

    .. math::
        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

    then d is graphical.  This was shown in Theorem 6 in [1]_ and is used
    here as a fast acceptance shortcut before running Havel-Hakimi proper.

    References
    ----------
    .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
    [havel1955]_, [hakimi1962]_, [CL1996]_
    """
    try:
        dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
    except nx.NetworkXUnfeasible:
        return False
    # Accept if sequence has no non-zero degrees or passes the ZZ condition
    if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
        return True
    # Scratch array reused on every round to remember reduced stub counts.
    modstubs = [0]*(dmax+1)
    # Successively reduce degree sequence by removing the maximum degree
    # (Havel-Hakimi step), operating on the degree-count histogram rather
    # than a sorted list so each round is linear in dmax.
    while n > 0:
        # Retrieve the maximum degree in the sequence
        while num_degs[dmax] == 0:
            dmax -= 1;
        # If there are not enough stubs to connect to, then the sequence is
        # not graphical
        if dmax > n-1:
            return False
        # Remove largest stub in list
        num_degs[dmax], n = num_degs[dmax]-1, n-1
        # Reduce the next dmax largest stubs
        mslen = 0
        k = dmax
        for i in range(dmax):
            while num_degs[k] == 0:
                k -= 1
            num_degs[k], n = num_degs[k]-1, n-1
            # Stubs reduced to zero vanish; the rest are re-added below.
            if k > 1:
                modstubs[mslen] = k-1
                mslen += 1
        # Add back to the list any non-zero stubs that were removed
        for i in range(mslen):
            stub = modstubs[i]
            num_degs[stub], n = num_degs[stub]+1, n+1
    return True
def is_valid_degree_sequence_erdos_gallai(deg_sequence):
    r"""Returns True if deg_sequence can be realized by a simple graph.

    The validation is done using the Erdős-Gallai theorem [EG1960]_.

    Parameters
    ----------
    deg_sequence : list
        A list of integers

    Returns
    -------
    valid : bool
        True if deg_sequence is graphical and False if not.

    Notes
    -----
    This implementation uses an equivalent form of the Erdős-Gallai criterion.
    Worst-case run time is: O(n) where n is the length of the sequence.

    Specifically, a sequence d is graphical if and only if the
    sum of the sequence is even and for all strong indices k in the sequence,

    .. math::
        \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k)
             = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j )

    A strong index k is any index where `d_k \geq k` and the value `n_j` is the
    number of occurrences of j in d. The maximal strong index is called the
    Durfee index.
    This particular rearrangement comes from the proof of Theorem 3 in [2]_.

    The ZZ condition says that for the sequence d if

    .. math::
        |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)}

    then d is graphical.  This was shown in Theorem 6 in [2]_ and is used
    here as a fast acceptance shortcut.

    References
    ----------
    .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai",
       Discrete Mathematics, 265, pp. 417-420 (2003).
    .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory
       of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992).
    [EG1960]_, [choudum1986]_
    """
    try:
        dmax,dmin,dsum,n,num_degs = _basic_graphical_tests(deg_sequence)
    except nx.NetworkXUnfeasible:
        return False
    # Accept if sequence has no non-zero degrees or passes the ZZ condition
    if n==0 or 4*dmin*n >= (dmax+dmin+1) * (dmax+dmin+1):
        return True
    # Perform the EG checks using the reformulation of Zverovich and Zverovich,
    # walking the degree histogram from the largest degree downwards and
    # checking the inequality once per run of identical degrees.
    k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0
    for dk in range(dmax, dmin-1, -1):
        if dk < k+1:            # Check if already past Durfee index
            return True
        if num_degs[dk] > 0:
            run_size = num_degs[dk]   # Process a run of identical-valued degrees
            if dk < k+run_size:     # Check if end of run is past Durfee index
                run_size = dk-k   # Adjust back to Durfee index
            sum_deg += run_size * dk
            # Accumulate the n_j and j*n_j terms of the right-hand side.
            for v in range(run_size):
                sum_nj += num_degs[k+v]
                sum_jnj += (k+v) * num_degs[k+v]
            k += run_size
            if sum_deg > k*(n-1) - k*sum_nj + sum_jnj:
                return False
    return True
def is_multigraphical(sequence):
    """Returns True if some multigraph can realize the sequence.

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    Returns
    -------
    valid : bool
        True if the sequence is a multigraphic degree sequence and False if not.

    Notes
    -----
    The worst-case run time is O(n) where n is the length of the sequence.

    References
    ----------
    .. [1] S. L. Hakimi. "On the realizability of a set of integers as
       degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506
       (1962).
    """
    degrees = list(sequence)
    if not nx.utils.is_list_of_ints(degrees):
        return False
    total = 0
    largest = 0
    for degree in degrees:
        # A single negative entry disqualifies the sequence outright.
        if degree < 0:
            return False
        total += degree
        if degree > largest:
            largest = degree
    # Hakimi: multigraphical iff the sum is even and no degree exceeds the
    # sum of all the others (equivalently, 2*max <= sum).
    return total % 2 == 0 and total >= 2 * largest
def is_pseudographical(sequence):
    """Returns True if some pseudograph can realize the sequence.

    Every nonnegative integer sequence with an even sum is pseudographical
    (see [1]_).

    Parameters
    ----------
    sequence : list or iterable container
        A sequence of integer node degrees

    Returns
    -------
    valid : bool
        True if the sequence is a pseudographic degree sequence and False if not.

    Notes
    -----
    The worst-case run time is O(n) where n is the length of the sequence.

    References
    ----------
    .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs
       and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12),
       pp. 778-782 (1976).
    """
    s = list(sequence)
    if not nx.utils.is_list_of_ints(s):
        return False
    # Guard the min() call: an empty sequence has an even sum (0) and is
    # trivially realized by the empty pseudograph, but min([]) would raise
    # ValueError.
    return sum(s) % 2 == 0 and (not s or min(s) >= 0)
def is_digraphical(in_sequence, out_sequence):
    r"""Returns True if some directed graph can realize the in- and out-degree
    sequences.

    Parameters
    ----------
    in_sequence : list or iterable container
        A sequence of integer node in-degrees

    out_sequence : list or iterable container
        A sequence of integer node out-degrees

    Returns
    -------
    valid : bool
        True if in and out-sequences are digraphic False if not.

    Notes
    -----
    This algorithm is from Kleitman and Wang [1]_.
    The worst case runtime is O(s * log n) where s and n are the sum and length
    of the sequences respectively.

    Degrees are stored *negated* in the heaps below so that Python's
    min-heaps behave as max-heaps.

    References
    ----------
    .. [1] D.J. Kleitman and D.L. Wang
       Algorithms for Constructing Graphs and Digraphs with Given Valences
       and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973)
    """
    in_deg_sequence = list(in_sequence)
    out_deg_sequence = list(out_sequence)
    if not nx.utils.is_list_of_ints(in_deg_sequence):
        return False
    if not nx.utils.is_list_of_ints(out_deg_sequence):
        return False
    # Process the sequences and form two heaps to store degree pairs with
    # either zero or non-zero out degrees
    sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence)
    maxn = max(nin, nout)
    maxin = 0
    if maxn==0:
        return True
    stubheap, zeroheap = [ ], [ ]
    # Pad the shorter sequence with zeros so both are length maxn.
    for n in range(maxn):
        in_deg, out_deg = 0, 0
        if n<nout:
            out_deg = out_deg_sequence[n]
        if n<nin:
            in_deg = in_deg_sequence[n]
        if in_deg<0 or out_deg<0:
            return False
        sumin, sumout, maxin = sumin+in_deg, sumout+out_deg, max(maxin, in_deg)
        # Nodes with in-stubs go into stubheap (keyed by negated out-degree
        # then negated in-degree); nodes with only out-stubs go in zeroheap.
        if in_deg > 0:
            stubheap.append((-1*out_deg, -1*in_deg))
        elif out_deg > 0:
            zeroheap.append(-1*out_deg)
    # Every out-stub must pair with exactly one in-stub.
    if sumin != sumout:
        return False
    heapq.heapify(stubheap)
    heapq.heapify(zeroheap)
    modstubs = [(0,0)]*(maxin+1)
    # Successively reduce degree sequence by removing the maximum out degree
    # (Kleitman-Wang reduction step).
    while stubheap:
        # Take the first value in the sequence with non-zero in degree
        (freeout, freein) = heapq.heappop( stubheap )
        freein *= -1
        if freein > len(stubheap)+len(zeroheap):
            return False
        # Attach out stubs to the nodes with the most in stubs
        mslen = 0
        for i in range(freein):
            # Prefer the largest available out-degree (values are negated,
            # so the comparison picks whichever heap top is larger).
            if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]):
                stubout = heapq.heappop(zeroheap)
                stubin = 0
            else:
                (stubout, stubin) = heapq.heappop(stubheap)
            if stubout == 0:
                return False
            # Check if target is now totally connected
            if stubout+1<0 or stubin<0:
                modstubs[mslen] = (stubout+1, stubin)
                mslen += 1
        # Add back the nodes to the heap that still have available stubs
        for i in range(mslen):
            stub = modstubs[i]
            if stub[1] < 0:
                heapq.heappush(stubheap, stub)
            else:
                heapq.heappush(zeroheap, stub[0])
        if freeout<0:
            heapq.heappush(zeroheap, freeout)
    return True
|
KNMI/VERCE
|
verce-hpc-pe/src/networkx/algorithms/graphical.py
|
Python
|
mit
| 12,990
|
[
"Brian"
] |
95ac421862d267ac17d54abed2216a7b85aad7248fa9dfb8fac300a3aad26b26
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.