text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# gaussfitter.py
# created by Adam Ginsburg (adam.ginsburg@colorado.edu or keflavich@gmail.com) 3/17/08)
import numpy
from numpy.ma import median
from numpy import pi
#from scipy import optimize,stats,pi
from mpfit import mpfit
""" Note about mpfit/leastsq: I switched everything over to the Markwardt mpfit
routine for a few reasons, but foremost being the ability to set limits on
parameters, not just force them to be fixed. As far as I can tell, leastsq
does not have that capability. """
"""
To do:
-turn into a class instead of a collection of objects
-implement WCS-based gaussian fitting with correct coordinates
"""
def moments(data, circle, rotate, vheight, estimator=median, **kwargs):
    """Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
    the gaussian parameters of a 2D distribution by calculating its
    moments.  Depending on the input parameters, will only output
    a subset of the above.

    If using masked arrays, pass estimator=numpy.ma.median

    data      - 2D array to estimate parameters from
    circle    - if 0, output separate width_x/width_y; otherwise one width
    rotate    - if 1 (and circle==0), append a rotation angle of 0.
    vheight   - if 1, prepend the estimated background height
    estimator - callable applied to the flattened data to estimate the
                background level (default: numpy.ma.median)

    Raises ValueError if any estimated quantity comes out NaN.
    """
    total = numpy.abs(data).sum()
    Y, X = numpy.indices(data.shape)  # python convention: reverse x,y numpy.indices
    # Centroid estimate: argmax of the intensity-weighted marginal profiles
    # (an argmax, not a true first moment).
    # NOTE(review): the "x"/"y" naming below follows this module's transposed
    # convention (first array axis is "y") -- confirm against twodgaussian's
    # (center_y, center_x) parameter ordering before changing anything here.
    y = numpy.argmax((X * numpy.abs(data)).sum(axis=1) / total)
    x = numpy.argmax((Y * numpy.abs(data)).sum(axis=0) / total)
    col = data[int(y), :]
    # FIRST moment, not second!
    # NOTE(review): the offset subtracted from the pixel index is ``y`` even
    # though ``col`` runs along the second (x) axis (and vice versa for
    # ``row`` below).  This looks like a transposed x/y pair inherited from
    # upstream -- verify before relying on width_x/width_y individually.
    width_x = numpy.sqrt(numpy.abs((numpy.arange(col.size) - y) * col).sum() / numpy.abs(col).sum())
    row = data[:, int(x)]
    width_y = numpy.sqrt(numpy.abs((numpy.arange(row.size) - x) * row).sum() / numpy.abs(row).sum())
    width = (width_x + width_y) / 2.
    # Background level from the estimator; amplitude is the peak above it.
    height = estimator(data.ravel())
    amplitude = data.max() - height
    mylist = [amplitude, x, y]
    if numpy.isnan(width_y) or numpy.isnan(width_x) or numpy.isnan(height) or numpy.isnan(amplitude):
        raise ValueError("something is nan")
    if vheight == 1:
        mylist = [height] + mylist
    if circle == 0:
        mylist = mylist + [width_x, width_y]
        if rotate == 1:
            mylist = mylist + [0.]  # rotation "moment" is just zero...
            # also, circles don't rotate.
    else:
        mylist = mylist + [width]
    return mylist
def twodgaussian(inpars, circle=0, rotate=1, vheight=1, shape=None):
    """Build a 2D gaussian function (or image) from a parameter sequence.

    The returned callable evaluates:
        x' = numpy.cos(rota) * x - numpy.sin(rota) * y
        y' = numpy.sin(rota) * x + numpy.cos(rota) * y
        (rota should be in degrees)
        g = b + a * numpy.exp( - ( ((x-center_x)/width_x)**2 +
            ((y-center_y)/width_y)**2 ) / 2 )

    inpars = (height, amplitude, center_x, center_y, width_x, width_y, rota)
    (b is background height, a is peak amplitude)

    You can choose to ignore / neglect some of the above input parameters
    using the following options:
        circle=0   - default is an elliptical gaussian (different x, y
            widths), but can reduce the input by one parameter if it's
            a circular gaussian
        rotate=1   - default allows rotation of the gaussian ellipse.  Can
            remove last parameter by setting rotate=0
        vheight=1  - default allows a variable height-above-zero, i.e. an
            additive constant for the Gaussian function.  Can remove first
            parameter by setting this to 0
        shape=None - if shape is set (to a 2-parameter list) then returns
            an image with the gaussian defined by inpars
    """
    caller_inpars = inpars           # untouched input, kept for the error text
    queue = list(inpars)             # parameters are consumed front-to-back
    if vheight == 1:
        height = float(queue.pop(0))
    else:
        height = 0.0
    amplitude = float(queue.pop(0))
    # NOTE: the first centre popped is treated as the *y* centre -- this
    # matches the (amplitude, x, y) ordering produced by moments() with this
    # module's transposed axis convention.
    center_y = float(queue.pop(0))
    center_x = float(queue.pop(0))
    if circle == 1:
        width_x = width_y = float(queue.pop(0))
        rotate = 0                   # a circular gaussian has no rotation
    else:
        width_x = float(queue.pop(0))
        width_y = float(queue.pop(0))
    if rotate == 1:
        rota = pi / 180. * float(queue.pop(0))   # degrees -> radians
        rcen_x = center_x * numpy.cos(rota) - center_y * numpy.sin(rota)
        rcen_y = center_x * numpy.sin(rota) + center_y * numpy.cos(rota)
    else:
        rcen_x = center_x
        rcen_y = center_y
    if len(queue) > 0:
        raise ValueError("There are still input parameters:" + str(queue) +
                         " and you've input: " + str(caller_inpars) +
                         " circle=%d, rotate=%d, vheight=%d" % (circle, rotate, vheight))

    def rotgauss(x, y):
        # Rotate the evaluation coordinates into the gaussian's frame, then
        # evaluate the (possibly offset) elliptical gaussian there.
        if rotate == 1:
            xp = x * numpy.cos(rota) - y * numpy.sin(rota)
            yp = x * numpy.sin(rota) + y * numpy.cos(rota)
        else:
            xp = x
            yp = y
        return height + amplitude * numpy.exp(
            -(((rcen_x - xp) / width_x) ** 2 +
              ((rcen_y - yp) / width_y) ** 2) / 2.)

    if shape is not None:
        return rotgauss(*numpy.indices(shape))
    return rotgauss
def gaussfit(data, err=None, params=[], autoderiv=1, return_all=0, circle=0,
        fixed=numpy.repeat(False, 7), limitedmin=[False, False, False, False, True, True, True],
        limitedmax=[False, False, False, False, False, False, True],
        usemoment=numpy.array([], dtype='bool'),
        minpars=numpy.repeat(0, 7), maxpars=[0, 0, 0, 0, 0, 0, 360],
        rotate=1, vheight=1, quiet=True, returnmp=False,
        returnfitimage=False, **kwargs):
    """
    Gaussian fitter with the ability to fit a variety of different forms of
    2-dimensional gaussian.

    Input Parameters:
        data - 2-dimensional data array
        err=None - error array with same size as data array
        params=[] - initial input parameters for Gaussian function.
            (height, amplitude, x, y, width_x, width_y, rota)
            if not input, these will be determined from the moments of the
            system, assuming no rotation
        autoderiv=1 - use the autoderiv provided in the lmder.f function (the
            alternative is to use an analytic derivative with lmdif.f: this
            method is less robust)
        return_all=0 - Default is to return only the Gaussian parameters.
            1 - fit params, fit error
        returnfitimage - returns (best fit params, best fit image)
        returnmp - returns the full mpfit struct
        circle=0 - default is an elliptical gaussian (different x, y widths),
            but can reduce the input by one parameter if it's a circular
            gaussian
        rotate=1 - default allows rotation of the gaussian ellipse.  Can
            remove last parameter by setting rotate=0.  Expects angle in
            DEGREES
        vheight=1 - default allows a variable height-above-zero, i.e. an
            additive constant for the Gaussian function.  Can remove first
            parameter by setting this to 0
        usemoment - can choose which parameters to use a moment estimation
            for.  Other parameters will be taken from params.  Needs to be a
            boolean array.

    Output:
        Default output is a set of Gaussian parameters with the same shape as
        the input parameters.

    Warning: Does NOT necessarily output a rotation angle between 0 and 360
    degrees.
    """
    usemoment = numpy.array(usemoment, dtype='bool')
    params = numpy.array(params, dtype='float')
    # Work on a copy: ``fixed`` defaults to a shared numpy array, and the
    # vheight==0 branch below assigns fixed[0]=1 -- previously that mutated
    # the default argument across calls.
    fixed = numpy.array(fixed)
    if usemoment.any() and len(params) == len(usemoment):
        moment = numpy.array(moments(data, circle, rotate, vheight, **kwargs), dtype='float')
        params[usemoment] = moment[usemoment]
    elif len(params) == 0:
        # (``params == []`` on an ndarray is an elementwise comparison, not a
        # reliable emptiness test; the length check alone is correct.)
        params = (moments(data, circle, rotate, vheight, **kwargs))
    if vheight == 0:
        # Fit with the height parameter present but frozen at zero, so the
        # parameter vector always has the full 7-slot layout.
        vheight = 1
        params = numpy.concatenate([[0], params])
        fixed[0] = 1

    # mpfit will fail if it is given a start parameter outside the allowed range:
    for i in range(len(params)):
        if params[i] > maxpars[i] and limitedmax[i]: params[i] = maxpars[i]
        if params[i] < minpars[i] and limitedmin[i]: params[i] = minpars[i]

    # (The old, never-used leastsq ``errorfunction`` lambdas were removed;
    # mpfitfun below is what the fit actually evaluates.)
    def mpfitfun(data, err):
        # Residual function in the [status, residuals] form mpfit expects.
        # ``err is None`` -- ``err == None`` is elementwise for array err.
        if err is None:
            def f(p, fjac=None):
                return [0, numpy.ravel(data - twodgaussian(p, circle, rotate, vheight)(*numpy.indices(data.shape)))]
        else:
            def f(p, fjac=None):
                return [0, numpy.ravel((data - twodgaussian(p, circle, rotate, vheight)(*numpy.indices(data.shape))) / err)]
        return f

    parinfo = [
        {'n': 1, 'value': params[1], 'limits': [minpars[1], maxpars[1]], 'limited': [limitedmin[1], limitedmax[1]], 'fixed': fixed[1], 'parname': "AMPLITUDE", 'error': 0},
        {'n': 2, 'value': params[2], 'limits': [minpars[2], maxpars[2]], 'limited': [limitedmin[2], limitedmax[2]], 'fixed': fixed[2], 'parname': "XSHIFT", 'error': 0},
        {'n': 3, 'value': params[3], 'limits': [minpars[3], maxpars[3]], 'limited': [limitedmin[3], limitedmax[3]], 'fixed': fixed[3], 'parname': "YSHIFT", 'error': 0},
        {'n': 4, 'value': params[4], 'limits': [minpars[4], maxpars[4]], 'limited': [limitedmin[4], limitedmax[4]], 'fixed': fixed[4], 'parname': "XWIDTH", 'error': 0}]
    if vheight == 1:
        parinfo.insert(0, {'n': 0, 'value': params[0], 'limits': [minpars[0], maxpars[0]], 'limited': [limitedmin[0], limitedmax[0]], 'fixed': fixed[0], 'parname': "HEIGHT", 'error': 0})
    if circle == 0:
        parinfo.append({'n': 5, 'value': params[5], 'limits': [minpars[5], maxpars[5]], 'limited': [limitedmin[5], limitedmax[5]], 'fixed': fixed[5], 'parname': "YWIDTH", 'error': 0})
        if rotate == 1:
            parinfo.append({'n': 6, 'value': params[6], 'limits': [minpars[6], maxpars[6]], 'limited': [limitedmin[6], limitedmax[6]], 'fixed': fixed[6], 'parname': "ROTATION", 'error': 0})

    if autoderiv == 0:
        # the analytic derivative, while not terribly difficult, is less
        # efficient and useful.  I only bothered putting it here because I was
        # instructed to do so for a class project - please ask if you would
        # like this feature implemented
        raise ValueError("I'm sorry, I haven't implemented this feature yet.")
    else:
        mp = mpfit(mpfitfun(data, err), parinfo=parinfo, quiet=quiet)

    if returnmp:
        returns = (mp)
    elif return_all == 0:
        returns = mp.params
    elif return_all == 1:
        returns = mp.params, mp.perror
    if returnfitimage:
        fitimage = twodgaussian(mp.params, circle, rotate, vheight)(*numpy.indices(data.shape))
        returns = (returns, fitimage)
    return returns
def onedgaussian(x, H, A, dx, w):
    """
    Returns a 1-dimensional gaussian of form
    H+A*numpy.exp(-(x-dx)**2/(2*w**2))
    """
    offset = x - dx
    return H + A * numpy.exp(-offset ** 2 / (2 * w ** 2))
def onedgaussfit(xax,data,err=None,params=[0,1,0,1],fixed=[False,False,False,False],limitedmin=[False,False,False,True],
limitedmax=[False,False,False,False],minpars=[0,0,0,0],maxpars=[0,0,0,0],
quiet=True,shh=True):
"""
Inputs:
xax - x axis
data - y axis
err - error corresponding to data
params - Fit parameters: Height of background, Amplitude, Shift, Width
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
def mpfitfun(x,y,err):
if err == None:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))]
else:
def f(p,fjac=None): return [0,(y-onedgaussian(x,*p))/err]
return f
if xax == None:
xax = numpy.arange(len(data))
parinfo = [ {'n':0,'value':params[0],'limits':[minpars[0],maxpars[0]],'limited':[limitedmin[0],limitedmax[0]],'fixed':fixed[0],'parname':"HEIGHT",'error':0} ,
{'n':1,'value':params[1],'limits':[minpars[1],maxpars[1]],'limited':[limitedmin[1],limitedmax[1]],'fixed':fixed[1],'parname':"AMPLITUDE",'error':0},
{'n':2,'value':params[2],'limits':[minpars[2],maxpars[2]],'limited':[limitedmin[2],limitedmax[2]],'fixed':fixed[2],'parname':"SHIFT",'error':0},
{'n':3,'value':params[3],'limits':[minpars[3],maxpars[3]],'limited':[limitedmin[3],limitedmax[3]],'fixed':fixed[3],'parname':"WIDTH",'error':0}]
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,onedgaussian(xax,*mpp),mpperr,chi2
def n_gaussian(pars=None, a=None, dx=None, sigma=None):
    """
    Returns a function that sums over N gaussians, where N is the length of
    dx,sigma,a *OR* N = len(pars) / 3

    The background "height" is assumed to be zero (you must "baseline" your
    spectrum before fitting)

    pars  - a list with len(pars) = 3n, ordered [a, dx, sigma] repeated
            (amplitude, offset, width per component -- the docstring used to
            claim dx,sigma,a order, which did not match the code)
    dx    - offset (velocity center) values
    sigma - line widths
    a     - amplitudes

    Raises ValueError when pars is not given and a/dx/sigma have mismatched
    lengths.
    """
    # Guard against pars=None: the original evaluated len(pars)
    # unconditionally, so calling with only a/dx/sigma raised TypeError.
    if pars is not None and len(pars) % 3 == 0:
        a = [pars[ii] for ii in range(0, len(pars), 3)]
        dx = [pars[ii] for ii in range(1, len(pars), 3)]
        sigma = [pars[ii] for ii in range(2, len(pars), 3)]
    elif not (len(dx) == len(sigma) == len(a)):
        raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx), len(sigma), len(a)))

    def g(x):
        """Evaluate the sum of gaussians on the array x."""
        # NOTE: no factor of 2 in the exponent's denominator (kept from the
        # original definition), so sigma is NOT a standard deviation here.
        v = numpy.zeros(len(x))
        for i in range(len(dx)):
            v += a[i] * numpy.exp(-(x - dx[i]) ** 2 / sigma[i] ** 2)
        return v
    return g
def multigaussfit(xax,data,ngauss=1,err=None,params=[1,0,1],fixed=[False,False,False],limitedmin=[False,False,True],
limitedmax=[False,False,False],minpars=[0,0,0],maxpars=[0,0,0],
quiet=True,shh=True):
"""
An improvement on onedgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
ngauss - How many gaussians to fit? Default 1 (this could supercede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 3*ngauss. If ngauss > 1 and length = 3, they will
be replicated ngauss times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * ngauss
If len(params) % 3 == 0, ngauss will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if len(params) != ngauss and (len(params) / 3) > ngauss:
ngauss = len(params) / 3
# make sure all various things are the right length; if they're not, fix them using the defaults
for parlist in (params,fixed,limitedmin,limitedmax,minpars,maxpars):
if len(parlist) != 3*ngauss:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of gaussians, it will just replicate
if len(parlist) == 3:
parlist *= ngauss
elif parlist==params:
parlist[:] = [1,0,1] * ngauss
elif parlist==fixed or parlist==limitedmax:
parlist[:] = [False,False,False] * ngauss
elif parlist==limitedmin:
parlist[:] = [False,False,True] * ngauss
elif parlist==minpars or parlist==maxpars:
parlist[:] = [0,0,0] * ngauss
def mpfitfun(x,y,err):
if err == None:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-n_gaussian(pars=p)(x))/err]
return f
if xax == None:
xax = numpy.arange(len(data))
parnames = {0:"SHIFT",1:"WIDTH",2:"AMPLITUDE"}
parinfo = [ {'n':ii,'value':params[ii],'limits':[minpars[ii],maxpars[ii]],'limited':[limitedmin[ii],limitedmax[ii]],'fixed':fixed[ii],'parname':parnames[ii/3]+str(ii/3),'error':ii} for ii in xrange(len(params)) ]
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
mpperr = mp.perror
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
if not shh:
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
return mpp,n_gaussian(pars=mpp)(xax),mpperr,chi2
| creyesp/RF_Estimation | LIB/gaussfitter.py | Python | gpl-2.0 | 17,932 | [
"Gaussian"
] | 0a8755cb4edcb412bdf363755a460ac48234f9e29302f3807e1095e2417f8c6b |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file generates the example and showcase figures used in the KDEpy documentation and README.
"""
# For Travis CI
import matplotlib
matplotlib.use("Agg")
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from KDEpy import NaiveKDE, TreeKDE, FFTKDE
def main():
    """Generate the documentation/README figures and save them as PNGs.

    Writes ``showcase.png`` and ``mwe.png`` into ``_static/img/`` next to
    this file; two further comparison figures are created but (mostly) not
    saved.  Relies on the Agg matplotlib backend selected at import time.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    save_path = os.path.join(here, r"_static/img/")

    # -----------------------------------------------------------------------------
    # ------ ADVERTISEMENT PLOT: Create the plot that is shown in the README ------
    # -----------------------------------------------------------------------------
    plt.figure(figsize=(12, 5.5))
    np.random.seed(42)
    FONTSIZE = 15

    # Panel 1: bimodal data with the Improved Sheather-Jones bandwidth
    plt.subplot(2, 3, 1)
    n = 15
    plt.title("Automatic bandwidth,\nrobust w.r.t multimodality", fontsize=FONTSIZE)
    data = np.concatenate((np.random.randn(n), np.random.randn(n) + 10))
    plt.scatter(data, np.zeros_like(data), marker="|", color="red", label="Data")
    x, y = FFTKDE(bw="ISJ").fit(data)()
    plt.plot(x, y, label="FFTKDE")
    plt.yticks([])
    plt.xticks([])
    plt.grid(True, ls="--", zorder=-15)

    # Panel 2: every registered kernel evaluated on a single point at 0
    plt.subplot(2, 3, 2)
    plt.title("9+ kernel functions", fontsize=FONTSIZE)
    for kernel in FFTKDE._available_kernels.keys():
        x, y = FFTKDE(kernel=kernel).fit([0])()
        plt.plot(x, y, label=kernel)
    plt.yticks([])
    plt.xticks([])
    plt.grid(True, ls="--", zorder=-15)

    # Panel 3: 2D KDE contours on a two-cluster sample
    plt.subplot(2, 3, 3)
    plt.title("Fast 2D computations\nusing binning and FFT", fontsize=FONTSIZE)
    n = 16
    gen_random = lambda n: np.random.randn(n).reshape(-1, 1)
    data1 = np.concatenate((gen_random(n), gen_random(n)), axis=1)
    data2 = np.concatenate((gen_random(n) + 1, gen_random(n) + 4), axis=1)
    data = np.concatenate((data1, data2))
    grid_points = 2**7  # Grid points in each dimension
    N = 8  # Number of contours
    x, z = FFTKDE(bw=1).fit(data)((grid_points, grid_points))
    # The grid comes back as (points, 2); recover the 1D axes for contouring.
    x, y = np.unique(x[:, 0]), np.unique(x[:, 1])
    z = z.reshape(grid_points, grid_points).T
    plt.contour(x, y, z, N, linewidths=0.8, colors="k")
    plt.contourf(x, y, z, N, cmap="PuBu")
    plt.plot(data[:, 0], data[:, 1], "ok", ms=2)
    plt.yticks([])
    plt.xticks([])

    # Panel 4: 3D surface plot of a single 2D gaussian kernel
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.colors import LightSource
    from matplotlib import cm

    ax = plt.subplot(2, 3, 4, projection="3d")
    plt.title("Kernels normalized in any\ndimension for any $p$-norm", fontsize=FONTSIZE)
    data = np.array([[0, 0]])
    grid_points = 2**6  # Grid points in each dimension
    x, z = FFTKDE(kernel="gaussian", bw=1, norm=2).fit(data)((grid_points, grid_points))
    x, y = np.unique(x[:, 0]), np.unique(x[:, 1])
    x, y = np.meshgrid(x, y)
    # The +0.1 lifts the surface slightly off the floor of the 3D axes.
    z = z.reshape(grid_points, grid_points).T + 0.1
    ls = LightSource(350, 45)
    rgb = ls.shade(z, cmap=cm.PuBu, vert_exag=0.1, blend_mode="soft")
    surf = ax.plot_surface(
        x,
        y,
        z,
        rstride=1,
        cstride=1,
        facecolors=rgb,
        linewidth=0,
        antialiased=False,
        shade=True,
    )
    ax.view_init(30, 65)
    plt.yticks([])
    plt.xticks([])
    ax.set_zticks([])

    # Panel 5: per-observation weights
    plt.subplot(2, 3, 5)
    plt.title("Individual data points\nmay be weighted", fontsize=FONTSIZE)
    np.random.seed(123)
    n = 5
    data = np.random.randn(n) * 2
    weights = np.random.randn(n) ** 2 + 1
    kernel = "triweight"
    x, y = TreeKDE(kernel=kernel).fit(data, weights)()
    plt.plot(x, y)
    plt.scatter(data, np.zeros_like(data), s=weights * 20, color="red")
    # Dashed curves: each point's individual (weight-scaled) kernel
    for d, w in zip(data, weights):
        y = TreeKDE(kernel=kernel).fit([d], weights=[w])(x) * w / np.sum(weights)
        plt.plot(x, y, "--k", zorder=-15, alpha=0.75)
    plt.yticks([])
    plt.xticks([])
    plt.grid(True, ls="--", zorder=-15)

    # Panel 6: timing demo on one million observations
    plt.subplot(2, 3, 6)
    data = np.random.gamma(10, 100, size=(10**6))
    st = time.perf_counter()
    x, y = FFTKDE(kernel="gaussian", bw=100).fit(data)(2**10)
    timed = (time.perf_counter() - st) * 1000
    plt.plot(x, y)
    plt.title(
        ("One million observations on\n1024 grid" + " points in {} ms".format(int(round(timed, 0)))),
        fontsize=FONTSIZE,
    )
    # Only a 100-point subsample is drawn as the data rug.
    data = np.random.choice(data, size=100, replace=False)
    plt.scatter(data, np.zeros_like(data), marker="|", color="red", label="Data", s=3)
    plt.yticks([])
    plt.xticks([])
    plt.grid(True, ls="--", zorder=-15)

    plt.tight_layout()
    plt.savefig(os.path.join(save_path, r"showcase.png"))

    # -----------------------------------------------------------------------------
    # ------ MINIMAL WORKING EXAMPLE: Showing a simple way to create a plot -------
    # -----------------------------------------------------------------------------
    plt.figure(figsize=(6, 3))
    ##############################
    np.random.seed(42)
    data = norm(loc=0, scale=1).rvs(2**3)
    x, y = TreeKDE(kernel="gaussian", bw="silverman").fit(data).evaluate()
    plt.plot(x, y, label="KDE estimate")
    ##############################
    plt.plot(x, norm(loc=0, scale=1).pdf(x), label="True distribution")
    plt.scatter(data, np.zeros_like(data), marker="|", color="red", label="Data")
    plt.legend(loc="best")
    plt.tight_layout()
    plt.savefig(os.path.join(save_path, r"mwe.png"))

    # -----------------------------------------------------------------------------
    # ------ COMPARING BANDWIDTHS: Different bandwidths on the same data set ------
    # -----------------------------------------------------------------------------
    plt.figure(figsize=(6, 3))
    ##############################
    data = norm(loc=0, scale=1).rvs(2**6)
    for bw in [0.1, "silverman", 1.5]:
        x, y = FFTKDE(kernel="triweight", bw=bw).fit(data).evaluate()
        plt.plot(x, y, label="KDE estimate, bw={}".format(bw))
    ##############################
    plt.scatter(data, np.zeros_like(data), marker="|", color="red", label="Data")
    plt.legend(loc="best")
    plt.tight_layout()
    # plt.savefig(os.path.join(save_path, r'example2.png'))

    # -----------------------------------------------------------------------------
    # ------ EVERY ESTIMATOR: Comparing the different algorithms ------------------
    # -----------------------------------------------------------------------------
    plt.figure(figsize=(6, 3))
    np.random.seed(42)
    data = norm(loc=0, scale=1).rvs(2**3)
    # Small random vertical offsets keep the (otherwise coincident) curves visible.
    for kde in [NaiveKDE, TreeKDE, FFTKDE]:
        x, y = kde(kernel="gaussian", bw="silverman").fit(data).evaluate()
        plt.plot(x, y + np.random.randn() / 100, label=kde.__name__ + " estimate")
    plt.plot(x, norm(loc=0, scale=1).pdf(x), label="True distribution")
    plt.scatter(data, np.zeros_like(data), marker="|", color="red", label="Data")
    plt.legend(loc="best")
    plt.tight_layout()
if __name__ == "__main__":
# main()
np.random.seed(123)
x = [0, 0.1, 0.2, 0.3, 0.4, 1, 2, 3, 4, 5, 7, 9, 14, 19]
x = np.array(x)
x = np.linspace(0, 5) ** 2
y = np.sin(x)
plt.plot(x, y)
y += np.random.randn(len(y)) / 2
plt.scatter(x, y, label="Points")
x_interpol = np.linspace(min(x) - 1, max(x) + 1, num=2**6)
y_interpol = np.interp(x_interpol, x, y)
plt.plot(x_interpol, y_interpol, "--", label="Interpol")
kernel = FFTKDE._available_kernels["box"]
kernel_grid = np.linspace(-kernel.support, kernel.support, num=2**6)
bw = 0.02
kernel_weights = kernel(kernel_grid, bw=bw)
kernel_weights /= np.sum(kernel_weights)
print(kernel_weights)
from scipy.signal import convolve
evaluated = convolve(y_interpol, kernel_weights, mode="same").reshape(-1, 1)
plt.plot(x_interpol, evaluated)
plt.legend()
| tommyod/KDEpy | docs/source/examples.py | Python | gpl-3.0 | 7,718 | [
"Gaussian"
] | bb19baec5bb82aedfb04c2455c0415561c1051a48b54e15dd474f3e077db3f79 |
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating give by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for examples multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country. (experimental)
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled. (experimental)
NB: both these geo attributes are experimental and may change in future
or be completely removed.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_WORKING = True
def __init__(self, downloader=None):
    """Constructor. Receives an optional downloader."""
    # Reset per-instance state that shadows the class-level defaults.
    self._ready = False
    self._x_forwarded_for_ip = None
    self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
    """Receives a URL and returns True if suitable for this IE."""
    # This does not use has/getattr intentionally - we want to know whether
    # we have cached the regexp for *this* class, whereas getattr would also
    # match the superclass
    if '_VALID_URL_RE' not in cls.__dict__:
        # Compile once per class and cache on the class itself.
        cls._VALID_URL_RE = re.compile(cls._VALID_URL)
    return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
    """Return the 'id' group of _VALID_URL matched against *url*.

    Asserts that the URL matches; callers are expected to have checked
    suitability beforehand.
    """
    # Cache the compiled pattern on this exact class (not a superclass).
    if '_VALID_URL_RE' not in cls.__dict__:
        cls._VALID_URL_RE = re.compile(cls._VALID_URL)
    matched = cls._VALID_URL_RE.match(url)
    assert matched
    return compat_str(matched.group('id'))
@classmethod
def working(cls):
    """Report whether this extractor is marked as working (_WORKING)."""
    return cls._WORKING
def initialize(self):
    """Initializes an instance (authentication, etc)."""
    # Geo-bypass setup runs on every call; the real initialization
    # below runs at most once per instance.
    self._initialize_geo_bypass(self._GEO_COUNTRIES)
    if not self._ready:
        self._real_initialize()
        self._ready = True
def _initialize_geo_bypass(self, countries):
    """
    Initialize geo restriction bypass mechanism.

    This method is used to initialize geo bypass mechanism based on faking
    X-Forwarded-For HTTP header. A random country from provided country list
    is selected and a random IP belonging to this country is generated. This
    IP will be passed as X-Forwarded-For HTTP header in all subsequent
    HTTP requests.

    This method will be used for initial geo bypass mechanism initialization
    during the instance initialization with _GEO_COUNTRIES.

    You may also manually call it from extractor's code if geo countries
    information is not available beforehand (e.g. obtained during
    extraction) or for some other reason.
    """
    # Do nothing when a fake IP has already been chosen (e.g. by a retry).
    if not self._x_forwarded_for_ip:
        country_code = self._downloader.params.get('geo_bypass_country', None)
        # If there is no explicit country for geo bypass specified and
        # the extractor is known to be geo restricted let's fake IP
        # as X-Forwarded-For right away.
        if (not country_code and
                self._GEO_BYPASS and
                self._downloader.params.get('geo_bypass', True) and
                countries):
            country_code = random.choice(countries)
        if country_code:
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._downloader.params.get('verbose', False):
                self._downloader.to_screen(
                    '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
def extract(self, url):
    """Extracts URL information and returns it in list of dicts."""
    try:
        # At most two attempts: a second pass only happens when a geo
        # restriction error was retried with a freshly faked IP.
        for _ in range(2):
            try:
                self.initialize()
                ie_result = self._real_extract(url)
                if self._x_forwarded_for_ip:
                    # Record the fake IP so downstream processing can reuse it.
                    ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                return ie_result
            except GeoRestrictedError as e:
                if self.__maybe_fake_ip_and_retry(e.countries):
                    continue
                raise
    except ExtractorError:
        # Already a well-formed extractor error; re-raise untouched.
        raise
    except compat_http_client.IncompleteRead as e:
        raise ExtractorError('A network error has occurred.', cause=e, expected=True)
    except (KeyError, StopIteration) as e:
        raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
    """Choose a fake X-Forwarded-For IP for a retry; True if one was set."""
    # Only fake an IP when bypass is enabled, no explicit country was
    # given, no fake IP is set yet, and the error supplied countries.
    if (not self._downloader.params.get('geo_bypass_country', None) and
            self._GEO_BYPASS and
            self._downloader.params.get('geo_bypass', True) and
            not self._x_forwarded_for_ip and
            countries):
        country_code = random.choice(countries)
        self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
        if self._x_forwarded_for_ip:
            self.report_warning(
                'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                % (self._x_forwarded_for_ip, country_code.upper()))
            return True
    return False
def set_downloader(self, downloader):
    """Attach *downloader* (the driving YoutubeDL-like object) to this IE."""
    self._downloader = downloader
def _real_initialize(self):
    """Real initialization process. Redefine in subclasses."""
    # Intentionally a no-op in the base class.
    pass
def _real_extract(self, url):
    """Real extraction process. Redefine in subclasses."""
    # Intentionally a no-op in the base class.
    pass
@classmethod
def ie_key(cls):
    """A string for getting the InfoExtractor with get_info_extractor"""
    # Extractor classes are conventionally named <Name>IE; drop the suffix.
    return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
    """Extractor display name: the class name without the 'IE' suffix."""
    return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
    """ Returns the response handle.

    note=False suppresses the status message; errnote=False makes the
    method return False silently on network errors.  On failure, raises
    ExtractorError when fatal, otherwise warns and returns False.
    """
    if note is None:
        self.report_download_webpage(video_id)
    elif note is not False:
        if video_id is None:
            self.to_screen('%s' % (note,))
        else:
            self.to_screen('%s: %s' % (video_id, note))
    if isinstance(url_or_request, compat_urllib_request.Request):
        # Merge data/headers/query into the existing Request object.
        url_or_request = update_Request(
            url_or_request, data=data, headers=headers, query=query)
    else:
        if query:
            url_or_request = update_url_query(url_or_request, query)
        if data is not None or headers:
            url_or_request = sanitized_Request(url_or_request, data, headers)
    try:
        return self._downloader.urlopen(url_or_request)
    except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
        if errnote is False:
            return False
        if errnote is None:
            errnote = 'Unable to download webpage'
        errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
        if fatal:
            raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
        else:
            self._downloader.report_warning(errmsg)
            return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
    """ Returns a tuple (page content as string, URL handle) """
    # Strip hashes from the URL (#1038)
    if isinstance(url_or_request, (compat_str, str)):
        url_or_request = url_or_request.partition('#')[0]
    # Some sites check X-Forwarded-For HTTP header in order to figure out
    # the origin of the client behind proxy. This allows bypassing geo
    # restriction by faking this header's value to IP that belongs to some
    # geo unrestricted country. We will do so once we encounter any
    # geo restriction error.
    if self._x_forwarded_for_ip:
        if 'X-Forwarded-For' not in headers:
            headers['X-Forwarded-For'] = self._x_forwarded_for_ip
    urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
    if urlh is False:
        # _request_webpage only returns False in non-fatal mode.
        assert not fatal
        return False
    content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
    return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
    """Raise an expected ExtractorError if *content* is a known blocking page."""
    first_block = content[:512]
    # Websense corporate content filter.
    if ('<title>Access to this site is blocked</title>' in content and
            'Websense' in first_block):
        msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
        blocked_iframe = self._html_search_regex(
            r'<iframe src="([^"]+)"', content,
            'Websense information URL', default=None)
        if blocked_iframe:
            msg += ' Visit %s for more details' % blocked_iframe
        raise ExtractorError(msg, expected=True)
    # Indian government censorship page.
    if '<title>The URL you requested has been blocked</title>' in first_block:
        msg = (
            'Access to this webpage has been blocked by Indian censorship. '
            'Use a VPN or proxy server (with --proxy) to route around it.')
        block_msg = self._html_search_regex(
            r'</h1><p>(.*?)</p>',
            content, 'block message', default=None)
        if block_msg:
            msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
        raise ExtractorError(msg, expected=True)
    # Russian government blocklist (blocklist.rkn.gov.ru).
    if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
            'blocklist.rkn.gov.ru' in content):
        raise ExtractorError(
            'Access to this webpage has been blocked by decision of the Russian government. '
            'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
            expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
    """Read the response body from *urlh*, decode it and return a string.

    Honors the dump_intermediate_pages / write_pages debugging options and
    raises on known blocking pages.  *prefix* bytes, if given, are
    prepended before decoding.
    """
    content_type = urlh.headers.get('Content-Type', '')
    webpage_bytes = urlh.read()
    if prefix is not None:
        webpage_bytes = prefix + webpage_bytes
    if not encoding:
        encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
    if self._downloader.params.get('dump_intermediate_pages', False):
        try:
            url = url_or_request.get_full_url()
        except AttributeError:
            url = url_or_request
        self.to_screen('Dumping request to ' + url)
        dump = base64.b64encode(webpage_bytes).decode('ascii')
        self._downloader.to_screen(dump)
    if self._downloader.params.get('write_pages', False):
        try:
            url = url_or_request.get_full_url()
        except AttributeError:
            url = url_or_request
        basen = '%s_%s' % (video_id, url)
        # Keep the dump file name within common filesystem limits.
        if len(basen) > 240:
            h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
            basen = basen[:240 - len(h)] + h
        raw_filename = basen + '.dump'
        filename = sanitize_filename(raw_filename, restricted=True)
        self.to_screen('Saving request to ' + filename)
        # Working around MAX_PATH limitation on Windows (see
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
        if compat_os_name == 'nt':
            absfilepath = os.path.abspath(filename)
            if len(absfilepath) > 259:
                filename = '\\\\?\\' + absfilepath
        with open(filename, 'wb') as outf:
            outf.write(webpage_bytes)
    try:
        content = webpage_bytes.decode(encoding, 'replace')
    except LookupError:
        # Unknown codec name: fall back to UTF-8.
        content = webpage_bytes.decode('utf-8', 'replace')
    self.__check_blocked(content)
    return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
    """Return the page body as a string (or False on non-fatal failure).

    Retries up to *tries* times on an incomplete read, sleeping
    *timeout* seconds between attempts.
    """
    attempt = 0
    while True:
        try:
            res = self._download_webpage_handle(
                url_or_request, video_id, note, errnote, fatal,
                encoding=encoding, data=data, headers=headers, query=query)
            break
        except compat_http_client.IncompleteRead as e:
            attempt += 1
            if attempt >= tries:
                raise e
            self._sleep(timeout, video_id)
    if res is False:
        return res
    content, _ = res
    return content
def _download_xml(self, url_or_request, video_id,
                  note='Downloading XML', errnote='Unable to download XML',
                  transform_source=None, fatal=True, encoding=None,
                  data=None, headers={}, query={}):
    """Return the xml as an xml.etree.ElementTree.Element"""
    page = self._download_webpage(
        url_or_request, video_id, note, errnote, fatal=fatal,
        encoding=encoding, data=data, headers=headers, query=query)
    if page is False:
        # Non-fatal download failure: propagate the False marker.
        return page
    return self._parse_xml(
        page, video_id, transform_source=transform_source, fatal=fatal)
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
    """Parse *xml_string* into an ElementTree element.

    On parse failure: raises ExtractorError when fatal, otherwise warns
    and implicitly returns None.
    """
    if transform_source:
        xml_string = transform_source(xml_string)
    try:
        return compat_etree_fromstring(xml_string.encode('utf-8'))
    except compat_xml_parse_error as ve:
        errmsg = '%s: Failed to parse XML ' % video_id
        if fatal:
            raise ExtractorError(errmsg, cause=ve)
        else:
            self.report_warning(errmsg + str(ve))
def _download_json(self, url_or_request, video_id,
                   note='Downloading JSON metadata',
                   errnote='Unable to download JSON metadata',
                   transform_source=None,
                   fatal=True, encoding=None, data=None, headers={}, query={}):
    """Download *url_or_request* and deserialize the body as JSON."""
    page = self._download_webpage(
        url_or_request, video_id, note, errnote, fatal=fatal,
        encoding=encoding, data=data, headers=headers, query=query)
    if page is False and not fatal:
        # Non-fatal download failure: signal "no data" to the caller.
        return None
    return self._parse_json(
        page, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
    """Forward a warning to the downloader, tagged with IE name and video id."""
    prefix = '' if video_id is None else '%s: ' % video_id
    self._downloader.report_warning(
        '[%s] %s%s' % (self.IE_NAME, prefix, msg))
def to_screen(self, msg):
    """Print msg to screen, prefixing it with '[ie_name]'"""
    prefixed = '[%s] %s' % (self.IE_NAME, msg)
    self._downloader.to_screen(prefixed)
def report_extraction(self, id_or_name):
    """Announce that information extraction is starting."""
    self.to_screen('%s: Extracting information' % id_or_name)

def report_download_webpage(self, video_id):
    """Announce that the webpage download is starting."""
    self.to_screen('%s: Downloading webpage' % video_id)

def report_age_confirmation(self):
    """Announce the attempt to confirm the user's age."""
    self.to_screen('Confirming age')

def report_login(self):
    """Announce the attempt to log in."""
    self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
    """Raise an expected ExtractorError asking the user for credentials."""
    raise ExtractorError(
        '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
        expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
    """Raise a GeoRestrictedError, optionally listing usable countries."""
    raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
    """Turn regex matches into a deduplicated playlist of url results.

    *getter*, when given, maps each match to its URL first.
    """
    entries = orderedSet(
        self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
        for m in matches)
    return self.playlist_result(
        entries, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
    """
    Perform a regex search on the given string, using a single or a list of
    patterns returning the first matching group.
    In case of failure return a default value or raise a WARNING or a
    RegexNotFoundError, depending on fatal, specifying the field name.
    """
    if isinstance(pattern, (str, compat_str, compiled_regex_type)):
        mobj = re.search(pattern, string, flags)
    else:
        # List of patterns: stop at the first one that matches.
        for p in pattern:
            mobj = re.search(p, string, flags)
            if mobj:
                break
    # Highlight the field name in blue when writing to a color terminal.
    if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
        _name = '\033[0;34m%s\033[0m' % name
    else:
        _name = name
    if mobj:
        if group is None:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        else:
            return mobj.group(group)
    elif default is not NO_DEFAULT:
        return default
    elif fatal:
        raise RegexNotFoundError('Unable to extract %s' % _name)
    else:
        self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
        return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
    """
    Like _search_regex, but strips HTML tags and unescapes entities.
    """
    match = self._search_regex(pattern, string, name, default, fatal, flags, group)
    # Falsy results (None, '') are passed through untouched.
    return clean_html(match).strip() if match else match
def _get_netrc_login_info(self, netrc_machine=None):
    """Look up (username, password) in ~/.netrc for *netrc_machine*.

    Only consulted when the usenetrc option is set; returns (None, None)
    when disabled or on lookup failure (after emitting a warning).
    """
    username = None
    password = None
    netrc_machine = netrc_machine or self._NETRC_MACHINE
    if self._downloader.params.get('usenetrc', False):
        try:
            info = netrc.netrc().authenticators(netrc_machine)
            if info is not None:
                username = info[0]
                password = info[2]
            else:
                raise netrc.NetrcParseError(
                    'No authenticators for %s' % netrc_machine)
        except (IOError, netrc.NetrcParseError) as err:
            self._downloader.report_warning(
                'parsing .netrc: %s' % error_to_compat_str(err))
    return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
    """Search *html* for OpenGraph property *prop* (str or list of fallbacks)."""
    props = [prop] if not isinstance(prop, (list, tuple)) else prop
    if name is None:
        name = 'OpenGraph %s' % props[0]
    regexes = []
    for candidate in props:
        regexes.extend(self._og_regexes(candidate))
    escaped = self._search_regex(regexes, html, name, flags=re.DOTALL, **kargs)
    return unescapeHTML(escaped) if escaped is not None else None
def _og_search_thumbnail(self, html, **kargs):
    """OpenGraph image, reported as 'thumbnail URL' (non-fatal)."""
    return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

def _og_search_description(self, html, **kargs):
    """OpenGraph description (non-fatal)."""
    return self._og_search_property('description', html, fatal=False, **kargs)

def _og_search_title(self, html, **kargs):
    """OpenGraph title."""
    return self._og_search_property('title', html, **kargs)

def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
    """OpenGraph video URL; prefers og:video:secure_url when *secure*."""
    regexes = self._og_regexes('video') + self._og_regexes('video:url')
    if secure:
        regexes = self._og_regexes('video:secure_url') + regexes
    return self._html_search_regex(regexes, html, name, **kargs)

def _og_search_url(self, html, **kargs):
    """OpenGraph canonical URL."""
    return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
    """Extract the content attribute of a <meta> tag named *name*.

    *name* may be a list of fallback names; *display_name* defaults to
    the first one.
    """
    names = [name] if not isinstance(name, (list, tuple)) else name
    if display_name is None:
        display_name = names[0]
    return self._html_search_regex(
        [self._meta_regex(n) for n in names],
        html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
    """Dublin Core creator meta tag, exposed as the uploader."""
    return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
    """Twitter card player URL from the twitter:player meta tag."""
    return self._html_search_meta('twitter:player', html,
                                  'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
    """Find and parse a JSON-LD <script> block in *html*.

    Returns the extracted info dict; when no block is found, returns the
    caller-provided default, or {} when extraction is fatal.
    """
    json_ld = self._search_regex(
        r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
        html, 'JSON-LD', group='json_ld', **kwargs)
    default = kwargs.get('default', NO_DEFAULT)
    if not json_ld:
        return default if default is not NO_DEFAULT else {}
    # JSON-LD may be malformed and thus `fatal` should be respected.
    # At the same time `default` may be passed that assumes `fatal=False`
    # for _search_regex. Let's simulate the same behavior here as well.
    # NOTE: the sentinel must be compared with `is`, not `==` — arbitrary
    # defaults (e.g. containers with a broad __eq__) must never match it.
    fatal = kwargs.get('fatal', True) if default is NO_DEFAULT else False
    return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
    """Convert JSON-LD data (raw string or parsed) into an info dict.

    Understands TVEpisode/Episode, Article and VideoObject items; only
    keys with non-None values are returned.
    """
    if isinstance(json_ld, compat_str):
        json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
    if not json_ld:
        return {}
    info = {}
    if not isinstance(json_ld, (list, tuple, dict)):
        return info
    if isinstance(json_ld, dict):
        # Normalize to a list so a single item and item lists share code.
        json_ld = [json_ld]

    def extract_video_object(e):
        # Flatten a schema.org VideoObject into info-dict fields.
        assert e['@type'] == 'VideoObject'
        info.update({
            'url': e.get('contentUrl'),
            'title': unescapeHTML(e.get('name')),
            'description': unescapeHTML(e.get('description')),
            'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
            'duration': parse_duration(e.get('duration')),
            'timestamp': unified_timestamp(e.get('uploadDate')),
            'filesize': float_or_none(e.get('contentSize')),
            'tbr': int_or_none(e.get('bitrate')),
            'width': int_or_none(e.get('width')),
            'height': int_or_none(e.get('height')),
            'view_count': int_or_none(e.get('interactionCount')),
        })

    for e in json_ld:
        if e.get('@context') == 'http://schema.org':
            item_type = e.get('@type')
            # NOTE(review): a type mismatch aborts the whole scan instead
            # of skipping just this entry — verify before relying on it
            # for multi-entry JSON-LD documents.
            if expected_type is not None and expected_type != item_type:
                return info
            if item_type in ('TVEpisode', 'Episode'):
                info.update({
                    'episode': unescapeHTML(e.get('name')),
                    'episode_number': int_or_none(e.get('episodeNumber')),
                    'description': unescapeHTML(e.get('description')),
                })
                part_of_season = e.get('partOfSeason')
                if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                    info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                    info['series'] = unescapeHTML(part_of_series.get('name'))
            elif item_type == 'Article':
                info.update({
                    'timestamp': parse_iso8601(e.get('datePublished')),
                    'title': unescapeHTML(e.get('headline')),
                    'description': unescapeHTML(e.get('articleBody')),
                })
            elif item_type == 'VideoObject':
                extract_video_object(e)
                # Keep scanning further entries for more VideoObjects.
                continue
            # Other item types may embed a VideoObject under 'video'.
            video = e.get('video')
            if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                extract_video_object(video)
            break
    return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
    """Collect name→value pairs from hidden/submit <input> tags in *html*."""
    # Drop HTML comments so commented-out forms are not picked up.
    html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
    hidden_inputs = {}
    for input_tag in re.findall(r'(?i)(<input[^>]+>)', html):
        attrs = extract_attributes(input_tag)
        # Guard the parsed attributes, not the raw tag string: findall
        # matches are never empty, so the old `if not input` was dead code.
        if not attrs:
            continue
        if attrs.get('type') not in ('hidden', 'submit'):
            continue
        name = attrs.get('name') or attrs.get('id')
        value = attrs.get('value')
        if name and value is not None:
            hidden_inputs[name] = value
    return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
    """Return hidden inputs of the <form> whose id attribute is *form_id*."""
    form_html = self._search_regex(
        r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
        html, '%s form' % form_id, group='form')
    return self._hidden_inputs(form_html)
def _sort_formats(self, formats, field_preference=None):
    """Sort *formats* in-place from worst to best quality.

    When *field_preference* (list/tuple of format keys) is given, sorting
    uses those fields only, in order; otherwise a heuristic composite key
    (preference, codecs, bitrate, resolution, ...) is used.
    """
    if not formats:
        raise ExtractorError('No video formats found')

    for f in formats:
        # Automatically determine tbr when missing based on abr and vbr (improves
        # formats sorting in some cases)
        if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
            f['tbr'] = f['abr'] + f['vbr']

    def _formats_key(f):
        # TODO remove the following workaround
        from ..utils import determine_ext
        if not f.get('ext') and 'url' in f:
            f['ext'] = determine_ext(f['url'])

        if isinstance(field_preference, (list, tuple)):
            # Missing fields sort last: '' for format_id, -1 otherwise.
            return tuple(
                f.get(field)
                if f.get(field) is not None
                else ('' if field == 'format_id' else -1)
                for field in field_preference)

        preference = f.get('preference')
        if preference is None:
            preference = 0
            if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                preference -= 0.5

        protocol = f.get('protocol') or determine_protocol(f)
        proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)

        if f.get('vcodec') == 'none':  # audio only
            preference -= 50
            if self._downloader.params.get('prefer_free_formats'):
                ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
            else:
                ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
            ext_preference = 0
            try:
                audio_ext_preference = ORDER.index(f['ext'])
            except ValueError:
                audio_ext_preference = -1
        else:
            if f.get('acodec') == 'none':  # video only
                preference -= 40
            if self._downloader.params.get('prefer_free_formats'):
                ORDER = ['flv', 'mp4', 'webm']
            else:
                ORDER = ['webm', 'flv', 'mp4']
            try:
                ext_preference = ORDER.index(f['ext'])
            except ValueError:
                ext_preference = -1
            audio_ext_preference = 0

        # Composite key: later fields only break ties among earlier ones.
        return (
            preference,
            f.get('language_preference') if f.get('language_preference') is not None else -1,
            f.get('quality') if f.get('quality') is not None else -1,
            f.get('tbr') if f.get('tbr') is not None else -1,
            f.get('filesize') if f.get('filesize') is not None else -1,
            f.get('vbr') if f.get('vbr') is not None else -1,
            f.get('height') if f.get('height') is not None else -1,
            f.get('width') if f.get('width') is not None else -1,
            proto_preference,
            ext_preference,
            f.get('abr') if f.get('abr') is not None else -1,
            audio_ext_preference,
            f.get('fps') if f.get('fps') is not None else -1,
            f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
            f.get('source_preference') if f.get('source_preference') is not None else -1,
            f.get('format_id') if f.get('format_id') is not None else '',
        )
    formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
    """Probe *url* with a request; False only on URL-level network errors."""
    url = self._proto_relative_url(url, scheme='http:')
    # For now assume non HTTP(S) URLs always valid
    if not (url.startswith('http://') or url.startswith('https://')):
        return True
    try:
        self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
        return True
    except ExtractorError as e:
        if isinstance(e.cause, compat_urllib_error.URLError):
            self.to_screen(
                '%s: %s URL is invalid, skipping' % (video_id, item))
            return False
        # Non-URL errors are unexpected here; let them propagate.
        raise
def http_scheme(self):
    """ Either "http:" or "https:", depending on the user's preferences """
    if self._downloader.params.get('prefer_insecure', False):
        return 'http:'
    return 'https:'
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                         transform_source=lambda s: fix_xml_ampersands(s).strip(),
                         fatal=True, m3u8_id=None):
    """Download an f4m (HDS) manifest and return its formats list."""
    manifest = self._download_xml(
        manifest_url, video_id, 'Downloading f4m manifest',
        'Unable to download f4m manifest',
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
        transform_source=transform_source,
        fatal=fatal)
    if manifest is False:
        # Non-fatal download/parse failure: no formats available.
        return []
    return self._parse_f4m_formats(
        manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
        transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                       transform_source=lambda s: fix_xml_ampersands(s).strip(),
                       fatal=True, m3u8_id=None):
    """Build a formats list from a parsed f4m (HDS) manifest element."""
    # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
    akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
    if akamai_pv is not None and ';' in akamai_pv.text:
        playerVerificationChallenge = akamai_pv.text.split(';')[0]
        if playerVerificationChallenge.strip() != '':
            return []

    formats = []
    manifest_version = '1.0'
    media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
    if not media_nodes:
        # No 1.0 media nodes: fall back to the 2.0 namespace.
        manifest_version = '2.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
    # Remove unsupported DRM protected media from final formats
    # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
    media_nodes = remove_encrypted_media(media_nodes)
    if not media_nodes:
        return formats

    base_url = xpath_text(
        manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
        'base URL', default=None)
    if base_url:
        base_url = base_url.strip()

    bootstrap_info = xpath_element(
        manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
        'bootstrap info', default=None)

    vcodec = None
    mime_type = xpath_text(
        manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
        'base URL', default=None)
    if mime_type and mime_type.startswith('audio/'):
        vcodec = 'none'

    for i, media_el in enumerate(media_nodes):
        tbr = int_or_none(media_el.attrib.get('bitrate'))
        width = int_or_none(media_el.attrib.get('width'))
        height = int_or_none(media_el.attrib.get('height'))
        format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
        # If <bootstrapInfo> is present, the specified f4m is a
        # stream-level manifest, and only set-level manifests may refer to
        # external resources. See section 11.4 and section 4 of F4M spec
        if bootstrap_info is None:
            media_url = None
            # @href is introduced in 2.0, see section 11.6 of F4M spec
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href')
            if media_url is None:
                media_url = media_el.attrib.get('url')
            if not media_url:
                continue
            manifest_url = (
                media_url if media_url.startswith('http://') or media_url.startswith('https://')
                else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself a f4m manifest do the recursive extraction
            # since bitrates in parent manifest (this one) and media_url manifest
            # may differ leading to inability to resolve the format by requested
            # bitrate in f4m downloader
            ext = determine_ext(manifest_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(
                    manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                    transform_source=transform_source, fatal=fatal)
                # Sometimes stream-level manifest contains single media entry that
                # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                # At the same time parent's media entry in set-level manifest may
                # contain it. We will copy it from parent in such cases.
                if len(f4m_formats) == 1:
                    f = f4m_formats[0]
                    f.update({
                        'tbr': f.get('tbr') or tbr,
                        'width': f.get('width') or width,
                        'height': f.get('height') or height,
                        'format_id': f.get('format_id') if not tbr else format_id,
                        'vcodec': vcodec,
                    })
                formats.extend(f4m_formats)
                continue
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    manifest_url, video_id, 'mp4', preference=preference,
                    m3u8_id=m3u8_id, fatal=fatal))
                continue
        formats.append({
            'format_id': format_id,
            'url': manifest_url,
            'manifest_url': manifest_url,
            'ext': 'flv' if bootstrap_info is not None else None,
            'tbr': tbr,
            'width': width,
            'height': height,
            'vcodec': vcodec,
            'preference': preference,
        })
    return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                          entry_protocol='m3u8', preference=None,
                          m3u8_id=None, note=None, errnote=None,
                          fatal=True, live=False):
    """Download an m3u8 playlist and return the formats parsed from it."""
    res = self._download_webpage_handle(
        m3u8_url, video_id,
        note=note or 'Downloading m3u8 information',
        errnote=errnote or 'Failed to download m3u8 information',
        fatal=fatal)
    if res is False:
        # Non-fatal download failure: no formats.
        return []
    m3u8_doc, urlh = res
    # Parse relative entries against the final (post-redirect) URL.
    return self._parse_m3u8_formats(
        m3u8_doc, urlh.geturl(), ext=ext, entry_protocol=entry_protocol,
        preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
                        entry_protocol='m3u8', preference=None,
                        m3u8_id=None, live=False):
    """Parse formats from an already-downloaded m3u8 playlist document.

    m3u8_doc is the playlist text; m3u8_url is its URL, used to resolve
    relative entry URIs.  Returns a list of format dicts; a media
    playlist yields a single entry pointing back at m3u8_url.
    """
    if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
        return []
    formats = []
    # Resolve a possibly-relative URI against the manifest URL.
    format_url = lambda u: (
        u
        if re.match(r'^https?://', u)
        else compat_urlparse.urljoin(m3u8_url, u))
    # References:
    # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
    # 2. https://github.com/rg3/youtube-dl/issues/12211
    # We should try extracting formats only from master playlists [1, 4.3.4],
    # i.e. playlists that describe available qualities. On the other hand
    # media playlists [1, 4.3.3] should be returned as is since they contain
    # just the media without qualities renditions.
    # Fortunately, master playlist can be easily distinguished from media
    # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
    # master playlist tags MUST NOT appear in a media playlist and vice versa.
    # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
    # media playlist and MUST NOT appear in master playlist thus we can
    # clearly detect media playlist with this criterion.
    if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
        return [{
            'url': m3u8_url,
            'format_id': m3u8_id,
            'ext': ext,
            'protocol': entry_protocol,
            'preference': preference,
        }]

    # GROUP-ID -> list of EXT-X-MEDIA attribute dicts for that group.
    groups = {}
    # Attributes of the most recent EXT-X-STREAM-INF tag; the plain URI
    # line that follows it is interpreted against these.
    last_stream_inf = {}

    def extract_media(x_media_line):
        # Register an EXT-X-MEDIA rendition in `groups` and, when it
        # carries its own URI, emit it as a standalone format.
        media = parse_m3u8_attributes(x_media_line)
        # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
        media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
        if not (media_type and group_id and name):
            return
        groups.setdefault(group_id, []).append(media)
        if media_type not in ('VIDEO', 'AUDIO'):
            return
        media_url = media.get('URI')
        if media_url:
            format_id = []
            for v in (group_id, name):
                if v:
                    format_id.append(v)
            f = {
                'format_id': '-'.join(format_id),
                'url': format_url(media_url),
                'manifest_url': m3u8_url,
                'language': media.get('LANGUAGE'),
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }
            if media_type == 'AUDIO':
                f['vcodec'] = 'none'
            formats.append(f)

    def build_stream_name():
        # Despite specification does not mention NAME attribute for
        # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
        # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
        # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
        stream_name = last_stream_inf.get('NAME')
        if stream_name:
            return stream_name
        # If there is no NAME in EXT-X-STREAM-INF it will be obtained
        # from corresponding rendition group
        stream_group_id = last_stream_inf.get('VIDEO')
        if not stream_group_id:
            return
        stream_group = groups.get(stream_group_id)
        if not stream_group:
            return stream_group_id
        rendition = stream_group[0]
        return rendition.get('NAME') or stream_group_id

    for line in m3u8_doc.splitlines():
        if line.startswith('#EXT-X-STREAM-INF:'):
            last_stream_inf = parse_m3u8_attributes(line)
        elif line.startswith('#EXT-X-MEDIA:'):
            extract_media(line)
        elif line.startswith('#') or not line.strip():
            continue
        else:
            # Plain URI line: a variant stream described by the
            # preceding EXT-X-STREAM-INF tag.
            tbr = float_or_none(
                last_stream_inf.get('AVERAGE-BANDWIDTH') or
                last_stream_inf.get('BANDWIDTH'), scale=1000)
            format_id = []
            if m3u8_id:
                format_id.append(m3u8_id)
            stream_name = build_stream_name()
            # Bandwidth of live streams may differ over time thus making
            # format_id unpredictable. So it's better to keep provided
            # format_id intact.
            if not live:
                format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
            manifest_url = format_url(line.strip())
            f = {
                'format_id': '-'.join(format_id),
                'url': manifest_url,
                'manifest_url': m3u8_url,
                'tbr': tbr,
                'ext': ext,
                'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                'protocol': entry_protocol,
                'preference': preference,
            }
            resolution = last_stream_inf.get('RESOLUTION')
            if resolution:
                mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                if mobj:
                    f['width'] = int(mobj.group('width'))
                    f['height'] = int(mobj.group('height'))
            # Unified Streaming Platform embeds audio/video bitrates in the URL.
            mobj = re.search(
                r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
            if mobj:
                abr, vbr = mobj.groups()
                abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                f.update({
                    'vbr': vbr,
                    'abr': abr,
                })
            codecs = parse_codecs(last_stream_inf.get('CODECS'))
            f.update(codecs)
            audio_group_id = last_stream_inf.get('AUDIO')
            # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
            # references a rendition group MUST have a CODECS attribute.
            # However, this is not always respected, for example, [2]
            # contains EXT-X-STREAM-INF tag which references AUDIO
            # rendition group but does not have CODECS and despite
            # referencing an audio group, it represents
            # a complete (with audio and video) format. So, for such cases
            # we will ignore references to rendition groups and treat them
            # as complete formats.
            if audio_group_id and codecs and f.get('vcodec') != 'none':
                audio_group = groups.get(audio_group_id)
                if audio_group and audio_group[0].get('URI'):
                    # TODO: update acodec for audio only formats with
                    # the same GROUP-ID
                    f['acodec'] = 'none'
            formats.append(f)
            last_stream_inf = {}
    return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
    """Download a SMIL manifest and return the formats it describes."""
    smil = self._download_smil(
        smil_url, video_id, fatal=fatal, transform_source=transform_source)
    if smil is False:
        # _download_smil only yields False when fatal is disabled.
        assert not fatal
        return []
    return self._parse_smil_formats(
        smil, smil_url, video_id,
        namespace=self._parse_smil_namespace(smil),
        f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
    """Download a SMIL manifest and return the full info dict parsed from it."""
    smil = self._download_smil(smil_url, video_id, fatal=fatal)
    # An empty dict signals a failed (non-fatal) download.
    return {} if smil is False else self._parse_smil(
        smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
    """Fetch a SMIL document as parsed XML (False on non-fatal failure)."""
    return self._download_xml(
        smil_url, video_id,
        'Downloading SMIL file', 'Unable to download SMIL file',
        fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
    """Build an info dict (formats, subtitles, metadata) from a parsed SMIL document."""
    namespace = self._parse_smil_namespace(smil)

    formats = self._parse_smil_formats(
        smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
    subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

    # The reported id comes from the manifest file name, deliberately
    # shadowing the video_id argument (which was used above).
    video_id = os.path.splitext(url_basename(smil_url))[0]

    title = description = upload_date = None
    # First matching <meta> wins for each field.
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        name = meta.attrib.get('name')
        content = meta.attrib.get('content')
        if not (name and content):
            continue
        if name == 'title' and not title:
            title = content
        elif name in ('description', 'abstract') and not description:
            description = content
        elif name == 'date' and not upload_date:
            upload_date = unified_strdate(content)

    thumbnails = [{
        'id': image.get('type'),
        'url': image.get('src'),
        'width': int_or_none(image.get('width')),
        'height': int_or_none(image.get('height')),
    } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

    return {
        'id': video_id,
        'title': title or video_id,
        'description': description,
        'upload_date': upload_date,
        'thumbnails': thumbnails,
        'formats': formats,
        'subtitles': subtitles,
    }
def _parse_smil_namespace(self, smil):
    """Extract the XML namespace from the root <smil> tag, or None."""
    root_tag = smil.tag
    return self._search_regex(
        r'(?i)^{([^}]+)?}smil$', root_tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
    """Extract format dicts from a parsed SMIL document.

    Handles RTMP streams, m3u8/f4m sub-manifests and plain HTTP media.
    transform_rtmp_url, when given, maps (streamer, play_path) to a
    rewritten pair for the emitted RTMP format.
    """
    # <meta base=...> (or httpBase) overrides the manifest URL as the
    # base for relative media URLs; first one wins.
    base = smil_url
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        b = meta.get('base') or meta.get('httpBase')
        if b:
            base = b
            break

    formats = []
    rtmp_count = 0
    http_count = 0
    m3u8_count = 0

    # Deduplicate media entries by src.
    srcs = []
    media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
    for medium in media:
        src = medium.get('src')
        if not src or src in srcs:
            continue
        srcs.append(src)

        bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
        filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
        width = int_or_none(medium.get('width'))
        height = int_or_none(medium.get('height'))
        proto = medium.get('proto')
        ext = medium.get('ext')
        src_ext = determine_ext(src)
        streamer = medium.get('streamer') or base

        if proto == 'rtmp' or streamer.startswith('rtmp'):
            rtmp_count += 1
            formats.append({
                'url': streamer,
                'play_path': src,
                'ext': 'flv',
                # Prefer the bitrate as the id suffix when known,
                # otherwise fall back to a running counter.
                'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            if transform_rtmp_url:
                streamer, src = transform_rtmp_url(streamer, src)
                # Patch the just-appended format with the rewritten URL.
                formats[-1].update({
                    'url': streamer,
                    'play_path': src,
                })
            continue

        src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
        src_url = src_url.strip()

        if proto == 'm3u8' or src_ext == 'm3u8':
            m3u8_formats = self._extract_m3u8_formats(
                src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
            if len(m3u8_formats) == 1:
                m3u8_count += 1
                m3u8_formats[0].update({
                    'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'width': width,
                    'height': height,
                })
            formats.extend(m3u8_formats)
            continue

        if src_ext == 'f4m':
            f4m_url = src_url
            # NOTE: defaulting f4m_params here rebinds the local, so the
            # defaults persist for subsequent f4m entries in this loop.
            if not f4m_params:
                f4m_params = {
                    'hdcore': '3.2.0',
                    'plugin': 'flowplayer-3.2.0.1',
                }
            f4m_url += '&' if '?' in f4m_url else '?'
            f4m_url += compat_urllib_parse_urlencode(f4m_params)
            formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            continue

        if src_url.startswith('http') and self._is_valid_url(src, video_id):
            http_count += 1
            formats.append({
                'url': src_url,
                'ext': ext or src_ext or 'flv',
                'format_id': 'http-%d' % (bitrate or http_count),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            continue

    return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
    """Collect subtitle tracks from SMIL <textstream> elements, keyed by language."""
    subtitles = {}
    seen_srcs = []
    for textstream in smil.findall(self._xpath_ns('.//textstream', namespace)):
        src = textstream.get('src')
        # Skip entries without a source and deduplicate by src.
        if not src or src in seen_srcs:
            continue
        seen_srcs.append(src)
        ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
        lang = (textstream.get('systemLanguage')
                or textstream.get('systemLanguageName')
                or textstream.get('lang')
                or subtitles_lang)
        subtitles.setdefault(lang, []).append({
            'url': src,
            'ext': ext,
        })
    return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
    """Download an XSPF playlist and parse its entries.

    Returns [] when the download fails and fatal is disabled.
    """
    xspf = self._download_xml(
        # Fixed typo in the user-facing progress note: 'xpsf' -> 'xspf'.
        playlist_url, playlist_id, 'Downloading xspf playlist',
        'Unable to download xspf manifest', fatal=fatal)
    if xspf is False:
        return []
    return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
    """Turn a parsed XSPF playlist document into a list of entry dicts."""
    NS_MAP = {
        'xspf': 'http://xspf.org/ns/0/',
        's1': 'http://static.streamone.nl/player/ns/0',
    }

    def _ns(path):
        # Shorthand for namespace-qualified XPath lookups.
        return xpath_with_ns(path, NS_MAP)

    entries = []
    for track in playlist.findall(_ns('./xspf:trackList/xspf:track')):
        title = xpath_text(track, _ns('./xspf:title'), 'title', default=playlist_id)
        description = xpath_text(track, _ns('./xspf:annotation'), 'description')
        thumbnail = xpath_text(track, _ns('./xspf:image'), 'thumbnail')
        # XSPF durations are in milliseconds.
        duration = float_or_none(
            xpath_text(track, _ns('./xspf:duration'), 'duration'), 1000)
        formats = [{
            'url': location.text,
            'format_id': location.get(_ns('s1:label')),
            'width': int_or_none(location.get(_ns('s1:width'))),
            'height': int_or_none(location.get(_ns('s1:height'))),
        } for location in track.findall(_ns('./xspf:location'))]
        self._sort_formats(formats)
        entries.append({
            'id': playlist_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        })
    return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict=None):
    """Download a DASH MPD manifest and parse its formats.

    formats_dict optionally maps representation ids to base format dicts
    merged into the corresponding parsed formats.
    """
    res = self._download_webpage_handle(
        mpd_url, video_id,
        note=note or 'Downloading MPD manifest',
        errnote=errnote or 'Failed to download MPD manifest',
        fatal=fatal)
    if res is False:
        return []
    mpd, urlh = res
    mpd_base_url = base_url(urlh.geturl())
    return self._parse_mpd_formats(
        compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
        # None replaces the previous mutable `{}` default argument.
        formats_dict=formats_dict if formats_dict is not None else {},
        mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict=None, mpd_url=None):
    """
    Parse formats from MPD manifest.
    References:
     1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
        http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
    2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
    """
    if formats_dict is None:
        # None replaces the previous mutable `{}` default argument.
        formats_dict = {}
    if mpd_doc.get('type') == 'dynamic':
        # Live (dynamic) manifests are not supported here.
        return []

    namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

    def _add_ns(path):
        return self._xpath_ns(path, namespace)

    def is_drm_protected(element):
        return element.find(_add_ns('ContentProtection')) is not None

    def extract_multisegment_info(element, ms_parent_info):
        # Merge element-level segment info over the parent's.
        ms_info = ms_parent_info.copy()

        # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
        # common attributes and elements. We will only extract relevant
        # for us.
        def extract_common(source):
            segment_timeline = source.find(_add_ns('SegmentTimeline'))
            if segment_timeline is not None:
                s_e = segment_timeline.findall(_add_ns('S'))
                if s_e:
                    ms_info['total_number'] = 0
                    ms_info['s'] = []
                    for s in s_e:
                        r = int(s.get('r', 0))
                        ms_info['total_number'] += 1 + r
                        ms_info['s'].append({
                            't': int(s.get('t', 0)),
                            # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                            'd': int(s.attrib['d']),
                            'r': r,
                        })
            start_number = source.get('startNumber')
            if start_number:
                ms_info['start_number'] = int(start_number)
            timescale = source.get('timescale')
            if timescale:
                ms_info['timescale'] = int(timescale)
            segment_duration = source.get('duration')
            if segment_duration:
                ms_info['segment_duration'] = float(segment_duration)

        def extract_Initialization(source):
            initialization = source.find(_add_ns('Initialization'))
            if initialization is not None:
                ms_info['initialization_url'] = initialization.attrib['sourceURL']

        segment_list = element.find(_add_ns('SegmentList'))
        if segment_list is not None:
            extract_common(segment_list)
            extract_Initialization(segment_list)
            segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
            if segment_urls_e:
                ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
        else:
            segment_template = element.find(_add_ns('SegmentTemplate'))
            if segment_template is not None:
                extract_common(segment_template)
                media = segment_template.get('media')
                if media:
                    ms_info['media'] = media
                initialization = segment_template.get('initialization')
                if initialization:
                    ms_info['initialization'] = initialization
                else:
                    extract_Initialization(segment_template)
        return ms_info

    mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
    formats = []
    for period in mpd_doc.findall(_add_ns('Period')):
        period_duration = parse_duration(period.get('duration')) or mpd_duration
        period_ms_info = extract_multisegment_info(period, {
            'start_number': 1,
            'timescale': 1,
        })
        for adaptation_set in period.findall(_add_ns('AdaptationSet')):
            if is_drm_protected(adaptation_set):
                continue
            adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
            for representation in adaptation_set.findall(_add_ns('Representation')):
                if is_drm_protected(representation):
                    continue
                # Representation attributes inherit from the AdaptationSet.
                representation_attrib = adaptation_set.attrib.copy()
                representation_attrib.update(representation.attrib)
                # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                mime_type = representation_attrib['mimeType']
                content_type = mime_type.split('/')[0]
                if content_type == 'text':
                    # TODO implement WebVTT downloading
                    pass
                elif content_type in ('video', 'audio'):
                    # Accumulate BaseURL elements bottom-up until an
                    # absolute URL is formed.
                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if base_url_e is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    f = {
                        'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                        'url': base_url,
                        'manifest_url': mpd_url,
                        'ext': mimetype2ext(mime_type),
                        'width': int_or_none(representation_attrib.get('width')),
                        'height': int_or_none(representation_attrib.get('height')),
                        'tbr': float_or_none(bandwidth, 1000),
                        'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                        'fps': int_or_none(representation_attrib.get('frameRate')),
                        'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                        'format_note': 'DASH %s' % content_type,
                        'filesize': filesize,
                    }
                    f.update(parse_codecs(representation_attrib.get('codecs')))
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        # Convert a DASH $...$ template into a %-format string.
                        t = representation_ms_info[template_name]
                        t = t.replace('$RepresentationID$', representation_id)
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        # Fix: assign the result. The original discarded it,
                        # so literal '$$' escapes were never unescaped to '$'.
                        t = t.replace('$$', '$')
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/rg3/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for s in representation_ms_info['s']:
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                # @r repeats the same segment duration r more times.
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template
                        # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    # NB: MPD manifest may contain direct URLs to unfragmented media.
                    # No fragments key is present in this case.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == representation_id)
                    except StopIteration:
                        full_info = formats_dict.get(representation_id, {}).copy()
                        full_info.update(f)
                        formats.append(full_info)
                    else:
                        existing_format.update(f)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
    return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
    """Download a Smooth Streaming (ISM) manifest and parse its formats."""
    res = self._download_webpage_handle(
        ism_url, video_id,
        note=note or 'Downloading ISM manifest',
        errnote=errnote or 'Failed to download ISM manifest',
        fatal=fatal)
    if res is False:
        # Non-fatal download failure: no formats.
        return []
    ism_doc, urlh = res
    return self._parse_ism_formats(
        compat_etree_fromstring(ism_doc.encode('utf-8')), urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
    """
    Parse formats from ISM manifest.
    References:
     1. [MS-SSTR]: Smooth Streaming Protocol,
        https://msdn.microsoft.com/en-us/library/ff469518.aspx
    """
    # Live and DRM-protected streams are not supported.
    if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
        return []

    duration = int(ism_doc.attrib['Duration'])
    timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

    formats = []
    for stream in ism_doc.findall('StreamIndex'):
        stream_type = stream.get('Type')
        if stream_type not in ('video', 'audio'):
            continue
        url_pattern = stream.attrib['Url']
        stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
        stream_name = stream.get('Name')
        for track in stream.findall('QualityLevel'):
            fourcc = track.get('FourCC')
            # TODO: add support for WVC1 and WMAP
            if fourcc not in ('H264', 'AVC1', 'AACL'):
                self.report_warning('%s is not a supported codec' % fourcc)
                continue
            tbr = int(track.attrib['Bitrate']) // 1000
            # [1] does not mention Width and Height attributes. However,
            # they're often present while MaxWidth and MaxHeight are
            # missing, so should be used as fallbacks
            width = int_or_none(track.get('MaxWidth') or track.get('Width'))
            height = int_or_none(track.get('MaxHeight') or track.get('Height'))
            sampling_rate = int_or_none(track.get('SamplingRate'))

            track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
            track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)

            fragments = []
            fragment_ctx = {
                'time': 0,
            }
            stream_fragments = stream.findall('c')
            for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                if not fragment_ctx['duration']:
                    # No @d attribute: derive duration from the next
                    # fragment's start time (total duration for the last).
                    try:
                        # Fix: index the fragment *list*. The original indexed
                        # the element itself, which looks up its (nonexistent)
                        # children and always fell back to the total duration.
                        next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
                    except IndexError:
                        next_fragment_time = duration
                    fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                for _ in range(fragment_repeat):
                    fragments.append({
                        'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
                        'duration': fragment_ctx['duration'] / stream_timescale,
                    })
                    fragment_ctx['time'] += fragment_ctx['duration']

            format_id = []
            if ism_id:
                format_id.append(ism_id)
            if stream_name:
                format_id.append(stream_name)
            format_id.append(compat_str(tbr))

            formats.append({
                'format_id': '-'.join(format_id),
                'url': ism_url,
                'manifest_url': ism_url,
                'ext': 'ismv' if stream_type == 'video' else 'isma',
                'width': width,
                'height': height,
                'tbr': tbr,
                'asr': sampling_rate,
                'vcodec': 'none' if stream_type == 'audio' else fourcc,
                'acodec': 'none' if stream_type == 'video' else fourcc,
                'protocol': 'ism',
                'fragments': fragments,
                '_download_params': {
                    'duration': duration,
                    'timescale': stream_timescale,
                    'width': width or 0,
                    'height': height or 0,
                    'fourcc': fourcc,
                    'codec_private_data': track.get('CodecPrivateData'),
                    'sampling_rate': sampling_rate,
                    'channels': int_or_none(track.get('Channels', 2)),
                    'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                    'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                },
            })
    return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
    """Extract entries (formats + subtitles + thumbnail) from HTML5
    <video>/<audio> tags (and their amp- equivalents) found in webpage.
    base_url is the page URL used to resolve relative media URLs.
    """
    def absolute_url(video_url):
        # Resolve a possibly-relative media URL against the page URL.
        return compat_urlparse.urljoin(base_url, video_url)

    def parse_content_type(content_type):
        # Derive ext/codecs from a MIME type like 'video/mp4; codecs="avc1"'.
        if not content_type:
            return {}
        ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
        if ctr:
            mimetype, codecs = ctr.groups()
            f = parse_codecs(codecs)
            f['ext'] = mimetype2ext(mimetype)
            return f
        return {}

    def _media_formats(src, cur_media_type, type_info={}):
        # Return (is_plain_url, formats) for one media source URL;
        # m3u8/mpd sources are expanded via their own extractors.
        full_url = absolute_url(src)
        ext = type_info.get('ext') or determine_ext(full_url)
        if ext == 'm3u8':
            is_plain_url = False
            formats = self._extract_m3u8_formats(
                full_url, video_id, ext='mp4',
                entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                preference=preference, fatal=False)
        elif ext == 'mpd':
            is_plain_url = False
            formats = self._extract_mpd_formats(
                full_url, video_id, mpd_id=mpd_id, fatal=False)
        else:
            is_plain_url = True
            formats = [{
                'url': full_url,
                'vcodec': 'none' if cur_media_type == 'audio' else None,
            }]
        return is_plain_url, formats

    entries = []
    # amp-video and amp-audio are very similar to their HTML5 counterparts
    # so we will include them right here (see
    # https://www.ampproject.org/docs/reference/components/amp-video)
    media_tags = [(media_tag, media_type, '')
                  for media_tag, media_type
                  in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
    media_tags.extend(re.findall(
        # We only allow video|audio followed by a whitespace or '>'.
        # Allowing more characters may end up in significant slow down (see
        # https://github.com/rg3/youtube-dl/issues/11979, example URL:
        # http://www.porntrex.com/maps/videositemap.xml).
        r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
    for media_tag, media_type, media_content in media_tags:
        media_info = {
            'formats': [],
            'subtitles': {},
        }
        media_attributes = extract_attributes(media_tag)
        src = media_attributes.get('src')
        if src:
            _, formats = _media_formats(src, media_type)
            media_info['formats'].extend(formats)
        media_info['thumbnail'] = media_attributes.get('poster')
        if media_content:
            for source_tag in re.findall(r'<source[^>]+>', media_content):
                source_attributes = extract_attributes(source_tag)
                src = source_attributes.get('src')
                if not src:
                    continue
                f = parse_content_type(source_attributes.get('type'))
                is_plain_url, formats = _media_formats(src, media_type, f)
                if is_plain_url:
                    # res attribute is not standard but seen several times
                    # in the wild
                    f.update({
                        'height': int_or_none(source_attributes.get('res')),
                        'format_id': source_attributes.get('label'),
                    })
                    f.update(formats[0])
                    media_info['formats'].append(f)
                else:
                    media_info['formats'].extend(formats)
            for track_tag in re.findall(r'<track[^>]+>', media_content):
                track_attributes = extract_attributes(track_tag)
                kind = track_attributes.get('kind')
                if not kind or kind in ('subtitles', 'captions'):
                    src = track_attributes.get('src')
                    if not src:
                        continue
                    lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                    media_info['subtitles'].setdefault(lang, []).append({
                        'url': absolute_url(src),
                    })
        # Only emit an entry when something usable was found.
        if media_info['formats'] or media_info['subtitles']:
            entries.append(media_info)
    return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts=None):
    """Extract HDS and HLS formats from an Akamai manifest URL.

    Derives the sibling f4m/m3u8 manifest URLs from manifest_url (the
    /i/ <-> /z/ path convention) and optionally rewrites their hosts
    via hosts ({'hds': ..., 'hls': ...}).
    """
    if hosts is None:
        # None replaces the previous mutable `{}` default argument.
        hosts = {}
    formats = []

    hdcore_sign = 'hdcore=3.7.0'
    f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
    hds_host = hosts.get('hds')
    if hds_host:
        f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
    if 'hdcore=' not in f4m_url:
        f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
    f4m_formats = self._extract_f4m_formats(
        f4m_url, video_id, f4m_id='hds', fatal=False)
    for entry in f4m_formats:
        # The hdcore signature must also be appended to each segment URL.
        entry.update({'extra_param_to_segment_url': hdcore_sign})
    formats.extend(f4m_formats)

    m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
    hls_host = hosts.get('hls')
    if hls_host:
        m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
    formats.extend(self._extract_m3u8_formats(
        m3u8_url, video_id, 'mp4', 'm3u8_native',
        m3u8_id='hls', fatal=False))
    return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=None):
    """Probe the standard Wowza manifest endpoints for formats.

    skip_protocols lists protocol ids ('m3u8', 'f4m', 'dash', 'smil',
    'rtmp', 'rtsp') that must not be probed.
    """
    if skip_protocols is None:
        # None replaces the previous mutable `[]` default argument.
        skip_protocols = []
    # Strip a trailing manifest file name to get the stream base URL.
    url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
    url_base = self._search_regex(
        r'(?:(?:https?|rtmp|rtsp):)?(//[^?]+)', url, 'format url')
    http_base_url = '%s:%s' % ('http', url_base)
    formats = []
    if 'm3u8' not in skip_protocols:
        formats.extend(self._extract_m3u8_formats(
            http_base_url + '/playlist.m3u8', video_id, 'mp4',
            m3u8_entry_protocol, m3u8_id='hls', fatal=False))
    if 'f4m' not in skip_protocols:
        formats.extend(self._extract_f4m_formats(
            http_base_url + '/manifest.f4m',
            video_id, f4m_id='hds', fatal=False))
    if 'dash' not in skip_protocols:
        formats.extend(self._extract_mpd_formats(
            http_base_url + '/manifest.mpd',
            video_id, mpd_id='dash', fatal=False))
    if re.search(r'(?:/smil:|\.smil)', url_base):
        if 'smil' not in skip_protocols:
            rtmp_formats = self._extract_smil_formats(
                http_base_url + '/jwplayer.smil',
                video_id, fatal=False)
            for rtmp_format in rtmp_formats:
                # Derive an RTSP twin from each RTMP format.
                rtsp_format = rtmp_format.copy()
                rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                del rtsp_format['play_path']
                del rtsp_format['ext']
                rtsp_format.update({
                    'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                    'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                    'protocol': 'rtsp',
                })
                formats.extend([rtmp_format, rtsp_format])
    else:
        for protocol in ('rtmp', 'rtsp'):
            if protocol not in skip_protocols:
                formats.append({
                    'url': '%s:%s' % (protocol, url_base),
                    'format_id': protocol,
                    'protocol': protocol,
                })
    return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
    """Locate a ``jwplayer('...').setup({...})`` call in *webpage* and
    return its options parsed as a dict, or None when absent/unparseable.
    """
    mobj = re.search(
        r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
        webpage)
    if mobj:
        try:
            jwplayer_data = self._parse_json(mobj.group('options'),
                                             video_id=video_id,
                                             transform_source=transform_source)
        except ExtractorError:
            # parse failures are deliberately swallowed: callers treat a
            # None return as "no jwplayer data found"
            pass
        else:
            # only a JSON object is meaningful jwplayer configuration
            if isinstance(jwplayer_data, dict):
                return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
    """Find jwplayer setup options in *webpage* and parse them into an
    info dict / playlist; extra arguments go to _parse_jwplayer_data()."""
    jw_config = self._find_jwplayer_data(
        webpage, video_id, transform_source=js_to_json)
    return self._parse_jwplayer_data(jw_config, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                         m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Turn a jwplayer configuration dict into an info dict (single
    entry) or a playlist result (multiple entries).

    Normalizes the three historical jwplayer config layouts before
    processing, then extracts formats and caption subtitles per entry.
    """
    # JWPlayer backward compatibility: flattened playlists
    # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
    if 'playlist' not in jwplayer_data:
        jwplayer_data = {'playlist': [jwplayer_data]}
    entries = []
    # JWPlayer backward compatibility: single playlist item
    # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
    if not isinstance(jwplayer_data['playlist'], list):
        jwplayer_data['playlist'] = [jwplayer_data['playlist']]
    for video_data in jwplayer_data['playlist']:
        # JWPlayer backward compatibility: flattened sources
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
        if 'sources' not in video_data:
            video_data['sources'] = [video_data]
        # NOTE(review): raises KeyError when neither video_id nor
        # 'mediaid' is present — presumably intentional; confirm callers.
        this_video_id = video_id or video_data['mediaid']
        formats = self._parse_jwplayer_formats(
            video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
            mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
        self._sort_formats(formats)
        subtitles = {}
        tracks = video_data.get('tracks')
        if tracks and isinstance(tracks, list):
            for track in tracks:
                if not isinstance(track, dict):
                    continue
                # only caption tracks become subtitles
                if track.get('kind') != 'captions':
                    continue
                track_url = urljoin(base_url, track.get('file'))
                if not track_url:
                    continue
                subtitles.setdefault(track.get('label') or 'en', []).append({
                    'url': self._proto_relative_url(track_url)
                })
        entries.append({
            'id': this_video_id,
            # require_title makes a missing 'title' a hard error
            'title': video_data['title'] if require_title else video_data.get('title'),
            'description': video_data.get('description'),
            'thumbnail': self._proto_relative_url(video_data.get('image')),
            'timestamp': int_or_none(video_data.get('pubdate')),
            'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
            'subtitles': subtitles,
            'formats': formats,
        })
    # a single entry is returned directly, not wrapped in a playlist
    if len(entries) == 1:
        return entries[0]
    else:
        return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                            m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Convert a jwplayer ``sources`` list into format dicts.

    Deduplicates source URLs, hands HLS/DASH/SMIL manifests to the
    dedicated extractors, marks audio-only sources, and builds direct
    or RTMP formats for everything else.
    """
    urls = []    # URLs already handled, for de-duplication
    formats = []
    for source in jwplayer_sources_data:
        if not isinstance(source, dict):
            continue
        source_url = self._proto_relative_url(source.get('file'))
        if not source_url:
            continue
        if base_url:
            source_url = compat_urlparse.urljoin(base_url, source_url)
        if source_url in urls:
            continue
        urls.append(source_url)
        source_type = source.get('type') or ''
        # prefer the declared MIME type; fall back to the URL extension
        ext = mimetype2ext(source_type) or determine_ext(source_url)
        if source_type == 'hls' or ext == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id=m3u8_id, fatal=False))
        elif ext == 'mpd':
            formats.extend(self._extract_mpd_formats(
                source_url, video_id, mpd_id=mpd_id, fatal=False))
        elif ext == 'smil':
            formats.extend(self._extract_smil_formats(
                source_url, video_id, fatal=False))
        # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
        elif source_type.startswith('audio') or ext in (
                'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
            formats.append({
                'url': source_url,
                'vcodec': 'none',
                'ext': ext,
            })
        else:
            height = int_or_none(source.get('height'))
            if height is None:
                # Often no height is provided but there is a label in
                # format like "1080p", "720p SD", or 1080.
                height = int_or_none(self._search_regex(
                    r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                    'height', default=None))
            a_format = {
                'url': source_url,
                'width': int_or_none(source.get('width')),
                'height': height,
                'tbr': int_or_none(source.get('bitrate')),
                'ext': ext,
            }
            if source_url.startswith('rtmp'):
                a_format['ext'] = 'flv'
                # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                # of jwplayer.flash.swf
                # split "rtmp://host/app/mp4:path" into URL + play path
                rtmp_url_parts = re.split(
                    r'((?:mp4|mp3|flv):)', source_url, 1)
                if len(rtmp_url_parts) == 3:
                    rtmp_url, prefix, play_path = rtmp_url_parts
                    a_format.update({
                        'url': rtmp_url,
                        'play_path': prefix + play_path,
                    })
                if rtmp_params:
                    a_format.update(rtmp_params)
            formats.append(a_format)
    return formats
def _live_title(self, name):
    """Build a title for a live stream by appending the current local
    date and time ('YYYY-MM-DD HH:MM') to *name*."""
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
    return '%s %s' % (name, timestamp)
def _int(self, v, name, fatal=False, **kwargs):
    """Coerce *v* to int via int_or_none(), reporting failures.

    Extra keyword arguments are forwarded to int_or_none().  On parse
    failure either raises ExtractorError (fatal=True) or emits a
    downloader warning and returns None.
    """
    # BUG FIX: removed a stray debug print of
    # getattr(v, kwargs['get_attr']) that leaked to stdout;
    # int_or_none() already handles the 'get_attr' kwarg itself.
    # This also makes the method consistent with _float().
    res = int_or_none(v, **kwargs)
    if res is None:
        msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
        if fatal:
            raise ExtractorError(msg)
        else:
            self._downloader.report_warning(msg)
    return res
def _float(self, v, name, fatal=False, **kwargs):
    """Coerce *v* to float via float_or_none(), reporting failures.

    On parse failure either raises ExtractorError (fatal=True) or emits
    a downloader warning and returns None.
    """
    parsed = float_or_none(v, **kwargs)
    if parsed is not None:
        return parsed
    msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
    if fatal:
        raise ExtractorError(msg)
    self._downloader.report_warning(msg)
    return parsed
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                path='/', secure=False, discard=False, rest={}, **kwargs):
    """Store a cookie in the downloader's cookie jar.

    NOTE(review): ``rest={}`` is a mutable default argument; it is only
    passed through here, but callers must not rely on mutating it.
    """
    # positional Cookie() fields: version, name, value, port,
    # port_specified, domain, domain_specified, domain_initial_dot,
    # path, path_specified, secure, expires, discard, comment,
    # comment_url, rest
    cookie = compat_cookiejar.Cookie(
        0, name, value, port, port is not None, domain, True,
        domain.startswith('.'), path, True, secure, expire_time,
        discard, None, None, rest)
    self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
    """ Return a compat_cookies.SimpleCookie with the cookies for the url """
    # build a throwaway request so the cookiejar can compute the
    # Cookie header applicable to this URL
    req = sanitized_Request(url)
    self._downloader.cookiejar.add_cookie_header(req)
    return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
    """Yield this extractor's test cases, tagging each with the
    extractor name (class name minus the 'IE' suffix).

    only_matching cases are skipped unless *include_onlymatching* is
    true.  Defining both _TEST and _TESTS is an error.
    """
    single = getattr(self, '_TEST', None)
    if single:
        assert not hasattr(self, '_TESTS'), \
            '%s has _TEST and _TESTS' % type(self).__name__
        cases = [single]
    else:
        cases = getattr(self, '_TESTS', [])
    for case in cases:
        if case.get('only_matching', False) and not include_onlymatching:
            continue
        case['name'] = type(self).__name__[:-len('IE')]
        yield case
def is_suitable(self, age_limit):
    """ Test whether the extractor is generally suitable for the given
    age limit (i.e. pornographic sites are not, all others usually are) """
    # suitable as soon as one test case is NOT age-restricted; only
    # unsuitable when every inspected test case is restricted.
    # With no test cases at all, the extractor counts as suitable.
    any_restricted = False
    for tc in self.get_testcases(include_onlymatching=False):
        if tc.get('playlist', []):
            # playlist tests are judged by their first entry
            tc = tc['playlist'][0]
        is_restricted = age_restricted(
            tc.get('info_dict', {}).get('age_limit'), age_limit)
        if not is_restricted:
            return True
        any_restricted = any_restricted or is_restricted
    return not any_restricted
def extract_subtitles(self, *args, **kwargs):
    """Return the subtitles dict when the user asked for subtitles
    (write or list), otherwise an empty dict."""
    params = self._downloader.params
    wanted = params.get('writesubtitles', False) or params.get('listsubtitles')
    if wanted:
        return self._get_subtitles(*args, **kwargs)
    return {}
def _get_subtitles(self, *args, **kwargs):
    """Subclass hook: return the subtitles dict for a video."""
    raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
    """Merge subtitle items for one language.

    Items from the second list whose URL already appears in the first
    list are dropped; order is otherwise preserved.
    """
    known_urls = {item['url'] for item in subtitle_list1}
    merged = list(subtitle_list1)
    for item in subtitle_list2:
        if item['url'] not in known_urls:
            merged.append(item)
    return merged
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
    """Merge two subtitle dictionaries, language by language."""
    merged = dict(subtitle_dict1)
    for lang, items in subtitle_dict2.items():
        merged[lang] = cls._merge_subtitle_items(
            subtitle_dict1.get(lang, []), items)
    return merged
def extract_automatic_captions(self, *args, **kwargs):
    """Return automatic captions when the user requested them (write or
    list), otherwise an empty dict."""
    params = self._downloader.params
    if params.get('writeautomaticsub', False) or params.get('listsubtitles'):
        return self._get_automatic_captions(*args, **kwargs)
    return {}
def _get_automatic_captions(self, *args, **kwargs):
    """Subclass hook: return the automatic captions dict for a video."""
    raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
    """Mark the video watched when the 'mark_watched' option is set and
    credentials (login or cookie file) are available; delegates to the
    subclass hook _mark_watched()."""
    if (self._downloader.params.get('mark_watched', False) and
            (self._get_login_info()[0] is not None or
                self._downloader.params.get('cookiefile') is not None)):
        self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
    """Subclass hook: actually mark the video watched on the site."""
    raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
    """Return HTTP headers that route geo-verification requests through
    the configured proxy (empty dict when no proxy is set)."""
    proxy = self._downloader.params.get('geo_verification_proxy')
    if not proxy:
        return {}
    return {'Ytdl-request-proxy': proxy}
def _generic_id(self, url):
    # derive an id from the URL's last path segment, minus extension
    return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
    # derive a title from the URL basename, minus extension
    return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # prefix is empty (one result), a positive integer, or 'all'
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        """Whether this extractor handles *url*."""
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Parse the search pseudo-URL and fetch the requested results."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)
        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # clamp oversized requests to the extractor's maximum
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        # public accessor for the class-level search key
        return self._SEARCH_KEY
| jicruz/heroku-bot | lib/youtube_dl/extractor/common.py | Python | gpl-3.0 | 121,381 | [
"VisIt"
] | bc18a422be5be4e3be2955c9b91adf5d6b7ffad36fa78ea7791c56a6155e31f9 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# relevant imports
import sys
import time
import espresso
import mpi4py.MPI as MPI
import Tetracryst # Preparation of tetrahedral crystal and constuctions of bonds in tetrahedral liquid
from espresso import Real3D, Int3D
from espresso.tools import decomp
from espresso.tools import timers
# integration steps, cutoff, skin, AdResS specifications
steps = 1000
timestep = 0.0005
intervals = 100   # number of measurement/output intervals over the run
rc = 4.5  # cutoff coarse-grained potential
rca = 1.122462048309373  # cutoff atomistic potential (cutoff (2^(1/6)), WCA)
skin = 0.4
# Parameters for the thermostat
#gamma = 2.0
#temp = 1.0
# Parameters for size of AdResS dimensions
ex_size = 5.0   # half-width of the fully atomistic (explicit) region
hy_size = 5.0   # width of the hybrid transition region
# read equilibrated configuration file
# NOTE(review): 'pid' and 'type' shadow other names/builtins; kept as-is.
pid, type, x, y, z, vx, vy, vz, Lx, Ly, Lz = espresso.tools.readxyz("equilibrated_conf.xyz")
# Table for coarse-grained potential
tabCG = "table_potential.dat"
# FEC compensation table
tabFEC = "table_FEC_Gibbs.dat"
# number of CG particles
# (Python 2 integer division: assumes 4 atoms per tetrahedral molecule)
num_particlesCG = len(x)/4
# number of AT particles
num_particles = len(x)
# set up the system
sys.stdout.write('Setting up simulation ...\n')
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
# decompose the box over the available MPI ranks
nodeGrid = decomp.nodeGrid(comm.size)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
# (H-)AdResS domain decomposition
system.storage = espresso.storage.DomainDecompositionAdress(system, nodeGrid, cellGrid)
# prepare AT particles
allParticlesAT = []
allParticles = []
tuples = []
for pidAT in range(num_particles):
    allParticlesAT.append([pidAT,  # add here these particles just temporarily
                           Real3D(x[pidAT], y[pidAT], z[pidAT]),  # position
                           Real3D(vx[pidAT], vy[pidAT], vz[pidAT]),  # velocity
                           Real3D(0, 0, 0),  # force
                           1, 1.0, 1])  # type, mass, is AT particle
# create CG particles
for pidCG in range(num_particlesCG):
    # we put CG molecule in first atom, later CG molecules will be positioned in the center
    # NOTE(review): 'cmp' shadows the Python 2 builtin; kept as-is.
    cmp = espresso.tools.AdressSetCG(4, pidCG, allParticlesAT)
    # Preparation of tuples (tuples define, which atoms belong to which CG molecules)
    tmptuple = [pidCG+num_particles]
    for pidAT2 in range(4):
        pid = pidCG*4+pidAT2
        tmptuple.append(pid)
    # append CG particles
    allParticles.append([pidCG+num_particles,  # CG particle has to be added first!
                         Real3D(cmp[0], cmp[1], cmp[2]),  # pos
                         Real3D(0, 0, 0),  # vel
                         Real3D(0, 0, 0),  # force
                         0, 4.0, 0])  # type, mass, is not AT particle
    # append AT particles
    for pidAT in range(4):
        pid = pidCG*4+pidAT
        allParticles.append([pid,  # now the AT particles can be added
                             (allParticlesAT[pid])[1],  # pos
                             (allParticlesAT[pid])[2],  # vel
                             (allParticlesAT[pid])[3],  # force
                             (allParticlesAT[pid])[4],  # type
                             (allParticlesAT[pid])[5],  # mass
                             (allParticlesAT[pid])[6]])  # is AT particle
    # append tuple to tuplelist
    tuples.append(tmptuple)
# add particles to system
system.storage.addParticles(allParticles, "id", "pos", "v", "f", "type", "mass", "adrat")
# create FixedTupleList object
ftpl = espresso.FixedTupleListAdress(system.storage)
# and add the tuples
ftpl.addTuples(tuples)
system.storage.setFixedTuplesAdress(ftpl)
# add bonds between AT particles
fpl = espresso.FixedPairListAdress(system.storage, ftpl)
bonds = Tetracryst.makebonds(len(x))
fpl.addBonds(bonds)
# decompose after adding tuples and bonds
print "Added tuples and bonds, decomposing now ..."
system.storage.decompose()
print "done decomposing"
# AdResS Verlet list
# (the atomistic region is centered in the box; dEx/dHy set its extent)
vl = espresso.VerletListAdress(system, cutoff=rc, adrcut=rc,
                               dEx=ex_size, dHy=hy_size,
                               adrCenter=[Lx/2, Ly/2, Lz/2])
# non-bonded potentials
# LJ Capped WCA between AT and tabulated potential between CG particles
interNB = espresso.interaction.VerletListHadressLennardJones(vl, ftpl)  # Here we need specific (H-)AdResS interaction type
potWCA = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift='auto', cutoff=rca)
potCG = espresso.interaction.Tabulated(itype=3, filename=tabCG, cutoff=rc)  # CG
interNB.setPotentialAT(type1=1, type2=1, potential=potWCA)  # AT
interNB.setPotentialCG(type1=0, type2=0, potential=potCG)  # CG
system.addInteraction(interNB)
# bonded potentials
# Quartic potential between AT particles
potQuartic = espresso.interaction.Quartic(K=75.0, r0=1.0)
interQuartic = espresso.interaction.FixedPairListQuartic(system, fpl, potQuartic)
system.addInteraction(interQuartic)
# VelocityVerlet integrator
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = timestep
# add AdResS extension
adress = espresso.integrator.Adress(system, vl, ftpl)
integrator.addExtension(adress)
# add Langevin thermostat extension
#langevin = espresso.integrator.LangevinThermostat(system)
#langevin.gamma = gamma
#langevin.temperature = temp
#langevin.adress = True # enable AdResS!
#integrator.addExtension(langevin)
# add TDF (dummy, just testing)
# free-energy compensation force table, applied around the AdResS center
fec = espresso.integrator.FreeEnergyCompensation(system, center=[Lx/2, Ly/2, Lz/2])
fec.addForce(itype=3, filename=tabFEC, type=0)
integrator.addExtension(fec)
# distribute atoms and CG molecules according to AdResS domain decomposition, place CG molecules in the center of mass
espresso.tools.AdressDecomp(system, integrator)
# system information
print ''
print 'AdResS Center =', [Lx/2, Ly/2, Lz/2]
print 'number of AT particles =', num_particles
print 'number of CG particles =', num_particlesCG
print 'density = %.4f' % (density)
print 'rc =', rc
print 'dt =', integrator.dt
print 'skin =', system.skin
print 'steps =', steps
print 'NodeGrid = %s' % (nodeGrid,)
print 'CellGrid = %s' % (cellGrid,)
print ''
# analysis
temperature = espresso.analysis.Temperature(system)
fmt = '%5d %8.4f %12.3f %12.3f %12.3f %12.3f %12.3f\n'
# initial (step 0) energies, written before integration starts
T = temperature.compute()
Ek = 0.5 * T * (3 * num_particles)
Ep = interNB.computeEnergy()
Eb = interQuartic.computeEnergy()
Ecorr = fec.computeCompEnergy()
sys.stdout.write(' step Temp etotal enonbonded ebonded ekinetic ecorrection\n')
sys.stdout.write(fmt % (0, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))
# Density profile preparation
density_array_total = []   # accumulated x-density histogram
Adds = 0.0                 # number of accumulated samples
densityprofilegrid = 100   # number of histogram bins along x
# Timer, Steps
# NOTE(review): time.clock() is deprecated (removed in Python 3.8).
start_time = time.clock()
nsteps = steps / intervals
# integration and on the fly analysis
for s in range(1, intervals + 1):
    integrator.run(nsteps)
    step = nsteps * s
    T = temperature.compute()
    Ek = 0.5 * T * (3 * num_particles)
    Ep = interNB.computeEnergy()
    Eb = interQuartic.computeEnergy()
    Ecorr = fec.computeCompEnergy()
    # calculate density profile
    # (the first 10 intervals are treated as equilibration and skipped)
    if s > 10:
        densityprofile = espresso.analysis.XDensity(system)
        density_array = densityprofile.compute(densityprofilegrid)
        for i in range(len(density_array)):
            if(i>=len(density_array_total)):
                density_array_total.append(density_array[i])
            else:
                density_array_total[i] += density_array[i]
        Adds += 1.0
    sys.stdout.write(fmt % (step, T, Ek + Ep + Eb + Ecorr, Ep, Eb, Ek, Ecorr))
# correct the density profile according to number of samples
for i in range(len(density_array_total)):
    density_array_total[i] /= Adds
# printing density profile
nameFile = 'density_profile_Gibbs.dat'
print ''
print "Printing the density profile to %s\n" %nameFile
tempFile = open (nameFile, 'w')
fmt = ' %12.8f %12.8f\n'
# bin width along x; rows are (bin center, averaged density)
dr = Lx / float(densityprofilegrid)
for i in range( len(density_array_total) ):
    tempFile.write(fmt % ( (i+0.5)*dr, density_array_total[i] ))
tempFile.close()
# simulation information
end_time = time.clock()
timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(num_particles)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
| BackupTheBerlios/espressopp | examples/hadress/hadressFEC/hadressDensityFEC.py | Python | gpl-3.0 | 8,414 | [
"CRYSTAL",
"ESPResSo"
] | cb24a3b5d9e8a1a6d2e577ef246dee8d7ccbf12a7f30b2d8068d79ff951792e4 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone's pep8 extensions.
In order to make the review process faster and easier for core devs we are
adding some Keystone specific pep8 checks. This will catch common errors
so that core devs don't have to.
There are two types of pep8 extensions. One is a function that takes either
a physical or logical line. The physical or logical line is the first param
in the function definition and can be followed by other parameters supported
by pep8. The second type is a class that parses AST trees. For more info
please see pep8.py.
"""
import ast
import re
import six
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.

    Subclasses implement visit_* methods like any other AST visitor and
    call ``self.add_error(offending_node)`` when they detect a problem;
    the error position is taken from the node itself.  Subclasses must
    also define a ``CHECK_DESC`` class attribute holding the human
    readable error message.
    """

    def __init__(self, tree, filename):
        """Created automatically by pep8.

        :param tree: an AST tree
        :param filename: name of the file being analyzed (unused here)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Entry point called by pep8; returns the collected errors."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error at *node*'s position in pep8 tuple format."""
        text = message or self.CHECK_DESC
        entry = (node.lineno, node.col_offset, text, self.__class__)
        self._errors.append(entry)
class CheckForMutableDefaultArgs(BaseASTChecker):
    """Checks for the use of mutable objects as function/method defaults.

    We are only checking for list and dict literals at this time. This means
    that a developer could specify an instance of their own and cause a bug.
    The fix for this is probably more work than it's worth because it will
    get caught during code review.
    """

    CHECK_DESC = 'K001 Using mutable as a function/method default'
    # node types treated as mutable defaults; a bare Call is included
    # because its result is evaluated only once, at def time
    MUTABLES = (
        ast.List, ast.ListComp,
        ast.Dict, ast.DictComp,
        ast.Set, ast.SetComp,
        ast.Call)

    def visit_FunctionDef(self, node):
        # flag every default argument whose value is a mutable literal/call
        for arg in node.args.defaults:
            if isinstance(arg, self.MUTABLES):
                self.add_error(arg)
        super(CheckForMutableDefaultArgs, self).generic_visit(node)
def block_comments_begin_with_a_space(physical_line, line_number):
    """There should be a space after the # of block comments.

    There is already a check in pep8 that enforces this rule for
    inline comments.

    Okay: # this is a comment
    Okay: #!/usr/bin/python
    Okay: # this is a comment
    K002: #this is a comment
    """
    MESSAGE = "K002 block comments should start with '# '"
    # shebangs are OK
    if line_number == 1 and physical_line.startswith('#!'):
        return
    stripped = physical_line.strip()
    if not stripped.startswith('#'):
        return
    # a bare '#' is fine; '#text' (no space after the hash) is not
    if len(stripped) > 1 and not stripped[1].isspace():
        return physical_line.index('#'), MESSAGE
class CheckForAssertingNoneEquality(BaseASTChecker):
    """Ensures that code does not use a None with assert(Not*)Equal."""

    CHECK_DESC_IS = ('K003 Use self.assertIsNone(...) when comparing '
                     'against None')
    CHECK_DESC_ISNOT = ('K004 Use assertIsNotNone(...) when comparing '
                        ' against None')

    @staticmethod
    def _is_none(node):
        """Return True when *node* represents the ``None`` literal.

        BUG FIX: on Python 2 ``None`` parses as ast.Name(id='None'),
        but on Python 3 it is a constant node, so checking only
        ast.Name silently disabled this check.  Handle both.
        """
        if isinstance(node, ast.Name) and node.id == 'None':
            return True  # Python 2 AST representation
        constant_cls = getattr(ast, 'Constant', None)
        return (constant_cls is not None
                and isinstance(node, constant_cls)
                and node.value is None)

    def visit_Call(self, node):
        """Flag assertEqual/assertNotEqual calls comparing against None."""
        # NOTE(dstanek): I wrote this in a verbose way to make it easier to
        # read for those that have little experience with Python's AST.
        if isinstance(node.func, ast.Attribute):
            if node.func.attr == 'assertEqual':
                for arg in node.args:
                    if self._is_none(arg):
                        self.add_error(node, message=self.CHECK_DESC_IS)
            elif node.func.attr == 'assertNotEqual':
                for arg in node.args:
                    if self._is_none(arg):
                        self.add_error(node, message=self.CHECK_DESC_ISNOT)
        super(CheckForAssertingNoneEquality, self).generic_visit(node)
class CheckForLoggingIssues(BaseASTChecker):
    """Validates i18n usage in logging calls (checks K005, K006, K007).

    While walking the AST this checker tracks: which imported names are
    logging modules, which are i18n helpers (``_``, ``_LI``, ...),
    which variables are loggers (assigned from ``getLogger``), and
    which variables currently hold the result of an i18n helper call.
    """

    DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging'
    NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging'
    EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary'
    LOG_MODULES = ('logging', 'keystone.openstack.common.log')
    I18N_MODULES = (
        'keystone.i18n._',
        'keystone.i18n._LI',
        'keystone.i18n._LW',
        'keystone.i18n._LE',
        'keystone.i18n._LC',
    )
    # maps a logging method to the i18n helper it must use
    # (debug maps to None: debug messages must NOT be translated)
    TRANS_HELPER_MAP = {
        'debug': None,
        'info': '_LI',
        'warn': '_LW',
        'warning': '_LW',
        'error': '_LE',
        'exception': '_LE',
        'critical': '_LC',
    }

    def __init__(self, tree, filename):
        super(CheckForLoggingIssues, self).__init__(tree, filename)
        self.logger_names = []          # variables holding logger objects
        self.logger_module_names = []   # imported logging module aliases
        self.i18n_names = {}            # local alias -> fully qualified i18n name
        # NOTE(dstanek): this kinda accounts for scopes when talking
        # about only leaf node in the graph
        self.assignments = {}           # variable -> i18n helper that produced it

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node.

        Overridden to record each child's parent (``_parent``) so that
        _is_raised_later() can later walk sibling statements.
        """
        for field, value in ast.iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, ast.AST):
                        item._parent = node
                        self.visit(item)
            elif isinstance(value, ast.AST):
                value._parent = node
                self.visit(value)

    def _filter_imports(self, module_name, alias):
        """Keeps lists of logging and i18n imports
        """
        if module_name in self.LOG_MODULES:
            self.logger_module_names.append(alias.asname or alias.name)
        elif module_name in self.I18N_MODULES:
            self.i18n_names[alias.asname or alias.name] = alias.name

    def visit_Import(self, node):
        """Record plain ``import ...`` statements of interest."""
        for alias in node.names:
            self._filter_imports(alias.name, alias)
        return super(CheckForLoggingIssues, self).generic_visit(node)

    def visit_ImportFrom(self, node):
        """Record ``from ... import ...`` statements of interest."""
        for alias in node.names:
            full_name = '%s.%s' % (node.module, alias.name)
            self._filter_imports(full_name, alias)
        return super(CheckForLoggingIssues, self).generic_visit(node)

    def _find_name(self, node):
        """Return the fully qualified name or a Name or Attribute."""
        if isinstance(node, ast.Name):
            return node.id
        elif (isinstance(node, ast.Attribute)
                and isinstance(node.value, (ast.Name, ast.Attribute))):
            method_name = node.attr
            obj_name = self._find_name(node.value)
            if obj_name is None:
                return None
            return obj_name + '.' + method_name
        elif isinstance(node, six.string_types):
            return node
        else:  # could be Subscript, Call or many more
            return None

    def visit_Assign(self, node):
        """Look for 'LOG = logging.getLogger'

        This handles the simple case:
          name = [logging_module].getLogger(...)
          - or -
          name = [i18n_name](...)

        And some much more complex ones:
          name = [i18n_name](...) % X
          - or -
          self.name = [i18n_name](...) % X
        """
        attr_node_types = (ast.Name, ast.Attribute)
        if (len(node.targets) != 1
                or not isinstance(node.targets[0], attr_node_types)):
            # say no to: "x, y = ..."
            return super(CheckForLoggingIssues, self).generic_visit(node)
        target_name = self._find_name(node.targets[0])
        if (isinstance(node.value, ast.BinOp) and
                isinstance(node.value.op, ast.Mod)):
            if (isinstance(node.value.left, ast.Call) and
                    isinstance(node.value.left.func, ast.Name) and
                    node.value.left.func.id in self.i18n_names):
                # NOTE(dstanek): this is done to match cases like:
                # `msg = _('something %s') % x`
                node = ast.Assign(value=node.value.left)
        if not isinstance(node.value, ast.Call):
            # node.value must be a call to getLogger
            self.assignments.pop(target_name, None)
            return super(CheckForLoggingIssues, self).generic_visit(node)
        # is this a call to an i18n function?
        if (isinstance(node.value.func, ast.Name)
                and node.value.func.id in self.i18n_names):
            self.assignments[target_name] = node.value.func.id
            return super(CheckForLoggingIssues, self).generic_visit(node)
        if (not isinstance(node.value.func, ast.Attribute)
                or not isinstance(node.value.func.value, attr_node_types)):
            # function must be an attribute on an object like
            # logging.getLogger
            return super(CheckForLoggingIssues, self).generic_visit(node)
        object_name = self._find_name(node.value.func.value)
        func_name = node.value.func.attr
        if (object_name in self.logger_module_names
                and func_name == 'getLogger'):
            self.logger_names.append(target_name)
        return super(CheckForLoggingIssues, self).generic_visit(node)

    def visit_Call(self, node):
        """Look for the 'LOG.*' calls.
        """
        # obj.method
        if isinstance(node.func, ast.Attribute):
            obj_name = self._find_name(node.func.value)
            if isinstance(node.func.value, ast.Name):
                method_name = node.func.attr
            elif isinstance(node.func.value, ast.Attribute):
                # NOTE(review): obj_name is recomputed here; the result
                # is identical to the assignment above.
                obj_name = self._find_name(node.func.value)
                method_name = node.func.attr
            else:  # could be Subscript, Call or many more
                return super(CheckForLoggingIssues, self).generic_visit(node)
            # must be a logger instance and one of the support logging methods
            if (obj_name not in self.logger_names
                    or method_name not in self.TRANS_HELPER_MAP):
                return super(CheckForLoggingIssues, self).generic_visit(node)
            # the call must have arguments
            if not len(node.args):
                return super(CheckForLoggingIssues, self).generic_visit(node)
            if method_name == 'debug':
                self._process_debug(node)
            elif method_name in self.TRANS_HELPER_MAP:
                self._process_non_debug(node, method_name)
        return super(CheckForLoggingIssues, self).generic_visit(node)

    def _process_debug(self, node):
        """K005: debug messages must not be translated."""
        msg = node.args[0]  # first arg to a logging method is the msg
        # if first arg is a call to a i18n name
        if (isinstance(msg, ast.Call)
                and isinstance(msg.func, ast.Name)
                and msg.func.id in self.i18n_names):
            self.add_error(msg, message=self.DEBUG_CHECK_DESC)
        # if the first arg is a reference to a i18n call
        elif (isinstance(msg, ast.Name)
                and msg.id in self.assignments
                and not self._is_raised_later(node, msg.id)):
            self.add_error(msg, message=self.DEBUG_CHECK_DESC)

    def _process_non_debug(self, node, method_name):
        """K006/K007: non-debug messages must use the matching helper."""
        msg = node.args[0]  # first arg to a logging method is the msg
        # if first arg is a call to a i18n name
        if isinstance(msg, ast.Call):
            try:
                func_name = msg.func.id
            except AttributeError:
                # in the case of logging only an exception, the msg function
                # will not have an id associated with it, for instance:
                # LOG.warning(six.text_type(e))
                return
            # the function name is the correct translation helper
            # for the logging method
            if func_name == self.TRANS_HELPER_MAP[method_name]:
                return
            # the function name is an alias for the correct translation
            # helper for the loggine method
            if (self.i18n_names[func_name] ==
                    self.TRANS_HELPER_MAP[method_name]):
                return
            self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
        # if the first arg is not a reference to the correct i18n hint
        elif isinstance(msg, ast.Name):
            # FIXME(dstanek): to make sure more robust we should be checking
            # all names passed into a logging method. we can't right now
            # because:
            # 1. We have code like this that we'll fix when dealing with the %:
            #       msg = _('....') % {}
            #       LOG.warn(msg)
            # 2. We also do LOG.exception(e) in several places. I'm not sure
            #    exactly what we should be doing about that.
            if msg.id not in self.assignments:
                return
            helper_method_name = self.TRANS_HELPER_MAP[method_name]
            if (self.assignments[msg.id] != helper_method_name
                    and not self._is_raised_later(node, msg.id)):
                self.add_error(msg, message=self.NONDEBUG_CHECK_DESC)
            elif (self.assignments[msg.id] == helper_method_name
                    and self._is_raised_later(node, msg.id)):
                self.add_error(msg, message=self.EXCESS_HELPER_CHECK_DESC)

    def _is_raised_later(self, node, name):
        """Whether *name* is raised in a statement following *node*.

        Walks the sibling statements after the logging call; returns
        True when the first relevant sibling is a ``raise`` whose
        exception call receives *name* as an argument.
        """

        def find_peers(node):
            # siblings of the statement containing *node*, in order,
            # starting just after it (relies on _parent set by
            # generic_visit)
            node_for_line = node._parent
            for _field, value in ast.iter_fields(node._parent._parent):
                if isinstance(value, list) and node_for_line in value:
                    return value[value.index(node_for_line) + 1:]
                continue
            return []

        peers = find_peers(node)
        for peer in peers:
            if isinstance(peer, ast.Raise):
                if (isinstance(peer.type, ast.Call) and
                        len(peer.type.args) > 0 and
                        isinstance(peer.type.args[0], ast.Name) and
                        name in (a.id for a in peer.type.args)):
                    return True
                else:
                    return False
            elif isinstance(peer, ast.Assign):
                # the name is rebound before any raise: stop looking
                if name in (t.id for t in peer.targets):
                    return False
def check_oslo_namespace_imports(logical_line, blank_before, filename):
    """K333: imports must use the oslo_* namespace, not the old oslo.* one."""
    pattern = re.compile(
        r"(((from)|(import))\s+oslo\.)|(from\s+oslo\s+import\s+)")
    if not re.match(pattern, logical_line):
        return
    corrected = logical_line.replace('oslo.', 'oslo_')
    yield (0, "K333: '%s' must be used instead of '%s'." % (
        corrected, logical_line))
def dict_constructor_with_sequence_copy(logical_line):
    """Should use a dict comprehension instead of a dict constructor.

    PEP-0274 introduced dict comprehension with performance enhancement
    and it also makes code more readable.

    Okay: lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
    Okay: fool = dict(a='a', b='b')
    K008: lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1]))
    K008: attrs = dict([(k, _from_json(v))
    K008: dict([[i,i] for i in range(3)])
    """
    MESSAGE = ("K008 Must use a dict comprehension instead of a dict"
               " constructor with a sequence of key-value pairs.")
    # matches dict( followed by a tuple/list (optionally wrapped in a
    # list), i.e. a sequence of pairs rather than keyword arguments
    pattern = re.compile(r".*\bdict\((\[)?(\(|\[)(?!\{)")
    if pattern.match(logical_line):
        yield 0, MESSAGE
def factory(register):
    """Register every Keystone hacking check with pep8."""
    for check in (CheckForMutableDefaultArgs,
                  block_comments_begin_with_a_space,
                  CheckForAssertingNoneEquality,
                  CheckForLoggingIssues,
                  check_oslo_namespace_imports,
                  dict_constructor_with_sequence_copy):
        register(check)
| vivekdhayaal/keystone | keystone/tests/hacking/checks.py | Python | apache-2.0 | 16,856 | [
"VisIt"
] | da49f825ece8fbd56e4775a81198687a4fdc0d833427b5834cf3aa2e82a33c12 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import tempfile
import numpy as np
import mdtraj
import pkg_resources
def get_top():
    """Return the filesystem path of the bundled test topology PDB file."""
    return pkg_resources.resource_filename(__name__, 'data/test.pdb')
def create_traj(top=None, format='.xtc', dir=None, length=1000, start=0):
    """Create a synthetic trajectory file with deterministic coordinates.

    The xyz data is a simple integer ramp reshaped to (length, 3, 3), so
    frame contents are predictable for assertions in tests.

    :param top: topology file path; defaults to the bundled test PDB
    :param format: file suffix understood by mdtraj (e.g. '.xtc')
    :param dir: directory for the temporary trajectory file
    :param length: number of frames to write
    :param start: offset for the coordinate ramp
    :return: tuple of (trajectory file path, xyz array, length)
    """
    # NOTE(review): tempfile.mktemp is race-prone/deprecated; kept for
    # behavioral parity with the rest of this test-utility module.
    trajfile = tempfile.mktemp(suffix=format, dir=dir)
    xyz = np.arange(start * 3 * 3, (start + length) * 3 * 3)
    xyz = xyz.reshape((-1, 3, 3))
    if top is None:
        top = get_top()
    t = mdtraj.load(top)
    t.xyz = xyz
    # Constant (permuted-identity) box vectors for every frame.
    t.unitcell_vectors = np.array(length * [[0, 0, 1], [0, 1, 0], [1, 0, 0]]).reshape(length, 3, 3)
    t.time = np.arange(length)
    t.save(trajfile)
    return trajfile, xyz, length
def create_trajectory_csv(dirname, data):
    """Write *data* to a fresh ``.csv.dat`` text file in *dirname*.

    :param dirname: directory in which the file is created
    :param data: array-like passed to :func:`numpy.savetxt`
    :return: path of the created file
    """
    import os
    # tempfile.mktemp() is deprecated and race-prone; mkstemp() creates the
    # file atomically.  np.savetxt then simply overwrites the empty file.
    fd, fname = tempfile.mkstemp(suffix='.csv.dat', dir=dirname)
    os.close(fd)
    np.savetxt(fname, data)
    return fname
def create_trajectory_numpy(dirname, data):
    """Store *data* as a ``.npy`` binary file in *dirname*; return its path."""
    target = tempfile.mktemp(suffix='.npy', dir=dirname)
    np.save(target, data)
    return target
def create_dummy_pdb(dirname, dims):
    """Create a tiny fake PDB file with *dims* ATOM records; return its path."""
    pdb_path = tempfile.mktemp('.pdb', dir=dirname)
    record = 'ATOM %5d C ACE A 1 28.490 31.600 33.379 0.00 1.00'
    with open(pdb_path, 'w') as handle:
        handle.writelines(record % serial + '\n' for serial in range(dims))
    return pdb_path
def create_trajectory_trr(dims, dirname, data):
    """Write *data* (reshaped to frames of *dims* atoms) to a GROMACS TRR file.

    :param dims: number of atoms per frame
    :param dirname: directory for the temporary file
    :param data: flat array reshaped to (-1, dims, 3)
    :return: path of the created ``.trr`` file
    """
    from mdtraj.core.trajectory import TRRTrajectoryFile
    fname = tempfile.mktemp(suffix='.trr', dir=dirname)
    with TRRTrajectoryFile(fname, 'w') as f:
        f.write(data.reshape(-1, dims, 3))
    return fname
def create_trajectory_xtc(dims, dirname, data):
    """Write *data* (reshaped to frames of *dims* atoms) to a GROMACS XTC file.

    :param dims: number of atoms per frame
    :param dirname: directory for the temporary file
    :param data: flat array reshaped to (-1, dims, 3)
    :return: path of the created ``.xtc`` file
    """
    from mdtraj.core.trajectory import XTCTrajectoryFile
    fname = tempfile.mktemp(suffix='.xtc', dir=dirname)
    shaped = data.reshape(-1, dims, 3)
    with XTCTrajectoryFile(fname, 'w') as f:
        f.write(shaped)
    return fname
def create_trajectory_h5(dims, dirname, data):
    """Write *data* (reshaped to frames of *dims* atoms) to an HDF5 file.

    The coordinates are stored under the dataset name 'somedata'.

    :param dims: number of atoms per frame
    :param dirname: directory for the temporary file
    :param data: flat array reshaped to (-1, dims, 3)
    :return: path of the created ``.h5`` file
    """
    import h5py
    fname = tempfile.mktemp(suffix='.h5', dir=dirname)
    with h5py.File(fname, mode='w') as f:
        f.create_dataset('somedata', data=data.reshape(-1, dims, 3))
    return fname
def create_trajectory_dcd(dims, dirname, data):
    """Write *data* (reshaped to frames of *dims* atoms) to a CHARMM DCD file.

    :param dims: number of atoms per frame
    :param dirname: directory for the temporary file
    :param data: flat array reshaped to (-1, dims, 3); values are scaled by
        10 because DCD stores Angstroems while the test data is in nm
    :return: path of the created ``.dcd`` file
    """
    from mdtraj.core.trajectory import DCDTrajectoryFile
    fname = tempfile.mktemp(suffix='.dcd', dir=dirname)
    shaped = data.reshape(-1, dims, 3)
    with DCDTrajectoryFile(fname, 'w') as f:
        f.write(shaped * 10)  # unit Angstroems is assumed by mdtraj.
    return fname
def create_transform(reader):
    """Wrap *reader* in a pass-through StreamingTransformer for pipeline tests.

    :param reader: a data source providing ``ndim`` and acting as producer
    :return: a transformer that yields its input unchanged
    """
    from pyemma.coordinates.data._base.transformer import StreamingTransformer

    class IdentityTransform(StreamingTransformer):
        # Minimal concrete subclass: same dimensionality as the reader,
        # identity mapping of the data.
        def dimension(self):
            return reader.ndim
        def describe(self):
            return 'identity'
        def _transform_array(self, X):
            return X
    t = IdentityTransform()
    t.data_producer = reader
    return t
| marscher/PyEMMA | pyemma/coordinates/tests/util.py | Python | lgpl-3.0 | 2,846 | [
"MDTraj"
] | 9272250ccd34ae2a5b978900e68d5d97787474114306d97ff722ef83febd645e |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
from skbio.stats.spatial import (procrustes, _get_disparity, _center,
_normalize)
class ProcrustesTests(TestCase):
    """test the procrustes module, using floating point numpy arrays
    """
    def setUp(self):
        """creates inputs"""
        # an L
        self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
        # a larger, shifted, mirrored L
        self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
        # an L shifted up 1, right 1, and with point 4 shifted an extra .5
        # to the right
        # pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
        self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
        # data4, data5 are standardized (trace(A*A') = 1).
        # procrustes should return an identical copy if they are used
        # as the first matrix argument.
        shiftangle = np.pi / 8
        self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
                               [0, -1]], 'd') / np.sqrt(4)
        self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
                               [np.cos(np.pi / 2 - shiftangle),
                                np.sin(np.pi / 2 - shiftangle)],
                               [-np.cos(shiftangle),
                                -np.sin(shiftangle)],
                               [-np.cos(np.pi / 2 - shiftangle),
                                -np.sin(np.pi / 2 - shiftangle)]],
                              'd') / np.sqrt(4)
    def test_procrustes(self):
        """tests procrustes' ability to match two matrices.
        the second matrix is a rotated, shifted, scaled, and mirrored version
        of the first, in two dimensions only
        """
        # can shift, mirror, and scale an 'L'?
        a, b, disparity = procrustes(self.data1, self.data2)
        np.testing.assert_allclose(b, a)
        np.testing.assert_almost_equal(disparity, 0.)
        # if first mtx is standardized, leaves first mtx unchanged?
        m4, m5, disp45 = procrustes(self.data4, self.data5)
        np.testing.assert_equal(m4, self.data4)
        # at worst, data3 is an 'L' with one point off by .5
        m1, m3, disp13 = procrustes(self.data1, self.data3)
        self.assertTrue(disp13 < 0.5 ** 2)
    def test_procrustes2(self):
        """procrustes disparity should not depend on order of matrices"""
        m1, m3, disp13 = procrustes(self.data1, self.data3)
        m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
        np.testing.assert_almost_equal(disp13, disp31)
        # try with 3d, 8 pts per
        rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
                          [0.41124708, -0.03966978, -0.31854548],
                          [0.91910318, 1.39451809, -0.15295084],
                          [2.00452023, 0.50150048, 0.29485268],
                          [0.09453595, 0.67528885, 0.03283872],
                          [0.07015232, 2.18892599, -1.67266852],
                          [0.65029688, 1.60551637, 0.80013549],
                          [-0.6607528, 0.53644208, 0.17033891]])
        rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
                          [-1.84888465, -0.92589646, -1.29335743],
                          [0.67031855, -1.35957463, 0.41938621],
                          [0.73967209, -0.20230757, 0.52418027],
                          [0.17752796, 0.09065607, 0.29827466],
                          [0.47999368, -0.88455717, -0.57547934],
                          [-0.11486344, -0.12608506, -0.3395779],
                          [-0.86106154, -0.28687488, 0.9644429]])
        res1, res3, disp13 = procrustes(rand1, rand3)
        res3_2, res1_2, disp31 = procrustes(rand3, rand1)
        np.testing.assert_almost_equal(disp13, disp31)
    def test_procrustes_shape_mismatch(self):
        """matrices of different shapes must be rejected"""
        with self.assertRaises(ValueError):
            procrustes(np.array([[1, 2], [3, 4]]),
                       np.array([[5, 6, 7], [8, 9, 10]]))
    def test_procrustes_empty_rows_or_cols(self):
        """empty matrices must be rejected"""
        empty = np.array([[]])
        with self.assertRaises(ValueError):
            procrustes(empty, empty)
    def test_procrustes_no_variation(self):
        """constant matrices (zero variance) must be rejected"""
        with self.assertRaises(ValueError):
            procrustes(np.array([[42, 42], [42, 42]]),
                       np.array([[45, 45], [45, 45]]))
    def test_get_disparity(self):
        """tests get_disparity"""
        # disparity is symmetric and matches the hand-computed value
        disp = _get_disparity(self.data1, self.data3)
        disp2 = _get_disparity(self.data3, self.data1)
        np.testing.assert_equal(disp, disp2)
        np.testing.assert_equal(disp, (3. * 2. + (1. + 1.5 ** 2)))
        d1 = np.append(self.data1, self.data1, 0)
        d3 = np.append(self.data3, self.data3, 0)
        disp3 = _get_disparity(d1, d3)
        disp4 = _get_disparity(d3, d1)
        np.testing.assert_equal(disp3, disp4)
        # 2x points in same configuration should give 2x disparity
        np.testing.assert_equal(disp3, 2. * disp)
    def test_center(self):
        """centering must bring every column mean to zero"""
        centered_mtx = _center(self.data1)
        column_means = centered_mtx.mean(0)
        for col_mean in column_means:
            np.testing.assert_equal(col_mean, 0.)
    def test_normalize(self):
        """normalization must make trace(A*A') equal 1"""
        norm_mtx = _normalize(self.data1)
        np.testing.assert_equal(np.trace(np.dot(norm_mtx,
                                np.transpose(norm_mtx))), 1.)
    # match_points isn't yet tested, as it's almost a private function
    # and test_procrustes() tests it implicitly.
if __name__ == '__main__':
    # Allow running this test module directly with ``python test_spatial.py``.
    main()
| jairideout/scikit-bio | skbio/stats/tests/test_spatial.py | Python | bsd-3-clause | 6,064 | [
"scikit-bio"
] | a542f1720b3731e44e4a943af02b7e709183ba709c878277ed609ee55a662180 |
#!/usr/bin/env python
# This tool takes a tab-delimited text file as input and creates filters on columns based on certain properties.
# The tool will skip over invalid lines within the file, informing the user about the number of lines skipped.
import sys, sets, re, os.path
from galaxy import eggs
assert sys.version_info[:2] >= ( 2, 4 )
def get_operands( filter_condition ):
    """Return the set of tokens left after stripping all operators.

    Used to spot identifiers in the user-supplied filter condition so
    that values shadowing local names can be rejected before exec'ing it.
    """
    # Note that the order of all_operators is important
    items_to_strip = ['+', '-', '**', '*', '//', '/', '%', '<<', '>>', '&', '|', '^', '~', '<=', '<', '>=', '>', '==', '!=', '<>', ' and ', ' or ', ' not ', ' is ', ' is not ', ' in ', ' not in ']
    for item in items_to_strip:
        if filter_condition.find( item ) >= 0:
            filter_condition = filter_condition.replace( item, ' ' )
    # The deprecated sets.Set is replaced with the built-in set, available
    # since Python 2.4 which this script already requires.
    operands = set( filter_condition.split( ' ' ) )
    return operands
def stop_err(msg):
    """Report *msg* on standard error, then terminate the script."""
    sys.stderr.write(msg)
    sys.exit()
# Command-line contract: input file, output file, filter condition,
# column count and comma-separated column types.
in_fname = sys.argv[1]
out_fname = sys.argv[2]
cond_text = sys.argv[3]
try:
    in_columns = int( sys.argv[4] )
    assert sys.argv[5]  # check to see that the column types variable isn't null
    in_column_types = sys.argv[5].split( ',' )
except:
    stop_err( "Data does not appear to be tabular. This tool can only be used with tab-delimited data." )
# Unescape if input has been escaped
mapped_str = {
    '__lt__': '<',
    '__le__': '<=',
    '__eq__': '==',
    '__ne__': '!=',
    '__gt__': '>',
    '__ge__': '>=',
    '__sq__': '\'',
    '__dq__': '"',
}
for key, value in mapped_str.items():
    cond_text = cond_text.replace( key, value )
# Attempt to determine if the condition includes executable stuff and, if so, exit
# dir() here lists the script's own names; any of them appearing as an
# operand in the user condition is treated as an injection attempt.
secured = dir()
operands = get_operands(cond_text)
for operand in operands:
    try:
        check = int( operand )
    except:
        if operand in secured:
            stop_err( "Illegal value '%s' in condition '%s'" % ( operand, cond_text ) )
# Prepare the column variable names and wrappers for column data types
# (columns are exposed to the condition as c1..cN, cast to their types).
cols, type_casts = [], []
for col in range( 1, in_columns + 1 ):
    col_name = "c%d" % col
    cols.append( col_name )
    col_type = in_column_types[ col - 1 ]
    type_cast = "%s(%s)" % ( col_type, col_name )
    type_casts.append( type_cast )
col_str = ', '.join( cols )    # 'c1, c2, c3, c4'
type_cast_str = ', '.join( type_casts )  # 'str(c1), int(c2), int(c3), str(c4)'
assign = "%s = line.split( '\\t' )" % col_str
wrap = "%s = %s" % ( col_str, type_cast_str )
skipped_lines = 0
first_invalid_line = 0
invalid_line = None
lines_kept = 0
total_lines = 0
out = open( out_fname, 'wt' )
# Read and filter input file, skipping invalid lines.  The loop is built
# as a source-code template so the user condition can be spliced in and
# exec'd below; comment/blank lines and lines failing the casts are
# counted as skipped.
code = '''
for i, line in enumerate( file( in_fname ) ):
    total_lines += 1
    line = line.rstrip( '\\r\\n' )
    if not line or line.startswith( '#' ):
        skipped_lines += 1
        if not invalid_line:
            first_invalid_line = i + 1
            invalid_line = line
        continue
    try:
        %s
        %s
        if %s:
            lines_kept += 1
            print >> out, line
    except:
        skipped_lines += 1
        if not invalid_line:
            first_invalid_line = i + 1
            invalid_line = line
''' % ( assign, wrap, cond_text )
valid_filter = True
try:
    exec code
except Exception, e:
    out.close()
    # A SyntaxError here means the user condition itself did not parse.
    if str( e ).startswith( 'invalid syntax' ):
        valid_filter = False
        stop_err( 'Filter condition "%s" likely invalid. See tool tips, syntax and examples.' % cond_text )
    else:
        stop_err( str( e ) )
if valid_filter:
    out.close()
    valid_lines = total_lines - skipped_lines
    print 'Filtering with %s, ' % cond_text
    if valid_lines > 0:
        print 'kept %4.2f%% of %d lines.' % ( 100.0*lines_kept/valid_lines, total_lines )
    else:
        print 'Possible invalid filter condition "%s" or non-existent column referenced. See tool tips, syntax and examples.' % cond_text
    if skipped_lines > 0:
        print 'Skipped %d invalid lines starting at line #%d: "%s"' % ( skipped_lines, first_invalid_line, invalid_line )
| dbcls/dbcls-galaxy | tools/stats/filtering.py | Python | mit | 4,048 | [
"Galaxy"
] | 450437924a3bbc9e830bd7ba2d7dcd3889449a820e071dfc4a4634da63670630 |
#!/bin/env python
""" This integration test is for "Inner" Computing Element SingularityComputingElement
This test is here and not in the unit tests because it requires singularity to be installed.
"""
import os
import shutil
from DIRAC import gLogger
from DIRAC.tests.Utilities.utils import find_all
from DIRAC.Resources.Computing.test.Test_PoolComputingElement import jobScript, _stopJob
from DIRAC.WorkloadManagementSystem.Utilities.Utils import createJobWrapper
# sut
from DIRAC.Resources.Computing.SingularityComputingElement import SingularityComputingElement
# Verbose logging for the integration run; locate the pilot.json fixture
# relative to the test tree.
gLogger.setLevel('DEBUG')
fj = find_all('pilot.json', '../', 'tests/Integration/Resources/Computing')[0]
def test_submitJob():
    """Submit a trivial payload script through SingularityComputingElement.

    Submission is expected to fail (no usable container image in the test
    environment) but must request a payload reschedule, and the CE must
    still account for the submitted job.  Cleans up fixture files at the end.
    """
    shutil.copy(fj, os.curdir)
    with open('testJob.py', 'w') as execFile:
        execFile.write(jobScript % '1')
    os.chmod('testJob.py', 0o755)
    ce = SingularityComputingElement('SingularityComputingElement')
    res = ce.submitJob('testJob.py', None)
    # Failure is the expected outcome here; the payload must be rescheduled.
    assert res['OK'] is False
    assert res['ReschedulePayload'] is True
    res = ce.getCEStatus()
    assert res['OK'] is True
    assert res['SubmittedJobs'] == 1
    _stopJob(1)
    for ff in ['testJob.py', 'pilot.json']:
        if os.path.isfile(ff):
            os.remove(ff)
def test_submitJobWrapper():
    """Submit a full job wrapper (multi-processor, MP-tagged) through the CE.

    The wrapper creation itself is assumed to work (not under test); the
    submission is expected to fail because the container image cannot be
    found, while still flagging a payload reschedule and counting the job.
    """
    with open('testJob.py', 'w') as execFile:
        execFile.write(jobScript % '2')
    os.chmod('testJob.py', 0o755)
    jobParams = {'JobType': 'User',
                 'Executable': 'testJob.py'}
    resourceParams = {'GridCE': 'some_CE'}
    optimizerParams = {}
    wrapperFile = createJobWrapper(2,
                                   jobParams,
                                   resourceParams,
                                   optimizerParams,
                                   logLevel='DEBUG')['Value']  # This is not under test, assuming it works fine
    shutil.copy(fj, os.curdir)
    ce = SingularityComputingElement('SingularityComputingElement')
    res = ce.submitJob(wrapperFile, proxy=None,
                       numberOfProcessors=4,
                       maxNumberOfProcessors=8,
                       wholeNode=False,
                       mpTag=True,
                       jobDesc={"jobParams": jobParams,
                                "resourceParams": resourceParams,
                                "optimizerParams": optimizerParams})
    assert res['OK'] is False  # This is False because the image can't be found
    assert res['ReschedulePayload'] is True
    res = ce.getCEStatus()
    assert res['OK'] is True
    assert res['SubmittedJobs'] == 1
    _stopJob(2)
    for ff in ['testJob.py', 'stop_job_2', 'job.info', 'std.out', 'pilot.json']:
        if os.path.isfile(ff):
            os.remove(ff)
    if os.path.isdir('job'):
        shutil.rmtree('job')
| yujikato/DIRAC | tests/Integration/Resources/Computing/Test_SingularityCE.py | Python | gpl-3.0 | 2,680 | [
"DIRAC"
] | 0d7e03216fd9a37c4698d70cf74abf2a0470bad357d4a9a1e79a2c98b8f46704 |
# Symmetry can only be used in EELS spectra calculations for GPAW svn 6305 above.
# Refer to A. Rubio and V. Olevano, et.al, Physical Review B 69, 245419 (2004)
# for comparision of results
import os
import sys
from math import sqrt
import numpy as np
from ase import Atoms
from ase.units import Bohr
from ase.parallel import paropen
from gpaw import GPAW
from gpaw.mpi import rank
from gpaw.mixer import Mixer
from gpaw.response.df0 import DF
from gpaw.utilities import devnull
# Silence stdout on all MPI ranks except the master.
if rank != 0:
    sys.stdout = devnull
# Stage toggles: ground-state DFT calculation and EELS spectrum computation.
GS = 1
EELS = 1
nband = 60
if GS:
    # Ground state of AB-stacked graphite on a 20x20x7 k-point grid.
    kpts = (20,20,7)
    a=1.42   # C-C bond length in Angstroem
    c=3.355  # interlayer distance in Angstroem
    # AB stack
    atoms = Atoms('C4',[
                  (1/3.0,1/3.0,0),
                  (2/3.0,2/3.0,0),
                  (0. ,0. ,0.5),
                  (1/3.0,1/3.0,0.5)
                  ],
                  pbc=(1,1,1))
    atoms.set_cell([(sqrt(3)*a/2.0,3/2.0*a,0),
                    (-sqrt(3)*a/2.0,3/2.0*a,0),
                    (0.,0.,2*c)],
                   scale_atoms=True)
    calc = GPAW(xc='LDA',
                kpts=kpts,
                h=0.2,
                basis='dzp',
                nbands=nband+10,
                convergence={'bands':nband},
                eigensolver='cg',
                mixer=Mixer(0.1,3),
                width=0.05, txt='out.txt')
    atoms.set_calculator(calc)
    # view(atoms)
    atoms.get_potential_energy()
    calc.write('graphite.gpw','all')
if EELS:
    # Compute EELS spectra along Gamma-M for a series of momentum
    # transfers, with the plane-wave cutoff increased with |q|.
    f = paropen('graphite_q_list', 'w')
    for i in range(1,8):
        w = np.linspace(0, 40, 401)
        q = np.array([i/20., 0., 0.]) # Gamma-M excitation
        #q = np.array([i/20., -i/20., 0.]) # Gamma-K excitation
        ecut = 40 + (i-1)*10
        df = DF(calc='graphite.gpw', nbands=nband, q=q, w=w,
                eta=0.2,ecut=ecut)
        df.get_EELS_spectrum(filename='graphite_EELS_' + str(i))
        df.check_sum_rule()
        print >> f, sqrt(np.inner(df.qq_v / Bohr, df.qq_v / Bohr)), ecut
if rank == 0:
    # Remove the (large) ground-state file once the spectra are done.
    os.remove('graphite.gpw')
| robwarm/gpaw-symm | gpaw/test/big/response/graphite_EELS.py | Python | gpl-3.0 | 2,044 | [
"ASE",
"GPAW"
] | 6cb0b90a02dce434f7ab92fa38e5d52c2ffc83f8b150c37ce50f0b89fa66b3c5 |
# $Id$
#
# Copyright (C) 2004-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import RDConfig
# change this to use another viewer:
# The concrete MolViewer implementation is selected at import time from the
# RD_MOLVIEWER configuration (star-import so this module re-exports it).
if RDConfig.molViewer in ('WEBLAB','DSVIEWER'):
    from rdkit.Chem.DSViewer import *
elif RDConfig.molViewer=='PYMOL':
    from rdkit.Chem.PyMol import *
else:
    raise ValueError,'invalid RD_MOLVIEWER specified'
if __name__=='__main__':
    # Demo: embed a molecule (from argv[1] or a default SMILES), show the raw
    # 3D geometry, UFF-optimize it, and show the optimized result alongside.
    import AllChem
    import sys
    if len(sys.argv)<2:
        smi ='c1cccc2c1cccc2CC(=O)N'
    else:
        smi = sys.argv[1]
    m = Chem.MolFromSmiles(smi)
    m = Chem.AddHs(m)
    AllChem.EmbedMolecule(m)
    v = MolViewer()
    v.ShowMol(m,'raw')
    AllChem.UFFOptimizeMolecule(m)
    v.ShowMol(m,'opt',showOnly=0,highlightFeatures=[(0,),(2,),(3,4)])
| rdkit/rdkit-orig | rdkit/Chem/ShowMols.py | Python | bsd-3-clause | 925 | [
"PyMOL",
"RDKit"
] | 74b201c5d4009b308bc7bd4d0e63763b058a34736ce6f597e23ba4707fc97bbc |
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from scipy.special import erf, erfinv
from scipy.stats import norm
from scipy.optimize import bisect
from numpy import mean, sqrt, asarray, max, min, any
from transforms import Transform
import pdb
class UnivariateGaussianization(Transform):
    """Map 1-D data to a standard normal via a mixture-of-Gaussians CDF.

    Forward: x -> Phi^-1(F_mog(x)); inverse recovers x by numeric
    root-finding on the mixture CDF.
    """
    def __init__(self, mog):
        # mog: mixture-of-Gaussians model exposing cdf(), loglikelihood()
        # and a ``means`` attribute (used to bracket the inverse search).
        self.mog = mog
    def apply(self, data):
        """Gaussianize *data*; returns a (1, N) array of standard-normal values."""
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)
        # apply model CDF
        data = self.mog.cdf(data)
        # apply inverse Gaussian CDF
        result = erfinv(data * 2. - 1.)
        # clip to avoid +-inf from erfinv at CDF values of exactly 0 or 1
        result[result > 6.] = 6.
        result[result < -6.] = -6.
        return result * sqrt(2.)
    def inverse(self, data, max_iter=100):
        """Undo the Gaussianization by root-finding on the mixture CDF.

        max_iter bounds the bisection iterations per data point.
        """
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)
        # apply Gaussian CDF
        data = norm.cdf(data)
        # apply inverse model CDF
        val_max = mean(self.mog.means) + 1.
        val_min = mean(self.mog.means) - 1.
        for t in range(data.shape[1]):
            # make sure root lies between val_min and val_max
            while float(self.mog.cdf(val_min)) > data[0, t]:
                val_min -= 1.
            while float(self.mog.cdf(val_max)) < data[0, t]:
                val_max += 1.
            # find root numerically
            data[0, t] = bisect(
                f=lambda x: float(self.mog.cdf(x)) - data[0, t],
                a=val_min,
                b=val_max,
                maxiter=max_iter,
                disp=False)
        return data
    def logjacobian(self, data):
        """Log absolute determinant of the transform's Jacobian at *data*."""
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)
        data_ug = self.apply(data)
        # change-of-variables: log p_mog(x) - log N(g(x); 0, 1)
        return self.mog.loglikelihood(data) - norm.logpdf(data_ug)
| lucastheis/isa | code/transforms/univariategaussianization.py | Python | mit | 1,634 | [
"Gaussian"
] | ba166315fd6e16686039618feefe7d2b789ed1ea335e1727c05304e10fc979ad |
import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron
# Example: run a GLIF neuron model on a square current pulse and collect
# its simulated traces.
# initialize the neuron
neuron_config = json_utilities.read('472423251_neuron_config.json')
neuron = GlifNeuron.from_dict(neuron_config)
# make a short square pulse. stimulus units should be in Amps.
stimulus = [ 0.0 ] * 100 + [ 10e-9 ] * 100 + [ 0.0 ] * 100
# important! set the neuron's dt value for your stimulus in seconds
neuron.dt = 5e-6
# simulate the neuron
output = neuron.run(stimulus)
# extract the simulated traces and spike times from the result dict
voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']
| wvangeit/AllenSDK | doc_template/examples/glif_ex2.py | Python | gpl-3.0 | 619 | [
"NEURON"
] | ee3bfa5f0fde9a25df6cd4b239d84178f1a4f07197bbcb4986ed7332e90269c9 |
# (C) 2017, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import subprocess
from pathlib import Path
import pwd
import os
import re
# Working directory shared by all tests; created world-writable so the
# RTS2 daemons (possibly running as another user via ssh) can write to it.
basepath='/tmp/u_point_unittest'
if not os.path.exists(basepath):
    # make sure that it is writable (ssh user@host)
    ret=os.makedirs(basepath, mode=0o0777)
import logging
logging.basicConfig(filename=os.path.join(basepath,'unittest.log'), level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
from rts2_environment import RTS2Environment
# sequence matters
def suite_with_connection():
    """Build the suite of tests that need a live RTS2 connection.

    Only the bright-stars httpd/DSS variant is currently enabled; the
    other TestAcquisition cases remain deliberately disabled.
    """
    suite = unittest.TestSuite()
    suite.addTest(TestAcquisition('test_u_acquire_rts2_dummy_httpd_dss_bright_stars'))
    return suite
def suite_no_connection():
    """Build the suite of tests that run without an RTS2 connection."""
    suite = unittest.TestSuite()
    for case_name in ('test_u_analyze_u_model', 'test_u_simulate_u_model'):
        suite.addTest(TestAnalysisModel(case_name))
    return suite
#@unittest.skip('class not yet implemented')
class TestAnalysisModel(unittest.TestCase):
    """Integration tests for u_analyze/u_simulate/u_model (no RTS2 needed)."""
    def tearDown(self):
        pass
    def setUp(self):
        pass
    def exec_cmd(self,cmd=None,using_shell=False):
        """Run *cmd* as a subprocess, log its output, return (stdout, stderr) line lists."""
        logger.info('== setUp: {} ==='.format(cmd[0]))
        logger.info('executing: {}'.format(cmd))
        proc = subprocess.Popen( cmd, shell=using_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdo, stde = proc.communicate()
        stdo_l = stdo.decode("utf-8").split('\n')
        for ln in stdo_l:
            logger.info('TestAcquisition::setUp:stdo:{}: {}'.format(cmd[0],ln))
        stde_l = stde.decode("utf-8").split('\n')
        for ln in stde_l:
            logger.info('TestAcquisition::setUp:stde:{}: {}'.format(cmd[0],ln))
        return stdo_l,stde_l
    #@unittest.skip('feature not yet implemented')
    def test_u_analyze_u_model(self):
        """Run u_analyze then u_model and check the fitted MA term is small."""
        logger.info('== {} =='.format(self._testMethodName))
        cmd=['../u_analyze.py','--toc','--base-path', basepath, '--level', 'DEBUG']
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
        # ToDo only quick, add meaningful asserts
        cmd=['../u_model.py','--toc','--base-path', basepath, '--level', 'DEBUG']
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
        # ToDo only quick, add meaningful asserts
        #ToDo somting wrong, float: ([ -+]?\d*\.\d+|\d+)
        # Extract the MA (polar-axis left-right alignment) value from stdout.
        m=re.compile('MA : polar axis left-right alignment[ ]?:([-+]?\d*.*?)\[arcsec\]')
        val=None
        for o_l in stdo_l:
            print(o_l)
            v = m.match(o_l)
            if v:
                print('match')
                val = abs(float(v.group(1)))
                break
        val_max=3.
        if val is not None:
            self.assertLess(val,val_max, msg='return value: {}, instead of max: {}'.format(val, val_max))
        else:
            # Force a failure (1. != 3.) if no MA value was found in the output.
            self.assertEqual(1.,val_max, msg='return value: None, instead of max: {}'.format(val_max))
    #@unittest.skip('feature not yet implemented')
    def test_u_simulate_u_model(self):
        """Simulate data with a known MA offset and check u_model recovers it."""
        logger.info('== {} =='.format(self._testMethodName))
        cmd=['../u_simulate.py','--toc','--base-path', basepath, '--level', 'DEBUG', '--model-class', 'u_upoint']
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
        # ToDo only quick, add meaningful asserts
        cmd=['../u_model.py','--toc','--base-path', basepath, '--analyzed-positions', 'simulation_data.txt', '--level', 'DEBUG', '--model-class', 'u_upoint']
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
        # ToDo only quick, add meaningful asserts
        #ToDo somting wrong, float: ([ -+]?\d*\.\d+|\d+)
        m=re.compile('MA : polar axis left-right alignment[ ]?:([-+]?\d*.*?)\[arcsec\]')
        val=None
        for o_l in stdo_l:
            print(o_l)
            v = m.match(o_l)
            if v:
                print('match')
                val = abs(float(v.group(1)))
                break
        val_max=3.
        if val is not None:
            # ToDo quick
            # The simulation injects a 30 arcsec MA; allow val_max deviation.
            diff=abs(30.-val)
            self.assertLess(diff,val_max, msg='return value: {}, instead of max: {}'.format(val, val_max))
        else:
            self.assertEqual(1.,val_max, msg='return value: None, instead of max: {}'.format(val_max))
    @unittest.skip('feature not yet implemented')
    def test_analysis_model(self):
        logger.info('== {} =='.format(self._testMethodName))
#@unittest.skip('class not yet implemented')
class TestAcquisition(RTS2Environment):
    """Integration tests for u_acquire against a (dummy) RTS2 environment."""
    #setUp, tearDown see base class
    def setUpCmds(self):
        """Create the observable catalog and nominal positions once, if missing."""
        fn=Path(os.path.join(basepath,'observable.cat'))
        if not fn.is_file():
            # ./u_select.py --toc
            cmd=[ '../u_select.py', '--base-path', basepath ]
            self.exec_cmd(cmd=cmd)
        fn=Path(os.path.join(basepath,'nominal_positions.nml'))
        if not fn.is_file():
            # ./u_acquire.py --create --toc --eq-mount
            cmd=[ '../u_acquire.py', '--create','--toc', '--eq-mount', '--base-path', basepath, '--force-overwrite', '--lon-step', '40', '--lat-step', '20']
            self.exec_cmd(cmd=cmd)
    def exec_cmd(self,cmd=None,using_shell=False):
        """Run *cmd* as a subprocess, log its output, return (stdout, stderr) line lists."""
        logger.info('== setUp: {} ==='.format(cmd[0]))
        logger.info('executing: {}'.format(cmd))
        proc = subprocess.Popen( cmd, shell=using_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdo, stde = proc.communicate()
        stdo_l = stdo.decode("utf-8").split('\n')
        for ln in stdo_l:
            logger.info('TestAcquisition::setUp:stdo:{}: {}'.format(cmd[0],ln))
        stde_l = stde.decode("utf-8").split('\n')
        for ln in stde_l:
            logger.info('TestAcquisition::setUp:stde:{}: {}'.format(cmd[0],ln))
        return stdo_l,stde_l
    @unittest.skip('feature not yet implemented')
    def test_u_acquire(self):
        logger.info('== {} =='.format(self._testMethodName))
        self.setUpCmds()
        cmd=['../u_acquire.py','--toc','--device-class','DeviceDss','--base-path', basepath]
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
    @unittest.skip('feature not yet implemented')
    def test_u_acquire_dss(self):
        logger.info('== {} =='.format(self._testMethodName))
        self.setUpCmds()
        cmd=['../u_acquire.py','--toc','--device-class','DeviceDss','--fetch-dss','--base-path', basepath]
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
    @unittest.skip('feature not yet implemented')
    def test_u_acquire_rts2_dummy_dss(self):
        logger.info('== {} =='.format(self._testMethodName))
        self.setUpCmds()
        # ToDo ugly
        # Hard-codes the rts2 checkout location under the current user's home.
        uid=self.uid=pwd.getpwuid(os.getuid())[0]
        acq_script='/home/{}/rts2/scripts/u_point/unittest/u_acquire_fetch_dss_continuous.sh'.format(uid)
        cmd=['rts2-scriptexec', '--port', '1617','-d','C0','-s',' exe {} '.format(acq_script)]
        stdo_l,stde_l=self.exec_cmd(cmd=cmd, using_shell=False)
    @unittest.skip('feature not yet implemented')
    def test_u_acquire_rts2_dummy_httpd_dss(self):
        logger.info('== {} =='.format(self._testMethodName))
        self.setUpCmds()
        cmd=['../u_acquire.py','--toc','--device-class','DeviceRts2Httpd','--fetch-dss','--base-path', basepath, '--level', 'DEBUG']
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
    #@unittest.skip('feature not yet implemented')
    def test_u_acquire_rts2_dummy_httpd_dss_bright_stars(self):
        """The only enabled acquisition test: httpd device, DSS fetch, bright stars."""
        logger.info('== {} =='.format(self._testMethodName))
        self.setUpCmds()
        cmd=['../u_acquire.py','--toc','--device-class','DeviceRts2Httpd','--fetch-dss','--use-bright-stars', '--base-path', basepath, '--level', 'DEBUG']
        stdo_l,stde_l=self.exec_cmd(cmd=cmd)
    @unittest.skip('feature not yet implemented')
    def test_acquisition(self):
        logger.info('== {} =='.format(self._testMethodName))
        pass
if __name__ == '__main__':
    # Only the no-connection suite is executed by default; the combined
    # suite (alltests) is kept here for manual runs against a live RTS2.
    suiteNoConnection = suite_no_connection()
    suiteWithConnection = suite_with_connection()
    # a list is a list: breaking unittest independency, ok it's Python
    alltests = unittest.TestSuite([suiteWithConnection,suiteNoConnection])
    #unittest.TextTestRunner(verbosity=0).run(alltests)
    unittest.TextTestRunner(verbosity=0).run(suiteNoConnection)
| RTS2/rts2 | scripts/u_point/unittest/test_executables.py | Python | lgpl-3.0 | 9,404 | [
"VisIt"
] | 76ff5764e4cad11bf1ee91e907aab7daf08082e41113bd50e1f6c616cd19218a |
# encoding=utf-8
from typing import List
import numpy as np
import xgboost as xgb
import logging
import re
import tensorflow as tf
import tensorflow_addons as tfa
def build_xgb_nn_model(tree_num=10):
    """
    use xgb leaf features inside.
    To have a different model space in moe, just use xgb leaf features here.
    :return:
    """
    # One scalar input per tree (the leaf index), each embedded separately.
    inputs = []
    embeddings = []
    for i in range(tree_num):
        inputs.append(tf.keras.Input(shape=(1,), name="tree_" + str(i)))
        # NOTE(review): embedding vocab of 128 assumes leaf indices < 128
        # (max_depth=6 gives at most 64 leaves) -- confirm if depth changes.
        embeddings.append(
            tf.keras.layers.Embedding(128, 32)(inputs[-1])
        )
    # leaf = tf.keras.Input(shape=(10,), name="tree_leaf")
    # inputs.append(leaf)
    # Concatenate all per-tree embeddings, flatten, and classify.
    leaf_context = tf.keras.layers.concatenate(embeddings, axis=-1)
    flatten_context = tf.keras.layers.Flatten()(leaf_context)
    fusion_context = tf.keras.layers.Dense(32, 'relu')(flatten_context)
    output = tf.keras.layers.Dense(1, 'sigmoid')(fusion_context)
    model = tf.keras.models.Model(inputs=inputs, outputs=[output], name='xgb_nn')
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[
        'accuracy',
        tf.keras.metrics.Precision(),
        tf.keras.metrics.Recall(),
        tf.keras.metrics.AUC(),
        tfa.metrics.F1Score(num_classes=1, threshold=0.5)
    ])
    model.summary()
    return model
class XGBLeafDataSource(object):
    """
    read the files and parse out the "app_rate" feature and use the xgb to get the leafs.
    """
    # Class-level classifier shared by all instances; the pre-trained model
    # is loaded at import time if the file exists.
    xgb_modelfile = 'xgb.fornn.model'
    xgb_param_dist = {'objective': 'binary:logistic', 'n_estimators': 10, 'max_depth': 6, 'subsample': 0.8,
                      'learning_rate': 0.1}
    xgbclf = xgb.XGBClassifier(**xgb_param_dist)
    try:
        xgbclf.load_model(xgb_modelfile)
        logging.info('load xgb model ok')
    except:
        # NOTE(review): bare except silently falls back to an untrained
        # classifier when the model file is missing or unreadable.
        pass
    def __init__(self, path, batch_size, span):
        self.path = path
        self.batch_size = batch_size
        self.span = span  # feature span
        pass
    def iter_test(self):
        """Yield endless batches of random features/labels for smoke testing."""
        while True:
            # features_size = 406
            features, label = np.random.random((self.batch_size, 406)), np.random.randint(0, 2, (self.batch_size, 1))
            # print(label)
            yield {'app_rate': features}, label
    def iter(self, epoch=None):
        """Yield batches of per-tree xgb leaf indices and labels from the file.

        Iterates the file *epoch* times (forever when ``epoch is None``).
        Each yielded item is ({'tree_0': ..., ...}, labels).
        """
        k = 0
        s, e = self.span
        while True:
            # print(k)
            if k == epoch:
                # k = 0
                break
            k += 1
            with open(self.path, 'r') as f:
                batch = []
                ys = []
                for line in f:
                    # Line format: <id> <label> <features...>; the feature
                    # span [s:e) selects the app_rate columns.
                    us = line.split(" ")
                    y = int(us[1])
                    app_rate = [float(i) for i in us[s:e]]
                    # print(app_rate)
                    batch.append(app_rate)
                    ys.append(y)
                    if len(batch) == self.batch_size:
                        # Map raw features to per-tree leaf indices, one
                        # named input per tree. Note: a trailing partial
                        # batch at EOF is dropped.
                        leafs = XGBLeafDataSource.xgbclf.apply(np.array(batch))
                        fs = {}
                        t = 0
                        for tree_leaf in np.transpose(leafs):
                            fs['tree_' + str(t)] = tree_leaf
                            t += 1
                        yield fs, np.array(ys)
                        batch = []
                        ys = []
def xgbnn_train():
    """Train the xgb-leaf neural model, checkpointing on best validation F1."""
    train_path = "./data_store/train_data"
    valid_path = "./data_store/valid_data"
    model = build_xgb_nn_model(10)
    model_path = './xgbnn_model/weights'
    csvfile = './xgbnn.csv'
    submit = './submit.xgbnn.csv'
    batch_size = 10
    # Feature span (47, 453) selects the 406 app_rate columns.
    phone_source = XGBLeafDataSource(train_path, batch_size, (47, 453))
    valid_phone_source = XGBLeafDataSource(valid_path, batch_size, (47, 453))
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=model_path,
        save_weights_only=True,
        monitor='val_f1_score',
        mode='max',
        save_best_only=True)
    csvlogger = tf.keras.callbacks.CSVLogger(filename=csvfile)
    model.fit(phone_source.iter(),
              validation_data=valid_phone_source.iter(),
              steps_per_epoch=100, epochs=20,
              validation_steps=20,
              callbacks=[model_checkpoint_callback,csvlogger ]
              )
    # model.save_weights(model_path)
def main():
    """Debug helper: dump one pass of xgb leaf batches from the training data."""
    train_path = "./data_store/train_data"
    valid_path = "./data_store/valid_data"
    test_path = "./data_store/test_data"
    batch_size = 10
    phone_source = XGBLeafDataSource(train_path, batch_size, (47, 453))
    for leafs in phone_source.iter(1):
        # pass
        # leafs = xgbclf.apply(features['app_rate'])
        print(type(leafs))
        print(leafs)
        # Pause between batches so the output can be inspected.
        input("Press any key .. ")
    pass
if __name__ == '__main__':
    # Training is the default entry point; main() is the inspection helper.
    xgbnn_train()
    # main()
| eryueniaobp/contest | xiaomi/phone_xgbnn.py | Python | apache-2.0 | 4,774 | [
"MOE"
] | 8e8c62ca3c9ea56038258466f821954263a380403f304960d9c56d54cac21a4b |
########################################################################
# $HeadURL $
# File: RequestTask.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2011/10/12 12:08:51
########################################################################
""" :mod: RequestTask
=================
.. module: RequestTask
:synopsis: base class for requests execution in separate subprocesses
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
Base class for requests execution in a separate subprocesses.
:deprecated:
"""
__RCSID__ = "$Id$"
# #
# @file RequestTask.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2011/10/12 12:09:18
# @brief Definition of RequestTask class.
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
class RequestTask( object ):
  """
  .. class:: RequestTask

  Base class for DMS 'transfer', 'removal' and 'register' Requests processing.
  This class is meant to be executed as a ProcessTask inside ProcessPool.

  The most important and common global DIRAC objects are created in the RequestTask
  constructor. This includes gLogger, gConfig, gProxyManager, S_OK and S_ERROR. The
  constructor also imports a set of common modules: os, sys, re, time and everything
  from the types module.

  All other DIRAC tools and clients (i.e. DataManager) are instance attributes of the
  RequestTask class. All currently proxied tools are::

    DataLoggingClient -- self.dataLoggingClient()
    RequestClient -- self.requestClient()
    StorageFactory -- self.storageFactory()

  SubLogger message handles for all levels are also proxied, so you can directly use
  them in your code, i.e.::

    self.info("An info message")
    self.debug("This will be shown only in debug")

  For handling sub-requests one has to register their action handlers using the
  :self.addOperationAction: method. This method checks if the handler is defined as a
  method of the inherited class and then puts its definition into the internal
  operation dispatcher dictionary with a key of the sub-request's operation name.

  Each operation handler should have the signature::

    def operationName( self, index, requestObj, subRequestAttrs, subRequestFiles )

  where index is a sub-request counter, requestObj is a RequestContainer instance,
  subRequestAttrs is a dict with sub-request attributes and subRequestFiles is a dict
  with files attached to the sub-request.

  Handlers should always return S_OK with a value of the (modified or not) requestObj,
  S_ERROR with some error message otherwise.

  Processing of a request is done automatically in self.__call__, one doesn't have to
  worry about changing credentials, looping over subrequests or request finalizing --
  only sub-request processing matters in all the inherited classes.

  Concerning :MonitringClient: (or better known its global instance :gMonitor:), if
  someone wants to send some metric over there, she has to put in agent's code
  registration of activity and then in a particular task use :RequestTask.addMark: to
  save monitoring data. All monitored activities are held in :RequestTask.__monitor:
  dict which at the end of processing is returned from :RequestTask.__call__:. The
  values are then processed and pushed to the gMonitor instance in the default
  callback function.
  """
  # # reference to DataLoggingClient
  __dataLoggingClient = None
  # # reference to RequestClient
  __requestClient = None
  # # reference to StorageFactory
  __storageFactory = None
  # # subLogger
  __log = None
  # # request type
  __requestType = None
  # # placeholder for request owner DN
  requestOwnerDN = None
  # # placeholder for request owner group
  requestOwnerGroup = None
  # # operation dispatcher for SubRequests, a dictionary "operation" => methodToRun
  __operationDispatcher = {}
  # # holder for DataManager proxy file
  __dataManagerProxy = None
  # # monitoring dict
  __monitor = {}

  def __init__( self, requestString, requestName, executionOrder, jobID, configPath ):
    """ c'tor

    :param self: self reference
    :param str requestString: XML serialised RequestContainer
    :param str requestName: request name
    :param list executionOrder: request execution order
    :param int jobID: jobID
    :param str configPath: path in CS for parent agent
    """
    # # python fixtures exported to the global namespace so that handlers
    # # implemented in subclasses can use them without importing
    import os, os.path, sys, time, re, types
    self.makeGlobal( "os", os )
    self.makeGlobal( "os.path", os.path )
    self.makeGlobal( "sys", sys )
    self.makeGlobal( "time", time )
    self.makeGlobal( "re", re )
    # # export all Types from types
    [ self.makeGlobal( item, getattr( types, item ) ) for item in dir( types ) if "Type" in item ]
    # # DIRAC fixtures
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    self.__log = gLogger.getSubLogger( "%s/%s" % ( self.__class__.__name__, str( requestName ) ) )
    self.always = self.__log.always
    self.notice = self.__log.notice
    self.info = self.__log.info
    self.debug = self.__log.debug
    self.warn = self.__log.warn
    self.error = self.__log.error
    self.exception = self.__log.exception
    self.fatal = self.__log.fatal
    from DIRAC import S_OK, S_ERROR
    from DIRAC.ConfigurationSystem.Client.Config import gConfig
    from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
    from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupsWithVOMSAttribute
    from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
    # # NOTE: module name fixed here -- the original imported from
    # # 'Client.Datamanager' (lowercase 'm'), which does not match the
    # # module-level 'Client.DataManager' import and raises ImportError on
    # # case-sensitive filesystems
    from DIRAC.DataManagementSystem.Client.DataManager import DataManager
    from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
    # # export DIRAC global tools and functions
    self.makeGlobal( "S_OK", S_OK )
    self.makeGlobal( "S_ERROR", S_ERROR )
    self.makeGlobal( "gLogger", gLogger )
    self.makeGlobal( "gConfig", gConfig )
    self.makeGlobal( "gProxyManager", gProxyManager )
    self.makeGlobal( "getGroupsWithVOMSAttribute", getGroupsWithVOMSAttribute )
    self.makeGlobal( "gConfigurationData", gConfigurationData )
    # # save request string
    self.requestString = requestString
    # # build request object
    from DIRAC.RequestManagementSystem.Client.RequestContainer import RequestContainer
    self.requestObj = RequestContainer( init = False )
    self.requestObj.parseRequest( request = self.requestString )
    # # save request name
    self.requestName = requestName
    # # .. and jobID
    self.jobID = jobID
    # # .. and execution order
    self.executionOrder = executionOrder
    # # save config path
    self.__configPath = configPath
    # # set requestType
    self.setRequestType( gConfig.getValue( os.path.join( configPath, "RequestType" ), "" ) )
    # # get log level
    self.__log.setLevel( gConfig.getValue( os.path.join( configPath, self.__class__.__name__, "LogLevel" ), "INFO" ) )
    # # clear monitoring
    self.__monitor = {}
    # # save DataManager proxy
    if "X509_USER_PROXY" in os.environ:
      self.info( "saving path to current proxy file" )
      self.__dataManagerProxy = os.environ["X509_USER_PROXY"]
    else:
      self.error( "'X509_USER_PROXY' environment variable not set" )
    # # DMS helpers -- instantiated once each (the original code constructed
    # # each of them twice, which was redundant)
    self.fc = FileCatalog()
    self.dm = DataManager()

  def dataManagerProxy( self ):
    """ get dataManagerProxy file

    :param self: self reference
    """
    return self.__dataManagerProxy

  def addMark( self, name, value = 1 ):
    """ add mark to __monitor dict

    :param self: self reference
    :param name: mark name
    :param value: value to be added
    """
    # setdefault alone suffices -- the original's explicit membership test
    # before it was redundant
    self.__monitor.setdefault( name, 0 )
    self.__monitor[name] += value

  def monitor( self ):
    """ get monitoring dict

    :param self: self reference
    """
    return self.__monitor

  def makeGlobal( self, objName, objDef ):
    """ export :objDef: to the global namespace using :objName: name

    Symbols already present in the builtins are left untouched.

    :param self: self reference
    :param str objName: symbol name
    :param mixed objDef: symbol definition
    """
    if objName not in __builtins__:
      # __builtins__ is a dict inside an imported module and a module in __main__
      if type( __builtins__ ) == type( {} ):
        __builtins__[objName] = objDef
      else:
        setattr( __builtins__, objName, objDef )
      return True

  def requestType( self ):
    """ get request type

    :params self: self reference
    """
    return self.__requestType

  def setRequestType( self, requestType ):
    """ set request type

    :param self: self reference
    """
    self.debug( "Setting requestType to %s" % str( requestType ) )
    self.__requestType = requestType

  @classmethod
  def dataLoggingClient( cls ):
    """ DataLoggingClient getter (lazily instantiated singleton)

    :param cls: class reference
    """
    if not cls.__dataLoggingClient:
      from DIRAC.DataManagementSystem.Client.DataLoggingClient import DataLoggingClient
      cls.__dataLoggingClient = DataLoggingClient()
    return cls.__dataLoggingClient

  @classmethod
  def requestClient( cls ):
    """ RequestClient getter (lazily instantiated singleton)

    :param cls: class reference
    """
    if not cls.__requestClient:
      from DIRAC.RequestManagementSystem.Client.RequestClient import RequestClient
      cls.__requestClient = RequestClient()
    return cls.__requestClient

  @classmethod
  def storageFactory( cls ):
    """ StorageFactory getter (lazily instantiated singleton)

    :param cls: class reference
    """
    if not cls.__storageFactory:
      from DIRAC.Resources.Storage.StorageFactory import StorageFactory
      cls.__storageFactory = StorageFactory()
    return cls.__storageFactory

  def changeProxy( self, ownerDN, ownerGroup ):
    """ get proxy from gProxyManager, save it to file

    :param self: self reference
    :param str ownerDN: request owner DN
    :param str ownerGroup: request owner group
    :return: S_OK with name of newly created owner proxy file
    """
    ownerProxy = gProxyManager.downloadVOMSProxy( str( ownerDN ), str( ownerGroup ) )
    if not ownerProxy["OK"] or not ownerProxy["Value"]:
      reason = ownerProxy["Message"] if "Message" in ownerProxy else "No valid proxy found in ProxyManager."
      return S_ERROR( "Change proxy error for '%s'@'%s': %s" % ( ownerDN, ownerGroup, reason ) )
    ownerProxyFile = ownerProxy["Value"].dumpAllToFile()
    if not ownerProxyFile["OK"]:
      return S_ERROR( ownerProxyFile["Message"] )
    ownerProxyFile = ownerProxyFile["Value"]
    # # the proxy is activated by pointing the environment at its file
    os.environ["X509_USER_PROXY"] = ownerProxyFile
    return S_OK( ownerProxyFile )

  ######################################################################
  # operationDispatcher
  @classmethod
  def operationDispatcher( cls ):
    """ operation dispatcher getter

    :param cls: class reference
    """
    return cls.__operationDispatcher

  @classmethod
  def addOperationAction( cls, operation, methodToRun, overwrite = True ):
    """ register handler :methodToRun: for SubRequest operation :operation:

    :warn: all handlers should have the same signature

    :param self: self reference
    :param str operation: SubRequest operation name
    :param MethodType methodToRun: handler to be executed for SubRequest
    :param bool overwrite: flag to overwrite handler, if already present
    :return: S_OK/S_ERROR

    Every action handler should return S_OK with a structure::

      { "OK" : True,
        "Value" : requestObj # that has been sent to operation handler
      }

    otherwise S_ERROR.
    """
    if operation in cls.__operationDispatcher and not overwrite:
      return S_ERROR( "addOperationAction: operation for '%s' is already registered" % operation )
    if type( methodToRun ) is not MethodType:
      return S_ERROR( "addOperationAction: wrong type (%s = types.MethodType) for '%s' operation" % \
                     ( str( type( methodToRun ) ), operation ) )
    cls.__operationDispatcher[operation] = methodToRun
    return S_OK()

  def __call__( self ):
    """ generic function to process one Request of a type requestType

    This method could be run in a thread.

    :param self: self reference
    :param str requestType: request type
    :return: S_OK/S_ERROR
    """
    self.always( "executing request %s" % self.requestName )
    ################################################################
    # # get ownerDN and ownerGroup
    ownerDN = self.requestObj.getAttribute( "OwnerDN" )
    if not ownerDN["OK"]:
      return ownerDN
    ownerDN = ownerDN["Value"]
    ownerGroup = self.requestObj.getAttribute( "OwnerGroup" )
    if not ownerGroup["OK"]:
      return ownerGroup
    ownerGroup = ownerGroup["Value"]
    # # save request owner
    self.requestOwnerDN = ownerDN if ownerDN else ""
    self.requestOwnerGroup = ownerGroup if ownerGroup else ""
    #################################################################
    # # change proxy
    ownerProxyFile = None
    if ownerDN and ownerGroup:
      ownerProxyFile = self.changeProxy( ownerDN, ownerGroup )
      if not ownerProxyFile["OK"]:
        # # typo fixed in the message below ('handleReuqest' -> 'handleRequest')
        self.error( "handleRequest: unable to get proxy for '%s'@'%s': %s" % ( ownerDN,
                                                                               ownerGroup,
                                                                               ownerProxyFile["Message"] ) )
        # update = self.putBackRequest( self.requestName, self.requestString )
        # if not update["OK"]:
        #  self.error( "handleRequest: error when updating request: %s" % update["Message"] )
        #  return update
        # return ownerProxyFile
        ownerProxyFile = None
      else:
        ownerProxyFile = ownerProxyFile["Value"]
    if ownerProxyFile:
      # self.ownerProxyFile = ownerProxyFile
      self.info( "Will execute request for '%s'@'%s' using proxy file %s" % ( ownerDN, ownerGroup, ownerProxyFile ) )
    else:
      self.info( "Will execute request for DataManager using her/his proxy" )
    #################################################################
    # # execute handlers
    ret = { "OK" : False, "Message" : "" }
    useServerCert = gConfig.useServerCertificate()
    try:
      # Execute task with the owner proxy even for contacting DIRAC services
      if useServerCert:
        gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
      ret = self.handleRequest()
    finally:
      # # always restore the server certificate setting
      if useServerCert:
        gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
      # # delete owner proxy
      if self.__dataManagerProxy:
        os.environ["X509_USER_PROXY"] = self.__dataManagerProxy
        if ownerProxyFile and os.path.exists( ownerProxyFile ):
          os.unlink( ownerProxyFile )
    if not ret["OK"]:
      self.error( "handleRequest: error during request processing: %s" % ret["Message"] )
      self.error( "handleRequest: will put original request back" )
      update = self.putBackRequest( self.requestName, self.requestString )
      if not update["OK"]:
        self.error( "handleRequest: error when putting back request: %s" % update["Message"] )
    # # return at least
    return ret

  def handleRequest( self ):
    """ read SubRequests and ExecutionOrder, fire registered handlers upon SubRequests operations

    :param self: self reference
    :param dict requestDict: request dictionary as read from self.readRequest
    """
    ##############################################################
    # here comes the processing
    ##############################################################
    res = self.requestObj.getNumSubRequests( self.__requestType )
    if not res["OK"]:
      errMsg = "handleRequest: failed to obtain number of '%s' subrequests." % self.__requestType
      self.error( errMsg, res["Message"] )
      return S_ERROR( res["Message"] )
    # # for gMonitor
    self.addMark( "Execute", 1 )
    # # process sub requests
    for index in range( res["Value"] ):
      self.info( "handleRequest: processing subrequest %s." % str( index ) )
      subRequestAttrs = self.requestObj.getSubRequestAttributes( index, self.__requestType )["Value"]
      if subRequestAttrs["ExecutionOrder"]:
        subExecutionOrder = int( subRequestAttrs["ExecutionOrder"] )
      else:
        subExecutionOrder = 0
      subRequestStatus = subRequestAttrs["Status"]
      if subRequestStatus != "Waiting":
        self.info( "handleRequest: subrequest %s has status '%s' and is not to be executed." % ( str( index ),
                                                                                                 subRequestStatus ) )
        continue
      if subExecutionOrder <= self.executionOrder:
        operation = subRequestAttrs["Operation"]
        if operation not in self.operationDispatcher():
          self.error( "handleRequest: '%s' operation not supported" % operation )
        else:
          self.info( "handleRequest: will execute %s '%s' subrequest" % ( str( index ), operation ) )
          # # get files
          subRequestFiles = self.requestObj.getSubRequestFiles( index, self.__requestType )["Value"]
          # # execute operation action
          ret = self.operationDispatcher()[operation].__call__( index,
                                                                self.requestObj,
                                                                subRequestAttrs,
                                                                subRequestFiles )
          ################################################
          # # error in operation action?
          if not ret["OK"]:
            self.error( "handleRequest: error when handling subrequest %s: %s" % ( str( index ), ret["Message"] ) )
            self.requestObj.setSubRequestAttributeValue( index, self.__requestType, "Error", ret["Message"] )
          else:
            # # update ref to requestObj
            self.requestObj = ret["Value"]
            # # check if subrequest status == Done, disable finalisation if not
            subRequestDone = self.requestObj.isSubRequestDone( index, self.__requestType )
            if not subRequestDone["OK"]:
              self.error( "handleRequest: unable to determine subrequest status: %s" % subRequestDone["Message"] )
            else:
              if not subRequestDone["Value"]:
                self.warn( "handleRequest: subrequest %s is not done yet" % str( index ) )
    ################################################
    # Generate the new request string after operation
    newRequestString = self.requestObj.toXML()['Value']
    update = self.putBackRequest( self.requestName, newRequestString )
    if not update["OK"]:
      self.error( "handleRequest: error when updating request: %s" % update["Message"] )
      return update
    # # get request status
    if self.jobID:
      requestStatus = self.requestClient().getRequestStatus( self.requestName )
      if not requestStatus["OK"]:
        return requestStatus
      requestStatus = requestStatus["Value"]
      # # finalize request if jobID is present and request status = 'Done'
      self.info( "handleRequest: request status is %s" % requestStatus )
      if ( requestStatus["RequestStatus"] == "Done" ) and ( requestStatus["SubRequestStatus"] not in ( "Waiting", "Assigned" ) ):
        self.debug( "handleRequest: request is going to be finalised" )
        finalize = self.requestClient().finalizeRequest( self.requestName, self.jobID )
        if not finalize["OK"]:
          self.error( "handleRequest: error in request finalization: %s" % finalize["Message"] )
          return finalize
        self.info( "handleRequest: request is finalised" )
    # # for gMonitor
    self.addMark( "Done", 1 )
    # # should return S_OK with monitor dict
    return S_OK( { "monitor" : self.monitor() } )

  def putBackRequest( self, requestName, requestString ):
    """ put request back

    :param self: self reference
    :param str requestName: request name
    :param str requestString: XML-serialised request
    """
    update = self.requestClient().updateRequest( requestName, requestString )
    if not update["OK"]:
      self.error( "putBackRequest: error when updating request: %s" % update["Message"] )
      return update
    return S_OK()
| avedaee/DIRAC | DataManagementSystem/private/RequestTask.py | Python | gpl-3.0 | 20,366 | [
"DIRAC"
] | 1bc1610cda3333a630138a3f60da79807479f5af86a004dedb86cec478de1234 |
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
# Original package author attribution.
__author__ = 'Brian Wickman'
| foursquare/commons-old | src/python/twitter/common/java/__init__.py | Python | apache-2.0 | 930 | [
"Brian"
] | 8784f5842e8a254a38f75a994593af7acffed888a8ce727d35c334692bf88da4 |
#!/usr/bin/env python3
## INFO ########################################################################
## ##
## kibu-vr ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + Blender + Arch Linux ##
## Version: 0.1.0.154 (20150402) ##
## File: build.py ##
## ##
## For more information about the project, visit ##
## <http://vr.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from os import getcwd
from copy import deepcopy

# Module level constants
CURRENT_DIR = '.'
POST_COMMIT = 0 # True or False => skip CLIC (version) changes only

# Import cutils modules; the whole build step is best-effort and is skipped
# with a warning when the cutils toolchain is not installed.
try:
    import cutils.ccom
    import cutils.clic
    import cutils.cver

    # Extra file extensions / build-file names the project wants scanned.
    web_dev = ('.js', '.css', '.html')
    c_build = ('SConstruct',)

    # Skip generated output and the bundled Oculus SDK sources.
    exclude = deepcopy(cutils.ccom.EXCLUDE)
    exclude['folders'] += ['build', 'ovr_sdk']

    # The comment collector and the license stamper scan the same extended
    # set of extensions and names, so extend both include maps in one pass.
    ccom_include = deepcopy(cutils.ccom.INCLUDE)
    clic_include = deepcopy(cutils.clic.INCLUDE)
    for include in (ccom_include, clic_include):
        include['extensions'] += web_dev
        include['names'] += c_build

    # Update version
    cutils.cver.version(CURRENT_DIR, sub_max=9, rev_max=9, build_max=999)
    # Collect all special comments
    cutils.ccom.collect(CURRENT_DIR,
                        include=ccom_include,
                        exclude=exclude,
                        overwrite=POST_COMMIT)
    # Update header comments
    cutils.clic.header(CURRENT_DIR,
                       include=clic_include,
                       exclude=exclude,
                       overwrite=POST_COMMIT)
except ImportError:
    print('[WARNING] cutils modules are missing: '
          'install it from http://www.cutils.org')
| kitchenbudapest/vr | build.py | Python | gpl-3.0 | 3,670 | [
"VisIt"
] | bef39453e88da063e9e981d5ef2067e80489978e794a06539be894555160ca88 |
"""
Tests for the Course Outline view and supporting views.
"""
import datetime
import json
import re
import six
from completion import waffle
from completion.models import BlockCompletion
from completion.test_utils import CompletionWaffleTestMixin
from django.contrib.sites.models import Site
from django.test import override_settings
from django.urls import reverse
from django.utils import timezone
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import Mock, patch
from opaque_keys.edx.keys import CourseKey, UsageKey
from pyquery import PyQuery as pq
from six import text_type
from waffle.models import Switch
from waffle.testutils import override_switch
from lms.djangoapps.courseware.tests.factories import StaffFactory
from gating import api as lms_gating_api
from lms.djangoapps.course_api.blocks.transformers.milestones import MilestonesAndSpecialExamsTransformer
from openedx.core.djangoapps.schedules.models import Schedule
from openedx.core.djangoapps.schedules.tests.factories import ScheduleFactory
from openedx.core.lib.gating import api as gating_api
from openedx.features.course_experience.views.course_outline import (
DEFAULT_COMPLETION_TRACKING_START,
CourseOutlineFragmentView
)
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .test_course_home import course_home_url
TEST_PASSWORD = 'test'
GATING_NAMESPACE_QUALIFIER = '.gating'
class TestCourseOutlinePage(SharedModuleStoreTestCase):
    """
    Test the course outline view.

    Builds three differently-shaped courses (self-paced single chain,
    two-subsection, graded-with-due-date) and checks what the course home
    outline renders for an enrolled user.
    """
    @classmethod
    def setUpClass(cls):
        """
        Set up an array of various courses to be tested.
        """
        # setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase
        # pylint: disable=super-method-not-called
        with super(TestCourseOutlinePage, cls).setUpClassAndTestData():
            cls.courses = []
            # Course 0: self-paced, one chapter -> one sequential -> one vertical.
            course = CourseFactory.create(self_paced=True)
            with cls.store.bulk_operations(course.id):
                chapter = ItemFactory.create(category='chapter', parent_location=course.location)
                sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
                vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
                course.children = [chapter]
                chapter.children = [sequential]
                sequential.children = [vertical]
            cls.courses.append(course)
            # Course 1: one chapter with two sequentials, each holding one
            # named vertical.
            course = CourseFactory.create()
            with cls.store.bulk_operations(course.id):
                chapter = ItemFactory.create(category='chapter', parent_location=course.location)
                sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
                sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
                vertical = ItemFactory.create(
                    category='vertical',
                    parent_location=sequential.location,
                    display_name="Vertical 1"
                )
                vertical2 = ItemFactory.create(
                    category='vertical',
                    parent_location=sequential2.location,
                    display_name="Vertical 2"
                )
                course.children = [chapter]
                chapter.children = [sequential, sequential2]
                sequential.children = [vertical]
                sequential2.children = [vertical2]
            cls.courses.append(course)
            # Course 2: a graded 'Homework' sequential with a due date, so the
            # outline is expected to also show the due date and format.
            course = CourseFactory.create()
            with cls.store.bulk_operations(course.id):
                chapter = ItemFactory.create(category='chapter', parent_location=course.location)
                sequential = ItemFactory.create(
                    category='sequential',
                    parent_location=chapter.location,
                    due=datetime.datetime.now(),
                    graded=True,
                    format='Homework',
                )
                vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
                course.children = [chapter]
                chapter.children = [sequential]
                sequential.children = [vertical]
            cls.courses.append(course)
    @classmethod
    def setUpTestData(cls):
        """Set up and enroll our fake user in the course."""
        cls.user = UserFactory(password=TEST_PASSWORD)
        for course in cls.courses:
            CourseEnrollment.enroll(cls.user, course.id)
    def setUp(self):
        """
        Set up for the tests.
        """
        super(TestCourseOutlinePage, self).setUp()
        self.client.login(username=self.user.username, password=TEST_PASSWORD)
    def test_outline_details(self):
        # Every chapter and sequential display name should appear on the
        # course home page; graded sequentials must also show their due date
        # (formatted as below) and assignment format.
        for course in self.courses:
            url = course_home_url(course)
            response = self.client.get(url)
            self.assertTrue(course.children)
            for chapter in course.children:
                self.assertContains(response, chapter.display_name)
                self.assertTrue(chapter.children)
                for sequential in chapter.children:
                    self.assertContains(response, sequential.display_name)
                    if sequential.graded:
                        self.assertContains(response, sequential.due.strftime(u'%Y-%m-%d %H:%M:%S'))
                        self.assertContains(response, sequential.format)
                    self.assertTrue(sequential.children)
    def test_reset_course_deadlines(self):
        # Give the enrollment a schedule that started yesterday, POST to the
        # 'reset_deadlines' endpoint, and expect the schedule start to move
        # to today.
        course = self.courses[0]
        enrollment = CourseEnrollment.objects.get(course_id=course.id)
        ScheduleFactory(
            start_date=timezone.now() - datetime.timedelta(1),
            enrollment=enrollment
        )
        url = '{}{}'.format(course_home_url(course), 'reset_deadlines')
        self.client.post(url)
        updated_schedule = Schedule.objects.get(enrollment=enrollment)
        # NOTE(review): only the day-of-month is compared, not the full date --
        # presumably to sidestep timezone offsets; confirm this is intended.
        self.assertEqual(updated_schedule.start_date.day, datetime.datetime.today().day)
class TestCourseOutlinePageWithPrerequisites(SharedModuleStoreTestCase, MilestonesTestCaseMixin):
    """
    Test the course outline view with prerequisites.

    A 'Prerequisite Exam' sequential gates a 'Gated Content' sequential; the
    outline should show the gated subsection as locked until the prerequisite
    is completed.
    """
    TRANSFORMER_CLASS_TO_TEST = MilestonesAndSpecialExamsTransformer
    @classmethod
    def setUpClass(cls):
        """
        Creates a test course that can be used for non-destructive tests
        """
        # pylint: disable=super-method-not-called
        cls.PREREQ_REQUIRED = '(Prerequisite required)'
        cls.UNLOCKED = 'Unlocked'
        with super(TestCourseOutlinePageWithPrerequisites, cls).setUpClassAndTestData():
            cls.course, cls.course_blocks = cls.create_test_course()
    @classmethod
    def setUpTestData(cls):
        """Set up and enroll our fake user in the course."""
        cls.user = UserFactory(password=TEST_PASSWORD)
        CourseEnrollment.enroll(cls.user, cls.course.id)
    @classmethod
    def create_test_course(cls):
        """Creates a test course with a prerequisite and a gated sequential.

        Returns a (course, course_blocks) pair where course_blocks maps role
        names ('chapter', 'prerequisite', 'gated_content', ...) to blocks.
        """
        course = CourseFactory.create()
        # Subsection gating must be switched on per course.
        course.enable_subsection_gating = True
        course_blocks = {}
        with cls.store.bulk_operations(course.id):
            course_blocks['chapter'] = ItemFactory.create(
                category='chapter',
                parent_location=course.location
            )
            course_blocks['prerequisite'] = ItemFactory.create(
                category='sequential',
                parent_location=course_blocks['chapter'].location,
                display_name='Prerequisite Exam'
            )
            course_blocks['gated_content'] = ItemFactory.create(
                category='sequential',
                parent_location=course_blocks['chapter'].location,
                display_name='Gated Content'
            )
            course_blocks['prerequisite_vertical'] = ItemFactory.create(
                category='vertical',
                parent_location=course_blocks['prerequisite'].location
            )
            course_blocks['gated_content_vertical'] = ItemFactory.create(
                category='vertical',
                parent_location=course_blocks['gated_content'].location
            )
            course.children = [course_blocks['chapter']]
            course_blocks['chapter'].children = [course_blocks['prerequisite'], course_blocks['gated_content']]
            course_blocks['prerequisite'].children = [course_blocks['prerequisite_vertical']]
            course_blocks['gated_content'].children = [course_blocks['gated_content_vertical']]
        # setUpClass may build the course before the user exists; only enroll
        # here when setUpTestData has already created cls.user.
        if hasattr(cls, 'user'):
            CourseEnrollment.enroll(cls.user, course.id)
        return course, course_blocks
    def setUp(self):
        """
        Set up for the tests.
        """
        super(TestCourseOutlinePageWithPrerequisites, self).setUp()
        self.client.login(username=self.user.username, password=TEST_PASSWORD)
    def setup_gated_section(self, gated_block, gating_block):
        """
        Test helper to create a gating requirement
        Args:
            gated_block: The block that the learner will not have access to until they complete the gating block
            gating_block: (The prerequisite) The block that must be completed to get access to the gated block
        """
        # 100 => the gating block must be fully completed to open the gated one.
        gating_api.add_prerequisite(self.course.id, six.text_type(gating_block.location))
        gating_api.set_required_content(self.course.id, gated_block.location, gating_block.location, 100)
    def test_content_locked(self):
        """
        Test that a sequential/subsection with unmet prereqs correctly indicated that its content is locked
        """
        course = self.course
        self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite'])
        response = self.client.get(course_home_url(course))
        self.assertEqual(response.status_code, 200)
        response_content = pq(response.content)
        # check lock icon is present
        lock_icon = response_content('.fa-lock')
        self.assertTrue(lock_icon, "lock icon is not present, but should be")
        subsection = lock_icon.parents('.subsection-text')
        # check that subsection-title-name is the display name
        gated_subsection_title = self.course_blocks['gated_content'].display_name
        self.assertIn(gated_subsection_title, subsection.children('.subsection-title').html())
        # check that it says prerequisite required
        self.assertIn("Prerequisite:", subsection.children('.details').html())
        # check that there is not a screen reader message
        self.assertFalse(subsection.children('.sr'))
    def test_content_unlocked(self):
        """
        Test that a sequential/subsection with met prereqs correctly indicated that its content is unlocked
        """
        course = self.course
        self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite'])
        # complete the prerequisite to unlock the gated content
        # this call triggers reevaluation of prerequisites fulfilled by the gating block.
        with patch('openedx.core.lib.gating.api.get_subsection_completion_percentage', Mock(return_value=100)):
            lms_gating_api.evaluate_prerequisite(
                self.course,
                Mock(location=self.course_blocks['prerequisite'].location, percent_graded=1.0),
                self.user,
            )
        response = self.client.get(course_home_url(course))
        self.assertEqual(response.status_code, 200)
        response_content = pq(response.content)
        # check unlock icon is not present
        unlock_icon = response_content('.fa-unlock')
        self.assertFalse(unlock_icon, "unlock icon is present, yet shouldn't be.")
        gated_subsection_title = self.course_blocks['gated_content'].display_name
        every_subsection_on_outline = response_content('.subsection-title')
        subsection_has_gated_text = False
        says_prerequisite_required = False
        # NOTE(review): both flags are overwritten on every iteration, so the
        # asserts below only reflect the LAST subsection on the outline --
        # consider accumulating with `or`/any() if all subsections matter.
        for subsection_contents in every_subsection_on_outline.contents():
            subsection_has_gated_text = gated_subsection_title in subsection_contents
            says_prerequisite_required = "Prerequisite:" in subsection_contents
        # check that subsection-title-name is the display name of gated content section
        self.assertTrue(subsection_has_gated_text)
        self.assertFalse(says_prerequisite_required)
class TestCourseOutlineResumeCourse(SharedModuleStoreTestCase, CompletionWaffleTestMixin):
"""
Test start course and resume course for the course outline view.
Technically, this mixes course home and course outline tests, but checking
the counts of start/resume course should be done together to avoid false
positives.
"""
@classmethod
def setUpClass(cls):
"""
Creates a test course that can be used for non-destructive tests
"""
# setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase
# pylint: disable=super-method-not-called
with super(TestCourseOutlineResumeCourse, cls).setUpClassAndTestData():
cls.course = cls.create_test_course()
@classmethod
def setUpTestData(cls):
"""Set up and enroll our fake user in the course."""
cls.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(cls.user, cls.course.id)
cls.site = Site.objects.get_current()
@classmethod
def create_test_course(cls):
"""
Creates a test course.
"""
course = CourseFactory.create()
with cls.store.bulk_operations(course.id):
chapter = ItemFactory.create(category='chapter', parent_location=course.location)
chapter2 = ItemFactory.create(category='chapter', parent_location=course.location)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location)
sequential3 = ItemFactory.create(category='sequential', parent_location=chapter2.location)
sequential4 = ItemFactory.create(category='sequential', parent_location=chapter2.location)
vertical = ItemFactory.create(category='vertical', parent_location=sequential.location)
vertical2 = ItemFactory.create(category='vertical', parent_location=sequential2.location)
vertical3 = ItemFactory.create(category='vertical', parent_location=sequential3.location)
vertical4 = ItemFactory.create(category='vertical', parent_location=sequential4.location)
course.children = [chapter, chapter2]
chapter.children = [sequential, sequential2]
chapter2.children = [sequential3, sequential4]
sequential.children = [vertical]
sequential2.children = [vertical2]
sequential3.children = [vertical3]
sequential4.children = [vertical4]
if hasattr(cls, 'user'):
CourseEnrollment.enroll(cls.user, course.id)
return course
def setUp(self):
"""
Set up for the tests.
"""
super(TestCourseOutlineResumeCourse, self).setUp()
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def visit_sequential(self, course, chapter, sequential):
"""
Navigates to the provided sequential.
"""
last_accessed_url = reverse(
'courseware_section',
kwargs={
'course_id': text_type(course.id),
'chapter': chapter.url_name,
'section': sequential.url_name,
}
)
self.assertEqual(200, self.client.get(last_accessed_url).status_code)
@override_switch(
'{}.{}'.format(
waffle.WAFFLE_NAMESPACE, waffle.ENABLE_COMPLETION_TRACKING
),
active=True
)
def complete_sequential(self, course, sequential):
"""
Completes provided sequential.
"""
course_key = CourseKey.from_string(str(course.id))
# Fake a visit to sequence2/vertical2
block_key = UsageKey.from_string(six.text_type(sequential.location))
if block_key.course_key.run is None:
# Old mongo keys must be annotated with course run info before calling submit_completion:
block_key = block_key.replace(course_key=course_key)
completion = 1.0
BlockCompletion.objects.submit_completion(
user=self.user,
block_key=block_key,
completion=completion
)
def visit_course_home(self, course, start_count=0, resume_count=0):
"""
Helper function to navigates to course home page, test for resume buttons
:param course: course factory object
:param start_count: number of times 'Start Course' should appear
:param resume_count: number of times 'Resume Course' should appear
:return: response object
"""
response = self.client.get(course_home_url(course))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Start Course', count=start_count)
self.assertContains(response, 'Resume Course', count=resume_count)
return response
def test_course_home_completion(self):
"""
Test that completed blocks appear checked on course home page
"""
self.override_waffle_switch(True)
course = self.course
vertical = course.children[0].children[0].children[0]
response = self.client.get(course_home_url(course))
content = pq(response.content)
self.assertEqual(len(content('.fa-check')), 0)
self.complete_sequential(self.course, vertical)
response = self.client.get(course_home_url(course))
content = pq(response.content)
# Subsection should be checked
self.assertEqual(len(content('.fa-check')), 1)
def test_start_course(self):
"""
Tests that the start course button appears when the course has never been accessed.
Technically, this is a course home test, and not a course outline test, but checking the counts of
start/resume course should be done together to not get a false positive.
"""
course = self.course
response = self.visit_course_home(course, start_count=1, resume_count=0)
content = pq(response.content)
self.assertTrue(content('.action-resume-course').attr('href').endswith('/course/' + course.url_name))
@override_settings(LMS_BASE='test_url:9999')
def test_resume_course_with_completion_api(self):
"""
Tests completion API resume button functionality
"""
self.override_waffle_switch(True)
# Course tree
course = self.course
vertical1 = course.children[0].children[0].children[0]
vertical2 = course.children[0].children[1].children[0]
self.complete_sequential(self.course, vertical1)
# Test for 'resume' link
response = self.visit_course_home(course, resume_count=1)
# Test for 'resume' link URL - should be vertical 1
content = pq(response.content)
self.assertTrue(content('.action-resume-course').attr('href').endswith('/vertical/' + vertical1.url_name))
self.complete_sequential(self.course, vertical2)
# Test for 'resume' link
response = self.visit_course_home(course, resume_count=1)
# Test for 'resume' link URL - should be vertical 2
content = pq(response.content)
self.assertTrue(content('.action-resume-course').attr('href').endswith('/vertical/' + vertical2.url_name))
# visit sequential 1, make sure 'Resume Course' URL is robust against 'Last Visited'
# (even though I visited seq1/vert1, 'Resume Course' still points to seq2/vert2)
self.visit_sequential(course, course.children[0], course.children[0].children[0])
# Test for 'resume' link URL - should be vertical 2 (last completed block, NOT last visited)
response = self.visit_course_home(course, resume_count=1)
content = pq(response.content)
self.assertTrue(content('.action-resume-course').attr('href').endswith('/vertical/' + vertical2.url_name))
def test_resume_course_deleted_sequential(self):
"""
Tests resume course when the last completed sequential is deleted and
there is another sequential in the vertical.
"""
course = self.create_test_course()
# first navigate to a sequential to make it the last accessed
chapter = course.children[0]
self.assertGreaterEqual(len(chapter.children), 2)
sequential = chapter.children[0]
sequential2 = chapter.children[1]
self.complete_sequential(course, sequential)
self.complete_sequential(course, sequential2)
# remove one of the sequentials from the chapter
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
self.store.delete_item(sequential.location, self.user.id)
# check resume course buttons
response = self.visit_course_home(course, resume_count=1)
content = pq(response.content)
self.assertTrue(content('.action-resume-course').attr('href').endswith('/sequential/' + sequential2.url_name))
def test_resume_course_deleted_sequentials(self):
"""
Tests resume course when the last completed sequential is deleted and
there are no sequentials left in the vertical.
"""
course = self.create_test_course()
# first navigate to a sequential to make it the last accessed
chapter = course.children[0]
self.assertEqual(len(chapter.children), 2)
sequential = chapter.children[0]
self.complete_sequential(course, sequential)
# remove all sequentials from chapter
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
for sequential in chapter.children:
self.store.delete_item(sequential.location, self.user.id)
# check resume course buttons
self.visit_course_home(course, start_count=1, resume_count=0)
def test_course_home_for_global_staff(self):
"""
Tests that staff user can access the course home without being enrolled
in the course.
"""
course = self.course
self.user.is_staff = True
self.user.save()
self.override_waffle_switch(True)
CourseEnrollment.get_enrollment(self.user, course.id).delete()
response = self.visit_course_home(course, start_count=1, resume_count=0)
content = pq(response.content)
self.assertTrue(content('.action-resume-course').attr('href').endswith('/course/' + course.url_name))
@override_switch(
'{}.{}'.format(
waffle.WAFFLE_NAMESPACE, waffle.ENABLE_COMPLETION_TRACKING
),
active=True
)
def test_course_outline_auto_open(self):
"""
Tests that the course outline auto-opens to the first subsection
in a course if a user has no completion data, and to the
last-accessed subsection if a user does have completion data.
"""
def get_sequential_button(url, is_hidden):
is_hidden_string = "is-hidden" if is_hidden else ""
return "<olclass=\"outline-itemaccordion-panel" + is_hidden_string + "\"" \
"id=\"" + url + "_contents\"" \
"aria-labelledby=\"" + url + "\"" \
">"
# Course tree
course = self.course
chapter1 = course.children[0]
chapter2 = course.children[1]
response_content = self.client.get(course_home_url(course)).content
stripped_response = text_type(re.sub(b"\\s+", b"", response_content), "utf-8")
self.assertIn(get_sequential_button(text_type(chapter1.location), False), stripped_response)
self.assertIn(get_sequential_button(text_type(chapter2.location), True), stripped_response)
content = pq(response_content)
button = content('#expand-collapse-outline-all-button')
self.assertEqual('Expand All', button.children()[0].text)
def test_user_enrolled_after_completion_collection(self):
"""
Tests that the _completion_data_collection_start() method returns the created
time of the waffle switch that enables completion data tracking.
"""
view = CourseOutlineFragmentView()
switches = waffle.waffle()
# pylint: disable=protected-access
switch_name = switches._namespaced_name(waffle.ENABLE_COMPLETION_TRACKING)
switch, _ = Switch.objects.get_or_create(name=switch_name)
self.assertEqual(switch.created, view._completion_data_collection_start())
switch.delete()
def test_user_enrolled_after_completion_collection_default(self):
"""
Tests that the _completion_data_collection_start() method returns a default constant
when no Switch object exists for completion data tracking.
"""
view = CourseOutlineFragmentView()
# pylint: disable=protected-access
self.assertEqual(DEFAULT_COMPLETION_TRACKING_START, view._completion_data_collection_start())
class TestCourseOutlinePreview(SharedModuleStoreTestCase):
"""
Unit tests for staff preview of the course outline.
"""
def update_masquerade(self, course, role, group_id=None, user_name=None):
"""
Toggle masquerade state.
"""
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': six.text_type(course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({'role': role, 'group_id': group_id, 'user_name': user_name}),
'application/json'
)
self.assertEqual(response.status_code, 200)
return response
def test_preview(self):
"""
Verify the behavior of preview for the course outline.
"""
course = CourseFactory.create(
start=datetime.datetime.now() - datetime.timedelta(days=30)
)
staff_user = StaffFactory(course_key=course.id, password=TEST_PASSWORD)
CourseEnrollment.enroll(staff_user, course.id)
future_date = datetime.datetime.now() + datetime.timedelta(days=30)
with self.store.bulk_operations(course.id):
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name='First Chapter',
)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=sequential.location)
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name='Future Chapter',
start=future_date,
)
sequential = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=sequential.location)
# Verify that a staff user sees a chapter with a due date in the future
self.client.login(username=staff_user.username, password='test')
url = course_home_url(course)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Future Chapter')
# Verify that staff masquerading as a learner see the future chapter.
self.update_masquerade(course, role='student')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Future Chapter')
| cpennington/edx-platform | openedx/features/course_experience/tests/views/test_course_outline.py | Python | agpl-3.0 | 28,296 | [
"VisIt"
] | b1196deb62e59f255e5c483179c863231ad7cef3d981318b76ca84f20978d611 |
# -*- coding: utf-8 -*-
#
# Copyright 2012 - 2013 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import random
import numpy as np
from pythics.lib import GrowableArray
#import pdb
#
# basic functionality: initialize, clear, run
#
def initialize(shell, **kwargs):
shell.interact(kwargs.copy())
clear(**kwargs)
def clear(seed, messages, plot, **kwargs):
global x, y, t
t = 0
x = 0.0
y = 0.0
messages.clear()
plot.clear()
plot.set_plot_properties(
title='Random Walk',
x_label='x position',
y_label='y position',
aspect_ratio='equal')
plot.new_curve('random_walk', memory='growable', length=10000, animated=True,
line_color='red', line_width=0.5)
random.seed(seed.value)
def run(prob_N_update, messages, plot, stop, **kwargs):
global x, y, t
N_update = prob_N_update.value
messages.write('Starting simulation...\n')
data = GrowableArray(cols=2, length=1000)
while True:
t += 1
x += random.uniform(-1.0, 1.0)
y += random.uniform(-1.0, 1.0)
data.append((x,y))
if (t % N_update) == 0:
plot.append_data('random_walk', data[:])
data.clear()
if stop.value: break
stop.value = False
messages.write('Stopped simulation, t = %d.\n' % t)
| LunarLanding/Pythics | pythics/examples/random_walk_simulator.py | Python | gpl-3.0 | 2,043 | [
"Brian"
] | 1330b000cd84295de9e3096f5c0ec8ce0c5dbdf1c2107e9cb5825461cb1ff14c |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import re
from ... import gloo
class Compiler(object):
"""
Compiler is used to convert Function and Variable instances into
ready-to-use GLSL code. This class handles name mangling to ensure that
there are no name collisions amongst global objects. The final name of
each object may be retrieved using ``Compiler.__getitem__(obj)``.
Accepts multiple root Functions as keyword arguments. ``compile()`` then
returns a dict of GLSL strings with the same keys.
Example::
# initialize with two main functions
compiler = Compiler(vert=v_func, frag=f_func)
# compile and extract shaders
code = compiler.compile()
v_code = code['vert']
f_code = code['frag']
# look up name of some object
name = compiler[obj]
"""
def __init__(self, **shaders):
# cache of compilation results for each function and variable
self._object_names = {} # {object: name}
self.shaders = shaders
def __getitem__(self, item):
"""
Return the name of the specified object, if it has been assigned one.
"""
return self._object_names[item]
def compile(self, pretty=True):
""" Compile all code and return a dict {name: code} where the keys
are determined by the keyword arguments passed to __init__().
Parameters
----------
pretty : bool
If True, use a slower method to mangle object names. This produces
GLSL that is more readable.
If False, then the output is mostly unreadable GLSL, but is about
10x faster to compile.
"""
# Authoritative mapping of {obj: name}
self._object_names = {}
#
# 1. collect list of dependencies for each shader
#
# maps {shader_name: [deps]}
self._shader_deps = {}
for shader_name, shader in self.shaders.items():
this_shader_deps = []
self._shader_deps[shader_name] = this_shader_deps
dep_set = set()
for dep in shader.dependencies(sort=True):
# visit each object no more than once per shader
if dep.name is None or dep in dep_set:
continue
this_shader_deps.append(dep)
dep_set.add(dep)
#
# 2. Assign names to all objects.
#
if pretty:
self._rename_objects_pretty()
else:
self._rename_objects_fast()
#
# 3. Now we have a complete namespace; concatenate all definitions
# together in topological order.
#
compiled = {}
obj_names = self._object_names
for shader_name, shader in self.shaders.items():
code = []
for dep in self._shader_deps[shader_name]:
dep_code = dep.definition(obj_names)
if dep_code is not None:
# strip out version pragma if present;
regex = r'#version (\d+)'
m = re.search(regex, dep_code)
if m is not None:
# check requested version
if m.group(1) != '120':
raise RuntimeError("Currently only GLSL #version "
"120 is supported.")
dep_code = re.sub(regex, '', dep_code)
code.append(dep_code)
compiled[shader_name] = '\n'.join(code)
self.code = compiled
return compiled
def _rename_objects_fast(self):
""" Rename all objects quickly to guaranteed-unique names using the
id() of each object.
This produces mostly unreadable GLSL, but is about 10x faster to
compile.
"""
for shader_name, deps in self._shader_deps.items():
for dep in deps:
name = dep.name
if name != 'main':
ext = '_%x' % id(dep)
name = name[:32-len(ext)] + ext
self._object_names[dep] = name
def _rename_objects_pretty(self):
""" Rename all objects like "name_1" to avoid conflicts. Objects are
only renamed if necessary.
This method produces more readable GLSL, but is rather slow.
"""
#
# 1. For each object, add its static names to the global namespace
# and make a list of the shaders used by the object.
#
# {name: obj} mapping for finding unique names
# initialize with reserved keywords.
self._global_ns = dict([(kwd, None) for kwd in gloo.util.KEYWORDS])
# functions are local per-shader
self._shader_ns = dict([(shader, {}) for shader in self.shaders])
# for each object, keep a list of shaders the object appears in
obj_shaders = {}
for shader_name, deps in self._shader_deps.items():
for dep in deps:
# Add static names to namespace
for name in dep.static_names():
self._global_ns[name] = None
obj_shaders.setdefault(dep, []).append(shader_name)
#
# 2. Assign new object names
#
name_index = {}
for obj, shaders in obj_shaders.items():
name = obj.name
if self._name_available(obj, name, shaders):
# hooray, we get to keep this name
self._assign_name(obj, name, shaders)
else:
# boo, find a new name
while True:
index = name_index.get(name, 0) + 1
name_index[name] = index
ext = '_%d' % index
new_name = name[:32-len(ext)] + ext
if self._name_available(obj, new_name, shaders):
self._assign_name(obj, new_name, shaders)
break
def _is_global(self, obj):
""" Return True if *obj* should be declared in the global namespace.
Some objects need to be declared only in per-shader namespaces:
functions, static variables, and const variables may all be given
different definitions in each shader.
"""
# todo: right now we assume all Variables are global, and all
# Functions are local. Is this actually correct? Are there any
# global functions? Are there any local variables?
from .variable import Variable
return isinstance(obj, Variable)
def _name_available(self, obj, name, shaders):
""" Return True if *name* is available for *obj* in *shaders*.
"""
if name in self._global_ns:
return False
shaders = self.shaders if self._is_global(obj) else shaders
for shader in shaders:
if name in self._shader_ns[shader]:
return False
return True
def _assign_name(self, obj, name, shaders):
""" Assign *name* to *obj* in *shaders*.
"""
if self._is_global(obj):
assert name not in self._global_ns
self._global_ns[name] = obj
else:
for shader in shaders:
ns = self._shader_ns[shader]
assert name not in ns
ns[name] = obj
self._object_names[obj] = name
| hronoses/vispy | vispy/visuals/shaders/compiler.py | Python | bsd-3-clause | 7,604 | [
"VisIt"
] | 5192d3eb507f59a8d3fd0d4193419e977833ea89639953b0cdd1cec472c81a06 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import struct
def getrec(reclabelarray, verbose=False):
"""Reads binary files JOBARC and JAINDX and returns contents
of each record in *reclabelarray*.
"""
knownlabels = {
"AU_LENGT": 'DOUBLE',
"CHARGE_E": 'DOUBLE',
"AMU ": 'DOUBLE',
"NUC_MAGN": 'DOUBLE',
"MASS_ELE": 'DOUBLE',
"MASS_PRO": 'DOUBLE',
"HBAR ": 'DOUBLE',
"AU_MASSP": 'DOUBLE',
"SP_LIGHT": 'DOUBLE',
"AU_EV ": 'DOUBLE',
"AVOGADRO": 'DOUBLE',
"AU_ENERG": 'DOUBLE',
"AU_CM-1 ": 'DOUBLE',
"CM-1_KCA": 'DOUBLE',
"CM-1_KJ ": 'DOUBLE',
"AU_DIPOL": 'DOUBLE',
"AU_VELOC": 'DOUBLE',
"AU_TIME ": 'DOUBLE',
"EL_GFACT": 'DOUBLE',
"EA_IRREP": 'INTEGER',
"UHFRHF ": 'INTEGER',
"IFLAGS ": 'INTEGER',
"IFLAGS2 ": 'INTEGER',
"OCCUPYA ": 'INTEGER',
"NUMDROPA": 'INTEGER',
"JODAFLAG": 'INTEGER',
"TITLE ": 'CHARACTER',
"NCNSTRNT": 'INTEGER',
"ICNSTRNT": 'INTEGER',
"VCNSTRNT": 'DOUBLE',
"NMPROTON": 'INTEGER',
"NREALATM": 'INTEGER',
"COORDINT": 'DOUBLE',
"VARNAINT": 'DOUBLE',
"COORD000": 'DOUBLE',
"ROTCONST": 'DOUBLE',
"ORIENT2 ": 'DOUBLE', # input orientation into interial frame
"LINEAR ": 'INTEGER',
"NATOMS ": 'INTEGER',
"COORD ": 'DOUBLE',
"ORIENTMT": 'DOUBLE', # input orientation from ZMAT (mostly useful for Cartesians) to Cfour standard orientation
"ATOMMASS": 'DOUBLE',
"ORIENT3 ": 'DOUBLE',
"FULLPTGP": 'CHARACTER',
"FULLORDR": 'INTEGER',
"FULLNIRR": 'INTEGER',
"FULLNORB": 'INTEGER',
"FULLSYOP": 'DOUBLE',
"FULLPERM": 'INTEGER',
"FULLMEMB": 'INTEGER',
"FULLPOPV": 'INTEGER',
"FULLCLSS": 'INTEGER',
"FULLSTGP": 'CHARACTER',
"ZMAT2MOL": 'INTEGER',
"COMPPTGP": 'CHARACTER',
"COMPORDR": 'INTEGER',
"COMPNIRR": 'INTEGER',
"COMPNORB": 'INTEGER',
"COMPSYOP": 'DOUBLE',
"COMPPERM": 'INTEGER',
"COMPMEMB": 'INTEGER',
"COMPPOPV": 'INTEGER',
"COMPCLSS": 'INTEGER',
"COMPSTGP": 'CHARACTER',
"BMATRIX ": 'DOUBLE',
"NUCREP ": 'DOUBLE',
"TIEDCORD": 'INTEGER',
"MPVMZMAT": 'INTEGER',
"ATOMCHRG": 'INTEGER',
"NTOTSHEL": 'INTEGER',
"NTOTPRIM": 'INTEGER',
"BASISEXP": 'DOUBLE',
"BASISCNT": 'DOUBLE',
"SHELLSIZ": 'INTEGER',
"SHELLPRM": 'INTEGER',
"SHELLANG": 'INTEGER',
"SHELLLOC": 'INTEGER',
"SHOFFSET": 'INTEGER',
"SHELLORB": 'INTEGER',
"PROFFSET": 'INTEGER',
"PRIMORBT": 'INTEGER',
"FULSHLNM": 'INTEGER',
"FULSHLTP": 'INTEGER',
"FULSHLSZ": 'INTEGER',
"FULSHLAT": 'INTEGER',
"JODAOUT ": 'INTEGER',
"NUMIIII ": 'INTEGER',
"NUMIJIJ ": 'INTEGER',
"NUMIIJJ ": 'INTEGER',
"NUMIJKL ": 'INTEGER',
"NBASTOT ": 'INTEGER',
"NAOBASFN": 'INTEGER',
"NUMBASIR": 'INTEGER',
"FAOBASIR": 'DOUBLE',
"AO2SO ": 'DOUBLE',
"FULLSOAO": 'DOUBLE',
"FULLAOSO": 'DOUBLE',
"AO2SOINV": 'DOUBLE',
"CART3CMP": 'DOUBLE',
"CART2CMP": 'DOUBLE',
"CMP3CART": 'DOUBLE',
"CMP2CART": 'DOUBLE',
"ANGMOMBF": 'INTEGER',
"NBASATOM": 'INTEGER',
"NAOBFORB": 'INTEGER',
"MAP2ZMAT": 'INTEGER',
"CENTERBF": 'INTEGER',
"CNTERBF0": 'INTEGER',
"ANMOMBF0": 'INTEGER',
"CMP2ZMAT": 'DOUBLE',
"ZMAT2CMP": 'DOUBLE',
"OVERLAP ": 'DOUBLE',
"ONEHAMIL": 'DOUBLE',
"AOOVRLAP": 'DOUBLE',
"SHALFMAT": 'DOUBLE',
"SCFEVCA0": 'DOUBLE',
"RPPBMAT ": 'DOUBLE',
"OCCUPYA0": 'INTEGER',
"SYMPOPOA": 'INTEGER',
"SYMPOPVA": 'INTEGER',
"SCFEVLA0": 'DOUBLE',
"SCFDENSA": 'DOUBLE',
"FOCKA ": 'DOUBLE',
"SMHALF ": 'DOUBLE',
"EVECOAOA": 'DOUBLE',
"ONEHMOA ": 'DOUBLE',
"NOCCORB ": 'INTEGER',
"NVRTORB ": 'INTEGER',
"SCFENEG ": 'DOUBLE',
"TOTENERG": 'DOUBLE',
"IRREPALP": 'INTEGER',
"OMEGA_A ": 'DOUBLE',
"EVECAOXA": 'DOUBLE',
"EVALORDR": 'DOUBLE',
"EVECAO_A": 'DOUBLE',
"EVCSYMAF": 'CHARACTER',
"EVCSYMAC": 'CHARACTER',
"TESTVECT": 'DOUBLE',
"MODROPA ": 'INTEGER',
"VRHARMON": 'DOUBLE',
"NEWRECRD": 'INTEGER',
"VRCORIOL": 'DOUBLE',
"VRQUADRA": 'DOUBLE',
"VRANHARM": 'DOUBLE',
"REFINERT": 'DOUBLE',
"DIDQ ": 'DOUBLE',
"REFCOORD": 'DOUBLE',
"REFDIPOL": 'DOUBLE',
"REFGRADI": 'DOUBLE',
"REFDIPDR": 'DOUBLE',
"REFNORMC": 'DOUBLE',
"REFD2EZ ": 'DOUBLE',
"REFFREQS": 'DOUBLE',
"REFORIEN": 'DOUBLE',
"NUSECORD": 'INTEGER',
"NZMATANH": 'INTEGER',
"ISELECTQ": 'INTEGER',
"NEXTGEOM": 'DOUBLE',
"NEXTGEO1": 'DOUBLE',
"FCMDISPL": 'DOUBLE',
"GRDDISPL": 'DOUBLE',
"DPMDISPL": 'DOUBLE',
"DIPDISPL": 'DOUBLE',
"NMRDISPL": 'DOUBLE',
"SRTDISPL": 'DOUBLE',
"CHIDISPL": 'DOUBLE',
"POLDISPL": 'DOUBLE',
"EFGDISPL": 'DOUBLE',
"THEDISPL": 'DOUBLE',
"JFCDISPL": 'DOUBLE',
"JSDDISPL": 'DOUBLE',
"JSODISPL": 'DOUBLE',
"JDSODISP": 'DOUBLE',
"CUBCOUNT": 'INTEGER',
"FCMMAPER": 'DOUBLE',
"QPLSMINS": 'INTEGER',
"CUBCOORD": 'INTEGER',
"PASS1 ": 'INTEGER',
"REFFORDR": 'INTEGER',
"REFFSYOP": 'DOUBLE',
"REFFPERM": 'INTEGER',
"REFNUMIC": 'INTEGER',
"REFAMAT ": 'DOUBLE',
"REFTTEN ": 'DOUBLE',
"REFLINER": 'INTEGER',
"DIPOLMOM": 'DOUBLE',
"POLARTEN": 'DOUBLE',
"CHITENSO": 'DOUBLE',
"EFGTENSO": 'DOUBLE',
"IRREPPOP": 'INTEGER',
"REORDERA": 'INTEGER',
"IRREPBET": 'INTEGER',
"SCFEVLB0": 'DOUBLE',
"SCFEVCB0": 'DOUBLE',
"IRREPCOU": 'INTEGER',
"IDROPA ": 'INTEGER',
"OCCSCF ": 'INTEGER',
"VRTSCF ": 'INTEGER',
"SCFEVECA": 'DOUBLE',
"NCOMPA ": 'INTEGER',
"NBASCOMP": 'INTEGER',
"SCFEVALA": 'DOUBLE',
"SCFEVALB": 'DOUBLE',
"SVAVA0 ": 'INTEGER',
"SVAVA0X ": 'INTEGER',
"SVAVA0I ": 'INTEGER',
"SVBVB0 ": 'INTEGER',
"SVBVB0X ": 'INTEGER',
"SVBVB0I ": 'INTEGER',
"SOAOA0 ": 'INTEGER',
"SOAOA0X ": 'INTEGER',
"SOAOA0I ": 'INTEGER',
"SOBOB0 ": 'INTEGER',
"SOBOB0X ": 'INTEGER',
"SOBOB0I ": 'INTEGER',
"SVAVA1 ": 'INTEGER',
"SVAVA1X ": 'INTEGER',
"SVAVA1I ": 'INTEGER',
"SVBVB1 ": 'INTEGER',
"SVBVB1X ": 'INTEGER',
"SVBVB1I ": 'INTEGER',
"SOAOA1 ": 'INTEGER',
"SOAOA1X ": 'INTEGER',
"SOAOA1I ": 'INTEGER',
"SOBOB1 ": 'INTEGER',
"SOBOB1X ": 'INTEGER',
"SOBOB1I ": 'INTEGER',
"SVAOA2 ": 'INTEGER',
"SVAOA2X ": 'INTEGER',
"SVAOA2I ": 'INTEGER',
"SVBOB2 ": 'INTEGER',
"SVBOB2X ": 'INTEGER',
"SVBOB2I ": 'INTEGER',
"SOBVA2 ": 'INTEGER',
"SOBVA2X ": 'INTEGER',
"SOBVA2I ": 'INTEGER',
"SVBOA2 ": 'INTEGER',
"SVBOA2X ": 'INTEGER',
"SVBOA2I ": 'INTEGER',
"SVAVB2 ": 'INTEGER',
"SVAVB2X ": 'INTEGER',
"SVAVB2I ": 'INTEGER',
"SOAOB2 ": 'INTEGER',
"SOAOB2X ": 'INTEGER',
"SOAOB2I ": 'INTEGER',
"SOAVA2 ": 'INTEGER',
"SOAVA2X ": 'INTEGER',
"SOAVA2I ": 'INTEGER',
"SOBVB2 ": 'INTEGER',
"SOBVB2X ": 'INTEGER',
"SOBVB2I ": 'INTEGER',
"SOAVB2 ": 'INTEGER',
"SOAVB2X ": 'INTEGER',
"SOAVB2I ": 'INTEGER',
"SVAVA2 ": 'INTEGER',
"SVAVA2X ": 'INTEGER',
"SVAVA2I ": 'INTEGER',
"SVBVB2 ": 'INTEGER',
"SVBVB2X ": 'INTEGER',
"SVBVB2I ": 'INTEGER',
"SOAOA2 ": 'INTEGER',
"SOAOA2X ": 'INTEGER',
"SOAOA2I ": 'INTEGER',
"SOBOB2 ": 'INTEGER',
"SOBOB2X ": 'INTEGER',
"SOBOB2I ": 'INTEGER',
"SYMPOPOB": 'INTEGER',
"SYMPOPVB": 'INTEGER',
"T2NORM ": 'DOUBLE',
"MOIOVEC ": 'INTEGER',
"MOIOWRD ": 'INTEGER',
"MOIOSIZ ": 'INTEGER',
"MOIODIS ": 'INTEGER',
"MOIOFIL ": 'INTEGER',
"ISYMTYP ": 'INTEGER',
"TOTRECMO": 'INTEGER',
"TOTWRDMO": 'INTEGER',
"RELDENSA": 'DOUBLE',
"IINTERMA": 'DOUBLE',
"OCCNUM_A": 'DOUBLE',
"SCRATCH ": 'DOUBLE',
"SETUP2 ": 'INTEGER',
"MOLHES2 ": 'INTEGER',
"GRAD2 ": 'INTEGER',
"COORDMAS": 'INTEGER',
"NUCMULT ": 'INTEGER',
"SYMCOORD": 'DOUBLE',
"SYMCOOR2": 'DOUBLE',
"SYMCOOR3": 'DOUBLE',
"SYMMLENG": 'INTEGER',
"SKIP ": 'INTEGER',
"NSYMPERT": 'INTEGER',
"NPERTB ": 'INTEGER',
"TRANSINV": 'INTEGER',
"IBADNUMB": 'INTEGER',
"IBADINDX": 'INTEGER',
"IBADIRRP": 'INTEGER',
"IBADPERT": 'INTEGER',
"IBADSPIN": 'INTEGER',
"TREATPER": 'INTEGER',
"MAXAODSZ": 'INTEGER',
"PERTINFO": 'INTEGER',
"GRADIENT": 'DOUBLE',
"HESSIANM": 'DOUBLE',
"GRDZORDR": 'DOUBLE',
"D2EZORDR": 'DOUBLE',
"REALCORD": 'DOUBLE',
"DUMSTRIP": 'INTEGER',
"BMATRIXC": 'DOUBLE',
"REALATOM": 'INTEGER',
"NORMCORD": 'DOUBLE',
"DIPDERIV": 'DOUBLE',
"I4CDCALC": 'DOUBLE',
"FREQUENC": 'DOUBLE',
"RATMMASS": 'DOUBLE',
"RATMPOSN": 'INTEGER',
"DEGENERT": 'INTEGER',
"REFSHILD": 'DOUBLE',
"CORIZETA": 'DOUBLE',
"NMPOINTX": 'INTEGER',
"REFD3EDX": 'DOUBLE',
"BPPTOB ": 'DOUBLE',
"BPTOB ": 'DOUBLE',
"BSRTOB ": 'DOUBLE',
"BARTOB ": 'DOUBLE',
"VRTOTAL ": 'DOUBLE',
"D2DIPOLE": 'DOUBLE',
"D3DIPOLE": 'DOUBLE',
"D1DIPOLE": 'DOUBLE',
"REFNORM2": 'DOUBLE',
"NUSECOR2": 'INTEGER',
"FCMDISP2": 'DOUBLE',
"RGTDISPL": 'DOUBLE',
"CUBCOOR1": 'INTEGER',
"CUBCOOR2": 'INTEGER',
"REFFPEM2": 'INTEGER',
"RGTTENSO": 'DOUBLE',
"REFFPER2": 'INTEGER',
"REFD4EDX": 'DOUBLE',
"ZPE_ANHA": 'DOUBLE',
"OPENSLOT": 'INTEGER',
"BOLTZMAN": 'DOUBLE',
"MRCCOCC ": 'INTEGER',
"ABELPTGP": 'CHARACTER',
"ABELORDR": 'INTEGER',
"ABELNIRR": 'INTEGER',
"ABELNORB": 'INTEGER',
"ABELSYOP": 'DOUBLE',
"ABELPERM": 'INTEGER',
"ABELMEMB": 'INTEGER',
"ABELPOPV": 'INTEGER',
"ABELCLSS": 'INTEGER',
"ABELSTGP": 'CHARACTER',
"REALCHRG": 'INTEGER', # atom/mol? charge taking into acct edp
"NSOSCF ": 'INTEGER', # whether is spin orbital calc?
"SCFVCFLA": 'DOUBLE', # scf vector expanded from sph to cart basis for symm anal - determin orb sym
"EFG_SYM1": 'INTEGER', # symmetry property of components of electric field gradient integrals
"EFG_SYM2": 'INTEGER', # symm prop of comp of EFG
"DCTDISPL": 'DOUBLE',
"DANGERUS": 'INTEGER', #?
"FULLCHAR": 'CHARACTER', #?
"FULLDEGN": 'CHARACTER', #?
"FULLLABL": 'CHARACTER', #?
"FULLNIRX": 'CHARACTER', #?
"COMPCHAR": 'CHARACTER', #?
"COMPDEGN": 'CHARACTER', #?
"COMPLABL": 'CHARACTER', #?
"COMPNIRX": 'CHARACTER', #?
"ROTVECX ": 'CHARACTER', #?
"ROTVECY ": 'CHARACTER', #?
"ROTVECZ ": 'CHARACTER', #?
"COMPNSYQ": 'CHARACTER', #?
"COMPSYQT": 'CHARACTER', #?
"COMPSYMQ": 'CHARACTER', #?
"TRAVECX ": 'CHARACTER', #?
"TRAVECY ": 'CHARACTER', #?
"TRAVECZ ": 'CHARACTER', #?
"NVIBSYM ": 'CHARACTER', #?
"NUMVIBRT": 'CHARACTER', #?
"SBGRPSYM": 'CHARACTER', #?
"ORDERREF": 'CHARACTER', #?
"OPERSREF": 'CHARACTER', #?
"NVIBSYMF": 'CHARACTER', #?
"FULLNSYQ": 'CHARACTER', #?
"FULLSYQT": 'CHARACTER', #?
"FULLSYMQ": 'CHARACTER', #?
"INVPSMAT": 'CHARACTER', #?
"FDCOORDS": 'CHARACTER', #?
"FDCALCTP": 'CHARACTER', #?
"NUMPOINT": 'CHARACTER', #?
"NPTIRREP": 'CHARACTER', #?
"GRDPOINT": 'CHARACTER', #?
"DIPPOINT": 'CHARACTER', #?
"ENGPOINT": 'CHARACTER', #?
"PASS1FIN": 'CHARACTER', #?
"REFENERG": 'CHARACTER', #?
"NEXTCALC": 'CHARACTER', #?
"PRINSPIN": 'CHARACTER', #?
"PRINFROM": 'CHARACTER', #?
"PRININTO": 'CHARACTER', #?
"NEXTGEOF": 'CHARACTER', #?
"ZPE_HARM": 'DOUBLE', #?
"NDROPPED": 'INTEGER',
"REFCPTGP": 'INTEGER', #?
"REFFPTGP": 'INTEGER', #?
}
with open('JAINDX', mode='rb') as file: # b is important -> binary
fileContent = file.read()
fileLength = len(fileContent)
if fileLength == 16012:
srcints = 4
srcrecs = 4
elif fileLength == 16020:
srcints = 4
srcrecs = 8
elif fileLength == 24016:
srcints = 8
srcrecs = 4
elif fileLength == 24024:
srcints = 8
srcrecs = 8
# fixed number of slots for options
nopt = 1000
type2len = {
'DOUBLE': 8,
'INTEGER': srcints,
'CHARACTER': 1,
}
intlen2format = {
4: 'i',
8: 'l',
}
type2format = {
'DOUBLE': 'd',
'INTEGER': intlen2format[type2len['INTEGER']],
'CHARACTER': 'c',
}
if verbose:
print('\n<<< JAINDX >>>\n')
posf = srcrecs
istr = intlen2format[srcrecs]
jastart = struct.unpack(istr, fileContent[:posf])
if verbose:
print('%10s%10d%10d' % ('start', 0, posf))
poss = posf
posf = poss + 8 * nopt
istr = '8s' * nopt
jaindx = struct.unpack(istr, fileContent[poss:posf])
if verbose:
print('%10s%10d%10d' % ('jaindx', poss, posf))
poss = posf
posf = poss + srcints * nopt
istr = intlen2format[srcints] * nopt
jaindx2 = struct.unpack(istr, fileContent[poss:posf])
if verbose:
print('%10s%10d%10d' % ('jaindx2', poss, posf))
poss = posf
posf = poss + srcints * nopt
istr = intlen2format[srcints] * nopt
jaindx3 = struct.unpack(istr, fileContent[poss:posf])
if verbose:
print('%10s%10d%10d' % ('jaindx3', poss, posf))
poss = posf
posf = poss + srcints
istr = intlen2format[srcints]
jamid = struct.unpack(istr, fileContent[poss:posf])
if verbose:
print('%10s%10d%10d' % ('mid', poss, posf))
poss = posf
posf = poss + srcrecs
istr = intlen2format[srcrecs]
jaend = struct.unpack(istr, fileContent[poss:posf])
if verbose:
print('%10s%10d%10d' % ('end', poss, posf))
nrecs = jaindx.index('OPENSLOT') # number of active records
if verbose:
print('\n')
print('%20s%10d' % ('File Length:', fileLength))
print('%20s%10d' % ('srcints Int Length:', srcints))
print('%20s%10d' % ('srcrecs Int Length:', srcrecs))
print('%20s%10d' % ('First Rec:', jastart[0]))
print('%20s%10d' % ('Second Rec:', jamid[0]))
print('%20s%10d' % ('Last Rec:', jaend[0]))
print('%20s%10d' % ('Full Records:', nrecs))
print('\n')
print('\n<<< JOBARC >>>\n')
with open('JOBARC', mode='rb') as file: # b is important -> binary
fileContent = file.read()
returnRecords = {}
poss = 0
for item in range(nrecs):
posf = poss + type2len[knownlabels[jaindx[item]]] * jaindx3[item]
istr = type2format[knownlabels[jaindx[item]]] * jaindx3[item]
if knownlabels[jaindx[item]] == 'CHARACTER':
bound = type2len[knownlabels[jaindx[item]]] * jaindx3[item] * 8
posf = poss + bound
istr = str(bound) + 's'
jobarc = struct.unpack(istr, fileContent[poss:posf])
if verbose:
#print item, istr, poss, posf, '\t', jaindx[item], jaindx2[item], jaindx3[item], jobarc
if jaindx3[item] < 120:
print(jaindx[item], jaindx2[item], jaindx3[item], jobarc)
poss = posf
if jaindx[item] in reclabelarray:
returnRecords[jaindx[item]] = jobarc
return returnRecords
#if __name__ == "__main__":
# want = ['NATOMS ', 'AU_LENGT', 'COORD ', 'HBAR ', 'ATOMCHRG']
## got = get_jajo_record(want)
# got = getrec(want)
# for item in got.keys():
# print item, got[item]
| lothian/psi4 | psi4/driver/qcdb/jajo.py | Python | lgpl-3.0 | 17,941 | [
"Avogadro",
"CFOUR",
"Psi4"
] | a12946b8e62f783a95680faf2fcae3861c9ddd7fb31c7ae342583346c87858a9 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases for Cheetah's NameMapper lookup helpers used in respond().
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Generation metadata recorded by the Cheetah compiler at template build time.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.448984
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/mediaplayeradd.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run if the installed Cheetah runtime is older than the version
# this module was generated against (incompatible generated code).
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class mediaplayeradd(Template):

    """Cheetah-generated template: renders the mediaplayeradd XML response.

    respond() writes an <e2simplexmlresult> document containing the
    ``$result`` state and ``$message`` text taken from the search list.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        # Forward construction to Cheetah's Template, then run the one-time
        # instance initialisation, keeping only the supported keywords.
        super(mediaplayeradd, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        _orig_filter_29768904 = _filter
        filterName = u'WebSafe'
        # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator
        # is equivalent on Python 2 as well (this file already attempts
        # Python 3 compatibility via its `import builtins` header).
        if "WebSafe" in self._CHEETAH__filters:
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
        _v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
        if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
        write(u'''</e2state>
\t<e2statetext>''')
        _v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
        if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
        write(u'''</e2statetext>
</e2simplexmlresult>
''')

        _filter = self._CHEETAH__currentFilter = _orig_filter_29768904

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False

    _CHEETAH_version = __CHEETAH_version__

    _CHEETAH_versionTuple = __CHEETAH_versionTuple__

    _CHEETAH_genTime = __CHEETAH_genTime__

    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__

    _CHEETAH_src = __CHEETAH_src__

    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__

    _mainCheetahMethod_for_mediaplayeradd= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the class exactly once (guarded so a
# re-import or reload does not add it twice).
if not hasattr(mediaplayeradd, '_initCheetahAttributes'):
    templateAPIClass = getattr(mediaplayeradd, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayeradd)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow rendering the template directly from the command line for debugging.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=mediaplayeradd()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/web/mediaplayeradd.py | Python | gpl-2.0 | 5,260 | [
"VisIt"
] | cf214c935612b040c685d4a066ed0ee65b184d30f039c556e14e32a2edeba55c |
#!/usr/bin/python2
# -*- Mode: Python; py-indent-offset: 8 -*-
# (C) Copyright Zack Rusin 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Zack Rusin <zack@kde.org>
import license
import gl_XML
import sys, getopt
class PrintGlEnums(gl_XML.gl_print_base):
    """Code generator emitting Mesa's enums.c lookup tables from GL API XML.

    NOTE(review): this module is Python 2 only (print statements,
    dict.keys().sort()); it is run by Mesa's build system, not imported.
    """

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)

        self.name = "gl_enums.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2005 Brian Paul All Rights Reserved.""", "BRIAN PAUL")
        # Maps enum value -> list of [name, priority] pairs; filled by
        # process_enums() and consumed by printBody().
        self.enum_table = {}

    def printRealHeader(self):
        # Emit the C includes and the enum_elt struct used by the generated
        # lookup tables (offset into enum_string_table + enum value).
        print '#include "main/glheader.h"'
        print '#include "main/enums.h"'
        print '#include "main/imports.h"'
        print '#include "main/mtypes.h"'
        print ''
        print 'typedef struct {'
        print '   size_t offset;'
        print '   int n;'
        print '} enum_elt;'
        print ''
        return

    def print_code(self):
        # Emit the fixed (table-independent) C code: bsearch comparators and
        # the public lookup functions. The string below is C source, copied
        # verbatim into the generated file.
        print """
typedef int (*cfunc)(const void *, const void *);

/**
 * Compare a key name to an element in the \c all_enums array.
 *
 * \c bsearch always passes the key as the first parameter and the pointer
 * to the array element as the second parameter.  We can elimiate some
 * extra work by taking advantage of that fact.
 *
 * \param a  Pointer to the desired enum name.
 * \param b  Pointer to an element of the \c all_enums array.
 */
static int compar_name( const char *a, const enum_elt *b )
{
   return strcmp( a, & enum_string_table[ b->offset ] );
}

/**
 * Compare a key enum value to an element in the \c all_enums array.
 *
 * \c bsearch always passes the key as the first parameter and the pointer
 * to the array element as the second parameter.  We can elimiate some
 * extra work by taking advantage of that fact.
 *
 * \param a  Pointer to the desired enum name.
 * \param b  Pointer to an index into the \c all_enums array.
 */
static int compar_nr( const int *a, const unsigned *b )
{
   return a[0] - all_enums[*b].n;
}


static char token_tmp[20];

const char *_mesa_lookup_enum_by_nr( int nr )
{
   unsigned * i;

   i = (unsigned *) _mesa_bsearch(& nr, reduced_enums,
                                  Elements(reduced_enums),
                                  sizeof(reduced_enums[0]),
                                  (cfunc) compar_nr);

   if ( i != NULL ) {
      return & enum_string_table[ all_enums[ *i ].offset ];
   }
   else {
      /* this is not re-entrant safe, no big deal here */
      _mesa_snprintf(token_tmp, sizeof(token_tmp) - 1, "0x%x", nr);
      token_tmp[sizeof(token_tmp) - 1] = '\\0';
      return token_tmp;
   }
}

/**
 * Primitive names
 */
static const char *prim_names[PRIM_MAX+3] = {
   "GL_POINTS",
   "GL_LINES",
   "GL_LINE_LOOP",
   "GL_LINE_STRIP",
   "GL_TRIANGLES",
   "GL_TRIANGLE_STRIP",
   "GL_TRIANGLE_FAN",
   "GL_QUADS",
   "GL_QUAD_STRIP",
   "GL_POLYGON",
   "GL_LINES_ADJACENCY",
   "GL_LINE_STRIP_ADJACENCY",
   "GL_TRIANGLES_ADJACENCY",
   "GL_TRIANGLE_STRIP_ADJACENCY",
   "outside begin/end",
   "unknown state"
};


/* Get the name of an enum given that it is a primitive type.  Avoids
 * GL_FALSE/GL_POINTS ambiguity and others.
 */
const char *
_mesa_lookup_prim_by_nr(GLuint nr)
{
   if (nr < Elements(prim_names))
      return prim_names[nr];
   else
      return "invalid mode";
}


int _mesa_lookup_enum_by_name( const char *symbol )
{
   enum_elt * f = NULL;

   if ( symbol != NULL ) {
      f = (enum_elt *) _mesa_bsearch(symbol, all_enums,
                                     Elements(all_enums),
                                     sizeof( enum_elt ),
                                     (cfunc) compar_name);
   }

   return (f != NULL) ? f->n : -1;
}

"""
        return

    def printBody(self, api_list):
        # Build and emit the three generated tables:
        #   enum_string_table -- all names concatenated, NUL separated
        #   all_enums         -- (string offset, value) sorted by name
        #   reduced_enums     -- one preferred name per value, for nr lookup
        self.enum_table = {}
        for api in api_list:
            self.process_enums( api )

        keys = self.enum_table.keys()
        keys.sort()

        name_table = []
        enum_table = {}

        for enum in keys:
            low_pri = 9
            for [name, pri] in self.enum_table[ enum ]:
                name_table.append( [name, enum] )

                # Remember the lowest-priority (preferred) name per value.
                if pri < low_pri:
                    low_pri = pri
                    enum_table[enum] = name

        name_table.sort()

        string_offsets = {}
        i = 0;
        print 'LONGSTRING static const char enum_string_table[] = '
        for [name, enum] in name_table:
            print '   "%s\\0"' % (name)
            string_offsets[ name ] = i
            # +1 accounts for the embedded NUL terminator.
            i += len(name) + 1

        print '   ;'
        print ''

        print 'static const enum_elt all_enums[%u] =' % (len(name_table))
        print '{'
        for [name, enum] in name_table:
            print '   { %5u, 0x%08X }, /* %s */' % (string_offsets[name], enum, name)
        print '};'
        print ''

        print 'static const unsigned reduced_enums[%u] =' % (len(keys))
        print '{'
        for enum in keys:
            name = enum_table[ enum ]
            if [name, enum] not in name_table:
                print '   /* Error! %s, 0x%04x */ 0,' % (name, enum)
            else:
                i = name_table.index( [name, enum] )

                print '   %4u, /* %s */' % (i, name)
        print '};'

        self.print_code()
        return

    def process_enums(self, api):
        # Accumulate every (name, priority) pair for each enum value found
        # in the parsed API description, skipping duplicates.
        for obj in api.enumIterateByName():
            if obj.value not in self.enum_table:
                self.enum_table[ obj.value ] = []

            enum = self.enum_table[ obj.value ]
            name = "GL_" + obj.name
            priority = obj.priority()
            already_in = False;
            for n, p in enum:
                if n == name:
                    already_in = True
            if not already_in:
                enum.append( [name, priority] )
def show_usage():
    """Print command-line usage to stdout and exit with status 1."""
    print "Usage: %s [-f input_file_name]" % sys.argv[0]
    sys.exit(1)
# Entry point: parse one or more GL API XML files given with -f and print
# the generated C tables to stdout.
if __name__ == '__main__':
    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:")
    except Exception,e:
        show_usage()

    api_list = []
    for (arg,val) in args:
        if arg == "-f":
            api = gl_XML.parse_GL_API( val )
            api_list.append(api);

    printer = PrintGlEnums()
    printer.Print( api_list )
| devlato/kolibrios-llvm | contrib/sdk/sources/Mesa/src/mapi/glapi/gen/gl_enums.py | Python | mit | 7,309 | [
"Brian"
] | 3102edb43d16211077adef1646532ea3c54da8cfa8107e86dc9f17b51177dc94 |
#
# Copyright 2013-2018 Universidad Complutense de Madrid
#
# This file is part of PyEmir
#
# PyEmir is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyEmir is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyEmir. If not, see <http://www.gnu.org/licenses/>.
#
"""AIV Recipes for EMIR"""
from __future__ import division
import logging
import math
import numpy
import scipy.interpolate as itpl
import scipy.optimize as opz
from scipy import ndimage
from astropy.modeling import models, fitting
import astropy.wcs
import photutils
from photutils import CircularAperture
from numina.array.recenter import centering_centroid
from numina.array.utils import image_box
from numina.array.fwhm import compute_fwhm_2d_spline
from numina.constants import FWHM_G
from numina.array.fwhm import compute_fwhm_2d_simple
from numina.array.utils import expand_region
from emirdrp.core import EMIR_PIXSCALE
from .procedures import compute_fwhm_enclosed_direct
from .procedures import compute_fwhm_enclosed_grow
from .procedures import moments
from .procedures import AnnulusBackgroundEstimator
from .procedures import image_box2d
_logger = logging.getLogger(__name__)
# returns y,x
def compute_fwhm(img, center):
    """Estimate the FWHM of a peak located at *center* (y, x) in *img*.

    A bivariate spline is fitted to the full image; along each axis the two
    half-maximum crossings around the peak are found with Brent's method
    and their separation taken as the FWHM.

    Returns (center_y, center_x, peak, fwhm_x, fwhm_y). Raises ValueError
    (from brentq) if a half-maximum crossing cannot be bracketed.
    """
    X = numpy.arange(img.shape[0])
    Y = numpy.arange(img.shape[1])

    # Smooth interpolator over the whole image, evaluated off-grid below.
    bb = itpl.RectBivariateSpline(X, Y, img)
    # We assume that the peak is in the center...
    peak = bb(*center)[0, 0]

    def f1(x):
        # Signed distance from half maximum along the first axis.
        return bb(x, center[1]) - 0.5 * peak

    def f2(y):
        # Signed distance from half maximum along the second axis.
        return bb(center[0], y) - 0.5 * peak

    def compute_fwhm_1(U, V, fun, center):
        # 1-D FWHM: bracket the half-maximum crossing on each side of the
        # peak (using the profile minimum as the outer bracket) and solve
        # with brentq.
        cp = int(math.floor(center + 0.5))

        # Min on the rigth
        r_idx = V[cp:].argmin()
        u_r = U[cp + r_idx]

        if V[cp + r_idx] > 0.5 * peak:
            # FIXME: we have a problem
            # brentq will raise anyway
            pass

        sol_r = opz.brentq(fun, center, u_r)

        # Min in the left
        rV = V[cp - 1::-1]
        rU = U[cp - 1::-1]
        l_idx = rV.argmin()
        u_l = rU[l_idx]
        if rV[l_idx] > 0.5 * peak:
            # FIXME: we have a problem
            # brentq will raise anyway
            pass
        sol_l = opz.brentq(fun, u_l, center)
        fwhm = sol_r - sol_l
        return fwhm

    U = X
    V = bb.ev(U, [center[1] for _ in U])
    fwhm_x = compute_fwhm_1(U, V, f1, center[0])

    U = Y
    V = bb.ev([center[0] for _ in U], U)

    fwhm_y = compute_fwhm_1(U, V, f2, center[1])

    return center[0], center[1], peak, fwhm_x, fwhm_y
# Background in an annulus, mode is HSM
def compute_fwhm_global(data, center, box):
    """Model-free FWHM of the peak near *center* (y, x).

    A cutout of half-size *box* is extracted, a flat background (the cutout
    minimum) subtracted, and compute_fwhm() applied. On failure a sentinel
    tuple is returned instead of raising.

    Returns (x, y, peak, fwhm_x, fwhm_y) in full-image coordinates; the
    last three values are -99.0 (ValueError) or -199.0 (other errors) when
    the measurement fails.
    """
    region = image_box(center, data.shape, box)
    cutout = data[region]
    # Flat background estimate: the minimum of the cutout.
    signal = cutout - cutout.min()
    local_center = (center[0] - region[0].start, center[1] - region[1].start)
    try:
        cy, cx, peak, fwhm_x, fwhm_y = compute_fwhm(signal, local_center)
    except ValueError as error:
        _logger.warning("%s", error)
        return center[1], center[0], -99.0, -99.0, -99.0
    except Exception as error:
        _logger.warning("%s", error)
        return center[1], center[0], -199.0, -199.0, -199.0
    # Map the local (cutout) coordinates back to the full image frame.
    return (cx + region[1].start, cy + region[0].start,
            peak, fwhm_x, fwhm_y)
# returns x,y
def gauss_model(data, center_r):
    """Fit a 2D Gaussian to the peak near *center_r* (y, x).

    Works on a background-subtracted 4x4 half-box cutout; the background is
    taken as the cutout minimum.

    Returns (x, y, amplitude, stddev_x, stddev_y) in full-image coordinates.
    """
    region = image_box(center_r, data.shape, box=(4, 4))
    cutout = data[region]
    # background
    signal = cutout - cutout.min()
    local = center_r[0] - region[0].start, center_r[1] - region[1].start
    grid_y, grid_x = numpy.indices(signal.shape)
    initial = models.Gaussian2D(amplitude=signal.max(), x_mean=local[1],
                                y_mean=local[0], x_stddev=1.0, y_stddev=1.0)
    fitter = fitting.LevMarLSQFitter()  # @UndefinedVariable
    fitted = fitter(initial, grid_x, grid_y, signal)
    return (fitted.x_mean.value + region[1].start,
            fitted.y_mean.value + region[0].start,
            fitted.amplitude.value,
            fitted.x_stddev.value,
            fitted.y_stddev.value)
def recenter_char(data, centers_i, recenter_maxdist, recenter_nloop, recenter_half_box, do_recenter):
    """Refine candidate pinhole positions by iterative centroiding.

    Parameters
    ----------
    data : 2D ndarray
        Image containing the pinholes.
    centers_i : ndarray, shape (N, 2)
        Initial (x, y) positions, zero-based image coordinates.
    recenter_maxdist : float
        Maximum distance the centroid may move; <= 0 disables recentering.
    recenter_nloop : int
        Maximum number of centroiding iterations.
    recenter_half_box : tuple
        Half-size of the box used by the centroiding routine.
    do_recenter : bool
        Master switch for the recentering step.

    Returns
    -------
    tuple
        (centers_r, compute_mask, status_array): refined (x, y) positions,
        a boolean mask of positions safe to characterize (False for points
        within 5 pixels of the border), and per-position status codes.
    """
    # recentered values
    centers_r = numpy.empty_like(centers_i)
    # Ignore certain pinholes
    compute_mask = numpy.ones((centers_i.shape[0],), dtype='bool')
    status_array = numpy.ones((centers_i.shape[0],), dtype='int')

    for idx, (xi, yi) in enumerate(centers_i):
        # A failsafe
        _logger.info('for pinhole %i', idx)
        _logger.info('center is x=%7.2f y=%7.2f', xi, yi)
        if (xi > data.shape[1] - 5 or xi < 5 or
                yi > data.shape[0] - 5 or yi < 5):
            _logger.info('pinhole too near to the border')
            compute_mask[idx] = False
            centers_r[idx] = xi, yi
            status_array[idx] = 0
        else:
            if do_recenter and (recenter_maxdist > 0.0):
                # BUG FIX: centering_centroid was previously invoked twice
                # with identical arguments and the first result discarded,
                # doubling the cost of this loop for no benefit.
                xc, yc, _back, status, msg = centering_centroid(
                    data, xi, yi,
                    box=recenter_half_box,
                    maxdist=recenter_maxdist, nloop=recenter_nloop
                )
                _logger.info('new center is x=%7.2f y=%7.2f', xc, yc)
                # Log in X,Y format
                _logger.debug('recenter message: %s', msg)
                centers_r[idx] = xc, yc
                status_array[idx] = status
            else:
                centers_r[idx] = xi, yi
                status_array[idx] = 0

    return centers_r, compute_mask, status_array
def pinhole_char(data, ncenters, box=4, recenter_pinhole=True, maxdist=10.0):
    """Characterize pinholes: model-free FWHM, 2D Gaussian fit, photometry.

    *ncenters* holds candidate positions in FITS convention (1-based x, y
    in the first two columns). Returns an (N, 11) array per pinhole:
    [x, y, status, peak, fwhm_x, fwhm_y, g_peak, g_stddev_x, g_stddev_y,
    flux_r2, flux_r4], with -99 marking failed/skipped measurements and
    coordinates converted back to FITS convention.
    """
    ibox = (box, box)

    # convert FITS x, y coordinates (pixel center 1)
    # to python/scipy/astropy (pixel center 0 and x,y -> y,x)
    centers_i = ncenters[:, 0:2] - 1

    centers_r, cmask, starr = recenter_char(data, centers_i,
                                            recenter_maxdist=maxdist,
                                            recenter_nloop=10,
                                            recenter_half_box=ibox,
                                            do_recenter=recenter_pinhole
                                            )

    # Result 0
    nresults = 11
    mm0 = numpy.empty((centers_r.shape[0], nresults))
    mm0[:, 0:2] = centers_r
    mm0[:, 2] = starr
    # Pre-fill all measurement columns with the failure sentinel.
    mm0[:, 3:] = -99

    # compute the FWHM without fitting
    for idx, (xc, yc) in enumerate(centers_r):
        _logger.info('For pinhole %i', idx)
        if cmask[idx]:
            _logger.info('compute model-free FWHM')
            try:
                res1 = compute_fwhm_global(data, (yc, xc), box=ibox)
                fmt1 = 'x=%7.2f y=%7.2f peak=%6.3f fwhm_x=%6.3f fwhm_y=%6.3f'
                _logger.info(fmt1, *res1)
                mm0[idx, 3:6] = res1[2:]
            except Exception as error:
                _logger.exception("unable to obtain FWHM, %s", error)

            _logger.info('compute Gaussian 2Dfitting')
            try:
                res2 = gauss_model(data, (yc, xc))
                fmt2 = 'x=%7.2f y=%7.2f peak=%6.3f stdev_x=%6.3f stdev_y=%6.3f'
                _logger.info(fmt2, *res2)
                mm0[idx, 6:9] = res2[2:]
            except Exception as error:
                _logger.exception("unable to obtain FWHM, %s", error)
        else:
            _logger.info('skipping')

    # Photometry in coordinates
    # x=centers_r[:,0]
    # y=centers_r[:,1]
    # with radius 2.0 pixels and 4.0 pixels
    apertures = [2.0, 4.0]
    # NOTE(review): the loop variable list `apertures` is rebound to a
    # CircularAperture inside the body; iteration still proceeds over the
    # original list because the iterator was captured at loop entry.
    for idx, rad in enumerate(apertures):
        _logger.info('compute photometry with aperture radii %s', rad)
        apertures = CircularAperture(centers_r[:,:2], r=rad)
        faper = photutils.aperture_photometry(data, apertures)
        mm0[:, 9+idx] = faper['aperture_sum']
        _logger.info('done')

    # Convert coordinates to FITS
    mm0[:, 0:2] += 1

    return mm0
def pinhole_char2(
        data, ncenters,
        recenter_pinhole=True,
        recenter_half_box=5,
        recenter_nloop=10,
        recenter_maxdist=10.0,
        back_buff=3,
        back_width=5,
        phot_niter=10,
        phot_rad=8
        ):
    """Extended pinhole characterization.

    For each recentered pinhole this iterates a photometric radius /
    background annulus until convergence, then measures: aperture flux,
    radial-profile Gaussian fit, two "enclosed flux" FWHM estimates, a
    simple and a spline FWHM, a 2D Gaussian fit and second moments.

    Returns an (N, 35) array; -99 marks failed/skipped measurements and
    the first four columns (initial and refined x, y) are returned in FITS
    (1-based) convention.
    """
    sigma0 = 1.0
    rad = 3 * sigma0 * FWHM_G
    box = recenter_half_box
    recenter_half_box = (box, box)

    # convert FITS x, y coordinates (pixel center 1)
    # to python/scipy/astropy (pixel center 0 and x,y -> y,x)
    centers_i = ncenters[:, 0:2] - 1

    centers_r, cmask, starr = recenter_char(data, centers_i, recenter_maxdist,
                                            recenter_nloop, recenter_half_box,
                                            do_recenter=recenter_pinhole
                                            )

    # Number of results
    nresults = 35
    mm0 = numpy.empty((centers_r.shape[0], nresults))
    mm0[:, 0:2] = centers_i
    mm0[:, 2:4] = centers_r
    mm0[:, 4] = starr
    # Pre-fill all measurement columns with the failure sentinel.
    mm0[:, 5:] = -99

    # Fitter
    fitter = fitting.LevMarLSQFitter()  # @UndefinedVariable
    rplot = phot_rad

    for idx, (x0, y0) in enumerate(centers_r):
        _logger.info('For pinhole %i', idx)
        if not cmask[idx]:
            _logger.info('skipping')
            # Fill result with -99
            continue

        # Initial photometric radius
        rad = 3.0

        # Loop to find better photometry radius and background annulus
        irad = rad
        bck = 0.0
        for i in range(phot_niter):
            phot_rad = rad
            # Sky background annulus
            rs1 = rad + back_buff
            rs2 = rs1 + back_width
            _logger.debug('Iter %d, annulus r1=%5.2f r2=%5.2f', i, rs1, rs2)
            bckestim = AnnulusBackgroundEstimator(r1=rs1, r2=rs2)

            # Crop the image to obtain the background
            sl_sky = image_box2d(x0, y0, data.shape, (rs2, rs2))
            raster_sky = data[sl_sky]
            # Logical coordinates
            xx0 = x0 - sl_sky[1].start
            yy0 = y0 - sl_sky[0].start
            # FIXME, perhaps we dont need to crop the image
            try:
                bck = bckestim(raster_sky, xx0, yy0)
                _logger.debug('Iter %d, background %f in '
                              'annulus r1=%5.2f r2=%5.2f',
                              i, bck, rs1, rs2)
            except Exception as error:
                _logger.warning('Error in background estimation %s', error)
                break

            # Radius of the fit
            fit_rad = max(rplot, rad)

            sl = image_box2d(x0, y0, data.shape, (fit_rad, fit_rad))
            part = data[sl]
            # Logical coordinates
            xx0 = x0 - sl[1].start
            yy0 = y0 - sl[0].start

            yy, xx = numpy.mgrid[sl]
            _logger.debug('Iter %d, radial fit', i)

            # Photometry
            dist = numpy.sqrt((xx - x0) ** 2 + (yy - y0) ** 2)
            phot_mask = dist < fit_rad
            r1 = dist[phot_mask]

            part_s = part - bck
            f1 = part_s[phot_mask]
            # Fit radial profile
            model = models.Gaussian1D(amplitude=f1.max(), mean=0, stddev=1.0)
            model.mean.fixed = True  # Mean is always 0.0

            # Inner points get larger weight (inverse-distance weighting).
            g1d_f = fitter(model, r1, f1, weights=(r1 + 1e-12) ** -1)

            rpeak = g1d_f.amplitude.value
            # sometimes the fit is negative
            rsigma = abs(g1d_f.stddev.value)
            rfwhm = rsigma * FWHM_G

            # Next photometric radius derived from the fitted FWHM.
            rad = 2.5 * rfwhm
            _logger.debug('Iter %d, new rad is %f', i, rad)
            if abs(rad - irad) < 1e-3:
                # reached convergence
                _logger.debug('Convergence in iter %d', i)
                break
            else:
                irad = rad
        else:
            _logger.debug('no convergence in photometric radius determination')

        _logger.info('background %6.2f, r1 %7.2f r2 %7.2f', bck, rs1, rs2)
        mm0[idx, 5:5 + 3] = bck, rs1, rs2

        # Aperture photometry on the background-subtracted cutout, at the
        # converged radius, in cutout-local coordinates.
        aper_rad = rad
        ca = CircularAperture([(xx0, yy0)], aper_rad)
        m = photutils.aperture_photometry(part_s, ca)
        flux_aper = m['aperture_sum'][0]
        _logger.info('aper rad %f, aper flux %f', aper_rad, flux_aper)
        mm0[idx, 8:8 + 2] = aper_rad, flux_aper

        _logger.info('Radial fit, peak: %f fwhm %f', rpeak, rfwhm)

        try:
            dpeak, dfwhm, smsg = compute_fwhm_enclosed_direct(
                part_s, xx0, yy0, maxrad=fit_rad)
            _logger.info('Enclosed direct, peak: %f fwhm %f', dpeak, dfwhm)
        except Exception as error:
            _logger.warning('Error in compute_fwhm_enclosed_direct %s', error)
            dpeak, dfwhm = -99.0, -99.0

        try:
            eamp, efwhm, epeak, emsg = compute_fwhm_enclosed_grow(
                part_s, xx0, yy0, maxrad=fit_rad)
            _logger.info('Enclosed fit, peak: %f fwhm %f', epeak, efwhm)
        except Exception as error:
            _logger.warning('Error in compute_fwhm_enclosed_grow %s', error)
            eamp, efwhm, epeak, emsg = [-99.0] * 4

        mm0[idx, 10:10 + 6] = epeak, efwhm, dpeak, dfwhm, rpeak, rfwhm

        try:
            res_simple = compute_fwhm_2d_simple(part_s, xx0, yy0)
            _logger.info('Simple, peak: %f fwhm x %f fwhm %f', *res_simple)
            mm0[idx, 16:16 + 3] = res_simple
        except Exception as error:
            _logger.warning('Error in compute_fwhm_2d_simple %s', error)
            mm0[idx, 16:16 + 3] = -99.0

        try:
            res_spline = compute_fwhm_2d_spline(part_s, xx0, yy0)
            _logger.info('Spline, peak: %f fwhm x %f fwhm %f', *res_spline)
            mm0[idx, 19:19 + 3] = res_spline
        except Exception as error:
            _logger.warning('Error in compute_fwhm_2d_spline %s', error)
            mm0[idx, 19:19 + 3] = -99.0

        # Bidimensional fit
        # Fit in a smaller box
        fit2d_rad = int(math.ceil(fit_rad))

        fit2d_half_box = (fit2d_rad, fit2d_rad)
        sl1 = image_box2d(x0, y0, data.shape, fit2d_half_box)

        part1 = data[sl1]
        yy1, xx1 = numpy.mgrid[sl1]

        g2d = models.Gaussian2D(amplitude=rpeak, x_mean=x0, y_mean=y0,
                                x_stddev=1.0, y_stddev=1.0)
        g2d_f = fitter(g2d, xx1, yy1, part1 - bck)

        res_gauss2d = (g2d_f.amplitude.value,
                       g2d_f.x_mean.value + 1,  # FITS coordinates
                       g2d_f.y_mean.value + 1,  # FITS coordinates
                       g2d_f.x_stddev.value * FWHM_G,
                       g2d_f.y_stddev.value * FWHM_G,
                       g2d_f.theta.value
                       )
        _logger.info('Gauss2d, %s', res_gauss2d)
        mm0[idx, 22:22 + 6] = res_gauss2d
        # Moments
        moments_half_box = fit2d_half_box
        res_moments = moments(data, x0, y0, moments_half_box)
        _logger.info('Mxx %f Myy %f Mxy %f e %f pa %f', *res_moments)
        mm0[idx, 28:28 + 5] = res_moments

    # Photometry in coordinates
    # x=centers_r[:,0]
    # y=centers_r[:,1]
    # with radius 2.0 pixels and 4.0 pixels
    apertures = [2.0, 4.0]
    # NOTE(review): `apertures` is rebound inside the loop body; iteration
    # still covers the original list because the iterator is captured once.
    for idx, rad in enumerate(apertures):
        _logger.info('compute photometry with aperture radii %s', rad)
        apertures = CircularAperture(centers_r[:,:2], r=rad)
        faper = photutils.aperture_photometry(data, apertures)
        mm0[:, 33+idx] = faper['aperture_sum']
        _logger.info('done')

    # FITS coordinates
    mm0[:, :4] += 1

    return mm0
def shape_of_slices(tup_of_s):
    """Return the extent (stop - start) of every slice in *tup_of_s*."""
    sizes = []
    for piece in tup_of_s:
        sizes.append(piece.stop - piece.start)
    return tuple(sizes)
def normalize(data):
    """Linearly rescale *data* to the interval [-1, 1].

    A constant array (max == min) maps to all zeros.
    """
    hi = data.max()
    lo = data.min()
    if hi == lo:
        return data - hi
    return (2 * data - hi - lo) / (hi - lo)
def normalize_raw(arr):
    """Map a float image that originated as uint16 onto [0, 1].

    This extends scikit-image's ``img_as_float`` idea to images that were
    uint16 originally but have since been processed as float32.

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    ndarray clipped to the range [0, 1]
    """
    # FIXME: use other limits acording to original arr.dtype
    # This applies only to uint16 images
    # As images were positive, the range is 0,1
    scaled = arr / 65535.0
    return numpy.clip(scaled, 0.0, 1.0)
def char_slit(data, regions, box_increase=3, slit_size_ratio=4.0):
    """Characterize slit-like regions of *data*.

    Regions whose height/width ratio is below *slit_size_ratio* are
    rejected (unless the ratio check is disabled with a non-positive
    value). Each accepted region is expanded by *box_increase* pixels,
    its center of mass located and a simple FWHM measured there.

    Returns a list of [x, y, fwhm_x, fwhm_y] with coordinates in FITS
    (1-based) convention.
    """
    result = []

    for r in regions:
        _logger.debug('initial region %s', r)
        oshape = shape_of_slices(r)

        ratio = oshape[0] / oshape[1]
        if (slit_size_ratio > 0) and (ratio < slit_size_ratio):
            _logger.debug("this is not a slit, ratio=%f", ratio)
            continue

        _logger.debug('initial shape %s', oshape)
        _logger.debug('ratio %f', ratio)
        # NOTE(review): the 2048 stop looks like the detector size — confirm.
        rp = expand_region(r, box_increase, box_increase,
                           start=0, stop=2048)
        _logger.debug('expanded region %r', rp)
        ref = rp[0].start, rp[1].start
        _logger.debug('reference point %r', ref)

        datas = data[rp]

        c = ndimage.center_of_mass(datas)

        fc = datas.shape[0] // 2
        cc = datas.shape[1] // 2

        _logger.debug("%d %d %d %d", fc, cc, c[0], c[1])

        _peak, fwhm_x, fwhm_y = compute_fwhm_2d_simple(datas, c[1], c[0])
        _logger.debug('x=%f y=%f', c[1] + ref[1], c[0] + ref[0])
        _logger.debug('fwhm_x %f fwhm_y %f', fwhm_x, fwhm_y)
        # colrow = ref[1] + cc + 1, ref[0] + fc + 1
        result.append([c[1] + ref[1] + 1, c[0] + ref[0] + 1, fwhm_x, fwhm_y])

    return result
| nicocardiel/pyemir | emirdrp/recipes/aiv/common.py | Python | gpl-3.0 | 17,887 | [
"Gaussian"
] | 793cd2ba2cab468b8645cb8e19aa754a2c327411be1b8d297415e14c228259f6 |
# Utility file for reading the excel file containing all the toxicity data
import pandas as pd
import xlrd
import pdb
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
##
# @brief Read all the toxicity data from excel file
#
# @param filepath - Full filepath of excel file
# @param toxicity - Type of toxicity
#
# @return - Input data and output data frames as Panda dataframes
def import_data(filepath,toxicity):
    """Read toxicity study data from an excel workbook.

    Parameters
    ----------
    filepath : str
        Full path of the excel file.
    toxicity : str
        "CNT" selects the carbon-nanotube sheet (index 4); any other value
        selects the silver-nanoparticle sheet (index 1).

    Returns
    -------
    (input_data, output_data)
        Two pandas DataFrames: model inputs (author/size/impurity/exposure
        columns) and outputs (BAL endpoint means and SDs, fold of control).
    """
    # Reading the toxicity data
    # BUG FIX: was `toxicity is "CNT"` -- identity comparison on strings only
    # works by accident of interning; use equality.
    if toxicity == "CNT":
        # Read the file located at filepath
        cnt_data = pd.read_excel(filepath,4)
        # Ignore all the particle species that are not carbon
        cnt_data = cnt_data.loc[cnt_data['Particle Species']=='Carbon']
        # Construct the input output data for models
        # Refer to ``A_MetaAnalysis_of_Carbon_Nanotube_Pulmonary_Toxicity_Studies --
        # How_Physical_Dimensions_and_Impurities_Affect_the_Toxicity_of_Carbon_Nanotubes"
        # Input: size and shape,impurities and exposure characteristics
        # Output: PMN, MAC, LAH, TP -- before sampling -- we store both mean and SD
        # We will also store the Author and publication data columns for Model
        # Validation
        size_shape_data = cnt_data[['Config. (1=SW, 2=MW)',
            'min length','length median, nm','max length','min dia',
            'diameter median, nm','max dia', 'MMAD, nm','SA m2/g','Purity']]
        # Converting the data from percentage dose to total and 24 hours average
        # dose
        impurities_wt = cnt_data[['%wt Oxidized C', '% wt Co','% wt Al',
            '%wt Fe', '%wt Cu', '%wt Cr', '%wt Ni']]
        # Also multiplying by 10^4 to convert from uq/kg and % wt to pg/kg
        impurities_total = np.multiply(impurities_wt.values,\
                10000*cnt_data[['Total Dose (ug/kg)']].values)
        # Recreating a dataframe from these values
        impurities_total_df = pd.DataFrame(impurities_total,columns=['C Total',\
                'Co Total','Al Total','Fe Total','Cu Total','Cr Total','Ni Total'])
        impurities_24_hr_dose = np.multiply(impurities_wt.values,\
                10000*cnt_data[['Avg 24-hr Dose (ug/kg)']].values)
        # Recreating a dataframe from these values
        # BUG FIX: this previously wrapped `impurities_total` again, so the
        # 24-hour impurity columns silently duplicated the total-dose values;
        # use the 24-hour dose array computed just above.
        impurities_24_df = pd.DataFrame(impurities_24_hr_dose,columns=['C 24 Hour',\
                'Co 24 Hour','Al 24 Hour','Fe 24 Hour','Cu 24 Hour','Cr 24 Hour','Ni 24 Hour'])
        # Getting exposure characteristics
        exposure = cnt_data[['Exp. Hrs. ','Exp. Per. (hrs)',
            'animal (1=rats, 2=mice)',
            'species (1=sprague-dawley, 2=wistar, 3=C57BL/6, 4=ICR, 5=Crl:CD(SD)IGS BR, 6=BALB/cAnNCrl)',
            'sex (1=male, 2=female)','mean animal mass, g','Post Exp. (days)',
            'Exp. Mode (1=inhalation, 2=instillation, 3=aspiration)',
            'mass conc. (mg/m3)','Total Dose (ug/kg)','Avg 24-hr Dose (ug/kg)',
            'Total Dose (m2/kg)', 'Avg 24-hr Dose (m2/kg)']]
        # Auxiliary data
        aux_data = cnt_data[['Author(s)', 'Year', 'No. of Subjects (N)']]
        # Merging all the input dataframes again
        input_data = pd.concat([aux_data,size_shape_data,impurities_total_df,\
                impurities_24_df,exposure],axis=1)
        # Outputs: PMN, MAC,LDH and TP -- we only use fold of control data as
        # specified by the Authors in the paper
        # In order to obtain multiple columns of data, we will sample num_samples times the
        # number of animals
        output_data = cnt_data[['BAL Neutrophils (fold of control)',\
                'BAL Neutrophils (fold of control) SD','BAL Macrophages (fold of control)'
                ,'BAL Macrophages (fold of control) SD','BAL LDH (fold of control)',
                'BAL LDH (fold of control) SD','BAL Total Protein (fold of control)',
                'BAL Total Protein (fold of control) SD']]
    else:
        # We need to prepare data for Silver nanoparticle toxicity study
        # Read the file located at filepath
        cnt_data = pd.read_excel(filepath,1)
        # Auxiliary data
        aux_data = cnt_data[['Author(s)', 'Year', 'No. of Subjects (N)']]
        size_shape_data = cnt_data[['Particle Type (1=basic, 2 = citratecapped, 3 = PVPcapped)','diameter mean, nm']]
        exposure = cnt_data[[ 'Exp. Mode (1=inhalation, 2=instillation, 3=aspiration, 4=nose-inhalation)',
            'species (1=sprague-dawley, 2=Brown-Norway (BN), 3=C57BL/6, 4=F344/DuCrl)',
            'mean animal mass, g','sex (1=male, 2=female)',
            'Surface Area (m^2/g)','mass conc. (ug/m3)','Exp. Hrs.',
            'Total Dose (ug/kg)','Post Exp. (days)']]
        # Merging all the input dataframes again
        input_data = pd.concat([aux_data,size_shape_data,exposure],axis=1)
        # Outputs: PMN, MAC,LDH,TP, Total Cell Count -- we only use fold of control data as
        # specified by the Authors in the paper
        # In order to obtain multiple columns of data, we will sample num_samples times the
        # number of animals
        output_data = cnt_data[['BAL Neutrophils (fold of control)',\
                'BAL Neutrophils (fold of control) SD','BAL Macrophages (fold of control)'
                ,'BAL Macrophages (fold of control) SD','BAL LDH (fold of control)',
                'BAL LDH (fold of control) SD','BAL Total Protein (fold of control)',
                'BAL Total Protein (fold of control) SD',
                'BAL Total Cell Count (fold of control)',
                'BAL Total Cell Count (fold of control) SD']]
    return (input_data,output_data)
##
# @brief Prepare data from an excel file that can be sent for learning a random
# forest
#
# @param filepath - Full path of excel file containing data
# @param output_feature - What feature output value are we trying to predict,
# possible values are PMN,MAC,LDH and TP
# @param author_exclude - The authors that should be excluded from the learning
# part and just be tested against
# @param num_sample - Number of samples to be drawn from output value Gaussian
# distribution for one sample animal. Default is 100 - same as the paper
# @param toxicity - type of toxicity we are analyzing, defaults to Carbon Nano
# Tubes (CNT)
#
# @return Input data and output data for random forest learning
def prepare_data_rf(filepath,output_feature='PMN',author_exclude=None,\
num_sample=100,toxicity="CNT",other_excludes = None):
# Outputs from this function
train_inp = [];train_out = [];test_inp = [];test_out = []
# Read the excel file and get all the input data
(input_data,output_data) = import_data(filepath,toxicity)
# In case we are trying to replicate Table IV of the paper, where a part of
# the data is used for testing and the excluded part is for testing
if author_exclude is not None:
exclude_ind = np.full((input_data.shape[0],),False,dtype=np.bool_)
for author_details in author_exclude:
curr_ind = (input_data['Author(s)'].values==author_details[0])*(input_data['Year'].values== author_details[1])
exclude_ind = np.logical_or(exclude_ind,curr_ind)
else:
exclude_ind = np.full((input_data.shape[0],),False,dtype=np.bool_)
# Other exlcudes permitted via list of dict objects
if other_excludes is not None:
for exclude in other_excludes:
for key in exclude.keys():
curr_ind = input_data[key].values==exclude[key]
exclude_ind = np.logical_or(exclude_ind,curr_ind)
# Training Data
print "Training Data"
(train_inp,train_out) = sample_input_output(input_data.loc[~exclude_ind,:],\
output_data.loc[~exclude_ind,:],output_feature,num_sample)
print "Validation Data"
# Testing Data
(test_inp,test_out) = sample_input_output(input_data.loc[exclude_ind,:],\
output_data.loc[exclude_ind,:],output_feature,num_sample)
return (train_inp,train_out,test_inp,test_out,\
input_data.columns[3:].values)
def sample_input_output(input_data, output_data, output_feature, num_sample):
    """Replicate input rows per subject and sample outputs per animal.

    Input rows are simply repeated ``num_sample`` times for each of the
    study's subjects; output values are drawn from a truncated Gaussian
    by ``output_sampler``.

    Parameters
    ----------
    input_data, output_data : pandas.DataFrame
        Study-level inputs and paired (mean, SD) output columns.
    output_feature : str
        One of 'PMN', 'MAC', 'LDH', 'TP'; anything else selects total
        cell count.
    num_sample : int
        Samples drawn per test subject.

    Returns
    -------
    (numpy input array, numpy output vector) with NaN-output rows dropped.
    """
    # Sample input data based on the number of samples required for each test
    # subject.
    input_data_sampled = input_data.loc[np.repeat(
        input_data.index.values,
        num_sample * input_data['No. of Subjects (N)'].values)]
    # Column pair index of the requested output feature; any unrecognized
    # name falls through to total cell count (index 4).
    feat_index = {'PMN': 0, 'MAC': 1, 'LDH': 2, 'TP': 3}.get(output_feature, 4)
    # Output values are sampled from a truncated Gaussian using the
    # (mean, SD) column pair for the selected feature.
    output_data_sampled = output_sampler(
        output_data.iloc[:, 2 * feat_index:2 * feat_index + 2].values,
        input_data['No. of Subjects (N)'].values, num_sample)
    # Removing the number of test subjects column and reindexing.
    input_data_sampled = input_data_sampled.drop(
        "No. of Subjects (N)", axis=1).reset_index(drop=True)
    # Drop author/year columns (first two) and every row whose sampled
    # output is NaN -- that data is useless for the learning problem.
    valid = ~np.isnan(output_data_sampled[:, 0])
    input_data_sampled = input_data_sampled.iloc[valid, 2:].values
    # Only a single output value is learned per tree.
    output_data_sampled = output_data_sampled[valid, 0]
    return (input_data_sampled, output_data_sampled)
def output_sampler(output_data,num_subjects,num_sample):
output_data_sampled = np.zeros((np.sum(num_subjects)*num_sample,\
output_data.shape[1]/2))
mean_nan = np.zeros((output_data_sampled.shape[1],))
sd_nan = np.zeros((output_data_sampled.shape[1],))
# Going over all the rows in output_data and sampling
for i in range(output_data.shape[0]):
# Sampling for each output value
for j in range(output_data_sampled.shape[1]):
# Core sampling part: for each test subjects- sample
# num_subjects*num_samples from a constrained normal distribution
# Lets check for data validity in input
# if mean value is not present --> That experiments is invalid
if np.isnan(output_data[i,2*j]):
mean_nan[j] = mean_nan[j]+1
# Just saying that all the current samples are NaN (not a number)
#print "For index ",i," and output index ",j
#print "Mean value is not a number, generating NaN samples only"
samples = np.full((num_subjects[i]*num_sample),np.nan)
# If SD value is not given, lets assume it to be a very small value
elif np.isnan(output_data[i,2*j+1]):
sd_nan[j] = sd_nan[j]+1
#print "For index ",i," and output index ",j
#print "Standard deviation value not given, sampling with almost 0 SD"
samples = sample_truncated(output_data[i,2*j],1e-5,\
num_subjects[i]*num_sample)
else:
# This mean both mean and SD are provided
samples = sample_truncated(output_data[i,2*j],output_data[i,2*j+1],\
num_subjects[i]*num_sample)
# Filling in all the values
output_data_sampled[np.sum(num_subjects[:i])*num_sample:np.sum(num_subjects[:i+1])*num_sample,j]\
=samples
# Printing summary stats
print "Output data summary: Total Not a Number (NaN) samples"
print "Total number of studies: ",output_data.shape[0]
print "index is 0=PMN,1=MAC,2=LDH,3=TP:"
print "mean NaN: ", mean_nan
print "SD NaN: ", sd_nan
return output_data_sampled
# Sample num_samples for a truncated Gaussian distribution with lower bound
# being 0 and upper bound being +infinity
def sample_truncated(mu, sigma, num_sample):
    """Draw ``num_sample`` values from N(mu, sigma) truncated to [0, +inf)."""
    # scipy.stats.truncnorm takes its bounds in standard-normal units.
    a = (0 - mu) / sigma
    b = np.inf
    dist = stats.truncnorm(a, b, loc=mu, scale=sigma)
    return dist.rvs(num_sample)
# To prove the point that sampling from a truncated distribution does not induce
# bias in sampling
def demo_samples():
    """Show that sampling a truncated Gaussian is not the same as clipping.

    Plots three histograms: a truncated normal, a plain normal, and a plain
    normal whose negative draws were replaced by zero.
    """
    lower, upper = 0, np.inf
    mu, sigma = 2.75, 1.55
    truncated = stats.truncnorm(
        (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
    plain = stats.norm(loc=mu, scale=sigma)
    fig, ax = plt.subplots(3, sharex=True)
    ax[0].hist(truncated.rvs(10000), normed=True)
    ax[0].set_title('Truncated Gaussian Distribution Samples')
    ax[1].hist(plain.rvs(10000), normed=True)
    ax[1].set_title('Gaussian Distribution Samples')
    # The "move negatives to zero" strategy: draw 10000 points and zero out
    # every negative sample.
    samples = plain.rvs(10000)
    modified_samples = np.where(samples < 0, 0.0, samples)
    ax[2].hist(modified_samples, normed=True)
    ax[2].set_title('Gaussian Distribution Samples with moving negative values strategy')
    plt.show()
if __name__=="__main__":
    # Demo entry point: run the full random-forest data-preparation pipeline
    # on the bundled carbon-nanotube pulmonary toxicity spreadsheet.
    filepath = './data/Carbon_Nanotube_Pulmonary_Toxicity_Data_Set_20120313.xls'
    data = prepare_data_rf(filepath)
| surenkum/feature_selection | utils.py | Python | gpl-3.0 | 13,413 | [
"Gaussian"
] | 0d854a138e321a440ed1ab86d81862e17fb40be07b4a278a6fc6fb0e8f3dfc9b |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDss(RPackage):
    """Dispersion shrinkage for sequencing data.
    DSS is an R library performing differential analysis for count-based
    sequencing data. It detects differentially expressed genes (DEGs) from
    RNA-seq, and differentially methylated loci or regions (DML/DMRs) from
    bisulfite sequencing (BS-seq). The core of DSS is a new dispersion
    shrinkage method for estimating the dispersion parameter from Gamma-Poisson
    or Beta-Binomial distributions."""
    # Package metadata: Bioconductor home page and the git mirror Spack
    # clones from at the pinned commits below.
    homepage = "http://bioconductor.org/packages/DSS/"
    git = "https://git.bioconductor.org/packages/DSS"
    # Pinned Bioconductor release commits.
    version('2.36.0', commit='841c7ed')
    version('2.34.0', commit='f9819c7')
    version('2.32.0', commit='ffb502d')
    # R and R-package dependencies; BiocParallel and DelayedArray are only
    # required from DSS 2.36.0 onwards.
    depends_on('r@3.3:', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-bsseq', type=('build', 'run'))
    depends_on('r-biocparallel', when='@2.36.0:', type=('build', 'run'))
    depends_on('r-delayedarray', when='@2.36.0:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-dss/package.py | Python | lgpl-2.1 | 1,236 | [
"Bioconductor"
] | fd08fe22bb0eb831529a9a2aac77229581c65633b446ecd8a2c912b35afea6ff |
"Base class for simulation windows"
import gtk
from gettext import gettext as _
from ase.gui.widgets import oops, pack, help
from ase import Atoms
from ase.constraints import FixAtoms
class Simulation(gtk.Window):
    """Base class for simulation windows in the ASE GUI.

    Provides common widget-packing helpers, starting-configuration
    selection, Run/Close buttons (wired to ``self.run`` in subclasses)
    and conversion of the active image to an ``Atoms`` object.
    """

    def __init__(self, gui):
        "Store the main GUI object; subclasses build the rest of the window."
        gtk.Window.__init__(self)
        self.gui = gui

    def packtext(self, vbox, text, label=None):
        "Pack an text frame into the window."
        pack(vbox, gtk.Label(""))
        txtframe = gtk.Frame(label)
        txtlbl = gtk.Label(text)
        txtframe.add(txtlbl)
        txtlbl.show()
        pack(vbox, txtframe)
        pack(vbox, gtk.Label(""))

    def packimageselection(self, outerbox, txt1=_(" (rerun simulation)"),
                           txt2=_(" (continue simulation)")):
        "Make the frame for selecting starting config if more than one."
        self.startframe = gtk.Frame(_("Select starting configuration:"))
        pack(outerbox, [self.startframe])
        vbox = gtk.VBox()
        self.startframe.add(vbox)
        vbox.show()
        self.numconfig_format = _("There are currently %i "
                                  "configurations loaded.")
        self.numconfig_label = gtk.Label("")
        pack(vbox, [self.numconfig_label])
        lbl = gtk.Label(_("Choose which one to use as the "
                          "initial configuration"))
        pack(vbox, [lbl])
        # Three radio alternatives: first image, an explicit image number
        # (spin button only active when its radio is selected), last image.
        self.start_radio_first = gtk.RadioButton(
            None, _("The first configuration %s.") % txt1)
        pack(vbox, [self.start_radio_first])
        self.start_radio_nth = gtk.RadioButton(self.start_radio_first,
                                               _("Configuration number "))
        self.start_nth_adj = gtk.Adjustment(0, 0, 1, 1)
        self.start_nth_spin = gtk.SpinButton(self.start_nth_adj, 0, 0)
        self.start_nth_spin.set_sensitive(False)
        pack(vbox, [self.start_radio_nth, self.start_nth_spin])
        self.start_radio_last = gtk.RadioButton(
            self.start_radio_first,
            _("The last configuration %s.") % txt2)
        self.start_radio_last.set_active(True)
        pack(vbox, self.start_radio_last)
        self.start_radio_nth.connect("toggled", self.start_radio_nth_toggled)
        self.setupimageselection()

    def start_radio_nth_toggled(self, widget):
        "Enable the spin button only while its radio button is selected."
        self.start_nth_spin.set_sensitive(self.start_radio_nth.get_active())

    def setupimageselection(self):
        "Decide if the start image selection frame should be shown."
        n = self.gui.images.nimages
        if n <= 1:
            self.startframe.hide()
        else:
            self.startframe.show()
            # Clamp the current value before shrinking the upper bound.
            if self.start_nth_adj.value >= n:
                self.start_nth_adj.value = n-1
            self.start_nth_adj.upper = n-1
            self.numconfig_label.set_text(self.numconfig_format % (n,))

    def getimagenumber(self):
        "Get the image number selected in the start image frame."
        nmax = self.gui.images.nimages
        if nmax <= 1:
            return 0
        elif self.start_radio_first.get_active():
            return 0
        elif self.start_radio_nth.get_active():
            return self.start_nth_adj.value
        else:
            assert self.start_radio_last.get_active()
            return nmax-1

    def makebutbox(self, vbox, helptext=None):
        "Pack Run/Close buttons (and optional Help) at the window bottom."
        self.buttons = gtk.HButtonBox()
        runbut = gtk.Button(_("Run"))
        runbut.connect('clicked', self.run)
        closebut = gtk.Button(stock=gtk.STOCK_CLOSE)
        closebut.connect('clicked', lambda x: self.destroy())
        for w in (runbut, closebut):
            self.buttons.pack_start(w, 0, 0)
            w.show()
        if helptext:
            helpbut = [help(helptext)]
        else:
            helpbut = []
        pack(vbox, helpbut + [self.buttons], end=True, bottom=True)

    def setup_atoms(self):
        """Create self.atoms with a calculator attached.

        Returns False (after showing an error dialog) if no atoms are
        present or no calculator has been configured.
        """
        self.atoms = self.get_atoms()
        if self.atoms is None:
            return False
        try:
            self.calculator = self.gui.simulation['calc']
        except KeyError:
            oops(_("No calculator: Use Calculate/Set Calculator on the menu."))
            return False
        self.atoms.set_calculator(self.calculator())
        return True

    def get_atoms(self):
        "Make an atoms object from the active image"
        images = self.gui.images
        if images.natoms < 1:
            oops(_("No atoms present"))
            return None
        n = self.getimagenumber()
        # Atom count of the unrepeated cell; // makes the (previously
        # implicit Python 2) integer division explicit.
        natoms = len(images.P[n]) // images.repeat.prod()
        constraint = None
        if not images.dynamic.all():
            constraint = FixAtoms(mask=1-images.dynamic)
        return Atoms(positions=images.P[n, :natoms],
                     symbols=images.Z[:natoms],
                     cell=images.A[n],
                     magmoms=images.M[n, :natoms],
                     tags=images.T[n, :natoms],
                     pbc=images.pbc,
                     constraint=constraint)

    def begin(self, **kwargs):
        "Notify an attached progress monitor (if any) that a run starts."
        # 'key in dict' replaces the long-deprecated dict.has_key().
        if 'progress' in self.gui.simulation:
            self.gui.simulation['progress'].begin(**kwargs)

    def end(self):
        "Notify an attached progress monitor (if any) that the run ended."
        if 'progress' in self.gui.simulation:
            self.gui.simulation['progress'].end()

    def prepare_store_atoms(self):
        "Informs the gui that the next configuration should be the first."
        self.gui.prepare_new_atoms()
        self.count_steps = 0

    def store_atoms(self):
        "Observes the minimization and stores the atoms in the gui."
        self.gui.append_atoms(self.atoms)
        self.count_steps += 1
| grhawk/ASE | tools/ase/gui/simulation.py | Python | gpl-2.0 | 5,562 | [
"ASE"
] | 2c73e6a44b07b6bf56ae4529c7cfee346625ecc3f2141affdfb50738008ad8b1 |
#!/usr/bin/env python
''' VIC Image Driver testing '''
import os
import re
import xarray as xr
import pandas as pd
import numpy as np
import numpy.testing as npt
import glob
import warnings
def test_image_driver_no_output_file_nans(fnames, domain_file):
    '''
    Verify every VIC image-driver output file shares the NaN structure of
    the domain file.
    '''
    domain = xr.open_dataset(domain_file)
    for output_path in fnames:
        assert_nan_equal(domain, xr.open_dataset(output_path))
def assert_nan_equal(ds_domain, ds_output):
    """
    Assert that NaNs in the image-driver output dataset occur exactly where
    the domain mask is NaN.

    ``ds_domain`` is the domain file; ``ds_output`` is an image-driver
    output dataset.
    """
    # The mask's coordinates (lat/lon) must match between the two datasets;
    # assert_allclose raises AssertionError on any mismatch.
    for coord in ds_domain['mask'].coords:
        npt.assert_allclose(ds_output[coord], ds_domain[coord], equal_nan=True)
    # Compare NaN placement for every output variable.
    mask_dims = set(ds_domain['mask'].dims)
    for name, da in ds_output.data_vars.items():
        # Skip variables that lack the mask dimensions (e.g. time_bnds).
        if not all(dim in da.dims for dim in mask_dims):
            continue
        # Collapse every dimension not shared with the mask, then require
        # all-NaN exactly where the domain mask is NaN.
        extra_dims = mask_dims.symmetric_difference(set(da.dims))
        collapsed_nulls = da.isnull().all(dim=extra_dims)
        npt.assert_array_equal(collapsed_nulls.values,
                               np.isnan(ds_domain['mask']))
def check_multistream_image(fnames):
    '''
    Test the multistream aggregation in the image driver.

    The instantaneous (NSTEPS) stream is the reference; every other stream
    must equal a pandas-style resample of it, aggregated according to each
    variable's 'cell_methods' attribute.
    '''
    def get_pandas_how_from_cell_method(cell_method):
        """Map a CF 'cell_methods' string onto a pandas resample 'how'."""
        if cell_method == 'time: end':
            how = 'last'
        elif cell_method == 'time: beg':
            how = 'first'
        elif cell_method == 'time: mean':
            how = np.mean
        elif cell_method == 'time: minimum':
            how = 'min'
        elif cell_method == 'time: maximum':
            how = 'max'
        elif cell_method == 'time: sum':
            how = 'sum'
        else:
            # Use % formatting: the old form passed cell_method as a second
            # exception argument instead of interpolating it.
            raise ValueError('Unknown cell method argument: %s' % cell_method)
        return how

    def reindex_xr_obj_timedim(obj, freq):
        """Round obj's time index onto an even grid at the given frequency."""
        new = pd.date_range(obj.time[0].values, freq=freq,
                            periods=len(obj.time))
        # BUGFIX: this previously returned the closed-over ``instant_ds``
        # reindexed, ignoring ``obj`` entirely -- every aggregated stream
        # was silently replaced by instantaneous data.
        return obj.reindex({'time': new}, method='nearest')

    streams = {}  # stream name -> pandas resample frequency string
    stream_fnames = {}  # stream name -> file path
    for path in fnames:
        # Split the file name to recover the stream name and its frequency.
        resultdir, fname = os.path.split(path)
        pieces = re.split('[_.]', fname)
        stream = '_'.join(pieces[:-2])  # stream name
        freq_n = pieces[2]  # stream frequency n
        stream_fnames[stream] = path
        # Record the resample frequency (the NSTEPS stream is the reference).
        if 'NSTEPS' in stream:
            inst_stream = stream
        elif 'NDAYS' in stream:
            streams[stream] = '{}D'.format(freq_n)
        elif 'NHOURS' in stream:
            streams[stream] = '{}H'.format(freq_n)
        elif 'NMINUTES' in stream:
            streams[stream] = '{}min'.format(freq_n)
        elif 'NSECONDS' in stream:
            streams[stream] = '{}S'.format(freq_n)
        else:
            # BUGFIX: the ValueError was constructed but never raised.
            raise ValueError('stream %s not supported in this test' % stream)
    # Open the instantaneous stream
    instant_ds = xr.open_dataset(stream_fnames[inst_stream])
    instant_ds = reindex_xr_obj_timedim(instant_ds, '1H')  # TODO infer freq
    # Loop over all aggregated streams
    for stream, freq in streams.items():
        print(stream, freq)
        agg_ds = xr.open_dataset(stream_fnames[stream])
        agg_ds = reindex_xr_obj_timedim(agg_ds, freq)
        # Loop over the variables in the stream
        for key, agg_da in agg_ds.data_vars.items():
            how = get_pandas_how_from_cell_method(agg_da.attrs['cell_methods'])
            # Resample of the instantaneous data
            expected = instant_ds[key].resample(freq, dim='time', how=how,
                                                label='left', closed='left')
            # Get the aggregated values (from VIC)
            actual = agg_da
            # Compare the actual and expected (with tolerance)
            try:
                npt.assert_array_equal(actual.values, expected.values)
            except AssertionError as e:
                print('Variable=%s, freq=%s, how=%s: failed comparison' %
                      (key, freq, how))
                print('actual=%s\nexpected=%s' % (actual, expected))
                print(np.abs(actual - expected).max())
                raise e
def setup_subdirs_and_fill_in_global_param_mpi_test(
        s, list_n_proc, result_basedir, state_basedir, test_data_dir):
    ''' Fill in global parameter output directories for multiple runs for mpi
    testing, image driver.

    Parameters
    ----------
    s: <string.Template>
        Template of the global param file to be filled in
    list_n_proc: <list>
        Processor counts to run and compare
    result_basedir: <str>
        Base directory for flux results; one 'processors_<n>' subdirectory
        is created per run
    state_basedir: <str>
        Base directory for state results; one 'processors_<n>' subdirectory
        is created per run
    test_data_dir: <str>
        Base directory of test data

    Returns
    ----------
    list of filled-in global parameter strings, one per processor count
    '''
    filled_params = []
    for n_proc in list_n_proc:
        subdir = 'processors_{}'.format(n_proc)
        result_dir = os.path.join(result_basedir, subdir)
        state_dir = os.path.join(state_basedir, subdir)
        # Create the per-run output directories up front.
        for directory in (result_dir, state_dir):
            os.makedirs(directory, exist_ok=True)
        # Substitute the per-run paths into the global-parameter template.
        filled_params.append(s.safe_substitute(test_data_dir=test_data_dir,
                                               result_dir=result_dir,
                                               state_dir=state_dir))
    return filled_params
def check_mpi_fluxes(result_basedir, list_n_proc):
    ''' Check that output fluxes are identical for all processor counts,
    image driver.

    Parameters
    ----------
    result_basedir: <str>
        Base directory of output fluxes; each run lives in a
        'processors_<n>' subdirectory
    list_n_proc: <list>
        Processor counts to compare; the first entry is the baseline
    '''
    def _open_flux_result(n_proc):
        """Open the single flux netCDF produced by the run with n_proc cores."""
        run_dir = os.path.join(result_basedir, 'processors_{}'.format(n_proc))
        matches = glob.glob(os.path.join(run_dir, '*.nc'))
        if len(matches) > 1:
            warnings.warn(
                'More than one netCDF file found under directory {}'.
                format(run_dir))
        return xr.open_dataset(matches[0])

    # The first processor count serves as the baseline.
    ds_first_run = _open_flux_result(list_n_proc[0])
    # Every remaining run must match the baseline exactly, variable by
    # variable.
    for n_proc in list_n_proc[1:]:
        ds_current_run = _open_flux_result(n_proc)
        for var in ds_first_run.data_vars:
            npt.assert_array_equal(ds_current_run[var].values,
                                   ds_first_run[var].values,
                                   err_msg='Fluxes are not an exact match')
def check_mpi_states(state_basedir, list_n_proc):
    ''' Check that output states are identical for all processor counts,
    image driver.

    Parameters
    ----------
    state_basedir: <str>
        Base directory of output states; each run lives in a
        'processors_<n>' subdirectory
    list_n_proc: <list>
        Processor counts to compare; the first entry is the baseline
    '''
    def _open_state_result(n_proc):
        """Open the single state netCDF produced by the run with n_proc cores."""
        run_dir = os.path.join(state_basedir, 'processors_{}'.format(n_proc))
        matches = glob.glob(os.path.join(run_dir, '*.nc'))
        if len(matches) > 1:
            warnings.warn('More than one netCDF file found under '
                          'directory {}'.format(run_dir))
        return xr.open_dataset(matches[0])

    # The first processor count serves as the baseline.
    ds_first_run = _open_state_result(list_n_proc[0])
    # Every remaining run must match the baseline exactly, variable by
    # variable.
    for n_proc in list_n_proc[1:]:
        ds_current_run = _open_state_result(n_proc)
        for var in ds_first_run.data_vars:
            npt.assert_array_equal(ds_current_run[var].values,
                                   ds_first_run[var].values,
                                   err_msg='States are not an exact match')
| dgergel/VIC | tests/test_image_driver.py | Python | gpl-2.0 | 10,677 | [
"NetCDF"
] | 464695bfcf838258fb2e1a356c938ebf2b465bdb316a816a7b6b3684257c8e63 |
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import logging
import collections
import copy
from markdown.util import etree
from markdown.inlinepatterns import Pattern
from MooseMarkdownExtension import MooseMarkdownExtension
from MooseMarkdownCommon import MooseMarkdownCommon
from MooseObjectParameterTable import MooseObjectParameterTable
import MooseDocs
from MooseDocs.common.nodes import SyntaxNode, ActionNode, MooseObjectNode, MarkdownFileNodeBase
from . import misc # used for addScrollSpy function
from .. import common
LOG = logging.getLogger(__name__)
class AppSyntaxExtension(MooseMarkdownExtension):
"""
Extensions that comprise the MOOSE flavored markdown.
"""
@staticmethod
def defaultConfig():
"""Default settings for MooseMarkdownCommon."""
config = MooseMarkdownExtension.defaultConfig()
config['executable'] = [MooseDocs.ROOT_DIR, "The executable or directory to utilize for "
"generating application syntax."]
config['repo'] = ['', "The remote repository to create hyperlinks."]
config['branch'] = ['master', "The branch name to consider in repository links."]
config['links'] = [dict(), "The set of paths for generating input file and source code "
"links to objects."]
config['install'] = ['', "The location to install system and object documentation."]
config['hide'] = [dict(), "Suppress warnings for syntax listed in this dictionary."]
config['pairs'] = [list(), "Testing."]
return config
def __init__(self, **kwargs):
super(AppSyntaxExtension, self).__init__(**kwargs)
# Storage for the MooseApp syntax tree
self.__app_syntax = None
def getMooseAppSyntax(self):
"""
Return the MooseAppSyntax object.
"""
return self.__app_syntax
def extendMarkdown(self, md, md_globals):
"""
Builds the extensions for MOOSE flavored markdown.
"""
md.registerExtension(self)
# Create a config object
config = self.getConfigs()
# Build syntax from JSON
exe = os.path.join(MooseDocs.ROOT_DIR, self.getConfig('executable'))
self.__app_syntax = common.moose_docs_app_syntax(exe, hide=config['hide'])
config['syntax'] = self.__app_syntax
# Populate the database for input file and children objects
LOG.debug('Creating input file and source code use database.')
repo = os.path.join(config['repo'], 'blob', config['branch'])
database = common.MooseLinkDatabase(repo=repo, links=config['links'])
# Inline Patterns
system = MooseCompleteSyntax(markdown_instance=md, **config)
md.inlinePatterns.add('moose_complete_syntax', system, '_begin')
params = MooseParameters(markdown_instance=md, **config)
md.inlinePatterns.add('moose_syntax_parameters', params, '_begin')
desc = MooseDescription(markdown_instance=md, **config)
md.inlinePatterns.add('moose_syntax_description', desc, '_begin')
child_list = MooseFileList(markdown_instance=md,
database=database.children,
title='Child Objects',
command='children',
**config)
md.inlinePatterns.add('moose_child_list', child_list, '_begin')
input_list = MooseFileList(markdown_instance=md,
database=database.inputs,
title='Input Files',
command='inputs',
**config)
md.inlinePatterns.add('moose_input_list', input_list, '_begin')
object_list = MooseObjectList(markdown_instance=md, **config)
md.inlinePatterns.add('moose_object_list', object_list, '_begin')
action_list = MooseActionList(markdown_instance=md, **config)
md.inlinePatterns.add('moose_action_list', action_list, '_begin')
subsystem_list = MooseSubSystemList(markdown_instance=md, **config)
md.inlinePatterns.add('moose_subsystem_list', subsystem_list, '_begin')
def makeExtension(*args, **kwargs): #pylint: disable=invalid-name
"""
Create the AppSyntaxExtension.
"""
return AppSyntaxExtension(*args, **kwargs)
class MooseSyntaxBase(MooseMarkdownCommon, Pattern):
"""
Base for MOOSE system/object pattern matching.
Args:
regex[str]: The regular expression to match.
yaml[MooseYaml]: The MooseYaml object for the application.
syntax[dict]: A dictionary of MooseApplicatinSyntax objects.
"""
@staticmethod
def defaultSettings():
"""Default settings for MooseSyntaxBase."""
settings = MooseMarkdownCommon.defaultSettings()
settings['actions'] = (True, "Enable/disable action syntax lookup (this is used for "
"shared syntax such as BCs/Pressure).")
settings['objects'] = (True, "Enable/disable MooseObject syntax lookup (this is used for "
"shared syntax such as BCs/Pressure).")
settings['syntax'] = (True, "Enable/disable SyntaxNode lookup (this is needed for shared "
"syntax).")
return settings
def __init__(self, regex, markdown_instance=None, syntax=None, **kwargs):
MooseMarkdownCommon.__init__(self, **kwargs)
Pattern.__init__(self, regex, markdown_instance)
self._syntax = syntax
self.__cache = dict()
def initMatch(self, match):
"""
Initialize method for return the nodes and settings.
"""
# Extract Syntax and Settings
syntax = match.group('syntax')
settings = self.getSettings(match.group('settings'))
# Types to search for
types = []
if settings.get('actions', False):
types.append(ActionNode)
if settings.get('objects', False):
types.append(MooseObjectNode)
if settings.get('syntax', False):
types.append(SyntaxNode)
if not types:
return self.createErrorElement("The 'actions', 'objects', and 'syntax' flags cannot "
"all be False.")
# Locate the node
if syntax in self.__cache:
nodes = self.__cache[syntax]
else:
filter_ = lambda n: syntax == n.full_name and isinstance(n, tuple(types))
nodes = self._syntax.findall(filter_=filter_)
self.__cache[syntax] = nodes
return nodes, settings
def checkForErrors(self, nodes, match, settings, unique=True):
"""
Perform error checking on the nodes.
"""
syntax = match.group('syntax')
command = match.group('command')
# Failed to locate syntax
if not nodes:
items = []
if settings.get('actions', False):
items.append('Action')
if settings.get('objects', False):
items.append('MooseObject')
if settings.get('syntax', False):
items.append('syntax')
msg = 'Failed to locate {} for the command: `!syntax {} {}`' \
.format(' or '.join(items), str(command), str(syntax))
if isinstance(self.markdown.current, MarkdownFileNodeBase):
msg += ' in the file {}'.format(self.markdown.current.filename)
return msg + '.'
# Non-unique
if unique and len(nodes) > 1:
msg = 'The syntax provided is not unique, the following objects were located for the ' \
'command `!syntax {} {}`'.format(str(command), str(syntax))
if isinstance(self.markdown.current, MarkdownFileNodeBase):
msg += ' in the file {}'.format(self.markdown.current.filename)
msg += ':'
for node in nodes:
msg += '\n {}'.format(repr(node))
return msg
return None
def clearCache(self):
"""
Clears the search cache, this is for testing only.
"""
self.__cache = dict()
class MooseParameters(MooseSyntaxBase):
"""
Creates parameter tables for Actions and MooseObjects.
"""
RE = r'^!syntax\s+(?P<command>parameters)\s+(?P<syntax>.*?)(?:$|\s+)(?P<settings>.*)'
@staticmethod
def defaultSettings():
"""Default settings for MooseParameters."""
settings = MooseSyntaxBase.defaultSettings()
settings['title'] = ('Input Parameters', "Title to include prior to the parameter table.")
settings['title_level'] = (2, "The HTML heading level to apply to the title")
settings.pop('syntax') # syntax nodes do not have parameters
return settings
def __init__(self, **kwargs):
super(MooseParameters, self).__init__(self.RE, **kwargs)
def handleMatch(self, match):
"""
Return table(s) of input parameters.
"""
nodes, settings = self.initMatch(match)
msg = self.checkForErrors(nodes, match, settings)
if msg:
return self.createErrorElement(msg)
info = nodes[0]
# Parameters dict()
parameters = dict()
if isinstance(info, SyntaxNode):
for action in info.actions():
if action.parameters is not None:
parameters.update(action.parameters)
elif info.parameters:
parameters.update(info.parameters)
# Create the tables (generate 'Required' and 'Optional' initially so that they come out in
# the proper order)
tables = collections.OrderedDict()
tables['Required'] = MooseObjectParameterTable()
tables['Optional'] = MooseObjectParameterTable()
# Loop through the parameters in yaml object
for param in parameters.itervalues() or []:
name = param['group_name']
if not name and param['required']:
name = 'Required'
elif not name and not param['required']:
name = 'Optional'
if name not in tables:
tables[name] = MooseObjectParameterTable()
tables[name].addParam(param)
# Produces a debug message if parameters are empty, but generally we just want to include
# the !parameters command, if parameters exist then a table is produce otherwise nothing
# happens. This will allow for parameters to be added and the table appear if it was empty.
if not any(tables.values()):
LOG.debug('Unable to locate parameters for %s.', info.name)
else:
el = self.applyElementSettings(etree.Element('div'), settings)
if settings['title']:
title = etree.SubElement(el, 'h{}'.format(str(settings['title_level'])))
title.text = settings['title']
for key, table in tables.iteritems():
if table:
subtitle = etree.SubElement(el, 'h{}'.format(str(settings['title_level'] + 1)))
subtitle.text = '{} {}'.format(key, 'Parameters')
el.append(table.html())
return el
class MooseDescription(MooseSyntaxBase):
"""
Creates parameter tables for Actions and MooseObjects.
"""
RE = r'^!syntax\s+(?P<command>description)\s+(?P<syntax>.*?)(?:$|\s+)(?P<settings>.*)'
@staticmethod
def defaultSettings():
"""Default settings for MooseDescription."""
settings = MooseSyntaxBase.defaultSettings()
settings.pop('syntax') # 'syntax nodes do not have descriptions'
return settings
def __init__(self, **kwargs):
super(MooseDescription, self).__init__(self.RE, **kwargs)
def handleMatch(self, match):
"""
Return the class description html element.
"""
# Extract Syntax and Settings
nodes, settings = self.initMatch(match)
msg = self.checkForErrors(nodes, match, settings)
if msg:
return self.createErrorElement(msg)
info = nodes[0]
# Create an Error element, but only produce warning/error LOG if the object is not hidden
if info.description is None:
msg = "Failed to locate class description for {} syntax".format(info.name)
if isinstance(self.markdown.current, MarkdownFileNodeBase):
msg += " in the file {}".format(self.markdown.current.filename)
return self.createErrorElement(msg + '.', error=not self.markdown.current.hidden)
# Create the html element with supplied styles
el = self.applyElementSettings(etree.Element('p'), settings)
el.text = info.description
return el
class MooseFileList(MooseSyntaxBase):
    """
    A file list creation object designed to work with MooseLinkDatabase information.

    Args:
        database[dict]: The MooseLinkDatabase dictionary to consider.
        repo[str]: The repository for creating links to GitHub/GitLab.
        title[str]: The default title.
        command[str]: The command to associate with the "!syntax" keyword.
    """
    RE = r'^!syntax\s+(?P<command>{})\s+(?P<syntax>.*?)(?:$|\s+)(?P<settings>.*)'

    @staticmethod
    def defaultSettings():
        settings = MooseSyntaxBase.defaultSettings()
        settings['title'] = ('default', "The title display prior to tables ('default' will apply "
                                        "a generic title.).")
        settings['title_level'] = (2, "The HTML heading level to apply to the title.")
        settings['syntax'] = (False, settings['syntax'][1])
        settings['actions'] = (False, settings['actions'][1])
        return settings

    def __init__(self, database=None, title=None, command=None, **kwargs):
        super(MooseFileList, self).__init__(self.RE.format(command), **kwargs)
        self._database = database
        self._title = title

    def handleMatch(self, match):
        """
        Create the list element.
        """
        nodes, settings = self.initMatch(match)
        error = self.checkForErrors(nodes, match, settings)
        if error:
            return self.createErrorElement(error)
        node = nodes[0]

        # Replace the 'default' placeholder with the configured title.
        if (settings['title'] is not None) and (settings['title'].lower() == 'default'):
            settings['title'] = self._title

        # Build the containing <div> and emit the link lists.
        container = self.applyElementSettings(etree.Element('div'), settings)
        if settings['title'] is not None:
            container.set('id', '#{}'.format(settings['title'].lower().replace(' ', '-')))
        if settings['title_level'] == 2:
            container.set('class', 'section scrollspy')
        self._listhelper(container, node, settings)
        return container

    def _listhelper(self, parent, info, settings):
        """
        Helper method for dumping link lists.

        Args:
            parent[etree.Element]: The parent element the headers and lists are to be applied
            info[SyntaxNode]: The desired object from which the list will be created.
            settings[dict]: The current settings.
        """
        level = int(settings['title_level'])
        found_any = False
        for category, db in self._database.iteritems():
            if info.name not in db:
                continue
            found_any = True
            header = etree.SubElement(parent, 'h{}'.format(level + 1))
            header.text = category
            items = etree.SubElement(parent, 'ul')
            items.set('style', "max-height:350px;overflow-y:Scroll")
            for link in db[info.name]:
                items.append(link.html())

        # Prepend the main title only if at least one list was produced.
        if found_any and settings['title'] is not None:
            heading = etree.Element('h{}'.format(level))
            heading.text = settings['title']
            parent.insert(0, heading)
class MooseCompleteSyntax(MooseMarkdownCommon, Pattern):
    """
    Display the complete system syntax for the compiled application.

    The static methods of this class are used by the 'objects' and 'actions' command as well.
    """
    RE = r'^!syntax\s+complete(?:$|\s+)(?P<settings>.*)'

    @staticmethod
    def defaultSettings():
        settings = MooseMarkdownCommon.defaultSettings()
        settings['groups'] = (None, "The configured 'groups' to include.")
        return settings

    def __init__(self, markdown_instance=None, syntax=None, **kwargs):
        MooseMarkdownCommon.__init__(self, **kwargs)
        Pattern.__init__(self, self.RE, markdown_instance)
        self._syntax = syntax
        self._install = kwargs.get('install')

    def handleMatch(self, match):
        """
        Creates complete list of objects.
        """
        settings = self.getSettings(match.group('settings'))
        groups = self.sortGroups(self._syntax, settings)

        div = etree.Element('div')
        div.set('class', 'moose-system-list')

        # Walk the entire syntax tree, emitting a header + object list for
        # every named node belonging to one of the requested groups.
        for obj in self._syntax.syntax(recursive=True):
            if obj.name and obj.hasGroups(set(g[0] for g in groups)):
                self.addHeader(div, obj)
                self.addObjects(div, obj, groups)

        misc.ScrollContents.addScrollSpy(div)
        return div

    @staticmethod
    def sortGroups(node, settings):
        """
        Re-order groups to begin with 'framework' and limit groups based on settings.

        Returns a sorted list of (folder, display name) tuples.
        """
        # Create a set() of all (folder, display name) group pairs.
        groups = set()
        for n in node.findall():
            for key, value in n.groups.iteritems():
                groups.add((key, value))

        # Remove groups not in 'groups' settings.
        if settings['groups'] is not None:
            for pair in copy.copy(groups):
                if pair[0] not in settings['groups']:
                    groups.remove(pair)

        # Temporarily remove 'framework' so it can be forced to the front.
        # (A redundant `groups = groups` self-assignment was removed here.)
        has_framework = False
        framework = ('framework', 'Framework')
        if framework in groups:
            groups.remove(framework)
            has_framework = True

        # Sort the remainder and restore 'framework' at the beginning.
        groups = sorted(groups)
        if has_framework:
            groups.insert(0, framework)
        return groups

    @staticmethod
    def addHeader(div, obj):
        """
        Adds syntax header.
        """
        # Heading level tracks the depth of the syntax path (e.g. /A/B -> h3).
        level = obj.full_name.count('/') + 1
        hid = obj.full_name.strip('/').replace('/', '-').lower()
        h = etree.SubElement(div, 'h{}'.format(level))
        h.text = obj.full_name.strip('/')
        h.set('id', hid)

        # Link to the system page, rendered as a material-icons glyph.
        a = etree.SubElement(h, 'a')
        a.set('href', obj.markdown('systems', absolute=False))
        if obj.hidden:
            a.set('data-moose-disable-link-error', '1')
        i = etree.SubElement(a, 'i')
        i.set('class', 'material-icons')
        i.text = 'input'

        # One chip per group the object belongs to.
        for group in obj.groups.itervalues():
            chip = etree.SubElement(h, 'div')
            chip.set('class', 'chip moose-chip')
            chip.text = group

    @staticmethod
    def addObjects(div, node, groups):
        """
        Add all MooseObject to the <div> container.
        """
        return MooseCompleteSyntax._addItemsHelper(div, groups, node.objects, 'Objects')

    @staticmethod
    def addActions(div, node, groups):
        """
        Add all Actions to the <div> container.
        """
        return MooseCompleteSyntax._addItemsHelper(div, groups, node.actions, 'Actions')

    @staticmethod
    def addSyntax(div, node, groups):
        """
        Add all SyntaxNode (i.e., subsystems) to the <div> container.
        """
        return MooseCompleteSyntax._addItemsHelper(div, groups, node.syntax, 'Systems')

    @staticmethod
    def _addItemsHelper(div, groups, func, title):
        """
        Helper for adding objects/actions to the supplied div object.

        Args:
            div: parent element to append the collapsible list to.
            groups: list of (folder, display name) pairs from sortGroups.
            func: accessor (node.objects/actions/syntax) queried per group.
            title: suffix for the per-group header ('Objects', 'Actions', ...).

        Returns True if anything was added, otherwise False.
        """
        el = MooseDocs.common.MooseCollapsible()
        for folder, group in groups:
            objects = func(group=folder)
            if objects:
                el.addHeader('{} {}'.format(group, title))
                for obj in objects:
                    a = etree.Element('a')
                    a.text = obj.name
                    a.set('href', '/' + obj.markdown('', absolute=False))
                    if obj.hidden:
                        a.set('data-moose-disable-link-error', '1')
                    # Actions may not carry a description attribute.
                    if hasattr(obj, 'description'):
                        el.addItem(a, obj.description)
                    else:
                        el.addItem(a)
        if el:
            div.append(el.element())
            return True
        return False
class MoosePartialSyntax(MooseSyntaxBase):
    """
    Creates dynamic lists for MooseObjects or Actions.
    """
    RE = r'^!syntax\s+(?P<command>{})\s+(?P<syntax>.*?)(?:$|\s+)(?P<settings>.*)'

    @staticmethod
    def defaultSettings():
        settings = MooseSyntaxBase.defaultSettings()
        settings['title'] = (None, "Title to include prior to the syntax list.")
        settings['title_level'] = (2, "The HTML heading level to apply to the title")
        settings['groups'] = (None, "The configured 'groups' to include.")
        return settings

    def __init__(self, syntax=None, command=None, **kwargs):
        MooseSyntaxBase.__init__(self, self.RE.format(command), syntax=syntax, **kwargs)
        self._install = kwargs.get('install')

    def addSyntax(self, div, info, groups): #pylint: disable=unused-argument,no-self-use
        """
        Abstract method for the adding either the action or object list.
        """
        return False

    def handleMatch(self, match):
        """
        Handle the regex match for this extension.
        """
        # Extract the desired nodes; short-circuit on lookup errors.
        raw_syntax = match.group('syntax')
        nodes, settings = self.initMatch(match)
        error = self.checkForErrors(nodes, match, settings)
        if error:
            return self.createErrorElement(error)
        node = nodes[0]

        # Only SyntaxNode objects can produce partial lists.
        if not isinstance(node, SyntaxNode):
            error = "The given syntax '{}' did not return a SyntaxNode in file {}." \
                    .format(raw_syntax, self.markdown.current.filename)
            return self.createErrorElement(error)

        # Build the container and delegate the list contents to the subclass.
        container = etree.Element('div')
        container.set('class', 'moose-system-list')
        groups = MooseCompleteSyntax.sortGroups(node, settings)
        added = self.addSyntax(container, node, groups)

        # Prepend the title only when the subclass actually added content.
        if added and settings['title'] is not None:
            heading = etree.Element('h{}'.format(int(settings['title_level'])))
            heading.text = settings['title']
            container.insert(0, heading)
        return container
class MooseObjectList(MoosePartialSyntax):
    """
    Create the "!syntax objects" command.
    """

    @staticmethod
    def defaultSettings():
        """Add default settings for object lists."""
        settings = MoosePartialSyntax.defaultSettings()
        settings['objects'] = (False, settings['objects'][1])
        settings['actions'] = (False, settings['actions'][1])
        settings['title'] = ("Available Sub-Objects", settings['title'][1])
        return settings

    def __init__(self, **kwargs):
        super(MooseObjectList, self).__init__(command='objects', **kwargs)

    def addSyntax(self, div, info, groups):
        """Adds objects via the shared MooseCompleteSyntax helper."""
        return MooseCompleteSyntax.addObjects(div, info, groups)
class MooseActionList(MoosePartialSyntax):
    """
    Create the "!syntax actions" command.
    """

    @staticmethod
    def defaultSettings():
        """Add default settings for action lists."""
        settings = MoosePartialSyntax.defaultSettings()
        settings['objects'] = (False, settings['objects'][1])
        settings['actions'] = (False, settings['actions'][1])
        settings['title'] = ("Associated Actions", settings['title'][1])
        return settings

    def __init__(self, **kwargs):
        super(MooseActionList, self).__init__(command='actions', **kwargs)

    def addSyntax(self, div, info, groups):
        """Adds actions via the shared MooseCompleteSyntax helper."""
        return MooseCompleteSyntax.addActions(div, info, groups)
class MooseSubSystemList(MoosePartialSyntax):
    """
    Create the "!syntax subsystems" command.
    """

    @staticmethod
    def defaultSettings():
        """Add default settings for subsystem lists."""
        settings = MoosePartialSyntax.defaultSettings()
        settings['objects'] = (False, settings['objects'][1])
        settings['actions'] = (False, settings['actions'][1])
        settings['title'] = ("Available Sub-Systems", settings['title'][1])
        return settings

    def __init__(self, **kwargs):
        super(MooseSubSystemList, self).__init__(command='subsystems', **kwargs)

    def addSyntax(self, div, info, groups):
        """Adds Sub-Systems via the shared MooseCompleteSyntax helper."""
        return MooseCompleteSyntax.addSyntax(div, info, groups)
| liuwenf/moose | python/MooseDocs/extensions/app_syntax.py | Python | lgpl-2.1 | 26,487 | [
"MOOSE"
] | 587b76fe8d93e2c6172a92743d78af9c8493abc31c7e977282e448535f00a76b |
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import time
from threading import Thread
from numpy import array, hstack, average
# ============= enthought library imports =======================
from traits.api import Instance, HasTraits, Str, Bool, Float
from pychron.core.helpers.filetools import pathtolist
from pychron.core.helpers.strtools import csv_to_floats
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.envisage.view_util import open_view
from pychron.lasers.stage_managers.stage_visualizer import StageVisualizer
from pychron.stage.calibration.calibrator import TrayCalibrator
class Result(HasTraits):
    # Outcome of a single hole auto-centering attempt.
    hole_id = Str      # identifier of the stage-map hole
    corrected = Bool   # True when autocentering produced a corrected position
    dx = Float         # nominal-minus-measured x offset
    dy = Float         # nominal-minus-measured y offset
    nx = Float         # nominal (calibration-mapped) x position
    ny = Float         # nominal (calibration-mapped) y position
class SemiAutoCalibrator(TrayCalibrator):
    """
    1a. user move to center
     b. record position
    2a. user move to right
     b. record position
    3. traverse holes finding autocenter position
    """
    # stage map describing the nominal hole layout
    stage_map = Instance('pychron.stage.maps.base_stage_map.BaseStageMap')

    def handle(self, step, x, y, canvas):
        """
        State-machine step handler driven by the calibration UI buttons.

        Args:
            step: name of the current calibration step/button.
            x, y: current stage position.
            canvas: canvas providing the calibration item.

        Returns a dict of calibration values for the caller, or None.
        """
        ret = None
        if step == 'Calibrate':
            # restart: discard previous corrections and start a new calibration
            self.stage_map.clear_correction_file()
            canvas.new_calibration_item()
            self.calibration_step = 'Locate Center'
        elif step == 'Locate Center':
            canvas.calibration_item.set_center(x, y)

            # check stage map has at least one calibration hole.
            # if not issue warning and ask for manual locate right
            if self._check_auto_calibration():
                self.calibration_step = 'Auto Calibrate'
            else:
                name = self.stage_map.name
                msg = 'Auto Rotation calibration not available.\n ' \
                      '{} has no calibration holes'.format(name)
                self.warning_dialog(msg)
                self.calibration_step = 'Locate Right'

            ret = dict(cx=x, cy=y, clear_corrections=False)
            # NOTE(review): the rotation is zeroed and the 'right' point is
            # seeded with the center position; presumably this initializes the
            # calibration item before the real right point is located -- confirm.
            canvas.calibration_item.rotation = 0
            canvas.calibration_item.set_right(x, y)
        elif step == 'Locate Right':
            canvas.calibration_item.set_right(x, y)
            ret = dict(calibration_step='Traverse',
                       clear_corrections=False,
                       rotation=canvas.calibration_item.rotation)
        elif step == 'Auto Calibrate':
            # run the automatic rotation calibration off the UI thread
            self._alive = True
            t = Thread(target=self._auto_calibrate,
                       args=(canvas.calibration_item,))
            t.start()
            self.calibration_enabled = False
            # self.calibration_step = 'Cancel'
        elif step == 'Traverse':
            if self.confirmation_dialog('Start Autocentering Traverse'):
                # run the hole traverse off the UI thread
                self._alive = True
                t = Thread(target=self._traverse,
                           args=(canvas.calibration_item,))
                t.start()
                self.calibration_enabled = False
                # self.calibration_step = 'Cancel'
            else:
                self.calibration_step = 'Calibrate'
                self.calibration_enabled = True
        return ret

    def _auto_calibrate(self, calibration):
        """
        Determine the tray rotation by autocentering the east/west calibration
        holes, then traverse all holes and return to center.
        """
        smap = self.stage_map

        rrot = lrot = None
        # locate right
        if self._alive:
            east = smap.get_calibration_hole('east')
            if east is not None:
                center = smap.get_calibration_hole('center')
                if center is not None:
                    # walk outward from the center toward the east hole,
                    # refining the rotation estimate along the way
                    self.debug('walk out to locate east')
                    for hid in range(int(center.id) + 1, int(east.id), 2):
                        if not self._alive:
                            break
                        hole = smap.get_hole(hid)
                        npt, corrected = self._autocenter(hole)
                        if corrected:
                            rrot = calibration.calculate_rotation(*npt)
                            calibration.set_right(*npt)
                            self.debug('Calculated rotation= {}'.format(rrot))
                    self.stage_manager.close_open_images()

                if self._alive:
                    self.debug('Locate east {}'.format(east.id))
                    npt, corrected = self._autocenter(east)
                    if corrected:
                        rrot = calibration.calculate_rotation(*npt)
                        calibration.set_right(*npt)
                        self.debug('Calculated rotation= {}'.format(rrot))
                    self.stage_manager.close_open_images()

        # locate left
        if self._alive:
            hole = smap.get_calibration_hole('west')
            if hole is not None:
                self.debug('Locate west {}'.format(hole.id))
                npt, corrected = self._autocenter(hole)
                if corrected:
                    lrot = calibration.calculate_rotation(*npt, sense='west')
                    self.debug('Calculated rotation= {}'.format(lrot))
                self.stage_manager.close_open_images()

        if self._alive:
            # combine the two estimates; fall back to whichever succeeded
            if lrot is None:
                rot = rrot
            elif rrot is None:
                rot = lrot
            else:
                self.debug('rrot={}, lrot={}'.format(rrot, lrot))
                # average rotation
                rot = (rrot + lrot) / 2.

            if rot is None:
                self.warning('failed calculating rotation')
                self.calibration_step = 'Calibrate'
                return

            # set rotation
            calibration.rotation = rot
            self.rotation = rot
            self.save_event = {'clear_corrections': False}

            # traverse holes
            self._traverse(calibration)

            # move back to center hole
            center = self.stage_map.get_calibration_hole('center')
            if center:
                x, y = center.corrected_position if center.has_correction else center.nominal_position
                self.stage_manager.linear_move(x, y, block=True, force=True, use_calibration=False)
            else:
                self.warning('No calibration hole defined for "center" in Stage Map file {}'.format(
                    self.stage_map.file_path))

    def _traverse(self, calibration):
        """
        visit each hole in holes
        record autocenter position
        warn user about failures
        """
        sm = self.stage_manager
        smap = self.stage_map

        # holes = smap.row_ends(alternate=True)
        holes = list(smap.circumference_holes())
        holes.extend(smap.mid_holes())

        results = []
        points = []
        center = calibration.center
        dxs, dys = array([]), array([])
        guess = None
        # weights favor the most recent offsets when predicting the next hole
        weights = [1, 2, 3, 4, 5, 6]
        # holes = [smap.get_hole(1), smap.get_hole(3), smap.get_hole(5)]
        success = True
        non_corrected = 0
        for hi in holes:
            sm.close_open_images()
            if not self._alive:
                self.info('hole traverse canceled')
                break

            # nominal hole position mapped through the current calibration
            nominal_x, nominal_y = smap.map_to_calibration(hi.nominal_position,
                                                           center,
                                                           calibration.rotation)
            n = len(dxs)
            if n:
                # predict this hole's position from a weighted average of the
                # last (up to 6) measured offsets
                lim = min(6, n)
                dx = average(dxs, weights=weights[:lim])
                dy = average(dys, weights=weights[:lim])
                guess = nominal_x - dx, nominal_y - dy

            npt, corrected = self._autocenter(hi, guess=guess)
            if not corrected:
                non_corrected += 1
                self.info('Failed to autocenter {}'.format(hi.id))
                npt = nominal_x, nominal_y
                if non_corrected > 5:
                    invoke_in_main_thread(self.warning_dialog,
                                          '6 consecutive holes failed to autocenter. Autocalibration Canceled')
                    success = False
                    break
            else:
                # success: reset the consecutive-failure counter
                # (-1 followed by += 1 leaves it at 0)
                non_corrected = -1
                non_corrected += 1

            dx = nominal_x - npt[0]
            dy = nominal_y - npt[1]
            # keep a sliding window of at most 6 offsets
            dxs = hstack((dxs[-5:], dx))
            dys = hstack((dys[-5:], dy))

            res = Result(hole_id=hi.id, corrected=corrected,
                         dx=dx, dy=dy,
                         nx=nominal_x, ny=nominal_y)
            results.append(res)
            points.append((npt, corrected))

        sm.close_open_images()
        if success:
            smap.generate_row_interpolated_corrections()

            # display the results
            sv = StageVisualizer()
            sv.results = results
            sv.set_stage_map(self.stage_map, points, calibration)
            sv.save()
            invoke_in_main_thread(open_view, sv)

        # reset calibration manager
        self.calibration_step = 'Calibrate'
        self.calibration_enabled = True

    def _autocenter(self, hi, guess=None):
        """
        Move to the hole (nominal position or supplied guess) and run the
        stage manager's autocenter routine.

        Returns (position, corrected_flag); the interpolation flag from
        stage_manager.autocenter is intentionally discarded.
        """
        self.debug('autocentering hole={}, guess={}'.format(hi.id, guess))
        sm = self.stage_manager
        kw = {'block': True, 'force': True}
        if guess is None:
            x, y = hi.x, hi.y
            # move to nominal hole position
            kw['use_calibration'] = True
        else:
            x, y = guess
            kw['use_calibration'] = False
        sm.linear_move(x, y, **kw)

        # delay for image refresh
        time.sleep(0.5)

        # autocenter
        npt, corrected, interp = sm.autocenter(holenum=hi.id, save=True,
                                               inform=False)
        return npt, corrected

    def _check_auto_calibration(self):
        """Return True when the stage map defines at least one of the
        east/west calibration holes required for rotation calibration."""
        smap = self.stage_map
        l = smap.get_calibration_hole('west')
        r = smap.get_calibration_hole('east')
        return l is not None or r is not None
class AutoCalibrator(SemiAutoCalibrator):
    """
    1a. move to center position automatically
     b. autocenter
    2a. move to right position automatically
     b. autocenter
    """
    # one-shot flag so configuration warnings are only shown once
    _warned = False

    def handle(self, step, x, y, canvas):
        """
        Fully automatic variant of SemiAutoCalibrator.handle: when the stage
        map provides calibration holes and a center guess, 'Calibrate' kicks
        off the whole procedure on a background thread; otherwise fall back
        to the semi-automatic flow.
        """
        center_guess = self._get_center_guess()
        center_hole = self.stage_map.get_calibration_hole('center')
        if not self._check_auto_calibration():
            # no east/west calibration holes: only semi-auto is possible
            if not self._warned:
                self.warning_dialog('Auto calibration not available. Stage map not properly configured')
                self._warned = True
            return super(AutoCalibrator, self).handle(step, x, y, canvas)
        if center_guess is None or center_hole is None:
            if not self._warned:
                self.warning_dialog('Center hole/Center guess not configured. Center hole={}, Guess={}'.format(
                    center_hole, center_guess))
                self._warned = True
            return super(AutoCalibrator, self).handle(step, x, y, canvas)
        else:
            ret = None
            if step == 'Calibrate':
                # start from a clean slate and run the calibration off the UI thread
                self.stage_map.clear_correction_file()
                canvas.new_calibration_item()
                self.calibration_enabled = False
                self._alive = True
                t = Thread(target=self._auto_calibrate,
                           args=(canvas.calibration_item, center_hole, center_guess))
                t.start()
            return ret

    def _auto_calibrate(self, calibration, center_hole, center_guess):
        """
        Locate the center hole starting from the configured guess, then
        continue with the SemiAutoCalibrator procedure.
        """
        # NOTE(review): npos (the located center) is not applied to the
        # calibration here; presumably stage_manager.autocenter(save=True)
        # records it -- confirm.
        npos, corrected = self._autocenter(center_hole, center_guess)
        if not corrected:
            invoke_in_main_thread(self.warning_dialog, 'Failed to located center hole. Try SemiAutoCalibration')
            self._warned = False
            self.calibration_step = 'Calibrate'
            self.calibration_enabled = True
        else:
            super(AutoCalibrator, self)._auto_calibrate(calibration)

    def _get_center_guess(self):
        """
        Read the (x, y) center guess from the stage map's guess file.

        Returns a list of floats parsed from the file's first line, or None
        when the file is missing or unparsable.
        """
        path = self.stage_map.center_guess_path
        if os.path.isfile(path):
            try:
                guess = pathtolist(path)
                return csv_to_floats(guess[0])
            except BaseException as e:
                self.debug('Failed parsing center guess file {}. error={}'.format(path, e))
# ============= EOF =============================================
| UManPychron/pychron | pychron/stage/calibration/auto_calibrator.py | Python | apache-2.0 | 12,969 | [
"VisIt"
] | 5ac1a98e2a1754567db56a878492852b10e9216e7b94cd74301862234f3a32b3 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# userstats - Front end to user stats
# Copyright (C) 2003-2010 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
# Render tracebacks in the browser instead of failing silently.
cgitb.enable()

from shared.functionality.userstats import main
from shared.cgiscriptstub import run_cgi_script

# Delegate request handling to the shared CGI wrapper.
# NOTE(review): 'cgi' appears unused in this stub; presumably kept for
# consistency with the project's other CGI scripts -- confirm before removing.
run_cgi_script(main)
| heromod/migrid | mig/cgi-bin/userstats.py | Python | gpl-2.0 | 1,074 | [
"Brian"
] | a985e67e02a7f56b385c362c3a1007992f6212f3683ea41eab86ed37d3d9ae3c |
import sys
class ProgressBar(object):
    """
    Shows the progress of analysis in frame rate and percentage.
    In August 2016, used for residence time analysis, since others are shown by MDAnalysis.
    """
    # FIX: the docstring above was previously placed *after* the class
    # attributes, where it is a no-op string expression rather than __doc__.
    __author__ = "Laura Domicevica"
    __version__ = "09.2016"

    def __init__(self, title):
        """
        Args:
            title: label printed before the frame counter on every update.
        """
        self.title = title

    def update(self, frame_n, total_n_frames):
        """Overwrite the current terminal line with progress for frame_n of total_n_frames."""
        percentage = frame_n/float(total_n_frames)*100
        percent_text = str(int(percentage))+"%"
        # '\r' returns the cursor to column 0 so the next update overwrites this line
        sys.stdout.write(self.title +" frame "+ str(frame_n)+"/"+str(total_n_frames)+" ["+percent_text+"]"+ "\r")
        sys.stdout.flush()

    def finish(self, total_n_frames):
        """Print the final 100% line."""
        sys.stdout.write(self.title +" frame "+ str(total_n_frames)+"/"+str(total_n_frames)+" [100%]"+ "\r")
"MDAnalysis"
] | 63a017cc5737be29df0e9f609bfa791e75113ff163b350de8627bb9891eeace4 |
from subdown_version import __version__
introstr="""
_________ ___. ________
/ _____/ __ __ \_ |__ \______ \ ____ __ _ __ ____
\_____ \ | | \ | __ \ | | \ / _ \ \ \/ \/ / / \
/ \| | / | \_\ \ | ` \( <_> ) \ / | | \
/_______ /|____/ |___ //_______ / \____/ \/\_/ |___| /
\/ \/ \/ \/ v"""+__version__+"""
BY
_________ .__
/ _____/_____ |__| ____ ___.__._____ _____
\_____ \ \__ \ | | / \ < | |\__ \ / \
/ \ / __ \_| || | \ \___ | / __ \_| Y Y \
/_______ /(____ /|__||___| / / ____|(____ /|__|_| /
\/ \/ \/ \/ \/ \/
Visit Me at : https://sainyamkapoor.github.io
For more info Regarding this app Visit : http://sainyamkapoor.github.io/SubDown
"""
"""Code begins"""
from os import path, remove
from urllib2 import ProxyHandler,HTTPBasicAuthHandler, build_opener, HTTPHandler, install_opener
from sys import argv
from sub_db_down import sub_db
from open_sub_down import open_sub
# Proxy configuration: leave proxy_url empty for a direct connection.
proxy_url = ''
username = ''
password = ''

# Build the urllib2 proxy mapping.
# FIX: the original compared strings with 'is'/'is not', which tests object
# identity and depends on interning; use equality instead.
if proxy_url == '':
    proxy_str = ''
    proxy_dict = {}
elif username != '':
    # authenticated proxy: http://user:pass@host
    proxy_str = 'http://' + username + ':' + password + '@' + proxy_url
    proxy_dict = {'http': proxy_str}
else:
    proxy_str = 'http://' + proxy_url
    proxy_dict = {'http': proxy_str}
# Install a global opener so every urllib2 request goes through the proxy
# configured above (an empty proxy_dict makes this a no-op proxy).
proxy = ProxyHandler(proxy_dict)
auth = HTTPBasicAuthHandler()
opener = build_opener(proxy, auth, HTTPHandler)
install_opener(opener)
if __name__=='__main__':
    try:
        # Expect the media file path as the first CLI argument.
        arg_path = argv[1]
        filename=arg_path.split('\\')[-1]  # Windows-style basename
    except IndexError :
        raw_input("No arguments Passed \n Press Enter to exit")
        arg_path=None
        # NOTE(review): execution continues with arg_path=None, so the
        # .replace()/path.exists() calls below would raise AttributeError;
        # presumably the script should exit here -- confirm.
    # Known media extensions to strip from the name and path.
    replace = [".avi",".mp4",".mkv",".mpg",".mpeg"]
    clean_path=arg_path
    print introstr
    # Strip the media extension from both the display name and the path.
    for content in replace:
        filename = filename.replace(content,'')
        clean_path = clean_path.replace(content,"")
    print "[SubDown]: Trying to Download " +filename
    # If a subtitle file already exists, ask before replacing it.
    if path.exists(clean_path+".srt"):
        ans=str(raw_input("Looks Like the SRT file exist\nWould You like To delete that file and Download a new one? :"))
        ans=ans.lower()
        if ans=='y' or ans=='yes':
            remove(clean_path+".srt")
        else :
            exit(0)
    # Try SubDB first; fall back to OpenSubtitles when it returns 0
    # (presumably 0 indicates no subtitle was found -- confirm in sub_db).
    if sub_db(arg_path,opener)==0:
        open_sub(clean_path,filename,arg_path)
| saikpr/subdown | subdown.py | Python | mit | 2,478 | [
"VisIt"
] | 8c27b2a00cf7e3b605e1de64664c391dd2bbff5169b1b179db660d8add9488b4 |
import os
import vtk
import numpy as np
from ensight2vtk_single_encas import ensight2vtk
from post_proc_cfd import post_proc_cfd
from post_proc_cfd_diff import post_proc_cfd_diff
from multiprocessing import Pool
def run_script():
    """
    Convert Ensight output to VTK and run the node-based wall post-processing
    for every case (wall, inlet surface, and interior are all extracted).
    """
    cases = ["case1", "case3", "case4", "case5", "case7", "case8",
             "case12", "case13", "case14"]
    base_dir = "/raid/sansomk/caseFiles/mri/VWI_proj/"
    fluent_subdir = "fluent_dsa"
    wall_file = "wall_outfile_node.vtu"
    inlet_file = "inlet_outfile_node.vtu"
    interior_file = "interior_outfile_node.vtu"
    suffix = "_dsa_0.15_fluent.encas"
    wall, surface, interior = True, True, True

    for case_name in cases:
        print(case_name)
        case_dir = os.path.join(base_dir, case_name, fluent_subdir)
        out_dir = os.path.join(case_dir, "vtk_out")
        # convert the Ensight case to the three VTK files
        ensight2vtk(os.path.join(case_dir, "ensight"), out_dir,
                    case_name + suffix,
                    wall_file, inlet_file, interior_file,
                    wall, surface, interior)
        # node-based post-processing of the wall data
        post_proc_cfd(out_dir, wall_file, "point",
                      "calc_test_node.vtu", "calc_test_node_stats.vtu",
                      N_peak=9)
def rerun_cfd_analysis():
    """
    Re-run the node-based wall post-processing for each case.

    Uses the previously converted VTK wall file for every case and the
    per-case peak time-step index from peak_list.
    """
    case_list = ["case1", "case3", "case4", "case5", "case7", "case8",
                 "case12", "case13", "case14"]
    # peak time-step index for each case (parallel to case_list)
    peak_list = [8, 8, 6, 8, 8, 8, 8, 8, 8]

    dir_path = "/raid/sansomk/caseFiles/mri/VWI_proj/"
    fluent_dir = "fluent_dsa"
    vtk_out = "vtk_out"
    vtk_file_1 = "wall_outfile_node.vtu"
    # FIX: removed unused locals (wall, surface, file_pattern) that were
    # never referenced in this function.

    for case, p in zip(case_list, peak_list):
        print(case)
        out_dir = os.path.join(dir_path, case, fluent_dir, vtk_out)
        post_proc_cfd(out_dir, vtk_file_1, "point",
                      "calc_test_node.vtu", "calc_test_node_stats.vtu",
                      N_peak=int(p))
def rerun_cfd_analysis_diff():
    """
    Re-run the wall post-processing using the alternate (vtk-internal)
    implementation, one worker process per case.

    Builds an argument list per case and farms the work out to a
    multiprocessing Pool running post_proc_cfd_diff.
    """
    case_list = ["case1", "case3", "case4", "case5", "case7", "case8",
                 "case12", "case13", "case14"]
    # peak time-step index for each case (parallel to case_list)
    peak_list = [8, 8, 6, 8, 8, 8, 8, 8, 8]

    dir_path = "/raid/sansomk/caseFiles/mri/VWI_proj/"
    fluent_dir = "fluent_dsa"
    vtk_out = "vtk_out"
    vtk_file_1 = "wall_outfile_node.vtu"
    # FIX: removed unused locals (wall, surface, file_pattern) and the unused
    # result_list binding; post_proc_cfd_diff is run for its side effects.

    arg_list = []
    for case, p in zip(case_list, peak_list):
        out_dir = os.path.join(dir_path, case, fluent_dir, vtk_out)
        arg_list.append([out_dir, vtk_file_1, "point", "calc_test_node.vtu",
                         "calc_test_node_stats.vtu", int(p)])
        print(arg_list[-1])

    with Pool(processes=len(case_list)) as pool:
        pool.map(post_proc_cfd_diff, arg_list)
def run_convert_ensight():
    """
    Convert Ensight case files to VTK for the selected cases.

    Only the wall and inlet surfaces are extracted; the interior volume is
    skipped.
    """
    cases = ["case1"]
    base_dir = "/raid/sansomk/caseFiles/mri/VWI_proj/"
    fluent_subdir = "fluent_dsa"
    wall_file = "wall_outfile_node.vtu"
    inlet_file = "inlet_outfile_node.vtu"
    interior_file = "interior_outfile_node.vtu"
    suffix = "_dsa_0.15_fluent.encas"
    wall, inlet, interior = True, True, False

    for case_name in cases:
        print(case_name)
        case_dir = os.path.join(base_dir, case_name, fluent_subdir)
        ensight2vtk(os.path.join(case_dir, "ensight"),
                    os.path.join(case_dir, "vtk_out"),
                    case_name + suffix,
                    wall_file, inlet_file, interior_file,
                    wall, inlet, interior)
# Entry point: only the vtk-internal variant is currently enabled; the other
# drivers are kept (commented) for manual use.
if ( __name__ == '__main__' ):
    #run_script()
    #run_convert_ensight()
    #rerun_cfd_analysis()
    rerun_cfd_analysis_diff()
| kayarre/Tools | vtk/post_process_main.py | Python | bsd-2-clause | 4,785 | [
"VTK"
] | 5c6d54ba1b3bd059db8149d2d79fb78aaedf621cf192b4ab000457a44d87446e |
"""
A simple VTK input file for PyQt, the qt bindings for python.
See http://www.trolltech.com for qt documentation, and
http://www.river-bank.demon.co.uk or http://www.thekompany.com
for the qt python bindings.
*******************************************************
NOTE: The widget provided by this module is not free of bugs and it
is recommended that you consider using the
QVTKRenderWindowInteractor widget that is also in this directory
instead of this one.
*******************************************************
Created by David Gobbi, December 2001
Based on vtkTkRenderWindget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support
"""
"""
This class should work with both the UNIX version of Qt and also on
Win32.
Depending on the OpenGL graphics drivers, it may not
be possible to have more than one QVTKRenderWidget
per application.
In short, this class is experimental. A proper implementation
will probably require a QVTKRenderWidget that is written in
C++ and then wrapped to be made available through python,
similar to the vtkTkRenderWidget.
"""
# Problems on Win32:
# 1. The widget is not cleaned up properly and crashes the
# application.
import vtk
import math, os, sys
from qt import *
class QVTKRenderWidget(QWidget):
"""
A QVTKRenderWidget for Python and Qt.
Use GetRenderWindow() to get the vtkRenderWindow.
Create with the keyword stereo=1 in order to
generate a stereo-capable window.
"""
    def __init__(self, parent=None, name=None, *args, **kw):
        """
        Create the widget.

        Keyword arguments consumed here (removed from kw before QWidget init):
            stereo: truthy to request a stereo-capable render window.
            rw: a user-supplied vtkRenderWindow to wrap instead of creating one.
        """
        # miscellaneous protected variables
        self._CurrentRenderer = None
        self._CurrentCamera = None
        self._CurrentZoom = 1.0
        self._CurrentLight = None

        self._ViewportCenterX = 0
        self._ViewportCenterY = 0

        self._Picker = vtk.vtkCellPicker()
        self._PickedActor = None
        self._PickedProperty = vtk.vtkProperty()
        self._PickedProperty.SetColor(1,0,0)  # picked actors highlighted in red
        self._PrePickedProperty = None

        # these record the previous mouse position
        self._LastX = 0
        self._LastY = 0

        # the current interaction mode (Rotate, Pan, Zoom, etc)
        self._Mode = None
        self._ActiveButton = 0

        # used by the LOD actors
        self._DesiredUpdateRate = 15
        self._StillUpdateRate = 0.0001

        # private attributes
        self.__oldFocus = None
        self.__saveX = 0
        self.__saveY = 0
        self.__saveState = 0
        self.__connected = 0  # is QT->VTK connection done?

        # do special handling of some keywords:
        # stereo, rw
        stereo = 0
        if kw.has_key('stereo'):
            if kw['stereo']:
                stereo = 1
            del kw['stereo']
        rw = None
        if kw.has_key('rw'):
            rw = kw['rw']
            del kw['rw']

        # create qt-level widget
        apply(QWidget.__init__, (self,parent,name) + args, kw)

        if rw:  # user-supplied render window
            self._RenderWindow = rw
        else:
            self._RenderWindow = vtk.vtkRenderWindow()
        if stereo:  # stereo mode
            self._RenderWindow.StereoCapableWindowOn()
            self._RenderWindow.SetStereoTypeToCrystalEyes()

        # do all the necessary qt setup
        self.setBackgroundMode(2)  # NoBackground: VTK paints the whole area
        self.setMouseTracking(1)   # get all mouse events
        self.setFocusPolicy(2)     # ClickFocus

        if parent == None:
            self.show()

        # If already visible, bind the render window to our native window id
        # immediately; otherwise paintEvent performs the connection.
        if self.isVisible():
            if self.__connected == 0:
                size = self.size()
                self._RenderWindow.SetSize(size.width(),size.height())
                self._RenderWindow.SetWindowInfo(str(int(self.winId())))
                self.__connected = 1
    def show(self):
        """Show the widget and force a repaint (Win32 needs it for the
        initial contents to appear)."""
        QWidget.show(self)
        self.repaint() # needed for initial contents display on Win32
    def paintEvent(self,ev):
        """Qt paint hook: on the first paint while visible, hand this
        widget's native window id to the vtkRenderWindow, then render."""
        if self.isVisible():
            if self.__connected == 0:
                # first time visible: connect VTK to our window handle
                size = self.size()
                self._RenderWindow.SetSize(size.width(),size.height())
                self._RenderWindow.SetWindowInfo(str(int(self.winId())))
                self.__connected = 1
        if self.__connected:
            self.Render()
    def resizeEvent(self,ev):
        """Repaint on resize.

        NOTE(review): the render window size is only pushed to VTK during the
        first connection in paintEvent; later resizes may not propagate the
        new size -- confirm.
        """
        self.repaint()
    def enterEvent(self,ev):
        """Grab keyboard focus when the mouse enters, remembering the old
        focus widget so leaveEvent can restore it."""
        if not self.hasFocus():
            self.__oldFocus = self.focusWidget()
            self.setFocus()
    def leaveEvent(self,ev):
        """Restore the previously focused widget, but only when no mouse
        button is held down (low three bits of the saved button state)."""
        if (self.__saveState & 0x7) == 0 and self.__oldFocus:
            self.__oldFocus.setFocus()
            self.__oldFocus = None
    def mousePressEvent(self,ev):
        """Start an interaction: right/Shift+left = Zoom, middle/Ctrl+left =
        Pan, plain left = Rotate.

        NOTE(review): the numeric constants are legacy-Qt enum values
        (button: 1=left, 2=right, 4=middle; state bits: 8 and 16 are
        modifier flags) -- confirm against the Qt version in use.
        """
        # ignore new presses while an interaction is already in progress
        if self._Mode != None:
            return

        if (ev.button() == 2 or
            ev.button() == 1 and ev.state() & 16):
            self._Mode = "Zoom"
            self._ActiveButton = ev.button()
        elif (ev.button() == 4 or
              ev.button() == 1 and ev.state() & 8):
            self._Mode = "Pan"
            self._ActiveButton = ev.button()
        elif (ev.button() == 1):
            self._Mode = "Rotate"
            self._ActiveButton = ev.button()

        if self._Mode != None:
            # interaction starting: let LOD actors drop detail for speed
            self._RenderWindow.SetDesiredUpdateRate(self._DesiredUpdateRate)
            self.UpdateRenderer(ev.x(),ev.y())
    def mouseReleaseEvent(self,ev):
        """End the current interaction: restore the still-render quality and
        render once at full detail."""
        if self._Mode == None:
            return

        self._RenderWindow.SetDesiredUpdateRate(self._StillUpdateRate)
        if self._CurrentRenderer:
            self.Render()

        # only the button that started the interaction may end it
        if ev.button() == self._ActiveButton:
            self._Mode = None
            self._ActiveButton = 0
def mouseMoveEvent(self,ev):
self.__saveState = ev.state()
self.__saveX = ev.x()
self.__saveY = ev.y()
if self._Mode == "Pan":
self.Pan(ev.x(),ev.y())
elif self._Mode == "Rotate":
self.Rotate(ev.x(),ev.y())
elif self._Mode == "Zoom":
self.Zoom(ev.x(),ev.y())
def keyPressEvent(self,ev):
if ev.key() == ord('R'):
self.Reset(self.__saveX,self.__saveY)
if ev.key() == ord('W'):
self.Wireframe()
if ev.key() == ord('S'):
self.Surface()
if ev.key() == ord('P'):
self.PickActor(self.__saveX,self.__saveY)
def contextMenuEvent(self,ev):
ev.accept();
    def SetDesiredUpdateRate(self, rate):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor.

        This rate is pushed to the render window while a mouse interaction
        is in progress (see mousePressEvent).
        """
        self._DesiredUpdateRate = rate
    def GetDesiredUpdateRate(self):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor.

        Returns the update rate used during interactive motion.
        """
        return self._DesiredUpdateRate
    def SetStillUpdateRate(self, rate):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor.

        This rate is pushed to the render window when an interaction ends
        (see mouseReleaseEvent).
        """
        self._StillUpdateRate = rate
    def GetStillUpdateRate(self):
        """Mirrors the method with the same name in
        vtkRenderWindowInteractor.

        Returns the update rate used when no interaction is in progress.
        """
        return self._StillUpdateRate
    def GetZoomFactor(self):
        """Return the cumulative zoom factor accumulated by Zoom()."""
        return self._CurrentZoom
    def GetRenderWindow(self):
        """Return the vtkRenderWindow this widget draws into."""
        return self._RenderWindow
    def GetPicker(self):
        """Return the picker object used by PickActor()."""
        return self._Picker
    def Render(self):
        """Render the scene, first moving the current light to the camera.

        Keeping the light's position and focal point in sync with the camera
        makes it behave like a headlight while the view changes.
        """
        if (self._CurrentLight):
            light = self._CurrentLight
            light.SetPosition(self._CurrentCamera.GetPosition())
            light.SetFocalPoint(self._CurrentCamera.GetFocalPoint())
        self._RenderWindow.Render()
    def UpdateRenderer(self,x,y):
        """
        UpdateRenderer will identify the renderer under the mouse and set
        up _CurrentRenderer, _CurrentCamera, and _CurrentLight.

        (x, y) are Qt widget coordinates (y grows downward); they are
        converted to normalized viewport coordinates before being tested
        against each renderer's viewport.  Also records (x, y) as the last
        cursor position used by the interaction methods.
        """
        windowX = self.width()
        windowY = self.height()
        renderers = self._RenderWindow.GetRenderers()
        numRenderers = renderers.GetNumberOfItems()
        self._CurrentRenderer = None
        renderers.InitTraversal()
        for i in range(0,numRenderers):
            renderer = renderers.GetNextItem()
            vx,vy = (0,0)
            if (windowX > 1):
                vx = float(x)/(windowX-1)
            if (windowY > 1):
                # Flip y: VTK viewport coordinates grow upward.
                vy = (windowY-float(y)-1)/(windowY-1)
            (vpxmin,vpymin,vpxmax,vpymax) = renderer.GetViewport()
            if (vx >= vpxmin and vx <= vpxmax and
                vy >= vpymin and vy <= vpymax):
                self._CurrentRenderer = renderer
                # NOTE(review): this adds vpxmin/vpymin (normalized units) to
                # a pixel-scaled quantity; looks like it intended
                # windowX*(vpxmin+vpxmax)/2.0 — verify before relying on
                # _ViewportCenterX/Y for anything but Pan deltas.
                self._ViewportCenterX = float(windowX)*(vpxmax-vpxmin)/2.0\
                                        +vpxmin
                self._ViewportCenterY = float(windowY)*(vpymax-vpymin)/2.0\
                                        +vpymin
                self._CurrentCamera = self._CurrentRenderer.GetActiveCamera()
                lights = self._CurrentRenderer.GetLights()
                lights.InitTraversal()
                self._CurrentLight = lights.GetNextItem()
                break
        self._LastX = x
        self._LastY = y
    def GetCurrentRenderer(self):
        """Return the renderer most recently found by UpdateRenderer (may be None)."""
        return self._CurrentRenderer
def Rotate(self,x,y):
if self._CurrentRenderer:
self._CurrentCamera.Azimuth(self._LastX - x)
self._CurrentCamera.Elevation(y - self._LastY)
self._CurrentCamera.OrthogonalizeViewUp()
self._LastX = x
self._LastY = y
self._CurrentRenderer.ResetCameraClippingRange()
self.Render()
    def Pan(self,x,y):
        """Translate the camera and focal point so the scene follows the
        pointer.

        In parallel projection both the focal point and the camera position
        are shifted by the pointer delta via a world->display->world round
        trip.  In perspective projection the shift is computed at the depth
        of the focal point so the pan speed matches the scene.
        """
        if self._CurrentRenderer:
            renderer = self._CurrentRenderer
            camera = self._CurrentCamera
            (pPoint0,pPoint1,pPoint2) = camera.GetPosition()
            (fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
            if (camera.GetParallelProjection()):
                # Shift the focal point by the pointer delta in display space.
                renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
                renderer.WorldToDisplay()
                fx,fy,fz = renderer.GetDisplayPoint()
                renderer.SetDisplayPoint(fx-x+self._LastX,
                                         fy+y-self._LastY,
                                         fz)
                renderer.DisplayToWorld()
                fx,fy,fz,fw = renderer.GetWorldPoint()
                camera.SetFocalPoint(fx,fy,fz)
                # Shift the camera position by the same display-space delta.
                renderer.SetWorldPoint(pPoint0,pPoint1,pPoint2,1.0)
                renderer.WorldToDisplay()
                fx,fy,fz = renderer.GetDisplayPoint()
                renderer.SetDisplayPoint(fx-x+self._LastX,
                                         fy+y-self._LastY,
                                         fz)
                renderer.DisplayToWorld()
                fx,fy,fz,fw = renderer.GetWorldPoint()
                camera.SetPosition(fx,fy,fz)
            else:
                (fPoint0,fPoint1,fPoint2) = camera.GetFocalPoint()
                # Specify a point location in world coordinates
                renderer.SetWorldPoint(fPoint0,fPoint1,fPoint2,1.0)
                renderer.WorldToDisplay()
                # Convert world point coordinates to display coordinates
                dPoint = renderer.GetDisplayPoint()
                focalDepth = dPoint[2]
                # Pointer position relative to the viewport center, at the
                # focal depth (note Qt y grows downward, hence the minus).
                aPoint0 = self._ViewportCenterX + (x - self._LastX)
                aPoint1 = self._ViewportCenterY - (y - self._LastY)
                renderer.SetDisplayPoint(aPoint0,aPoint1,focalDepth)
                renderer.DisplayToWorld()
                (rPoint0,rPoint1,rPoint2,rPoint3) = renderer.GetWorldPoint()
                # Homogeneous coordinate normalization.
                if (rPoint3 != 0.0):
                    rPoint0 = rPoint0/rPoint3
                    rPoint1 = rPoint1/rPoint3
                    rPoint2 = rPoint2/rPoint3
                # Move focal point and camera by the world-space delta.
                camera.SetFocalPoint((fPoint0 - rPoint0) + fPoint0,
                                     (fPoint1 - rPoint1) + fPoint1,
                                     (fPoint2 - rPoint2) + fPoint2)
                camera.SetPosition((fPoint0 - rPoint0) + pPoint0,
                                   (fPoint1 - rPoint1) + pPoint1,
                                   (fPoint2 - rPoint2) + pPoint2)
            self._LastX = x
            self._LastY = y
            self.Render()
    def Zoom(self,x,y):
        """Zoom the current camera based on vertical pointer motion.

        The factor 1.02 ** (0.5 * dy) is > 1 when the pointer moves up
        (y decreases); the running product is tracked in _CurrentZoom.
        """
        if self._CurrentRenderer:
            renderer = self._CurrentRenderer
            camera = self._CurrentCamera
            zoomFactor = math.pow(1.02,(0.5*(self._LastY - y)))
            self._CurrentZoom = self._CurrentZoom * zoomFactor
            if camera.GetParallelProjection():
                # Parallel projection cannot dolly; scale the view instead.
                parallelScale = camera.GetParallelScale()/zoomFactor
                camera.SetParallelScale(parallelScale)
            else:
                camera.Dolly(zoomFactor)
                renderer.ResetCameraClippingRange()
            self._LastX = x
            self._LastY = y
            self.Render()
def Reset(self,x,y):
if self._CurrentRenderer:
self._CurrentRenderer.ResetCamera()
self.Render()
def Wireframe(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToWireframe()
self.Render()
def Surface(self):
actors = self._CurrentRenderer.GetActors()
numActors = actors.GetNumberOfItems()
actors.InitTraversal()
for i in range(0,numActors):
actor = actors.GetNextItem()
actor.GetProperty().SetRepresentationToSurface()
self.Render()
    def PickActor(self,x,y):
        """Pick the actor under widget position (x, y) and highlight it.

        The previously picked actor (if any) gets its original property
        restored first; the newly picked actor's property is saved and
        replaced with _PickedProperty.  Reference counts on the saved
        property are managed explicitly via Register/UnRegister.
        """
        if self._CurrentRenderer:
            renderer = self._CurrentRenderer
            picker = self._Picker
            # Flip y: Qt y grows downward, VTK display y grows upward.
            windowY = self.height()
            picker.Pick(x,(windowY - y - 1),0.0,renderer)
            actor = picker.GetActor()
            if (self._PickedActor != None and
                self._PrePickedProperty != None):
                self._PickedActor.SetProperty(self._PrePickedProperty)
                # release hold of the property
                self._PrePickedProperty.UnRegister(self._PrePickedProperty)
                self._PrePickedProperty = None
            if (actor != None):
                self._PickedActor = actor
                self._PrePickedProperty = self._PickedActor.GetProperty()
                # hold onto the property
                self._PrePickedProperty.Register(self._PrePickedProperty)
                self._PickedActor.SetProperty(self._PickedProperty)
            self.Render()
#----------------------------------------------------------------------------
def QVTKRenderWidgetConeExample():
    """Like it says, just a simple example: show a cone in a
    QVTKRenderWidget inside a standalone Qt application.
    """
    # every QT app needs an app
    app = QApplication(['QVTKRenderWidget'])
    # create the widget
    widget = QVTKRenderWidget()
    ren = vtk.vtkRenderer()
    widget.GetRenderWindow().AddRenderer(ren)
    # Build a minimal source -> mapper -> actor pipeline.
    cone = vtk.vtkConeSource()
    cone.SetResolution(8)
    coneMapper = vtk.vtkPolyDataMapper()
    coneMapper.SetInput(cone.GetOutput())
    coneActor = vtk.vtkActor()
    coneActor.SetMapper(coneMapper)
    ren.AddActor(coneActor)
    # show the widget
    widget.show()
    # close the application when window is closed
    # (qApp / setMainWidget / exec_loop are Qt3-era APIs.)
    qApp.setMainWidget(widget)
    # start event processing
    app.exec_loop()
# Run the demo when this module is executed directly.
if __name__ == "__main__":
    QVTKRenderWidgetConeExample()
| b3c/VTK-5.8 | Wrapping/Python/vtk/qt/QVTKRenderWidget.py | Python | bsd-3-clause | 15,276 | [
"VTK"
] | b89358ce95d7c8af5723a427a7d4214c74d3fdc98118b185259dc984bb69180d |
#!/usr/bin/env python
'''
Input ke_cutoff (kinetic energy cutoff) or mesh (#grids) for FFT-based Coulomb
integral.
If ke_cutoff and mesh are not specified in the input, they will be chosen
automatically in cell.build function based on the basis set. You can set
ke_cutoff or mesh to control the performance/accuracy of Coulomb integrals.
'''
import numpy
import pyscf.lib
from pyscf.pbc import gto
cell = gto.Cell()
cell.atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.a = numpy.eye(3)*3.5668
cell.mesh = [25,25,25] # 25 grids on each direction, => 25^3 grids in total
cell.build()
cell.ke_cutoff = 40 # Eh ~ mesh = [20,20,20] ~ 21^3 grids in total
cell.build()
| gkc1000/pyscf | examples/pbc/06-input_ke_cutoff.py | Python | apache-2.0 | 1,018 | [
"PySCF"
] | c5007b77d65e83675f35393b4806d068dc124a35bbab6e0945141eedd52fcead |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test gluon.probability with HybridBlock.forward api
"""
import mxnet as mx
import numpy as _np
from mxnet import np, npx, autograd
from mxnet import gluon
import mxnet.gluon.probability as mgp
from mxnet.gluon.probability import StochasticBlock, StochasticSequential
from mxnet.gluon import HybridBlock
from mxnet.test_utils import use_np, assert_almost_equal
from common import with_seed
from numpy.testing import assert_array_equal
import pytest
import scipy.stats as ss
import scipy.special as scipy_special
import itertools
from numbers import Number
def prob_to_logit(prob):
    """Convert a probability to its logit, log(p / (1 - p)).

    log1p(-p) is used for the complement term for better accuracy when
    p is close to zero.
    """
    log_p = np.log(prob)
    log_not_p = np.log1p(-prob)
    return log_p - log_not_p
def _distribution_method_invoker(dist, func, *args):
"""Wrapper for invoking different types of class methods with one unified
interface.
Parameters
----------
dist : Distribution
func : method
"""
if (len(args) == 0):
out = getattr(dist, func)
if callable(out):
return out()
else:
return out
return getattr(dist, func)(*args)
def test_mgp_getF():
    """getF should return the matching namespace (mx.nd or mx.sym) for
    homogeneous arguments and raise TypeError on a mixed pair."""
    # Test getF
    getF = mgp.utils.getF
    nd = mx.nd
    sym = mx.sym
    assert getF(nd.ones((2, 2)), nd.ones((2, 2))) == nd
    assert getF(sym.ones((2, 2)), sym.ones((2, 2))) == sym
    # Plain scalars resolve to the ndarray namespace.
    assert getF(1.0, 2.0) == nd

    # Test exception
    with pytest.raises(TypeError):
        getF(nd.ones((2, 2)), sym.ones((2, 2)))
    # NOTE(review): this call is OUTSIDE the pytest.raises block — if the
    # sym/nd order also raises, the test errors here; it likely was meant to
    # be wrapped in its own pytest.raises(TypeError). Verify getF's behavior
    # for this argument order before changing.
    getF(sym.ones((2, 2)), nd.ones((2, 2)))
@with_seed()
@use_np
def test_gluon_uniform():
    """Check mgp.Uniform against scipy.stats.uniform for log_prob, cdf,
    icdf and entropy, in both imperative and hybridized mode.

    NOTE: @with_seed pins the RNG, so the order of np.random draws below
    is part of the test's reproducibility.
    """
    class TestUniform(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            super(TestUniform, self).__init__()
            self._func = func

        def forward(self, low, high, *args):
            uniform = mgp.Uniform(low, high, validate_args=True)
            return _distribution_method_invoker(uniform, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(low, high)
        net = TestUniform("log_prob")
        if hybridize:
            net.hybridize()
        # Run twice so the cached hybridized graph is exercised as well.
        for i in range(2):
            mx_out = net(low, high, samples).asnumpy()
            np_out = ss.uniform(low.asnumpy(),
                                (high - low).asnumpy()).logpdf(samples.asnumpy())
            assert_almost_equal(mx_out, np_out, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(low, high)
        net = TestUniform("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(low, high, samples).asnumpy()
        np_out = ss.uniform(low.asnumpy(),
                            (high - low).asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape)
        net = TestUniform("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(low, high, samples).asnumpy()
        np_out = ss.uniform(low.asnumpy(),
                            (high - low).asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        net = TestUniform("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(low, high).asnumpy()
        np_out = ss.uniform(low.asnumpy(),
                            (high - low).asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_normal():
    """Check mgp.Normal against scipy.stats.norm for log_prob, cdf, icdf
    and entropy, in both imperative and hybridized mode."""
    class TestNormal(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            super(TestNormal, self).__init__()
            self._func = func

        def forward(self, loc, scale, *args):
            normal = mgp.Normal(loc, scale, validate_args=True)
            return _distribution_method_invoker(normal, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestNormal("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestNormal("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf (input is a probability in [0, 1)).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape)
        net = TestNormal("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestNormal("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_laplace():
    """Check mgp.Laplace against scipy.stats.laplace for log_prob, cdf,
    icdf and entropy, in both imperative and hybridized mode."""
    class TestLaplace(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            super(TestLaplace, self).__init__()
            self._func = func

        def forward(self, loc, scale, *args):
            laplace = mgp.Laplace(loc, scale, validate_args=True)
            return _distribution_method_invoker(laplace, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.laplace(size=shape)
        net = TestLaplace("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.laplace(size=shape)
        net = TestLaplace("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf (input is a probability in [0, 1)).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape)
        net = TestLaplace("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestLaplace("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_cauchy():
    """Check mgp.Cauchy sampling shape and log_prob/cdf/icdf/entropy values
    against scipy.stats.cauchy, in both imperative and hybridized mode."""
    class TestCauchy(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            self._func = func
            super(TestCauchy, self).__init__()

        def forward(self, loc, scale, *args):
            cauchy = mgp.Cauchy(loc, scale, validate_args=True)
            return _distribution_method_invoker(cauchy, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test sampling
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        # NOTE(review): `samples` is drawn but never used in this section;
        # removing the draw would shift the seeded RNG stream for the draws
        # that follow, so it is left in place.
        samples = np.random.normal(size=shape)
        net = TestCauchy("sample")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale)
        desired_shape = (shape,) if isinstance(shape, Number) else shape
        assert mx_out.shape == desired_shape

    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestCauchy("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestCauchy("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf — probabilities kept away from 0 and 1 where the Cauchy
    # quantile function diverges.
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape, low=1e-4, high=1.0-1e-4)
        net = TestCauchy("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestCauchy("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_half_cauchy():
    """Check mgp.HalfCauchy sampling shape and log_prob/cdf/icdf values
    against scipy.stats.halfcauchy, in both imperative and hybridized mode."""
    class TestHalfCauchy(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            super(TestHalfCauchy, self).__init__()
            self._func = func

        def forward(self, scale, *args):
            half_normal = mgp.HalfCauchy(scale, validate_args=True)
            return getattr(half_normal, self._func)(*args)

    shapes = [(), (1,), (2, 3), 6]

    # Test sampling
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestHalfCauchy("sample")
        if hybridize:
            net.hybridize()
        mx_out = net(scale).asnumpy()
        if isinstance(shape, Number):
            shape = (shape,)
        assert mx_out.shape == shape

    # Test log_prob — support is the non-negative half line.
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.abs(np.random.normal(size=shape))
        net = TestHalfCauchy("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(scale, samples).asnumpy()
        np_out = ss.halfcauchy(0, scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.abs(np.random.normal(size=shape))
        net = TestHalfCauchy("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(scale, samples).asnumpy()
        np_out = ss.halfcauchy(0, scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf — probabilities bounded away from 1 where ppf diverges.
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape, high=1.0-1e-4)
        net = TestHalfCauchy("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(scale, samples).asnumpy()
        np_out = ss.halfcauchy(0, scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_poisson():
    """Check mgp.Poisson sampling shape and log_prob values against
    scipy.stats.poisson, in both imperative and hybridized mode."""
    class TestPoisson(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            self._func = func
            super(TestPoisson, self).__init__()

        def forward(self, rate, *args):
            poisson = mgp.Poisson(rate, validate_args=True)
            return _distribution_method_invoker(poisson, self._func, *args)

    # Scalar () shape is deliberately absent here.
    shapes = [(1,), (2, 3), 6]

    # Test sampling
    for shape, hybridize in itertools.product(shapes, [True, False]):
        rate = np.random.uniform(0.5, 1.5, shape)
        net = TestPoisson("sample")
        if hybridize:
            net.hybridize()
        mx_out = net(rate).asnumpy()
        assert mx_out.shape == rate.shape

    # Test log_prob on small non-negative integer counts.
    for shape, hybridize in itertools.product(shapes, [True, False]):
        rate = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.randint(0, 5, shape).astype('float')
        net = TestPoisson("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(rate, samples).asnumpy()
        np_out = ss.poisson(mu=rate.asnumpy()).logpmf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_geometric():
    """Check mgp.Geometric (prob and logit parameterizations) against
    scipy.stats.geom for log_prob, variance and entropy.

    scipy's geom counts trials (support starting at 1) while the gluon
    distribution counts failures (support starting at 0), hence the +1
    shift in the log_prob comparison.
    """
    class TestGeometric(HybridBlock):
        # Wraps either the prob- or the logit-parameterized distribution.
        def __init__(self, func, is_logit=False):
            super(TestGeometric, self).__init__()
            self._is_logit = is_logit
            self._func = func

        def forward(self, params, *args):
            dist = mgp.Geometric(logit=params, validate_args=True) if self._is_logit else \
                mgp.Geometric(prob=params, validate_args=True)
            return _distribution_method_invoker(dist, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        sample = np.random.randint(0, 10, size=shape).astype('float32')
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestGeometric("log_prob", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param, sample).asnumpy()
        np_out = ss.geom.logpmf(sample.asnumpy() + 1, prob.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test variance
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestGeometric("variance", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param).asnumpy()
        np_out = ss.geom(prob.asnumpy()).var()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        # Add lower bound constraint, otherwise scipy would raise warning.
        prob = np.random.uniform(low=0.1, size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestGeometric("entropy", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param).asnumpy()
        np_out = ss.geom(prob.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_negative_binomial():
    """Check mgp.NegativeBinomial (prob and logit parameterizations) against
    scipy.stats.nbinom for log_prob, mean and variance.

    NOTE(review): log_prob compares against ss.nbinom(p=prob) while
    mean/variance compare against ss.nbinom(p=1-prob) — this reflects the
    success/failure probability convention of the gluon distribution;
    verify against its docs before changing either side.
    """
    class TestNegativeBinomial(HybridBlock):
        # Wraps either the prob- or the logit-parameterized distribution.
        def __init__(self, func, is_logit=False):
            super(TestNegativeBinomial, self).__init__()
            self._is_logit = is_logit
            self._func = func

        def forward(self, n, params, *args):
            dist = mgp.NegativeBinomial(n=n, logit=params, validate_args=True) if self._is_logit else \
                mgp.NegativeBinomial(n=n, prob=params, validate_args=True)
            return _distribution_method_invoker(dist, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        n = np.random.randint(1, 10, size=shape).astype('float32')
        prob = np.random.uniform(low=0.2, high=0.6, size=shape)
        sample = np.random.randint(0, 10, size=shape).astype('float32')
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestNegativeBinomial("log_prob", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(n, param, sample).asnumpy()
        np_out = ss.nbinom(n=n.asnumpy(), p=prob.asnumpy()
                           ).logpmf(sample.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test mean and variance
    for shape, hybridize in itertools.product(shapes, [True, False]):
        for func in ['mean', 'variance']:
            for use_logit in [True, False]:
                n = np.random.randint(1, 10, size=shape).astype('float32')
                prob = np.random.uniform(low=0.2, high=0.6, size=shape)
                net = TestNegativeBinomial(func, use_logit)
                param = prob
                if use_logit:
                    param = prob_to_logit(param)
                if hybridize:
                    net.hybridize()
                mx_out = net(n, param).asnumpy()
                ss_nbinom = ss.nbinom(n=n.asnumpy(), p=1 - prob.asnumpy())
                if func == 'mean':
                    np_out = ss_nbinom.mean()
                else:
                    np_out = ss_nbinom.var()
                assert_almost_equal(mx_out, np_out, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_exponential():
    """Check mgp.Exponential against scipy.stats.expon for log_prob, cdf,
    icdf and entropy, in both imperative and hybridized mode."""
    class TestExponential(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            self._func = func
            super(TestExponential, self).__init__()

        def forward(self, scale, *args):
            exponential = mgp.Exponential(scale, validate_args=True)
            return _distribution_method_invoker(exponential, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(0.2, 1.2, size=shape)
        net = TestExponential("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(scale, samples).asnumpy()
        np_out = ss.expon(scale=scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(0.2, 1.2, size=shape)
        net = TestExponential("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(scale, samples).asnumpy()
        np_out = ss.expon(scale=scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf (input is a probability in [0, 1)).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(0.0, 1.0, size=shape)
        net = TestExponential("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(scale, samples).asnumpy()
        np_out = ss.expon(scale=scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestExponential("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(scale).asnumpy()
        np_out = ss.expon(scale=scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_weibull():
    """Check mgp.Weibull against scipy.stats.weibull_min for log_prob, cdf,
    icdf and entropy, in both imperative and hybridized mode."""
    class TestWeibull(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            super(TestWeibull, self).__init__()
            self._func = func

        def forward(self, concentration, scale, *args):
            weibull = mgp.Weibull(concentration, scale, validate_args=True)
            return _distribution_method_invoker(weibull, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        concentration = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        samples = np.random.uniform(size=shape)
        net = TestWeibull("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(concentration, scale, samples).asnumpy()
        np_out = ss.weibull_min(c=concentration.asnumpy(
        ), scale=scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        concentration = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        samples = np.random.uniform(size=shape)
        net = TestWeibull("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(concentration, scale, samples).asnumpy()
        np_out = ss.weibull_min(c=concentration.asnumpy(
        ), scale=scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf (input is a probability in [0, 1)).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        concentration = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        samples = np.random.uniform(size=shape)
        net = TestWeibull("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(concentration, scale, samples).asnumpy()
        np_out = ss.weibull_min(c=concentration.asnumpy(
        ), scale=scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        concentration = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        net = TestWeibull("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(concentration, scale).asnumpy()
        np_out = ss.weibull_min(c=concentration.asnumpy(),
                                scale=scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_pareto():
    """Check mgp.Pareto against scipy.stats.pareto for log_prob, cdf, icdf
    and entropy, in both imperative and hybridized mode."""
    class TestPareto(HybridBlock):
        # Thin wrapper so a chosen distribution method can be exercised
        # through hybridization.
        def __init__(self, func):
            super(TestPareto, self).__init__()
            self._func = func

        def forward(self, alpha, scale, *args):
            pareto = mgp.Pareto(alpha, scale, validate_args=True)
            return _distribution_method_invoker(pareto, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]

    # Test log_prob — samples drawn above the scale (support) parameter.
    for shape, hybridize in itertools.product(shapes, [True, False]):
        alpha = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        samples = np.random.uniform(1, 2, size=shape)
        net = TestPareto("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(alpha, scale, samples).asnumpy()
        np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).logpdf(
            samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        alpha = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        samples = np.random.uniform(1.0, 2.0, size=shape)
        net = TestPareto("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(alpha, scale, samples).asnumpy()
        np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).cdf(
            samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test icdf (input is a probability in [0, 1)).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        alpha = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        samples = np.random.uniform(size=shape)
        net = TestPareto("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(alpha, scale, samples).asnumpy()
        np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).ppf(
            samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)

    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        alpha = np.random.uniform(size=shape)
        scale = np.random.uniform(size=shape)
        net = TestPareto("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(alpha, scale).asnumpy()
        np_out = ss.pareto(b=alpha.asnumpy(), scale=scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_gamma():
    """Check mgp.Gamma's log_prob, mean, variance and entropy against scipy.stats.gamma."""
    class TestGamma(HybridBlock):
        def __init__(self, func):
            super(TestGamma, self).__init__()
            self._func = func

        def forward(self, shape, scale, *args):
            # Construct the distribution inside forward so hybridization is exercised.
            gamma = mgp.Gamma(shape, scale, validate_args=True)
            return _distribution_method_invoker(gamma, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]
    # log_prob: random (alpha, scale) parameters against scipy's logpdf.
    for shape in shapes:
        for hybridize in (True, False):
            alpha = np.random.uniform(0.5, 1.5, shape)
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.uniform(size=shape)
            block = TestGamma("log_prob")
            if hybridize:
                block.hybridize()
            actual = block(alpha, scale, samples).asnumpy()
            expected = ss.gamma(a=alpha.asnumpy(), loc=0,
                                scale=scale.asnumpy()).logpdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Moments and entropy, dispatched through a lookup table instead of if/elif.
    for shape in shapes:
        for hybridize in (True, False):
            for func in ['mean', 'variance', 'entropy']:
                alpha = np.random.uniform(0.5, 1.5, shape)
                scale = np.random.uniform(0.5, 1.5, shape)
                block = TestGamma(func)
                if hybridize:
                    block.hybridize()
                actual = block(alpha, scale).asnumpy()
                ref = ss.gamma(a=alpha.asnumpy(), loc=0,
                               scale=scale.asnumpy())
                expected = {'mean': ref.mean,
                            'variance': ref.var,
                            'entropy': ref.entropy}[func]()
                assert_almost_equal(actual, expected, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_dirichlet():
    """Test mgp.Dirichlet: sample shape and simplex constraint, log_prob, and
    mean/variance/entropy against scipy.stats.dirichlet.

    scipy's dirichlet does not support batched concentration parameters, so
    the log_prob and moment checks only use the unbatched case.
    """
    class TestDirichlet(HybridBlock):
        def __init__(self, func):
            super(TestDirichlet, self).__init__()
            # Name of the distribution method to invoke in forward().
            self._func = func
        def forward(self, alpha, *args):
            dirichlet = mgp.Dirichlet(alpha, validate_args=True)
            return _distribution_method_invoker(dirichlet, self._func, *args)
    event_shapes = [2, 4, 6]
    batch_shapes = [None, (2, 3)]
    # Test sampling
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for hybridize in [True, False]:
            # Full parameter shape is batch_shape + (event_shape,).
            desired_shape = (
                batch_shape if batch_shape is not None else ()) + (event_shape,)
            alpha = np.random.uniform(1.0, 5.0, size=desired_shape)
            net = TestDirichlet("sample")
            if hybridize:
                net.hybridize()
            mx_out = net(alpha).asnumpy()
            # Check shape
            assert mx_out.shape == desired_shape
            # Check simplex
            assert_almost_equal(mx_out.sum(-1), _np.ones_like(mx_out.sum(-1)), atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Test log_prob
    # Scipy does not support batch `alpha`, thus we skip multi-dimensional batch_shape case.
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes[:1]):
        for hybridize in [True, False]:
            desired_shape = (
                batch_shape if batch_shape is not None else ()) + (event_shape,)
            alpha = np.random.uniform(1.0, 5.0, size=desired_shape)
            # Samples must lie on the simplex; draw them from a NumPy Dirichlet.
            np_samples = _np.random.dirichlet(
                [10.0 / event_shape] * event_shape, size=batch_shape)
            net = TestDirichlet("log_prob")
            if hybridize:
                net.hybridize()
            mx_out = net(alpha, np.array(np_samples)).asnumpy()
            np_out = ss.dirichlet(alpha=alpha.asnumpy()).logpdf(np_samples)
            assert_almost_equal(mx_out, np_out, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Test `mean`, `var` and `entropy`
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes[:1]):
        for hybridize in [True, False]:
            for func in ['mean', 'variance', 'entropy']:
                desired_shape = (
                    batch_shape if batch_shape is not None else ()) + (event_shape,)
                alpha = np.random.uniform(1.0, 5.0, desired_shape)
                net = TestDirichlet(func)
                if hybridize:
                    net.hybridize()
                mx_out = net(alpha).asnumpy()
                ss_dir = ss.dirichlet(alpha=alpha.asnumpy())
                if func == 'mean':
                    np_out = ss_dir.mean()
                elif func == 'variance':
                    np_out = ss_dir.var()
                else:
                    np_out = ss_dir.entropy()
                assert_almost_equal(mx_out, np_out, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_beta():
    """Compare mgp.Beta's log_prob, mean, variance and entropy with scipy.stats.beta."""
    class TestBeta(HybridBlock):
        def __init__(self, func):
            super(TestBeta, self).__init__()
            self._func = func

        def forward(self, alpha, beta, *args):
            beta_dist = mgp.Beta(alpha, beta, validate_args=True)
            return _distribution_method_invoker(beta_dist, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]
    # log_prob: random (alpha, beta) parameters against scipy's logpdf.
    for shape in shapes:
        for hybridize in (True, False):
            alpha = np.random.uniform(0.5, 1.5, shape)
            beta = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.uniform(size=shape)
            block = TestBeta("log_prob")
            if hybridize:
                block.hybridize()
            actual = block(alpha, beta, samples).asnumpy()
            expected = ss.beta(alpha.asnumpy(), beta.asnumpy()
                               ).logpdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Moments and entropy, dispatched through a lookup table instead of if/elif.
    for shape in shapes:
        for hybridize in (True, False):
            for func in ['mean', 'variance', 'entropy']:
                alpha = np.random.uniform(0.5, 1.5, shape)
                beta = np.random.uniform(0.5, 1.5, shape)
                block = TestBeta(func)
                if hybridize:
                    block.hybridize()
                actual = block(alpha, beta).asnumpy()
                ref = ss.beta(alpha.asnumpy(), beta.asnumpy())
                expected = {'mean': ref.mean,
                            'variance': ref.var,
                            'entropy': ref.entropy}[func]()
                assert_almost_equal(actual, expected, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_fisher_snedecor():
    """Compare mgp.FisherSnedecor's log_prob, mean and variance with scipy.stats.f."""
    class TestFisherSnedecor(HybridBlock):
        def __init__(self, func):
            super(TestFisherSnedecor, self).__init__()
            self._func = func

        def forward(self, df1, df2, *args):
            dist = mgp.FisherSnedecor(df1, df2, validate_args=True)
            return _distribution_method_invoker(dist, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]
    # log_prob against scipy's logpdf.
    for shape in shapes:
        for hybridize in (True, False):
            df1 = np.random.uniform(0.5, 1.5, shape)
            df2 = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.uniform(size=shape)
            block = TestFisherSnedecor("log_prob")
            if hybridize:
                block.hybridize()
            actual = block(df1, df2, samples).asnumpy()
            expected = ss.f(dfn=df1.asnumpy(), dfd=df2.asnumpy()
                            ).logpdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # mean and variance; df2 is drawn > 4 so both moments are finite.
    for shape in shapes:
        for hybridize in (True, False):
            for func in ['mean', 'variance']:
                df1 = np.random.uniform(0.5, 1.5, shape)
                df2 = np.random.uniform(4.0, 6.0, shape)
                block = TestFisherSnedecor(func)
                if hybridize:
                    block.hybridize()
                actual = block(df1, df2).asnumpy()
                ref = ss.f(dfn=df1.asnumpy(), dfd=df2.asnumpy())
                expected = ref.mean() if func == 'mean' else ref.var()
                assert_almost_equal(actual, expected, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_student_t():
    """Compare mgp.StudentT's log_prob, mean, variance and entropy with scipy.stats.t."""
    class TestT(HybridBlock):
        def __init__(self, func):
            super(TestT, self).__init__()
            self._func = func

        def forward(self, df, loc, scale, *args):
            dist = mgp.StudentT(df, loc, scale, validate_args=True)
            return _distribution_method_invoker(dist, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]
    # log_prob: zero-loc StudentT against scipy's logpdf.
    for shape in shapes:
        for hybridize in (True, False):
            loc = np.zeros(shape)
            scale = np.random.uniform(0.5, 1.5, shape)
            df = np.random.uniform(2, 4, shape)
            samples = np.random.uniform(0, 4, size=shape)
            block = TestT("log_prob")
            if hybridize:
                block.hybridize()
            actual = block(df, loc, scale, samples).asnumpy()
            expected = ss.t(loc=0, scale=scale.asnumpy(),
                            df=df.asnumpy()).logpdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Moments and entropy; df is drawn > 3 so the variance is finite.
    # (Imperative mode runs before hybridized mode here, matching the original.)
    for shape in shapes:
        for hybridize in (False, True):
            for func in ['mean', 'variance', 'entropy']:
                loc = np.zeros(shape)
                scale = np.random.uniform(0.5, 1.5, shape)
                df = np.random.uniform(3, 4, shape)
                block = TestT(func)
                if hybridize:
                    block.hybridize()
                actual = block(df, loc, scale).asnumpy()
                ref = ss.t(loc=0, scale=scale.asnumpy(), df=df.asnumpy())
                expected = {'mean': ref.mean,
                            'variance': ref.var,
                            'entropy': ref.entropy}[func]()
                assert_almost_equal(actual, expected, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_gumbel():
    """Compare mgp.Gumbel's log_prob, cdf, icdf and entropy with scipy.stats.gumbel_r."""
    class TestGumbel(HybridBlock):
        def __init__(self, func):
            super(TestGumbel, self).__init__()
            self._func = func

        def forward(self, loc, scale, *args):
            dist = mgp.Gumbel(loc, scale, validate_args=True)
            # Dispatch directly on the method name.
            return getattr(dist, self._func)(*args)

    shapes = [(), (1,), (2, 3), 6]
    # log_prob
    for shape in shapes:
        for hybridize in (True, False):
            loc = np.random.uniform(-1, 1, shape)
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.normal(size=shape)
            block = TestGumbel("log_prob")
            if hybridize:
                block.hybridize()
            actual = block(loc, scale, samples).asnumpy()
            expected = ss.gumbel_r(loc=loc.asnumpy(),
                                   scale=scale.asnumpy()).logpdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # cdf
    for shape in shapes:
        for hybridize in (True, False):
            loc = np.random.uniform(-1, 1, shape)
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.normal(size=shape)
            block = TestGumbel("cdf")
            if hybridize:
                block.hybridize()
            actual = block(loc, scale, samples).asnumpy()
            expected = ss.gumbel_r(loc.asnumpy(),
                                   scale.asnumpy()).cdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # icdf (quantile function; probe points drawn from [0, 1))
    for shape in shapes:
        for hybridize in (True, False):
            loc = np.random.uniform(-1, 1, shape)
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.uniform(size=shape)
            block = TestGumbel("icdf")
            if hybridize:
                block.hybridize()
            actual = block(loc, scale, samples).asnumpy()
            expected = ss.gumbel_r(loc.asnumpy(),
                                   scale.asnumpy()).ppf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # entropy
    for shape in shapes:
        for hybridize in (True, False):
            loc = np.random.uniform(-1, 1, shape)
            scale = np.random.uniform(0.5, 1.5, shape)
            block = TestGumbel("entropy")
            if hybridize:
                block.hybridize()
            actual = block(loc, scale).asnumpy()
            expected = ss.gumbel_r(loc.asnumpy(),
                                   scale.asnumpy()).entropy()
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_multinomial():
    """Test mgp.Multinomial (prob and logit parameterizations): output shapes of
    sample / sample_n and of log_prob on one-hot-encoded draws.

    Only shapes are checked here; numerical values are not compared against a
    reference implementation.
    """
    class TestMultinomial(HybridBlock):
        def __init__(self, func, num_events, total_count, is_logit, batch_shape=None, sample_shape=None):
            super(TestMultinomial, self).__init__()
            self._num_events = num_events
            self._total_count = total_count
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
            self._batch_shape = batch_shape
            self._sample_shape = sample_shape
        def forward(self, params, *args):
            multinomial = (
                mgp.Multinomial(self._num_events, logit=params, total_count=self._total_count,
                                validate_args=True)
                if self._is_logit else
                mgp.Multinomial(self._num_events, prob=params, total_count=self._total_count,
                                validate_args=True)
            )
            # sample/sample_n take shape arguments stored at construction time,
            # so they are dispatched explicitly instead of via the generic invoker.
            if self._func == 'sample':
                return multinomial.sample(self._batch_shape)
            if self._func == 'sample_n':
                return multinomial.sample_n(self._sample_shape)
            return _distribution_method_invoker(multinomial, self._func, *args)
    def one_hot(a, num_classes):
        # Integer class indices -> one-hot rows (via identity-matrix lookup).
        return np.identity(num_classes)[a]
    event_shapes = [2, 5, 10]
    batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
    sample_shapes = [None, (2,), (3, 4)]
    # Test sampling
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            # Probabilities drawn from a Dirichlet so each row sums to one.
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob
            if use_logit:
                param = np.log(param)
            net = TestMultinomial("sample", event_shape, _np.random.randint(1, 5),
                                  use_logit, batch_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            desired_shape = batch_shape if batch_shape is not None else ()
            assert mx_out.shape == desired_shape + (event_shape,)
    # Test sample_n
    for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob
            if use_logit:
                param = np.log(param)
            net = TestMultinomial("sample_n", event_shape, _np.random.randint(1, 5),
                                  use_logit, batch_shape, sample_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            # Expected result shape is sample_shape + batch_shape + (event_shape,).
            sample_shape = () if sample_shape is None else sample_shape
            desired_shape = sample_shape + \
                (batch_shape if batch_shape is not None else ())
            assert mx_out.shape == desired_shape + (event_shape,)
    # Test log_prob
    for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            # Clip away exact zeros/ones so log(prob) stays finite.
            eps = _np.finfo('float32').eps
            prob = np.clip(prob, eps, 1 - eps)
            param = prob
            sample_shape = () if sample_shape is None else sample_shape
            desired_shape = sample_shape + \
                (batch_shape if batch_shape is not None else ())
            samples = np.random.choice(event_shape, size=desired_shape)
            samples = one_hot(samples, event_shape)
            if use_logit:
                param = np.log(param)
            net = TestMultinomial("log_prob", event_shape,
                                  _np.random.randint(1, 5), use_logit)
            if hybridize:
                net.hybridize()
            mx_out = net(param, samples).asnumpy()
            # Check shape
            assert mx_out.shape == desired_shape
@with_seed()
@use_np
def test_gluon_binomial():
    """Test mgp.Binomial (prob and logit parameterizations): sample shape,
    sample_n shape, and log_prob / mean / variance against scipy.stats.binom.
    """
    class TestBinomial(HybridBlock):
        def __init__(self, func, is_logit=False, n=1):
            super(TestBinomial, self).__init__()
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
            # Number of trials.
            self._n = n
        def forward(self, params, *args):
            dist = mgp.Binomial(n=self._n, logit=params, validate_args=True) \
                if self._is_logit else \
                mgp.Binomial(n=self._n, prob=params, validate_args=True)
            return _distribution_method_invoker(dist, self._func, *args)
    shapes = [(), (1,), (2, 3), 6]
    # Test sampling
    for shape, hybridize in itertools.product(shapes, [True, False]):
        for use_logit in [True, False]:
            n = _np.random.randint(5, 10)
            prob = np.random.uniform(low=0.1, size=shape)
            net = TestBinomial('sample', use_logit, n=float(n))
            param = prob
            if use_logit:
                param = prob_to_logit(param)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            # A scalar int shape spec means a 1-D result of that length.
            desired_shape = (shape,) if isinstance(shape, int) else shape
            assert mx_out.shape == desired_shape
    # Test sample_n
    prefix_shape = (2, 3)
    for shape in shapes:
        n = _np.random.randint(5, 10)
        prob = np.random.uniform(low=0.1, size=shape)
        # sample_n is exercised directly (not hybridized through the block).
        dist = mgp.Binomial(n=n, prob=prob)
        samples = dist.sample_n(prefix_shape)
        assert samples.shape == (prefix_shape + prob.shape)
    # Test log_prob
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        n = _np.random.randint(5, 10)
        prob = np.random.uniform(low=0.1, size=shape)
        # Valid counts in [0, n); cast to float32 to match the parameter dtype.
        sample = np.random.randint(0, n, size=shape).astype('float32')
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestBinomial("log_prob", use_logit, n=float(n))
        if hybridize:
            net.hybridize()
        mx_out = net(param, sample).asnumpy()
        np_out = ss.binom(n=n, p=prob.asnumpy()).logpmf(sample.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test mean and variance
    for shape, hybridize in itertools.product(shapes, [True, False]):
        for func in ['mean', 'variance']:
            for use_logit in [True, False]:
                n = _np.random.randint(5, 10)
                prob = np.random.uniform(low=0.1, size=shape)
                net = TestBinomial(func, use_logit, n=float(n))
                param = prob
                if use_logit:
                    param = prob_to_logit(param)
                if hybridize:
                    net.hybridize()
                mx_out = net(param).asnumpy()
                ss_binom = ss.binom(n=n, p=prob.asnumpy())
                if func == 'mean':
                    np_out = ss_binom.mean()
                else:
                    np_out = ss_binom.var()
                assert_almost_equal(mx_out, np_out, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_bernoulli():
    """Test mgp.Bernoulli (prob and logit parameterizations): log_prob,
    variance and entropy against scipy.stats.bernoulli.

    Fix: the variance and entropy loops previously drew an unused `sample`
    via npx.random.bernoulli; those dead draws have been removed (neither
    method takes a sample argument).
    """
    class TestBernoulli(HybridBlock):
        def __init__(self, func, is_logit=False):
            super(TestBernoulli, self).__init__()
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
        def forward(self, params, *args):
            bernoulli = mgp.Bernoulli(logit=params, validate_args=True) if self._is_logit else \
                mgp.Bernoulli(prob=params, validate_args=True)
            return _distribution_method_invoker(bernoulli, self._func, *args)
    # Test log_prob
    shapes = [(), (1,), (2, 3), 6]
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        # 0/1 draws to evaluate the pmf at.
        sample = npx.random.bernoulli(prob=0.5, size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestBernoulli("log_prob", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param, sample).asnumpy()
        np_out = _np.log(ss.bernoulli.pmf(sample.asnumpy(), prob.asnumpy()))
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test variance
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestBernoulli("variance", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param).asnumpy()
        np_out = ss.bernoulli(prob.asnumpy()).var()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test entropy
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestBernoulli("entropy", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param).asnumpy()
        np_out = ss.bernoulli(prob.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_relaxed_bernoulli():
    """Test mgp.RelaxedBernoulli (temperature T=1.0): reparameterized sampling
    must be differentiable w.r.t. the parameter, and log_prob must return the
    expected shape. Values are not compared against a reference.
    """
    class TestRelaxedBernoulli(HybridBlock):
        def __init__(self, func, is_logit=False):
            super(TestRelaxedBernoulli, self).__init__()
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
        def forward(self, params, *args):
            relaxed_bernoulli = mgp.RelaxedBernoulli(T=1.0, logit=params, validate_args=True)\
                if self._is_logit else \
                mgp.RelaxedBernoulli(T=1.0, prob=params, validate_args=True)
            if self._func == "sample":
                return relaxed_bernoulli.sample()
            return _distribution_method_invoker(relaxed_bernoulli, self._func, *args)
    def prob_to_logit(prob):
        # Local logit transform; shadows the module-level helper of the same name.
        return np.log(prob) - np.log1p(-prob)
    shapes = [(), (1,), (2, 3), 6]
    # Test sampling
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        # Record gradients to verify the sample is reparameterized (pathwise).
        param.attach_grad()
        net = TestRelaxedBernoulli("sample", use_logit)
        if hybridize:
            net.hybridize()
        with autograd.record():
            mx_out = net(param)
        mx_out.backward()
        desired_shape = (shape,) if isinstance(shape, int) else shape
        assert param.grad.shape == desired_shape
    # log_prob shape check on samples strictly inside (0, 1).
    for shape, hybridize, use_logit in itertools.product(shapes, [True, False], [True, False]):
        prob = np.random.uniform(size=shape)
        sample = np.random.uniform(0.1, 0.9, size=shape)
        param = prob
        if use_logit:
            param = prob_to_logit(param)
        net = TestRelaxedBernoulli("log_prob", use_logit)
        if hybridize:
            net.hybridize()
        mx_out = net(param, sample).asnumpy()
        desired_shape = (shape,) if isinstance(shape, int) else shape
        assert mx_out.shape == desired_shape
@with_seed()
@use_np
def test_gluon_categorical():
    """Test mgp.Categorical (prob and logit parameterizations): sample and
    sample_n shapes, log_prob shape and values, and enumerate_support shape.
    """
    class TestCategorical(HybridBlock):
        def __init__(self, func, is_logit=False, batch_shape=None, num_events=None, sample_shape=None):
            super(TestCategorical, self).__init__()
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
            self._batch_shape = batch_shape
            self._num_events = num_events
            self._sample_shape = sample_shape
        def forward(self, params, *args):
            categorical = mgp.Categorical(self._num_events, logit=params, validate_args=True)\
                if self._is_logit else \
                mgp.Categorical(self._num_events, prob=params,
                                validate_args=True)
            # sample/sample_n take shape arguments stored at construction time.
            if self._func == "sample":
                return categorical.sample(self._batch_shape)
            if self._func == "sample_n":
                return categorical.sample_n(self._sample_shape)
            return _distribution_method_invoker(categorical, self._func, *args)
    event_shapes = [2, 5, 10]
    batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
    sample_shapes = [(), (2,), (3, 4)]
    # Test sampling
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            # Probabilities drawn from a Dirichlet so each row sums to one.
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob.astype('float32')
            if use_logit:
                param = np.log(param)
            net = TestCategorical("sample", use_logit,
                                  batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            desired_shape = batch_shape if batch_shape is not None else ()
            assert mx_out.shape == desired_shape
    # Test sample_n
    for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob.astype('float32')
            if use_logit:
                param = np.log(param)
            net = TestCategorical("sample_n",
                                  is_logit=use_logit, batch_shape=batch_shape,
                                  num_events=event_shape, sample_shape=sample_shape
                                  )
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            # Expected result shape is sample_shape + batch_shape.
            desired_shape = sample_shape + \
                (batch_shape if batch_shape is not None else ())
            assert mx_out.shape == desired_shape
    # Test log_prob
    for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            # Clip away exact zeros/ones so log(prob) stays finite.
            eps = _np.finfo('float32').eps
            prob = np.clip(prob, eps, 1 - eps)
            param = prob.astype('float32')
            desired_shape = sample_shape + \
                (batch_shape if batch_shape is not None else ())
            samples = np.random.choice(event_shape, size=desired_shape)
            if use_logit:
                param = np.log(param)
            net = TestCategorical("log_prob", use_logit,
                                  batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param, samples)
            # Check shape
            assert mx_out.shape == desired_shape
            # Check value
            # Reference: gather log(prob) at the sampled class index per position.
            log_pmf, indices = np.broadcast_arrays(
                np.log(prob), np.expand_dims(samples, -1))
            if indices.ndim >= 1:
                indices = indices[..., :1]
            expect_log_prob = _np.take_along_axis(
                log_pmf, indices.astype('int'), axis=-1).asnumpy()
            assert_almost_equal(mx_out.asnumpy(), expect_log_prob.squeeze(), atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Test enumerate_support
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob.astype('float32')
            if use_logit:
                param = np.log(param)
            net = TestCategorical("enumerate_support",
                                  use_logit, batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            # Support is stacked along a new leading axis of length event_shape.
            desired_shape = (event_shape,) + \
                (batch_shape if batch_shape is not None else ())
            assert mx_out.shape == desired_shape
@with_seed()
@use_np
def test_gluon_one_hot_categorical():
    """Test mgp.OneHotCategorical (prob and logit parameterizations): sample
    shape, log_prob shape on one-hot inputs, and enumerate_support shape.

    NOTE(review): unlike the other tests here, the distribution is built
    without validate_args — possibly an oversight; confirm intent.
    """
    def one_hot(a, num_classes):
        # Integer class indices -> one-hot rows (via identity-matrix lookup).
        return np.identity(num_classes)[a]
    class TestOneHotCategorical(HybridBlock):
        def __init__(self, func, is_logit=False, batch_shape=None, num_events=None):
            super(TestOneHotCategorical, self).__init__()
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
            self._batch_shape = batch_shape
            self._num_events = num_events
        def forward(self, params, *args):
            categorical = mgp.OneHotCategorical(num_events=self._num_events, logit=params) \
                if self._is_logit else \
                mgp.OneHotCategorical(num_events=self._num_events, prob=params)
            if self._func == "sample":
                return categorical.sample(self._batch_shape)
            return _distribution_method_invoker(categorical, self._func, *args)
    event_shapes = [2, 5, 10]
    batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
    sample_shapes = [(), (2,), (3, 4)]
    # Test sampling
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            # Probabilities drawn from a Dirichlet so each row sums to one.
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob
            if use_logit:
                param = np.log(param)
            net = TestOneHotCategorical(
                "sample", use_logit, batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            desired_shape = batch_shape if batch_shape is not None else ()
            assert mx_out.shape == desired_shape + (event_shape,)
    # Test log_prob
    for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            # Clip away exact zeros/ones so log(prob) stays finite.
            eps = _np.finfo('float32').eps
            prob = np.clip(prob, eps, 1 - eps)
            param = prob
            desired_shape = sample_shape + \
                (batch_shape if batch_shape is not None else ())
            samples = np.random.choice(event_shape, size=desired_shape)
            samples = one_hot(samples, event_shape)
            if use_logit:
                param = np.log(param)
            net = TestOneHotCategorical(
                "log_prob", use_logit, batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param, samples)
            # Check shape
            assert mx_out.shape == desired_shape
    # Test enumerate support
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            param = prob
            if use_logit:
                param = np.log(param)
            net = TestOneHotCategorical(
                "enumerate_support", use_logit, batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param).asnumpy()
            # Support: leading axis of length event_shape, trailing one-hot axis.
            desired_shape = batch_shape if batch_shape is not None else ()
            assert mx_out.shape == (event_shape,) + \
                desired_shape + (event_shape,)
@with_seed()
@use_np
def test_relaxed_one_hot_categorical():
    """Test mgp.RelaxedOneHotCategorical (temperature T=1.0): reparameterized
    sampling must be differentiable w.r.t. the parameter, and log_prob must
    return the expected shape on simplex-valued samples.
    """
    class TestRelaxedOneHotCategorical(HybridBlock):
        def __init__(self, func, is_logit=False, batch_shape=None, num_events=None):
            super(TestRelaxedOneHotCategorical, self).__init__()
            # Whether `params` passed to forward() is a logit or a probability.
            self._is_logit = is_logit
            self._func = func
            self._batch_shape = batch_shape
            self._num_events = num_events
        def forward(self, params, *args):
            categorical = mgp.RelaxedOneHotCategorical(T=1.0, num_events=self._num_events, logit=params) \
                if self._is_logit else \
                mgp.RelaxedOneHotCategorical(
                    T=1.0, num_events=self._num_events, prob=params)
            if self._func == "sample":
                return categorical.sample(self._batch_shape)
            return _distribution_method_invoker(categorical, self._func, *args)
    event_shapes = [2, 5, 10]
    batch_shapes = [None, (2, 3)] # , (4, 0, 5)]
    sample_shapes = [(), (2,), (3, 4)]
    # Test sampling
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            # Probabilities drawn from a Dirichlet so each row sums to one.
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            prob = prob.astype('float32')
            param = prob
            if use_logit:
                param = np.log(param)
            # Record gradients to verify the sample is reparameterized (pathwise).
            param.attach_grad()
            net = TestRelaxedOneHotCategorical(
                "sample", use_logit, batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            with autograd.record():
                mx_out = net(param)
            mx_out.backward()
            desired_shape = batch_shape if batch_shape is not None else ()
            assert mx_out.shape == desired_shape + (event_shape,)
            assert param.grad.shape == param.shape
    # Test log_prob
    for event_shape, batch_shape, sample_shape in itertools.product(event_shapes, batch_shapes, sample_shapes):
        for use_logit, hybridize in itertools.product([True, False], [True, False]):
            prob = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=batch_shape))
            # Clip away exact zeros/ones so log(prob) stays finite.
            eps = _np.finfo('float32').eps
            prob = np.clip(prob, eps, 1 - eps)
            param = prob
            desired_shape = sample_shape + \
                (batch_shape if batch_shape is not None else ())
            # Samples from a Relaxed One-hot Categorical lie on a simplex.
            samples = np.array(_np.random.dirichlet(
                [1 / event_shape] * event_shape, size=desired_shape))
            if use_logit:
                param = np.log(param)
            net = TestRelaxedOneHotCategorical(
                "log_prob", use_logit, batch_shape, event_shape)
            if hybridize:
                net.hybridize()
            mx_out = net(param, samples)
            # Check shape
            assert mx_out.shape == desired_shape
@with_seed()
@use_np
def test_gluon_mvn():
    """Test mgp.MultivariateNormal under all three covariance parameterizations
    (cov, precision, scale_tril): differentiable sampling, log_prob and entropy
    against scipy.stats.multivariate_normal.

    scipy does not support batched parameters, so value checks compare only the
    first element of each batch.
    """
    class TestMVN(HybridBlock):
        def __init__(self, func, param_type):
            super(TestMVN, self).__init__()
            self._func = func
            # cov, precision or scale_tril
            self._param_type = param_type
        def forward(self, loc, cov, *args):
            # `cov` is passed under the keyword selected by _param_type.
            mvn = mgp.MultivariateNormal(loc=loc, **{self._param_type: cov},
                                         validate_args=True)
            return _distribution_method_invoker(mvn, self._func, *args)
    def _stable_inv(cov):
        """
        Force the precision matrix to be symmetric.
        """
        precision = np.linalg.inv(cov)
        precision_t = np.swapaxes(precision, -1, -2)
        return (precision + precision_t) / 2
    event_shapes = [3, 5]
    loc_shapes = [(), (2,), (4, 2)]
    cov_shapes = [(), (2,), (4, 2)]
    # Map each parameterization name to the transform producing it from a
    # full covariance matrix.
    cov_func = {
        'cov': lambda s: s,
        'precision': lambda s: _stable_inv(s),
        'scale_tril': lambda s: np.linalg.cholesky(s)
    }
    # Test sampling
    for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
        for cov_type in cov_func.keys():
            for hybridize in [True, False]:
                loc = np.random.randn(*(loc_shape + (event_shape,)))
                _s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
                loc.attach_grad()
                _s.attach_grad()
                # Full covariance matrix
                # s @ s.T + I is symmetric positive definite by construction.
                sigma = np.matmul(_s, np.swapaxes(
                    _s, -1, -2)) + np.eye(event_shape)
                cov_param = cov_func[cov_type](sigma)
                net = TestMVN('sample', cov_type)
                if hybridize:
                    net.hybridize()
                with autograd.record():
                    mx_out = net(loc, cov_param)
                # Broadcast loc against the covariance batch to get the
                # expected sample shape.
                desired_shape = (loc + sigma[..., 0]).shape
                assert mx_out.shape == desired_shape
                mx_out.backward()
                assert loc.grad.shape == loc.shape
                assert _s.grad.shape == _s.shape
    # Test log_prob
    for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
        for cov_type in cov_func.keys():
            for hybridize in [True, False]:
                loc = np.random.randn(*(loc_shape + (event_shape,)))
                _s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
                samples = np.random.normal(
                    np.zeros_like(loc), np.ones_like(_s[..., 0]))
                loc.attach_grad()
                _s.attach_grad()
                # Full covariance matrix
                sigma = np.matmul(_s, np.swapaxes(
                    _s, -1, -2)) + np.eye(event_shape)
                cov_param = cov_func[cov_type](sigma)
                net = TestMVN('log_prob', cov_type)
                if hybridize:
                    net.hybridize()
                mx_out = net(loc, cov_param, samples)
                assert mx_out.shape == samples.shape[:-1]
                if mx_out.shape == ():
                    mx_out_t = mx_out.asnumpy()
                else:
                    mx_out_t = mx_out.asnumpy().flatten()[0]
                # Select the first element in the batch, because scipy does not support batching.
                loc_t = loc.reshape(-1, event_shape)[0].asnumpy()
                sigma_t = sigma.reshape(-1, event_shape,
                                        event_shape)[0].asnumpy()
                samples_t = samples.reshape(-1, event_shape).asnumpy()[0]
                scipy_mvn = ss.multivariate_normal(loc_t, sigma_t)
                ss_out = scipy_mvn.logpdf(samples_t)
                assert_almost_equal(mx_out_t, ss_out, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
    # Test entropy
    for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
        for cov_type in cov_func.keys():
            for hybridize in [True, False]:
                loc = np.random.randn(*(loc_shape + (event_shape,)))
                _s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
                loc.attach_grad()
                _s.attach_grad()
                # Full covariance matrix
                sigma = np.matmul(_s, np.swapaxes(
                    _s, -1, -2)) + np.eye(event_shape)
                cov_param = cov_func[cov_type](sigma)
                net = TestMVN('entropy', cov_type)
                if hybridize:
                    net.hybridize()
                mx_out = net(loc, cov_param)
                # Entropy is independent of loc; shape follows the covariance batch.
                assert mx_out.shape == sigma.shape[:-2]
                if mx_out.shape == ():
                    mx_out_t = mx_out.asnumpy()
                else:
                    mx_out_t = mx_out.asnumpy().flatten()[0]
                # Select the first element in the batch, because scipy does not support batching.
                loc_t = loc.reshape(-1, event_shape)[0].asnumpy()
                sigma_t = sigma.reshape(-1, event_shape,
                                        event_shape)[0].asnumpy()
                scipy_mvn = ss.multivariate_normal(loc_t, sigma_t)
                ss_out = scipy_mvn.entropy()
                assert_almost_equal(mx_out_t, ss_out, atol=1e-4,
                                    rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_gluon_half_normal():
    """Compare mgp.HalfNormal sample shape and log_prob/cdf/icdf with scipy.stats.halfnorm."""
    class TestHalfNormal(HybridBlock):
        def __init__(self, func):
            super(TestHalfNormal, self).__init__()
            self._func = func

        def forward(self, scale, *args):
            dist = mgp.HalfNormal(scale, validate_args=True)
            # Dispatch directly on the method name.
            return getattr(dist, self._func)(*args)

    shapes = [(), (1,), (2, 3), 6]
    # sampling: only the output shape is checked
    for shape in shapes:
        for hybridize in (True, False):
            scale = np.random.uniform(0.5, 1.5, shape)
            block = TestHalfNormal("sample")
            if hybridize:
                block.hybridize()
            actual = block(scale).asnumpy()
            # A scalar int shape spec means a 1-D result of that length.
            expected_shape = (shape,) if isinstance(shape, Number) else shape
            assert actual.shape == expected_shape
    # log_prob on non-negative samples
    for shape in shapes:
        for hybridize in (True, False):
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.abs(np.random.normal(size=shape))
            block = TestHalfNormal("log_prob")
            if hybridize:
                block.hybridize()
            actual = block(scale, samples).asnumpy()
            expected = ss.halfnorm(0, scale.asnumpy()).logpdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # cdf
    for shape in shapes:
        for hybridize in (True, False):
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.abs(np.random.normal(size=shape))
            block = TestHalfNormal("cdf")
            if hybridize:
                block.hybridize()
            actual = block(scale, samples).asnumpy()
            expected = ss.halfnorm(0, scale.asnumpy()).cdf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # icdf (quantile function; probe points drawn from [0, 1))
    for shape in shapes:
        for hybridize in (True, False):
            scale = np.random.uniform(0.5, 1.5, shape)
            samples = np.random.uniform(size=shape)
            block = TestHalfNormal("icdf")
            if hybridize:
                block.hybridize()
            actual = block(scale, samples).asnumpy()
            expected = ss.halfnorm(0, scale.asnumpy()).ppf(samples.asnumpy())
            assert_almost_equal(actual, expected, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
@with_seed()
@use_np
def test_affine_transform():
    r"""
    Test the correctness of affine transformation by performing it
    on a standard normal, since N(\mu, \sigma^2) = \mu + \sigma * N(0, 1)
    """
    class TestAffineTransform(HybridBlock):
        def __init__(self, func):
            super(TestAffineTransform, self).__init__()
            self._func = func
        def forward(self, loc, scale, *args):
            std_normal = mgp.Normal(np.zeros_like(loc),
                                    np.ones_like(scale))
            # Scale first, then shift: x -> scale * x + loc.
            transforms = [mgp.AffineTransform(loc=0, scale=scale),
                          mgp.AffineTransform(loc=loc, scale=1)]
            transformed_normal = mgp.TransformedDistribution(
                std_normal, transforms)
            if (len(args) == 0):
                # No extra argument: the target is a property, not a method.
                return getattr(transformed_normal, self._func)
            return getattr(transformed_normal, self._func)(*args)
    shapes = [(1,), (2, 3), 6]
    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        loc.attach_grad()
        scale = np.random.uniform(0.5, 1.5, shape)
        scale.attach_grad()
        samples = np.random.normal(size=shape)
        net = TestAffineTransform('log_prob')
        if hybridize:
            net.hybridize()
        with autograd.record():
            mx_out = net(loc, scale, samples)
        np_out = _np.log(ss.norm(loc.asnumpy(),
                                 scale.asnumpy()).pdf(samples.asnumpy()))
        assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
        mx_out.backward()
        # Analytic gradients of the normal log-density w.r.t. loc and scale.
        loc_expected_grad = ((samples - loc) / scale ** 2).asnumpy()
        scale_expected_grad = (samples - loc) ** 2 * \
            np.power(scale, -3) - (1 / scale)
        assert_almost_equal(loc.grad.asnumpy(), loc_expected_grad, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
        assert_almost_equal(scale.grad.asnumpy(), scale_expected_grad, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test sampling
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        loc.attach_grad()
        scale = np.random.uniform(0.5, 1.5, shape)
        scale.attach_grad()
        if not isinstance(shape, tuple):
            shape = (shape,)
        expected_shape = (4, 5) + shape
        net = TestAffineTransform('sample')
        mx_out = net(loc, scale, expected_shape).asnumpy()
        assert mx_out.shape == expected_shape
@with_seed()
@use_np
def test_compose_transform():
    """Verify ComposeTransform by building a log-normal from a standard
    normal (scale, shift, then exp) and comparing log_prob against
    scipy.stats.lognorm."""
    class TestComposeTransform(HybridBlock):
        def __init__(self, func):
            super(TestComposeTransform, self).__init__()
            self._func = func
        def forward(self, loc, scale, *args):
            # Generate a log_normal distribution.
            std_normal = mgp.Normal(np.zeros_like(loc),
                                    np.ones_like(scale))
            transforms = mgp.ComposeTransform([
                mgp.AffineTransform(loc=0, scale=scale),
                mgp.AffineTransform(loc=loc, scale=1),
                mgp.ExpTransform()
            ])
            transformed_normal = mgp.TransformedDistribution(
                std_normal, transforms)
            if (len(args) == 0):
                # No extra argument: the target is a property, not a method.
                return getattr(transformed_normal, self._func)
            return getattr(transformed_normal, self._func)(*args)
    shapes = [(1,), (2, 3), 6]
    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        loc.attach_grad()
        scale = np.random.uniform(0.5, 1.5, shape)
        scale.attach_grad()
        samples = np.random.uniform(1, 2, size=shape)
        net = TestComposeTransform('log_prob')
        if hybridize:
            net.hybridize()
        with autograd.record():
            mx_out = net(loc, scale, samples)
        np_out = ss.lognorm(s=scale.asnumpy(), scale=np.exp(
            loc).asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@use_np
def test_cached_property():
    """Check that mgp.cached_property is transparent to autograd: gradients
    flow through the cached value imperatively, inside a HybridBlock, and
    after hybridization (expected dy/dx == scale)."""
    x = np.random.normal()
    x.attach_grad()
    scale = 0.1
    class Dummy(object):
        def __init__(self, x):
            super(Dummy, self).__init__()
            self.x = x
        @mgp.cached_property
        def y(self):
            # y = scale * x + 1, so dy/dx == scale.
            return scale * self.x + 1
    with autograd.record():
        obj = Dummy(x)
        obj.y.backward()
    assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,)))
    class DummyBlock(HybridBlock):
        def forward(self, x):
            obj = Dummy(x)
            return obj.y
    x = np.random.normal()
    x.attach_grad()
    net = DummyBlock()
    with autograd.record():
        y = net(x)
    y.backward()
    assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,)))
    x = np.random.normal()
    x.attach_grad()
    net.hybridize()
    with autograd.record():
        y = net(x)
    y.backward()
    assert_almost_equal(x.grad.asnumpy(), scale * np.ones((1,)))
@use_np
def test_independent():
    """Check mgp.Independent reinterprets trailing event dimensions:
    log_prob of a wrapped Bernoulli must reduce over the event dims and
    return only the batch shape."""
    class TestIndependent(HybridBlock):
        def __init__(self, event_dim, func):
            super(TestIndependent, self).__init__()
            self._event_dim = event_dim
            self._func = func
        def forward(self, logit, *args):
            base_dist = mgp.Bernoulli(logit=logit)
            reshaped_dist = mgp.Independent(base_dist, self._event_dim)
            return getattr(reshaped_dist, self._func)(*args)
    event_shapes = [(1,), (4,), (2, 2)]
    batch_shapes = [(2, 3), (2,)]
    for (batch_shape, event_shape) in itertools.product(batch_shapes, event_shapes):
        for hybridize in [False, True]:
            for func in ['log_prob']:
                full_shape = batch_shape + event_shape
                logit = np.random.normal(0, 2, size=full_shape)
                # Rounded uniforms give valid {0, 1} Bernoulli samples.
                samples = np.round(np.random.uniform(size=full_shape))
                net = TestIndependent(len(event_shape), func)
                if hybridize:
                    net.hybridize()
                mx_out = net(logit, samples)
                # Event dims are summed out; only the batch shape remains.
                assert mx_out.shape == batch_shape
@with_seed()
@use_np
def test_gluon_kl():
    """KL-divergence smoke tests: KL(p || p) must be ~0 for every supported
    distribution family, and KL between selected pairs of different families
    must have the correct batch shape (optionally cross-checked against a
    Monte-Carlo estimate when monte_carlo_test is enabled)."""
    def _test_zero_kl(p, shape):
        """Check if KL(p || p) = 0
        Parameters
        ----------
        p : Distribution
        """
        mx_out = mgp.kl_divergence(p, p).asnumpy()
        np_out = _np.zeros(shape)
        assert_almost_equal(mx_out, np_out, atol=1e-3,
                            rtol=1e-2, use_broadcast=False)
    def _test_monte_carlo(p, q, M=50000):
        r"""Check if KL(p || q) is approximately equal to
        1/M * \Sum_{i=1}^{M} log(p(x_i) / q(x_i)), x_i ~ p(x)
        """
        kl = mgp.kl_divergence(p, q)
        mc_approx = mgp.empirical_kl(p, q, M)
        assert_almost_equal(mc_approx.asnumpy(), kl.asnumpy(), atol=1e-1,
                            rtol=1e-1, use_broadcast=False)
    def _dist_factory(dist, *param_funcs):
        """Generate a distribution object with parameters of random value.
        Parameters
        ----------
        dist : Type
            A type of distribution.
        param_funcs : List
            A list of functions that generate valid parameters for `dist`
        """
        params = [f() if callable(f) else f for f in param_funcs]
        return dist(*params)
    # could cause longer runtime and potential flaky tests
    monte_carlo_test = False
    repeated_times = 50000
    shapes = [(), (1,), (2, 3), 6]
    # Test kl between same distributions
    # uniform
    for shape in shapes:
        dist = mgp.Uniform
        def low(): return np.random.uniform(0, 1, shape)
        def high(): return np.random.uniform(1, 2, shape)
        _test_zero_kl(_dist_factory(dist, low, high), shape)
    # normal, laplace, cauchy, gumbel
    for dist in [mgp.Normal, mgp.Laplace, mgp.Cauchy, mgp.Gumbel]:
        for shape in shapes:
            def loc(): return np.random.uniform(-1, 1, shape)
            def scale(): return np.random.uniform(0.5, 1.5, shape)
            _test_zero_kl(_dist_factory(dist, loc, scale), shape)
            if monte_carlo_test:
                _test_monte_carlo(_dist_factory(dist, loc, scale),
                                  _dist_factory(dist, loc, scale),
                                  repeated_times)
    # poisson (scalar shape () is skipped: shapes[1:])
    for shape in shapes[1:]:
        dist = mgp.Poisson
        def rate(): return np.random.uniform(0.5, 1.5, shape)
        _test_zero_kl(_dist_factory(dist, rate), shape)
        if monte_carlo_test:
            _test_monte_carlo(_dist_factory(dist, rate),
                              _dist_factory(dist, rate),
                              repeated_times)
    # exponential, geometric
    for dist in [mgp.Exponential, mgp.Geometric]:
        for shape in shapes:
            def s(): return np.random.uniform(size=shape, low=1e-3)
            _test_zero_kl(_dist_factory(dist, s), shape)
            if monte_carlo_test:
                _test_monte_carlo(_dist_factory(dist, s),
                                  _dist_factory(dist, s),
                                  repeated_times)
    # pareto
    for shape in shapes:
        dist = mgp.Pareto
        def alpha(): return np.random.uniform(size=shape)
        def scale(): return np.random.uniform(size=shape)
        _test_zero_kl(_dist_factory(dist, alpha, scale), shape)
    # half-normal
    for shape in shapes:
        dist = mgp.HalfNormal
        def scale(): return np.random.uniform(0.5, 1.5, shape)
        _test_zero_kl(_dist_factory(dist, scale), shape)
        if monte_carlo_test:
            _test_monte_carlo(_dist_factory(dist, scale),
                              _dist_factory(dist, scale),
                              repeated_times)
    # gamma, beta
    for dist in [mgp.Gamma, mgp.Beta]:
        for shape in shapes:
            def param1(): return np.random.uniform(0.5, 1.5, shape)
            def param2(): return np.random.uniform(0.5, 1.5, shape)
            _test_zero_kl(_dist_factory(dist, param1, param2), shape)
            if monte_carlo_test:
                _test_monte_carlo(_dist_factory(dist, param1, param2),
                                  _dist_factory(dist, param1, param2),
                                  50000)
    # binomial
    for shape in shapes:
        n = _np.random.randint(5, 10)
        prob = np.random.uniform(low=0.1, size=shape)
        dist = mgp.Binomial(n=n, prob=prob)
        _test_zero_kl(dist, shape)
    # bernoulli
    for shape in shapes:
        prob = np.random.uniform(size=shape)
        dist = mgp.Bernoulli(prob=prob)
        _test_zero_kl(dist, shape)
    # multivariate normal: batched loc/cov built from a random factor _s
    event_shapes = [3, 5, 10]
    loc_shapes = [(), (2,), (4, 2)]
    cov_shapes = [(), (2,), (4, 2)]
    for loc_shape, cov_shape, event_shape in itertools.product(loc_shapes, cov_shapes, event_shapes):
        loc = np.random.randn(*(loc_shape + (event_shape,)))
        _s = np.random.randn(*(cov_shape + (event_shape, event_shape)))
        # _s @ _s.T + I is symmetric positive definite.
        sigma = np.matmul(_s, np.swapaxes(_s, -1, -2)) + np.eye(event_shape)
        dist = mgp.MultivariateNormal(loc, cov=sigma)
        # Broadcast loc against cov's batch dims to get the batch shape.
        desired_shape = (loc + sigma[..., 0]).shape[:-1]
        _test_zero_kl(dist, desired_shape)
    batch_shapes = loc_shapes
    # dirichlet
    for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
        desired_shape = (batch_shape if batch_shape is not None else ())
        dist = mgp.Dirichlet
        def alpha(): return np.random.uniform(
            0.5, 1.5, size=(desired_shape + (event_shape,)))
        _test_zero_kl(_dist_factory(dist, alpha), desired_shape)
        if monte_carlo_test:
            _test_monte_carlo(_dist_factory(dist, alpha),
                              _dist_factory(dist, alpha),
                              50000)
    # categorical, One-hot categorical
    for dist in [mgp.Categorical, mgp.OneHotCategorical]:
        for event_shape, batch_shape in itertools.product(event_shapes, batch_shapes):
            prob = (lambda:
                    np.array(_np.random.dirichlet([1 / event_shape] * event_shape, size=batch_shape)))
            _test_zero_kl(_dist_factory(dist, event_shape, prob), batch_shape)
            if monte_carlo_test:
                _test_monte_carlo(_dist_factory(dist, event_shape, prob),
                                  _dist_factory(dist, event_shape, prob),
                                  repeated_times)
    # Test kl between different distributions
    # KL(Uniform || ...)
    for shape in shapes:
        rhs_dists = [
            mgp.Normal(np.random.uniform(-1, 1, shape),
                       np.random.uniform(0.5, 1.5, shape)),
            mgp.Gumbel(np.random.uniform(-1, 1, shape),
                       np.random.uniform(0.5, 1.5, shape)),
        ]
        for rhs_dist in rhs_dists:
            low = np.random.uniform(-1, 1, shape)
            high = low + np.random.uniform(0.5, 1.5, shape)
            lhs_dist = mgp.Uniform(low, high)
            kl = mgp.kl_divergence(lhs_dist, rhs_dist)
            assert kl.shape == low.shape
            if monte_carlo_test:
                _test_monte_carlo(lhs_dist, rhs_dist, repeated_times)
    # KL(Exponential || ...)
    for shape in shapes:
        rhs_dists = [
            mgp.Normal(np.random.uniform(-1, 1, shape),
                       np.random.uniform(0.5, 1.5, shape)),
            mgp.Gumbel(np.random.uniform(-1, 1, shape),
                       np.random.uniform(0.5, 1.5, shape)),
            mgp.Gamma(np.random.uniform(0.5, 1.5, shape),
                      np.random.uniform(0.5, 1.5, shape))
        ]
        for rhs_dist in rhs_dists:
            s = np.random.uniform(size=shape)
            lhs_dist = mgp.Exponential(s)
            kl = mgp.kl_divergence(lhs_dist, rhs_dist)
            assert kl.shape == s.shape
            if monte_carlo_test:
                _test_monte_carlo(lhs_dist, rhs_dist, repeated_times)
@pytest.mark.garbage_expected
@with_seed()
@use_np
def test_gluon_stochastic_block():
    class dummyBlock(StochasticBlock):
        """In this test case, we generate samples from a Gaussian parameterized
        by `loc` and `scale` and accumulate the KL-divergence between it and
        its prior and the l2 norm of `loc` into the block's loss storage."""
        @StochasticBlock.collectLoss
        def forward(self, loc, scale):
            qz = mgp.Normal(loc, scale)
            # prior
            pz = mgp.Normal(np.zeros_like(loc), np.ones_like(scale))
            self.add_loss(mgp.kl_divergence(qz, pz))
            self.add_loss((loc ** 2).sum(1))
            return qz.sample()
    shape = (4, 4)
    for hybridize in [True, False]:
        net = dummyBlock()
        if hybridize:
            net.hybridize()
        loc = np.random.randn(*shape)
        scale = np.random.rand(*shape)
        mx_out = net(loc, scale).asnumpy()
        # Losses are exposed in the order forward() added them.
        kl = net.losses[0].asnumpy()
        l2_norm = net.losses[1].asnumpy()
        assert mx_out.shape == loc.shape
        assert kl.shape == loc.shape
        assert l2_norm.shape == shape[:-1]
        if hybridize:
            # Exporting only works on a hybridized (symbolic) block.
            net.export('dummyBlock', epoch=0)
@with_seed()
@use_np
def test_gluon_stochastic_block_exception():
    """Calling add_loss() from a forward() that is NOT decorated with
    @StochasticBlock.collectLoss must raise ValueError."""
    class problemBlock(StochasticBlock):
        def forward(self, loc, scale):
            qz = mgp.Normal(loc, scale)
            # prior
            pz = mgp.Normal(np.zeros_like(loc), np.ones_like(scale))
            self.add_loss(mgp.kl_divergence(qz, pz))
            self.add_loss((loc ** 2).sum(1))
            return qz.sample()
    shape = (4, 4)
    for hybridize in [True, False]:
        net = problemBlock()
        if hybridize:
            net.hybridize()
        loc = np.random.randn(*shape)
        scale = np.random.rand(*shape)
        with pytest.raises(ValueError):
            mx_out = net(loc, scale).asnumpy()
@pytest.mark.garbage_expected
@with_seed()
@use_np
def test_gluon_stochastic_sequential():
    """StochasticSequential must run its children in order, aggregate each
    StochasticBlock's losses into net.losses, and raise ValueError when a
    child calls add_loss without the @collectLoss decorator."""
    class normalBlock(HybridBlock):
        def forward(self, x):
            return (x + 1)
    class stochasticBlock(StochasticBlock):
        @StochasticBlock.collectLoss
        def forward(self, x):
            self.add_loss(x ** 2)
            self.add_loss(x - 1)
            return (x + 1)
    class problemBlock(StochasticBlock):
        # Missing @StochasticBlock.collectLoss: add_loss here should make
        # the surrounding sequential raise ValueError.
        def forward(self, x):
            self.add_loss(x ** 2)
            self.add_loss(x - 1)
            return (x + 1)
    shape = (4, 4)
    for hybridize in [True, False]:
        initial_value = np.ones(shape)
        net = StochasticSequential()
        net.add(stochasticBlock())
        net.add(normalBlock())
        net.add(stochasticBlock())
        net.add(normalBlock())
        if hybridize:
            net.hybridize()
        mx_out = net(initial_value).asnumpy()
        # Four +1 blocks: 1 -> 2 -> 3 -> 4 -> 5.
        assert_almost_equal(mx_out, _np.ones(shape) * 5)
        accumulated_loss = net.losses
        assert len(accumulated_loss) == 2
        # First stochastic block saw x == 1: losses 1**2 and 1-1.
        assert_almost_equal(accumulated_loss[0][0].asnumpy(), _np.ones(shape))
        assert_almost_equal(
            accumulated_loss[0][1].asnumpy(), _np.ones(shape) - 1)
        # Second stochastic block saw x == 3: losses 3**2 and 3-1.
        assert_almost_equal(
            accumulated_loss[1][0].asnumpy(), _np.ones(shape) * 9)
        assert_almost_equal(
            accumulated_loss[1][1].asnumpy(), _np.ones(shape) + 1)
    for hybridize in [True, False]:
        initial_value = np.ones(shape)
        net = StochasticSequential()
        net.add(stochasticBlock())
        net.add(normalBlock())
        net.add(problemBlock())
        net.add(normalBlock())
        if hybridize:
            net.hybridize()
        with pytest.raises(ValueError):
            mx_out = net(initial_value).asnumpy()
@with_seed()
@use_np
def test_gluon_domain_map():
    """Exercise biject_to/transform_to for each constraint type: map an
    unconstrained sample into the constrained domain and run the
    constraint's own check() on the result.

    Fix: TestDomainMap.__init__ previously accepted `bijective` but never
    stored it; forward() silently read the enclosing loop variable through
    the closure instead.  The flag is now stored on the block, so the
    block's behavior is self-contained (e.g. safe if called outside the
    constructing loop iteration).
    """
    class TestDomainMap(HybridBlock):
        def __init__(self, constraint_type, bijective):
            super(TestDomainMap, self).__init__()
            self._constraint_type = getattr(mgp.constraint, constraint_type)
            # Store the flag: the original read the loop variable
            # `bijective` from the closure and ignored this argument.
            self._bijective = bijective
        def forward(self, *params):
            value = params[0]
            constraint_param = params[1:]
            if len(constraint_param) == 0:
                constraint = self._constraint_type()
            else:
                constraint = self._constraint_type(*constraint_param)
            if self._bijective:
                bijector = mgp.biject_to(constraint)
                value = bijector(value)
            else:
                transformation = mgp.transform_to(constraint)
                value = transformation(value)
            return (value, constraint.check(value))
    constraints_zoo = [
        # (constraint_type, constraint_param)
        ('Positive', ()),
        ('GreaterThan', [np.random.randn(2, 2)]),
        ('GreaterThanEq', [np.random.randn(2, 2)]),
        ('LessThan', [np.random.randn(2, 2)]),
        ('Interval', [np.random.uniform(0, 1, (2, 2)),
                      np.random.uniform(2, 3, (2, 2))]),
        ('HalfOpenInterval', [np.random.uniform(
            0, 1, (2, 2)), np.random.uniform(2, 3, (2, 2))])
    ]
    test_sample = np.random.randn(2, 2)
    for (constraint_type, constraint_arg) in constraints_zoo:
        for bijective in [True, False]:
            for hybridize in [True, False]:
                net = TestDomainMap(constraint_type, bijective)
                if hybridize:
                    net.hybridize()
                constrained_out, constraint_status = net(
                    test_sample, *constraint_arg)
                # NOTE(review): this assumes constraint.check() passes the
                # (valid) value through unchanged -- TODO confirm.
                assert_almost_equal(constrained_out.asnumpy(),
                                    constraint_status.asnumpy())
| sxjscience/mxnet | tests/python/unittest/test_gluon_probability_v2.py | Python | apache-2.0 | 94,412 | [
"Gaussian"
] | 0bfdb8a155592bdec2e20e2e2c2107129a04c09d76cf6d9c693a9e0b36361366 |
#!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
from collections import deque
from subprocess import call
from optparse import OptionParser
from tempfile import mkstemp
import os
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import resource
# Sink for subprocess output that is intentionally discarded.
FNULL = open('/dev/null', 'w')
# Repository root: strips the trailing 'src/py/' from the script's own
# directory (assumes the script lives under src/py/ -- TODO confirm).
base_path = os.path.dirname(sys.argv[0])[:-len('src/py/')]
# Hard limit on the number of open file descriptors for this process.
file_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
class bcolors:
    # ANSI terminal escape codes used to colorize status messages on stderr.
    HEADER = '\033[95m'   # magenta, used for STEP banners
    OKBLUE = '\033[94m'   # blue, used for COMMAND echoes
    OKGREEN = '\033[92m'  # green (currently unused in this chunk)
    WARNING = '\033[93m'  # yellow, used for RESULTS and ERROR
    FAIL = '\033[91m'     # red (currently unused in this chunk)
    ENDC = '\033[0m'      # reset attributes
def main():
    """Drive the whole misassembly-detection pipeline.

    Stages: optionally filter short contigs, align reads (bowtie2), run
    samtools, compute coverage/abundance, run LAP, depth-of-coverage and
    breakpoint detectors, then per-coverage-bin samtools + REAPR, and
    finally merge all flagged regions into summary.gff / suspicious.gff
    and the summary tables.  Communicates with the helper functions
    through the global command log `shell_file_fp` and the mutated
    `options` object.
    """
    (options, args) = get_options()
    start_time = time.time()
    fasta_file = options.fasta_file
    error_files = []
    reads_untrimmed_location = [options.first_mates, options.second_mates]
    reads_trimmed_location = []
    shell_file = options.output_dir + "/commands.sh"
    sam_output_location_dir = options.output_dir + "/sam/"
    sam_output_location = sam_output_location_dir + "library.sam"
    singleton_output_dir = options.output_dir + "/singleton/"
    singleton_output_location = singleton_output_dir + "singletons.csv"
    ensure_dir(sam_output_location_dir)
    ensure_dir(singleton_output_dir)
    # The command log is a module-level global written by out_cmd()/step().
    global shell_file_fp
    shell_file_fp = open(shell_file, 'w')
    setup_shell_file()
    bins_dir = options.output_dir + "/bins/"
    ensure_dir(bins_dir)
    # Save originals: the per-bin loop below temporarily rewrites
    # options.fasta_file / options.output_dir.
    input_fasta_saved = options.fasta_file
    output_dir_saved = options.output_dir
    all_contig_lengths = {}
    if options.min_contig_length > 0:
        step("FILTERING ASSEMBLY CONTIGS LESS THAN " + str(options.min_contig_length) + ' BPs')
        all_contig_lengths = filter_short_contigs(options)
        results(options.fasta_file)
        fasta_file = options.fasta_file
        input_fasta_saved = options.fasta_file
    step("ALIGNING READS")
    unaligned_dir = run_bowtie2(options, sam_output_location)
    contig_lengths = get_contig_lengths(sam_output_location)
    step("RUNNING SAMTOOLS")
    bam_location, sorted_bam_location, pileup_file = \
        run_samtools(options, sam_output_location, index=True)
    if options.coverage_file is None:
        step("CALCULATING CONTIG COVERAGE")
        options.coverage_file = calculate_contig_coverage(options, pileup_file)
        results(options.coverage_file)
    pileup_file = run_abundance_by_kmers(options)
    results(options.coverage_file)
    contig_abundances = get_contig_abundances(options.coverage_file)
    step("CALCULATING ASSEMBLY PROBABILITY")
    run_lap(options, sam_output_location, reads_trimmed_location)
    # NOTE(review): options.threads comes from optparse as a *string*
    # (default "8"); "str > int" only orders consistently on Python 2 and
    # raises TypeError on Python 3 -- TODO cast to int.
    if options.threads > 1:
        step("PARTITIONING COVERAGE FILE")
        run_split_pileup(options, pileup_file)
    step("DEPTH OF COVERAGE")
    error_files.append(run_depth_of_coverage(options, pileup_file))
    contig_to_bin_map, bin_dir_dict = bin_coverage(options,bins_dir)
    split_sam_by_bin(sam_output_location, contig_to_bin_map, bin_dir_dict)
    outputBreakpointDir = options.output_dir + "/breakpoint/"
    ouputBreakpointLocation = outputBreakpointDir + "errorsDetected.csv"
    ensure_dir(outputBreakpointDir)
    step("BREAKPOINT")
    error_files.append(run_breakpoint_finder(options,\
        unaligned_dir, outputBreakpointDir))
    # Re-run samtools + REAPR once per coverage bin produced above.
    for bin_dir in os.listdir(bins_dir):
        #if 'bin' in bin_dir:
        coverages = bin_dir
        options.fasta_file = os.path.abspath(output_dir_saved) + '/bins/'\
            + bin_dir + '/' + os.path.basename(input_fasta_saved)
        options.output_dir = os.path.abspath(output_dir_saved) + '/bins/'\
            + bin_dir
        bin_dir_infix = '/bins/' + bin_dir + '/'
        bin_dir = os.path.abspath(options.output_dir) + '/bins/' + bin_dir + '/'
        #warning("Bin dir is: %s" % bin_dir)
        sam_output_location_dir = options.output_dir + '/sam/'
        sam_output_location = sam_output_location_dir + 'library.sam'
        step("RUNNING SAMTOOLS ON COVERAGE BIN " + coverages)
        bam_location, sorted_bam_location, pileup_file = \
            run_samtools(options, sam_output_location, with_pileup=False)
        #step("DEPTH OF COVERAGE")
        #error_files.append(run_depth_of_coverage(options, pileup_file))
        step("MATE-PAIR HAPPINESS ON COVERAGE BIN " + coverages)
        try:
            error_files.append(run_reapr(options, sorted_bam_location))
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider `except Exception`.
        except:
            e = sys.exc_info()[0]
            error("Reapr failed to run with: %s" % str(e))
    # Restore the values clobbered by the per-bin loop.
    options.output_dir = output_dir_saved
    options.fasta_file = input_fasta_saved
    step("SUMMARY")
    summary_file = open(options.output_dir + "/summary.gff", 'w')
    suspicious_file = open(options.output_dir + "/suspicious.gff", 'w')
    # NOTE(review): summary_table_file is opened but never written to or
    # closed here; generate_summary_table() below re-opens the same path.
    summary_table_file = open(options.output_dir + "/summary.tsv", 'w')
    #suspicious_table_file = open(options.output_dir + "/suspicious.tsv", 'w')
    misassemblies = []
    # Each detector wrote a tab-separated GFF-like file of flagged regions.
    # NOTE(review): the loop variable `line` shadows the helper line(), and
    # the opened handles are never closed.
    for error_file in error_files:
        if error_file:
            for line in open(error_file, 'r'):
                misassemblies.append(line.strip().split('\t'))
    # Sort misassemblies by start site.
    misassemblies.sort(key = lambda misassembly: (misassembly[0], int(misassembly[3]), int(misassembly[4])))
    final_misassemblies = []
    for misassembly in misassemblies:
        # Truncate starting/ending region if it is near the end of the contigs.
        if int(misassembly[3]) <= options.ignore_end_distances and \
           int(misassembly[4]) > options.ignore_end_distances:
            misassembly[3] = str(options.ignore_end_distances + 1)
        if int(misassembly[4]) >= (contig_lengths[misassembly[0]] - options.ignore_end_distances) and \
           int(misassembly[3]) < (contig_lengths[misassembly[0]] - options.ignore_end_distances):
            misassembly[4] = str(contig_lengths[misassembly[0]] - options.ignore_end_distances - 1)
        # Don't print a flagged region if it occurs near the ends of the contig.
        if int(misassembly[3]) > options.ignore_end_distances and \
           int(misassembly[4]) < (contig_lengths[misassembly[0]] - options.ignore_end_distances):
            summary_file.write('\t'.join(misassembly) + '\n')
            final_misassemblies.append(misassembly)
    summary_file.close()
    results(options.output_dir + "/summary.gff")
    #=====
    # Open Read Frame (ORF) filtering
    #===
    orf_filtered_misassemblies = []
    if options.orf_file:
        # NOTE(review): "-T ./" is passed as a single argv element, so sort
        # sees the temp-dir option with an embedded space; it should likely
        # be two elements ("-T", "./") -- TODO confirm.
        call_arr = ["sort", "-T ./", "-k1,1", "-k4,4n", options.orf_file, "-o", options.output_dir + "/" + options.orf_file + "_sorted"]
        out_cmd(FNULL.name, FNULL.name, call_arr)
        call(call_arr)
        call_arr = ["sort", "-T ./", "-k1,1", "-k4,4n", summary_file.name, "-o", summary_file.name+"_sorted"]
        out_cmd(FNULL.name, FNULL.name, call_arr)
        call(call_arr, stdout = FNULL, stderr = FNULL)
        call_arr = ["mv" , summary_file.name + "_sorted", summary_file.name]
        out_cmd(FNULL.name, FNULL.name, call_arr)
        call(call_arr, stdout = FNULL, stderr = FNULL)
        call_arr = ["mv" , options.output_dir + "/" + options.orf_file+"_sorted", options.orf_file]
        out_cmd(FNULL.name, FNULL.name, call_arr)
        call(call_arr, stdout = FNULL, stderr = FNULL)
        #We have been given an orf file, we should filter based on its contents
        orf_summary_file = open(options.output_dir + "/orf_filtered_summary.gff", 'w')
        summary_file = open(summary_file.name, 'r')
        orf_fp = open(options.orf_file, 'r')
        cur_orf = orf_fp.readline()
        split_cur_orf = cur_orf.split('\t')
        split_cur_orf[3],split_cur_orf[4] = int(split_cur_orf[3]),int(split_cur_orf[4])
        #Cycle through misassemblies
        # Merge-join between the sorted misassembly list and the sorted ORF
        # list: keep only misassemblies that overlap some ORF interval.
        for cur_missassembly in summary_file:
            split_mis = cur_missassembly.split('\t')
            split_mis[3],split_mis[4] = int(split_mis[3]), int(split_mis[4])
            while True:
                #Misassembly before any orfs contigs
                if not cur_orf or ( split_cur_orf[0] > split_mis[0] ):
                    break
                #Misassembly after current orf contig
                elif split_cur_orf[0] < split_mis[0]:
                    cur_orf = None
                    cur_orf = orf_fp.readline()
                    while cur_orf:
                        split_cur_orf = cur_orf.split('\t')
                        split_cur_orf[3],split_cur_orf[4] = int(split_cur_orf[3]),int(split_cur_orf[4])
                        if split_cur_orf[0] >= split_mis[0]:
                            break
                        cur_orf = orf_fp.readline()
                    if not cur_orf:
                        break
                #First and second again
                else:
                    #Perfect
                    ##DO WORK  #Break to move on
                    while True:
                        if not cur_orf:
                            break
                        #Done
                        elif split_mis[4] < split_cur_orf[3]:
                            break
                        # Advance error file to next line
                        # (any of the four interval-overlap cases)
                        elif ( split_mis[3] < split_cur_orf[3] and split_mis[4]<split_cur_orf[4] )\
                            or ( split_mis[3] >= split_cur_orf[3] and split_mis[4] <= split_cur_orf[4] )\
                            or ( split_mis[3] >= split_cur_orf[3] and split_mis[3] <= split_cur_orf[4] and split_mis[4] >= split_cur_orf[4] )\
                            or ( split_mis[3] < split_cur_orf[3] and split_mis[4] >= split_cur_orf[4] ):
                            orf_summary_file.write(cur_missassembly)
                            orf_filtered_misassemblies.append(cur_missassembly.strip().split('\t'))
                            break
                        #Error output
                        #Advance Error file
                        elif split_mis[3] > split_cur_orf[4]:
                            cur_orf = orf_fp.readline()
                            if cur_orf:
                                split_cur_orf = cur_orf.split('\t')
                                split_cur_orf[3],split_cur_orf[4] = int(split_cur_orf[3]),int(split_cur_orf[4])
                        #Advance orf file, reevaluate
                        else:
                            break
                    break
    # Find regions with multiple misassembly signatures.
    #suspicious_regions = find_suspicious_regions(misassemblies, options.min_suspicious_regions)
    suspicious_regions = find_sliding_suspicious_regions(final_misassemblies, options.suspicious_window_size, options.min_suspicious_regions)
    final_suspicious_misassemblies = []
    for region in suspicious_regions:
        #if int(region[3]) > options.ignore_end_distances and \
        #   int(region[4]) <= (contig_lengths[region[0]] - options.ignore_end_distances):
        if int(region[4]) > (contig_lengths[region[0]] - options.ignore_end_distances):
            region[4] = str(contig_lengths[region[0]] - options.ignore_end_distances)
        suspicious_file.write('\t'.join(region) + '\n')
        final_suspicious_misassemblies.append(region)
    results(options.output_dir + "/suspicious.gff")
    # Output summary table.
    generate_summary_table(options.output_dir + "/summary.tsv", all_contig_lengths, \
        contig_lengths, contig_abundances, final_misassemblies)
    results(options.output_dir + "/summary.tsv")
    if options.orf_file:
        generate_summary_table(options.output_dir + "/orf_summary.tsv", \
            all_contig_lengths, contig_lengths, contig_abundances, orf_filtered_misassemblies,orf=True)
        joined_summary_fp = open(options.output_dir + "/joined_summary.tsv", 'w')
        call_arr = ["join", "-a1" , "-o", "0", "1.2", "1.3", "1.4", "1.5", "1.6", "1.7", "1.8", "1.9", "1.10", "2.3","2.4","2.5","2.6","2.7","2.8","2.9","2.10", '-e', "0", '-1', '1', '-2', '1' , "-t", ' ', options.output_dir + "/summary.tsv", options.output_dir + "/orf_summary.tsv"]
        out_cmd(joined_summary_fp.name, FNULL.name, call_arr)
        call(call_arr, stdout = joined_summary_fp, stderr = FNULL)
    # Output suspicious table.
    #generate_summary_table(options.output_dir + "/suspicious.tsv", all_contig_lengths, \
    #    contig_lengths, final_suspicious_misassemblies)
    #results(options.output_dir + "/suspicious.tsv")
    if options.email:
        notify_complete(options.email,time.time()-start_time)
def get_options():
    """Parse the pipeline's command-line options with optparse.

    Returns the (options, args) pair.  Prints the help text and exits
    with status -1 when the required assembly FASTA (-a) is missing.

    NOTE(review): numeric-looking options such as --threads, --minins,
    --maxins, --mu, --sigma and --max-alignments are left as strings
    (no type=) -- callers must convert them; TODO confirm.
    """
    parser = OptionParser()
    parser.add_option("-a", "--assembly-fasta", dest="fasta_file", \
        help="Candidate assembly file", metavar="FILE")
    parser.add_option("-r", "--reads", dest="reads_filenames", \
        help="First Read File", metavar="FILE")
    parser.add_option("-1", "--1", dest="first_mates", \
        help="Fastq filenames separated by commas that contain the first mates.")
    parser.add_option("-2", "--2", dest="second_mates", \
        help="Fastq filenames separated by commas that contain the second mates.")
    parser.add_option("-c", "--coverage-file", dest="coverage_file", \
        help="Assembly created per-contig coverage file")
    parser.add_option("-o", "--output-dir", dest="output_dir", \
        help = "Output directory", default="data/output/")
    parser.add_option("-w", "--window-size", dest="window_size", \
        help = "Sliding window size when determining misassemblies.", default = "201")
    parser.add_option("-q", "--fastq", dest="fastq_file", \
        default=False, action='store_true', \
        help="if set, input reads are fastq format (fasta by default).")
    parser.add_option("-p", "--threads", dest="threads", \
        help = "Number of threads", default="8")
    parser.add_option("-I", "--minins", dest="min_insert_size", \
        help="Min insert sizes for mate pairs separated by commas.", default="0")
    parser.add_option("-X", "--maxins", dest="max_insert_size", \
        help="Max insert sizes for mate pairs separated by commas.", default="500")
    parser.add_option("-n", "--orientation", dest="orientation", default="fr", \
        help="Orientation of the mates.")
    parser.add_option("-m", "--mu" , dest="mu", default = "180", \
        help="average mate pair insert sizes.")
    parser.add_option("-t", "--sigma" , dest="sigma", default = "18", \
        help="standard deviation of mate pair insert sizes.")
    parser.add_option("-x", "--max-alignments", dest="max_alignments", default = "10000", \
        help="bowtie2 parameter to set the max number of alignments.")
    parser.add_option("-e", "--email", dest="email", \
        help="Email to notify when job completes")
    parser.add_option("-g", "--min-coverage", dest="min_coverage", type="int", default=0, \
        help="Minimum average coverage to run misassembly detection.")
    parser.add_option("-l", "--coverage-multiplier", dest="coverage_multiplier", type=float, default=0.0, \
        help="When binning by coverage, the new high = high + high * multiplier")
    parser.add_option("-s", "--min-suspicious", dest="min_suspicious_regions", default=2, type=int, \
        help="Minimum number of overlapping flagged miassemblies to mark region as suspicious.")
    parser.add_option("-d", "--suspicious-window-size", dest="suspicious_window_size", default=2000, type=int, \
        help="Mark region as suspicious if multiple signatures occur within this window size.")
    parser.add_option('-z', "--min-contig-length", dest="min_contig_length", default=1000, type=int, \
        help="Ignore contigs smaller than this length.")
    parser.add_option('-b', "--ignore-ends", dest="ignore_end_distances", default=0, type=int, \
        help="Ignore flagged regions within b bps from the ends of the contigs.")
    parser.add_option('-k', "--breakpoint-bin", dest="breakpoints_bin", default="50", type=str, \
        help="Bin sized used to find breakpoints.")
    parser.add_option('-f', "--orf-file", dest="orf_file", help="gff formatted file containing orfs")
    parser.add_option("--kmer", dest="kmer_length", help="kmer length used for abundance estimation", \
        default = "15")
    (options, args) = parser.parse_args()
    # Only the assembly file is strictly required; a missing coverage file
    # merely triggers on-the-fly coverage calculation in main().
    should_err = False
    if not options.fasta_file:
        warning("You need to provide a fasta file with -a")
        should_err = True
    #if not options.read_file_1:
    #    warning("You need to provide the first read file with -r")
    #    should_err = True
    #if not options.read_file_2:
    #    warning("You need to provide the second read file with -d")
    #    should_err = True
    if not options.coverage_file:
        warning("Coverage file not provided, will create one.")
        #should_err = True
    if should_err:
        parser.print_help()
        exit(-1)
    return (options,args)
def ran_command(st, fp):
    # Record the command string `st` to the already-open log file handle `fp`.
    fp.write(st)
def ensure_dir(f):
    """Make sure the directory portion of path *f* exists.

    Creates the directory (including intermediate directories) when it is
    missing.  Unlike the original exists()-then-makedirs() version, this is
    safe against a concurrent process creating the directory in between
    (the race made makedirs raise), and it no longer crashes with an
    OSError when *f* has no directory component (dirname == '').
    """
    d = os.path.dirname(f)
    if not d:
        # Bare filename: nothing to create.
        return
    try:
        os.makedirs(d)
    except OSError:
        # Already existed (or created concurrently); re-raise only if the
        # directory is genuinely still missing.
        if not os.path.isdir(d):
            raise
def notify_complete(target_email,t):
    # E-mail a completion notice (elapsed seconds t) via the local `mail`
    # command.
    # NOTE(review): target_email is interpolated into a shell=True command
    # string, so an address containing shell metacharacters would be
    # executed -- consider pipes.quote()/shlex.quote(); TODO confirm callers
    # only pass trusted addresses.
    call(['echo "Completed in %d" | mail -s "Job Completed" %s' % (t, target_email) ],shell=True)
def setup_shell_file():
    # Write the bash shebang header into the global command-log script
    # (shell_file_fp is opened by main() before this is called).
    if shell_file_fp:
        shell_file_fp.write("#!/bin/bash\n")
def line(x):
    """Emit a horizontal rule of `x` dash characters to stderr."""
    separator = "-" * x
    print(separator, file=sys.stderr)
def step(*objs):
    # Announce a new pipeline stage: 75-dash separator followed by a
    # colored "STEP:" banner, both on stderr.
    line(75)
    print(bcolors.HEADER + "STEP:\t" + bcolors.ENDC, *objs, file=sys.stderr)
def out_cmd(std_out = "", std_err = "", *objs):
    """Log a command before it is run.

    Appends the command (with optional '1>file' / '2>file' redirection
    annotations built from std_out/std_err) to the global shell-script log
    `shell_file_fp`, and echoes it to stderr with a colored "COMMAND:"
    prefix.  Callers pass the argv list as a single positional argument,
    so ' '.join(*objs) joins that list's elements.
    """
    #line(75)
    if shell_file_fp:
        if std_out != "":
            std_out_sht = " 1>%s " % (std_out)
        else:
            std_out_sht = ""
        if std_err != "":
            std_err_sht = " 2>%s " % (std_err)
        else:
            std_err_sht = ""
        shell_file_fp.write(' '.join(*objs) + std_out_sht + std_err_sht + "\n")
        shell_file_fp.flush()
    print(bcolors.OKBLUE + "COMMAND:\t" + bcolors.ENDC, ' '.join(*objs), file=sys.stderr)
def results(*objs):
    """Print a colored "RESULTS:" line (typically an output filename) to stderr."""
    print(bcolors.WARNING + "RESULTS:\t" + bcolors.ENDC,*objs, file=sys.stderr)
def warning(*objs):
    """Print an informational message to stderr (historically named 'warning', prints "INFO:")."""
    print("INFO:\t",*objs, file=sys.stderr)
def error(*objs):
    """Print an "ERROR:" line to stderr (uses the WARNING color from bcolors)."""
    print(bcolors.WARNING + "ERROR:\t" + bcolors.ENDC, *objs, file=sys.stderr)
def filter_short_contigs(options):
    """
    Filter out contigs less than a certain length.

    Writes contigs of length >= options.min_contig_length to
    <output_dir>/filtered_assembly.fasta and repoints options.fasta_file at
    that file (side effect).  Returns a dict of name -> length for ALL
    contigs, including the filtered-out short ones.
    """
    filtered_fasta_filename = options.output_dir + '/filtered_assembly.fasta'
    filtered_assembly_file = open(filtered_fasta_filename, 'w')
    all_contig_lengths = {}
    curr_length = 0
    with open(options.fasta_file,'r') as assembly:
        for contig in contig_reader(assembly):
            curr_length = len(''.join(contig['sequence']))
            if curr_length >= options.min_contig_length:
                filtered_assembly_file.write(contig['name'])
                filtered_assembly_file.writelines(contig['sequence'])
                filtered_assembly_file.write('\n')
            # Note: recorded for every contig, not just the kept ones.
            all_contig_lengths[contig['name'].strip()[1:]] = curr_length
    filtered_assembly_file.close()
    options.fasta_file = filtered_fasta_filename
    return all_contig_lengths
def get_contig_lengths(sam_filename):
    """
    Return a dictionary of contig names => contig lengths from a SAM file.

    Parses "@SQ ... SN:<name> LN:<length>" header lines and stops at the
    first non-header line.  Malformed @SQ lines are skipped (previously a
    failed regex search returned None and raised AttributeError).
    """
    # Raw string avoids invalid-escape warnings for \w, \s, \d.
    pattern = re.compile(r'SN:(?P<contig>[\w_\|\.\-]+)\s*LN:(?P<length>\d+)')
    contig_lengths = {}
    with open(sam_filename, 'r') as sam_file:
        for line in sam_file:
            if not line.startswith("@"):
                # End of the SAM header; alignments follow.
                break
            if line.startswith("@SQ"):
                matches = pattern.search(line)
                if matches:
                    contig_lengths[matches.group('contig')] = int(matches.group('length'))
    return contig_lengths
def get_contig_abundances(abundance_filename):
    """
    Return a dictionary of contig names => contig abundances from the '/coverage/temp.cvg'.

    Each data line is '<contig> <abundance>'; abundances are rounded to the
    nearest int.  The file is now closed deterministically and blank lines
    are skipped (previously they raised IndexError).
    """
    contig_abundances = {}
    with open(abundance_filename, 'r') as abundance_file:
        for line in abundance_file:
            contig_and_abundance = line.strip().split()
            if not contig_and_abundance:
                continue
            contig_abundances[contig_and_abundance[0]] = int(round(float(contig_and_abundance[1])))
    return contig_abundances
def find_sliding_suspicious_regions(misassemblies, sliding_window = 2000, min_cutoff = 2):
    """
    Output any region that has multiple misassembly signature types within the sliding window.

    For each misassembly (gff-style row: [contig, source, type, start, end, ...])
    collect the distinct signature TYPES of all later misassemblies on the same
    contig whose start falls within `sliding_window` bps of this one's end; if
    at least `min_cutoff` distinct types are seen, emit a SUSPICIOUS gff row.
    Overlapping emitted regions are merged at the end.
    """
    regions =[]
    for misassembly in misassemblies:
        regions.append([misassembly[0], misassembly[3], 'START', misassembly[2]])
        regions.append([misassembly[0], misassembly[4], 'END', misassembly[2]])
    # NOTE(review): `regions` is built and sorted but never used below —
    # looks like a leftover from the event-based variant; confirm.
    regions.sort(key = lambda region: (region[0], int(region[1])))
    """
    Example:
    relocref 36601 START Breakpoint_finder
    relocref 36801 END Breakpoint_finder
    relocref 67968 START REAPR
    relocref 68054 START REAPR
    relocref 69866 END REAPR
    relocref 69867 START REAPR
    relocref 71833 END REAPR
    relocref 73001 START Breakpoint_finder
    relocref 73201 END Breakpoint_finder
    """
    # Store all the signatures, starting, and ending points within a given window.
    #start_points = deque([])
    #end_points = deque([])
    #signatures = deque([])
    signatures = []
    curr_contig = None
    count = 0  # NOTE(review): incremented but never read; cutoff uses len(signatures).
    suspicious_regions = []
    for index in xrange(0, len(misassemblies)):
        curr_contig = misassemblies[index][0]
        count = 0
        second_index = index + 1
        signatures = [misassemblies[index][2]]
        start_point = int(misassemblies[index][3])
        # end_point starts padded by the window; it is pulled in/out below.
        end_point = int(misassemblies[index][4]) + sliding_window
        # While we are on the same contig, and still in the sliding window...
        while second_index < len(misassemblies) and \
                misassemblies[second_index][0] == curr_contig and \
                int(misassemblies[second_index][3]) < (int(misassemblies[index][4]) + sliding_window):
            if misassemblies[second_index][2] not in signatures:
                signatures.append(misassemblies[second_index][2])
                count += 1
            if int(misassemblies[second_index][4]) > end_point:
                end_point = int(misassemblies[second_index][4])
            second_index += 1
        if len(signatures) >= min_cutoff:
            suspicious_regions.append([misassemblies[index][0], '.', 'SUSPICIOUS', str(start_point), str(end_point), '.', '.', '.', 'color=#181009;' + ','.join(signatures)])
    # Hack to correct for overlapping suspicious regions.
    compressed_suspicious_regions = []
    prev_region = None
    for region in suspicious_regions:
        if prev_region is None:
            prev_region = region
        else:
            if prev_region[0] == region[0] and int(prev_region[4]) >= int(region[3]):
                # Same contig and overlapping: extend the previous region.
                prev_region[4] = region[4]
            else:
                compressed_suspicious_regions.append(prev_region)
                prev_region = region
    if prev_region:
        compressed_suspicious_regions.append(prev_region)
    return compressed_suspicious_regions
def find_suspicious_regions(misassemblies, min_cutoff = 2):
    """
    Given a list of misassemblies in gff format
    ([contig, source, type, start, end, ...]), report SUSPICIOUS regions
    wherever at least `min_cutoff` signature intervals overlap.

    Returns gff-like rows:
    [contig, '.', 'SUSPICIOUS', start, end, '.', '.', '.', 'color=#181009;<types>']
    where <types> lists only the signature types still open when the overlap
    depth dropped below the cutoff.  Overlapping output regions are merged.

    Fixes over the original: the no-op comparison `recording == True` is now
    an assignment, and the unused locals (min_start, start_indexes,
    signature_starts, curr_index, curr_length) are removed; behavior is
    unchanged because `recording` was already switched on by the
    depth-check at the end of the loop body.
    """
    # Decompose each interval into START/END events, sorted by (contig, pos).
    # list.sort is stable, so events at equal positions keep insertion order.
    regions = []
    for misassembly in misassemblies:
        regions.append([misassembly[0], misassembly[3], 'START', misassembly[2]])
        regions.append([misassembly[0], misassembly[4], 'END', misassembly[2]])
    regions.sort(key = lambda region: (region[0], int(region[1])))
    curr_contig = None
    start_region = 0
    end_index = 0
    signatures = []        # signature types currently open
    recording = False      # True while overlap depth >= min_cutoff
    curr_coverage = 0      # current overlap depth
    suspicious_regions = []
    for region in regions:
        if curr_contig is None:
            curr_contig = region[0]
            recording = False
        # New contig: reset the recording state.
        if region[0] != curr_contig:
            curr_contig = region[0]
            recording = False
        if region[2] == 'START':
            curr_coverage += 1
            if region[3] not in signatures:
                signatures.append(region[3])
            if curr_coverage == min_cutoff:
                # Depth just reached the cutoff: remember the region start.
                start_region = region[1]
                recording = True
        else:
            curr_coverage -= 1
            end_index = region[1]
            if region[3] in signatures:
                signatures.remove(region[3])
            # Depth dropped below the cutoff while recording: emit the region.
            if curr_coverage < min_cutoff and recording:
                suspicious_regions.append([region[0], '.', 'SUSPICIOUS', str(start_region), str(end_index), '.', '.', '.', 'color=#181009;' + ','.join(signatures)])
                signatures = []
                recording = False
        if curr_coverage >= min_cutoff:
            recording = True
    # Hack to correct for overlapping suspicious regions.
    compressed_suspicious_regions = []
    prev_region = None
    for region in suspicious_regions:
        if prev_region is None:
            prev_region = region
        elif prev_region[0] == region[0] and int(prev_region[4]) >= int(region[3]):
            prev_region[4] = region[4]
        else:
            compressed_suspicious_regions.append(prev_region)
            prev_region = region
    if prev_region:
        compressed_suspicious_regions.append(prev_region)
    return compressed_suspicious_regions
def generate_summary_table(table_filename, all_contig_lengths, filtered_contig_lengths, contig_abundances, misassemblies, orf=False):
    """
    Output the misassemblies in a table format:
    contig_name contig_length low_cov low_cov_bps high_cov high_cov_bps ...
    CONTIG1 12000 1 100 0 0 ...
    CONTIG2 100 NA NA NA ...

    `misassemblies` must be grouped by contig (rows for one contig must be
    contiguous) — counts are flushed whenever the contig name changes.
    Contigs with no misassemblies get all-zero rows; contigs filtered out
    before evaluation (present only in all_contig_lengths) get NA rows.
    NOTE(review): table_file is never closed — relies on interpreter exit.
    """
    table_file = open(table_filename, 'w')
    if orf:
        table_file.write("contig_name\tcontig_length\tabundance\torf_low_cov\torf_low_cov_bps\torf_high_cov\torf_high_cov_bps\torf_reapr\torf_reapr_bps\torf_breakpoints\torf_breakpoints_bps\n")
    else:
        table_file.write("contig_name\tcontig_length\tabundance\tlow_cov\tlow_cov_bps\thigh_cov\thigh_cov_bps\treapr\treapr_bps\tbreakpoints\tbreakpoints_bps\n")
    prev_contig = None
    curr_contig = None
    # Misassembly signatures (running counts / bp totals for the current contig).
    low_coverage = 0
    low_coverage_bps = 0
    high_coverage = 0
    high_coverage_bps = 0
    reapr = 0
    reapr_bps = 0
    breakpoints = 0
    breakpoints_bps = 0
    processed_contigs = set()
    for misassembly in misassemblies:
        """
        contig00001     REAPR   Read_orientation        88920   97033   .       .       .       Note=Warning: Bad read orientation;colour=1
        contig00001     REAPR   FCD     89074   90927   0.546142        .       .       Note=Error: FCD failure;colour=17
        contig00001     DEPTH_COV       low_coverage    90818   95238   29.500000       .       .       low=30.000000;high=70.000000;color=#7800ef
        """
        curr_contig = misassembly[0]
        if prev_contig is None:
            prev_contig = curr_contig
        if curr_contig != prev_contig:
            # Output previous contig stats.
            table_file.write(prev_contig + '\t' + str(filtered_contig_lengths[prev_contig]) + '\t' + str(contig_abundances[prev_contig]) + '\t' + \
                    str(low_coverage) + '\t' + str(low_coverage_bps) + '\t' + str(high_coverage) + '\t' + \
                    str(high_coverage_bps) + '\t' + str(reapr) + '\t' + str(reapr_bps) + '\t' + str(breakpoints) + '\t' + \
                    str(breakpoints_bps) + '\n')
            processed_contigs.add(prev_contig)
            # Reset misassembly signature counts.
            low_coverage = 0
            low_coverage_bps = 0
            high_coverage = 0
            high_coverage_bps = 0
            reapr = 0
            reapr_bps = 0
            breakpoints = 0
            breakpoints_bps = 0
            prev_contig = curr_contig
        # Process the current contig misassembly.  Intervals are inclusive,
        # hence the +1 on the bp spans.
        if misassembly[1] == 'REAPR':
            # REAPR 'Warning' annotations are informational only; skip them.
            if 'Warning' not in misassembly[8]:
                reapr += 1
                reapr_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
        elif misassembly[1] == 'DEPTH_COV':
            if misassembly[2] == 'Low_coverage':
                low_coverage += 1
                low_coverage_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
            else:
                high_coverage += 1
                high_coverage_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
        elif misassembly[1] == 'Breakpoint_finder':
            breakpoints += 1
            breakpoints_bps += (int(misassembly[4]) - int(misassembly[3]) + 1)
        else:
            print("Unhandled error: " + misassembly[1])
    if prev_contig:
        # Output previous contig stats.
        table_file.write(prev_contig + '\t' + str(filtered_contig_lengths[prev_contig]) + '\t' + str(contig_abundances[prev_contig]) + '\t' + \
                str(low_coverage) + '\t' + str(low_coverage_bps) + '\t' + str(high_coverage) + '\t' + \
                str(high_coverage_bps) + '\t' + str(reapr) + '\t' + str(reapr_bps) + '\t' + str(breakpoints) + '\t' + \
                str(breakpoints_bps) + '\n')
        processed_contigs.add(prev_contig)
    # We need to add the remaining, error-free contigs.
    for contig in filtered_contig_lengths:
        if contig not in processed_contigs:
            table_file.write(contig + '\t' + str(filtered_contig_lengths[contig]) + '\t' + str(contig_abundances[contig]) + '\t' + \
                    '0\t0\t0\t0\t0\t0\t0\t0\n')
            processed_contigs.add(contig)
    # Finally, add the contigs that were filtered out prior to evaluation.
    for contig in all_contig_lengths:
        if contig not in processed_contigs:
            table_file.write(contig + '\t' + str(all_contig_lengths[contig]) + '\t' + 'NA\t' + \
                    'NA\tNA\tNA\tNA\tNA\tNA\tNA\tNA\n')
            processed_contigs.add(contig)
def calculate_contig_coverage(options, pileup_file):
    """
    Calculate contig coverage. The coverage of a contig is the mean per-bp coverage.

    Reads a samtools pileup (columns: contig, pos, ref base, depth, ...),
    writes '<contig>\\t<mean coverage>' lines to
    <options.output_dir>/coverage/temp.cvg and returns that filename.
    Both files are now closed via context managers (the pileup handle was
    previously leaked).
    """
    coverage_filename = options.output_dir + '/coverage/temp.cvg'
    with open(coverage_filename, 'w') as coverage_file:
        prev_contig = None
        length = 0
        curr_coverage = 0
        with open(pileup_file, 'r') as pileup:
            for record in pileup:
                fields = record.strip().split()
                if prev_contig != fields[0]:
                    # Flush the finished contig before starting the next one.
                    if prev_contig:
                        coverage_file.write(prev_contig + '\t' + str(float(curr_coverage) / length) + '\n')
                    prev_contig = fields[0]
                    length = 0
                    curr_coverage = 0
                curr_coverage += int(fields[3])
                length += 1
        # Flush the final contig.
        if prev_contig:
            coverage_file.write(prev_contig + '\t' + str(float(curr_coverage) / length) + '\n')
    return coverage_filename
def build_bowtie2_index(index_name, reads_file):
    """
    Build a Bowtie2 index.

    Runs the bundled bowtie2-build on *reads_file* (despite the name, this is
    the FASTA to index), writing index files under *index_name*.  Output is
    discarded into the global FNULL handle.  Returns index_name.
    """
    command = os.path.join(base_path, "bin/bowtie2-2.2.2/bowtie2-build ") + os.path.abspath(reads_file) + " " + os.path.abspath(index_name)
    # Bad workaround.
    out_cmd(FNULL.name, FNULL.name, [command])
    bowtie2_build_proc = subprocess.Popen(command, shell = True, stdout = FNULL, stderr = FNULL)
    # communicate() already waits; the extra wait() is a harmless no-op.
    bowtie_output, err = bowtie2_build_proc.communicate()
    bowtie2_build_proc.wait()
    return index_name
def run_bowtie2(options = None, output_sam = 'temp.sam'):
    """
    Run Bowtie2 with the given options and save the SAM file.

    Builds an index for options.fasta_file, aligns the reads (paired-end when
    options.first_mates is set, single-end otherwise) into *output_sam*, and
    runs a second unpaired pass to collect unaligned reads.  Returns the
    directory holding the unaligned reads.

    Fixes: the `if not options` guard now runs BEFORE options is
    dereferenced (it was previously unreachable for options=None); the
    os.mkdirs typo (silently swallowed by a bare except) is corrected to
    os.makedirs; the mkstemp fd is closed; the unused shlex.split results
    are dropped.
    """
    if not options:
        sys.stderr.write("[ERROR] No Bowtie2 options specified" + '\n')
        return
    # Using bowtie2.
    # Create the bowtie2 index if it wasn't given as input.
    if not os.path.exists(os.path.abspath(options.output_dir) + '/indexes'):
        os.makedirs(os.path.abspath(options.output_dir) + '/indexes')
    fd, index_path = mkstemp(prefix='temp_',\
            dir=(os.path.abspath(options.output_dir) + '/indexes/'))
    os.close(fd)  # mkstemp opens the file; we only need the unique path.
    try:
        # Was os.mkdirs (nonexistent attribute) hidden by the bare except;
        # the directory normally already exists, so EEXIST is expected.
        os.makedirs(os.path.dirname(index_path))
    except OSError:
        pass
    fasta_file = options.fasta_file
    build_bowtie2_index(os.path.abspath(index_path), os.path.abspath(fasta_file))
    assembly_index = os.path.abspath(index_path)
    unaligned_dir = os.path.abspath(options.output_dir) + '/unaligned_reads/'
    ensure_dir(unaligned_dir)
    unaligned_file = unaligned_dir + 'unaligned.reads'
    # Default to FASTA input; switch to FASTQ when requested.
    read_type = " -f "
    if options.fastq_file:
        read_type = " -q "
    bowtie2_args = ""
    bowtie2_unaligned_check_args = ""
    if options.first_mates:
        bowtie2_args = "-a -x " + assembly_index + " -1 " + options.first_mates\
                + " -2 " + options.second_mates + " -p " + options.threads\
                + " --very-sensitive -a " + " --reorder --"\
                + options.orientation + " -I " + options.min_insert_size\
                + " -X " + options.max_insert_size + " --no-mixed" #+ " --un-conc "\
                #+ unaligned_file
        bowtie2_unaligned_check_args = "-a -x " + assembly_index + read_type + " -U "\
                + options.first_mates + "," + options.second_mates + " --very-sensitive -a "\
                + " --reorder -p " + options.threads + " --un " + unaligned_file
    else:
        bowtie2_args = "-a -x " + assembly_index + read_type + " -U "\
                + options.reads_filenames + " --very-sensitive -a "\
                + " --reorder -p " + options.threads + " --un " + unaligned_file
    # Using bowtie 2.
    command = os.path.join(base_path, "bin/bowtie2-2.2.2/bowtie2 ") + bowtie2_args + " -S " + output_sam
    out_cmd( FNULL.name, FNULL.name,[command])
    bowtie_proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=FNULL)
    bowtie_output, err = bowtie_proc.communicate()
    if bowtie2_unaligned_check_args != "":
        # Second pass (unpaired) purely to capture unaligned reads via --un.
        command = os.path.join(base_path, "bin/bowtie2-2.2.2/bowtie2 ") + bowtie2_unaligned_check_args + " -S " + output_sam + "_2.sam"
        out_cmd( FNULL.name, FNULL.name, [command])
        bowtie_proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=FNULL)
        bowtie_output, err = bowtie_proc.communicate()
    return unaligned_dir
def run_breakpoint_finder(options,unaligned,breakpoint_dir):
    '''
    attempts to find breakpoints

    Two stages: breakpoint_splitter.py splits the unaligned reads into
    <breakpoint_dir>/split_reads/, then breakpoint_finder.py aligns the
    splits against the assembly to locate candidate breakpoints.
    Returns the path to the resulting interesting_bins.gff.
    '''
    std_err_file = open(breakpoint_dir + 'splitter_std_err.log', 'w')
    call_arr = [os.path.join(base_path,'src/py/breakpoint_splitter.py'),\
            '-u', unaligned,\
            '-o', breakpoint_dir + 'split_reads/']
    out_cmd( "", std_err_file.name, call_arr)
    call(call_arr, stderr=std_err_file)
    std_err_file.close()
    # Stage 2: locate breakpoints from the split reads.
    std_err_file = open(breakpoint_dir + 'std_err.log','w')
    call_arr = [os.path.join(base_path, 'src/py/breakpoint_finder.py'),\
            '-a', options.fasta_file,\
            '-r', breakpoint_dir + 'split_reads/',\
            '-b', options.breakpoints_bin, '-o', breakpoint_dir,\
            '-c', options.coverage_file,\
            '-p', options.threads]
    out_cmd( "", std_err_file.name,call_arr)
    call(call_arr,stderr=std_err_file)
    results(breakpoint_dir + 'interesting_bins.gff')
    return breakpoint_dir + 'interesting_bins.gff'
def split_sam_by_bin(sam_output_location, contig_to_bin_map, bin_dir_dict):
    """Split one SAM file into per-bin SAM files under each bin's sam/ directory.

    Shared header lines (@HD/@PG/@CO/@RG) are copied to every bin; @SQ lines
    and alignments go only to the bin owning their contig.  Unmapped records
    (RNAME '*') and records for unbinned contigs are dropped.

    NOTE(review): if a bin's directory is missing, only an error is logged but
    output_fp[bin] is never created — a later write for that bin would raise
    KeyError.  Also line.split() is recomputed several times per header line.
    """
    common_header = ""
    output_bin = {}
    output_fp = {}
    for bin in set(contig_to_bin_map.values()):
        output_bin[bin] = ""
        bin_dir = bin_dir_dict[bin]
        if os.path.exists(bin_dir):
            ensure_dir(bin_dir + "sam/")
            output_fp[bin] = open(bin_dir + "sam/"\
                    + os.path.basename(sam_output_location), 'w')
        else:
            error("Bin dir did not exist")
            error("%s" % (str(bin_dir_dict)))
    with open(sam_output_location, 'r') as sam_file:
        for line in sam_file:
            if line.split()[0] == "@HD" or line.split()[0] == "@PG"\
                    or line.split()[0] == "@CO" or line.split()[0] == "@RG":
                # Generic header lines are shared by every bin.
                for fp in output_fp.values():
                    fp.write(line)
            elif line.split()[0] == "@SQ":
                # TODO: Clean up.
                # @SQ field 1 is 'SN:<contig>'; route to the owning bin.
                if line.split()[1].split(':')[1] in contig_to_bin_map:
                    bin = contig_to_bin_map[line.split()[1].split(':')[1]]
                    output_fp[bin].write(line)
            else:
                line_split = line.split('\t')
                # Column 2 (RNAME) '*' means unmapped: drop the record.
                if line_split[2] == '*':
                    pass
                else:
                    # TODO: Clean up.
                    if line_split[2] in contig_to_bin_map:
                        bin = contig_to_bin_map[line_split[2]]
                        output_fp[bin].write(line)
def increment_coverage_window(options, low, high):
    """ Slide the coverage-bin window upward.

    The new window starts where the old one ended and its upper bound grows
    by options.coverage_multiplier, with a guaranteed minimum growth of 1 so
    the binning loop always makes progress.
    """
    prev_high = high
    new_low = prev_high
    new_high = int(prev_high + prev_high * options.coverage_multiplier)
    if new_high == prev_high:
        new_high = new_high + 1
    #warning("Incremented coverage window to: %d -~- %d" % (new_low, new_high))
    return new_low, new_high
def bin_coverage(options, bin_dir):
    """Partition contigs into geometric coverage bins and write one FASTA per bin.

    Contigs below options.min_coverage are dropped.  Bin windows start at
    min_coverage and grow via increment_coverage_window().  Each bin gets a
    directory '<low>x-<high>x/' containing a copy of the coverage file and
    the bin's contigs.  File handles are throttled to file_limit/2 at a time,
    re-reading the assembly once per batch.  Returns (contig_to_bin_map,
    bin_dir_dict).

    NOTE(review): Python 2 only (dict.iteritems, dict.keys() slicing).
    NOTE(review): `with fp_dict[bin] as bin_file:` closes the handle after the
    FIRST contig written to that bin — later contigs for the same bin fall
    through the `.closed` check and are silently skipped.
    NOTE(review): `opened_fp_count += 1` below is a NameError (the counter is
    named open_fp_count); only reachable when handles are throttled.
    """
    contig_to_coverage_map = {}
    contig_to_bin_map = {}
    bin_to_name_map = {}
    with open(options.coverage_file,'r') as coverage_file:
        for line in coverage_file:
            split_line = line.split()
            if float(split_line[1]) >= options.min_coverage:
                # Only store contigs who are above minimum avg coverage.
                contig_to_coverage_map[split_line[0]] = float(split_line[1])
            else:
                warning("Not binning contig: %s due to lower than minimum coverage %f"\
                        % (split_line[0], options.min_coverage))
    max_cvg = max(contig_to_coverage_map.values())
    high = int(options.min_coverage + options.min_coverage * .1)
    if high <= options.min_coverage:
        high = high + 1
    low = options.min_coverage
    curr_bin = 0
    bins = []
    # Sweep windows upward until every retained contig has a bin.
    while len(contig_to_bin_map) < len(contig_to_coverage_map):
        slice_dict = {k: v for k,v in contig_to_coverage_map.iteritems() if low<=v and high>v}
        for contig in slice_dict.keys():
            contig_to_bin_map[contig] = curr_bin
        bin_to_name_map[curr_bin] = (low, high)
        low, high = increment_coverage_window(options, low, high)
        curr_bin += 1
    bin_set = set(contig_to_bin_map.values())
    fp_dict = {}
    bin_dir_dict = {}
    open_fp_count = 0
    unopened_fp = {}
    processed_file_names = {}
    for bin in bin_set:
        #a_new_bin = bin_dir + "bin" + str(bin) + "/"
        a_new_bin = bin_dir + str(bin_to_name_map[bin][0]) + "x-" + str(bin_to_name_map[bin][1]) + "x/"
        bin_dir_dict[bin] = a_new_bin
        ensure_dir(a_new_bin)
        shutil.copy(options.coverage_file, a_new_bin +\
                os.path.basename(options.coverage_file))
        # Throttle simultaneously open handles to half the fd limit.
        if open_fp_count < (file_limit/2):
            fp_dict[bin] = open(a_new_bin + os.path.basename(options.fasta_file),'w')
            open_fp_count += 1
        else:
            unopened_fp[bin] = a_new_bin + os.path.basename(options.fasta_file)
            #fp_dict[bin].close()
            #fp_dict[bin] = a_new_bin + os.path.basename(options.fasta_file)
    warning("Contig to bin map is: %s" %(str(contig_to_bin_map)))
    # Each pass re-reads the assembly and serves the currently open handles.
    while True:
        with open(options.fasta_file,'r') as assembly:
            for contig in contig_reader(assembly):
                # TODO: Clean up.
                if contig['name'][1:].strip() in contig_to_bin_map:
                    bin = contig_to_bin_map[contig['name'][1:].strip()]
                    if bin in fp_dict.keys() and not fp_dict[bin].closed:
                        with fp_dict[bin] as bin_file:
                            bin_file.write(contig['name'])
                            bin_file.writelines(contig['sequence'])
                else:
                    warning("Throwing away contig: %s due to not being in contig_to_bin_map" % (contig['name'][1:].strip()))
        temp_key_list = fp_dict.keys()[:]
        for bin in temp_key_list:
            fp_dict[bin].close()
            open_fp_count -= 1
            processed_file_names[bin] = fp_dict[bin]
            del fp_dict[bin]
        if len(unopened_fp.keys()) == 0:
            break
        temp_key_list = unopened_fp.keys()[:]
        for bin in temp_key_list:
            if open_fp_count < (file_limit /2 ):
                fp_dict[bin] = open(unopened_fp[bin],'w')
                del unopened_fp[bin]
                opened_fp_count += 1
            else:
                break
    # Drop bin directories whose FASTA ended up (nearly) empty.
    for fp in processed_file_names.values():
        name = fp.name
        if os.stat(name).st_size <= 10:
            warning("Would have removed tree: %s for file: %s" % (os.path.dirname(name), name))
            shutil.rmtree(os.path.dirname(name))
    return contig_to_bin_map,bin_dir_dict
def contig_reader(fasta_file):
    """Yield {'name': '>id\\n', 'sequence': [line, ...]} for each FASTA record.

    *fasta_file* is an open file-like object.  Only the first
    whitespace-delimited token of each header is kept as the name; sequence
    lines are stripped of surrounding whitespace.
    """
    current = {}
    started = False
    for raw in fasta_file:
        if raw[0] == '>':
            header = raw.split()[0].strip() + "\n"
            if started:
                finished = current
                current = {'name': header, 'sequence': []}
                yield finished
            else:
                current = {'name': header, 'sequence': []}
                started = True
        else:
            current['sequence'].append(raw.strip())
    # Emit the final (or only) record.
    yield current
def run_lap(options, sam_output_location, reads_trimmed_location):
    """ Calculate the LAP using the previously computed SAM file.

    Runs lap's calc_prob.py (paired or unpaired depending on
    options.first_mates) into <output_dir>/lap/output.prob, then sum_prob.py
    into output.sum.  reads_trimmed_location is currently unused here.
    """
    output_probs_dir = options.output_dir + "/lap/"
    ensure_dir(output_probs_dir)
    output_probs_location = output_probs_dir + "output.prob"
    fp = open(output_probs_location, "w")
    reads = [options.reads_filenames]
    if options.first_mates:
        reads = [options.first_mates, options.second_mates]
    call_arr = []
    if options.first_mates:
        call_arr = [os.path.join(base_path, "bin/lap/aligner/calc_prob.py"), "-a", options.fasta_file, "-s", sam_output_location, "-q", "-1", options.first_mates, "-2", options.second_mates, "-n", options.coverage_file, '-o', options.orientation, "-I", options.min_insert_size, "-X", options.max_insert_size, '-p', options.threads]
    else:
        call_arr = [os.path.join(base_path, "bin/lap/aligner/calc_prob.py"), "-a", options.fasta_file, "-s", sam_output_location, "-q", "-i", ','.join(reads), "-n", options.coverage_file, '-p', options.threads]
    out_cmd(fp.name, "", call_arr)
    #warning("That command outputs to: ", output_probs_location)
    results(output_probs_location)
    call(call_arr, stdout=fp)
    # Aggregate the per-read probabilities into a single score.
    output_sum_probs_location = output_probs_dir + "output.sum"
    call_arr = [os.path.join(base_path, "bin/lap/aligner/sum_prob.py"), "-i", output_probs_location, "-t", "1e-80"]
    out_cmd( output_sum_probs_location, "", call_arr)
    call(call_arr, stdout=open(output_sum_probs_location,'w'))
    results(output_sum_probs_location)
def run_samtools(options, sam_output_location, with_pileup = True, index=False):
    """ Takes a sam file and runs samtools to create bam, sorted bam, and mpileup.

    Returns (bam_location, sorted_bam_location, pileup_file).  Note that
    sorted_bam_location is the sort PREFIX; the actual file is
    sorted_bam_location + '.bam'.  The pileup path is returned even when
    with_pileup is False (the file is then merely empty).
    """
    bam_dir = options.output_dir + "/bam/"
    ensure_dir(bam_dir)
    bam_location = bam_dir + "library.bam"
    sorted_bam_location = bam_dir + "sorted_library"
    bam_fp = open(bam_location, 'w+')
    error_file_location = bam_dir + "error.log"
    error_fp = open(error_file_location, 'w+')
    #warning("About to run samtools view to create bam")
    call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "view", "-bS", sam_output_location]
    out_cmd(bam_fp.name, error_fp.name, call_arr)
    #warning("That command outputs to file: ", bam_location)
    call(call_arr, stdout = bam_fp, stderr = error_fp)
    #warning("About to attempt to sort bam")
    call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "sort", bam_location, sorted_bam_location]
    out_cmd( "", FNULL.name, call_arr)
    call(call_arr, stderr = FNULL)
    coverage_file_dir = options.output_dir + "/coverage/"
    ensure_dir(coverage_file_dir)
    pileup_file = coverage_file_dir + "mpileup_output.out"
    p_fp = open(pileup_file, 'w')
    if with_pileup:
        # -A keeps anomalous read pairs in the pileup.
        call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "mpileup", "-A", "-f", options.fasta_file, sorted_bam_location + ".bam"]
        out_cmd(p_fp.name, FNULL.name, call_arr)
        results(pileup_file)
        #warning("That command outputs to file: ", pileup_file)
        call(call_arr, stdout = p_fp, stderr = FNULL)
    if index:
        call_arr = [os.path.join(base_path, "bin/Reapr_1.0.17/src/samtools"), "index", sorted_bam_location + ".bam"]
        out_cmd(FNULL.name, FNULL.name, call_arr)
        call(call_arr, stdout = FNULL, stderr = FNULL)
    return (bam_location, sorted_bam_location, pileup_file)
def run_split_pileup(options, pileup_file):
    """ Split the pileup file into a number of chunks (one per worker thread). """
    call_arr = [os.path.join(base_path, "src/py/split_pileup.py"), "-p", pileup_file, "-c", options.threads]
    out_cmd("","",call_arr)
    call(call_arr)
def run_abundance_by_kmers(options):
    """ Pileup based on k-mer abundances.

    Runs abundance_by_kmers.py, capturing its stdout into
    <output_dir>/coverage/temp_kmer.cvg.  Side effects: repoints
    options.coverage_file at the k-mer coverage file and sets
    options.kmer_pileup_file, which is also the return value.
    """
    coverage_filename = options.output_dir + '/coverage/temp_kmer.cvg'
    coverage_file = open(coverage_filename, 'w')
    options.kmer_pileup_file = options.output_dir + "/coverage/kmer_pileup"
    options.coverage_file = options.output_dir + '/coverage/temp_kmer.cvg'
    # ./src/py/abundance_by_kmers.py -a test/test_kmer_abun.fna -r test/test_kmers_abun_lib.fastq -k 15 -t 4 -e .98 -p tmp_kmer_abun_15_30 -m 30
    call_arr = [os.path.join(base_path, "src/py/abundance_by_kmers.py"), \
            "-a", options.fasta_file,\
            "-r", options.reads_filenames,\
            "-k", options.kmer_length,\
            "-t", options.threads,\
            "-e", ".98",
            "-p", options.kmer_pileup_file]
    out_cmd("","",call_arr)
    call(call_arr, stdout=coverage_file)
    return options.kmer_pileup_file
def run_depth_of_coverage(options, pileup_file):
    """ Run depth of coverage.

    Invokes depth_of_coverage.py on the pileup and returns the path of the
    resulting errors_cov.gff (low/high coverage flagged regions).
    """
    dp_fp = options.output_dir + "/coverage/errors_cov.gff"
    abundance_file = options.coverage_file
    #call_arr = ["src/py/depth_of_coverage.py", "-a", abundance_file, "-m", pileup_file, "-w", options.window_size, "-o", dp_fp, "-g", "-e"]
    call_arr = [os.path.join(base_path, "src/py/depth_of_coverage.py"), "-m", pileup_file, "-w", options.window_size, "-o", dp_fp, "-g", "-e", "-c", options.threads]
    out_cmd("","",call_arr)
    call(call_arr)
    results(dp_fp)
    return dp_fp
def run_reapr(options, sorted_bam_location):
    """ Run REAPR.

    Runs 'reapr facheck' then the full 'reapr pipeline' on the sorted BAM,
    gunzips the score gff, and returns its path (or None if REAPR produced
    no output).
    """
    reapr_command = os.path.join(base_path, "bin/Reapr_1.0.17/reapr")
    #warning("About to run facheck")
    call_arr = [reapr_command, "facheck", options.fasta_file ]
    out_cmd("","",call_arr)
    call(call_arr)
    reapr_output_dir = options.output_dir + "/reapr"
    reapr_perfect_prefix = options.output_dir + "/r_perfect_prefix"
    #warning("About to run reapr pipeline")
    call_arr = [reapr_command, "pipeline", options.fasta_file,\
            sorted_bam_location + ".bam", reapr_output_dir]
    out_cmd(FNULL.name, FNULL.name, call_arr)
    call(call_arr, stdout=FNULL, stderr=FNULL)
    # REAPR writes its error calls gzipped; unpack them in place.
    call_arr = ["gunzip", reapr_output_dir + "/03.score.errors.gff"]
    out_cmd(FNULL.name, FNULL.name, call_arr)
    call(call_arr, stdout=FNULL, stderr=FNULL)
    if os.path.exists(reapr_output_dir + "/03.score.errors.gff"):
        return reapr_output_dir + "/03.score.errors.gff"
    else:
        return None
# Script entry point: run the full pipeline when executed directly.
if __name__ == '__main__':
    main()
| cmhill/VALET | src/py/pipeline.py | Python | mit | 48,961 | [
"Bowtie"
] | 263ac5cff0304215eadf4c400d9747acee8d528abbd15ca4c714d7020cb557db |
""" The mind is a service the distributes "task" to executors
"""
import types
import pprint
from DIRAC import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, isReturnStructure
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.ExecutorDispatcher import ExecutorDispatcher, ExecutorDispatcherCallbacks
class ExecutorMindHandler( RequestHandler ):
MSG_DEFINITIONS = { 'ProcessTask' : { 'taskId' : ( types.IntType, types.LongType ),
'taskStub' : types.StringTypes,
'eType' : types.StringTypes },
'TaskDone' : { 'taskId' : ( types.IntType, types.LongType ),
'taskStub' : types.StringTypes },
'TaskFreeze' : { 'taskId' : ( types.IntType, types.LongType ),
'taskStub' : types.StringTypes,
'freezeTime' : ( types.IntType, types.LongType ) },
'TaskError' : { 'taskId': ( types.IntType, types.LongType ),
'errorMsg' : types.StringTypes,
'taskStub' : types.StringTypes,
'eType' : types.StringTypes},
'ExecutorError' : { 'taskId': ( types.IntType, types.LongType ),
'errorMsg' : types.StringTypes,
'eType' : types.StringTypes } }
  class MindCallbacks( ExecutorDispatcherCallbacks ):
    """Adapter binding the ExecutorDispatcher callback interface to the
    handler's class methods, supplied as plain callables at construction.

    The cb* method names (including the 'cbDisconectExecutor' spelling)
    follow the ExecutorDispatcherCallbacks base class and must not change.
    """
    def __init__( self, sendTaskCB, dispatchCB, disconnectCB, taskProcCB, taskFreezeCB, taskErrCB ):
      self.__sendTaskCB = sendTaskCB
      self.__dispatchCB = dispatchCB
      self.__disconnectCB = disconnectCB
      # NOTE(review): attribute name says 'DB' but holds the task-processed callback.
      self.__taskProcDB = taskProcCB
      self.__taskFreezeCB = taskFreezeCB
      self.__taskErrCB = taskErrCB
      self.__allowedClients = []
    def cbSendTask( self, taskId, taskObj, eId, eType ):
      return self.__sendTaskCB( taskId, taskObj, eId, eType )
    def cbDispatch( self, taskId, taskObj, pathExecuted ):
      return self.__dispatchCB( taskId, taskObj, pathExecuted )
    def cbDisconectExecutor( self, eId ):
      return self.__disconnectCB( eId )
    def cbTaskError( self, taskId, taskObj, errorMsg ):
      return self.__taskErrCB( taskId, taskObj, errorMsg )
    def cbTaskProcessed( self, taskId, taskObj, eType ):
      return self.__taskProcDB( taskId, taskObj, eType )
    def cbTaskFreeze( self, taskId, taskObj, eType ):
      return self.__taskFreezeCB( taskId, taskObj, eType )
  @classmethod
  def initializeHandler( cls, serviceInfoDict ):
    """Service init hook: create the ExecutorDispatcher, wire up the
    callback adapter, and (in VERBOSE mode) schedule a periodic dump of the
    dispatcher's internal state every 10 seconds."""
    gLogger.notice( "Initializing Executor dispatcher" )
    cls.__eDispatch = ExecutorDispatcher( cls.srv_getMonitor() )
    cls.__callbacks = ExecutorMindHandler.MindCallbacks( cls.__sendTask,
                                                         cls.exec_dispatch,
                                                         cls.__execDisconnected,
                                                         cls.exec_taskProcessed,
                                                         cls.exec_taskFreeze,
                                                         cls.exec_taskError )
    cls.__eDispatch.setCallbacks( cls.__callbacks )
    cls.__allowedClients = []
    if cls.log.shown( "VERBOSE" ):
      gThreadScheduler.setMinValidPeriod( 1 )
      gThreadScheduler.addPeriodicTask( 10, lambda: cls.log.verbose( "== Internal state ==\n%s\n===========" % pprint.pformat( cls.__eDispatch._internals() ) ) )
    return S_OK()
@classmethod
def setAllowedClients( cls, aClients ):
if not isinstance( aClients, (list, tuple) ):
aClients = ( aClients, )
cls.__allowedClients = aClients
@classmethod
def __sendTask( self, taskId, taskObj, eId, eType ):
try:
result = self.exec_prepareToSend( taskId, taskObj, eId )
if not result[ 'OK' ]:
return result
except Exception as excp:
gLogger.exception( "Exception while executing prepareToSend: %s" % str( excp ), lException = excp )
return S_ERROR( "Cannot presend task" )
try:
result = self.exec_serializeTask( taskObj )
except Exception as excp:
gLogger.exception( "Exception while serializing task %s" % taskId, lException = excp )
return S_ERROR( "Cannot serialize task %s: %s" % ( taskId, str( excp ) ) )
if not isReturnStructure( result ):
raise Exception( "exec_serializeTask does not return a return structure" )
if not result[ 'OK' ]:
return result
taskStub = result[ 'Value' ]
result = self.srv_msgCreate( "ProcessTask" )
if not result[ 'OK' ]:
return result
msgObj = result[ 'Value' ]
msgObj.taskId = taskId
msgObj.taskStub = taskStub
msgObj.eType = eType
return self.srv_msgSend( eId, msgObj )
@classmethod
def __execDisconnected( cls, trid ):
result = cls.srv_disconnectClient( trid )
if not result[ 'OK' ]:
return result
return cls.exec_executorDisconnected( trid )
auth_conn_new = [ 'all' ]
def conn_new( self, trid, identity, kwargs ):
if 'executorTypes' in kwargs and kwargs[ 'executorTypes' ]:
return S_OK()
for aClient in self.__allowedClients:
if aClient in kwargs and kwargs[ aClient ]:
return S_OK()
return S_ERROR( "Only executors are allowed to connect" )
  auth_conn_connected = [ 'all' ]
  def conn_connected( self, trid, identity, kwargs ):
    """Register a freshly connected executor with the dispatcher (allowed
    non-executor clients pass straight through)."""
    for aClient in self.__allowedClients:
      if aClient in kwargs and kwargs[ aClient ]:
        return S_OK()
    try:
      numTasks = max( 1, int( kwargs[ 'maxTasks' ] ) )
    except:
      # NOTE(review): bare except hides real errors; only a missing or
      # non-numeric 'maxTasks' is expected here.
      numTasks = 1
    # NOTE(review): numTasks is computed but never used -- presumably meant
    # to be passed to addExecutor; confirm against ExecutorDispatcher.
    self.__eDispatch.addExecutor( trid, kwargs[ 'executorTypes' ] )
    return self.exec_executorConnected( trid, kwargs[ 'executorTypes' ] )
  auth_conn_drop = [ 'all' ]
  def conn_drop( self, trid ):
    """Connection dropped: remove the executor bound to transport *trid*."""
    self.__eDispatch.removeExecutor( trid )
    return S_OK()
auth_msg_TaskDone = [ 'all' ]
def msg_TaskDone( self, msgObj ):
taskId = msgObj.taskId
try:
result = self.exec_deserializeTask( msgObj.taskStub )
except Exception as excp:
gLogger.exception( "Exception while deserializing task %s" % taskId, lException = excp )
return S_ERROR( "Cannot deserialize task %s: %s" % ( taskId, str( excp ) ) )
if not isReturnStructure( result ):
raise Exception( "exec_deserializeTask does not return a return structure" )
if not result[ 'OK' ]:
return result
taskObj = result[ 'Value' ]
result = self.__eDispatch.taskProcessed( self.srv_getTransportID(), msgObj.taskId, taskObj )
if not result[ 'OK' ]:
gLogger.error( "There was a problem processing task", "%s: %s" % ( taskId, result[ 'Message' ] ) )
return S_OK()
auth_msg_TaskFreeze = [ 'all' ]
def msg_TaskFreeze( self, msgObj ):
taskId = msgObj.taskId
try:
result = self.exec_deserializeTask( msgObj.taskStub )
except Exception as excp:
gLogger.exception( "Exception while deserializing task %s" % taskId, lException = excp )
return S_ERROR( "Cannot deserialize task %s: %s" % ( taskId, str( excp ) ) )
if not isReturnStructure( result ):
raise Exception( "exec_deserializeTask does not return a return structure" )
if not result[ 'OK' ]:
return result
taskObj = result[ 'Value' ]
result = self.__eDispatch.freezeTask( self.srv_getTransportID(), msgObj.taskId,
msgObj.freezeTime, taskObj )
if not result[ 'OK' ]:
gLogger.error( "There was a problem freezing task", "%s: %s" % ( taskId, result[ 'Message' ] ) )
return S_OK()
auth_msg_TaskError = [ 'all' ]
def msg_TaskError( self, msgObj ):
  """An executor reported a task error: remove the task from the dispatcher
  and give the service a chance to react via exec_taskError. Errors raised
  by the hook are logged, not propagated.
  """
  taskId = msgObj.taskId
  try:
    deserialized = self.exec_deserializeTask( msgObj.taskStub )
  except Exception as excp:
    gLogger.exception( "Exception while deserializing task %s" % taskId, lException = excp )
    return S_ERROR( "Cannot deserialize task %s: %s" % ( taskId, str( excp ) ) )
  if not isReturnStructure( deserialized ):
    raise Exception( "exec_deserializeTask does not return a return structure" )
  if not deserialized[ 'OK' ]:
    return deserialized
  taskObj = deserialized[ 'Value' ]
  #TODO: Check the executor has privileges over the task
  self.__eDispatch.removeTask( msgObj.taskId )
  try:
    self.exec_taskError( msgObj.taskId, taskObj, msgObj.errorMsg )
  except Exception as excp:
    gLogger.exception( "Exception when processing task %s" % msgObj.taskId, lException = excp )
  return S_OK()
auth_msg_ExecutorError = [ 'all' ]
def msg_ExecutorError( self, msgObj ):
  """Executor reported a fatal error: log it, deregister it and disconnect."""
  gLogger.info( "Disconnecting executor by error: %s" % msgObj.errorMsg )
  self.__eDispatch.removeExecutor( self.srv_getTransportID() )
  return self.srv_disconnect()
#######
# Utilities functions
#######
@classmethod
def getTaskIds( cls ):
  """Return the ids of all tasks currently known to the dispatcher."""
  return cls.__eDispatch.getTaskIds()
@classmethod
def getExecutorsConnected( cls ):
  """Return the executors currently connected to the dispatcher."""
  return cls.__eDispatch.getExecutorsConnected()
@classmethod
def setFailedOnTooFrozen( cls, value ):
  """Configure the dispatcher: if a task is frozen too many times,
  send an error (True) or forget the task (False)."""
  #If a task is frozen too many times, send error or forget task?
  cls.__eDispatch.setFailedOnTooFrozen( value )
@classmethod
def setFreezeOnFailedDispatch( cls, value ):
  """Configure the dispatcher: freeze tasks that fail to dispatch?"""
  #If a task fails to properly dispatch, freeze it?
  cls.__eDispatch.setFreezeOnFailedDispatch( value )
@classmethod
def setFreezeOnUnknownExecutor( cls, value ):
  """Configure the dispatcher: freeze (True) or forget (False) tasks routed
  to an executor type that has not connected."""
  #If a task needs to go to an executor that has not connected. Forget the task?
  cls.__eDispatch.setFreezeOnUnknownExecutor( value )
#######
# Methods that can be overwritten
#######
@classmethod
def exec_executorDisconnected( cls, trid ):
  """Hook: an executor disconnected. Override to react; default no-op."""
  return S_OK()
@classmethod
def exec_executorConnected( cls, execName, trid ):
  """Hook: an executor connected. Override to react; default no-op.

  NOTE(review): conn_connected calls this as (trid, executorTypes), which
  maps trid onto `execName` here -- confirm the intended argument order.
  """
  return S_OK()
@classmethod
def exec_prepareToSend( cls, taskId, taskObj, eId ):
  """Hook: called just before a task is sent to executor `eId`. Default no-op."""
  return S_OK()
########
# Methods to be used by the real services
########
@classmethod
def executeTask( cls, taskId, taskObj ):
  """Submit a task to the dispatcher for execution."""
  return cls.__eDispatch.addTask( taskId, taskObj )
@classmethod
def forgetTask( cls, taskId ):
  """Remove a task from the dispatcher without further processing."""
  return cls.__eDispatch.removeTask( taskId )
########
# Methods that need to be overwritten
########
@classmethod
def exec_dispatch( cls, taskId, taskObj, pathExecuted ):
  """Abstract hook: decide which executor type the task goes to next.
  Subclasses MUST override this as a classmethod."""
  raise Exception( "No exec_dispatch defined or it is not a classmethod!!" )
@classmethod
def exec_serializeTask( cls, taskObj ):
  """Abstract hook: serialize a task object for transport.
  Subclasses MUST override this as a classmethod."""
  raise Exception( "No exec_serializeTask defined or it is not a classmethod!!" )
@classmethod
def exec_deserializeTask( cls, taskStub ):
  """Abstract hook: deserialize a task stub received from an executor.
  Subclasses MUST override this as a classmethod."""
  raise Exception( "No exec_deserializeTask defined or it is not a classmethod!!" )
@classmethod
def exec_taskError( cls, taskId, taskObj, errorMsg ):
  """Abstract hook: react to an executor-reported task error.
  Subclasses MUST override this as a classmethod."""
  raise Exception( "No exec_taskError defined or it is not a classmethod!!" )
@classmethod
def exec_taskProcessed( cls, taskId, taskObj, eType ):
  """Abstract hook: react to a task processed by executor type `eType`.
  Subclasses MUST override this as a classmethod."""
  raise Exception( "No exec_taskProcessed defined or it is not a classmethod!!" )
@classmethod
def exec_taskFreeze( cls, taskId, taskObj, eType ):
  """Hook: called when a task is frozen. Override to react; default no-op."""
  return S_OK()
| chaen/DIRAC | Core/Base/ExecutorMindHandler.py | Python | gpl-3.0 | 11,256 | [
"DIRAC"
] | 8c1c5aa3efa8373303d21fa162a6c326d258d25a512d3b33d47c80c35eb5c952 |
#
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
# The example can be run by executing: ipython tsne.py -pylab
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
import sys
def Hbeta(D = Math.array([]), beta = 1.0):
    """Compute the perplexity (entropy H) and normalized P-row for a given
    Gaussian precision `beta` over squared distances `D`.

    Returns (H, P) where H = log(sum(exp(-beta*D))) + beta*E[D] and P is the
    conditional probability row summing to 1.
    """
    kernel = Math.exp(-beta * D.copy())
    mass = sum(kernel)
    entropy = Math.log(mass) + beta * Math.sum(D * kernel) / mass
    row = kernel / mass
    return entropy, row
def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):
    """Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity.

    Returns an (n, n) matrix of conditional probabilities p_j|i (diagonal zero).
    Python 2 source (print statements).
    """
    # Initialize some variables
    print "Computing pairwise distances..."
    (n, d) = X.shape;
    sum_X = Math.sum(Math.square(X), 1);
    # Squared Euclidean distances: ||a-b||^2 = ||a||^2 - 2*a.b + ||b||^2
    D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);
    P = Math.zeros((n, n));
    # beta = 1 / (2 * sigma_i^2): per-point Gaussian precision to be tuned
    beta = Math.ones((n, 1));
    logU = Math.log(perplexity);
    # Loop over all datapoints
    for i in range(n):
        # Print progress
        if i % 500 == 0:
            print "Computing P-values for point ", i, " of ", n, "..."
        # Compute the Gaussian kernel and entropy for the current precision
        betamin = -Math.inf;
        betamax = Math.inf;
        # Distances from point i to every other point (i itself excluded)
        Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];
        (H, thisP) = Hbeta(Di, beta[i]);
        # Evaluate whether the perplexity is within tolerance
        Hdiff = H - logU;
        tries = 0;
        # Binary-search beta until the entropy matches log(perplexity)
        while Math.abs(Hdiff) > tol and tries < 50:
            # If not, increase or decrease precision
            if Hdiff > 0:
                betamin = beta[i].copy();
                # No upper bound known yet: double; otherwise bisect
                if betamax == Math.inf or betamax == -Math.inf:
                    beta[i] = beta[i] * 2;
                else:
                    beta[i] = (beta[i] + betamax) / 2;
            else:
                betamax = beta[i].copy();
                # No lower bound known yet: halve; otherwise bisect
                if betamin == Math.inf or betamin == -Math.inf:
                    beta[i] = beta[i] / 2;
                else:
                    beta[i] = (beta[i] + betamin) / 2;
            # Recompute the values
            (H, thisP) = Hbeta(Di, beta[i]);
            Hdiff = H - logU;
            tries = tries + 1;
        # Set the final row of P
        P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;
    # Return final P-matrix
    print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta))
    return P;
def pca(X = Math.array([]), no_dims = 50):
    """Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions.

    NOTE(review): Math.linalg.eig does not order its eigenvalues, so the
    first no_dims columns of M are not guaranteed to be the top-variance
    components -- confirm whether sorting by eigenvalue is intended.
    """
    print "Preprocessing the data using PCA..."
    (n, d) = X.shape;
    X = X - Math.tile(Math.mean(X, 0), (n, 1));   # center each column
    (l, M) = Math.linalg.eig(Math.dot(X.T, X));   # eigendecomposition of (unscaled) covariance
    Y = Math.dot(X, M[:,0:no_dims]);              # project onto the first no_dims eigenvectors
    return Y;
def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0):
    """Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
    The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
    # Check inputs
    if X.dtype != "float64":
        print "Error: array X should have type float64.";
        return -1;
    #if no_dims.__class__ != "<type 'int'>": # doesn't work yet!
    # print "Error: number of dimensions should be an integer.";
    # return -1;
    # Initialize variables
    X = pca(X, initial_dims).real;          # PCA pre-reduction; keep real part only
    (n, d) = X.shape;
    # NOTE(review): the reference implementation runs 1000 iterations;
    # 50 is likely too few to converge -- confirm this was intentional.
    max_iter = 50;
    initial_momentum = 0.5;
    final_momentum = 0.8;
    eta = 500;                              # learning rate
    min_gain = 0.01;
    Y = Math.random.randn(n, no_dims);      # random initial embedding
    dY = Math.zeros((n, no_dims));          # gradient
    iY = Math.zeros((n, no_dims));          # momentum ("velocity") term
    gains = Math.ones((n, no_dims));        # per-coordinate adaptive gains
    # Compute P-values
    P = x2p(X, 1e-5, perplexity);
    P = P + Math.transpose(P);              # symmetrize conditional probabilities
    P = P / Math.sum(P);
    P = P * 4; # early exaggeration
    P = Math.maximum(P, 1e-12);
    # Run iterations
    for iter in range(max_iter):
        # Compute pairwise affinities (Student-t kernel in embedding space)
        sum_Y = Math.sum(Math.square(Y), 1);
        num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y));
        num[range(n), range(n)] = 0;
        Q = num / Math.sum(num);
        Q = Math.maximum(Q, 1e-12);
        # Compute gradient
        PQ = P - Q;
        for i in range(n):
            dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0);
        # Perform the update
        if iter < 20:
            momentum = initial_momentum
        else:
            momentum = final_momentum
        # Increase gain where the gradient sign flipped, decay it otherwise
        gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
        gains[gains < min_gain] = min_gain;
        iY = momentum * iY - eta * (gains * dY);
        Y = Y + iY;
        Y = Y - Math.tile(Math.mean(Y, 0), (n, 1));   # re-center the embedding
        # Compute current value of cost function
        if (iter + 1) % 10 == 0:
            C = Math.sum(P * Math.log(P / Q));
            print "Iteration ", (iter + 1), ": error is ", C
        # Stop lying about P-values
        # NOTE(review): unreachable while max_iter = 50, so the 4x early
        # exaggeration is never undone -- confirm.
        if iter == 100:
            P = P / 4;
    # Return solution
    return Y;
if __name__ == "__main__":
    # Example driver: loads a local dataset, runs t-SNE and plots it.
    print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
    print "Running example on 2,500 MNIST digits..."
    # Python 2 hack to force utf-8 default encoding for the label strings
    reload(sys);
    sys.setdefaultencoding('utf8');
    sys.getdefaultencoding();
    X = Math.loadtxt("d4500.txt");
    #labels = Math.loadtxt("labels.txt");
    # NOTE(review): filename spelled 'lable-4500.txt' -- confirm not a typo
    text_file = open("lable-4500.txt", "r")
    labels = text_file.readlines()
    Y = tsne(X, 2, 50, 20.0);
    #Plot.scatter(Y[:,0], Y[:,1], 20, labels)
    Plot.scatter(
        Y[:, 0], Y[:, 1], marker = 'o', c = Y[:, 1],
        cmap = Plot.get_cmap('Spectral'))
    # Annotate every embedded point with its label text
    for label, x, y in zip(labels, Y[:, 0], Y[:, 1]):
        Plot.annotate(label, xy = (x, y), xytext = (-20, 20),
            textcoords = 'offset points', ha = 'right', va = 'bottom',
            bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0),
            arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
    Plot.show()
| masoodking/LinkPrediction | SME/tsne_python/tsne.py | Python | bsd-3-clause | 5,844 | [
"Gaussian"
] | 51a5789f55a63de5c983f626ea6162c1b83382b3091d9289ae00fcb31bd4dd10 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging as base_logging
# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow
from taskflow.listeners import base as base_listener
from taskflow import states
from taskflow import task
from taskflow.utils import misc
from cinder.i18n import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SENSITIVE_ITEM = ['image_location']


def safe_print_image_location(source):
    """Sanitize the first element of an image location before it is logged.

    Behavior (kept compatible with the original):
    - dict: if it has an 'image_location' entry with a truthy value, replace
      the value's first element with "{SANITIZED}" (mutating the dict, value
      stored back as a tuple); the (possibly mutated) dict is returned.
      Returns "" if the value cannot be converted.
    - tuple/list: returns a sanitized copy of the same kind; empty ones
      fall through.
    - anything else (or empty tuple/list): returns None, as before.
    """
    marker = SENSITIVE_ITEM[0]
    if isinstance(source, dict):
        location = source.get(marker)
        if location:
            try:
                sanitized = list(location)
                sanitized[0] = "{SANITIZED}"
                source[marker] = tuple(sanitized)
            except Exception:
                return ""
        return source
    elif isinstance(source, (tuple, list)):
        if source:
            sanitized = list(source)
            sanitized[0] = "{SANITIZED}"
            return tuple(sanitized) if isinstance(source, tuple) else sanitized
    # Unknown type / empty sequence: nothing to sanitize (implicit None,
    # matching the original behavior).
def _make_task_name(cls, addons=None):
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
class CinderTask(task.Task):
    """The root task class for all cinder tasks.

    Automatically names the task after the module and class that implement
    it, so engine logs identify the concrete task.
    """

    def __init__(self, addons=None, **kwargs):
        # Derive the task name from the concrete subclass, not this base.
        task_name = _make_task_name(self.__class__, addons)
        super(CinderTask, self).__init__(task_name, **kwargs)
class DynamicLogListener(base_listener.ListenerBase):
    """This is used to attach to taskflow engines while they are running.

    It provides a bunch of useful features that expose the actions happening
    inside a taskflow engine, which can be useful for developers for debugging,
    for operations folks for monitoring and tracking of the resource actions
    and more...
    """
    def __init__(self, engine,
                 task_listen_for=(misc.Notifier.ANY,),
                 flow_listen_for=(misc.Notifier.ANY,),
                 logger=None):
        # Subscribe to both task and flow notifications (ANY by default).
        super(DynamicLogListener, self).__init__(
            engine,
            task_listen_for=task_listen_for,
            flow_listen_for=flow_listen_for)
        # Fall back to this module's logger when none is supplied.
        if logger is None:
            self._logger = LOG
        else:
            self._logger = logger
    def _flow_receiver(self, state, details):
        # Gets called on flow state changes.
        # Failures and reverts are warnings; everything else is debug noise.
        level = base_logging.DEBUG
        if state in (states.FAILURE, states.REVERTED):
            level = base_logging.WARNING
        self._logger.log(level,
                         _("Flow '%(flow_name)s' (%(flow_uuid)s) transitioned"
                           " into state '%(state)s' from state"
                           " '%(old_state)s'") %
                         {'flow_name': details['flow_name'],
                          'flow_uuid': details['flow_uuid'],
                          'state': state,
                          'old_state': details.get('old_state')})
    def _task_receiver(self, state, details):
        # Gets called on task state changes.
        if 'result' in details and state in base_listener.FINISH_STATES:
            # If the task failed, it's useful to show the exception traceback
            # and any other available exception information.
            result = details.get('result')
            if isinstance(result, misc.Failure):
                self._logger.warn(_("Task '%(task_name)s' (%(task_uuid)s)"
                                    " transitioned into state '%(state)s'") %
                                  {'task_name': details['task_name'],
                                   'task_uuid': details['task_uuid'],
                                   'state': state},
                                  exc_info=tuple(result.exc_info))
            else:
                # Otherwise, depending on the enabled logging level/state we
                # will show or hide results that the task may have produced
                # during execution.
                level = base_logging.DEBUG
                if state == states.FAILURE:
                    level = base_logging.WARNING
                if (self._logger.isEnabledFor(base_logging.DEBUG) or
                        state == states.FAILURE):
                    # Result is sanitized so image locations never leak.
                    self._logger.log(level,
                                     _("Task '%(task_name)s' (%(task_uuid)s)"
                                       " transitioned into state '%(state)s'"
                                       " with result '%(result)s'") %
                                     {'task_name': details['task_name'],
                                      'task_uuid': details['task_uuid'],
                                      'state': state, 'result': safe_print_image_location(result)})
                else:
                    self._logger.log(level,
                                     _("Task '%(task_name)s' (%(task_uuid)s)"
                                       " transitioned into state"
                                       " '%(state)s'") %
                                     {'task_name': details['task_name'],
                                      'task_uuid': details['task_uuid'],
                                      'state': state})
        else:
            # Non-finishing transition; reverting/retrying warrant a warning.
            level = base_logging.DEBUG
            if state in (states.REVERTING, states.RETRYING):
                level = base_logging.WARNING
            self._logger.log(level,
                             _("Task '%(task_name)s' (%(task_uuid)s)"
                               " transitioned into state '%(state)s'") %
                             {'task_name': details['task_name'],
                              'task_uuid': details['task_uuid'],
                              'state': state})
| hybrid-storage-dev/cinder-fs-111t-hybrid-cherry | flow_utils.py | Python | apache-2.0 | 6,804 | [
"VisIt"
] | 57430bc54f7485be3821c520a518ebf54acf8e5b70afa2f8fe660f425c0116b2 |
"""
Utility functions for transcripts.
++++++++++++++++++++++++++++++++++
"""
from functools import wraps
from django.conf import settings
import os
import copy
import json
import requests
import logging
from pysrt import SubRipTime, SubRipItem, SubRipFile
from pysrt.srtexc import Error
from lxml import etree
from opaque_keys.edx.locator import BlockUsageLocator
from HTMLParser import HTMLParser
from six import text_type
from xmodule.modulestore.django import modulestore
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from .bumper_utils import get_bumper_settings
try:
from edxval import api as edxval_api
except ImportError:
edxval_api = None
log = logging.getLogger(__name__)
NON_EXISTENT_TRANSCRIPT = 'non_existent_dummy_file_name'
class TranscriptException(Exception):
    """Generic error while handling video transcripts."""
    pass
class TranscriptsGenerationException(Exception):
    """Raised when transcripts cannot be generated/converted (e.g. bad SRT)."""
    pass
class GetTranscriptsFromYouTubeException(Exception):
    """Raised when transcripts cannot be fetched from the YouTube API."""
    pass
class TranscriptsRequestValidationException(Exception):
    """Raised when a transcripts-related request fails validation."""
    pass
def exception_decorator(func):
    """
    Decorator mapping transcript errors to NotFoundError.

    TranscriptsGenerationException and UnicodeDecodeError raised by `func`
    are logged and re-raised as NotFoundError; everything else propagates.

    Args:
        `func`: Input function

    Returns:
        'wrapper': Decorated function
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (TranscriptsGenerationException, UnicodeDecodeError) as err:
            log.exception(text_type(err))
            raise NotFoundError
    return wrapper
def generate_subs(speed, source_speed, source_subs):
    """
    Rescale sjson subtitle timestamps from `source_speed` to `speed`.

    Args:
        `speed`: float, target speed for the generated subtitles
        `source_speed`: float, speed of `source_subs`
        `source_subs`: dict, existing subtitles with 'start'/'end'/'text' keys

    Returns:
        dict with rescaled 'start'/'end' lists and the unchanged 'text'.
        The input dict itself is returned when no rescaling is needed.
    """
    if speed == source_speed:
        return source_subs

    factor = 1.0 * speed / source_speed

    def _rescale(timestamps):
        # Millisecond timestamps stay integers after scaling.
        return [int(round(stamp * factor)) for stamp in timestamps]

    return {
        'start': _rescale(source_subs['start']),
        'end': _rescale(source_subs['end']),
        'text': source_subs['text'],
    }
def save_to_store(content, name, mime_type, location):
    """
    Persist named content into the content store.

    Returns the location of the saved content.
    """
    asset_location = Transcript.asset_location(location, name)
    asset = StaticContent(asset_location, name, mime_type, content)
    contentstore().save(asset)
    return asset_location
def save_subs_to_store(subs, subs_id, item, language='en'):
    """
    Serialize sjson subtitles and store them as a static asset.

    Args:
        `subs_id`: str, subtitles id
        `item`: video module instance
        `language`: two chars str ('uk'), language of translation of transcripts

    Returns: location of saved subtitles.
    """
    serialized = json.dumps(subs, indent=2)
    filename = subs_filename(subs_id, language)
    return save_to_store(serialized, filename, 'application/json', item.location)
def youtube_video_transcript_name(youtube_text_api):
    """
    Ask the YouTube timed-text API which named transcript matches the
    configured language; return that name, or None when not found.

    Queried as: http://video.google.com/timedtext?type=list&v={VideoId}
    """
    video_id = youtube_text_api['params']['v']
    wanted_lang = youtube_text_api['params']['lang']
    # Fetch the list of transcripts available for this video.
    response = requests.get('http://' + youtube_text_api['url'],
                            params={'type': 'list', 'v': video_id})
    if response.status_code == 200 and response.text:
        parser = etree.XMLParser(encoding='utf-8')
        listing = etree.fromstring(response.content, parser=parser)
        # Look for a <track> element whose lang_code matches.
        for track in listing:
            if track.tag == 'track' and track.get('lang_code', '') == wanted_lang:
                return track.get('name')
    return None
def get_transcripts_from_youtube(youtube_id, settings, i18n, youtube_transcript_name=''):
    """
    Gets transcripts from youtube for youtube_id.

    Parses only utf-8 encoded transcripts.
    Other encodings are not supported at the moment.

    Returns a dict {'start': [...], 'end': [...], 'text': [...]} with
    millisecond timestamps; raises GetTranscriptsFromYouTubeException on a
    failed fetch.

    NOTE(review): the `youtube_transcript_name` parameter is unconditionally
    overwritten below, so the caller-supplied value is ignored -- confirm.
    """
    _ = i18n.ugettext
    utf8_parser = etree.XMLParser(encoding='utf-8')
    youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
    youtube_text_api['params']['v'] = youtube_id
    # if the transcript name is not empty on youtube server we have to pass
    # name param in url in order to get transcript
    # example: http://video.google.com/timedtext?lang=en&v={VideoId}&name={transcript_name}
    youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
    if youtube_transcript_name:
        youtube_text_api['params']['name'] = youtube_transcript_name
    data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
    if data.status_code != 200 or not data.text:
        msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.").format(
            youtube_id=youtube_id,
            status_code=data.status_code
        )
        raise GetTranscriptsFromYouTubeException(msg)
    sub_starts, sub_ends, sub_texts = [], [], []
    xmltree = etree.fromstring(data.content, parser=utf8_parser)
    for element in xmltree:
        if element.tag == "text":
            start = float(element.get("start"))
            duration = float(element.get("dur", 0))  # dur is not mandatory
            text = element.text
            end = start + duration
            if text:
                # Start and end should be ints representing the millisecond timestamp.
                sub_starts.append(int(start * 1000))
                # +0.0001 guards against float truncation when converting to ms
                sub_ends.append(int((end + 0.0001) * 1000))
                sub_texts.append(text.replace('\n', ' '))
    return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
def download_youtube_subs(youtube_id, video_descriptor, settings):
    """
    Download transcripts from Youtube.

    Args:
        youtube_id: str, actual youtube_id of the video.
        video_descriptor: video descriptor instance.

    We save transcripts for 1.0 speed; other speeds are converted client-side.

    Returns:
        Serialized sjson transcript content, if transcripts were successfully
        downloaded.

    Raises:
        GetTranscriptsFromYouTubeException, if fails.
    """
    i18n = video_descriptor.runtime.service(video_descriptor, "i18n")
    _ = i18n.ugettext
    transcript = get_transcripts_from_youtube(youtube_id, settings, i18n)
    return json.dumps(transcript, indent=2)
def remove_subs_from_store(subs_id, item, lang='en'):
    """
    Delete stored transcript content for `subs_id`/`lang`, if it exists.
    """
    Transcript.delete_asset(item.location, subs_filename(subs_id, lang))
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
    """Generate transcripts from source files (like SubRip format, etc.)
    and save them to assets for `item` module.
    We expect, that speed of source subs equal to 1

    :param speed_subs: dictionary {speed: sub_id, ...}
    :param subs_type: type of source subs: "srt", ...
    :param subs_filedata: unicode, content of source subs.
    :param item: module object.
    :param language: str, language of translation of transcripts
    :returns: the sjson dict that was saved (one copy stored per speed).
    """
    _ = item.runtime.service(item, "i18n").ugettext
    # Only SubRip is accepted as a source format.
    if subs_type.lower() != 'srt':
        raise TranscriptsGenerationException(_("We support only SubRip (*.srt) transcripts format."))
    try:
        srt_subs_obj = SubRipFile.from_string(subs_filedata)
    except Exception as ex:
        msg = _("Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}").format(
            error_message=text_type(ex)
        )
        raise TranscriptsGenerationException(msg)
    if not srt_subs_obj:
        raise TranscriptsGenerationException(_("Something wrong with SubRip transcripts file during parsing."))
    sub_starts = []
    sub_ends = []
    sub_texts = []
    # Flatten the parsed SRT entries into the parallel-array sjson layout;
    # .ordinal is the timestamp in milliseconds.
    for sub in srt_subs_obj:
        sub_starts.append(sub.start.ordinal)
        sub_ends.append(sub.end.ordinal)
        sub_texts.append(sub.text.replace('\n', ' '))
    subs = {
        'start': sub_starts,
        'end': sub_ends,
        'text': sub_texts}
    # Store a rescaled copy for every requested speed (Python 2 iteritems).
    for speed, subs_id in speed_subs.iteritems():
        save_subs_to_store(
            generate_subs(speed, 1, subs),
            subs_id,
            item,
            language
        )
    return subs
def generate_srt_from_sjson(sjson_subs, speed):
    """Generate transcripts with speed = 1.0 from sjson to SubRip (*.srt).

    :param sjson_subs: "sjson" subs.
    :param speed: speed of `sjson_subs`.
    :returns: "srt" subs as a single string; empty string when the sjson
        parallel arrays are inconsistent in length.
    """
    output = ''
    # All three parallel arrays must have matching lengths, else bail out.
    equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])
    if not equal_len:
        return output
    # Rescale timestamps back to 1.0x before rendering.
    sjson_speed_1 = generate_subs(speed, 1, sjson_subs)
    for i in range(len(sjson_speed_1['start'])):
        item = SubRipItem(
            # NOTE(review): SRT numbering conventionally starts at 1, this
            # starts at 0 -- confirm consumers accept that.
            index=i,
            start=SubRipTime(milliseconds=sjson_speed_1['start'][i]),
            end=SubRipTime(milliseconds=sjson_speed_1['end'][i]),
            text=sjson_speed_1['text'][i]
        )
        output += (unicode(item))  # Python 2 `unicode`
        output += '\n'
    return output
def generate_sjson_from_srt(srt_subs):
    """
    Convert parsed SubRip subtitles into the sjson dict format.

    Arguments:
        srt_subs(SubRip): "SRT" subs object

    Returns:
        dict with parallel 'start'/'end'/'text' lists; timestamps are the
        entries' millisecond ordinals, newlines collapsed to spaces.
    """
    starts, ends, texts = [], [], []
    for entry in srt_subs:
        starts.append(entry.start.ordinal)
        ends.append(entry.end.ordinal)
        texts.append(entry.text.replace('\n', ' '))
    return {'start': starts, 'end': ends, 'text': texts}
def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=None):
    """
    Copy the `old_name` transcript in storage to `new_name` and point
    `item.sub` at it.

    Raises `NotFoundError` when `old_name` is absent from storage.
    When `delete_old` is True the `old_name` content is removed afterwards.
    """
    old_filename = u'subs_{0}.srt.sjson'.format(old_name)
    old_location = StaticContent.compute_location(item.location.course_key, old_filename)
    transcript_data = contentstore().find(old_location).data
    save_subs_to_store(json.loads(transcript_data), new_name, item)
    item.sub = new_name
    item.save_with_metadata(user)
    if delete_old:
        remove_subs_from_store(old_name, item)
def get_html5_ids(html5_sources):
    """
    Extract an id from each HTML5 source URL: the final path component with
    its last extension stripped.

    NOTE: This assumes that '/' are not in the filename
    """
    return [source.split('/')[-1].rsplit('.', 1)[0] for source in html5_sources]
def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):
    """
    Does some specific things, that can be done only on save.

    `item` is a video module instance with updated field values that have NOT
    yet been saved to the store; `old_metadata` contains the previous values
    of its XFields.

    # 1. (historical, disabled) copying/renaming of the `sub` transcript to
    #    match each video id used to happen here.
    # 2. Convert /static/filename.srt to filename.srt in self.transcripts
    #    (so users may enter either form).
    # 3. Generate transcript translations only when the user clicks `save`:
    #    a) delete sjson translations for languages removed from
    #       `item.transcripts` (old SRT files are kept on purpose);
    #    b) regenerate sjson for every SRT file still in `item.transcripts`.
    """
    _ = item.runtime.service(item, "i18n").ugettext
    # 2.
    if generate_translation:
        for lang, filename in item.transcripts.items():
            item.transcripts[lang] = os.path.split(filename)[-1]
    # 3.
    if generate_translation:
        old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()
        new_langs = set(item.transcripts)
        html5_ids = get_html5_ids(item.html5_sources)
        possible_video_id_list = html5_ids + [item.youtube_id_1_0]
        for lang in old_langs.difference(new_langs):  # 3a
            for video_id in possible_video_id_list:
                if video_id:
                    remove_subs_from_store(video_id, item, lang)
        reraised_message = ''
        for lang in new_langs:  # 3b
            try:
                generate_sjson_for_all_speeds(
                    item,
                    item.transcripts[lang],
                    {speed: subs_id for subs_id, speed in youtube_speed_dict(item).iteritems()},
                    lang,
                )
            except TranscriptException as ex:
                # BUGFIX: this used to be `pass`, which dropped the error and
                # left `reraised_message` empty, so the re-raise below was
                # dead code and failures were silently swallowed. Accumulate
                # every per-language failure so one bad translation does not
                # abort regeneration of the others, then report them all.
                reraised_message += ' ' + text_type(ex)
        if reraised_message:
            item.save_with_metadata(user)
            raise TranscriptException(reraised_message)
def youtube_speed_dict(item):
    """
    Map each configured youtube id on `item` to its playback speed,
    skipping unset ids: {youtube_id: speed, ...}.
    """
    candidates = [
        (item.youtube_id_0_75, 0.75),
        (item.youtube_id_1_0, 1.00),
        (item.youtube_id_1_25, 1.25),
        (item.youtube_id_1_5, 1.50),
    ]
    return {yt_id: speed for yt_id, speed in candidates if yt_id}
def subs_filename(subs_id, lang='en'):
    """
    Build the storage filename for sjson subtitles; non-English languages
    get a '<lang>_' prefix.
    """
    prefix = u'' if lang == 'en' else u'{0}_'.format(lang)
    return u'{0}subs_{1}.srt.sjson'.format(prefix, subs_id)
def generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, lang):
    """
    Generate sjson transcripts from the stored SRT file `user_filename` for
    every speed in `result_subs_dict` ({speed: subs_id}).

    `item` is the module object; falls back to item.transcript_language when
    `lang` is falsy. Raises TranscriptException when the SRT asset is missing.
    """
    _ = item.runtime.service(item, "i18n").ugettext
    try:
        srt_transcripts = contentstore().find(Transcript.asset_location(item.location, user_filename))
    except NotFoundError as ex:
        raise TranscriptException(_("{exception_message}: Can't find uploaded transcripts: {user_filename}").format(
            exception_message=text_type(ex),
            user_filename=user_filename
        ))
    language = lang or item.transcript_language
    extension = os.path.splitext(user_filename)[1][1:]
    # utf-8-sig strips a leading BOM (U+FEFF), which plain utf-8 would keep.
    generate_subs_from_source(
        result_subs_dict,
        extension,
        srt_transcripts.data.decode('utf-8-sig'),
        item,
        language
    )
def get_or_create_sjson(item, transcripts):
    """
    Return sjson transcript data for item's language, generating it from the
    user-uploaded SRT file on a cache miss.

    The subs id is the SRT filename (chosen by the user) without extension.

    Args:
        transcripts (dict): dictionary of (language: file) pairs.

    Raises:
        TranscriptException: when srt subtitles do not exist,
        and exceptions from generate_subs_from_source.

    `item` is module object.
    """
    srt_filename = transcripts[item.transcript_language]
    subs_id = os.path.splitext(srt_filename)[0]
    speed_map = {1.0: subs_id}
    try:
        return Transcript.asset(item.location, subs_id, item.transcript_language).data
    except NotFoundError:
        # No cached sjson yet: build it from the SRT source, then re-read.
        generate_sjson_for_all_speeds(item, srt_filename, speed_map, item.transcript_language)
        return Transcript.asset(item.location, subs_id, item.transcript_language).data
def get_video_ids_info(edx_video_id, youtube_id_1_0, html5_sources):
    """
    Returns list internal or external video ids.

    Arguments:
        edx_video_id (unicode): edx_video_id
        youtube_id_1_0 (unicode): youtube id
        html5_sources (list): html5 video ids

    Returns:
        tuple: external or internal, video ids list

    NOTE: Python 2 source (`basestring`, list-returning `filter`).
    """
    # Strings get stripped; non-strings (e.g. None) pass through unchanged.
    clean = lambda item: item.strip() if isinstance(item, basestring) else item
    # No usable edx_video_id means the video is "external".
    external = not bool(clean(edx_video_id))
    video_ids = [edx_video_id, youtube_id_1_0] + get_html5_ids(html5_sources)
    # video_ids cleanup: drop empty/blank entries
    video_ids = filter(lambda item: bool(clean(item)), video_ids)
    return external, video_ids
def clean_video_id(edx_video_id):
    """
    Strip surrounding whitespace from an edx-val video identifier.

    Falsy inputs (None, empty string) are returned unchanged.
    """
    if not edx_video_id:
        return edx_video_id
    return edx_video_id.strip()
def get_video_transcript_content(edx_video_id, language_code):
    """
    Fetch video transcript content from edx-val.

    Arguments:
        language_code(unicode): Language code of the requested transcript
        edx_video_id(unicode): edx-val's video identifier

    Returns:
        A dict with the transcript's file name and sjson content, or None
        when edx-val is unavailable or the id is empty.
    """
    video_id = clean_video_id(edx_video_id)
    if edxval_api and video_id:
        return edxval_api.get_video_transcript_data(video_id, language_code)
    return None
def get_available_transcript_languages(edx_video_id):
    """
    List available transcript language codes for a video via edx-val.

    Arguments:
        edx_video_id(unicode): edx-val's video identifier

    Returns:
        A list of distinct language codes; empty when edx-val is unavailable
        or the id is empty.
    """
    video_id = clean_video_id(edx_video_id)
    if edxval_api and video_id:
        return edxval_api.get_available_transcript_languages(video_id=video_id)
    return []
def convert_video_transcript(file_name, content, output_format):
    """
    Convert video transcript into desired format.

    Arguments:
        file_name: name of transcript file along with its extension
        content: transcript content stream
        output_format: the format in which transcript will be converted

    Returns:
        A dict containing the new transcript filename and the content
        converted into desired format.
    """
    basename, extension = os.path.splitext(file_name)
    input_format = extension[1:]  # drop the leading dot
    converted = Transcript.convert(content, input_format=input_format, output_format=output_format)
    new_name = u'{base_name}.{ext}'.format(base_name=basename, ext=output_format)
    return dict(filename=new_name, content=converted)
class Transcript(object):
    """
    Container for transcript methods.
    """
    # Canonical transcript format identifiers used throughout this module.
    SRT = 'srt'
    TXT = 'txt'
    SJSON = 'sjson'
    # HTTP Content-Type to serve for each supported format.
    mime_types = {
        SRT: 'application/x-subrip; charset=utf-8',
        TXT: 'text/plain; charset=utf-8',
        SJSON: 'application/json',
    }
    @staticmethod
    def convert(content, input_format, output_format):
        """
        Convert transcript `content` from `input_format` to `output_format`.
        Accepted input formats: sjson, srt.
        Accepted output format: srt, txt, sjson.
        Returns the converted content (a no-op when both formats match).
        Raises:
            TranscriptsGenerationException: On parsing the invalid srt content during conversion from srt to sjson.
        """
        assert input_format in ('srt', 'sjson')
        assert output_format in ('txt', 'srt', 'sjson')
        # Identical formats: nothing to convert.
        if input_format == output_format:
            return content
        if input_format == 'srt':
            if output_format == 'txt':
                text = SubRipFile.from_string(content.decode('utf8')).text
                return HTMLParser().unescape(text)
            elif output_format == 'sjson':
                try:
                    # With error handling (set to 'ERROR_RAISE'), we will be getting
                    # the exception if something went wrong in parsing the transcript.
                    srt_subs = SubRipFile.from_string(
                        # Skip byte order mark(BOM) character
                        content.decode('utf-8-sig'),
                        error_handling=SubRipFile.ERROR_RAISE
                    )
                except Error as ex: # Base exception from pysrt
                    raise TranscriptsGenerationException(text_type(ex))
                return json.dumps(generate_sjson_from_srt(srt_subs))
        if input_format == 'sjson':
            if output_format == 'txt':
                text = json.loads(content)['text']
                return HTMLParser().unescape("\n".join(text))
            elif output_format == 'srt':
                return generate_srt_from_sjson(json.loads(content), speed=1.0)
    @staticmethod
    def asset(location, subs_id, lang='en', filename=None):
        """
        Get asset from contentstore, asset location is built from subs_id and lang.
        `location` is module location.
        Raises NotFoundError for the sentinel NON_EXISTENT_TRANSCRIPT name.
        """
        # HACK Warning! this is temporary and will be removed once edx-val take over the
        # transcript module and contentstore will only function as fallback until all the
        # data is migrated to edx-val. It will be saving a contentstore hit for a hardcoded
        # dummy-non-existent-transcript name.
        if NON_EXISTENT_TRANSCRIPT in [subs_id, filename]:
            raise NotFoundError
        # An explicit filename wins over the subs_id-derived name.
        asset_filename = subs_filename(subs_id, lang) if not filename else filename
        return Transcript.get_asset(location, asset_filename)
    @staticmethod
    def get_asset(location, filename):
        """
        Return asset by location and filename.
        """
        return contentstore().find(Transcript.asset_location(location, filename))
    @staticmethod
    def asset_location(location, filename):
        """
        Return asset location. `location` is module location.
        """
        # If user transcript filename is empty, raise `TranscriptException` to avoid `InvalidKeyError`.
        if not filename:
            raise TranscriptException("Transcript not uploaded yet")
        return StaticContent.compute_location(location.course_key, filename)
    @staticmethod
    def delete_asset(location, filename):
        """
        Delete asset by location and filename.
        A missing asset is silently ignored; the (now removed) asset
        location is returned either way.
        """
        try:
            contentstore().delete(Transcript.asset_location(location, filename))
            log.info("Transcript asset %s was removed from store.", filename)
        except NotFoundError:
            pass
        return StaticContent.compute_location(location.course_key, filename)
class VideoTranscriptsMixin(object):
    """Mixin class for transcript functionality.
    This is necessary for both VideoModule and VideoDescriptor.
    """
    def available_translations(self, transcripts, verify_assets=None, is_bumper=False):
        """
        Return a list of language codes for which we have transcripts.
        Arguments:
            verify_assets (boolean): If True, checks to ensure that the transcripts
                really exist in the contentstore. If False, we just look at the
                VideoDescriptor fields and do not query the contentstore. One reason
                we might do this is to avoid slamming contentstore() with queries
                when trying to make a listing of videos and their languages.
                Defaults to `not FALLBACK_TO_ENGLISH_TRANSCRIPTS`.
            transcripts (dict): A dict with all transcripts and a sub.
            is_bumper (boolean): If True, look the transcripts up in the
                contentstore only (bumper transcripts live there exclusively).
        """
        translations = []
        if verify_assets is None:
            verify_assets = not settings.FEATURES.get('FALLBACK_TO_ENGLISH_TRANSCRIPTS')
        sub, other_langs = transcripts["sub"], transcripts["transcripts"]
        if verify_assets:
            all_langs = dict(**other_langs)
            if sub:
                all_langs.update({'en': sub})
            # Keep only languages whose transcript can actually be fetched.
            for language, filename in all_langs.iteritems():
                try:
                    # for bumper videos, transcripts are stored in content store only
                    if is_bumper:
                        get_transcript_for_video(self.location, filename, filename, language)
                    else:
                        get_transcript(self, language)
                except NotFoundError:
                    continue
                translations.append(language)
        else:
            # If we're not verifying the assets, we just trust our field values
            translations = list(other_langs)
            if not translations or sub:
                translations += ['en']
        # to clean redundant language codes.
        return list(set(translations))
    def get_transcript(self, transcripts, transcript_format='srt', lang=None):
        """
        Returns transcript, filename and MIME type.
        transcripts (dict): A dict with all transcripts and a sub.
        Raises:
            - NotFoundError if cannot find transcript file in storage.
            - ValueError if transcript file is empty or incorrect JSON.
            - KeyError if transcript file has incorrect format.
        If language is 'en', self.sub should be correct subtitles name.
        If language is 'en', but if self.sub is not defined, this means that we
        should search for video name in order to get proper transcript (old style courses).
        If language is not 'en', give back transcript in proper language and format.
        """
        if not lang:
            lang = self.get_default_transcript_language(transcripts)
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if lang == 'en':
            if sub: # HTML5 case and (Youtube case for new style videos)
                transcript_name = sub
            elif self.youtube_id_1_0: # old courses
                transcript_name = self.youtube_id_1_0
            else:
                log.debug("No subtitles for 'en' language")
                raise ValueError
            # English transcripts are stored as sjson; convert on the way out.
            data = Transcript.asset(self.location, transcript_name, lang).data
            filename = u'{}.{}'.format(transcript_name, transcript_format)
            content = Transcript.convert(data, 'sjson', transcript_format)
        else:
            # Non-English transcripts are stored as srt under their filename.
            data = Transcript.asset(self.location, None, None, other_lang[lang]).data
            filename = u'{}.{}'.format(os.path.splitext(other_lang[lang])[0], transcript_format)
            content = Transcript.convert(data, 'srt', transcript_format)
        if not content:
            log.debug('no subtitles produced in get_transcript')
            raise ValueError
        return content, filename, Transcript.mime_types[transcript_format]
    def get_default_transcript_language(self, transcripts):
        """
        Returns the default transcript language for this video module.
        Preference order: the module's configured language, then English
        (when a sub exists), then the alphabetically-first other language,
        then English as a last resort.
        Args:
            transcripts (dict): A dict with all transcripts and a sub.
        """
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if self.transcript_language in other_lang:
            transcript_language = self.transcript_language
        elif sub:
            transcript_language = u'en'
        elif len(other_lang) > 0:
            transcript_language = sorted(other_lang)[0]
        else:
            transcript_language = u'en'
        return transcript_language
    def get_transcripts_info(self, is_bumper=False):
        """
        Returns a transcript dictionary for the video.
        Arguments:
            is_bumper(bool): If True, the request is for the bumper transcripts
        Returns:
            dict with keys "sub" (the English sub id) and "transcripts"
            (mapping of language code -> transcript file name).
        """
        if is_bumper:
            transcripts = copy.deepcopy(get_bumper_settings(self).get('transcripts', {}))
            sub = transcripts.pop("en", "")
        else:
            transcripts = self.transcripts if self.transcripts else {}
            sub = self.sub
        # Only attach transcripts that are not empty.
        transcripts = {
            language_code: transcript_file
            for language_code, transcript_file in transcripts.items() if transcript_file != ''
        }
        # bumper transcripts are stored in content store so we don't need to include val transcripts
        if not is_bumper:
            transcript_languages = get_available_transcript_languages(edx_video_id=self.edx_video_id)
            # HACK Warning! this is temporary and will be removed once edx-val take over the
            # transcript module and contentstore will only function as fallback until all the
            # data is migrated to edx-val.
            for language_code in transcript_languages:
                if language_code == 'en' and not sub:
                    sub = NON_EXISTENT_TRANSCRIPT
                elif not transcripts.get(language_code):
                    transcripts[language_code] = NON_EXISTENT_TRANSCRIPT
        return {
            "sub": sub,
            "transcripts": transcripts,
        }
@exception_decorator
def get_transcript_from_val(edx_video_id, lang=None, output_format=Transcript.SRT):
    """
    Get video transcript from edx-val.

    Arguments:
        edx_video_id (unicode): video identifier
        lang (unicode): transcript language
        output_format (unicode): transcript output format

    Returns:
        tuple containing content, filename, mimetype

    Raises:
        NotFoundError: when edx-val has no transcript for this video/language.
    """
    val_transcript = get_video_transcript_content(edx_video_id, lang)
    if not val_transcript:
        raise NotFoundError(u'Transcript not found for {}, lang: {}'.format(edx_video_id, lang))
    # Convert edx-val's payload into the requested output format.
    converted = convert_video_transcript(**dict(val_transcript, output_format=output_format))
    return converted['content'], converted['filename'], Transcript.mime_types[output_format]
def get_transcript_for_video(video_location, subs_id, file_name, language):
    """
    Get a video transcript from the content store.

    NOTE: Transcripts can be searched from content store in two ways:
        1. by an id (a.k.a subs_id), from which the sjson asset name is built
        2. by the transcript filename (srt), used as a fallback

    Arguments:
        video_location (Locator): Video location
        subs_id (unicode): id for a transcript in content store
        file_name (unicode): file_name for a transcript in content store
        language (unicode): transcript language

    Returns:
        tuple containing transcript input_format, basename, content
    """
    try:
        if subs_id is None:
            raise NotFoundError
        return (
            Transcript.SJSON,
            subs_id,
            Transcript.asset(video_location, subs_id, language).data,
        )
    except NotFoundError:
        # Fall back to a filename-based (srt) lookup in the content store.
        content = Transcript.asset(video_location, None, language, file_name).data
        return Transcript.SRT, os.path.splitext(file_name)[0], content
@exception_decorator
def get_transcript_from_contentstore(video, language, output_format, transcripts_info, youtube_id=None):
    """
    Get video transcript from content store.
    Arguments:
        video (Video Descriptor): Video descriptor
        language (unicode): transcript language
        output_format (unicode): transcript output format
        transcripts_info (dict): transcript info for a video
        youtube_id (unicode): youtube video id
    Returns:
        tuple containing content, filename, mimetype
    Raises:
        NotFoundError: for an unsupported output format, when no candidate
        transcript exists, or when the found transcript is empty.
    """
    input_format, base_name, transcript_content = None, None, None
    if output_format not in (Transcript.SRT, Transcript.SJSON, Transcript.TXT):
        raise NotFoundError('Invalid transcript format `{output_format}`'.format(output_format=output_format))
    sub, other_languages = transcripts_info['sub'], transcripts_info['transcripts']
    transcripts = dict(other_languages)
    # this is sent in case of a translation dispatch and we need to use it as our subs_id.
    possible_sub_ids = [youtube_id, sub, video.youtube_id_1_0] + get_html5_ids(video.html5_sources)
    # Try each candidate id in priority order; the first one that yields a
    # transcript wins. For 'en', file_name falls back to the candidate id
    # just written into transcripts[u'en'] below.
    for sub_id in possible_sub_ids:
        try:
            transcripts[u'en'] = sub_id
            input_format, base_name, transcript_content = get_transcript_for_video(
                video.location,
                subs_id=sub_id,
                file_name=transcripts[language],
                language=language
            )
            break
        except (KeyError, NotFoundError):
            # KeyError: no filename for this language; NotFoundError: asset missing.
            continue
    if transcript_content is None:
        raise NotFoundError('No transcript for `{lang}` language'.format(
            lang=language
        ))
    # add language prefix to transcript file only if language is not None
    language_prefix = '{}_'.format(language) if language else ''
    transcript_name = u'{}{}.{}'.format(language_prefix, base_name, output_format)
    transcript_content = Transcript.convert(transcript_content, input_format=input_format, output_format=output_format)
    if not transcript_content.strip():
        raise NotFoundError('No transcript content')
    if youtube_id:
        # Re-time the subs for this particular youtube speed variant.
        youtube_ids = youtube_speed_dict(video)
        transcript_content = json.dumps(
            generate_subs(youtube_ids.get(youtube_id, 1), 1, json.loads(transcript_content))
        )
    return transcript_content, transcript_name, Transcript.mime_types[output_format]
def get_transcript(video, lang=None, output_format=Transcript.SRT, youtube_id=None):
    """
    Get video transcript from edx-val or content store.

    edx-val is consulted first; when it has no transcript for the video we
    fall back to the content store.

    Arguments:
        video (Video Descriptor): Video Descriptor
        lang (unicode): transcript language
        output_format (unicode): transcript output format
        youtube_id (unicode): youtube video id

    Returns:
        tuple containing content, filename, mimetype
    """
    transcripts_info = video.get_transcripts_info()
    if not lang:
        lang = video.get_default_transcript_language(transcripts_info)
    edx_video_id = clean_video_id(video.edx_video_id)
    if edx_video_id:
        try:
            return get_transcript_from_val(edx_video_id, lang, output_format)
        except NotFoundError:
            pass
    return get_transcript_from_contentstore(
        video,
        lang,
        youtube_id=youtube_id,
        output_format=output_format,
        transcripts_info=transcripts_info,
    )
| ahmedaljazzar/edx-platform | common/lib/xmodule/xmodule/video_module/transcripts_utils.py | Python | agpl-3.0 | 36,950 | [
"FEFF"
] | 4b156d35bef1964ee52faf86eb2347b7b3b3ad41c807cd211447cb5095d60fd6 |
# coding=utf-8
import os
from setuptools import setup
from sys import version_info
# Refuse installation on unsupported interpreters before setup() runs.
if version_info.major != 3 or version_info.minor < 5:
    raise RuntimeError('friction requires python 3.5 or newer')
setup(
    name='friction',
    version='1.0.0',
    description=(
        'a browser-based gallery viewer tailored for viewing large '
        'collections of pornographic manga'
    ),
    long_description=(
        'please visit the homepage: https://github.com/tinruufu/friction'
    ),
    long_description_content_type='text/markdown',
    url='https://github.com/tinruufu/friction',
    author='ティン・ルーフ',
    author_email='tinruufu+pypi@gmail.com',
    packages=['friction'],
    # Plain executable scripts installed onto the user's PATH.
    scripts=[
        os.path.join('scripts', 'friction'),
        os.path.join('scripts', 'friction-ui'),
    ],
    # py2app target: the UI launcher gets bundled as a macOS .app.
    app=[
        os.path.join('scripts', 'friction-ui'),
    ],
    license='MIT',
    platforms=['any'],
    install_requires=[
        'flask>=1.1,<1.2',
        'pillow',
        'python-magic',
        'rarfile',
    ],
    setup_requires=[
        'py2app',
    ],
    classifiers=[
        'Intended Audience :: End Users/Desktop',
        'Topic :: Multimedia :: Graphics :: Viewers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
    ],
    include_package_data=True,
    # macOS bundle metadata consumed by py2app.
    options={
        'py2app': {
            'iconfile': 'scripts/icon.icns',
            'plist': {
                'NSHumanReadableCopyright': '©2016 ティン・ルーフ',
            },
        }
    },
)
| tinruufu/friction | setup.py | Python | mit | 1,563 | [
"VisIt"
] | 8b1e4c973746c57db247aa720f58ef581ed2f6302a58addfaec1bd7116b6599c |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import cc
from pyscf import lib
from pyscf import ao2mo
from pyscf.cc import gccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_uccsd
from pyscf.cc import eom_gccsd
# Module-level fixtures shared by all tests below. Order matters: the
# numpy.random.seed calls fix the pseudo-random MO coefficients and test
# amplitudes that the regression constants in KnownValues depend on.
# Water molecule in a 6-31G basis, restricted-occupation UHF reference.
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
    [8 , (0. , 0. , 0.)],
    [1 , (0. , -0.757 , 0.587)],
    [1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 0
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-12)
# Triplet variant of the same molecule for the open-shell tests.
mol1 = mol.copy()
mol1.spin = 2
mol1.build()
mf0 = scf.UHF(mol1).run(conv_tol=1e-12)
mf1 = copy.copy(mf0)
nocca, noccb = mol1.nelec
nmo = mol1.nao_nr()
nvira, nvirb = nmo-nocca, nmo-noccb
# mf1 gets deliberately random (non-physical) MO coefficients so matvec
# implementations can be compared against the GHF-based reference code.
numpy.random.seed(12)
mf1.mo_coeff = numpy.random.random((2,nmo,nmo)) - .5
gmf = scf.addons.convert_to_ghf(mf1)
orbspin = gmf.mo_coeff.orbspin
ucc1 = cc.UCCSD(mf1)
eris1 = ucc1.ao2mo()
# Random antisymmetrized amplitudes, round-tripped through the amplitude
# vector packing to make them internally consistent.
numpy.random.seed(11)
no = nocca + noccb
nv = nvira + nvirb
r1 = numpy.random.random((no,nv)) - .9
r2 = numpy.random.random((no,no,nv,nv)) - .9
r2 = r2 - r2.transpose(1,0,2,3)
r2 = r2 - r2.transpose(0,1,3,2)
r1 = cc.addons.spin2spatial(r1, orbspin)
r2 = cc.addons.spin2spatial(r2, orbspin)
r1,r2 = eom_uccsd.vector_to_amplitudes_ee(
    eom_uccsd.amplitudes_to_vector_ee(r1,r2), ucc1.nmo, ucc1.nocc)
ucc1.t1 = r1
ucc1.t2 = r2
# Converged closed-shell UCCSD used by the EOM kernel tests.
ucc = cc.UCCSD(mf)
ucc.max_space = 0
ucc.conv_tol = 1e-8
ecc = ucc.kernel()[0]
# Converged open-shell UCCSD with 4-fold symmetric integrals (direct mode).
ucc0 = cc.UCCSD(mf0)
ucc0.conv_tol = 1e-8
ucc0.direct = True
ucc0.kernel()
def tearDownModule():
    """Drop the module-level SCF/CC fixtures once the whole suite has run."""
    for fixture in ('mol', 'mf', 'mol1', 'mf0', 'mf1', 'gmf',
                    'ucc', 'ucc0', 'ucc1', 'eris1'):
        del globals()[fixture]
class KnownValues(unittest.TestCase):
    """Regression tests pinning EOM-UCCSD eigenvalues and intermediates.
    The reference numbers were produced by earlier trusted runs of this
    code and by the equivalent GCCSD-based implementations.
    """
    def test_ipccsd(self):
        eom = ucc.eomip_method()
        e,v = eom.kernel(nroots=1, koopmans=False)
        self.assertAlmostEqual(e, 0.42789083399175043, 6)
        e,v = ucc.ipccsd(nroots=8)
        self.assertAlmostEqual(e[0], 0.42789083399175043, 6)
        self.assertAlmostEqual(e[2], 0.50226861340475437, 6)
        self.assertAlmostEqual(e[4], 0.68550641152952585, 6)
        # Restarting from previous eigenvectors must reproduce the roots.
        e,v = ucc.ipccsd(nroots=4, guess=v[:4])
        self.assertAlmostEqual(e[2], 0.50226861340475437, 6)
    def test_ipccsd_koopmans(self):
        e,v = ucc.ipccsd(nroots=8, koopmans=True)
        self.assertAlmostEqual(e[0], 0.42789083399175043, 6)
        self.assertAlmostEqual(e[2], 0.50226861340475437, 6)
        self.assertAlmostEqual(e[4], 0.68550641152952585, 6)
    def test_eaccsd(self):
        eom = ucc.eomea_method()
        e,v = eom.kernel(nroots=1, koopmans=False)
        self.assertAlmostEqual(e, 0.19050592137699729, 6)
        e,v = ucc.eaccsd(nroots=8)
        self.assertAlmostEqual(e[0], 0.19050592137699729, 6)
        self.assertAlmostEqual(e[2], 0.28345228891172214, 6)
        self.assertAlmostEqual(e[4], 0.52280673926459342, 6)
        e,v = ucc.eaccsd(nroots=4, guess=v[:4])
        self.assertAlmostEqual(e[2], 0.28345228891172214, 6)
    def test_eaccsd_koopmans(self):
        e,v = ucc.eaccsd(nroots=6, koopmans=True)
        self.assertAlmostEqual(e[0], 0.19050592137699729, 6)
        self.assertAlmostEqual(e[2], 0.28345228891172214, 6)
        self.assertAlmostEqual(e[4], 1.02136493172648370, 6)
        # Cross-check against the spin-orbital (GCCSD) implementation.
        gcc1 = gccsd.GCCSD(scf.addons.convert_to_ghf(mf)).run()
        e1 = gcc1.eaccsd(nroots=6, koopmans=True)[0]
        self.assertAlmostEqual(abs(e1-e).max(), 0, 6)
    def test_eomee(self):
        self.assertAlmostEqual(ecc, -0.13539788719099638, 6)
        eom = ucc.eomee_method()
        e,v = eom.kernel(nroots=1, koopmans=False)
        self.assertAlmostEqual(e, 0.28114509667240556, 6)
        e,v = ucc.eeccsd(nroots=4)
        self.assertAlmostEqual(e[0], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[1], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[2], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[3], 0.30819728420902842, 6)
        e,v = ucc.eeccsd(nroots=4, guess=v[:4])
        self.assertAlmostEqual(e[3], 0.30819728420902842, 6)
    def test_eomee_ccsd_spin_keep(self):
        e, v = ucc.eomee_ccsd(nroots=2, koopmans=False)
        self.assertAlmostEqual(e[0], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[1], 0.30819728420902842, 6)
        e, v = ucc.eomee_ccsd(nroots=2, koopmans=True)
        self.assertAlmostEqual(e[0], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[1], 0.30819728420902842, 6)
    def test_eomsf_ccsd(self):
        e, v = ucc.eomsf_ccsd(nroots=2, koopmans=False)
        self.assertAlmostEqual(e[0], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[1], 0.28114509667240556, 6)
        e, v = ucc.eomsf_ccsd(nroots=2, koopmans=True)
        self.assertAlmostEqual(e[0], 0.28114509667240556, 6)
        self.assertAlmostEqual(e[1], 0.28114509667240556, 6)
    def test_ucc_update_amps(self):
        # UCCSD amplitude update must agree with GCCSD on random amplitudes.
        gcc1 = gccsd.GCCSD(gmf)
        r1g = gcc1.spatial2spin(ucc1.t1, orbspin)
        r2g = gcc1.spatial2spin(ucc1.t2, orbspin)
        r1g, r2g = gcc1.update_amps(r1g, r2g, gcc1.ao2mo())
        u1g = gcc1.spin2spatial(r1g, orbspin)
        u2g = gcc1.spin2spatial(r2g, orbspin)
        t1, t2 = ucc1.update_amps(ucc1.t1, ucc1.t2, eris1)
        self.assertAlmostEqual(abs(u1g[0]-t1[0]).max(), 0, 7)
        self.assertAlmostEqual(abs(u1g[1]-t1[1]).max(), 0, 7)
        self.assertAlmostEqual(abs(u2g[0]-t2[0]).max(), 0, 6)
        self.assertAlmostEqual(abs(u2g[1]-t2[1]).max(), 0, 6)
        self.assertAlmostEqual(abs(u2g[2]-t2[2]).max(), 0, 6)
        self.assertAlmostEqual(float(abs(r1g-gcc1.spatial2spin(t1, orbspin)).max()), 0, 6)
        self.assertAlmostEqual(float(abs(r2g-gcc1.spatial2spin(t2, orbspin)).max()), 0, 6)
        self.assertAlmostEqual(uccsd.energy(ucc1, r1, r2, eris1), -7.2775115532675771, 8)
        e0, t1, t2 = ucc1.init_amps(eris1)
        self.assertAlmostEqual(lib.finger(cc.addons.spatial2spin(t1, orbspin)), 148.57054876656397, 8)
        self.assertAlmostEqual(lib.finger(cc.addons.spatial2spin(t2, orbspin)),-349.94207953071475, 8)
        self.assertAlmostEqual(e0, 30.640616265644827, 2)
    def test_ucc_eomee_ccsd_matvec(self):
        # Random trial vector; UCCSD EE matvec must match the GCCSD one.
        numpy.random.seed(10)
        r1 = [numpy.random.random((nocca,nvira))-.9,
              numpy.random.random((noccb,nvirb))-.9]
        r2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
              numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
              numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
        r2[0] = r2[0] - r2[0].transpose(1,0,2,3)
        r2[0] = r2[0] - r2[0].transpose(0,1,3,2)
        r2[2] = r2[2] - r2[2].transpose(1,0,2,3)
        r2[2] = r2[2] - r2[2].transpose(0,1,3,2)
        gcc1 = cc.addons.convert_to_gccsd(ucc1)
        gr1 = gcc1.spatial2spin(r1)
        gr2 = gcc1.spatial2spin(r2)
        gee1 = eom_gccsd.EOMEE(gcc1)
        gvec = eom_gccsd.amplitudes_to_vector_ee(gr1, gr2)
        vecref = eom_gccsd.eeccsd_matvec(gee1, gvec)
        vec = eom_uccsd.amplitudes_to_vector_ee(r1,r2)
        uee1 = eom_uccsd.EOMEESpinKeep(ucc1)
        vec1 = eom_uccsd.eomee_ccsd_matvec(uee1, vec)
        uv = eom_uccsd.amplitudes_to_vector_ee(r1, r2)
        gv = eom_gccsd.amplitudes_to_vector_ee(gr1, gr2)
        r1, r2 = uee1.vector_to_amplitudes(uee1.matvec(uv))
        gr1, gr2 = gee1.vector_to_amplitudes(gee1.matvec(gv))
        r1, r2 = uee1.vector_to_amplitudes(vec1)
        gr1, gr2 = gee1.vector_to_amplitudes(vecref)
        self.assertAlmostEqual(float(abs(gr1-gcc1.spatial2spin(r1)).max()), 0, 9)
        self.assertAlmostEqual(float(abs(gr2-gcc1.spatial2spin(r2)).max()), 0, 9)
        self.assertAlmostEqual(lib.finger(vec1), 49.499911123484523, 9)
    def test_ucc_eomee_ccsd_diag(self): # FIXME: compare to EOMEE-GCCSD diag
        vec1, vec2 = eom_uccsd.EOMEE(ucc1).get_diag()
        self.assertAlmostEqual(lib.finger(vec1), 62.767648620751018, 9)
        self.assertAlmostEqual(lib.finger(vec2), 156.2976365433517, 9)
    def test_ucc_eomee_init_guess(self):
        uee = eom_uccsd.EOMEESpinKeep(ucc1)
        diag = uee.get_diag()[0]
        guess = uee.get_init_guess(nroots=1, koopmans=False, diag=diag)
        self.assertAlmostEqual(lib.finger(guess[0]), -0.99525784369029358, 9)
        guess = uee.get_init_guess(nroots=1, koopmans=True, diag=diag)
        self.assertAlmostEqual(lib.finger(guess[0]), -0.84387013299273794, 9)
        guess = uee.get_init_guess(nroots=4, koopmans=False, diag=diag)
        self.assertAlmostEqual(lib.finger(guess), -0.98261980006133565, 9)
        guess = uee.get_init_guess(nroots=4, koopmans=True, diag=diag)
        self.assertAlmostEqual(lib.finger(guess), -0.38124032366955651, 9)
    def test_ucc_eomsf_ccsd_matvec(self):
        numpy.random.seed(10)
        myeom = eom_uccsd.EOMEESpinFlip(ucc1)
        vec = numpy.random.random(myeom.vector_size()) - .9
        vec1 = eom_uccsd.eomsf_ccsd_matvec(myeom, vec)
        self.assertAlmostEqual(lib.finger(vec1), -1655.5564756993756, 8)
        r1, r2 = myeom.vector_to_amplitudes(vec)
        gr1 = eom_uccsd.spatial2spin_eomsf(r1, orbspin)
        gr2 = eom_uccsd.spatial2spin_eomsf(r2, orbspin)
        gvec = eom_gccsd.amplitudes_to_vector_ee(gr1, gr2)
        gcc1 = cc.addons.convert_to_gccsd(ucc1)
        gee1 = eom_gccsd.EOMEE(gcc1)
        vecref = eom_gccsd.eeccsd_matvec(gee1, gvec)
        gr1, gr2 = gee1.vector_to_amplitudes(vecref)
        v1, v2 = myeom.vector_to_amplitudes(vec1)
        self.assertAlmostEqual(float(abs(gr1-eom_uccsd.spatial2spin_eomsf(v1, orbspin)).max()), 0, 9)
        self.assertAlmostEqual(float(abs(gr2-eom_uccsd.spatial2spin_eomsf(v2, orbspin)).max()), 0, 9)
#    def test_ucc_eomip_matvec(self):
#
#    def test_ucc_eomea_matvec(self):
########################################
# With 4-fold symmetry in integrals
# max_memory = 0
# direct = True
    def test_eomee1(self):
        self.assertAlmostEqual(ucc0.e_corr, -0.10805861805688141, 6)
        e,v = ucc0.eeccsd(nroots=4)
        self.assertAlmostEqual(e[0],-0.28757438579564343, 6)
        self.assertAlmostEqual(e[1], 7.0932490003970672e-05, 6)
        self.assertAlmostEqual(e[2], 0.026861582690761672, 6)
        self.assertAlmostEqual(e[3], 0.091111388761653589, 6)
        e,v = ucc0.eeccsd(nroots=4, guess=v[:4])
        self.assertAlmostEqual(e[3], 0.091111388761653589, 6)
    def test_vector_to_amplitudes_eomsf(self):
        # Packing/unpacking spin-flip amplitudes must round-trip exactly.
        eomsf = eom_uccsd.EOMEESpinFlip(ucc0)
        size = eomsf.vector_size()
        v = numpy.random.random(size)
        r1, r2 = eomsf.vector_to_amplitudes(v)
        v1 = eomsf.amplitudes_to_vector(r1, r2)
        self.assertAlmostEqual(abs(v-v1).max(), 0, 12)
    def test_spatial2spin_eomsf(self):
        # spatial->spin->spatial conversion must round-trip exactly.
        eomsf = eom_uccsd.EOMEESpinFlip(ucc0)
        size = eomsf.vector_size()
        v = numpy.random.random(size)
        r1, r2 = eomsf.vector_to_amplitudes(v)
        v1 = eom_uccsd.spin2spatial_eomsf(eom_uccsd.spatial2spin_eomsf(r1, orbspin), orbspin)
        v2 = eom_uccsd.spin2spatial_eomsf(eom_uccsd.spatial2spin_eomsf(r2, orbspin), orbspin)
        self.assertAlmostEqual(abs(r1[0]-v1[0]).max(), 0, 12)
        self.assertAlmostEqual(abs(r1[1]-v1[1]).max(), 0, 12)
        self.assertAlmostEqual(abs(r2[0]-v2[0]).max(), 0, 12)
        self.assertAlmostEqual(abs(r2[1]-v2[1]).max(), 0, 12)
        self.assertAlmostEqual(abs(r2[2]-v2[2]).max(), 0, 12)
        self.assertAlmostEqual(abs(r2[3]-v2[3]).max(), 0, 12)
    def test_vector_to_amplitudes_eom_spin_keep(self):
        eomsf = eom_uccsd.EOMEESpinKeep(ucc0)
        size = eomsf.vector_size()
        v = numpy.random.random(size)
        r1, r2 = eomsf.vector_to_amplitudes(v)
        v1 = eomsf.amplitudes_to_vector(r1, r2)
        self.assertAlmostEqual(abs(v-v1).max(), 0, 12)
if __name__ == "__main__":
    # Allow running this module directly (outside a test runner).
    print("Tests for UCCSD")
    unittest.main()
| gkc1000/pyscf | pyscf/cc/test/test_eom_uccsd.py | Python | apache-2.0 | 12,389 | [
"PySCF"
] | 12640030cd0030321cc5a4ab4d169c2c08c198e2f70bbb53dbb96ea88c788c04 |
#!/usr/bin/env python
"""Kubernetes sandbox components."""
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import sandbox
import sandlet
def set_gke_cluster_context(gke_cluster_name):
  """Point kubectl's current context at the named GKE cluster.

  Args:
    gke_cluster_name: short GKE cluster name; matched against the suffix of
      the fully-qualified cluster names reported by kubectl.

  Raises:
    sandbox.SandboxError: if no known cluster name ends with the given name.
  """
  logging.info('Changing cluster to %s.', gke_cluster_name)
  clusters = subprocess.check_output(
      ['kubectl', 'config', 'get-clusters']).split('\n')
  cluster = [c for c in clusters if c.endswith('_%s' % gke_cluster_name)]
  if not cluster:
    # Bug fix: exception constructors don't apply logging-style lazy '%'
    # formatting, so interpolate the cluster name into the message here.
    raise sandbox.SandboxError(
        'Cannot change GKE cluster context, cluster %s not found'
        % gke_cluster_name)
  with open(os.devnull, 'w') as devnull:
    subprocess.call(['kubectl', 'config', 'use-context', cluster[0]],
                    stdout=devnull)
class HelmComponent(sandlet.SandletComponent):
  """A helm resource.
  Installs the vitess helm chart (from $VTTOP/helm/vitess) into the
  sandbox's namespace, and tears it down on stop.
  """
  def __init__(self, name, sandbox_name, helm_config):
    super(HelmComponent, self).__init__(name, sandbox_name)
    # Path to the values file passed to `helm install --values`.
    self.helm_config = helm_config
    # Fail fast at construction time if the helm binary is missing.
    try:
      subprocess.check_output(['helm'], stderr=subprocess.STDOUT)
    except OSError:
      raise sandbox.SandboxError(
          'Could not find helm binary. Please visit '
          'https://github.com/kubernetes/helm to download helm.')
  def start(self):
    """Initialize helm (tiller) and install the vitess chart."""
    logging.info('Initializing helm.')
    try:
      subprocess.check_output(['helm', 'init'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      # NOTE(review): the '%s' here is never interpolated -- the exception
      # stores the args tuple as-is; consider '%'-formatting the message.
      raise sandbox.SandboxError('Failed to initialize helm: %s', e.output)
    # helm init on a fresh cluster takes a while to be ready.
    # Wait until 'helm list' returns cleanly.
    with open(os.devnull, 'w') as devnull:
      start_time = time.time()
      # Poll for up to 2 minutes, every 5 seconds.
      while time.time() - start_time < 120:
        try:
          subprocess.check_call(['helm', 'list'], stdout=devnull,
                                stderr=devnull)
          logging.info('Helm is ready.')
          break
        except subprocess.CalledProcessError:
          time.sleep(5)
      else:
        raise sandbox.SandboxError(
            'Timed out waiting for helm to become ready.')
    logging.info('Installing helm.')
    try:
      subprocess.check_output(
          ['helm', 'install', os.path.join(os.environ['VTTOP'], 'helm/vitess'),
           '-n', self.sandbox_name, '--namespace', self.sandbox_name,
           '--replace', '--values', self.helm_config],
          stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      raise sandbox.SandboxError('Failed to install helm: %s' % e.output)
    logging.info('Finished installing helm.')
  def stop(self):
    """Uninstall the chart release and purge its history."""
    subprocess.call(['helm', 'delete', self.sandbox_name, '--purge'])
  def is_up(self):
    # Helm install blocks until done, so start() completing implies up.
    return True
  def is_down(self):
    # Down once no pods remain in the sandbox namespace.
    return not bool(subprocess.check_output(
        ['kubectl', 'get', 'pods', '--namespace', self.sandbox_name]))
class KubernetesResource(sandlet.SandletComponent):
  """A Kubernetes resource (pod, replicationcontroller, etc.).

  The resource is described by a template file containing {{param}}
  placeholders, filled in from template_params and applied via kubectl.
  """

  def __init__(self, name, sandbox_name, template_file, **template_params):
    super(KubernetesResource, self).__init__(name, sandbox_name)
    self.template_file = template_file
    self.template_params = template_params

  def _render_template(self):
    """Return the template text with all {{param}} placeholders filled in."""
    with open(self.template_file, 'r') as template_file:
      template = template_file.read()
    for name, value in self.template_params.items():
      template = re.sub('{{%s}}' % name, str(value), template)
    return template

  def _kubectl(self, action):
    """Run 'kubectl <action> -f' on the rendered template in our namespace."""
    with tempfile.NamedTemporaryFile() as f:
      f.write(self._render_template())
      f.flush()
      # Use an argument list instead of an os.system() shell string so the
      # namespace/file names are never subject to shell interpolation.
      subprocess.call(['kubectl', action, '--namespace', self.sandbox_name,
                       '-f', f.name])

  def start(self):
    super(KubernetesResource, self).start()
    self._kubectl('create')

  def stop(self):
    self._kubectl('delete')
    super(KubernetesResource, self).stop()
def get_forwarded_ip(service, namespace='default', max_wait_s=60):
  """Returns an external IP address exposed by a service.

  Polls kubectl about once per second until the load balancer publishes an
  ingress IP, giving up (and returning '') after max_wait_s seconds.
  """
  deadline = time.time() + max_wait_s
  command = ['kubectl', 'get', 'service', service,
             '--namespace=%s' % namespace, '-o', 'json']
  while time.time() < deadline:
    try:
      service_info = json.loads(subprocess.check_output(command))
      return service_info['status']['loadBalancer']['ingress'][0]['ip']
    except (KeyError, subprocess.CalledProcessError):
      time.sleep(1)
  return ''
| theskyinflames/bpulse-go-client | vendor/github.com/youtube/vitess/test/cluster/sandbox/kubernetes_components.py | Python | apache-2.0 | 4,618 | [
"VisIt"
] | 0a4fd12bb9bb1882b4e15fdf1a308c23191a7aca8bbca37569ccadf1fdcd93e8 |
import os
import pysam
import tempfile
import shutil
from iva import contig, mapping, seed, mummer, graph, edge, common
import pyfastaq
class Assembly:
    def __init__(self, contigs_file=None, map_index_k=15, map_index_s=3, threads=1, kmc_threads=1, max_insert=800, map_minid=0.5, min_clip=3, ext_min_cov=5, ext_min_ratio=2, ext_bases=100, verbose=0, seed_min_cov=5, seed_min_ratio=10, seed_min_kmer_count=200, seed_max_kmer_count=1000000000, seed_start_length=None, seed_stop_length=500, seed_overlap_length=None, make_new_seeds=False, contig_iter_trim=10, seed_ext_max_bases=50, max_contigs=50, clean=True, strand_bias=0):
        """Set up an (initially empty) assembly.
        If contigs_file is given, every sequence in it is loaded as a
        starting contig; otherwise the assembly starts empty and
        make_new_seeds is forced on so seeds get generated from reads.
        The remaining keyword arguments are tuning knobs (mapping index,
        extension/seed thresholds, trimming, etc.) stored as attributes
        for use by the assembly methods.
        """
        # name -> contig.Contig wrapper; lengths history per contig.
        self.contigs = {}
        self.contig_lengths = {}
        self.map_index_k = map_index_k
        self.map_index_s = map_index_s
        self.threads = threads
        self.kmc_threads = kmc_threads
        self.max_insert = max_insert
        self.map_minid = map_minid
        self.min_clip = min_clip
        self.ext_min_cov = ext_min_cov
        self.ext_min_ratio = ext_min_ratio
        self.ext_bases = ext_bases
        self.verbose = verbose
        self.clean = clean
        self.make_new_seeds = make_new_seeds
        self.seed_start_length = seed_start_length
        self.seed_stop_length = seed_stop_length
        self.seed_min_kmer_count = seed_min_kmer_count
        self.seed_max_kmer_count = seed_max_kmer_count
        self.seed_ext_max_bases = seed_ext_max_bases
        self.seed_overlap_length = seed_overlap_length
        self.seed_min_cov = seed_min_cov
        self.seed_min_ratio = seed_min_ratio
        self.contig_iter_trim = contig_iter_trim
        self.max_contigs = max_contigs
        self.strand_bias = strand_bias
        self.contigs_trimmed_for_strand_bias = set()
        self.used_seeds = set()
        if contigs_file is None:
            # No starting contigs: seeds must be created from the reads.
            self.make_new_seeds = True
        else:
            contigs = {}
            pyfastaq.tasks.file_to_dict(contigs_file, contigs)
            for ctg in contigs:
                self._add_contig(contigs[ctg])
def _add_contig(self, ctg, min_length=1):
if len(ctg) < min_length:
return
assert ctg.id not in self.contigs
assert len(ctg) > 0
self.contigs[ctg.id] = contig.Contig(ctg, verbose=self.verbose)
self.contig_lengths[ctg.id] = [[len(self.contigs[ctg.id]), 0, 0]]
def write_contigs_to_file(self, filename, min_length=None, do_not_write=None, only_write=None, biggest_first=False, order_by_orfs=False, prefix=None):
if do_not_write is None:
do_not_write = set()
if only_write is None:
only_write = set()
printed = 0
if min_length is None:
min_length = self.map_index_k + 1
f = pyfastaq.utils.open_file_write(filename)
if biggest_first:
contig_names = self._contig_names_size_order(biggest_first=True)
elif order_by_orfs:
names = self._get_contig_order_by_orfs()
contig_names = [x[0] for x in names]
contig_revcomp = [x[1] for x in names]
else:
contig_names = sorted(list(self.contigs.keys()))
for i in range(len(contig_names)):
name = contig_names[i]
if len(self.contigs[name]) >= min_length and name not in do_not_write and (name in only_write or len(only_write)==0):
if order_by_orfs and contig_revcomp[i]:
self.contigs[name].fa.revcomp()
if prefix is None:
print(self.contigs[name].fa, file=f)
else:
printed += 1
self.contigs[name].fa.id = prefix + '.' + str(printed).zfill(5)
print(self.contigs[name].fa, file=f)
self.contigs[name].fa.id = name
if order_by_orfs and contig_revcomp[i]:
self.contigs[name].fa.revcomp()
pyfastaq.utils.close(f)
def _get_contig_order_by_orfs(self, min_length=300):
longest_orfs = []
no_orfs = set()
ordered_names = []
for contig in self.contigs.values():
orfs = contig.fa.all_orfs(min_length)
reverse = False
max_length = 0
for coords, rev in orfs:
if len(coords) > max_length:
max_length = len(coords)
reverse = rev
if max_length > 0:
longest_orfs.append((contig.fa.id, max_length, reverse))
else:
no_orfs.add((contig.fa.id, len(contig.fa)))
all_in_size_order = self._contig_names_size_order(biggest_first=True)
ordered_names = sorted(longest_orfs, key=lambda x:x[1], reverse=True)
ordered_names = [(x[0], x[2]) for x in ordered_names]
for t in sorted(no_orfs, key=lambda x:x[1], reverse=True):
ordered_names.append((t[0], False))
return ordered_names
    def _map_reads(self, fwd_reads, rev_reads, out_prefix, required_flag=None, exclude_flag=None, sort_reads=False, mate_ref=None, no_map_contigs=None):
        """Map a pair of read files against the current contigs.

        Writes the current contigs (minus *no_map_contigs*) to a temporary
        reference fasta, delegates to mapping.map_reads() to produce
        out_prefix.bam, then deletes the reference files when self.clean is
        set. SAM flag filters and sorting are passed through to the mapper.

        NOTE(review): the mate_ref parameter is accepted but never forwarded
        to mapping.map_reads() — confirm whether it should be.
        """
        if no_map_contigs is None:
            no_map_contigs = set()
        if self.verbose:
            print(' map reads', fwd_reads, rev_reads, sep='\t')
        reference = out_prefix + '.ref.fa'
        self.write_contigs_to_file(reference, do_not_write=no_map_contigs)
        mapping.map_reads(fwd_reads, rev_reads, reference, out_prefix, index_k=self.map_index_k, index_s=self.map_index_s, threads=self.threads, max_insert=self.max_insert, minid=self.map_minid, verbose=self.verbose, required_flag=required_flag, sort=sort_reads, exclude_flag=exclude_flag)
        if self.clean:
            # Remove the temporary reference and its index.
            os.unlink(reference)
            os.unlink(reference + '.fai')
def _extend_contigs_with_bam(self, bam_in, out_prefix=None, output_all_useful_reads=False):
if out_prefix is not None:
fa_out1 = pyfastaq.utils.open_file_write(out_prefix + '_1.fa')
fa_out2 = pyfastaq.utils.open_file_write(out_prefix + '_2.fa')
keep_read_types = set([mapping.CAN_EXTEND_LEFT, mapping.CAN_EXTEND_RIGHT, mapping.KEEP])
if output_all_useful_reads:
keep_read_types.add(mapping.BOTH_UNMAPPED)
previous_sam = None
left_seqs = []
right_seqs = []
sam_reader = pysam.Samfile(bam_in, "rb")
for current_sam in sam_reader.fetch(until_eof=True):
if previous_sam is None:
previous_sam = current_sam
continue
previous_type, current_type = mapping.get_pair_type(previous_sam, current_sam, self._get_ref_length_sam_pair(sam_reader, previous_sam, current_sam), self.max_insert, min_clip=self.min_clip)
for sam, sam_type in [(previous_sam, previous_type), (current_sam, current_type)]:
if sam_type == mapping.CAN_EXTEND_LEFT:
name = mapping.get_ref_name(sam, sam_reader)
clipped = mapping.soft_clipped(sam)[0]
self.contigs[name].add_left_kmer(common.decode(sam.seq[:clipped]))
elif sam_type == mapping.CAN_EXTEND_RIGHT:
name = mapping.get_ref_name(sam, sam_reader)
self.contigs[name].add_right_kmer(common.decode(sam.seq[sam.qend:]))
if out_prefix is not None and sam_type in keep_read_types:
if sam.is_read1:
print(mapping.sam_to_fasta(sam), file=fa_out1)
else:
print(mapping.sam_to_fasta(sam), file=fa_out2)
previous_sam = None
if out_prefix is not None:
pyfastaq.utils.close(fa_out1)
pyfastaq.utils.close(fa_out2)
total_bases_added = 0
for ctg in self.contigs:
left_length, right_length = self.contigs[ctg].extend(self.ext_min_cov, self.ext_min_ratio, self.ext_bases)
if self.verbose:
print(' extend contig ' + ctg, 'new_length:' + str(len(self.contigs[ctg])), 'added_left:' + str(left_length), 'added_right:' + str(right_length), sep='\t')
self.contig_lengths[ctg].append([len(self.contigs[ctg]), left_length, right_length])
total_bases_added += left_length + right_length
return total_bases_added
def _trim_contig_for_strand_bias(self, bam, ctg_name):
assert os.path.exists(bam)
if ctg_name in self.contigs_trimmed_for_strand_bias:
return
ctg_length = len(self.contigs[ctg_name])
fwd_cov = mapping.get_bam_region_coverage(bam, ctg_name, ctg_length)
rev_cov = mapping.get_bam_region_coverage(bam, ctg_name, ctg_length, rev=True)
first_good_base = 0
while first_good_base < ctg_length:
total_cov = fwd_cov[first_good_base] + rev_cov[first_good_base]
if total_cov >= self.ext_min_cov and min(fwd_cov[first_good_base], rev_cov[first_good_base]) / total_cov >= self.strand_bias:
break
first_good_base += 1
last_good_base = ctg_length - 1
while last_good_base > first_good_base:
total_cov = fwd_cov[last_good_base] + rev_cov[last_good_base]
if total_cov >= self.ext_min_cov and min(fwd_cov[last_good_base], rev_cov[last_good_base]) / total_cov >= self.strand_bias:
break
last_good_base -= 1
if self.verbose >= 2:
print('Trimming strand biased ends of contig', ctg_name, '- good base range is', first_good_base + 1, 'to', last_good_base + 1, 'from', ctg_length, 'bases')
self.contigs[ctg_name].fa.seq = self.contigs[ctg_name].fa.seq[first_good_base:last_good_base+1]
def _good_intervals_from_strand_coverage(self, fwd_cov, rev_cov):
assert len(fwd_cov) == len(rev_cov)
good_intervals = []
start = None
cov_ok = False
for i in range(len(fwd_cov)):
total_cov = fwd_cov[i] + rev_cov[i]
cov_ok = total_cov >= self.ext_min_cov and min(fwd_cov[i], rev_cov[i]) / total_cov >= self.strand_bias
if cov_ok:
if start is None:
start = i
else:
if start is not None:
good_intervals.append((start, i-1))
start = None
if cov_ok and start is not None:
good_intervals.append((start, i-1))
return good_intervals
def _subcontigs_from_strand_bias(self, bam, ctg_name):
ctg_length = len(self.contigs[ctg_name])
fwd_cov = mapping.get_bam_region_coverage(bam, ctg_name, ctg_length)
rev_cov = mapping.get_bam_region_coverage(bam, ctg_name, ctg_length, rev=True)
good_intervals = self._good_intervals_from_strand_coverage(fwd_cov, rev_cov)
new_contigs = []
if len(good_intervals) == 1:
self.contigs[ctg_name].fa.seq = self.contigs[ctg_name].fa.seq[good_intervals[0][0]:good_intervals[0][1]+1]
elif len(good_intervals) > 1:
for i in range(len(good_intervals)):
start = good_intervals[i][0]
end = good_intervals[i][1]
if end - start + 1 >= 100:
new_contigs.append(pyfastaq.sequences.Fasta(ctg_name + '.' + str(i+1), self.contigs[ctg_name].fa[start:end+1]))
return new_contigs
def _trim_strand_biased_ends(self, reads_prefix, out_prefix=None, tag_as_trimmed=False, break_contigs=False):
tmpdir = tempfile.mkdtemp(prefix='tmp.trim_strand_biased_ends.', dir=os.getcwd())
tmp_prefix = os.path.join(tmpdir, 'out')
sorted_bam = tmp_prefix + '.bam'
unsorted_bam = tmp_prefix + '.unsorted.bam'
original_map_minid = self.map_minid
self.map_minid = 0.9
self._map_reads(reads_prefix + '_1.fa', reads_prefix + '_2.fa', tmp_prefix, sort_reads=True)
assert os.path.exists(sorted_bam)
self.map_minid = original_map_minid
new_contigs = []
contigs_to_remove = set()
for ctg in self.contigs:
if break_contigs:
subcontigs = self._subcontigs_from_strand_bias(sorted_bam, ctg)
if len(subcontigs):
new_contigs.extend(subcontigs)
contigs_to_remove.add(ctg)
elif ctg not in self.contigs_trimmed_for_strand_bias:
self._trim_contig_for_strand_bias(sorted_bam, ctg)
# contig could get completely trimmed so nothing left, in which
# case, we need to remove it
if len(self.contigs[ctg]) == 0:
contigs_to_remove.add(ctg)
elif tag_as_trimmed:
self.contigs_trimmed_for_strand_bias.add(ctg)
for ctg in contigs_to_remove:
self._remove_contig(ctg)
for ctg in new_contigs:
self._add_contig(ctg, min_length=0.75 * self.self.seed_stop_length)
if out_prefix is not None:
mapping.bam_file_to_fasta_pair_files(unsorted_bam, out_prefix + '_1.fa', out_prefix + '_2.fa', remove_proper_pairs=True)
shutil.rmtree(tmpdir)
    def trim_contigs(self, trim):
        """Trim *trim* bases off each end of every contig still worth extending.

        Also subtracts 2 * trim from the latest recorded length so the
        extension history stays consistent with the trimmed sequence.
        """
        for ctg in self.contigs:
            if self._contig_worth_extending(ctg):
                self.contigs[ctg].fa.trim(trim, trim)
                self.contig_lengths[ctg][-1][0] -= 2 * trim
def _contig_worth_extending(self, name):
if name in self.contigs_trimmed_for_strand_bias:
return False
return len(self.contig_lengths[name]) < 3 \
or self.contig_lengths[name][-1][0] > max([self.contig_lengths[name][x][0] for x in range(len(self.contig_lengths[name])-2)])
def _worth_extending(self):
for ctg in self.contigs:
if self._contig_worth_extending(ctg):
return True
return False
def _extend_with_reads(self, reads_prefix, out_prefix, no_map_contigs):
tmpdir = tempfile.mkdtemp(prefix='tmp.extend.', dir=os.getcwd())
tmp_prefix = os.path.join(tmpdir, 'reads')
total_bases_added = 0
for i in range(5):
bam_prefix = out_prefix + '.' + str(i+1) + '.map'
bam = bam_prefix + '.bam'
self._map_reads(reads_prefix + '_1.fa', reads_prefix + '_2.fa', bam_prefix, no_map_contigs=no_map_contigs)
reads_prefix = tmp_prefix + '.' + str(i)
bases_added = self._extend_contigs_with_bam(bam, out_prefix=reads_prefix)
total_bases_added += bases_added
if self.clean:
os.unlink(bam)
if bases_added < 0.2 * self.ext_bases:
break
shutil.rmtree(tmpdir)
return total_bases_added
def _read_pair_extension_iterations(self, reads_prefix, out_prefix, no_map_contigs=None):
if no_map_contigs is None:
no_map_contigs = set()
assert(len(self.contigs) > len(no_map_contigs))
if self.verbose:
print('{:-^79}'.format(' ' + out_prefix + ' start extension subiteration 0001 '), flush=True)
bases_added = self._extend_with_reads(reads_prefix, out_prefix + '.1', no_map_contigs)
current_reads_prefix = reads_prefix
if bases_added == 0:
return True
try_contig_trim = False
i = 1
while self._worth_extending() or try_contig_trim:
i += 1
if self.verbose:
print('{:-^79}'.format(' ' + out_prefix + ' start extension subiteration ' + str(i).zfill(4) + ' '), flush=True)
if i % 5 == 0:
tmpdir = tempfile.mkdtemp(prefix='tmp.filter_reads.', dir=os.getcwd())
tmp_prefix = os.path.join(tmpdir, 'out')
bam = tmp_prefix + '.bam'
original_map_minid = self.map_minid
self.map_minid = 0.9
self._map_reads(current_reads_prefix + '_1.fa', current_reads_prefix + '_2.fa', tmp_prefix)
self.map_minid = original_map_minid
filter_prefix = reads_prefix + '.subiter.' + str(i) + '.reads'
mapping.bam_file_to_fasta_pair_files(bam, filter_prefix + '_1.fa', filter_prefix + '_2.fa', remove_proper_pairs=True)
if current_reads_prefix != reads_prefix:
os.unlink(current_reads_prefix + '_1.fa')
os.unlink(current_reads_prefix + '_2.fa')
current_reads_prefix = filter_prefix
shutil.rmtree(tmpdir)
iter_prefix = out_prefix + '.' + str(i)
bases_added = self._extend_with_reads(current_reads_prefix, iter_prefix, no_map_contigs)
if bases_added == 0:
if not try_contig_trim:
if self.verbose:
print(' No bases added. Try trimming contigs')
self._trim_strand_biased_ends(reads_prefix, tag_as_trimmed=False)
if len(self.contigs) <= len(no_map_contigs):
if self.verbose:
print(' lost contigs during trimming. No more iterations')
return False
self.trim_contigs(self.contig_iter_trim)
try_contig_trim = True
else:
if self.verbose:
print(' No bases added after trimming. No more iterations')
break
else:
try_contig_trim = False
if current_reads_prefix != reads_prefix:
os.unlink(current_reads_prefix + '_1.fa')
os.unlink(current_reads_prefix + '_2.fa')
return True
def read_pair_extend(self, reads_prefix, out_prefix):
assert(len(self.contigs))
current_reads_prefix = reads_prefix
i = 1
new_seed_name = 'dummy'
while 1:
if self.verbose:
print('{:_^79}'.format(' START ITERATION ' + str(i) + ' '), flush=True)
self._read_pair_extension_iterations(current_reads_prefix, out_prefix + '.' + str(i))
filtered_reads_prefix = out_prefix + '.' + str(i) + '.filtered'
self._trim_strand_biased_ends(reads_prefix, tag_as_trimmed=True, out_prefix=filtered_reads_prefix)
self._remove_contained_contigs(list(self.contigs.keys()))
self._merge_overlapping_contigs(list(self.contigs.keys()))
if reads_prefix != current_reads_prefix:
os.unlink(current_reads_prefix + '_1.fa')
os.unlink(current_reads_prefix + '_2.fa')
current_reads_prefix = filtered_reads_prefix
i += 1
reads_left = os.path.getsize(current_reads_prefix + '_1.fa') > 0 and os.path.getsize(current_reads_prefix + '_2.fa') > 0
if not self.make_new_seeds or new_seed_name is None or not self.make_new_seeds or len(self.contigs) >= self.max_contigs or not reads_left:
if reads_prefix != current_reads_prefix:
os.unlink(current_reads_prefix + '_1.fa')
os.unlink(current_reads_prefix + '_2.fa')
break
if self.verbose:
print('{:_^79}'.format(' Try making new seed '), flush=True)
new_seed_name = self.add_new_seed_contig(current_reads_prefix + '_1.fa', current_reads_prefix + '_2.fa')
if new_seed_name is None:
if self.verbose:
print('Couldn\'t make new seed and extend it. Stopping assembly.')
if len(self.contigs) == 0:
print('No contigs made.')
print('Read coverage may be too low, in which case try reducing --seed_min_kmer_cov, --ext_min_cov and --seed_ext_min_cov.')
print('Alternatively --max_insert could be incorrect, which is currently set to:', self.max_insert)
if reads_prefix != current_reads_prefix:
os.unlink(current_reads_prefix + '_1.fa')
os.unlink(current_reads_prefix + '_2.fa')
break
def _run_nucmer(self, contigs_to_use=None):
if contigs_to_use is None:
contigs_to_use = set()
if len(contigs_to_use) + len(self.contigs) <= 1:
return []
tmpdir = tempfile.mkdtemp(prefix='tmp.remove_self_contigs.', dir=os.getcwd())
nucmer_out = os.path.join(tmpdir, 'nucmer.out')
contigs_fasta = os.path.join(tmpdir, 'contigs.fa')
self.write_contigs_to_file(contigs_fasta, only_write=contigs_to_use)
mummer.run_nucmer(contigs_fasta, contigs_fasta, nucmer_out)
hits = [hit for hit in mummer.file_reader(nucmer_out) if not hit.is_self_hit()]
for hit in hits:
hit.sort()
hits = list(set(hits))
shutil.rmtree(tmpdir)
return hits
def _remove_contained_contigs(self, contigs):
if len(contigs) <= 1:
return
hits = self._run_nucmer(contigs_to_use=contigs)
for contig in self._contig_names_size_order()[:-1]:
if self._contig_contained_in_nucmer_hits(hits, contig, 95):
hits = self._remove_contig_from_nucmer_hits(hits, contig)
self._remove_contig(contig)
contigs.remove(contig)
    def _coords_to_new_contig(self, coords_list):
        """Build one new Fasta by concatenating slices of existing contigs.

        coords_list is a list of (contig_name, interval, reverse) tuples;
        each slice is reverse complemented when flagged. The new contig
        takes the name of the first contig in the list.
        """
        new_contig = pyfastaq.sequences.Fasta(coords_list[0][0], '')
        for name, coords, reverse in coords_list:
            assert name in self.contigs
            if reverse:
                # Copy the slice into a throwaway Fasta so it can be
                # reverse complemented in place before appending.
                seq = pyfastaq.sequences.Fasta('ni', self.contigs[name].fa.seq[coords.start:coords.end+1])
                seq.revcomp()
                new_contig.seq += seq.seq
            else:
                new_contig.seq += self.contigs[name].fa.seq[coords.start:coords.end+1]
        return new_contig
def _merge_overlapping_contigs(self, contigs):
if len(contigs) <= 1:
return
hits = self._run_nucmer(contigs)
assembly_graph = graph.Graph(self, contigs=contigs)
for hit in hits:
e = hit.to_graph_edge()
if e is not None:
assembly_graph.add_edge(e)
for connected_component in assembly_graph.connected_components():
if len(connected_component) < 2:
continue
simple_path = assembly_graph.find_simple_path(connected_component)
assert assembly_graph.simple_path_is_consistent(simple_path)
if len(simple_path) > 1:
simple_path = assembly_graph.remove_redundant_nodes_from_simple_path(simple_path)
coords = assembly_graph.merged_coords_from_simple_nonredundant_path(simple_path)
new_contig = self._coords_to_new_contig(coords)
for name, x, y in coords:
self._remove_contig(name)
self._add_contig(new_contig)
def _contig_names_size_order(self, biggest_first=False):
return sorted(self.contigs, key=lambda x:len(self.contigs[x]), reverse=biggest_first)
    def _contig_contained_in_nucmer_hits(self, hits, contig, min_percent):
        """Return True if >= min_percent of *contig* is covered by nucmer hits.

        Only hits that involve *contig* and are against a different contig
        are considered; their query coordinates are merged before the
        covered fraction is measured.
        """
        assert contig in self.contigs
        # NOTE(review): contig_length is assigned but never read below;
        # the final division recomputes the length instead.
        contig_length = len(self.contigs[contig])
        coords = []
        for hit in [hit for hit in hits if contig in [hit.qry_name, hit.ref_name] and hit.qry_name != hit.ref_name]:
            # qry_start/qry_end may be swapped for reverse-strand hits.
            start = min(hit.qry_start, hit.qry_end)
            end = max(hit.qry_start, hit.qry_end)
            coords.append(pyfastaq.intervals.Interval(start, end))
        if len(coords) == 0:
            return False
        pyfastaq.intervals.merge_overlapping_in_list(coords)
        total_bases_matched = pyfastaq.intervals.length_sum_from_list(coords)
        return min_percent <= 100.0 * total_bases_matched / len(self.contigs[contig])
def _remove_contig_from_nucmer_hits(self, hits, contig):
return [x for x in hits if contig not in [x.ref_name, x.qry_name]]
def _remove_contig(self, contig):
if contig in self.contigs:
del self.contigs[contig]
if contig in self.contig_lengths:
del self.contig_lengths[contig]
if contig in self.contigs_trimmed_for_strand_bias:
self.contigs_trimmed_for_strand_bias.remove(contig)
    def _get_ref_length(self, samfile, sam):
        """Return the length of the contig that the read *sam* maps to,
        or None for an unmapped read."""
        if sam.is_unmapped:
            return None
        else:
            return len(self.contigs[mapping.get_ref_name(sam, samfile)])
def _get_ref_length_sam_pair(self, samfile, sam1, sam2):
len1 = self._get_ref_length(samfile, sam1)
len2 = self._get_ref_length(samfile, sam2)
if len1 == len2:
return len1
else:
return None
    def _get_unmapped_pairs(self, reads1, reads2, out_prefix):
        """Write read pairs where both mates are unmapped to out_prefix_1.fa
        and out_prefix_2.fa.

        required_flag=12 keeps only records with both the 'read unmapped'
        (0x4) and 'mate unmapped' (0x8) SAM flags set.
        """
        self._map_reads(reads1, reads2, out_prefix, required_flag=12)
        mapping.bam_file_to_fasta_pair_files(out_prefix + '.bam', out_prefix + '_1.fa', out_prefix + '_2.fa')
        os.unlink(out_prefix + '.bam')
def add_new_seed_contig(self, reads1, reads2, contig_name=None, max_attempts=10):
if len(self.contigs):
tmpdir = tempfile.mkdtemp(prefix='tmp.make_seed.', dir=os.getcwd())
tmp_prefix = os.path.join(tmpdir, 'out')
seed_reads1 = tmp_prefix + '_1.fa'
seed_reads2 = tmp_prefix + '_2.fa'
if contig_name is not None:
self._map_reads(reads1, reads2, tmp_prefix, required_flag=5, exclude_flag=8, mate_ref=contig_name)
mapping.bam_to_fasta(tmp_prefix + '.bam', seed_reads1)
seed_reads2 = None
else:
self._get_unmapped_pairs(reads1, reads2, tmp_prefix)
else:
seed_reads1 = reads1
seed_reads2 = reads2
made_seed = False
for i in range(max_attempts):
s = seed.Seed(reads1=seed_reads1, reads2=seed_reads2, extend_length=self.seed_ext_max_bases, seed_length=self.seed_start_length, seed_min_count=self.seed_min_kmer_count, seed_max_count=self.seed_max_kmer_count, ext_min_cov=self.seed_min_cov, ext_min_ratio=self.seed_min_ratio, verbose=self.verbose, kmc_threads=self.kmc_threads, map_threads=self.threads, sequences_to_ignore=self.used_seeds, contigs_to_check=self.contigs)
if s.seq is None or len(s.seq) == 0:
break
if self.seed_overlap_length is None:
s.overlap_length = len(s.seq)
else:
s.overlap_length = self.seed_overlap_length
s.extend(reads1, reads2, self.seed_stop_length)
self.used_seeds.add(s.seq)
if len(s.seq) >= 0.75 * self.seed_stop_length:
made_seed = True
break
elif self.verbose:
print(" Couldn't extend seed enough. That was attempt", i+1, 'of', max_attempts, flush=True)
if len(self.contigs):
shutil.rmtree(tmpdir)
if not made_seed or len(s.seq) == 0:
return None
if self.verbose:
print(" Extended seed OK.", flush=True)
new_name = 'seeded.' + '1'.zfill(5)
i = 1
while new_name in self.contigs:
i += 1
new_name = 'seeded.' + str(i).zfill(5)
self._add_contig(pyfastaq.sequences.Fasta(new_name, s.seq))
return new_name
| satta/iva | iva/assembly.py | Python | gpl-3.0 | 27,357 | [
"pysam"
] | 0f966d2d48e1faaecc9adf1d0d7091ceeba07728052ae324898e87de57d1ab93 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver.p4util import temp_circular_import_blocker
from psi4.driver import qcdb
from psi4.driver.p4util.exceptions import *
def molecule_set_attr(self, name, value):
    """Replacement ``__setattr__`` for the molecule class.

    Names recognized as molecule variables are stored through
    ``set_variable``; everything else is set as an ordinary attribute.
    """
    # Look methods up via object.__getattribute__ so the overridden
    # attribute hooks on the molecule class are not re-entered.
    is_variable = object.__getattribute__(self, "is_variable")
    if is_variable(name):
        set_variable = object.__getattribute__(self, "set_variable")
        set_variable(name, value)
    else:
        object.__setattr__(self, name, value)
def molecule_get_attr(self, name):
    """Replacement ``__getattr__`` for the molecule class.

    Names recognized as molecule variables are fetched through
    ``get_variable``; anything else falls back to normal attribute lookup.
    """
    is_variable = object.__getattribute__(self, "is_variable")
    if not is_variable(name):
        return object.__getattribute__(self, name)
    getter = object.__getattribute__(self, "get_variable")
    return getter(name)
@classmethod
def molecule_from_string(cls,
                         molstr,
                         dtype=None,
                         name=None,
                         fix_com=None,
                         fix_orientation=None,
                         fix_symmetry=None,
                         return_dict=False,
                         enable_qm=True,
                         enable_efp=True,
                         missing_enabled_return_qm='none',
                         missing_enabled_return_efp='none',
                         verbose=1):
    """Construct Molecule from a string in a recognized molecule format.

    Light wrapper around :py:func:`~qcelemental.molparse.from_string`.

    Parameters
    ----------
    See :py:func:`~qcelemental.molparse.from_string`.

    Returns
    -------
    :py:class:`psi4.core.Molecule`
        Built from the ``qm`` domain of the parsed molecule. When
        `return_dict` is True, the full validated molecule dictionary is
        returned as a second value.
    """
    molrec = qcel.molparse.from_string(
        molstr=molstr,
        dtype=dtype,
        name=name,
        fix_com=fix_com,
        fix_orientation=fix_orientation,
        fix_symmetry=fix_symmetry,
        return_processed=False,
        enable_qm=enable_qm,
        enable_efp=enable_efp,
        missing_enabled_return_qm=missing_enabled_return_qm,
        missing_enabled_return_efp=missing_enabled_return_efp,
        verbose=verbose)
    if return_dict:
        return core.Molecule.from_dict(molrec['qm']), molrec
    else:
        return core.Molecule.from_dict(molrec['qm'])
@classmethod
def molecule_from_arrays(cls,
geom=None,
elea=None,
elez=None,
elem=None,
mass=None,
real=None,
elbl=None,
name=None,
units='Angstrom',
input_units_to_au=None,
fix_com=None,
fix_orientation=None,
fix_symmetry=None,
fragment_separators=None,
fragment_charges=None,
fragment_multiplicities=None,
molecular_charge=None,
molecular_multiplicity=None,
comment=None,
provenance=None,
connectivity=None,
missing_enabled_return='error',
tooclose=0.1,
zero_ghost_fragments=False,
nonphysical=False,
mtol=1.e-3,
verbose=1,
return_dict=False):
"""Construct Molecule from unvalidated arrays and variables.
Light wrapper around :py:func:`~qcelemental.molparse.from_arrays`
that is a full-featured constructor to dictionary representa-
tion of Molecule. This follows one step further to return
Molecule instance.
Parameters
----------
See :py:func:`~qcelemental.molparse.from_arrays`.
Returns
-------
:py:class:`psi4.core.Molecule`
"""
molrec = qcel.molparse.from_arrays(
geom=geom,
elea=elea,
elez=elez,
elem=elem,
mass=mass,
real=real,
elbl=elbl,
name=name,
units=units,
input_units_to_au=input_units_to_au,
fix_com=fix_com,
fix_orientation=fix_orientation,
fix_symmetry=fix_symmetry,
fragment_separators=fragment_separators,
fragment_charges=fragment_charges,
fragment_multiplicities=fragment_multiplicities,
molecular_charge=molecular_charge,
molecular_multiplicity=molecular_multiplicity,
comment=comment,
provenance=provenance,
connectivity=connectivity,
domain='qm',
missing_enabled_return=missing_enabled_return,
tooclose=tooclose,
zero_ghost_fragments=zero_ghost_fragments,
nonphysical=nonphysical,
mtol=mtol,
verbose=verbose)
if return_dict:
return core.Molecule.from_dict(molrec), molrec
else:
return core.Molecule.from_dict(molrec)
@classmethod
def molecule_from_schema(cls, molschema, return_dict=False, nonphysical=False, verbose=1):
"""Construct Molecule from non-Psi4 schema.
Light wrapper around :py:func:`~psi4.core.Molecule.from_arrays`.
Parameters
----------
molschema : dict
Dictionary form of Molecule following known schema.
return_dict : bool, optional
Additionally return Molecule dictionary intermediate.
nonphysical : bool, optional
Do allow masses outside an element's natural range to pass validation?
verbose : int, optional
Amount of printing.
Returns
-------
mol : :py:class:`psi4.core.Molecule`
molrec : dict, optional
Dictionary representation of instance.
Only provided if `return_dict` is True.
"""
molrec = qcel.molparse.from_schema(molschema, nonphysical=nonphysical, verbose=verbose)
qmol = core.Molecule.from_dict(molrec)
geom = np.array(molrec["geom"]).reshape((-1, 3))
qmol._initial_cartesian = core.Matrix.from_array(geom)
if return_dict:
return qmol, molrec
else:
return qmol
def dynamic_variable_bind(cls):
    """Function to dynamically add extra members to
    the core.Molecule class.

    Grafts attribute hooks and a number of pure-Python qcdb.Molecule
    methods onto the C++-backed class so they are available on every
    core.Molecule instance.
    """
    # Route attribute access through the molecule-variable helpers.
    cls.__setattr__ = molecule_set_attr
    cls.__getattr__ = molecule_get_attr
    # Borrow conversion, alignment, and formatting methods from qcdb.Molecule.
    cls.to_arrays = qcdb.Molecule.to_arrays
    cls.to_dict = qcdb.Molecule.to_dict
    cls.BFS = qcdb.Molecule.BFS
    cls.B787 = qcdb.Molecule.B787
    cls.scramble = qcdb.Molecule.scramble
    # Alternate constructors defined in this module.
    cls.from_arrays = molecule_from_arrays
    cls.from_string = molecule_from_string
    cls.to_string = qcdb.Molecule.to_string
    cls.from_schema = molecule_from_schema
    cls.to_schema = qcdb.Molecule.to_schema
    cls.run_dftd3 = qcdb.Molecule.run_dftd3
    cls.format_molecule_for_mol = qcdb.Molecule.format_molecule_for_mol
dynamic_variable_bind(core.Molecule) # pass class type, not class instance
#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
# O 1.0 0.0 0.0
# H 0.0 1.0 0.0
# H 0.0 0.0 0.0
#
def geometry(geom, name="default"):
    """Function to create a molecule object of name *name* from the
    geometry in string *geom*. Permitted for user use but deprecated
    in driver in favor of explicit molecule-passing. Comments within
    the string are filtered.
    """
    molrec = qcel.molparse.from_string(
        geom, enable_qm=True, missing_enabled_return_qm='minimal', enable_efp=True, missing_enabled_return_efp='none')
    molecule = core.Molecule.from_dict(molrec['qm'])
    if "geom" in molrec["qm"]:
        # Stash the pre-symmetrization Cartesian geometry, converted to Bohr.
        geom = np.array(molrec["qm"]["geom"]).reshape((-1, 3))
        if molrec["qm"]["units"] == "Angstrom":
            geom = geom / qcel.constants.bohr2angstroms
        molecule._initial_cartesian = core.Matrix.from_array(geom)
    molecule.set_name(name)
    if 'efp' in molrec:
        try:
            import pylibefp
        except ImportError as e:  # py36 ModuleNotFoundError
            raise ImportError("""Install pylibefp to use EFP functionality. `conda install pylibefp -c psi4` Or build with `-DENABLE_libefp=ON`""") from e
        #print('Using pylibefp: {} (version {})'.format(pylibefp.__file__, pylibefp.__version__))
        efpobj = pylibefp.from_dict(molrec['efp'])
        # pylibefp.core.efp rides along on molecule
        molecule.EFP = efpobj
    # Attempt to go ahead and construct the molecule
    try:
        molecule.update_geometry()
    except Exception:
        # Bug fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
                       " once all variables are set.\n")
    activate(molecule)
    return molecule
def activate(mol):
    """Function to set molecule object *mol* as the current active molecule.
    Permitted for user use but deprecated in driver in favor of explicit
    molecule-passing.
    """
    # Subsequent driver calls without an explicit molecule operate on *mol*.
    core.set_active_molecule(mol)
| ashutoshvt/psi4 | psi4/driver/molutil.py | Python | lgpl-3.0 | 9,732 | [
"Psi4"
] | 38ba71bc1bac46b3c72164d6c1c5e5cd9a423bf1f9db83997383373bf84b8289 |
# -*- coding: utf-8 -*-
"""
==============================
1D Wasserstein barycenter demo
==============================
This example illustrates the computation of regularized Wassersyein Barycenter
as proposed in [3].
[3] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyré, G. (2015).
Iterative Bregman projections for regularized transportation problems
SIAM Journal on Scientific Computing, 37(2), A1111-A1138.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
# necessary for 3d plot even if not used
from mpl_toolkits.mplot3d import Axes3D # noqa
from matplotlib.collections import PolyCollection
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n, m=60, s=8)
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
n_distributions = A.shape[1]
# loss matrix + normalization
M = ot.utils.dist0(n)
M /= M.max()
##############################################################################
# Plot data
# ---------
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.tight_layout()
##############################################################################
# Barycenter computation
# ----------------------
#%% barycenter computation
alpha = 0.2 # 0<=alpha<=1
weights = np.array([1 - alpha, alpha])
# l2bary
bary_l2 = A.dot(weights)
# wasserstein
reg = 1e-3
bary_wass = ot.bregman.barycenter(A, M, reg, weights)
pl.figure(2)
pl.clf()
pl.subplot(2, 1, 1)
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.subplot(2, 1, 2)
pl.plot(x, bary_l2, 'r', label='l2')
pl.plot(x, bary_wass, 'g', label='Wasserstein')
pl.legend()
pl.title('Barycenters')
pl.tight_layout()
##############################################################################
# Barycentric interpolation
# -------------------------
#%% barycenter interpolation

n_alpha = 11
alpha_list = np.linspace(0, 1, n_alpha)
B_l2 = np.zeros((n, n_alpha))
B_wass = np.copy(B_l2)

# One barycenter column per interpolation weight alpha.
for i in range(0, n_alpha):
    alpha = alpha_list[i]
    weights = np.array([1 - alpha, alpha])
    B_l2[:, i] = A.dot(weights)
    B_wass[:, i] = ot.bregman.barycenter(A, M, reg, weights)

#%% plot interpolation


def _plot_interpolation_3d(fig_num, B, title):
    """Waterfall plot of barycenter curves in B (one column per alpha).

    Each column is drawn as a filled polygon placed at its alpha value
    along the y axis and colored by alpha.
    """
    pl.figure(fig_num)
    cmap = pl.cm.get_cmap('viridis')
    verts = [list(zip(x, B[:, i])) for i in range(B.shape[1])]
    # NOTE(review): gcf().gca(projection='3d') is removed in matplotlib
    # >= 3.6; switch to fig.add_subplot(projection='3d') when upgrading.
    ax = pl.gcf().gca(projection='3d')
    poly = PolyCollection(verts, facecolors=[cmap(a) for a in alpha_list])
    poly.set_alpha(0.7)
    ax.add_collection3d(poly, zs=alpha_list, zdir='y')
    ax.set_xlabel('x')
    ax.set_xlim3d(0, n)
    ax.set_ylabel('$\\alpha$')
    ax.set_ylim3d(0, 1)
    ax.set_zlabel('')
    # Bug fix: the Wasserstein figure previously reused B_l2.max() for its
    # z limit; each figure is now scaled by its own data.
    ax.set_zlim3d(0, B.max() * 1.01)
    pl.title(title)
    pl.tight_layout()


_plot_interpolation_3d(3, B_l2, 'Barycenter interpolation with l2')
_plot_interpolation_3d(4, B_wass, 'Barycenter interpolation with Wasserstein')

pl.show()
| rflamary/POT | examples/plot_barycenter_1D.py | Python | mit | 3,676 | [
"Gaussian"
] | 345ff02819a78e1caf9c9f86f04da67c2068521ad8d7cad2d3a5a39d9ee549c3 |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Implementation for `pmg structure` CLI.
"""
import sys
from tabulate import tabulate
from pymatgen import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher, \
ElementComparator
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Aug 13 2016"
def convert_fmt(args):
    """
    Convert files from one format to another

    Args:
        args (dict): Args from argparse. args.filenames must contain
            exactly [input_file, output_file]; a "prim" substring in the
            output name requests the primitive cell.
    """
    if len(args.filenames) != 2:
        print("File format conversion takes in only two filenames.")
        # Bug fix: previously fell through and attempted the conversion
        # anyway, raising IndexError or converting the wrong files.
        sys.exit(-1)
    s = Structure.from_file(args.filenames[0],
                            primitive="prim" in args.filenames[1].lower())
    s.to(filename=args.filenames[1])
def analyze_symmetry(args):
    """
    Analyze symmetry of structures in files.

    Args:
        args (dict): Args from argparse; ``args.symmetry`` is the spglib
            tolerance and ``args.filenames`` lists the structure files.
    """
    tol = args.symmetry
    rows = []
    for fname in args.filenames:
        structure = Structure.from_file(fname, primitive=False)
        dataset = SpacegroupAnalyzer(structure, tol).get_symmetry_dataset()
        rows.append([fname, dataset["international"], dataset["number"],
                     dataset["hall"]])
    print(tabulate(rows, headers=["Filename", "Int Symbol", "Int number", "Hall"]))
def analyze_localenv(args):
    """
    Analyze local env of structures in files.

    Args:
        args (dict): Args for argparse; ``args.localenv`` lists bond specs of
            the form "A-B=dist" and ``args.filenames`` the structure files.
    """
    # Parse "Center-Ligand=cutoff" specs into {(center, ligand): cutoff}.
    bonds = {}
    for spec in args.localenv:
        pair, _, cutoff = spec.partition("=")
        center, ligand = pair.split("-")[:2]
        bonds[(center, ligand)] = float(cutoff)
    for fname in args.filenames:
        print("Analyzing %s..." % fname)
        rows = []
        structure = Structure.from_file(fname)
        for idx, site in enumerate(structure):
            site_symbols = [sp.symbol for sp in site.species.keys()]
            for (center, ligand), cutoff in bonds.items():
                if center not in site_symbols:
                    continue
                # Distances to all neighbours of the ligand species within
                # the cutoff, sorted ascending and rendered as a string.
                neighbour_dists = sorted(
                    d for nn, d in structure.get_neighbors(site, cutoff)
                    if ligand in [sp.symbol for sp in nn.species.keys()])
                rows.append([idx, center, ligand,
                             ", ".join(["%.3f" % d for d in neighbour_dists])])
        print(tabulate(rows, headers=["#", "Center", "Ligand", "Dists"]))
def compare_structures(args):
    """
    Compare structures in files for similarity using structure matcher.

    Args:
        args (dict): Args from argparse.
    """
    fnames = args.filenames
    if len(fnames) < 2:
        print("You need more than one structure to compare!")
        sys.exit(-1)
    try:
        structures = [Structure.from_file(fn) for fn in fnames]
    except Exception as ex:
        print("Error converting file. Are they in the right format?")
        print(str(ex))
        sys.exit(-1)
    # Species-level matching by default; element-level when requested.
    if args.group == "species":
        matcher = StructureMatcher()
    else:
        matcher = StructureMatcher(comparator=ElementComparator())
    for i, grp in enumerate(matcher.group_structures(structures)):
        print("Group {}: ".format(i))
        for s in grp:
            print("- {} ({})".format(fnames[structures.index(s)],
                                     s.formula))
        print()
def analyze_structures(args):
    """
    Master function to handle which operation to perform.

    Args:
        args (dict): Args from argparse.
    """
    # Dispatch on the first truthy mode flag; like the original if/elif
    # chain, exactly one handler runs and priority is preserved.
    dispatch = (
        ("convert", convert_fmt),
        ("symmetry", analyze_symmetry),
        ("group", compare_structures),
        ("localenv", analyze_localenv),
    )
    for attr, handler in dispatch:
        if getattr(args, attr):
            handler(args)
            break
| gVallverdu/pymatgen | pymatgen/cli/pmg_structure.py | Python | mit | 3,894 | [
"pymatgen"
] | 34578d76f5c78d3b4c8e3b1ed09f27985f4bb309a3172952cf6f4e0b959d8f68 |
#!/usr/bin/env python3
# Copyright 2012 David Y. Gonzalez
# This file is part of A.L.E.C.
# A.L.E.C. is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# [at your option] any later version.
# A.L.E.C. is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with A.L.E.C. If not, see <http://www.gnu.org/licenses/>.
import sys,os,platform
from subprocess import call
from time import sleep
# Get the OS username.
# NOTE(review): os.environ['USERNAME'] is Windows-style; on most Linux
# systems the variable is USER, so this may raise KeyError there -- confirm.
un = os.environ['USERNAME']
# Get current working directory and path-related constants.
sep = os.sep #"/" on linux "\\" on Win
cwd = os.getcwd()
script_path = sys.path[0] + sep + "Endgame" # "Endgame" would be the name of your Netbeans project folder
project_name = script_path.split(sep)[-1].lower()
pardir = os.pardir
pathsep = os.pathsep #":" on linux ";" on Win
curdir = os.curdir #mostly "."
linesep = os.linesep # \n or \f\n
# Get the OS name and derive platform flags used throughout the script.
osname = sys.platform
isLinux = osname.startswith('linux')
isWin = osname.startswith('win')
# Other variables: tool locations taken from environment variables
# (both must be set, otherwise KeyError at import time).
cup = os.environ["CUP"]
jflex = os.environ["JFLEX"]
# Change the terminal window title (cmd "title" on Windows, xterm escape elsewhere).
if isWin:
    call("title A.L.E.C. C2 Flavor - %s" % project_name, shell=True)
else:
    call("\x1b]2;A.L.E.C. C2 Flavor - %s\x07" % project_name, shell=True)
#definiendo metodos
def handle_jflex(package_name="analisis", jflex_name="scanner01.lex"):
    """Run JFlex on the project's .lex file, emitting the generated scanner
    into src/<package_name>. Equivalent shell command shown below."""
    #java -jar "%JFLEX%\lib\JFlex.jar" 4scanner.lex -d src\analisis
    str_jflex = "java -jar \"{1}{0}lib{0}JFlex.jar\" src{0}{3}{0}{2} -d src{0}{4}".format(sep,jflex,jflex_name,project_name,package_name)
    print_exe(str_jflex)
    call(str_jflex, shell=True)
def handle_cup(package_name="analisis", parser_java="Parser01.java", simbolos_java="Simbolos01.java", cup_name="parser01.cup"):
    """Run CUP on the grammar file, patch the generated parser class, and
    move the generated Java sources into src/<package_name>."""
    #java -jar "%CUP%\java-cup-11a.jar" -package analisis -parser Parser -symbols Simbolos 5parser.cup
    str_cup = "java -jar \"{1}{0}java-cup-11a.jar\" -package {2} -parser {3} -interface -symbols {4} src{0}{6}{0}{5}".format(sep, cup, package_name, parser_java.split('.')[-2], simbolos_java.split('.')[-2], cup_name,project_name)
    print_exe(str_cup)
    res = call(str_cup, shell=True)
    if not res == 0:
        print("%sCUP program didn't end well, master %s. I recommend you check the logs." % (linesep, un))
        return
    print("Moving Files")
    # Before moving, patch the generated file: overwrite the text at byte
    # offset 558 with "BigParser ". NOTE(review): a fixed byte offset into
    # CUP's boilerplate header is fragile -- assumes an exact CUP output
    # layout; confirm against the generated Parser source.
    with open(parser_java, "r+") as f:
        f.seek(558, 0)
        f.write("BigParser ")
    if isWin:
        #move Parser.java src/analisis
        #move Simbolos.java src/analisis
        str_move = "move {1} src{0}{2}"
        call(str_move.format(sep, parser_java, package_name), shell=True)
        call(str_move.format(sep, simbolos_java, package_name), shell=True)
    elif isLinux:
        #mv Parser.java Simbolos.java src/analisis
        call("mv -v {1} {2} src{0}{3}".format(sep, parser_java, simbolos_java, package_name), shell=True)
    else:
        print("could't move files... dang it!")
def compile_java():
    """Compile the project's Main.java into build/classes, with the CUP
    runtime jar on the classpath. Temporarily changes into src/ and back."""
    #cd "src"
    os.chdir("src")
    print("cd src")
    #javac -d "../build/classes" -cp ".;../lib/java-cup-11a-runtime.jar" proyecto1_200819312/proyecto1_200819312.java
    str_javac = "javac -d \"{3}{0}build{0}classes\" -cp \"{2}{1}{3}{0}lib{0}java-cup-11a-runtime.jar\" {4}{0}{5}".format(sep, pathsep, curdir, pardir, project_name, "Main.java")
    print_exe(str_javac)
    call(str_javac, shell=True)
    os.chdir(pardir)#go back
    print("cd",pardir)
def run_java(graphical = False):
    """Run the compiled project from build/classes.

    graphical=True launches it in a new terminal window with the Nimbus
    look-and-feel; otherwise it runs inline with src/test.txt as argument.
    Changes the working directory during the run and restores it afterwards.
    """
    #cd build/classes
    chdir = "build{0}classes".format(sep)
    os.chdir(chdir)
    print("cd " + chdir)
    #java -Dswing.defaultlaf=com.sun.java.swing.plaf.nimbus.NimbusLookAndFeel -cp ".;../../lib/java-cup-11a-runtime.jar;" proyecto1_200819312.Proyecto1_200819312
    if graphical:
        str_java = "{6}java -Dswing.defaultlaf=com.sun.java.swing.plaf.nimbus.NimbusLookAndFeel -cp \"{2}{1}{3}{0}{3}{0}lib{0}java-cup-11a-runtime.jar{1}{3}{0}{3}{0}lib{0}log4j-1.2.17.jar\" {4}.{5}{7}".format(sep, pathsep, curdir, pardir, project_name, "Main", "start \"%s\" cmd /K " % project_name if isWin else "terminator -T \"A.L.E.C. - %s Flavor\" -x " % project_name, " &" if isLinux else "")
    else:
        str_java = "java -cp \"{2}{1}{3}{0}{3}{0}lib{0}java-cup-11a-runtime.jar\" {4}.{5} {6}".format(sep, pathsep, curdir, pardir, project_name, "Main", "{1}{0}{1}{0}src{0}test.txt".format(sep, pardir))
    print_exe(str_java)
    res = call(str_java, shell=True)
    sleep(1)
    if not res == 0:
        print("%sIt is to my knowledge your last excecution ended with code %d. I recommend you visit the logs." % (linesep, res))
    chdir = pardir + sep + pardir
    os.chdir(chdir)#go back
    #print(linesep + "cd",chdir)
def erase_log(log_name="log-file.txt"):
    """Truncate (wipe) the given log file inside the ./log directory.

    Args:
        log_name: file to wipe, relative to the ``log`` directory.

    Prints a success or failure message; restores the working directory.
    """
    chdir = "log"
    os.chdir(chdir)
    print("cd " + chdir)
    try:
        # Opening in "w" mode truncates the file; the context manager
        # guarantees the handle is closed (the old code used a manual
        # open/close pair and a redundant trailing `pass`).
        with open(log_name, "w"):
            pass
        print('"{0}" has been wiped clean!'.format(log_name))
    except IOError:
        print("File must be in use or may not exist, %s" % un)
    os.chdir(pardir)  # go back
def run_jar():
    """Run the packaged <project_name>.jar from dist/ in a new terminal
    window (cmd on Windows, terminator on Linux) with the Nimbus
    look-and-feel. Restores the working directory afterwards."""
    #cd build/classes
    # chdir = "build{0}classes".format(sep)
    chdir = "dist"
    os.chdir(chdir)
    print("cd " + chdir)
    #java -Dswing.defaultlaf=com.sun.java.swing.plaf.nimbus.NimbusLookAndFeel -cp ".;../../lib/java-cup-11a-runtime.jar;" proyecto1_200819312.Proyecto1_200819312
    # str_java = "{5}java -jar -Dswing.defaultlaf=com.sun.java.swing.plaf.nimbus.NimbusLookAndFeel -cp \"{2}{1}lib{0}java-cup-11a-runtime.jar{1}lib{0}log4j-1.2.17.jar{1}lib{0}AbsoluteLayout.jar\" {4}.jar {6}".format(sep, pathsep, curdir, pardir, project_name, "start \"%s\" cmd /K " % project_name if isWin else "terminator -T \"A.L.E.C. - %s Flavor\" -x ", " &" if isLinux else "")
    str_java = "{1}java -jar -Dswing.defaultlaf=com.sun.java.swing.plaf.nimbus.NimbusLookAndFeel {0}.jar {2}".format(project_name, "start \"%s\" cmd /K " % project_name if isWin else "terminator -T \"A.L.E.C. - %s Flavor\" -x ", " &" if isLinux else "")
    print_exe(str_java)
    res = call(str_java, shell=True)
    sleep(1)
    if not res == 0:
        print("%sIt is to my knowledge your last excecution ended with code %d. I recommend you visit the logs." % (linesep, res))
    chdir = pardir
    os.chdir(chdir)#go back
    #print(linesep + "cd",chdir)
def handle_Input(key):
    """Dispatch a numeric menu selection to its corresponding action."""
    actions = {
        1: handle_jflex,
        2: handle_cup,
        3: erase_log,
        4: lambda: run_java(graphical=True),
        5: run_jar,
    }
    action = actions.get(key)
    if action is None:
        print("selection invalid!")
    else:
        action()
def clear_clr():
    """Clear the console using the platform's native command."""
    if isWin:
        command = "cls"
    elif isLinux:
        command = "clear"
    else:
        print("couldn't clear the screen!")
        return
    call(command, shell=True)
def print_exe(str_command):
    """Echo the shell command line that is about to be executed."""
    #print("Excecuting Statement: ")
    print(str_command)
def check_cwd():
    """Ensure the process working directory is the project directory
    (script_path); change into it and update the module-level cwd if not."""
    global cwd
    print("CWD -> %s" % cwd)
    if cwd != script_path:
        print("Changing CWD to -> " + script_path)
        cwd = script_path
        os.chdir(cwd)
def press_any_key():
    """Block until the user presses a single key (no Enter needed).

    Uses msvcrt on Windows; on Linux puts the terminal into raw mode via
    tty/termios, reads one byte from stdin, then restores the settings.
    On other platforms only the prompt is printed.
    """
    # try:
    #     input("%sPress ENTER to continue, %s" % (linesep,un))
    # except SyntaxError:#for python2
    #     pass
    print("%sPress ANY KEY to continue, %s" % (linesep,un))
    if isWin:
        import msvcrt
        getch = msvcrt.getch()
    elif isLinux:
        import tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Restore the previous terminal mode even if the read fails.
            termios.tcsetattr(fd, termios.TCSANOW, old_settings)
    #return ch
def print_mainheader():
    """Clear the screen and print the banner: greeting, platform info,
    working-directory check and project name."""
    clear_clr()
    print("A.L.E.C. Accomplice Library for Exclusive Coding - \"Compiladores 2\" Flavor")
    print("Greetings master %s!%s" % (un, linesep))
    print("You're working on %s%s" % (platform.platform(), linesep))
    check_cwd()
    print("Project name: " + project_name)
# Main menu loop.
def go():
    """Interactive main menu: print options, read a selection, dispatch it
    via handle_Input, and repeat until the user enters 0 or an empty line."""
    while True:
        print_mainheader()
        print("%sThese are your options for \"Compiladores 2\"" % linesep)
        print('1.) JFlex "Scanner01.lex"')
        print('2.) CUP "Parser01.cup"')
        print("3.) Erase Log")
        print("4.) Run Java")
        print("5.) Run Jar")
        print("%sJust press ENTER or type 0 to EXIT" % linesep)
        key = ""
        try:
            key = input('Select option: ') # python3 doesn't throw SyntaxError
        except SyntaxError:#for python2
            pass
        if not key or key == "0": break;
        print()
        try:
            handle_Input(int(key))
        except ValueError:
            # Non-numeric input.
            print("Not a valid choice, %s" % un)
        press_any_key()
    print("%sWe're done here" % linesep)
# Run the interactive menu only when executed as a script, not on import.
if __name__ == "__main__":
    go()
| yzaguirre/topdog-tools | alec.py | Python | apache-2.0 | 8,372 | [
"VisIt"
] | 6f7d34cd94d75ff46a560ae6dacf5ab4cfa45f85bea086e7e48e420b8f3231a7 |
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""The ASTNGBuilder makes astng from living object and / or from _ast
The builder is not thread safe and can't be used to parse different sources
at the same time.
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import splitext, basename, exists, abspath
from logilab.common.modutils import modpath_from_file
from logilab.astng.exceptions import ASTNGBuildingException, InferenceError
from logilab.astng.raw_building import InspectBuilder
from logilab.astng.rebuilder import TreeRebuilder
from logilab.astng.manager import ASTNGManager
from logilab.astng.bases import YES, Instance
from _ast import PyCF_ONLY_AST
def parse(string):
    """Compile *string* into a raw ``_ast`` tree (no bytecode generation)."""
    ast_only = PyCF_ONLY_AST
    return compile(string, "<string>", 'exec', ast_only)
# Version-dependent definition of open_source_file(filename)
# -> (stream, encoding, data). NOTE(review): the whole module uses
# Python-2 `except X, e` syntax, so only the else-branch can ever run;
# the py3 branch appears untested (the 'bU' open mode is rejected by
# Python 3) -- confirm before relying on it.
if sys.version_info >= (3, 0):
    from tokenize import detect_encoding
    def open_source_file(filename):
        # Detect the declared encoding from the raw bytes (PEP 263),
        # then re-open the file as text with that encoding.
        byte_stream = open(filename, 'bU')
        encoding = detect_encoding(byte_stream.readline)[0]
        stream = open(filename, 'U', encoding=encoding)
        try:
            data = stream.read()
        except UnicodeError, uex: # wrong encoding
            # detect_encoding returns utf-8 if no encoding specified
            msg = 'Wrong (%s) or no encoding specified' % encoding
            raise ASTNGBuildingException(msg)
        return stream, encoding, data
else:
    import re
    # Matches a PEP 263 coding declaration, e.g. "# -*- coding: utf-8 -*-".
    _ENCODING_RGX = re.compile("\s*#+.*coding[:=]\s*([-\w.]+)")
    def _guess_encoding(string):
        """get encoding from a python file as string or return None if not found
        """
        # check for UTF-8 byte-order mark
        if string.startswith('\xef\xbb\xbf'):
            return 'UTF-8'
        # PEP 263: the declaration must be on one of the first two lines.
        for line in string.split('\n', 2)[:2]:
            # check for encoding declaration
            match = _ENCODING_RGX.match(line)
            if match is not None:
                return match.group(1)
    def open_source_file(filename):
        """get data for parsing a file"""
        stream = open(filename, 'U')
        data = stream.read()
        encoding = _guess_encoding(data)
        return stream, encoding, data
# ast NG builder ##############################################################
# Shared default manager, used when an ASTNGBuilder is created without one.
MANAGER = ASTNGManager()
class ASTNGBuilder(InspectBuilder):
    """provide astng building methods"""
    # NOTE(review): class-level attribute, so one TreeRebuilder instance is
    # shared by every ASTNGBuilder -- consistent with the module docstring's
    # statement that the builder is not thread safe; confirm if intended.
    rebuilder = TreeRebuilder()
    def __init__(self, manager=None):
        InspectBuilder.__init__(self)
        self._manager = manager or MANAGER
    def module_build(self, module, modname=None):
        """build an astng from a living module instance
        """
        node = None
        path = getattr(module, '__file__', None)
        if path is not None:
            path_, ext = splitext(module.__file__)
            # Prefer building from the .py source when it sits next to the
            # compiled file.
            if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
                node = self.file_build(path_ + '.py', modname)
        if node is None:
            # this is a built-in module
            # get a partial representation by introspection
            node = self.inspect_build(module, modname=modname, path=path)
        return node
    def file_build(self, path, modname=None):
        """build astng from a source code file (i.e. from an ast)
        path is expected to be a python source file
        """
        try:
            stream, encoding, data = open_source_file(path)
        except IOError, exc:
            msg = 'Unable to load file %r (%s)' % (path, exc)
            raise ASTNGBuildingException(msg)
        except SyntaxError, exc: # py3k encoding specification error
            raise ASTNGBuildingException(exc)
        except LookupError, exc: # unknown encoding
            raise ASTNGBuildingException(exc)
        # get module name if necessary
        if modname is None:
            try:
                modname = '.'.join(modpath_from_file(path))
            except ImportError:
                modname = splitext(basename(path))[0]
        # build astng representation
        node = self.string_build(data, modname, path)
        node.file_encoding = encoding
        return node
    def string_build(self, data, modname='', path=None):
        """build astng from source code string and return rebuilded astng"""
        module = self._data_build(data, modname, path)
        self._manager.astng_cache[module.name] = module
        # post tree building steps after we stored the module in the cache:
        for from_node in module._from_nodes:
            self.add_from_names_to_locals(from_node)
        # handle delayed assattr nodes
        for delayed in module._delayed_assattr:
            self.delayed_assattr(delayed)
        if modname:
            # Apply registered transformation hooks to named modules only.
            for transformer in self._manager.transformers:
                transformer(module)
        return module
    def _data_build(self, data, modname, path):
        """build tree node from data and add some informations"""
        # this method could be wrapped with a pickle/cache function
        node = parse(data + '\n')
        if path is not None:
            node_file = abspath(path)
        else:
            node_file = '<?>'
        if modname.endswith('.__init__'):
            modname = modname[:-9]
            package = True
        else:
            package = path and path.find('__init__.py') > -1 or False
        self.rebuilder.init()
        module = self.rebuilder.visit_module(node, modname, package)
        module.file = module.path = node_file
        module._from_nodes = self.rebuilder._from_nodes
        module._delayed_assattr = self.rebuilder._delayed_assattr
        return module
    def add_from_names_to_locals(self, node):
        """store imported names to the locals;
        resort the locals if coming from a delayed node
        """
        _key_func = lambda node: node.fromlineno
        def sort_locals(my_list):
            my_list.sort(key=_key_func)
        for (name, asname) in node.names:
            if name == '*':
                # Wildcard import: pull every exported name of the target
                # module into the parent scope.
                try:
                    imported = node.root().import_module(node.modname)
                except ASTNGBuildingException:
                    continue
                for name in imported.wildcard_import_names():
                    node.parent.set_local(name, node)
                    sort_locals(node.parent.scope().locals[name])
            else:
                node.parent.set_local(asname or name, node)
                sort_locals(node.parent.scope().locals[asname or name])
    def delayed_assattr(self, node):
        """visit a AssAttr node -> add name to locals, handle members
        definition
        """
        try:
            frame = node.frame()
            for infered in node.expr.infer():
                if infered is YES:
                    continue
                try:
                    if infered.__class__ is Instance:
                        infered = infered._proxied
                        iattrs = infered.instance_attrs
                    elif isinstance(infered, Instance):
                        # Const, Tuple, ... we may be wrong, may be not, but
                        # anyway we don't want to pollute builtin's namespace
                        continue
                    elif infered.is_function:
                        iattrs = infered.instance_attrs
                    else:
                        iattrs = infered.locals
                except AttributeError:
                    # XXX log error
                    #import traceback
                    #traceback.print_exc()
                    continue
                values = iattrs.setdefault(node.attrname, [])
                if node in values:
                    continue
                # get assign in __init__ first XXX useful ?
                if frame.name == '__init__' and values and not \
                    values[0].frame().name == '__init__':
                    values.insert(0, node)
                else:
                    values.append(node)
        except InferenceError:
            pass
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/logilab/astng/builder.py | Python | agpl-3.0 | 8,738 | [
"VisIt"
] | f38188a483110c019d96ea70328ea7c110bf37c22d9e4e614efda94f4753a192 |
#!/usr/bin/env python3
# Copyright 2015-2018 Ian Leonard <antonlacon@gmail.com>
#
# This file is bot_cfg.py and is part of the Foundational IRC Bot for
# Twitch.tv project.
#
# bot_cfg.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the license.
#
# bot_cfg.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bot_cfg.py. If not, see <http://www.gnu.org/licenses/>.
# Twitch IRC variables
host_server = "irc.twitch.tv"
host_port = 6667
bot_admin = "" # the administrator of the bot
bot_handle = "" # must be lowercase
bot_password = "oauth:" # visit http://twitchapps.com/tmi/ to obtain
channel = "#" # first character is a hashtag
# Game Service Handles - Leave empty to disable corresponding !command
xbox_handle = ""
playstation_handle = ""
steam_handle = ""
# Special Effects
# Sound effects are played by VLC, so this bot should run on the streaming machine to make
# use of this feature (or wait for VLC remote control support, which may never come).
# Path to the VLC executable
# NOTE(review): backslashes here happen not to form escape sequences, but a
# raw string (r"...") would be safer if this path is ever edited.
vlc_bin = "C:\Program Files\VideoLAN\VLC\vlc.exe"
# Invoking multiple sound effects will make them play concurrently. There is no queue or
# blocking for these commands. Stopping playback involves killing VLC via task manager.
# Sound Effect 1 Settings
# The string to listen to for the first sound effect
sfx1_alias = "!sfx1"
# The file path to the first sound effect
sfx1_path = ""
# Sound Effect 2 Settings
sfx2_alias = "!sfx2"
sfx2_path = ""
# Chat Moderation
# Language strike out settings
# Note that the strike count only lasts until bot restarts, unless a database file is used.
# Bans last until cleared in Twitch channel settings.
# System set up:
# On first offenses, the message is deleted. User is allowed to continue to send messages.
# If user used 50% or more of the allowed chances, the user is given a X-second timeout per below.
# When the strike count is reached, the user is banned from the channel.
#
# If the strikes_until_ban is set to 0, no bans or long timeouts will be issued;
# only messages will be removed.
# Ban user on this strike number
strikes_until_ban = 5
# Length of silence timeout for repeated strikes in seconds (default 600 seconds = 10 minutes)
strikes_timeout_duration = 600
# Suppress messages in all uppercase letters with at least this many characters:
uppercase_message_suppress_length = 20
| antonlacon/foundationalbot | bot_cfg.py | Python | gpl-3.0 | 2,723 | [
"VisIt"
] | 80dc036a28c61c739293fa30bf7d4c1a400aca96b795c56027066a39e04e0de4 |
from .util import APPS_DIR_PATH, EPLUS_DIR_PATTERN
from .outputs import OUTPUT_FILES_LAYOUTS, get_output_files_layout
from .epw import get_simulated_epw_path
from .simulation import SIMULATION_COMMAND_STYLES, SIMULATION_INPUT_COMMAND_STYLES, get_simulation_base_command, \
get_simulation_input_command_style, get_simulation_command_style
| Openergy/oplus | oplus/compatibility/__init__.py | Python | mpl-2.0 | 342 | [
"EPW"
] | efa0c1c748b04965165cfe09dfc21dec308ae1dd9819fcd45361a3c4fd729e5e |
#=============================================================================
# gpx_layer_waypoints.py
# Waypoint map layer
# Copyright 2013--2016, Trinity College
# Last modified: 31 March 2016
#=============================================================================
import gtk
import math
import cairo
from gpx_layer import GpxEditableLayer, GpxTool, points_close
from gpx_data_gpx import GpxWaypoint
import pykarta.draw
# This is a Pykarta map layer for rendering GPX waypoins
class WaypointLayer(GpxEditableLayer):
	"""PyKarta map layer that renders and edits the waypoints of a GPX file."""
	def __init__(self, gpx_data):
		GpxEditableLayer.__init__(self)
		# Two way connexion between this layer and data store
		self.layer_objs = gpx_data.waypoints
		self.layer_objs.add_client('map_layer', self)
	# Receives notification of changes in the selection which are made
	# by some other client of the data store.
	def on_select(self, path, source, client_name):
		if path is not None:
			wp = self.layer_objs[path[0]]
			if source == 'treeview_double_click':
				# Double click: center the map on the waypoint and zoom in.
				self.containing_map.set_center_and_zoom_in(wp.lat, wp.lon, 14)
			else:
				self.containing_map.make_visible(wp.lat, wp.lon)
		GpxEditableLayer.on_select(self, path, source, client_name)
	# Wrap a waypoint up in an object which contains its projected
	# coordinates and marker image.
	def create_renderer(self, obj, index):
		class WaypointRenderer(object):
			def __init__(self, obj, index, layer):
				self.obj = obj
				self.index = index
				containing_map = layer.containing_map
				self.projected_point = containing_map.project_point(obj)
				self.sym = containing_map.symbols.get_symbol(obj.sym, default="Dot").get_renderer(containing_map)
				# Only label waypoints once zoomed in past level 8.
				self.label = obj.name if containing_map.get_zoom() > 8 else None
			def draw(self, ctx, selected_path):
				x, y = self.projected_point
				# Mark the currently selected waypoint with an X.
				if selected_path is not None and self.index == selected_path[0]:
					pykarta.draw.x_marks_the_spot(ctx, x, y, self.sym.x_size)
				self.sym.blit(ctx, x, y)
				if self.label:
					pykarta.draw.poi_label(ctx, x + self.sym.label_offset, y, self.label)
			def move(self, x, y):
				self.projected_point = (x, y)
			def drop(self, containing_map):
				# Commit the dragged screen position back to lat/lon.
				obj = self.obj
				obj.lat, obj.lon = containing_map.unproject_point(*self.projected_point)
				obj.src = "User Placed"
				obj.ele = "" # not valid at new location
				obj.time = "" # no longer where we were then
		return WaypointRenderer(obj, index, self)
	# Click once to select a waypoint. Bring the mouse down again
	# and move to drag it to a new position.
	def create_tool_select_adjust(self):
		class WaypointSelector(GpxTool):
			def __init__(self, layer):
				self.layer = layer
				self.dragged_obj = None
				self.moved = False
			def on_button_press(self, gdkevent):
				event_point = (gdkevent.x, gdkevent.y)
				# Scan topmost-first for a waypoint near the click.
				for obj in reversed(self.layer.visible_objs):
					if points_close(obj.projected_point, event_point):
						if self.layer.selected_path is not None and self.layer.selected_path[0] == obj.index:
							# Already selected: begin a drag.
							self.dragged_obj = obj
							self.layer.containing_map.set_cursor(gtk.gdk.FLEUR)
						else:
							self.layer.select((obj.index,))
						return True
				return False
			def on_motion(self, gdkevent):
				if self.dragged_obj is not None:
					self.dragged_obj.move(gdkevent.x, gdkevent.y)
					self.layer.redraw()
					self.moved = True
					return True
				return False
			def on_button_release(self, gdkevent):
				if self.dragged_obj is not None:
					if self.moved:
						self.dragged_obj.drop(self.layer.containing_map)
						self.layer.layer_objs.touch(self.dragged_obj.index)
						self.layer.select(self.layer.selected_path)	# so form will be updated
					self.layer.containing_map.set_cursor(None)
					self.layer.redraw()
					self.dragged_obj = None
					self.moved = False
					return True
				return False
		return WaypointSelector(self)
	# Click to create waypoints.
	def create_tool_draw(self):
		class WaypointDrawer(GpxTool):
			def __init__(self, layer):
				self.layer = layer
			def on_button_press(self, gdkevent):
				lat, lon = self.layer.containing_map.unproject_point(gdkevent.x, gdkevent.y)
				waypoint = GpxWaypoint(lat, lon)
				waypoint.name = "New Point"
				waypoint.src = "User Placed"
				waypoint_index = len(self.layer.layer_objs)
				self.layer.layer_objs.append(waypoint)
				self.layer.set_stale()
				self.layer.select((waypoint_index,))
				return True
		return WaypointDrawer(self)
	# Click to delete waypoints.
	def create_tool_delete(self):
		class WaypointDeleter(GpxTool):
			def __init__(self, layer):
				self.layer = layer
			def on_button_press(self, gdkevent):
				event_point = (gdkevent.x, gdkevent.y)
				for obj in reversed(self.layer.visible_objs):
					if points_close(obj.projected_point, event_point):
						del self.layer.layer_objs[obj.index]
						#self.layer.set_stale()
						return True
				return False
		return WaypointDeleter(self)
| david672orford/GPX_Trip_Planner | Code/gpx_layer_waypoints.py | Python | gpl-2.0 | 4,877 | [
"FLEUR"
] | e64bab91eaf63b2633de02b048b337d70c8b6a4c5db4535cb8da3e8ea9db4302 |
def ASoP_tests():
    """Regression test for the ASoP1 spectral tools: build rainfall
    histograms from the bundled test dataset, check the fractional
    contributions sum to 1, and regenerate the reference plots."""
    import os.path
    import sys
    import numpy as np
    import iris
    from ASoP1_spectral import make_hist_maps
    from ASoP1_spectral import plot_hist_maps
    from ASoP1_spectral import plot_hist1d
    THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    my_data_path = os.path.join(THIS_DIR, 'reference/test_precipitation_dataset.nc')
    # Read in test dataset provided, load data and create histograms
    filename=my_data_path
    ppn=make_hist_maps.read_data_cube(filename)
    histmap=make_hist_maps.make_hist_ppn(ppn)
    # Save the histograms in another netCDF file. This should compare exactly with the
    # test_precipitation_hist.nc file provided.
    my_histmap_file = os.path.join(THIS_DIR, 'mytest_histmap.nc')
    iris.save(histmap, my_histmap_file)
    # Plot set of maps of contributions to rainfall over the period
    # NOTE: Designed to compared two datasets (e.g. on different timescales) side by side.
    # Since only one dataset is used here, plots the SAME data side by side.
    filename_a=my_histmap_file
    filename_b=filename_a
    histcube_a=make_hist_maps.read_data_cube(filename_a)
    histcube_b=make_hist_maps.read_data_cube(filename_b)
    runtitle='Artificial rainfall dataset'
    # Calculate actual and fractional contributions to rainfall over period
    avg_rain_bins_a,avg_rain_bins_frac_a=make_hist_maps.calc_rain_contr(histcube_a)
    avg_rain_bins_b,avg_rain_bins_frac_b=make_hist_maps.calc_rain_contr(histcube_b)
    # Check that the fractional contributions sum to 1.0 at all points
    # (rounded to 12 decimal places to absorb floating-point noise).
    test_a=avg_rain_bins_frac_a.collapsed('precipitation_flux', iris.analysis.SUM)
    test_b=avg_rain_bins_frac_b.collapsed('precipitation_flux', iris.analysis.SUM)
    if round(np.max(test_a.data),12) != 1.0 or round(np.max(test_b.data),12) != 1.0:
        raise Exception('One or more fractional histograms does not sum to 1.0')
    #------------
    # Plotting
    #------------
    # First plot actual contribution to total rainfall in period. This should compare exactly with the
    # test_precipitation_histmap_act.png file provided.
    plotname=os.path.join(THIS_DIR, 'mytest_histmap_act.png')
    plot_hist_maps.plot_rain_contr(avg_rain_bins_a,avg_rain_bins_b,
                                   plotname,runtitle,'hourly','hourly')
    # Now plot fractional contribution to total rainfall in period. This should compare exactly with the
    # test_precipitation_histmap_frac.png file provided.
    plotname=os.path.join(THIS_DIR, 'mytest_histmap_frac.png')
    plot_hist_maps.plot_rain_contr(avg_rain_bins_frac_a,avg_rain_bins_frac_b,
                                   plotname,runtitle,'hourly','hourly',frac=1)
    # Plot 1-d histograms of fractional and actual contributions for a small region.
    # These should compare exactly with the test_hist1d_frac(act).png files provided.
    region=[215.0,-5.0,225.0,5.0]
    filenames=[my_histmap_file]
    runtitles=['Artificial rainfall dataset']
    plottitle='Artificial rainfall dataset for testing'
    timesc='hourly'
    plotname=os.path.join(THIS_DIR, 'mytest_hist1d_frac.png')
    plot_hist1d.plot_1dhist(plotname,region,filenames,runtitles,plottitle,
                            timesc,frac=1,col_offset=2,log=1)
    plotname=os.path.join(THIS_DIR, 'mytest_hist1d_act.png')
    plot_hist1d.plot_1dhist(plotname,region,filenames,runtitles,plottitle,
                            timesc,col_offset=2,log=1)
| gillmmartin/ASoP1-Spectral | ASoP1_Spectral/tests/ASoP1_tests.py | Python | apache-2.0 | 3,509 | [
"NetCDF"
] | ecdb209626813ab98282a157c8bd6970dc5d9363964346efd61da9b560bd40c5 |
#
# Copyright 2019-2020 Johannes Hoermann (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path, re, sys
import numpy as np
from glob import glob
from cycler import cycler
from itertools import cycle
from itertools import groupby
import matplotlib.pyplot as plt
# Ensure each configuration variable is defined: keep any pre-set value,
# otherwise fall back to the positional command-line argument, otherwise
# use the hard-coded default.
try:
    datadir
except NameError:
    try:
        datadir = sys.argv[1]
    except IndexError:  # narrowed from a bare except: only "argument missing"
        datadir = 'data'
try:
    figfile
except NameError:
    try:
        figfile = sys.argv[2]
    except IndexError:
        figfile = 'fig.png'
try:
    param
except NameError:
    try:
        param = sys.argv[3]
    except IndexError:
        param = 'c'
try:
    param_label
except NameError:
    # Bug fix: this guard previously probed `param_unit` while assigning
    # `param_label`, so a pre-set param_label could be silently overwritten.
    try:
        param_label = sys.argv[4]
    except IndexError:
        param_label = 'c (\mathrm{mM})'
try:
    glob_pattern
except NameError:
    glob_pattern = os.path.join(datadir, 'NaCl*.txt')
def right_align_legend(leg):
    """Right-align the text entries of a matplotlib legend *leg*.

    NOTE(review): reaches into the legend's private packer boxes
    (``_legend_box``), so this depends on matplotlib internals -- confirm
    against the installed matplotlib version.
    """
    hp = leg._legend_box.get_children()[1]
    for vp in hp.get_children():
        for row in vp.get_children():
            row.set_width(100)  # need to adapt this manually
            row.mode= "expand"
            row.align="right"
# sort file names as normal humans expect
# https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
# Captures signed decimal numbers with optional exponent (e.g. "-1.5e-3").
scientific_number_regex = '([-+]?[\d]+\.?[\d]*(?:[Ee][-+]?[\d]+)?)'
def alpha_num_order(x):
    """Sort key that orders alphanumeric strings the way humans expect:
    numeric runs compare as numbers, so "x2" sorts before "x10"."""
    def _coerce(chunk):
        try:
            return float(chunk)
        except ValueError:
            return chunk
    return [_coerce(chunk) for chunk in re.split(scientific_number_regex, x)]
dat_files = sorted(glob(glob_pattern),key=alpha_num_order)
N = len(dat_files) # number of data sets
M = 2 # number of species
# matplotlib settings
SMALL_SIZE = 8
MEDIUM_SIZE = 12
BIGGER_SIZE = 16
# plt.rc('axes', prop_cycle=default_cycler)
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure titlex
plt.rcParams["figure.figsize"] = (16,10) # the standard figure size
plt.rcParams["lines.linewidth"] = 3
plt.rcParams["lines.markersize"] = 14
plt.rcParams["lines.markeredgewidth"]=1
# line styles
# https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
# linestyle_str = [
# ('solid', 'solid'), # Same as (0, ()) or '-'
# ('dotted', 'dotted'), # Same as (0, (1, 1)) or '.'
# ('dashed', 'dashed'), # Same as '--'
# ('dashdot', 'dashdot')] # Same as '-.'
linestyle_tuple = [
    ('loosely dotted', (0, (1, 10))),
    ('dotted', (0, (1, 1))),
    ('densely dotted', (0, (1, 1))),
    ('loosely dashed', (0, (5, 10))),
    ('dashed', (0, (5, 5))),
    ('densely dashed', (0, (5, 1))),
    ('loosely dashdotted', (0, (3, 10, 1, 10))),
    ('dashdotted', (0, (3, 5, 1, 5))),
    ('densely dashdotted', (0, (3, 1, 1, 1))),
    ('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
    ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
    ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
# color maps for potential and concentration plots
cmap_u = plt.get_cmap('Reds')
cmap_c = [plt.get_cmap('Oranges'), plt.get_cmap('Blues')]
# general line style cycler
line_cycler = cycler( linestyle = [ s for _,s in linestyle_tuple ] )
# potential and concentration cyclers
# Outer product of color and linestyle cyclers so every data set gets a
# distinct (color, linestyle) combination.
u_cycler = cycler( color = cmap_u( np.linspace(0.4,0.8,N) ) )
u_cycler = len(line_cycler)*u_cycler + len(u_cycler)*line_cycler
c_cyclers = [ cycler( color = cmap( np.linspace(0.4,0.8,N) ) ) for cmap in cmap_c ]
c_cyclers = [ len(line_cycler)*c_cycler + len(c_cycler)*line_cycler for c_cycler in c_cyclers ]
# https://matplotlib.org/3.1.1/tutorials/intermediate/constrainedlayout_guide.html
# Three panels: potential, linear concentration, semilog concentration.
fig, (ax1,ax2,ax3) = plt.subplots(
    nrows=1, ncols=3, figsize=[24,7], constrained_layout=True)
ax1.set_xlabel('z (nm)')
ax1.set_ylabel('potential (V)')
ax2.set_xlabel('z (nm)')
ax2.set_ylabel('concentration (mM)')
ax3.set_xlabel('z (nm)')
ax3.set_ylabel('concentration (mM)')
# ax1.axvline(x=pnp.lambda_D()*1e9, label='Debye Length', color='grey', linestyle=':')
species_label = [
    '$[\mathrm{Na}^+], ' + param_label + '$',
    '$[\mathrm{Cl}^-], ' + param_label + '$']
# regex extracting the numeric parameter value (e.g. concentration) from file names
c_regex = re.compile(r'{}_{}'.format(param,scientific_number_regex))
# one handle list per species, collected for the two semilog legends below
c_graph_handles = [ [] for _ in range(M) ]
# One pass per data file: plot potential on ax1, linear concentrations on
# ax2 and semilog concentrations on ax3, one (color, linestyle) per file.
for f, u_style, c_styles in zip(dat_files,u_cycler,zip(*c_cyclers)):
    print("Processing {:s}".format(f))
    # extract nominal concentration from file name
    nominal_c = float( c_regex.search(f).group(1) )
    # columns: z position, potential, then one column per species
    dat = np.loadtxt(f,unpack=True)
    x = dat[0,:]
    u = dat[1,:]
    c = dat[2:,:]
    c_label = '{:> 4.2g}'.format(nominal_c)
    # potential
    ax1.plot(x*1e9, u, marker=None, label=c_label, linewidth=1, **u_style)
    for i in range(c.shape[0]):
        # concentration
        ax2.plot(x*1e9, c[i], marker='',
            label=c_label, linewidth=2, **c_styles[i])
        # semilog concentration
        c_graph_handles[i].extend( ax3.semilogy(x*1e9, c[i], marker='',
            label=c_label, linewidth=2, **c_styles[i]) )
# legend placement
# https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
u_legend = ax1.legend(loc='center right', title='potential, ${}$'.format(param_label), bbox_to_anchor=(-0.2,0.5) )
# second ax3.legend() call would replace the first, hence add_artist below
first_c_legend = ax3.legend(handles=c_graph_handles[0], title=species_label[0], loc='upper left', bbox_to_anchor=(1.00, 1.02) )
second_c_legend = ax3.legend(handles=c_graph_handles[1], title=species_label[1], loc='lower left', bbox_to_anchor=(1.00,-0.02) )
ax3.add_artist(first_c_legend) # add automatically removed first legend again
c_legends = [ first_c_legend, second_c_legend ]
legends = [ u_legend, *c_legends ]
for l in legends:
    right_align_legend(l)
# https://matplotlib.org/3.1.1/tutorials/intermediate/constrainedlayout_guide.html
# Keep the out-of-axes legends from distorting constrained layout, then
# re-include them so bbox_inches='tight' accounts for them when saving.
for l in legends:
    l.set_in_layout(False)
# trigger a draw so that constrained_layout is executed once
# before we turn it off when printing....
fig.canvas.draw()
# we want the legend included in the bbox_inches='tight' calcs.
for l in legends:
    l.set_in_layout(True)
# we don't want the layout to change at this point.
fig.set_constrained_layout(False)
# fig.tight_layout(pad=3.0, w_pad=2.0, h_pad=1.0)
# plt.show()
fig.savefig(figfile, bbox_inches='tight', dpi=100)
| libAtoms/matscipy | examples/electrochemistry/pnp_batch/cell_1d/stern_layer_sweep/pnp_plot.py | Python | lgpl-2.1 | 7,482 | [
"Matscipy"
] | 0e44979414523e171eaca3dc5577ac38dad57dfd94ba4482b0d260a8c40914c4 |
#!/usr/bin/env python
from __future__ import division
import os
import re
import string
import argparse
from collections import defaultdict
import pysam
import pandas as pd
from pyfasta import Fasta
def alleleCount(baseList, refNuc):
    """Count strand-aware allele occurrences at one pileup position.

    Which strands are counted depends on the reference base (bisulfite
    logic): A/T keep both strands, C keeps only the reverse strand, G
    keeps only the forward strand, anything else keeps neither.

    Returns (aCount, tCount, cCount, gCount, total, posCov, negCov) where
    total and the coverages only include A/T/C/G reads.
    """
    pos = defaultdict(int)
    neg = defaultdict(int)
    if refNuc in ('A', 'T'):
        keep_forward = keep_reverse = True
    elif refNuc == 'C':
        keep_forward, keep_reverse = False, True
    elif refNuc == 'G':
        keep_forward, keep_reverse = True, False
    else:
        keep_forward = keep_reverse = False
    for base, isReverse in baseList:
        if isReverse:
            if keep_reverse:
                neg[base] += 1
        elif keep_forward:
            pos[base] += 1
    counts = {b: pos[b] + neg[b] for b in 'ATCG'}
    total = sum(counts.values())
    posCov = sum(pos[b] for b in 'ATCG')
    negCov = sum(neg[b] for b in 'ATCG')
    return counts['A'], counts['T'], counts['C'], counts['G'], total, posCov, negCov
def snpDetermine(aCount, tCount, cCount, gCount, total, refNuc, majorAlleleFreq, buffer):
    """Classify a pileup position as homozygous or heterozygous SNP.

    Returns (freqList, freqDict, primaryAllele, secondaryAllele, snpType):
    snpType is 'homo' (one dominant non-reference allele), 'het' (two
    alleles each within *buffer* of 0.5), or None (alleles are then '').
    """
    freqDict = {base: count / total
                for base, count in zip('ATCG', (aCount, tCount, cCount, gCount))}
    # Highest frequency first; ties broken by base letter, descending.
    freqList = sorted(((freq, base) for base, freq in freqDict.items()), reverse=True)
    primary, secondary = freqList[0], freqList[1]
    primaryAllele = ''
    secondaryAllele = ''
    snpType = None
    if primary[0] >= majorAlleleFreq and primary[1] != refNuc:
        # homozygous: one dominant allele differing from the reference
        primaryAllele, secondaryAllele, snpType = primary[1], 'NA', 'homo'
    elif (0.5 - buffer <= primary[0] <= 0.5 + buffer
            and 0.5 - buffer <= secondary[0] <= 0.5 + buffer):
        # heterozygous: two alleles each close to 50%
        primaryAllele, secondaryAllele, snpType = primary[1], secondary[1], 'het'
    return (freqList, freqDict, primaryAllele, secondaryAllele, snpType)
def snpOutput(bamfile, genomeFile, coverage=5, majorAlleleFreq=0.9, buffer=0.1):
    """Walk a BAM pileup and write homozygous/heterozygous SNP calls.

    Positions with at least *coverage* informative reads are classified by
    snpDetermine(); homozygous calls go to <root>.homozygous.txt and
    heterozygous calls to <root>.heterozygous.txt (tab-separated lines of
    chrom, ref, pos, alleles, type, coverages and per-base frequencies).

    :param bamfile: coordinate-sorted, indexed BAM file
    :param genomeFile: reference FASTA file
    :param coverage: minimum A/T/C/G read count required to attempt a call
    :param majorAlleleFreq: frequency threshold for a homozygous call
    :param buffer: allowed deviation from 0.5 for a heterozygous call
    """
    bam = pysam.Samfile(bamfile, 'rb')
    root = os.path.splitext(os.path.basename(bamfile))[0]
    openFile = Fasta(genomeFile)
    # 'with' guarantees the output files are closed (and flushed) even if
    # the pileup iteration raises part-way through; the original leaked
    # both handles on error.
    with open('{}.homozygous.txt'.format(root), 'w') as homFile, \
            open('{}.heterozygous.txt'.format(root), 'w') as hetFile:
        for col in bam.pileup():
            chrom = bam.getrname(col.tid)  # renamed from 'chr' (shadowed builtin)
            pos = col.pos
            refNuc = openFile[chrom][pos]
            baseList = []
            for pileupRead in col.pileups:
                if not pileupRead.is_del:
                    isReverse = pileupRead.alignment.is_reverse
                    # NOTE(review): 'qpos' is the legacy pysam attribute name;
                    # newer pysam spells it 'query_position' -- confirm version.
                    base = pileupRead.alignment.seq[pileupRead.qpos]
                    baseList += [(base, isReverse)]
            (aCount, tCount, cCount, gCount, total, posCov, negCov) = alleleCount(baseList, refNuc)
            if total >= coverage:
                (freqList, freqDict, allele1, allele2, snpType) = snpDetermine(
                    aCount, tCount, cCount, gCount, total, refNuc, majorAlleleFreq, buffer)
                eachLine = (('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n') % (
                    chrom, refNuc, pos, allele1, allele2, snpType, total, posCov, negCov,
                    ('\t'.join(map(str, (freqDict[base] for base in ['A', 'T', 'C', 'G']))))))
                if snpType == 'het':
                    hetFile.write(eachLine)
                elif snpType == 'homo':
                    homFile.write(eachLine)
def get_parser():
    """Build and return the command-line parser for the SNP caller."""
    p = argparse.ArgumentParser()
    p.add_argument('bamfile', help='input BAM file')
    p.add_argument('-c', '--coverage', type=int, default=5,
                   help='coverage or minimum number of reads desired')
    p.add_argument('-m', '--majorAlleleFreq', type=float, default=0.9,
                   help='frequency to be considered homozygous allele')
    p.add_argument('-b', '--buffer', type=float, default=0.1,
                   help='buffer on either side of 0.5 to be considered heterozygous allele')
    p.add_argument('-g', '--genomeFile', help='input FASTA file')
    return p
def main():
    """Entry point: parse CLI arguments and run SNP detection."""
    args = get_parser().parse_args()
    snpOutput(args.bamfile, args.genomeFile, args.coverage,
              args.majorAlleleFreq, args.buffer)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| paoyangchen-laboratory/methgo | scripts/snp/snp.py | Python | mit | 4,408 | [
"pysam"
] | 2c7321680f13c26c34c3e9c1ebed09fc86ca1fb411c2fbf0074522349b166983 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
import datetime
from kiwi.currency import currency
from stoqlib.database.runtime import get_current_branch
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.purchase import PurchaseOrder
from stoqlib.gui.slaves.paymentconfirmslave import (PurchasePaymentConfirmSlave,
SalePaymentConfirmSlave)
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.lib.dateutils import localtoday
class TestPurchasePaymentConfirmSlave(GUITest):
    """UI snapshot test for the purchase-payment confirmation slave."""
    def test_create(self):
        # We are creating a cost center, but it should not appear in the slave,
        # since payment is not a lonely payment.
        self.create_cost_center()
        self.create_cost_center(is_active=False)
        payment = self.create_payment()
        payment.identifier = 12345
        payment.method = self.get_payment_method(u'money')
        payment.description = u'payment description'
        # Attach the payment to a confirmed purchase so it is not "lonely".
        order = self.create_purchase_order()
        self.create_purchase_order_item(order)
        order.identifier = 68395
        order.status = PurchaseOrder.ORDER_PENDING
        order.confirm()
        payment.group = order.group
        slave = PurchasePaymentConfirmSlave(self.store, [payment])
        self.assertSensitive(slave, ['source_account', 'destination_account'])
        # Compares the rendered editor against the stored UI snapshot.
        self.check_editor(slave, 'editor-purchase-payment-confirm-create')
class TestSalePaymentConfirmSlave(GUITest):
    """UI and behavior tests for the sale-payment confirmation slave."""
    def test_create(self):
        # We are creating a cost center, but it should not appear in the slave,
        # since payment is not a lonely payment.
        self.create_cost_center()
        self.create_cost_center(is_active=False)
        payment = self.create_payment()
        payment.identifier = 12345
        payment.method = self.get_payment_method(u'money')
        payment.description = u'payment description'
        sale = self.create_sale()
        sale.identifier = 47384
        sale_item = self.create_sale_item(sale=sale)
        self.create_storable(sale_item.sellable.product,
                             get_current_branch(self.store), 10)
        payment.group = sale.group
        sale.order()
        slave = SalePaymentConfirmSlave(self.store, [payment])
        self.check_editor(slave, 'editor-sale-payment-confirm-create')
    def test_penalty_and_interest(self):
        """Penalty/interest fields must react to the pay_* toggle buttons.

        A payment of 100, 5 days overdue, with 1%/day interest and 1%
        penalty yields penalty 1 and interest 5.05 (5 when the penalty is
        not applied).
        """
        sale = self.create_sale()
        sale_item = self.create_sale_item(sale=sale)
        self.create_storable(sale_item.sellable.product,
                             get_current_branch(self.store), 10)
        payment = self.create_payment(payment_type=Payment.TYPE_OUT, value=100,
                                      date=localtoday().date() - datetime.timedelta(5))
        sale.group = payment.group
        sale.order()
        payment.method.daily_interest = 1
        payment.method.penalty = 1
        # NOTE(review): this Sale test instantiates PurchasePaymentConfirmSlave,
        # not SalePaymentConfirmSlave -- looks copy-pasted; confirm intent.
        slave = PurchasePaymentConfirmSlave(self.store, [payment])
        # assertEqual replaces the deprecated assertEquals alias throughout.
        # Penalty and interest enabled
        self.assertEqual(slave.penalty.read(), currency('1'))
        self.assertEqual(slave.interest.read(), currency('5.05'))
        # Penalty disabled and interest enabled
        self.click(slave.pay_penalty)
        self.assertEqual(slave.penalty.read(), currency('0'))
        self.assertEqual(slave.interest.read(), currency('5'))
        # Penalty enabled and interest disabled
        self.click(slave.pay_penalty)
        self.click(slave.pay_interest)
        self.assertEqual(slave.penalty.read(), currency('1'))
        self.assertEqual(slave.interest.read(), currency('0'))
        # Penalty and interest disabled
        self.click(slave.pay_penalty)
        self.assertEqual(slave.penalty.read(), currency('0'))
        self.assertEqual(slave.interest.read(), currency('0'))
class TestLonelyPaymentConfirmSlave(GUITest):
    """Tests for confirming a payment not linked to any sale or purchase."""
    def test_create(self):
        # We are creating a cost center, and it should appear in the slave,
        # since payment is a lonely payment.
        self.create_cost_center()
        self.create_cost_center(is_active=False)
        payment = self.create_payment()
        payment.identifier = 28567
        payment.method = self.get_payment_method(u'money')
        payment.description = u'payment description'
        slave = PurchasePaymentConfirmSlave(self.store, [payment])
        self.check_editor(slave, 'editor-lonely-payment-confirm-create')
    def test_penalty_and_interest(self):
        """Same penalty/interest toggling as the sale case, without a sale."""
        payment = self.create_payment(payment_type=Payment.TYPE_OUT, value=100,
                                      date=localtoday().date() - datetime.timedelta(5))
        payment.method.daily_interest = 1
        payment.method.penalty = 1
        slave = PurchasePaymentConfirmSlave(self.store, [payment])
        # assertEqual replaces the deprecated assertEquals alias throughout.
        # Penalty and interest enabled
        self.assertEqual(slave.penalty.read(), currency('1'))
        self.assertEqual(slave.interest.read(), currency('5.05'))
        # Penalty disabled and interest enabled
        self.click(slave.pay_penalty)
        self.assertEqual(slave.penalty.read(), currency('0'))
        self.assertEqual(slave.interest.read(), currency('5'))
        # Penalty enabled and interest disabled
        self.click(slave.pay_penalty)
        self.click(slave.pay_interest)
        self.assertEqual(slave.penalty.read(), currency('1'))
        self.assertEqual(slave.interest.read(), currency('0'))
        # Penalty and interest disabled
        self.click(slave.pay_penalty)
        self.assertEqual(slave.penalty.read(), currency('0'))
        self.assertEqual(slave.interest.read(), currency('0'))
| andrebellafronte/stoq | stoqlib/gui/test/test_payment_confirm_slave.py | Python | gpl-2.0 | 6,498 | [
"VisIt"
] | f9b9c06a0f42ba9b3ece2d24dc517690bdebb46cb5cc9e3423e4c8b1cddf301e |
from __future__ import print_function, division
import copy
from collections import defaultdict
from sympy.core.containers import Dict
from sympy.core.expr import Expr
from sympy.core.compatibility import is_sequence, as_int, range
from sympy.core.logic import fuzzy_and
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.iterables import uniq
from .matrices import MatrixBase, ShapeError, a2idx
from .dense import Matrix
import collections
class SparseMatrix(MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
See Also
========
sympy.matrices.dense.Matrix
"""
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], SparseMatrix):
self.rows = args[0].rows
self.cols = args[0].cols
self._smat = dict(args[0]._smat)
return
self._smat = {}
if len(args) == 3:
self.rows = as_int(args[0])
self.cols = as_int(args[1])
if isinstance(args[2], collections.Callable):
op = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(
op(self._sympify(i), self._sympify(j)))
if value:
self._smat[(i, j)] = value
elif isinstance(args[2], (dict, Dict)):
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
v = args[2][key]
if v:
self._smat[key] = self._sympify(v)
elif is_sequence(args[2]):
if len(args[2]) != self.rows*self.cols:
raise ValueError(
'List length (%s) != rows*columns (%s)' %
(len(args[2]), self.rows*self.cols))
flat_list = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(flat_list[i*self.cols + j])
if value:
self._smat[(i, j)] = value
else:
# handle full matrix forms with _handle_creation_inputs
r, c, _list = Matrix._handle_creation_inputs(*args)
self.rows = r
self.cols = c
for i in range(self.rows):
for j in range(self.cols):
value = _list[self.cols*i + j]
if value:
self._smat[(i, j)] = value
def __add__(self, other):
"""Add other to self, efficiently if possible.
When adding a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> A = SparseMatrix(eye(3)) + SparseMatrix(eye(3))
>>> B = SparseMatrix(eye(3)) + eye(3)
>>> A
Matrix([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2]])
>>> A == B
True
>>> isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix)
False
"""
if isinstance(other, SparseMatrix):
return self.add(other)
elif isinstance(other, MatrixBase):
return other._new(other + self)
else:
raise NotImplementedError(
"Cannot add %s to %s" %
tuple([c.__class__.__name__ for c in (other, self)]))
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, SparseMatrix):
return self._smat == other._smat
elif isinstance(other, MatrixBase):
return self._smat == MutableSparseMatrix(other)._smat
except AttributeError:
return False
def __getitem__(self, key):
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._smat.get((i, j), S.Zero)
except (TypeError, IndexError):
if isinstance(i, slice):
# XXX remove list() when PY2 support is dropped
i = list(range(self.rows))[i]
elif is_sequence(i):
pass
elif isinstance(i, Expr) and not i.is_number:
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
else:
if i >= self.rows:
raise IndexError('Row index out of bounds')
i = [i]
if isinstance(j, slice):
# XXX remove list() when PY2 support is dropped
j = list(range(self.cols))[j]
elif is_sequence(j):
pass
elif isinstance(j, Expr) and not j.is_number:
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
else:
if j >= self.cols:
raise IndexError('Col index out of bounds')
j = [j]
return self.extract(i, j)
# check for single arg, like M[:] or M[3]
if isinstance(key, slice):
lo, hi = key.indices(len(self))[:2]
L = []
for i in range(lo, hi):
m, n = divmod(i, self.cols)
L.append(self._smat.get((m, n), S.Zero))
return L
i, j = divmod(a2idx(key, len(self)), self.cols)
return self._smat.get((i, j), S.Zero)
def __mul__(self, other):
"""Multiply self and other, watching for non-matrix entities.
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, zeros
>>> I = SparseMatrix(eye(3))
>>> I*I == I
True
>>> Z = zeros(3)
>>> I*Z
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> I*2 == 2*I
True
"""
if isinstance(other, SparseMatrix):
return self.multiply(other)
if isinstance(other, MatrixBase):
return other._new(self*self._new(other))
return self.scalar_multiply(other)
def __ne__(self, other):
return not self == other
def __neg__(self):
"""Negate all elements of self.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> -SparseMatrix(eye(3))
Matrix([
[-1, 0, 0],
[ 0, -1, 0],
[ 0, 0, -1]])
"""
rv = self.copy()
for k, v in rv._smat.items():
rv._smat[k] = -v
return rv
def __rmul__(self, other):
"""Return product the same type as other (if a Matrix).
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import Matrix, SparseMatrix
>>> A = Matrix(2, 2, range(1, 5))
>>> S = SparseMatrix(2, 2, range(2, 6))
>>> A*S == S*A
False
>>> (isinstance(A*S, SparseMatrix) ==
... isinstance(S*A, SparseMatrix) == False)
True
"""
if isinstance(other, MatrixBase):
return other*other._new(self)
return self.scalar_multiply(other)
def __setitem__(self, key, value):
raise NotImplementedError()
def _cholesky_solve(self, rhs):
# for speed reasons, this is not uncommented, but if you are
# having difficulties, try uncommenting to make sure that the
# input matrix is symmetric
#assert self.is_symmetric()
L = self._cholesky_sparse()
Y = L._lower_triangular_solve(rhs)
rv = L.T._upper_triangular_solve(Y)
return rv
def _cholesky_sparse(self):
"""Algorithm for numeric Cholesky factorization of a sparse matrix."""
Crowstruc = self.row_structure_symbolic_cholesky()
C = self.zeros(self.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = self[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] -= summ
C[i, j] /= C[j, j]
else:
C[j, j] = self[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
summ += C[j, k]**2
else:
break
C[j, j] -= summ
C[j, j] = sqrt(C[j, j])
return C
    def _diagonal_solve(self, rhs):
        """Solve ``D*x = rhs`` where ``self`` is diagonal: divide elementwise
        by the diagonal entries."""
        return self._new(self.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def _eval_conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
conj = self.copy()
for key, value in self._smat.items():
conj._smat[key] = value.conjugate()
return conj
def as_real_imag(self):
"""Returns tuple containing (real , imaginary) part of sparse matrix"""
from sympy.functions.elementary.complexes import re, im
real_smat = self.copy()
im_smat = self.copy()
for key, value in self._smat.items():
real_smat._smat[key] = re(value)
im_smat._smat[key] = im(value)
return (real_smat, im_smat)
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using Cholesky or LDL (default)
decomposition as selected with the ``method`` keyword: 'CH' or 'LDL',
respectively.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
sym = self.is_symmetric()
M = self.as_mutable()
I = M.eye(M.rows)
if not sym:
t = M.T
r1 = M[0, :]
M = t*M
I = t*I
method = kwargs.get('method', 'LDL')
if method in "LDL":
solve = M._LDL_solve
elif method == "CH":
solve = M._cholesky_solve
else:
raise NotImplementedError(
'Method may be "CH" or "LDL", not %s.' % method)
rv = M.hstack(*[solve(I[:, i]) for i in range(I.cols)])
if not sym:
scale = (r1*rv[:, 0])[0, 0]
rv /= scale
return self._new(rv)
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = S.Zero
for i in range(self.cols):
trace += self._smat.get((i, i), 0)
return trace
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
tran = self.zeros(self.cols, self.rows)
for key, value in self._smat.items():
key = key[1], key[0] # reverse
tran._smat[key] = value
return tran
def _LDL_solve(self, rhs):
# for speed reasons, this is not uncommented, but if you are
# having difficulties, try uncommenting to make sure that the
# input matrix is symmetric
#assert self.is_symmetric()
L, D = self._LDL_sparse()
Z = L._lower_triangular_solve(rhs)
Y = D._diagonal_solve(Z)
return L.T._upper_triangular_solve(Y)
def _LDL_sparse(self):
"""Algorithm for numeric LDL factization, exploiting sparse structure.
"""
Lrowstruc = self.row_structure_symbolic_cholesky()
L = self.eye(self.rows)
D = self.zeros(self.rows, self.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = self[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] -= summ
L[i, j] /= D[j, j]
elif i == j:
D[i, i] = self[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] -= summ
return L, D
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, ones
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrix):
raise ValueError('only use add with %s, not %s' %
tuple([c.__class__.__name__ for c in (self, other)]))
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def cholesky(self):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('Cholesky decomposition applies only to '
'symmetric matrices.')
M = self.as_mutable()._cholesky_sparse()
if M.has(nan) or M.has(oo):
raise ValueError('Cholesky decomposition applies only to '
'positive-definite matrices')
return self._new(M)
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
col_op
row_list
"""
return [tuple(k + (self[k],)) for k in sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
def col(self, j):
"""Returns column j from self as a column vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.col(0)
Matrix([
[1],
[3]])
See Also
========
row
col_list
"""
return self[:, j]
    def copy(self):
        """Return a copy of ``self``; the element dict is copied by the
        constructor invoked through ``_new``, so the copies are independent."""
        return self._new(self.rows, self.cols, self._smat)
def extract(self, rowsList, colsList):
urow = list(uniq(rowsList))
ucol = list(uniq(colsList))
smat = {}
if len(urow)*len(ucol) < len(self._smat):
# there are fewer elements requested than there are elements in the matrix
for i, r in enumerate(urow):
for j, c in enumerate(ucol):
smat[i, j] = self._smat.get((r, c), 0)
else:
# most of the request will be zeros so check all of self's entries,
# keeping only the ones that are desired
for rk, ck in self._smat:
if rk in urow and ck in ucol:
smat[(urow.index(rk), ucol.index(ck))] = self._smat[(rk, ck)]
rv = self._new(len(urow), len(ucol), smat)
# rv is nominally correct but there might be rows/cols
# which require duplication
if len(rowsList) != len(urow):
for i, r in enumerate(rowsList):
i_previous = rowsList.index(r)
if i_previous != i:
rv = rv.row_insert(i, rv.row(i_previous))
if len(colsList) != len(ucol):
for i, c in enumerate(colsList):
i_previous = colsList.index(c)
if i_previous != i:
rv = rv.col_insert(i, rv.col(i_previous))
return rv
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, {(i, i): S.One for i in range(n)})
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = SparseMatrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
def cond():
d = self._smat
yield self.is_square
if len(d) <= self.rows:
yield fuzzy_and(
d[i, i].is_real for i, j in d if i == j)
else:
yield fuzzy_and(
d[i, i].is_real for i in range(self.rows) if (i, i) in d)
yield fuzzy_and(
((self[i, j] - self[j, i].conjugate()).is_zero
if (j, i) in d else False) for (i, j) in d)
return fuzzy_and(i for i in cond())
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self._smat) == self.rows
def is_symmetric(self, simplify=True):
"""Return True if self is symmetric.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> M = SparseMatrix(eye(3))
>>> M.is_symmetric()
True
>>> M[0, 2] = 1
>>> M.is_symmetric()
False
"""
if simplify:
return all((k[1], k[0]) in self._smat and
not (self[k] - self[(k[1], k[0])]).simplify()
for k in self._smat)
else:
return all((k[1], k[0]) in self._smat and
self[k] == self[(k[1], k[0])] for k in self._smat)
def LDLdecomposition(self):
    """
    Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
    ``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
    symmetric, positive-definite and non-singular.

    This method eliminates the use of square root and ensures that all
    the diagonal entries of L are 1.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
    >>> L, D = A.LDLdecomposition()
    >>> L
    Matrix([
    [   1,   0, 0],
    [ 3/5,   1, 0],
    [-1/5, 1/3, 1]])
    >>> D
    Matrix([
    [25, 0, 0],
    [ 0, 9, 0],
    [ 0, 0, 9]])
    >>> L * D * L.T == A
    True
    """
    from sympy.core.numbers import nan, oo
    if not self.is_symmetric():
        raise ValueError('LDL decomposition applies only to '
                         'symmetric matrices.')
    # the sparse LDL routine lives on the mutable class
    L, D = self.as_mutable()._LDL_sparse()
    # nan/oo in the factors signals division by a zero pivot, i.e. the
    # matrix was not positive-definite
    if L.has(nan) or L.has(oo) or D.has(nan) or D.has(oo):
        raise ValueError('LDL decomposition applies only to '
                         'positive-definite matrices')
    return self._new(L), self._new(D)
def liupc(self):
    """Liu's algorithm, for pre-determination of the Elimination Tree of
    the given matrix, used in row-based symbolic Cholesky factorization.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> S = SparseMatrix([
    ... [1, 0, 3, 2],
    ... [0, 0, 1, 0],
    ... [4, 0, 0, 5],
    ... [0, 6, 7, 0]])
    >>> S.liupc()
    ([[0], [], [0], [1, 2]], [4, 3, 4, 4])

    References
    ==========

    Symbolic Sparse Cholesky Factorization using Elimination Trees,
    Jeroen Van Grondelle (1999)
    http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
    """
    # Algorithm 2.4, p 17 of reference
    # get the indices of the elements that are non-zero on or below diag
    R = [[] for r in range(self.rows)]
    for r, c, _ in self.row_list():
        if c <= r:
            R[r].append(c)
    inf = len(R)  # nothing will be this large
    parent = [inf]*self.rows
    virtual = [inf]*self.rows
    for r in range(self.rows):
        # R[r][:-1]: all lower-triangular columns except the diagonal
        for c in R[r][:-1]:
            # path compression: walk up the virtual chain, restamping
            # each visited node with the current row
            while virtual[c] < r:
                t = virtual[c]
                virtual[c] = r
                c = t
            if virtual[c] == inf:
                # first time node c is reached: r becomes its parent
                parent[c] = virtual[c] = r
    return R, parent
def multiply(self, other):
    """Fast multiplication exploiting the sparsity of the matrix.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix, ones
    >>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
    >>> A.multiply(B) == 3*ones(4)
    True

    See Also
    ========

    add
    """
    A = self
    B = other
    # sort B's row_list into list of rows so that row j of B can be
    # looked up directly for every non-zero A[k, j]
    Blist = [[] for i in range(B.rows)]
    for i, j, v in B.row_list():
        Blist[i].append((j, v))
    Cdict = defaultdict(int)
    # classic sparse product: C[k, n] = sum_j A[k, j] * B[j, n],
    # iterating only over stored entries of A and B
    for k, j, Akj in A.row_list():
        for n, Bjn in Blist[j]:
            temp = Akj*Bjn
            Cdict[k, n] += temp
    rv = self.zeros(A.rows, B.cols)
    # drop entries that summed to zero to keep the result sparse
    rv._smat = {k: v for k, v in Cdict.items() if v}
    return rv
def nnz(self):
    """Returns the number of non-zero elements in Matrix."""
    # zeros are never stored in _smat, so its size is exactly the count
    return len(self._smat)
def reshape(self, rows, cols):
    """Reshape matrix while retaining original size.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> S = SparseMatrix(4, 2, range(8))
    >>> S.reshape(2, 4)
    Matrix([
    [0, 1, 2, 3],
    [4, 5, 6, 7]])
    """
    if len(self) != rows*cols:
        raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
    # map each stored entry through its flat (row-major) index into the
    # new shape; zeros never move because they are never stored
    reshaped = {}
    for (i, j), v in self._smat.items():
        flat = i*self.cols + j
        reshaped[divmod(flat, cols)] = v
    return self._new(rows, cols, reshaped)
def row_list(self):
    """Returns a row-sorted list of non-zero elements of the matrix.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> a = SparseMatrix(((1, 2), (3, 4)))
    >>> a
    Matrix([
    [1, 2],
    [3, 4]])
    >>> a.RL
    [(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]

    See Also
    ========

    row_op
    col_list
    """
    # sort keys lexicographically (row first, then column) and attach
    # each stored value as the third tuple element
    ordered = sorted(self._smat.keys(), key=list)
    return [(i, j, self[i, j]) for (i, j) in ordered]
def row_structure_symbolic_cholesky(self):
    """Symbolic cholesky factorization, for pre-determination of the
    non-zero structure of the Cholesky factororization.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> S = SparseMatrix([
    ... [1, 0, 3, 2],
    ... [0, 0, 1, 0],
    ... [4, 0, 0, 5],
    ... [0, 6, 7, 0]])
    >>> S.row_structure_symbolic_cholesky()
    [[0], [], [0], [1, 2]]

    References
    ==========

    Symbolic Sparse Cholesky Factorization using Elimination Trees,
    Jeroen Van Grondelle (1999)
    http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
    """
    R, parent = self.liupc()
    inf = len(R)  # this acts as infinity
    # deep copy because each Lrow[k] is extended in place below
    Lrow = copy.deepcopy(R)
    for k in range(self.rows):
        for j in R[k]:
            # walk up the elimination tree, adding fill-in positions
            while j != inf and j != k:
                Lrow[k].append(j)
                j = parent[j]
        # deduplicate and sort the predicted non-zero columns of row k
        Lrow[k] = list(sorted(set(Lrow[k])))
    return Lrow
def row(self, i):
    """Returns row i from self as a row vector.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> a = SparseMatrix(((1, 2), (3, 4)))
    >>> a.row(0)
    Matrix([[1, 2]])

    See Also
    ========

    col
    row_list
    """
    return self[i,:]
def scalar_multiply(self, scalar):
    """Scalar element-wise multiplication.

    Returns a new matrix; self is not modified.  Multiplying by a falsy
    scalar (e.g. 0) yields an empty (all-zero) sparse matrix.
    """
    M = self.zeros(*self.shape)
    if scalar:
        for i in self._smat:
            v = scalar*self._smat[i]
            if v:
                M._smat[i] = v
            else:
                # the product simplified to zero (possible with symbolic
                # scalars); keep the result sparse by not storing it.
                # M starts empty, so pop is a defensive no-op here.
                M._smat.pop(i, None)
    return M
def solve_least_squares(self, rhs, method='LDL'):
    """Return the least-square fit to the data.

    By default the LDL routine is used (method='LDL'); other
    methods of matrix inversion can be used. To find out which are
    available, see the docstring of the .inv() method.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix, Matrix, ones
    >>> A = Matrix([1, 2, 3])
    >>> B = Matrix([2, 3, 4])
    >>> S = SparseMatrix(A.row_join(B))
    >>> S
    Matrix([
    [1, 2],
    [2, 3],
    [3, 4]])

    If each line of S represent coefficients of Ax + By
    and x and y are [2, 3] then S*xy is:

    >>> r = S*Matrix([2, 3]); r
    Matrix([
    [ 8],
    [13],
    [18]])

    But let's add 1 to the middle value and then solve for the
    least-squares value of xy:

    >>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
    Matrix([
    [ 5/3],
    [10/3]])

    The error is given by S*xy - r:

    >>> S*xy - r
    Matrix([
    [1/3],
    [1/3],
    [1/3]])
    >>> _.norm().n(2)
    0.58

    If a different xy is used, the norm will be higher:

    >>> xy += ones(2, 1)/10
    >>> (S*xy - r).norm().n(2)
    1.5
    """
    # normal-equations solution: x = (A^T A)^-1 A^T b
    t = self.T
    return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
    """Return solution to self*soln = rhs using given inversion method.

    For a list of possible inversion methods, see the .inv() docstring.

    Raises
    ======

    ValueError
        If the matrix is not square (under- or over-determined system).
    """
    if not self.is_square:
        if self.rows < self.cols:
            raise ValueError('Under-determined system.')
        elif self.rows > self.cols:
            raise ValueError('For over-determined system, M, having '
                'more rows than columns, try M.solve_least_squares(rhs).')
    else:
        return self.inv(method=method)*rhs
def tolist(self):
    """Convert this sparse matrix into a list of nested Python lists.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix, ones
    >>> a = SparseMatrix(((1, 2), (3, 4)))
    >>> a.tolist()
    [[1, 2], [3, 4]]

    When there are no rows then it will not be possible to tell how
    many columns were in the original matrix:

    >>> SparseMatrix(ones(0, 3)).tolist()
    []
    """
    # degenerate shapes first: no rows -> [], no cols -> list of empties
    if not self.rows:
        return []
    if not self.cols:
        return [[] for _ in range(self.rows)]
    nrows, ncols = self.shape
    result = []
    for r in range(nrows):
        result.append([self[r, c] for c in range(ncols)])
    return result
# Read-only shortcut properties for the tuple-based entry views.
RL = property(row_list, None, None, "Alternate faster representation")
CL = property(col_list, None, None, "Alternate faster representation")
# PEP 465 matrix-multiplication operators delegate to the existing
# __mul__/__rmul__ implementations.
__matmul__ = __mul__
__rmatmul__ = __rmul__
# reuse the base-class documentation for the inherited-style extract
extract.__doc__ = MatrixBase.extract.__doc__
@classmethod
def zeros(cls, r, c=None):
    """Return an r x c matrix of zeros, square if c is omitted."""
    if c is None:
        c = r  # square by default
    nrows = as_int(r)
    ncols = as_int(c)
    # an empty storage dict means every entry is zero
    return cls(nrows, ncols, {})
class MutableSparseMatrix(SparseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
    # NOTE(review): keyword arguments are accepted but silently dropped;
    # only positional args reach the constructor.
    return cls(*args)
def __setitem__(self, key, value):
    """Assign value to position designated by key.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix, ones
    >>> M = SparseMatrix(2, 2, {})
    >>> M[1] = 1; M
    Matrix([
    [0, 1],
    [0, 0]])
    >>> M[1, 1] = 2; M
    Matrix([
    [0, 1],
    [0, 2]])
    >>> M = SparseMatrix(2, 2, {})
    >>> M[:, 1] = [1, 1]; M
    Matrix([
    [0, 1],
    [0, 1]])
    >>> M = SparseMatrix(2, 2, {})
    >>> M[1, :] = [[1, 1]]; M
    Matrix([
    [0, 0],
    [1, 1]])

    To replace row r you assign to position r*m where m
    is the number of columns:

    >>> M = SparseMatrix(4, 4, {})
    >>> m = M.cols
    >>> M[3*m] = ones(1, m)*2; M
    Matrix([
    [0, 0, 0, 0],
    [0, 0, 0, 0],
    [0, 0, 0, 0],
    [2, 2, 2, 2]])

    And to replace column c you can assign to position c:

    >>> M[2] = ones(m, 1)*4; M
    Matrix([
    [0, 0, 4, 0],
    [0, 0, 4, 0],
    [0, 0, 4, 0],
    [2, 2, 4, 2]])
    """
    # _setitem handles slice/sequence assignment itself and returns None;
    # for a single-element assignment it returns the normalized triple.
    rv = self._setitem(key, value)
    if rv is not None:
        i, j, value = rv
        if value:
            self._smat[(i, j)] = value
        elif (i, j) in self._smat:
            # assigning zero removes the stored entry to keep sparsity
            del self._smat[(i, j)]
def as_mutable(self):
    # already mutable; return a copy so callers never share state
    return self.copy()

__hash__ = None  # mutable matrices are unhashable
def col_del(self, k):
    """Delete the given column of the matrix.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> M = SparseMatrix([[0, 0], [0, 1]])
    >>> M
    Matrix([
    [0, 0],
    [0, 1]])
    >>> M.col_del(0)
    >>> M
    Matrix([
    [0],
    [1]])

    See Also
    ========

    row_del
    """
    k = a2idx(k, self.cols)
    rebuilt = {}
    for (i, j), v in self._smat.items():
        if j == k:
            continue  # entries in the deleted column disappear
        # columns right of k shift left by one
        rebuilt[(i, j - 1) if j > k else (i, j)] = v
    self._smat = rebuilt
    self.cols -= 1
def col_join(self, other):
    """Returns B augmented beneath A (row-wise joining)::

        [A]
        [B]

    Examples
    ========

    >>> from sympy import SparseMatrix, Matrix, ones
    >>> A = SparseMatrix(ones(3))
    >>> A
    Matrix([
    [1, 1, 1],
    [1, 1, 1],
    [1, 1, 1]])
    >>> B = SparseMatrix.eye(3)
    >>> B
    Matrix([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]])
    >>> C = A.col_join(B); C
    Matrix([
    [1, 1, 1],
    [1, 1, 1],
    [1, 1, 1],
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]])
    >>> C == A.col_join(Matrix(B))
    True

    Joining along columns is the same as appending rows at the end
    of the matrix:

    >>> C == A.row_insert(A.rows, Matrix(B))
    True
    """
    if not self:
        # empty self: the join is just other, converted to our type
        return type(self)(other)
    A, B = self, other
    if not A.cols == B.cols:
        raise ShapeError()
    A = A.copy()
    if not isinstance(B, SparseMatrix):
        # dense B: walk its flat storage, keeping only non-zero values
        k = 0
        b = B._mat
        for i in range(B.rows):
            for j in range(B.cols):
                v = b[k]
                if v:
                    A._smat[(i + A.rows, j)] = v
                k += 1
    else:
        # sparse B: shift every stored entry down by A.rows
        for (i, j), v in B._smat.items():
            A._smat[i + A.rows, j] = v
    A.rows += B.rows
    return A
def col_op(self, j, f):
    """In-place operation on col j using two-arg functor whose args are
    interpreted as (self[i, j], i) for i in range(self.rows).

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> M = SparseMatrix.eye(3)*2
    >>> M[1, 0] = -1
    >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
    Matrix([
    [ 2, 4, 0],
    [-1, 0, 0],
    [ 0, 0, 2]])
    """
    for i in range(self.rows):
        # missing entries are implicit zeros
        v = self._smat.get((i, j), S.Zero)
        fv = f(v, i)
        if fv:
            self._smat[(i, j)] = fv
        elif v:
            # entry became zero: drop it to preserve sparsity
            self._smat.pop((i, j))
def col_swap(self, i, j):
    """Swap, in place, columns i and j.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> S = SparseMatrix.eye(3); S[2, 1] = 2
    >>> S.col_swap(1, 0); S
    Matrix([
    [0, 1, 0],
    [1, 0, 0],
    [2, 0, 1]])
    """
    if i > j:
        i, j = j, i  # ensure i < j so a single column-sorted pass works
    rows = self.col_list()  # entries sorted by column, then row
    temp = []
    for ii, jj, v in rows:
        if jj == i:
            # stash column-i entries; they move to column j afterwards
            self._smat.pop((ii, jj))
            temp.append((ii, v))
        elif jj == j:
            # move column-j entries straight into column i
            self._smat.pop((ii, jj))
            self._smat[ii, i] = v
        elif jj > j:
            break  # past both columns: nothing left to swap
    for k, v in temp:
        self._smat[k, j] = v
def copyin_list(self, key, value):
    """Copy the elements of the list/tuple ``value`` into the
    sub-matrix of self selected by ``key``."""
    if not is_sequence(value):
        raise TypeError("`value` must be of type list or tuple.")
    # convert to a Matrix and delegate the bounds/shape handling
    self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
    """Copy the matrix ``value`` into the sub-matrix of self selected
    by ``key``.  Raises ShapeError if the shapes do not match."""
    # include this here because it's not part of BaseMatrix
    rlo, rhi, clo, chi = self.key2bounds(key)
    shape = value.shape
    dr, dc = rhi - rlo, chi - clo
    if shape != (dr, dc):
        raise ShapeError(
            "The Matrix `value` doesn't have the same dimensions "
            "as the in sub-Matrix given by `key`.")
    if not isinstance(value, SparseMatrix):
        # dense source: assign every position (zeros clear entries)
        for i in range(value.rows):
            for j in range(value.cols):
                self[i + rlo, j + clo] = value[i, j]
    else:
        # sparse source: first clear the target region, choosing the
        # cheaper of scanning the region vs scanning stored entries
        if (rhi - rlo)*(chi - clo) < len(self):
            for i in range(rlo, rhi):
                for j in range(clo, chi):
                    self._smat.pop((i, j), None)
        else:
            for i, j, v in self.row_list():
                if rlo <= i < rhi and clo <= j < chi:
                    self._smat.pop((i, j), None)
        # then copy in only the stored entries of the source
        for k, v in value._smat.items():
            i, j = k
            self[i + rlo, j + clo] = value[i, j]
def fill(self, value):
    """Fill self with the given value.

    Notes
    =====

    Unless many values are going to be deleted (i.e. set to zero)
    this will create a matrix that is slower than a dense matrix in
    operations.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> M = SparseMatrix.zeros(3); M
    Matrix([
    [0, 0, 0],
    [0, 0, 0],
    [0, 0, 0]])
    >>> M.fill(1); M
    Matrix([
    [1, 1, 1],
    [1, 1, 1],
    [1, 1, 1]])
    """
    if not value:
        # filling with zero empties a sparse matrix
        self._smat = {}
        return
    v = self._sympify(value)
    # every position now holds the same sympified value
    self._smat = {(i, j): v
                  for i in range(self.rows)
                  for j in range(self.cols)}
def row_del(self, k):
    """Delete the given row of the matrix.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> M = SparseMatrix([[0, 0], [0, 1]])
    >>> M
    Matrix([
    [0, 0],
    [0, 1]])
    >>> M.row_del(0)
    >>> M
    Matrix([[0, 1]])

    See Also
    ========

    col_del
    """
    k = a2idx(k, self.rows)
    rebuilt = {}
    for (i, j), v in self._smat.items():
        if i == k:
            continue  # entries in the deleted row disappear
        # rows below k shift up by one
        rebuilt[(i - 1, j) if i > k else (i, j)] = v
    self._smat = rebuilt
    self.rows -= 1
def row_join(self, other):
    """Returns B appended after A (column-wise augmenting)::

        [A B]

    Examples
    ========

    >>> from sympy import SparseMatrix, Matrix
    >>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
    >>> A
    Matrix([
    [1, 0, 1],
    [0, 1, 0],
    [1, 1, 0]])
    >>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
    >>> B
    Matrix([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1]])
    >>> C = A.row_join(B); C
    Matrix([
    [1, 0, 1, 1, 0, 0],
    [0, 1, 0, 0, 1, 0],
    [1, 1, 0, 0, 0, 1]])
    >>> C == A.row_join(Matrix(B))
    True

    Joining at row ends is the same as appending columns at the end
    of the matrix:

    >>> C == A.col_insert(A.cols, B)
    True
    """
    if not self:
        # empty self: the join is just other, converted to our type
        return type(self)(other)
    A, B = self, other
    if not A.rows == B.rows:
        raise ShapeError()
    A = A.copy()
    if not isinstance(B, SparseMatrix):
        # dense B: walk its flat storage, keeping only non-zero values
        k = 0
        b = B._mat
        for i in range(B.rows):
            for j in range(B.cols):
                v = b[k]
                if v:
                    A._smat[(i, j + A.cols)] = v
                k += 1
    else:
        # sparse B: shift every stored entry right by A.cols
        for (i, j), v in B._smat.items():
            A._smat[(i, j + A.cols)] = v
    A.cols += B.cols
    return A
def row_op(self, i, f):
    """In-place operation on row ``i`` using two-arg functor whose args are
    interpreted as ``(self[i, j], j)``.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> M = SparseMatrix.eye(3)*2
    >>> M[0, 1] = -1
    >>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
    Matrix([
    [2, -1, 0],
    [4,  0, 0],
    [0,  0, 2]])

    See Also
    ========

    row
    zip_row_op
    col_op
    """
    for j in range(self.cols):
        # missing entries are implicit zeros
        v = self._smat.get((i, j), S.Zero)
        fv = f(v, j)
        if fv:
            self._smat[(i, j)] = fv
        elif v:
            # entry became zero: drop it to preserve sparsity
            self._smat.pop((i, j))
def row_swap(self, i, j):
    """Swap, in place, rows i and j.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> S = SparseMatrix.eye(3); S[2, 1] = 2
    >>> S.row_swap(1, 0); S
    Matrix([
    [0, 1, 0],
    [1, 0, 0],
    [0, 2, 1]])
    """
    if i > j:
        i, j = j, i  # ensure i < j so a single row-sorted pass works
    rows = self.row_list()  # entries sorted by row, then column
    temp = []
    for ii, jj, v in rows:
        if ii == i:
            # stash row-i entries; they move to row j afterwards
            self._smat.pop((ii, jj))
            temp.append((jj, v))
        elif ii == j:
            # move row-j entries straight into row i
            self._smat.pop((ii, jj))
            self._smat[i, jj] = v
        elif ii > j:
            break  # past both rows: nothing left to swap
    for k, v in temp:
        self._smat[j, k] = v
def zip_row_op(self, i, k, f):
    """In-place operation on row ``i`` using two-arg functor whose args are
    interpreted as ``(self[i, j], self[k, j])``.

    Examples
    ========

    >>> from sympy.matrices import SparseMatrix
    >>> M = SparseMatrix.eye(3)*2
    >>> M[0, 1] = -1
    >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
    Matrix([
    [2, -1, 0],
    [4,  0, 0],
    [0,  0, 2]])

    See Also
    ========

    row
    row_op
    col_op
    """
    # delegate to row_op, pairing each entry of row i with the entry in
    # the same column of row k
    self.row_op(i, lambda v, j: f(v, self[k, j]))
| madan96/sympy | sympy/matrices/sparse.py | Python | bsd-3-clause | 46,101 | [
"DIRAC"
] | e6afed86af30d12836b2b3f01be487905212f95c5b12dbb46e8f22f5ad04c819 |
# ammoTrackingMultiplier
#
# Used by:
# Charges from group: Advanced Artillery Ammo (8 of 8)
# Charges from group: Advanced Autocannon Ammo (8 of 8)
# Charges from group: Advanced Beam Laser Crystal (8 of 8)
# Charges from group: Advanced Blaster Charge (8 of 8)
# Charges from group: Advanced Pulse Laser Crystal (8 of 8)
# Charges from group: Advanced Railgun Charge (8 of 8)
# Charges from group: Projectile Ammo (129 of 129)
type = "passive"


def handler(fit, module, context):
    # Scale the host module's tracking speed by the loaded charge's
    # trackingSpeedMultiplier attribute.
    module.multiplyItemAttr("trackingSpeed", module.getModifiedChargeAttr("trackingSpeedMultiplier"))
| Ebag333/Pyfa | eos/effects/ammotrackingmultiplier.py | Python | gpl-3.0 | 585 | [
"CRYSTAL"
] | f06bb8a6333093e9da714c9fbe80186a38983c740517b8f098029e8236a3f861 |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
from matplotlib.backends.backend_pdf import PdfPages
from scipy.signal._peak_finding import argrelextrema
import sklearn
import sklearn.metrics
from sklearn.metrics.ranking import roc_curve, auc
import sklearn.preprocessing
from scipy import interp
import Levenshtein
from matplotlib.colors import LinearSegmentedColormap
#========================================================================
# Postprocess prediction signal to create segments assigned to highest activity neuron.
# Segments are generated while the sum of all output neurons, that do not
# predict zero, is above the treshold.
# All timesteps within one segment are labeled as belonging to the neuron
# that had the highest overall activity during the segment.
# prediction_orig: raw classifier output (timesteps x classes)
# target_orig: unused (kept for interface compatibility)
# treshold: minimum overall activity to activate classifier; scalar or
#           per-timestep ndarray
# gestureMinLength: minimum gesture length to activate classifier
#
# returns: binary matrix of same shape as prediction_orig containing
# the segmented information.
#========================================================================
def calcMaxActivityPrediction(prediction_orig, target_orig, treshold, gestureMinLength=1):
    prediction = np.copy(prediction_orig)  # work on a copy; input is untouched
    i = 0
    start = 0
    end = 0
    if not type(treshold) is np.ndarray:
        # scalar threshold: broadcast to one value per timestep
        treshold = np.ones((prediction_orig.shape[0], 1))*treshold
    while i < prediction.shape[0]:  # iterate over all timesteps
        j = i
        posSum = np.sum(prediction[j, :][prediction[j, :] > 0])  # sum up all neuron output above zero
        while j < prediction.shape[0] and posSum > treshold[j]:  # while the sum of all output neurons is above the treshold shift j
            posSum = np.sum(prediction[j, :][prediction[j, :] > 0])
            j += 1  # after this loop, j is the end of the found segment
        if j - i > gestureMinLength:  # if the segment (from i to j) is longer than the minimum number of timesteps
            start = i  # i is segment start
            end = j    # j is segment end
            sums = np.sum(prediction[start:end, :], 0)  # sum up all output neuron activities
            predicted_class = np.argmax(sums)
            prediction[start:end+1, :] = 0  # set all timesteps within segment to zero
            prediction[start:end, predicted_class] = 1  # assign all timesteps to the output neuron with highest activity
        else:
            prediction[i:j+1, :] = 0  # if the segment is too short, remove all predictions
        i = j + 1
    return prediction
#===============================================================================
# This is the actual mapping algorithm.
# It reduces prediction and target to lists of segments, then tries to find an
# optimal mapping following the rules described in the thesis.
# prediction: binary prediction matrix (timesteps x classes)
# target: binary ground truth target matrix
# treshold: for the no-gesture signal
# plot: indicates whether plots of the mapping shall be generated
#
# returns two lists: predicted label and true label. Segments are NOT ordered anymore.
# NOTE(review): relies on mapSegment(), defined elsewhere in this module.
#===============================================================================
def calcInputSegmentSeries(prediction, target, treshold, plot=False):
    prediction = addTresholdSignal(prediction, treshold)  # add threshold to prediction to represent no gesture
    target = addNoGestureSignal(target)  # add no gesture to target as well
    predictionInt = np.argmax(prediction, 1)  # convert binary prediction matrix to list of integers
    targetInt = np.argmax(target, 1)  # convert binary target matrix to list of integers
    inds = [0]  # search for beginning and end of segments (every time the value of predictionInt changes)
    for i in range(1, len(predictionInt)):
        if predictionInt[i-1] != predictionInt[i]:
            inds.append(i)
    inds.append(len(prediction)-1)
    if plot:
        plt.figure()
        cmap = mpl.cm.gist_earth
        for i in range(prediction.shape[1]):
            plt.plot(prediction[:, i], c=cmap(float(i)/(prediction.shape[1])))
        lastI = 0
        for i in inds:
            #plt.plot([i,i],[-2,2], c='black')
            x = np.arange(lastI, i+1)
            y1 = 0
            y2 = prediction[x, predictionInt[lastI]]
            #print predictionInt[i], prediction.shape[1], float(predictionInt[i]) / prediction.shape[1]
            plt.fill_between(x, y1, y2, facecolor=cmap(float(predictionInt[lastI])/(prediction.shape[1])), alpha=0.5)
            lastI = i
        plt.plot(target)
    # create a binary array indicating which timesteps have been mapped
    mapped = np.zeros(targetInt.shape)
    #===============================================================================
    # Those two arrays will later contain the predicted and true label of each segment
    #===============================================================================
    segmentPredicted = []
    segmentTarget = []
    # first iterate over the whole array and map as many true positives as possible;
    # double detections will also be mapped in this iteration
    for i in range(1, len(inds)):
        start = inds[i-1]
        end = inds[i]
        targetSegment = targetInt[start:end]
        predictedClass = predictionInt[start]
        if predictedClass != prediction.shape[1]-1:  # if this is not a no-gesture segment
            # check for tp case
            tpInds = np.add(np.where(targetSegment == predictedClass), start)
            #print tpInds, tpInds.size
            if not tpInds.size == 0 and not np.max(mapped[tpInds]) != 0:  # segment has not been mapped
                segmentPredicted.append(predictedClass)  # add a segment with label predictedClass
                segmentTarget.append(predictedClass)  # true label is also predictedClass
                if plot:
                    plt.fill_between(np.arange(start, end+1), 0, -1, facecolor='blue')
                mapSegment(mapped, targetInt, predictedClass, start)  # map the area of the segment, as true positive has been detected
            elif not tpInds.size == 0 and np.max(mapped[tpInds]) != 0:  # segment has been mapped already
                segmentPredicted.append(predictedClass)  # add a segment with predicted class
                segmentTarget.append(prediction.shape[1]-1)  # add no gesture label as true label for this segment
                if plot:
                    plt.fill_between(np.arange(start, end+1), 0, -1, facecolor='red')
    #if plot:
    #    plt.fill_between(np.arange(0,len(prediction)),0,mapped,facecolor='blue',alpha=0.5)
    # check for wrong gesture segments and false positives
    for i in range(1, len(inds)):
        start = inds[i-1]
        end = inds[i]
        targetSegment = targetInt[start:end]
        predictedClass = predictionInt[start]
        # check for tp case
        tpInds = np.add(np.where(targetSegment == predictedClass), start)
        if predictedClass != prediction.shape[1]-1:  # if this is not a no-gesture segment
            if tpInds.size == 0:
                bins = np.bincount(targetSegment, None, prediction.shape[1])
                ################################################################
                #uncomment this to allow each target to be classified only once
                #if np.any(bins[:-1]) and np.max(mapped[start:end]) == 0:
                ################################################################
                if np.any(bins[:-1]):  # a different gesture takes place
                    trueClass = np.argmax(bins[:-1])
                    mapSegment(mapped, targetInt, trueClass, start)
                else:
                    trueClass = prediction.shape[1]-1
                segmentPredicted.append(predictedClass)  # add predicted class
                segmentTarget.append(trueClass)  # add actual class
                if plot:
                    plt.fill_between(np.arange(start, end+1), 0, -1, facecolor='green')
                    plt.annotate(str(predictedClass)+'/'+str(trueClass), xy=(start, 0))
                #print targetSegment, bins, predictedClass, trueClass
    # search for target signals that have not been mapped (false negatives)
    targetInds = []
    targetInds.append(0)
    for i in range(1, len(targetInt)):
        if targetInt[i-1] != targetInt[i]:
            targetInds.append(i)
    targetInds.append(len(prediction)-1)
    for i in range(1, len(targetInds)):
        start = targetInds[i-1]
        end = targetInds[i]
        targetSegment = targetInt[start:end]
        predictedClass = prediction.shape[1]-1
        trueClass = targetInt[start]
        if trueClass != prediction.shape[1]-1:  # if this is not a no-gesture segment
            if mapped[start] == 0:
                #print 'trueClass ',trueClass, ' pred ',predictedClass
                segmentPredicted.append(predictedClass)
                segmentTarget.append(trueClass)
                mapSegment(mapped, targetInt, trueClass, start)
                if plot:
                    plt.fill_between(np.arange(start, end+1), 0, -1, facecolor='yellow')
        else:
            segmentPredicted.append(prediction.shape[1]-1)
            segmentTarget.append(prediction.shape[1]-1)
    #if plot:
    #plt.fill_between(np.arange(0,len(prediction)),0,mapped,facecolor='blue',alpha=0.5)
    pred = np.array(segmentPredicted)
    targ = np.array(segmentTarget)
    return pred, targ
#============================================================================
# Append a constant threshold column to the given prediction matrix, acting
# as the activity level of a virtual "no gesture" class.
#============================================================================
def addTresholdSignal(prediction, treshold):
    threshold_column = np.ones(prediction.shape[0]) * treshold
    return np.column_stack((prediction, threshold_column))
#===============================================================================
# Append the no-gesture signal as a new column to the targets.
# The no-gesture column is 1 exactly where all other signals are 0.
#===============================================================================
def addNoGestureSignal(target):
    no_gesture = (target.max(axis=1) == 0).astype(int)[:, None]
    return np.hstack((target, no_gesture))
#===============================================================================
# Plots a given confusion matrix as an annotated heat map.
# cm: square confusion matrix (counts)
# gestures: optional axis tick labels, one per class
# returns the created matplotlib figure.
#===============================================================================
def plot_confusion_matrix(cm, gestures=None, title='Confusion matrix', cmap=cm.Blues):
    fig = plt.figure(figsize=(15, 15))
    # scale the colormap to the largest count, ignoring the last cell
    maxVal = np.max(cm.flatten()[:-1])
    plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=maxVal)
    plt.title(title)
    plt.colorbar()
    if gestures is not None:
        tick_marks = np.arange(len(gestures))
        plt.xticks(tick_marks, gestures, rotation=45)
        plt.yticks(tick_marks, gestures)
    # write each count into its cell
    ind_array = np.arange(0, len(cm), 1)
    x, y = np.meshgrid(ind_array, ind_array)
    for x_val, y_val in zip(x.flatten(), y.flatten()):
        c = str(cm[y_val, x_val])
        plt.text(x_val, y_val, c, va='center', ha='center')
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return fig
#===============================================================================
# Create the Recall, Precision, F1Score plot over a sweep of thresholds.
# Also returns the per-class thresholds and the best mean F1 score found.
# prediction: classifier output (timesteps x classes)
# target: true output
# postProcess: if True, threshold the raw prediction directly; otherwise the
#              max-activity segmentation is applied first
# gestureLength: minimum segment length for the max-activity postprocessing
# NOTE: this file is Python 2 (print statements below).
#===============================================================================
def calcTPFPForThresholds(prediction, target, title='', postProcess=False, gestureLength=10):
    gestureNames = ['left','right','forward','backward','bounce up','bounce down','turn left','turn right','shake lr','shake ud','no gesture']
    lines = []
    # Tested area ranges from 0 to maxTreshold, tests are performed every stepsize
    maxTreshold = 2
    stepsize = 0.01
    tprs = np.zeros((int(maxTreshold*(1/stepsize)), prediction.shape[1]+1))
    fprs = np.zeros((int(maxTreshold*(1/stepsize)), prediction.shape[1]+1))
    f1score = np.zeros((int(maxTreshold*(1/stepsize)), prediction.shape[1]+1))
    # Evaluate prediction with varying tresholds.
    for ind, currentTreshold in enumerate(np.arange(0, maxTreshold, stepsize)):
        if not postProcess:
            pred_new = calcMaxActivityPrediction(prediction, target, currentTreshold, gestureLength)
            pred, targ = calcInputSegmentSeries(pred_new, target, 0.5, False)
        else:
            pred, targ = calcInputSegmentSeries(prediction, target, currentTreshold, False)
        conf = sklearn.metrics.confusion_matrix(targ, pred)
        #for classNr in range(prediction.shape[1]+1):
        #    tprs[ind,classNr] = calcTPRFromConfMatr(conf, classNr)
        #    fprs[ind,classNr] = calcFPRFromConfMatr(conf, classNr)
        tprs[ind] = sklearn.metrics.recall_score(targ, pred, average=None)
        fprs[ind] = sklearn.metrics.precision_score(targ, pred, average=None)
        f1score[ind] = sklearn.metrics.f1_score(targ, pred, average=None)
    matplotlib.rcParams.update({'font.size': 20})
    fig, axes = plt.subplots(3, 1, True, figsize=(20, 20))
    fig.tight_layout(h_pad=2)
    fig.suptitle(title)
    # --- Recall subplot ---
    axes[0].set_title('Recall')
    axes[0].xaxis.set_ticks(np.arange(0, maxTreshold*(1/stepsize), 10))
    axes[0].xaxis.set_ticklabels(np.arange(0, maxTreshold, stepsize*10))
    axes[0].set_xlabel('Treshold')
    cmap = mpl.cm.jet
    for i in range(prediction.shape[1]):
        lin, = axes[0].plot(tprs[:, i], c=cmap(float(i)/prediction.shape[1]), label=gestureNames[i], linewidth=2)
        lines.append(lin)
    lin, = axes[0].plot(tprs[:, prediction.shape[1]], c='black', label='No gesture', linewidth=2)
    axes[0].plot(np.mean(tprs, 1), c='Black', linestyle='--', linewidth=10, label='Mean')
    lines.append(lin)
    axes[0].set_ylim(-0.05, 1.05)
    # --- Precision subplot ---
    axes[1].set_title('Precision')
    axes[1].xaxis.set_ticks(np.arange(0, maxTreshold*(1/stepsize), 10))
    axes[1].xaxis.set_ticklabels(np.arange(0, maxTreshold, stepsize*10))
    axes[1].set_xlabel('Treshold')
    for i in range(prediction.shape[1]):
        axes[1].plot(fprs[:, i], c=cmap(float(i)/prediction.shape[1]), label=gestureNames[i], linewidth=2)
    axes[1].plot(fprs[:, prediction.shape[1]], c='black', label='No gesture', linewidth=2)
    axes[1].plot(np.mean(fprs, 1), c='Black', linestyle='--', linewidth=10, label='Mean')
    axes[1].set_ylim(-0.05, 1.05)
    # --- F1 score subplot ---
    axes[2].set_title('F1Score')
    axes[2].xaxis.set_ticks(np.arange(0, maxTreshold*(1/stepsize), 10))
    axes[2].xaxis.set_ticklabels(np.arange(0, maxTreshold, stepsize*10))
    axes[2].set_xlabel('Treshold')
    for i in range(prediction.shape[1]):
        axes[2].plot(f1score[:, i], c=cmap(float(i)/prediction.shape[1]), label=gestureNames[i], linewidth=2)
    axes[2].plot(f1score[:, prediction.shape[1]], c='black', label='No gesture', linewidth=2)
    axes[2].plot(np.mean(f1score, 1), c='Black', linestyle='--', linewidth=10, label='Mean')
    axes[2].set_ylim(-0.05, 1.05)
    # axes[3].set_title('F1Score and Levenshtein Error')
    # axes[3].xaxis.set_ticks(np.arange(0,maxTreshold*(1/stepsize),10))
    # axes[3].xaxis.set_ticklabels(np.arange(0,maxTreshold,stepsize*10))
    # axes[3].set_xlabel('Treshold')
    # lin, = axes[3].plot(np.mean(f1score,1), c='Black', linestyle='--', linewidth=10, label='Mean F1 Score')
    # lines.append(lin)
    # gestureNames.append('Mean F1 Score')
    # lin, = axes[3].plot(calcLevenshteinForTresholds(prediction, target, maxTreshold, stepsize), c='Green', linestyle='--', linewidth=10, label='Levensthein')
    # lines.append(lin)
    # gestureNames.append('Levenshtein Error')
    # axes[3].set_ylim(-0.05,2.05)
    fig.legend(lines, gestureNames, loc='3', ncol=1, labelspacing=0.)
    # per-class threshold with the best individual F1 score
    tresholds = np.argmax(f1score, 0) * stepsize
    bestF1Score = np.max(np.mean(f1score, 1))
    bestF1ScoreTreshold = np.argmax(np.mean(f1score, 1))*stepsize
    if postProcess:
        # re-evaluate once with the per-class thresholds applied
        t_newPred = np.copy(prediction)
        for i in range(len(tresholds)-1):
            inds = t_newPred[:, i] < tresholds[i]
            t_newPred[:, i][inds] = 0
        pred, targ = calcInputSegmentSeries(t_newPred, target, 0.05, False)
        conf = sklearn.metrics.confusion_matrix(targ, pred)
        f1score = sklearn.metrics.f1_score(targ, pred, average=None)
        bestF1AfterPostProcess = np.mean(f1score)
        print conf, bestF1Score, bestF1AfterPostProcess
    print 'Best found f1 score', bestF1Score, 'at treshold:', bestF1ScoreTreshold
    return tresholds, bestF1Score, bestF1ScoreTreshold
#============================================================================
# Plot minimum errors along all dimension pairs of the error space.
# errs: error space as generated by the optimizer (one axis per parameter)
# params: list of parameters (name expected at index 1 of each entry)
# ranges: list of value ranges for all parameters (or None)
# pp: the pdf file writer (or None to skip saving)
# NOTE(review): relies on Python 2 semantics - range() must return a list
# for .remove() to work.
#============================================================================
def plotMinErrors(errs, params, ranges, pp, cmap='Blues'):
    minVal = np.min(errs)
    min_ind = np.unravel_index(errs.argmin(), errs.shape)
    # one heat map per pair (i, j) of parameters that actually vary
    for i in range(0, len(min_ind)):
        for j in range(i, len(min_ind)):
            if(j != i and errs.shape[i] > 1 and errs.shape[j] > 1 and \
               params[i][1] != '_instance' and params[j][1] != '_instance' ):
                # minimize over all axes except i and j
                minAxes = range(0, len(min_ind))
                minAxes.remove(i)
                minAxes.remove(j)
                mins = np.min(errs, tuple(minAxes))
                plt.figure()
                plt.imshow(mins, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
                plt.xlabel(params[j][1])
                plt.ylabel(params[i][1])
                plt.colorbar()
                if ranges is not None:
                    tick_marks = np.arange(len(mins[0]))
                    plt.xticks(tick_marks, ranges[j], rotation=45)
                    tick_marks = np.arange(len(mins))
                    plt.yticks(tick_marks, ranges[i])
                plt.tight_layout()
                if pp is not None:
                    pp.savefig()
#===============================================================================
# Creates a more distinctive colormap.
#===============================================================================
def getSpecificColorMap():
    """Return a custom matplotlib colormap with a sharp white-to-color step.

    Values below 0.05 map to white, then the map blends towards red via
    blue, making small values visually distinct from zero.
    """
    color_segments = {
        'red': ((0.0, 1.0, 1.0),
                (0.05, 1.0, 1.0),
                (0.5, 0.0, 0.0),
                (1.0, 0.0, 0.0)),
        'green': ((0.0, 1.0, 1.0),
                  (0.05, 1.0, 1.0),
                  (1.0, 0.0, 0.0)),
        'blue': ((0.0, 1.0, 1.0),
                 (0.05, 1.0, 1.0),
                 (0.5, 1.0, 1.0),
                 (1.0, 0.0, 0.0)),
    }
    return LinearSegmentedColormap('BlueRed1', color_segments)
def mergePredictions(predictions, addTreshold=False, treshold=0.0, plot=False):
    """Collapse a (samples x classes) prediction matrix to per-row winners.

    predictions: 2-D array of class activations.
    addTreshold: when True, append an extra constant column at `treshold`
        that acts as a rejection ("no gesture") class.
    plot: when True, visualize raw predictions, winning value and index.

    Returns (vals, inds): the winning activation and its column index per row.
    """
    if addTreshold:
        rejection_column = np.ones((len(predictions), 1)) * treshold
        predictions = np.hstack((predictions, rejection_column))
    winning_vals = predictions.max(axis=1)
    winning_inds = predictions.argmax(axis=1)
    if plot:
        plt.figure()
        plt.plot(predictions, color='grey')
        plt.plot(winning_vals, color='r')
        plt.plot(winning_inds, color='g')
    return winning_vals, winning_inds
def calcConfusionMatrix(input_signal, target_signal):
    """Build a segment-level confusion matrix by majority vote.

    The target signal is split into segments at every change of the active
    target class; within each segment the predicted class is the most
    frequent per-frame winner.  Class index nGestures is the implicit
    "no gesture" rejection class added by mergePredictions.

    Returns (confusionMatrix, classifiedGestures) where classifiedGestures
    [actual][detected] lists the (start, end) frame ranges of each segment.
    """
    nGestures = len(target_signal[0])
    valsP, indsP = mergePredictions(input_signal, True, 0.5)
    valsT, indsT = mergePredictions(target_signal, True, 0.5)
    changesP = np.where(indsP[:-1] != indsP[1:])[0] + 1  # indexes where predicted gesture changes
    changesT = np.where(indsT[:-1] != indsT[1:])[0] + 1  # indexes where actual gesture changes
    detections = []
    classifiedGestures = [[[] for x in range(nGestures + 1)] for x in range(nGestures + 1)]
    # for every segment, even when no gesture is currently active
    lastInd = 0
    for ind in changesT:
        cur_valsP = valsP[lastInd:ind]
        cur_indsP = indsP[lastInd:ind]
        cur_valsT = valsT[lastInd:ind]
        cur_indsT = indsT[lastInd:ind]
        # majority vote over the per-frame predictions inside this segment
        occurences = np.bincount(cur_indsP, None, nGestures + 1)  # +1 for the "no gesture" class
        detectedGesture = np.argmax(occurences)
        actualGesture = cur_indsT[0]
        classifiedGestures[actualGesture][detectedGesture].append((lastInd, ind))
        detections.append((detectedGesture, actualGesture))
        lastInd = ind
    confusionMatrix = np.zeros((nGestures + 1, nGestures + 1))  # +1 for the "no gesture" class
    for det, act in detections:
        confusionMatrix[act][det] = confusionMatrix[act][det] + 1
    return confusionMatrix, classifiedGestures
def calcF1ScoreFromConfusionMatrix(cm, replaceNan = True):
    """Compute per-class F1 scores from a confusion matrix.

    cm: square confusion matrix (rows = actual, columns = predicted).
    replaceNan: replace NaN scores (classes with no samples at all) by the
        mean of the valid scores.

    Returns (f1Scores, occurences): a column vector of per-class F1 values
    and the per-class row sums (number of actual occurrences).
    """
    true_pos = np.diag(cm).astype(float)
    false_pos = np.sum(cm, 0) - true_pos
    false_neg = np.sum(cm, 1) - true_pos
    # 0/0 yields NaN here, matching the per-class loop semantics
    f1Scores = (2. * true_pos / (2. * true_pos + false_neg + false_pos)).reshape(-1, 1)
    occurences = np.sum(cm, 1)
    if replaceNan:
        valid = np.invert(np.isnan(f1Scores))
        mean = np.mean(f1Scores[valid])
        f1Scores[np.isnan(f1Scores)] = mean
    return f1Scores, occurences
def calc1MinusF1Average(input_signal, target_signal, noSilence=False):
    """Error measure: 1 minus the mean per-class F1 score.

    noSilence: when True, exclude the last class (the "no gesture"
    rejection class) from the average.
    """
    confusion, _ = calcConfusionMatrix(input_signal, target_signal)
    scores, _ = calcF1ScoreFromConfusionMatrix(confusion)
    relevant = scores[:-1] if noSilence else scores
    return 1 - np.mean(relevant)
def calc1MinusF1FromMaxApp(input_signal, target_signal, treshold = 0.5, gestureLength = 10):
    """1 minus mean F1 after max-activity post-processing of the prediction.

    Uses calcMaxActivityPrediction (defined elsewhere in this module) to
    clean the raw prediction, then scores it segment-wise.
    """
    t_maxApp_prediction = calcMaxActivityPrediction(input_signal, target_signal, treshold, gestureLength)
    pred_MaxApp, targ_MaxApp = calcInputSegmentSeries(t_maxApp_prediction, target_signal, 0.5)
    return 1.0 - np.mean(sklearn.metrics.f1_score(targ_MaxApp, pred_MaxApp, average=None))
def calcAccuracyFromMaxApp(input_signal, target_signal, treshold = 0.5, gestureLength = 10):
    """Segment-level accuracy after max-activity post-processing."""
    t_maxApp_prediction = calcMaxActivityPrediction(input_signal, target_signal, treshold, gestureLength)
    pred_MaxApp, targ_MaxApp = calcInputSegmentSeries(t_maxApp_prediction, target_signal, 0.5)
    return sklearn.metrics.accuracy_score(targ_MaxApp, pred_MaxApp)
#==============================================================================
# Smoothes input signal by applying a floating average.
#==============================================================================
def calcFloatingAverage(input_signal, target_signal):
    """Smooth a 2-D signal with a trailing window of the last 5 rows.

    NOTE: despite the name this returns the window *sum*, not the mean
    (no division by the window size); callers rely on that behavior.
    The first 5 rows stay zero.  target_signal is accepted for interface
    symmetry but unused.
    """
    window = 5
    smoothed = np.zeros(input_signal.shape)
    n_rows = input_signal.shape[0]
    for row in range(window, n_rows):
        smoothed[row] = input_signal[row - window:row, :].sum(axis=0)
    return smoothed
def calcF1OverFloatingAverage(input_signal, target_signal):
    """1 minus mean F1 computed on the trailing-window smoothed prediction."""
    return calc1MinusF1Average(calcFloatingAverage(input_signal, target_signal), target_signal)
###
### counts max of last n steps
###
def createMaxTargetSignal(t_prediction, treshold):
    """Majority-vote class over a sliding window of the last 6 frames.

    The prediction matrix first gets a rejection column via
    addTresholdSignal, then each output frame i holds the most frequent
    argmax class among frames [i-6, i).  Frame 0 stays 0 (no window yet).

    Returns a (samples x 1) array of class indices.
    """
    filterLength = 6
    t_max = np.zeros((t_prediction.shape[0], 1))
    t_prediction = addTresholdSignal(t_prediction, treshold)
    # warm-up: shorter windows until filterLength frames are available
    for i in range(1, filterLength):
        t_max[i] = np.argmax(np.bincount(np.argmax(t_prediction[0:i, :], 1)))
    for i in range(filterLength, t_prediction.shape[0]):
        t_max[i] = np.argmax(np.bincount(np.argmax(t_prediction[i - filterLength:i, :], 1)))
    return t_max
def calc1MinusConfusionFromMaxTargetSignal(input_signal, target_signal, vis=False):
    """Error measure (1 - mean F1) on majority-vote filtered signals.

    Both prediction and target are run through createMaxTargetSignal
    (prediction at threshold 0.4, target at 0.9) before scoring frame-wise
    with sklearn.  vis=True additionally plots the filtered signals and
    the confusion matrix.
    """
    treshold = 0.4
    maxPred = createMaxTargetSignal(input_signal, treshold)
    maxTarg = createMaxTargetSignal(target_signal, 0.9)
    confMatrix = sklearn.metrics.confusion_matrix(maxTarg, maxPred, None)
    f1score = sklearn.metrics.f1_score(maxTarg, maxPred, average=None)
    #print f1scores
    f1score = np.mean(f1score)
    if vis:
        plt.figure()
        plt.plot(maxPred)
        plt.plot(maxTarg)
        plt.plot(input_signal)
        plot_confusion_matrix(confMatrix)
    #print confMatrix
    #print f1score
    return 1 - f1score
def visCalcConfusionFromMaxTargetSignal(input_signal, target_signal, treshold=0.4):
    """Frame-wise confusion matrix and F1 scores on majority-vote signals.

    Returns (confMatrix, f1scores, f1score) where f1score is the mean over
    classes.  The target threshold is clamped to at least 0.001.
    """
    maxPred = createMaxTargetSignal(input_signal, treshold)
    maxTarg = createMaxTargetSignal(target_signal, np.max([0.001, treshold]))
    confMatrix = sklearn.metrics.confusion_matrix(maxTarg, maxPred, None)
    f1scores = sklearn.metrics.f1_score(maxTarg, maxPred, average=None)
    f1score = np.mean(f1scores)
    report = sklearn.metrics.classification_report(maxTarg.astype('int'), maxPred.astype('int'))
    #print report
    return confMatrix, f1scores, f1score
def countTargetAndPredictedSignalsPerGesture(input_signal, target_signal):
    """Count contiguous activation segments per gesture channel.

    A target segment is a maximal run of frames where the target equals 1.0;
    a predicted segment is a maximal run where the prediction exceeds 0.5.

    Returns a list of (n_target_segments, n_predicted_segments) tuples,
    one per channel (column).
    """
    treshold = 0.5

    def _count_runs(values, is_active):
        # count rising edges of the activity predicate
        n_runs = 0
        prev_active = False
        for v in values:
            active = is_active(v)
            if active and not prev_active:
                n_runs += 1
            prev_active = active
        return n_runs

    results = []
    for sigNr in range(len(input_signal[0])):
        n_target = _count_runs(target_signal[:, sigNr], lambda v: v == 1.0)
        n_predicted = _count_runs(input_signal[:, sigNr], lambda v: v > treshold)
        results.append((n_target, n_predicted))
    return results
def plotMinErrorsToFIle(opt):
    """Write the pairwise minimum-error heatmaps of an optimizer run to a PDF.

    opt: optimizer object exposing errors, parameters and parameter_ranges.
    Output goes to <project>/results/pdf/minErrors.pdf.
    """
    pdfFileName = 'minErrors.pdf'
    pdfFilePath = getProjectPath() + 'results/pdf/' + pdfFileName
    pp = PdfPages(pdfFilePath)
    plotMinErrors(opt.errors, opt.parameters, opt.parameter_ranges, pp)
    pp.close()
def plotMinErrorsSqueezed(errs, params, ranges, pp, cmap='Blues'):
    """Like plotMinErrors, but stretches each cell proportionally to its
    parameter value so logarithmically spaced ranges render to scale.

    Each column/row is repeated int(value / min_value) times before imshow,
    then plotted with a logarithmic x axis.  Figures are currently NOT saved
    (the pp.savefig() call is commented out).
    """
    minVal = np.min(errs)  # NOTE(review): unused local, kept as-is
    min_ind = np.unravel_index(errs.argmin(), errs.shape)
    for i in range(0, len(min_ind)):
        for j in range(i, len(min_ind)):
            if(j != i and errs.shape[i] > 1 and errs.shape[j] > 1 and \
               params[i][1] != '_instance' and params[j][1] != '_instance' ):
                # reduce over all other axes
                minAxes = range(0, len(min_ind))
                minAxes.remove(i)
                minAxes.remove(j)
                mins = np.min(errs, tuple(minAxes))
                # repeat columns proportionally to their parameter value
                minTeiler = np.min(ranges[j])
                j_range = ranges[j] / np.max([0.1, minTeiler])
                newMins = np.empty((mins.shape[0], 0))
                print ranges[j], len(ranges[j]), mins.shape
                for entry_ind, entry in enumerate(j_range):
                    for _ in range(int(entry)):
                        newMins = np.append(newMins, np.atleast_2d(mins[:, entry_ind]).T, 1)
                # repeat rows proportionally to their parameter value
                minTeiler = np.min(ranges[i])
                i_range = ranges[i] / np.max([0.1, minTeiler])
                newMins2D = np.empty((0, newMins.shape[1]))
                for entry_ind, entry in enumerate(i_range):
                    for _ in range(int(entry)):
                        print newMins.shape, newMins2D.shape
                        newMins2D = np.append(newMins2D, np.atleast_2d(newMins[entry_ind, :]), 0)
                plt.figure()
                plt.imshow(newMins2D, interpolation='nearest', aspect='auto', cmap=cmap, vmin=0, vmax=1, extent=[0, newMins2D.shape[0], 0, newMins2D.shape[1]])
                plt.xscale('log')
                plt.xlabel(params[j][1])
                plt.ylabel(params[i][1])
                plt.colorbar()
                if ranges is not None:
                    tick_marks = np.arange(1, len(newMins[0]), 100)
                    plt.xticks(tick_marks, ranges[j], rotation=45)
                    tick_marks = np.arange(len(newMins))
                    plt.yticks(tick_marks, ranges[i])
                plt.tight_layout()
                if pp is not None:
                    pass
                    #pp.savefig()
                #plot_confusion_matrix(cm, gestures, title, cmap)
#plot_confusion_matrix(cm, gestures, title, cmap)
#TODO:plot all dims
def plotAlongAxisErrors(errs, params, ranges, plotAxis, xAxis, yAxis, pp, cmap='Blues'):
    """Plot one xAxis/yAxis error heatmap per value of plotAxis.

    All remaining axes are reduced by minimum.  Axis labels are swapped when
    the heatmap orientation does not match the x-range length.

    plotAxis, xAxis, yAxis: dimension indices into params/ranges.
    pp: PdfPages writer for saving each figure, or None.
    """
    minVal = np.min(errs)  # NOTE(review): unused local, kept as-is
    min_ind = np.unravel_index(errs.argmin(), errs.shape)
    nParams = len(params)
    # validate axis indices before any slicing
    if plotAxis >= nParams or xAxis >= nParams or yAxis >= nParams or plotAxis is None or xAxis is None or yAxis is None:
        print 'Error in plot along axis:' , nParams
        return
    minAxes = range(0, len(params))
    minAxes.remove(plotAxis)
    minAxes.remove(xAxis)
    minAxes.remove(yAxis)
    # keepdims=True so plotAxis positions survive the reduction
    totalMins = np.min(errs, tuple(minAxes), None, True)
    print totalMins.shape
    for i in range(0, len(ranges[plotAxis])):
        plt.figure()
        plt.title(params[plotAxis][1] + ' = ' + str(ranges[plotAxis][i]))
        # select the i-th slice along plotAxis by deleting everything else
        mins = np.delete(totalMins, range(0, i), plotAxis)
        mins = np.delete(mins, range(1, 100), plotAxis)
        mins = np.atleast_2d(np.squeeze(mins))
        plt.imshow(mins, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
        print mins.shape
        print params[yAxis][1]
        print params[xAxis][1]
        if(mins.shape[0] != len(ranges[xAxis])):
            plt.xlabel(params[xAxis][1])
            plt.ylabel(params[yAxis][1])
            if ranges is not None:
                tick_marks = np.arange(len(mins[0]))
                plt.xticks(tick_marks, ranges[xAxis], rotation=45)
                tick_marks = np.arange(len(mins))
                plt.yticks(tick_marks, ranges[yAxis])
        else:
            plt.xlabel(params[yAxis][1])
            plt.ylabel(params[xAxis][1])
            if ranges is not None:
                tick_marks = np.arange(len(mins[0]))
                plt.xticks(tick_marks, ranges[yAxis], rotation=45)
                tick_marks = np.arange(len(mins))
                plt.yticks(tick_marks, ranges[xAxis])
        plt.colorbar()
        plt.tight_layout()
        if pp is not None:
            pp.savefig()
def showROC(prediction, target):
    """Plot per-class, micro- and macro-averaged ROC curves.

    prediction: (samples x classes) score matrix.
    target: (samples x classes) binary ground-truth matrix.
    Uses sklearn's roc_curve/auc; displays the figure via plt.show().
    """
    nGestures = target.shape[1]
    n_classes = nGestures
    y_test = target
    y_score = prediction
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    ##############################################################################
    # Plot ROC curves for the multiclass problem
    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Plot all ROC curves
    plt.figure()
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["micro"]),
             linewidth=2)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["macro"]),
             linewidth=2)
    for i in range(n_classes):
        plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                 ''.format(i, roc_auc[i]))
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    #plt.title('Some extension of Receiver operating characteristic to multi-class')
    plt.legend(loc="lower right")
    plt.show()
def calcTPRFromConfMatr(conf, classNr):
    """True-positive rate (recall) of one class from a confusion matrix."""
    row_total = np.sum(conf[classNr, :])
    return float(conf[classNr, classNr]) / row_total
def calcFPRFromConfMatr(conf, classNr):
    """False-positive rate of one class from a confusion matrix."""
    column_total = np.sum(conf[:, classNr])
    true_pos = conf[classNr, classNr]
    false_pos = column_total - true_pos
    # true negatives: everything outside this class's row and column
    true_neg = np.sum(conf) - np.sum(conf[classNr, :]) - column_total + true_pos
    return float(false_pos) / (false_pos + true_neg)
def calcLevenshteinForTresholds(prediction, target, maxTreshold, stepsize):
    """Sweep the rejection threshold and collect the Levenshtein error at each.

    Returns a column vector with one error value per threshold step in
    [0, maxTreshold).
    """
    levs = np.zeros((int(maxTreshold * (1 / stepsize)), 1))
    for ind, i in enumerate(np.arange(0, maxTreshold, stepsize)):
        levs[ind] = calcLevenshteinError(prediction, target, i)
    return levs
def plotLevenshteinForTresholds(prediction, target):
    """Plot the Levenshtein error as a function of the rejection threshold.

    Sweeps thresholds 0..1.5 in steps of 0.01 and prints the best value.
    """
    maxTreshold = 1.5
    stepsize = 0.01
    levs = calcLevenshteinForTresholds(prediction, target, maxTreshold, stepsize)
    fig, axes = plt.subplots(1, 1, figsize=(20, 20))
    axes.set_title('Levenshtein Distances')
    axes.xaxis.set_ticks(np.arange(0, maxTreshold * (1 / stepsize), 10))
    axes.xaxis.set_ticklabels(np.arange(0, maxTreshold, stepsize * 10))
    axes.set_xlabel('Treshold')
    axes.plot(levs)
    print 'bestLevenshtein: ', np.min(levs), 'at', np.argmin(levs) * stepsize
def getLevenshteinIntSequence(prediction, target, treshold):
    """Collapse prediction and target matrices into integer symbol sequences.

    Each signal is argmax-decoded per frame (after adding a rejection /
    no-gesture column), then consecutive duplicates are removed so only
    class-change symbols remain.
    """
    prediction = addTresholdSignal(prediction, treshold)
    predictionInt = np.argmax(prediction, 1)
    inds = np.where(predictionInt[:-1] != predictionInt[1:])
    predictionInt = predictionInt[inds]
    # NOTE(review): the next expression's result is discarded (no assignment)
    # - presumably meant to drop the no-gesture symbol; confirm intent.
    predictionInt[np.where(predictionInt != prediction.shape[1] - 1)]
    target = addNoGestureSignal(target)
    targetInt = np.argmax(target, 1)
    inds = np.where(targetInt[:-1] != targetInt[1:])
    targetInt = targetInt[inds]
    # NOTE(review): result discarded here as well - same suspected bug.
    targetInt[np.where(targetInt != target.shape[1] - 1)]
    return predictionInt, targetInt
def getLevenshteinSequence(prediction, target, treshold):
    """Encode the integer symbol sequences as strings for edit distance.

    Class index 0 maps to 'A' (offset 65), 1 to 'B', and so on, so standard
    string Levenshtein routines can be applied.
    """
    predictionInt, targetInt = getLevenshteinIntSequence(prediction, target, treshold)
    predictionInt = predictionInt + 65
    predictionChar = map(chr, predictionInt)
    pred = ''.join(predictionChar)
    targetInt = targetInt + 65
    targetChar = map(chr, targetInt)
    targ = ''.join(targetChar)
    return pred, targ
def plotLevenshteinStrings(prediction, target, treshold):
    """Visualize the decoded symbol sequences as colored bands.

    Prediction symbols are drawn as bands above the axis, target symbols
    below it, colored per class via the jet colormap.
    """
    pred, targ = getLevenshteinIntSequence(prediction, target, treshold)
    maxClass = np.max(targ)
    # one-hot style matrices; +1 extra row so fill_between closes the last band
    predBin = np.zeros((len(pred) + 1, maxClass + 1))
    targBin = np.zeros((len(targ) + 1, maxClass + 1))
    for i in range(len(pred)):
        predBin[i, pred[i]] = 1
        predBin[i + 1, pred[i]] = 1
    for i in range(len(targ)):
        targBin[i, targ[i]] = -1
        targBin[i + 1, targ[i]] = -1
    cmap = mpl.cm.jet
    plt.figure()
    plt.ylim(-1.5, 1.5)
    for i in range(prediction.shape[1]):
        plt.fill_between(range(len(predBin)), 1, 0, where=predBin[:, i] == 1, facecolor=cmap(float(i) / prediction.shape[1]), alpha=0.8)
        plt.fill_between(range(len(targBin)), -1, 0, where=targBin[:, i] == -1, facecolor=cmap(float(i) / prediction.shape[1]), alpha=0.8)
    print predBin
def calcLevenshteinDistance(prediction, target, treshold=0.4):
    """Edit distance between decoded prediction and target symbol strings."""
    pred, targ = getLevenshteinSequence(prediction, target, treshold)
    levDist = Levenshtein.distance(pred, targ)
    return levDist
def calcLevenshteinError(prediction, target, treshold=0.4):
    """Normalized Levenshtein error: edit distance / target sequence length.

    Decodes both signals once and reuses the sequences; the previous
    implementation also called calcLevenshteinDistance, which re-derived
    the very same strings a second time.  Behavior is unchanged.
    """
    pred, targ = getLevenshteinSequence(prediction, target, treshold)
    levDist = Levenshtein.distance(pred, targ)
    levError = levDist / float(len(targ))
    return levError
def postProcessPrediction(prediction, tresholds):
    """Zero out sub-threshold activations per gesture column.

    Applies tresholds[col] to every column except the last one (the final
    threshold, belonging to the rejection column, is deliberately skipped).
    The input array is not modified; a cleaned copy is returned.
    """
    cleaned = np.copy(prediction)
    for col in range(len(tresholds) - 1):
        below = cleaned[:, col] < tresholds[col]
        cleaned[below, col] = 0
    return cleaned
def getMinima(errs, nr=-1):
    """Locate strict local minima of an n-dimensional error array.

    Uses scipy's argrelextrema with order=1 and wrap-around boundaries.
    Returns a (n_minima x ndim) index table, or just row `nr` when
    nr != -1.
    """
    minima = argrelextrema(errs, np.less, order=1, mode='wrap')
    table = np.zeros((len(minima[0]), len(errs.shape)))
    for axis, axis_inds in enumerate(minima):
        table[:, axis] = axis_inds
    if nr != -1:
        return table[nr, :]
    return table
def calc1MinusF1FromInputSegment(prediction, target, treshold=0.4):
    """1 minus mean F1 over input segments; also prints diagnostics.

    Side effects: prints the confusion matrix and F1 scores, and runs
    calcTPFPForThresholds for its console output.
    """
    pred, targ = calcInputSegmentSeries(prediction, target, treshold, False)
    conf = sklearn.metrics.confusion_matrix(targ, pred)
    f1 = np.mean(sklearn.metrics.f1_score(targ, pred, average=None))
    print conf
    print sklearn.metrics.f1_score(targ, pred, average=None)
    print np.mean(sklearn.metrics.f1_score(targ, pred, average=None))
    calcTPFPForThresholds(prediction, target)
    return 1 - f1
def normalize_confusion_maxtrix(confus):
    """Row-normalize a confusion matrix, returning a float copy.

    Every row except the last is divided by its own sum.  The last row
    (the "no gesture" class) is additionally divided by the number of
    gesture classes, down-weighting the typically dominant rejection row.
    """
    normalized = np.array(confus, dtype='float')
    last = len(normalized) - 1
    for row in range(last):
        normalized[row, :] = normalized[row, :] / float(np.sum(normalized[row, :]))
    normalized[last, :] = normalized[last, :] / float(np.sum(normalized[last, :]) * last)
    return normalized
def mapSegment(mapped, targetInt, predictedClass, ind):
    """Mark in `mapped` the contiguous run of `predictedClass` nearest to ind.

    Starting at `ind`: if already inside a run of predictedClass, steps back
    out of it first (handles starting mid-segment), then advances to the
    first sample of the next predictedClass run and sets mapped to 1 over
    that whole run.  Mutates `mapped` in place; returns None.
    """
    pos = ind
    # step back out of a run we may already be inside
    while targetInt[pos] == predictedClass:
        pos -= 1
    # advance to the first sample of the next predictedClass run
    while targetInt[pos] != predictedClass:
        pos += 1
    seg_start = pos
    while pos < len(targetInt) and targetInt[pos] == predictedClass:
        pos += 1
    mapped[seg_start:pos] = 1
def removeSegment(target, classNr, ind):
    """Clear the activation run of class `classNr` that covers index `ind`.

    Walks left past the active run (stopping on the sample just before it;
    this also covers the signal-starts-active case), then zeroes that column
    from there through the end of the run.  Mutates `target` in place.
    """
    pos = ind
    # walk left while the class is active; stops one sample before the run
    while target[pos, classNr] == 1:
        pos -= 1
    seg_start = pos
    pos += 1
    while pos < len(target) and target[pos, classNr] == 1:
        pos += 1
    target[seg_start:pos, classNr] = 0
| ravenshooter/BA_Analysis | Evaluation.py | Python | mit | 38,190 | [
"NEURON"
] | 0f7caa44789d2023f1a990fef1320621fa3d6b5d15762ab892f9e84e97ed4c2f |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# grid_script - the core job handling daemon on a MiG server
# Copyright (C) 2003-2012 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Main script running on the MiG server"""
import sys
import time
import datetime
import calendar
import threading
import os
import signal
import copy
import jobscriptgenerator
from jobqueue import JobQueue
from shared.base import client_id_dir
from shared.conf import get_configuration_object, get_resource_exe
from shared.defaults import default_vgrid
from shared.fileio import pickle, unpickle, unpickle_and_change_status, \
send_message_to_grid_script
from shared.gridscript import clean_grid_stdin, \
remove_jobrequest_pending_files, check_mrsl_files, requeue_job, \
server_cleanup, load_queue, save_queue, load_schedule_cache, \
save_schedule_cache, arc_job_status, clean_arc_job
from shared.notification import notify_user_thread
from shared.resadm import atomic_resource_exe_restart, put_exe_pgid
from shared.vgrid import job_fits_res_vgrid, validated_vgrid_list
try:
import servercomm
except ImportError, ime:
print 'could not import servercomm, probably due to missing pycurl'
print ime
(configuration, logger) = (None, None)
(job_queue, executing_queue, scheduler) = (None, None, None)
(job_time_out_thread, job_time_out_stop) = (None, None)
def time_out_jobs(stop_event):
    """Examine queue of current executing jobs and send a JOBTIMEOUT
    message if specified cputime is exceeded.  Runs as a daemon thread
    until `stop_event` is set; also polls ARC jobs for terminal states.

    Please note that (under load) this decoupling of time out check
    and handling may result in this function sending multiple
    JOBTIMEOUT messages for the same job to the input pipe before
    the first one gets through the pipe and handled. Thus we may see
    'echoes' in the log.
    """

    # We must make sure that thread keeps running even
    # if the time out code unexpectedly throws an exception

    try:
        # Keep running until main sends stop signal
        counter = 0
        while not stop_event.isSet():
            counter = (counter + 1) % 60
            # Responsive sleep: only do the real check once every 60 ticks
            if 0 < counter:
                time.sleep(1)
                continue
            qlen = executing_queue.queue_length()
            if qlen == 0:
                logger.info('No jobs in executing_queue')
            else:
                logger.info('time_out_jobs(): %d job(s) in queue' % qlen)

            # TODO: this is a race - 'Main' may modify executing_queue at
            # any time!
            # Especially since we ask it to remove the job we just looked
            # at and then look at the next index, so we may skip jobs
            # if it removes the job before we dequeue next index.
            # We should use locking or at least remove from back to
            # make it slightly better.

            for i in range(0, qlen):
                job = executing_queue.get_job(i)
                if not job:
                    logger.warning(
                        'time-out RC? found empty job in slot %d!' % i)
                    continue
                try:
                    delay = int(job['EXECUTION_DELAY'])
                except StandardError, err:
                    logger.warning(
                        'no execution delay field: %s Exception: %s'
                        % (job, err))
                    delay = 0
                try:
                    cputime = int(job['CPUTIME'])
                except StandardError, err:
                    logger.warning('cputime extraction failed for %s! %s'
                                   % (job, err))
                    cputime = 120
                # grace period on top of requested cputime and dispatch delay
                extra_cputime = 90
                total_cputime = delay + extra_cputime + cputime
                timestamp = job['EXECUTING_TIMESTAMP']

                # the canonical way to convert time.gmtime() to
                # a datetime... All times in UTC timezone

                start_executing_datetime = \
                    datetime.datetime.utcfromtimestamp(calendar.timegm(
                        timestamp))
                last_valid_finish_time = start_executing_datetime\
                    + datetime.timedelta(seconds=total_cputime)

                # now, in utc timezone

                now = datetime.datetime.utcnow()
                if now > last_valid_finish_time:
                    logger.info(
                        'timing out job %s: allowed time %s, delay %s'
                        % (job['JOB_ID'], total_cputime, delay))
                    grid_script_msg = 'JOBTIMEOUT %s %s %s\n'\
                        % (job['UNIQUE_RESOURCE_NAME'], job['EXE'
                           ], job['JOB_ID'])
                    send_message_to_grid_script(grid_script_msg,
                                                logger, configuration)
                elif job['UNIQUE_RESOURCE_NAME'] == 'ARC':
                    if not configuration.arc_clusters:
                        logger.error('ARC backend disabled - ignore %s' % \
                                     job)
                        continue
                    jobstatus = arc_job_status(job, configuration, logger)

                    # take action if the job is failed or killed.
                    # No action for a finished job, since other
                    # machinery will be at work to update it

                    if jobstatus in ['FINISHED', 'FAILED', 'KILLED']:
                        logger.debug(
                            'discovered %s job %s, clean it on the server'
                            % (jobstatus, job['JOB_ID']))
                        if jobstatus in ['FAILED', 'KILLED']:
                            msg = '(failed inside ARC)'
                        else:
                            msg = None
                        exec_job = executing_queue.dequeue_job_by_id(
                            job['JOB_ID'])
                        if exec_job:

                            # job was still there, clean up here
                            # (otherwise, someone else picked it up in
                            # the meantime)

                            clean_arc_job(exec_job, jobstatus, msg,
                                          configuration, logger, False)
                    else:
                        logger.debug(
                            'Status %s for ARC job %s, no action required'
                            % (jobstatus, job['JOB_ID']))
    except Exception, err:
        logger.error('time_out_jobs: unexpected exception: %s' % err)
    logger.info('time_out_jobs: time out thread terminating')
def clean_shutdown(signum, frame):
    """Request clean shutdown when pending requests are handled.

    Installed as the SIGINT handler; it only queues a SHUTDOWN message on
    the grid_script input pipe so the main loop can exit at a safe point.
    """
    print '--- REQUESTING SAFE SHUTDOWN ---'
    shutdown_msg = 'SHUTDOWN\n'
    send_message_to_grid_script(shutdown_msg, logger, configuration)
def graceful_shutdown():
    """This function is responsible for shutting down the server in a
    graceful way. It should only be called by the SHUTDOWN request
    handler to avoid interfering with other active requests.

    Stops the timeout thread, persists the job/executing queues and the
    scheduler cache, then exits the process.
    """

    msg = '%s: graceful_shutdown called' % sys.argv[0]
    print msg
    try:
        logger.info(msg)
        job_time_out_stop.set()
        print 'graceful_shutdown: giving time out thread a chance to terminate'
        # make sure queue gets saved even if timeout thread goes haywire
        job_time_out_thread.join(5)
        print 'graceful_shutdown: saving state'
        if job_queue and not save_queue(job_queue, job_queue_path,
                                        logger):
            logger.warning('failed to save job queue')
        if executing_queue and not save_queue(executing_queue,
                                              executing_queue_path, logger):
            logger.warning('failed to save executing queue')
        if scheduler and not save_schedule_cache(scheduler.get_cache(),
                                                 schedule_cache_path, logger):
            logger.warning('failed to save scheduler cache')
        print 'graceful_shutdown: saved state; now blocking for timeout thread'
        # Now make sure timeout thread finishes
        job_time_out_thread.join()
        configuration.logger_obj.shutdown()
    except StandardError:
        pass
    sys.exit(0)
# ## Main ###
# Top-level daemon setup: signal handler, configuration, queue restore,
# scheduler selection, input pipe, sanity checks and the timeout thread.

# register ctrl+c signal handler to shutdown system cleanly

signal.signal(signal.SIGINT, clean_shutdown)

print """
Running main grid 'daemon'.
Set the MIG_CONF environment to the server configuration path
unless it is available in the default path
mig/server/MiGserver.conf
"""

configuration = get_configuration_object()
logger = configuration.logger
logger.info('Starting MiG server')

# Load queues from file dump if available

job_queue_path = os.path.join(configuration.mig_system_files,
                              'job_queue.pickle')
executing_queue_path = os.path.join(configuration.mig_system_files,
                                    'executing_queue.pickle')
schedule_cache_path = os.path.join(configuration.mig_system_files,
                                   'schedule_cache.pickle')
only_new_jobs = True
job_queue = load_queue(job_queue_path, logger)
executing_queue = load_queue(executing_queue_path, logger)
if not job_queue or not executing_queue:
    logger.warning('Could not load queues from previous run')
    only_new_jobs = False
    job_queue = JobQueue(logger)
    executing_queue = JobQueue(logger)
else:
    logger.info('Loaded queues from previous run')

# Always use an empty done queue after restart

done_queue = JobQueue(logger)
schedule_cache = load_schedule_cache(schedule_cache_path, logger)
if not schedule_cache:
    logger.warning('Could not load schedule cache from previous run')
else:
    logger.info('Loaded schedule cache from previous run')

# Select the scheduler implementation configured in sched_alg;
# unknown values fall back to FirstFit.

logger.info('starting scheduler ' + configuration.sched_alg)
if configuration.sched_alg == 'FirstFit':
    from firstfitscheduler import FirstFitScheduler
    scheduler = FirstFitScheduler(logger, configuration)
elif configuration.sched_alg == 'BestFit':
    from bestfitscheduler import BestFitScheduler
    scheduler = BestFitScheduler(logger, configuration)
elif configuration.sched_alg == 'FairFit':
    from fairfitscheduler import FairFitScheduler
    scheduler = FairFitScheduler(logger, configuration)
elif configuration.sched_alg == 'MaxThroughput':
    from maxthroughputscheduler import MaxThroughputScheduler
    scheduler = MaxThroughputScheduler(logger, configuration)
elif configuration.sched_alg == 'Random':
    from randomscheduler import RandomScheduler
    scheduler = RandomScheduler(logger, configuration)
elif configuration.sched_alg == 'FIFO':
    from fifoscheduler import FIFOScheduler
    scheduler = FIFOScheduler(logger, configuration)
else:
    from firstfitscheduler import FirstFitScheduler
    print 'Unknown sched_alg %s - using FirstFit scheduler'\
        % configuration.sched_alg
    scheduler = FirstFitScheduler(logger, configuration)
scheduler.attach_job_queue(job_queue)
scheduler.attach_done_queue(done_queue)
if schedule_cache:
    scheduler.set_cache(schedule_cache)

# redirect grid_stdin to sys.stdin
# grid_stdin is a named pipe; create it if missing, then block-open it

try:
    if not os.path.exists(configuration.grid_stdin):
        logger.info('creating grid_script input pipe %s'
                    % configuration.grid_stdin)
        try:
            os.mkfifo(configuration.grid_stdin)
        except StandardError, err:
            logger.error('Could not create missing grid_stdin fifo: '
                         + '%s exception: %s '
                         % (configuration.grid_stdin, err))
    grid_stdin = open(configuration.grid_stdin, 'r')
except StandardError:
    logger.error('failed to open grid_stdin! %s' % sys.exc_info()[0])
    sys.exit(1)
logger.info('cleaning pipe')
clean_grid_stdin(grid_stdin)

# Make sure empty job home exists

empty_home = os.path.join(configuration.user_home,
                          configuration.empty_job_name)
if not os.path.exists(empty_home):
    logger.info('creating empty job home dir %s' % empty_home)
    try:
        os.mkdir(empty_home)
    except Exception, exc:
        logger.error('failed to create empty job home dir %s: %s'
                     % (empty_home, exc))
msg = 'Checking for mRSL files with status parse or queued'
print msg
logger.info(msg)
check_mrsl_files(configuration, job_queue, executing_queue,
                 only_new_jobs, logger)
msg = 'Cleaning up after pending job requests'
print msg
remove_jobrequest_pending_files(configuration)

# start the timer function to check if cputime is exceeded

logger.info('starting time_out_jobs()')
job_time_out_stop = threading.Event()
job_time_out_thread = threading.Thread(target=time_out_jobs,
                                       args=(job_time_out_stop, ))
job_time_out_thread.start()
msg = 'Starting main loop'
print msg
logger.info(msg)

# main loop

loop_counter = 0
last_read_from_grid_stdin_empty = False

# print str(executing_queue.queue_length())
# print str(job_queue.queue_length())
# print str(done_queue.queue_length())
# TODO: perhaps we should run the pipe reader as main thread
# and then spawn threads for actual handling. It should of
# course still be thread safe.
while True:
line = grid_stdin.readline()
strip_line = line.strip()
cap_line = strip_line.upper()
linelist = strip_line.split(' ')
if strip_line == '':
if last_read_from_grid_stdin_empty:
time.sleep(1)
last_read_from_grid_stdin_empty = True
# no reason to investigate content of line
continue
else:
last_read_from_grid_stdin_empty = False
if cap_line.find('USERJOBFILE ') == 0:
# ********* *********
# ********* USER JOB *********
# ********* *********
print cap_line
logger.info(cap_line)
# add to queue
file_userjob = configuration.mrsl_files_dir\
+ strip_line.replace('USERJOBFILE ', '') + '.mRSL'
dict_userjob = unpickle_and_change_status(file_userjob, 'QUEUED'
, logger)
if not dict_userjob:
logger.error('Could not unpickle and change status. '
+ 'Job not enqueued!')
continue
# Set owner to be able to do per-user job statistics
user_str = strip_line.replace('USERJOBFILE ', '')
(user_id, filename) = user_str.split(os.sep)
dict_userjob['OWNER'] = user_id
dict_userjob['MIGRATE_COUNT'] = str(0)
# ARC jobs: directly submit, and put in executing_queue
if dict_userjob['JOBTYPE'] == 'arc':
if not configuration.arc_clusters:
logger.error('ARC backend disabled - ignore %s' % \
dict_userjob)
continue
logger.debug('ARC Job' )
(arc_job, msg) = jobscriptgenerator.create_arc_job(\
dict_userjob, configuration, logger)
if not arc_job:
# something has gone wrong
logger.error('Job NOT submitted (%s)' % msg)
# discard this job (as FAILED, including message)
# see gridscript::requeue_job for how to do this...
dict_userjob['STATUS'] = 'FAILED'
dict_userjob['FAILED_TIMESTAMP'] = time.gmtime()
# and create an execution history (basically empty)
hist = (
{'QUEUED_TIMESTAMP': dict_userjob['QUEUED_TIMESTAMP'],
'EXECUTING_TIMESTAMP': dict_userjob['FAILED_TIMESTAMP'],
'FAILED_TIMESTAMP': dict_userjob['FAILED_TIMESTAMP'],
'FAILED_MESSAGE': ('ARC Submission failed: %s' % msg),
'UNIQUE_RESOURCE_NAME': 'ARC',})
dict_userjob['EXECUTION_HISTORY'] = [hist]
# should also notify the user (if requested)
# not implented for this branch.
else:
# all fine, job is now in some ARC queue
logger.debug('Job submitted (%s,%s)' % (arc_job['SESSIONID'], arc_job['ARCID']))
# set some job fields for job status retrieval, and
# put in exec.queue for job status queries and timeout
dict_userjob['SESSIONID'] = arc_job['SESSIONID']
# abuse these two fields,
# expected by timeout thread to be there anyway
dict_userjob['UNIQUE_RESOURCE_NAME'] = 'ARC'
dict_userjob['EXE'] = arc_job['ARCID']
# this one is used by the timeout thread as well
# We put in a wild guess, 10 minutes. Perhaps not enough
dict_userjob['EXECUTION_DELAY'] = 600
# set to executing even though it is kind-of wrong...
dict_userjob['STATUS'] = 'EXECUTING'
dict_userjob['EXECUTING_TIMESTAMP'] = time.gmtime()
executing_queue.enqueue_job(dict_userjob, \
executing_queue.queue_length())
# Either way, save the job mrsl.
# Status is EXECUTING or FAILED
pickle(dict_userjob, file_userjob, logger)
# go on with scheduling loop (do not use scheduler magic below)
continue
# following: non-ARC code
# put job in queue
job_queue.enqueue_job(dict_userjob, job_queue.queue_length())
user_dict = {}
user_dict['USER_ID'] = user_id
# Update list of users - create user if new
scheduler.update_users(user_dict)
user_dict = scheduler.find_user(user_dict)
user_dict['QUEUE_HIST'].pop(0)
user_dict['QUEUE_HIST'].append(dict_userjob)
scheduler.update_seen(user_dict)
elif cap_line.find('SERVERJOBFILE ') == 0:
# ********* *********
# ********* SERVER JOB *********
# ********* *********
print cap_line
logger.info(cap_line)
# add to queue
file_serverjob = configuration.mrsl_files_dir\
+ strip_line.replace('SERVERJOBFILE ', '') + '.mRSL'
dict_serverjob = unpickle(file_serverjob, logger)
if dict_serverjob == False:
logger.error(
'Could not unpickle migrated job - not put into queue!')
continue
# put job in queue
job_queue.enqueue_job(dict_serverjob, job_queue.queue_length())
    elif cap_line.find('JOBSCHEDULE ') == 0:
        # ********* SCHEDULE DUMP *********
        # Dump the current scheduling details of a waiting job into its
        # mRSL file so that job status pages can display them.
        print cap_line
        logger.info(cap_line)
        if len(linelist) != 2:
            logger.error('Invalid job schedule request %s' % linelist)
            continue
        # read values
        job_id = linelist[1]
        # find job in queue and dump schedule values to mRSL for job status
        job_dict = job_queue.get_job_by_id(job_id)
        if not job_dict:
            logger.info('Job is not in waiting queue - no schedule to update')
            continue
        client_dir = client_id_dir(job_dict['USER_CERT'])
        file_serverjob = configuration.mrsl_files_dir + client_dir\
             + os.sep + job_id + '.mRSL'
        dict_serverjob = unpickle(file_serverjob, logger)
        if dict_serverjob == False:
            logger.error('Could not unpickle job - not updating schedule!')
            continue
        # update and save schedule
        scheduler.copy_schedule(job_dict, dict_serverjob)
        pickle(dict_serverjob, file_serverjob, logger)
elif cap_line.find('RESOURCEREQUEST ') == 0:
# ********* *********
# ********* RESOURCE REQUEST *********
# ********* *********
print cap_line
logger.info(cap_line)
logger.info('RESOURCEREQUEST: %d job(s) in the queue.' % \
job_queue.queue_length())
if len(linelist) != 8:
logger.error('Invalid resource request %s' % linelist)
continue
# read values
exe = linelist[1]
unique_resource_name = linelist[2]
cputime = linelist[3]
nodecount = linelist[4]
localjobname = linelist[5]
execution_delay = linelist[6]
exe_pgid = linelist[7]
last_job_failed = False
# read resource config file
res_file = os.path.join(configuration.resource_home,
unique_resource_name, 'config')
resource_config = unpickle(res_file, logger)
if resource_config == False:
logger.error('error unpickling resource config for %s'
% unique_resource_name)
continue
sandboxed = resource_config.get('SANDBOX', False)
# Write the PGID of EXE to PGID file
(status, msg) = put_exe_pgid(
configuration.resource_home,
unique_resource_name,
exe,
exe_pgid,
logger,
sandboxed,
)
if status:
logger.info(msg)
else:
logger.error(
'Problem writing EXE PGID to file, job request aborted: %s'
% msg)
# we cannot create and dispatch job without pgid written to file!
continue
job_dict = None
# mark job failed if resource requests a new job and
# previously dispatched job is not marked done yet
last_req_file = os.path.join(configuration.resource_home,
unique_resource_name,
'last_request.%s' % exe)
last_req = unpickle(last_req_file, logger)
if last_req == False:
# last_req could not be pickled, this is probably
# because it is the first request from the resource
last_req = {'EMPTY_JOB': True}
if last_req.get('EMPTY_JOB', False) or not last_req.get('USER_CERT',
None):
# Dequeue empty job and cleanup (if not already done in FINISH)
# This is done to avoid them stacking up in the executing_queue
# in case of a faulty resource who keeps requesting jobs
job_dict = \
executing_queue.dequeue_job_by_id(last_req.get(
'JOB_ID', ''), log_errors=False)
if job_dict:
logger.info('last job was an empty job which did not finish')
if not server_cleanup(
job_dict['SESSIONID'],
job_dict['IOSESSIONID'],
job_dict['LOCALJOBNAME'],
job_dict['JOB_ID'],
configuration,
logger,
):
logger.error('could not clean up MiG server')
else:
logger.info('last job was an empty job which already finished')
else:
# open the mRSL file belonging to the last request
# and check if the status is FINISHED or CANCELED.
last_job_ok_status_list = ['FINISHED', 'CANCELED']
client_dir = client_id_dir(last_req['USER_CERT'])
filenamelast = os.path.join(configuration.mrsl_files_dir,
client_dir,
last_req['JOB_ID'] + '.mRSL')
job_dict = unpickle(filenamelast, logger)
if job_dict:
if job_dict['STATUS'] not in last_job_ok_status_list:
last_job_failed = True
exe_job = \
executing_queue.get_job_by_id(job_dict['JOB_ID'
])
if exe_job:
# Ignore missing fields
(last_res, last_exe) = ('', '')
if exe_job.has_key('UNIQUE_RESOURCE_NAME'):
last_res = exe_job['UNIQUE_RESOURCE_NAME']
if exe_job.has_key('EXE'):
last_exe = exe_job['EXE']
if exe_job and last_res == unique_resource_name\
and last_exe == exe:
logger.info(
'%s:%s requested job and was NOT done with last %s'
% (unique_resource_name, exe, job_dict['JOB_ID']))
print 'YOU ARE NOT DONE WITH %s' % job_dict['JOB_ID']
# Clear any scheduling data for exe_job before requeue
scheduler.clear_schedule(exe_job)
requeue_job(
exe_job,
'RESOURCE DIED',
job_queue,
executing_queue,
configuration,
logger,
)
else:
logger.info(
'%s:%s requested job but last %s was rescheduled'
% (unique_resource_name, exe, job_dict['JOB_ID']))
print 'YOUR LAST JOB %s WAS RESCHEDULED'\
% job_dict['JOB_ID']
else:
logger.info('%s requested job and previous was done'
% unique_resource_name)
print 'OK, last job %s was done' % job_dict['JOB_ID']
# Now update resource config fields with requested attributes
resource_config['CPUTIME'] = cputime
# overwrite execution_delay attribute
resource_config['EXECUTION_DELAY'] = execution_delay
# overwrite number of available nodes (a pbs resource might not
# want a job for all nodes)
resource_config['NODECOUNT'] = nodecount
resource_config['RESOURCE_ID'] = '%s_%s'\
% (unique_resource_name, exe)
# specify vgrid
(status, exe_conf) = get_resource_exe(resource_config, exe,
logger)
if not status:
logger.error('could not get exe configuration for resource!')
continue
last_request_dict = {'RESOURCE_CONFIG': resource_config,
'CREATED_TIME': datetime.datetime.now(),
'STATUS': ''}
# find the vgrid that should receive the job request
last_vgrid = 0
if not exe_conf.get('vgrid', ''):
# fall back to default vgrid
exe_conf['vgrid'] = [default_vgrid]
if isinstance(exe_conf['vgrid'], basestring):
exe_conf['vgrid'] = list(exe_conf['vgrid'])
exe_vgrids = exe_conf['vgrid']
if last_req.has_key('LAST_VGRID'):
# index of last vgrid found
last_vgrid_index = last_req['LAST_VGRID']
# make sure the index is within bounds (some vgrids
# might have been removed from conf since last run)
res_vgrid_count = len(exe_vgrids)
if last_vgrid_index + 1 > res_vgrid_count - 1:
# out of bounds, use index 0
pass
else:
# within bounds
last_vgrid = last_vgrid_index + 1
# The scheduler checks the vgrids in the order as they appear in
# the list, so to be fair the order of the vgrids in the list
# should be cycled according to the last_request
vgrids_in_prioritized_order = []
list_indexes = range(last_vgrid, len(exe_vgrids))
list_indexes = list_indexes + range(0, last_vgrid)
for index in list_indexes:
# replace "" with default_vgrid
add_vgrid = exe_conf['vgrid'][index]
if add_vgrid == '':
add_vgrid = default_vgrid
vgrids_in_prioritized_order.append(add_vgrid)
logger.info('vgrids in prioritized order: %s (last %s)'
% (vgrids_in_prioritized_order, last_vgrid))
# set found values
resource_config['VGRID'] = vgrids_in_prioritized_order
resource_config['LAST_VGRID'] = last_vgrid
last_request_dict['LAST_VGRID'] = last_vgrid
# Update list of resources
scheduler.update_resources(resource_config)
scheduler.update_seen(resource_config)
if job_queue.queue_length() == 0 or last_job_failed or nodecount < 1:
# No jobs: Create 'empty' job script and double sleep time if
# repeated empty job
if not last_req.has_key('EMPTY_JOB'):
sleep_factor = 1.0
else:
sleep_factor = 2.0
print 'N'
(empty_job, msg) = jobscriptgenerator.create_empty_job(
unique_resource_name,
exe,
cputime,
sleep_factor,
localjobname,
execution_delay,
configuration,
logger,
)
(new_job, msg) = \
jobscriptgenerator.create_job_script(
unique_resource_name,
exe,
empty_job,
resource_config,
localjobname,
configuration,
logger,
)
if new_job:
last_request_dict['JOB_ID'] = empty_job['JOB_ID']
last_request_dict['STATUS'] = 'No jobs in queue'
if last_job_failed:
last_request_dict['STATUS'] = \
'Last job failed - forced empty job'
last_request_dict['EXECUTING_TIMESTAMP'] = time.gmtime()
last_request_dict['EXECUTION_DELAY'] = \
empty_job['EXECUTION_DELAY']
last_request_dict['UNIQUE_RESOURCE_NAME'] = \
unique_resource_name
last_request_dict['PUBLICNAME'] = resource_config.get(
'PUBLICNAME', 'HIDDEN')
last_request_dict['EXE'] = exe
last_request_dict['RESOURCE_CONFIG'] = resource_config
last_request_dict['LOCALJOBNAME'] = localjobname
last_request_dict['SESSIONID'] = new_job['SESSIONID']
last_request_dict['IOSESSIONID'] = new_job['IOSESSIONID']
last_request_dict['CPUTIME'] = empty_job['CPUTIME']
last_request_dict['EMPTY_JOB'] = True
executing_queue.enqueue_job(last_request_dict,
executing_queue.queue_length())
logger.info('empty job script created')
else:
msg = 'Failed to create job script: %s' % msg
print msg
logger.error(msg)
continue
else:
# there are jobs in the queue
# Expire outdated jobs - expire_jobs removes them from queue
# and returns them in a list: handle the file update here.
expired_jobs = scheduler.expire_jobs()
for expired in expired_jobs:
# tell the user about the expired job - we do not wait for
# notification to finish but hope for the best since this
# script is long running.
# The thread only writes a message to the notify pipe so it
# finishes immediately if the notify daemon is listening and
# blocks indefinitely otherwise.
notify_user_thread(
expired,
configuration.myfiles_py_location,
'EXPIRED',
logger,
False,
configuration,
)
client_dir = client_id_dir(expired['USER_CERT'])
expired_file = configuration.mrsl_files_dir + client_dir\
+ os.sep + expired['JOB_ID'] + '.mRSL'
if not unpickle_and_change_status(expired_file,
'EXPIRED', logger):
logger.error('Could not unpickle and change status. '
+ 'Job could not be officially expired!'
)
continue
# Remove references to expired jobs
expired_jobs = []
# Schedule and create appropriate job script
# loop until a non-cancelled job is scheduled (fixes small
# race condition if a job has not been dequeued after the
# status in the mRSL file has been changed to FROZEN or CANCELED)
while True:
job_dict = scheduler.schedule(resource_config)
if not job_dict:
break
client_dir = client_id_dir(job_dict['USER_CERT'])
mrsl_filename = configuration.mrsl_files_dir\
+ client_dir + '/' + job_dict['JOB_ID'] + '.mRSL'
dummy_dict = unpickle(mrsl_filename, logger)
# The job status should be "QUEUED" at this point
if dummy_dict == False:
logger.error('error unpickling mrsl in %s'
% mrsl_filename)
continue
if dummy_dict['STATUS'] == 'QUEUED':
break
if not job_dict:
# no jobs in the queue fits the resource!
print 'X'
logger.info('No jobs in the queue can be executed by '
+ 'resource, queue length: %s'
% job_queue.queue_length())
# Create 'empty' job script and double sleep time if
# repeated empty job
if not last_req.has_key('EMPTY_JOB'):
sleep_factor = 1.0
else:
sleep_factor = 2.0
(empty_job, msg) = jobscriptgenerator.create_empty_job(
unique_resource_name,
exe,
cputime,
sleep_factor,
localjobname,
execution_delay,
configuration,
logger,
)
(new_job, msg) = \
jobscriptgenerator.create_job_script(
unique_resource_name,
exe,
empty_job,
resource_config,
localjobname,
configuration,
logger,
)
if new_job:
last_request_dict['JOB_ID'] = empty_job['JOB_ID']
last_request_dict['STATUS'] = \
'No jobs in queue can be executed by resource'
last_request_dict['EXECUTING_TIMESTAMP'] = \
time.gmtime()
last_request_dict['EXECUTION_DELAY'] = \
execution_delay
last_request_dict['UNIQUE_RESOURCE_NAME'] = \
unique_resource_name
last_request_dict['PUBLICNAME'] = resource_config.get(
'PUBLICNAME', 'HIDDEN')
last_request_dict['EXE'] = exe
last_request_dict['RESOURCE_CONFIG'] = \
resource_config
last_request_dict['LOCALJOBNAME'] = localjobname
last_request_dict['SESSIONID'] = new_job['SESSIONID']
last_request_dict['IOSESSIONID'] = new_job['IOSESSIONID']
last_request_dict['CPUTIME'] = empty_job['CPUTIME']
last_request_dict['EMPTY_JOB'] = True
executing_queue.enqueue_job(last_request_dict,
executing_queue.queue_length())
logger.info('empty job script created')
else:
# a job has been scheduled to be executed on this
# resource: change status in the mRSL file
client_dir = client_id_dir(job_dict['USER_CERT'])
mrsl_filename = os.path.join(configuration.mrsl_files_dir,
client_dir,
job_dict['JOB_ID'] + '.mRSL')
mrsl_dict = unpickle(mrsl_filename, logger)
if mrsl_dict:
(new_job, msg) = \
jobscriptgenerator.create_job_script(
unique_resource_name,
exe,
job_dict,
resource_config,
localjobname,
configuration,
logger,
)
if new_job:
# mrsl_dict now contains entire job_dict with updates
# Fix legacy VGRID fields
mrsl_dict['VGRID'] = validated_vgrid_list(
configuration, mrsl_dict)
# Select actual VGrid to use
(match, active_job_vgrid, active_res_vgrid) = \
job_fits_res_vgrid(mrsl_dict['VGRID'],
vgrids_in_prioritized_order)
# Write executing details to mRSL file
mrsl_dict['STATUS'] = 'EXECUTING'
mrsl_dict['EXECUTING_TIMESTAMP'] = time.gmtime()
mrsl_dict['EXECUTION_DELAY'] = execution_delay
mrsl_dict['UNIQUE_RESOURCE_NAME'] = \
unique_resource_name
mrsl_dict['PUBLICNAME'] = resource_config.get(
'PUBLICNAME', 'HIDDEN')
mrsl_dict['EXE'] = exe
mrsl_dict['RESOURCE_VGRID'] = active_res_vgrid
mrsl_dict['RESOURCE_CONFIG'] = resource_config
mrsl_dict['LOCALJOBNAME'] = localjobname
mrsl_dict['SESSIONID'] = new_job['SESSIONID']
mrsl_dict['IOSESSIONID'] = new_job['IOSESSIONID']
mrsl_dict['MOUNTSSHPUBLICKEY'] = new_job['MOUNTSSHPUBLICKEY']
mrsl_dict['MOUNTSSHPRIVATEKEY'] = new_job['MOUNTSSHPRIVATEKEY']
# pickle the new version
pickle(mrsl_dict, mrsl_filename, logger)
last_request_dict['STATUS'] = 'Job assigned'
last_request_dict['CPUTIME'] = \
job_dict['CPUTIME']
last_request_dict['EXECUTION_DELAY'] = \
execution_delay
last_request_dict['NODECOUNT'] = \
job_dict['NODECOUNT']
# job id and user_cert is used to check if the current
# job is done when a resource requests a new job
last_request_dict['JOB_ID'] = job_dict['JOB_ID']
last_request_dict['USER_CERT'] = \
job_dict['USER_CERT']
# Save actual VGrid for fair VGrid cycling
try:
vgrid_index = vgrids_in_prioritized_order.index(
active_res_vgrid)
except StandardError:
# fall back to simple increment
vgrid_index = last_vgrid
last_request_dict['LAST_VGRID'] = vgrid_index
print 'Job assigned ' + job_dict['JOB_ID']
logger.info('Job %s assigned to %s execution unit %s'
% (job_dict['JOB_ID'],
unique_resource_name, exe))
# put job in executing queue
executing_queue.enqueue_job(mrsl_dict,
executing_queue.queue_length())
print 'executing_queue length %d'\
% executing_queue.queue_length()
else:
# put original job in back in job queue
job_queue.enqueue_job(job_dict,
job_queue.queue_length())
msg = 'error creating new job script, job requeued'
print msg
logger.error(msg)
else:
logger.error('error unpickling mRSL: %s'
% mrsl_filename)
pickle(last_request_dict, last_req_file, logger)
# Save last_request_dict to vgrid_home/vgrid_name to make
# seperate vgrid monitors possible
# contains names on vgrids where last_request_dict should
# be saved unmodified
original_last_request_dict_vgrids = []
# contains names on vgrids where last_request_dict should
# be overwritten with a "Executing job for another vgrid"
# version
executing_in_other_vgrids = []
# if empty_job:
# empty job, make sure this job request is seen on monitors
# for all vgrids this resource is in
# original_last_request_dict_vgrids = vgrids_in_prioritized_order
# TODO: must detect if it is a real or empty job.
# problem: after a job has been executed in a
# vgrid and the resource gets an empty job the monitor
# says "executing in other vgrid" which of course should
# be no jobs in grid queue can be executed by resource.
if job_dict:
# real job scheduled!
if job_dict.has_key('VGRID'):
original_last_request_dict_vgrids += job_dict['VGRID']
else:
# no vgrid specified, this means default vgrid.
original_last_request_dict_vgrids.append([default_vgrid])
# overwrite last_request_dict for vgrids that
# the resource is in but not executing the job
logger.info('job: %s' % job_dict)
for res_vgrid in vgrids_in_prioritized_order:
if res_vgrid not in original_last_request_dict_vgrids:
executing_in_other_vgrids.append(res_vgrid)
else:
# empty job, make sure this job request is seen on monitors
# for all vgrids this resource is in
original_last_request_dict_vgrids = \
vgrids_in_prioritized_order
# save monitor_last_request files
# for vgrid_monitor in original_last_request_dict_vgrids:
# loop all vgrids where this resource is taking jobs
for vgrid_name in vgrids_in_prioritized_order:
logger.info("vgrid_name: '%s' org '%s' exe '%s'"
% (vgrid_name,
original_last_request_dict_vgrids,
executing_in_other_vgrids))
monitor_last_request_file = configuration.vgrid_home\
+ os.sep + vgrid_name + os.sep\
+ 'monitor_last_request_' + unique_resource_name + '_'\
+ exe
if vgrid_name in original_last_request_dict_vgrids:
pickle(last_request_dict, monitor_last_request_file,
logger)
logger.info('vgrid_name: %s status: %s' % (vgrid_name,
last_request_dict['STATUS']))
elif vgrid_name in executing_in_other_vgrids:
# create modified last_request_dict and save
new_last_request_dict = copy.deepcopy(last_request_dict)
new_last_request_dict['STATUS'] = \
'Executing job for another vgrid'
logger.info('vgrid_name: %s status: %s' % (vgrid_name,
new_last_request_dict['STATUS']))
pickle(new_last_request_dict,
monitor_last_request_file, logger)
else:
# we should never enter this else, vgrid_name must be in
# original_last_request_dict_vgrids or
# executing_in_other_vgrids
logger.error(
'Entered else condition that never should be entered ' + \
'during creation of last_request_dict in grid_script!' + \
" vgrid_name: '%s' not in '%s' or '%s'"
% (vgrid_name, original_last_request_dict_vgrids,
executing_in_other_vgrids))
# delete requestnewjob lock
lock_file = os.path.join(configuration.resource_home,
unique_resource_name,
'jobrequest_pending.%s' % exe)
try:
os.remove(lock_file)
except OSError, ose:
logger.error('Error removing %s: %s' % (lock_file, ose))
# Experimental pricing code
# TODO: update price *after* publishing status so that price fits delay?
if configuration.enable_server_dist:
scheduler.update_price(resource_config)
    elif cap_line.find('RESOURCEFINISHEDJOB ') == 0:
        # ********* RESOURCE FINISHED *********
        # A resource reports that a dispatched job finished. Verify the
        # report matches the executing-queue entry, then clean up server
        # state and dequeue the job.
        # format: RESOURCEFINISHEDJOB RESOURCE_ID/LOCALJOBNAME
        print cap_line
        logger.info(cap_line)
        logger.info('RESOURCEFINISHEDJOB: %d job(s) in the queue.' % \
                    job_queue.queue_length())
        if len(linelist) != 5:
            logger.error('Invalid resourcefinishedjob request')
            continue
        # read values
        res_name = linelist[1]
        exe_name = linelist[2]
        sessionid = linelist[3]
        job_id = linelist[4]
        msg = 'RESOURCEFINISHEDJOB: %s:%s finished job %s id %s'\
             % (res_name, exe_name, sessionid, job_id)
        job_dict = executing_queue.get_job_by_id(job_id)
        if not job_dict:
            # stale/duplicate report: job already gone from executing queue
            msg += \
                ', but job is not in executing queue, ignoring result.'
        elif job_dict['UNIQUE_RESOURCE_NAME'] != res_name\
             or job_dict['EXE'] != exe_name:
            # report came from a resource/exe that no longer owns the job
            msg += \
                ', but job is being executed by %s:%s, ignoring result.'\
                 % (job_dict['UNIQUE_RESOURCE_NAME'], job_dict['EXE'])
        elif job_dict['UNIQUE_RESOURCE_NAME'] == 'ARC':
            if not configuration.arc_clusters:
                logger.error('ARC backend disabled - ignore %s' % \
                             job_dict)
                continue
            msg += (', which is an ARC job (ID %s).' % job_dict['EXE'])
            # remove from the executing queue
            executing_queue.dequeue_job_by_id(job_id)
            # job status has been checked by put script already
            # we need to clean up the job remainder (links, queue, and ARC
            # side)
            clean_arc_job(job_dict, 'FINISHED', None,
                          configuration, logger, False)
            msg += 'ARC job completed'
        else:
            # Clean up the server for files associated with the finished job
            if not server_cleanup(
                job_dict['SESSIONID'],
                job_dict['IOSESSIONID'],
                job_dict['LOCALJOBNAME'],
                job_id,
                configuration,
                logger,
                ):
                logger.error('could not clean up MiG server')
            if configuration.enable_server_dist\
                 and not job_dict.has_key('EMPTY_JOB'):
                # TODO: we should probably support resources migrating and
                # handing back job as first contact with new server
                # Still not sure if we need finished handling at all, though...
                scheduler.finished_job(res_name, job_dict)
            executing_queue.dequeue_job_by_id(job_id)
            msg += '%s removed from executing queue.' % job_id
        # print msg
        logger.info(msg)
    elif cap_line.find('RESTARTEXEFAILED') == 0:
        # ********* RESTART EXE FAILED *********
        # Restarting an execution unit failed: enqueue a short dummy retry
        # job so that the restart is attempted again when it times out.
        print cap_line
        logger.info(cap_line)
        logger.info(
            'Before restart exe failed: %d job(s) in the executing queue.' % \
            executing_queue.queue_length())
        if len(linelist) != 4:
            logger.error('Invalid restart exe failed request')
            continue
        # read values
        res_name = linelist[1]
        exe_name = linelist[2]
        job_id = linelist[3]
        logger.info('Restart exe failed: adding retry job for %s %s'
                    % (res_name, exe_name))
        # 300s cputime, sleep factor 1, no execution delay
        (retry_job, msg) = jobscriptgenerator.create_restart_job(
            res_name,
            exe_name,
            300,
            1,
            'RESTART-EXE-FAILED',
            0,
            configuration,
            logger,
            )
        executing_queue.enqueue_job(retry_job,
                                    executing_queue.queue_length())
        logger.info(
            'After restart exe failed: %d job(s) in the executing queue.' % \
            executing_queue.queue_length())
    elif cap_line.find('JOBACTION') == 0:
        # ********* JOB STATE CHANGE *********
        # Job state change (cancel/freeze/...) forwarded from the user
        # interface: move the job out of the relevant queue and, for
        # executing jobs, clean up and restart the resource exe.
        print cap_line
        logger.info(cap_line)
        logger.info('Job action: %d job(s) in the queue.' % \
                    job_queue.queue_length())
        if len(linelist) != 6:
            logger.error('Invalid job action request')
            continue
        # read values
        job_id = linelist[1]
        original_status = linelist[2]
        new_status = linelist[3]
        unique_resource_name = linelist[4]
        exe = linelist[5]
        # read resource config file
        res_file = os.path.join(configuration.resource_home,
                                unique_resource_name, 'config')
        resource_config = unpickle(res_file, logger)
        other_status_list = ['PARSE']
        queued_status_list = ['QUEUED', 'RETRY', 'FROZEN']
        executing_status_list = ['EXECUTING']
        # Only cancel is accepted for non-queued states
        # NOTE(review): there is no 'continue' after this error, so the
        # unsupported state change is still carried out below. Also the
        # message interpolates other_status_list rather than the actual
        # original_status - confirm both are intended.
        if original_status not in queued_status_list and \
               new_status != 'CANCELED':
            logger.error('change to %s not supported for jobs in %s states'
                         % (new_status, ', '.join(other_status_list)))
        if original_status in other_status_list:
            pass
        elif original_status in queued_status_list:
            if new_status == 'CANCELED':
                job_dict = job_queue.dequeue_job_by_id(job_id)
            else:
                job_dict = job_queue.get_job_by_id(job_id)
            scheduler.clear_schedule(job_dict)
            job_dict['STATUS'] = new_status
        elif original_status in executing_status_list:
            # Retrieve job_dict
            num_executing_jobs_before = executing_queue.queue_length()
            job_dict = executing_queue.dequeue_job_by_id(job_id)
            num_executing_jobs_after = executing_queue.queue_length()
            logger.info('Number of jobs in executing queue. '
                        + 'Before cancel: %s. After cancel: %s'
                        % (num_executing_jobs_before,
                           num_executing_jobs_after))
            if not job_dict:
                # We are seeing a race in the handling of executing jobs - do
                # nothing. Job timeout must have just killed the job we are
                # trying to cancel
                logger.info(
                    'Cancel job: Could not get job_dict for executing job')
                continue
            # special treatment of ARC jobs: delete two links and cancel job
            # in ARC
            if unique_resource_name == 'ARC':
                if not configuration.arc_clusters:
                    logger.error('ARC backend disabled - ignore %s' % \
                                 job_dict)
                    continue
                # remove from the executing queue
                executing_queue.dequeue_job_by_id(job_id)
                # job status has been set by the cancel request already, but
                # we need to kill the ARC job, or clean it (if already
                # finished), and clean up the job remainder links
                clean_arc_job(job_dict, 'CANCELED', None,
                              configuration, logger, True)
                logger.debug('ARC job completed')
                continue
            if not server_cleanup(
                job_dict['SESSIONID'],
                job_dict['IOSESSIONID'],
                job_dict['LOCALJOBNAME'],
                job_dict['JOB_ID'],
                configuration,
                logger,
                ):
                logger.error('could not clean up MiG server')
            if not resource_config.get('SANDBOX', False):
                logger.info(
                    'Killing running job with atomic_resource_exe_restart')
                (status, msg) = \
                    atomic_resource_exe_restart(unique_resource_name,
                        exe, configuration, logger)
                if status:
                    logger.info('atomic_resource_exe_restart ok: res %s:%s'
                                % (unique_resource_name, exe))
                else:
                    logger.error(
                        'atomic_resource_exe_restart FAILED: %s res %s:%s'
                        % (msg, unique_resource_name, exe))
                #kill_job_by_exe_restart(unique_resource_name, exe,
                #                        configuration, logger)
                # Make sure we do not loose exes even if restart fails
                retry_message = 'RESTARTEXEFAILED %s %s %s\n'\
                    % (unique_resource_name, exe, job_id)
                send_message_to_grid_script(retry_message, logger,
                        configuration)
    elif cap_line.find('JOBTIMEOUT') == 0:
        # A job exceeded its allowed execution time: clean up (empty job)
        # or requeue (real job) and restart the resource execution unit.
        print cap_line
        logger.info(cap_line)
        logger.info('job timeout: %d job(s) in the executing queue.' % \
                    executing_queue.queue_length())
        if len(linelist) != 4:
            logger.error('Invalid timeout job request')
            continue
        # read values
        unique_resource_name = linelist[1]
        exe_name = linelist[2]
        jobid = linelist[3]
        msg = 'JOBTIMEOUT: %s timed out.' % jobid
        print msg
        logger.info(msg)
        # read resource config file
        res_file = os.path.join(configuration.resource_home,
                                unique_resource_name, 'config')
        resource_config = unpickle(res_file, logger)
        # Retrieve job_dict
        job_dict = executing_queue.get_job_by_id(jobid)
        # special treatment of ARC jobs: delete two links and
        # clean job in ARC system, do not retry.
        if job_dict and unique_resource_name == 'ARC':
            if not configuration.arc_clusters:
                logger.error('ARC backend disabled - ignore %s' % \
                             job_dict)
                continue
            # remove from the executing queue
            executing_queue.dequeue_job_by_id(jobid)
            # job status has been set by the cancel request already, but
            # we need to kill the ARC job, or clean it (if already finished),
            # and clean up the job remainder links
            clean_arc_job(job_dict, 'FAILED', 'Job timed out',
                          configuration, logger, True)
            logger.debug('ARC job timed out, removed')
            continue
        # Execution information is removed from job_dict in
        # requeue_job - save here
        exe = ''
        if job_dict:
            exe = job_dict['EXE']
        # Check if job has already been rescheduled due to resource
        # failure. Important to match both unique resource and exe
        # name to avoid problems when job is rescheduled to another
        # exe on same resource.
        # IMPORTANT: both empty and real jobs may require exe
        # restart on time out. If frontend script can't deliver
        # status file within time frame (network outage etc) the
        # session id will be invalidated resulting in rejection
        # and no automatic restart of exe.
        if job_dict and unique_resource_name\
             == job_dict['UNIQUE_RESOURCE_NAME'] and exe_name == exe:
            if job_dict.has_key('EMPTY_JOB'):
                # Empty job timed out, cleanup server and
                # remove from Executing queue
                if not server_cleanup(
                    job_dict['SESSIONID'],
                    job_dict['IOSESSIONID'],
                    job_dict['LOCALJOBNAME'],
                    job_dict['JOB_ID'],
                    configuration,
                    logger,
                    ):
                    logger.error('could not clean up MiG server')
                executing_queue.dequeue_job_by_id(job_dict['JOB_ID'])
            else:
                # Real job, requeue job
                # Clear any scheduling data for exe_job before requeue
                scheduler.clear_schedule(job_dict)
                requeue_job(
                    job_dict,
                    'JOB TIMEOUT',
                    job_queue,
                    executing_queue,
                    configuration,
                    logger,
                    )
        # Restart non-sandbox resources for all timed out jobs
        # NOTE(review): resource_config may be False if the unpickle above
        # failed, and job_dict may be None in the retry_message below -
        # both would raise here. Confirm these cases cannot occur.
        if not resource_config.get('SANDBOX', False):
            # TODO: atomic_resource_exe_restart is not always effective
            # The imada resources have been seen to hang in wait for input
            # files loop across an atomic_resource_exe_restart run
            # (server PGID was 'starting').
            (status, msg) = \
                atomic_resource_exe_restart(unique_resource_name,
                    exe, configuration, logger)
            if status:
                logger.info('atomic_resource_exe_restart ok: res %s:%s'
                            % (unique_resource_name, exe))
            else:
                logger.error(
                    'atomic_resource_exe_restart FAILED: %s, res %s:%s'
                    % (msg, unique_resource_name, exe))
                # Make sure we do not loose exes even if restart fails
                retry_message = 'RESTARTEXEFAILED %s %s %s\n'\
                    % (unique_resource_name, exe_name,
                       job_dict['JOB_ID'])
                send_message_to_grid_script(retry_message, logger,
                        configuration)
                logger.info('requested restart exe retry attempt')
    elif cap_line.find('JOBQUEUEINFO') == 0:
        # Admin command: log and show requested fields for waiting jobs
        details = linelist[1:]
        if not details:
            details.append('JOB_ID')
        logger.info('--- DISPLAYING JOB QUEUE INFORMATION ---\n%s' % \
                    '\n'.join(job_queue.format_queue(details)))
        job_queue.show_queue(details)
    elif cap_line.find('DROPQUEUED') == 0:
        # Admin command: forcibly remove listed jobs from the job queue
        logger.info('--- REMOVING JOBS FROM JOB QUEUE ---')
        job_list = linelist[1:]
        if not job_list:
            logger.info('No jobs specified for removal')
        for job_id in job_list:
            try:
                job_queue.dequeue_job_by_id(job_id)
                logger.info("Removed job %s from job queue" % job_id)
            except Exception, exc:
                logger.error("Failed to remove job %s from job queue: %s" \
                             % (job_id, exc))
    elif cap_line.find('EXECUTINGQUEUEINFO') == 0:
        # Admin command: log and show requested fields for executing jobs
        details = linelist[1:]
        if not details:
            details.append('JOB_ID')
        logger.info('--- DISPLAYING EXECUTING QUEUE INFORMATION ---\n%s' % \
                    '\n'.join(executing_queue.format_queue(details)))
        executing_queue.show_queue(details)
    elif cap_line.find('DROPEXECUTING') == 0:
        # Admin command: forcibly remove listed jobs from executing queue
        logger.info('--- REMOVING JOBS FROM EXECUTING QUEUE ---')
        job_list = linelist[1:]
        if not job_list:
            logger.info('No jobs specified for removal')
        for job_id in job_list:
            try:
                executing_queue.dequeue_job_by_id(job_id)
                logger.info("Removed job %s from executing queue" % job_id)
            except Exception, exc:
                logger.error("Failed to remove job %s from exe queue: %s" \
                             % (job_id, exc))
    elif cap_line.find('DONEQUEUEINFO') == 0:
        # Admin command: log and show requested fields for finished jobs
        details = linelist[1:]
        if not details:
            details.append('JOB_ID')
        logger.info('--- DISPLAYING DONE QUEUE INFORMATION ---\n%s' % \
                    '\n'.join(done_queue.format_queue(details)))
        done_queue.show_queue(details)
    elif cap_line.find('DROPDONE') == 0:
        # Admin command: forcibly remove listed jobs from the done queue
        logger.info('--- REMOVING JOBS FROM DONE QUEUE ---')
        job_list = linelist[1:]
        if not job_list:
            logger.info('No jobs specified for removal')
        for job_id in job_list:
            try:
                done_queue.dequeue_job_by_id(job_id)
                logger.info("Removed job %s from done queue" % job_id)
                # NOTE(review): the error message below says 'exe queue' -
                # looks like a copy-paste slip from the DROPEXECUTING case.
            except Exception, exc:
                logger.error("Failed to remove job %s from exe queue: %s" \
                             % (job_id, exc))
    elif cap_line.find('STARTTIMEOUTTHREAD') == 0:
        # Admin command: (re)start the job timeout monitoring thread
        logger.info('--- STARTING TIME OUT THREAD ---')
        job_time_out_stop.clear()
        job_time_out_thread = threading.Thread(target=time_out_jobs,
                args=(job_time_out_stop, ))
        job_time_out_thread.start()
    elif cap_line.find('CHECKTIMEOUTTHREAD') == 0:
        # Admin command: report whether the timeout thread is still alive
        logger.info('--- CHECKING TIME OUT THREAD ---')
        logger.info('--- TIME OUT THREAD IS ALIVE: %s ---'
                    % job_time_out_thread.isAlive())
    elif cap_line.find('RELOADCONFIG') == 0:
        # Admin command: re-read the server configuration from disk
        logger.info('--- RELOADING CONFIGURATION ---')
        configuration.reload_config(True)
    elif cap_line.find('SHUTDOWN') == 0:
        # Admin command: stop the daemon cleanly
        logger.info('--- SAFE SHUTDOWN INITIATED ---')
        print '--- SAFE SHUTDOWN INITIATED ---'
        graceful_shutdown()
    else:
        # unknown keyword: log and ignore the line
        print 'not understood: %s' % cap_line
        logger.error('not understood: %s' % cap_line)
    time.sleep(1)
    # Experimental distributed server code
    if configuration.enable_server_dist:
        servercomm.exchange_status(configuration, scheduler,
                                   loop_counter)
    # TMP: Auto restart time out thread until we find the death cause
    if not job_time_out_thread.isAlive():
        logger.warning('--- TIME OUT THREAD DIED: %s %s %s---'
                       % (job_time_out_thread,
                          job_time_out_thread.isAlive(),
                          job_time_out_stop.isSet()))
        logger.info('ressurect time out thread with executing queue:')
        logger.info('%s' % executing_queue.show_queue(['ALL']))
        job_time_out_stop.clear()
        job_time_out_thread = threading.Thread(target=time_out_jobs,
                args=(job_time_out_stop, ))
        job_time_out_thread.start()
    sys.stdout.flush()
    loop_counter += 1
logger.debug('loop ended')
| heromod/migrid | mig/server/grid_script.py | Python | gpl-2.0 | 65,403 | [
"Brian"
] | caf63af2d3e4502f8ac070488c86c66a0c5015ea4540232e308f561090b33b8b |
#!/usr/bin/env python
import sys
import os
import netCDF4
import numpy
import json
import re
import copy
import logging
import argparse
log = logging.getLogger(__name__)
logging.basicConfig(level = logging.DEBUG)
fill_value = -9999.
reqdims = []
reqvars = []
# Main function.
def convert(gridfile, datafile, outputfile):
    """Convert an ASCII grid description plus data table to a netCDF file.

    Loads the data request lazily on first use, builds the level
    dimensions from gridfile and the variables from datafile, then
    writes the combined result to outputfile.
    """
    if not (reqdims and reqvars):
        read_drq("data_request.json")
    level_dims = parse_grid(gridfile)
    var_list = parse_variables(datafile, level_dims)
    # the time dimension is the first requested dimension; size it by the
    # number of parsed time stamps
    time_dim = reqdims[0]
    time_dim["nvals"] = len(var_list[0]["values"])
    write_nc(outputfile, var_list, [time_dim] + level_dims)
# Static reader function of the data request
def read_drq(filename):
    """Load the JSON data request and cache it in module globals.

    Populates the module-level reqdims and reqvars lists from the
    "dimensions" and "variables" entries of the JSON document.
    """
    global reqdims, reqvars
    with open(filename) as jsonfile:
        request = json.load(jsonfile)
    reqdims = request["dimensions"]
    reqvars = request["variables"]
# Parse static variables
def parse_grid(filename):
    """Parse the static grid description file into dimension dicts.

    Reads a whitespace-separated table whose header row names the grid
    dimensions and returns shallow copies of the requested dimension
    entries (all but the leading time dimension) with their "nvals"
    field filled from the matching column, or 0 when the dimension does
    not occur in the file.

    Note: the copy is shallow, so the returned dicts are the same
    objects stored in the global reqdims list - filling "nvals" here
    intentionally updates the request entries as well.
    """
    # Read everything up front inside a with-block so the file handle is
    # closed promptly (the original leaked the open handle).
    with open(filename, 'r') as gridfile:
        lines = [l for l in gridfile.readlines()
                 if not l.strip().startswith('#')]
    content = iter(lines)
    dims = next(content).split()
    # treat empty fields as 0 in every column
    converters = {}
    for i in range(len(dims)):
        converters[i] = lambda s: int(s.strip() or 0)
    numdata = numpy.loadtxt(content, dtype="int",
                            converters=converters).transpose()
    result = copy.copy(reqdims[1:])
    for dim in result:
        dimname = dim["name"]
        dim["nvals"] = numdata[dims.index(dimname)] if dimname in dims else 0
    return result
# Parse dynamic variables
def parse_variables(datafile, levels):
    """Parse the ascii data file into a list of variable dicts.

    The first column must hold time stamps; every further column header is
    either a plain variable name or ``name_<level>`` for a level-resolved
    variable.  Returns a list of dicts (time variable first), each carrying a
    ``values`` array taken from the corresponding column(s); an empty list on
    error.
    """
    variables = []
    lev_index_key = "level_index"
    # NOTE(review): the file handle is never closed -- relies on GC.
    f = open(datafile, 'r')
    lines = (l for l in f.readlines() if not l.strip().startswith('#'))
    varnames = next(lines).split()
    if(varnames[0].lower() not in ["time", "times"]):
        log.error("First column in data file is not recognized as time stamps...aborting")
        return variables
    converters = {}
    # NOTE(review): the range stops at len(varnames) - 1, so the last column
    # gets no empty-field converter -- looks like an off-by-one; confirm.
    for i in range(1, len(varnames) - 1):
        converters[i] = lambda s: float(s.strip() or 0)
    numdata = numpy.loadtxt(lines, skiprows=0, converters=converters).transpose()
    # Create time variable
    reqtimvar = [v for v in reqvars if v["name"] == "time"][0]
    variables.append(copy.copy(reqtimvar))
    times = numdata[0, :]
    variables[0]["values"] = times
    # Create variable-column dictionary
    colindex = 0
    for varname in varnames[1:]:
        colindex += 1
        lev = 0
        if('_' in varname):
            colstr = varname.split('_')
            varname = colstr[0]
            lev = int(colstr[1]) if len(colstr) > 1 else 1
        log.info("Processing column %d: variable %s at level %d" % (colindex, varname, lev))
        existingvars = [v for v in variables if v.get("name", None) == varname]
        if(len(existingvars) > 1):
            log.error("Multiple variable columns found for %s...skipping column %d" % (varname, colindex))
        if(any(existingvars)):
            # Additional level column for a variable seen before.
            # NOTE(review): for a suffix-less duplicate column lev stays 0, so
            # lev - 1 == -1 writes the *last* level slot -- verify intended.
            existingvars[0][lev_index_key][lev - 1] = colindex
        else:
            reqvarlist = [v for v in reqvars if v["name"] == varname]
            if(any(reqvarlist)):
                reqvar = reqvarlist[0]
                vardict = copy.copy(reqvar)
                variables.append(vardict)
                vardict[lev_index_key] = [colindex]
                # If the variable is level-resolved, reserve one column slot
                # per level (-1 marks a level with no column found yet).
                for level in levels:
                    levdim = level.get("name", None)
                    if levdim in vardict["dimensions"]:
                        vardict[lev_index_key] = [-1] * level.get("nvals", 0)
                        vardict[lev_index_key][lev - 1] = colindex
                        break
    # Extract data from entire block
    for variable in variables[1:]:
        lev_indices = variable[lev_index_key]
        if(-1 in lev_indices):
            log.error("Could not find a data column for variable %s at column %d" % (variable["name"], lev_indices.index(-1)))
        variable["values"] = numdata[lev_indices, :]
    return variables
# Write variables to netcdf
def write_nc(filename, variables, dimensions):
    """Write the collected variables and dimensions to a NETCDF4 file.

    :param filename: output netcdf path (overwritten)
    :param variables: list of variable dicts ("name", "unit", "long_name",
                      "dimensions", "values")
    :param dimensions: list of dimension dicts ("name", "nvals")
    """
    root = netCDF4.Dataset(filename, 'w', format="NETCDF4")
    #root.description = data_descr
    # Make dimensions
    for d in dimensions:
        root.createDimension(d["name"], d["nvals"])
    # make variables
    for v in variables:
        dims = v.get("dimensions", [])
        ncvar = root.createVariable(v["name"], "f8", dims)
        ncvar.units = v["unit"]
        ncvar.long_name = v["long_name"]
        # NOTE(review): assigning fill_value as a plain attribute after
        # creation does not install a real netCDF fill value; pass
        # fill_value= to createVariable if that is intended -- confirm.
        ncvar.fill_value = fill_value
        # was: ncvar.missing_value = missval -- undefined name (NameError)
        ncvar.missing_value = fill_value
        if len(dims) == 1:
            ncvar[:] = v["values"][:]
        elif len(dims) == 2:
            ncvar[:, :] = v["values"][:, :]
    root.close()
def main(args):
    """Command-line entry point: validate input paths and run the conversion.

    :param args: argument list (e.g. ``sys.argv[1:]``).  The original ignored
                 this parameter and re-read ``sys.argv``; it is now honoured.
    """
    parser = argparse.ArgumentParser(
        description="Conversion tool for single-column model ascii output to netcdf")
    parser.add_argument("datafile", metavar="FILE", type=str, help="Model output ascii file")
    parser.add_argument("gridfile", metavar="FILE", type=str, help="Model vertical levels ascii file")
    # Parse the list we were given instead of silently re-reading sys.argv.
    opts = parser.parse_args(args)
    datafile = opts.datafile
    if not os.path.isfile(datafile):
        # was: log.error("Input file %s does not exist") -- placeholder never filled
        log.error("Input file %s does not exist", datafile)
        return
    levelfile = opts.gridfile
    if not os.path.isfile(levelfile):
        log.error("Input file %s does not exist", levelfile)
        return
    outputfile = "output.nc"
    convert(levelfile, datafile, outputfile)
if __name__ == "__main__":
    # Drop the program name; main() parses the remaining CLI arguments.
    main(sys.argv[1:])
| eWUDAPT/eWUDAPT-analysis | eWUDAPT_analysis/txt2nc.py | Python | apache-2.0 | 5,331 | [
"NetCDF"
] | 658dc2b9b410a7d5cc2a0baebb0c1a4e6b53286fefcb2a11b98266f180a521b9 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from py4j.protocol import Py4JError
from zoo.orca.data.utils import *
from zoo.orca import OrcaContext
from zoo.common.nncontext import init_nncontext
from zoo import ZooContext, get_node_and_core_number
from zoo.util import nest
class XShards(object):
    """
    A collection of data which can be pre-processed in parallel.
    """
    def transform_shard(self, func, *args):
        """
        Transform each shard in the XShards using specified function.

        :param func: pre-processing function
        :param args: arguments for the pre-processing function
        :return: DataShard
        """
        pass

    def collect(self):
        """
        Returns a list that contains all of the elements in this XShards

        :return: list of elements
        """
        pass

    def num_partitions(self):
        """
        return the number of partitions in this XShards

        :return: an int
        """
        pass

    @classmethod
    def load_pickle(cls, path, minPartitions=None):
        """
        Load XShards from pickle files.

        :param path: The pickle file path/directory
        :param minPartitions: The minimum partitions for the XShards
        :return: SparkXShards object
        """
        sc = init_nncontext()
        return SparkXShards(sc.pickleFile(path, minPartitions))

    @staticmethod
    def partition(data, num_shards=None):
        """
        Partition local in memory data and form a SparkXShards

        :param data: np.ndarray, a tuple, list, dict of np.ndarray, or a nested structure
        made of tuple, list, dict with ndarray as the leaf value
        :param num_shards: the number of shards that the data will be partitioned into
        :return: a SparkXShards
        """
        sc = init_nncontext()
        node_num, core_num = get_node_and_core_number()
        # Default: one shard per core across the cluster.
        shard_num = node_num * core_num if num_shards is None else num_shards
        import numpy as np
        type_err_msg = """
The types supported in zoo.orca.data.XShards.partition are
1. np.ndarray
2. a tuple, list, dict of np.ndarray
3. nested structure made of tuple, list, dict with ndarray as the leaf value

But got data of type {}
        """.format(type(data))
        supported_types = {list, tuple, dict}
        if isinstance(data, np.ndarray):
            if data.shape[0] < shard_num:
                raise ValueError("The length of data {} is smaller than the total number "
                                 "of shards {}. Please adjust the num_shards option to be "
                                 "at most {}.".format(data.shape[0], shard_num, data.shape[0]))
            arrays = np.array_split(data, shard_num)
            rdd = sc.parallelize(arrays)
        else:
            assert type(data) in supported_types, type_err_msg
            flattened = nest.flatten(data)
            data_length = len(flattened[0])
            data_to_be_shard = []
            if data_length < shard_num:
                raise ValueError("The length of data {} is smaller than the total number "
                                 "of shards {}. Please adjust the num_shards option to be "
                                 "at most {}.".format(data_length, shard_num, data_length))
            for i in range(shard_num):
                data_to_be_shard.append([])
            # Split every leaf array into shard_num parts and regroup them
            # per shard so each shard holds a slice of every leaf.
            for x in flattened:
                assert len(x) == data_length, \
                    "the ndarrays in data must all have the same size in first dimension, " \
                    "got first ndarray of size {} and another {}".format(data_length, len(x))
                x_parts = np.array_split(x, shard_num)
                for idx, x_part in enumerate(x_parts):
                    data_to_be_shard[idx].append(x_part)
            # Restore the original nested structure within each shard.
            data_to_be_shard = [nest.pack_sequence_as(data, shard) for shard in data_to_be_shard]
            rdd = sc.parallelize(data_to_be_shard)
        data_shards = SparkXShards(rdd)
        return data_shards
class SparkXShards(XShards):
    """
    A collection of data which can be pre-processed in parallel on Spark
    """
    def __init__(self, rdd, transient=False):
        # In eager mode (the default unless transient) the RDD is cached and
        # materialized immediately so that errors surface early.
        self.rdd = rdd
        self.user_cached = False
        if transient:
            self.eager = False
        else:
            self.eager = OrcaContext._eager_mode
            self.rdd.cache()
        if self.eager:
            self.compute()
        # Lazily-filled metadata cache; may hold 'class_name' and 'schema'.
        self.type = {}

    def transform_shard(self, func, *args):
        """
        Return a new SparkXShards by applying a function to each shard of this SparkXShards

        :param func: python function to process data. The first argument is the data shard.
        :param args: other arguments in this function.
        :return: a new SparkXShards.
        """
        def transform(iter, func, *args):
            for x in iter:
                yield func(x, *args)

        transformed_shard = SparkXShards(self.rdd.mapPartitions(lambda iter:
                                         transform(iter, func, *args)))
        self._uncache()
        return transformed_shard

    def collect(self):
        """
        Returns a list that contains all of the elements in this SparkXShards

        :return: a list of data elements.
        """
        return self.rdd.collect()

    def cache(self):
        """
        Persist this SparkXShards in memory

        :return:
        """
        self.user_cached = True
        self.rdd.cache()
        return self

    def uncache(self):
        """
        Make this SparkXShards as non-persistent, and remove all blocks for it from memory

        :return:
        """
        self.user_cached = False
        if self.is_cached():
            try:
                self.rdd.unpersist()
            except Py4JError:
                print("Try to unpersist an uncached rdd")
        return self

    def _uncache(self):
        # Internal: drop the cache only if the user did not explicitly cache().
        if not self.user_cached:
            self.uncache()

    def is_cached(self):
        # Whether the underlying RDD is currently marked persistent.
        return self.rdd.is_cached

    def compute(self):
        # Force evaluation (and caching, if enabled) of the underlying RDD.
        self.rdd.count()
        return self

    def num_partitions(self):
        """
        Get number of partitions for this SparkXShards.

        :return: number of partitions.
        """
        return self.rdd.getNumPartitions()

    def repartition(self, num_partitions):
        """
        Return a new SparkXShards that has exactly num_partitions partitions.

        :param num_partitions: target number of partitions
        :return: a new SparkXShards object.
        """
        if self._get_class_name() == 'pandas.core.frame.DataFrame':
            import pandas as pd
            if num_partitions > self.rdd.getNumPartitions():
                # Increasing partitions: explode each DataFrame into rows,
                # shuffle them, then rebuild one DataFrame per partition.
                # NOTE(review): rows are keyed by their first column value for
                # partitionBy -- assumes that is an acceptable key; verify.
                rdd = self.rdd\
                    .flatMap(lambda df: df.apply(lambda row: (row[0], row.values.tolist()), axis=1)
                             .values.tolist())\
                    .partitionBy(num_partitions)

                schema = self._get_schema()

                def merge_rows(iter):
                    data = [value[1] for value in list(iter)]
                    if data:
                        df = pd.DataFrame(data=data, columns=schema['columns'])\
                            .astype(schema['dtypes'])
                        return [df]
                    else:
                        # no data in this partition
                        return iter
                repartitioned_shard = SparkXShards(rdd.mapPartitions(merge_rows))
            else:
                # Decreasing partitions: coalesce (no shuffle) and concat.
                def combine_df(iter):
                    dfs = list(iter)
                    if len(dfs) > 0:
                        return [pd.concat(dfs)]
                    else:
                        return iter
                rdd = self.rdd.coalesce(num_partitions)
                repartitioned_shard = SparkXShards(rdd.mapPartitions(combine_df))
        elif self._get_class_name() == 'list':
            if num_partitions > self.rdd.getNumPartitions():
                rdd = self.rdd \
                    .flatMap(lambda data: data) \
                    .repartition(num_partitions)
                repartitioned_shard = SparkXShards(rdd.mapPartitions(
                    lambda iter: [list(iter)]))
            else:
                rdd = self.rdd.coalesce(num_partitions)
                from functools import reduce
                repartitioned_shard = SparkXShards(rdd.mapPartitions(
                    lambda iter: [reduce(lambda l1, l2: l1 + l2, iter)]))
        elif self._get_class_name() == 'numpy.ndarray':
            elem = self.rdd.first()
            shape = elem.shape
            dtype = elem.dtype
            if len(shape) > 0:
                if num_partitions > self.rdd.getNumPartitions():
                    rdd = self.rdd\
                        .flatMap(lambda data: list(data))\
                        .repartition(num_partitions)
                    # NOTE(review): np.stack([list(iter)], axis=0) wraps the
                    # whole partition in one extra leading axis -- confirm the
                    # intended per-partition array shape.
                    repartitioned_shard = SparkXShards(rdd.mapPartitions(
                        lambda iter: np.stack([list(iter)], axis=0)
                        .astype(dtype)))
                else:
                    rdd = self.rdd.coalesce(num_partitions)
                    from functools import reduce
                    repartitioned_shard = SparkXShards(rdd.mapPartitions(
                        lambda iter: [np.concatenate(list(iter), axis=0)]))
            else:
                # 0-d arrays cannot be concatenated; plain repartition.
                repartitioned_shard = SparkXShards(self.rdd.repartition(num_partitions))
        else:
            repartitioned_shard = SparkXShards(self.rdd.repartition(num_partitions))
        self._uncache()
        return repartitioned_shard

    def partition_by(self, cols, num_partitions=None):
        """
        Return a new SparkXShards partitioned using the specified columns.
        This is only applicable for SparkXShards of Pandas DataFrame.

        :param cols: specified columns to partition by.
        :param num_partitions: target number of partitions. If not specified,
        the new SparkXShards would keep the current partition number.
        :return: a new SparkXShards.
        """
        if self._get_class_name() == 'pandas.core.frame.DataFrame':
            import pandas as pd
            schema = self._get_schema()
            # if partition by a column
            if isinstance(cols, str):
                if cols not in schema['columns']:
                    raise Exception("The partition column is not in the DataFrame")
                # change data to key value pairs
                rdd = self.rdd.flatMap(
                    lambda df: df.apply(lambda row: (row[cols], row.values.tolist()), axis=1)
                    .values.tolist())

                partition_num = self.rdd.getNumPartitions() if not num_partitions \
                    else num_partitions
                # partition with key
                partitioned_rdd = rdd.partitionBy(partition_num)
            else:
                raise Exception("Only support partition by a column name")

            def merge(iterator):
                data = [value[1] for value in list(iterator)]
                if data:
                    df = pd.DataFrame(data=data, columns=schema['columns']).astype(schema['dtypes'])
                    return [df]
                else:
                    # no data in this partition
                    return []
            # merge records to df in each partition
            partitioned_shard = SparkXShards(partitioned_rdd.mapPartitions(merge))
            self._uncache()
            return partitioned_shard
        else:
            raise Exception("Currently only support partition by for XShards"
                            " of Pandas DataFrame")

    def unique(self):
        """
        Return a unique list of elements of this SparkXShards.
        This is only applicable for SparkXShards of Pandas Series.

        :return: a unique list of elements of this SparkXShards.
        """
        if self._get_class_name() == 'pandas.core.series.Series':
            import pandas as pd
            rdd = self.rdd.map(lambda s: s.unique())
            import numpy as np
            result = rdd.reduce(lambda list1, list2: pd.unique(np.concatenate((list1, list2),
                                                                              axis=0)))
            return result
        else:
            # we may support numpy or other types later
            raise Exception("Currently only support unique() on XShards of Pandas Series")

    def split(self):
        """
        Split SparkXShards into multiple SparkXShards.
        Each element in the SparkXShards needs be a list or tuple with same length.

        :return: Splits of SparkXShards. If element in the input SparkDataShard is not
        list or tuple, return list of input SparkDataShards.
        """
        # get number of splits
        list_split_length = self.rdd.map(lambda data: len(data) if isinstance(data, list) or
                                         isinstance(data, tuple) else 1).collect()
        # check if each element has same splits
        if list_split_length.count(list_split_length[0]) != len(list_split_length):
            raise Exception("Cannot split this XShards because its partitions "
                            "have different split length")
        else:
            if list_split_length[0] > 1:
                def get_data(order):
                    def transform(data):
                        return data[order]
                    return transform
                split_shard_list = [SparkXShards(self.rdd.map(get_data(i)))
                                    for i in range(list_split_length[0])]
                self._uncache()
                return split_shard_list
            else:
                return [self]

    def zip(self, other):
        """
        Zips this SparkXShards with another one, returning key-value pairs with the first element
        in each SparkXShards, second element in each SparkXShards, etc. Assumes that the two
        SparkXShards have the *same number of partitions* and the *same number of elements
        in each partition*(e.g. one was made through a transform_shard on the other

        :param other: another SparkXShards
        :return: zipped SparkXShards
        """
        assert isinstance(other, SparkXShards), "other should be a SparkXShards"
        assert self.num_partitions() == other.num_partitions(), \
            "The two SparkXShards should have the same number of partitions"
        try:
            rdd = self.rdd.zip(other.rdd)
            zipped_shard = SparkXShards(rdd)
            other._uncache()
            self._uncache()
            return zipped_shard
        except Exception:
            raise ValueError("The two SparkXShards should have the same number of elements "
                             "in each partition")

    def __len__(self):
        # Total number of elements across all shards (non-sized shards count 1).
        return self.rdd.map(lambda data: len(data) if hasattr(data, '__len__') else 1)\
            .reduce(lambda l1, l2: l1 + l2)

    def save_pickle(self, path, batchSize=10):
        """
        Save this SparkXShards as a SequenceFile of serialized objects.
        The serializer used is pyspark.serializers.PickleSerializer, default batch size is 10.

        :param path: target path.
        :param batchSize: batch size for each sequence file chunk.
        """
        self.rdd.saveAsPickleFile(path, batchSize)
        return self

    def __del__(self):
        # Best-effort release of the cached RDD when the wrapper is collected.
        self.uncache()

    def __getitem__(self, key):
        # Element-wise selection: returns a transient SparkXShards of data[key].
        def get_data(data):
            assert hasattr(data, '__getitem__'), \
                "No selection operation available for this XShards"
            try:
                value = data[key]
            except:
                raise Exception("Invalid key for this XShards")
            return value
        return SparkXShards(self.rdd.map(get_data), transient=True)

    def _for_each(self, func, *args, **kwargs):
        # Map func over every element; exceptions are returned as values
        # rather than raised, so callers can inspect per-element failures.
        def utility_func(x, func, *args, **kwargs):
            try:
                result = func(x, *args, **kwargs)
            except Exception as e:
                return e
            return result
        result_rdd = self.rdd.map(lambda x: utility_func(x, func, *args, **kwargs))
        return result_rdd

    def _get_schema(self):
        # Cached columns/dtypes of the contained DataFrames (None otherwise).
        if 'schema' in self.type:
            return self.type['schema']
        else:
            if self._get_class_name() == 'pandas.core.frame.DataFrame':
                import pandas as pd
                columns, dtypes = self.rdd.map(lambda x: (x.columns, x.dtypes)).first()
                self.type['schema'] = {'columns': columns, 'dtypes': dtypes}
                return self.type['schema']
            return None

    def _get_class_name(self):
        # Cached fully-qualified class name of the elements (from first shard).
        if 'class_name' in self.type:
            return self.type['class_name']
        else:
            self.type['class_name'] = self._for_each(get_class_name).first()
            return self.type['class_name']
class SharedValue(object):
    """Read-only value broadcast to all Spark executors (wraps ``sc.broadcast``)."""
    def __init__(self, data):
        sc = init_nncontext()
        self.broadcast_data = sc.broadcast(data)
        # cached local copy of the broadcast payload, filled on first access
        self._value = None

    @property
    def value(self):
        # Fetch (and cache) the broadcast payload.
        self._value = self.broadcast_data.value
        return self._value

    def unpersist(self):
        # Drop cached copies of the broadcast data on the executors.
        self.broadcast_data.unpersist()
| intel-analytics/analytics-zoo | pyzoo/zoo/orca/data/shard.py | Python | apache-2.0 | 17,847 | [
"ORCA"
] | 090d13c03de85251e0305ef83460d0571aa7ba40d0a886f0d292a4bc588610c5 |
# ----------------------------------------------------------
# AdvaS Advanced Search
# module for phonetic algorithms
#
# (C) 2002 - 2005 Frank Hofmann, Chemnitz, Germany
# email fh@efho.de
# ----------------------------------------------------------
# changed 2005-01-24
import string
import re
def soundex(term):
    """Return the four-character soundex code of *term* ("0000" for empty input).

    Soundex hashes English words into an alpha-numeric code representing what
    the word sounds like; see http://www.bluepoof.com/Soundex/info.html for
    notes on differences between implementations.
    """
    # This version modified by Nathan Heagy at Front Logic Inc., to be
    # compatible with php's soundexing and much faster.
    #
    # eAndroid / Nathan Heagy / Jul 29 2000
    # changes by Frank Hofmann / Jan 02 2005
    # Ported from the removed Python-2 string-module functions
    # (string.maketrans/upper/translate) to the equivalent str methods.

    # translation table mapping letters to soundex digit classes
    table = str.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ', '01230120022455012623010202')

    # check parameter
    if not term:
        return "0000"  # could be Z000 for compatibility with other implementations

    # convert into uppercase letters
    term = term.upper()
    first_char = term[0]

    # translate the string into soundex code according to the table above
    term = term[1:].translate(table)

    # remove all 0s
    term = term.replace("0", "")

    # remove duplicate numbers in-a-row
    str2 = first_char
    for x in term:
        if x != str2[-1]:
            str2 = str2 + x

    # pad with zeros
    str2 = str2 + "0" * len(str2)

    # take the first four letters
    return_value = str2[:4]

    # return value
    return return_value
def metaphone (term):
    """Return the metaphone code for *term*.

    Implementation of the original algorithm from Lawrence Philips,
    extended/rewritten by M. Kuhn, improvements with thanks to
    John Machin <sjmachin@lexicon.net>.  Non-letters are stripped and an
    empty input yields "".
    """
    # define return value
    code = ""

    i = 0
    term_length = len(term)

    if (term_length == 0):
        # empty string ?
        return code
    # end if

    # extension #1 (added 2005-01-28)
    # convert to lowercase
    term = term.lower()

    # extension #2 (added 2005-01-28)
    # remove all non-english characters, first
    term = re.sub(r'[^a-z]', '', term)
    if len(term) == 0:
        # nothing left
        return code
    # end if

    # extension #3 (added 2005-01-24)
    # conflate repeated letters
    firstChar = term[0]
    str2 = firstChar
    for x in term:
        if x != str2[-1]:
            str2 = str2 + x
        # end if
    # end for

    # extension #4 (added 2005-01-24)
    # remove any vowels unless a vowel is the first letter
    firstChar = str2[0]
    str3 = firstChar
    for x in str2[1:]:
        if (re.search(r'[^aeiou]', x)):
            str3 = str3 + x
        # end if
    # end for

    term = str3
    term_length = len(term)
    if term_length == 0:
        # nothing left
        return code
    # end if

    # check for exceptions (special word starts)
    if (term_length > 1):
        # get first two characters
        first_chars = term[0:2]

        # build translation table
        table = {
            "ae":"e",
            "gn":"n",
            "kn":"n",
            "pn":"n",
            "wr":"n",
            "wh":"w"
        }

        if first_chars in list(table.keys()):
            term = term[2:]
            code = table[first_chars]
            term_length = len(term)
        # end if

    elif (term[0] == "x"):
        term = ""
        code = "s"
        term_length = 0
    # end if

    # define standard translation table (single-letter defaults; the loop
    # below overrides these in context-dependent cases)
    st_trans = {
        "b":"b",
        "c":"k",
        "d":"t",
        "g":"k",
        "h":"h",
        "k":"k",
        "p":"p",
        "q":"k",
        "s":"s",
        "t":"t",
        "v":"f",
        "w":"w",
        "x":"ks",
        "y":"y",
        "z":"s"
    }

    i = 0
    while (i < term_length):
        # init character to add, init basic patterns
        add_char = ""
        part_n_2 = ""
        part_n_3 = ""
        part_n_4 = ""
        part_c_2 = ""
        part_c_3 = ""

        # extract a number of patterns, if possible
        if (i < (term_length - 1)):
            part_n_2 = term[i:i + 2]

            if (i > 0):
                part_c_2 = term[i - 1:i + 1]
                part_c_3 = term[i - 1:i + 2]
            # end if
        # end if

        if (i < (term_length - 2)):
            part_n_3 = term[i:i + 3]
        # end if

        if (i < (term_length - 3)):
            part_n_4 = term[i:i + 4]
        # end if

        # use table with conditions for translations
        if (term[i] == "b"):
            add_char = st_trans["b"]
            if (i == (term_length - 1)):
                if (i > 0):
                    if (term[i - 1] == "m"):
                        add_char = ""
                    # end if
                # end if
            # end if
        elif (term[i] == "c"):
            add_char = st_trans["c"]
            if (part_n_2 == "ch"):
                add_char = "x"
            elif (re.search(r'c[iey]', part_n_2)):
                add_char = "s"
            # end if

            if (part_n_3 == "cia"):
                add_char = "x"
            # end if

            if (re.search(r'sc[iey]', part_c_3)):
                add_char = ""
            # end if
        elif (term[i] == "d"):
            add_char = st_trans["d"]
            if (re.search(r'dg[eyi]', part_n_3)):
                add_char = "j"
            # end if
        elif (term[i] == "g"):
            add_char = st_trans["g"]

            if (part_n_2 == "gh"):
                if (i == (term_length - 2)):
                    add_char = ""
                # end if
            elif (re.search(r'gh[aeiouy]', part_n_3)):
                add_char = ""
            elif (part_n_2 == "gn"):
                add_char = ""
            elif (part_n_4 == "gned"):
                add_char = ""
            elif (re.search(r'dg[eyi]', part_c_3)):
                add_char = ""
            elif (part_n_2 == "gi"):
                if (part_c_3 != "ggi"):
                    add_char = "j"
                # end if
            elif (part_n_2 == "ge"):
                if (part_c_3 != "gge"):
                    add_char = "j"
                # end if
            elif (part_n_2 == "gy"):
                if (part_c_3 != "ggy"):
                    add_char = "j"
                # end if
            elif (part_n_2 == "gg"):
                add_char = ""
            # end if
        elif (term[i] == "h"):
            add_char = st_trans["h"]
            if (re.search(r'[aeiouy]h[^aeiouy]', part_c_3)):
                add_char = ""
            elif (re.search(r'[csptg]h', part_c_2)):
                add_char = ""
            # end if
        elif (term[i] == "k"):
            add_char = st_trans["k"]
            if (part_c_2 == "ck"):
                add_char = ""
            # end if
        elif (term[i] == "p"):
            add_char = st_trans["p"]
            if (part_n_2 == "ph"):
                add_char = "f"
            # end if
        elif (term[i] == "q"):
            add_char = st_trans["q"]
        elif (term[i] == "s"):
            add_char = st_trans["s"]
            if (part_n_2 == "sh"):
                add_char = "x"
            # end if

            if (re.search(r'si[ao]', part_n_3)):
                add_char = "x"
            # end if
        elif (term[i] == "t"):
            add_char = st_trans["t"]
            if (part_n_2 == "th"):
                add_char = "0"
            # end if

            if (re.search(r'ti[ao]', part_n_3)):
                add_char = "x"
            # end if
        elif (term[i] == "v"):
            add_char = st_trans["v"]
        elif (term[i] == "w"):
            add_char = st_trans["w"]
            if (re.search(r'w[^aeiouy]', part_n_2)):
                add_char = ""
            # end if
        elif (term[i] == "x"):
            add_char = st_trans["x"]
        elif (term[i] == "y"):
            add_char = st_trans["y"]
        elif (term[i] == "z"):
            add_char = st_trans["z"]
        else:
            # alternative
            add_char = term[i]
        # end if

        code = code + add_char
        i += 1
    # end while

    # return metaphone code
    return code
def nysiis(term):
    """Return the New York State Identification and Intelligence System
    (NYSIIS) phonetic code for *term* ("" for empty input)."""
    code = ""

    i = 0
    term_length = len(term)

    if (term_length == 0):
        # empty string ?
        return code
    # end if

    # build translation table for the first characters
    table = {
        "mac":"mcc",
        "ph":"ff",
        "kn":"nn",
        "pf":"ff",
        "k":"c",
        "sch":"sss"
    }

    for table_entry in list(table.keys()):
        table_value = table[table_entry]        # get table value
        table_value_len = len(table_value)      # calculate its length
        first_chars = term[0:table_value_len]
        if (first_chars == table_entry):
            term = table_value + term[table_value_len:]
            break
        # end if
    # end for

    # build translation table for the last characters
    table = {
        "ee":"y",
        "ie":"y",
        "dt":"d",
        "rt":"d",
        "rd":"d",
        "nt":"d",
        "nd":"d",
    }

    for table_entry in list(table.keys()):
        table_value = table[table_entry]        # get table value
        table_entry_len = len(table_entry)      # calculate its length
        last_chars = term[(0 - table_entry_len):]
        if (last_chars == table_entry):
            # Replace the matched suffix (length table_entry_len) with its
            # value.  BUGFIX: the original sliced with the *stale*
            # table_value_len left over from the first-characters loop above,
            # which cut the wrong number of letters whenever a prefix rule
            # had matched.
            term = term[:(0 - table_entry_len)] + table_value
            break
        # end if
    # end for

    # initialize code
    code = term

    # transform ev->af
    code = re.sub(r'ev', r'af', code)

    # transform a,e,i,o,u->a
    code = re.sub(r'[aeiouy]', r'a', code)

    # transform q->g
    code = re.sub(r'q', r'g', code)

    # transform z->s
    code = re.sub(r'z', r's', code)

    # transform m->n
    code = re.sub(r'm', r'n', code)

    # transform kn->n
    code = re.sub(r'kn', r'n', code)

    # transform k->c
    code = re.sub(r'k', r'c', code)

    # transform sch->sss
    code = re.sub(r'sch', r'sss', code)

    # transform ph->ff
    code = re.sub(r'ph', r'ff', code)

    # transform h-> if previous or next is nonvowel -> previous
    occur = re.findall(r'([a-z]{0,1}?)h([a-z]{0,1}?)', code)
    for occur_group in occur:
        occur_item_previous = occur_group[0]
        occur_item_next = occur_group[1]
        if ((re.match(r'[^aeiouy]', occur_item_previous)) or (re.match(r'[^aeiouy]', occur_item_next))):
            if (occur_item_previous != ""):
                # make substitution
                code = re.sub(occur_item_previous + "h", occur_item_previous * 2, code, 1)
            # end if
        # end if
    # end for

    # transform w-> if previous is vowel -> previous
    occur = re.findall(r'([aeiouy]{1}?)w', code)
    for occur_group in occur:
        occur_item_previous = occur_group[0]
        # make substitution
        code = re.sub(occur_item_previous + "w", occur_item_previous * 2, code, 1)
    # end for

    # check last character
    # -s, remove
    code = re.sub(r's$', r'', code)
    # -ay, replace by -y
    code = re.sub(r'ay$', r'y', code)
    # -a, remove
    code = re.sub(r'a$', r'', code)

    # return nysiis code
    return code
def caverphone(term):
    """Return the ten-character Caverphone 2.0 key for *term* ("" for empty input).

    Developed at the University of Otago, New Zealand.
    Project: Caversham Project (http://caversham.otago.ac.nz)
    Developer: David Hood, University of Otago, New Zealand
    Contact: caversham@otago.ac.nz
    Project Technical Paper: http://caversham.otago.ac.nz/files/working/ctp150804.pdf
    Version 2.0 (2004-08-15)
    """
    code = ""

    i = 0
    term_length = len(term)

    if (term_length == 0):
        # empty string ?
        return code
    # end if

    # convert to lowercase
    code = term.lower()

    # remove anything not in the standard alphabet (a-z)
    code = re.sub(r'[^a-z]', '', code)

    # remove final e
    if code.endswith("e"):
        code = code[:-1]

    # if the name starts with cough, rough, tough, enough or trough -> cou2f (rou2f, tou2f, enou2f, trough)
    code = re.sub(r'^([crt]|(en)|(tr))ough', r'\1ou2f', code)

    # if the name starts with gn -> 2n
    code = re.sub(r'^gn', r'2n', code)

    # if the name ends with mb -> m2
    code = re.sub(r'mb$', r'm2', code)

    # replace cq -> 2q
    code = re.sub(r'cq', r'2q', code)

    # replace c[i,e,y] -> s[i,e,y]
    code = re.sub(r'c([iey])', r's\1', code)

    # replace tch -> 2ch
    code = re.sub(r'tch', r'2ch', code)

    # replace c,q,x -> k
    code = re.sub(r'[cqx]', r'k', code)

    # replace v -> f
    code = re.sub(r'v', r'f', code)

    # replace dg -> 2g
    code = re.sub(r'dg', r'2g', code)

    # replace ti[o,a] -> si[o,a]
    code = re.sub(r'ti([oa])', r'si\1', code)

    # replace d -> t
    code = re.sub(r'd', r't', code)

    # replace ph -> fh
    code = re.sub(r'ph', r'fh', code)

    # replace b -> p
    code = re.sub(r'b', r'p', code)

    # replace sh -> s2
    code = re.sub(r'sh', r's2', code)

    # replace z -> s
    code = re.sub(r'z', r's', code)

    # replace initial vowel [aeiou] -> A
    code = re.sub(r'^[aeiou]', r'A', code)

    # replace all other vowels [aeiou] -> 3
    code = re.sub(r'[aeiou]', r'3', code)

    # replace j -> y
    code = re.sub(r'j', r'y', code)

    # replace an initial y3 -> Y3
    code = re.sub(r'^y3', r'Y3', code)

    # replace an initial y -> A
    code = re.sub(r'^y', r'A', code)

    # replace y -> 3
    code = re.sub(r'y', r'3', code)

    # replace 3gh3 -> 3kh3
    code = re.sub(r'3gh3', r'3kh3', code)

    # replace gh -> 22
    code = re.sub(r'gh', r'22', code)

    # replace g -> k
    code = re.sub(r'g', r'k', code)

    # replace groups of s,t,p,k,f,m,n by its single, upper-case equivalent
    for single_letter in ["s", "t", "p", "k", "f", "m", "n"]:
        otherParts = re.split(single_letter + "+", code)
        # BUGFIX: str.join replaces the Python-2-only string.join(list, sep)
        # and string.upper, which no longer exist in Python 3.
        code = single_letter.upper().join(otherParts)

    # replace w[3,h3] by W[3,h3]
    code = re.sub(r'w(h?3)', r'W\1', code)

    # replace final w with 3
    code = re.sub(r'w$', r'3', code)

    # replace w -> 2
    code = re.sub(r'w', r'2', code)

    # replace h at the beginning with an A
    code = re.sub(r'^h', r'A', code)

    # replace all other occurrences of h with a 2
    code = re.sub(r'h', r'2', code)

    # replace r3 with R3
    code = re.sub(r'r3', r'R3', code)

    # replace final r -> 3
    code = re.sub(r'r$', r'3', code)

    # replace r with 2
    code = re.sub(r'r', r'2', code)

    # replace l3 with L3
    code = re.sub(r'l3', r'L3', code)

    # replace final l -> 3
    code = re.sub(r'l$', r'3', code)

    # replace l with 2
    code = re.sub(r'l', r'2', code)

    # remove all 2's
    code = re.sub(r'2', r'', code)

    # replace the final 3 -> A
    code = re.sub(r'3$', r'A', code)

    # remove all 3's
    code = re.sub(r'3', r'', code)

    # extend the code by 10 '1' (one)
    code += '1' * 10

    # take the first 10 characters
    caverphoneCode = code[:10]

    # return caverphone code
    return caverphoneCode
| weblyzard/ewrt | src/eWRT/lib/thirdparty/advas/phonetics.py | Python | gpl-3.0 | 15,761 | [
"VisIt"
] | 2da37e6c9b27170a39c563acb2e787af1e5032b7f5c5ca1bfde3f09887eca6b3 |
# GromacsWrapper: utilities.py
# Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.utilities` -- Helper functions and classes
========================================================
The module defines some convenience functions and classes that are
used in other modules; they do *not* make use of :mod:`gromacs.tools`
or :mod:`gromacs.cbook` and can be safely imported at any time.
Classes
-------
:class:`FileUtils` provides functions related to filename handling. It
can be used as a base or mixin class. The :class:`gromacs.analysis.Simulation`
class is derived from it.
.. autoclass:: FileUtils
:members:
.. autoclass:: AttributeDict
.. autoclass:: Timedelta
Functions
---------
Some additional convenience functions that deal with files and
directories:
.. function:: openany(directory[,mode='r'])
Context manager to open a compressed (bzip2, gzip) or plain file
(uses :func:`anyopen`).
.. autofunction:: anyopen
.. autofunction:: realpath
.. function:: in_dir(directory[,create=True])
Context manager to execute a code block in a directory.
* The *directory* is created if it does not exist (unless
*create* = ``False`` is set)
* At the end or after an exception code always returns to
the directory that was the current directory before entering
the block.
.. autofunction:: find_first
.. autofunction:: withextsep
.. autofunction:: which
Functions that improve list processing and which do *not* treat
strings as lists:
.. autofunction:: iterable
.. autofunction:: asiterable
.. autofunction:: firstof
Functions that help handling Gromacs files:
.. autofunction:: unlink_f
.. autofunction:: unlink_gmx
.. autofunction:: unlink_gmx_backups
.. autofunction:: number_pdbs
Functions that make working with matplotlib_ easier:
.. _matplotlib: http://matplotlib.sourceforge.net/
.. autofunction:: activate_subplot
.. autofunction:: remove_legend
Miscellaneous functions:
.. autofunction:: convert_aa_code
.. autofunction:: autoconvert
Data
----
.. autodata:: amino_acid_codes
"""
from __future__ import absolute_import, with_statement
__docformat__ = "restructuredtext en"
import os
import glob
import fnmatch
import re
import warnings
import errno
import subprocess
from contextlib import contextmanager
import bz2, gzip
import datetime
import numpy
import logging
logger = logging.getLogger('gromacs.utilities')
from .exceptions import AutoCorrectionWarning
class AttributeDict(dict):
    """A dictionary with pythonic access to keys as attributes --- useful for interactive work."""
    def __getattribute__(self, x):
        # Fall back to item lookup so that d.key behaves like d["key"].
        try:
            return super(AttributeDict,self).__getattribute__(x)
        except AttributeError:
            return self[x]
    def __setattr__(self, name, value):
        # NOTE(review): dict.__setitem__ does not raise KeyError, so the
        # except branch below appears unreachable and attribute assignment
        # always stores an item -- confirm this is intended.
        try:
            super(AttributeDict, self).__setitem__(name, value)
        except KeyError:
            super(AttributeDict, self).__setattr__(name, value)
    def __getstate__(self):
        # Pickle support: the dict content itself is the complete state.
        return self
    def __setstate__(self, state):
        self.update(state)
def autoconvert(s):
    """Convert input to a numerical type if possible.

    1. A non-string object is returned as it is
    2. Try conversion to int, float, str.

    A whitespace-separated string of several values is converted element
    by element and returned as a :class:`numpy.ndarray`.
    """
    if type(s) is not str:
        return s
    for converter in (int, float, str):  # increasing order of lenience
        try:
            values = [converter(field) for field in s.split()]
        except (ValueError, AttributeError):
            continue
        if len(values) == 1:
            return values[0]
        return numpy.array(values)
    raise ValueError("Failed to autoconvert {0!r}".format(s))
@contextmanager
def openany(datasource, mode='r', **kwargs):
    """Open *datasource* and close it again when the context exits.

    :Arguments:
       *datasource*
          a stream or a filename
       *mode*
          ``'r'`` opens for reading, ``'w'`` for writing ['r']
       *kwargs*
          additional keyword arguments that are passed through to the
          actual handler; if these are not appropriate then an
          exception will be raised by the handler
    """
    stream, _filename = anyopen(datasource, mode=mode, **kwargs)
    try:
        yield stream
    finally:
        stream.close()
def anyopen(datasource, mode='r', **kwargs):
    """Open datasource (gzipped, bzipped, uncompressed) and return a stream.

    :Arguments:
       *datasource*
          a stream or a filename
       *mode*
          ``'r'`` opens for reading, ``'w'`` for writing ['r']
       *kwargs*
          additional keyword arguments that are passed through to the
          actual handler; if these are not appropriate then an
          exception will be raised by the handler

    :Returns: ``(stream, filename)`` where *filename* is a label suitable
              for error messages.
    """
    # Map filename extension to opener; '' is the plain ``file`` builtin.
    # NOTE(review): ``file`` only exists on Python 2.
    handlers = {'bz2': bz2.BZ2File, 'gz': gzip.open, '': file}
    if mode.startswith('r'):
        if hasattr(datasource, 'next') or hasattr(datasource, 'readline'):
            # already a stream: use as-is and derive a label for messages
            stream = datasource
            try:
                filename = '({0})'.format(stream.name)  # maybe that does not always work?
            except AttributeError:
                filename = str(type(stream))
        else:
            stream = None
            filename = datasource
            # Probe each decompressor in turn; _get_stream rejects a wrong
            # handler by attempting a readline.  file == '' should be last.
            for ext in ('bz2', 'gz', ''):
                openfunc = handlers[ext]
                # NOTE(review): _get_stream() does not accept **kwargs in its
                # signature below — non-empty kwargs would raise TypeError
                # here; confirm intended.
                stream = _get_stream(datasource, openfunc, mode=mode, **kwargs)
                if stream is not None:
                    break
        if stream is None:
            raise IOError("Cannot open {filename!r} in mode={mode!r}.".format(**vars()))
    elif mode.startswith('w'):
        if hasattr(datasource, 'write'):
            # already a writable stream
            stream = datasource
            try:
                filename = '({0})'.format(stream.name)  # maybe that does not always work?
            except AttributeError:
                filename = str(type(stream))
        else:
            stream = None
            filename = datasource
            # choose the compressor purely from the filename extension
            name, ext = os.path.splitext(filename)
            if ext.startswith(os.path.extsep):
                ext = ext[1:]
            if not ext in ('bz2', 'gz'):
                ext = ''  # anything else but bz2 or gz is just a normal file
            openfunc = handlers[ext]
            stream = openfunc(datasource, mode=mode, **kwargs)
            if stream is None:
                raise IOError("Cannot open {filename!r} in mode={mode!r} with type {ext!r}.".format(**vars()))
    else:
        raise NotImplementedError("Sorry, mode={mode!r} is not implemented for {datasource!r}".format(**vars()))
    return stream, filename
def _get_stream(filename, openfunction=file, mode='r'):
    # Probe whether *openfunction* can actually read *filename*: open it,
    # try one readline (a wrong decompressor fails here), then reopen so
    # the caller receives an unread stream.  Returns None on failure.
    # NOTE(review): default ``openfunction=file`` is Python-2 only.
    try:
        stream = openfunction(filename, mode=mode)
    except IOError:
        # e.g. file does not exist
        return None
    try:
        stream.readline()
        stream.close()
        # NOTE(review): reopens with literal 'r', not *mode*; presumably
        # this helper is only ever used for reading — confirm in anyopen().
        stream = openfunction(filename,'r')
    except IOError:
        stream.close()
        stream = None
    return stream
# TODO: make it work for non-default charge state amino acids.
#: translation table for 1-letter codes --> 3-letter codes
#: .. Note: This does not work for HISB and non-default charge state aa!
amino_acid_codes = {'A':'ALA', 'C':'CYS', 'D':'ASP', 'E':'GLU',
                    'F':'PHE', 'G':'GLY', 'H':'HIS', 'I':'ILE',
                    'K':'LYS', 'L':'LEU', 'M':'MET', 'N':'ASN',
                    'P':'PRO', 'Q':'GLN', 'R':'ARG', 'S':'SER',
                    'T':'THR', 'V':'VAL', 'W':'TRP', 'Y':'TYR'}
#: reverse mapping: 3-letter --> 1-letter codes
inverse_aa_codes = {three: one for one, three in amino_acid_codes.items()}

def convert_aa_code(x):
    """Convert between 3-letter and 1-letter amino acid codes.

    The lookup is case-insensitive; any other input length raises
    :exc:`ValueError`.
    """
    if len(x) == 1:
        return amino_acid_codes[x.upper()]
    if len(x) == 3:
        return inverse_aa_codes[x.upper()]
    raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, "
                     "not %r" % x)
@contextmanager
def in_dir(directory, create=True):
    """Context manager to execute a code block in a directory.

    * The directory is created if it does not exist (unless
      create=False is set)
    * At the end or after an exception code always returns to
      the directory that was the current directory before entering
      the block.

    Yields the absolute path of the working directory.
    """
    startdir = os.getcwd()
    try:
        try:
            os.chdir(directory)
            logger.debug("Working in {directory!r}...".format(**vars()))
        # NOTE: Python-2-only except syntax; this module predates Python 3.
        except OSError, err:
            # only "no such directory" is recoverable (by creating it)
            if create and err.errno == errno.ENOENT:
                os.makedirs(directory)
                os.chdir(directory)
                logger.info("Working in {directory!r} (newly created)...".format(**vars()))
            else:
                logger.exception("Failed to start working in {directory!r}.".format(**vars()))
                raise
        yield os.getcwd()
    finally:
        # always restore the previous working directory
        os.chdir(startdir)
def realpath(*args):
    """Join all *args* and return the real path, rooted at ``/``.

    Expands ``~`` and environment variables such as :envvar:`$HOME`.

    Returns ``None`` if any of the args is none.
    """
    if None in args:
        return None
    joined = os.path.join(*args)
    expanded = os.path.expandvars(os.path.expanduser(joined))
    return os.path.realpath(expanded)
def find_first(filename, suffices=None):
    """Find first *filename* with a suffix from *suffices*.

    :Arguments:
       *filename*
          base filename; this file name is checked first
       *suffices*
          list of suffices that are tried in turn on the root of
          *filename*; can contain the ext separator
          (:data:`os.path.extsep`) or not

    :Returns: The first match or ``None``.
    """
    # struct is not reliable as it depends on qscript so now we just try
    # everything...
    root, extension = os.path.splitext(filename)
    candidates = [extension]            # native name is tried first
    if suffices is not None:
        candidates.extend(withextsep(suffices))
    for ext in candidates:
        candidate = root + ext
        if os.path.exists(candidate):
            return candidate
    return None
def withextsep(extensions):
    """Return a list where every element starts with :data:`os.path.extsep`."""
    return [x if x.startswith(os.path.extsep) else os.path.extsep + x
            for x in asiterable(extensions)]
def find_files(directory, pattern):
    """Find files recursively under *directory*, matching *pattern* (generator).

    *pattern* is a UNIX-style glob pattern as used by :func:`fnmatch.fnmatch`.

    Recipe by Bruno Oliveira from
    http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
    """
    for dirpath, _dirnames, filenames in os.walk(directory):
        for name in filenames:
            if fnmatch.fnmatch(name, pattern):
                yield os.path.join(dirpath, name)
def which(program):
    """Determine full path of executable *program* on :envvar:`PATH`.

    (Jay at http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python)

    .. versionadded:: 0.5.1
    """
    def executable(fpath):
        # must be a regular file with the execute bit set
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    head, _tail = os.path.split(program)
    if head:
        # explicit path given: resolve and test it directly
        candidate = realpath(program)
        if executable(candidate):
            return candidate
    else:
        # bare command name: scan PATH entries in order
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory, program)
            if executable(candidate):
                return candidate
    return None
class FileUtils(object):
    """Mixin class to provide additional file-related capabilities."""

    #: Default extension for files read/written by this class.
    default_extension = None

    def _init_filename(self, filename=None, ext=None):
        """Initialize the current filename :attr:`FileUtils.real_filename` of the object.

        Bit of a hack.

        - The first invocation must have ``filename != None``; this will set a
          default filename with suffix :attr:`FileUtils.default_extension`
          unless another one was supplied.

        - Subsequent invocations either change the filename accordingly or
          ensure that the default filename is set with the proper suffix.
        """
        extension = ext or self.default_extension
        filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)
        #: Current full path of the object for reading and writing I/O.
        self.real_filename = os.path.realpath(filename)

    def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):
        """Supply a file name for the class object.

        Typical uses::

           fn = filename()             ---> <default_filename>
           fn = filename('name.ext')   ---> 'name'
           fn = filename(ext='pickle') ---> <default_filename>'.pickle'
           fn = filename('name.inp','pdf') --> 'name.pdf'
           fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'

        The returned filename is stripped of the extension
        (``use_my_ext=False``) and if provided, another extension is
        appended. Chooses a default if no filename is given.
        Raises a ``ValueError`` exception if no default file name is known.
        If ``set_default=True`` then the default filename is also set.
        ``use_my_ext=True`` lets the suffix of a provided filename take
        priority over a default ``ext`` tension.

        .. versionchanged:: 0.3.1
           An empty string as *ext* = "" will suppress appending an extension.
        """
        if filename is None:
            # no name given: fall back to the stored default (if any)
            if not hasattr(self,'_filename'):
                self._filename = None  # add attribute to class
            if self._filename:
                filename = self._filename
            else:
                raise ValueError("A file name is required because no default file name was defined.")
            my_ext = None
        else:
            # split off the caller-provided extension for later use
            filename, my_ext = os.path.splitext(filename)
            if set_default:  # replaces existing default file name
                self._filename = filename
        if my_ext and use_my_ext:
            # caller's own suffix wins over the requested *ext*
            ext = my_ext
        if ext is not None:
            if ext.startswith(os.extsep):
                ext = ext[1:]  # strip a dot to avoid annoying mistakes
            if ext != "":
                filename = filename + os.extsep + ext
        return filename

    def check_file_exists(self, filename, resolve='exception', force=None):
        """If a file exists then continue with the action specified in ``resolve``.

        ``resolve`` must be one of

        "ignore"
              always return ``False``
        "indicate"
              return ``True`` if it exists
        "warn"
              indicate and issue a :exc:`UserWarning`
        "exception"
              raise :exc:`IOError` if it exists

        Alternatively, set *force* for the following behaviour (which
        ignores *resolve*):

        ``True``
              same as *resolve* = "ignore" (will allow overwriting of files)
        ``False``
              same as *resolve* = "exception" (will prevent overwriting of files)
        ``None``
              ignored, do whatever *resolve* says
        """
        def _warn(x):
            # used for both 'warn' and 'warning'
            msg = "File {0!r} already exists.".format(x)
            logger.warn(msg)
            warnings.warn(msg)
            return True
        def _raise(x):
            # used for both 'exception' and 'raise'
            msg = "File {0!r} already exists.".format(x)
            logger.error(msg)
            raise IOError(errno.EEXIST, x, msg)
        solutions = {'ignore': lambda x: False,  # file exists, but we pretend that it doesn't
                     'indicate': lambda x: True,  # yes, file exists
                     'warn': _warn,
                     'warning': _warn,
                     'exception': _raise,
                     'raise': _raise,
                     }
        # an explicit True/False *force* overrides *resolve*
        if force is True:
            resolve = 'ignore'
        elif force is False:
            resolve = 'exception'
        if not os.path.isfile(filename):
            return False
        else:
            return solutions[resolve](filename)

    def infix_filename(self, name, default, infix, ext=None):
        """Unless *name* is provided, insert *infix* before the extension *ext* of *default*."""
        if name is None:
            p, oldext = os.path.splitext(default)
            if ext is None:
                ext = oldext
            if ext.startswith(os.extsep):
                ext = ext[1:]
            name = self.filename(p+infix, ext=ext)
        return name

    def __repr__(self):
        fmt = "{0!s}(filename=%r)".format(self.__class__.__name__)
        try:
            fn = self.filename()
        except ValueError:
            # no default filename has been set yet
            fn = None
        return fmt % fn
def iterable(obj):
    """Return ``True`` if *obj* can be iterated over and is *not* a string."""
    if isinstance(obj, basestring):
        # strings iterate over their characters, which is never wanted here
        return False
    if hasattr(obj, 'next'):
        # any (Python 2) iterator will do
        return True
    try:
        len(obj)  # anything else that might work
    except TypeError:
        return False
    else:
        return True
def asiterable(obj):
    """Return *obj* as an iterable; a string is *not* treated as iterable."""
    return obj if iterable(obj) else [obj]
def firstof(obj):
    """Return the first entry of a sequence or the obj.

    Treats strings as single objects.
    """
    sequence = asiterable(obj)
    return sequence[0]
# In utilities so that it can be safely used in tools, cbook, ...
def unlink_f(path):
    """Unlink path but do not complain if file does not exist."""
    try:
        os.unlink(path)
    # NOTE: Python-2-only except syntax; this module predates Python 3.
    except OSError, err:
        # ENOENT ("no such file") is deliberately ignored
        if err.errno != errno.ENOENT:
            raise
def unlink_gmx(*args):
    """Unlink (remove) Gromacs file(s) and all corresponding backups."""
    for filename in args:
        unlink_f(filename)
    unlink_gmx_backups(*args)
def unlink_gmx_backups(*args):
    """Unlink (rm) all backup files corresponding to the listed files."""
    for filename in args:
        directory, basename = os.path.split(filename)
        # Gromacs backups look like "#name.ext.1#"
        pattern = os.path.join(directory, '#' + basename + '.*#')
        for backup in glob.glob(pattern):
            unlink_f(backup)
def mkdir_p(path):
    """Create a directory *path* with subdirs but do not complain if it exists.

    This is like GNU ``mkdir -p path``.
    """
    try:
        os.makedirs(path)
    # NOTE: Python-2-only except syntax; this module predates Python 3.
    except OSError, err:
        # EEXIST ("already there") is fine; anything else is a real error
        if err.errno != errno.EEXIST:
            raise
def cat(f=None, o=None):
    """Concatenate files *f*=[...] and write to *o*.

    Silently does nothing when either argument is ``None``; raises
    :exc:`OSError` (EIO) when the external ``cat`` fails.
    """
    # need f, o to be compatible with trjcat and eneconv
    if f is None or o is None:
        return
    sources = asiterable(f)
    logger.debug("cat {0!s} > {1!s} ".format(" ".join(sources), o))
    with open(o, 'w') as destination:
        returncode = subprocess.call(['cat'] + sources, stdout=destination)
        if returncode != 0:
            msg = "failed with return code {0:d}: cat {1!r} > {2!r} ".format(returncode, " ".join(sources), o)
            logger.exception(msg)
            raise OSError(errno.EIO, msg, o)
# helpers for matplotlib
def activate_subplot(numPlot):
    """Make subplot *numPlot* active on the canvas.

    Use this if a simple ``subplot(numRows, numCols, numPlot)``
    overwrites the subplot instead of activating it.
    """
    # see http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg07156.html
    from pylab import gcf, axes
    numPlot -= 1  # index is 0-based, plots are 1-based
    # re-activating an existing Axes instance makes it current
    return axes(gcf().get_axes()[numPlot])
def remove_legend(ax=None):
    """Remove legend for axes or gca.

    See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html
    """
    from pylab import gca, draw
    if ax is None:
        ax = gca()
    # clearing the private attribute is the recipe from the link above
    ax.legend_ = None
    draw()
# time functions
class Timedelta(datetime.timedelta):
    """Extension of :class:`datetime.timedelta`.

    Provides attributes ddays, dhours, dminutes, dseconds to measure
    the delta in normal time units.

    ashours gives the total time in fractional hours.
    """
    # NOTE(review): the docstring mentions *ddays* but no such property is
    # defined (datetime.timedelta already exposes .days) — confirm intent.

    @property
    def dhours(self):
        """Hours component of the timedelta (integer)."""
        # // keeps components integral on Python 2 *and* 3; the previous
        # "/" became true division on Python 3, yielding floats and
        # breaking strftime()'s {0:02d} integer formatting.
        return self.seconds // 3600

    @property
    def dminutes(self):
        """Minutes component of the timedelta (integer)."""
        return self.seconds // 60 - 60 * self.dhours

    @property
    def dseconds(self):
        """Seconds component of the timedelta (integer)."""
        return self.seconds - 3600 * self.dhours - 60 * self.dminutes

    @property
    def ashours(self):
        """Timedelta in (fractional) hours."""
        return 24 * self.days + self.seconds / 3600.0

    def strftime(self, fmt="%d:%H:%M:%S"):
        """Primitive string formatter.

        The only directives understood are the following:
          ============   ==========================
          Directive      meaning
          ============   ==========================
          %d             day as integer
          %H             hour  [00-23]
          %h             hours including days
          %M             minute as integer [00-59]
          %S             second as integer [00-59]
          ============   ==========================
        """
        substitutions = {
            "%d": str(self.days),
            "%H": "{0:02d}".format(self.dhours),
            "%h": str(24 * self.days + self.dhours),
            "%M": "{0:02d}".format(self.dminutes),
            "%S": "{0:02d}".format(self.dseconds),
        }
        s = fmt
        for search, replacement in substitutions.items():
            s = s.replace(search, replacement)
        return s
NUMBERED_PDB = re.compile(r"(?P<PREFIX>.*\D)(?P<NUMBER>\d+)\.(?P<SUFFIX>pdb)")

def number_pdbs(*args, **kwargs):
    """Rename pdbs x1.pdb ... x345.pdb --> x0001.pdb ... x0345.pdb

    :Arguments:
       - *args*: filenames or glob patterns (such as "pdb/md*.pdb")
       - *format*: format string including keyword *num* ["%(num)04d"]

    Files that do not match :data:`NUMBERED_PDB` are skipped; failed
    renames are logged and ignored.
    """
    format = kwargs.pop('format', "%(num)04d")
    name_format = "%(prefix)s" + format + ".%(suffix)s"
    # Concatenate the matches of *all* patterns.  The previous
    # implementation used map(filenames.append, map(glob.glob, args)) and
    # then took filenames[0], which silently dropped every pattern after
    # the first (and, under Python 3's lazy map, appended nothing at all).
    filenames = []
    for pattern in args:
        filenames.extend(glob.glob(pattern))
    for f in filenames:
        m = NUMBERED_PDB.search(f)
        if m is None:
            continue
        num = int(m.group('NUMBER'))
        prefix = m.group('PREFIX')
        suffix = m.group('SUFFIX')
        newname = name_format % vars()
        logger.info("Renaming {f!r} --> {newname!r}".format(**vars()))
        try:
            os.rename(f, newname)
        except OSError:
            # best-effort: report and continue with the remaining files
            logger.exception("renaming failed")
| jandom/GromacsWrapper | gromacs/utilities.py | Python | gpl-3.0 | 22,872 | [
"Gromacs"
] | 938508dfd0c1703a0ba3ac0c4b554c28bcb752b57e17ab11a1a581fd6a5089d6 |
#!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(ini, res, ini_x, res_x):
    """Return the analytic cubic-smoothed hardening curve.

    The hardening value varies from *ini* at *ini_x* to *res* at *res_x*
    along a cubic with zero slope at both endpoints.

    :Returns: ``[xs, ys]`` — two 100-element tuples of sample abscissae
              and curve values.
    """
    lo2 = 0.5 * (res_x - ini_x)
    alpha = (ini - res) / 4.0 / lo2**3
    beta = -3.0 * alpha * lo2**2
    xs = [ini_x + i * (res_x - ini_x) / 100 for i in range(100)]
    data = [(x, alpha * (x - ini_x - lo2)**3 + beta * (x - ini_x - lo2) + (ini + res) / 2.0)
            for x in xs]
    # list(...) so the result stays indexable under Python 3 as well:
    # bare zip() there is a one-shot iterator and expect21[0] below would
    # raise TypeError.  (Python 2 zip already returned a list.)
    return list(zip(*data))
def moose(fn):
    """Extract (internal parameter, cohesion) series from a MOOSE CSV file *fn*.

    Skips the first four header lines and the final line; each remaining
    row must contain at least 8 comma-separated numbers, with column 2
    the internal parameter and columns 5 and 7 the stresses used in the
    Mohr-Coulomb cohesion expression (friction angle 30 degrees).
    """
    sinphi = np.sin(30.0 * np.pi / 180.0)
    cosphi = np.cos(30.0 * np.pi / 180.0)
    # ``with`` guarantees the file is closed; list(map(...)) materializes
    # each row — under Python 3 a bare map() is lazy and d[2] below would
    # fail on a map object.
    with open(fn) as f:
        data = [list(map(float, line.strip().split(",")))
                for line in f.readlines()[4:-1]]
    intnl = [d[2] for d in data]
    coh = [(0.5 * (d[5] - d[7]) + 0.5 * (d[5] + d[7]) * sinphi) / cosphi for d in data]
    return (intnl, coh)
# Plot the MOOSE results against the analytic expectation and save the figure.
plt.figure()
# analytic cubic hardening of cohesion from 10 to 20 over internal parameter 0..5E-6
expect21 = expected(10.0, 20.0, 0.0, 5E-6)
m21 = moose("gold/small_deform_hard21.csv")
plt.plot(expect21[0], expect21[1], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(m21[0], m21[1], 'k^', label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("internal parameter")
plt.ylabel("Cohesion")
# scientific notation on x since the internal parameter is O(1E-6)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.title("Cohesion hardening")
plt.savefig("figures/small_deform_hard_21.eps")
sys.exit(0)
| liuwenf/moose | modules/tensor_mechanics/test/tests/capped_mohr_coulomb/small_deform_hard_21.py | Python | lgpl-2.1 | 1,258 | [
"MOOSE"
] | 9f4fa1b7550f93dc63b0fa76960c056ec21d136231af9363fb45f955d07f223a |
#! /usr/bin/env python
"""Static analysis tool for checking docstring conventions and style.
Implemented checks cover PEP257:
http://www.python.org/dev/peps/pep-0257/
Other checks can be added, e.g. NumPy docstring conventions:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
The repository is located at:
http://github.com/GreenSteam/pep257
"""
from __future__ import with_statement
import os
import sys
import tokenize as tk
from itertools import takewhile, dropwhile, chain
from optparse import OptionParser
from re import compile as re
# StringIO moved in Python 3; prefer the Python 2 location.
try:
    from StringIO import StringIO
except ImportError:  # Python 3.0 and later
    from io import StringIO

# Provide a minimal next(iterator[, default]) for very old Pythons.
try:
    next
except NameError:  # Python 2.5 and earlier
    # sentinel distinguishing "no default supplied" from default=None
    nothing = object()

    def next(obj, default=nothing):
        if default == nothing:
            return obj.next()
        else:
            try:
                return obj.next()
            except StopIteration:
                return default

__version__ = '0.3.3-alpha'
__all__ = ('check', 'collect')
def humanize(string):
    """Split CamelCase *string* into space-separated words, lower-cased."""
    return re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower()


def is_magic(name):
    """Return True for dunder names such as ``__init__``."""
    return name.startswith('__') and name.endswith('__')


def is_ascii(string):
    """Return True when every character fits in 7-bit ASCII."""
    return all(ord(char) < 128 for char in string)


def is_blank(string):
    """Return True when *string* is empty or whitespace only."""
    return not string.strip()


def leading_space(string):
    """Return the (possibly empty) leading whitespace of *string*."""
    return re('\s*').match(string).group()
class Value(object):
    """Base for simple value objects initialized positionally from ``_fields``."""

    def __init__(self, *args):
        # bind each positional argument to the matching field name
        vars(self).update(zip(self._fields, args))

    def __hash__(self):
        return hash(repr(self))

    def __eq__(self, other):
        return other and vars(self) == vars(other)

    def __repr__(self):
        values = [vars(self)[field] for field in self._fields]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(map(repr, values)))
class Definition(Value):
    """A named definition (function/class/module) in the parsed tree."""

    _fields = 'name _source start end docstring children parent'.split()

    @property
    def _human(self):
        # e.g. NestedFunction -> "nested function"
        return humanize(type(self).__name__)

    @property
    def kind(self):
        return self._human.split()[-1]

    @property
    def module(self):
        return self.parent.module

    @property
    def all(self):
        return self.module.all

    @property
    def _slice(self):
        return slice(self.start - 1, self.end)

    @property
    def source(self):
        return ''.join(self._source[self._slice])

    def __iter__(self):
        # depth-first over self and all (recursively iterated) children
        return chain([self], *self.children)

    @property
    def _publicity(self):
        return {True: 'public', False: 'private'}[self.is_public]

    def __str__(self):
        return 'in %s %s `%s`' % (self._publicity, self._human, self.name)
class Module(Definition):
    """The root definition: a whole source file."""

    _fields = 'name _source start end docstring children parent _all'.split()
    is_public = True

    @staticmethod
    def _nest(kind):
        # which Definition subclass a nested def/class becomes
        return {'def': Function, 'class': Class}[kind]

    @property
    def module(self):
        return self

    @property
    def all(self):
        return self._all

    def __str__(self):
        return 'at module level'
class Function(Definition):
    """A module-level function definition."""

    @staticmethod
    def _nest(kind):
        # definitions nested directly inside a function are private
        return {'def': NestedFunction, 'class': NestedClass}[kind]

    @property
    def is_public(self):
        if self.all is not None:
            return self.name in self.all
        else:  # TODO: are there any magic functions? not methods
            return not self.name.startswith('_') or is_magic(self.name)
class NestedFunction(Function):
    # a function defined inside another function is never public API
    is_public = False
class Method(Function):
    """A function bound to a class."""

    @property
    def is_public(self):
        # public when neither the method name nor the owning class is
        # private (magic names such as __init__ count as public)
        name_is_public = not self.name.startswith('_') or is_magic(self.name)
        return self.parent.is_public and name_is_public
class Class(Definition):
    # nested definitions inside a class body are methods / nested classes
    _nest = staticmethod(lambda s: {'def': Method, 'class': NestedClass}[s])
    # same publicity rule as functions (reuses the property object)
    is_public = Function.is_public
class NestedClass(Class):
    # a class defined inside another definition is never public API
    is_public = False
class Token(Value):
    # one lexer token as produced by tokenize.generate_tokens()
    _fields = 'kind value start end source'.split()
class TokenStream(object):
    """One-token lookahead over tokenize output.

    ``current`` holds the upcoming token (``None`` once exhausted) and
    ``line`` tracks the most recently seen line number.
    """

    def __init__(self, filelike):
        self._generator = tk.generate_tokens(filelike.readline)
        self.current = Token(*next(self._generator, None))
        self.line = self.current.start[0]

    def move(self):
        """Advance by one token and return the token that was current."""
        previous = self.current
        raw = next(self._generator, None)
        self.current = Token(*raw) if raw is not None else None
        if self.current is not None:
            self.line = self.current.start[0]
        return previous

    def __iter__(self):
        while self.current is not None:
            yield self.current
            self.move()
class AllError(Exception):
    """Raised when the contents of ``__all__`` cannot be determined."""

    def __init__(self, message):
        advice = ('That means pep257 cannot decide which definitions are public. '
                  'Variable __all__ should be present at most once in each file, '
                  "in form `__all__ = ('a_public_function', 'APublicClass', ...)`. "
                  'More info on __all__: http://stackoverflow.com/q/44834/. ')
        Exception.__init__(self, message + advice)
class Parser(object):
    """Recursive-descent parser over a token stream, producing Module trees."""

    def __call__(self, filelike, filename):
        """Parse *filelike* and return the root :class:`Module`."""
        self.source = filelike.readlines()
        src = ''.join(self.source)
        self.stream = TokenStream(StringIO(src))
        self.filename = filename
        self.all = None  # filled in by parse_all() when __all__ is found
        return self.parse_module()

    # convenience views onto the underlying token stream
    current = property(lambda self: self.stream.current)
    line = property(lambda self: self.stream.line)

    def consume(self, kind):
        # advance one token, asserting its kind
        assert self.stream.move().kind == kind

    def leapfrog(self, kind):
        # skip ahead until one token of *kind* has been consumed
        for token in self.stream:
            if token.kind == kind:
                self.consume(kind)
                return

    def parse_docstring(self):
        # the first STRING token (skipping comments/newlines) is the docstring
        for token in self.stream:
            if token.kind in [tk.COMMENT, tk.NEWLINE, tk.NL]:
                continue
            elif token.kind == tk.STRING:
                return token.value
            else:
                return None

    def parse_definitions(self, class_, all=False):
        # yield nested definitions until the enclosing suite dedents
        for token in self.stream:
            if all and token.value == '__all__':
                self.parse_all()
            if token.value in ['def', 'class']:
                yield self.parse_definition(class_._nest(token.value))
            if token.kind == tk.INDENT:
                # recurse into the indented suite
                self.consume(tk.INDENT)
                for definition in self.parse_definitions(class_):
                    yield definition
            if token.kind == tk.DEDENT:
                return

    def parse_all(self):
        """Parse the right-hand side of an ``__all__`` assignment."""
        assert self.current.value == '__all__'
        self.consume(tk.NAME)
        if self.current.value != '=':
            raise AllError('Could not evaluate contents of __all__. ')
        self.consume(tk.OP)
        if self.current.value not in '([':
            raise AllError('Could not evaluate contents of __all__. ')
        if self.current.value == '[':
            # lists are mutable, so their contents cannot be trusted
            msg = ("%s WARNING: __all__ is defined as a list, this means "
                   "pep257 cannot reliably detect contents of the __all__ "
                   "variable, because it can be mutated. Change __all__ to be "
                   "an (immutable) tuple, to remove this warning. Note, "
                   "pep257 uses __all__ to detect which definitions are "
                   "public, to warn if public definitions are missing "
                   "docstrings. If __all__ is a (mutable) list, pep257 cannot "
                   "reliably assume its contents. pep257 will proceed "
                   "assuming __all__ is not mutated.\n" % self.filename)
            sys.stderr.write(msg)
        self.consume(tk.OP)
        # rebuild the tuple literal from the raw string tokens
        s = '('
        if self.current.kind != tk.STRING:
            raise AllError('Could not evaluate contents of __all__. ')
        while self.current.value not in ')]':
            s += self.current.value
            self.stream.move()
        s += ')'
        try:
            # NOTE(review): eval of file content; only string tokens were
            # collected above, which limits (but does not eliminate) risk.
            self.all = eval(s, {})
        except BaseException:
            raise AllError('Could not evaluate contents of __all__: %s. ' % s)

    def parse_module(self):
        """Parse the whole stream into the root Module definition."""
        start = self.line
        docstring = self.parse_docstring()
        children = list(self.parse_definitions(Module, all=True))
        assert self.current is None
        end = self.line
        module = Module(self.filename, self.source, start, end,
                        docstring, children, None, self.all)
        for child in module.children:
            child.parent = module
        return module

    def parse_definition(self, class_):
        """Parse one def/class statement (header, docstring, body)."""
        start = self.line
        self.consume(tk.NAME)
        name = self.current.value
        self.leapfrog(tk.INDENT)
        assert self.current.kind != tk.INDENT
        docstring = self.parse_docstring()
        children = list(self.parse_definitions(class_))
        assert self.current.kind == tk.DEDENT
        end = self.line - 1
        definition = class_(name, self.source, start, end,
                            docstring, children, None)
        for child in definition.children:
            child.parent = definition
        return definition
class Error(object):
    """Error in docstring style."""

    # Options that define how errors are printed:
    explain = False
    source = False

    def __init__(self, message=None, final=False):
        self.message, self.is_final = message, final
        self.definition, self.explanation = [None, None]

    # 'D1xx: text' -> 'D1xx'
    code = property(lambda self: self.message.partition(':')[0])
    filename = property(lambda self: self.definition.module.name)
    line = property(lambda self: self.definition.start)

    @property
    def lines(self):
        """Return the offending source lines, numbered and truncated."""
        source = ''
        lines = self.definition._source[self.definition._slice]
        offset = self.definition.start
        # drop trailing blank lines of the definition
        lines_stripped = list(reversed(list(dropwhile(is_blank,
                                                      reversed(lines)))))
        # Fixed gutter width.  (The original first computed a width from
        # the line numbers and then unconditionally overwrote it with 6;
        # that dead computation has been removed.)
        numbers_width = 6
        for n, line in enumerate(lines_stripped):
            source += '%*d: %s' % (numbers_width, n + offset, line)
            if n > 5:
                source += ' ...\n'
                break
        return source

    def __str__(self):
        # strip blank lines from the explanation before formatting
        self.explanation = '\n'.join(l for l in self.explanation.split('\n')
                                     if not is_blank(l))
        template = '%(filename)s:%(line)s %(definition)s:\n %(message)s'
        if self.source and self.explain:
            template += '\n\n%(explanation)s\n\n%(lines)s\n'
        elif self.source and not self.explain:
            template += '\n\n%(lines)s\n'
        elif self.explain and not self.source:
            template += '\n\n%(explanation)s\n\n'
        return template % dict((name, getattr(self, name)) for name in
                               ['filename', 'line', 'definition', 'message',
                                'explanation', 'lines'])

    __repr__ = __str__

    def __lt__(self, other):
        return (self.filename, self.line) < (other.filename, other.line)
def parse_options():
    """Build the command-line interface and parse ``sys.argv``."""
    parser = OptionParser(version=__version__,
                          usage='Usage: pep257 [options] [<file|dir>...]')
    parser.add_option('-e', '--explain', action='store_true',
                      help='show explanation of each error')
    parser.add_option('-s', '--source', action='store_true',
                      help='show source for each error')
    parser.add_option('--ignore', metavar='<codes>', default='',
                      help='ignore a list comma-separated error codes, '
                           'for example: --ignore=D101,D202')
    parser.add_option('--match', metavar='<pattern>', default='(?!test_).*\.py',
                      help="check only files that exactly match <pattern> regular "
                           "expression; default is --match='(?!test_).*\.py' which "
                           "matches files that don't start with 'test_' but end with "
                           "'.py'")
    parser.add_option('--match-dir', metavar='<pattern>', default='[^\.].*',
                      help="search only dirs that exactly match <pattern> regular "
                           "expression; default is --match-dir='[^\.].*', which matches "
                           "all dirs that don't start with a dot")
    return parser.parse_args()
def collect(names, match=lambda name: True, match_dir=lambda name: True):
    """Walk dir trees under `names` and generate filenames that `match`.

    Non-directory entries in *names* are yielded unchanged; directories
    are walked recursively, descending only into subdirectories accepted
    by *match_dir* and yielding files accepted by *match*.
    """
    for name in names:  # map(expanduser, names):
        if os.path.isdir(name):
            for root, dirs, filenames in os.walk(name):
                # Prune rejected directories in place so os.walk skips
                # them.  (The original removed entries from `dirs` while
                # iterating it, which skips the element following each
                # removal and lets rejected directories through.)
                dirs[:] = [dir for dir in dirs if match_dir(dir)]
                for filename in filenames:
                    if match(filename):
                        yield os.path.join(root, filename)
        else:
            yield name
def check(filenames, ignore=()):
    """Generate PEP 257 errors that exist in `filenames` iterable.

    Skips errors with error-codes defined in `ignore` iterable; I/O and
    ``__all__`` problems are yielded as exception objects, and tokenizer
    failures as :exc:`SyntaxError`.
    """
    for filename in filenames:
        try:
            with open(filename) as fobj:
                source = fobj.read()
                for error in PEP257Checker().check_source(source, filename):
                    code = getattr(error, 'code', None)
                    if code is not None and code not in ignore:
                        yield error
        except (EnvironmentError, AllError):
            yield sys.exc_info()[1]
        except tk.TokenError:
            yield SyntaxError('invalid syntax in file %s' % filename)
def main(options, arguments):
    """Run pep257 over *arguments*, print errors to stderr, return exit code."""
    Error.explain = options.explain
    Error.source = options.source
    collected = collect(arguments or ['.'],
                        match=re(options.match + '$').match,
                        match_dir=re(options.match_dir + '$').match)
    exit_code = 0
    for error in check(collected, ignore=options.ignore.split(',')):
        sys.stderr.write('%s\n' % error)
        exit_code = 1
    return exit_code
# module-level singleton parser used by PEP257Checker.check_source
parse = Parser()
def check_for(kind, terminal=False):
    """Decorator factory: tag a check with the Definition type it applies to.

    *terminal* marks checks that stop further checking of the definition.
    """
    def decorator(check):
        check._check_for = kind
        check._terminal = terminal
        return check
    return decorator
class PEP257Checker(object):
"""Checker for PEP 257.
D10x: Missing docstrings
D20x: Whitespace issues
D30x: Docstring formatting
D40x: Docstring content issues
"""
def check_source(self, source, filename):
module = parse(StringIO(source), filename)
for definition in module:
for check in self.checks:
terminate = False
if isinstance(definition, check._check_for):
error = check(None, definition, definition.docstring)
errors = error if hasattr(error, '__iter__') else [error]
for error in errors:
if error is not None:
partition = check.__doc__.partition('.\n')
message, _, explanation = partition
if error.message is None:
error.message = message
error.explanation = explanation
error.definition = definition
yield error
if check._terminal:
terminate = True
break
if terminate:
break
@property
def checks(self):
all = [check for check in vars(type(self)).values()
if hasattr(check, '_check_for')]
return sorted(all, key=lambda check: not check._terminal)
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
"""D10{0,1,2,3}: Public definitions should have docstrings.
All modules should normally have docstrings. [...] all functions and
classes exported by a module should also have docstrings. Public
methods (including the __init__ constructor) should also have
docstrings.
Note: Public (exported) definitions are either those with names listed
in __all__ variable (if present), or those that do not start
with a single underscore.
"""
if (not docstring and definition.is_public or
docstring and is_blank(eval(docstring))):
codes = {Module: 'D100', Class: 'D101', NestedClass: 'D101',
Method: 'D102', Function: 'D103', NestedFunction: 'D103'}
return Error('%s: Docstring missing' % codes[type(definition)])
@check_for(Definition)
def check_one_liners(self, definition, docstring):
"""D200: One-liner docstrings should fit on one line with quotes.
The closing quotes are on the same line as the opening quotes.
This looks better for one-liners.
"""
if docstring:
lines = eval(docstring).split('\n')
if len(lines) > 1:
non_empty_lines = sum(1 for l in lines if not is_blank(l))
if non_empty_lines == 1:
return Error('D200: One-line docstring should not occupy '
'%s lines' % len(lines))
@check_for(Function)
def check_no_blank_before(self, function, docstring): # def
"""D20{1,2}: No blank lines allowed around function/method docstring.
There's no blank line either before or after the docstring.
"""
# NOTE: This does not take comments into account.
# NOTE: This does not take into account functions with groups of code.
if docstring:
before, _, after = function.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield Error('D201: No blank lines allowed *before* %s '
'docstring, found %s'
% (function.kind, blanks_before_count))
if not all(blanks_after) and blanks_after_count != 0:
yield Error('D202: No blank lines allowed *after* %s '
'docstring, found %s'
% (function.kind, blanks_after_count))
@check_for(Class)
def check_blank_before_after_class(slef, class_, docstring):
"""D20{3,4}: Class docstring should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
"""
# NOTE: this gives flase-positive in this case
# class Foo:
#
# """Docstring."""
#
#
# # comment here
# def foo(): pass
if docstring:
before, _, after = class_.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 1:
yield Error('D203: Expected 1 blank line *before* class '
'docstring, found %s' % blanks_before_count)
if not all(blanks_after) and blanks_after_count != 1:
yield Error('D204: Expected 1 blank line *after* class '
'docstring, found %s' % blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
"""D205: Blank line missing between one-line summary and description.
Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description. The summary line may be used by automatic indexing tools;
it is important that it fits on one line and is separated from the
rest of the docstring by a blank line.
"""
if docstring:
lines = eval(docstring).strip().split('\n')
if len(lines) > 1 and not is_blank(lines[1]):
return Error()
    @check_for(Definition)
    def check_indent(self, definition, docstring):
        """D20{6,7,8}: The entire docstring should be indented same as code.

        The entire docstring is indented the same as the quotes at its
        first line.
        """
        if docstring:
            # The reference indent is whatever whitespace follows the last
            # newline before the docstring's opening quotes.
            before_docstring, _, _ = definition.source.partition(docstring)
            _, _, indent = before_docstring.rpartition('\n')
            lines = docstring.split('\n')
            if len(lines) > 1:
                lines = lines[1:]  # First line does not need indent.
                # Indents of all non-blank continuation lines.
                indents = [leading_space(l) for l in lines if not is_blank(l)]
                # Mixed tabs and spaces across docstring + code indent is
                # reported before over/under-indentation checks.
                if set(' \t') == set(''.join(indents) + indent):
                    return Error('D206: Docstring indented with both tabs and '
                                 'spaces')
                # The closing-quotes line (last entry) is judged separately
                # from the body lines.
                if (len(indents) > 1 and min(indents[:-1]) > indent
                        or indents[-1] > indent):
                    return Error('D208: Docstring is over-indented')
                if min(indents) < indent:
                    return Error('D207: Docstring is under-indented')
@check_for(Definition)
def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [l for l in eval(docstring).split('\n') if not is_blank(l)]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return Error('D209: Put multi-line docstring closing '
'quotes on separate line')
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
r'''D300: Use """triple double quotes""".
For consistency, always use """triple double quotes""" around
docstrings. Use r"""raw triple double quotes""" if you use any
backslashes in your docstrings. For Unicode docstrings, use
u"""Unicode triple-quoted strings""".
Note: Exception to this is made if the docstring contains
""" quotes in its body.
'''
if docstring and '"""' in eval(docstring) and docstring.startswith(
("'''", "r'''", "u'''")):
# Allow ''' quotes if docstring contains """, because otherwise """
# quotes could not be expressed inside docstring. Not in PEP 257.
return
if docstring and not docstring.startswith(('"""', 'r"""', 'u"""')):
quotes = "'''" if "'''" in docstring[:4] else "'"
return Error('D300: Expected """-quotes, got %s-quotes' % quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
r'''D301: Use r""" if any backslashes in a docstring.
Use r"""raw triple double quotes""" if you use any backslashes
(\) in your docstrings.
'''
# Just check that docstring is raw, check_triple_double_quotes
# ensures the correct quotes.
if docstring and '\\' in docstring and not docstring.startswith('r'):
return Error()
@check_for(Definition)
def check_unicode_docstring(self, definition, docstring):
r'''D302: Use u""" for docstrings with Unicode.
For Unicode docstrings, use u"""Unicode triple-quoted strings""".
'''
# Just check that docstring is unicode, check_triple_double_quotes
# ensures the correct quotes.
if docstring and sys.version_info[0] <= 2:
if not is_ascii(docstring) and not docstring.startswith('u'):
return Error()
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
"""D400: First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
if docstring:
summary_line = eval(docstring).strip().split('\n')[0]
if not summary_line.endswith('.'):
return Error("D400: First line should end with '.', not %r"
% summary_line[-1])
@check_for(Function)
def check_imperative_mood(self, function, docstring): # def context
"""D401: First line should be in imperative mood: 'Do', not 'Does'.
[Docstring] prescribes the function or method's effect as a command:
("Do this", "Return that"), not as a description; e.g. don't write
"Returns the pathname ...".
"""
if docstring:
stripped = eval(docstring).strip()
if stripped:
first_word = stripped.split()[0]
if first_word.endswith('s') and not first_word.endswith('ss'):
return Error('D401: First line should be imperative: '
'%r, not %r' % (first_word[:-1], first_word))
@check_for(Function)
def check_no_signature(self, function, docstring): # def context
"""D402: First line should not be function's or method's "signature".
The one-line docstring should NOT be a "signature" reiterating the
function/method parameters (which can be obtained by introspection).
"""
if docstring:
first_line = eval(docstring).strip().split('\n')[0]
if function.name + '(' in first_line.replace(' ', ''):
return Error("D402: First line should not be %s's signature"
% function.kind)
# Somewhat hard to determine if return value is mentioned.
# @check(Function)
def SKIP_check_return_type(self, function, docstring):
"""D40x: Return value type should be mentioned.
[T]he nature of the return value cannot be determined by
introspection, so it should be mentioned.
"""
if docstring and function.returns_value:
if 'return' not in docstring.lower():
return Error()
if __name__ == '__main__':
    # Ctrl-C should terminate quietly, without a traceback.
    try:
        exit_status = main(*parse_options())
    except KeyboardInterrupt:
        pass
    else:
        sys.exit(exit_status)
| wvangeit/python-mode | pymode/libs/pylama/lint/pylama_pep257/pep257.py | Python | lgpl-3.0 | 27,195 | [
"VisIt"
] | 955230a3588951e3b3100756bd40e3f41cec51aad103bbfaa6e533f4eb18d449 |
import numpy as np
import pylab as pl
import netCDF4 as nc4
pl.close('all')
# Create stretched grid
class Grid:
    """Stretched vertical grid for the LES case.

    Grid spacing transitions smoothly (via tanh) from `dz1` below the
    transition point to `dz2` above it.

    Parameters
    ----------
    kmax : int
        Number of grid levels.
    nloc1 : float
        Transition location, in grid-index units (internally scaled by 1/kmax).
    nbuf1 : float
        Transition width, in grid-index units.
    dz1, dz2 : float
        Grid spacing (m) below / above the transition.

    Attributes
    ----------
    dz : ndarray
        Grid spacing per level.
    z : ndarray
        Full-level (cell-center) heights.
    zsize : float
        Domain-top height (was a dead local; now exposed).
    """

    def __init__(self, kmax, nloc1, nbuf1, dz1, dz2):
        self.kmax = kmax
        dn = 1. / kmax
        n = np.linspace(dn, 1. - dn, kmax)

        # Express the transition location/width in the normalized
        # coordinate n in [0, 1].
        nloc1 *= dn
        nbuf1 *= dn

        # Blend the two spacings with a tanh profile in dz/dn space.
        dzdn1 = dz1 / dn
        dzdn2 = dz2 / dn
        dzdn = dzdn1 + 0.5 * (dzdn2 - dzdn1) * (1. + np.tanh((n - nloc1) / nbuf1))
        self.dz = dzdn * dn

        # Integrate the spacings to full-level heights (cell centers).
        self.z = np.zeros(self.dz.size)
        stretch = np.zeros(self.dz.size)  # per-level stretch factor (diagnostic)
        self.z[0] = 0.5 * self.dz[0]
        stretch[0] = 1.
        for k in range(1, self.kmax):
            self.z[k] = self.z[k - 1] + 0.5 * (self.dz[k - 1] + self.dz[k])
            stretch[k] = self.dz[k] / self.dz[k - 1]

        # Domain top; previously computed into an unused local `zsize`
        # (only read by a commented-out print), now exposed as an attribute.
        self.zsize = self.z[kmax - 1] + 0.5 * self.dz[kmax - 1]
# Read the stage 3 driver file
class read_driver:
    """Read the GABLS4 stage-3 driver file (SCM_LES_STAGE3.nc).

    Profile variables are flipped with [::-1] so the surface comes first,
    matching the LES grid orientation.
    """

    def __init__(self):
        nc = nc4.Dataset('SCM_LES_STAGE3.nc', 'r')
        self.t = nc.variables['time'][:]
        self.z = nc.variables['height'][:][::-1]
        self.p = nc.variables['pf'][:][::-1]

        # initial profiles
        self.th = nc.variables['theta'][:][::-1]
        self.T = nc.variables['t'][:][::-1]
        self.q = nc.variables['qv'][:][::-1] # = zero
        self.u = nc.variables['u'][:][::-1]
        self.v = nc.variables['v'][:][::-1]

        # time varying forcings
        self.ug = nc.variables['Ug'][0,:][::-1] # u geo wind = constant in time
        self.vg = nc.variables['Vg'][0,:][::-1] # v geo wind = consant in time
        self.advT = nc.variables['hadvT'][0,:][::-1] # = zero
        self.advq = nc.variables['hadvQ'][0,:][::-1] # = zero
        self.Ts = nc.variables['Tg'][:]
        self.ps = nc.variables['psurf'].getValue()
        self.z0m = nc.variables['z0m'].getValue()

        # Fix: close the dataset (previously leaked).  All values above are
        # materialized copies, so closing here is safe.
        nc.close()

        # Calculate theta_s (Exner conversion of surface temperature)
        self.ths = self.Ts / (self.ps / 1.e5)**(287.04/1005.)
# Case name and NetCDF float type ('f8' = double precision).
outname = 'gabls4s3'
float_type = 'f8'

# Read the GABLS4 stage-3 driver data.
s3 = read_driver()

# Large domain (~1 km high):
g20l = Grid(288, 250, 20, 2, 12) # dz = 2 m
g10l = Grid(512, 470, 30, 1, 12) # dz = 1 m

# Restart domain (~200 m high):
g20s = Grid(128, 65, 10, 2.0, 8) # dz = 2 m
g10s = Grid(192, 135, 20, 1.0, 8) # dz = 1 m
g05s = Grid(320, 245, 30, 0.5, 8) # dz = 0.5 m
g02s = Grid(512, 440, 40, 0.25, 8) # dz = 0.25 m
g02s2 = Grid(480, 410, 30, 0.25, 8) # dz = 0.25 m # mistral

# Switch between vertical grids:
grid = g20l

# Interpolate GABLS4 data to LES grid
th = np.interp(grid.z, s3.z, s3.th)
u = np.interp(grid.z, s3.z, s3.u)
v = np.interp(grid.z, s3.z, s3.v)
ug = np.interp(grid.z, s3.z, s3.ug)
vg = np.interp(grid.z, s3.z, s3.vg)

# Save all the input data to NetCDF
# NOTE(review): `datamodel` looks like it should be `format` in
# netCDF4.Dataset, and with clobber=False this call fails if the output file
# already exists -- confirm against the installed netCDF4 version.
nc_file = nc4.Dataset('gabls4s3_input.nc', mode='w', datamodel='NETCDF4', clobber=False)

nc_file.createDimension('z', grid.kmax)
nc_z = nc_file.createVariable('z', float_type, ('z'))
nc_z[:] = grid.z[:]

# Create a group called 'init' for the initial profiles.
nc_group_init = nc_file.createGroup('init')
nc_th = nc_group_init.createVariable('th', float_type, ('z'))
nc_u = nc_group_init.createVariable('u', float_type, ('z'))
nc_v = nc_group_init.createVariable('v', float_type, ('z'))
nc_ug = nc_group_init.createVariable('u_geo', float_type, ('z'))
nc_vg = nc_group_init.createVariable('v_geo', float_type, ('z'))

nc_th[:] = th[:]
nc_u [:] = u [:]
nc_v [:] = v [:]
nc_ug[:] = ug[:]
nc_vg[:] = vg[:]

# Create a group called 'timedep' for the time-dependent surface forcing.
nc_group_timedep = nc_file.createGroup('timedep')
nc_group_timedep.createDimension('time_surface', s3.t.size)
nc_time_surface = nc_group_timedep.createVariable('time_surface', float_type, ('time_surface'))
nc_th_sbot = nc_group_timedep.createVariable('th_sbot', float_type, ('time_surface'))

nc_time_surface[:] = s3.t[:]
nc_th_sbot[:] = s3.ths[:]

nc_file.close()
| microhh/microhh2 | cases/gabls4s3/gabls4s3_input.py | Python | gpl-3.0 | 3,909 | [
"NetCDF"
] | 796626338fc59a9be405e678aa33457433d35e79e549e6c7d801cbac057ada39 |
"""
========================================
Ammonia inversion transition TROT fitter
========================================
Ammonia inversion transition TROT fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Module API
^^^^^^^^^^
"""
from __future__ import division
import numpy as np
from ...mpfit import mpfit
from ...spectrum.parinfo import ParinfoList,Parinfo
from . import fitter
from . import model
import matplotlib.cbook as mpcb
import copy
from astropy import log
from astropy.extern.six import iteritems
from . import mpfit_messages
import operator
import string
import warnings
from .ammonia_constants import (line_names, freq_dict, aval_dict, ortho_dict,
voff_lines_dict, tau_wts_dict)
TCMB = 2.7315 # K
def ammonia(xarr, trot=20, tex=None, ntot=14, width=1, xoff_v=0.0,
            fortho=0.0, tau=None, fillingfraction=None, return_tau=False,
            background_tb=TCMB,
            verbose=False, return_components=False, debug=False,
            line_names=line_names):
    """
    Generate a model Ammonia spectrum based on input temperatures, column, and
    gaussian parameters

    Parameters
    ----------
    xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
        Array of wavelength/frequency values
    trot: float
        The rotational temperature of the lines.  This is the excitation
        temperature that governs the relative populations of the rotational
        states.
    tex: float or None
        Excitation temperature. Assumed LTE if unspecified (``None``) or if
        tex>trot.  This is the excitation temperature for *all* of the modeled
        lines, which means we are explicitly assuming T_ex is the same for all
        lines.
    ntot: float
        Total log column density of NH3.  Can be specified as a float in the
        range 5-25
    width: float
        Line width in km/s
    xoff_v: float
        Line offset in km/s
    fortho: float
        Fraction of NH3 molecules in ortho state.  Default assumes all para
        (fortho=0).
    tau: None or float
        If tau (optical depth in the 1-1 line) is specified, ntot is NOT fit
        but is set to a fixed value.  The optical depths of the other lines are
        fixed relative to tau_oneone
    fillingfraction: None or float
        fillingfraction is an arbitrary scaling factor to apply to the model
    return_tau: bool
        Return a dictionary of the optical depths in each line instead of a
        synthetic spectrum
    return_components: bool
        Return a list of arrays, one for each hyperfine component, instead of
        just one array
    background_tb : float
        The background brightness temperature.  Defaults to TCMB.
    verbose: bool
        More messages
    debug: bool
        For debugging.

    Returns
    -------
    spectrum: `numpy.ndarray`
        Synthetic spectrum with same shape as ``xarr``
    component_list: list
        List of `numpy.ndarray`'s, one for each hyperfine component
    tau_dict: dict
        Dictionary of optical depth values for the various lines
        (if ``return_tau`` is set)
    """
    from .ammonia_constants import (ckms, ccms, h, kb,
                                    Jortho, Jpara, Brot, Crot)

    # Convert X-units to frequency in GHz
    if xarr.unit.to_string() != 'GHz':
        xarr = xarr.as_unit('GHz')

    if tex is None:
        log.warning("Assuming tex=trot")
        tex = trot
    elif isinstance(tex, dict):
        # Per-line excitation temperatures: every key must be a known line,
        # and only those lines are modeled.
        for k in tex:
            assert k in line_names,"{0} not in line list".format(k)
        line_names = tex.keys()
    elif tex > trot:
        warnings.warn("tex > trot in the ammonia model. "
                      "This is unphysical and "
                      "suggests that you may need to constrain tex. See "
                      "ammonia_model_restricted_tex.")

    from .ammonia_constants import line_name_indices, line_names as original_line_names

    # recreate line_names keeping only lines with a specified tex
    # using this loop instead of tex.keys() preserves the order & data type
    line_names = [k for k in original_line_names if k in line_names]

    if 5 <= ntot <= 25:
        # allow ntot to be specified as a logarithm.  This is
        # safe because ntot < 1e10 gives a spectrum of all zeros, and the
        # plausible range of columns is not outside the specified range
        lin_ntot = 10**ntot
    else:
        raise ValueError("ntot, the logarithmic total column density,"
                         " must be in the range 5 - 25")

    tau_dict = {}

    """
    Column density is the free parameter.  It is used in conjunction with
    the full partition function to compute the optical depth in each band
    """
    # Boltzmann populations of each rotational state (ortho states carry an
    # extra spin degeneracy factor of 2).
    Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
                                   (Crot-Brot)*Jpara**2)/(kb*trot))
    Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
                                       (Crot-Brot)*Jortho**2)/(kb*trot))
    Qpara = Zpara.sum()
    Qortho = Zortho.sum()

    log.debug("Partition Function: Q_ortho={0}, Q_para={1}".format(Qortho, Qpara))

    for linename in line_names:
        if ortho_dict[linename]:
            # define variable "ortho_or_para_frac" that will be the ortho
            # fraction in the case of an ortho transition or the para
            # fraction for a para transition
            ortho_or_parafrac = fortho
            Z = Zortho
            Qtot = Qortho
        else:
            ortho_or_parafrac = 1.0-fortho
            Z = Zpara
            Qtot = Qpara

        # for a complete discussion of these equations, please see
        # https://github.com/keflavich/pyspeckit/blob/ammonia_equations/examples/AmmoniaLevelPopulation.ipynb
        # and
        # http://low-sky.github.io/ammoniacolumn/
        # and
        # https://github.com/pyspeckit/pyspeckit/pull/136

        # short variable names for readability
        frq = freq_dict[linename]
        partition = Z[line_name_indices[linename]]
        aval = aval_dict[linename]

        # Total population of the higher energy inversion transition
        population_rotstate = lin_ntot * ortho_or_parafrac * partition/Qtot

        # Stimulated-emission correction factor; with a per-line tex dict,
        # each line uses its own excitation temperature.
        if isinstance(tex, dict):
            expterm = ((1-np.exp(-h*frq/(kb*tex[linename]))) /
                       (1+np.exp(-h*frq/(kb*tex[linename]))))
        else:
            expterm = ((1-np.exp(-h*frq/(kb*tex))) /
                       (1+np.exp(-h*frq/(kb*tex))))
        fracterm = (ccms**2 * aval / (8*np.pi*frq**2))
        widthterm = (ckms/(width*frq*(2*np.pi)**0.5))

        tau_i = population_rotstate * fracterm * expterm * widthterm
        tau_dict[linename] = tau_i

        log.debug("Line {0}: tau={1}, expterm={2}, pop={3},"
                  " partition={4}"
                  .format(linename, tau_i, expterm, population_rotstate,
                          partition))

    # allow tau(11) to be specified instead of ntot
    # in the thin case, this is not needed: ntot plays no role
    # this process allows you to specify tau without using the approximate equations specified
    # above. It should remove ntot from the calculations anyway...
    if tau is not None:
        tau11_temp = tau_dict['oneone']
        # re-scale all optical depths so that tau is as specified, but the relative taus
        # are set by the kinetic temperature and partition functions
        for linename,t in iteritems(tau_dict):
            tau_dict[linename] = t * tau/tau11_temp

    if return_tau:
        return tau_dict

    model_spectrum = _ammonia_spectrum(xarr, tex, tau_dict, width, xoff_v,
                                       fortho, line_names,
                                       background_tb=background_tb,
                                       fillingfraction=fillingfraction,
                                       return_components=return_components)

    # A negative model against the CMB background indicates a bad parameter
    # combination (absorption below the background is impossible there).
    if model_spectrum.min() < 0 and background_tb == TCMB:
        raise ValueError("Model dropped below zero. That is not possible "
                         " normally. Here are the input values: "+
                         ("tex: {0} ".format(tex)) +
                         ("trot: %f " % trot) +
                         ("ntot: %f " % ntot) +
                         ("width: %f " % width) +
                         ("xoff_v: %f " % xoff_v) +
                         ("fortho: %f " % fortho)
                         )

    if verbose or debug:
        log.info("trot: %g tex: %s ntot: %g width: %g xoff_v: %g "
                 "fortho: %g fillingfraction: %g" % (trot, tex, ntot, width,
                                                     xoff_v, fortho,
                                                     fillingfraction))

    return model_spectrum
def cold_ammonia(xarr, tkin, **kwargs):
    """
    Generate a model Ammonia spectrum based on input temperatures, column, and
    gaussian parameters

    Parameters
    ----------
    xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
        Array of wavelength/frequency values
    tkin: float
        The kinetic temperature of the lines in K.  Will be converted to
        rotational temperature following the scheme of Swift et al 2005
        (http://esoads.eso.org/abs/2005ApJ...620..823S, eqn A6) and further
        discussed in Equation 7 of Rosolowsky et al 2008
        (http://adsabs.harvard.edu/abs/2008ApJS..175..509R)
    """
    dT0 = 41.18  # Energy difference between (2,2) and (1,1) in K
    # Swift et al. 2005 three-level correction relating T_K to T_rot.
    correction = 1 + (tkin/dT0)*np.log(1 + 0.6*np.exp(-15.7/tkin))
    trot = tkin / correction
    log.debug("Cold ammonia turned T_K = {0} into T_rot = {1}".format(tkin,trot))
    return ammonia(xarr, trot=trot, **kwargs)
def ammonia_thin(xarr, tkin=20, tex=None, ntot=14, width=1, xoff_v=0.0,
                 fortho=0.0, tau=None, return_tau=False, **kwargs):
    """
    Use optical depth in the 1-1 line as a free parameter

    The optical depths of the other lines are then set by the kinetic
    temperature

    tkin is used to compute trot assuming a 3-level system consisting of (1,1),
    (2,1), and (2,2) as in Swift et al, 2005 [2005ApJ...620..823S]
    """
    tau_dict = {}

    # In the thin limit the excitation temperature is taken equal to tkin
    # (the `tex` argument is ignored).
    tex = tkin

    # NOTE(review): dT0 is 41.5 K here but 41.18 K in `cold_ammonia`;
    # presumably one is a rounding of the (2,2)-(1,1) energy difference --
    # confirm which value is intended.
    dT0 = 41.5  # Energy diff between (2,2) and (1,1) in K
    trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))

    # Optical depths of higher transitions relative to tau(1-1): frequency
    # ratio squared * statistical weights * Boltzmann factor at trot.
    tau_dict['oneone'] = tau
    tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
    tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
    tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
    line_names = tau_dict.keys()
    # TODO: Raise a warning if tkin > (some value), probably 50 K, because
    # the 3-level system approximation used here will break down.

    if return_tau:
        return tau_dict
    else:
        return _ammonia_spectrum(xarr, tex, tau_dict, width, xoff_v, fortho,
                                 line_names, **kwargs)
def _ammonia_spectrum(xarr, tex, tau_dict, width, xoff_v, fortho, line_names,
                      background_tb=TCMB, fillingfraction=None,
                      return_components=False):
    """
    Helper function: given a dictionary of ammonia optical depths,
    an excitation temperature... etc, produce the spectrum
    """
    from .ammonia_constants import (ckms, h, kb)

    # fillingfraction is an arbitrary scaling for the data
    # The model will be (normal model) * fillingfraction
    if fillingfraction is None:
        fillingfraction = 1.0

    # "runspec" means "running spectrum": it is accumulated over a loop
    runspec = np.zeros(len(xarr))

    if return_components:
        components = []

    for linename in line_names:
        voff_lines = np.array(voff_lines_dict[linename])
        tau_wts = np.array(tau_wts_dict[linename])

        # Hyperfine component rest frequencies in GHz, shifted by the
        # per-component velocity offsets.
        lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
        tau_wts = tau_wts / (tau_wts).sum()  # normalize hyperfine weights
        nuwidth = np.abs(width/ckms*lines)
        nuoff = xoff_v/ckms*lines

        # tau array: sum of Gaussian opacity profiles over the hyperfines
        tauprof = np.zeros(len(xarr))
        for kk,nuo in enumerate(nuoff):
            tauprof_ = (tau_dict[linename] * tau_wts[kk] *
                        np.exp(-(xarr.value+nuo-lines[kk])**2 /
                               (2.0*nuwidth[kk]**2)))
            if return_components:
                components.append(tauprof_)
            tauprof += tauprof_

        T0 = (h*xarr.value*1e9/kb)  # "temperature" of wavelength

        # Radiative transfer: source minus background, attenuated by tau;
        # with a per-line tex dict each line gets its own source term.
        if isinstance(tex, dict):
            runspec = ((T0/(np.exp(T0/tex[linename])-1) -
                        T0/(np.exp(T0/background_tb)-1)) *
                       (1-np.exp(-tauprof)) * fillingfraction + runspec)
        else:
            runspec = ((T0/(np.exp(T0/tex)-1) -
                        T0/(np.exp(T0/background_tb)-1)) *
                       (1-np.exp(-tauprof)) * fillingfraction + runspec)

    if return_components:
        # Re-derive the source term(s) to scale each stored opacity
        # component into brightness units.
        if isinstance(tex, dict):
            term1 = [(T0/(np.exp(T0/tex[linename])-1)-T0/(np.exp(T0/background_tb)-1))
                     for linename in line_names]
        else:
            term1 = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))
        return term1*(1-np.exp(-1*np.array(components)))
    else:
        return runspec
class ammonia_model(model.SpectralModel):
"""
The basic Ammonia (NH3) model with 6 free parameters:
Trot, Tex, ntot, width, xoff_v, and fortho
Trot is the rotational temperature. It governs the relative populations of
the rotational states, i.e., the relative strength of different transitions
Tex is the excitation temperature. It is assumed constant across all
states, which is not always a good assumption - a radiative transfer and
excitation model is required to constrain this, though.
ntot is the total column density of p-NH3 integrated over all states.
width is the linewidth
xoff_v is the velocity offset / line of sight velocity
fortho is the ortho fraction (northo / (northo+npara))
"""
    def __init__(self,npeaks=1,npars=6,
                 parnames=['trot','tex','ntot','width','xoff_v','fortho'],
                 **kwargs):
        """Set up the ammonia fitter: model functions, default parinfo,
        and ammonia-specific parameter limits.

        NOTE: the mutable default `parnames` is copied below, so the
        shared-default pitfall is avoided in practice.
        """
        npeaks = self.npeaks = int(npeaks)
        npars = self.npars = int(npars)
        self._default_parnames = parnames
        self.parnames = copy.copy(self._default_parnames)

        # all fitters must have declared modelfuncs, which should take the fitted pars...
        self.modelfunc = ammonia
        self.n_modelfunc = self.n_ammonia

        # for fitting ammonia simultaneously with a flat background
        self.onepeakammonia = fitter.vheightmodel(ammonia)
        #self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)

        self.default_parinfo = None
        self.default_parinfo, kwargs = self._make_parinfo(**kwargs)

        # Remove keywords parsed by parinfo and ignored by the fitter
        for kw in ('tied','partied'):
            if kw in kwargs:
                kwargs.pop(kw)

        # enforce ammonia-specific parameter limits:
        # temperatures bounded below by TCMB, width and ntot non-negative,
        # fortho restricted to [0, 1].
        for par in self.default_parinfo:
            if 'tex' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],TCMB), par.limits[1])
            if 'trot' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],TCMB), par.limits[1])
            if 'width' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],0), par.limits[1])
            if 'fortho' in par.parname.lower():
                par.limited = (True,True)
                if par.limits[1] != 0:
                    par.limits = (max(par.limits[0],0), min(par.limits[1],1))
                else:
                    par.limits = (max(par.limits[0],0), 1)
            if 'ntot' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],0), par.limits[1])

        self.parinfo = copy.copy(self.default_parinfo)

        self.modelfunc_kwargs = kwargs
        # lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})

        # use_lmfit is consumed here so it is not forwarded to the model.
        self.use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else False
        self.fitunit = 'GHz'
    def __call__(self,*args,**kwargs):
        # Calling the model object directly performs a multi-component fit.
        return self.multinh3fit(*args,**kwargs)
    def n_ammonia(self, pars=None, parnames=None, **kwargs):
        """
        Returns a function that sums over N ammonia line profiles, where N is the length of
        trot,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6

        The background "height" is assumed to be zero (you must "baseline" your
        spectrum before fitting)

        *pars* [ list ]
            a list with len(pars) = (6-nfixed)n, assuming
            trot,tex,ntot,width,xoff_v,fortho repeated

        *parnames* [ list ]
            len(parnames) must = len(pars).  parnames determine how the ammonia
            function parses the arguments
        """
        npeaks = self.npeaks
        npars = len(self.default_parinfo)

        if hasattr(pars,'values'):
            # important to treat as Dictionary, since lmfit params & parinfo both have .items
            parnames,parvals = zip(*pars.items())
            parnames = [p.lower() for p in parnames]
            parvals = [p.value for p in parvals]
        elif parnames is None:
            parvals = pars
            parnames = self.parnames
        else:
            parvals = pars

        if len(pars) != len(parnames):
            # this should only be needed when other codes are changing the number of peaks
            # during a copy, as opposed to letting them be set by a __call__
            # (n_modelfuncs = n_ammonia can be called directly)
            # n_modelfuncs doesn't care how many peaks there are
            if len(pars) % len(parnames) == 0:
                # Replicate the parameter names to cover every peak.
                parnames = [p for ii in range(len(pars)//len(parnames)) for p in parnames]
                npeaks = int(len(parvals) / npars)
                log.debug("Setting npeaks={0} npars={1}".format(npeaks, npars))
            else:
                raise ValueError("Wrong array lengths passed to n_ammonia!")

        self._components = []

        def L(x):
            # Sum the model over all peaks; each peak consumes `npars`
            # consecutive entries of parvals.  Numeric suffixes on parameter
            # names (e.g. 'trot0') are stripped before being passed as
            # keyword arguments to the model function.
            v = np.zeros(len(x))
            for jj in range(int(npeaks)):
                modelkwargs = kwargs.copy()
                for ii in range(int(npars)):
                    name = parnames[ii+jj*int(npars)].strip('0123456789').lower()
                    modelkwargs.update({name:parvals[ii+jj*int(npars)]})
                v += self.modelfunc(x,**modelkwargs)
            return v
        return L
def components(self, xarr, pars, hyperfine=False,
return_hyperfine_components=False, **kwargs):
"""
Ammonia components don't follow the default, since in Galactic
astronomy the hyperfine components should be well-separated.
If you want to see the individual components overlaid, you'll need to
pass hyperfine to the plot_fit call
"""
comps=[]
for ii in range(self.npeaks):
if hyperfine or return_hyperfine_components:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],
pars[ii*self.npars:(ii+1)*self.npars]))
comps.append(self.modelfunc(xarr, return_components=True,
**modelkwargs))
else:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],
pars[ii*self.npars:(ii+1)*self.npars]))
comps.append([self.modelfunc(xarr, return_components=False,
**modelkwargs)])
modelcomponents = np.concatenate(comps)
return modelcomponents
    def multinh3fit(self, xax, data, err=None,
                    parinfo=None,
                    quiet=True, shh=True,
                    debug=False,
                    maxiter=200,
                    use_lmfit=False,
                    veryverbose=False, **kwargs):
        """
        Fit multiple nh3 profiles (multiple can be 1)

        Inputs:
           xax - x axis
           data - y axis
           npeaks - How many nh3 profiles to fit?  Default 1 (this could supersede onedgaussfit)
           err - error corresponding to data

           These parameters need to have length = 6*npeaks.  If npeaks > 1 and length = 6, they will
           be replicated npeaks times, otherwise they will be reset to defaults:
           params - Fit parameters: [trot, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
                  If len(params) % 6 == 0, npeaks will be set to len(params) / 6
           fixed - Is parameter fixed?
           limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and trot > Tcmb)
           limitedmax/maxpars - set upper limits on each parameter
           parnames - default parameter names, important for setting kwargs in model ['trot','tex','ntot','width','xoff_v','fortho']

           quiet - should MPFIT output each iteration?
           shh - output final parameters?

        Returns:
           Fit parameters
           Model
           Fit errors
           chi2
        """
        if parinfo is None:
            parinfo = self.parinfo = self.make_parinfo(**kwargs)
        else:
            if isinstance(parinfo, ParinfoList):
                if not quiet:
                    log.info("Using user-specified parinfo.")
                self.parinfo = parinfo
            else:
                if not quiet:
                    log.info("Using something like a user-specified parinfo, but not.")
                # Coerce a plain list of dicts into a ParinfoList.
                self.parinfo = ParinfoList([p if isinstance(p,Parinfo) else Parinfo(p)
                                            for p in parinfo],
                                           preserve_order=True)

        # Strip fitter-level keywords before forwarding to the model function.
        fitfun_kwargs = dict((x,y) for (x,y) in kwargs.items()
                             if x not in ('npeaks', 'params', 'parnames',
                                          'fixed', 'limitedmin', 'limitedmax',
                                          'minpars', 'maxpars', 'tied',
                                          'max_tem_step'))
        fitfun_kwargs.update(self.modelfunc_kwargs)

        if 'use_lmfit' in fitfun_kwargs:
            raise KeyError("use_lmfit was specified in a location where it "
                           "is unacceptable")

        # not used: npars = len(parinfo)/self.npeaks

        self._validate_parinfo()

        def mpfitfun(x,y,err):
            # Build the residual function in the form mpfit expects:
            # f(p) -> [status, residuals].
            if err is None:
                def f(p,fjac=None):
                    return [0,(y-self.n_ammonia(pars=p,
                                                parnames=parinfo.parnames,
                                                **fitfun_kwargs)(x))]
            else:
                def f(p,fjac=None):
                    return [0,(y-self.n_ammonia(pars=p,
                                                parnames=parinfo.parnames,
                                                **fitfun_kwargs)(x))/err]
            return f

        if veryverbose:
            log.info("GUESSES: ")
            log.info(str(parinfo))
            #log.info "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])

        if use_lmfit:
            return self.lmfitter(xax, data, err=err,
                                 parinfo=parinfo,
                                 quiet=quiet,
                                 debug=debug)
        else:
            mp = mpfit(mpfitfun(xax,data,err),
                       parinfo=parinfo,
                       maxiter=maxiter,
                       quiet=quiet,
                       debug=debug)
            mpp = mp.params
            if mp.perror is not None:
                mpperr = mp.perror
            else:
                # mpfit reports no errors on failure; use zeros of same shape.
                mpperr = mpp*0
            chi2 = mp.fnorm

        if mp.status == 0:
            raise Exception(mp.errmsg)

        # Copy fitted values and uncertainties back into the parinfo.
        for i,p in enumerate(mpp):
            parinfo[i]['value'] = p
            parinfo[i]['error'] = mpperr[i]

        if not shh:
            log.info("Fit status: {0}".format(mp.status))
            log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
            log.info("Fit error message: {0}".format(mp.errmsg))
            log.info("Final fit values: ")
            for i,p in enumerate(mpp):
                log.info(" ".join((parinfo[i]['parname'], str(p), " +/- ",
                                   str(mpperr[i]))))
            log.info(" ".join(("Chi2: ", str(mp.fnorm)," Reduced Chi2: ",
                               str(mp.fnorm/len(data)), " DOF:",
                               str(len(data)-len(mpp)))))

        self.mp = mp
        self.parinfo = parinfo
        self.mpp = self.parinfo.values
        self.mpperr = self.parinfo.errors
        self.mppnames = self.parinfo.names
        self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames,
                                    **fitfun_kwargs)(xax)

        # Per-peak optical-depth dictionaries, derived by re-evaluating the
        # model with return_tau for each peak's parameter slice.
        indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars]
                         for jj in range(int(len(self.parinfo)/self.npars))]
        modelkwargs = [dict([(p['parname'].strip("0123456789").lower(),
                              p['value']) for p in pi])
                       for pi in indiv_parinfo]
        self.tau_list = [self.modelfunc(xax, return_tau=True,**mk)
                         for mk in modelkwargs]

        return self.mpp,self.model,self.mpperr,chi2
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# trot, TEX, ntot, width, center, ortho fraction
return [20,10, 15, 1.0, 0.0, 1.0]
    def annotations(self):
        """Return a tuple of LaTeX-formatted legend labels, one per fitted
        parameter in ``self.parinfo``."""
        from decimal import Decimal  # for formatting
        tex_key = {'trot':'T_R', 'tkin': 'T_K', 'tex':'T_{ex}', 'ntot':'N',
                   'fortho':'F_o', 'width':'\\sigma', 'xoff_v':'v',
                   'fillingfraction':'FF', 'tau':'\\tau_{1-1}',
                   'background_tb':'T_{BG}', 'delta':'T_R-T_{ex}'}
        # small hack below: don't quantize if error > value.  We want to see the values.
        label_list = []
        for pinfo in self.parinfo:
            # Parameter names carry a trailing peak index digit, e.g. 'trot0'.
            parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
            parnum = int(pinfo['parname'][-1])
            if pinfo['fixed']:
                formatted_value = "%s" % pinfo['value']
                pm = ""
                formatted_error=""
            else:
                # Quantize the value to the error's precision (or the value's
                # own precision when the error exceeds the value).
                formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
                pm = "$\\pm$"
                formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
            label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
            label_list.append(label)
        labels = tuple(mpcb.flatten(label_list))
        return labels
def make_parinfo(self, quiet=True,
                 npeaks=1,
                 params=(20,20,14,1.0,0.0,0.5), parnames=None,
                 fixed=(False,False,False,False,False,False),
                 limitedmin=(True,True,True,True,False,True),
                 limitedmax=(False,False,True,False,False,True),
                 minpars=(TCMB,TCMB,5,0,0,0),
                 maxpars=(0,0,25,0,0,1),
                 tied=('',)*6,
                 max_tem_step=1.,
                 **kwargs
                 ):
    """
    Build a `ParinfoList` of fit-parameter descriptors from per-parameter
    guesses, limits, and ties.

    Parameters
    ----------
    quiet : bool
        If False, log a message when the parinfo is created.
    npeaks : int
        Number of velocity components.  If `params` is longer than
        ``npars * npeaks``, `npeaks` is re-inferred from ``len(params)``.
    params : tuple or list or numpy.ndarray
        Initial guesses, ``npars`` entries per peak.
    parnames : list of str, optional
        Parameter names; defaults to a copy of ``self._default_parnames``.
    fixed, limitedmin, limitedmax, minpars, maxpars, tied
        Per-parameter mpfit specifications.  Each may be supplied with only
        ``npars`` entries; it is then replicated once per peak (or rebuilt
        from model-specific defaults if its length matches none of the
        expected sizes).
    max_tem_step : float
        Maximum mpfit step size applied to the temperature parameters
        (``tex``/``trot``); other parameters get mpmaxstep 0 (unlimited).

    Returns
    -------
    parinfo : ParinfoList
    """
    if not quiet:
        log.info("Creating a 'parinfo' from guesses.")
    self.npars = int(len(params) / npeaks)

    # Re-infer npeaks when more than npeaks*npars guesses were supplied.
    if len(params) != npeaks and (len(params) / self.npars) > npeaks:
        npeaks = len(params) / self.npars
    npeaks = self.npeaks = int(npeaks)

    if isinstance(params,np.ndarray):
        params=params.tolist()
    # this is actually a hack, even though it's decently elegant
    # somehow, parnames was being changed WITHOUT being passed as a variable
    # this doesn't make sense - at all - but it happened.
    # (it is possible for self.parnames to have npars*npeaks elements where
    # npeaks > 1 coming into this function even though only 6 pars are specified;
    # _default_parnames is the workaround)
    if parnames is None:
        parnames = copy.copy(self._default_parnames)

    partype_dict = dict(zip(['params', 'parnames', 'fixed',
                             'limitedmin', 'limitedmax', 'minpars',
                             'maxpars', 'tied'],
                            [params, parnames, fixed, limitedmin,
                             limitedmax, minpars, maxpars, tied]))

    # make sure all various things are the right length; if they're
    # not, fix them using the defaults
    # (you can put in guesses of length 12 but leave the rest length 6;
    # this code then doubles the length of everything else)
    # NOTE(review): the elif branches below identify WHICH input is being
    # fixed by comparing parlist against the original arguments with ==;
    # this relies on the inputs being mutually unequal — confirm before
    # refactoring.
    for partype,parlist in iteritems(partype_dict):
        if len(parlist) != self.npars*self.npeaks:
            # if you leave the defaults, or enter something that can be
            # multiplied by npars to get to the right number of
            # gaussians, it will just replicate
            if len(parlist) == self.npars:
                partype_dict[partype] *= npeaks
            elif len(parlist) > self.npars:
                # DANGER: THIS SHOULD NOT HAPPEN!
                log.warning("WARNING! Input parameters were longer than allowed for variable {0}".format(parlist))
                partype_dict[partype] = partype_dict[partype][:self.npars]
            elif parlist==params: # this instance shouldn't really be possible
                partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
            elif parlist==fixed:
                partype_dict[partype] = [False] * len(params)
            elif parlist==limitedmax: # only fortho, fillingfraction have upper limits
                partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
            elif parlist==limitedmin: # no physical values can be negative except velocity
                partype_dict[partype] = (np.array(parnames) != 'xoff_v')
            elif parlist==minpars:
                # all have minima of zero except kinetic temperature, which can't be below CMB.
                # Excitation temperature technically can be, but not in this model
                partype_dict[partype] = ((np.array(parnames) == 'trot') + (np.array(parnames) == 'tex')) * TCMB
            elif parlist==maxpars: # fractions have upper limits of 1.0
                partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
            elif parlist==parnames: # assumes the right number of parnames (essential)
                partype_dict[partype] = list(parnames) * self.npeaks
            elif parlist==tied:
                # Shift the parameter indices in tie expressions (e.g.
                # 'p[0]') so each peak's ties reference its own block.
                partype_dict[partype] = [_increment_string_number(t, ii*self.npars)
                                         for t in tied
                                         for ii in range(self.npeaks)]

    if len(parnames) != len(partype_dict['params']):
        raise ValueError("Wrong array lengths AFTER fixing them")

    # used in components. Is this just a hack?
    self.parnames = partype_dict['parnames']

    parinfo = [{'n':ii, 'value':partype_dict['params'][ii],
                'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
                'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]],
                'fixed':partype_dict['fixed'][ii],
                # parname gets the component number appended, e.g. 'trot0'
                'parname':partype_dict['parnames'][ii]+str(int(ii/int(self.npars))),
                'tied':partype_dict['tied'][ii],
                # must force small steps in temperature (True = 1.0)
                'mpmaxstep':max_tem_step*float(partype_dict['parnames'][ii] in ('tex','trot')),
                'error': 0}
               for ii in range(len(partype_dict['params']))
               ]

    # hack: remove 'fixed' pars
    #parinfo_with_fixed = parinfo
    #parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
    #fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),
    #                     p['value'])
    #                    for p in parinfo_with_fixed if p['fixed'])
    # don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
    # this is OK - not a permanent change
    #parnames = [p['parname'] for p in parinfo]
    # not OK self.npars = len(parinfo)/self.npeaks
    parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
    #import pdb; pdb.set_trace()
    return parinfo
def _validate_parinfo(self,
must_be_limited={'trot': [True,False],
'tex': [False,False],
'ntot': [True, True],
'width': [True, False],
'xoff_v': [False, False],
'tau': [False, False],
'fortho': [True, True]},
required_limits={'trot': [0, None],
'ntot': [5, 25],
'width': [0, None],
'fortho': [0,1]}):
"""
Make sure the input parameters are all legitimate
"""
for par in self.parinfo:
limited = par.limited
parname = par.parname.strip(string.digits).lower()
mbl = must_be_limited[parname]
for a,b,ul in zip(limited, mbl, ('a lower','an upper')):
if b and not a:
raise ValueError("Parameter {0} must have {1} limit "
"but no such limit is set.".format(
parname, ul))
if parname in required_limits:
limits = par.limits
rlimits = required_limits[parname]
for a,b,op,ul in zip(limits, rlimits, (operator.lt,
operator.gt),
('a lower','an upper')):
if b is not None and op(a,b):
raise ValueError("Parameter {0} must have {1} limit "
"at least {2} but it is set to {3}."
.format(parname, ul, b, a))
def parse_3par_guesses(self, guesses):
    """
    Expand interactive (peak, center, width) guesses into 6-parameter
    NH3 guesses.

    For each 3-parameter peak, the extra parameters are filled in as:

        tex = peak
        trot = tex * 2
        fortho = 0.5
        ntot = 15

    ``ntot`` is pinned at ~10^15 because that yields optical depths near
    unity, forcing the emission to be approximately significant, and
    ``trot > tex`` starts the fit in a physical regime.
    """
    gauss_npars = 3
    if len(guesses) % gauss_npars != 0:
        raise ValueError("Guesses passed to parse_3par_guesses must have "
                         "length % 3 == 0")
    npeaks = len(guesses) // gauss_npars
    npars = 6
    # Template per peak: [trot, tex, ntot, width, xoff_v, fortho];
    # the -1 placeholders are overwritten below.
    expanded = [-1, -1, 15, -1, -1, 0.5] * npeaks
    for peak_idx in range(npeaks):
        offset = peak_idx * gauss_npars
        peak, center, width = guesses[offset:offset + gauss_npars]
        base = peak_idx * npars
        expanded[base + 0] = peak * 2
        expanded[base + 1] = peak
        expanded[base + 3] = width
        expanded[base + 4] = center
    return expanded
class ammonia_model_vtau(ammonia_model):
    """Ammonia model parameterized by optical depth (``tau``) instead of
    total column density (``ntot``).

    Parameters are ``trot, tex, tau, width, xoff_v, fortho``.
    """

    def __init__(self,
                 parnames=['trot', 'tex', 'tau', 'width', 'xoff_v', 'fortho'],
                 **kwargs):
        super(ammonia_model_vtau, self).__init__(parnames=parnames,
                                                 **kwargs)

    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # trot, tex, tau, width, center, ortho fraction
        return [20, 10, 10, 1.0, 0.0, 1.0]

    def _validate_parinfo(self,
                          must_be_limited={'trot': [True,False],
                                           'tex': [False,False],
                                           'tau': [True, False],
                                           'width': [True, False],
                                           'xoff_v': [False, False],
                                           'fortho': [True, True]},
                          required_limits={'trot': [0, None],
                                           'tex': [None,None],
                                           'width': [0, None],
                                           'tau': [0, None],
                                           'xoff_v': [None,None],
                                           'fortho': [0,1]}):
        """Validate ``self.parinfo`` against tau-model limit requirements.

        Raises ``ValueError`` on an illegitimate parameter set.
        """
        supes = super(ammonia_model_vtau, self)
        # BUGFIX/consistency: return the result of the validation call
        # (None, or it raises) like the sibling subclasses do, instead of
        # returning the bound super() proxy object itself.
        return supes._validate_parinfo(must_be_limited=must_be_limited,
                                       required_limits=required_limits)

    def make_parinfo(self,
                     params=(20,14,0.5,1.0,0.0,0.5),
                     fixed=(False,False,False,False,False,False),
                     limitedmin=(True,True,True,True,False,True),
                     limitedmax=(False,False,False,False,False,True),
                     minpars=(TCMB,TCMB,0,0,0,0),
                     maxpars=(0,0,0,0,0,1),
                     tied=('',)*6,
                     **kwargs
                     ):
        """
        parnames=['trot', 'tex', 'tau', 'width', 'xoff_v', 'fortho']

        Delegates to `ammonia_model.make_parinfo` with tau-model defaults
        (temperatures bounded below by TCMB, tau/width non-negative,
        fortho in [0, 1]).
        """
        return super(ammonia_model_vtau, self).make_parinfo(params=params,
                                                            fixed=fixed,
                                                            limitedmax=limitedmax,
                                                            limitedmin=limitedmin,
                                                            minpars=minpars,
                                                            maxpars=maxpars,
                                                            tied=tied,
                                                            **kwargs)
class ammonia_model_vtau_thin(ammonia_model_vtau):
    """Optically-thin version of the tau-parameterized ammonia model.

    Uses 5 parameters (``tkin, tau, width, xoff_v, fortho``); in the thin
    limit there is no independent excitation temperature, and the model
    function is ``ammonia_thin``.
    """
    def __init__(self, parnames=['tkin', 'tau', 'width', 'xoff_v', 'fortho'],
                 **kwargs):
        super(ammonia_model_vtau_thin, self).__init__(parnames=parnames,
                                                      npars=5,
                                                      **kwargs)
        # swap in the optically-thin model function
        self.modelfunc = ammonia_thin

    def _validate_parinfo(self,
                          must_be_limited={'tkin': [True,False],
                                           'tex': [False,False],
                                           'ntot': [True, True],
                                           'width': [True, False],
                                           'xoff_v': [False, False],
                                           'tau': [False, False],
                                           'fortho': [True, True]},
                          required_limits={'tkin': [0, None],
                                           'ntot': [5, 25],
                                           'width': [0, None],
                                           'fortho': [0,1]}):
        """Validate ``self.parinfo`` with tkin-based limit requirements."""
        supes = super(ammonia_model_vtau_thin, self)
        return supes._validate_parinfo(must_be_limited=must_be_limited,
                                       required_limits=required_limits)

    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # tkin, tau, width, center, ortho fraction
        return [20, 1, 1.0, 0.0, 1.0]

    def __call__(self, *args, **kwargs):
        # calling the model object directly performs a fit
        return self.multinh3fit(*args, **kwargs)

    def make_parinfo(self,
                     params=(20,14,1.0,0.0,0.5),
                     fixed=(False,False,False,False,False),
                     limitedmin=(True,True,True,False,True),
                     limitedmax=(False,False,False,False,True),
                     minpars=(TCMB,0,0,0,0),
                     maxpars=(0,0,0,0,1),
                     tied=('',)*5,
                     **kwargs
                     ):
        """Build a ParinfoList with thin-model (5-parameter) defaults."""
        return super(ammonia_model_vtau_thin, self).make_parinfo(params=params,
                                                                 fixed=fixed,
                                                                 limitedmax=limitedmax,
                                                                 limitedmin=limitedmin,
                                                                 minpars=minpars,
                                                                 maxpars=maxpars,
                                                                 tied=tied,
                                                                 **kwargs)
class ammonia_model_background(ammonia_model):
    """Ammonia model with the background brightness temperature
    (``background_tb``) as an additional (fixed-by-default) fit parameter.

    Parameters are ``trot, tex, ntot, width, xoff_v, fortho,
    background_tb``.
    """

    def __init__(self, **kwargs):
        # Consistency fix: forward **kwargs to the base class.  Previously
        # keyword arguments were accepted here but silently dropped, unlike
        # every other ammonia_model subclass.
        super(ammonia_model_background, self).__init__(
            npars=7,
            parnames=['trot', 'tex', 'ntot', 'width', 'xoff_v', 'fortho',
                      'background_tb'],
            **kwargs)

    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # trot, tex, ntot, width, center, ortho fraction, background T_B
        return [20, 10, 10, 1.0, 0.0, 1.0, TCMB]

    def make_parinfo(self, npeaks=1, err=None,
                     params=(20,20,14,1.0,0.0,0.5,TCMB), parnames=None,
                     fixed=(False,False,False,False,False,False,True),
                     limitedmin=(True,True,True,True,False,True,True),
                     limitedmax=(False,False,False,False,False,True,True),
                     minpars=(TCMB,TCMB,0,0,0,0,TCMB), parinfo=None,
                     maxpars=(0,0,0,0,0,1,TCMB),
                     tied=('',)*7,
                     quiet=True, shh=True,
                     veryverbose=False, **kwargs):
        """Build a ParinfoList; the background temperature defaults to TCMB
        and is fixed (see `ammonia_model.make_parinfo`)."""
        return super(ammonia_model_background,
                     self).make_parinfo(npeaks=npeaks, err=err, params=params,
                                        parnames=parnames, fixed=fixed,
                                        limitedmin=limitedmin,
                                        limitedmax=limitedmax, minpars=minpars,
                                        parinfo=parinfo, maxpars=maxpars,
                                        tied=tied, quiet=quiet, shh=shh,
                                        veryverbose=veryverbose, **kwargs)

    def multinh3fit(self, xax, data, npeaks=1, err=None,
                    params=(20,20,14,1.0,0.0,0.5,TCMB), parnames=None,
                    fixed=(False,False,False,False,False,False,True),
                    limitedmin=(True,True,True,True,False,True,True),
                    limitedmax=(False,False,False,False,False,True,True),
                    minpars=(TCMB,TCMB,0,0,0,0,TCMB), parinfo=None,
                    maxpars=(0,0,0,0,0,1,TCMB),
                    tied=('',)*7,
                    quiet=True, shh=True,
                    veryverbose=False, **kwargs):
        """Fit ammonia components including the background term (see
        `ammonia_model.multinh3fit`)."""
        return super(ammonia_model_background,
                     self).multinh3fit(xax, data, npeaks=npeaks, err=err,
                                       params=params, parnames=parnames,
                                       fixed=fixed, limitedmin=limitedmin,
                                       limitedmax=limitedmax, minpars=minpars,
                                       parinfo=parinfo, maxpars=maxpars,
                                       tied=tied, quiet=quiet, shh=shh,
                                       veryverbose=veryverbose, **kwargs)
class cold_ammonia_model(ammonia_model):
    """Ammonia model using the ``cold_ammonia`` model function, with
    ``tkin`` (kinetic temperature) in place of ``trot``.
    """

    def __init__(self,
                 parnames=['tkin', 'tex', 'ntot', 'width', 'xoff_v', 'fortho'],
                 **kwargs):
        super(cold_ammonia_model, self).__init__(parnames=parnames, **kwargs)
        # swap in the cold-ammonia spectral model
        self.modelfunc = cold_ammonia

    def _validate_parinfo(self,
                          must_be_limited={'tkin': [True,False],
                                           'tex': [False,False],
                                           'ntot': [True, False],
                                           'width': [True, False],
                                           'xoff_v': [False, False],
                                           'fortho': [True, True]},
                          required_limits={'tkin': [0, None],
                                           'tex': [None,None],
                                           'width': [0, None],
                                           'ntot': [0, None],
                                           'xoff_v': [None,None],
                                           'fortho': [0,1]}):
        """Validate ``self.parinfo`` with tkin-specific limit requirements."""
        parent = super(cold_ammonia_model, self)
        return parent._validate_parinfo(must_be_limited=must_be_limited,
                                        required_limits=required_limits)
class ammonia_model_restricted_tex(ammonia_model):
    """Ammonia model in which the excitation temperature is tied to the
    rotation temperature via a fitted difference parameter
    ``delta = trot - tex`` (so ``tex = trot - delta``).
    """
    def __init__(self,
                 parnames=['trot', 'tex', 'ntot', 'width', 'xoff_v', 'fortho',
                           'delta'],
                 **kwargs):
        super(ammonia_model_restricted_tex, self).__init__(npars=7,
                                                           parnames=parnames,
                                                           **kwargs)

        def ammonia_dtex(*args, **kwargs):
            """
            Strip out the 'delta' keyword
            """
            # for py2 compatibility, must strip out manually
            delta = kwargs.pop('delta') if 'delta' in kwargs else None
            # n_ammonia should already have enforced tex = trot - delta;
            # fail loudly if that invariant is broken
            np.testing.assert_allclose(kwargs['trot'] - kwargs['tex'],
                                       delta)
            return ammonia(*args, **kwargs)
        self.modelfunc = ammonia_dtex

    def n_ammonia(self, pars=None, parnames=None, **kwargs):
        """Return the n-component model, first enforcing
        ``tex = trot - delta`` in-place on each 7-parameter block of `pars`.
        """
        if parnames is not None:
            # each component must be ordered [trot, tex, ..., delta]
            for ii,pn in enumerate(parnames):
                if ii % 7 == 1 and 'tex' not in pn:
                    raise ValueError('bad parameter names')
                if ii % 7 == 6 and 'delta' not in pn:
                    raise ValueError('bad parameter names')
        if pars is not None:
            assert len(pars) % 7 == 0
            for ii in range(int(len(pars)/7)):
                try:
                    # Case A: they're param objects
                    # (setting the param directly can result in recursion errors)
                    pars[1+ii*7].value = pars[0+ii*7].value - pars[6+ii*7].value
                except AttributeError:
                    # Case B: they're just lists of values
                    pars[1+ii*7] = pars[0+ii*7] - pars[6+ii*7]
        supes = super(ammonia_model_restricted_tex, self)
        return supes.n_ammonia(pars=pars, parnames=parnames, **kwargs)

    def _validate_parinfo(self,
                          must_be_limited={'trot': [True,False],
                                           'tex': [False,False],
                                           'ntot': [True, False],
                                           'width': [True, False],
                                           'xoff_v': [False, False],
                                           'fortho': [True, True],
                                           'delta': [True, False],
                                           },
                          required_limits={'trot': [0, None],
                                           'tex': [None,None],
                                           'width': [0, None],
                                           'ntot': [0, None],
                                           'xoff_v': [None,None],
                                           'fortho': [0,1],
                                           'delta': [0, None],
                                           }):
        """Validate ``self.parinfo`` including the delta >= 0 requirement."""
        supes = super(ammonia_model_restricted_tex, self)
        return supes._validate_parinfo(must_be_limited=must_be_limited,
                                       required_limits=required_limits)

    def make_parinfo(self,
                     params=(20,20,0.5,1.0,0.0,0.5,0),
                     fixed=(False,False,False,False,False,False,False),
                     limitedmin=(True,True,True,True,False,True,True),
                     limitedmax=(False,False,False,False,False,True,False),
                     minpars=(TCMB,TCMB,0,0,0,0,0),
                     maxpars=(0,0,0,0,0,1,0),
                     tied=('','p[0]-p[6]','','','','',''),
                     **kwargs
                     ):
        """
        parnames=['trot', 'tex', 'ntot', 'width', 'xoff_v', 'fortho', 'delta']

        'delta' is the difference between tex and trot

        Note the mpfit tie ``p[1] = p[0] - p[6]`` which keeps tex consistent
        during the fit.
        """
        supes = super(ammonia_model_restricted_tex, self)
        return supes.make_parinfo(params=params, fixed=fixed,
                                  limitedmax=limitedmax, limitedmin=limitedmin,
                                  minpars=minpars, maxpars=maxpars, tied=tied,
                                  **kwargs)
def _increment_string_number(st, count):
"""
Increment a number in a string
Expects input of the form: p[6]
"""
import re
dig = re.compile('[0-9]+')
if dig.search(st):
n = int(dig.search(st).group())
result = dig.sub(str(n+count), st)
return result
else:
return st
| vlas-sokolov/pyspeckit | pyspeckit/spectrum/models/ammonia.py | Python | mit | 50,962 | [
"Gaussian"
] | 9006aa638a055a1a67b546d3807a8c9768ee70dee736ad76bccf2f305b77c97b |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
#
"""Fast distance array computation --- :mod:`MDAnalysis.lib.distances`
===================================================================
Fast C-routines to calculate arrays of distances or angles from coordinate
arrays. Many of the functions also exist in parallel versions, which typically
provide higher performance than the serial code.
The boolean attribute `MDAnalysis.lib.distances.USED_OPENMP` can be checked to
see if OpenMP was used in the compilation of MDAnalysis.
Selection of acceleration ("backend")
-------------------------------------
All functions take the optional keyword `backend`, which determines the type of
acceleration. Currently, the following choices are implemented (`backend` is
case-insensitive):
.. Table:: Available *backends* for accelerated distance functions.
========== ========================= ======================================
*backend* module description
========== ========================= ======================================
"serial" :mod:`c_distances` serial implementation in C/Cython
"OpenMP" :mod:`c_distances_openmp` parallel implementation in C/Cython
with OpenMP
========== ========================= ======================================
.. versionadded:: 0.13.0
Functions
---------
.. autofunction:: distance_array
.. autofunction:: self_distance_array
.. autofunction:: capped_distance
.. autofunction:: self_capped_distance
.. autofunction:: calc_bonds
.. autofunction:: calc_angles
.. autofunction:: calc_dihedrals
.. autofunction:: apply_PBC
.. autofunction:: transform_RtoS
.. autofunction:: transform_StoR
.. autofunction:: augment_coordinates(coordinates, box, r)
.. autofunction:: undo_augment(results, translation, nreal)
"""
import numpy as np
from numpy.lib.utils import deprecate
from .util import check_coords, check_box
from .mdamath import triclinic_vectors
from ._augment import augment_coordinates, undo_augment
from .nsgrid import FastNS
from .c_distances import _minimize_vectors_ortho, _minimize_vectors_triclinic
# hack to select backend with backend=<backend> kwarg. Note that
# the cython parallel code (prange) in parallel.distances is
# independent from the OpenMP code
import importlib
# Registry of acceleration backends, keyed by lowercase backend name.
# The serial Cython implementation is always present; the OpenMP variant
# is optional and simply absent if it failed to build/import.
_distances = {}
_distances['serial'] = importlib.import_module(".c_distances",
                                               package="MDAnalysis.lib")
try:
    _distances['openmp'] = importlib.import_module(".c_distances_openmp",
                                                   package="MDAnalysis.lib")
except ImportError:
    # no OpenMP build available; backend="OpenMP" will raise in _run()
    pass
del importlib  # keep the module namespace clean
def _run(funcname, args=None, kwargs=None, backend="serial"):
    """Dispatch `funcname` to the requested acceleration backend.

    Parameters
    ----------
    funcname : str
        Name of the C/Cython function to call.
    args : tuple, optional
        Positional arguments forwarded to the backend function.
    kwargs : dict, optional
        Keyword arguments forwarded to the backend function.
    backend : str, optional
        Backend name (case-insensitive); must be a key of ``_distances``.

    Raises
    ------
    ValueError
        If `backend` is not an available backend.
    """
    key = backend.lower()
    if key not in _distances:
        errmsg = (f"Function {funcname} not available with backend {key} "
                  f"try one of: {_distances.keys()}")
        raise ValueError(errmsg) from None
    func = getattr(_distances[key], funcname)
    call_args = args if args is not None else tuple()
    call_kwargs = kwargs if kwargs is not None else dict()
    return func(*call_args, **call_kwargs)
# serial versions are always available (and are typically used within
# the core and topology modules)
from .c_distances import (_UINT64_MAX,
calc_distance_array,
calc_distance_array_ortho,
calc_distance_array_triclinic,
calc_self_distance_array,
calc_self_distance_array_ortho,
calc_self_distance_array_triclinic,
coord_transform,
calc_bond_distance,
calc_bond_distance_ortho,
calc_bond_distance_triclinic,
calc_angle,
calc_angle_ortho,
calc_angle_triclinic,
calc_dihedral,
calc_dihedral_ortho,
calc_dihedral_triclinic,
ortho_pbc,
triclinic_pbc)
from .c_distances_openmp import OPENMP_ENABLED as USED_OPENMP
def _check_result_array(result, shape):
"""Check if the result array is ok to use.
The `result` array must meet the following requirements:
* Must have a shape equal to `shape`.
* Its dtype must be ``numpy.float64``.
Paramaters
----------
result : numpy.ndarray or None
The result array to check. If `result` is `None``, a newly created
array of correct shape and dtype ``numpy.float64`` will be returned.
shape : tuple
The shape expected for the `result` array.
Returns
-------
result : numpy.ndarray (``dtype=numpy.float64``, ``shape=shape``)
The input array or a newly created array if the input was ``None``.
Raises
------
ValueError
If `result` is of incorrect shape.
TypeError
If the dtype of `result` is not ``numpy.float64``.
"""
if result is None:
return np.zeros(shape, dtype=np.float64)
if result.shape != shape:
raise ValueError("Result array has incorrect shape, should be {0}, got "
"{1}.".format(shape, result.shape))
if result.dtype != np.float64:
raise TypeError("Result array must be of type numpy.float64, got {}."
"".format(result.dtype))
# The following two lines would break a lot of tests. WHY?!
# if not coords.flags['C_CONTIGUOUS']:
# raise ValueError("{0} is not C-contiguous.".format(desc))
return result
@check_coords('reference', 'configuration', reduce_result_if_single=False,
              check_lengths_match=False)
def distance_array(reference, configuration, box=None, result=None,
                   backend="serial"):
    """Calculate all possible distances between a reference set and another
    configuration.

    For ``n`` positions in `reference` and ``m`` positions in
    `configuration`, the result is an ``(n, m)`` matrix where ``d[i, j]``
    is the distance between ``reference[i]`` and ``configuration[j]``.

    If `box` is supplied, the minimum image convention is applied;
    orthogonal and triclinic cells are both supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array of shape ``(3,)`` or ``(n, 3)`` (dtype is
        arbitrary, will be converted to ``numpy.float32`` internally).
    configuration : numpy.ndarray
        Configuration coordinate array of shape ``(3,)`` or ``(m, 3)`` (dtype
        is arbitrary, will be converted to ``numpy.float32`` internally).
    box : array_like, optional
        Unitcell dimensions in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    result : numpy.ndarray, optional
        Preallocated result array of shape ``(n, m)`` and dtype
        ``numpy.float64``, filled in place; reusing it avoids repeated
        allocation in tight loops.
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    d : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n, m)``)
        Array containing the distances ``d[i,j]`` between reference
        coordinates ``i`` and configuration coordinates ``j``.


    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts single coordinates as input.
    """
    n_ref = reference.shape[0]
    n_conf = configuration.shape[0]

    # guard against index overflow in the C layer
    if n_ref * n_conf > _UINT64_MAX:
        raise ValueError(f"Size of resulting array {n_ref * n_conf} elements"
                         " larger than size of maximum integer")
    dists = _check_result_array(result, (n_ref, n_conf))
    if len(dists) == 0:
        return dists

    if box is None:
        _run("calc_distance_array",
             args=(reference, configuration, dists),
             backend=backend)
        return dists

    boxtype, box = check_box(box)
    if boxtype == 'ortho':
        funcname = "calc_distance_array_ortho"
    else:
        funcname = "calc_distance_array_triclinic"
    _run(funcname,
         args=(reference, configuration, box, dists),
         backend=backend)
    return dists
@check_coords('reference', reduce_result_if_single=False)
def self_distance_array(reference, box=None, result=None, backend="serial"):
    """Calculate all possible distances within a configuration `reference`.

    The result is a condensed 1D array of length ``n*(n-1)/2`` covering each
    unordered pair exactly once.  If `box` is supplied, the minimum image
    convention is applied; orthogonal and triclinic cells are both supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array of shape ``(3,)`` or ``(n, 3)`` (dtype is
        arbitrary, will be converted to ``numpy.float32`` internally).
    box : array_like, optional
        Unitcell dimensions in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    result : numpy.ndarray, optional
        Preallocated result array of shape ``(n*(n-1)/2,)`` and dtype
        ``numpy.float64``, filled in place; reusing it avoids repeated
        allocation.
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    d : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n*(n-1)/2,)``)
        Condensed distance array; the distance between coordinates ``i``
        and ``j`` sits at position ``k`` following the enumeration:

        .. code-block:: python

            for i in range(n):
                for j in range(i + 1, n):
                    k += 1
                    dist[i, j] = d[k]


    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
    """
    n_coords = reference.shape[0]
    n_pairs = n_coords * (n_coords - 1) // 2

    # guard against index overflow in the C layer
    if n_pairs > _UINT64_MAX:
        raise ValueError(f"Size of resulting array {n_pairs} elements larger"
                         " than size of maximum integer")
    dists = _check_result_array(result, (n_pairs,))
    if len(dists) == 0:
        return dists

    if box is None:
        _run("calc_self_distance_array",
             args=(reference, dists),
             backend=backend)
        return dists

    boxtype, box = check_box(box)
    if boxtype == 'ortho':
        funcname = "calc_self_distance_array_ortho"
    else:
        funcname = "calc_self_distance_array_triclinic"
    _run(funcname,
         args=(reference, box, dists),
         backend=backend)
    return dists
def capped_distance(reference, configuration, max_cutoff, min_cutoff=None,
                    box=None, method=None, return_distances=True):
    """Calculates pairs of indices corresponding to entries in the `reference`
    and `configuration` arrays which are separated by a distance lying within
    the specified cutoff(s). Optionally, these distances can be returned as
    well.

    If the optional argument `box` is supplied, the minimum image convention is
    applied when calculating distances. Either orthogonal or triclinic boxes are
    supported.

    An automatic guessing of the optimal method to calculate the distances is
    included in the function. An optional keyword for the method is also
    provided. Users can enforce a particular method with this functionality.
    Currently brute force, grid search, and periodic KDtree methods are
    implemented.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)``.
    configuration : numpy.ndarray
        Configuration coordinate array with shape ``(3,)`` or ``(m, 3)``.
    max_cutoff : float
        Maximum cutoff distance between the reference and configuration.
    min_cutoff : float, optional
        Minimum cutoff distance between reference and configuration.
    box : array_like, optional
        The unitcell dimensions of the system, which can be orthogonal or
        triclinic and must be provided in the same format as returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    method : {'bruteforce', 'nsgrid', 'pkdtree'}, optional
        Keyword to override the automatic guessing of the employed search
        method.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Pairs of indices, corresponding to coordinates in the `reference` and
        `configuration` arrays such that the distance between them lies within
        the interval (`min_cutoff`, `max_cutoff`].
        Each row in `pairs` is an index pair ``[i, j]`` corresponding to the
        ``i``-th coordinate in `reference` and the ``j``-th coordinate in
        `configuration`.
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``), optional
        Distances corresponding to each pair of indices. Only returned if
        `return_distances` is ``True``. ``distances[k]`` corresponds to the
        ``k``-th pair returned in `pairs` and gives the distance between the
        coordinates ``reference[pairs[k, 0]]`` and
        ``configuration[pairs[k, 1]]``.

        .. code-block:: python

            pairs, distances = capped_distance(reference, configuration,
                                               max_cutoff,
                                               return_distances=True)
            for k, [i, j] in enumerate(pairs):
                coord1 = reference[i]
                coord2 = configuration[j]
                distance = distances[k]

    See Also
    --------
    distance_array
    MDAnalysis.lib.pkdtree.PeriodicKDTree.search
    MDAnalysis.lib.nsgrid.FastNS.search


    .. versionchanged:: 1.0.1
       nsgrid was temporarily removed and replaced with pkdtree due to issues
       relating to its reliability and accuracy (Issues #2919, #2229, #2345,
       #2670, #2930)
    .. versionchanged:: 1.0.2
       nsgrid enabled again
    """
    if box is not None:
        # box must be the 6-component [lx, ly, lz, alpha, beta, gamma] form
        box = np.asarray(box, dtype=np.float32)
        if box.shape[0] != 6:
            raise ValueError("Box Argument is of incompatible type. The "
                             "dimension should be either None or of the form "
                             "[lx, ly, lz, alpha, beta, gamma]")
    # choose brute force / grid / KD-tree based on problem size unless the
    # caller forced a specific method
    method = _determine_method(reference, configuration, max_cutoff,
                               min_cutoff=min_cutoff, box=box, method=method)
    return method(reference, configuration, max_cutoff, min_cutoff=min_cutoff,
                  box=box, return_distances=return_distances)
def _determine_method(reference, configuration, max_cutoff, min_cutoff=None,
                      box=None, method=None):
    """Pick the (probably) fastest capped-distance search implementation.

    The guess is based on the number of coordinates and the cutoff relative
    to the size of the target volume, unless `method` forces a choice.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)``.
    configuration : numpy.ndarray
        Configuration coordinate array with shape ``(3,)`` or ``(m, 3)``.
    max_cutoff : float
        Maximum cutoff distance between `reference` and `configuration`
        coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` and `configuration`
        coordinates.
    box : numpy.ndarray
        Unitcell dimensions ``[lx, ly, lz, alpha, beta, gamma]`` (orthogonal
        or triclinic), or ``None``.
    method : {'bruteforce', 'nsgrid', 'pkdtree'}, optional
        Keyword to override the automatic guessing of the employed search
        method.

    Returns
    -------
    function : callable
        The function implementing the guessed (or deliberately chosen)
        method.


    .. versionchanged:: 1.0.1
       nsgrid was temporarily removed and replaced with pkdtree due to issues
       relating to its reliability and accuracy (Issues #2919, #2229, #2345,
       #2670, #2930)
    .. versionchanged:: 1.1.0
       enabled nsgrid again
    """
    implementations = {'bruteforce': _bruteforce_capped,
                       'pkdtree': _pkdtree_capped,
                       'nsgrid': _nsgrid_capped,
                       }
    # an explicit user choice always wins
    if method is not None:
        return implementations[method.lower()]

    if len(reference) < 10 or len(configuration) < 10:
        # tiny problem: brute force has the least overhead
        return implementations['bruteforce']
    if len(reference) * len(configuration) >= 1e8:
        # CAUTION : for large datasets, shouldnt go into 'bruteforce'
        # in any case. Arbitrary number, but can be characterized
        return implementations['nsgrid']

    # medium-sized problem: compare the cutoff against the search volume
    if box is None:
        lower = np.array([reference.min(axis=0),
                          configuration.min(axis=0)])
        upper = np.array([reference.max(axis=0),
                          configuration.max(axis=0)])
        size = upper.max(axis=0) - lower.min(axis=0)
    elif np.all(box[3:] == 90.0):
        size = box[:3]
    else:
        tribox = triclinic_vectors(box)
        size = tribox.max(axis=0) - tribox.min(axis=0)

    if np.any(max_cutoff > 0.3*size):
        # cutoff spans a large fraction of the volume: grids don't help
        return implementations['bruteforce']
    return implementations['nsgrid']
@check_coords('reference', 'configuration', enforce_copy=False,
              reduce_result_if_single=False, check_lengths_match=False)
def _bruteforce_capped(reference, configuration, max_cutoff, min_cutoff=None,
                       box=None, return_distances=True):
    """Capped distance evaluations between two coordinate sets using brute
    force.

    Computes every reference-configuration distance and returns the index
    pairs ``[i, j]`` whose separation lies in the interval
    (`min_cutoff`, `max_cutoff`]. Optionally, the distances themselves are
    returned as well.

    If `box` is supplied, the minimum image convention is applied; both
    orthogonal and triclinic boxes are supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)`` (dtype
        will be converted to ``numpy.float32`` internally).
    configuration : numpy.ndarray
        Configuration coordinate array with shape ``(3,)`` or ``(m, 3)``
        (dtype will be converted to ``numpy.float32`` internally).
    max_cutoff : float
        Maximum cutoff distance between `reference` and `configuration`
        coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` and `configuration`
        coordinates.
    box : numpy.ndarray, optional
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Each row is an index pair ``[i, j]`` referring to the ``i``-th
        coordinate in `reference` and the ``j``-th coordinate in
        `configuration`.
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``), optional
        ``distances[k]`` is the distance between the coordinates of the
        ``k``-th pair. Only returned if `return_distances` is ``True``.
    """
    # Empty results are returned unless matching pairs are actually found:
    found_pairs = np.empty((0, 2), dtype=np.intp)
    found_dists = np.empty((0,), dtype=np.float64)
    if len(reference) > 0 and len(configuration) > 0:
        all_dists = distance_array(reference, configuration, box=box)
        within = all_dists <= max_cutoff
        if min_cutoff is not None:
            within &= all_dists > min_cutoff
        idx_ref, idx_conf = np.where(within)
        if idx_ref.size > 0:
            found_pairs = np.column_stack((idx_ref, idx_conf))
            if return_distances:
                found_dists = all_dists[idx_ref, idx_conf]
    if return_distances:
        return found_pairs, found_dists
    return found_pairs
@check_coords('reference', 'configuration', enforce_copy=False,
              reduce_result_if_single=False, check_lengths_match=False)
def _pkdtree_capped(reference, configuration, max_cutoff, min_cutoff=None,
                    box=None, return_distances=True):
    """Capped distance evaluations between two coordinate sets using a
    (periodic) KDtree.

    Returns the index pairs ``[i, j]`` of `reference`/`configuration`
    coordinates whose separation lies in the interval
    (`min_cutoff`, `max_cutoff`]. Optionally, the distances themselves are
    returned as well.

    If `box` is supplied, the minimum image convention is applied; both
    orthogonal and triclinic boxes are supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)`` (dtype
        will be converted to ``numpy.float32`` internally).
    configuration : numpy.ndarray
        Configuration coordinate array with shape ``(3,)`` or ``(m, 3)``
        (dtype will be converted to ``numpy.float32`` internally).
    max_cutoff : float
        Maximum cutoff distance between `reference` and `configuration`
        coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` and `configuration`
        coordinates.
    box : numpy.ndarray, optional
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Each row is an index pair ``[i, j]`` referring to the ``i``-th
        coordinate in `reference` and the ``j``-th coordinate in
        `configuration`.
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``), optional
        ``distances[k]`` is the distance between the coordinates of the
        ``k``-th pair. Only returned if `return_distances` is ``True``.
    """
    from .pkdtree import PeriodicKDTree  # local import avoids circular import
    # Empty results are returned unless matching pairs are actually found:
    found_pairs = np.empty((0, 2), dtype=np.intp)
    found_dists = np.empty((0,), dtype=np.float64)
    if len(reference) > 0 and len(configuration) > 0:
        tree = PeriodicKDTree(box=box)
        # A cutoff is only meaningful for periodic trees:
        tree.set_coords(configuration,
                        cutoff=max_cutoff if box is not None else None)
        candidates = tree.search_tree(reference, max_cutoff)
        if candidates.size > 0:
            found_pairs = candidates
            # Distances are needed either for the caller or for filtering
            # against the lower cutoff:
            if return_distances or (min_cutoff is not None):
                found_dists = calc_bonds(reference[found_pairs[:, 0]],
                                         configuration[found_pairs[:, 1]],
                                         box=box)
                if min_cutoff is not None:
                    keep = found_dists > min_cutoff
                    found_pairs = found_pairs[keep]
                    found_dists = found_dists[keep]
    if return_distances:
        return found_pairs, found_dists
    return found_pairs
@check_coords('reference', 'configuration', enforce_copy=False,
              reduce_result_if_single=False, check_lengths_match=False)
def _nsgrid_capped(reference, configuration, max_cutoff, min_cutoff=None,
                   box=None, return_distances=True):
    """Capped distance evaluations using a grid-based search method.

    Computes and returns an array containing pairs of indices corresponding to
    entries in the `reference` and `configuration` arrays which are separated
    by a distance lying within the specified cutoff(s). Employs a grid-based
    search algorithm to find relevant distances.
    Optionally, these distances can be returned as well.

    If the optional argument `box` is supplied, the minimum image convention
    is applied when calculating distances. Either orthogonal or triclinic
    boxes are supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)`` (dtype will
        be converted to ``numpy.float32`` internally).
    configuration : array
        Configuration coordinate array with shape ``(3,)`` or ``(m, 3)`` (dtype
        will be converted to ``numpy.float32`` internally).
    max_cutoff : float
        Maximum cutoff distance between `reference` and `configuration`
        coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` and `configuration`
        coordinates.
    box : numpy.ndarray, optional
        The unitcell dimensions of the system, which can be orthogonal or
        triclinic and must be provided in the same format as returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Pairs of indices, corresponding to coordinates in the `reference` and
        `configuration` arrays such that the distance between them lies within
        the interval (`min_cutoff`, `max_cutoff`].
        Each row in `pairs` is an index pair ``[i, j]`` corresponding to the
        ``i``-th coordinate in `reference` and the ``j``-th coordinate in
        `configuration`.
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``), optional
        Distances corresponding to each pair of indices. Only returned if
        `return_distances` is ``True``. ``distances[k]`` corresponds to the
        ``k``-th pair returned in `pairs` and gives the distance between the
        coordinates ``reference[pairs[k, 0]]`` and
        ``configuration[pairs[k, 1]]``.
    """
    # Default return values (will be overwritten only if pairs are found):
    pairs = np.empty((0, 2), dtype=np.intp)
    distances = np.empty((0,), dtype=np.float64)
    if len(reference) > 0 and len(configuration) > 0:
        if box is None:
            # No periodicity given: build an orthogonal pseudobox large
            # enough to hold both coordinate sets, then search with
            # periodic boundaries switched off (pbc=False).
            pseudobox = np.zeros(6, dtype=np.float32)
            all_coords = np.concatenate([reference, configuration])
            lmax = all_coords.max(axis=0)
            lmin = all_coords.min(axis=0)
            # Using maximum dimension as the box size
            boxsize = (lmax-lmin).max()
            # Enforce a minimum box size so the grid search does not fail
            # for very close particles combined with a large cutoff:
            boxsize = np.maximum(boxsize, 2 * max_cutoff)
            pseudobox[:3] = boxsize + 2.2*max_cutoff
            pseudobox[3:] = 90.
            shiftref, shiftconf = reference.copy(), configuration.copy()
            # Shift all coordinates into the pseudobox with extra padding
            # near the origin:
            shiftref -= lmin - 0.1*max_cutoff
            shiftconf -= lmin - 0.1*max_cutoff
            gridsearch = FastNS(max_cutoff, shiftconf, box=pseudobox, pbc=False)
            results = gridsearch.search(shiftref)
        else:
            gridsearch = FastNS(max_cutoff, configuration, box=box)
            results = gridsearch.search(reference)
        pairs = results.get_pairs()
        if return_distances or (min_cutoff is not None):
            distances = results.get_pair_distances()
            if min_cutoff is not None:
                idx = distances > min_cutoff
                pairs, distances = pairs[idx], distances[idx]
    if return_distances:
        return pairs, distances
    else:
        return pairs
def self_capped_distance(reference, max_cutoff, min_cutoff=None, box=None,
                         method=None, return_distances=True):
    """Finds all pairs of `reference` coordinates separated by a distance
    lying within the specified cutoff(s), optionally together with the
    distances themselves.

    If `box` is supplied, the minimum image convention is applied when
    calculating distances; both orthogonal and triclinic boxes are supported.
    The optimal search method (brute force, grid search, or periodic KDtree)
    is guessed automatically, but can be enforced via the `method` keyword.

    Parameters
    -----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)``.
    max_cutoff : float
        Maximum cutoff distance between `reference` coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` coordinates.
    box : array_like, optional
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    method : {'bruteforce', 'nsgrid', 'pkdtree'}, optional
        Keyword to override the automatic guessing of the employed search
        method.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Each row is an index pair ``[i, j]`` referring to the ``i``-th and
        ``j``-th coordinate in `reference`, whose distance lies within the
        interval (`min_cutoff`, `max_cutoff`].
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``)
        Distances corresponding to each pair of indices. Only returned if
        `return_distances` is ``True``. ``distances[k]`` gives the distance
        between ``reference[pairs[k, 0]]`` and ``reference[pairs[k, 1]]``:

        .. code-block:: python

            pairs, distances = self_capped_distance(reference, max_cutoff,
                                                    return_distances=True)
            for k, [i, j] in enumerate(pairs):
                coord1 = reference[i]
                coord2 = reference[j]
                distance = distances[k]

    Note
    -----
    Currently supports brute force, grid-based, and periodic KDtree search
    methods.

    See Also
    --------
    self_distance_array
    MDAnalysis.lib.pkdtree.PeriodicKDTree.search
    MDAnalysis.lib.nsgrid.FastNS.self_search

    .. versionchanged:: 0.20.0
       Added `return_distances` keyword.
    .. versionchanged:: 1.0.1
       nsgrid was temporarily removed and replaced with pkdtree due to issues
       relating to its reliability and accuracy (Issues #2919, #2229, #2345,
       #2670, #2930)
    .. versionchanged:: 1.0.2
       enabled nsgrid again
    """
    if box is not None:
        # Validate the box early so every backend receives a usable array:
        box = np.asarray(box, dtype=np.float32)
        if box.shape[0] != 6:
            raise ValueError("Box Argument is of incompatible type. The "
                             "dimension should be either None or of the form "
                             "[lx, ly, lz, alpha, beta, gamma]")
    search = _determine_method_self(reference, max_cutoff,
                                    min_cutoff=min_cutoff,
                                    box=box, method=method)
    return search(reference, max_cutoff, min_cutoff=min_cutoff, box=box,
                  return_distances=return_distances)
def _determine_method_self(reference, max_cutoff, min_cutoff=None, box=None,
                           method=None):
    """Guesses the fastest method for capped distance calculations based on
    the size of the `reference` coordinate set and the relative size of the
    target volume.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)``.
    max_cutoff : float
        Maximum cutoff distance between `reference` coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` coordinates.
    box : numpy.ndarray
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    method : {'bruteforce', 'nsgrid', 'pkdtree'}, optional
        Keyword to override the automatic guessing of the employed search
        method.

    Returns
    -------
    function : callable
        The function implementing the guessed (or deliberately chosen)
        method.

    .. versionchanged:: 1.0.1
       nsgrid was temporarily removed and replaced with pkdtree due to issues
       relating to its reliability and accuracy (Issues #2919, #2229, #2345,
       #2670, #2930)
    .. versionchanged:: 1.0.2
       enabled nsgrid again
    """
    candidates = {'bruteforce': _bruteforce_capped_self,
                  'pkdtree': _pkdtree_capped_self,
                  'nsgrid': _nsgrid_capped_self,
                  }
    # An explicit user choice always wins over the heuristics below:
    if method is not None:
        return candidates[method.lower()]
    # Tiny coordinate sets are fastest with plain pairwise distances:
    if len(reference) < 100:
        return candidates['bruteforce']
    # Estimate the spatial extent of the search domain:
    if box is None:
        size = reference.max(axis=0) - reference.min(axis=0)
    elif np.all(box[3:] == 90.0):
        size = box[:3]
    else:
        tri = triclinic_vectors(box)
        size = tri.max(axis=0) - tri.min(axis=0)
    # A cutoff that is tiny relative to the domain favors the KDtree;
    # otherwise the grid search is the better choice:
    if max_cutoff < 0.03 * size.min():
        return candidates['pkdtree']
    return candidates['nsgrid']
@check_coords('reference', enforce_copy=False, reduce_result_if_single=False)
def _bruteforce_capped_self(reference, max_cutoff, min_cutoff=None, box=None,
                            return_distances=True):
    """Capped distance evaluations using a brute force method.

    Computes and returns an array containing pairs of indices corresponding to
    entries in the `reference` array which are separated by a distance lying
    within the specified cutoff(s). Employs naive distance computations (brute
    force) to find relevant distances. Optionally, these distances can be
    returned as well.

    If the optional argument `box` is supplied, the minimum image convention
    is applied when calculating distances. Either orthogonal or triclinic
    boxes are supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)`` (dtype will
        be converted to ``numpy.float32`` internally).
    max_cutoff : float
        Maximum cutoff distance between `reference` coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` coordinates.
    box : numpy.ndarray, optional
        The unitcell dimensions of the system, which can be orthogonal or
        triclinic and must be provided in the same format as returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Pairs of indices, corresponding to coordinates in the `reference` array
        such that the distance between them lies within the interval
        (`min_cutoff`, `max_cutoff`].
        Each row in `pairs` is an index pair ``[i, j]`` corresponding to the
        ``i``-th and the ``j``-th coordinate in `reference`.
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``), optional
        Distances corresponding to each pair of indices. Only returned if
        `return_distances` is ``True``. ``distances[k]`` corresponds to the
        ``k``-th pair returned in `pairs` and gives the distance between the
        coordinates ``reference[pairs[k, 0]]`` and
        ``reference[pairs[k, 1]]``.

    .. versionchanged:: 0.20.0
       Added `return_distances` keyword.
    """
    # Default return values (will be overwritten only if pairs are found):
    pairs = np.empty((0, 2), dtype=np.intp)
    distances = np.empty((0,), dtype=np.float64)
    N = len(reference)
    # We're searching within a single coordinate set, so we need at least two
    # coordinates to find distances between them.
    if N > 1:
        distvec = self_distance_array(reference, box=box)
        # Scatter the condensed distance vector into the upper triangle of an
        # N x N matrix. The diagonal and lower triangle are filled with the
        # largest representable float so they can never satisfy
        # ``dist <= max_cutoff``, guaranteeing each pair appears once (i < j):
        dist = np.full((N, N), np.finfo(np.float64).max, dtype=np.float64)
        dist[np.triu_indices(N, 1)] = distvec
        if min_cutoff is not None:
            mask = np.where((dist <= max_cutoff) & (dist > min_cutoff))
        else:
            mask = np.where((dist <= max_cutoff))
        if mask[0].size > 0:
            pairs = np.c_[mask[0], mask[1]]
            distances = dist[mask]
    if return_distances:
        return pairs, distances
    return pairs
@check_coords('reference', enforce_copy=False, reduce_result_if_single=False)
def _pkdtree_capped_self(reference, max_cutoff, min_cutoff=None, box=None,
                         return_distances=True):
    """Capped distance evaluations within one coordinate set using a
    (periodic) KDtree.

    Returns the index pairs ``[i, j]`` of `reference` coordinates whose
    separation lies in the interval (`min_cutoff`, `max_cutoff`].
    Optionally, the distances themselves are returned as well.

    If `box` is supplied, the minimum image convention is applied; both
    orthogonal and triclinic boxes are supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)`` (dtype
        will be converted to ``numpy.float32`` internally).
    max_cutoff : float
        Maximum cutoff distance between `reference` coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` coordinates.
    box : numpy.ndarray, optional
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Each row is an index pair ``[i, j]`` referring to the ``i``-th and
        ``j``-th coordinate in `reference`.
    distances : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n_pairs,)``)
        ``distances[k]`` is the distance between the coordinates of the
        ``k``-th pair. Only returned if `return_distances` is ``True``.

    .. versionchanged:: 0.20.0
       Added `return_distances` keyword.
    """
    from .pkdtree import PeriodicKDTree  # local import avoids circular import
    # Empty results are returned unless matching pairs are actually found:
    found_pairs = np.empty((0, 2), dtype=np.intp)
    found_dists = np.empty((0,), dtype=np.float64)
    # At least two coordinates are required for any pair to exist:
    if len(reference) > 1:
        tree = PeriodicKDTree(box=box)
        # A cutoff is only meaningful for periodic trees:
        tree.set_coords(reference,
                        cutoff=max_cutoff if box is not None else None)
        candidates = tree.search_pairs(max_cutoff)
        if candidates.size > 0:
            found_pairs = candidates
            # Distances are needed either for the caller or for filtering
            # against the lower cutoff:
            if return_distances or (min_cutoff is not None):
                found_dists = calc_bonds(reference[found_pairs[:, 0]],
                                         reference[found_pairs[:, 1]],
                                         box=box)
                if min_cutoff is not None:
                    keep = found_dists > min_cutoff
                    found_pairs = found_pairs[keep]
                    found_dists = found_dists[keep]
    if return_distances:
        return found_pairs, found_dists
    return found_pairs
@check_coords('reference', enforce_copy=False, reduce_result_if_single=False)
def _nsgrid_capped_self(reference, max_cutoff, min_cutoff=None, box=None,
                        return_distances=True):
    """Capped distance evaluations using a grid-based search method.

    Computes and returns an array containing pairs of indices corresponding to
    entries in the `reference` array which are separated by a distance lying
    within the specified cutoff(s). Employs a grid-based search algorithm to
    find relevant distances. Optionally, these distances can be returned as
    well.

    If the optional argument `box` is supplied, the minimum image convention
    is applied when calculating distances. Either orthogonal or triclinic
    boxes are supported.

    Parameters
    ----------
    reference : numpy.ndarray
        Reference coordinate array with shape ``(3,)`` or ``(n, 3)`` (dtype will
        be converted to ``numpy.float32`` internally).
    max_cutoff : float
        Maximum cutoff distance between `reference` coordinates.
    min_cutoff : float, optional
        Minimum cutoff distance between `reference` coordinates.
    box : numpy.ndarray, optional
        The unitcell dimensions of the system, which can be orthogonal or
        triclinic and must be provided in the same format as returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    return_distances : bool, optional
        If set to ``True``, distances will also be returned.

    Returns
    -------
    pairs : numpy.ndarray (``dtype=numpy.int64``, ``shape=(n_pairs, 2)``)
        Pairs of indices, corresponding to coordinates in the `reference` array
        such that the distance between them lies within the interval
        (`min_cutoff`, `max_cutoff`].
        Each row in `pairs` is an index pair ``[i, j]`` corresponding to the
        ``i``-th and the ``j``-th coordinate in `reference`.
    distances : numpy.ndarray, optional
        Distances corresponding to each pair of indices. Only returned if
        `return_distances` is ``True``. ``distances[k]`` corresponds to the
        ``k``-th pair returned in `pairs` and gives the distance between the
        coordinates ``reference[pairs[k, 0]]`` and
        ``reference[pairs[k, 1]]``.

    .. versionchanged:: 0.20.0
       Added `return_distances` keyword.
    """
    # Default return values (will be overwritten only if pairs are found):
    pairs = np.empty((0, 2), dtype=np.intp)
    distances = np.empty((0,), dtype=np.float64)
    # We're searching within a single coordinate set, so we need at least two
    # coordinates to find distances between them.
    if len(reference) > 1:
        if box is None:
            # No periodicity given: build an orthogonal pseudobox large
            # enough to hold all coordinates, then search with periodic
            # boundaries switched off (pbc=False). This mirrors the
            # construction in _nsgrid_capped.
            pseudobox = np.zeros(6, dtype=np.float32)
            lmax = reference.max(axis=0)
            lmin = reference.min(axis=0)
            # Using maximum dimension as the box size
            boxsize = (lmax-lmin).max()
            # Enforce a minimum box size so the grid search does not fail
            # for very close (or even coinciding) particles combined with a
            # large cutoff. Note: the previous scheme divided by ``boxsize``,
            # which raised ZeroDivisionError when all coordinates coincide.
            boxsize = np.maximum(boxsize, 2 * max_cutoff)
            pseudobox[:3] = boxsize + 2.2*max_cutoff
            pseudobox[3:] = 90.
            shiftref = reference.copy()
            # Shift all coordinates into the pseudobox with extra padding
            # near the origin:
            shiftref -= lmin - 0.1*max_cutoff
            gridsearch = FastNS(max_cutoff, shiftref, box=pseudobox, pbc=False)
            results = gridsearch.self_search()
        else:
            gridsearch = FastNS(max_cutoff, reference, box=box)
            results = gridsearch.self_search()
        pairs = results.get_pairs()
        if return_distances or (min_cutoff is not None):
            distances = results.get_pair_distances()
            if min_cutoff is not None:
                idx = distances > min_cutoff
                pairs, distances = pairs[idx], distances[idx]
    if return_distances:
        return pairs, distances
    return pairs
@check_coords('coords')
def transform_RtoS(coords, box, backend="serial"):
    """Transform an array of coordinates from real space to S space (a.k.a.
    lambda space).

    S space represents fractional space within the unit cell for this system.
    Reciprocal operation to :meth:`transform_StoR`.

    Parameters
    ----------
    coords : numpy.ndarray
        A ``(3,)`` or ``(n, 3)`` array of coordinates (dtype is arbitrary,
        will be converted to ``numpy.float32`` internally).
    box : numpy.ndarray
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    newcoords : numpy.ndarray (``dtype=numpy.float32``, ``shape=coords.shape``)
        An array containing fractional coordinates.

    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts (and, likewise, returns) a single coordinate.
    """
    # Nothing to transform:
    if len(coords) == 0:
        return coords
    boxtype, box = check_box(box)
    # Orthogonal boxes come back as edge lengths; promote to a 3x3 matrix:
    box = (np.diag(box) if boxtype == 'ortho' else box).astype(np.float64)
    # Multiplying by the inverse box matrix yields fractional coordinates;
    # the backend requires a C-ordered matrix.
    inv_box = np.ascontiguousarray(np.linalg.inv(box))
    _run("coord_transform", args=(coords, inv_box), backend=backend)
    return coords
@check_coords('coords')
def transform_StoR(coords, box, backend="serial"):
    """Transform an array of coordinates from S space into real space.

    S space represents fractional space within the unit cell for this system.
    Reciprocal operation to :meth:`transform_RtoS`.

    Parameters
    ----------
    coords : numpy.ndarray
        A ``(3,)`` or ``(n, 3)`` array of coordinates (dtype is arbitrary,
        will be converted to ``numpy.float32`` internally).
    box : numpy.ndarray
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    newcoords : numpy.ndarray (``dtype=numpy.float32``, ``shape=coords.shape``)
        An array containing real space coordinates.

    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts (and, likewise, returns) a single coordinate.
    """
    # Nothing to transform:
    if len(coords) == 0:
        return coords
    boxtype, box = check_box(box)
    # Orthogonal boxes come back as edge lengths; promote to a 3x3 matrix:
    box = (np.diag(box) if boxtype == 'ortho' else box).astype(np.float64)
    # Multiplying by the box matrix maps fractional to real coordinates:
    _run("coord_transform", args=(coords, box), backend=backend)
    return coords
@check_coords('coords1', 'coords2')
def calc_bonds(coords1, coords2, box=None, result=None, backend="serial"):
    """Calculates the bond lengths between pairs of atom positions from the
    two coordinate arrays `coords1` and `coords2`, which must contain the
    same number of coordinates. ``coords1[i]`` and ``coords2[i]`` represent
    the positions of atoms connected by the ``i``-th bond. If single
    coordinates are supplied, a single distance will be returned.

    In contrast to :meth:`distance_array` and :meth:`self_distance_array`,
    which evaluate all coordinate combinations, :meth:`calc_bonds` only
    evaluates paired coordinates, similar to::

        numpy.linalg.norm(a - b) for a, b in zip(coords1, coords2)

    If the optional argument `box` is supplied, the minimum image convention
    is applied; both orthogonal and triclinic boxes are supported.

    A preallocated ``numpy.float64`` array of shape ``(n,)`` may be passed as
    `result` to avoid repeated allocations.

    Parameters
    ----------
    coords1 : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` for one half of a
        single or ``n`` bonds, respectively (dtype is arbitrary, will be
        converted to ``numpy.float32`` internally).
    coords2 : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` for the other half
        of a single or ``n`` bonds, respectively (dtype is arbitrary, will be
        converted to ``numpy.float32`` internally).
    box : numpy.ndarray, optional
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    result : numpy.ndarray, optional
        Preallocated result array of dtype ``numpy.float64`` and shape
        ``(n,)`` (for ``n`` coordinate pairs).
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    bondlengths : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n,)``) or numpy.float64
        Array containing the bond lengths between each pair of coordinates.
        If two single coordinates were supplied, their distance is returned
        as a single number instead of an array.

    .. versionadded:: 0.8
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts single coordinates as input.
    """
    n_bonds = coords1.shape[0]
    bondlengths = _check_result_array(result, (n_bonds,))
    if n_bonds == 0:
        return bondlengths
    if box is None:
        _run("calc_bond_distance",
             args=(coords1, coords2, bondlengths),
             backend=backend)
    else:
        # Pick the periodic kernel matching the box geometry:
        boxtype, box = check_box(box)
        kernel = ("calc_bond_distance_ortho" if boxtype == 'ortho'
                  else "calc_bond_distance_triclinic")
        _run(kernel,
             args=(coords1, coords2, box, bondlengths),
             backend=backend)
    return bondlengths
@check_coords('coords1', 'coords2', 'coords3')
def calc_angles(coords1, coords2, coords3, box=None, result=None,
                backend="serial"):
    """Calculates the angles formed between triplets of atom positions from
    the three coordinate arrays `coords1`, `coords2`, and `coords3`. All
    coordinate arrays must contain the same number of coordinates.

    The coordinates in `coords2` represent the apices of the angles::

        2---3
       /
      1

    Configurations where the angle is undefined (e.g., when coordinates 1 or
    3 of a triplet coincide with coordinate 2) result in a value of **zero**
    for that angle.

    If the optional argument `box` is supplied, periodic boundaries are taken
    into account when constructing the connecting vectors between
    coordinates, i.e., the minimum image convention is applied for the
    vectors forming the angles. Either orthogonal or triclinic boxes are
    supported.

    A preallocated ``numpy.float64`` array of shape ``(n,)`` may be passed as
    `result` to avoid repeated allocations.

    Parameters
    ----------
    coords1 : numpy.ndarray
        Array of shape ``(3,)`` or ``(n, 3)`` with the coordinates of one
        side of a single or ``n`` angles (dtype is arbitrary, will be
        converted to ``numpy.float32`` internally).
    coords2 : numpy.ndarray
        Array of shape ``(3,)`` or ``(n, 3)`` with the apex coordinates of a
        single or ``n`` angles (dtype is arbitrary, will be converted to
        ``numpy.float32`` internally).
    coords3 : numpy.ndarray
        Array of shape ``(3,)`` or ``(n, 3)`` with the coordinates of the
        other side of a single or ``n`` angles (dtype is arbitrary, will be
        converted to ``numpy.float32`` internally).
    box : numpy.ndarray, optional
        The unitcell dimensions of the system in the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    result : numpy.ndarray, optional
        Preallocated result array of dtype ``numpy.float64`` and shape
        ``(n,)`` (for ``n`` coordinate triplets).
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    angles : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n,)``) or numpy.float64
        Array containing the angles between each triplet of coordinates, in
        radians (rad). If three single coordinates were supplied, the angle
        is returned as a single number instead of an array.

    .. versionadded:: 0.8
    .. versionchanged:: 0.9.0
       Added optional box argument to account for periodic boundaries in
       calculation
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts single coordinates as input.
    """
    n_angles = coords1.shape[0]
    angles = _check_result_array(result, (n_angles,))
    if n_angles == 0:
        return angles
    if box is None:
        _run("calc_angle",
             args=(coords1, coords2, coords3, angles),
             backend=backend)
    else:
        # Pick the periodic kernel matching the box geometry:
        boxtype, box = check_box(box)
        kernel = ("calc_angle_ortho" if boxtype == 'ortho'
                  else "calc_angle_triclinic")
        _run(kernel,
             args=(coords1, coords2, coords3, box, angles),
             backend=backend)
    return angles
@check_coords('coords1', 'coords2', 'coords3', 'coords4')
def calc_dihedrals(coords1, coords2, coords3, coords4, box=None, result=None,
                   backend="serial"):
    r"""Compute dihedral angles for quadruplets of positions.

    The dihedral angle formed by a quadruplet of positions (1,2,3,4) is
    measured around the axis connecting positions 2 and 3, i.e., the angle
    between the planes spanned by positions (1,2,3) and (2,3,4)::

                  4
                  |
            2-----3
           /
          1

    If all coordinates lie in the same plane, the cis configuration
    corresponds to a dihedral of zero and the trans configuration to
    :math:`\pi` radians (180 degrees). Where the dihedral is undefined
    (e.g., all four coordinates collinear), ``nan`` is returned for that
    quadruplet.

    If `box` is supplied, the minimum image convention is applied to the
    connecting vectors; orthogonal and triclinic boxes are both supported.

    Parameters
    ----------
    coords1 : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` containing the 1st
        positions in dihedrals (dtype is arbitrary, will be converted to
        ``numpy.float32`` internally)
    coords2 : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` containing the 2nd
        positions in dihedrals (dtype is arbitrary, will be converted to
        ``numpy.float32`` internally)
    coords3 : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` containing the 3rd
        positions in dihedrals (dtype is arbitrary, will be converted to
        ``numpy.float32`` internally)
    coords4 : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` containing the 4th
        positions in dihedrals (dtype is arbitrary, will be converted to
        ``numpy.float32`` internally)
    box : numpy.ndarray, optional
        The unitcell dimensions of the system, orthogonal or triclinic, in
        the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    result : numpy.ndarray, optional
        Preallocated result array of dtype ``numpy.float64`` and shape
        ``(n,)``. Avoids recreating the array in repeated function calls.
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    dihedrals : numpy.ndarray (``dtype=numpy.float64``, ``shape=(n,)``) or numpy.float64
        Dihedral angles in radians (rad). If four single coordinates were
        supplied, a single number is returned instead of an array.

    .. versionadded:: 0.8
    .. versionchanged:: 0.9.0
       Added optional box argument to account for periodic boundaries in
       calculation
    .. versionchanged:: 0.11.0
       Renamed from calc_torsions to calc_dihedrals
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts single coordinates as input.
    """
    n_quadruplets = coords1.shape[0]
    dihedrals = _check_result_array(result, (n_quadruplets,))
    if n_quadruplets == 0:
        # Nothing to compute; return the (empty) result array unchanged.
        return dihedrals
    if box is None:
        # No periodic boundaries: use the plain dihedral kernel.
        _run("calc_dihedral",
             args=(coords1, coords2, coords3, coords4, dihedrals),
             backend=backend)
    else:
        # Minimum image convention via the box-aware kernels.
        boxtype, box = check_box(box)
        kernel = ("calc_dihedral_ortho" if boxtype == 'ortho'
                  else "calc_dihedral_triclinic")
        _run(kernel,
             args=(coords1, coords2, coords3, coords4, box, dihedrals),
             backend=backend)
    return dihedrals
@check_coords('coords')
def apply_PBC(coords, box, backend="serial"):
    """Move coordinates into the primary unit cell.

    Parameters
    ----------
    coords : numpy.ndarray
        Coordinate array of shape ``(3,)`` or ``(n, 3)`` (dtype is
        arbitrary, will be converted to ``numpy.float32`` internally).
    box : numpy.ndarray
        The unitcell dimensions of the system, orthogonal or triclinic, in
        the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.
    backend : {'serial', 'OpenMP'}, optional
        Keyword selecting the type of acceleration.

    Returns
    -------
    newcoords : numpy.ndarray (``dtype=numpy.float32``, ``shape=coords.shape``)
        Array containing coordinates that all lie within the primary unit
        cell as defined by `box`.

    .. versionadded:: 0.8
    .. versionchanged:: 0.13.0
       Added *backend* keyword.
    .. versionchanged:: 0.19.0
       Internal dtype conversion of input coordinates to ``numpy.float32``.
       Now also accepts (and, likewise, returns) single coordinates.
    """
    if len(coords) == 0:
        # Empty input: nothing to wrap.
        return coords
    boxtype, box = check_box(box)
    kernel = "ortho_pbc" if boxtype == 'ortho' else "triclinic_pbc"
    # The low-level routine shifts the coordinate array in place.
    _run(kernel, args=(coords, box), backend=backend)
    return coords
@check_coords('vectors', enforce_copy=False, enforce_dtype=False)
def minimize_vectors(vectors, box):
    """Apply the minimum image convention to an array of vectors.

    This function is required for calculating the correct vectors between
    two points. A naive approach of ``ag1.positions - ag2.positions`` will
    not provide the minimum vectors between particles, even if all
    particles are within the primary unit cell (box).

    Parameters
    ----------
    vectors : numpy.ndarray
        Vector array of shape ``(n, 3)``, either float32 or float64. These
        represent many vectors (such as between two particles).
    box : numpy.ndarray
        The unitcell dimensions of the system, orthogonal or triclinic, in
        the format returned by
        :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`:
        ``[lx, ly, lz, alpha, beta, gamma]``.

    Returns
    -------
    minimized_vectors : numpy.ndarray
        Same shape and dtype as input. The vectors from the input, but
        minimized according to the size of the box.

    .. versionadded:: 2.1.0
    """
    boxtype, box = check_box(box)
    minimized = np.empty_like(vectors)
    # Work with a box of the same precision as the input vectors.
    box = box.astype(vectors.dtype)
    if boxtype == 'ortho':
        _minimize_vectors_ortho(vectors, box, minimized)
    else:
        _minimize_vectors_triclinic(vectors, box.ravel(), minimized)
    return minimized
| MDAnalysis/mdanalysis | package/MDAnalysis/lib/distances.py | Python | gpl-2.0 | 67,291 | [
"MDAnalysis"
] | 35f553b58c4b3c9dc901425e19abc6bbebf20d2b7fbb2735f47f68773fc57aaa |
import string, gettext

# Alias gettext's lookup so the messages below are marked for translation
# with the conventional "_(...)" idiom.
_ = gettext.gettext

try:
    frozenset
except NameError:
    # Import from the sets module for python 2.3
    from sets import Set as set
    from sets import ImmutableSet as frozenset

# Sentinel for "end of input"; presumably compared by identity by the
# tokenizer (``is EOF``).
EOF = None

# Parse-error codes mapped to human-readable, translatable messages.
# Messages may contain %-style named placeholders (e.g. ``%(name)s``)
# that are filled in with token data when the error is reported.
E = {
    "null-character":
        _(u"Null character in input stream, replaced with U+FFFD."),
    "invalid-codepoint":
        _(u"Invalid codepoint in stream."),
    "incorrectly-placed-solidus":
        _(u"Solidus (/) incorrectly placed in tag."),
    "incorrect-cr-newline-entity":
        _(u"Incorrect CR newline entity, replaced with LF."),
    "illegal-windows-1252-entity":
        _(u"Entity used with illegal number (windows-1252 reference)."),
    "cant-convert-numeric-entity":
        _(u"Numeric entity couldn't be converted to character "
          u"(codepoint U+%(charAsInt)08x)."),
    "illegal-codepoint-for-numeric-entity":
        _(u"Numeric entity represents an illegal codepoint: "
          u"U+%(charAsInt)08x."),
    "numeric-entity-without-semicolon":
        _(u"Numeric entity didn't end with ';'."),
    "expected-numeric-entity-but-got-eof":
        _(u"Numeric entity expected. Got end of file instead."),
    "expected-numeric-entity":
        _(u"Numeric entity expected but none found."),
    "named-entity-without-semicolon":
        _(u"Named entity didn't end with ';'."),
    "expected-named-entity":
        _(u"Named entity expected. Got none."),
    "attributes-in-end-tag":
        _(u"End tag contains unexpected attributes."),
    "self-closing-flag-on-end-tag":
        _(u"End tag contains unexpected self-closing flag."),
    "expected-tag-name-but-got-right-bracket":
        _(u"Expected tag name. Got '>' instead."),
    "expected-tag-name-but-got-question-mark":
        _(u"Expected tag name. Got '?' instead. (HTML doesn't "
          u"support processing instructions.)"),
    "expected-tag-name":
        _(u"Expected tag name. Got something else instead"),
    "expected-closing-tag-but-got-right-bracket":
        _(u"Expected closing tag. Got '>' instead. Ignoring '</>'."),
    "expected-closing-tag-but-got-eof":
        _(u"Expected closing tag. Unexpected end of file."),
    "expected-closing-tag-but-got-char":
        _(u"Expected closing tag. Unexpected character '%(data)s' found."),
    "eof-in-tag-name":
        _(u"Unexpected end of file in the tag name."),
    "expected-attribute-name-but-got-eof":
        _(u"Unexpected end of file. Expected attribute name instead."),
    "eof-in-attribute-name":
        _(u"Unexpected end of file in attribute name."),
    "invalid-character-in-attribute-name":
        # Fixed typo: was "Invalid chracter ..."
        _(u"Invalid character in attribute name"),
    "duplicate-attribute":
        _(u"Dropped duplicate attribute on tag."),
    "expected-end-of-tag-name-but-got-eof":
        _(u"Unexpected end of file. Expected = or end of tag."),
    "expected-attribute-value-but-got-eof":
        _(u"Unexpected end of file. Expected attribute value."),
    "expected-attribute-value-but-got-right-bracket":
        _(u"Expected attribute value. Got '>' instead."),
    "equals-in-unquoted-attribute-value":
        _(u"Unexpected = in unquoted attribute"),
    "unexpected-character-in-unquoted-attribute-value":
        _(u"Unexpected character in unquoted attribute"),
    "invalid-character-after-attribute-name":
        _(u"Unexpected character after attribute name."),
    "unexpected-character-after-attribute-value":
        _(u"Unexpected character after attribute value."),
    "eof-in-attribute-value-double-quote":
        _(u"Unexpected end of file in attribute value (\")."),
    "eof-in-attribute-value-single-quote":
        _(u"Unexpected end of file in attribute value (')."),
    "eof-in-attribute-value-no-quotes":
        _(u"Unexpected end of file in attribute value."),
    "unexpected-EOF-after-solidus-in-tag":
        _(u"Unexpected end of file in tag. Expected >"),
    # NOTE: the key below keeps its historical misspelling ("soldius")
    # because other modules reference the error by this exact string.
    "unexpected-character-after-soldius-in-tag":
        _(u"Unexpected character after / in tag. Expected >"),
    "expected-dashes-or-doctype":
        _(u"Expected '--' or 'DOCTYPE'. Not found."),
    "unexpected-bang-after-double-dash-in-comment":
        _(u"Unexpected ! after -- in comment"),
    "unexpected-space-after-double-dash-in-comment":
        _(u"Unexpected space after -- in comment"),
    "incorrect-comment":
        _(u"Incorrect comment."),
    "eof-in-comment":
        _(u"Unexpected end of file in comment."),
    "eof-in-comment-end-dash":
        _(u"Unexpected end of file in comment (-)"),
    "unexpected-dash-after-double-dash-in-comment":
        _(u"Unexpected '-' after '--' found in comment."),
    "eof-in-comment-double-dash":
        _(u"Unexpected end of file in comment (--)."),
    "eof-in-comment-end-space-state":
        _(u"Unexpected end of file in comment."),
    "eof-in-comment-end-bang-state":
        _(u"Unexpected end of file in comment."),
    "unexpected-char-in-comment":
        _(u"Unexpected character in comment found."),
    "need-space-after-doctype":
        _(u"No space after literal string 'DOCTYPE'."),
    "expected-doctype-name-but-got-right-bracket":
        _(u"Unexpected > character. Expected DOCTYPE name."),
    "expected-doctype-name-but-got-eof":
        _(u"Unexpected end of file. Expected DOCTYPE name."),
    "eof-in-doctype-name":
        _(u"Unexpected end of file in DOCTYPE name."),
    "eof-in-doctype":
        _(u"Unexpected end of file in DOCTYPE."),
    "expected-space-or-right-bracket-in-doctype":
        _(u"Expected space or '>'. Got '%(data)s'"),
    "unexpected-end-of-doctype":
        _(u"Unexpected end of DOCTYPE."),
    "unexpected-char-in-doctype":
        _(u"Unexpected character in DOCTYPE."),
    "eof-in-innerhtml":
        _(u"XXX innerHTML EOF"),
    "unexpected-doctype":
        _(u"Unexpected DOCTYPE. Ignored."),
    "non-html-root":
        _(u"html needs to be the first start tag."),
    "expected-doctype-but-got-eof":
        _(u"Unexpected End of file. Expected DOCTYPE."),
    "unknown-doctype":
        _(u"Erroneous DOCTYPE."),
    "expected-doctype-but-got-chars":
        _(u"Unexpected non-space characters. Expected DOCTYPE."),
    "expected-doctype-but-got-start-tag":
        _(u"Unexpected start tag (%(name)s). Expected DOCTYPE."),
    "expected-doctype-but-got-end-tag":
        _(u"Unexpected end tag (%(name)s). Expected DOCTYPE."),
    "end-tag-after-implied-root":
        _(u"Unexpected end tag (%(name)s) after the (implied) root element."),
    "expected-named-closing-tag-but-got-eof":
        _(u"Unexpected end of file. Expected end tag (%(name)s)."),
    "two-heads-are-not-better-than-one":
        _(u"Unexpected start tag head in existing head. Ignored."),
    "unexpected-end-tag":
        _(u"Unexpected end tag (%(name)s). Ignored."),
    "unexpected-start-tag-out-of-my-head":
        _(u"Unexpected start tag (%(name)s) that can be in head. Moved."),
    "unexpected-start-tag":
        _(u"Unexpected start tag (%(name)s)."),
    "missing-end-tag":
        _(u"Missing end tag (%(name)s)."),
    "missing-end-tags":
        _(u"Missing end tags (%(name)s)."),
    "unexpected-start-tag-implies-end-tag":
        _(u"Unexpected start tag (%(startName)s) "
          u"implies end tag (%(endName)s)."),
    "unexpected-start-tag-treated-as":
        _(u"Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
    "deprecated-tag":
        _(u"Unexpected start tag %(name)s. Don't use it!"),
    "unexpected-start-tag-ignored":
        _(u"Unexpected start tag %(name)s. Ignored."),
    "expected-one-end-tag-but-got-another":
        _(u"Unexpected end tag (%(gotName)s). "
          u"Missing end tag (%(expectedName)s)."),
    "end-tag-too-early":
        _(u"End tag (%(name)s) seen too early. Expected other end tag."),
    "end-tag-too-early-named":
        _(u"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
    "end-tag-too-early-ignored":
        _(u"End tag (%(name)s) seen too early. Ignored."),
    "adoption-agency-1.1":
        _(u"End tag (%(name)s) violates step 1, "
          u"paragraph 1 of the adoption agency algorithm."),
    "adoption-agency-1.2":
        _(u"End tag (%(name)s) violates step 1, "
          u"paragraph 2 of the adoption agency algorithm."),
    "adoption-agency-1.3":
        _(u"End tag (%(name)s) violates step 1, "
          u"paragraph 3 of the adoption agency algorithm."),
    "unexpected-end-tag-treated-as":
        _(u"Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
    "no-end-tag":
        _(u"This element (%(name)s) has no end tag."),
    "unexpected-implied-end-tag-in-table":
        _(u"Unexpected implied end tag (%(name)s) in the table phase."),
    "unexpected-implied-end-tag-in-table-body":
        _(u"Unexpected implied end tag (%(name)s) in the table body phase."),
    "unexpected-char-implies-table-voodoo":
        _(u"Unexpected non-space characters in "
          u"table context caused voodoo mode."),
    "unexpected-hidden-input-in-table":
        _(u"Unexpected input with type hidden in table context."),
    "unexpected-form-in-table":
        _(u"Unexpected form in table context."),
    "unexpected-start-tag-implies-table-voodoo":
        _(u"Unexpected start tag (%(name)s) in "
          u"table context caused voodoo mode."),
    "unexpected-end-tag-implies-table-voodoo":
        _(u"Unexpected end tag (%(name)s) in "
          u"table context caused voodoo mode."),
    "unexpected-cell-in-table-body":
        _(u"Unexpected table cell start tag (%(name)s) "
          u"in the table body phase."),
    "unexpected-cell-end-tag":
        _(u"Got table cell end tag (%(name)s) "
          u"while required end tags are missing."),
    "unexpected-end-tag-in-table-body":
        _(u"Unexpected end tag (%(name)s) in the table body phase. Ignored."),
    "unexpected-implied-end-tag-in-table-row":
        _(u"Unexpected implied end tag (%(name)s) in the table row phase."),
    "unexpected-end-tag-in-table-row":
        _(u"Unexpected end tag (%(name)s) in the table row phase. Ignored."),
    "unexpected-select-in-select":
        _(u"Unexpected select start tag in the select phase "
          u"treated as select end tag."),
    "unexpected-input-in-select":
        _(u"Unexpected input start tag in the select phase."),
    "unexpected-start-tag-in-select":
        # Fixed message: closing parenthesis after %(name)s was missing.
        _(u"Unexpected start tag token (%(name)s) in the select phase. "
          u"Ignored."),
    "unexpected-end-tag-in-select":
        _(u"Unexpected end tag (%(name)s) in the select phase. Ignored."),
    "unexpected-table-element-start-tag-in-select-in-table":
        _(u"Unexpected table element start tag (%(name)s) in the select in table phase."),
    "unexpected-table-element-end-tag-in-select-in-table":
        _(u"Unexpected table element end tag (%(name)s) in the select in table phase."),
    "unexpected-char-after-body":
        _(u"Unexpected non-space characters in the after body phase."),
    "unexpected-start-tag-after-body":
        _(u"Unexpected start tag token (%(name)s)"
          u" in the after body phase."),
    "unexpected-end-tag-after-body":
        _(u"Unexpected end tag token (%(name)s)"
          u" in the after body phase."),
    "unexpected-char-in-frameset":
        # Fixed typo: was "Unepxected characters ..."
        _(u"Unexpected characters in the frameset phase. Characters ignored."),
    "unexpected-start-tag-in-frameset":
        _(u"Unexpected start tag token (%(name)s)"
          u" in the frameset phase. Ignored."),
    "unexpected-frameset-in-frameset-innerhtml":
        _(u"Unexpected end tag token (frameset) "
          u"in the frameset phase (innerHTML)."),
    "unexpected-end-tag-in-frameset":
        _(u"Unexpected end tag token (%(name)s)"
          u" in the frameset phase. Ignored."),
    "unexpected-char-after-frameset":
        _(u"Unexpected non-space characters in the "
          u"after frameset phase. Ignored."),
    "unexpected-start-tag-after-frameset":
        _(u"Unexpected start tag (%(name)s)"
          u" in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-frameset":
        _(u"Unexpected end tag (%(name)s)"
          u" in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-body-innerhtml":
        _(u"Unexpected end tag after body(innerHtml)"),
    "expected-eof-but-got-char":
        _(u"Unexpected non-space characters. Expected end of file."),
    "expected-eof-but-got-start-tag":
        _(u"Unexpected start tag (%(name)s)"
          u". Expected end of file."),
    "expected-eof-but-got-end-tag":
        _(u"Unexpected end tag (%(name)s)"
          u". Expected end of file."),
    "eof-in-table":
        _(u"Unexpected end of file. Expected table content."),
    "eof-in-select":
        _(u"Unexpected end of file. Expected select content."),
    "eof-in-frameset":
        _(u"Unexpected end of file. Expected frameset content."),
    "eof-in-script-in-script":
        _(u"Unexpected end of file. Expected script content."),
    "eof-in-foreign-lands":
        _(u"Unexpected end of file. Expected foreign content"),
    "non-void-element-with-trailing-solidus":
        _(u"Trailing solidus not allowed on element %(name)s"),
    "unexpected-html-element-in-foreign-content":
        _(u"Element %(name)s not allowed in a non-html context"),
    "unexpected-end-tag-before-html":
        _(u"Unexpected end tag (%(name)s) before html."),
    "XXX-undefined-error":
        # Wrapped in _() for consistency with every other entry.
        _(u"Undefined error (this sucks and should be fixed)"),
}
# XML namespace URIs used throughout the parser, keyed by canonical prefix.
namespaces = {
    "html": "http://www.w3.org/1999/xhtml",
    "mathml": "http://www.w3.org/1998/Math/MathML",
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
    "xml": "http://www.w3.org/XML/1998/namespace",
    "xmlns": "http://www.w3.org/2000/xmlns/",
}
# (namespace, tag) pairs whose elements establish a new scope on the stack
# of open elements.
scopingElements = frozenset(
    [(namespaces["html"], tag) for tag in (
        "applet", "caption", "html", "marquee", "object",
        "table", "td", "th")]
    + [(namespaces["mathml"], tag) for tag in (
        "mi", "mo", "mn", "ms", "mtext", "annotation-xml")]
    + [(namespaces["svg"], tag) for tag in (
        "foreignObject", "desc", "title")]
)
# (namespace, tag) pairs of the formatting elements tracked by the list of
# active formatting elements.
formattingElements = frozenset(
    [(namespaces["html"], tag) for tag in (
        "a", "b", "big", "code", "em", "font", "i",
        "nobr", "s", "small", "strike", "strong", "tt", "u")]
)
# (namespace, tag) pairs of elements in the "special" category.
specialElements = frozenset(
    [(namespaces["html"], tag) for tag in (
        "address", "applet", "area", "article", "aside", "base",
        "basefont", "bgsound", "blockquote", "body", "br", "button",
        "caption", "center", "col", "colgroup", "command", "dd",
        "details", "dir", "div", "dl", "dt", "embed", "fieldset",
        "figure", "footer", "form", "frame", "frameset",
        "h1", "h2", "h3", "h4", "h5", "h6",
        "head", "header", "hr", "html", "iframe",
        # Note that image is commented out in the spec as "this isn't an
        # element that can end up on the stack, so it doesn't matter,"
        "image",
        "img", "input", "isindex", "li", "link", "listing", "marquee",
        "menu", "meta", "nav", "noembed", "noframes", "noscript",
        "object", "ol", "p", "param", "plaintext", "pre", "script",
        "section", "select", "style", "table", "tbody", "td",
        "textarea", "tfoot", "th", "thead", "title", "tr", "ul",
        "wbr", "xmp")]
    + [(namespaces["svg"], "foreignObject")]
)
# (namespace, tag) pairs of HTML integration points: foreign elements whose
# children are parsed as HTML.
htmlIntegrationPointElements = frozenset((
    # Fixed misspelling: was "annotaion-xml", which could never match the
    # actual MathML element name "annotation-xml".
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title")
))
# (namespace, tag) pairs of MathML text integration points.
mathmlTextIntegrationPointElements = frozenset(
    [(namespaces["mathml"], tag) for tag in (
        "mi", "mo", "mn", "ms", "mtext")]
)
# The five characters HTML treats as whitespace:
# tab, line feed, form feed, space, carriage return.
spaceCharacters = frozenset(u"\t\n\u000C \r")
# Elements whose presence on the stack switches insertion back to a
# table-related mode.
tableInsertModeElements = frozenset((
    "table", "tbody", "tfoot", "thead", "tr"))
# Character-class sets used for fast membership tests while tokenizing.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

# Translation table (for unicode.translate) mapping ASCII upper-case code
# points to their lower-case counterparts.
asciiUpper2Lower = dict(zip(
    [ord(c) for c in string.ascii_uppercase],
    [ord(c) for c in string.ascii_lowercase]))
# Heading elements need to be _ordered_ (most to least important), hence a
# tuple rather than a frozenset.
headingElements = ("h1", "h2", "h3", "h4", "h5", "h6")
# Elements that never have an end tag (void elements).
voidElements = frozenset((
    "base", "command", "event-source", "link", "meta", "hr", "br",
    "img", "embed", "param", "area", "col", "input", "source"))
# Elements whose content is parsed as CDATA.
cdataElements = frozenset(("title", "textarea"))

# Elements whose content is parsed as RCDATA.
rcdataElements = frozenset((
    "style", "script", "xmp", "iframe",
    "noembed", "noframes", "noscript"))
# Boolean attributes, keyed by tag name ("" applies to any tag). Each value
# is the frozenset of attribute names that are boolean on that tag.
booleanAttributes = {
    "": frozenset(("irrelevant",)),
    "style": frozenset(("scoped",)),
    "img": frozenset(("ismap",)),
    "audio": frozenset(("autoplay", "controls")),
    "video": frozenset(("autoplay", "controls")),
    "script": frozenset(("defer", "async")),
    "details": frozenset(("open",)),
    "datagrid": frozenset(("multiple", "disabled")),
    "command": frozenset(("hidden", "disabled", "checked", "default")),
    # Fixed: was frozenset(("noshade")) -- a missing comma made this the
    # set of the *letters* of "noshade" instead of the attribute name.
    "hr": frozenset(("noshade",)),
    "menu": frozenset(("autosubmit",)),
    "fieldset": frozenset(("disabled", "readonly")),
    "option": frozenset(("disabled", "readonly", "selected")),
    "optgroup": frozenset(("disabled", "readonly")),
    "button": frozenset(("disabled", "autofocus")),
    "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
    "select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
    "output": frozenset(("disabled", "readonly")),
}
# Windows-1252 fallback for numeric character references in the C1 range:
# entry i gives the Unicode codepoint that byte 0x80+i maps to in
# windows-1252 (U+FFFD for undefined bytes). It has to be an _ordered_,
# indexable sequence -- hence a tuple, not a frozenset.
entitiesWindows1252 = (
    0x20AC,  # 0x80 EURO SIGN
    0xFFFD,  # 0x81 UNDEFINED
    0x201A,  # 0x82 SINGLE LOW-9 QUOTATION MARK
    0x0192,  # 0x83 LATIN SMALL LETTER F WITH HOOK
    0x201E,  # 0x84 DOUBLE LOW-9 QUOTATION MARK
    0x2026,  # 0x85 HORIZONTAL ELLIPSIS
    0x2020,  # 0x86 DAGGER
    0x2021,  # 0x87 DOUBLE DAGGER
    0x02C6,  # 0x88 MODIFIER LETTER CIRCUMFLEX ACCENT
    0x2030,  # 0x89 PER MILLE SIGN
    0x0160,  # 0x8A LATIN CAPITAL LETTER S WITH CARON
    0x2039,  # 0x8B SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    0x0152,  # 0x8C LATIN CAPITAL LIGATURE OE
    0xFFFD,  # 0x8D UNDEFINED
    0x017D,  # 0x8E LATIN CAPITAL LETTER Z WITH CARON
    0xFFFD,  # 0x8F UNDEFINED
    0xFFFD,  # 0x90 UNDEFINED
    0x2018,  # 0x91 LEFT SINGLE QUOTATION MARK
    0x2019,  # 0x92 RIGHT SINGLE QUOTATION MARK
    0x201C,  # 0x93 LEFT DOUBLE QUOTATION MARK
    0x201D,  # 0x94 RIGHT DOUBLE QUOTATION MARK
    0x2022,  # 0x95 BULLET
    0x2013,  # 0x96 EN DASH
    0x2014,  # 0x97 EM DASH
    0x02DC,  # 0x98 SMALL TILDE
    0x2122,  # 0x99 TRADE MARK SIGN
    0x0161,  # 0x9A LATIN SMALL LETTER S WITH CARON
    0x203A,  # 0x9B SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    0x0153,  # 0x9C LATIN SMALL LIGATURE OE
    0xFFFD,  # 0x9D UNDEFINED
    0x017E,  # 0x9E LATIN SMALL LETTER Z WITH CARON
    0x0178,  # 0x9F LATIN CAPITAL LETTER Y WITH DIAERESIS
)
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
entities = {
"AElig": u"\xc6",
"AElig;": u"\xc6",
"AMP": u"&",
"AMP;": u"&",
"Aacute": u"\xc1",
"Aacute;": u"\xc1",
"Abreve;": u"\u0102",
"Acirc": u"\xc2",
"Acirc;": u"\xc2",
"Acy;": u"\u0410",
"Afr;": u"\U0001d504",
"Agrave": u"\xc0",
"Agrave;": u"\xc0",
"Alpha;": u"\u0391",
"Amacr;": u"\u0100",
"And;": u"\u2a53",
"Aogon;": u"\u0104",
"Aopf;": u"\U0001d538",
"ApplyFunction;": u"\u2061",
"Aring": u"\xc5",
"Aring;": u"\xc5",
"Ascr;": u"\U0001d49c",
"Assign;": u"\u2254",
"Atilde": u"\xc3",
"Atilde;": u"\xc3",
"Auml": u"\xc4",
"Auml;": u"\xc4",
"Backslash;": u"\u2216",
"Barv;": u"\u2ae7",
"Barwed;": u"\u2306",
"Bcy;": u"\u0411",
"Because;": u"\u2235",
"Bernoullis;": u"\u212c",
"Beta;": u"\u0392",
"Bfr;": u"\U0001d505",
"Bopf;": u"\U0001d539",
"Breve;": u"\u02d8",
"Bscr;": u"\u212c",
"Bumpeq;": u"\u224e",
"CHcy;": u"\u0427",
"COPY": u"\xa9",
"COPY;": u"\xa9",
"Cacute;": u"\u0106",
"Cap;": u"\u22d2",
"CapitalDifferentialD;": u"\u2145",
"Cayleys;": u"\u212d",
"Ccaron;": u"\u010c",
"Ccedil": u"\xc7",
"Ccedil;": u"\xc7",
"Ccirc;": u"\u0108",
"Cconint;": u"\u2230",
"Cdot;": u"\u010a",
"Cedilla;": u"\xb8",
"CenterDot;": u"\xb7",
"Cfr;": u"\u212d",
"Chi;": u"\u03a7",
"CircleDot;": u"\u2299",
"CircleMinus;": u"\u2296",
"CirclePlus;": u"\u2295",
"CircleTimes;": u"\u2297",
"ClockwiseContourIntegral;": u"\u2232",
"CloseCurlyDoubleQuote;": u"\u201d",
"CloseCurlyQuote;": u"\u2019",
"Colon;": u"\u2237",
"Colone;": u"\u2a74",
"Congruent;": u"\u2261",
"Conint;": u"\u222f",
"ContourIntegral;": u"\u222e",
"Copf;": u"\u2102",
"Coproduct;": u"\u2210",
"CounterClockwiseContourIntegral;": u"\u2233",
"Cross;": u"\u2a2f",
"Cscr;": u"\U0001d49e",
"Cup;": u"\u22d3",
"CupCap;": u"\u224d",
"DD;": u"\u2145",
"DDotrahd;": u"\u2911",
"DJcy;": u"\u0402",
"DScy;": u"\u0405",
"DZcy;": u"\u040f",
"Dagger;": u"\u2021",
"Darr;": u"\u21a1",
"Dashv;": u"\u2ae4",
"Dcaron;": u"\u010e",
"Dcy;": u"\u0414",
"Del;": u"\u2207",
"Delta;": u"\u0394",
"Dfr;": u"\U0001d507",
"DiacriticalAcute;": u"\xb4",
"DiacriticalDot;": u"\u02d9",
"DiacriticalDoubleAcute;": u"\u02dd",
"DiacriticalGrave;": u"`",
"DiacriticalTilde;": u"\u02dc",
"Diamond;": u"\u22c4",
"DifferentialD;": u"\u2146",
"Dopf;": u"\U0001d53b",
"Dot;": u"\xa8",
"DotDot;": u"\u20dc",
"DotEqual;": u"\u2250",
"DoubleContourIntegral;": u"\u222f",
"DoubleDot;": u"\xa8",
"DoubleDownArrow;": u"\u21d3",
"DoubleLeftArrow;": u"\u21d0",
"DoubleLeftRightArrow;": u"\u21d4",
"DoubleLeftTee;": u"\u2ae4",
"DoubleLongLeftArrow;": u"\u27f8",
"DoubleLongLeftRightArrow;": u"\u27fa",
"DoubleLongRightArrow;": u"\u27f9",
"DoubleRightArrow;": u"\u21d2",
"DoubleRightTee;": u"\u22a8",
"DoubleUpArrow;": u"\u21d1",
"DoubleUpDownArrow;": u"\u21d5",
"DoubleVerticalBar;": u"\u2225",
"DownArrow;": u"\u2193",
"DownArrowBar;": u"\u2913",
"DownArrowUpArrow;": u"\u21f5",
"DownBreve;": u"\u0311",
"DownLeftRightVector;": u"\u2950",
"DownLeftTeeVector;": u"\u295e",
"DownLeftVector;": u"\u21bd",
"DownLeftVectorBar;": u"\u2956",
"DownRightTeeVector;": u"\u295f",
"DownRightVector;": u"\u21c1",
"DownRightVectorBar;": u"\u2957",
"DownTee;": u"\u22a4",
"DownTeeArrow;": u"\u21a7",
"Downarrow;": u"\u21d3",
"Dscr;": u"\U0001d49f",
"Dstrok;": u"\u0110",
"ENG;": u"\u014a",
"ETH": u"\xd0",
"ETH;": u"\xd0",
"Eacute": u"\xc9",
"Eacute;": u"\xc9",
"Ecaron;": u"\u011a",
"Ecirc": u"\xca",
"Ecirc;": u"\xca",
"Ecy;": u"\u042d",
"Edot;": u"\u0116",
"Efr;": u"\U0001d508",
"Egrave": u"\xc8",
"Egrave;": u"\xc8",
"Element;": u"\u2208",
"Emacr;": u"\u0112",
"EmptySmallSquare;": u"\u25fb",
"EmptyVerySmallSquare;": u"\u25ab",
"Eogon;": u"\u0118",
"Eopf;": u"\U0001d53c",
"Epsilon;": u"\u0395",
"Equal;": u"\u2a75",
"EqualTilde;": u"\u2242",
"Equilibrium;": u"\u21cc",
"Escr;": u"\u2130",
"Esim;": u"\u2a73",
"Eta;": u"\u0397",
"Euml": u"\xcb",
"Euml;": u"\xcb",
"Exists;": u"\u2203",
"ExponentialE;": u"\u2147",
"Fcy;": u"\u0424",
"Ffr;": u"\U0001d509",
"FilledSmallSquare;": u"\u25fc",
"FilledVerySmallSquare;": u"\u25aa",
"Fopf;": u"\U0001d53d",
"ForAll;": u"\u2200",
"Fouriertrf;": u"\u2131",
"Fscr;": u"\u2131",
"GJcy;": u"\u0403",
"GT": u">",
"GT;": u">",
"Gamma;": u"\u0393",
"Gammad;": u"\u03dc",
"Gbreve;": u"\u011e",
"Gcedil;": u"\u0122",
"Gcirc;": u"\u011c",
"Gcy;": u"\u0413",
"Gdot;": u"\u0120",
"Gfr;": u"\U0001d50a",
"Gg;": u"\u22d9",
"Gopf;": u"\U0001d53e",
"GreaterEqual;": u"\u2265",
"GreaterEqualLess;": u"\u22db",
"GreaterFullEqual;": u"\u2267",
"GreaterGreater;": u"\u2aa2",
"GreaterLess;": u"\u2277",
"GreaterSlantEqual;": u"\u2a7e",
"GreaterTilde;": u"\u2273",
"Gscr;": u"\U0001d4a2",
"Gt;": u"\u226b",
"HARDcy;": u"\u042a",
"Hacek;": u"\u02c7",
"Hat;": u"^",
"Hcirc;": u"\u0124",
"Hfr;": u"\u210c",
"HilbertSpace;": u"\u210b",
"Hopf;": u"\u210d",
"HorizontalLine;": u"\u2500",
"Hscr;": u"\u210b",
"Hstrok;": u"\u0126",
"HumpDownHump;": u"\u224e",
"HumpEqual;": u"\u224f",
"IEcy;": u"\u0415",
"IJlig;": u"\u0132",
"IOcy;": u"\u0401",
"Iacute": u"\xcd",
"Iacute;": u"\xcd",
"Icirc": u"\xce",
"Icirc;": u"\xce",
"Icy;": u"\u0418",
"Idot;": u"\u0130",
"Ifr;": u"\u2111",
"Igrave": u"\xcc",
"Igrave;": u"\xcc",
"Im;": u"\u2111",
"Imacr;": u"\u012a",
"ImaginaryI;": u"\u2148",
"Implies;": u"\u21d2",
"Int;": u"\u222c",
"Integral;": u"\u222b",
"Intersection;": u"\u22c2",
"InvisibleComma;": u"\u2063",
"InvisibleTimes;": u"\u2062",
"Iogon;": u"\u012e",
"Iopf;": u"\U0001d540",
"Iota;": u"\u0399",
"Iscr;": u"\u2110",
"Itilde;": u"\u0128",
"Iukcy;": u"\u0406",
"Iuml": u"\xcf",
"Iuml;": u"\xcf",
"Jcirc;": u"\u0134",
"Jcy;": u"\u0419",
"Jfr;": u"\U0001d50d",
"Jopf;": u"\U0001d541",
"Jscr;": u"\U0001d4a5",
"Jsercy;": u"\u0408",
"Jukcy;": u"\u0404",
"KHcy;": u"\u0425",
"KJcy;": u"\u040c",
"Kappa;": u"\u039a",
"Kcedil;": u"\u0136",
"Kcy;": u"\u041a",
"Kfr;": u"\U0001d50e",
"Kopf;": u"\U0001d542",
"Kscr;": u"\U0001d4a6",
"LJcy;": u"\u0409",
"LT": u"<",
"LT;": u"<",
"Lacute;": u"\u0139",
"Lambda;": u"\u039b",
"Lang;": u"\u27ea",
"Laplacetrf;": u"\u2112",
"Larr;": u"\u219e",
"Lcaron;": u"\u013d",
"Lcedil;": u"\u013b",
"Lcy;": u"\u041b",
"LeftAngleBracket;": u"\u27e8",
"LeftArrow;": u"\u2190",
"LeftArrowBar;": u"\u21e4",
"LeftArrowRightArrow;": u"\u21c6",
"LeftCeiling;": u"\u2308",
"LeftDoubleBracket;": u"\u27e6",
"LeftDownTeeVector;": u"\u2961",
"LeftDownVector;": u"\u21c3",
"LeftDownVectorBar;": u"\u2959",
"LeftFloor;": u"\u230a",
"LeftRightArrow;": u"\u2194",
"LeftRightVector;": u"\u294e",
"LeftTee;": u"\u22a3",
"LeftTeeArrow;": u"\u21a4",
"LeftTeeVector;": u"\u295a",
"LeftTriangle;": u"\u22b2",
"LeftTriangleBar;": u"\u29cf",
"LeftTriangleEqual;": u"\u22b4",
"LeftUpDownVector;": u"\u2951",
"LeftUpTeeVector;": u"\u2960",
"LeftUpVector;": u"\u21bf",
"LeftUpVectorBar;": u"\u2958",
"LeftVector;": u"\u21bc",
"LeftVectorBar;": u"\u2952",
"Leftarrow;": u"\u21d0",
"Leftrightarrow;": u"\u21d4",
"LessEqualGreater;": u"\u22da",
"LessFullEqual;": u"\u2266",
"LessGreater;": u"\u2276",
"LessLess;": u"\u2aa1",
"LessSlantEqual;": u"\u2a7d",
"LessTilde;": u"\u2272",
"Lfr;": u"\U0001d50f",
"Ll;": u"\u22d8",
"Lleftarrow;": u"\u21da",
"Lmidot;": u"\u013f",
"LongLeftArrow;": u"\u27f5",
"LongLeftRightArrow;": u"\u27f7",
"LongRightArrow;": u"\u27f6",
"Longleftarrow;": u"\u27f8",
"Longleftrightarrow;": u"\u27fa",
"Longrightarrow;": u"\u27f9",
"Lopf;": u"\U0001d543",
"LowerLeftArrow;": u"\u2199",
"LowerRightArrow;": u"\u2198",
"Lscr;": u"\u2112",
"Lsh;": u"\u21b0",
"Lstrok;": u"\u0141",
"Lt;": u"\u226a",
"Map;": u"\u2905",
"Mcy;": u"\u041c",
"MediumSpace;": u"\u205f",
"Mellintrf;": u"\u2133",
"Mfr;": u"\U0001d510",
"MinusPlus;": u"\u2213",
"Mopf;": u"\U0001d544",
"Mscr;": u"\u2133",
"Mu;": u"\u039c",
"NJcy;": u"\u040a",
"Nacute;": u"\u0143",
"Ncaron;": u"\u0147",
"Ncedil;": u"\u0145",
"Ncy;": u"\u041d",
"NegativeMediumSpace;": u"\u200b",
"NegativeThickSpace;": u"\u200b",
"NegativeThinSpace;": u"\u200b",
"NegativeVeryThinSpace;": u"\u200b",
"NestedGreaterGreater;": u"\u226b",
"NestedLessLess;": u"\u226a",
"NewLine;": u"\n",
"Nfr;": u"\U0001d511",
"NoBreak;": u"\u2060",
"NonBreakingSpace;": u"\xa0",
"Nopf;": u"\u2115",
"Not;": u"\u2aec",
"NotCongruent;": u"\u2262",
"NotCupCap;": u"\u226d",
"NotDoubleVerticalBar;": u"\u2226",
"NotElement;": u"\u2209",
"NotEqual;": u"\u2260",
"NotEqualTilde;": u"\u2242\u0338",
"NotExists;": u"\u2204",
"NotGreater;": u"\u226f",
"NotGreaterEqual;": u"\u2271",
"NotGreaterFullEqual;": u"\u2267\u0338",
"NotGreaterGreater;": u"\u226b\u0338",
"NotGreaterLess;": u"\u2279",
"NotGreaterSlantEqual;": u"\u2a7e\u0338",
"NotGreaterTilde;": u"\u2275",
"NotHumpDownHump;": u"\u224e\u0338",
"NotHumpEqual;": u"\u224f\u0338",
"NotLeftTriangle;": u"\u22ea",
"NotLeftTriangleBar;": u"\u29cf\u0338",
"NotLeftTriangleEqual;": u"\u22ec",
"NotLess;": u"\u226e",
"NotLessEqual;": u"\u2270",
"NotLessGreater;": u"\u2278",
"NotLessLess;": u"\u226a\u0338",
"NotLessSlantEqual;": u"\u2a7d\u0338",
"NotLessTilde;": u"\u2274",
"NotNestedGreaterGreater;": u"\u2aa2\u0338",
"NotNestedLessLess;": u"\u2aa1\u0338",
"NotPrecedes;": u"\u2280",
"NotPrecedesEqual;": u"\u2aaf\u0338",
"NotPrecedesSlantEqual;": u"\u22e0",
"NotReverseElement;": u"\u220c",
"NotRightTriangle;": u"\u22eb",
"NotRightTriangleBar;": u"\u29d0\u0338",
"NotRightTriangleEqual;": u"\u22ed",
"NotSquareSubset;": u"\u228f\u0338",
"NotSquareSubsetEqual;": u"\u22e2",
"NotSquareSuperset;": u"\u2290\u0338",
"NotSquareSupersetEqual;": u"\u22e3",
"NotSubset;": u"\u2282\u20d2",
"NotSubsetEqual;": u"\u2288",
"NotSucceeds;": u"\u2281",
"NotSucceedsEqual;": u"\u2ab0\u0338",
"NotSucceedsSlantEqual;": u"\u22e1",
"NotSucceedsTilde;": u"\u227f\u0338",
"NotSuperset;": u"\u2283\u20d2",
"NotSupersetEqual;": u"\u2289",
"NotTilde;": u"\u2241",
"NotTildeEqual;": u"\u2244",
"NotTildeFullEqual;": u"\u2247",
"NotTildeTilde;": u"\u2249",
"NotVerticalBar;": u"\u2224",
"Nscr;": u"\U0001d4a9",
"Ntilde": u"\xd1",
"Ntilde;": u"\xd1",
"Nu;": u"\u039d",
"OElig;": u"\u0152",
"Oacute": u"\xd3",
"Oacute;": u"\xd3",
"Ocirc": u"\xd4",
"Ocirc;": u"\xd4",
"Ocy;": u"\u041e",
"Odblac;": u"\u0150",
"Ofr;": u"\U0001d512",
"Ograve": u"\xd2",
"Ograve;": u"\xd2",
"Omacr;": u"\u014c",
"Omega;": u"\u03a9",
"Omicron;": u"\u039f",
"Oopf;": u"\U0001d546",
"OpenCurlyDoubleQuote;": u"\u201c",
"OpenCurlyQuote;": u"\u2018",
"Or;": u"\u2a54",
"Oscr;": u"\U0001d4aa",
"Oslash": u"\xd8",
"Oslash;": u"\xd8",
"Otilde": u"\xd5",
"Otilde;": u"\xd5",
"Otimes;": u"\u2a37",
"Ouml": u"\xd6",
"Ouml;": u"\xd6",
"OverBar;": u"\u203e",
"OverBrace;": u"\u23de",
"OverBracket;": u"\u23b4",
"OverParenthesis;": u"\u23dc",
"PartialD;": u"\u2202",
"Pcy;": u"\u041f",
"Pfr;": u"\U0001d513",
"Phi;": u"\u03a6",
"Pi;": u"\u03a0",
"PlusMinus;": u"\xb1",
"Poincareplane;": u"\u210c",
"Popf;": u"\u2119",
"Pr;": u"\u2abb",
"Precedes;": u"\u227a",
"PrecedesEqual;": u"\u2aaf",
"PrecedesSlantEqual;": u"\u227c",
"PrecedesTilde;": u"\u227e",
"Prime;": u"\u2033",
"Product;": u"\u220f",
"Proportion;": u"\u2237",
"Proportional;": u"\u221d",
"Pscr;": u"\U0001d4ab",
"Psi;": u"\u03a8",
"QUOT": u"\"",
"QUOT;": u"\"",
"Qfr;": u"\U0001d514",
"Qopf;": u"\u211a",
"Qscr;": u"\U0001d4ac",
"RBarr;": u"\u2910",
"REG": u"\xae",
"REG;": u"\xae",
"Racute;": u"\u0154",
"Rang;": u"\u27eb",
"Rarr;": u"\u21a0",
"Rarrtl;": u"\u2916",
"Rcaron;": u"\u0158",
"Rcedil;": u"\u0156",
"Rcy;": u"\u0420",
"Re;": u"\u211c",
"ReverseElement;": u"\u220b",
"ReverseEquilibrium;": u"\u21cb",
"ReverseUpEquilibrium;": u"\u296f",
"Rfr;": u"\u211c",
"Rho;": u"\u03a1",
"RightAngleBracket;": u"\u27e9",
"RightArrow;": u"\u2192",
"RightArrowBar;": u"\u21e5",
"RightArrowLeftArrow;": u"\u21c4",
"RightCeiling;": u"\u2309",
"RightDoubleBracket;": u"\u27e7",
"RightDownTeeVector;": u"\u295d",
"RightDownVector;": u"\u21c2",
"RightDownVectorBar;": u"\u2955",
"RightFloor;": u"\u230b",
"RightTee;": u"\u22a2",
"RightTeeArrow;": u"\u21a6",
"RightTeeVector;": u"\u295b",
"RightTriangle;": u"\u22b3",
"RightTriangleBar;": u"\u29d0",
"RightTriangleEqual;": u"\u22b5",
"RightUpDownVector;": u"\u294f",
"RightUpTeeVector;": u"\u295c",
"RightUpVector;": u"\u21be",
"RightUpVectorBar;": u"\u2954",
"RightVector;": u"\u21c0",
"RightVectorBar;": u"\u2953",
"Rightarrow;": u"\u21d2",
"Ropf;": u"\u211d",
"RoundImplies;": u"\u2970",
"Rrightarrow;": u"\u21db",
"Rscr;": u"\u211b",
"Rsh;": u"\u21b1",
"RuleDelayed;": u"\u29f4",
"SHCHcy;": u"\u0429",
"SHcy;": u"\u0428",
"SOFTcy;": u"\u042c",
"Sacute;": u"\u015a",
"Sc;": u"\u2abc",
"Scaron;": u"\u0160",
"Scedil;": u"\u015e",
"Scirc;": u"\u015c",
"Scy;": u"\u0421",
"Sfr;": u"\U0001d516",
"ShortDownArrow;": u"\u2193",
"ShortLeftArrow;": u"\u2190",
"ShortRightArrow;": u"\u2192",
"ShortUpArrow;": u"\u2191",
"Sigma;": u"\u03a3",
"SmallCircle;": u"\u2218",
"Sopf;": u"\U0001d54a",
"Sqrt;": u"\u221a",
"Square;": u"\u25a1",
"SquareIntersection;": u"\u2293",
"SquareSubset;": u"\u228f",
"SquareSubsetEqual;": u"\u2291",
"SquareSuperset;": u"\u2290",
"SquareSupersetEqual;": u"\u2292",
"SquareUnion;": u"\u2294",
"Sscr;": u"\U0001d4ae",
"Star;": u"\u22c6",
"Sub;": u"\u22d0",
"Subset;": u"\u22d0",
"SubsetEqual;": u"\u2286",
"Succeeds;": u"\u227b",
"SucceedsEqual;": u"\u2ab0",
"SucceedsSlantEqual;": u"\u227d",
"SucceedsTilde;": u"\u227f",
"SuchThat;": u"\u220b",
"Sum;": u"\u2211",
"Sup;": u"\u22d1",
"Superset;": u"\u2283",
"SupersetEqual;": u"\u2287",
"Supset;": u"\u22d1",
"THORN": u"\xde",
"THORN;": u"\xde",
"TRADE;": u"\u2122",
"TSHcy;": u"\u040b",
"TScy;": u"\u0426",
"Tab;": u"\t",
"Tau;": u"\u03a4",
"Tcaron;": u"\u0164",
"Tcedil;": u"\u0162",
"Tcy;": u"\u0422",
"Tfr;": u"\U0001d517",
"Therefore;": u"\u2234",
"Theta;": u"\u0398",
"ThickSpace;": u"\u205f\u200a",
"ThinSpace;": u"\u2009",
"Tilde;": u"\u223c",
"TildeEqual;": u"\u2243",
"TildeFullEqual;": u"\u2245",
"TildeTilde;": u"\u2248",
"Topf;": u"\U0001d54b",
"TripleDot;": u"\u20db",
"Tscr;": u"\U0001d4af",
"Tstrok;": u"\u0166",
"Uacute": u"\xda",
"Uacute;": u"\xda",
"Uarr;": u"\u219f",
"Uarrocir;": u"\u2949",
"Ubrcy;": u"\u040e",
"Ubreve;": u"\u016c",
"Ucirc": u"\xdb",
"Ucirc;": u"\xdb",
"Ucy;": u"\u0423",
"Udblac;": u"\u0170",
"Ufr;": u"\U0001d518",
"Ugrave": u"\xd9",
"Ugrave;": u"\xd9",
"Umacr;": u"\u016a",
"UnderBar;": u"_",
"UnderBrace;": u"\u23df",
"UnderBracket;": u"\u23b5",
"UnderParenthesis;": u"\u23dd",
"Union;": u"\u22c3",
"UnionPlus;": u"\u228e",
"Uogon;": u"\u0172",
"Uopf;": u"\U0001d54c",
"UpArrow;": u"\u2191",
"UpArrowBar;": u"\u2912",
"UpArrowDownArrow;": u"\u21c5",
"UpDownArrow;": u"\u2195",
"UpEquilibrium;": u"\u296e",
"UpTee;": u"\u22a5",
"UpTeeArrow;": u"\u21a5",
"Uparrow;": u"\u21d1",
"Updownarrow;": u"\u21d5",
"UpperLeftArrow;": u"\u2196",
"UpperRightArrow;": u"\u2197",
"Upsi;": u"\u03d2",
"Upsilon;": u"\u03a5",
"Uring;": u"\u016e",
"Uscr;": u"\U0001d4b0",
"Utilde;": u"\u0168",
"Uuml": u"\xdc",
"Uuml;": u"\xdc",
"VDash;": u"\u22ab",
"Vbar;": u"\u2aeb",
"Vcy;": u"\u0412",
"Vdash;": u"\u22a9",
"Vdashl;": u"\u2ae6",
"Vee;": u"\u22c1",
"Verbar;": u"\u2016",
"Vert;": u"\u2016",
"VerticalBar;": u"\u2223",
"VerticalLine;": u"|",
"VerticalSeparator;": u"\u2758",
"VerticalTilde;": u"\u2240",
"VeryThinSpace;": u"\u200a",
"Vfr;": u"\U0001d519",
"Vopf;": u"\U0001d54d",
"Vscr;": u"\U0001d4b1",
"Vvdash;": u"\u22aa",
"Wcirc;": u"\u0174",
"Wedge;": u"\u22c0",
"Wfr;": u"\U0001d51a",
"Wopf;": u"\U0001d54e",
"Wscr;": u"\U0001d4b2",
"Xfr;": u"\U0001d51b",
"Xi;": u"\u039e",
"Xopf;": u"\U0001d54f",
"Xscr;": u"\U0001d4b3",
"YAcy;": u"\u042f",
"YIcy;": u"\u0407",
"YUcy;": u"\u042e",
"Yacute": u"\xdd",
"Yacute;": u"\xdd",
"Ycirc;": u"\u0176",
"Ycy;": u"\u042b",
"Yfr;": u"\U0001d51c",
"Yopf;": u"\U0001d550",
"Yscr;": u"\U0001d4b4",
"Yuml;": u"\u0178",
"ZHcy;": u"\u0416",
"Zacute;": u"\u0179",
"Zcaron;": u"\u017d",
"Zcy;": u"\u0417",
"Zdot;": u"\u017b",
"ZeroWidthSpace;": u"\u200b",
"Zeta;": u"\u0396",
"Zfr;": u"\u2128",
"Zopf;": u"\u2124",
"Zscr;": u"\U0001d4b5",
"aacute": u"\xe1",
"aacute;": u"\xe1",
"abreve;": u"\u0103",
"ac;": u"\u223e",
"acE;": u"\u223e\u0333",
"acd;": u"\u223f",
"acirc": u"\xe2",
"acirc;": u"\xe2",
"acute": u"\xb4",
"acute;": u"\xb4",
"acy;": u"\u0430",
"aelig": u"\xe6",
"aelig;": u"\xe6",
"af;": u"\u2061",
"afr;": u"\U0001d51e",
"agrave": u"\xe0",
"agrave;": u"\xe0",
"alefsym;": u"\u2135",
"aleph;": u"\u2135",
"alpha;": u"\u03b1",
"amacr;": u"\u0101",
"amalg;": u"\u2a3f",
"amp": u"&",
"amp;": u"&",
"and;": u"\u2227",
"andand;": u"\u2a55",
"andd;": u"\u2a5c",
"andslope;": u"\u2a58",
"andv;": u"\u2a5a",
"ang;": u"\u2220",
"ange;": u"\u29a4",
"angle;": u"\u2220",
"angmsd;": u"\u2221",
"angmsdaa;": u"\u29a8",
"angmsdab;": u"\u29a9",
"angmsdac;": u"\u29aa",
"angmsdad;": u"\u29ab",
"angmsdae;": u"\u29ac",
"angmsdaf;": u"\u29ad",
"angmsdag;": u"\u29ae",
"angmsdah;": u"\u29af",
"angrt;": u"\u221f",
"angrtvb;": u"\u22be",
"angrtvbd;": u"\u299d",
"angsph;": u"\u2222",
"angst;": u"\xc5",
"angzarr;": u"\u237c",
"aogon;": u"\u0105",
"aopf;": u"\U0001d552",
"ap;": u"\u2248",
"apE;": u"\u2a70",
"apacir;": u"\u2a6f",
"ape;": u"\u224a",
"apid;": u"\u224b",
"apos;": u"'",
"approx;": u"\u2248",
"approxeq;": u"\u224a",
"aring": u"\xe5",
"aring;": u"\xe5",
"ascr;": u"\U0001d4b6",
"ast;": u"*",
"asymp;": u"\u2248",
"asympeq;": u"\u224d",
"atilde": u"\xe3",
"atilde;": u"\xe3",
"auml": u"\xe4",
"auml;": u"\xe4",
"awconint;": u"\u2233",
"awint;": u"\u2a11",
"bNot;": u"\u2aed",
"backcong;": u"\u224c",
"backepsilon;": u"\u03f6",
"backprime;": u"\u2035",
"backsim;": u"\u223d",
"backsimeq;": u"\u22cd",
"barvee;": u"\u22bd",
"barwed;": u"\u2305",
"barwedge;": u"\u2305",
"bbrk;": u"\u23b5",
"bbrktbrk;": u"\u23b6",
"bcong;": u"\u224c",
"bcy;": u"\u0431",
"bdquo;": u"\u201e",
"becaus;": u"\u2235",
"because;": u"\u2235",
"bemptyv;": u"\u29b0",
"bepsi;": u"\u03f6",
"bernou;": u"\u212c",
"beta;": u"\u03b2",
"beth;": u"\u2136",
"between;": u"\u226c",
"bfr;": u"\U0001d51f",
"bigcap;": u"\u22c2",
"bigcirc;": u"\u25ef",
"bigcup;": u"\u22c3",
"bigodot;": u"\u2a00",
"bigoplus;": u"\u2a01",
"bigotimes;": u"\u2a02",
"bigsqcup;": u"\u2a06",
"bigstar;": u"\u2605",
"bigtriangledown;": u"\u25bd",
"bigtriangleup;": u"\u25b3",
"biguplus;": u"\u2a04",
"bigvee;": u"\u22c1",
"bigwedge;": u"\u22c0",
"bkarow;": u"\u290d",
"blacklozenge;": u"\u29eb",
"blacksquare;": u"\u25aa",
"blacktriangle;": u"\u25b4",
"blacktriangledown;": u"\u25be",
"blacktriangleleft;": u"\u25c2",
"blacktriangleright;": u"\u25b8",
"blank;": u"\u2423",
"blk12;": u"\u2592",
"blk14;": u"\u2591",
"blk34;": u"\u2593",
"block;": u"\u2588",
"bne;": u"=\u20e5",
"bnequiv;": u"\u2261\u20e5",
"bnot;": u"\u2310",
"bopf;": u"\U0001d553",
"bot;": u"\u22a5",
"bottom;": u"\u22a5",
"bowtie;": u"\u22c8",
"boxDL;": u"\u2557",
"boxDR;": u"\u2554",
"boxDl;": u"\u2556",
"boxDr;": u"\u2553",
"boxH;": u"\u2550",
"boxHD;": u"\u2566",
"boxHU;": u"\u2569",
"boxHd;": u"\u2564",
"boxHu;": u"\u2567",
"boxUL;": u"\u255d",
"boxUR;": u"\u255a",
"boxUl;": u"\u255c",
"boxUr;": u"\u2559",
"boxV;": u"\u2551",
"boxVH;": u"\u256c",
"boxVL;": u"\u2563",
"boxVR;": u"\u2560",
"boxVh;": u"\u256b",
"boxVl;": u"\u2562",
"boxVr;": u"\u255f",
"boxbox;": u"\u29c9",
"boxdL;": u"\u2555",
"boxdR;": u"\u2552",
"boxdl;": u"\u2510",
"boxdr;": u"\u250c",
"boxh;": u"\u2500",
"boxhD;": u"\u2565",
"boxhU;": u"\u2568",
"boxhd;": u"\u252c",
"boxhu;": u"\u2534",
"boxminus;": u"\u229f",
"boxplus;": u"\u229e",
"boxtimes;": u"\u22a0",
"boxuL;": u"\u255b",
"boxuR;": u"\u2558",
"boxul;": u"\u2518",
"boxur;": u"\u2514",
"boxv;": u"\u2502",
"boxvH;": u"\u256a",
"boxvL;": u"\u2561",
"boxvR;": u"\u255e",
"boxvh;": u"\u253c",
"boxvl;": u"\u2524",
"boxvr;": u"\u251c",
"bprime;": u"\u2035",
"breve;": u"\u02d8",
"brvbar": u"\xa6",
"brvbar;": u"\xa6",
"bscr;": u"\U0001d4b7",
"bsemi;": u"\u204f",
"bsim;": u"\u223d",
"bsime;": u"\u22cd",
"bsol;": u"\\",
"bsolb;": u"\u29c5",
"bsolhsub;": u"\u27c8",
"bull;": u"\u2022",
"bullet;": u"\u2022",
"bump;": u"\u224e",
"bumpE;": u"\u2aae",
"bumpe;": u"\u224f",
"bumpeq;": u"\u224f",
"cacute;": u"\u0107",
"cap;": u"\u2229",
"capand;": u"\u2a44",
"capbrcup;": u"\u2a49",
"capcap;": u"\u2a4b",
"capcup;": u"\u2a47",
"capdot;": u"\u2a40",
"caps;": u"\u2229\ufe00",
"caret;": u"\u2041",
"caron;": u"\u02c7",
"ccaps;": u"\u2a4d",
"ccaron;": u"\u010d",
"ccedil": u"\xe7",
"ccedil;": u"\xe7",
"ccirc;": u"\u0109",
"ccups;": u"\u2a4c",
"ccupssm;": u"\u2a50",
"cdot;": u"\u010b",
"cedil": u"\xb8",
"cedil;": u"\xb8",
"cemptyv;": u"\u29b2",
"cent": u"\xa2",
"cent;": u"\xa2",
"centerdot;": u"\xb7",
"cfr;": u"\U0001d520",
"chcy;": u"\u0447",
"check;": u"\u2713",
"checkmark;": u"\u2713",
"chi;": u"\u03c7",
"cir;": u"\u25cb",
"cirE;": u"\u29c3",
"circ;": u"\u02c6",
"circeq;": u"\u2257",
"circlearrowleft;": u"\u21ba",
"circlearrowright;": u"\u21bb",
"circledR;": u"\xae",
"circledS;": u"\u24c8",
"circledast;": u"\u229b",
"circledcirc;": u"\u229a",
"circleddash;": u"\u229d",
"cire;": u"\u2257",
"cirfnint;": u"\u2a10",
"cirmid;": u"\u2aef",
"cirscir;": u"\u29c2",
"clubs;": u"\u2663",
"clubsuit;": u"\u2663",
"colon;": u":",
"colone;": u"\u2254",
"coloneq;": u"\u2254",
"comma;": u",",
"commat;": u"@",
"comp;": u"\u2201",
"compfn;": u"\u2218",
"complement;": u"\u2201",
"complexes;": u"\u2102",
"cong;": u"\u2245",
"congdot;": u"\u2a6d",
"conint;": u"\u222e",
"copf;": u"\U0001d554",
"coprod;": u"\u2210",
"copy": u"\xa9",
"copy;": u"\xa9",
"copysr;": u"\u2117",
"crarr;": u"\u21b5",
"cross;": u"\u2717",
"cscr;": u"\U0001d4b8",
"csub;": u"\u2acf",
"csube;": u"\u2ad1",
"csup;": u"\u2ad0",
"csupe;": u"\u2ad2",
"ctdot;": u"\u22ef",
"cudarrl;": u"\u2938",
"cudarrr;": u"\u2935",
"cuepr;": u"\u22de",
"cuesc;": u"\u22df",
"cularr;": u"\u21b6",
"cularrp;": u"\u293d",
"cup;": u"\u222a",
"cupbrcap;": u"\u2a48",
"cupcap;": u"\u2a46",
"cupcup;": u"\u2a4a",
"cupdot;": u"\u228d",
"cupor;": u"\u2a45",
"cups;": u"\u222a\ufe00",
"curarr;": u"\u21b7",
"curarrm;": u"\u293c",
"curlyeqprec;": u"\u22de",
"curlyeqsucc;": u"\u22df",
"curlyvee;": u"\u22ce",
"curlywedge;": u"\u22cf",
"curren": u"\xa4",
"curren;": u"\xa4",
"curvearrowleft;": u"\u21b6",
"curvearrowright;": u"\u21b7",
"cuvee;": u"\u22ce",
"cuwed;": u"\u22cf",
"cwconint;": u"\u2232",
"cwint;": u"\u2231",
"cylcty;": u"\u232d",
"dArr;": u"\u21d3",
"dHar;": u"\u2965",
"dagger;": u"\u2020",
"daleth;": u"\u2138",
"darr;": u"\u2193",
"dash;": u"\u2010",
"dashv;": u"\u22a3",
"dbkarow;": u"\u290f",
"dblac;": u"\u02dd",
"dcaron;": u"\u010f",
"dcy;": u"\u0434",
"dd;": u"\u2146",
"ddagger;": u"\u2021",
"ddarr;": u"\u21ca",
"ddotseq;": u"\u2a77",
"deg": u"\xb0",
"deg;": u"\xb0",
"delta;": u"\u03b4",
"demptyv;": u"\u29b1",
"dfisht;": u"\u297f",
"dfr;": u"\U0001d521",
"dharl;": u"\u21c3",
"dharr;": u"\u21c2",
"diam;": u"\u22c4",
"diamond;": u"\u22c4",
"diamondsuit;": u"\u2666",
"diams;": u"\u2666",
"die;": u"\xa8",
"digamma;": u"\u03dd",
"disin;": u"\u22f2",
"div;": u"\xf7",
"divide": u"\xf7",
"divide;": u"\xf7",
"divideontimes;": u"\u22c7",
"divonx;": u"\u22c7",
"djcy;": u"\u0452",
"dlcorn;": u"\u231e",
"dlcrop;": u"\u230d",
"dollar;": u"$",
"dopf;": u"\U0001d555",
"dot;": u"\u02d9",
"doteq;": u"\u2250",
"doteqdot;": u"\u2251",
"dotminus;": u"\u2238",
"dotplus;": u"\u2214",
"dotsquare;": u"\u22a1",
"doublebarwedge;": u"\u2306",
"downarrow;": u"\u2193",
"downdownarrows;": u"\u21ca",
"downharpoonleft;": u"\u21c3",
"downharpoonright;": u"\u21c2",
"drbkarow;": u"\u2910",
"drcorn;": u"\u231f",
"drcrop;": u"\u230c",
"dscr;": u"\U0001d4b9",
"dscy;": u"\u0455",
"dsol;": u"\u29f6",
"dstrok;": u"\u0111",
"dtdot;": u"\u22f1",
"dtri;": u"\u25bf",
"dtrif;": u"\u25be",
"duarr;": u"\u21f5",
"duhar;": u"\u296f",
"dwangle;": u"\u29a6",
"dzcy;": u"\u045f",
"dzigrarr;": u"\u27ff",
"eDDot;": u"\u2a77",
"eDot;": u"\u2251",
"eacute": u"\xe9",
"eacute;": u"\xe9",
"easter;": u"\u2a6e",
"ecaron;": u"\u011b",
"ecir;": u"\u2256",
"ecirc": u"\xea",
"ecirc;": u"\xea",
"ecolon;": u"\u2255",
"ecy;": u"\u044d",
"edot;": u"\u0117",
"ee;": u"\u2147",
"efDot;": u"\u2252",
"efr;": u"\U0001d522",
"eg;": u"\u2a9a",
"egrave": u"\xe8",
"egrave;": u"\xe8",
"egs;": u"\u2a96",
"egsdot;": u"\u2a98",
"el;": u"\u2a99",
"elinters;": u"\u23e7",
"ell;": u"\u2113",
"els;": u"\u2a95",
"elsdot;": u"\u2a97",
"emacr;": u"\u0113",
"empty;": u"\u2205",
"emptyset;": u"\u2205",
"emptyv;": u"\u2205",
"emsp13;": u"\u2004",
"emsp14;": u"\u2005",
"emsp;": u"\u2003",
"eng;": u"\u014b",
"ensp;": u"\u2002",
"eogon;": u"\u0119",
"eopf;": u"\U0001d556",
"epar;": u"\u22d5",
"eparsl;": u"\u29e3",
"eplus;": u"\u2a71",
"epsi;": u"\u03b5",
"epsilon;": u"\u03b5",
"epsiv;": u"\u03f5",
"eqcirc;": u"\u2256",
"eqcolon;": u"\u2255",
"eqsim;": u"\u2242",
"eqslantgtr;": u"\u2a96",
"eqslantless;": u"\u2a95",
"equals;": u"=",
"equest;": u"\u225f",
"equiv;": u"\u2261",
"equivDD;": u"\u2a78",
"eqvparsl;": u"\u29e5",
"erDot;": u"\u2253",
"erarr;": u"\u2971",
"escr;": u"\u212f",
"esdot;": u"\u2250",
"esim;": u"\u2242",
"eta;": u"\u03b7",
"eth": u"\xf0",
"eth;": u"\xf0",
"euml": u"\xeb",
"euml;": u"\xeb",
"euro;": u"\u20ac",
"excl;": u"!",
"exist;": u"\u2203",
"expectation;": u"\u2130",
"exponentiale;": u"\u2147",
"fallingdotseq;": u"\u2252",
"fcy;": u"\u0444",
"female;": u"\u2640",
"ffilig;": u"\ufb03",
"fflig;": u"\ufb00",
"ffllig;": u"\ufb04",
"ffr;": u"\U0001d523",
"filig;": u"\ufb01",
"fjlig;": u"fj",
"flat;": u"\u266d",
"fllig;": u"\ufb02",
"fltns;": u"\u25b1",
"fnof;": u"\u0192",
"fopf;": u"\U0001d557",
"forall;": u"\u2200",
"fork;": u"\u22d4",
"forkv;": u"\u2ad9",
"fpartint;": u"\u2a0d",
"frac12": u"\xbd",
"frac12;": u"\xbd",
"frac13;": u"\u2153",
"frac14": u"\xbc",
"frac14;": u"\xbc",
"frac15;": u"\u2155",
"frac16;": u"\u2159",
"frac18;": u"\u215b",
"frac23;": u"\u2154",
"frac25;": u"\u2156",
"frac34": u"\xbe",
"frac34;": u"\xbe",
"frac35;": u"\u2157",
"frac38;": u"\u215c",
"frac45;": u"\u2158",
"frac56;": u"\u215a",
"frac58;": u"\u215d",
"frac78;": u"\u215e",
"frasl;": u"\u2044",
"frown;": u"\u2322",
"fscr;": u"\U0001d4bb",
"gE;": u"\u2267",
"gEl;": u"\u2a8c",
"gacute;": u"\u01f5",
"gamma;": u"\u03b3",
"gammad;": u"\u03dd",
"gap;": u"\u2a86",
"gbreve;": u"\u011f",
"gcirc;": u"\u011d",
"gcy;": u"\u0433",
"gdot;": u"\u0121",
"ge;": u"\u2265",
"gel;": u"\u22db",
"geq;": u"\u2265",
"geqq;": u"\u2267",
"geqslant;": u"\u2a7e",
"ges;": u"\u2a7e",
"gescc;": u"\u2aa9",
"gesdot;": u"\u2a80",
"gesdoto;": u"\u2a82",
"gesdotol;": u"\u2a84",
"gesl;": u"\u22db\ufe00",
"gesles;": u"\u2a94",
"gfr;": u"\U0001d524",
"gg;": u"\u226b",
"ggg;": u"\u22d9",
"gimel;": u"\u2137",
"gjcy;": u"\u0453",
"gl;": u"\u2277",
"glE;": u"\u2a92",
"gla;": u"\u2aa5",
"glj;": u"\u2aa4",
"gnE;": u"\u2269",
"gnap;": u"\u2a8a",
"gnapprox;": u"\u2a8a",
"gne;": u"\u2a88",
"gneq;": u"\u2a88",
"gneqq;": u"\u2269",
"gnsim;": u"\u22e7",
"gopf;": u"\U0001d558",
"grave;": u"`",
"gscr;": u"\u210a",
"gsim;": u"\u2273",
"gsime;": u"\u2a8e",
"gsiml;": u"\u2a90",
"gt": u">",
"gt;": u">",
"gtcc;": u"\u2aa7",
"gtcir;": u"\u2a7a",
"gtdot;": u"\u22d7",
"gtlPar;": u"\u2995",
"gtquest;": u"\u2a7c",
"gtrapprox;": u"\u2a86",
"gtrarr;": u"\u2978",
"gtrdot;": u"\u22d7",
"gtreqless;": u"\u22db",
"gtreqqless;": u"\u2a8c",
"gtrless;": u"\u2277",
"gtrsim;": u"\u2273",
"gvertneqq;": u"\u2269\ufe00",
"gvnE;": u"\u2269\ufe00",
"hArr;": u"\u21d4",
"hairsp;": u"\u200a",
"half;": u"\xbd",
"hamilt;": u"\u210b",
"hardcy;": u"\u044a",
"harr;": u"\u2194",
"harrcir;": u"\u2948",
"harrw;": u"\u21ad",
"hbar;": u"\u210f",
"hcirc;": u"\u0125",
"hearts;": u"\u2665",
"heartsuit;": u"\u2665",
"hellip;": u"\u2026",
"hercon;": u"\u22b9",
"hfr;": u"\U0001d525",
"hksearow;": u"\u2925",
"hkswarow;": u"\u2926",
"hoarr;": u"\u21ff",
"homtht;": u"\u223b",
"hookleftarrow;": u"\u21a9",
"hookrightarrow;": u"\u21aa",
"hopf;": u"\U0001d559",
"horbar;": u"\u2015",
"hscr;": u"\U0001d4bd",
"hslash;": u"\u210f",
"hstrok;": u"\u0127",
"hybull;": u"\u2043",
"hyphen;": u"\u2010",
"iacute": u"\xed",
"iacute;": u"\xed",
"ic;": u"\u2063",
"icirc": u"\xee",
"icirc;": u"\xee",
"icy;": u"\u0438",
"iecy;": u"\u0435",
"iexcl": u"\xa1",
"iexcl;": u"\xa1",
"iff;": u"\u21d4",
"ifr;": u"\U0001d526",
"igrave": u"\xec",
"igrave;": u"\xec",
"ii;": u"\u2148",
"iiiint;": u"\u2a0c",
"iiint;": u"\u222d",
"iinfin;": u"\u29dc",
"iiota;": u"\u2129",
"ijlig;": u"\u0133",
"imacr;": u"\u012b",
"image;": u"\u2111",
"imagline;": u"\u2110",
"imagpart;": u"\u2111",
"imath;": u"\u0131",
"imof;": u"\u22b7",
"imped;": u"\u01b5",
"in;": u"\u2208",
"incare;": u"\u2105",
"infin;": u"\u221e",
"infintie;": u"\u29dd",
"inodot;": u"\u0131",
"int;": u"\u222b",
"intcal;": u"\u22ba",
"integers;": u"\u2124",
"intercal;": u"\u22ba",
"intlarhk;": u"\u2a17",
"intprod;": u"\u2a3c",
"iocy;": u"\u0451",
"iogon;": u"\u012f",
"iopf;": u"\U0001d55a",
"iota;": u"\u03b9",
"iprod;": u"\u2a3c",
"iquest": u"\xbf",
"iquest;": u"\xbf",
"iscr;": u"\U0001d4be",
"isin;": u"\u2208",
"isinE;": u"\u22f9",
"isindot;": u"\u22f5",
"isins;": u"\u22f4",
"isinsv;": u"\u22f3",
"isinv;": u"\u2208",
"it;": u"\u2062",
"itilde;": u"\u0129",
"iukcy;": u"\u0456",
"iuml": u"\xef",
"iuml;": u"\xef",
"jcirc;": u"\u0135",
"jcy;": u"\u0439",
"jfr;": u"\U0001d527",
"jmath;": u"\u0237",
"jopf;": u"\U0001d55b",
"jscr;": u"\U0001d4bf",
"jsercy;": u"\u0458",
"jukcy;": u"\u0454",
"kappa;": u"\u03ba",
"kappav;": u"\u03f0",
"kcedil;": u"\u0137",
"kcy;": u"\u043a",
"kfr;": u"\U0001d528",
"kgreen;": u"\u0138",
"khcy;": u"\u0445",
"kjcy;": u"\u045c",
"kopf;": u"\U0001d55c",
"kscr;": u"\U0001d4c0",
"lAarr;": u"\u21da",
"lArr;": u"\u21d0",
"lAtail;": u"\u291b",
"lBarr;": u"\u290e",
"lE;": u"\u2266",
"lEg;": u"\u2a8b",
"lHar;": u"\u2962",
"lacute;": u"\u013a",
"laemptyv;": u"\u29b4",
"lagran;": u"\u2112",
"lambda;": u"\u03bb",
"lang;": u"\u27e8",
"langd;": u"\u2991",
"langle;": u"\u27e8",
"lap;": u"\u2a85",
"laquo": u"\xab",
"laquo;": u"\xab",
"larr;": u"\u2190",
"larrb;": u"\u21e4",
"larrbfs;": u"\u291f",
"larrfs;": u"\u291d",
"larrhk;": u"\u21a9",
"larrlp;": u"\u21ab",
"larrpl;": u"\u2939",
"larrsim;": u"\u2973",
"larrtl;": u"\u21a2",
"lat;": u"\u2aab",
"latail;": u"\u2919",
"late;": u"\u2aad",
"lates;": u"\u2aad\ufe00",
"lbarr;": u"\u290c",
"lbbrk;": u"\u2772",
"lbrace;": u"{",
"lbrack;": u"[",
"lbrke;": u"\u298b",
"lbrksld;": u"\u298f",
"lbrkslu;": u"\u298d",
"lcaron;": u"\u013e",
"lcedil;": u"\u013c",
"lceil;": u"\u2308",
"lcub;": u"{",
"lcy;": u"\u043b",
"ldca;": u"\u2936",
"ldquo;": u"\u201c",
"ldquor;": u"\u201e",
"ldrdhar;": u"\u2967",
"ldrushar;": u"\u294b",
"ldsh;": u"\u21b2",
"le;": u"\u2264",
"leftarrow;": u"\u2190",
"leftarrowtail;": u"\u21a2",
"leftharpoondown;": u"\u21bd",
"leftharpoonup;": u"\u21bc",
"leftleftarrows;": u"\u21c7",
"leftrightarrow;": u"\u2194",
"leftrightarrows;": u"\u21c6",
"leftrightharpoons;": u"\u21cb",
"leftrightsquigarrow;": u"\u21ad",
"leftthreetimes;": u"\u22cb",
"leg;": u"\u22da",
"leq;": u"\u2264",
"leqq;": u"\u2266",
"leqslant;": u"\u2a7d",
"les;": u"\u2a7d",
"lescc;": u"\u2aa8",
"lesdot;": u"\u2a7f",
"lesdoto;": u"\u2a81",
"lesdotor;": u"\u2a83",
"lesg;": u"\u22da\ufe00",
"lesges;": u"\u2a93",
"lessapprox;": u"\u2a85",
"lessdot;": u"\u22d6",
"lesseqgtr;": u"\u22da",
"lesseqqgtr;": u"\u2a8b",
"lessgtr;": u"\u2276",
"lesssim;": u"\u2272",
"lfisht;": u"\u297c",
"lfloor;": u"\u230a",
"lfr;": u"\U0001d529",
"lg;": u"\u2276",
"lgE;": u"\u2a91",
"lhard;": u"\u21bd",
"lharu;": u"\u21bc",
"lharul;": u"\u296a",
"lhblk;": u"\u2584",
"ljcy;": u"\u0459",
"ll;": u"\u226a",
"llarr;": u"\u21c7",
"llcorner;": u"\u231e",
"llhard;": u"\u296b",
"lltri;": u"\u25fa",
"lmidot;": u"\u0140",
"lmoust;": u"\u23b0",
"lmoustache;": u"\u23b0",
"lnE;": u"\u2268",
"lnap;": u"\u2a89",
"lnapprox;": u"\u2a89",
"lne;": u"\u2a87",
"lneq;": u"\u2a87",
"lneqq;": u"\u2268",
"lnsim;": u"\u22e6",
"loang;": u"\u27ec",
"loarr;": u"\u21fd",
"lobrk;": u"\u27e6",
"longleftarrow;": u"\u27f5",
"longleftrightarrow;": u"\u27f7",
"longmapsto;": u"\u27fc",
"longrightarrow;": u"\u27f6",
"looparrowleft;": u"\u21ab",
"looparrowright;": u"\u21ac",
"lopar;": u"\u2985",
"lopf;": u"\U0001d55d",
"loplus;": u"\u2a2d",
"lotimes;": u"\u2a34",
"lowast;": u"\u2217",
"lowbar;": u"_",
"loz;": u"\u25ca",
"lozenge;": u"\u25ca",
"lozf;": u"\u29eb",
"lpar;": u"(",
"lparlt;": u"\u2993",
"lrarr;": u"\u21c6",
"lrcorner;": u"\u231f",
"lrhar;": u"\u21cb",
"lrhard;": u"\u296d",
"lrm;": u"\u200e",
"lrtri;": u"\u22bf",
"lsaquo;": u"\u2039",
"lscr;": u"\U0001d4c1",
"lsh;": u"\u21b0",
"lsim;": u"\u2272",
"lsime;": u"\u2a8d",
"lsimg;": u"\u2a8f",
"lsqb;": u"[",
"lsquo;": u"\u2018",
"lsquor;": u"\u201a",
"lstrok;": u"\u0142",
"lt": u"<",
"lt;": u"<",
"ltcc;": u"\u2aa6",
"ltcir;": u"\u2a79",
"ltdot;": u"\u22d6",
"lthree;": u"\u22cb",
"ltimes;": u"\u22c9",
"ltlarr;": u"\u2976",
"ltquest;": u"\u2a7b",
"ltrPar;": u"\u2996",
"ltri;": u"\u25c3",
"ltrie;": u"\u22b4",
"ltrif;": u"\u25c2",
"lurdshar;": u"\u294a",
"luruhar;": u"\u2966",
"lvertneqq;": u"\u2268\ufe00",
"lvnE;": u"\u2268\ufe00",
"mDDot;": u"\u223a",
"macr": u"\xaf",
"macr;": u"\xaf",
"male;": u"\u2642",
"malt;": u"\u2720",
"maltese;": u"\u2720",
"map;": u"\u21a6",
"mapsto;": u"\u21a6",
"mapstodown;": u"\u21a7",
"mapstoleft;": u"\u21a4",
"mapstoup;": u"\u21a5",
"marker;": u"\u25ae",
"mcomma;": u"\u2a29",
"mcy;": u"\u043c",
"mdash;": u"\u2014",
"measuredangle;": u"\u2221",
"mfr;": u"\U0001d52a",
"mho;": u"\u2127",
"micro": u"\xb5",
"micro;": u"\xb5",
"mid;": u"\u2223",
"midast;": u"*",
"midcir;": u"\u2af0",
"middot": u"\xb7",
"middot;": u"\xb7",
"minus;": u"\u2212",
"minusb;": u"\u229f",
"minusd;": u"\u2238",
"minusdu;": u"\u2a2a",
"mlcp;": u"\u2adb",
"mldr;": u"\u2026",
"mnplus;": u"\u2213",
"models;": u"\u22a7",
"mopf;": u"\U0001d55e",
"mp;": u"\u2213",
"mscr;": u"\U0001d4c2",
"mstpos;": u"\u223e",
"mu;": u"\u03bc",
"multimap;": u"\u22b8",
"mumap;": u"\u22b8",
"nGg;": u"\u22d9\u0338",
"nGt;": u"\u226b\u20d2",
"nGtv;": u"\u226b\u0338",
"nLeftarrow;": u"\u21cd",
"nLeftrightarrow;": u"\u21ce",
"nLl;": u"\u22d8\u0338",
"nLt;": u"\u226a\u20d2",
"nLtv;": u"\u226a\u0338",
"nRightarrow;": u"\u21cf",
"nVDash;": u"\u22af",
"nVdash;": u"\u22ae",
"nabla;": u"\u2207",
"nacute;": u"\u0144",
"nang;": u"\u2220\u20d2",
"nap;": u"\u2249",
"napE;": u"\u2a70\u0338",
"napid;": u"\u224b\u0338",
"napos;": u"\u0149",
"napprox;": u"\u2249",
"natur;": u"\u266e",
"natural;": u"\u266e",
"naturals;": u"\u2115",
"nbsp": u"\xa0",
"nbsp;": u"\xa0",
"nbump;": u"\u224e\u0338",
"nbumpe;": u"\u224f\u0338",
"ncap;": u"\u2a43",
"ncaron;": u"\u0148",
"ncedil;": u"\u0146",
"ncong;": u"\u2247",
"ncongdot;": u"\u2a6d\u0338",
"ncup;": u"\u2a42",
"ncy;": u"\u043d",
"ndash;": u"\u2013",
"ne;": u"\u2260",
"neArr;": u"\u21d7",
"nearhk;": u"\u2924",
"nearr;": u"\u2197",
"nearrow;": u"\u2197",
"nedot;": u"\u2250\u0338",
"nequiv;": u"\u2262",
"nesear;": u"\u2928",
"nesim;": u"\u2242\u0338",
"nexist;": u"\u2204",
"nexists;": u"\u2204",
"nfr;": u"\U0001d52b",
"ngE;": u"\u2267\u0338",
"nge;": u"\u2271",
"ngeq;": u"\u2271",
"ngeqq;": u"\u2267\u0338",
"ngeqslant;": u"\u2a7e\u0338",
"nges;": u"\u2a7e\u0338",
"ngsim;": u"\u2275",
"ngt;": u"\u226f",
"ngtr;": u"\u226f",
"nhArr;": u"\u21ce",
"nharr;": u"\u21ae",
"nhpar;": u"\u2af2",
"ni;": u"\u220b",
"nis;": u"\u22fc",
"nisd;": u"\u22fa",
"niv;": u"\u220b",
"njcy;": u"\u045a",
"nlArr;": u"\u21cd",
"nlE;": u"\u2266\u0338",
"nlarr;": u"\u219a",
"nldr;": u"\u2025",
"nle;": u"\u2270",
"nleftarrow;": u"\u219a",
"nleftrightarrow;": u"\u21ae",
"nleq;": u"\u2270",
"nleqq;": u"\u2266\u0338",
"nleqslant;": u"\u2a7d\u0338",
"nles;": u"\u2a7d\u0338",
"nless;": u"\u226e",
"nlsim;": u"\u2274",
"nlt;": u"\u226e",
"nltri;": u"\u22ea",
"nltrie;": u"\u22ec",
"nmid;": u"\u2224",
"nopf;": u"\U0001d55f",
"not": u"\xac",
"not;": u"\xac",
"notin;": u"\u2209",
"notinE;": u"\u22f9\u0338",
"notindot;": u"\u22f5\u0338",
"notinva;": u"\u2209",
"notinvb;": u"\u22f7",
"notinvc;": u"\u22f6",
"notni;": u"\u220c",
"notniva;": u"\u220c",
"notnivb;": u"\u22fe",
"notnivc;": u"\u22fd",
"npar;": u"\u2226",
"nparallel;": u"\u2226",
"nparsl;": u"\u2afd\u20e5",
"npart;": u"\u2202\u0338",
"npolint;": u"\u2a14",
"npr;": u"\u2280",
"nprcue;": u"\u22e0",
"npre;": u"\u2aaf\u0338",
"nprec;": u"\u2280",
"npreceq;": u"\u2aaf\u0338",
"nrArr;": u"\u21cf",
"nrarr;": u"\u219b",
"nrarrc;": u"\u2933\u0338",
"nrarrw;": u"\u219d\u0338",
"nrightarrow;": u"\u219b",
"nrtri;": u"\u22eb",
"nrtrie;": u"\u22ed",
"nsc;": u"\u2281",
"nsccue;": u"\u22e1",
"nsce;": u"\u2ab0\u0338",
"nscr;": u"\U0001d4c3",
"nshortmid;": u"\u2224",
"nshortparallel;": u"\u2226",
"nsim;": u"\u2241",
"nsime;": u"\u2244",
"nsimeq;": u"\u2244",
"nsmid;": u"\u2224",
"nspar;": u"\u2226",
"nsqsube;": u"\u22e2",
"nsqsupe;": u"\u22e3",
"nsub;": u"\u2284",
"nsubE;": u"\u2ac5\u0338",
"nsube;": u"\u2288",
"nsubset;": u"\u2282\u20d2",
"nsubseteq;": u"\u2288",
"nsubseteqq;": u"\u2ac5\u0338",
"nsucc;": u"\u2281",
"nsucceq;": u"\u2ab0\u0338",
"nsup;": u"\u2285",
"nsupE;": u"\u2ac6\u0338",
"nsupe;": u"\u2289",
"nsupset;": u"\u2283\u20d2",
"nsupseteq;": u"\u2289",
"nsupseteqq;": u"\u2ac6\u0338",
"ntgl;": u"\u2279",
"ntilde": u"\xf1",
"ntilde;": u"\xf1",
"ntlg;": u"\u2278",
"ntriangleleft;": u"\u22ea",
"ntrianglelefteq;": u"\u22ec",
"ntriangleright;": u"\u22eb",
"ntrianglerighteq;": u"\u22ed",
"nu;": u"\u03bd",
"num;": u"#",
"numero;": u"\u2116",
"numsp;": u"\u2007",
"nvDash;": u"\u22ad",
"nvHarr;": u"\u2904",
"nvap;": u"\u224d\u20d2",
"nvdash;": u"\u22ac",
"nvge;": u"\u2265\u20d2",
"nvgt;": u">\u20d2",
"nvinfin;": u"\u29de",
"nvlArr;": u"\u2902",
"nvle;": u"\u2264\u20d2",
"nvlt;": u"<\u20d2",
"nvltrie;": u"\u22b4\u20d2",
"nvrArr;": u"\u2903",
"nvrtrie;": u"\u22b5\u20d2",
"nvsim;": u"\u223c\u20d2",
"nwArr;": u"\u21d6",
"nwarhk;": u"\u2923",
"nwarr;": u"\u2196",
"nwarrow;": u"\u2196",
"nwnear;": u"\u2927",
"oS;": u"\u24c8",
"oacute": u"\xf3",
"oacute;": u"\xf3",
"oast;": u"\u229b",
"ocir;": u"\u229a",
"ocirc": u"\xf4",
"ocirc;": u"\xf4",
"ocy;": u"\u043e",
"odash;": u"\u229d",
"odblac;": u"\u0151",
"odiv;": u"\u2a38",
"odot;": u"\u2299",
"odsold;": u"\u29bc",
"oelig;": u"\u0153",
"ofcir;": u"\u29bf",
"ofr;": u"\U0001d52c",
"ogon;": u"\u02db",
"ograve": u"\xf2",
"ograve;": u"\xf2",
"ogt;": u"\u29c1",
"ohbar;": u"\u29b5",
"ohm;": u"\u03a9",
"oint;": u"\u222e",
"olarr;": u"\u21ba",
"olcir;": u"\u29be",
"olcross;": u"\u29bb",
"oline;": u"\u203e",
"olt;": u"\u29c0",
"omacr;": u"\u014d",
"omega;": u"\u03c9",
"omicron;": u"\u03bf",
"omid;": u"\u29b6",
"ominus;": u"\u2296",
"oopf;": u"\U0001d560",
"opar;": u"\u29b7",
"operp;": u"\u29b9",
"oplus;": u"\u2295",
"or;": u"\u2228",
"orarr;": u"\u21bb",
"ord;": u"\u2a5d",
"order;": u"\u2134",
"orderof;": u"\u2134",
"ordf": u"\xaa",
"ordf;": u"\xaa",
"ordm": u"\xba",
"ordm;": u"\xba",
"origof;": u"\u22b6",
"oror;": u"\u2a56",
"orslope;": u"\u2a57",
"orv;": u"\u2a5b",
"oscr;": u"\u2134",
"oslash": u"\xf8",
"oslash;": u"\xf8",
"osol;": u"\u2298",
"otilde": u"\xf5",
"otilde;": u"\xf5",
"otimes;": u"\u2297",
"otimesas;": u"\u2a36",
"ouml": u"\xf6",
"ouml;": u"\xf6",
"ovbar;": u"\u233d",
"par;": u"\u2225",
"para": u"\xb6",
"para;": u"\xb6",
"parallel;": u"\u2225",
"parsim;": u"\u2af3",
"parsl;": u"\u2afd",
"part;": u"\u2202",
"pcy;": u"\u043f",
"percnt;": u"%",
"period;": u".",
"permil;": u"\u2030",
"perp;": u"\u22a5",
"pertenk;": u"\u2031",
"pfr;": u"\U0001d52d",
"phi;": u"\u03c6",
"phiv;": u"\u03d5",
"phmmat;": u"\u2133",
"phone;": u"\u260e",
"pi;": u"\u03c0",
"pitchfork;": u"\u22d4",
"piv;": u"\u03d6",
"planck;": u"\u210f",
"planckh;": u"\u210e",
"plankv;": u"\u210f",
"plus;": u"+",
"plusacir;": u"\u2a23",
"plusb;": u"\u229e",
"pluscir;": u"\u2a22",
"plusdo;": u"\u2214",
"plusdu;": u"\u2a25",
"pluse;": u"\u2a72",
"plusmn": u"\xb1",
"plusmn;": u"\xb1",
"plussim;": u"\u2a26",
"plustwo;": u"\u2a27",
"pm;": u"\xb1",
"pointint;": u"\u2a15",
"popf;": u"\U0001d561",
"pound": u"\xa3",
"pound;": u"\xa3",
"pr;": u"\u227a",
"prE;": u"\u2ab3",
"prap;": u"\u2ab7",
"prcue;": u"\u227c",
"pre;": u"\u2aaf",
"prec;": u"\u227a",
"precapprox;": u"\u2ab7",
"preccurlyeq;": u"\u227c",
"preceq;": u"\u2aaf",
"precnapprox;": u"\u2ab9",
"precneqq;": u"\u2ab5",
"precnsim;": u"\u22e8",
"precsim;": u"\u227e",
"prime;": u"\u2032",
"primes;": u"\u2119",
"prnE;": u"\u2ab5",
"prnap;": u"\u2ab9",
"prnsim;": u"\u22e8",
"prod;": u"\u220f",
"profalar;": u"\u232e",
"profline;": u"\u2312",
"profsurf;": u"\u2313",
"prop;": u"\u221d",
"propto;": u"\u221d",
"prsim;": u"\u227e",
"prurel;": u"\u22b0",
"pscr;": u"\U0001d4c5",
"psi;": u"\u03c8",
"puncsp;": u"\u2008",
"qfr;": u"\U0001d52e",
"qint;": u"\u2a0c",
"qopf;": u"\U0001d562",
"qprime;": u"\u2057",
"qscr;": u"\U0001d4c6",
"quaternions;": u"\u210d",
"quatint;": u"\u2a16",
"quest;": u"?",
"questeq;": u"\u225f",
"quot": u"\"",
"quot;": u"\"",
"rAarr;": u"\u21db",
"rArr;": u"\u21d2",
"rAtail;": u"\u291c",
"rBarr;": u"\u290f",
"rHar;": u"\u2964",
"race;": u"\u223d\u0331",
"racute;": u"\u0155",
"radic;": u"\u221a",
"raemptyv;": u"\u29b3",
"rang;": u"\u27e9",
"rangd;": u"\u2992",
"range;": u"\u29a5",
"rangle;": u"\u27e9",
"raquo": u"\xbb",
"raquo;": u"\xbb",
"rarr;": u"\u2192",
"rarrap;": u"\u2975",
"rarrb;": u"\u21e5",
"rarrbfs;": u"\u2920",
"rarrc;": u"\u2933",
"rarrfs;": u"\u291e",
"rarrhk;": u"\u21aa",
"rarrlp;": u"\u21ac",
"rarrpl;": u"\u2945",
"rarrsim;": u"\u2974",
"rarrtl;": u"\u21a3",
"rarrw;": u"\u219d",
"ratail;": u"\u291a",
"ratio;": u"\u2236",
"rationals;": u"\u211a",
"rbarr;": u"\u290d",
"rbbrk;": u"\u2773",
"rbrace;": u"}",
"rbrack;": u"]",
"rbrke;": u"\u298c",
"rbrksld;": u"\u298e",
"rbrkslu;": u"\u2990",
"rcaron;": u"\u0159",
"rcedil;": u"\u0157",
"rceil;": u"\u2309",
"rcub;": u"}",
"rcy;": u"\u0440",
"rdca;": u"\u2937",
"rdldhar;": u"\u2969",
"rdquo;": u"\u201d",
"rdquor;": u"\u201d",
"rdsh;": u"\u21b3",
"real;": u"\u211c",
"realine;": u"\u211b",
"realpart;": u"\u211c",
"reals;": u"\u211d",
"rect;": u"\u25ad",
"reg": u"\xae",
"reg;": u"\xae",
"rfisht;": u"\u297d",
"rfloor;": u"\u230b",
"rfr;": u"\U0001d52f",
"rhard;": u"\u21c1",
"rharu;": u"\u21c0",
"rharul;": u"\u296c",
"rho;": u"\u03c1",
"rhov;": u"\u03f1",
"rightarrow;": u"\u2192",
"rightarrowtail;": u"\u21a3",
"rightharpoondown;": u"\u21c1",
"rightharpoonup;": u"\u21c0",
"rightleftarrows;": u"\u21c4",
"rightleftharpoons;": u"\u21cc",
"rightrightarrows;": u"\u21c9",
"rightsquigarrow;": u"\u219d",
"rightthreetimes;": u"\u22cc",
"ring;": u"\u02da",
"risingdotseq;": u"\u2253",
"rlarr;": u"\u21c4",
"rlhar;": u"\u21cc",
"rlm;": u"\u200f",
"rmoust;": u"\u23b1",
"rmoustache;": u"\u23b1",
"rnmid;": u"\u2aee",
"roang;": u"\u27ed",
"roarr;": u"\u21fe",
"robrk;": u"\u27e7",
"ropar;": u"\u2986",
"ropf;": u"\U0001d563",
"roplus;": u"\u2a2e",
"rotimes;": u"\u2a35",
"rpar;": u")",
"rpargt;": u"\u2994",
"rppolint;": u"\u2a12",
"rrarr;": u"\u21c9",
"rsaquo;": u"\u203a",
"rscr;": u"\U0001d4c7",
"rsh;": u"\u21b1",
"rsqb;": u"]",
"rsquo;": u"\u2019",
"rsquor;": u"\u2019",
"rthree;": u"\u22cc",
"rtimes;": u"\u22ca",
"rtri;": u"\u25b9",
"rtrie;": u"\u22b5",
"rtrif;": u"\u25b8",
"rtriltri;": u"\u29ce",
"ruluhar;": u"\u2968",
"rx;": u"\u211e",
"sacute;": u"\u015b",
"sbquo;": u"\u201a",
"sc;": u"\u227b",
"scE;": u"\u2ab4",
"scap;": u"\u2ab8",
"scaron;": u"\u0161",
"sccue;": u"\u227d",
"sce;": u"\u2ab0",
"scedil;": u"\u015f",
"scirc;": u"\u015d",
"scnE;": u"\u2ab6",
"scnap;": u"\u2aba",
"scnsim;": u"\u22e9",
"scpolint;": u"\u2a13",
"scsim;": u"\u227f",
"scy;": u"\u0441",
"sdot;": u"\u22c5",
"sdotb;": u"\u22a1",
"sdote;": u"\u2a66",
"seArr;": u"\u21d8",
"searhk;": u"\u2925",
"searr;": u"\u2198",
"searrow;": u"\u2198",
"sect": u"\xa7",
"sect;": u"\xa7",
"semi;": u";",
"seswar;": u"\u2929",
"setminus;": u"\u2216",
"setmn;": u"\u2216",
"sext;": u"\u2736",
"sfr;": u"\U0001d530",
"sfrown;": u"\u2322",
"sharp;": u"\u266f",
"shchcy;": u"\u0449",
"shcy;": u"\u0448",
"shortmid;": u"\u2223",
"shortparallel;": u"\u2225",
"shy": u"\xad",
"shy;": u"\xad",
"sigma;": u"\u03c3",
"sigmaf;": u"\u03c2",
"sigmav;": u"\u03c2",
"sim;": u"\u223c",
"simdot;": u"\u2a6a",
"sime;": u"\u2243",
"simeq;": u"\u2243",
"simg;": u"\u2a9e",
"simgE;": u"\u2aa0",
"siml;": u"\u2a9d",
"simlE;": u"\u2a9f",
"simne;": u"\u2246",
"simplus;": u"\u2a24",
"simrarr;": u"\u2972",
"slarr;": u"\u2190",
"smallsetminus;": u"\u2216",
"smashp;": u"\u2a33",
"smeparsl;": u"\u29e4",
"smid;": u"\u2223",
"smile;": u"\u2323",
"smt;": u"\u2aaa",
"smte;": u"\u2aac",
"smtes;": u"\u2aac\ufe00",
"softcy;": u"\u044c",
"sol;": u"/",
"solb;": u"\u29c4",
"solbar;": u"\u233f",
"sopf;": u"\U0001d564",
"spades;": u"\u2660",
"spadesuit;": u"\u2660",
"spar;": u"\u2225",
"sqcap;": u"\u2293",
"sqcaps;": u"\u2293\ufe00",
"sqcup;": u"\u2294",
"sqcups;": u"\u2294\ufe00",
"sqsub;": u"\u228f",
"sqsube;": u"\u2291",
"sqsubset;": u"\u228f",
"sqsubseteq;": u"\u2291",
"sqsup;": u"\u2290",
"sqsupe;": u"\u2292",
"sqsupset;": u"\u2290",
"sqsupseteq;": u"\u2292",
"squ;": u"\u25a1",
"square;": u"\u25a1",
"squarf;": u"\u25aa",
"squf;": u"\u25aa",
"srarr;": u"\u2192",
"sscr;": u"\U0001d4c8",
"ssetmn;": u"\u2216",
"ssmile;": u"\u2323",
"sstarf;": u"\u22c6",
"star;": u"\u2606",
"starf;": u"\u2605",
"straightepsilon;": u"\u03f5",
"straightphi;": u"\u03d5",
"strns;": u"\xaf",
"sub;": u"\u2282",
"subE;": u"\u2ac5",
"subdot;": u"\u2abd",
"sube;": u"\u2286",
"subedot;": u"\u2ac3",
"submult;": u"\u2ac1",
"subnE;": u"\u2acb",
"subne;": u"\u228a",
"subplus;": u"\u2abf",
"subrarr;": u"\u2979",
"subset;": u"\u2282",
"subseteq;": u"\u2286",
"subseteqq;": u"\u2ac5",
"subsetneq;": u"\u228a",
"subsetneqq;": u"\u2acb",
"subsim;": u"\u2ac7",
"subsub;": u"\u2ad5",
"subsup;": u"\u2ad3",
"succ;": u"\u227b",
"succapprox;": u"\u2ab8",
"succcurlyeq;": u"\u227d",
"succeq;": u"\u2ab0",
"succnapprox;": u"\u2aba",
"succneqq;": u"\u2ab6",
"succnsim;": u"\u22e9",
"succsim;": u"\u227f",
"sum;": u"\u2211",
"sung;": u"\u266a",
"sup1": u"\xb9",
"sup1;": u"\xb9",
"sup2": u"\xb2",
"sup2;": u"\xb2",
"sup3": u"\xb3",
"sup3;": u"\xb3",
"sup;": u"\u2283",
"supE;": u"\u2ac6",
"supdot;": u"\u2abe",
"supdsub;": u"\u2ad8",
"supe;": u"\u2287",
"supedot;": u"\u2ac4",
"suphsol;": u"\u27c9",
"suphsub;": u"\u2ad7",
"suplarr;": u"\u297b",
"supmult;": u"\u2ac2",
"supnE;": u"\u2acc",
"supne;": u"\u228b",
"supplus;": u"\u2ac0",
"supset;": u"\u2283",
"supseteq;": u"\u2287",
"supseteqq;": u"\u2ac6",
"supsetneq;": u"\u228b",
"supsetneqq;": u"\u2acc",
"supsim;": u"\u2ac8",
"supsub;": u"\u2ad4",
"supsup;": u"\u2ad6",
"swArr;": u"\u21d9",
"swarhk;": u"\u2926",
"swarr;": u"\u2199",
"swarrow;": u"\u2199",
"swnwar;": u"\u292a",
"szlig": u"\xdf",
"szlig;": u"\xdf",
"target;": u"\u2316",
"tau;": u"\u03c4",
"tbrk;": u"\u23b4",
"tcaron;": u"\u0165",
"tcedil;": u"\u0163",
"tcy;": u"\u0442",
"tdot;": u"\u20db",
"telrec;": u"\u2315",
"tfr;": u"\U0001d531",
"there4;": u"\u2234",
"therefore;": u"\u2234",
"theta;": u"\u03b8",
"thetasym;": u"\u03d1",
"thetav;": u"\u03d1",
"thickapprox;": u"\u2248",
"thicksim;": u"\u223c",
"thinsp;": u"\u2009",
"thkap;": u"\u2248",
"thksim;": u"\u223c",
"thorn": u"\xfe",
"thorn;": u"\xfe",
"tilde;": u"\u02dc",
"times": u"\xd7",
"times;": u"\xd7",
"timesb;": u"\u22a0",
"timesbar;": u"\u2a31",
"timesd;": u"\u2a30",
"tint;": u"\u222d",
"toea;": u"\u2928",
"top;": u"\u22a4",
"topbot;": u"\u2336",
"topcir;": u"\u2af1",
"topf;": u"\U0001d565",
"topfork;": u"\u2ada",
"tosa;": u"\u2929",
"tprime;": u"\u2034",
"trade;": u"\u2122",
"triangle;": u"\u25b5",
"triangledown;": u"\u25bf",
"triangleleft;": u"\u25c3",
"trianglelefteq;": u"\u22b4",
"triangleq;": u"\u225c",
"triangleright;": u"\u25b9",
"trianglerighteq;": u"\u22b5",
"tridot;": u"\u25ec",
"trie;": u"\u225c",
"triminus;": u"\u2a3a",
"triplus;": u"\u2a39",
"trisb;": u"\u29cd",
"tritime;": u"\u2a3b",
"trpezium;": u"\u23e2",
"tscr;": u"\U0001d4c9",
"tscy;": u"\u0446",
"tshcy;": u"\u045b",
"tstrok;": u"\u0167",
"twixt;": u"\u226c",
"twoheadleftarrow;": u"\u219e",
"twoheadrightarrow;": u"\u21a0",
"uArr;": u"\u21d1",
"uHar;": u"\u2963",
"uacute": u"\xfa",
"uacute;": u"\xfa",
"uarr;": u"\u2191",
"ubrcy;": u"\u045e",
"ubreve;": u"\u016d",
"ucirc": u"\xfb",
"ucirc;": u"\xfb",
"ucy;": u"\u0443",
"udarr;": u"\u21c5",
"udblac;": u"\u0171",
"udhar;": u"\u296e",
"ufisht;": u"\u297e",
"ufr;": u"\U0001d532",
"ugrave": u"\xf9",
"ugrave;": u"\xf9",
"uharl;": u"\u21bf",
"uharr;": u"\u21be",
"uhblk;": u"\u2580",
"ulcorn;": u"\u231c",
"ulcorner;": u"\u231c",
"ulcrop;": u"\u230f",
"ultri;": u"\u25f8",
"umacr;": u"\u016b",
"uml": u"\xa8",
"uml;": u"\xa8",
"uogon;": u"\u0173",
"uopf;": u"\U0001d566",
"uparrow;": u"\u2191",
"updownarrow;": u"\u2195",
"upharpoonleft;": u"\u21bf",
"upharpoonright;": u"\u21be",
"uplus;": u"\u228e",
"upsi;": u"\u03c5",
"upsih;": u"\u03d2",
"upsilon;": u"\u03c5",
"upuparrows;": u"\u21c8",
"urcorn;": u"\u231d",
"urcorner;": u"\u231d",
"urcrop;": u"\u230e",
"uring;": u"\u016f",
"urtri;": u"\u25f9",
"uscr;": u"\U0001d4ca",
"utdot;": u"\u22f0",
"utilde;": u"\u0169",
"utri;": u"\u25b5",
"utrif;": u"\u25b4",
"uuarr;": u"\u21c8",
"uuml": u"\xfc",
"uuml;": u"\xfc",
"uwangle;": u"\u29a7",
"vArr;": u"\u21d5",
"vBar;": u"\u2ae8",
"vBarv;": u"\u2ae9",
"vDash;": u"\u22a8",
"vangrt;": u"\u299c",
"varepsilon;": u"\u03f5",
"varkappa;": u"\u03f0",
"varnothing;": u"\u2205",
"varphi;": u"\u03d5",
"varpi;": u"\u03d6",
"varpropto;": u"\u221d",
"varr;": u"\u2195",
"varrho;": u"\u03f1",
"varsigma;": u"\u03c2",
"varsubsetneq;": u"\u228a\ufe00",
"varsubsetneqq;": u"\u2acb\ufe00",
"varsupsetneq;": u"\u228b\ufe00",
"varsupsetneqq;": u"\u2acc\ufe00",
"vartheta;": u"\u03d1",
"vartriangleleft;": u"\u22b2",
"vartriangleright;": u"\u22b3",
"vcy;": u"\u0432",
"vdash;": u"\u22a2",
"vee;": u"\u2228",
"veebar;": u"\u22bb",
"veeeq;": u"\u225a",
"vellip;": u"\u22ee",
"verbar;": u"|",
"vert;": u"|",
"vfr;": u"\U0001d533",
"vltri;": u"\u22b2",
"vnsub;": u"\u2282\u20d2",
"vnsup;": u"\u2283\u20d2",
"vopf;": u"\U0001d567",
"vprop;": u"\u221d",
"vrtri;": u"\u22b3",
"vscr;": u"\U0001d4cb",
"vsubnE;": u"\u2acb\ufe00",
"vsubne;": u"\u228a\ufe00",
"vsupnE;": u"\u2acc\ufe00",
"vsupne;": u"\u228b\ufe00",
"vzigzag;": u"\u299a",
"wcirc;": u"\u0175",
"wedbar;": u"\u2a5f",
"wedge;": u"\u2227",
"wedgeq;": u"\u2259",
"weierp;": u"\u2118",
"wfr;": u"\U0001d534",
"wopf;": u"\U0001d568",
"wp;": u"\u2118",
"wr;": u"\u2240",
"wreath;": u"\u2240",
"wscr;": u"\U0001d4cc",
"xcap;": u"\u22c2",
"xcirc;": u"\u25ef",
"xcup;": u"\u22c3",
"xdtri;": u"\u25bd",
"xfr;": u"\U0001d535",
"xhArr;": u"\u27fa",
"xharr;": u"\u27f7",
"xi;": u"\u03be",
"xlArr;": u"\u27f8",
"xlarr;": u"\u27f5",
"xmap;": u"\u27fc",
"xnis;": u"\u22fb",
"xodot;": u"\u2a00",
"xopf;": u"\U0001d569",
"xoplus;": u"\u2a01",
"xotime;": u"\u2a02",
"xrArr;": u"\u27f9",
"xrarr;": u"\u27f6",
"xscr;": u"\U0001d4cd",
"xsqcup;": u"\u2a06",
"xuplus;": u"\u2a04",
"xutri;": u"\u25b3",
"xvee;": u"\u22c1",
"xwedge;": u"\u22c0",
"yacute": u"\xfd",
"yacute;": u"\xfd",
"yacy;": u"\u044f",
"ycirc;": u"\u0177",
"ycy;": u"\u044b",
"yen": u"\xa5",
"yen;": u"\xa5",
"yfr;": u"\U0001d536",
"yicy;": u"\u0457",
"yopf;": u"\U0001d56a",
"yscr;": u"\U0001d4ce",
"yucy;": u"\u044e",
"yuml": u"\xff",
"yuml;": u"\xff",
"zacute;": u"\u017a",
"zcaron;": u"\u017e",
"zcy;": u"\u0437",
"zdot;": u"\u017c",
"zeetrf;": u"\u2128",
"zeta;": u"\u03b6",
"zfr;": u"\U0001d537",
"zhcy;": u"\u0436",
"zigrarr;": u"\u21dd",
"zopf;": u"\U0001d56b",
"zscr;": u"\U0001d4cf",
"zwj;": u"\u200d",
"zwnj;": u"\u200c",
}
# Replacement table for numeric character references that name C1 control
# code points (0x80-0x9F) or other disallowed values: per the HTML5 spec
# these decode as the corresponding windows-1252 characters instead.
# 0x00 maps to U+FFFD REPLACEMENT CHARACTER.
# Fix: the original dict literal repeated the 0x81 entry; the duplicate
# key was silently collapsed by Python and has been removed.
replacementCharacters = {
    0x0: u"\uFFFD",
    0x0d: u"\u000D",
    0x80: u"\u20AC",
    0x81: u"\u0081",
    0x82: u"\u201A",
    0x83: u"\u0192",
    0x84: u"\u201E",
    0x85: u"\u2026",
    0x86: u"\u2020",
    0x87: u"\u2021",
    0x88: u"\u02C6",
    0x89: u"\u2030",
    0x8A: u"\u0160",
    0x8B: u"\u2039",
    0x8C: u"\u0152",
    0x8D: u"\u008D",
    0x8E: u"\u017D",
    0x8F: u"\u008F",
    0x90: u"\u0090",
    0x91: u"\u2018",
    0x92: u"\u2019",
    0x93: u"\u201C",
    0x94: u"\u201D",
    0x95: u"\u2022",
    0x96: u"\u2013",
    0x97: u"\u2014",
    0x98: u"\u02DC",
    0x99: u"\u2122",
    0x9A: u"\u0161",
    0x9B: u"\u203A",
    0x9C: u"\u0153",
    0x9D: u"\u009D",
    0x9E: u"\u017E",
    0x9F: u"\u0178",
}
# Map of normalized encoding labels (lower-cased, with punctuation stripped)
# to the Python codec name used to decode them.  Several entries deliberately
# deviate from the IANA registry: e.g. the iso-8859-1 family of labels maps
# to 'windows-1252' because real-world documents labelled latin-1 routinely
# use the windows-1252 superset.  NOTE(review): some mappings (e.g.
# 'tis620': 'cp874', 'euckr': 'cp949') likewise look like deliberate
# superset substitutions -- confirm against the spec before changing.
encodings = {
    '437': 'cp437',
    '850': 'cp850',
    '852': 'cp852',
    '855': 'cp855',
    '857': 'cp857',
    '860': 'cp860',
    '861': 'cp861',
    '862': 'cp862',
    '863': 'cp863',
    '865': 'cp865',
    '866': 'cp866',
    '869': 'cp869',
    'ansix341968': 'ascii',
    'ansix341986': 'ascii',
    'arabic': 'iso8859-6',
    'ascii': 'ascii',
    'asmo708': 'iso8859-6',
    'big5': 'big5',
    'big5hkscs': 'big5hkscs',
    'chinese': 'gbk',
    'cp037': 'cp037',
    'cp1026': 'cp1026',
    'cp154': 'ptcp154',
    'cp367': 'ascii',
    'cp424': 'cp424',
    'cp437': 'cp437',
    'cp500': 'cp500',
    'cp775': 'cp775',
    'cp819': 'windows-1252',
    'cp850': 'cp850',
    'cp852': 'cp852',
    'cp855': 'cp855',
    'cp857': 'cp857',
    'cp860': 'cp860',
    'cp861': 'cp861',
    'cp862': 'cp862',
    'cp863': 'cp863',
    'cp864': 'cp864',
    'cp865': 'cp865',
    'cp866': 'cp866',
    'cp869': 'cp869',
    'cp936': 'gbk',
    'cpgr': 'cp869',
    'cpis': 'cp861',
    'csascii': 'ascii',
    'csbig5': 'big5',
    'cseuckr': 'cp949',
    'cseucpkdfmtjapanese': 'euc_jp',
    'csgb2312': 'gbk',
    'cshproman8': 'hp-roman8',
    'csibm037': 'cp037',
    'csibm1026': 'cp1026',
    'csibm424': 'cp424',
    'csibm500': 'cp500',
    'csibm855': 'cp855',
    'csibm857': 'cp857',
    'csibm860': 'cp860',
    'csibm861': 'cp861',
    'csibm863': 'cp863',
    'csibm864': 'cp864',
    'csibm865': 'cp865',
    'csibm866': 'cp866',
    'csibm869': 'cp869',
    'csiso2022jp': 'iso2022_jp',
    'csiso2022jp2': 'iso2022_jp_2',
    'csiso2022kr': 'iso2022_kr',
    'csiso58gb231280': 'gbk',
    'csisolatin1': 'windows-1252',
    'csisolatin2': 'iso8859-2',
    'csisolatin3': 'iso8859-3',
    'csisolatin4': 'iso8859-4',
    'csisolatin5': 'windows-1254',
    'csisolatin6': 'iso8859-10',
    'csisolatinarabic': 'iso8859-6',
    'csisolatincyrillic': 'iso8859-5',
    'csisolatingreek': 'iso8859-7',
    'csisolatinhebrew': 'iso8859-8',
    'cskoi8r': 'koi8-r',
    'csksc56011987': 'cp949',
    'cspc775baltic': 'cp775',
    'cspc850multilingual': 'cp850',
    'cspc862latinhebrew': 'cp862',
    'cspc8codepage437': 'cp437',
    'cspcp852': 'cp852',
    'csptcp154': 'ptcp154',
    'csshiftjis': 'shift_jis',
    'csunicode11utf7': 'utf-7',
    'cyrillic': 'iso8859-5',
    'cyrillicasian': 'ptcp154',
    'ebcdiccpbe': 'cp500',
    'ebcdiccpca': 'cp037',
    'ebcdiccpch': 'cp500',
    'ebcdiccphe': 'cp424',
    'ebcdiccpnl': 'cp037',
    'ebcdiccpus': 'cp037',
    'ebcdiccpwt': 'cp037',
    'ecma114': 'iso8859-6',
    'ecma118': 'iso8859-7',
    'elot928': 'iso8859-7',
    'eucjp': 'euc_jp',
    'euckr': 'cp949',
    'extendedunixcodepackedformatforjapanese': 'euc_jp',
    'gb18030': 'gb18030',
    'gb2312': 'gbk',
    'gb231280': 'gbk',
    'gbk': 'gbk',
    'greek': 'iso8859-7',
    'greek8': 'iso8859-7',
    'hebrew': 'iso8859-8',
    'hproman8': 'hp-roman8',
    'hzgb2312': 'hz',
    'ibm037': 'cp037',
    'ibm1026': 'cp1026',
    'ibm367': 'ascii',
    'ibm424': 'cp424',
    'ibm437': 'cp437',
    'ibm500': 'cp500',
    'ibm775': 'cp775',
    'ibm819': 'windows-1252',
    'ibm850': 'cp850',
    'ibm852': 'cp852',
    'ibm855': 'cp855',
    'ibm857': 'cp857',
    'ibm860': 'cp860',
    'ibm861': 'cp861',
    'ibm862': 'cp862',
    'ibm863': 'cp863',
    'ibm864': 'cp864',
    'ibm865': 'cp865',
    'ibm866': 'cp866',
    'ibm869': 'cp869',
    'iso2022jp': 'iso2022_jp',
    'iso2022jp2': 'iso2022_jp_2',
    'iso2022kr': 'iso2022_kr',
    'iso646irv1991': 'ascii',
    'iso646us': 'ascii',
    'iso88591': 'windows-1252',
    'iso885910': 'iso8859-10',
    'iso8859101992': 'iso8859-10',
    'iso885911987': 'windows-1252',
    'iso885913': 'iso8859-13',
    'iso885914': 'iso8859-14',
    'iso8859141998': 'iso8859-14',
    'iso885915': 'iso8859-15',
    'iso885916': 'iso8859-16',
    'iso8859162001': 'iso8859-16',
    'iso88592': 'iso8859-2',
    'iso885921987': 'iso8859-2',
    'iso88593': 'iso8859-3',
    'iso885931988': 'iso8859-3',
    'iso88594': 'iso8859-4',
    'iso885941988': 'iso8859-4',
    'iso88595': 'iso8859-5',
    'iso885951988': 'iso8859-5',
    'iso88596': 'iso8859-6',
    'iso885961987': 'iso8859-6',
    'iso88597': 'iso8859-7',
    'iso885971987': 'iso8859-7',
    'iso88598': 'iso8859-8',
    'iso885981988': 'iso8859-8',
    'iso88599': 'windows-1254',
    'iso885991989': 'windows-1254',
    'isoceltic': 'iso8859-14',
    'isoir100': 'windows-1252',
    'isoir101': 'iso8859-2',
    'isoir109': 'iso8859-3',
    'isoir110': 'iso8859-4',
    'isoir126': 'iso8859-7',
    'isoir127': 'iso8859-6',
    'isoir138': 'iso8859-8',
    'isoir144': 'iso8859-5',
    'isoir148': 'windows-1254',
    'isoir149': 'cp949',
    'isoir157': 'iso8859-10',
    'isoir199': 'iso8859-14',
    'isoir226': 'iso8859-16',
    'isoir58': 'gbk',
    'isoir6': 'ascii',
    'koi8r': 'koi8-r',
    'koi8u': 'koi8-u',
    'korean': 'cp949',
    'ksc5601': 'cp949',
    'ksc56011987': 'cp949',
    'ksc56011989': 'cp949',
    'l1': 'windows-1252',
    'l10': 'iso8859-16',
    'l2': 'iso8859-2',
    'l3': 'iso8859-3',
    'l4': 'iso8859-4',
    'l5': 'windows-1254',
    'l6': 'iso8859-10',
    'l8': 'iso8859-14',
    'latin1': 'windows-1252',
    'latin10': 'iso8859-16',
    'latin2': 'iso8859-2',
    'latin3': 'iso8859-3',
    'latin4': 'iso8859-4',
    'latin5': 'windows-1254',
    'latin6': 'iso8859-10',
    'latin8': 'iso8859-14',
    'latin9': 'iso8859-15',
    'ms936': 'gbk',
    'mskanji': 'shift_jis',
    'pt154': 'ptcp154',
    'ptcp154': 'ptcp154',
    'r8': 'hp-roman8',
    'roman8': 'hp-roman8',
    'shiftjis': 'shift_jis',
    'tis620': 'cp874',
    'unicode11utf7': 'utf-7',
    'us': 'ascii',
    'usascii': 'ascii',
    'utf16': 'utf-16',
    'utf16be': 'utf-16-be',
    'utf16le': 'utf-16-le',
    'utf8': 'utf-8',
    'windows1250': 'cp1250',
    'windows1251': 'cp1251',
    'windows1252': 'cp1252',
    'windows1253': 'cp1253',
    'windows1254': 'cp1254',
    'windows1255': 'cp1255',
    'windows1256': 'cp1256',
    'windows1257': 'cp1257',
    'windows1258': 'cp1258',
    'windows936': 'gbk',
    'x-x-big5': 'big5'}
# Numeric codes for the token types emitted by the tokenizer.  The order of
# this list fixes the code assigned to each name (Doctype=0 ... ParseError=7).
tokenTypes = dict(
    (name, code) for code, name in enumerate([
        "Doctype", "Characters", "SpaceCharacters", "StartTag",
        "EndTag", "EmptyTag", "Comment", "ParseError"]))

# The subset of token types that carry tag data (start, end and
# empty-element tags).
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
                           tokenTypes["EmptyTag"]))
# Reverse lookup from namespace URI back to its prefix; MathML is pinned to
# the conventional "math" prefix (overriding whatever the inversion produced).
prefixes = dict((uri, prefix) for prefix, uri in namespaces.iteritems())
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning category used to flag operations that may lose data."""
class ReparseException(Exception):
    """Raised to signal that the input must be parsed again from the start."""
| dewitt/appengine-unshorten | third_party/html5lib/constants.py | Python | apache-2.0 | 88,590 | [
"Bowtie"
] | 032651df6061c74b748150e01125dccc4dc98354580ba2c50b71a23f48c5c3ef |
from __future__ import unicode_literals
import json
from context_processors import GroupPermWrapper
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test.utils import override_settings
from django.utils import timezone
from mock import patch, Mock
from smartmin.tests import SmartminTest
from temba.campaigns.models import Campaign, CampaignEvent
from temba.contacts.models import Contact, ContactGroup, TEL_SCHEME, TWITTER_SCHEME
from temba.middleware import BrandingMiddleware
from temba.channels.models import Channel, RECEIVE, SEND, TWILIO, TWITTER, PLIVO_AUTH_ID, PLIVO_AUTH_TOKEN
from temba.flows.models import Flow, ActionSet
from temba.msgs.models import Label, Msg, INCOMING
from temba.orgs.models import UserSettings
from temba.utils.email import link_components
from temba.utils import languages, dict_to_struct
from temba.tests import TembaTest, MockResponse, MockTwilioClient, MockRequestValidator, FlowFileTest
from temba.triggers.models import Trigger
from .models import Org, OrgEvent, TopUp, Invitation, Language, DAYFIRST, MONTHFIRST, CURRENT_EXPORT_VERSION
from .models import CreditAlert, ORG_CREDIT_OVER, ORG_CREDIT_LOW, ORG_CREDIT_EXPIRING
from .models import UNREAD_FLOW_MSGS, UNREAD_INBOX_MSGS, TopUpCredits
from .tasks import squash_topupcredits
class OrgContextProcessorTest(TembaTest):

    def test_group_perms_wrapper(self):
        """GroupPermWrapper exposes each auth group's permissions as nested
        app -> codename booleans."""
        group_lookup = Group.objects.get

        admin_perms = GroupPermWrapper(group_lookup(name="Administrators"))
        self.assertTrue(admin_perms['msgs']['msg_api'])
        self.assertTrue(admin_perms["msgs"]["msg_inbox"])

        # editors lack the plan permission but can still read the inbox
        editor_perms = GroupPermWrapper(group_lookup(name="Editors"))
        self.assertFalse(editor_perms["msgs"]["org_plan"])
        self.assertTrue(editor_perms["msgs"]["msg_inbox"])

        # viewers are read-only: inbox yes, API no
        viewer_perms = GroupPermWrapper(group_lookup(name="Viewers"))
        self.assertFalse(viewer_perms["msgs"]["msg_api"])
        self.assertTrue(viewer_perms["msgs"]["msg_inbox"])
class OrgTest(TembaTest):
def test_get_org_users(self):
    """get_org_users returns every org member, ordered by email."""
    members = self.org.get_org_users()

    for expected_member in (self.user, self.surveyor, self.editor, self.admin):
        self.assertTrue(expected_member in members)

    # should be ordered by email
    for position, expected_member in enumerate(
            [self.admin, self.editor, self.surveyor, self.user]):
        self.assertEqual(expected_member, members[position])
def test_get_unique_slug(self):
    """Slugs are normalized and de-duplicated against existing orgs."""
    self.org.slug = 'allo'
    self.org.save()

    # (raw name, expected slug); 'Allo' collides with our org and gets a suffix
    for raw_name, expected_slug in [('foo', 'foo'),
                                    ('Which part?', 'which-part'),
                                    ('Allo', 'allo-2')]:
        self.assertEqual(Org.get_unique_slug(raw_name), expected_slug)
def test_edit(self):
    """The org edit form rejects slugs containing spaces and saves valid
    name/slug updates."""
    # use a manager now
    self.login(self.admin)

    # can we see the edit page
    response = self.client.get(reverse('orgs.org_edit'))
    self.assertEquals(200, response.status_code)

    # a slug with a space is rejected with a form error
    bad_payload = dict(name="Temba", timezone="Africa/Kigali",
                       date_format=DAYFIRST, slug="nice temba")
    response = self.client.post(reverse('orgs.org_edit'), bad_payload)
    self.assertTrue('slug' in response.context['form'].errors)

    # a hyphenated slug is accepted and we are redirected on success
    good_payload = dict(name="Temba", timezone="Africa/Kigali",
                        date_format=MONTHFIRST, slug="nice-temba")
    response = self.client.post(reverse('orgs.org_edit'), good_payload)
    self.assertEquals(302, response.status_code)

    refreshed = Org.objects.get(pk=self.org.pk)
    self.assertEquals("Temba", refreshed.name)
    self.assertEquals("nice-temba", refreshed.slug)
def test_recommended_channel(self):
    """The recommended channel type follows the org's timezone/region."""
    recommendations = [
        ('Africa/Nairobi', 'africastalking'),
        ('America/Phoenix', 'twilio'),
        ('Asia/Jakarta', 'hub9'),
        ('Africa/Mogadishu', 'shaqodoon'),
        ('Europe/Amsterdam', 'nexmo'),
        ('Africa/Kigali', 'android'),
    ]
    for tz_name, expected_channel in recommendations:
        self.org.timezone = tz_name
        self.org.save()
        self.assertEquals(self.org.get_recommended_channel(), expected_channel)
def test_country(self):
    """Setting/clearing the org country, and the fallback chain for
    get_country_code(): boundary name -> channel country -> None."""
    from temba.locations.models import AdminBoundary
    country_url = reverse('orgs.org_country')
    # can't see this page if not logged in
    self.assertLoginRedirect(self.client.get(country_url))
    # login as admin instead
    self.login(self.admin)
    response = self.client.get(country_url)
    self.assertEquals(200, response.status_code)
    # save with Rwanda as a country
    response = self.client.post(country_url, dict(country=AdminBoundary.objects.get(name='Rwanda').pk))
    # assert it has changed
    org = Org.objects.get(pk=self.org.pk)
    self.assertEqual("Rwanda", unicode(org.country))
    self.assertEqual("RW", org.get_country_code())
    # set our admin boundary name to something that won't resolve to a country
    org.country.name = 'Fantasia'
    org.country.save()
    # the country code now falls back to our channel's country
    self.assertEqual('RW', org.get_country_code())
    # clear the org country out
    self.client.post(country_url, dict(country=''))
    # assert it has been cleared, but the channel fallback still answers
    org = Org.objects.get(pk=self.org.pk)
    self.assertFalse(org.country)
    self.assertEquals('RW', org.get_country_code())
    # remove all our channels so we no longer have a fallback
    org.channels.all().delete()
    # now we really don't have a clue of our country code
    self.assertIsNone(org.get_country_code())
def test_plans(self):
    """Plan dates and credit display: a 1,000-credit topup with one incoming
    message consumed shows 999 remaining."""
    self.contact = self.create_contact("Joe", "+250788123123")
    # one incoming message consumes one credit
    self.create_msg(direction=INCOMING, contact=self.contact, text="Orange")
    # check start and end date for this plan (one month long)
    self.assertEquals(timezone.now().date(), self.org.current_plan_start())
    self.assertEquals(timezone.now().date() + relativedelta(months=1), self.org.current_plan_end())
    # check our credits on the home page
    self.login(self.admin)
    response = self.client.get(reverse('orgs.org_home'))
    self.assertContains(response, "999")
    # view our topups
    response = self.client.get(reverse('orgs.topup_list'))
    # the topup list should also show the 999 credits remaining
    self.assertContains(response, "999")
    # and that 1 of the 1,000 topup credits has been used
    self.assertContains(response, "1 of 1,000 Credits Used")
    # our receipt should show that the topup was free
    with patch('stripe.Charge.retrieve') as stripe:
        stripe.return_value = ''
        response = self.client.get(reverse('orgs.topup_read', args=[TopUp.objects.filter(org=self.org).first().pk]))
        self.assertContains(response, '1000 Credits')
def test_user_update(self):
    """Users can edit their profile; a language change persists to their
    UserSettings."""
    update_url = reverse('orgs.user_edit')
    login_url = reverse('users.user_login')
    # no access if anonymous
    response = self.client.get(update_url)
    self.assertRedirect(response, login_url)
    self.login(self.admin)
    # change the user language
    post_data = dict(language='pt-br', first_name='Admin', last_name='User',
                     email='administrator@temba.com', current_password='Administrator')
    response = self.client.post(update_url, post_data)
    self.assertRedirect(response, reverse('orgs.org_home'))
    # check that our user settings have changed.  Fix: use a distinct local
    # name so we don't shadow the module-level `django.conf.settings` import.
    user_settings = self.admin.get_settings()
    self.assertEquals('pt-br', user_settings.language)
def test_usersettings(self):
    """The phone settings form normalizes valid numbers for display and
    rejects unparseable ones with a form error."""
    self.login(self.admin)

    phone_url = reverse('orgs.usersettings_phone')

    # a valid number is stored and formatted for display
    self.client.post(phone_url, dict(tel='+250788382382'))
    self.assertEquals('+250 788 382 382',
                      UserSettings.objects.get(user=self.admin).get_tel_formatted())

    # garbage is rejected
    response = self.client.post(phone_url, dict(tel='bad number'))
    self.assertEquals(response.context['form'].errors['tel'][0], 'Invalid phone number, try again.')
def test_webhook_headers(self):
    """Webhook URL and custom headers can be configured from the org
    webhook page and are persisted on the org."""
    update_url = reverse('orgs.org_webhook')
    login_url = reverse('users.user_login')
    # no access if anonymous
    response = self.client.get(update_url)
    self.assertRedirect(response, login_url)
    self.login(self.admin)
    response = self.client.get(update_url)
    self.assertEquals(200, response.status_code)
    # set a webhook with headers, starting from the form's initial values so
    # unrelated fields keep their current state
    post_data = response.context['form'].initial
    post_data['webhook'] = 'http://webhooks.uniceflabs.org'
    post_data['header_1_key'] = 'Authorization'
    post_data['header_1_value'] = 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
    response = self.client.post(update_url, post_data)
    self.assertEquals(302, response.status_code)
    self.assertRedirect(response, reverse('orgs.org_home'))
    # check that our webhook settings have changed
    org = Org.objects.get(pk=self.org.pk)
    self.assertEquals('http://webhooks.uniceflabs.org', org.get_webhook_url())
    self.assertDictEqual({'Authorization': 'Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='}, org.get_webhook_headers())
def test_org_administration(self):
    """The org manage/update pages are superuser-only; a superuser can
    browse orgs and change an org's plan."""
    manage_url = reverse('orgs.org_manage')
    update_url = reverse('orgs.org_update', args=[self.org.pk])
    login_url = reverse('users.user_login')
    # no access to anon
    response = self.client.get(manage_url)
    self.assertRedirect(response, login_url)
    response = self.client.get(update_url)
    self.assertRedirect(response, login_url)
    # or admins -- regular org administrators are redirected too
    self.login(self.admin)
    response = self.client.get(manage_url)
    self.assertRedirect(response, login_url)
    response = self.client.get(update_url)
    self.assertRedirect(response, login_url)
    # only superuser
    self.login(self.superuser)
    response = self.client.get(manage_url)
    self.assertEquals(200, response.status_code)
    # should contain our test org
    self.assertContains(response, "Temba")
    # and can go to that org
    response = self.client.get(update_url)
    self.assertEquals(200, response.status_code)
    # start from the form's current values so unrelated fields are preserved
    post_data = response.context['form'].initial
    post_data['plan'] = 'TRIAL'
    post_data['language'] = ''
    post_data['country'] = ''
    post_data['primary_language'] = ''
    # change to the trial plan
    response = self.client.post(update_url, post_data)
    self.assertEquals(302, response.status_code)
@override_settings(SEND_EMAILS=True)
def test_manage_accounts(self):
manage_accounts_url = reverse('orgs.org_manage_accounts')
self.login(self.admin)
response = self.client.get(manage_accounts_url)
self.assertEquals(200, response.status_code)
# we have 19 fields in the form including 16 checkboxes for the four users, an email field, a user group field
# and 'loc' field.
self.assertEquals(19, len(response.context['form'].fields))
self.assertTrue('emails' in response.context['form'].fields)
self.assertTrue('user_group' in response.context['form'].fields)
for user in [self.user, self.editor, self.admin]:
self.assertTrue("administrators_%d" % user.pk in response.context['form'].fields)
self.assertTrue("editors_%d" % user.pk in response.context['form'].fields)
self.assertTrue("viewers_%d" % user.pk in response.context['form'].fields)
self.assertTrue("surveyors_%d" % user.pk in response.context['form'].fields)
self.assertFalse(response.context['form'].fields['emails'].initial)
self.assertEquals('V', response.context['form'].fields['user_group'].initial)
# keep admin as admin, editor as editor, but make user an editor too
post_data = {
'administrators_%d' % self.admin.pk: 'on',
'editors_%d' % self.editor.pk: 'on',
'editors_%d' % self.user.pk: 'on',
'user_group': 'E'
}
response = self.client.post(manage_accounts_url, post_data)
self.assertEquals(302, response.status_code)
org = Org.objects.get(pk=self.org.pk)
self.assertEqual(set(org.administrators.all()), {self.admin})
self.assertEqual(set(org.editors.all()), {self.user, self.editor})
self.assertFalse(set(org.viewers.all()), set())
# add to post_data an email to invite as admin
post_data['emails'] = "norkans7gmail.com"
post_data['user_group'] = 'A'
response = self.client.post(manage_accounts_url, post_data)
self.assertTrue('emails' in response.context['form'].errors)
self.assertEquals("One of the emails you entered is invalid.", response.context['form'].errors['emails'][0])
# now post with right email
post_data['emails'] = "norkans7@gmail.com"
post_data['user_group'] = 'A'
response = self.client.post(manage_accounts_url, post_data)
# an invitation is created and sent by email
self.assertEquals(1, Invitation.objects.all().count())
self.assertTrue(len(mail.outbox) == 1)
invitation = Invitation.objects.get()
self.assertEquals(invitation.org, self.org)
self.assertEquals(invitation.email, "norkans7@gmail.com")
self.assertEquals(invitation.user_group, "A")
# pretend our invite was acted on
Invitation.objects.all().update(is_active=False)
# send another invitation, different group
post_data['emails'] = "norkans7@gmail.com"
post_data['user_group'] = 'E'
self.client.post(manage_accounts_url, post_data)
# old invite should be updated
new_invite = Invitation.objects.all().first()
self.assertEquals(1, Invitation.objects.all().count())
self.assertEquals(invitation.pk, new_invite.pk)
self.assertEquals('E', new_invite.user_group)
self.assertEquals(2, len(mail.outbox))
self.assertTrue(new_invite.is_active)
# post many emails to the form
post_data['emails'] = "norbert@temba.com,code@temba.com"
post_data['user_group'] = 'A'
self.client.post(manage_accounts_url, post_data)
# now 2 new invitations are created and sent
self.assertEquals(3, Invitation.objects.all().count())
self.assertEquals(4, len(mail.outbox))
response = self.client.get(manage_accounts_url)
# user ordered by email
self.assertEqual(response.context['org_users'][0], self.admin)
self.assertEqual(response.context['org_users'][1], self.editor)
self.assertEqual(response.context['org_users'][2], self.user)
# invites ordered by email as well
self.assertEqual(response.context['invites'][0].email, 'code@temba.com')
self.assertEqual(response.context['invites'][1].email, 'norbert@temba.com')
self.assertEqual(response.context['invites'][2].email, 'norkans7@gmail.com')
# Update our users, making the 'user' user a surveyor
post_data = {
'administrators_%d' % self.admin.pk: 'on',
'editors_%d' % self.editor.pk: 'on',
'surveyors_%d' % self.user.pk: 'on',
'user_group': 'E'
}
# successful post redirects
response = self.client.post(manage_accounts_url, post_data)
self.assertEquals(302, response.status_code)
org = Org.objects.get(pk=self.org.pk)
self.assertEqual(set(org.administrators.all()), {self.admin})
self.assertEqual(set(org.editors.all()), {self.editor})
self.assertEqual(set(org.surveyors.all()), {self.user})
# upgrade one of our users to an admin
self.org.editors.remove(self.user)
self.org.administrators.add(self.user)
# now remove ourselves as an admin
post_data = {
'administrators_%d' % self.user.pk: 'on',
'editors_%d' % self.editor.pk: 'on',
'user_group': 'E'
}
response = self.client.post(manage_accounts_url, post_data)
# should be redirected to chooser page
self.assertRedirect(response, reverse('orgs.org_choose'))
# and should no longer be an admin
self.assertFalse(self.admin in self.org.administrators.all())
    @patch('temba.utils.email.send_temba_email')
    def test_join(self, mock_send_temba_email):
        """
        Covers the org join flow: invitation emails contain the join link, anonymous visitors
        are routed to create_login, logged-in users can accept, each user_group code maps to
        the corresponding org role, and expired invites grant nothing.
        """
        def create_invite(group):
            # helper: make an invitation to norkans7@gmail.com for the given user_group code
            return Invitation.objects.create(org=self.org,
                                             user_group=group,
                                             email="norkans7@gmail.com",
                                             host='app.rapidpro.io',
                                             created_by=self.admin,
                                             modified_by=self.admin)
        editor_invitation = create_invite('E')
        editor_invitation.send_invitation()
        email_args = mock_send_temba_email.call_args[0]  # all positional args
        self.assertEqual(email_args[0], "RapidPro Invitation")
        # both the text and HTML bodies should contain the rendered join link, no leftover template tags
        self.assertIn('https://app.rapidpro.io/org/join/%s/' % editor_invitation.secret, email_args[1])
        self.assertNotIn('{{', email_args[1])
        self.assertIn('https://app.rapidpro.io/org/join/%s/' % editor_invitation.secret, email_args[2])
        self.assertNotIn('{{', email_args[2])
        editor_join_url = reverse('orgs.org_join', args=[editor_invitation.secret])
        self.client.logout()
        # if no user is logged we redirect to the create_login page
        response = self.client.get(editor_join_url)
        self.assertEqual(302, response.status_code)
        response = self.client.get(editor_join_url, follow=True)
        self.assertEqual(response.request['PATH_INFO'], reverse('orgs.org_create_login', args=[editor_invitation.secret]))
        # a user is already logged in
        self.invited_editor = self.create_user("InvitedEditor")
        self.login(self.invited_editor)
        response = self.client.get(editor_join_url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(self.org.pk, response.context['org'].pk)
        # we have a form without field except one 'loc'
        self.assertEqual(1, len(response.context['form'].fields))
        post_data = dict()
        response = self.client.post(editor_join_url, post_data, follow=True)
        self.assertEqual(200, response.status_code)
        # accepting the invite adds the user as editor and consumes the invitation
        self.assertIn(self.invited_editor, self.org.editors.all())
        self.assertFalse(Invitation.objects.get(pk=editor_invitation.pk).is_active)
        roles = (('V', self.org.viewers), ('S', self.org.surveyors),
                 ('A', self.org.administrators), ('E', self.org.editors))
        # test it for each role
        for role in roles:
            invite = create_invite(role[0])
            user = self.create_user('User%s' % role[0])
            self.login(user)
            response = self.client.post(reverse('orgs.org_join', args=[invite.secret]), follow=True)
            self.assertEqual(200, response.status_code)
            self.assertIsNotNone(role[1].filter(pk=user.pk).first())
        # try an expired invite
        invite = create_invite('S')
        invite.is_active = False
        invite.save()
        expired_user = self.create_user("InvitedExpired")
        self.login(expired_user)
        response = self.client.post(reverse('orgs.org_join', args=[invite.secret]), follow=True)
        self.assertEqual(200, response.status_code)
        # expired invite must not have granted the surveyor role
        self.assertIsNone(self.org.surveyors.filter(pk=expired_user.pk).first())
    def test_create_login(self):
        """An invited admin can create a login via the invite link and is added to the org as administrator."""
        admin_invitation = Invitation.objects.create(org=self.org,
                                                     user_group="A",
                                                     email="norkans7@gmail.com",
                                                     created_by=self.admin,
                                                     modified_by=self.admin)
        admin_create_login_url = reverse('orgs.org_create_login', args=[admin_invitation.secret])
        self.client.logout()
        response = self.client.get(admin_create_login_url)
        self.assertEquals(200, response.status_code)
        self.assertEquals(self.org.pk, response.context['org'].pk)
        # we have a form with 4 fields and one hidden 'loc'
        self.assertEquals(5, len(response.context['form'].fields))
        self.assertTrue('first_name' in response.context['form'].fields)
        self.assertTrue('last_name' in response.context['form'].fields)
        self.assertTrue('email' in response.context['form'].fields)
        self.assertTrue('password' in response.context['form'].fields)
        post_data = dict()
        post_data['first_name'] = "Norbert"
        post_data['last_name'] = "Kwizera"
        post_data['email'] = "norkans7@gmail.com"
        post_data['password'] = "norbertkwizeranorbert"
        response = self.client.post(admin_create_login_url, post_data, follow=True)
        self.assertEquals(200, response.status_code)
        # the new user now exists, is an org administrator, and the invitation is consumed
        new_invited_user = User.objects.get(email="norkans7@gmail.com")
        self.assertTrue(new_invited_user in self.org.administrators.all())
        self.assertFalse(Invitation.objects.get(pk=admin_invitation.pk).is_active)
    def test_surveyor_invite(self):
        """Surveyor invitees are routed to the surveyor page after signup, and again after a normal login."""
        surveyor_invite = Invitation.objects.create(org=self.org,
                                                    user_group="S",
                                                    email="surveyor@gmail.com",
                                                    created_by=self.admin,
                                                    modified_by=self.admin)
        admin_create_login_url = reverse('orgs.org_create_login', args=[surveyor_invite.secret])
        self.client.logout()
        post_data = dict(first_name='Surveyor', last_name='User', email='surveyor@gmail.com', password='password')
        response = self.client.post(admin_create_login_url, post_data, follow=True)
        self.assertEquals(200, response.status_code)
        # as a surveyor we should have been rerouted
        self.assertEquals(reverse('orgs.org_surveyor'), response._request.path)
        self.assertFalse(Invitation.objects.get(pk=surveyor_invite.pk).is_active)
        # make sure we are a surveyor
        new_invited_user = User.objects.get(email="surveyor@gmail.com")
        self.assertTrue(new_invited_user in self.org.surveyors.all())
        # if we login, we should be rerouted too
        self.client.logout()
        response = self.client.post('/users/login/', {'username': 'surveyor@gmail.com', 'password': 'password'}, follow=True)
        self.assertEquals(200, response.status_code)
        self.assertEquals(reverse('orgs.org_surveyor'), response._request.path)
    def test_choose(self):
        """Multi-org users can switch orgs via the choose page; non-org users see guidance; superusers skip it."""
        self.client.logout()
        choose_url = reverse('orgs.org_choose')
        # have a second org
        self.create_secondary_org()
        self.login(self.admin)
        response = self.client.get(reverse('orgs.org_home'))
        self.assertEquals(response.context['org'], self.org)
        # add self.manager to self.org2 viewers
        self.org2.viewers.add(self.admin)
        response = self.client.get(choose_url)
        self.assertEquals(200, response.status_code)
        self.assertTrue('organization' in response.context['form'].fields)
        post_data = dict()
        post_data['organization'] = self.org2.pk
        response = self.client.post(choose_url, post_data, follow=True)
        self.assertEquals(200, response.status_code)
        # choosing the second org makes it the active one
        response = self.client.get(reverse('orgs.org_home'))
        self.assertEquals(response.context_data['org'], self.org2)
        # a non org user get a message to contact their administrator
        self.login(self.non_org_user)
        response = self.client.get(choose_url)
        self.assertEquals(200, response.status_code)
        self.assertEquals(0, len(response.context['orgs']))
        self.assertContains(response, "Your account is not associated with any organization. Please contact your administrator to receive an invitation to an organization.")
        # superuser gets redirected to user management page
        self.login(self.superuser)
        response = self.client.get(choose_url, follow=True)
        self.assertContains(response, "Organizations")
    def test_topup_admin(self):
        """TopUp create/manage/update pages are superuser-only; verifies credit totals after create and update."""
        self.login(self.admin)
        topup = TopUp.objects.get()
        # admins shouldn't be able to see the create / manage / update pages
        manage_url = reverse('orgs.topup_manage') + "?org=%d" % self.org.id
        self.assertRedirect(self.client.get(manage_url), '/users/login/')
        create_url = reverse('orgs.topup_create') + "?org=%d" % self.org.id
        self.assertRedirect(self.client.get(create_url), '/users/login/')
        update_url = reverse('orgs.topup_update', args=[topup.pk])
        self.assertRedirect(self.client.get(update_url), '/users/login/')
        # log in as root
        self.login(self.superuser)
        # should list our one topup
        response = self.client.get(manage_url)
        self.assertEquals(1, len(response.context['object_list']))
        # create a new one
        post_data = dict(price='1000', credits='500', comment="")
        response = self.client.post(create_url, post_data)
        self.assertEquals(2, TopUp.objects.filter(org=self.org).count())
        self.assertEquals(1500, self.org.get_credits_remaining())
        # update one of our topups
        post_data = dict(is_active=True, price='0', credits='5000', comment="", expires_on="2025-04-03 13:47:46")
        response = self.client.post(update_url, post_data)
        # original topup raised to 5000 credits, plus the newly created 500 = 5500
        self.assertEquals(5500, self.org.get_credits_remaining())
    def test_topups(self):
        """
        End-to-end check of credit accounting: counting used/remaining credits, squashing of
        TopUpCredits, running into debt, applying newly added topups, test-contact messages
        not counting, expiry handling, and the low-credits / expiring-soon thresholds
        (including their caching behavior, verified via assertNumQueries).
        """
        contact = self.create_contact("Michael Shumaucker", "+250788123123")
        test_contact = Contact.get_test_contact(self.user)
        welcome_topup = TopUp.objects.get()
        def create_msgs(recipient, count):
            # create count incoming messages for recipient; each consumes one credit
            for m in range(count):
                self.create_msg(contact=recipient, direction='I', text="Test %d" % m)
        create_msgs(contact, 10)
        # threshold is computed once (1 query) then served from cache (0 queries)
        with self.assertNumQueries(1):
            self.assertEquals(150, self.org.get_low_credits_threshold())
        with self.assertNumQueries(0):
            self.assertEquals(150, self.org.get_low_credits_threshold())
        # we should have 1000 minus 10 credits for this org
        with self.assertNumQueries(4):
            self.assertEquals(990, self.org.get_credits_remaining())  # from db
        with self.assertNumQueries(0):
            self.assertEquals(1000, self.org.get_credits_total())  # from cache
            self.assertEquals(10, self.org.get_credits_used())
            self.assertEquals(990, self.org.get_credits_remaining())
        self.assertEquals(10, welcome_topup.msgs.count())
        self.assertEquals(10, TopUp.objects.get(pk=welcome_topup.pk).get_used())
        # at this point we shouldn't have squashed any topupcredits, so should have the same number as our used
        self.assertEqual(10, TopUpCredits.objects.all().count())
        # now squash
        squash_topupcredits()
        # should only have one remaining
        self.assertEqual(1, TopUpCredits.objects.all().count())
        # reduce our credits on our topup to 15
        TopUp.objects.filter(pk=welcome_topup.pk).update(credits=15)
        self.org.update_caches(OrgEvent.topup_updated, None)  # invalidates our credits remaining cache
        self.assertEquals(15, self.org.get_credits_total())
        self.assertEquals(5, self.org.get_credits_remaining())
        # create 10 more messages, only 5 of which will get a topup
        create_msgs(contact, 10)
        self.assertEquals(15, TopUp.objects.get(pk=welcome_topup.pk).msgs.count())
        self.assertEquals(15, TopUp.objects.get(pk=welcome_topup.pk).get_used())
        # all credits consumed, so there should be no active topup anymore
        self.assertFalse(self.org._calculate_active_topup())
        with self.assertNumQueries(0):
            self.assertEquals(15, self.org.get_credits_total())
            self.assertEquals(20, self.org.get_credits_used())
            self.assertEquals(-5, self.org.get_credits_remaining())
        # again create 10 more messages, none of which will get a topup
        create_msgs(contact, 10)
        with self.assertNumQueries(0):
            self.assertEquals(15, self.org.get_credits_total())
            self.assertEquals(30, self.org.get_credits_used())
            self.assertEquals(-15, self.org.get_credits_remaining())
        self.assertEquals(15, TopUp.objects.get(pk=welcome_topup.pk).get_used())
        # raise our topup to take 20 and create another for 5
        TopUp.objects.filter(pk=welcome_topup.pk).update(credits=20)
        new_topup = TopUp.create(self.admin, price=0, credits=5)
        self.org.update_caches(OrgEvent.topup_updated, None)
        # apply topups which will max out both and reduce debt to 5
        self.org.apply_topups()
        self.assertEquals(20, welcome_topup.msgs.count())
        self.assertEquals(20, TopUp.objects.get(pk=welcome_topup.pk).get_used())
        self.assertEquals(5, new_topup.msgs.count())
        self.assertEquals(5, TopUp.objects.get(pk=new_topup.pk).get_used())
        self.assertEquals(25, self.org.get_credits_total())
        self.assertEquals(30, self.org.get_credits_used())
        self.assertEquals(-5, self.org.get_credits_remaining())
        # create a message from our test contact, should not count against our totals
        test_msg = self.create_msg(contact=test_contact, direction='I', text="Test")
        self.assertIsNone(test_msg.topup_id)
        self.assertEquals(30, self.org.get_credits_used())
        # test pro user status
        self.assertFalse(self.org.is_pro())
        # add new topup with lots of credits
        mega_topup = TopUp.create(self.admin, price=0, credits=100000)
        self.org.update_caches(OrgEvent.topup_updated, None)
        # after applying this, no non-test messages should be without a topup
        self.org.apply_topups()
        self.assertFalse(Msg.all_messages.filter(org=self.org, contact__is_test=False, topup=None))
        self.assertFalse(Msg.all_messages.filter(org=self.org, contact__is_test=True).exclude(topup=None))
        self.assertEquals(5, TopUp.objects.get(pk=mega_topup.pk).get_used())
        # now we're pro
        self.assertTrue(self.org.is_pro())
        self.assertEquals(100025, self.org.get_credits_total())
        self.assertEquals(100025, self.org.get_purchased_credits())
        self.assertEquals(30, self.org.get_credits_used())
        self.assertEquals(99995, self.org.get_credits_remaining())
        # and new messages use the mega topup
        msg = self.create_msg(contact=contact, direction='I', text="Test")
        self.assertEquals(msg.topup, mega_topup)
        self.assertEquals(6, TopUp.objects.get(pk=mega_topup.pk).get_used())
        # but now it expires
        yesterday = timezone.now() - relativedelta(days=1)
        mega_topup.expires_on = yesterday
        mega_topup.save(update_fields=['expires_on'])
        self.org.update_caches(OrgEvent.topup_updated, None)
        # new incoming messages should not be assigned a topup
        msg = self.create_msg(contact=contact, direction='I', text="Test")
        self.assertIsNone(msg.topup)
        # check our totals
        self.org.update_caches(OrgEvent.topup_updated, None)
        # we're still pro though
        self.assertTrue(self.org.is_pro())
        with self.assertNumQueries(2):
            self.assertEquals(100025, self.org.get_purchased_credits())
            self.assertEquals(31, self.org.get_credits_total())
            self.assertEquals(32, self.org.get_credits_used())
            self.assertEquals(-1, self.org.get_credits_remaining())
        # all top up expired
        TopUp.objects.all().update(expires_on=yesterday)
        # we have expiring credits, and no more active
        gift_topup = TopUp.create(self.admin, price=0, credits=100)
        next_week = timezone.now() + relativedelta(days=7)
        gift_topup.expires_on = next_week
        gift_topup.save(update_fields=['expires_on'])
        self.org.update_caches(OrgEvent.topup_updated, None)
        self.org.apply_topups()
        with self.assertNumQueries(3):
            self.assertEquals(99, self.org.get_credits_expiring_soon())
        with self.assertNumQueries(1):
            self.assertEquals(15, self.org.get_low_credits_threshold())
        with self.assertNumQueries(0):
            self.assertEquals(99, self.org.get_credits_expiring_soon())
            self.assertEquals(15, self.org.get_low_credits_threshold())
        # some credits expire soon, but more credits will remain active afterwards
        later_active_topup = TopUp.create(self.admin, price=0, credits=200)
        five_week_ahead = timezone.now() + relativedelta(days=35)
        later_active_topup.expires_on = five_week_ahead
        later_active_topup.save(update_fields=['expires_on'])
        self.org.update_caches(OrgEvent.topup_updated, None)
        self.org.apply_topups()
        with self.assertNumQueries(3):
            self.assertEquals(0, self.org.get_credits_expiring_soon())
        with self.assertNumQueries(1):
            self.assertEquals(45, self.org.get_low_credits_threshold())
        with self.assertNumQueries(0):
            self.assertEquals(0, self.org.get_credits_expiring_soon())
            self.assertEquals(45, self.org.get_low_credits_threshold())
        # no expiring credits
        gift_topup.expires_on = five_week_ahead
        gift_topup.save(update_fields=['expires_on'])
        self.org.update_caches(OrgEvent.topup_updated, None)
        self.org.apply_topups()
        with self.assertNumQueries(3):
            self.assertEquals(0, self.org.get_credits_expiring_soon())
        with self.assertNumQueries(1):
            self.assertEquals(45, self.org.get_low_credits_threshold())
        with self.assertNumQueries(0):
            self.assertEquals(0, self.org.get_credits_expiring_soon())
            self.assertEquals(45, self.org.get_low_credits_threshold())
        # do not consider expired topup
        gift_topup.expires_on = yesterday
        gift_topup.save(update_fields=['expires_on'])
        self.org.update_caches(OrgEvent.topup_updated, None)
        self.org.apply_topups()
        with self.assertNumQueries(3):
            self.assertEquals(0, self.org.get_credits_expiring_soon())
        with self.assertNumQueries(1):
            self.assertEquals(30, self.org.get_low_credits_threshold())
        with self.assertNumQueries(0):
            self.assertEquals(0, self.org.get_credits_expiring_soon())
            self.assertEquals(30, self.org.get_low_credits_threshold())
        # with no active topups at all, the threshold drops to zero
        TopUp.objects.all().update(is_active=False)
        self.org.update_caches(OrgEvent.topup_updated, None)
        self.org.apply_topups()
        with self.assertNumQueries(1):
            self.assertEquals(0, self.org.get_low_credits_threshold())
        with self.assertNumQueries(0):
            self.assertEquals(0, self.org.get_low_credits_threshold())
@patch('temba.orgs.views.TwilioRestClient', MockTwilioClient)
@patch('temba.orgs.models.TwilioRestClient', MockTwilioClient)
@patch('twilio.util.RequestValidator', MockRequestValidator)
def test_twilio_connect(self):
with patch('temba.tests.MockTwilioClient.MockAccounts.get') as mock_get:
with patch('temba.tests.MockTwilioClient.MockApplications.list') as mock_apps_list:
org = self.org
connect_url = reverse("orgs.org_twilio_connect")
self.login(self.admin)
self.admin.set_org(self.org)
response = self.client.get(connect_url)
self.assertEquals(200, response.status_code)
self.assertTrue(response.context['form'])
self.assertEquals(len(response.context['form'].fields.keys()), 3)
self.assertIn('account_sid', response.context['form'].fields.keys())
self.assertIn('account_token', response.context['form'].fields.keys())
mock_get.return_value = MockTwilioClient.MockAccount('Full')
mock_apps_list.return_value = [MockTwilioClient.MockApplication("%s/%d" % (settings.TEMBA_HOST.lower(),
self.org.pk))]
# try posting without an account token
post_data = dict()
post_data['account_sid'] = "AccountSid"
response = self.client.post(connect_url, post_data)
self.assertEquals(response.context['form'].errors['account_token'][0], 'This field is required.')
# now add the account token and try again
post_data['account_token'] = "AccountToken"
# but with an unexpected exception
with patch('temba.tests.MockTwilioClient.__init__') as mock:
mock.side_effect = Exception('Unexpected')
response = self.client.post(connect_url, post_data)
self.assertEquals('The Twilio account SID and Token seem invalid. '
'Please check them again and retry.',
response.context['form'].errors['__all__'][0])
self.client.post(connect_url, post_data)
org.refresh_from_db()
self.assertEquals(org.config_json()['ACCOUNT_SID'], "AccountSid")
self.assertEquals(org.config_json()['ACCOUNT_TOKEN'], "AccountToken")
self.assertTrue(org.config_json()['APPLICATION_SID'])
# when the user submit the secondary token, we use it to get the primary one from the rest API
with patch('temba.tests.MockTwilioClient.MockAccounts.get') as mock_get_primary:
mock_get_primary.return_value = MockTwilioClient.MockAccount('Full', 'PrimaryAccountToken')
self.client.post(connect_url, post_data)
org.refresh_from_db()
self.assertEquals(org.config_json()['ACCOUNT_SID'], "AccountSid")
self.assertEquals(org.config_json()['ACCOUNT_TOKEN'], "PrimaryAccountToken")
self.assertTrue(org.config_json()['APPLICATION_SID'])
twilio_account_url = reverse('orgs.org_twilio_account')
response = self.client.get(twilio_account_url)
self.assertEquals("AccountSid", response.context['account_sid'])
org.refresh_from_db()
config = org.config_json()
self.assertEquals('AccountSid', config['ACCOUNT_SID'])
self.assertEquals('PrimaryAccountToken', config['ACCOUNT_TOKEN'])
# post without a sid or token, should get a form validation error
response = self.client.post(twilio_account_url, dict(disconnect='false'), follow=True)
self.assertEquals('[{"message": "You must enter your Twilio Account SID", "code": ""}]',
response.context['form'].errors['__all__'].as_json())
# all our twilio creds should remain the same
org.refresh_from_db()
config = org.config_json()
self.assertEquals(config['ACCOUNT_SID'], "AccountSid")
self.assertEquals(config['ACCOUNT_TOKEN'], "PrimaryAccountToken")
self.assertEquals(config['APPLICATION_SID'], "TwilioTestSid")
# now try with all required fields, and a bonus field we shouldn't change
self.client.post(twilio_account_url, dict(account_sid='AccountSid',
account_token='SecondaryToken',
disconnect='false',
name='DO NOT CHANGE ME'), follow=True)
# name shouldn't change
org.refresh_from_db()
self.assertEquals(org.name, "Temba")
# now disconnect our twilio connection
self.assertTrue(org.is_connected_to_twilio())
self.client.post(twilio_account_url, dict(disconnect='true', follow=True))
org.refresh_from_db()
self.assertFalse(org.is_connected_to_twilio())
    def test_connect_nexmo(self):
        """Connecting Nexmo: invalid creds show an error, valid creds store key/secret, disconnect clears them."""
        self.login(self.admin)
        # connect nexmo
        connect_url = reverse('orgs.org_nexmo_connect')
        # simulate invalid credentials
        with patch('requests.get') as nexmo:
            nexmo.return_value = MockResponse(401, '{"error-code": "401"}')
            response = self.client.post(connect_url, dict(api_key='key', api_secret='secret'))
            self.assertContains(response, "Your Nexmo API key and secret seem invalid.")
            self.assertFalse(self.org.is_connected_to_nexmo())
        # ok, now with a success
        with patch('requests.get') as nexmo_get:
            with patch('requests.post') as nexmo_post:
                # believe it or not nexmo returns 'error-code' 200
                nexmo_get.return_value = MockResponse(200, '{"error-code": "200"}')
                nexmo_post.return_value = MockResponse(200, '{"error-code": "200"}')
                self.client.post(connect_url, dict(api_key='key', api_secret='secret'))
                # nexmo should now be connected
                self.org = Org.objects.get(pk=self.org.pk)
                self.assertTrue(self.org.is_connected_to_nexmo())
                self.assertEquals(self.org.config_json()['NEXMO_KEY'], 'key')
                self.assertEquals(self.org.config_json()['NEXMO_SECRET'], 'secret')
        # and disconnect
        self.org.remove_nexmo_account()
        self.assertFalse(self.org.is_connected_to_nexmo())
        self.assertFalse(self.org.config_json()['NEXMO_KEY'])
        self.assertFalse(self.org.config_json()['NEXMO_SECRET'])
    def test_connect_plivo(self):
        """Connecting Plivo: invalid creds show an error; valid creds are stashed in the session."""
        self.login(self.admin)
        # connect plivo
        connect_url = reverse('orgs.org_plivo_connect')
        # simulate invalid credentials
        with patch('requests.get') as plivo_mock:
            plivo_mock.return_value = MockResponse(401,
                                                   'Could not verify your access level for that URL.'
                                                   '\nYou have to login with proper credentials')
            response = self.client.post(connect_url, dict(auth_id='auth-id', auth_token='auth-token'))
            self.assertContains(response,
                                "Your Plivo AUTH ID and AUTH TOKEN seem invalid. Please check them again and retry.")
            self.assertFalse(PLIVO_AUTH_ID in self.client.session)
            self.assertFalse(PLIVO_AUTH_TOKEN in self.client.session)
        # ok, now with a success
        with patch('requests.get') as plivo_mock:
            plivo_mock.return_value = MockResponse(200, json.dumps(dict()))
            self.client.post(connect_url, dict(auth_id='auth-id', auth_token='auth-token'))
            # plivo should be added to the session
            self.assertEquals(self.client.session[PLIVO_AUTH_ID], 'auth-id')
            self.assertEquals(self.client.session[PLIVO_AUTH_TOKEN], 'auth-token')
def test_download(self):
response = self.client.get('/org/download/messages/123/')
self.assertLoginRedirect(response)
self.login(self.admin)
response = self.client.get('/org/download/messages/123/')
self.assertRedirect(response, '/assets/download/message_export/123/')
response = self.client.get('/org/download/contacts/123/')
self.assertRedirect(response, '/assets/download/contact_export/123/')
response = self.client.get('/org/download/flows/123/')
self.assertRedirect(response, '/assets/download/results_export/123/')
class AnonOrgTest(TembaTest):
    """
    Tests the case where our organization is marked as anonymous, that is the phone numbers are masked
    for users.
    """
    def setUp(self):
        super(AnonOrgTest, self).setUp()
        # flag the test org as anonymous so phone numbers are masked in all views
        self.org.is_anon = True
        self.org.save()
    def test_contacts(self):
        """Real phone numbers must never appear in list, search, flow, inbox or detail pages; the masked id is shown instead."""
        # are there real phone numbers on the contact list page?
        contact = self.create_contact(None, "+250788123123")
        self.login(self.admin)
        masked = "%010d" % contact.pk
        response = self.client.get(reverse('contacts.contact_list'))
        # phone not in the list
        self.assertNotContains(response, "788 123 123")
        # but the id is
        self.assertContains(response, masked)
        # can't search for it
        response = self.client.get(reverse('contacts.contact_list') + "?search=788")
        # can't look for 788 as that is in the search box..
        self.assertNotContains(response, "123123")
        # create a flow
        flow = self.create_flow()
        # start the contact down it
        flow.start([], [contact])
        # should have one SMS
        self.assertEquals(1, Msg.all_messages.all().count())
        # shouldn't show the number on the outgoing page
        response = self.client.get(reverse('msgs.msg_outbox'))
        self.assertNotContains(response, "788 123 123")
        # also shouldn't show up on the flow results page
        response = self.client.get(reverse('flows.flow_results', args=[flow.pk]) + "?json=true")
        self.assertNotContains(response, "788 123 123")
        self.assertContains(response, masked)
        # create an incoming SMS, check our flow page
        Msg.create_incoming(self.channel, (TEL_SCHEME, contact.get_urn().path), "Blue")
        response = self.client.get(reverse('msgs.msg_flow'))
        self.assertNotContains(response, "788 123 123")
        self.assertContains(response, masked)
        # send another, this will be in our inbox this time
        Msg.create_incoming(self.channel, (TEL_SCHEME, contact.get_urn().path), "Where's the beef?")
        response = self.client.get(reverse('msgs.msg_flow'))
        self.assertNotContains(response, "788 123 123")
        self.assertContains(response, masked)
        # contact detail page
        response = self.client.get(reverse('contacts.contact_read', args=[contact.uuid]))
        self.assertNotContains(response, "788 123 123")
        self.assertContains(response, masked)
class OrgCRUDLTest(TembaTest):
    def test_org_grant(self):
        """Only 'Granters' group members can grant orgs; granting creates the org, admin user, and credits."""
        grant_url = reverse('orgs.org_grant')
        response = self.client.get(grant_url)
        self.assertRedirect(response, '/users/login/')
        self.user = self.create_user(username="tito")
        self.login(self.user)
        response = self.client.get(grant_url)
        self.assertRedirect(response, '/users/login/')
        # membership in the Granters group unlocks the page
        granters = Group.objects.get(name='Granters')
        self.user.groups.add(granters)
        response = self.client.get(grant_url)
        self.assertEquals(200, response.status_code)
        # fill out the form
        post_data = dict(email='john@carmack.com', first_name="John", last_name="Carmack",
                         name="Oculus", timezone="Africa/Kigali", credits="100000", password='dukenukem')
        response = self.client.post(grant_url, post_data, follow=True)
        self.assertContains(response, "created")
        org = Org.objects.get(name="Oculus")
        self.assertEquals(100000, org.get_credits_remaining())
        # both the new user and the granter become administrators of the new org
        user = User.objects.get(username="john@carmack.com")
        self.assertTrue(org.administrators.filter(username="john@carmack.com"))
        self.assertTrue(org.administrators.filter(username="tito"))
        # try a new org with a user that already exists instead
        del post_data['password']
        post_data['name'] = "id Software"
        response = self.client.post(grant_url, post_data, follow=True)
        self.assertContains(response, "created")
        org = Org.objects.get(name="id Software")
        self.assertEquals(100000, org.get_credits_remaining())
        user = User.objects.get(username="john@carmack.com")
        self.assertTrue(org.administrators.filter(username="john@carmack.com"))
        self.assertTrue(org.administrators.filter(username="tito"))
def test_org_signup(self):
signup_url = reverse('orgs.org_signup')
response = self.client.get(signup_url)
self.assertEquals(200, response.status_code)
self.assertTrue('name' in response.context['form'].fields)
# firstname and lastname are required and bad email
post_data = dict(email="bad_email", password="HelloWorld1", name="Your Face")
response = self.client.post(signup_url, post_data)
self.assertTrue('first_name' in response.context['form'].errors)
self.assertTrue('last_name' in response.context['form'].errors)
self.assertTrue('email' in response.context['form'].errors)
post_data = dict(first_name="Eugene", last_name="Rwagasore", email="myal@relieves.org",
password="badpass", name="Your Face")
response = self.client.post(signup_url, post_data)
self.assertTrue('password' in response.context['form'].errors)
post_data = dict(first_name="Eugene", last_name="Rwagasore", email="myal@relieves.org",
password="HelloWorld1", name="Relieves World")
response = self.client.post(signup_url, post_data)
self.assertTrue('timezone' in response.context['form'].errors)
post_data = dict(first_name="Eugene", last_name="Rwagasore", email="myal@relieves.org",
password="HelloWorld1", name="Relieves World", timezone="Africa/Kigali")
response = self.client.post(signup_url, post_data)
# should have a user
user = User.objects.get(username="myal@relieves.org")
self.assertTrue(user.check_password("HelloWorld1"))
# user should be able to get a token
self.assertTrue(user.api_token)
# should have an org
org = Org.objects.get(name="Relieves World")
self.assertTrue(org.administrators.filter(pk=user.id))
self.assertEquals("Relieves World", str(org))
self.assertEquals(org.slug, "relieves-world")
# should have 1000 credits
self.assertEquals(1000, org.get_credits_remaining())
# a single topup
topup = TopUp.objects.get(org=org)
self.assertEquals(1000, topup.credits)
self.assertEquals(0, topup.price)
# and user should be an administrator on that org
self.assertTrue(org.get_org_admins().filter(pk=user.pk))
# fake session set_org to make the test work
user.set_org(org)
# should now be able to go to channels page
response = self.client.get(reverse('channels.channel_claim'))
self.assertEquals(200, response.status_code)
# check that we have all the tabs
self.assertContains(response, reverse('msgs.msg_inbox'))
self.assertContains(response, reverse('flows.flow_list'))
self.assertContains(response, reverse('contacts.contact_list'))
self.assertContains(response, reverse('channels.channel_list'))
self.assertContains(response, reverse('orgs.org_home'))
post_data['name'] = "Relieves World Rwanda"
response = self.client.post(signup_url, post_data)
self.assertTrue('email' in response.context['form'].errors)
# if we hit /login we'll be taken back to the channel page
response = self.client.get(reverse('users.user_check_login'))
self.assertRedirect(response, reverse('orgs.org_choose'))
# but if we log out, same thing takes us to the login page
self.client.logout()
response = self.client.get(reverse('users.user_check_login'))
self.assertRedirect(response, reverse('users.user_login'))
# try going to the org home page, no dice
response = self.client.get(reverse('orgs.org_home'))
self.assertRedirect(response, reverse('users.user_login'))
# log in as the user
self.client.login(username='myal@relieves.org', password='HelloWorld1')
response = self.client.get(reverse('orgs.org_home'))
self.assertEquals(200, response.status_code)
# try setting our webhook and subscribe to one of the events
response = self.client.post(reverse('orgs.org_webhook'), dict(webhook='http://fake.com/webhook.php', mt_sms=1))
self.assertRedirect(response, reverse('orgs.org_home'))
org = Org.objects.get(name="Relieves World")
self.assertEquals("http://fake.com/webhook.php", org.get_webhook_url())
self.assertTrue(org.is_notified_of_mt_sms())
self.assertFalse(org.is_notified_of_mo_sms())
self.assertFalse(org.is_notified_of_mt_call())
self.assertFalse(org.is_notified_of_mo_call())
self.assertFalse(org.is_notified_of_alarms())
# try changing our username, wrong password
post_data = dict(email='myal@wr.org', current_password='HelloWorld')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('current_password' in response.context['form'].errors)
# bad new password
post_data = dict(email='myal@wr.org', current_password='HelloWorld1', new_password='passwor')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('new_password' in response.context['form'].errors)
billg = User.objects.create(username='bill@msn.com', email='bill@msn.com')
# dupe user
post_data = dict(email='bill@msn.com', current_password='HelloWorld1')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertEquals(200, response.status_code)
self.assertTrue('email' in response.context['form'].errors)
post_data = dict(email='myal@wr.org', first_name="Myal", last_name="Greene", language="en-us", current_password='HelloWorld1')
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertRedirect(response, reverse('orgs.org_home'))
self.assertTrue(User.objects.get(username='myal@wr.org'))
self.assertTrue(User.objects.get(email='myal@wr.org'))
self.assertFalse(User.objects.filter(username='myal@relieves.org'))
self.assertFalse(User.objects.filter(email='myal@relieves.org'))
post_data['current_password'] = 'HelloWorld1'
post_data['new_password'] = 'Password123'
response = self.client.post(reverse('orgs.user_edit'), post_data)
self.assertRedirect(response, reverse('orgs.org_home'))
user = User.objects.get(username='myal@wr.org')
self.assertTrue(user.check_password('Password123'))
def test_org_timezone(self):
self.assertEqual(self.org.timezone, 'Africa/Kigali')
Msg.create_incoming(self.channel, (TEL_SCHEME, "250788382382"), "My name is Frank")
self.login(self.admin)
response = self.client.get(reverse('msgs.msg_inbox'), follow=True)
# Check the message datetime
created_on = response.context['object_list'][0].created_on.astimezone(timezone.pytz.timezone(self.org.timezone))
self.assertIn(created_on.strftime("%I:%M %p").lower().lstrip('0'), response.content)
# change the org timezone to "Africa/Kenya"
self.org.timezone = 'Africa/Nairobi'
self.org.save()
response = self.client.get(reverse('msgs.msg_inbox'), follow=True)
# checkout the message should have the datetime changed by timezone
created_on = response.context['object_list'][0].created_on.astimezone(timezone.pytz.timezone(self.org.timezone))
self.assertIn(created_on.strftime("%I:%M %p").lower().lstrip('0'), response.content)
def test_urn_schemes(self):
# remove existing channels
Channel.objects.all().update(is_active=False, org=None)
self.assertEqual(set(), self.org.get_schemes(SEND))
self.assertEqual(set(), self.org.get_schemes(RECEIVE))
# add a receive only tel channel
Channel.create(self.org, self.user, 'RW', TWILIO, "Nexmo", "0785551212", role="R", secret="45678", gcm_id="123")
self.org = Org.objects.get(pk=self.org.pk)
self.assertEqual(set(), self.org.get_schemes(SEND))
self.assertEqual({TEL_SCHEME}, self.org.get_schemes(RECEIVE))
# add a send/receive tel channel
Channel.create(self.org, self.user, 'RW', TWILIO, "Twilio", "0785553434", role="SR", secret="56789", gcm_id="456")
self.org = Org.objects.get(pk=self.org.id)
self.assertEqual({TEL_SCHEME}, self.org.get_schemes(SEND))
self.assertEqual({TEL_SCHEME}, self.org.get_schemes(RECEIVE))
# add a twitter channel
Channel.create(self.org, self.user, None, TWITTER, "Twitter")
self.org = Org.objects.get(pk=self.org.id)
self.assertEqual({TEL_SCHEME, TWITTER_SCHEME}, self.org.get_schemes(SEND))
self.assertEqual({TEL_SCHEME, TWITTER_SCHEME}, self.org.get_schemes(RECEIVE))
def test_login_case_not_sensitive(self):
login_url = reverse('users.user_login')
User.objects.create_superuser("superuser", "superuser@group.com", "superuser")
response = self.client.post(login_url, dict(username="superuser", password="superuser"))
self.assertEquals(response.status_code, 302)
response = self.client.post(login_url, dict(username="superuser", password="superuser"), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('orgs.org_manage'))
response = self.client.post(login_url, dict(username="SUPeruser", password="superuser"))
self.assertEquals(response.status_code, 302)
response = self.client.post(login_url, dict(username="SUPeruser", password="superuser"), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('orgs.org_manage'))
User.objects.create_superuser("withCAPS", "with_caps@group.com", "thePASSWORD")
response = self.client.post(login_url, dict(username="withcaps", password="thePASSWORD"))
self.assertEquals(response.status_code, 302)
response = self.client.post(login_url, dict(username="withcaps", password="thePASSWORD"), follow=True)
self.assertEquals(response.request['PATH_INFO'], reverse('orgs.org_manage'))
# passwords stay case sensitive
response = self.client.post(login_url, dict(username="withcaps", password="thepassword"), follow=True)
self.assertTrue('form' in response.context)
self.assertTrue(response.context['form'].errors)
def test_org_service(self):
# create a customer service user
self.csrep = self.create_user("csrep")
self.csrep.groups.add(Group.objects.get(name="Customer Support"))
self.csrep.is_staff = True
self.csrep.save()
service_url = reverse('orgs.org_service')
# without logging in, try to service our main org
response = self.client.post(service_url, dict(organization=self.org.id))
self.assertRedirect(response, '/users/login/')
# try logging in with a normal user
self.login(self.admin)
# same thing, no permission
response = self.client.post(service_url, dict(organization=self.org.id))
self.assertRedirect(response, '/users/login/')
# ok, log in as our cs rep
self.login(self.csrep)
# then service our org
response = self.client.post(service_url, dict(organization=self.org.id))
self.assertRedirect(response, '/msg/inbox/')
# create a new contact
response = self.client.post(reverse('contacts.contact_create'), data=dict(name='Ben Haggerty',
urn__tel__0='0788123123'))
self.assertNoFormErrors(response)
# make sure that contact's created on is our cs rep
contact = Contact.objects.get(urns__path='+250788123123', org=self.org)
self.assertEquals(self.csrep, contact.created_by)
# make sure we can manage topups as well
TopUp.objects.create(org=self.org, price=100, credits=1000, expires_on=timezone.now() + timedelta(days=30),
created_by=self.admin, modified_by=self.admin)
response = self.client.get(reverse('orgs.topup_manage') + "?org=%d" % self.org.id)
# i'd buy that for a dollar!
self.assertContains(response, '$1.00')
self.assertNotRedirect(response, '/users/login/')
# ok, now end our session
response = self.client.post(service_url, dict())
self.assertRedirect(response, '/org/manage/')
# can no longer go to inbox, asked to log in
response = self.client.get(reverse('msgs.msg_inbox'))
self.assertRedirect(response, '/users/login/')
class LanguageTest(TembaTest):
    """Tests for org language configuration and ISO-639 language code lookups."""

    def test_setting_language(self):
        """Setting, listing, searching and clearing an org's primary and translation languages."""
        self.login(self.admin)

        # update our org with some language settings
        post_data = dict(primary_lang='fre', languages='hat,arc')
        response = self.client.post(reverse('orgs.org_languages'), post_data)
        self.assertEquals(302, response.status_code)
        self.org.refresh_from_db()

        self.assertEquals('French', self.org.primary_language.name)
        self.assertIsNotNone(self.org.languages.filter(name='French'))

        # everything after the paren should be stripped for aramaic
        self.assertIsNotNone(self.org.languages.filter(name='Official Aramaic'))

        # everything after the semi should be stripped for haitian
        self.assertIsNotNone(self.org.languages.filter(name='Haitian'))

        # check that the last load shows our new languages
        response = self.client.get(reverse('orgs.org_languages'))
        self.assertEquals('Haitian and Official Aramaic', response.context['languages'])
        self.assertContains(response, 'fre')
        self.assertContains(response, 'hat,arc')

        # three translation languages
        self.client.post(reverse('orgs.org_languages'), dict(primary_lang='fre', languages='hat,arc,spa'))
        response = self.client.get(reverse('orgs.org_languages'))
        self.assertEquals('Haitian, Official Aramaic and Spanish', response.context['languages'])

        # one translation language
        self.client.post(reverse('orgs.org_languages'), dict(primary_lang='fre', languages='hat'))
        response = self.client.get(reverse('orgs.org_languages'))
        self.assertEquals('Haitian', response.context['languages'])

        # remove our primary language
        self.client.post(reverse('orgs.org_languages'), dict())
        self.org.refresh_from_db()
        self.assertIsNone(self.org.primary_language)

        # search languages
        response = self.client.get('%s?search=fre' % reverse('orgs.org_languages'))
        results = json.loads(response.content)['results']
        self.assertEquals(4, len(results))

        # initial should do a match on code only
        response = self.client.get('%s?initial=fre' % reverse('orgs.org_languages'))
        results = json.loads(response.content)['results']
        self.assertEquals(1, len(results))

    def test_language_codes(self):
        """get_language_name and search_language_names behavior, including name cleanup and result ordering."""
        self.assertEquals('French', languages.get_language_name('fre'))
        self.assertEquals('Creoles and pidgins, English based', languages.get_language_name('cpe'))

        # should strip off anything after an open paren or semicolon
        self.assertEquals('Official Aramaic', languages.get_language_name('arc'))
        self.assertEquals('Haitian', languages.get_language_name('hat'))

        # check that search returns results and in the proper order
        matches = languages.search_language_names('Fre')
        self.assertEquals(4, len(matches))
        self.assertEquals('Creoles and pidgins, French-based', matches[0]['text'])
        self.assertEquals('French', matches[1]['text'])
        self.assertEquals('French, Middle (ca.1400-1600)', matches[2]['text'])
        self.assertEquals('French, Old (842-ca.1400)', matches[3]['text'])

        # try a language that doesn't exist
        self.assertEquals(None, languages.get_language_name('klingon'))

    def test_get_localized_text(self):
        """Language.get_localized_text falls back through the preferred language list to the default text."""
        text_translations = dict(eng="Hello", esp="Hola")

        # null case
        self.assertEqual(Language.get_localized_text(None, None, "Hi"), "Hi")

        # simple dictionary case
        self.assertEqual(Language.get_localized_text(text_translations, ['eng'], "Hi"), "Hello")

        # missing language case
        self.assertEqual(Language.get_localized_text(text_translations, ['fre'], "Hi"), "Hi")

        # secondary option
        self.assertEqual(Language.get_localized_text(text_translations, ['fre', 'esp'], "Hi"), "Hola")
class BulkExportTest(TembaTest):
    """Tests for exporting and importing flows, campaigns and triggers as JSON app bundles."""

    def test_trigger_flow(self):
        """Importing a flow that triggers another flow keeps the start-flow action intact."""
        self.import_file('triggered-flow')

        flow = Flow.objects.filter(name='Trigger a Flow', org=self.org).first()
        definition = flow.as_json()
        actions = definition[Flow.ACTION_SETS][0]['actions']
        self.assertEquals(1, len(actions))
        self.assertEquals('Triggered Flow', actions[0]['name'])

    def test_missing_flows_on_import(self):
        """Actions that start a flow missing from the import are dropped, along with now-empty actionsets."""
        # import a flow that starts a missing flow
        self.import_file('start-missing-flow')

        # the flow that kicks off our missing flow
        flow = Flow.objects.get(name='Start Missing Flow')

        # make sure our missing flow is indeed not there
        self.assertIsNone(Flow.objects.filter(name='Missing Flow').first())

        # these two actionsets only have a single action that starts the missing flow
        # therefore they should not be created on import
        self.assertIsNone(ActionSet.objects.filter(flow=flow, y=160, x=90).first())
        self.assertIsNone(ActionSet.objects.filter(flow=flow, y=233, x=395).first())

        # should have this actionset, but only one action now since one was removed
        other_actionset = ActionSet.objects.filter(flow=flow, y=145, x=731).first()
        self.assertEquals(1, len(other_actionset.get_actions()))

        # now make sure it does the same thing from an actionset
        self.import_file('start-missing-flow-from-actionset')
        self.assertIsNotNone(Flow.objects.filter(name='Start Missing Flow').first())
        self.assertIsNone(Flow.objects.filter(name='Missing Flow').first())

    def test_import(self):
        """Import requires purchased credits, rejects stale export versions and rolls back on errors."""
        self.login(self.admin)

        # try importing without having purchased credits
        post_data = dict(import_file=open('%s/test_flows/new_mother.json' % settings.MEDIA_ROOT, 'rb'))
        response = self.client.post(reverse('orgs.org_import'), post_data)
        self.assertEquals(response.context['form'].errors['import_file'][0], 'Sorry, import is a premium feature')

        # now purchase some credits and try again
        TopUp.objects.create(org=self.org, price=0, credits=10000,
                             expires_on=timezone.now() + timedelta(days=30),
                             created_by=self.admin, modified_by=self.admin)

        # force our cache to reload
        self.org.get_credits_total(force_dirty=True)
        self.assertTrue(self.org.has_added_credits())

        # now try again with purchased credits, but our file is too old
        post_data = dict(import_file=open('%s/test_flows/too_old.json' % settings.MEDIA_ROOT, 'rb'))
        response = self.client.post(reverse('orgs.org_import'), post_data)
        self.assertEquals(response.context['form'].errors['import_file'][0], 'This file is no longer valid. Please export a new version and try again.')

        # simulate an unexpected exception during import
        with patch('temba.triggers.models.Trigger.import_triggers') as validate:
            validate.side_effect = Exception('Unexpected Error')
            post_data = dict(import_file=open('%s/test_flows/new_mother.json' % settings.MEDIA_ROOT, 'rb'))
            response = self.client.post(reverse('orgs.org_import'), post_data)
            self.assertEquals(response.context['form'].errors['import_file'][0], 'Sorry, your import file is invalid.')

            # trigger import failed, new flows that were added should get rolled back
            self.assertIsNone(Flow.objects.filter(org=self.org, name='New Mother').first())

    def test_export_import(self):
        """Round-trip an org export: re-importing resets local edits, matches objects by id
        when the site matches, and creates duplicates when the site differs."""

        def assert_object_counts():
            # expected object counts after a clean import of the-clinic
            self.assertEquals(8, Flow.objects.filter(org=self.org, is_archived=False, flow_type='F').count())
            self.assertEquals(2, Flow.objects.filter(org=self.org, is_archived=False, flow_type='M').count())
            self.assertEquals(1, Campaign.objects.filter(org=self.org, is_archived=False).count())
            self.assertEquals(4, CampaignEvent.objects.filter(campaign__org=self.org, event_type='F').count())
            self.assertEquals(2, CampaignEvent.objects.filter(campaign__org=self.org, event_type='M').count())
            self.assertEquals(2, Trigger.objects.filter(org=self.org, trigger_type='K', is_archived=False).count())
            self.assertEquals(1, Trigger.objects.filter(org=self.org, trigger_type='C', is_archived=False).count())
            self.assertEquals(1, Trigger.objects.filter(org=self.org, trigger_type='M', is_archived=False).count())
            self.assertEquals(3, ContactGroup.user_groups.filter(org=self.org).count())
            self.assertEquals(1, Label.label_objects.filter(org=self.org).count())

        # import all our bits
        self.import_file('the-clinic')

        # check that the right number of objects successfully imported for our app
        assert_object_counts()

        # let's update some stuff
        confirm_appointment = Flow.objects.get(name='Confirm Appointment')
        confirm_appointment.expires_after_minutes = 60
        confirm_appointment.save()

        action_set = confirm_appointment.action_sets.order_by('-y').first()
        actions = action_set.get_actions_dict()
        actions[0]['msg']['base'] = 'Thanks for nothing'
        action_set.set_actions_dict(actions)
        action_set.save()

        trigger = Trigger.objects.filter(keyword='patient').first()
        trigger.flow = confirm_appointment
        trigger.save()

        message_flow = Flow.objects.filter(flow_type='M').order_by('pk').first()
        action_set = message_flow.action_sets.order_by('-y').first()
        actions = action_set.get_actions_dict()
        self.assertEquals("Hi there, just a quick reminder that you have an appointment at The Clinic at @contact.next_appointment. If you can't make it please call 1-888-THE-CLINIC.", actions[0]['msg']['base'])
        actions[0]['msg'] = 'No reminders for you!'
        action_set.set_actions_dict(actions)
        action_set.save()

        # now reimport
        self.import_file('the-clinic')

        # our flow should get reset from the import
        confirm_appointment = Flow.objects.get(pk=confirm_appointment.pk)
        action_set = confirm_appointment.action_sets.order_by('-y').first()
        actions = action_set.get_actions_dict()
        self.assertEquals("Thanks, your appointment at The Clinic has been confirmed for @contact.next_appointment. See you then!", actions[0]['msg']['base'])

        # same with our trigger
        trigger = Trigger.objects.filter(keyword='patient').first()
        self.assertEquals(Flow.objects.filter(name='Register Patient').first(), trigger.flow)

        # our old campaign message flow should be gone now
        self.assertIsNone(Flow.objects.filter(pk=message_flow.pk).first())

        # find our new message flow, and see that the original message is there
        message_flow = Flow.objects.filter(flow_type='M').order_by('pk').first()
        action_set = Flow.objects.get(pk=message_flow.pk).action_sets.order_by('-y').first()
        actions = action_set.get_actions_dict()
        self.assertEquals("Hi there, just a quick reminder that you have an appointment at The Clinic at @contact.next_appointment. If you can't make it please call 1-888-THE-CLINIC.", actions[0]['msg']['base'])

        # and we should have the same number of items as after the first import
        assert_object_counts()

        # see that everything shows up properly on our export page
        self.login(self.admin)
        response = self.client.get(reverse('orgs.org_export'))
        self.assertContains(response, 'Register Patient')
        self.assertContains(response, 'Catch All')
        self.assertContains(response, 'Missed Call')
        self.assertContains(response, 'Start Notifications')
        self.assertContains(response, 'Stop Notifications')
        self.assertContains(response, 'Confirm Appointment')
        self.assertContains(response, 'Appointment Followup')

        # our campaign
        self.assertContains(response, 'Appointment Schedule')

        # now let's export!
        post_data = dict(flows=[f.pk for f in Flow.objects.filter(flow_type='F')],
                         campaigns=[c.pk for c in Campaign.objects.all()])
        response = self.client.post(reverse('orgs.org_export'), post_data)
        exported = json.loads(response.content)
        self.assertEquals(CURRENT_EXPORT_VERSION, exported.get('version', 0))
        self.assertEquals('https://app.rapidpro.io', exported.get('site', None))
        self.assertEquals(8, len(exported.get('flows', [])))
        self.assertEquals(4, len(exported.get('triggers', [])))
        self.assertEquals(1, len(exported.get('campaigns', [])))

        # finally let's try importing our exported file
        self.org.import_app(exported, self.admin, site='http://app.rapidpro.io')
        assert_object_counts()

        # let's rename a flow and import our export again
        flow = Flow.objects.get(name='Confirm Appointment')
        flow.name = "A new flow"
        flow.save()

        campaign = Campaign.objects.all().first()
        campaign.name = "A new campagin"
        campaign.save()

        group = ContactGroup.user_groups.filter(name='Pending Appointments').first()
        group.name = "A new group"
        group.save()

        # it should fall back on ids and not create new objects even though the names changed
        self.org.import_app(exported, self.admin, site='http://app.rapidpro.io')
        assert_object_counts()

        # and our objects should have the same names as before
        self.assertEquals('Confirm Appointment', Flow.objects.get(pk=flow.pk).name)
        self.assertEquals('Appointment Schedule', Campaign.objects.all().first().name)
        self.assertEquals('Pending Appointments', ContactGroup.user_groups.get(pk=group.pk).name)

        # let's rename our objects again
        flow.name = "A new name"
        flow.save()

        campaign.name = "A new campagin"
        campaign.save()

        group.name = "A new group"
        group.save()

        # now import the same import but pretend its from a different site
        self.org.import_app(exported, self.admin, site='http://temba.io')

        # the newly named objects won't get updated in this case and we'll create new ones instead
        self.assertEquals(9, Flow.objects.filter(org=self.org, is_archived=False, flow_type='F').count())
        self.assertEquals(2, Campaign.objects.filter(org=self.org, is_archived=False).count())
        self.assertEquals(4, ContactGroup.user_groups.filter(org=self.org).count())

        # now archive a flow
        register = Flow.objects.filter(name='Register Patient').first()
        register.is_archived = True
        register.save()

        # default view shouldn't show archived flows
        response = self.client.get(reverse('orgs.org_export'))
        self.assertNotContains(response, 'Register Patient')

        # with the archived flag on, it should be there
        response = self.client.get("%s?archived=1" % reverse('orgs.org_export'))
        self.assertContains(response, 'Register Patient')

        # delete our flow, and reimport
        confirm_appointment.delete()
        self.org.import_app(exported, self.admin, site='https://app.rapidpro.io')

        # make sure we have the previously exported expiration
        confirm_appointment = Flow.objects.get(name='Confirm Appointment')
        self.assertEquals(60, confirm_appointment.expires_after_minutes)
class CreditAlertTest(TembaTest):
    """Tests for CreditAlert creation and notification emails for the over,
    low and expiring credit states."""

    def test_check_org_credits(self):
        """check_org_credits creates at most one active alert per state, emails admins,
        and only re-alerts after reset_for_org clears the active alerts."""
        self.joe = self.create_contact("Joe Blow", "123")
        self.create_msg(contact=self.joe)
        with self.settings(HOSTNAME="rapidpro.io", SEND_EMAILS=True):
            with patch('temba.orgs.models.Org.get_credits_remaining') as mock_get_credits_remaining:
                # negative remaining credits -> org is out of credit
                mock_get_credits_remaining.return_value = -1

                # no alert yet
                self.assertFalse(CreditAlert.objects.all())

                CreditAlert.check_org_credits()

                # one alert created and sent
                self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                alert_type=ORG_CREDIT_OVER).count())
                self.assertEquals(1, len(mail.outbox))

                # alert email is for out of credits type
                sent_email = mail.outbox[0]
                self.assertEqual(len(sent_email.to), 1)
                self.assertTrue('RapidPro account for Temba' in sent_email.body)
                self.assertTrue('is out of credit.' in sent_email.body)

                # no new alert if one is sent and no new email
                CreditAlert.check_org_credits()
                self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                alert_type=ORG_CREDIT_OVER).count())
                self.assertEquals(1, len(mail.outbox))

                # reset alerts
                CreditAlert.reset_for_org(self.org)
                self.assertFalse(CreditAlert.objects.filter(org=self.org, is_active=True))

                # can resend a new alert
                CreditAlert.check_org_credits()
                self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                alert_type=ORG_CREDIT_OVER).count())
                self.assertEquals(2, len(mail.outbox))

                # back to positive credits so the 'over' state no longer applies
                mock_get_credits_remaining.return_value = 10

                with patch('temba.orgs.models.Org.has_low_credits') as mock_has_low_credits:
                    mock_has_low_credits.return_value = True

                    self.assertFalse(CreditAlert.objects.filter(org=self.org, alert_type=ORG_CREDIT_LOW))

                    CreditAlert.check_org_credits()

                    # low credit alert created and email sent
                    self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                    alert_type=ORG_CREDIT_LOW).count())
                    self.assertEquals(3, len(mail.outbox))

                    # email sent
                    sent_email = mail.outbox[2]
                    self.assertEqual(len(sent_email.to), 1)
                    self.assertTrue('RapidPro account for Temba' in sent_email.body)
                    self.assertTrue('is running low on credits' in sent_email.body)

                    # no new alert if one is sent and no new email
                    CreditAlert.check_org_credits()
                    self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                    alert_type=ORG_CREDIT_LOW).count())
                    self.assertEquals(3, len(mail.outbox))

                    # reset alerts
                    CreditAlert.reset_for_org(self.org)
                    self.assertFalse(CreditAlert.objects.filter(org=self.org, is_active=True))

                    # can resend a new alert
                    CreditAlert.check_org_credits()
                    self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                    alert_type=ORG_CREDIT_LOW).count())
                    self.assertEquals(4, len(mail.outbox))

                    mock_has_low_credits.return_value = False

                    with patch('temba.orgs.models.Org.get_credits_expiring_soon') as mock_get_credits_exipiring_soon:
                        mock_get_credits_exipiring_soon.return_value = 0

                        self.assertFalse(CreditAlert.objects.filter(org=self.org, alert_type=ORG_CREDIT_EXPIRING))

                        CreditAlert.check_org_credits()

                        # no alert since no expiring credits
                        self.assertFalse(CreditAlert.objects.filter(org=self.org, alert_type=ORG_CREDIT_EXPIRING))

                        mock_get_credits_exipiring_soon.return_value = 200

                        CreditAlert.check_org_credits()

                        # expiring credit alert created and email sent
                        self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                        alert_type=ORG_CREDIT_EXPIRING).count())
                        self.assertEquals(5, len(mail.outbox))

                        # email sent
                        sent_email = mail.outbox[4]
                        self.assertEqual(len(sent_email.to), 1)
                        self.assertTrue('RapidPro account for Temba' in sent_email.body)
                        self.assertTrue('expiring credits in less than one month.' in sent_email.body)

                        # no new alert if one is sent and no new email
                        CreditAlert.check_org_credits()
                        self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                        alert_type=ORG_CREDIT_EXPIRING).count())
                        self.assertEquals(5, len(mail.outbox))

                        # reset alerts
                        CreditAlert.reset_for_org(self.org)
                        self.assertFalse(CreditAlert.objects.filter(org=self.org, is_active=True))

                        # can resend a new alert
                        CreditAlert.check_org_credits()
                        self.assertEquals(1, CreditAlert.objects.filter(is_active=True, org=self.org,
                                                                        alert_type=ORG_CREDIT_EXPIRING).count())
                        self.assertEquals(6, len(mail.outbox))
class UnreadCountTest(FlowFileTest):
    """Tests for org-level and flow-level unread message counts surfaced in page contexts."""

    def test_unread_count_test(self):
        """Incoming flow/inbox messages bump the unread counts; visiting the relevant
        pane clears them; test contacts never affect them."""
        flow = self.get_flow('favorites')

        # create a trigger for 'favs'
        Trigger.objects.create(org=self.org, flow=flow, keyword='favs', created_by=self.admin, modified_by=self.admin)

        # start our flow by firing an incoming message
        contact = self.create_contact('Anakin Skywalker', '+12067791212')
        msg = self.create_msg(contact=contact, text="favs")

        # process it
        Msg.process_message(msg)

        # our flow unread count should have gone up
        self.assertEquals(1, flow.get_and_clear_unread_responses())

        # cleared by the first call
        self.assertEquals(0, flow.get_and_clear_unread_responses())

        # at this point our flow should have started.. go to our trigger list page to see if our context is correct
        self.login(self.admin)
        trigger_list = reverse('triggers.trigger_list')
        response = self.client.get(trigger_list)

        self.assertEquals(0, response.context['msgs_unread_count'])
        self.assertEquals(1, response.context['flows_unread_count'])

        # answer another question in the flow
        msg = self.create_msg(contact=contact, text="red")
        Msg.process_message(msg)

        response = self.client.get(trigger_list)
        self.assertEquals(0, response.context['msgs_unread_count'])
        self.assertEquals(2, response.context['flows_unread_count'])

        # finish the flow and send a message outside it
        msg = self.create_msg(contact=contact, text="primus")
        Msg.process_message(msg)

        msg = self.create_msg(contact=contact, text="nic")
        Msg.process_message(msg)

        # this one arrives after the flow is done, so it counts as an inbox message
        msg = self.create_msg(contact=contact, text="Hello?")
        Msg.process_message(msg)

        response = self.client.get(trigger_list)
        self.assertEquals(4, response.context['flows_unread_count'])
        self.assertEquals(1, response.context['msgs_unread_count'])

        # visit the msg pane
        response = self.client.get(reverse('msgs.msg_inbox'))
        self.assertEquals(4, response.context['flows_unread_count'])
        self.assertEquals(0, response.context['msgs_unread_count'])

        # now the flow list pane
        response = self.client.get(reverse('flows.flow_list'))
        self.assertEquals(0, response.context['flows_unread_count'])
        self.assertEquals(0, response.context['msgs_unread_count'])

        # make sure a test contact doesn't update our counts
        test_contact = self.create_contact("Test Contact", "+12065551214", is_test=True)

        msg = self.create_msg(contact=test_contact, text="favs")
        Msg.process_message(msg)

        # assert our counts weren't updated
        self.assertEquals(0, self.org.get_unread_msg_count(UNREAD_INBOX_MSGS))
        self.assertEquals(0, self.org.get_unread_msg_count(UNREAD_FLOW_MSGS))

        # wasn't counted for the individual flow
        self.assertEquals(0, flow.get_and_clear_unread_responses())
class EmailContextProcessorsTest(SmartminTest):
    """Tests for BrandingMiddleware and the link_components helper used to build
    links in outgoing emails."""

    def setUp(self):
        super(EmailContextProcessorsTest, self).setUp()
        self.admin = self.create_user("Administrator")
        self.middleware = BrandingMiddleware()

    def test_link_components(self):
        """link_components should derive protocol/hostname from the request's brand,
        and password-reset emails should use that branded hostname."""
        self.request = Mock(spec=HttpRequest)
        self.request.get_host.return_value = "rapidpro.io"
        response = self.middleware.process_request(self.request)
        self.assertIsNone(response)
        self.assertEquals(link_components(self.request, self.admin), dict(protocol="https", hostname="app.rapidpro.io"))

        with self.settings(HOSTNAME="rapidpro.io"):
            forget_url = reverse('users.user_forget')

            post_data = dict()
            post_data['email'] = 'nouser@nouser.com'

            response = self.client.post(forget_url, post_data, follow=True)
            self.assertEquals(1, len(mail.outbox))

            sent_email = mail.outbox[0]
            self.assertEqual(len(sent_email.to), 1)
            self.assertEqual(sent_email.to[0], 'nouser@nouser.com')

            # we have the domain of the rapidpro.io brand
            self.assertTrue('app.rapidpro.io' in sent_email.body)
class TestStripeCredits(TembaTest):
    """Tests for purchasing credit top-ups through Stripe."""

    @patch('stripe.Customer.create')
    @patch('stripe.Charge.create')
    @override_settings(SEND_EMAILS=True)
    def test_add_credits(self, charge_create, customer_create):
        customer_create.return_value = dict_to_struct('Customer', dict(id='stripe-cust-1'))
        charge_create.return_value = \
            dict_to_struct('Charge', dict(id='stripe-charge-1',
                                          card=dict_to_struct('Card', dict(last4='1234', type='Visa', name='Rudolph'))))

        self.org.add_credits('2000', 'stripe-token', self.admin)

        # BUG FIX: this was assertTrue(2000, ...) which always passes because the
        # second argument is treated as the failure message. The org starts with a
        # 1000-credit welcome top-up (see test_add_credits_fail), so after buying a
        # 2000-credit bundle the total should be 3000 — TODO confirm against setUp.
        self.assertEqual(3000, self.org.get_credits_total())

        # assert we saved our charge info
        topup = self.org.topups.last()
        self.assertEqual('stripe-charge-1', topup.stripe_charge)

        # and we saved our stripe customer info
        org = Org.objects.get(id=self.org.id)
        self.assertEqual('stripe-cust-1', org.stripe_customer)

        # assert we sent our confirmation email
        self.assertEqual(1, len(mail.outbox))
        email = mail.outbox[0]
        self.assertEqual("RapidPro Receipt", email.subject)
        self.assertTrue('Rudolph' in email.body)
        self.assertTrue('Visa' in email.body)
        self.assertTrue('$20' in email.body)

    @patch('stripe.Customer.create')
    def test_add_credits_fail(self, customer_create):
        customer_create.side_effect = ValueError("Invalid customer token")

        with self.assertRaises(ValidationError):
            self.org.add_credits('2000', 'stripe-token', self.admin)

        # assert no email was sent
        self.assertEqual(0, len(mail.outbox))

        # and no topups created beyond the initial welcome top-up
        self.assertEqual(1, self.org.topups.all().count())
        self.assertEqual(1000, self.org.get_credits_total())

    def test_add_credits_invalid_bundle(self):
        with self.assertRaises(ValidationError):
            self.org.add_credits('-10', 'stripe-token', self.admin)

        # assert no email was sent
        self.assertEqual(0, len(mail.outbox))

        # and no topups created beyond the initial welcome top-up
        self.assertEqual(1, self.org.topups.all().count())
        self.assertEqual(1000, self.org.get_credits_total())

    @patch('stripe.Customer.retrieve')
    @patch('stripe.Charge.create')
    @override_settings(SEND_EMAILS=True)
    def test_add_credits_existing_customer(self, charge_create, customer_retrieve):
        self.org.stripe_customer = 'stripe-cust-1'
        self.org.save()

        class MockCard(object):
            def __init__(self):
                self.id = 'stripe-card-1'

            def delete(self):
                pass

        class MockCards(object):
            def all(self):
                return dict_to_struct('MockCardData', dict(data=[MockCard(), MockCard()]))

            def create(self, card):
                return MockCard()

        class MockCustomer(object):
            def __init__(self):
                self.id = 'stripe-cust-1'
                self.cards = MockCards()

            def save(self):
                pass

        customer_retrieve.return_value = MockCustomer()
        charge_create.return_value = \
            dict_to_struct('Charge', dict(id='stripe-charge-1',
                                          card=dict_to_struct('Card', dict(last4='1234', type='Visa', name='Rudolph'))))

        self.org.add_credits('2000', 'stripe-token', self.admin)

        # BUG FIX: was assertTrue(2000, ...) which always passes; see
        # test_add_credits above for the expected 1000 + 2000 total.
        self.assertEqual(3000, self.org.get_credits_total())

        # assert we saved our charge info
        topup = self.org.topups.last()
        self.assertEqual('stripe-charge-1', topup.stripe_charge)

        # and we saved our stripe customer info
        org = Org.objects.get(id=self.org.id)
        self.assertEqual('stripe-cust-1', org.stripe_customer)

        # assert we sent our confirmation email
        self.assertEqual(1, len(mail.outbox))
        email = mail.outbox[0]
        self.assertEqual("RapidPro Receipt", email.subject)
        self.assertTrue('Rudolph' in email.body)
        self.assertTrue('Visa' in email.body)
        self.assertTrue('$20' in email.body)
"VisIt"
] | 562731e80496901e86621b6179b465fc77ca445fae6f748a2a7e9cbbad89201d |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf import dft
from pyscf import lib
# Force the small-matrix fast path off so the blocked numint code is exercised;
# restored in tearDownModule.
dft.numint.SWITCH_SIZE = 0

# A 12-atom hydrogen chain with a large triple-zeta basis: big enough that
# screening masks (non0tab) actually prune shells.
mol = gto.Mole()
mol.verbose = 0
mol.atom = [('h', (0,0,i*3)) for i in range(12)]
mol.basis = 'ccpvtz'
mol.build()

mf = dft.RKS(mol)
mf.grids.atom_grid = {"H": (50, 110)}
# NOTE(review): this sets an attribute on the SCF object; grid pruning is
# normally controlled via mf.grids.prune — confirm this is intentional.
mf.prune = None
mf.grids.build(with_non0tab=False)
nao = mol.nao_nr()
ao_loc = mol.ao_loc_nr()

# Four well-separated H atoms, with the screening table enabled.
h4 = gto.Mole()
h4.verbose = 0
h4.atom = 'H 0 0 0; H 0 0 9; H 0 9 0; H 0 9 9'
h4.basis = 'ccpvtz'
h4.build()

mf_h4 = dft.RKS(h4)
mf_h4.grids.atom_grid = {"H": (50, 110)}
mf_h4.grids.build(with_non0tab=True)

# Smaller 4-atom chain for the fxc/kernel tests.
mol1 = gto.Mole()
mol1.verbose = 0
mol1.atom = [('h', (0,0,i*3)) for i in range(4)]
mol1.basis = 'ccpvtz'
mol1.build()

# Water molecule in a small basis; verbose output is discarded.
h2o = gto.Mole()
h2o.verbose = 5
h2o.output = '/dev/null'
h2o.atom = [
    ["O" , (0. , 0.     , 0.)],
    [1   , (0. , -0.757 , 0.587)],
    [1   , (0. , 0.757  , 0.587)] ]
h2o.basis = {"H": '6-31g', "O": '6-31g',}
h2o.build()
def tearDownModule():
    """Restore the patched SWITCH_SIZE and release the module-level fixtures."""
    dft.numint.SWITCH_SIZE = 800
    global mol, mf, h4, mf_h4, mol1, h2o
    # close the /dev/null log handle opened by h2o.build()
    h2o.stdout.close()
    del mol, mf, h4, mf_h4, mol1, h2o
def finger(a):
    """Collapse array *a* into a scalar fingerprint for regression checks.

    The flattened array is weighted element-wise by cos(0), cos(1), ... so
    that permutations and sign changes alter the result.
    """
    flat = a.ravel()
    weights = numpy.cos(numpy.arange(flat.size))
    return numpy.dot(weights, flat)
class KnownValues(unittest.TestCase):
    """Regression tests for pyscf.dft.numint against previously recorded values.

    Each test either compares a fast-path/screened evaluation against the
    unscreened reference, or pins a scalar fingerprint of the result.
    """

    def test_make_mask(self):
        # shell-screening table for the H12 chain grid
        non0 = dft.numint.make_mask(mol, mf.grids.coords)
        self.assertEqual(non0.sum(), 10244)
        self.assertAlmostEqual(finger(non0), -2.6880474684794895, 9)
        self.assertAlmostEqual(finger(numpy.cos(non0)), 2.5961863522983433, 9)

    def test_dot_ao_dm(self):
        # screened vs unscreened ao.dm contraction must agree (complex ao)
        dm = mf_h4.get_init_guess(key='minao')
        ao_loc = h4.ao_loc_nr()
        ao = mf_h4._numint.eval_ao(h4, mf_h4.grids.coords).copy() + 0j
        nao = ao.shape[1]
        v1 = dft.numint._dot_ao_dm(h4, ao, dm, mf_h4.grids.non0tab, (0,h4.nbas), ao_loc)
        v2 = dft.numint._dot_ao_dm(h4, ao, dm, None, None, None)
        self.assertAlmostEqual(abs(v1-v2).max(), 0, 9)

    def test_dot_ao_dm_high_cost(self):
        non0tab = mf._numint.make_mask(mol, mf.grids.coords)
        ao = dft.numint.eval_ao(mol, mf.grids.coords)
        numpy.random.seed(1)
        nao = ao.shape[1]
        ao_loc = mol.ao_loc_nr()
        dm = numpy.random.random((nao,nao))
        dm = dm + dm.T
        # reference: plain BLAS product without screening
        res0 = lib.dot(ao, dm)
        res1 = dft.numint._dot_ao_dm(mol, ao, dm, non0tab,
                                     shls_slice=(0,mol.nbas), ao_loc=ao_loc)
        self.assertTrue(numpy.allclose(res0, res1))

    def test_dot_ao_ao(self):
        dm = mf_h4.get_init_guess(key='minao')
        ao_loc = h4.ao_loc_nr()
        ao = mf_h4._numint.eval_ao(h4, mf_h4.grids.coords).copy() + 0j
        nao = h4.nao_nr()
        v1 = dft.numint._dot_ao_ao(h4, ao, ao, mf_h4.grids.non0tab, (0,h4.nbas), ao_loc)
        v2 = dft.numint._dot_ao_ao(h4, ao, ao, None, None, None)
        self.assertAlmostEqual(abs(v1-v2).max(), 0, 9)

    def test_dot_ao_ao_high_cost(self):
        non0tab = mf.grids.make_mask(mol, mf.grids.coords)
        ao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=1)
        nao = ao.shape[1]
        ao_loc = mol.ao_loc_nr()
        res0 = lib.dot(ao[0].T, ao[1])
        res1 = dft.numint._dot_ao_ao(mol, ao[0], ao[1], non0tab,
                                     shls_slice=(0,mol.nbas), ao_loc=ao_loc)
        self.assertTrue(numpy.allclose(res0, res1))

    def test_eval_rho(self):
        numpy.random.seed(10)
        ngrids = 500
        coords = numpy.random.random((ngrids,3))*20
        dm = numpy.random.random((nao,nao))
        dm = dm + dm.T
        # ao[0] is the AO value; ao[1:4] are d/dx,d/dy,d/dz; ao[4:10] second derivs
        ao = dft.numint.eval_ao(mol, coords, deriv=2)

        # build a (non-idempotent) density matrix with some negative occupations
        e, mo_coeff = numpy.linalg.eigh(dm)
        mo_occ = numpy.ones(nao)
        mo_occ[-2:] = -1
        dm = numpy.einsum('pi,i,qi->pq', mo_coeff, mo_occ, mo_coeff)

        # reference MGGA density: [rho, drho/dx, drho/dy, drho/dz, laplacian, tau]
        rho0 = numpy.zeros((6,ngrids))
        rho0[0] = numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[0].conj())
        rho0[1] = numpy.einsum('pi,ij,pj->p', ao[1], dm, ao[0].conj()) + numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[1].conj())
        rho0[2] = numpy.einsum('pi,ij,pj->p', ao[2], dm, ao[0].conj()) + numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[2].conj())
        rho0[3] = numpy.einsum('pi,ij,pj->p', ao[3], dm, ao[0].conj()) + numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[3].conj())
        # ao[4], ao[7], ao[9] are the xx, yy, zz second derivatives
        rho0[4]+= numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[4].conj()) + numpy.einsum('pi,ij,pj->p', ao[4], dm, ao[0].conj())
        rho0[4]+= numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[7].conj()) + numpy.einsum('pi,ij,pj->p', ao[7], dm, ao[0].conj())
        rho0[4]+= numpy.einsum('pi,ij,pj->p', ao[0], dm, ao[9].conj()) + numpy.einsum('pi,ij,pj->p', ao[9], dm, ao[0].conj())
        rho0[5]+= numpy.einsum('pi,ij,pj->p', ao[1], dm, ao[1].conj())
        rho0[5]+= numpy.einsum('pi,ij,pj->p', ao[2], dm, ao[2].conj())
        rho0[5]+= numpy.einsum('pi,ij,pj->p', ao[3], dm, ao[3].conj())
        rho0[4]+= rho0[5]*2
        rho0[5] *= .5

        ni = dft.numint.NumInt()
        rho1 = ni.eval_rho (mol, ao, dm, xctype='MGGA')
        rho2 = ni.eval_rho2(mol, ao, mo_coeff, mo_occ, xctype='MGGA')
        self.assertTrue(numpy.allclose(rho0, rho1))
        self.assertTrue(numpy.allclose(rho0, rho2))

    def test_eval_mat(self):
        numpy.random.seed(10)
        ngrids = 500
        coords = numpy.random.random((ngrids,3))*20
        rho = numpy.random.random((6,ngrids))
        vxc = numpy.random.random((4,ngrids))
        weight = numpy.random.random(ngrids)
        ao = dft.numint.eval_ao(mol, coords, deriv=2)

        # LDA: V = ao^H . diag(w*vrho) . ao
        mat0 = numpy.einsum('pi,p,pj->ij', ao[0].conj(), weight*vxc[0], ao[0])
        mat1 = dft.numint.eval_mat(mol, ao[0], weight, rho, vxc[0], xctype='LDA')
        self.assertTrue(numpy.allclose(mat0, mat1))
        # UKS
        mat2 = dft.numint.eval_mat(mol, ao[0], weight, rho, [vxc[0]]*2, xctype='LDA', spin=1)
        self.assertTrue(numpy.allclose(mat0, mat2))

        # GGA: add the gradient (vsigma) contributions
        vrho, vsigma = vxc[:2]
        wv = weight * vsigma * 2
        mat0 = numpy.einsum('pi,p,pj->ij', ao[0].conj(), weight*vrho, ao[0])
        mat0 += numpy.einsum('pi,p,pj->ij', ao[0].conj(), rho[1]*wv, ao[1]) + numpy.einsum('pi,p,pj->ij', ao[1].conj(), rho[1]*wv, ao[0])
        mat0 += numpy.einsum('pi,p,pj->ij', ao[0].conj(), rho[2]*wv, ao[2]) + numpy.einsum('pi,p,pj->ij', ao[2].conj(), rho[2]*wv, ao[0])
        mat0 += numpy.einsum('pi,p,pj->ij', ao[0].conj(), rho[3]*wv, ao[3]) + numpy.einsum('pi,p,pj->ij', ao[3].conj(), rho[3]*wv, ao[0])
        mat1 = dft.numint.eval_mat(mol, ao, weight, rho, vxc[:4], xctype='GGA')
        self.assertTrue(numpy.allclose(mat0, mat1))
        # UKS
        ngrids = weight.size
        vxc_1 = [vxc[0], numpy.vstack((vxc[1], numpy.zeros(ngrids))).T]
        mat2 = dft.numint.eval_mat(mol, ao, weight, [rho[:4]]*2, vxc_1, xctype='GGA', spin=1)
        self.assertTrue(numpy.allclose(mat0, mat2))

        # MGGA: add the kinetic-energy density (vtau) contribution
        vrho, vsigma, _, vtau = vxc
        vxc = (vrho, vsigma, None, vtau)
        wv = weight * vtau * .5
        mat2 = numpy.einsum('pi,p,pj->ij', ao[1].conj(), wv, ao[1])
        mat2 += numpy.einsum('pi,p,pj->ij', ao[2].conj(), wv, ao[2])
        mat2 += numpy.einsum('pi,p,pj->ij', ao[3].conj(), wv, ao[3])
        mat0 += mat2
        mat1 = dft.numint.eval_mat(mol, ao, weight, rho, vxc, xctype='MGGA')
        self.assertTrue(numpy.allclose(mat0, mat1))
        # UKS
        ngrids = weight.size
        vxc_1 = [vxc[0],
                 numpy.vstack((vxc[1], numpy.zeros(ngrids))).T,
                 numpy.zeros((ngrids,2)),
                 numpy.vstack((vxc[3], numpy.zeros(ngrids))).T]
        mat2 = dft.numint.eval_mat(mol, ao, weight, [rho]*2, vxc_1, xctype='MGGA', spin=1)
        self.assertTrue(numpy.allclose(mat0, mat2))

    def test_rks_vxc(self):
        numpy.random.seed(10)
        nao = mol.nao_nr()
        dms = numpy.random.random((2,nao,nao))
        v = mf._numint.nr_vxc(mol, mf.grids, 'B88,', dms, spin=0, hermi=0)[2]
        self.assertAlmostEqual(finger(v), -0.70124686853021512, 8)
        # pure HF / empty functional must give a zero vxc matrix
        v = mf._numint.nr_vxc(mol, mf.grids, 'HF', dms, spin=0, hermi=0)[2]
        self.assertAlmostEqual(abs(v).max(), 0, 9)
        v = mf._numint.nr_vxc(mol, mf.grids, '', dms, spin=0, hermi=0)[2]
        self.assertAlmostEqual(abs(v).max(), 0, 9)

    def test_uks_vxc(self):
        numpy.random.seed(10)
        nao = h2o.nao_nr()
        dms = numpy.random.random((2,nao,nao))
        grids = dft.gen_grid.Grids(h2o)
        v = mf._numint.nr_vxc(h2o, grids, 'B88,', dms, spin=1)[2]
        self.assertAlmostEqual(finger(v), -7.7508525240447348, 8)
        v = mf._numint.nr_vxc(h2o, grids, 'HF', dms, spin=1)[2]
        self.assertAlmostEqual(abs(v).max(), 0, 9)
        v = mf._numint.nr_vxc(h2o, grids, '', dms, spin=1)[2]
        self.assertAlmostEqual(abs(v).max(), 0, 9)

    def test_uks_vxc_high_cost(self):
        numpy.random.seed(10)
        nao = mol.nao_nr()
        dms = numpy.random.random((2,nao,nao))
        v = mf._numint.nr_vxc(mol, mf.grids, 'B88,', dms, spin=1)[2]
        self.assertAlmostEqual(finger(v), -0.73803886056633594, 8)
        v = mf._numint.nr_vxc(mol, mf.grids, 'HF', dms, spin=1)[2]
        self.assertAlmostEqual(abs(v).max(), 0, 9)
        v = mf._numint.nr_vxc(mol, mf.grids, '', dms, spin=1)[2]
        self.assertAlmostEqual(abs(v).max(), 0, 9)

    def test_rks_fxc(self):
        numpy.random.seed(10)
        nao = mol1.nao_nr()
        dm0 = numpy.random.random((nao,nao))
        _, mo_coeff = numpy.linalg.eigh(dm0)
        mo_occ = numpy.ones(nao)
        mo_occ[-2:] = -1
        dm0 = numpy.einsum('pi,i,qi->pq', mo_coeff, mo_occ, mo_coeff)
        dms = numpy.random.random((2,nao,nao))
        ni = dft.numint.NumInt()
        v = ni.nr_fxc(mol1, mf.grids, 'B88,', dm0, dms, spin=0, hermi=0)
        self.assertAlmostEqual(finger(v), -7.5671368618070343, 8)
        # test cache_kernel: pre-computed rho0/vxc/fxc must give the same result
        rvf = ni.cache_xc_kernel(mol1, mf.grids, 'B88,', mo_coeff, mo_occ, spin=0)
        v1 = dft.numint.nr_fxc(mol1, mf.grids, 'B88,', dm0, dms, spin=0, hermi=0,
                               rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)

        v = ni.nr_fxc(mol1, mf.grids, 'LDA,', dm0, dms[0], spin=0, hermi=0)
        self.assertAlmostEqual(finger(v), -3.0019207112626876, 8)
        # test cache_kernel
        rvf = ni.cache_xc_kernel(mol1, mf.grids, 'LDA,', mo_coeff, mo_occ, spin=0)
        v1 = dft.numint.nr_fxc(mol1, mf.grids, 'LDA,', dm0, dms[0], spin=0, hermi=0,
                               rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)

        v = ni.nr_fxc(mol1, mf.grids, 'HF', dm0, dms, spin=0, hermi=0)
        self.assertAlmostEqual(abs(v).max(), 0, 9)
        v = ni.nr_fxc(mol1, mf.grids, '', dm0, dms, spin=0, hermi=0)
        self.assertAlmostEqual(abs(v).max(), 0, 9)

    def test_rks_fxc_st(self):
        # singlet/triplet TDDFT response kernel (nr_rks_fxc_st)
        numpy.random.seed(10)
        nao = mol1.nao_nr()
        dm0 = numpy.random.random((nao,nao))
        _, mo_coeff = numpy.linalg.eigh(dm0)
        mo_occ = numpy.ones(nao)
        mo_occ[-2:] = -1
        dm0 = numpy.einsum('pi,i,qi->pq', mo_coeff, mo_occ, mo_coeff)
        dms = numpy.random.random((2,nao,nao))
        ni = dft.numint.NumInt()
        rvf = ni.cache_xc_kernel(mol1, mf.grids, 'B88,', [mo_coeff,mo_coeff],
                                 [mo_occ*.5]*2, spin=1)
        v = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'B88,', dm0, dms, singlet=True)
        self.assertAlmostEqual(finger(v), -7.5671368618070343*2, 8)
        v1 = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'B88,', dm0, dms, singlet=True,
                                      rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)
        # for a pure exchange GGA the triplet kernel coincides with the singlet one
        v = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'B88,', dm0, dms, singlet=False)
        self.assertAlmostEqual(finger(v), -7.5671368618070343*2, 8)
        v1 = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'B88,', dm0, dms, singlet=False,
                                      rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)

        rvf = ni.cache_xc_kernel(mol1, mf.grids, 'LDA,', [mo_coeff,mo_coeff],
                                 [mo_occ*.5]*2, spin=1)
        v = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'LDA,', dm0, dms[0], singlet=True)
        self.assertAlmostEqual(finger(v), -3.0019207112626876*2, 8)
        v1 = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'LDA,', dm0, dms[0], singlet=True,
                                      rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)
        v = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'LDA,', dm0, dms[0], singlet=False)
        self.assertAlmostEqual(finger(v), -3.0019207112626876*2, 8)
        v1 = dft.numint.nr_rks_fxc_st(ni, mol1, mf.grids, 'LDA,', dm0, dms[0], singlet=False,
                                      rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)

    def test_uks_fxc(self):
        numpy.random.seed(10)
        nao = mol1.nao_nr()
        dm0 = numpy.random.random((2,nao,nao))
        e, mo_coeff = numpy.linalg.eigh(dm0)
        mo_occ = numpy.ones((2,nao))
        mo_occ[:,-2:] = -1
        dm0 = numpy.einsum('xpi,xi,xqi->xpq', mo_coeff, mo_occ, mo_coeff)
        dms = numpy.random.random((2,nao,nao))
        ni = dft.numint.NumInt()
        v = ni.nr_fxc(mol1, mf.grids, 'B88,', dm0, dms, spin=1)
        self.assertAlmostEqual(finger(v), -10.316443204083185, 8)
        # test cache_kernel
        rvf = ni.cache_xc_kernel(mol1, mf.grids, 'B88,', mo_coeff, mo_occ, spin=1)
        v1 = dft.numint.nr_fxc(mol1, mf.grids, 'B88,', dm0, dms, hermi=0, spin=1,
                               rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)

        v = ni.nr_fxc(mol1, mf.grids, 'LDA,', dm0, dms[0], spin=1)
        self.assertAlmostEqual(finger(v), -5.6474405864697967, 8)
        # test cache_kernel
        rvf = ni.cache_xc_kernel(mol1, mf.grids, 'LDA,', mo_coeff, mo_occ, spin=1)
        v1 = dft.numint.nr_fxc(mol1, mf.grids, 'LDA,', dm0, dms[0], hermi=0, spin=1,
                               rho0=rvf[0], vxc=rvf[1], fxc=rvf[2])
        self.assertAlmostEqual(abs(v-v1).max(), 0, 8)

    def test_vv10nlc(self):
        # VV10 non-local correlation kernel on random densities/grids
        numpy.random.seed(10)
        rho = numpy.random.random((4,20))
        coords = (numpy.random.random((20,3))-.5)*3
        vvrho = numpy.random.random((4,60))
        vvweight = numpy.random.random(60)
        vvcoords = (numpy.random.random((60,3))-.5)*3
        nlc_pars = .8, .3
        v = dft.numint._vv10nlc(rho, coords, vvrho, vvweight, vvcoords, nlc_pars)
        self.assertAlmostEqual(finger(v[0]), 0.15894647203764295, 9)
        self.assertAlmostEqual(finger(v[1]), 0.20500922537924576, 9)

    def test_nr_uks_vxc_vv10(self):
        method = dft.UKS(h2o)
        dm = method.get_init_guess()
        dm = (dm[0], dm[0])
        grids = dft.gen_grid.Grids(h2o)
        grids.atom_grid = {'H': (20, 50), 'O': (20,50)}
        v = dft.numint.nr_vxc(h2o, grids, 'wB97M_V__vv10', dm, spin=1, hermi=0)[2]
        self.assertAlmostEqual(finger(v), 0.02293399033256055, 8)

    def test_uks_gga_wv1(self):
        # for identical alpha/beta densities, the UKS first-order GGA weights
        # must reduce to the RKS ones
        numpy.random.seed(1)
        rho0 = [numpy.random.random((4,5))]*2
        rho1 = [numpy.random.random((4,5))]*2
        weight = numpy.ones(5)
        exc, vxc, fxc, kxc = dft.libxc.eval_xc('b88,', rho0, 1, 0, 3)
        wva, wvb = dft.numint._uks_gga_wv1(rho0, rho1, vxc, fxc, weight)

        exc, vxc, fxc, kxc = dft.libxc.eval_xc('b88,', rho0[0]+rho0[1], 0, 0, 3)
        wv = dft.numint._rks_gga_wv1(rho0[0]+rho0[1], rho1[0]+rho1[1], vxc, fxc, weight)
        self.assertAlmostEqual(abs(1 - wv/wva).max(), 0, 12)
        self.assertAlmostEqual(abs(1 - wv/wvb).max(), 0, 12)

    def test_uks_gga_wv2(self):
        # second-order analogue of test_uks_gga_wv1
        numpy.random.seed(1)
        rho0 = [numpy.random.random((4,5))]*2
        rho1 = [numpy.random.random((4,5))]*2
        weight = numpy.ones(5)
        exc, vxc, fxc, kxc = dft.libxc.eval_xc('b88,', rho0, 1, 0, 3)
        wva, wvb = dft.numint._uks_gga_wv2(rho0, rho1, fxc, kxc, weight)

        exc, vxc, fxc, kxc = dft.libxc.eval_xc('b88,', rho0[0]+rho0[1], 0, 0, 3)
        wv = dft.numint._rks_gga_wv2(rho0[0]+rho0[1], rho1[0]+rho1[1], fxc, kxc, weight)
        self.assertAlmostEqual(abs(1 - wv/wva).max(), 0, 12)
        self.assertAlmostEqual(abs(1 - wv/wvb).max(), 0, 12)

    def test_complex_dm(self):
        # get_veff must accept a complex Hermitian density matrix
        mf = dft.RKS(h2o)
        mf.xc = 'b3lyp'
        nao = h2o.nao
        numpy.random.seed(1)
        dm = (numpy.random.random((nao,nao)) +
              numpy.random.random((nao,nao))*1j)
        dm = dm + dm.conj().T
        v = mf.get_veff(h2o, dm)
        self.assertAlmostEqual(finger(v), 30.543789621782576-0.23207622637751305j, 9)

    def test_rsh_omega(self):
        # an explicit omega argument overrides the NumInt-level omega
        rho0 = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
        ni = dft.numint.NumInt()
        ni.omega = 0.4
        omega = 0.2
        exc, vxc, fxc, kxc = ni.eval_xc('ITYH,', rho0, 0, 0, 1, omega)
        self.assertAlmostEqual(float(exc), -0.6359945579326314, 7)
        self.assertAlmostEqual(float(vxc[0]), -0.8712041561251518, 7)
        self.assertAlmostEqual(float(vxc[1]), -0.003911167644579979, 7)

        exc, vxc, fxc, kxc = ni.eval_xc('ITYH,', rho0, 0, 0, 1)
        self.assertAlmostEqual(float(exc), -0.5406095865415561, 7)
        self.assertAlmostEqual(float(vxc[0]), -0.772123720263471, 7)
        self.assertAlmostEqual(float(vxc[1]), -0.00301639097170439, 7)
# allow running this regression suite directly: python test_numint.py
if __name__ == "__main__":
    print("Test numint")
    unittest.main()
| gkc1000/pyscf | pyscf/dft/test/test_numint.py | Python | apache-2.0 | 18,203 | [
"PySCF"
] | a7cf737d1896be545bbbe2d41c8882479d187d499bd142c64017291ef74a87b4 |
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# autotest script: "f N dt" advances N frames with timestep dt, "s" takes a
# screenshot, "q" quits
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "particles, Sun"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.particle_systems import *
from cocos.particle import *
class L(Layer):
    """Layer hosting a Sun particle system that can be dragged with the mouse."""

    # receive director mouse events
    is_event_handler = True

    def __init__(self):
        super( L, self).__init__()

        # alternative particle systems kept for quick manual experimentation
        # p = Fireworks()
        # p = Explosion()
        # p = Fire()
        # p = Flower()
        p = Sun()
        # p = Spiral()
        # p = Meteor()
        # p = Galaxy()
        p.position = (320,240)
        self.add( p )
        # POSITION_FREE: already-emitted particles stay put when the emitter moves
        p.position_type = ParticleSystem.POSITION_FREE
        self.sun = p

    def on_mouse_drag( self, x, y, dx, dy, buttons, modifiers ):
        (x,y) = director.get_virtual_coordinates(x,y)
        # NOTE(review): the virtual coordinates computed above are immediately
        # overwritten — only the relative (dx, dy) window-space motion is used.
        # Confirm whether dx/dy should also be converted to virtual space.
        x,y = self.sun.position
        self.sun.position = (x+dx, y+dy)
def main():
    """Create a resizable window, add the particle layer and run the scene."""
    director.init( resizable=True )
    main_scene = cocos.scene.Scene()
    main_scene.add( L() )
    director.run( main_scene )

if __name__ == '__main__':
    main()
| eevee/cocos2d-mirror | test/test_particle_sun.py | Python | bsd-3-clause | 1,236 | [
"Galaxy"
] | dbffafc250b2c6a5936897caa01de8cc6b9d1d806ad844081df78cb9a9f5f59f |
from setuptools import setup
from ectyper import __version__
# Package metadata for ectyper; the version is sourced from ectyper.__version__
# so releases are tagged in exactly one place.
setup(
    name='ectyper',
    version=__version__,
    description='Escherichia coli fast serotyping using both raw reads and assemblies with automatic species identification',
    url='https://github.com/phac-nml/ecoli_serotyping',
    # NOTE(review): trailing ", " suggests a truncated author list — confirm
    author='Chad Laing, Kyrylo Bessonov, Sam Sung, Camille La Rose, ',
    author_email='chad.laing@canada.ca, kyrylo.bessonov@canada.ca, sam.sung@canada.ca, claro100@uottawa.ca',
    license='Apache 2',
    # command-line entry point installed into the user's PATH
    scripts=['bin/ectyper'],
    packages=['ectyper'],
    install_requires=['requests','biopython','pandas'],
    # ship the serotype reference data alongside the code
    package_data={'ectyper': ['Data/*.json', 'Data/*.py']},
    zip_safe=False,
    test_suite='py.test'
)
| phac-nml/ecoli_serotyping | setup.py | Python | apache-2.0 | 720 | [
"Biopython"
] | ecac359a0a94c99129361494cb9a732e8ed25f4b64559e8aafe5f622683b08d6 |
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
# Created By : Wouter D'Haeseleer
# Created On : 05-11-2007
# Company : Imas NV
#
###########################################################################
import re
from Products.DataCollector.plugins.CollectorPlugin import CommandPlugin
class VmwareEsxDf(CommandPlugin):
    """
    Run vdf to model filesystem information. Should work on all ESX servers.
    """
    maptype = "FilesystemMap"
    command = '/usr/sbin/vdf'
    compname = "os"
    relname = "filesystems"
    modname = "Products.ZenModel.FileSystem"

    def process(self, device, results, log):
        """Parse `vdf` output (df-style columns) into a FileSystem relmap.

        Handles the df convention of wrapping a long device name onto its own
        line: a 1-token line is buffered and prepended to the next line.
        """
        log.info('Collecting filesystems for device %s' % device.id)
        # optional regex of mount points to exclude from modeling
        skipfsnames = getattr(device, 'zFileSystemMapIgnoreNames', None)
        rm = self.relMap()
        rlines = results.split("\n")
        bline = ""
        for line in rlines:
            # skip the column header row
            if line.startswith("Filesystem"): continue
            om = self.objectMap()
            spline = line.split()
            # a lone token is a wrapped device name; join it with the next line
            if len(spline) == 1:
                bline = spline[0]
                continue
            if bline:
                spline.insert(0,bline)
                bline = None
            # expect exactly: device, 1K-blocks, used, available, use%, mount
            if len(spline) != 6: continue
            (om.storageDevice, tblocks, u, a, p, om.mount) = spline
            if skipfsnames and re.search(skipfsnames,om.mount): continue
            # long() - this plugin targets Python 2 (Zenoss Core)
            om.totalBlocks = long(tblocks)
            # vdf reports 1K blocks
            om.blockSize = 1024
            om.id = self.prepId(om.mount)
            rm.append(om)
        return rm
| zenoss/ZenPacks.community.VMwareEsx | ZenPacks/community/VMwareEsx/modeler/plugins/VmwareEsxDf.py | Python | gpl-2.0 | 1,927 | [
"VisIt"
] | b1b4e64bae2625ecab9ec021e19f0bc40a8619ece6dffbbb57092fb0587888fe |
""" :mod: SRM2Storage
=================
.. module: python
:synopsis: SRM v2 interface to StorageElement
"""
# # imports
import os
import re
import time
import errno
from types import StringType, StringTypes, ListType, IntType
from stat import S_ISREG, S_ISDIR, S_IMODE, ST_MODE, ST_SIZE
# # from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Resources.Utilities.Utils import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.File import getSize
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
# # RCSID
__RCSID__ = "$Id$"
class SRM2Storage( StorageBase ):
""" .. class:: SRM2Storage
SRM v2 interafce to StorageElement using lcg_util and gfal
"""
  def __init__( self, storageName, protocol, path, host, port, spaceToken, wspath ):
    """ c'tor

    :param self: self reference
    :param str storageName: SE name
    :param str protocol: protocol to use
    :param str path: base path for vo files
    :param str host: SE host
    :param int port: port to use to communicate with :host:
    :param str spaceToken: space token
    :param str wspath: location of SRM on :host:
    """
    self.log = gLogger.getSubLogger( "SRM2Storage", True )
    self.isok = True
    # # placeholder for gfal reference, set lazily by __importExternals
    self.gfal = None
    # # placeholder for lcg_util reference, set lazily by __importExternals
    self.lcg_util = None
    # # save c'tor params
    self.protocolName = 'SRM2'
    self.name = storageName
    self.protocol = protocol
    self.path = path
    self.host = host
    self.port = port
    self.wspath = wspath
    self.spaceToken = spaceToken
    self.cwd = self.path
    # # init base class
    StorageBase.__init__( self, self.name, self.path )
    # # stage limit - 12h (seconds)
    self.stageTimeout = gConfig.getValue( '/Resources/StorageElements/StageTimeout', 12 * 60 * 60 )
    # # 1 file timeout (seconds)
    self.fileTimeout = gConfig.getValue( '/Resources/StorageElements/FileTimeout', 30 )
    # # nb of surls per gfal call
    self.filesPerCall = gConfig.getValue( '/Resources/StorageElements/FilesPerCall', 20 )
    # # gfal timeout
    self.gfalTimeout = gConfig.getValue( "/Resources/StorageElements/GFAL_Timeout", 100 )
    # # gfal long timeout
    self.gfalLongTimeOut = gConfig.getValue( "/Resources/StorageElements/GFAL_LongTimeout", 1200 )
    # # gfal retry on errno.ECONN
    self.gfalRetry = gConfig.getValue( "/Resources/StorageElements/GFAL_Retry", 3 )
    # # should busy files be considered to exist
    self.busyFilesExist = gConfig.getValue( "/Resources/StorageElements/SRMBusyFilesExist", False )
    # # set checksum type, by default this is 0 (GFAL_CKSM_NONE)
    self.checksumType = gConfig.getValue( "/Resources/StorageElements/ChecksumType", None )
    # enum gfal_cksm_type, all in lcg_util
    # GFAL_CKSM_NONE = 0,
    # GFAL_CKSM_CRC32,
    # GFAL_CKSM_ADLER32,
    # GFAL_CKSM_MD5,
    # GFAL_CKSM_SHA1
    # GFAL_CKSM_NULL = 0
    self.checksumTypes = { None : 0, "CRC32" : 1, "ADLER32" : 2,
                           "MD5" : 3, "SHA1" : 4, "NONE" : 0, "NULL" : 0 }
    if self.checksumType:
      if str( self.checksumType ).upper() in self.checksumTypes:
        gLogger.debug( "SRM2Storage: will use %s checksum check" % self.checksumType )
        self.checksumType = self.checksumTypes[ self.checksumType.upper() ]
      else:
        # NOTE(review): the %s placeholder here has no argument, so the
        # warning prints literally - probably meant % self.checksumType
        gLogger.warn( "SRM2Storage: unknown checksum type %s, checksum check disabled" )
        # # GFAL_CKSM_NONE
        self.checksumType = 0
    else:
      self.checksumType = 0
      self.log.debug( "SRM2Storage: will use no checksum" )

    # setting some variables for use with lcg_utils
    self.nobdii = 1
    self.defaulttype = 2
    self.voName = None
    ret = getProxyInfo( disableVOMS = True )
    if ret['OK'] and 'group' in ret['Value']:
      self.voName = getVOForGroup( ret['Value']['group'] )
    # enable lcg-utils debugging for debug level DEBUG
    lcgdebuglevel = 0
    dlevel = self.log.getLevel()
    if dlevel == 'DEBUG':
      lcgdebuglevel = 999
    self.verbose = lcgdebuglevel
    self.conf_file = 'ignored'
    self.insecure = 0
    self.defaultLocalProtocols = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )

    # transfer-tuning constants in bytes and bytes/s
    self.MAX_SINGLE_STREAM_SIZE = 1024 * 1024 * 10  # 10 MB ???
    self.MIN_BANDWIDTH = 0.5 * ( 1024 * 1024 )  # 0.5 MB/s ???
  def __importExternals( self ):
    """ import lcg_util and gfalthr or gfal

    Lazily binds self.lcg_util and self.gfal; the threaded gfalthr binding is
    preferred and plain gfal is the fallback. Returns S_OK/S_ERROR.

    :param self: self reference
    """
    # already imported? nothing to do
    if ( self.lcg_util ) and ( self.gfal ):
      return S_OK()
    # # get lcg_util
    try:
      import lcg_util
      self.log.debug( "Using lcg_util version %s from %s" % ( lcg_util.lcg_util_version(),
                                                              lcg_util.__file__ ) )
    except ImportError, error:
      errStr = "__importExternals: Failed to import lcg_util"
      gLogger.exception( errStr, "", error )
      return S_ERROR( errStr )
    # # and gfalthr
    try:
      import gfalthr as gfal
      self.log.debug( 'Using gfalthr version %s from %s' % ( gfal.gfal_version(),
                                                             gfal.__file__ ) )
    except ImportError, error:
      self.log.warn( "__importExternals: Failed to import gfalthr: %s." % error )
      # # so gfal maybe?
      try:
        import gfal
        self.log.debug( "Using gfal version %s from %s" % ( gfal.gfal_version(),
                                                            gfal.__file__ ) )
      except ImportError, error:
        errStr = "__importExternals: Failed to import gfal"
        gLogger.exception( errStr, "", error )
        return S_ERROR( errStr )
    self.lcg_util = lcg_util
    self.gfal = gfal
    return S_OK()
################################################################################
#
# The methods below are for manipulating the client
#
################################################################################
  def resetWorkingDirectory( self ):
    """ reset the working directory to the base dir

    :param self: self reference
    """
    self.cwd = self.path
def changeDirectory( self, directory ):
""" cd to :directory:
:param self: self reference
:param str directory: dir path
"""
if directory[0] == '/':
directory = directory.lstrip( '/' )
self.cwd = '%s/%s' % ( self.cwd, directory )
def getCurrentURL( self, fileName ):
""" Obtain the current file URL from the current working directory and the filename
:param self: self reference
:param str fileName: path on storage
"""
# # strip leading / if fileName arg is present
fileName = fileName.lstrip( "/" ) if fileName else fileName
try:
fullUrl = "%s://%s:%s%s%s/%s" % ( self.protocol, self.host, self.port, self.wspath, self.cwd, fileName )
fullUrl = fullUrl.rstrip( "/" )
return S_OK( fullUrl )
except TypeError, error:
return S_ERROR( "Failed to create URL %s" % error )
def isPfnForProtocol( self, pfn ):
""" check if PFN :pfn: is valid for :self.protocol:
:param self: self reference
:param str pfn: PFN
"""
res = pfnparse( pfn )
if not res['OK']:
return res
pfnDict = res['Value']
return S_OK( pfnDict['Protocol'] == self.protocol )
def getProtocolPfn( self, pfnDict, withPort ):
""" construct SURL using :self.host:, :self.protocol: and optionally :self.port: and :self.wspath:
:param self: self reference
:param dict pfnDict: pfn dict
:param bool withPort: include port information
"""
# For srm2 keep the file name and path
pfnDict['Protocol'] = self.protocol
pfnDict['Host'] = self.host
if not pfnDict['Path'].startswith( self.path ):
pfnDict['Path'] = os.path.join( self.path, pfnDict['Path'].strip( '/' ) )
if withPort:
pfnDict['Port'] = self.port
pfnDict['WSUrl'] = self.wspath
else:
pfnDict['Port'] = ''
pfnDict['WSUrl'] = ''
return pfnunparse( pfnDict )
################################################################################
#
# The methods below are URL manipulation methods
#
################################################################################
def getPFNBase( self, withPort = False ):
""" This will get the pfn base. This is then appended with the LFN in DIRAC convention.
:param self: self reference
:param bool withPort: flag to include port
"""
return S_OK( { True : 'srm://%s:%s%s' % ( self.host, self.port, self.path ),
False : 'srm://%s%s' % ( self.host, self.path ) }[withPort] )
def getUrl( self, path, withPort = True ):
""" get SRM PFN for :path: with optional port info
:param self: self reference
:param str path: file path
:param bool withPort: toggle port info
"""
pfnDict = pfnparse( path )
if not pfnDict["OK"]:
self.log.error( "getUrl: %s" % pfnDict["Message"] )
return pfnDict
pfnDict = pfnDict['Value']
if not pfnDict['Path'].startswith( self.path ):
pfnDict['Path'] = os.path.join( self.path, pfnDict['Path'].strip( '/' ) )
pfnDict['Protocol'] = 'srm'
pfnDict['Host'] = self.host
pfnDict['Port'] = self.port
pfnDict['WSUrl'] = self.wspath
if not withPort:
pfnDict['Port'] = ''
pfnDict['WSUrl'] = ''
return pfnunparse( pfnDict )
  def getParameters( self ):
    """ gets all the storage specific parameters passed when instantiating the storage

    :param self: self reference
    :return: S_OK with a dict of the SE configuration values
    """
    return S_OK( { "StorageName" : self.name,
                   "ProtocolName" : self.protocolName,
                   "Protocol" : self.protocol,
                   "Host" : self.host,
                   "Path" : self.path,
                   "Port" : self.port,
                   "SpaceToken" : self.spaceToken,
                   "WSUrl" : self.wspath } )
#############################################################
#
# These are the methods for directory manipulation
#
######################################################################
#
# This has to be updated once the new gfal_makedir() becomes available
# TODO: isn't it there? when somebody made above comment?
#
def createDirectory( self, path ):
  """ mkdir -p path on storage

  :param self: self reference
  :param str path:
  """
  urls = checkArgumentFormat( path )
  if not urls['OK']:
    return urls
  urls = urls['Value']
  successful = {}
  failed = {}
  self.log.debug( "createDirectory: Attempting to create %s directories." % len( urls ) )
  for url in urls:
    res = self.__makeDirs( url.rstrip( '/' ) )
    if not res['OK']:
      self.log.error( "createDirectory: Failed to create directory on storage.",
                      "\n%s: \n%s" % ( url, res['Message'] ) )
      failed[url] = res['Message']
      continue
    self.log.debug( "createDirectory: Successfully created directory on storage: %s" % url )
    successful[url] = True
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __makeDir( self, path ):
  """ Create :path: on the storage by uploading a small 'dirac_directory' marker file
  into it (SRM creates missing path components on put) and removing the marker afterwards.

  :param self: self reference
  :param str path:
  """
  srcFile = os.path.join( os.environ.get( 'TMPDIR', os.environ.get( 'TMP', '/tmp' ) ), 'dirac_directory' )
  if not os.path.exists( srcFile ):
    # create the local marker once; ensure the handle is closed even if the write fails
    dfile = open( srcFile, 'w' )
    try:
      dfile.write( " " )
    finally:
      dfile.close()
  # timestamped remote name avoids clashes between concurrent directory creations
  destFile = os.path.join( path, 'dirac_directory.%s' % time.time() )
  res = self.__putFile( srcFile, destFile, 0, checkExists = False )
  if res['OK']:
    # best effort clean-up of the marker; the put result is what matters
    self.__executeOperation( destFile, 'removeFile' )
  return res
def __makeDirs( self, path ):
  """ Recursively create :path: on the storage, creating missing parents first.

  :param self: self reference
  :param str path: dir name
  """
  existsRes = self.__executeOperation( path, 'exists' )
  if not existsRes['OK']:
    return existsRes
  if existsRes['Value']:
    # nothing to do, directory already present
    return S_OK()
  parent = os.path.dirname( path )
  parentRes = self.__executeOperation( parent, 'exists' )
  if not parentRes['OK']:
    return parentRes
  if not parentRes['Value']:
    # recurse to create the missing parent chain first
    createdRes = self.__makeDirs( parent )
    if not createdRes['OK']:
      return createdRes
  return self.__makeDir( path )
################################################################################
#
# The methods below use the new generic methods for executing operations
#
################################################################################
def removeFile( self, path ):
  """ rm path on storage

  :param self: self reference
  :param str path: file path
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : True } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "removeFile: Performing the removal of %s file(s)" % len( urls ) )
  # bulk delete through the gfal wrapper; a failure of the call itself aborts the whole operation
  resDict = self.__gfaldeletesurls_wrapper( urls )
  if not resDict["OK"]:
    self.log.error( "removeFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict['Value']
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      # translate the returned SURL back into the canonical PFN used as result key
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "removeFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        self.log.debug( "removeFile: Successfully removed file: %s" % pathSURL )
        successful[pathSURL] = True
      elif urlDict['status'] == 2:
        # This is the case where the file doesn't exist.
        # a missing file counts as successfully removed (idempotent delete)
        self.log.debug( "removeFile: File did not exist, successfully removed: %s" % pathSURL )
        successful[pathSURL] = True
      else:
        errStr = "removeFile: Failed to remove file."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getTransportURL( self, path, protocols = False ):
  """ obtain the tURLs for the supplied path and protocols

  :param self: self reference
  :param str path: path on storage
  :param mixed protocols: protocols to use (False -> configured defaults, str -> single, list -> as given)
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : turl } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  # resolve the protocol list: default lookup, single string, or explicit list
  if not protocols:
    protocols = self.__getProtocols()
    if not protocols['OK']:
      return protocols
    listProtocols = protocols['Value']
  elif type( protocols ) == StringType:
    listProtocols = [protocols]
  elif type( protocols ) == ListType:
    listProtocols = protocols
  else:
    return S_ERROR( "getTransportURL: Must supply desired protocols to this plug-in." )
  self.log.debug( "getTransportURL: Obtaining tURLs for %s file(s)." % len( urls ) )
  resDict = self.__gfalturlsfromsurls_wrapper( urls, listProtocols )
  if not resDict["OK"]:
    self.log.error( "getTransportURL: %s" % resDict["Message"] )
    return resDict
  resDict = resDict['Value']
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "getTransportURL: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        self.log.debug( "getTransportURL: Obtained tURL for file. %s" % pathSURL )
        successful[pathSURL] = urlDict['turl']
      elif urlDict['status'] == 2:
        # status 2 == file not found
        errMessage = "getTransportURL: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "getTransportURL: Failed to obtain turls."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def prestageFile( self, path, lifetime = 86400 ):
  """ Issue prestage request for file

  :param self: self reference
  :param str path: PFN path
  :param int lifetime: prestage lifetime in seconds (default 24h)
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : SRM request id } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "prestageFile: Attempting to issue stage requests for %s file(s)." % len( urls ) )
  resDict = self.__gfal_prestage_wrapper( urls, lifetime )
  if not resDict["OK"]:
    self.log.error( "prestageFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "prestageFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        self.log.debug( "prestageFile: Issued stage request for file %s." % pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 1:
        self.log.debug( "prestageFile: File found to be already staged.", pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      # It can be 11 or 22 depending on the srm-ifce version...
      elif urlDict['status'] in ( 11, 22 ):
        # fixed: was log.debug( "...%s queued.", pathSURL ) which printed the '%s' literally;
        # interpolate like the sibling debug calls
        self.log.debug( "prestageFile: Stage request for file %s queued." % pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 2:
        errMessage = "prestageFile: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "prestageFile: Failed issue stage request."
        errMessage = urlDict['ErrorMessage']
        # fixed: ( pathSURL, errMessage ) order, consistent with the other methods of this class
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def prestageFileStatus( self, path ):
  """ Monitor prestage request for files

  :param self: self reference
  :param str path: PFN path
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : bool staged } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "prestageFileStatus: Attempting to get status "
                  "of stage requests for %s file(s)." % len( urls ) )
  resDict = self.__gfal_prestagestatus_wrapper( urls )
  if not resDict["OK"]:
    self.log.error( "prestageFileStatus: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "prestageFileStatus: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 1:
        # status 1 == staged (online)
        self.log.debug( "SRM2Storage.prestageFileStatus: File found to be staged %s." % pathSURL )
        successful[pathSURL] = True
      elif urlDict['status'] == 0:
        self.log.debug( "SRM2Storage.prestageFileStatus: File not staged %s." % pathSURL )
        successful[pathSURL] = False
      elif urlDict['status'] == 2:
        errMessage = "SRM2Storage.prestageFileStatus: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "SRM2Storage.prestageFileStatus: Failed get prestage status."
        errMessage = urlDict['ErrorMessage']
        # fixed: ( pathSURL, errMessage ) order, consistent with the other methods of this class
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getFileMetadata( self, path ):
  """ Get metadata associated to the file

  :param self: self reference
  :param str path: file path
  :return: S_OK( { 'Failed' : { path : reason }, 'Successful' : { path : metadata dict } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  # map full SURL -> original input path, so results can be keyed on what the caller supplied
  urls = {}
  failed = {}
  for url in res['Value']:
    pathSURL = self.getUrl( url )
    if not pathSURL['OK']:
      self.log.error( "getFileMetadata: %s" % pathSURL["Message"] )
      failed[ url ] = pathSURL["Message"]
    else:
      urls[pathSURL['Value'] ] = url
  self.log.debug( "getFileMetadata: Obtaining metadata for %s file(s)." % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "getFileMetadata: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed.update( resDict['Failed'] )
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      # Get back the input value for that surl
      # NOTE: this rebinds the 'path' parameter as a loop variable
      path = urls[self.getUrl( urlDict['surl'] )['Value']]
      if urlDict['status'] == 0:
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['File']:
          successful[path] = statDict
        else:
          errStr = "getFileMetadata: Supplied path is not a file."
          self.log.error( errStr, path )
          failed[path] = errStr
      elif urlDict['status'] == 2:
        # status 2 == file not found
        errMessage = "getFileMetadata: File does not exist."
        self.log.error( errMessage, path )
        failed[path] = errMessage
      else:
        errStr = "SRM2Storage.getFileMetadata: Failed to get file metadata."
        errMessage = "%s: %s" % ( path, urlDict['ErrorMessage'] )
        self.log.error( errStr, errMessage )
        failed[path] = "%s %s" % ( errStr, urlDict['ErrorMessage'] )
    else:
      # a result without a surl means the gfal layer returned garbage: abort everything
      errStr = "getFileMetadata: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def isFile( self, path ):
  """Check if the given path exists and it is a file

  :param self: self reference
  :param str path: path(s) to check
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : bool } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "isFile: Checking whether %s path(s) are file(s)." % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "isFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "isFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        # path exists: decide file vs directory from the parsed stat info
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['File']:
          successful[pathSURL] = True
        else:
          self.log.debug( "isFile: Path is not a file: %s" % pathSURL )
          successful[pathSURL] = False
      elif urlDict['status'] == 2:
        # status 2 == file not found
        errMessage = "isFile: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "isFile: Failed to get file metadata."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    else:
      errStr = "isFile: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def pinFile( self, path, lifetime = 86400 ):
  """ Pin a file with a given lifetime

  :param self: self reference
  :param str path: PFN path
  :param int lifetime: pin lifetime in seconds (default 24h)
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : SRM request id } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "pinFile: Attempting to pin %s file(s)." % len( urls ) )
  resDict = self.__gfal_pin_wrapper( urls, lifetime )
  if not resDict["OK"]:
    self.log.error( "pinFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "pinFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        self.log.debug( "pinFile: Issued pin request for file %s." % pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 2:
        # status 2 == file not found
        errMessage = "pinFile: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "pinFile: Failed issue pin request."
        errMessage = urlDict['ErrorMessage']
        # NOTE(review): ( errMessage, pathSURL ) order differs from the ( pathSURL, errMessage )
        # convention used by the other methods of this class — consider aligning
        self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def releaseFile( self, path ):
  """ Release a pinned file

  :param self: self reference
  :param str path: PFN path
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : SRM request id } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "releaseFile: Attempting to release %s file(s)." % len( urls ) )
  resDict = self.__gfal_release_wrapper( urls )
  if not resDict["OK"]:
    self.log.error( "releaseFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "releaseFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        self.log.debug( "releaseFile: Issued release request for file %s." % pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 2:
        # status 2 == file not found
        errMessage = "releaseFile: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "releaseFile: Failed issue release request."
        errMessage = urlDict['ErrorMessage']
        # NOTE(review): ( errMessage, pathSURL ) order differs from the ( pathSURL, errMessage )
        # convention used by the other methods of this class — consider aligning
        self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def exists( self, path ):
  """ Check if the given path exists.

  :param self: self reference
  :param str path: path(s) to check
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : bool } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "SRM2Storage.exists: Checking the existance of %s path(s)" % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "exists: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict["surl"] )
      if not pathSURL["OK"]:
        self.log.error( "SRM2Storage.exists: %s" % pathSURL["Message"] )
        failed[ urlDict["surl"] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL["Value"]
      if urlDict['status'] == 0:
        self.log.debug( "SRM2Storage.exists: Path exists: %s" % pathSURL )
        successful[pathSURL] = True
      elif urlDict['status'] in ( 16, 22 ) and self.busyFilesExist:
        # busy statuses are treated as 'exists' only when the SE is configured
        # (self.busyFilesExist) to consider files still being staged out as present
        self.log.debug( "SRM2Storage.exists: Path exists, file busy (e.g., stage-out): %s" % pathSURL )
        successful[pathSURL] = True
      elif urlDict['status'] == 2:
        # status 2 == path not found
        self.log.debug( "SRM2Storage.exists: Path does not exist: %s" % pathSURL )
        successful[pathSURL] = False
      else:
        errStr = "SRM2Storage.exists: Failed to get path metadata."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    else:
      errStr = "SRM2Storage.exists: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getFileSize( self, path ):
  """Get the physical size of the given file

  :param self: self reference
  :param str path: file path
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : size in B } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "SRM2Storage.getFileSize: Obtaining the size of %s file(s)." % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "getFileSize: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  listOfResults = resDict['AllResults']
  successful = {}
  # NOTE: failures below are logged at verbose level (not error) — missing files are
  # an expected condition for this query
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.verbose( "getFileSize: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['File']:
          successful[pathSURL] = statDict['Size']
        else:
          errStr = "SRM2Storage.getFileSize: Supplied path is not a file."
          self.log.verbose( errStr, pathSURL )
          failed[pathSURL] = errStr
      elif urlDict['status'] == 2:
        # status 2 == file not found
        errMessage = "SRM2Storage.getFileSize: File does not exist."
        self.log.verbose( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "SRM2Storage.getFileSize: Failed to get file metadata."
        errMessage = urlDict['ErrorMessage']
        self.log.verbose( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    else:
      errStr = "SRM2Storage.getFileSize: Returned element does not contain surl."
      self.log.error( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putFile( self, path, sourceSize = 0 ):
  """ Upload local file(s) to the storage, creating destination directories as needed.

  :param self: self reference
  :param mixed path: dict { dest_url : src_file } (normalised by checkArgumentFormat)
  :param int sourceSize: source file size in B, when already known
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  failed = {}
  successful = {}
  for dest_url, src_file in res['Value'].items():
    # Create destination directory
    mkRes = self.__executeOperation( os.path.dirname( dest_url ), 'createDirectory' )
    if not mkRes['OK']:
      failed[dest_url] = mkRes['Message']
      continue
    putRes = self.__putFile( src_file, dest_url, sourceSize )
    if putRes['OK']:
      successful[dest_url] = putRes['Value']
    else:
      failed[dest_url] = putRes['Message']
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __putFile( self, src_file, dest_url, sourceSize, checkExists = True ):
  """ put :src_file: to :dest_url:

  :param self: self reference
  :param str src_file: file path in local fs, or an srm: URL for third-party replication
  :param str dest_url: destination url on storage
  :param int sourceSize: :src_file: size in B (mandatory for srm: sources)
  :param bool checkExists: pre-check and remove an existing destination file
  """
  if checkExists:
    # Pre-transfer check: remove any pre-existing destination file
    res = self.__executeOperation( dest_url, 'exists' )
    if not res['OK']:
      self.log.debug( "__putFile: Failed to find pre-existance of destination file." )
      return res
    if res['Value']:
      res = self.__executeOperation( dest_url, 'removeFile' )
      if not res['OK']:
        self.log.debug( "__putFile: Failed to remove remote file %s." % dest_url )
      else:
        self.log.debug( "__putFile: Removed remote file %s." % dest_url )
  dsttype = self.defaulttype
  src_spacetokendesc = ''
  dest_spacetokendesc = self.spaceToken
  if re.search( 'srm:', src_file ):
    # third-party replication: source is another SRM SE, size must be supplied
    src_url = src_file
    srctype = 2
    if not sourceSize:
      return S_ERROR( "__putFile: For file replication the source file size must be provided." )
  else:
    # local upload: verify the file and take its size from the local fs
    if not os.path.exists( src_file ):
      errStr = "__putFile: The source local file does not exist."
      self.log.error( errStr, src_file )
      return S_ERROR( errStr )
    sourceSize = getSize( src_file )
    if sourceSize == -1:
      errStr = "__putFile: Failed to get file size."
      self.log.error( errStr, src_file )
      return S_ERROR( errStr )
    src_url = 'file:%s' % src_file
    srctype = 0
  if sourceSize == 0:
    errStr = "__putFile: Source file is zero size."
    self.log.error( errStr, src_file )
    return S_ERROR( errStr )
  # timeout scaled on the minimal expected bandwidth, plus a fixed grace period
  timeout = int( sourceSize / self.MIN_BANDWIDTH + 300 )
  if sourceSize > self.MAX_SINGLE_STREAM_SIZE:
    nbstreams = 4
  else:
    nbstreams = 1
  self.log.info( "__putFile: Executing transfer of %s to %s using %s streams" % ( src_url, dest_url, nbstreams ) )
  res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url,
                    srctype, dsttype, nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
  if not res['OK']:
    # Remove the failed replica, just in case
    result = self.__executeOperation( dest_url, 'removeFile' )
    if result['OK']:
      self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
    else:
      self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
    return res
  res = res['Value']
  if not res['OK']:
    # Remove the failed replica, just in case
    result = self.__executeOperation( dest_url, 'removeFile' )
    if result['OK']:
      self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
    else:
      self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
    return res
  errCode, errStr = res['Value']
  if errCode == 0:
    self.log.info( '__putFile: Successfully put file to storage.' )
    # # checksum check? return!
    if self.checksumType:
      return S_OK( sourceSize )
    # # else compare sizes
    res = self.__executeOperation( dest_url, 'getFileSize' )
    if res['OK']:
      destinationSize = res['Value']
      if sourceSize == destinationSize :
        self.log.debug( "__putFile: Post transfer check successful." )
        return S_OK( destinationSize )
      errorMessage = "__putFile: Source and destination file sizes do not match."
      self.log.error( errorMessage, src_url )
    else:
      # fixed: errorMessage was left undefined on this path, raising NameError below
      errorMessage = "__putFile: Failed to get destination file size."
      self.log.error( errorMessage, dest_url )
  else:
    errorMessage = "__putFile: Failed to put file to storage."
    if errCode > 0:
      errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
    self.log.error( errorMessage, errStr )
    errorMessage = errStr
  # transfer considered failed: clean up the remnant and report
  res = self.__executeOperation( dest_url, 'removeFile' )
  if res['OK']:
    self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
  else:
    self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
  return S_ERROR( errorMessage )
def __lcg_cp_wrapper( self, src_url, dest_url, srctype, dsttype, nbstreams,
                      timeout, src_spacetokendesc, dest_spacetokendesc ):
  """ lcg_util.lcg_cp wrapper

  Sanity-checks the ( errCode, errStr ) pair returned by lcg_cp4, since the binding
  has been observed to return unexpected types.

  :param self: self reference
  :param str src_url: source SURL
  :param str dest_url: destination SURL
  :param srctype: source SE type
  :param dsttype: destination SE type
  :param int nbstreams: nb of streams used for trasnfer
  :param int timeout: timeout in seconds
  :param str src_spacetokendesc: source space token
  :param str dest_spacetokendesc: destination space token
  :return: S_OK( ( errCode, errStr ) ) or S_ERROR
  """
  try:
    errCode, errStr = self.lcg_util.lcg_cp4( src_url,
                                             dest_url,
                                             self.defaulttype,
                                             srctype,
                                             dsttype,
                                             self.nobdii,
                                             self.voName,
                                             nbstreams,
                                             self.conf_file,
                                             self.insecure,
                                             self.verbose,
                                             timeout,
                                             src_spacetokendesc,
                                             dest_spacetokendesc,
                                             self.checksumType )
    # defensive type checks: the C binding must hand back ( int, str )
    if type( errCode ) != IntType:
      self.log.error( "__lcg_cp_wrapper: Returned errCode was not an integer",
                      "%s %s" % ( errCode, type( errCode ) ) )
      if type( errCode ) == ListType:
        msg = []
        for err in errCode:
          msg.append( '%s of type %s' % ( err, type( err ) ) )
        self.log.error( "__lcg_cp_wrapper: Returned errCode was List:\n" , "\n".join( msg ) )
      return S_ERROR( "__lcg_cp_wrapper: Returned errCode was not an integer" )
    if type( errStr ) not in StringTypes:
      self.log.error( "__lcg_cp_wrapper: Returned errStr was not a string",
                      "%s %s" % ( errCode, type( errStr ) ) )
      return S_ERROR( "__lcg_cp_wrapper: Returned errStr was not a string" )
    return S_OK( ( errCode, errStr ) )
  except Exception, error:
    # the binding can raise arbitrarily; report rather than propagate
    self.log.exception( "__lcg_cp_wrapper", "", error )
    return S_ERROR( "Exception while attempting file upload" )
def getFile( self, path, localPath = False ):
  """ make a local copy of a storage :path:

  :param self: self reference
  :param str path: path on storage
  :param mixed localPath: destination directory; if not specified, os.getcwd()
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : local size } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  failed = {}
  successful = {}
  for src_url in urls:
    fileName = os.path.basename( src_url )
    # os.path.join instead of "%s/%s" avoids doubled separators when
    # localPath carries a trailing slash
    if localPath:
      dest_file = os.path.join( localPath, fileName )
    else:
      dest_file = os.path.join( os.getcwd(), fileName )
    res = self.__getFile( src_url, dest_file )
    if res['OK']:
      successful[src_url] = res['Value']
    else:
      failed[src_url] = res['Message']
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getFile( self, src_url, dest_file ):
  """ do a real copy of storage file :src_url: to local fs under :dest_file:

  :param self: self reference
  :param str src_url: SE url to cp
  :param str dest_file: local fs path
  """
  # make sure the local destination directory exists and the target file does not
  if not os.path.exists( os.path.dirname( dest_file ) ):
    os.makedirs( os.path.dirname( dest_file ) )
  if os.path.exists( dest_file ):
    self.log.debug( "__getFile: Local file already exists %s. Removing..." % dest_file )
    os.remove( dest_file )
  srctype = self.defaulttype
  src_spacetokendesc = self.spaceToken
  dsttype = 0
  dest_spacetokendesc = ''
  dest_url = 'file:%s' % dest_file
  # remote size is needed both for the timeout estimate and the post-transfer check
  res = self.__executeOperation( src_url, 'getFileSize' )
  if not res['OK']:
    return S_ERROR( res['Message'] )
  remoteSize = res['Value']
  # note the extra factor 4 w.r.t. the upload timeout in __putFile
  timeout = int( remoteSize / self.MIN_BANDWIDTH * 4 + 300 )
  nbstreams = 1
  self.log.info( "__getFile: Using %d streams" % nbstreams )
  self.log.info( "__getFile: Executing transfer of %s to %s" % ( src_url, dest_url ) )
  res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url, srctype, dsttype,
                    nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
  if not res['OK']:
    return res
  res = res['Value']
  if not res['OK']:
    return res
  errCode, errStr = res['Value']
  if errCode == 0:
    self.log.debug( '__getFile: Got a file from storage.' )
    # post-transfer check: local size must match the remote one
    localSize = getSize( dest_file )
    if localSize == remoteSize:
      self.log.debug( "__getFile: Post transfer check successful." )
      return S_OK( localSize )
    errorMessage = "__getFile: Source and destination file sizes do not match."
    self.log.error( errorMessage, src_url )
  else:
    errorMessage = "__getFile: Failed to get file from storage."
    if errCode > 0:
      # positive error codes are errno values
      errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
    self.log.error( errorMessage, errStr )
  # failed or size mismatch: remove the partial local copy
  if os.path.exists( dest_file ):
    self.log.debug( "__getFile: Removing local file %s." % dest_file )
    os.remove( dest_file )
  return S_ERROR( errorMessage )
def __executeOperation( self, url, method ):
  """ executes the requested :method: with the supplied url

  Unwraps the bulk { 'Failed' : {}, 'Successful' : {} } return structure of the
  named method into a plain S_OK( value ) / S_ERROR( reason ) for the single :url:.

  :param self: self reference
  :param str url: SE url
  :param str method: fcn name
  """
  fcn = None
  if hasattr( self, method ) and callable( getattr( self, method ) ):
    fcn = getattr( self, method )
  if not fcn:
    return S_ERROR( "Unable to invoke %s, it isn't a member funtion of SRM2Storage" % method )
  res = fcn( url )
  if not res['OK']:
    return res
  elif url not in res['Value']['Successful']:
    if url not in res['Value']['Failed']:
      # url keyed under neither dict (the method may have rewritten the key);
      # fall back to the first available entry, preferring Failed
      if res['Value']['Failed'].values():
        return S_ERROR( res['Value']['Failed'].values()[0] )
      elif res['Value']['Successful'].values():
        return S_OK( res['Value']['Successful'].values()[0] )
      else:
        self.log.error( 'Wrong Return structure', str( res['Value'] ) )
        return S_ERROR( 'Wrong Return structure' )
    return S_ERROR( res['Value']['Failed'][url] )
  return S_OK( res['Value']['Successful'][url] )
############################################################################################
#
# Directory based methods
#
def isDirectory( self, path ):
  """ isdir on storage path

  :param self: self reference
  :param str path: SE path
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : bool } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "SRM2Storage.isDirectory: Checking whether %s path(s) are directory(ies)" % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "isDirectory: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      dirSURL = self.getUrl( urlDict['surl'] )
      if not dirSURL["OK"]:
        self.log.error( "isDirectory: %s" % dirSURL["Message"] )
        failed[ urlDict['surl'] ] = dirSURL["Message"]
        continue
      dirSURL = dirSURL['Value']
      if urlDict['status'] == 0:
        # path exists: decide directory vs file from the parsed stat info
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['Directory']:
          successful[dirSURL] = True
        else:
          self.log.debug( "SRM2Storage.isDirectory: Path is not a directory: %s" % dirSURL )
          successful[dirSURL] = False
      elif urlDict['status'] == 2:
        # status 2 == path not found; reported as Failed, unlike isFile which reports False
        self.log.debug( "SRM2Storage.isDirectory: Supplied path does not exist: %s" % dirSURL )
        failed[dirSURL] = 'Directory does not exist'
      else:
        errStr = "SRM2Storage.isDirectory: Failed to get file metadata."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( dirSURL, errMessage ) )
        failed[dirSURL] = "%s %s" % ( errStr, errMessage )
    else:
      errStr = "SRM2Storage.isDirectory: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getDirectoryMetadata( self, path ):
  """ get the metadata for the directory :path:

  :param self: self reference
  :param str path: SE path
  :return: S_OK( { 'Failed' : { pfn : reason }, 'Successful' : { pfn : metadata dict } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "getDirectoryMetadata: Attempting to obtain metadata for %s directories." % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "getDirectoryMetadata: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if "surl" in urlDict and urlDict["surl"]:
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "getDirectoryMetadata: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['Directory']:
          successful[pathSURL] = statDict
        else:
          errStr = "SRM2Storage.getDirectoryMetadata: Supplied path is not a directory."
          self.log.error( errStr, pathSURL )
          failed[pathSURL] = errStr
      elif urlDict['status'] == 2:
        # status 2 == path not found
        errMessage = "SRM2Storage.getDirectoryMetadata: Directory does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "SRM2Storage.getDirectoryMetadata: Failed to get directory metadata."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    else:
      errStr = "SRM2Storage.getDirectoryMetadata: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getDirectorySize( self, path ):
  """ Get the size of the directory on the storage: number of files, cumulative
  size and number of sub-directories (non-recursive).

  :param self: self reference
  :param str path: SE path
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "SRM2Storage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
  res = self.listDirectory( urls )
  if not res['OK']:
    return res
  failed = res['Value']['Failed']
  successful = {}
  for directory, dirDict in res['Value']['Successful'].items():
    filesDict = dirDict['Files']
    totalSize = sum( [ fileDict['Size'] for fileDict in filesDict.itervalues() ] )
    self.log.debug( "SRM2Storage.getDirectorySize: Successfully obtained size of %s." % directory )
    successful[directory] = { 'Files' : len( filesDict ),
                              'Size' : totalSize,
                              'SubDirs' : len( dirDict['SubDirs'] ) }
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def listDirectory( self, path ):
  """ List the contents of the directory on the storage

  For each input directory returns two dicts keyed by SURL: 'SubDirs' and
  'Files', each value being the parsed metadata of the entry.

  :param path: path(s) in any format accepted by checkArgumentFormat
  :return: S_OK( { 'Failed' : { url : reason },
                   'Successful' : { url : { 'SubDirs' : {...}, 'Files' : {...} } } } )
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "SRM2Storage.listDirectory: Attempting to list %s directories." % len( urls ) )
  # only genuine directories are passed to the gfal listing
  res = self.isDirectory( urls )
  if not res['OK']:
    return res
  failed = res['Value']['Failed']
  directories = {}
  for url, isDirectory in res['Value']['Successful'].items():
    if isDirectory:
      directories[url] = False
    else:
      errStr = "SRM2Storage.listDirectory: Directory does not exist."
      self.log.error( errStr, url )
      failed[url] = errStr
  resDict = self.__gfal_lsdir_wrapper( directories )
  if not resDict["OK"]:
    self.log.error( "listDirectory: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  # resDict = self.__gfalls_wrapper(directories,1)['Value']
  failed.update( resDict['Failed'] )
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if "surl" in urlDict and urlDict["surl"]:
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "listDirectory: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        successful[pathSURL] = {}
        self.log.debug( "SRM2Storage.listDirectory: Successfully listed directory %s" % pathSURL )
        subPathDirs = {}
        subPathFiles = {}
        if "subpaths" in urlDict:
          subPaths = urlDict['subpaths']
          # Parse the subpaths for the directory, splitting them into
          # files and sub-directories according to their stat metadata
          for subPathDict in subPaths:
            subPathSURL = self.getUrl( subPathDict['surl'] )['Value']
            if subPathDict['status'] == 22:
              self.log.error( "File found with status 22", subPathDict )
            elif subPathDict['status'] == 0:
              statDict = self.__parse_file_metadata( subPathDict )
              if statDict['File']:
                subPathFiles[subPathSURL] = statDict
              elif statDict['Directory']:
                subPathDirs[subPathSURL] = statDict
        # Keep the information about this path's subpaths
        successful[pathSURL]['SubDirs'] = subPathDirs
        successful[pathSURL]['Files'] = subPathFiles
      else:
        errStr = "SRM2Storage.listDirectory: Failed to list directory."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    else:
      # a result without a surl means the gfal reply is unusable; abort hard
      errStr = "SRM2Storage.listDirectory: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putDirectory( self, path ):
  """ cp -R local SE

  Puts a local directory to the physical storage together with all its
  files and subdirectories.

  :param self: self reference
  :param str path: local fs path
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) with per-directory
           'Files' and 'Size' counters
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  self.log.debug( "SRM2Storage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
  for destDir, sourceDir in urls.items():
    res = self.__putDir( sourceDir, destDir )
    if not res['OK']:
      # nothing was transferred at all for this directory
      self.log.error( "SRM2Storage.putDirectory: Completely failed to put directory to remote storage.", destDir )
      failed[destDir] = { "Files" : 0, "Size" : 0 }
      continue
    counters = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
    if res['Value']['AllPut']:
      self.log.debug( "SRM2Storage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
      successful[destDir] = counters
    else:
      self.log.error( "SRM2Storage.putDirectory: Failed to put entire directory to remote storage.", destDir )
      failed[destDir] = counters
  return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putDir( self, src_directory, dest_directory ):
  """ Recursively upload local directory :src_directory: to
  :dest_directory: on the storage.

  :param str src_directory: local source directory
  :param str dest_directory: remote destination directory
  :return: S_OK( { 'AllPut' : bool, 'Files' : int, 'Size' : int } ) or S_ERROR
  """
  filesPut = 0
  sizePut = 0
  # Check the local directory exists
  if not os.path.isdir( src_directory ):
    errStr = "SRM2Storage.__putDir: The supplied directory does not exist."
    self.log.error( errStr, src_directory )
    return S_ERROR( errStr )
  # Get the local directory contents
  contents = os.listdir( src_directory )
  allSuccessful = True
  directoryFiles = {}
  for fileName in contents:
    localPath = '%s/%s' % ( src_directory, fileName )
    remotePath = '%s/%s' % ( dest_directory, fileName )
    if not os.path.isdir( localPath ):
      # plain files are collected and uploaded in one putFile call below
      directoryFiles[remotePath] = localPath
    else:
      res = self.__putDir( localPath, remotePath )
      if not res['OK']:
        errStr = "SRM2Storage.__putDir: Failed to put directory to storage."
        self.log.error( errStr, res['Message'] )
        # bug fix: a completely failed sub-directory upload previously left
        # 'AllPut' True
        allSuccessful = False
      else:
        if not res['Value']['AllPut']:
          # bug fix: this used to assign the never-read variable
          # 'pathSuccessful', so partial sub-directory uploads were
          # reported as complete
          allSuccessful = False
        filesPut += res['Value']['Files']
        sizePut += res['Value']['Size']
  if directoryFiles:
    res = self.putFile( directoryFiles )
    if not res['OK']:
      self.log.error( "SRM2Storage.__putDir: Failed to put files to storage.", res['Message'] )
      allSuccessful = False
    else:
      for fileSize in res['Value']['Successful'].itervalues():
        filesPut += 1
        sizePut += fileSize
      if res['Value']['Failed']:
        allSuccessful = False
  return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut } )
def getDirectory( self, path, localPath = False ):
  """ Get a local copy in the current directory of a physical file specified
  by its path.

  Each remote directory is copied into <localPath>/<basename> or, when
  :localPath: is not given, into <cwd>/<basename>.

  :param path: remote path(s)
  :param localPath: optional local destination parent directory
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) with per-directory
           'Files' and 'Size' counters
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  failed = {}
  successful = {}
  self.log.debug( "SRM2Storage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
  for src_dir in urls:
    destBase = localPath if localPath else os.getcwd()
    dest_dir = "%s/%s" % ( destBase, os.path.basename( src_dir ) )
    res = self.__getDir( src_dir, dest_dir )
    if not res['OK']:
      self.log.error( "SRM2Storage.getDirectory: Completely failed to get local copy of directory.", src_dir )
      failed[src_dir] = {'Files':0, 'Size':0}
      continue
    counters = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
    if res['Value']['AllGot']:
      self.log.debug( "SRM2Storage.getDirectory: Successfully got local copy of %s" % src_dir )
      successful[src_dir] = counters
    else:
      self.log.error( "SRM2Storage.getDirectory: Failed to get entire directory.", src_dir )
      failed[src_dir] = counters
  return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getDir( self, srcDirectory, destDirectory ):
  """ Recursively download remote directory :srcDirectory: into local
  :destDirectory:, creating the latter when needed.

  :param str srcDirectory: remote (SE) source directory
  :param str destDirectory: local destination directory
  :return: S_OK( { 'AllGot' : bool, 'Files' : int, 'Size' : int } ) or S_ERROR
  """
  filesGot = 0
  sizeGot = 0
  # Check the remote directory exists
  res = self.__executeOperation( srcDirectory, 'isDirectory' )
  if not res['OK']:
    self.log.error( "SRM2Storage.__getDir: Failed to find the supplied source directory.", srcDirectory )
    return res
  if not res['Value']:
    errStr = "SRM2Storage.__getDir: The supplied source path is not a directory."
    self.log.error( errStr, srcDirectory )
    return S_ERROR( errStr )
  # Check the local directory exists and create it if not
  if not os.path.exists( destDirectory ):
    os.makedirs( destDirectory )
  # Get the remote directory contents
  res = self.__getDirectoryContents( srcDirectory )
  if not res['OK']:
    errStr = "SRM2Storage.__getDir: Failed to list the source directory."
    self.log.error( errStr, srcDirectory )
    # bug fix: previously fell through after logging and crashed on
    # res['Value'] below; abort explicitly instead
    return S_ERROR( errStr )
  filesToGet = res['Value']['Files']
  subDirs = res['Value']['SubDirs']
  allSuccessful = True
  res = self.getFile( filesToGet.keys(), destDirectory )
  if not res['OK']:
    self.log.error( "SRM2Storage.__getDir: Failed to get files from storage.", res['Message'] )
    allSuccessful = False
  else:
    for fileSize in res['Value']['Successful'].itervalues():
      filesGot += 1
      sizeGot += fileSize
    if res['Value']['Failed']:
      allSuccessful = False
  for subDir in subDirs:
    subDirName = os.path.basename( subDir )
    localPath = '%s/%s' % ( destDirectory, subDirName )
    res = self.__getDir( subDir, localPath )
    if res['OK']:
      if not res['Value']['AllGot']:
        # bug fix: an incomplete sub-directory previously set this back
        # to True, masking partial failures
        allSuccessful = False
      filesGot += res['Value']['Files']
      sizeGot += res['Value']['Size']
    else:
      # bug fix: a completely failed sub-directory download now marks the
      # overall result as incomplete
      allSuccessful = False
  return S_OK( { 'AllGot' : allSuccessful, 'Files' : filesGot, 'Size' : sizeGot } )
def removeDirectory( self, path, recursive = False ):
  """ Remove a directory from the storage.

  :param path: directory path(s)
  :param bool recursive: when True, also delete contained files and
         sub-directories before removing the directory itself
  """
  # dispatch to the recursive or the plain flavour
  if recursive:
    return self.__removeDirectoryRecursive( path )
  return self.__removeDirectory( path )
def __removeDirectory( self, directory ):
  """ This function removes the directory on the storage

  Non-recursive: only the directory entries themselves are removed via
  the gfal removedir call.

  :param directory: directory path(s)
  :return: S_OK( { 'Failed' : { surl : reason }, 'Successful' : { surl : True } } )
  """
  res = checkArgumentFormat( directory )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "SRM2Storage.__removeDirectory: Attempting to remove %s directories." % len( urls ) )
  resDict = self.__gfal_removedir_wrapper( urls )
  if not resDict["OK"]:
    self.log.error( "__removeDirectory: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if "surl" in urlDict:
      pathSURL = urlDict['surl']
      if urlDict['status'] == 0:
        self.log.debug( "__removeDirectory: Successfully removed directory: %s" % pathSURL )
        successful[pathSURL] = True
      elif urlDict['status'] == 2:
        # This is the case where the file doesn't exist: removal of a
        # non-existent directory is treated as a success
        self.log.debug( "__removeDirectory: Directory did not exist, sucessfully removed: %s" % pathSURL )
        successful[pathSURL] = True
      else:
        errStr = "removeDirectory: Failed to remove directory."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __removeDirectoryRecursive( self, directory ):
  """ Recursively removes the directory and sub dirs. Repeatedly calls itself to delete recursively.

  :param directory: directory path(s)
  :return: S_OK( { 'Failed' : { dir : counters }, 'Successful' : { dir : counters } } )
           where counters is { 'FilesRemoved' : int, 'SizeRemoved' : int }
  """
  res = checkArgumentFormat( directory )
  if not res['OK']:
    return res
  urls = res['Value']
  successful = {}
  failed = {}
  self.log.debug( "SRM2Storage.__removeDirectory: Attempting to recursively remove %s directories." % len( urls ) )
  for directory in urls:
    self.log.debug( "SRM2Storage.removeDirectory: Attempting to remove %s" % directory )
    res = self.__getDirectoryContents( directory )
    resDict = {'FilesRemoved':0, 'SizeRemoved':0}
    if not res['OK']:
      failed[directory] = resDict
    else:
      filesToRemove = res['Value']['Files']
      subDirs = res['Value']['SubDirs']
      # Remove all the files in the directory
      res = self.__removeDirectoryFiles( filesToRemove )
      resDict['FilesRemoved'] += res['FilesRemoved']
      resDict['SizeRemoved'] += res['SizeRemoved']
      allFilesRemoved = res['AllRemoved']
      # Remove all the sub-directories
      res = self.__removeSubDirectories( subDirs )
      resDict['FilesRemoved'] += res['FilesRemoved']
      resDict['SizeRemoved'] += res['SizeRemoved']
      allSubDirsRemoved = res['AllRemoved']
      # If all the files and sub-directories are removed then remove the directory
      allRemoved = False
      if allFilesRemoved and allSubDirsRemoved:
        self.log.debug( "SRM2Storage.removeDirectory: Successfully removed all files and sub-directories." )
        res = self.__removeDirectory( directory )
        if res['OK']:
          if directory in res['Value']['Successful']:
            self.log.debug( "SRM2Storage.removeDirectory: Successfully removed the directory %s." % directory )
            allRemoved = True
      # Report the result
      if allRemoved:
        successful[directory] = resDict
      else:
        failed[directory] = resDict
  return S_OK ( { 'Failed' : failed, 'Successful' : successful } )
def __getDirectoryContents( self, directory ):
  """ ls of storage element :directory:

  :param self: self reference
  :param str directory: SE path
  :return: S_OK( { 'Files' : { surl : size }, 'SubDirs' : [ surl, ... ] } )
  """
  directory = directory.rstrip( '/' )
  errMessage = "SRM2Storage.__getDirectoryContents: Failed to list directory."
  res = self.__executeOperation( directory, 'listDirectory' )
  if not res['OK']:
    self.log.error( errMessage, res['Message'] )
    return S_ERROR( errMessage )
  surlsDict = res['Value']['Files']
  subDirsDict = res['Value']['SubDirs']
  # map each file surl to its size, as needed by the removal bookkeeping
  filesToRemove = {}
  for url in surlsDict:
    filesToRemove[url] = surlsDict[url]['Size']
  return S_OK ( { 'Files' : filesToRemove, 'SubDirs' : subDirsDict.keys() } )
def __removeDirectoryFiles( self, filesToRemove ):
  """ rm files from SE

  :param self: self reference
  :param dict filesToRemove: dict with surls as keys and sizes as values
  :return: plain dict { 'FilesRemoved' : int, 'SizeRemoved' : int, 'AllRemoved' : bool }
  """
  resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
  if filesToRemove:
    res = self.removeFile( filesToRemove.keys() )
    if res['OK']:
      removed = res['Value']['Successful']
      resDict['FilesRemoved'] = len( removed )
      for surl in removed:
        resDict['SizeRemoved'] += filesToRemove[surl]
      if res['Value']['Failed']:
        resDict['AllRemoved'] = False
  self.log.debug( "SRM2Storage.__removeDirectoryFiles:",
                  "Removed %s files of size %s bytes." % ( resDict['FilesRemoved'], resDict['SizeRemoved'] ) )
  return resDict
def __removeSubDirectories( self, subDirectories ):
  """ rm -rf sub-directories

  :param self: self reference
  :param dict subDirectories: dict with surls as keys
  :return: plain dict { 'FilesRemoved' : int, 'SizeRemoved' : int, 'AllRemoved' : bool }
  """
  resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
  if len( subDirectories ) > 0:
    res = self.__removeDirectoryRecursive( subDirectories )
    if res['OK']:
      # counters are accumulated from BOTH the Successful and the Failed
      # sub-directories: a failed removal may still have deleted some files
      for removedSubDir, removedDict in res['Value']['Successful'].items():
        resDict['FilesRemoved'] += removedDict['FilesRemoved']
        resDict['SizeRemoved'] += removedDict['SizeRemoved']
        self.log.debug( "SRM2Storage.__removeSubDirectories:",
                        "Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
                                                                         removedDict['SizeRemoved'],
                                                                         removedSubDir ) )
      for removedSubDir, removedDict in res['Value']['Failed'].items():
        resDict['FilesRemoved'] += removedDict['FilesRemoved']
        resDict['SizeRemoved'] += removedDict['SizeRemoved']
        self.log.debug( "SRM2Storage.__removeSubDirectories:",
                        "Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
                                                                         removedDict['SizeRemoved'],
                                                                         removedSubDir ) )
      if len( res['Value']['Failed'] ) != 0:
        resDict['AllRemoved'] = False
  return resDict
@staticmethod
def __parse_stat( stat ):
  """ Convert a stat struct into a metadata dict.

  :param stat: stat struct
  :return: dict with 'File' and 'Directory' booleans, 'Mode' (permission
           bits) and, for regular files, 'Size'
  """
  statDict = { 'File' : False, 'Directory' : False }
  mode = stat[ST_MODE]
  if S_ISREG( mode ):
    # regular file: record its size as well
    statDict['File'] = True
    statDict['Size'] = stat[ST_SIZE]
  if S_ISDIR( mode ):
    statDict['Directory'] = True
  # keep only the permission bits, stripped of the file-type bits
  statDict['Mode'] = S_IMODE( mode )
  return statDict
def __parse_file_metadata( self, urlDict ):
  """ parse and save bits and pieces of metadata info

  :param self: self reference
  :param urlDict: gfal call results
  :return: metadata dict from __parse_stat, extended for files with
           'Checksum' and the SRM locality flags
  """
  statDict = self.__parse_stat( urlDict['stat'] )
  if statDict['File']:
    statDict.setdefault( "Checksum", "" )
    # '0x' is the placeholder gfal returns when no checksum is available
    if "checksum" in urlDict and ( urlDict['checksum'] != '0x' ):
      statDict["Checksum"] = urlDict["checksum"]
    if 'locality' in urlDict:
      # translate the SRM locality string into integer flags
      urlLocality = urlDict['locality']
      if re.search( 'ONLINE', urlLocality ):
        statDict['Cached'] = 1
      else:
        statDict['Cached'] = 0
      if re.search( 'NEARLINE', urlLocality ):
        statDict['Migrated'] = 1
      else:
        statDict['Migrated'] = 0
      statDict['Lost'] = 0
      if re.search( 'LOST', urlLocality ):
        statDict['Lost'] = 1
      statDict['Unavailable'] = 0
      if re.search( 'UNAVAILABLE', urlLocality ):
        statDict['Unavailable'] = 1
  return statDict
def __getProtocols( self ):
  """ returns list of protocols to use at a given site

  :warn: priority is given to a protocols list defined in the CS
  :param self: self reference
  :return: S_OK( list of protocol names ) or S_ERROR when neither the SE
           section nor /Resources/StorageElements/DefaultProtocols defines any
  """
  sections = gConfig.getSections( '/Resources/StorageElements/%s/' % ( self.name ) )
  if not sections['OK']:
    return sections
  protocolsList = []
  for section in sections['Value']:
    # only the section matching this storage's protocol is considered
    path = '/Resources/StorageElements/%s/%s/ProtocolName' % ( self.name, section )
    if gConfig.getValue( path, '' ) == self.protocolName:
      protPath = '/Resources/StorageElements/%s/%s/ProtocolsList' % ( self.name, section )
      siteProtocols = gConfig.getValue( protPath, [] )
      if siteProtocols:
        self.log.debug( 'Found SE protocols list to override defaults:', ', '.join( siteProtocols, ) )
        protocolsList = siteProtocols
  if not protocolsList:
    # fall back to the global defaults when the SE defines none
    self.log.debug( "SRM2Storage.getTransportURL: No protocols provided, using defaults." )
    protocolsList = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
  if not protocolsList:
    return S_ERROR( "SRM2Storage.getTransportURL: No local protocols defined and no defaults found" )
  return S_OK( protocolsList )
#######################################################################
#
# These methods wrap the gfal functionality with the accounting. All these are based on __gfal_operation_wrapper()
#
#######################################################################
def __gfal_lsdir_wrapper( self, urls ):
  """ This is a hack because the structures returned by the different SEs are different

  Lists each directory in pages of :step: entries (srmv2_lsoffset /
  srmv2_lscount) until fewer than a full page is returned.

  :param urls: iterable of directory surls
  :return: S_OK( { 'AllResults' : [ { 'surl', 'status', 'subpaths' } ],
                   'Failed' : { url : message } } )
  """
  step = 200
  gfalDict = {}
  gfalDict['defaultsetype'] = 'srmv2'
  gfalDict['no_bdii_check'] = 1
  gfalDict['srmv2_lslevels'] = 1
  gfalDict['srmv2_lscount'] = step
  failed = {}
  successful = []
  for url in urls:
    allResults = []
    gfalDict['surls'] = [url]
    gfalDict['nbfiles'] = 1
    gfalDict['timeout'] = self.gfalLongTimeOut
    allObtained = False
    iteration = 0
    while not allObtained:
      gfalDict['srmv2_lsoffset'] = iteration * step
      iteration += 1
      res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
      # gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        # bug fix: pattern is now a raw string; the old non-raw literal
        # contained invalid '\[' escape sequences (SyntaxWarning on
        # modern Python, future SyntaxError)
        if re.search( r'\[SE\]\[Ls\]\[SRM_FAILURE\]', res['Message'] ):
          allObtained = True
        else:
          failed[url] = res['Message']
      else:
        results = res['Value']
        tempStep = step
        if len( results ) == 1:
          for result in results:
            if 'subpaths' in result:
              results = result['subpaths']
              tempStep = step - 1
            elif re.search( result['surl'], url ):
              # NOTE(review): the surl is used here as a regex pattern;
              # metacharacters in a surl could misbehave — confirm whether
              # re.escape / substring matching is intended
              results = []
        allResults.extend( results )
        if len( results ) < tempStep:
          allObtained = True
    successful.append( { 'surl' : url, 'status' : 0, 'subpaths' : allResults } )
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : successful, "Failed" : failed } )
def __gfal_ls_wrapper( self, urls, depth ):
  """ gfal_ls wrapper

  :param self: self reference
  :param list urls: urls to check
  :param int depth: srmv2_lslevel (0 or 1)
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = { 'defaultsetype' : 'srmv2',
               'no_bdii_check' : 1,
               'srmv2_lslevels' : depth }
  allResults = []
  failed = {}
  # issue one gfal_ls call per chunk of at most filesPerCall surls
  for chunk in breakListIntoChunks( urls.keys(), self.filesPerCall ):
    gfalDict['surls'] = chunk
    gfalDict['nbfiles'] = len( chunk )
    gfalDict['timeout'] = self.fileTimeout * len( chunk )
    res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
    # gDataStoreClient.addRegister( res['AccountingOperation'] )
    if res['OK']:
      allResults.extend( res['Value'] )
    else:
      for url in chunk:
        failed[url] = res['Message']
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_prestage_wrapper( self, urls, lifetime ):
  """ gfal_prestage wrapper

  :param self: self refefence
  :param list urls: urls to prestage
  :param int lifetime: prestage lifetime
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = {}
  gfalDict['defaultsetype'] = 'srmv2'
  gfalDict['no_bdii_check'] = 1
  gfalDict['srmv2_spacetokendesc'] = self.spaceToken
  gfalDict['srmv2_desiredpintime'] = lifetime
  gfalDict['protocols'] = self.defaultLocalProtocols
  allResults = []
  failed = {}
  # issue one gfal_prestage call per chunk of at most filesPerCall surls
  listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
  for urls in listOfLists:
    gfalDict['surls'] = urls
    gfalDict['nbfiles'] = len( urls )
    gfalDict['timeout'] = self.stageTimeout
    # sendreceive timeout scales with the chunk size; the overall call
    # timeout stays at stageTimeout
    res = self.__gfal_operation_wrapper( 'gfal_prestage',
                                         gfalDict,
                                         timeout_sendreceive = self.fileTimeout * len( urls ) )
    gDataStoreClient.addRegister( res['AccountingOperation'] )
    if not res['OK']:
      for url in urls:
        failed[url] = res['Message']
    else:
      allResults.extend( res['Value'] )
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfalturlsfromsurls_wrapper( self, urls, listProtocols ):
  """ This is a function that can be reused everywhere to perform the gfal_turlsfromsurls

  :param urls: dict with surls as keys
  :param listProtocols: protocols to negotiate for the turls
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = { 'defaultsetype' : 'srmv2',
               'no_bdii_check' : 1,
               'protocols' : listProtocols,
               'srmv2_spacetokendesc' : self.spaceToken }
  allResults = []
  failed = {}
  # issue one gfal call per chunk of at most filesPerCall surls
  for chunk in breakListIntoChunks( urls.keys(), self.filesPerCall ):
    gfalDict['surls'] = chunk
    gfalDict['nbfiles'] = len( chunk )
    gfalDict['timeout'] = self.fileTimeout * len( chunk )
    res = self.__gfal_operation_wrapper( 'gfal_turlsfromsurls', gfalDict )
    gDataStoreClient.addRegister( res['AccountingOperation'] )
    if res['OK']:
      allResults.extend( res['Value'] )
    else:
      for url in chunk:
        failed[url] = res['Message']
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfaldeletesurls_wrapper( self, urls ):
  """ This is a function that can be reused everywhere to perform the gfal_deletesurls

  :param urls: dict with surls as keys
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = { 'defaultsetype' : 'srmv2',
               'no_bdii_check' : 1 }
  allResults = []
  failed = {}
  # issue one gfal call per chunk of at most filesPerCall surls
  for chunk in breakListIntoChunks( urls.keys(), self.filesPerCall ):
    gfalDict['surls'] = chunk
    gfalDict['nbfiles'] = len( chunk )
    gfalDict['timeout'] = self.fileTimeout * len( chunk )
    res = self.__gfal_operation_wrapper( 'gfal_deletesurls', gfalDict )
    gDataStoreClient.addRegister( res['AccountingOperation'] )
    if res['OK']:
      allResults.extend( res['Value'] )
    else:
      for url in chunk:
        failed[url] = res['Message']
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_removedir_wrapper( self, urls ):
  """ This is a function that can be reused everywhere to perform the gfal_removedir

  :param urls: dict with surls as keys
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = { 'defaultsetype' : 'srmv2',
               'no_bdii_check' : 1,
               'srmv2_spacetokendesc' : self.spaceToken }
  allResults = []
  failed = {}
  # issue one gfal call per chunk of at most filesPerCall surls
  for chunk in breakListIntoChunks( urls.keys(), self.filesPerCall ):
    gfalDict['surls'] = chunk
    gfalDict['nbfiles'] = len( chunk )
    gfalDict['timeout'] = self.fileTimeout * len( chunk )
    res = self.__gfal_operation_wrapper( 'gfal_removedir', gfalDict )
    gDataStoreClient.addRegister( res['AccountingOperation'] )
    if res['OK']:
      allResults.extend( res['Value'] )
    else:
      for url in chunk:
        failed[url] = res['Message']
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_pin_wrapper( self, urls, lifetime ):
  """ gfal_pin wrapper

  :param self: self reference
  :param dict urls: dict { url : srmRequestID }
  :param int lifetime: pin lifetime in seconds
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = {}
  gfalDict['defaultsetype'] = 'srmv2'
  gfalDict['no_bdii_check'] = 0
  gfalDict['srmv2_spacetokendesc'] = self.spaceToken
  gfalDict['srmv2_desiredpintime'] = lifetime
  allResults = []
  failed = {}
  # group the urls by SRM request so each gfal call carries one request ID
  srmRequestFiles = {}
  for url, srmRequestID in urls.items():
    if srmRequestID not in srmRequestFiles:
      srmRequestFiles[srmRequestID] = []
    srmRequestFiles[srmRequestID].append( url )
  for srmRequestID, urls in srmRequestFiles.items():
    listOfLists = breakListIntoChunks( urls, self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_pin', gfalDict, srmRequestID = srmRequestID )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_prestagestatus_wrapper( self, urls ):
  """ gfal_prestagestatus wrapper

  :param self: self reference
  :param dict urls: dict { url : srmRequestID } (the code below groups the
         urls by their SRM request ID before calling gfal)
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = {}
  gfalDict['defaultsetype'] = 'srmv2'
  gfalDict['no_bdii_check'] = 0
  gfalDict['srmv2_spacetokendesc'] = self.spaceToken
  allResults = []
  failed = {}
  # group the urls by SRM request so each gfal call carries one request ID
  srmRequestFiles = {}
  for url, srmRequestID in urls.items():
    if srmRequestID not in srmRequestFiles:
      srmRequestFiles[srmRequestID] = []
    srmRequestFiles[srmRequestID].append( url )
  for srmRequestID, urls in srmRequestFiles.items():
    listOfLists = breakListIntoChunks( urls, self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_prestagestatus', gfalDict, srmRequestID = srmRequestID )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_release_wrapper( self, urls ):
  """ gfal_release wrapper

  :param self: self reference
  :param dict urls: dict { url : srmRequestID }
  :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
  """
  gfalDict = {}
  gfalDict['defaultsetype'] = 'srmv2'
  gfalDict['no_bdii_check'] = 0
  allResults = []
  failed = {}
  # group the urls by SRM request so each gfal call carries one request ID
  srmRequestFiles = {}
  for url, srmRequestID in urls.items():
    if srmRequestID not in srmRequestFiles:
      srmRequestFiles[srmRequestID] = []
    srmRequestFiles[srmRequestID].append( url )
  for srmRequestID, urls in srmRequestFiles.items():
    listOfLists = breakListIntoChunks( urls, self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_release', gfalDict, srmRequestID = srmRequestID )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
  # gDataStoreClient.commit()
  return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_operation_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
  """ gfal fcn call wrapper

  Runs the gfal call in a separate process (pythonCall) with a timeout and
  fills a DataOperation accounting record.

  :param self: self reference
  :param str operation: gfal fcn name
  :param dict gfalDict: gfal dict passed to create gfal object
  :param srmRequestID: srmRequestID
  :param int timeout_sendreceive: gfal sendreceive timeout in seconds
  :return: S_OK/S_ERROR from the gfal call, always carrying an extra
           'AccountingOperation' key with the DataOperation record
  """
  # Create an accounting DataOperation record for each operation
  oDataOperation = self.__initialiseAccountingObject( operation, self.name, gfalDict['nbfiles'] )
  oDataOperation.setStartTime()
  start = time.time()
  res = self.__importExternals()
  if not res['OK']:
    oDataOperation.setEndTime()
    oDataOperation.setValueByKey( 'TransferTime', 0. )
    oDataOperation.setValueByKey( 'TransferOK', 0 )
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
    res['AccountingOperation'] = oDataOperation
    return res
  # # timeout for one gfal_exec call
  timeout = gfalDict['timeout'] if not timeout_sendreceive else timeout_sendreceive
  # # pythonCall timeout ( const + timeout * ( 2 ** retry )
  pyTimeout = 300 + ( timeout * ( 2 ** self.gfalRetry ) )
  res = pythonCall( pyTimeout, self.__gfal_wrapper, operation, gfalDict, srmRequestID, timeout_sendreceive )
  end = time.time()
  oDataOperation.setEndTime()
  oDataOperation.setValueByKey( 'TransferTime', end - start )
  if not res['OK']:
    oDataOperation.setValueByKey( 'TransferOK', 0 )
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
    res['AccountingOperation'] = oDataOperation
    return res
  res = res['Value']
  if not res['OK']:
    oDataOperation.setValueByKey( 'TransferOK', 0 )
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
  # bug fix: the successful result was previously dropped — the function fell
  # off the end and implicitly returned None. Attach the accounting record
  # and return the gfal result in all cases.
  res['AccountingOperation'] = oDataOperation
  return res
def __gfal_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
  """ execute gfal :operation:

  1. create gfalObject from gfalDict
  2. set srmRequestID
  3. call __gfal_exec
  4. get gfal ids
  5. get gfal results
  6. destroy gfal object

  :param self: self reference
  :param str operation: fcn to call
  :param dict gfalDict: gfal config dict
  :param srmRequestID: srm request id
  :param int timeout_sendrecieve: timeout for gfal send request and recieve results in seconds
  :return: S_OK( list of result dicts, each tagged with 'SRMReqID' ) or S_ERROR
  """
  gfalObject = self.__create_gfal_object( gfalDict )
  if not gfalObject["OK"]:
    return gfalObject
  gfalObject = gfalObject['Value']
  if srmRequestID:
    res = self.__gfal_set_ids( gfalObject, srmRequestID )
    if not res['OK']:
      return res
  res = self.__gfal_exec( gfalObject, operation, timeout_sendreceive )
  if not res['OK']:
    return res
  gfalObject = res['Value']
  res = self.__gfal_get_ids( gfalObject )
  if not res['OK']:
    # keep the incoming request ID when no new token was returned
    newSRMRequestID = srmRequestID
  else:
    newSRMRequestID = res['Value']
  res = self.__get_results( gfalObject )
  if not res['OK']:
    return res
  resultList = []
  pfnRes = res['Value']
  # tag every per-file result with the SRM request it belongs to
  for myDict in pfnRes:
    myDict['SRMReqID'] = newSRMRequestID
    resultList.append( myDict )
  self.__destroy_gfal_object( gfalObject )
  return S_OK( resultList )
@staticmethod
def __initialiseAccountingObject( operation, se, files ):
  """ create DataOperation accounting object

  :param str operation: operation performed
  :param str se: destination SE name
  :param int files: nb of files
  :return: DataOperation record pre-filled with an optimistic 'Successful'
           status; the caller downgrades it on failure
  """
  import DIRAC
  accountingDict = {}
  accountingDict['OperationType'] = operation
  result = getProxyInfo()
  if not result['OK']:
    # no proxy available: attribute the operation to the system account
    userName = 'system'
  else:
    userName = result['Value'].get( 'username', 'unknown' )
  accountingDict['User'] = userName
  accountingDict['Protocol'] = 'gfal'
  accountingDict['RegistrationTime'] = 0.0
  accountingDict['RegistrationOK'] = 0
  accountingDict['RegistrationTotal'] = 0
  accountingDict['Destination'] = se
  accountingDict['TransferTotal'] = files
  accountingDict['TransferOK'] = files
  # NOTE(review): TransferSize is filled with the file COUNT, not bytes —
  # confirm this is intended before relying on the accounting figures
  accountingDict['TransferSize'] = files
  accountingDict['TransferTime'] = 0.0
  accountingDict['FinalStatus'] = 'Successful'
  accountingDict['Source'] = DIRAC.siteName()
  oDataOperation = DataOperation()
  oDataOperation.setValuesFromDict( accountingDict )
  return oDataOperation
#######################################################################
#
# The following methods provide the interaction with gfal functionality
#
#######################################################################
def __create_gfal_object( self, gfalDict ):
  """ create gfal object by calling gfal.gfal_init

  :param self: self reference
  :param dict gfalDict: gfal params dict
  :return: S_OK( gfalObject ) or S_ERROR with the gfal error message
  """
  self.log.debug( "SRM2Storage.__create_gfal_object: Performing gfal_init." )
  errCode, gfalObject, errMessage = self.gfal.gfal_init( gfalDict )
  if errCode == 0:
    self.log.debug( "SRM2Storage.__create_gfal_object: Successfully performed gfal_init." )
    return S_OK( gfalObject )
  errStr = "SRM2Storage.__create_gfal_object: Failed to perform gfal_init."
  if not errMessage:
    # gfal gave no message: translate its errno instead
    errMessage = os.strerror( self.gfal.gfal_get_errno() )
  self.log.error( errStr, errMessage )
  return S_ERROR( "%s%s" % ( errStr, errMessage ) )
def __gfal_set_ids( self, gfalObject, srmRequestID ):
  """ set :srmRequestID:

  :param self: self reference
  :param gfalObject: gfal object
  :param str srmRequestID: srm request id
  :return: S_OK( gfalObject ) or S_ERROR with the gfal error message
  """
  self.log.debug( "SRM2Storage.__gfal_set_ids: Performing gfal_set_ids." )
  errCode, gfalObject, errMessage = self.gfal.gfal_set_ids( gfalObject, None, 0, str( srmRequestID ) )
  if errCode == 0:
    self.log.debug( "SRM2Storage.__gfal_set_ids: Successfully performed gfal_set_ids." )
    return S_OK( gfalObject )
  errStr = "SRM2Storage.__gfal_set_ids: Failed to perform gfal_set_ids."
  if not errMessage:
    # gfal gave no message: translate the returned error code instead
    errMessage = os.strerror( errCode )
  self.log.error( errStr, errMessage )
  return S_ERROR( "%s%s" % ( errStr, errMessage ) )
def __gfal_exec( self, gfalObject, method, timeout_sendreceive = None ):
  """ Invoke gfal :method: on :gfalObject: with retries.

  In gfal, for every method (synchronous or asynchronous), you can define a sendreceive timeout and a connect timeout.
  The connect timeout sets the maximum amount of time a client accepts to wait before establishing a successful TCP
  connection to SRM (default 60 seconds).
  The sendreceive timeout, allows a client to set the maximum time the send
  of a request to SRM can take (normally all send operations return immediately unless there is no free TCP buffer)
  and the maximum time to receive a reply (a token for example). Default 0, i.e. no timeout.
  The srm timeout for asynchronous requests default to 3600 seconds

  gfal_set_timeout_connect (int value)
  gfal_set_timeout_sendreceive (int value)
  gfal_set_timeout_bdii (int value)
  gfal_set_timeout_srm (int value)
  """
  self.log.debug( "SRM2Storage.__gfal_exec(%s): Starting" % method )
  # resolve the named gfal function, failing cleanly if it does not exist
  fcn = None
  if hasattr( self.gfal, method ) and callable( getattr( self.gfal, method ) ):
    fcn = getattr( self.gfal, method )
  if not fcn:
    return S_ERROR( "Unable to invoke %s for gfal, it isn't a member function" % method )
  # # retry
  retry = self.gfalRetry if self.gfalRetry else 1
  # # initial timeout
  timeout = timeout_sendreceive if timeout_sendreceive else self.gfalTimeout
  # # errCode, errMessage, errNo
  errCode, errMessage, errNo = 0, "", 0
  for _i in range( retry ):
    self.gfal.gfal_set_timeout_sendreceive( timeout )
    errCode, gfalObject, errMessage = fcn( gfalObject )
    if not errCode:
      # success: stop retrying
      break
    errNo = self.gfal.gfal_get_errno()
    if errCode == -1 and errNo == errno.ECOMM:
      # communication timeout: double the timeout before the next attempt
      timeout *= 2
      self.log.debug( "SRM2Storage.__gfal_exec(%s): got ECOMM, extending timeout to %s s" % ( method, timeout ) )
  if errCode:
    errStr = "SRM2Storage.__gfal_exec(%s): Execution failed." % method
    if not errMessage:
      errMessage = os.strerror( errNo ) if errNo else "UNKNOWN ERROR"
    self.log.error( errStr, errMessage )
    return S_ERROR( "%s %s" % ( errStr, errMessage ) )
  self.log.debug( "SRM2Storage.__gfal_exec(%s): Successfully invoked." % method )
  return S_OK( gfalObject )
def __get_results( self, gfalObject ):
""" retrive gfal results
:param self: self reference
:param gfalObject: gfal object
"""
self.log.debug( "SRM2Storage.__get_results: Performing gfal_get_results" )
numberOfResults, gfalObject, listOfResults = self.gfal.gfal_get_results( gfalObject )
if numberOfResults <= 0:
errStr = "SRM2Storage.__get_results: Did not obtain results with gfal_get_results."
self.log.error( errStr )
return S_ERROR( errStr )
else:
self.log.debug( "SRM2Storage.__get_results: Retrieved %s results from gfal_get_results." % numberOfResults )
for result in listOfResults:
if result['status'] != 0:
if result['explanation']:
errMessage = result['explanation']
elif result['status'] > 0:
errMessage = os.strerror( result['status'] )
result['ErrorMessage'] = errMessage
return S_OK( listOfResults )
def __gfal_get_ids( self, gfalObject ):
""" get srmRequestToken
:param self: self reference
:param gfalObject: gfalObject
"""
self.log.debug( "SRM2Storage.__gfal_get_ids: Performing gfal_get_ids." )
numberOfResults, gfalObject, _srm1RequestID, _srm1FileIDs, srmRequestToken = self.gfal.gfal_get_ids( gfalObject )
if numberOfResults <= 0:
errStr = "SRM2Storage.__gfal_get_ids: Did not obtain SRM request ID."
self.log.error( errStr )
return S_ERROR( errStr )
else:
self.log.debug( "SRM2Storage.__get_gfal_ids: Retrieved SRM request ID %s." % srmRequestToken )
return S_OK( srmRequestToken )
  def __destroy_gfal_object( self, gfalObject ):
    """ del gfal object by calling gfal.gfal_internal_free

    Frees the native memory held by :gfalObject:; the Python reference
    becomes invalid afterwards.

    :param self: self reference
    :param gfalObject: gfalObject
    :return: S_OK() (gfal_internal_free's return value is not checked)
    """
    self.log.debug( "SRM2Storage.__destroy_gfal_object: Performing gfal_internal_free." )
    self.gfal.gfal_internal_free( gfalObject )
    return S_OK()
| calancha/DIRAC | Resources/Storage/SRM2Storage.py | Python | gpl-3.0 | 88,248 | [
"DIRAC"
] | 17806bf333c6f6c68590e0ecbd14cf9a6c961804b465328f504419306e0c3a02 |
import sys
from setuptools import setup, find_packages
from Cython.Build import cythonize
import numpy as np
from buildhelpers.shaders import build_c_shaders
# ---- C/C++ EXTENSIONS ---- #
# Cython sources compiled into native extension modules for menpo.
cython_modules = ["menpo/geodesics/kirsanov.pyx",
                  "menpo/shape/mesh/cpptrimesh.pyx",
                  "menpo/shape/mesh/normals.pyx",
                  "menpo/interpolation/cinterp.pyx",
                  "menpo/transform/fastpwa.pyx",
                  "menpo/features/cppimagewindowiterator.pyx"]
cython_exts = cythonize(cython_modules, nthreads=2, quiet=True)
# ---- OPENGL C EXTENSIONS ---- #
# first, convert the plain text shaders into C string literals
build_c_shaders()
opengl_c_cython_modules = ["menpo/rasterize/copengl.pyx"]
opengl_c_exts = cythonize(opengl_c_cython_modules, nthreads=2, quiet=True)
# unfortunately, OpenGL is just different on OS X/Linux
# (different system libraries must be linked per platform)
if sys.platform.startswith('linux'):
    for c_ext in opengl_c_exts:
        c_ext.libraries += ['GL', 'GLU', 'glfw']
elif sys.platform == 'darwin':
    for c_ext in opengl_c_exts:
        c_ext.libraries += ['glfw3']
        # TODO why does it compile without these on OS X?!
        #c_ext.extra_compile_args += ['-framework OpenGL',
        #                             '-framework Cocoa', '-framework IOKit',
        #                             '-framework CoreVideo']
setup(name='menpo',
      version='0.2',
      description='iBUG Facial Modelling Toolkit',
      author='James Booth',
      author_email='james.booth08@imperial.ac.uk',
      include_dirs=[np.get_include()],
      ext_modules=cython_exts + opengl_c_exts,
      packages=find_packages(),
      install_requires=[# Core
                        'numpy>=1.8.0',
                        'scipy>=0.12.0',
                        'Cython>=0.20.1', # req on OS X Mavericks
                        # Image
                        'Pillow>=2.0.0',
                        'scikit-image>=0.8.2',
                        # 3D import
                        'menpo-pyvrml97==2.3.0a4',
                        'cyassimp>=0.1.2',
                        # Visualization
                        'matplotlib>=1.2.1',
                        # Need to decide if this is really needed
                        'decorator>=3.4.0',
                        # Docs and testing
                        'Sphinx>=1.2b1',
                        'numpydoc>=0.4',
                        'nose>=1.3.0'],
      extras_require={'3d': 'mayavi>=4.3.0'}
      )
| ikassi/menpo | setup.py | Python | bsd-3-clause | 2,496 | [
"Mayavi"
] | 9ac8e0f12413d2062e94faa92c1a0043d7de5782e7f11a8d677c13ff8dc44dc9 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
This file is part of Arcte.
Arcte is software that simplifies the creation of 3D printable atomic structures,
with added features for the blind.
Copyright (C) 2015 Jesse Smith and contributors
Arcte is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Arcte is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Arcte. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
from __future__ import print_function
from qt4 import *
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import functools as ft
import sys
import time
from vtk import *
from random import random
import signal
import six
import packaging
import packaging.requirements
import packaging.version
import packaging.specifiers
from modules.containers import *
from modules.exportdialog import *
from modules.openstructdialog import *
from modules.addstructdialog import *
from modules.editdialog import *
from modules.brailledialogs import *
from modules.aboutdialog import *
def setPaths():
    '''
    Populate the global directory paths used throughout the application:

    main_dir  -- directory containing the running script
    group_dir -- directory holding the .pos structure files
    icons_dir -- directory holding icon images
    '''
    global main_dir, group_dir, icons_dir
    script = os.path.realpath(sys.argv[0])
    main_dir = os.path.dirname(script)
    group_dir = '%s/structures/' % main_dir
    icons_dir = '%s/icons/' % main_dir
def setRadii():
    '''
    Import atomic radii from radii.csv and create a dictionary for
    all atomic species. Creates global dictionary "radii", keyed by
    lower-cased species symbol (values are strings as read from the csv).

    Requires setPaths() to have been called first (uses global main_dir).
    '''
    os.chdir(main_dir)
    global radii
    radii = {}
    # BUGFIX: use a context manager so the csv file handle is closed
    # instead of leaked (the bare open() was never closed).
    with open('radii.csv') as radii_file:
        radiiRaw = n.loadtxt(radii_file, dtype='string', delimiter=',')
    for row in radiiRaw[:, :]:
        radii[row[0].lower()] = row[1]
def SIGINT_handler(signum, frame):
    '''
    Handle SIGINT (Ctrl-C) by closing every application window.

    The first parameter was renamed from ``signal`` to ``signum`` so it no
    longer shadows the stdlib ``signal`` module; signal handlers are always
    invoked positionally, so this is backward-compatible.
    '''
    print("Signal interrupt captured, closing...")
    qApp.closeAllWindows()
class ArcteApp(QApplication):
    """ Custom QApplication class that installs the SIGINT handler defined
    above, so Ctrl-C in the terminal closes all windows. """
    def __init__(self, *args, **kwargs):
        super(ArcteApp, self).__init__(*args, **kwargs)
        # Route SIGINT to SIGINT_handler (which calls qApp.closeAllWindows()).
        signal.signal(signal.SIGINT, SIGINT_handler)
class mainWindow(QMainWindow):
    '''
    The main window: hosts the VTK render view plus docks for model
    editing, structure information and structure selection.
    '''
    # Display/tessellation defaults for the 3D model.
    atomScale = 1.0
    thetaRes = 20
    phiRes = 20
    bondRes = 15
    bondRadius = 0.3
    # NOTE(review): these are class-level mutable containers, shared by every
    # instance of mainWindow. Fine for a single-window app, but they should
    # arguably be created per-instance in __init__ — confirm before reuse.
    tatoms = []
    curModelView = 'surfaceEdge'
    bonds = []
    atoms = []
    def __init__(self, parent=None):
        '''
        Build the whole main window: actions, status/menu/tool bars, the
        information panel, the VTK render view, the model editing panel,
        the selection dock — then connect all signals and show the window.
        '''
        super(mainWindow, self).__init__(parent)
        self.setWindowTitle('Arcte')
        self.setWindowIcon(QIcon(icons_dir+'appicon.png'))
        self.resize(1400, 900)
        # ------------------- Actions ----------------------------------
        # General
        self.exitAction = QAction(QIcon(icons_dir + 'dark/closeIcon.png'), 'Exit', self)
        # self.addStructAction = QAction(QIcon(icons_dir + 'dark/addStructIcon.png'), '&Create new structure', self)
        self.addStructAction = QAction(QIcon(icons_dir + 'new.png'), 'New structure', self)
        self.saveStructAction = QAction(QIcon(icons_dir + 'save.png'), 'Save as .ustr', self)
        self.aboutAction = QAction(QIcon(icons_dir + 'dark/aboutIcon.png'), 'About', self)
        self.readmeAction = QAction("Readme", self)
        self.loadStructAction = QAction(QIcon(icons_dir + 'open.png'), 'Open .ustr', self)
        self.addBrailleAction = QAction(QIcon(icons_dir + '/addbraille.png'), "Add Braille", self)
        self.delBrailleAction = QAction(QIcon(icons_dir + '/delbraille.png'), "Remove Braille", self)
        self.editStructAction = QAction('Edit structure', self)
        self.exportSTL = QAction("Stereo-Lithography (STL)", self)
        # self.exportOBJ = QAction("Wavefront (OBJ)",self)
        # self.exportVRML = QAction("Virtual Reality Modeling Language (VRML)",self)
        # Viewing options (the four representation actions act as an
        # exclusive group, kept in sync manually in the *View slots)
        self.cameraResetAction = QAction(QIcon(icons_dir + 'dark/resetCameraIcon.png'), 'Reset camera', self)
        self.wireframeAction = QAction(QIcon(icons_dir + 'dark/wireframeViewIcon.png'), 'Wireframe', self)
        self.wireframeAction.setCheckable(True)
        self.surfaceAction = QAction(QIcon(icons_dir + 'dark/surfaceViewIcon.png'), 'Shaded', self)
        self.surfaceAction.setCheckable(True)
        self.surfaceEdgeAction = QAction(QIcon(icons_dir + 'dark/surfaceEdgeViewIcon.png'), 'Shaded with edges', self)
        self.surfaceEdgeAction.setCheckable(True)
        self.surfaceEdgeAction.setChecked(True)
        self.pointsAction = QAction(QIcon(icons_dir + 'dark/pointsViewIcon.png'), 'Points', self)
        self.pointsAction.setCheckable(True)
        self.aaAction = QAction(QIcon(icons_dir + 'dark/aaViewIcon.png'), 'Anti-aliasing', self)
        self.aaAction.setCheckable(True)
        self.showModelEditDock = QAction(QIcon(icons_dir + 'modify.png'), "Show model editing dock", self)
        self.showInfoDock = QAction(QIcon(icons_dir + 'info.png'), "Show information dock", self)
        self.showSelectionDock = QAction(QIcon(icons_dir + 'select.png'), "Show selection dock", self)
        # Customization
        self.darkIconsAction = QAction('Dark Icons', self)
        self.lightIconsAction = QAction('Light Icons', self)
        # ------------------- Status Bar ---------------------------------
        statusbar = self.statusBar()
        # ------------------- Menubar --------------------------------
        menubar = self.menuBar()
        # File menu
        self.filemenu = menubar.addMenu('&File')
        self.filemenu.addActions((self.addStructAction,
                                  self.loadStructAction,
                                  self.saveStructAction))
        # File -> Export menu
        self.exportMenu = self.filemenu.addMenu("Export")
        # self.exportMenu.addActions((self.exportSTL,
        #                             self.exportOBJ,
        #                             self.exportVRML))
        self.exportMenu.addAction(self.exportSTL)
        self.filemenu.addAction(self.exitAction)
        # View menu
        self.viewMenu = menubar.addMenu("&View")
        self.viewMenu.addActions((self.cameraResetAction,
                                  self.wireframeAction,
                                  self.surfaceAction,
                                  self.surfaceEdgeAction,
                                  self.pointsAction))
        self.viewMenu.addSeparator()
        self.viewMenu.addActions((self.showModelEditDock,
                                  self.showInfoDock,
                                  self.showSelectionDock))
        # Structure menu
        self.structuremenu = menubar.addMenu('&Structure')
        self.structuremenu.addAction(self.editStructAction)
        # Help menu
        self.helpmenu = menubar.addMenu('&Help')
        self.helpmenu.addActions((self.readmeAction, self.aboutAction))
        # ------------------- Toolbar ---------------------------------
        self.toolbar = QToolBar('Tools')
        self.toolbar.setMovable(True)
        self.toolbar.setIconSize(QSize(30, 30))
        self.toolbar.addActions((self.loadStructAction,
                                 self.saveStructAction,
                                 self.addStructAction))
        self.toolbar.addSeparator()
        # Viewing controls
        self.toolbar.addActions((self.aaAction,
                                 self.cameraResetAction,
                                 self.surfaceAction,
                                 self.surfaceEdgeAction,
                                 self.wireframeAction,
                                 self.pointsAction))
        self.addToolBar(Qt.TopToolBarArea, self.toolbar)
        # ------------------- Information list --------------------------
        # Labels are filled in by structChangedBase/structChangedUser.
        self.filename = QLabel('')
        self.natoms = QLabel('')
        self.composition = QLabel('')
        self.hermMaug = QLabel('')
        self.schoenflies = QLabel('')
        self.pearson = QLabel('')
        self.strukt = QLabel('')
        self.number = QLabel('')
        self.bravais = QLabel('')
        self.crystalsystem = QLabel('')
        self.groupBasic = QGroupBox('Basic Information')
        self.groupStructural = QGroupBox('Structural Information')
        self.infoLayoutV = QVBoxLayout()
        self.infoLayoutBasicF = QFormLayout()
        self.infoLayoutStructF = QFormLayout()
        self.groupBasic.setLayout(self.infoLayoutBasicF)
        self.groupStructural.setLayout(self.infoLayoutStructF)
        self.infoLayoutV.addWidget(self.groupBasic)
        self.infoLayoutV.addWidget(self.groupStructural)
        self.infoLayoutV.addStretch()
        self.infoLayoutBasicF.addRow(QLabel('Filename:'), self.filename)
        self.infoLayoutBasicF.addRow(QLabel('Composition:'), self.composition)
        self.infoLayoutBasicF.addRow(QLabel('Number of Atoms:'), self.natoms)
        self.infoLayoutStructF.addRow(QLabel('Crystal System:'), self.crystalsystem)
        self.infoLayoutStructF.addRow(QLabel('Bravais Lattice:'), self.bravais)
        self.infoLayoutStructF.addRow(QLabel('Space Group:'), self.number)
        self.infoLayoutStructF.addRow(QLabel('Herman-Maugin:'), self.hermMaug)
        self.infoLayoutStructF.addRow(QLabel('Schoenflies:'), self.schoenflies)
        self.infoLayoutStructF.addRow(QLabel('Pearson Symbol:'), self.pearson)
        self.infoLayoutStructF.addRow(QLabel('Strukturbericht:'), self.strukt)
        # ------------------- Model Viewer -----------------------------------------
        # Setup widget for QVTK
        self.frame = QFrame()
        self.frame.setLineWidth(0)
        self.frame.setFrameStyle(QFrame.NoFrame)
        # Create the QVTKRenderWindowInteractor
        self.vl = QVBoxLayout()
        self.vtkWidget = QVTKRenderWindowInteractor(self.frame)
        self.vl.addWidget(self.vtkWidget)
        self.vl.setContentsMargins(0, 0, 0, 0)
        # Create the vtkRenderer and add it to the render window
        self.ren = vtkRenderer()
        self.ren.GradientBackgroundOn()
        self.ren.SetBackground(.4, .4, .4)
        self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
        self.iren.SetInteractorStyle(vtkInteractorStyleTrackballCamera())
        # Create x/y/z axes (orientation marker in the lower-right corner)
        self.axes = vtkAxesActor()
        self.axesWidget = vtkOrientationMarkerWidget()
        self.axesWidget.SetOutlineColor(0.93, 0.57, 0.13)
        self.axesWidget.SetOrientationMarker(self.axes)
        self.axesWidget.SetInteractor(self.iren)
        self.axesWidget.SetViewport(0.8, 0.0, 1.0, 0.2)
        self.axesWidget.SetEnabled(1)
        self.axesWidget.InteractiveOff()
        # Finish the vtk display
        self.ren.ResetCamera()
        self.frame.setLayout(self.vl)
        self.setCentralWidget(self.frame)
        self.iren.Initialize()
        self.iren.Start()
        self.renText = []
        # ------------------- Model Editing Panel ----------------------------------
        # Initialization
        self.modelEditWidget = QFrame(self)
        self.modelEditWidget.setFrameShape(QFrame.StyledPanel)
        self.modelEditLayout = QVBoxLayout()
        # model resolution (controls start disabled until a structure is loaded)
        self.resolutionGroup = QGroupBox('Model Resolution', self)
        self.resolutionLayout = QFormLayout()
        self.modelThetaResLabel = QLabel('Theta Resolution', enabled=False)
        self.modelThetaRes = QSpinBox(self, enabled=False)
        self.modelThetaRes.setValue(self.thetaRes)
        self.modelPhiResLabel = QLabel('Phi Resolution', enabled=False)
        self.modelPhiRes = QSpinBox(self, enabled=False)
        self.modelPhiRes.setValue(self.phiRes)
        self.modelBondResLabel = QLabel('Bond Resolution', enabled=False)
        self.modelBondRes = QSpinBox(self, enabled=False)
        self.modelBondRes.setValue(self.bondRes)
        self.resolutionLayout.addRow(self.modelThetaResLabel, self.modelThetaRes)
        self.resolutionLayout.addRow(self.modelPhiResLabel, self.modelPhiRes)
        self.resolutionLayout.addRow(self.modelBondResLabel, self.modelBondRes)
        self.resolutionGroup.setLayout(self.resolutionLayout)
        # Atom Props (up to 5)
        self.radiusGroup = QGroupBox('Atomic Visualization')
        self.modelRadiusLayout = QFormLayout()
        self.radiusGroup.setLayout(self.modelRadiusLayout)
        self.speciesRadLab1 = QLabel('Species #1', enabled=False)
        self.speciesRadLab2 = QLabel('Species #2', enabled=False)
        self.speciesRadLab3 = QLabel('Species #3', enabled=False)
        self.speciesRadLab4 = QLabel('Species #4', enabled=False)
        self.speciesRadLab5 = QLabel('Species #5', enabled=False)
        self.speciesLabels = [self.speciesRadLab1, self.speciesRadLab2, self.speciesRadLab3,
                              self.speciesRadLab4, self.speciesRadLab5]
        self.speciesRadSpin1 = QDoubleSpinBox(self, enabled=False, singleStep=5)
        self.speciesRadSpin1.setRange(0, 5000)
        self.speciesRadSpin2 = QDoubleSpinBox(self, enabled=False, singleStep=5)
        self.speciesRadSpin2.setRange(0, 5000)
        self.speciesRadSpin3 = QDoubleSpinBox(self, enabled=False, singleStep=5)
        self.speciesRadSpin3.setRange(0, 5000)
        self.speciesRadSpin4 = QDoubleSpinBox(self, enabled=False, singleStep=5)
        self.speciesRadSpin4.setRange(0, 5000)
        self.speciesRadSpin5 = QDoubleSpinBox(self, enabled=False, singleStep=5)
        self.speciesRadSpin5.setRange(0, 5000)
        self.speciesSpins = [self.speciesRadSpin1, self.speciesRadSpin2, self.speciesRadSpin3,
                             self.speciesRadSpin4, self.speciesRadSpin5]
        self.speciesRadCol1 = QToolButton(self, enabled=False)
        self.speciesRadCol1.setIcon(QIcon(icons_dir + 'dark/colorIcon.png'))
        self.speciesRadCol2 = QToolButton(self, enabled=False)
        self.speciesRadCol2.setIcon(QIcon(icons_dir + 'dark/colorIcon.png'))
        self.speciesRadCol3 = QToolButton(self, enabled=False)
        self.speciesRadCol3.setIcon(QIcon(icons_dir + 'dark/colorIcon.png'))
        self.speciesRadCol4 = QToolButton(self, enabled=False)
        self.speciesRadCol4.setIcon(QIcon(icons_dir + 'dark/colorIcon.png'))
        self.speciesRadCol5 = QToolButton(self, enabled=False)
        self.speciesRadCol5.setIcon(QIcon(icons_dir + 'dark/colorIcon.png'))
        self.speciesBtns = [self.speciesRadCol1, self.speciesRadCol2, self.speciesRadCol3,
                            self.speciesRadCol4, self.speciesRadCol5]
        self.allSpecies = [self.speciesLabels, self.speciesSpins, self.speciesBtns]
        self.speciesLayout1 = QHBoxLayout()
        self.speciesLayout1.addWidget(self.speciesRadSpin1)
        self.speciesLayout1.addWidget(self.speciesRadCol1)
        self.speciesLayout2 = QHBoxLayout()
        self.speciesLayout2.addWidget(self.speciesRadSpin2)
        self.speciesLayout2.addWidget(self.speciesRadCol2)
        self.speciesLayout3 = QHBoxLayout()
        self.speciesLayout3.addWidget(self.speciesRadSpin3)
        self.speciesLayout3.addWidget(self.speciesRadCol3)
        self.speciesLayout4 = QHBoxLayout()
        self.speciesLayout4.addWidget(self.speciesRadSpin4)
        self.speciesLayout4.addWidget(self.speciesRadCol4)
        self.speciesLayout5 = QHBoxLayout()
        self.speciesLayout5.addWidget(self.speciesRadSpin5)
        self.speciesLayout5.addWidget(self.speciesRadCol5)
        self.modelRadiusLayout.addRow(self.speciesRadLab1, self.speciesLayout1)
        self.modelRadiusLayout.addRow(self.speciesRadLab2, self.speciesLayout2)
        self.modelRadiusLayout.addRow(self.speciesRadLab3, self.speciesLayout3)
        self.modelRadiusLayout.addRow(self.speciesRadLab4, self.speciesLayout4)
        self.modelRadiusLayout.addRow(self.speciesRadLab5, self.speciesLayout5)
        # Bonds
        self.bondsGroup = QGroupBox('Bonding Visualization')
        self.threshSpin = QDoubleSpinBox(self, singleStep=10, value=300., maximum=10000.)
        self.bondRadSpin = QDoubleSpinBox(self, singleStep=1, value=30, maximum=5000.)
        self.modelBondsLayout = QFormLayout()
        self.modelBondsLayout.addRow('Threshold (pm):', self.threshSpin)
        self.modelBondsLayout.addRow('Bond Radius (pm):', self.bondRadSpin)
        self.bondsGroup.setLayout(self.modelBondsLayout)
        self.modelEditLayout.addWidget(self.resolutionGroup)
        self.modelEditLayout.addWidget(self.radiusGroup)
        self.modelEditLayout.addWidget(self.bondsGroup)
        self.modelEditWidget.setLayout(self.modelEditLayout)
        # Braille Group
        self.brailleGroup = QGroupBox("Braille")
        self.brailleList = QListWidget()
        self.brailleBar = QToolBar(self)
        self.brailleBar.addActions((self.addBrailleAction, self.delBrailleAction))
        self.brailleLayout = QVBoxLayout()
        self.brailleLayout.addWidget(self.brailleBar)
        self.brailleLayout.addWidget(self.brailleList)
        self.brailleGroup.setLayout(self.brailleLayout)
        self.modelEditLayout.addWidget(self.brailleGroup)
        self.modelEditLayout.addStretch()
        # ------------------- Selection Dock ---------------------------------------
        self.listDockWidget = QTabWidget(self)
        # User models tab
        self.createBaseDock()
        self.createUserDock()
        self.listDockWidget.addTab(self.listDockBase, "Pre-Defined")
        self.listDockWidget.addTab(self.listDockUser, "Custom Defined")
        # ------------------- Finalize ---------------------------------------------
        # Info Dock
        self.rightPanel = QVBoxLayout()
        self.rightPanel.setContentsMargins(0, 0, 0, 0)
        self.rightPanel.addLayout(self.infoLayoutV)
        self.rightWidget = QWidget(self)
        self.rightWidget.setLayout(self.rightPanel)
        self.infoDock = QDockWidget("Information", self)
        self.infoDock.setWidget(self.rightWidget)
        # Model Edit Dock
        self.modelDock = QDockWidget("Model Configuration", self)
        self.modelDock.setWidget(self.modelEditWidget)
        # List Dock
        self.listDockWidget.setMinimumWidth(310)
        self.listDockWidget.setMinimumHeight(500)
        self.listDock = QDockWidget('Structures', self)
        self.listDock.setWidget(self.listDockWidget)
        # Create Panels
        self.addDockWidget(Qt.DockWidgetArea(Qt.LeftDockWidgetArea), self.modelDock)
        self.addDockWidget(Qt.DockWidgetArea(Qt.RightDockWidgetArea), self.infoDock)
        self.addDockWidget(Qt.DockWidgetArea(Qt.RightDockWidgetArea), self.listDock)
        self.setCentralWidget(self.frame)
        # Define Signals
        self.makeSignals()
        self.show()
def makeSignals(self):
''' Connect slots and signals'''
self.listDockBase.clicked[QModelIndex].connect(self.structChangedBase)
self.listDockUser.clicked[QModelIndex].connect(self.structChangedUser)
self.brailleList.clicked[QModelIndex].connect(self.brailleChanged)
self.modelThetaRes.valueChanged.connect(self.thetaResChanged)
self.modelPhiRes.valueChanged.connect(self.phiResChanged)
self.modelBondRes.valueChanged.connect(self.bondResChanged)
self.speciesSpins[0].valueChanged.connect(self.changeRadius1)
self.speciesSpins[1].valueChanged.connect(self.changeRadius2)
self.speciesSpins[2].valueChanged.connect(self.changeRadius3)
self.speciesSpins[3].valueChanged.connect(self.changeRadius4)
self.speciesSpins[4].valueChanged.connect(self.changeRadius5)
self.speciesBtns[0].pressed.connect(self.pickColor1)
self.speciesBtns[1].pressed.connect(self.pickColor2)
self.speciesBtns[2].pressed.connect(self.pickColor3)
self.speciesBtns[3].pressed.connect(self.pickColor4)
self.speciesBtns[4].pressed.connect(self.pickColor5)
self.threshSpin.valueChanged.connect(self.changeThreshold)
self.bondRadSpin.valueChanged.connect(self.changeBondRadius)
self.addBrailleAction.triggered.connect(self.addBraille)
self.delBrailleAction.triggered.connect(self.delBraille)
# Action signals
self.cameraResetAction.triggered.connect(self.cameraResetActivated)
self.exitAction.triggered.connect(self.close)
self.addStructAction.triggered.connect(self.addStructActivated)
self.loadStructAction.triggered.connect(self.loadStructActivated)
self.wireframeAction.triggered.connect(self.wireframeView)
self.surfaceAction.triggered.connect(self.surfaceView)
self.surfaceEdgeAction.triggered.connect(self.surfaceEdgeView)
self.pointsAction.triggered.connect(self.pointsView)
self.aaAction.triggered.connect(self.aaView)
self.editStructAction.triggered.connect(self.editStructActivated)
self.saveStructAction.triggered.connect(self.saveUstrActivated)
self.exportSTL.triggered.connect(ft.partial(self.exportFile, fileType="stl"))
# self.exportOBJ.triggered.connect(ft.partial(self.exportFile,fileType="obj"))
# self.exportVRML.triggered.connect(ft.partial(self.exportFile,fileType="vrml"))
self.showModelEditDock.triggered.connect(self.showModelEditAct)
self.showInfoDock.triggered.connect(self.showInfoAct)
self.showSelectionDock.triggered.connect(self.showSelectionAct)
self.lightIconsAction.triggered.connect(self.lightIcons)
self.aboutAction.triggered.connect(self.about)
def structChangedBase(self, index):
'''
Called when an item in self.listDockBase is selected.
'''
if type(index != int):
item = self.listDockBase.itemFromIndex(index)
baseNames_name = self.listDockBase.item(item.row(), 0).text()
for i in range(len(allStructs.baseStructs.keys())):
if baseNames_name == allStructs.baseStructs.keys()[i]:
index = i
break
# ------------------- Set information text ------------------------------------
index = index
if index == -1:
self.filename.setText('')
self.natoms.setText('')
self.hermMaug.setText('')
self.schoenflies.setText('')
self.pearson.setText('')
self.strukt.setText('')
self.number.setText('')
self.bravais.setText('')
self.crystalsystem.setText('')
elif index != -1:
self.filename.setText(str(allStructs.baseStructs.keys()[index] + '.pos'))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].natoms == 0:
self.natoms.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].natoms != 0:
self.natoms.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].natoms))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].composition == '':
self.composition.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].composition != 0:
self.composition.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].composition))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].herm_maug == 0:
self.hermMaug.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].herm_maug != 0:
self.hermMaug.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].herm_maug))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].schoenflies == 0:
self.schoenflies.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].schoenflies != 0:
self.schoenflies.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].schoenflies))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].pearson == 0:
self.pearson.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].pearson != 0:
self.pearson.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].pearson))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].strukt == 0:
self.strukt.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].strukt != 0:
self.strukt.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].strukt))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].number == 0:
self.number.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].number != 0:
self.number.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].number))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].bravais == 0:
self.bravais.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].bravais != 0:
self.bravais.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].bravais))
if allStructs.baseStructs[allStructs.baseStructs.keys()[index]].system == 0:
self.crystalsystem.setText('')
elif allStructs.baseStructs[allStructs.baseStructs.keys()[index]].system != 0:
self.crystalsystem.setText(str(allStructs.baseStructs[allStructs.baseStructs.keys()[index]].system))
allStructs.setActiveList(index, "Base")
allStructs.clearBraille()
self.brailleList.clear()
self.resetScene()
# Edit controls changes
self.resetDefaults()
for i in range(len(self.tatoms)):
self.speciesLabels[i].setEnabled(True)
self.speciesSpins[i].setEnabled(True)
rad = float(radii[str(list(self.tatoms)[i]).lower()])
if rad:
self.speciesSpins[i].setValue(rad)
else:
self.speciesSpins[i].setValue(100.)
self.speciesBtns[i].setEnabled(True)
self.speciesLabels[i].setText(list(self.tatoms)[i] + ' radius (pm)')
self.modelThetaResLabel.setEnabled(True)
self.modelThetaRes.setEnabled(True)
self.modelPhiResLabel.setEnabled(True)
self.modelPhiRes.setEnabled(True)
self.modelBondResLabel.setEnabled(True)
self.modelBondRes.setEnabled(True)
def structChangedUser(self, index):
'''
Method called when the active structure from self.listDockUser
'''
if type(index) != int:
item = self.listDockUser.itemFromIndex(index)
userStruct_name = self.listDockUser.item(item.row(), 0).text()
for i in range(len(allStructs.custStructs.keys())):
if userStruct_name == allStructs.custStructs.keys()[i]:
index = i
break
# ------------------- Set information text ------------------------------------
index = index
if index == -1:
self.filename.setText('')
self.natoms.setText('')
self.hermMaug.setText('')
self.schoenflies.setText('')
self.pearson.setText('')
self.strukt.setText('')
self.number.setText('')
self.bravais.setText('')
self.crystalsystem.setText('')
elif index != -1:
self.filename.setText(str(allStructs.custStructs.keys()[index] + '.ustr'))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].natoms == 0:
self.natoms.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].natoms != 0:
self.natoms.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].natoms))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].composition == '':
self.composition.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].composition != 0:
self.composition.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].composition))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].herm_maug == 0:
self.hermMaug.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].herm_maug != 0:
self.hermMaug.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].herm_maug))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].schoenflies == 0:
self.schoenflies.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].schoenflies != 0:
self.schoenflies.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].schoenflies))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].pearson == 0:
self.pearson.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].pearson != 0:
self.pearson.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].pearson))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].strukt == 0:
self.strukt.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].strukt != 0:
self.strukt.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].strukt))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].number == 0:
self.number.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].number != 0:
self.number.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].number))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].bravais == 0:
self.bravais.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].bravais != 0:
self.bravais.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].bravais))
if allStructs.custStructs[allStructs.custStructs.keys()[index]].system == 0:
self.crystalsystem.setText('')
elif allStructs.custStructs[allStructs.custStructs.keys()[index]].system != 0:
self.crystalsystem.setText(str(allStructs.custStructs[allStructs.custStructs.keys()[index]].system))
allStructs.setActiveList(index, "Custom")
allStructs.clearBraille()
self.brailleList.clear()
self.resetScene()
# Edit controls changes
self.resetDefaults()
for i in range(len(self.tatoms)):
self.speciesLabels[i].setEnabled(True)
self.speciesSpins[i].setEnabled(True)
rad = float(radii[str(list(self.tatoms)[i]).lower()])
if rad:
self.speciesSpins[i].setValue(rad)
else:
self.speciesSpins[i].setValue(100.)
self.speciesBtns[i].setEnabled(True)
self.speciesLabels[i].setText(list(self.tatoms)[i].capitalize() + ' radius (pm)')
self.modelThetaResLabel.setEnabled(True)
self.modelThetaRes.setEnabled(True)
self.modelPhiResLabel.setEnabled(True)
self.modelPhiRes.setEnabled(True)
self.modelBondResLabel.setEnabled(True)
self.modelBondRes.setEnabled(True)
    def brailleChanged(self, index):
        ''' Slot for selection changes in the braille list (currently a no-op). '''
        # NOTE(review): the selected row is retrieved but never used —
        # presumably a placeholder for future braille-selection handling;
        # confirm intent before removing.
        row = index.row()
def resetDefaults(self):
''' Reset the GUI elements in the model editing panel '''
for i in range(5):
self.speciesLabels[i].setEnabled(False)
self.speciesLabels[i].setText('Species #' + str(i + 1))
self.speciesSpins[i].setEnabled(False)
self.speciesSpins[i].setValue(0)
self.speciesBtns[i].setEnabled(False)
def cameraResetActivated(self):
''' Reset the camera '''
self.ren.ResetCamera()
self.iren.GetRenderWindow().Render()
def aaView(self):
''' Anti-aliasing '''
if self.aaAction.isChecked():
self.iren.GetRenderWindow().SetAAFrames(3)
else:
self.iren.GetRenderWindow().SetAAFrames(0)
self.iren.GetRenderWindow().Render()
def wireframeView(self):
''' Wireframe view'''
self.surfaceAction.setChecked(False)
self.surfaceEdgeAction.setChecked(False)
self.pointsAction.setChecked(False)
self.wireframeAction.setChecked(True)
self.curModelView = 'wireframe'
for i in self.bonds:
i[0].GetProperty().SetRepresentationToWireframe()
for i in self.atoms:
i[1].GetProperty().SetRepresentationToWireframe()
for i in allStructs.brailleStructs:
[act.GetProperty().SetRepresentationToWireframe() for act in i[1].actors]
self.iren.GetRenderWindow().Render()
def surfaceView(self):
''' Surface shading view'''
self.surfaceEdgeAction.setChecked(False)
self.pointsAction.setChecked(False)
self.wireframeAction.setChecked(False)
self.surfaceAction.setChecked(True)
self.curModelView = 'surface'
for i in self.bonds:
i[0].GetProperty().SetRepresentationToSurface()
i[0].GetProperty().EdgeVisibilityOff()
for i in self.atoms:
i[1].GetProperty().SetRepresentationToSurface()
i[1].GetProperty().EdgeVisibilityOff()
for i in allStructs.brailleStructs:
[act.GetProperty().SetRepresentationToSurface() for act in i[1].actors]
[act.GetProperty().EdgeVisibilityOff() for act in i[1].actors]
self.iren.GetRenderWindow().Render()
def surfaceEdgeView(self):
''' Surface - edge shading view '''
self.surfaceAction.setChecked(False)
self.pointsAction.setChecked(False)
self.wireframeAction.setChecked(False)
self.surfaceEdgeAction.setChecked(True)
self.curModelView = 'surfaceEdge'
for i in self.bonds:
i[0].GetProperty().SetRepresentationToSurface()
i[0].GetProperty().EdgeVisibilityOn()
for i in self.atoms:
i[1].GetProperty().SetRepresentationToSurface()
i[1].GetProperty().EdgeVisibilityOn()
for i in allStructs.brailleStructs:
[act.GetProperty().SetRepresentationToSurface() for act in i[1].actors]
[act.GetProperty().EdgeVisibilityOn() for act in i[1].actors]
self.iren.GetRenderWindow().Render()
def pointsView(self):
''' Vertices view '''
self.surfaceAction.setChecked(False)
self.surfaceEdgeAction.setChecked(False)
self.wireframeAction.setChecked(False)
self.pointsAction.setChecked(True)
self.curModelView = 'points'
for i in self.bonds:
i[0].GetProperty().SetRepresentationToPoints()
i[0].GetProperty().SetPointSize(2.0)
for i in self.atoms:
i[1].GetProperty().SetRepresentationToPoints()
i[1].GetProperty().SetPointSize(2.0)
for i in allStructs.brailleStructs:
[act.GetProperty().SetRepresentationToPoints() for act in i[1].actors]
[act.GetProperty().SetPointSize(2.0) for act in i[1].actors]
self.iren.GetRenderWindow().Render()
def modelEditSameRadiiStateAct(self):
''' Called when the scale slider is activated '''
if self.modelEditScaleSlider.isEnabled():
self.modelEditScaleSlider.setEnabled(False)
self.modelEditScaleLabel.setEnabled(False)
self.modelEditScaleSlider.setValue(10.0)
self.modelEditScaleLabel.setText('Radii Scale = 1.0')
self.atomScale = 1.0
self.resetScene(camera=False)
elif not self.modelEditScaleSlider.isEnabled():
self.modelEditScaleSlider.setEnabled(True)
self.modelEditScaleLabel.setEnabled(True)
self.modelEditScaleSlider.setValue(10.0)
self.modelEditScaleLabel.setText('Radii Scale = 1.0')
self.atomScale = 1.0
self.resetScene(camera=False)
for i in range(len(self.tatoms)):
if self.speciesLabels[i].isEnabled():
self.speciesLabels[i].setEnabled(False)
elif not self.speciesLabels[i].isEnabled():
self.speciesLabels[i].setEnabled(True)
if self.speciesSpins[i].isEnabled():
self.speciesSpins[i].setEnabled(False)
elif not self.speciesSpins[i].isEnabled():
self.speciesSpins[i].setEnabled(True)
if self.speciesBtns[i].isEnabled():
self.speciesBtns[i].setEnabled(False)
elif not self.speciesBtns[i].isEnabled():
self.speciesBtns[i].setEnabled(True)
    def modelEditScaleSliderMoved(self, position):
        ''' Called when the scale slider is moved.

        The slider works in tenths: position 10 corresponds to a radii
        scale factor of 1.0.
        '''
        self.atomScale = position * 0.1
        self.modelEditScaleLabel.setText('Radii Scale = ' + str(self.atomScale))
        try:
            self.resetScene(camera=False)
        except NameError:
            # NOTE(review): presumably guards against the slider firing before
            # the module-level globals (e.g. allStructs) exist -- confirm; any
            # genuine NameError inside resetScene is silently swallowed here.
            pass
        # Reset every per-species spin box to the unscaled tabulated radius.
        for i in range(len(self.tatoms)):
            self.speciesSpins[i].setValue(float(radii[str(list(self.tatoms)[i]).lower()]))
def thetaResChanged(self, value):
''' Called when the theta resolution is changed '''
self.thetaRes = value
for i in range(len(self.atoms)):
self.atoms[i][2].SetThetaResolution(self.thetaRes)
self.iren.GetRenderWindow().Render()
def phiResChanged(self, value):
''' Called when the Phi resolution is changed '''
self.phiRes = value
for i in range(len(self.atoms)):
self.atoms[i][2].SetPhiResolution(self.phiRes)
self.iren.GetRenderWindow().Render()
def bondResChanged(self, value):
''' Called when the bond resolution is changed '''
self.bondRes = value
for i in range(len(self.bonds)):
self.bonds[i][2].SetNumberOfSides(self.bondRes)
self.iren.GetRenderWindow().Render()
def lightIcons(self):
''' Create a set of light Icons '''
self.exitAction.setIcon(QIcon(icons_dir + 'light/closeIcon.png'))
self.searchAction.setIcon(QIcon(icons_dir + 'light/searchIcon.png'))
self.exportModelAction.setIcon(QIcon(icons_dir + 'light/exportModelIcon.png'))
self.refreshAction.setIcon(QIcon(icons_dir + 'light/refreshIcon.png'))
self.addStructAction.setIcon(QIcon(icons_dir + 'light/addStructIcon.png'))
self.fileEditAction.setIcon(QIcon(icons_dir + 'light/fileEditIcon.png'))
self.cameraResetAction.setIcon(QIcon(icons_dir + 'light/resetCameraIcon.png'))
self.view100Action.setIcon(QIcon(icons_dir + 'light/view100Icon.png'))
self.view010Action.setIcon(QIcon(icons_dir + 'light/view010Icon.png'))
self.view001Action.setIcon(QIcon(icons_dir + 'light/view001Icon.png'))
self.view111Action.setIcon(QIcon(icons_dir + 'light/view111Icon.png'))
self.viewhklAction.setIcon(QIcon(icons_dir + 'light/viewhklIcon.png'))
    def exportFile(self, fileType="stl"):
        '''Open the export dialog for the active structure, or warn if none.

        Parameters
        ----------
        fileType : str
            Target format handed to ExportDialog (default "stl").
        '''
        if allStructs.activeList:
            # NOTE(review): the dialog is constructed but show()/exec_() is
            # never called here -- presumably ExportDialog displays itself in
            # its constructor; confirm, otherwise this branch is a no-op.
            window = ExportDialog(fileType, allStructs.brailleStructs, parent=self)
        else:
            msg = QMessageBox(self)
            msg.setText("Please select a structure before exporting.")
            msg.setWindowTitle("Export error")
            msg.setIcon(QMessageBox.Warning)
            msg.show()
def showModelEditAct(self):
''' Show the model editing panel '''
if not self.modelDock.isVisible():
self.modelDock.setVisible(True)
def showInfoAct(self):
''' Show the information panel '''
if not self.infoDock.isVisible():
self.infoDock.setVisible(True)
def showSelectionAct(self):
'''Show the model selection panel '''
if not self.listDock.isVisible():
self.listDock.setVisible(True)
    def loadStructActivated(self):
        ''' Read a .ustr file.

        Opens a file dialog; if a structure is parsed successfully it is
        registered as a custom structure, appended to the user table,
        activated, and the dock is switched to the "user" tab.
        '''
        loadWindow = OpenUstrFileDialog(parent=self)
        newStruct = loadWindow.getStruct()
        if newStruct:
            allStructs.addCustom(newStruct.name, newStruct)
            self.addToCustDock(newStruct)
            # Activate the freshly appended row (the last row of the table).
            self.structChangedUser(self.listDockUser.rowCount() - 1)
            self.listDockWidget.setCurrentIndex(1)
            self.listDockUser.setCurrentCell(self.listDockUser.rowCount() - 1, 1)
def addToCustDock(self, struct):
''' Helper function to add a Structure() instance to self.listDockUser '''
newrow = self.listDockUser.rowCount()
self.listDockUser.insertRow(newrow)
for i in range(self.listDockUser.columnCount()):
header = self.listDockUser.horizontalHeaderItem(i).text()
if header == "Keyname":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.name))
elif header == "Name":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.name))
elif header == "Herm-Maug":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.herm_maug))
elif header == "Schoenflies":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.schoenflies))
elif header == "Pearson":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.pearson))
elif header == "Crystal System":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.system))
elif header == "Bravais":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.bravais))
elif header == "Strukturbericht":
self.listDockUser.setItem(newrow, i, QTableWidgetItem(struct.strukt))
elif header == "Group Number":
if struct.number:
item = QTableWidgetItem()
item.setData(Qt.EditRole, int(str(struct.number)))
self.listDockUser.setItem(newrow, i, item)
if not struct.number:
self.listDockUser.setItem(newrow, i, QTableWidgetItem())
    def addStructActivated(self):
        ''' Launch the newstructdialog window (create new structure).

        If the dialog is accepted, the new structure is registered as a
        custom structure, appended to the user table, selected, and
        activated.
        '''
        addWindow = AddStructDialog(parent=self)
        addWindow.show()
        if addWindow.exec_():
            newStruct = addWindow.getNewStruct()
            allStructs.addCustom(newStruct.name, newStruct)
            self.addToCustDock(newStruct)
            # Switch to the "user" tab and select/activate the new last row.
            self.listDockWidget.setCurrentIndex(1)
            self.listDockUser.setCurrentCell(self.listDockUser.rowCount() - 1, 1)
            self.structChangedUser(self.listDockUser.rowCount() - 1)
def drawAtom(self, xpos, ypos, zpos, color='random', radius=100.):
'''
Draw an atom with x/y/z positions, radius, and color.
RETURNS ( vtkActor , vtkSphereSource , vtkPolyDataNormals )
'''
pos_mult = 100
atom_sphere = vtkSphereSource()
atom_sphere.SetCenter(float(xpos)*pos_mult, float(ypos)*pos_mult, float(zpos)*pos_mult)
atom_sphere.SetPhiResolution(self.phiRes)
atom_sphere.SetRadius(self.atomScale * float(radius))
atom_sphere.SetThetaResolution(self.thetaRes)
atom_sphere.Update()
triFilter = vtkTriangleFilter()
triFilter.SetInputConnection(atom_sphere.GetOutputPort())
triFilter.Update()
clean = vtkCleanPolyData()
clean.SetInputConnection(triFilter.GetOutputPort())
clean.Update()
normals = vtkPolyDataNormals()
normals.SetInputConnection(clean.GetOutputPort())
atom_mapper = vtkPolyDataMapper()
atom_mapper.SetInputConnection(normals.GetOutputPort())
atom_actor = vtkActor()
atom_actor.SetMapper(atom_mapper)
if color == 'random':
atom_actor.GetProperty().SetColor(random(), random(), random())
if self.curModelView == 'wireframe':
atom_actor.GetProperty().SetRepresentationToWireframe()
elif self.curModelView == 'surface':
atom_actor.GetProperty().SetRepresentationToSurface()
atom_actor.GetProperty().EdgeVisibilityOff()
if self.curModelView == 'surfaceEdge':
atom_actor.GetProperty().SetRepresentationToSurface()
atom_actor.GetProperty().EdgeVisibilityOn()
if self.curModelView == 'points':
atom_actor.GetProperty().SetRepresentationToPoints()
return atom_actor, atom_sphere, normals
    def drawBond(self, pos1, pos2):
        '''
        Draws a bond (cylinder)
        RETURNS (vtkActor , vtkPolyDataNormals , vtkTubeFilter)

        Parameters
        ----------
        pos1, pos2 : sequence of 3 floats
            Endpoint coordinates; scaled by 100 into scene units
            (same multiplier as drawAtom).
        '''
        mult = 100
        cyl = vtkLineSource()
        cyl.SetPoint1(pos1[0]*mult, pos1[1]*mult, pos1[2]*mult)
        cyl.SetPoint2(pos2[0]*mult, pos2[1]*mult, pos2[2]*mult)
        cyl.Update()
        # Sweep the line into a capped tube using the current bond radius
        # taken live from the spin box.
        Tube = vtkTubeFilter()
        Tube.SetInputConnection(cyl.GetOutputPort())
        # Tube.SetRadius(self.bondRadius)
        Tube.SetRadius(self.bondRadSpin.value())
        Tube.CappingOn()
        Tube.SetNumberOfSides(self.bondRes)
        Tube.Update()
        # Triangulate and merge duplicate points before computing normals.
        triFilter = vtkTriangleFilter()
        triFilter.SetInputConnection(Tube.GetOutputPort())
        triFilter.Update()
        clean = vtkCleanPolyData()
        clean.SetInputConnection(triFilter.GetOutputPort())
        clean.Update()
        normals = vtkPolyDataNormals()
        normals.SetInputConnection(clean.GetOutputPort())
        normals.Update()
        cylMapper = vtkPolyDataMapper()
        cylMapper.SetInputConnection(normals.GetOutputPort())
        cylActor = vtkActor()
        cylActor.SetMapper(cylMapper)
        # Bonds are always drawn white.
        cylActor.GetProperty().SetColor(1, 1, 1)
        # Honor the currently selected global representation mode.
        if self.curModelView == 'wireframe':
            cylActor.GetProperty().SetRepresentationToWireframe()
        elif self.curModelView == 'surface':
            cylActor.GetProperty().SetRepresentationToSurface()
            cylActor.GetProperty().EdgeVisibilityOff()
        if self.curModelView == 'surfaceEdge':
            cylActor.GetProperty().SetRepresentationToSurface()
            cylActor.GetProperty().EdgeVisibilityOn()
        if self.curModelView == 'points':
            cylActor.GetProperty().SetRepresentationToPoints()
        return (cylActor, normals, Tube)
def drawHUD(self):
''' Draw active elements on the vtk render window '''
self.renText = []
dy = 0
for i in range(len(self.tatoms)):
self.textActor = vtkTextActor()
self.textActor.SetInput(list(self.tatoms)[i].capitalize())
self.textActor.GetTextProperty().SetFontSize(35)
self.textActor.GetTextProperty().SetFontFamilyToArial()
self.textActor.GetTextProperty().SetColor(self.colors[i])
self.textActor.SetDisplayPosition(10, 10 + dy)
self.ren.AddActor(self.textActor)
self.renText.append(self.textActor)
dy += 40
def resetScene(self, camera=True, setAtoms=True, props=True, setBonds=True):
''' Helper function for reseting the render window '''
if props == True:
self.ren.RemoveAllViewProps()
if setAtoms == True:
self.setAtoms()
if setBonds == True:
self.setBonds(self.threshSpin.value())
self.drawHUD()
if camera == True:
self.ren.ResetCamera()
self.iren.GetRenderWindow().Render()
def changeRadius1(self, value):
''' Change the radius of the 1st atomic species '''
for i in range(len(self.atoms)):
if self.atoms[i][0] == str(self.speciesLabels[0].text()).split()[0].capitalize():
self.atoms[i][2].SetRadius(value)
self.iren.GetRenderWindow().Render()
def changeRadius2(self, value):
''' Change the radius of the 2nd atomic species '''
for i in range(len(self.atoms)):
if self.atoms[i][0] == str(self.speciesLabels[1].text()).split()[0].capitalize():
self.atoms[i][2].SetRadius(value)
self.iren.GetRenderWindow().Render()
def changeRadius3(self, value):
''' Change the radius of the 3rd atomic species '''
for i in range(len(self.atoms)):
if self.atoms[i][0] == str(self.speciesLabels[2].text()).split()[0].capitalize():
self.atoms[i][2].SetRadius(value)
self.iren.GetRenderWindow().Render()
def changeRadius4(self, value):
''' Change the radius of the 4th atomic species '''
for i in range(len(self.atoms)):
if self.atoms[i][0] == str(self.speciesLabels[3].text()).split()[0].capitalize():
self.atoms[i][2].SetRadius(value)
self.iren.GetRenderWindow().Render()
def changeRadius5(self, value):
''' Change the radius of the 5th atomic species '''
for i in range(len(self.atoms)):
if self.atoms[i][0] == str(self.speciesLabels[4].text()).split()[0].capitalize():
self.atoms[i][2].SetRadius(value)
self.iren.GetRenderWindow().Render()
def changeBondRadius(self, value):
''' Change the radius of the bonds '''
self.bondRadius = value
for i in self.bonds:
i[2].SetRadius(value)
self.iren.GetRenderWindow().Render()
def changeThreshold(self, value):
''' Change the threshold over which bonds are drawn '''
for i in self.bonds:
self.ren.RemoveActor(i[0])
self.setBonds(value)
self.iren.GetRenderWindow().Render()
    def addBraille(self):
        '''Create a braille marker for an atom via the NewBraille dialog.

        Replaces any existing marker with the same label (both in the list
        widget and the scene), registers the new marker, and redraws in
        surface-edge view.
        '''
        if not allStructs.active:
            QMessageBox.warning(self, 'No active structure', 'Please select/create a structure before adding braille.')
        else:
            new = NewBraille(parent=self)
            new.show()
            if new.exec_():
                bwidget = new.getBrailleWidget()
                bwidget.actors = [new.axisActor, new.dotsActor]
                # Label encodes the atom name and position.
                # NOTE(review): center appears to be divided by 10 to convert
                # to nm -- confirm the source units.
                text = bwidget.text + ' atom at ' + str(round(bwidget.center[0], 3) / 10) + 'nm' + \
                    ', ' + str(round(bwidget.center[1], 3) / 10) + 'nm' + \
                    ', ' + str(round(bwidget.center[2], 3) / 10) + 'nm'
                bwidget.widgetText = text
                newItem = QListWidgetItem(text)
                # Remove any existing entry with the same label.
                # NOTE(review): takeItem() shifts the remaining rows while this
                # loop indexes over the original count -- safe only if at most
                # one duplicate exists; confirm.
                for i in range(self.brailleList.count()):
                    if text == self.brailleList.item(i).text():
                        self.brailleList.takeItem(i)
                        for j, k in enumerate(allStructs.brailleStructs):
                            if k[0] == text:
                                self.ren.RemoveActor(allStructs.brailleStructs[j][1].actors[0])
                                self.ren.RemoveActor(allStructs.brailleStructs[j][1].actors[1])
                                del allStructs.brailleStructs[j]
                self.brailleList.addItem(newItem)
                allStructs.addBraille([text, bwidget])
                # Redraw so the new marker's actors become visible.
                self.surfaceEdgeView()
    def editBraille(self):
        '''Placeholder for editing an existing braille marker (not implemented).'''
        pass
def delBraille(self):
if self.brailleList.selectedItems():
name = ''
index = None
for i in self.brailleList.selectedItems():
name = i.text()
self.brailleList.takeItem(self.brailleList.row(i))
for i, j in enumerate(allStructs.brailleStructs):
if j[0] == name:
index = i
self.ren.RemoveActor(allStructs.brailleStructs[index][1].actors[0])
self.ren.RemoveActor(allStructs.brailleStructs[index][1].actors[1])
del allStructs.brailleStructs[index]
self.iren.GetRenderWindow().Render()
pass
def pickColor1(self):
''' Change the color of the 1st atomic species '''
col = QColorDialog.getColor()
self.rgb = col.getRgb()
text = str(self.speciesLabels[0].text()).split()[0].lower()
for i in self.atoms:
if i[0].lower() == text:
i[1].GetProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
self.iren.GetRenderWindow().Render()
self.speciesBtns[0].setDown(False)
for i in self.renText:
if text.lower() == i.GetInput().lower():
i.GetTextProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
self.ren.Render()
def pickColor2(self):
''' Change the color of the 2nd atomic species '''
col = QColorDialog.getColor()
self.rgb = col.getRgb()
text = str(self.speciesLabels[1].text()).split()[0].lower()
for i in self.atoms:
if i[0].lower() == text:
i[1].GetProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
self.iren.GetRenderWindow().Render()
self.speciesBtns[0].setDown(False)
for i in self.renText:
if text.lower() == i.GetInput().lower():
i.GetTextProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
def pickColor3(self):
''' Change the color of the 3rd atomic species '''
col = QColorDialog.getColor()
self.rgb = col.getRgb()
text = str(self.speciesLabels[2].text()).split()[0].lower()
for i in self.atoms:
if i[0].lower() == text:
i[1].GetProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
self.iren.GetRenderWindow().Render()
self.speciesBtns[0].setDown(False)
for i in self.renText:
if text.lower() == i.GetInput().lower():
i.GetTextProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
def pickColor4(self):
''' Change the color of the 4th atomic species '''
col = QColorDialog.getColor()
self.rgb = col.getRgb()
text = str(self.speciesLabels[3].text()).split()[0].lower()
for i in self.atoms:
if i[0].lower() == text:
i[1].GetProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
self.iren.GetRenderWindow().Render()
self.speciesBtns[0].setDown(False)
for i in self.renText:
if text.lower() == i.GetInput().lower():
i.GetTextProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
def pickColor5(self):
''' Change the color of the 5th atomic species '''
col = QColorDialog.getColor()
self.rgb = col.getRgb()
text = str(self.speciesLabels[4].text()).split()[0].lower()
for i in self.atoms:
if i[0].lower() == text:
i[1].GetProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
self.iren.GetRenderWindow().Render()
self.speciesBtns[0].setDown(False)
for i in self.renText:
if text.lower() == i.GetInput().lower():
i.GetTextProperty().SetColor(self.rgb[0] / 255., self.rgb[1] / 255., self.rgb[2] / 255.)
def editStructActivated(self):
if self.listDockUser.rowCount():
row = self.listDockUser.currentItem().row()
name = self.listDockUser.item(row, 0).text()
win = EditDialog(allStructs.custStructs[name], parent=self)
win.show()
if win.exec_():
newStruct = win.getNewStruct()
allStructs.custStructs[newStruct.name] = newStruct
self.listDockUser.setItem(row, 0, QTableWidgetItem(newStruct.name))
self.listDockUser.setItem(row, 1, QTableWidgetItem(newStruct.name))
self.listDockUser.setItem(row, 2, QTableWidgetItem(newStruct.herm_maug))
self.listDockUser.setItem(row, 3, QTableWidgetItem(newStruct.schoenflies))
self.listDockUser.setItem(row, 4, QTableWidgetItem(newStruct.pearson))
self.listDockUser.setItem(row, 5, QTableWidgetItem(newStruct.system))
self.listDockUser.setItem(row, 6, QTableWidgetItem(newStruct.bravais))
self.listDockUser.setItem(row, 7, QTableWidgetItem(newStruct.strukt))
try:
numwidget = QTableWidgetItem()
numwidget.setData(Qt.EditRole, int(newStruct.number))
self.listDockUser.setItem(row, 8, QTableWidgetItem(numwidget))
except:
self.listDockUser.setItem(row, 8, QTableWidgetItem(newStruct.number))
self.structChangedUser(row)
else:
QMessageBox.warning(self, "Warning", "Please select a custom defined structure to edit.")
def saveUstrActivated(self):
if self.listDockUser.rowCount() != 0:
row = self.listDockUser.currentItem().row()
name = self.listDockUser.item(row, 0).text()
SaveUstrDialog(allStructs.custStructs[name], parent=self)
else:
QMessageBox.warning(self, "Warning", "Please select a custom defined structure to save.")
    def closeEvent(self, *args, **kwargs):
        '''
        Close the vtk Qt window correctly.

        Terminates the interactor and finalizes the render window so the
        VTK side shuts down cleanly before Qt tears down the widget.
        '''
        self.vtkWidget.GetRenderWindow().GetInteractor().TerminateApp()
        self.vtkWidget.GetRenderWindow().Finalize()
def setAtoms(self):
'''
Retrieves the atomic positions and radii, creating vtkSphereSources
and adding their actors to the renderer.
self.atoms is [type , actor , source]
'''
self.atoms = []
self.tatoms = set([x[0] for x in allStructs.activeList[:]])
# self.colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (0, 1, 1)]
self.colors = [(random(), random(), random()) for i in range(5)]
for row in allStructs.activeList:
if radii.has_key(row[0].lower()) and float(radii[row[0].lower()]):
currAtom = self.drawAtom(row[1], row[2], row[3], radius=radii[row[0].lower()])
# elif radii.has_key(row[0].lower()) and not float(radii[row[0].lower()]):
else:
currAtom = self.drawAtom(row[1], row[2], row[3])
self.atoms.append([row[0], currAtom[0], currAtom[1], currAtom[2]])
self.atomcolors = {}
for i in range(len(self.tatoms)):
self.atomcolors[list(self.tatoms)[i]] = self.colors[i]
for i in range(len(allStructs.activeList)):
self.atoms[i][1].GetProperty().SetColor(self.atomcolors[allStructs.activeList[i][0]])
for atom in self.atoms:
self.ren.AddActor(atom[1])
def setBonds(self, sDist):
self.bonds = []
endPoints = []
for i in range(len(allStructs.activeList)):
for j in range(len(allStructs.activeList)):
ai = allStructs.activeList[i]
aj = allStructs.activeList[j]
if not j <= i:
disp = n.sqrt((aj[1] - ai[1]) ** 2 + (aj[2] - ai[2]) ** 2 + (aj[3] - ai[3]) ** 2)
if disp <= sDist/100. and ai != aj:
endPoints.append([ai[1], ai[2], ai[3], aj[1], aj[2], aj[3]])
for i in endPoints:
self.bonds.append(self.drawBond(i[:3], i[3:]))
for i in self.bonds:
self.ren.AddActor(i[0])
    def about(self):
        '''Show the About window.'''
        win = AboutWin(main_dir, parent=self)
        win.show()
    def createBaseDock(self):
        '''Build the read-only table listing the built-in structures.

        Populates self.listDockBase with one row per entry in
        allStructs.baseStructs. The "Keyname" column is hidden and serves
        only as a lookup key into the structure container.
        '''
        self.listDockBase = QTableWidget(self)
        self.listDockBase.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.listHeaders = ("Keyname",
                            "Comp.",
                            "Herm-Maug",
                            'Schoenflies',
                            'Pearson',
                            'Crystal System',
                            'Bravais',
                            'Strukturbericht',
                            'Group Number')
        self.listDockBase.setColumnCount(len(self.listHeaders))
        self.listDockBase.setRowCount(len(allStructs.baseStructs))
        self.listDockBase.setHorizontalHeaderLabels(self.listHeaders)
        # One parallel list of QTableWidgetItems per column.
        self.bitem_comp = []
        self.bitem_name = []
        self.bitem_hm = []
        self.bitem_sch = []
        self.bitem_ps = []
        self.bitem_keyname = []
        self.bitem_cs = []
        self.bitem_br = []
        self.bitem_str = []
        self.bitem_num = []
        for key in allStructs.baseStructs:
            self.bitem_keyname.append(QTableWidgetItem(key))
            self.bitem_comp.append(QTableWidgetItem(allStructs.baseStructs[key].composition))
            self.bitem_hm.append(QTableWidgetItem(allStructs.baseStructs[key].herm_maug))
            self.bitem_sch.append(QTableWidgetItem(allStructs.baseStructs[key].schoenflies))
            self.bitem_ps.append(QTableWidgetItem(allStructs.baseStructs[key].pearson))
            self.bitem_cs.append(QTableWidgetItem(allStructs.baseStructs[key].system))
            self.bitem_br.append(QTableWidgetItem(allStructs.baseStructs[key].bravais))
            self.bitem_str.append(QTableWidgetItem(allStructs.baseStructs[key].strukt))
            # Group numbers are stored via EditRole as ints so the column
            # sorts numerically; 0 means "no group number" and yields an
            # empty item.
            if allStructs.baseStructs[key].number != 0:
                num = QTableWidgetItem()
                num.setData(Qt.EditRole, int(allStructs.baseStructs[key].number))
            elif allStructs.baseStructs[key].number == 0:
                num = QTableWidgetItem()
            self.bitem_num.append(num)
        for row in range(len(allStructs.baseStructs)):
            self.listDockBase.setItem(row, 0, self.bitem_keyname[row])
            self.listDockBase.setItem(row, 1, self.bitem_comp[row])
            self.listDockBase.setItem(row, 2, self.bitem_hm[row])
            self.listDockBase.setItem(row, 3, self.bitem_sch[row])
            self.listDockBase.setItem(row, 4, self.bitem_ps[row])
            self.listDockBase.setItem(row, 5, self.bitem_cs[row])
            self.listDockBase.setItem(row, 6, self.bitem_br[row])
            self.listDockBase.setItem(row, 7, self.bitem_str[row])
            self.listDockBase.setItem(row, 8, self.bitem_num[row])
        # Hide the internal key column, then configure the view behavior.
        self.listDockBase.setColumnHidden(0, True)
        self.listDockBase.setSortingEnabled(True)
        self.listDockBase.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.listDockBase.setShowGrid(False)
        self.listDockBase.setAlternatingRowColors(True)
        self.listDockBase.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
def createUserDock(self):
self.listDockBase
self.listDockUser = QTableWidget(self)
self.listDockUser.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.listDockUser.setColumnCount(9)
self.listDockUser.setHorizontalHeaderLabels(("Keyname",
"Name",
"Herm-Maug",
'Schoenflies',
'Pearson',
'Crystal System',
'Bravais',
'Strukturbericht',
'Group Number'))
self.uitem_comp = []
self.uitem_name = []
self.uitem_hm = []
self.uitem_sch = []
self.uitem_ps = []
self.uitem_keyname = []
self.uitem_cs = []
self.uitem_br = []
self.uitem_str = []
self.uitem_num = []
for key in allStructs.custStructs:
self.uitem_keyname.append(QTableWidgetItem(key))
self.uitem_name.append(QTableWidgetItem(key))
self.uitem_hm.append(QTableWidgetItem(allStructs.custStructs[key].herm_maug))
self.uitem_sch.append(QTableWidgetItem(allStructs.custStructs[key].schoenflies))
self.uitem_ps.append(QTableWidgetItem(allStructs.custStructs[key].pearson))
self.uitem_cs.append(QTableWidgetItem(allStructs.custStructs[key].system))
self.uitem_br.append(QTableWidgetItem(allStructs.custStructs[key].bravais))
self.uitem_str.append(QTableWidgetItem(allStructs.custStructs[key].strukt))
if allStructs.custStructs[key].number != 0:
num = QTableWidgetItem()
num.setData(Qt.EditRole, int(str(allStructs.custStructs[key].number)[1:]))
elif allStructs.custStructs[key].number == 0:
num = QTableWidgetItem()
self.uitem_num.append(num)
for row in range(len(allStructs.custStructs)):
self.listDockUser.insertRow(self.listDockUser.rowCount())
self.listDockUser.setItem(row, 0, self.uitem_keyname[row])
self.listDockUser.setItem(row, 1, self.uitem_name[row])
self.listDockUser.setItem(row, 2, self.uitem_hm[row])
self.listDockUser.setItem(row, 3, self.uitem_sch[row])
self.listDockUser.setItem(row, 4, self.uitem_ps[row])
self.listDockUser.setItem(row, 5, self.uitem_cs[row])
self.listDockUser.setItem(row, 6, self.uitem_br[row])
self.listDockUser.setItem(row, 7, self.uitem_str[row])
self.listDockUser.setItem(row, 8, self.uitem_num[row])
self.listDockUser.setColumnHidden(0, True)
self.listDockUser.setSortingEnabled(True)
self.listDockUser.setSelectionBehavior(QAbstractItemView.SelectRows)
self.listDockUser.setShowGrid(False)
self.listDockUser.setAlternatingRowColors(True)
self.listDockUser.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
if __name__ == '__main__':
    app = ArcteApp(sys.argv)
    setPaths()
    begin = time.time()
    # Show the splash screen for at least one second while keeping the Qt
    # event loop responsive.
    splashmap = QPixmap(main_dir + '/Splash.png')
    splash = QSplashScreen(splashmap, Qt.WindowStaysOnTopHint)
    splash.setMask(splashmap.mask())
    splash.show()
    while time.time() - begin < 1:
        time.sleep(0.001)
        app.processEvents(flags=QEventLoop.AllEvents)
    # Load the element radii table and the structure database, then start
    # the main window and hand control to the Qt event loop.
    setRadii()
    allStructs = StructsContainer(group_dir, main_dir)
    mainUi = mainWindow()
    splash.finish(mainUi)
    sys.exit(app.exec_())
| jessedsmith/Arcte | arcte.py | Python | gpl-3.0 | 67,141 | [
"CRYSTAL",
"VTK"
] | 622a4c4fcab1ff5450f6e11558413ab8aa761072b05e3e2c9efd1b703e237c5f |
# encoding: utf-8
"""
An embedded IPython shell.
Authors:
* Brian Granger
* Fernando Perez
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import with_statement
from __future__ import print_function
import sys
import warnings
from IPython.core import ultratb, compilerop
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core.interactiveshell import DummyMod
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from IPython.terminal.ipapp import load_default_config
from IPython.utils.traitlets import Bool, CBool, Unicode
from IPython.utils.io import ask_yes_no
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# This is an additional magic that is exposed in embedded shells.
@magics_class
class EmbeddedMagics(Magics):
    """Extra magics available only inside embedded IPython shells."""

    @line_magic
    def kill_embedded(self, parameter_s=''):
        """%kill_embedded : deactivate for good the current embedded IPython.

        This function (after asking for confirmation) sets an internal flag so
        that an embedded IPython will never activate again. This is useful to
        permanently disable a shell that is being called inside a loop: once
        you've figured out what you needed from it, you may then kill it and
        the program will then continue to run without the interactive shell
        interfering again.
        """
        confirmed = ask_yes_no(
            "Are you sure you want to kill this embedded instance "
            "(y/n)? [y/N] ", 'n')
        if confirmed:
            self.shell.embedded_active = False
            print("This embedded IPython will not reactivate anymore "
                  "once you exit.")
class InteractiveShellEmbed(TerminalInteractiveShell):
    # When True, calls to the shell become no-ops (checked in __call__).
    dummy_mode = Bool(False)
    # Message printed when the embedded session ends (see __call__).
    exit_msg = Unicode('')
    embedded = CBool(True)
    # Cleared by %kill_embedded to permanently disable this instance.
    embedded_active = CBool(True)
    # Like the base class display_banner is not configurable, but here it
    # is True by default.
    display_banner = CBool(True)
    def __init__(self, config=None, ipython_dir=None, user_ns=None,
                 user_module=None, custom_exceptions=((),None),
                 usage=None, banner1=None, banner2=None,
                 display_banner=None, exit_msg=u'', user_global_ns=None):
        # Accepts the same arguments as TerminalInteractiveShell plus
        # `exit_msg` (printed when the embedded session ends) and the
        # deprecated `user_global_ns` (ignored; use `user_module`).
        if user_global_ns is not None:
            warnings.warn("user_global_ns has been replaced by user_module. The\
            parameter will be ignored.", DeprecationWarning)
        super(InteractiveShellEmbed,self).__init__(
            config=config, ipython_dir=ipython_dir, user_ns=user_ns,
            user_module=user_module, custom_exceptions=custom_exceptions,
            usage=usage, banner1=banner1, banner2=banner2,
            display_banner=display_banner
        )
        self.exit_msg = exit_msg
        # don't use the ipython crash handler so that user exceptions aren't
        # trapped
        sys.excepthook = ultratb.FormattedTB(color_scheme=self.colors,
                                             mode=self.xmode,
                                             call_pdb=self.pdb)
    def init_sys_modules(self):
        # Overridden as a no-op -- presumably to leave the host program's
        # sys.modules bookkeeping untouched when embedding; confirm against
        # the base class implementation.
        pass
    def init_magics(self):
        """Register the standard magics plus the embedded-only %kill_embedded."""
        super(InteractiveShellEmbed, self).init_magics()
        self.register_magics(EmbeddedMagics)
    def __call__(self, header='', local_ns=None, module=None, dummy=None,
                 stack_depth=1, global_ns=None, compile_flags=None):
        """Activate the interactive interpreter.

        __call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
        the interpreter shell with the given local and global namespaces, and
        optionally print a header string at startup.

        The shell can be globally activated/deactivated using the
        dummy_mode attribute. This allows you to turn off a shell used
        for debugging globally.

        However, *each* time you call the shell you can override the current
        state of dummy_mode with the optional keyword parameter 'dummy'. For
        example, if you set dummy mode on with IPShell.dummy_mode = True, you
        can still have a specific call work by making it as IPShell(dummy=False).
        """
        # If the user has turned it off, go away
        if not self.embedded_active:
            return
        # Normal exits from interactive mode set this flag, so the shell can't
        # re-enter (it checks this variable at the start of interactive mode).
        self.exit_now = False
        # Allow the dummy parameter to override the global __dummy_mode.
        # Note the asymmetry: dummy=False (== 0) always runs the shell,
        # dummy=True always skips it, and dummy=None defers to dummy_mode.
        if dummy or (dummy != 0 and self.dummy_mode):
            return
        if self.has_readline:
            self.set_readline_completer()
        # self.banner is auto computed; temporarily append the caller's
        # header to banner2 for the duration of this session.
        if header:
            self.old_banner2 = self.banner2
            self.banner2 = self.banner2 + '\n' + header + '\n'
        else:
            self.old_banner2 = ''
        # Call the embedding code with a stack depth of 1 so it can skip over
        # our call and get the original caller's namespaces.
        self.mainloop(local_ns, module, stack_depth=stack_depth,
                      global_ns=global_ns, compile_flags=compile_flags)
        # Restore the banner and print the farewell message, if any.
        self.banner2 = self.old_banner2
        if self.exit_msg is not None:
            print(self.exit_msg)
    def mainloop(self, local_ns=None, module=None, stack_depth=0,
                 display_banner=None, global_ns=None, compile_flags=None):
        """Embeds IPython into a running python program.

        Parameters
        ----------
        local_ns, module
            Working local namespace (a dict) and module (a module or similar
            object). If given as None, they are automatically taken from the scope
            where the shell was called, so that program variables become visible.
        stack_depth : int
            How many levels in the stack to go to looking for namespaces (when
            local_ns or module is None). This allows an intermediate caller to
            make sure that this function gets the namespace from the intended
            level in the stack. By default (0) it will get its locals and globals
            from the immediate caller.
        compile_flags
            A bit field identifying the __future__ features
            that are enabled, as passed to the builtin :func:`compile` function.
            If given as None, they are automatically taken from the scope where
            the shell was called.
        """
        # Deprecated calling convention: wrap a raw globals dict in a dummy
        # module object so the rest of this method only deals with modules.
        if (global_ns is not None) and (module is None):
            warnings.warn("global_ns is deprecated, use module instead.", DeprecationWarning)
            module = DummyMod()
            module.__dict__ = global_ns
        # Get locals and globals from caller.  We walk `stack_depth` frames up
        # from here and read that frame's locals/globals/__future__ flags so
        # the embedded shell sees the caller's variables.
        if ((local_ns is None or module is None or compile_flags is None)
                and self.default_user_namespaces):
            call_frame = sys._getframe(stack_depth).f_back
            if local_ns is None:
                local_ns = call_frame.f_locals
            if module is None:
                global_ns = call_frame.f_globals
                module = sys.modules[global_ns['__name__']]
            if compile_flags is None:
                # only keep the __future__-relevant bits of the code flags
                compile_flags = (call_frame.f_code.co_flags &
                                 compilerop.PyCF_MASK)
        # Save original namespace and module so we can restore them after
        # embedding; otherwise the shell doesn't shut down correctly.
        orig_user_module = self.user_module
        orig_user_ns = self.user_ns
        orig_compile_flags = self.compile.flags
        # Update namespaces and fire up interpreter
        # The global one is easy, we can just throw it in
        if module is not None:
            self.user_module = module
        # But the user/local one is tricky: ipython needs it to store internal
        # data, but we also need the locals. We'll throw our hidden variables
        # like _ih and get_ipython() into the local namespace, but delete them
        # later.
        if local_ns is not None:
            self.user_ns = local_ns
            self.init_user_ns()
        # Compiler flags
        if compile_flags is not None:
            self.compile.flags = compile_flags
        # Patch for global embedding to make sure that things don't overwrite
        # user globals accidentally. Thanks to Richard <rxe@renre-europe.com>
        # FIXME. Test this a bit more carefully (the if.. is new)
        # N.B. This can't now ever be called. Not sure what it was for.
        # And now, since it wasn't called in the previous version, I'm
        # commenting out these lines so they can't be called with my new changes
        # --TK, 2011-12-10
        #if local_ns is None and module is None:
        #    self.user_global_ns.update(__main__.__dict__)
        # make sure the tab-completer has the correct frame information, so it
        # actually completes using the frame's locals/globals
        self.set_completer_frame()
        with self.builtin_trap, self.display_trap:
            self.interact(display_banner=display_banner)
        # now, purge out the local namespace of IPython's hidden variables.
        if local_ns is not None:
            for name in self.user_ns_hidden:
                local_ns.pop(name, None)
        # Restore original namespace so shell can shut down when we exit.
        self.user_module = orig_user_module
        self.user_ns = orig_user_ns
        self.compile.flags = orig_compile_flags
def embed(**kwargs):
    """Call this to embed IPython at the current point in your program.

    The first invocation of this will create an :class:`InteractiveShellEmbed`
    instance and then call it.  Consecutive calls just call the already
    created instance.

    If you don't want the kernel to initialize the namespace
    from the scope of the surrounding function,
    and/or you want to load full IPython configuration,
    you probably want `IPython.start_ipython()` instead.

    Here is a simple example::

        from IPython import embed
        a = 10
        b = 20
        embed(header='First time')
        c = 30
        d = 40
        embed()

    Full customization can be done by passing a :class:`Config` in as the
    config argument.
    """
    config = kwargs.get('config')
    header = kwargs.pop('header', u'')
    compile_flags = kwargs.pop('compile_flags', None)
    if config is None:
        # No config supplied: use the defaults, and make the embedded shell
        # pick up settings written for the regular terminal shell.
        config = load_default_config()
        config.InteractiveShellEmbed = config.TerminalInteractiveShell
        kwargs['config'] = config
    # instance() is a singleton: later calls reuse the shell created first.
    shell = InteractiveShellEmbed.instance(**kwargs)
    # stack_depth=2 skips embed() itself so the caller's namespace is used.
    shell(header=header, stack_depth=2, compile_flags=compile_flags)
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/terminal/embed.py | Python | bsd-3-clause | 11,288 | [
"Brian"
] | 300f06bc15e440ded403f6cfead7c1ad10d723c555493d05c9875a7db7d0a42f |
"""Berendsen NPT dynamics class."""
import numpy as np
#from ase.md import MolecularDynamics
from ase.md.nvtberendsen import NVTBerendsen
import ase.units as units
#import math
class NPTBerendsen(NVTBerendsen):
    """Berendsen (constant N, P, T) molecular dynamics.

    This dynamics scale the velocities and volumes to maintain a constant
    pressure and temperature.  The shape of the simulation cell is not
    altered; if that is desired use Inhomogeneous_NPTBerendsen.

    Usage: NPTBerendsen(atoms, timestep, temperature, taut, pressure, taup)

    atoms
        The list of atoms.
    timestep
        The time step.
    temperature
        The desired temperature, in Kelvin.
    taut
        Time constant for Berendsen temperature coupling.
    fixcm
        If True, the position and momentum of the center of mass is
        kept unperturbed.  Default: True.
    pressure
        The desired pressure, in bar (1 bar = 1e5 Pa).
    taup
        Time constant for Berendsen pressure coupling.
    compressibility
        The compressibility of the material, water 4.57E-5 bar-1, in bar-1
    """

    def __init__(self, atoms, timestep, temperature, taut=0.5e3 * units.fs,
                 pressure=1.01325, taup=1e3 * units.fs,
                 compressibility=4.57e-5, fixcm=True,
                 trajectory=None, logfile=None, loginterval=1):
        NVTBerendsen.__init__(self, atoms, timestep, temperature, taut,
                              fixcm, trajectory, logfile, loginterval)
        self.taup = taup
        self.pressure = pressure
        self.compressibility = compressibility

    def set_taup(self, taup):
        """Set the pressure coupling time constant.

        Bug fix: this setter used to assign ``self.taut``, silently
        clobbering the *temperature* coupling constant while leaving
        ``taup`` unchanged.
        """
        self.taup = taup

    def get_taup(self):
        """Return the pressure coupling time constant.

        Bug fix: this used to return ``self.taut`` (the temperature
        coupling constant).
        """
        return self.taup

    def set_pressure(self, pressure):
        """Set the target pressure."""
        self.pressure = pressure

    def get_pressure(self):
        """Return the target pressure."""
        return self.pressure

    def set_compressibility(self, compressibility):
        """Set the material compressibility used in the barostat."""
        self.compressibility = compressibility

    def get_compressibility(self):
        """Return the material compressibility."""
        return self.compressibility

    def set_timestep(self, timestep):
        """Set the MD time step."""
        self.dt = timestep

    def get_timestep(self):
        """Return the MD time step."""
        return self.dt

    def scale_positions_and_cell(self):
        """Do the Berendsen pressure coupling,
        scale the atom positions and the simulation cell."""
        taupscl = self.dt / self.taup
        stress = self.atoms.get_stress()
        # NOTE(review): assumes pressure from get_isotropic_pressure is in
        # the same units as self.pressure -- confirm against the Atoms API.
        old_pressure = self.atoms.get_isotropic_pressure(stress)
        # Linear volume feedback toward the target pressure; a single scalar
        # factor is used, so the cell shape is preserved.
        scl_pressure = 1.0 - taupscl * self.compressibility / 3.0 * \
            (self.pressure - old_pressure)
        cell = self.atoms.get_cell()
        cell = scl_pressure * cell
        self.atoms.set_cell(cell, scale_atoms=True)

    def step(self, f):
        """Move one timestep forward using Berendsen NPT molecular dynamics."""
        NVTBerendsen.scale_velocities(self)
        self.scale_positions_and_cell()

        # one step velocity verlet
        atoms = self.atoms
        p = self.atoms.get_momenta()
        p += 0.5 * self.dt * f

        if self.fixcm:
            # calculate the center of mass momentum and subtract it so the
            # thermostat/barostat does not inject net drift
            psum = p.sum(axis=0) / float(len(p))
            p = p - psum

        self.atoms.set_positions(
            self.atoms.get_positions() +
            self.dt * p / self.atoms.get_masses()[:, np.newaxis])

        # We need to store the momenta on the atoms before calculating
        # the forces, as in a parallel Asap calculation atoms may
        # migrate during force calculations, and the momenta need to
        # migrate along with the atoms. For the same reason, we
        # cannot use self.masses in the line above.
        self.atoms.set_momenta(p)
        f = self.atoms.get_forces()
        atoms.set_momenta(self.atoms.get_momenta() + 0.5 * self.dt * f)

        return f
class Inhomogeneous_NPTBerendsen(NPTBerendsen):
    """Berendsen (constant N, P, T) molecular dynamics.

    This dynamics scale the velocities and volumes to maintain a constant
    pressure and temperature.  Unlike NPTBerendsen, each of the three cell
    vectors is rescaled independently (the cell angles stay constant), so
    the box may become anisotropic.

    Usage: NPTBerendsen(atoms, timestep, temperature, taut, pressure, taup)

    atoms
        The list of atoms.
    timestep
        The time step.
    temperature
        The desired temperature, in Kelvin.
    taut
        Time constant for Berendsen temperature coupling.
    fixcm
        If True, the position and momentum of the center of mass is
        kept unperturbed.  Default: True.
    pressure
        The desired pressure, in bar (1 bar = 1e5 Pa).
    taup
        Time constant for Berendsen pressure coupling.
    compressibility
        The compressibility of the material, water 4.57E-5 bar-1, in bar-1
    """

    def scale_positions_and_cell(self):
        """Do the Berendsen pressure coupling,
        scale the atom positions and the simulation cell."""
        taupscl = self.dt * self.compressibility / self.taup / 3.0
        # convert stress to bar; minus sign because pressure = -stress
        stress = - self.atoms.get_stress() * 1e-5 / units.Pascal
        if stress.shape == (6,):
            diagonal = stress[:3]
        elif stress.shape == (3, 3):
            diagonal = [stress[i][i] for i in range(3)]
        else:
            raise ValueError("Cannot use a stress tensor of shape " +
                             str(stress.shape))
        pbc = self.atoms.get_pbc()
        # one feedback factor per axis; non-periodic directions are frozen
        factors = [1.0 - taupscl * (self.pressure - diagonal[i]) * pbc[i]
                   for i in range(3)]
        old_cell = self.atoms.get_cell()
        new_cell = np.array([factors[i] * old_cell[i] for i in range(3)])
        self.atoms.set_cell(new_cell, scale_atoms=True)
| grhawk/ASE | tools/ase/md/nptberendsen.py | Python | gpl-2.0 | 5,931 | [
"ASE"
] | 9fb21ae65ebb3e209940617a1f0eb547843f01e1bd066c68af010b8b536628d2 |
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
import pickle
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, assert_warns, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1

# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
# Each entry must be the name of a continuous distribution in scipy.stats;
# test_all_distributions iterates over this list.
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
         'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
         'foldcauchy', 'gamma', 'gengamma', 'loggamma',
         'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma',
         'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
         'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
         'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
         'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
         'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
         'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
         'hypsecant', 'laplace', 'reciprocal', 'triang', 'tukeylambda',
         'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
         'rice']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    # https://github.com/scipy/scipy/issues/3802
    # The private *_gen distribution classes must remain accessible from
    # scipy.stats.distributions for backwards compatibility.
    _assert_hasattr(scipy.stats.distributions, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
assert_(pval > alpha,
msg="D = {}; pval = {}; alpha = {}; args = {}".format(
D, pval, alpha, args))
# nose test generator
def test_all_distributions():
    """Yield a KS consistency check for every name in ``dists``."""
    for dist in dists:
        nargs = getattr(stats, dist).numargs
        # fatiguelife is numerically delicate; use a looser threshold
        alpha = 0.001 if dist == 'fatiguelife' else 0.01
        if dist == 'triang':
            args = tuple(np.random.random(nargs))
        elif dist == 'reciprocal':
            vals = np.random.random(nargs)
            vals[1] = vals[0] + 1.0
            args = tuple(vals)
        elif dist == 'vonmises':
            # also exercise two fixed kappa values for vonmises
            yield check_distribution, dist, (10,), alpha
            yield check_distribution, dist, (101,), alpha
            args = tuple(1.0 + np.random.random(nargs))
        else:
            args = tuple(1.0 + np.random.random(nargs))
        yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k, l, s, x):
    """The vonmises pdf must be periodic with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    assert_almost_equal(frozen.pdf(x), frozen.pdf(x % (2 * numpy.pi * s)))
def check_vonmises_cdf_periodic(k, l, s, x):
    """The vonmises cdf must be periodic with period 2*pi*scale (mod 1)."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    assert_almost_equal(frozen.cdf(x) % 1,
                        frozen.cdf(x % (2 * numpy.pi * s)) % 1)
def test_vonmises_pdf_periodic():
    """Yield periodicity checks over kappa, x, loc and scale combinations."""
    for kappa in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            for loc, scale in [(0, 1), (1, 1), (0, 10)]:
                yield check_vonmises_pdf_periodic, kappa, loc, scale, x
            for loc, scale in [(0, 1), (1, 1), (0, 10)]:
                yield check_vonmises_cdf_periodic, kappa, loc, scale, x
def test_vonmises_line_support():
    """vonmises_line is supported on the fixed interval [-pi, pi]."""
    assert_equal((stats.vonmises_line.a, stats.vonmises_line.b),
                 (-np.pi, np.pi))
def test_vonmises_numerical():
    # For very large kappa the distribution is extremely concentrated at
    # loc=0, but the cdf evaluated at the center must still be exactly 1/2.
    assert_almost_equal(stats.vonmises(800).cdf(0), 0.5)
class TestRandInt(TestCase):
    """Sanity checks for the discrete uniform (randint) distribution."""

    def test_rvs(self):
        sample = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all(sample < 30) & numpy.all(sample >= 5))
        assert_(len(sample) == 100)
        sample = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        one = stats.randint.rvs(15, 46)
        assert_((one >= 15) & (one < 46))
        assert_(isinstance(one, numpy.ScalarType), msg=repr(type(one)))
        frozen_sample = stats.randint(15, 46).rvs(3)
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pdf(self):
        # pmf is flat, 1/(30-5), on the support [5, 30) and zero outside
        support = numpy.r_[0:36]
        inside = (support >= 5) & (support < 30)
        expected = numpy.where(inside, 1.0 / (30 - 5), 0)
        assert_array_almost_equal(stats.randint.pmf(support, 5, 30), expected)

    def test_cdf(self):
        # cdf is a staircase climbing from 0 below x=5 to 1 at x>=29
        grid = numpy.r_[0:36:100j]
        k = numpy.floor(grid)
        expected = numpy.select([k >= 30, k >= 5],
                                [1.0, (k - 5.0 + 1) / (30 - 5.0)], 0)
        assert_array_almost_equal(stats.randint.cdf(grid, 5, 30), expected,
                                  decimal=12)
class TestBinom(TestCase):
    """Sanity checks for the binomial distribution."""

    def test_rvs(self):
        sample = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(sample >= 0) & numpy.all(sample <= 10))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.binom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_sample = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # regression test for Ticket #1842: degenerate p=1 and p=0 cases
        assert_allclose(stats.binom.pmf(100, 100, 1), 1.0,
                        rtol=1e-15, atol=0)
        assert_allclose(stats.binom.pmf(0, 100, 0), 1.0,
                        rtol=1e-15, atol=0)

    def test_entropy(self):
        # compare against a hand-computed pmf for Binomial(2, 0.5)
        probs = np.array([0.25, 0.5, 0.25])
        assert_allclose(stats.binom(2, 0.5).entropy(),
                        -sum(xlogy(probs, probs)))
        # degenerate distributions carry zero entropy
        assert_equal(stats.binom(2, 0.0).entropy(), 0.0)
        assert_equal(stats.binom(2, 1.0).entropy(), 0.0)

    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
    """Sanity checks for the Bernoulli distribution."""

    def test_rvs(self):
        sample = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(sample >= 0) & numpy.all(sample <= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.bernoulli.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_sample = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_entropy(self):
        # H(p) = -p log p - (1-p) log(1-p)
        assert_allclose(stats.bernoulli(0.25).entropy(),
                        -0.25 * np.log(0.25) - 0.75 * np.log(0.75))
        # degenerate cases carry zero entropy
        assert_equal(stats.bernoulli(0.0).entropy(), 0.0)
        assert_equal(stats.bernoulli(1.0).entropy(), 0.0)
class TestNBinom(TestCase):
    """Sanity checks for the negative binomial distribution."""

    def test_rvs(self):
        sample = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(single, int))
        frozen_sample = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # regression test for ticket 1779: pmf and exp(logpmf) must agree
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        assert_equal(stats.nbinom.logpmf(0, 1, 1), 0)
class TestGeom(TestCase):
    """Sanity checks for the geometric distribution."""

    def test_rvs(self):
        sample = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.geom.rvs(0.75)
        assert_(isinstance(single, int))
        frozen_sample = stats.geom(0.75).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_pmf(self):
        # for p = 1/2 the pmf halves with every additional trial
        assert_array_almost_equal(stats.geom.pmf([1, 2, 3], 0.5),
                                  [0.5, 0.25, 0.125])

    def test_logpmf(self):
        # regression test for ticket 1793: logpmf must equal log(pmf)
        assert_allclose(np.log(stats.geom.pmf([1, 2, 3], 0.5)),
                        stats.geom.logpmf([1, 2, 3], 0.5),
                        rtol=1e-15, atol=0)
        # regression test for gh-4028: logpmf(1, 1) must be exactly 0
        assert_equal(stats.geom.logpmf(1, 1), 0.0)

    def test_cdf_sf(self):
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.cdf([1, 2, 3], 0.5), expected)
        assert_array_almost_equal(stats.geom.sf([1, 2, 3], 0.5),
                                  1 - expected)

    def test_logcdf_logsf(self):
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(stats.geom.logcdf([1, 2, 3], 0.5),
                                  np.log(expected))
        assert_array_almost_equal(stats.geom.logsf([1, 2, 3], 0.5),
                                  np.log1p(-expected))

    def test_ppf(self):
        assert_array_almost_equal(stats.geom.ppf([0.5, 0.75, 0.875], 0.5),
                                  array([1.0, 2.0, 3.0]))
class TestGennorm(TestCase):
    """gennorm must reduce to known distributions at special beta values."""

    def test_laplace(self):
        # beta=1 is exactly the Laplace distribution
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 1),
                            stats.laplace.pdf(pts))

    def test_norm(self):
        # beta=2 is a normal distribution with scale 1/sqrt(2)
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 2),
                            stats.norm.pdf(pts, scale=2**-.5))
class TestHalfgennorm(TestCase):
    """halfgennorm must reduce to known distributions at special betas."""

    def test_expon(self):
        # beta=1 is the exponential distribution
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, 1),
                            stats.expon.pdf(pts))

    def test_halfnorm(self):
        # beta=2 is a half normal with scale 1/sqrt(2)
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, 2),
                            stats.halfnorm.pdf(pts, scale=2**-.5))

    def test_gennorm(self):
        # halfgennorm is twice the (folded) generalized normal density
        pts = [1, 2, 3]
        assert_almost_equal(stats.halfgennorm.pdf(pts, .497324),
                            2 * stats.gennorm.pdf(pts, .497324))
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
# Check a case that fails because of extreme tailness.
raise SkipTest('truncnorm rvs is know to fail at extreme tails')
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
    """Tests for the hypergeometric distribution."""

    def test_rvs(self):
        # shape, range and integer dtype of sampled variates
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])

    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)

    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)

    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)

    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = []
        for eaten in fruits_eaten:
            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
                                          eaten))
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)

    def test_entropy(self):
        # Simple tests of entropy.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        # a single-outcome distribution has zero entropy
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)

    def test_logsf(self):
        # Test logsf for very large numbers. See issue #4982
        # Results compare with those from R (v3.2.0):
        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
        # -2239.771
        k = 1e4
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logsf(k, M, n, N)
        # NOTE(review): 'exspected' is a typo for 'expected' (local name only,
        # no behavioral impact)
        exspected = -2239.771  # From R
        assert_almost_equal(result, exspected, decimal=3)
class TestLoggamma(TestCase):
    """Check loggamma statistics against published reference values."""

    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        reference = [
            # c, mean, var, skew, excess kurtosis
            (0.5, -1.9635, 4.9348, -1.5351, 4.0000),
            (1.0, -0.5772, 1.6449, -1.1395, 2.4000),
            (12.0, 2.4427, 0.0869, -0.2946, 0.1735),
        ]
        for c, mean, var, skew, kurt in reference:
            # note: stats() always returns in (m, v, s, k) order regardless
            # of the order of the letters in the `moments` string
            computed = stats.loggamma.stats(c, moments='msvk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogser(TestCase):
    """Sanity checks for the log-series distribution."""

    # NOTE(review): the isinstance(val, int) assertion below is tied to the
    # rvs return type of this scipy version; newer scipy returns numpy
    # integer scalars -- confirm before porting.
    def test_rvs(self):
        # shape, support (k >= 1) and integer dtype of sampled variates
        vals = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.logser.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.logser(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
class TestPareto(TestCase):
    """Check pareto.stats against the closed-form moment formulas."""

    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        # The n-th moment of pareto(b) is finite only for b > n, so as b
        # grows past 1, 2, 3, 4 the mean, variance, skewness and kurtosis
        # successively become finite.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # b <= 1: no finite moments at all
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # 1 < b <= 2: finite mean only
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # 2 < b <= 3: finite mean and variance
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)

            # 3 < b <= 4: skewness becomes finite as well
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)

            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)

            # b > 4: all four moments finite
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
for dc in [1e-14, 1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
class TestPearson3(TestCase):
    """Sanity checks for the Pearson type III distribution."""

    def test_rvs(self):
        sample = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllFloat'])
        single = stats.pearson3.rvs(0.5)
        assert_(isinstance(single, float))
        frozen_sample = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllFloat'])
        assert_(len(frozen_sample) == 3)

    def test_pdf(self):
        # reference density values for a few skews and points
        assert_allclose(stats.pearson3.pdf(2, [0.0, 0.1, 0.2]),
                        np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        assert_allclose(stats.pearson3.pdf(-3, 0.1),
                        np.array([0.00313791]), atol=1e-6)
        assert_allclose(stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1),
                        np.array([0.00313791, 0.05192304, 0.25028092,
                                  0.39885918, 0.23413173]), atol=1e-6)

    def test_cdf(self):
        # reference cdf values for a few skews and points
        assert_allclose(stats.pearson3.cdf(2, [0.0, 0.1, 0.2]),
                        np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        assert_allclose(stats.pearson3.cdf(-3, 0.1),
                        [0.00082256], atol=1e-6)
        assert_allclose(stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1),
                        [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                         5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestPoisson(TestCase):
    """Sanity checks for the Poisson distribution."""

    def test_pmf_basic(self):
        # With mu = ln 2 the first pmf values have simple closed forms.
        ln2 = np.log(2)
        expected = [0.5, ln2 / 2, ln2**2 / 4]
        assert_allclose(stats.poisson.pmf([0, 1, 2], ln2), expected)

    def test_mu0(self):
        # Edge case: mu=0 is a point mass at zero.
        assert_array_equal(stats.poisson.pmf([0, 1, 2], 0), [1, 0, 0])
        assert_equal(stats.poisson.interval(0.95, 0), (0, 0))

    def test_rvs(self):
        sample = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(sample >= 0))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.poisson.rvs(0.5)
        assert_(isinstance(single, int))
        frozen_sample = stats.poisson(0.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_stats(self):
        # scalar mu: closed-form mean, var, skew and kurtosis
        mu = 16.0
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'),
                        [mu, mu, np.sqrt(1.0 / mu), 1.0 / mu])
        # array mu, including the degenerate mu=0 case
        mu = np.array([0.0, 1.0, 2.0])
        expected = (mu, mu, [np.inf, 1, 1 / np.sqrt(2)], [np.inf, 1, 0.5])
        assert_allclose(stats.poisson.stats(mu, moments='mvsk'), expected)
class TestZipf(TestCase):
    """Sanity checks for the Zipf distribution."""

    def test_rvs(self):
        sample = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(sample >= 1))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.zipf.rvs(1.5)
        assert_(isinstance(single, int))
        frozen_sample = stats.zipf(1.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])

    def test_moments(self):
        # the n-th moment exists iff a > n + 1
        mean, var = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(mean))
        assert_equal(var, np.inf)
        skew, kurt = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([skew, kurt]).all())
class TestDLaplace(TestCase):
    """Sanity checks for the discrete Laplace distribution."""

    def test_rvs(self):
        sample = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(sample) == (2, 50))
        assert_(sample.dtype.char in typecodes['AllInteger'])
        single = stats.dlaplace.rvs(1.5)
        assert_(isinstance(single, int))
        frozen_sample = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(frozen_sample, numpy.ndarray))
        assert_(frozen_sample.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)

    def test_stats(self):
        # compare the closed-form moments with direct summation over the pmf
        a = 1.
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        N = 37
        xx = np.arange(-N, N + 1)
        pp = dl.pmf(xx)
        m2 = np.sum(pp * xx**2)
        m4 = np.sum(pp * xx**4)
        assert_equal((m, s), (0, 0))
        assert_allclose((v, k), (m2, m4 / m2**2 - 3.),
                        atol=1e-14, rtol=1e-8)

    def test_stats2(self):
        # a = ln 2 gives exactly rational variance and excess kurtosis
        dl = stats.dlaplace(np.log(2.))
        m, v, s, k = dl.stats('mvsk')
        assert_equal((m, s), (0., 0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
    """Tests for the inverse gamma distribution's stats()."""

    def test_invgamma_inf_gh_1866(self):
        # invgamma's n-th moment is only finite for a > n;
        # the specific reference numbers were checked with boost 1.54
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            assert_allclose(stats.invgamma.stats(a=19.31, moments='mvsk'),
                            [0.05461496450, 0.0001723162534, 1.020362676,
                             2.055616582])
            shapes = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=shapes, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],       # means
                        [np.inf, 0.2061430632, 0.01312749422],  # variances
                        [np.nan, 41.95235392, 2.919025532],     # skews
                        [np.nan, np.nan, 24.51923076])          # kurtoses
            for computed, known in zip(mvsk, expected):
                assert_almost_equal(computed, known)
class TestF(TestCase):
    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))

    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')

    @dec.knownfailureif(True, 'f stats does not properly broadcast')
    def test_stats_broadcast(self):
        # stats do not fully broadcast just yet
        mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
    """Regression test for #1191: ``std`` must broadcast over array args."""
    expected = [1.29099445, 1.22474487]  # sqrt(df / (df - 2)) for df = 5, 6
    assert_array_almost_equal(stats.t.std([5, 6]), expected)
class TestRvDiscrete(TestCase):
    def test_rvs(self):
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))

        # Empirical frequencies should sit within 5% of the probabilities
        # (loose bound, since the draw is random).
        for s, p in zip(states, probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)

        # A scalar draw comes back as a plain Python int.
        x = r.rvs()
        assert_(isinstance(x, int))

    def test_entropy(self):
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)

        # A degenerate (single-outcome) distribution has zero entropy.
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)
class TestSkewNorm(TestCase):
    def test_normal(self):
        # When the skewness is 0 the distribution is normal
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
                                  stats.norm.pdf(x))

    def test_rvs(self):
        # Random variates honour the requested shape for either sign of a.
        shape = (3, 4, 5)
        x = stats.skewnorm.rvs(a=0.75, size=shape)
        assert_equal(shape, x.shape)

        x = stats.skewnorm.rvs(a=-3, size=shape)
        assert_equal(shape, x.shape)

    def test_moments(self):
        # Sample moments of a large draw should match stats() for both
        # positive and negative skewness (loose 2-decimal tolerance since
        # the sample is random).
        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2)
        assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
                                  stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk'),
                                  decimal=2)

        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2)
        assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
                                  stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk'),
                                  decimal=2)
class TestExpon(TestCase):
    """Spot checks of the standard (unit-rate) exponential distribution."""

    def test_zero(self):
        # pdf(0) = exp(-0) = 1.
        assert_equal(stats.expon.pdf(0), 1)

    def test_tail(self):
        # Regression test for ticket 807: the cdf must stay accurate for
        # tiny arguments, and sf/isf must round-trip far into the tail.
        assert_equal(stats.expon.cdf(1e-18), 1e-18)
        assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
class TestExponNorm(TestCase):
    """Tests for the exponentially modified normal (EMG) distribution."""

    def test_moments(self):
        # Compare stats() against the closed-form non-loc/scale moments.
        def emg_moments(lam, sig, mu):
            # Formulae from the Wikipedia article on the exponentially
            # modified Gaussian distribution.
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]

        # (mu, sig, lam) triples; K = 1/(lam*sig) is the scipy shape param.
        cases = [(0, 1, 1), (-3, 2, 0.1), (0, 3, 1), (-5, 11, 3.5)]
        for mu, sig, lam in cases:
            K = 1.0 / (lam * sig)
            sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
            assert_almost_equal(sts, emg_moments(lam, sig, mu))

    def test_extremes_x(self):
        # Far tails should underflow cleanly to zero, not overflow.
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
    """Tests for the generalized exponential distribution."""

    def test_pdf_unity_area(self):
        # BUG FIX: ``scipy.integrate.simps`` was removed in SciPy 1.14 in
        # favour of ``simpson``; prefer the new name and fall back to the
        # old one so this still runs on older SciPy versions.
        try:
            from scipy.integrate import simpson
        except ImportError:
            from scipy.integrate import simps as simpson
        # PDF should integrate to one over [0, 10) for these parameters.
        p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_almost_equal(simpson(p, dx=0.01), 1, 1)

    def test_cdf_bounds(self):
        # CDF values must be valid probabilities, i.e. within [0, 1].
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
    """Tail behaviour of the exponential power distribution."""

    def test_tail(self):
        # cdf(x) ~ x**b for small x (here b = 2), and sf/isf round-trip.
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        q = stats.exponpow.sf(5, .8)
        assert_almost_equal(stats.exponpow.isf(q, .8), 5)
class TestSkellam(TestCase):
    # Reference values below were computed with R (dskellam / pskellam).

    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
            [4.2254582961926893e-005, 1.1404838449648488e-004,
             2.8979625801752660e-004, 6.9177078182101231e-004,
             1.5480716105844708e-003, 3.2412274963433889e-003,
             6.3373707175123292e-003, 1.1552351566696643e-002,
             1.9606152375042644e-002, 3.0947164083410337e-002,
             4.5401737566767360e-002, 6.1894328166820688e-002,
             7.8424609500170578e-002, 9.2418812533573133e-002,
             1.0139793148019728e-001, 1.0371927988298846e-001,
             9.9076583077406091e-002, 8.8546660073089561e-002,
             7.4187842052486810e-002, 5.8392772862200251e-002,
             4.3268692953013159e-002, 3.0248159818374226e-002,
             1.9991434305603021e-002, 1.2516877303301180e-002,
             7.4389876226229707e-003])

        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)

    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
            [6.4061475386192104e-005, 1.7810985988267694e-004,
             4.6790611790020336e-004, 1.1596768997212152e-003,
             2.7077485103056847e-003, 5.9489760066490718e-003,
             1.2286346724161398e-002, 2.3838698290858034e-002,
             4.3444850665900668e-002, 7.4392014749310995e-002,
             1.1979375231607835e-001, 1.8168808048289900e-001,
             2.6011268998306952e-001, 3.5253150251664261e-001,
             4.5392943399683988e-001, 5.5764871387982828e-001,
             6.5672529695723436e-001, 7.4527195703032389e-001,
             8.1945979908281064e-001, 8.7785257194501087e-001,
             9.2112126489802404e-001, 9.5136942471639818e-001,
             9.7136085902200120e-001, 9.8387773632530240e-001,
             9.9131672394792536e-001])

        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
    """Tests for the log-normal distribution."""

    def test_pdf(self):
        # Regression test for Ticket #1471: pdf(0) used to produce nan from
        # a 0/0. Also make sure there are no warnings at x=0, cf gh-5202.
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
            assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])

    def test_logcdf(self):
        # Regression test for gh-5940: sf et al. would underflow too early.
        x2, mu, sigma = 201.68, 195, 0.149
        shifted = x2 - mu
        assert_allclose(stats.lognorm.sf(shifted, s=sigma),
                        stats.norm.sf(np.log(shifted)/sigma))
        assert_allclose(stats.lognorm.logsf(shifted, s=sigma),
                        stats.norm.logsf(np.log(shifted)/sigma))
class TestBeta(TestCase):
    """Tests for the beta distribution's log-density."""

    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan from a 0*log(0) term.
        # At x=0: (a=1, b=0.5) has density 0.5, (a=0.5, b=1) diverges.
        assert_almost_equal(stats.beta.logpdf(0, 1, 0.5), -0.69314718056)
        assert_almost_equal(stats.beta.logpdf(0, 0.5, 1), np.inf)

    def test_logpdf_ticket_1866(self):
        # Large shape parameters must not overflow; pdf == exp(logpdf).
        a, b = 267, 1472
        pts = np.array([0.2, 0.5, 0.6])
        frozen = stats.beta(a, b)
        assert_allclose(frozen.logpdf(pts).sum(), -1201.699061824062)
        assert_allclose(frozen.pdf(pts), np.exp(frozen.logpdf(pts)))
class TestBetaPrime(TestCase):
    def test_logpdf(self):
        # logpdf should stay finite for large shape parameters and agree
        # with pdf via exp() (analogue of the beta ticket #1866 case).
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))

    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)

        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())

        # check the new cdf implementation vs generic one:
        # NOTE: uses the private rv_continuous._cdf_single (numerical
        # integration of the pdf) as the reference implementation.
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
    """Density checks for the gamma distribution."""

    def test_pdf(self):
        # Point values cross-checked against R's dgamma.
        assert_almost_equal(stats.gamma.pdf(90, 394, scale=1./5), 0.002312341)
        assert_almost_equal(stats.gamma.pdf(3, 10, scale=1./5), 0.1620358)

    def test_logpdf(self):
        # Regression test for Ticket #1326: logpdf(0, 1) is log(1) = 0 and
        # must not produce nan from the 0*log(0) corner case.
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)
class TestChi2(TestCase):
    # Regression tests after precision improvements, ticket:1041.
    # Reference values were recorded from the improved implementation,
    # not independently verified.
    def test_precision(self):
        cases = [((1000, 1000), 8.919133934753128e-003),
                 ((100, 100), 0.028162503162596778)]
        for args, expected in cases:
            assert_almost_equal(stats.chi2.pdf(*args), expected, decimal=14)
class TestArrayArgument(TestCase):  # test for ticket:992
    def test_noexception(self):
        # Array-valued loc/scale combined with a 2-d size used to raise.
        sample = stats.norm.rvs(loc=np.arange(5), scale=np.ones(5),
                                size=(10, 5))
        assert_equal(sample.shape, (10, 5))
class TestDocstring(TestCase):
    def test_docstrings(self):
        # See ticket #761: a distribution's docstring should mention its own
        # name. Guarded with `is not None` because docstrings are stripped
        # when Python runs with -OO.
        if stats.rayleigh.__doc__ is not None:
            self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
        if stats.bernoulli.__doc__ is not None:
            self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())

    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail. See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
class TestEntropy(TestCase):
    """Tests for ``stats.entropy`` (entropy / KL divergence)."""

    def test_entropy_positive(self):
        # See ticket #497: relative entropy is zero against itself and
        # non-negative against any other distribution.
        pk = [0.5, 0.2, 0.3]
        qk = [0.1, 0.25, 0.65]
        assert_(0.0 == stats.entropy(pk, pk))
        assert_(stats.entropy(pk, qk) >= 0.0)

    def test_entropy_base(self):
        # base=2 rescales the result by log(2).
        pk = np.ones(16, float)
        assert_(abs(stats.entropy(pk, base=2.) - 4.) < 1.e-5)

        qk = np.ones(16, float)
        qk[:8] = 2.
        ratio = stats.entropy(pk, qk) / stats.entropy(pk, qk, base=2.)
        assert_(abs(ratio - np.log(2.)) < 1.e-5)

    def test_entropy_zero(self):
        # Test for PR-479: zero probabilities contribute 0, not nan.
        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
                            decimal=12)

    def test_entropy_2d(self):
        # 2-d inputs are reduced along axis 0, giving one value per column.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.1933259, 0.18609809])

    def test_entropy_2d_zero(self):
        # A zero in qk where pk is nonzero yields infinite KL divergence;
        # zeroing the matching pk entry makes it finite again.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [np.inf, 0.18609809])

        pk[0][0] = 0.0
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.17403988, 0.18609809])
def TestArgsreduce():
    # NOTE(review): despite the CamelCase name this is a plain function, not
    # a TestCase subclass; whether a runner collects it depends on its match
    # pattern (nose's default does, pytest's default does not).
    data = array([1, 3, 2, 1, 2, 3, 3])

    # Array condition: keeps only the elements where the condition holds and
    # broadcasts the scalar second argument to the same length.
    kept, scalars = argsreduce(data > 1, data, 2)
    assert_array_equal(kept, [3, 2, 2, 3, 3])
    assert_array_equal(scalars, [2, 2, 2, 2, 2])

    # Scalar (true) condition.
    kept, scalars = argsreduce(2 > 1, data, 2)
    assert_array_equal(kept, data[0])
    assert_array_equal(scalars, [2])

    # All-true array condition: everything is kept.
    kept, scalars = argsreduce(data > 0, data, 2)
    assert_array_equal(kept, data)
    assert_array_equal(scalars, [2] * numpy.size(data))
class TestFitMethod(object):
    # Distributions whose fit() is known to fail; skipped in the generator
    # tests below.
    skip = ['ncf']

    @dec.slow
    def test_fit(self):
        # nose-style generator test: smoke-test fit() for every distribution
        # yielded by test_all_distributions().
        def check(func, dist, args, alpha):
            if dist in self.skip:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size': 200})
                vals = distfunc.fit(res)
                vals2 = distfunc.fit(res, optimizer='powell')
            # Only check the length of the return
            # FIXME: should check the actual results to see if we are 'close'
            #   to what was created --- but what is 'close' enough
            if dist == 'frechet':
                assert_(len(vals) == len(args))
                assert_(len(vals2) == len(args))
            else:
                assert_(len(vals) == 2+len(args))
                assert_(len(vals2) == 2+len(args))

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    @dec.slow
    def test_fix_fit(self):
        # Generator test: fit() with individual parameters fixed via the
        # floc/fscale/f0/f1/f2 keywords must honour the fixed values.
        def check(func, dist, args, alpha):
            # Not sure why 'ncf', and 'beta' are failing
            # frechet has different len(args) than distfunc.numargs
            if dist in self.skip + ['frechet']:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size': 200})
                vals = distfunc.fit(res, floc=0)
                vals2 = distfunc.fit(res, fscale=1)
                assert_(len(vals) == 2+len(args))
                assert_(vals[-2] == 0)
                assert_(vals2[-1] == 1)
                assert_(len(vals2) == 2+len(args))
                if len(args) > 0:
                    vals3 = distfunc.fit(res, f0=args[0])
                    assert_(len(vals3) == 2+len(args))
                    assert_(vals3[0] == args[0])
                if len(args) > 1:
                    vals4 = distfunc.fit(res, f1=args[1])
                    assert_(len(vals4) == 2+len(args))
                    assert_(vals4[1] == args[1])
                if len(args) > 2:
                    vals5 = distfunc.fit(res, f2=args[2])
                    assert_(len(vals5) == 2+len(args))
                    assert_(vals5[2] == args[2])

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [0.25888672, 0, 20], atol=1e-5)

    def test_fix_fit_norm(self):
        x = np.arange(1, 6)

        # Unconstrained fit: MLE mean and (biased) std of [1..5].
        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        # Fixing loc shifts the fitted scale accordingly.
        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        # Fixing scale leaves the fitted loc at the sample mean.
        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        # With loc fixed at 0, the MLE of a satisfies
        # log(a) - digamma(a) = log(mean(x)) - mean(log(x)).
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        # The remaining free parameter a satisfies
        # digamma(a) = mean(log(x)) - log(scale).
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)

    def test_fshapes(self):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3.)
        res_2 = stats.beta.fit(x, fa=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        # fix_<shape name> is a third spelling of the same constraint.
        res_2 = stats.beta.fit(x, fix_a=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_3 = stats.beta.fit(x, f1=4.)
        res_4 = stats.beta.fit(x, fb=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        res_4 = stats.beta.fit(x, fix_b=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)

        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3)

        # check that specifying floc, fscale and fshapes works for
        # beta and gamma which override the generic fit method
        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
        aa, bb, ll, ss = res_5
        assert_equal([aa, ll, ss], [3., 0, 1])

        # gamma distribution
        a = 3.
        data = stats.gamma.rvs(a, size=100)
        aa, ll, ss = stats.gamma.fit(data, fa=a)
        assert_equal(aa, a)

    def test_extra_params(self):
        # unknown parameters should raise rather than be silently ignored
        dist = stats.exponnorm
        data = dist.rvs(K=2, size=100)
        dct = dict(enikibeniki=-101)
        assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(TestCase):
    # Test that a frozen distribution gives the same results as the original
    # object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def test_norm(self):
        # Each frozen method must agree with the raw distribution called
        # with the same loc/scale.
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        # Support endpoints are carried over to the frozen object.
        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)

    def test_gamma(self):
        # Same agreement checks as test_norm, but freezing a shape
        # parameter instead of loc/scale.
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)

    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)

    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        rv = stats.genpareto(c=-0.1)
        a, b = rv.dist.a, rv.dist.b
        assert_equal([a, b], [0., 10.])
        assert_equal([rv.a, rv.b], [0., 10.])

        # Evaluating the raw distribution at a different c mutates the
        # (shared) dist object's support; the frozen copy must not follow.
        stats.genpareto.pdf(0, c=0.1)  # this changes genpareto.b
        assert_equal([rv.dist.a, rv.dist.b], [a, b])
        assert_equal([rv.a, rv.b], [a, b])

        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)

    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))

    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))

        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())

        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)

    def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))

        for distfn in [beta, poiss, sample]:
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            r0 = distfn.rvs(size=8)

            # The unpickled copy must continue the same random stream.
            unpickled = pickle.loads(s)
            r1 = unpickled.rvs(size=8)
            assert_equal(r0, r1)

            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1]))

    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            return x

        gm = stats.gamma(a=2, loc=3, scale=4)
        gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
        gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                       lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)

        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
    # Test for expect method.
    #
    # Uses normal distribution and beta distribution for finite bounds, and
    # hypergeom for discrete distribution with finite support
    def test_norm(self):
        # E[(X-5)^2] is the variance, E[X] the mean.
        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
        assert_almost_equal(v, 4, decimal=14)

        m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
        assert_almost_equal(m, 5, decimal=14)

        # Integrating the constant 1 over the central 90% interval gives
        # 0.9, or exactly 1 with conditional=True.
        lb = stats.norm.ppf(0.05, loc=5, scale=2)
        ub = stats.norm.ppf(0.95, loc=5, scale=2)
        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
        assert_almost_equal(prob90, 0.9, decimal=14)

        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
                                    conditional=True)
        assert_almost_equal(prob90c, 1., decimal=14)

    def test_beta(self):
        # case with finite support interval
        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
                              loc=5, scale=2)
        assert_almost_equal(v, 1./18., decimal=13)

        m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
        assert_almost_equal(m, 19/3., decimal=13)

        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
        prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
                                   scale=2., lb=lb, ub=ub, conditional=False)
        assert_almost_equal(prob90, 0.9, decimal=13)

        prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
                                    scale=2, lb=lb, ub=ub, conditional=True)
        assert_almost_equal(prob90c, 1., decimal=13)

    def test_hypergeom(self):
        # test case with finite bounds

        # without specifying bounds
        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
        assert_almost_equal(m, m_true, decimal=13)

        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                   loc=5.)
        assert_almost_equal(v, v_true, decimal=14)

        # with bounds, bounds equal to shifted support
        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
                                          args=(20, 10, 8),
                                          loc=5., lb=5, ub=13)
        assert_almost_equal(v_bounds, v_true, decimal=14)

        # drop boundary points
        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                             loc=5., lb=6, ub=12)
        assert_almost_equal(prob_bounds, prob_true, decimal=13)

        # conditional
        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
                                         lb=6, ub=12, conditional=True)
        assert_almost_equal(prob_bc, 1, decimal=14)

        # check simple integral
        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                        lb=0, ub=8)
        assert_almost_equal(prob_b, 1, decimal=13)

    def test_poisson(self):
        # poisson, use lower bound only
        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
                                           conditional=False)
        prob_b_true = 1-stats.poisson.cdf(2, 2)
        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)

        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
                                       conditional=True)
        assert_almost_equal(prob_lb, 1, decimal=14)

    def test_genhalflogistic(self):
        # genhalflogistic, changes upper bound of support in _argcheck
        # regression test for gh-2622
        halflog = stats.genhalflogistic
        # check consistency when calling expect twice with the same input
        res1 = halflog.expect(args=(1.5,))
        halflog.expect(args=(0.5,))
        res2 = halflog.expect(args=(1.5,))
        assert_almost_equal(res1, res2, decimal=14)

    def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows
        # check that using i0e fixes it
        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))

        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))

    def test_logser(self):
        # test a discrete distribution with infinite support and loc
        p, loc = 0.3, 3
        res_0 = stats.logser.expect(lambda k: k, args=(p,))
        # check against the correct answer (sum of a geom series)
        assert_allclose(res_0,
                        p / (p - 1.) / np.log(1. - p), atol=1e-15)

        # now check it with `loc`
        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
        assert_allclose(res_l, res_0 + loc, atol=1e-15)

    def test_skellam(self):
        # Use a discrete distribution w/ bi-infinite support. Compute two first
        # moments and compare to known values (cf skellam.stats)
        p1, p2 = 18, 22
        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
        assert_allclose(m1, p1 - p2, atol=1e-12)
        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)

    def test_randint(self):
        # Use a discrete distribution w/ parameter-dependent support, which
        # is larger than the default chunksize
        lo, hi = 0, 113
        res = stats.randint.expect(lambda x: x, (lo, hi))
        assert_allclose(res,
                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)

    def test_zipf(self):
        # Test that there is no infinite loop even if the sum diverges
        assert_warns(RuntimeWarning, stats.zipf.expect,
                     lambda x: x**2, (2,))

    def test_discrete_kwds(self):
        # check that discrete expect accepts keywords to control the summation
        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
                                  maxcount=1001, chunksize=32, tolerance=1e-8)
        assert_almost_equal(n0, n1, decimal=14)

    def test_moment(self):
        # test the .moment() method: compute a higher moment and compare to
        # a known value
        def poiss_moment5(mu):
            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu

        for mu in [5, 7]:
            m5 = stats.poisson.moment(5, mu)
            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def text_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):
    """Tests for the Rice distribution, in particular the b=0 edge case."""

    def test_rice_zero_b(self):
        # The distribution should remain well defined at b=0, cf gh-2164.
        x = [0.2, 1., 5.]
        for fn in (stats.rice.pdf, stats.rice.logpdf,
                   stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(fn(x, b=0.)).all())

        q = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())

        mvsk = stats.rice.stats(0, moments='mvsk')
        assert_(np.isfinite(mvsk).all())

        # Furthermore, the pdf is continuous as b -> 0:
        # rice.pdf(x, b -> 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        b = 1e-8
        assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
                        atol=b, rtol=0)

    def test_rice_rvs(self):
        # Scalar and array draws honour the requested size.
        rvs = stats.rice.rvs
        assert_equal(rvs(b=3.).size, 1)
        assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(TestCase):
    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)

            # The non-integer shape parameter 1.3 should trigger a
            # RuntimeWarning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning.  It should return the same
            # values as gamma.fit(...).
            data = [0.5, 1.0, 2.0, 4.0]
            result_erlang = stats.erlang.fit(data, f0=1)
            result_gamma = stats.gamma.fit(data, f0=1)
            assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestExponWeib(TestCase):
    """Tests for the exponentiated Weibull distribution (gh-3508)."""

    def test_pdf_logpdf(self):
        # Regression test for gh-3508; expected values computed with mpmath.
        x, a, c = 0.1, 1.0, 100.0
        assert_allclose([stats.exponweib.pdf(x, a, c),
                         stats.exponweib.logpdf(x, a, c)],
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508: with a=1 the distribution degenerates to
        # weibull_min, so pdf and logpdf must agree with it.
        x = np.logspace(-4, -1, 4)
        a, c = 1, 100
        assert_allclose(stats.exponweib.pdf(x, a, c),
                        stats.weibull_min.pdf(x, c))
        assert_allclose(stats.exponweib.logpdf(x, a, c),
                        stats.weibull_min.logpdf(x, c))

    def test_a_is_1_c_is_1(self):
        # With a = c = 1 the distribution is exponential.
        x = np.logspace(-8, 1, 10)
        a = c = 1
        assert_allclose(stats.exponweib.pdf(x, a, c), stats.expon.pdf(x))
        assert_allclose(stats.exponweib.logpdf(x, a, c),
                        stats.expon.logpdf(x))
class TestRdist(TestCase):
    @dec.slow
    def test_rdist_cdf_gh1285(self):
        """cdf(ppf(q)) round-trips for a large shape (workaround, gh-1285)."""
        quantiles = [0.001, 0.5, 0.999]
        shape = 541.0
        roundtrip = stats.rdist.cdf(stats.rdist.ppf(quantiles, shape), shape)
        assert_almost_equal(roundtrip, quantiles, decimal=5)
def test_540_567():
    # Regression test for tickets 540 and 567: these norm.cdf evaluations
    # used to return nan.
    cases = [
        ((-1.7624320982,), {}, 0.03899815971089126),
        ((-1.7624320983,), {}, 0.038998159702449846),
        ((1.38629436112,),
         dict(loc=0.950273420309, scale=0.204423758009),
         0.98353464004309321),
    ]
    for args, kwargs, expected in cases:
        assert_almost_equal(stats.norm.cdf(*args, **kwargs), expected,
                            decimal=10, err_msg='test_540_567')


def test_regression_ticket_1316():
    # Instantiation used to raise because _construct_default_doc() did not
    # handle the default keyword extradoc=None.  See ticket #1316.
    stats._continuous_distns.gamma_gen(name='gamma')


def test_regression_ticket_1326():
    # chi2.pdf(0, 2) used to produce nan from 0*log(0); must be 0.5.
    assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
    # Make sure that the Tukey-Lambda distribution correctly handles
    # non-positive lambdas: the pdf must be nonzero and nan-free.
    x = np.linspace(-5.0, 5.0, 101)

    olderr = np.seterr(divide='ignore')
    try:
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            p = stats.tukeylambda.pdf(x, lam)
            assert_((p != 0.0).all())
            # BUG FIX: the original asserted ``~np.isnan(p).all()``, which
            # only checks that *not every* value is nan.  For lam <= 0 the
            # support is the whole real line, so no value may be nan.
            assert_(not np.isnan(p).any())

        lam = np.array([[-1.0], [0.0], [2.0]])
        p = stats.tukeylambda.pdf(x, lam)
    finally:
        np.seterr(**olderr)

    # Same fix as above: require *no* nans, not merely "not all nan".
    assert_(not np.isnan(p).any())
    assert_((p[0] != 0.0).all())
    assert_((p[1] != 0.0).all())
    # For lam = 2 the support is bounded: nonzero inside, zero outside.
    assert_((p[2] != 0.0).any())
    assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
    # poisson is a discrete distribution, so its generated docstring must
    # advertise pmf-style methods, never the continuous pdf signature.
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    # Every basic method must propagate nan rather than raise (gh-1362).
    with np.errstate(invalid='ignore'):
        # Continuous case: nan shape parameter for Student's t.
        for name in ('logcdf', 'cdf', 'logsf', 'sf',
                     'pdf', 'logpdf', 'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, name)(1, np.nan)))

        # Discrete case: nan argument for the Bernoulli distribution.
        for name in ('logcdf', 'cdf', 'logsf', 'sf',
                     'pmf', 'logpmf', 'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, name)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # Fitting with frozen parameters must recover the true values.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)

    # Frozen location: divide warnings are expected and silenced.
    olderr = np.seterr(divide='ignore')
    try:
        params = np.array(stats.lognorm.fit(x, floc=0.))
    finally:
        np.seterr(**olderr)
    assert_almost_equal(params, true, decimal=2)

    # Other frozen/starting-value combinations must agree as well.
    for kwargs in (dict(fscale=0.5, loc=0),
                   dict(f0=0.25, loc=0),
                   dict(f0=0.25, floc=0)):
        params = np.array(stats.lognorm.fit(x, **kwargs))
        assert_almost_equal(params, true, decimal=2)

    # Normal fit with frozen loc: the scale MLE is the rms deviation
    # about floc.
    np.random.seed(5678)
    loc, floc = 1, 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    expected = np.array([floc, np.sqrt(((x - floc) ** 2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    # The default starting value must let a Cauchy fit converge.
    np.random.seed(654321)
    sample = stats.cauchy.rvs(size=100)
    assert_almost_equal(stats.cauchy.fit(sample), (0.045, 1.142), decimal=1)


def test_gh_pr_4806():
    # Cauchy fit starting values must cope with large location offsets.
    np.random.seed(1234)
    base = np.random.randn(42)
    for offset in (10000.0, 1222333444.0):
        loc, scale = stats.cauchy.fit(base + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    # Variance and kurtosis of the Tukey Lambda distribution; see
    # test_tukeylambda_stats.py for more tests.  The lam=0 row holds the
    # known exact values; the others were computed with mpmath.
    cases = [
        (0, [0, np.pi ** 2 / 3, 0, 1.2]),
        (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
        (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456]),
    ]
    for lam, expected in cases:
        mvsk = stats.tukeylambda.stats(lam, moments='mvsk')
        assert_almost_equal(mvsk, expected, decimal=10)


def test_poisson_logpmf_ticket_1436():
    # logpmf deep in the tail must stay finite (no overflow to -inf/nan).
    assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
    """Test the powerlaw stats function.

    This unit test is also a regression test for ticket 1548.

    With E[X**k] = a / (a + k), the exact values are:

    mean:      mu = a / (a + 1)
    variance:  sigma**2 = a / ((a + 2) * (a + 1)**2)
    skewness:  gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
               (simplified from
                (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3,
                see http://en.wikipedia.org/wiki/Skewness)
    kurtosis:  the excess kurtosis gamma_2 = mu_4 / sigma**4 - 3 with
               mu_4 = 3*a*(3*a**2 - a + 2)
                      / ((a+1)**4 * (a+2) * (a+3) * (a+4))
               simplifies (sympy helps) to
               gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4)),
               see http://en.wikipedia.org/wiki/Kurtosis.
    """
    cases = [(1.0, (0.5, 1. / 12, 0.0, -1.2)),
             (2.0, (2. / 3, 2. / 36, -0.56568542494924734, -0.6))]
    for a, exact_mvsk in cases:
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, exact_mvsk)
def test_powerlaw_edge():
    # Regression test for gh-3986: logpdf at the left endpoint.
    assert_equal(stats.powerlaw.logpdf(0, 1), 0.0)


def test_exponpow_edge():
    # Regression test for gh-3982: logpdf at x = 0 with b = 1.
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)

    # pdf and logpdf at x = 0 for b below, at, and above 1.
    bvals = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, bvals), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, bvals), [np.inf, 0.0, -np.inf])


def test_gengamma_edge():
    # Regression test for gh-3985: pdf at x = 0.
    assert_equal(stats.gengamma.pdf(0, 1, 1), 1.0)

    # Regression tests for gh-4724: negative moments for large shape a.
    assert_almost_equal(stats.gengamma._munp(-2, 200, 1.), 1. / 199 / 198)
    assert_almost_equal(stats.gengamma._munp(-2, 10, 1.), 1. / 9 / 8)
def test_ksone_fit_freeze():
    # Regression test for ticket #1638: fitting this sample used to hang.
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])

    # BUG FIX: np.seterr used to be called *inside* the ``try`` block, so a
    # failure in seterr itself would raise NameError on ``olderr`` in the
    # ``finally`` clause.  Acquire the old state before entering try/finally.
    olderr = np.seterr(invalid='ignore')
    try:
        with warnings.catch_warnings():
            # The fit is best-effort here; we only check it terminates.
            warnings.simplefilter('ignore', UserWarning)
            warnings.simplefilter('ignore', RuntimeWarning)
            stats.ksone.fit(d)
    finally:
        np.seterr(**olderr)
def test_norm_logcdf():
    # Precision of norm.logcdf deep in the left tail; enhanced in
    # ticket 1614.
    x = -np.arange(0, 120, 4)

    # Reference values from R.
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]

    olderr = np.seterr(divide='ignore')
    try:
        assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
    finally:
        np.seterr(**olderr)
def test_levy_cdf_ppf():
    # levy.cdf must be accurate for small arguments, and ppf must invert it.
    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])

    # Expected values were calculated separately with mpmath, e.g.
    #   >>> mpmath.mp.dps = 100
    #   >>> x = mpmath.mp.mpf('0.01')
    #   >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
    expected = np.array([0.9747728793699604,
                         0.3173105078629141,
                         0.1572992070502851,
                         0.0015654022580025495,
                         1.523970604832105e-23,
                         1.795832784800726e-219])

    assert_allclose(stats.levy.cdf(x), expected, rtol=1e-10)
    # The quantile function applied to the expected values takes us back
    # to x.
    assert_allclose(stats.levy.ppf(expected), x, rtol=1e-13)
def test_hypergeom_interval_1802():
    # The first two confidence levels used to loop forever; the third
    # worked before the fix as well.
    for conf, expected in ((.95, (152.0, 197.0)),
                           (.945, (152.0, 197.0)),
                           (.94, (153.0, 196.0))):
        assert_equal(stats.hypergeom.interval(conf, 187601, 43192, 757),
                     expected)

    # Degenerate case where the support collapses to one point (.a == .b).
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    # Check that a TypeError is raised when too many args are given to a method
    # Regression test for ticket 1815.
    x = np.linspace(0.1, 0.7, num=5)

    # gamma takes exactly one shape parameter; an extra positional argument
    # beyond shape/loc/scale, or a positional colliding with a loc/scale
    # keyword, must raise TypeError.
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)

    # These should not give errors: positional args fill shape, then loc,
    # then scale.
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)

    # Also for a discrete distribution (discrete has no scale parameter).
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)

    # And for distributions with 0, 2 and 3 args respectively
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    # Trac #955: the cdf computed by special functions must match the
    # generic integrated-pdf fallback.
    grid = np.arange(20, 25, 0.2)
    special = stats.ncx2.cdf(grid, 2, 1.07458615e+02)
    integrated = stats.ncx2._cdfvec(grid, 2, 1.07458615e+02)
    assert_allclose(special, integrated, rtol=1e-3, atol=0)


def test_ncx2_tails_pdf():
    # ncx2.pdf must underflow to 0, not nan, in extreme tails (gh-1577).
    # NB: this is to check that nan_to_num is not needed in ncx2.pdf.
    dfs = np.arange(340, 350)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        assert_equal(stats.ncx2.pdf(1, dfs, 2), 0)
        logval = stats.ncx2.logpdf(1, dfs, 2)
    assert_(np.isneginf(logval).all())
def test_foldnorm_zero():
    # The shape value c=0 used to be rejected, see gh-2399.
    frozen = stats.foldnorm(0, scale=1)
    assert_equal(frozen.cdf(0), 0)  # previously resulted in nan


def test_stats_shapes_argcheck():
    # stats() used to fail on vector shape parameters when some entries
    # were out of the allowed range; invalid entries must map to nan
    # instead (gh-2678).

    # 0 is not a legal value of `a` for invgamma.
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    expected = tuple(np.r_[np.nan, valid] for valid in mv2)
    assert_equal(expected, mv3)

    # -1 is not a legal shape parameter for lognorm.
    mv3 = stats.lognorm.stats([2, 2.4, -1])
    mv2 = stats.lognorm.stats([2, 2.4])
    expected = tuple(np.r_[valid, np.nan] for valid in mv2)
    assert_equal(expected, mv3)

    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
# NOTE: these helper distributions are deliberately minimal (and in one case
# deliberately inconsistent); the subclassing tests below rely on their exact
# signatures, so do not "fix" them.

class _distr_gen(stats.rv_continuous):
    # One shape parameter `a`; only _pdf is supplied.  The constant 42 makes
    # it trivial to verify which method got dispatched to.
    def _pdf(self, x, a):
        return 42


class _distr2_gen(stats.rv_continuous):
    # Only _cdf is supplied, so the public pdf must come from numerical
    # differentiation of this cdf.
    def _cdf(self, x, a):
        return 42 * a + x


class _distr3_gen(stats.rv_continuous):
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency."""
        return 42 * a + x


class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes(TestCase):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
    # Construct a distribution w/o explicit shapes parameter and test it.

    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        # FIX: use raw strings for the regex patterns -- '\(' inside a plain
        # string literal is an invalid escape sequence in Python 3
        # (DeprecationWarning, later SyntaxWarning).  The runtime value is
        # unchanged.
        res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        # (rewritten from try/except/else to the assert_raises idiom used
        # throughout this file)
        assert_raises(TypeError, _distr3_gen, name='dummy')

    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
    # Generated distribution docstrings must not contain stray comma/paren
    # artifacts.
    # FIX: raw strings for the regex patterns -- '\s' and '\(' inside plain
    # string literals are invalid escape sequences in Python 3; the runtime
    # values are unchanged.
    badones = [r',\s*,', r'\(\s*,', r'^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_(re.search(regex, dist.__doc__) is None)


def test_infinite_input():
    # Infinite arguments must produce the correct limiting values, not nan.
    assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
    assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
    # Regression test for gh-4033: ppf must invert cdf near the left edge.
    assert_allclose(stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1), 1e-100)


def test_gompertz_accuracy():
    # Regression test for gh-4031.
    assert_allclose(stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1),
                    1e-100)


def test_truncexpon_accuracy():
    # Regression test for gh-4035.
    assert_allclose(stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1),
                    1e-100)


def test_rayleigh_accuracy():
    # Regression test for gh-4034: isf must invert sf accurately.
    assert_almost_equal(stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1),
                        9.0, decimal=15)
def test_genextreme_entropy():
    # Regression test for gh-5181.  All six original cases follow the
    # closed form  entropy = euler_gamma*(1 - c) + log(scale) + 1.
    euler_gamma = 0.5772156649015329
    for c, scale in [(-1.0, 1), (0, 1), (1.0, 1),
                     (-2.0, 10), (10, 1), (-10, 1)]:
        h = stats.genextreme.entropy(c, scale=scale)
        expected = euler_gamma * (1 - c) + np.log(scale) + 1
        assert_allclose(h, expected, rtol=1e-14)
if __name__ == "__main__":
    # Allow executing this test file directly, outside of a test runner.
    run_module_suite()
| niknow/scipy | scipy/stats/tests/test_distributions.py | Python | bsd-3-clause | 94,548 | [
"Gaussian"
] | 90713e01cdd7eefe09c5175b482ae1f3c696d11235e0c412d60b1cdf2beb5c76 |
"""
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and access other resources
        the user has permissions for.

        Users are redirected to this endpoint after they sign in.

        You can use the **course_enrollments** value in the response to get a
        list of courses the user is enrolled in.

    **Example Request**

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        If the request is successful, the request returns an HTTP 200 "OK" response.

        The HTTP 200 response has the following values.

        * course_enrollments: The URI to list the courses the currently signed
          in user is enrolled in.
        * email: The email address of the currently signed in user.
        * id: The ID of the user.
        * name: The full name of the currently signed in user.
        * username: The username of the currently signed in user.
    """
    # select_related('profile') pulls the related profile row in the same
    # query -- presumably because UserSerializer reads profile fields; confirm
    # against the serializer before removing.
    queryset = (
        User.objects.all()
        .select_related('profile')
    )
    serializer_class = UserSerializer
    # Look users up by the username in the URL, not the numeric primary key.
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Cases**

        Get or update the ID of the module that the specified user last
        visited in the specified course.

    **Example Requests**

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

    **PATCH Parameters**

        The body of the PATCH request can include the following parameters.

        * last_visited_module_id={module_id}
        * modification_date={date}

          The modification_date parameter is optional. If it is present, the
          update will only take effect if the modification_date in the
          request is later than the modification_date saved on the server.

    **Response Values**

        If the request is successful, the request returns an HTTP 200 "OK" response.

        The HTTP 200 response has the following values.

        * last_visited_module_id: The ID of the last module that the user
          visited in the course.
        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """

    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        # depth=2 loads course -> chapter -> section state in one cache fill.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)

        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course.id, course=course
        )

        # Walk down at most two levels (chapter, then section), following the
        # user's saved position at each level; missing levels are simply
        # omitted from the path.
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)
        # Deepest module first, course module last.
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [unicode(module.location) for module in path]
        return Response({
            # path_ids[0] is the deepest (most recently visited) module.
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(
            request.user, request, module_descriptor, field_data_cache, course.id, course=course
        )

        if modification_date:
            # Compare against the stored 'position' field's last-modified
            # time; an older client timestamp means this update is stale.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)

        # Persist the new position on the module and all its ancestors.
        save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.data.get("last_visited_module_id")
        modification_date_string = request.data.get("modification_date")

        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # A parse failure or a naive (tz-less) datetime is rejected.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)

        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses that the currently signed in user is
        enrolled in.

    **Example Request**

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        If the request for information about the user is successful, the
        request returns an HTTP 200 "OK" response.

        The HTTP 200 response has the following values.

        * certificate: Information about the user's earned certificate in the
          course.
        * course: A collection of the following data about the course.

          * courseware_access: A JSON representation with access information for the course,
            including any access errors.
          * course_about: The URL to the course about page.
          * course_handouts: The URI to get data for course handouts.
          * course_image: The path to the course image.
          * course_updates: The URI to get data for course updates.
          * discussion_url: The URI to access data for course discussions if
            it is enabled, otherwise null.
          * end: The end date of the course.
          * id: The unique ID of the course.
          * latest_updates: Reserved for future use.
          * name: The name of the course.
          * number: The course number.
          * org: The organization that created the course.
          * start: The date and time when the course starts.
          * start_display:
            If start_type is a string, then the advertised_start date for the course.
            If start_type is a timestamp, then a formatted date for the start of the course.
            If start_type is empty, then the value is None and it indicates that the course has not yet started.
          * start_type: One of either "string", "timestamp", or "empty"
          * subscription_id: A unique "clean" (alphanumeric with '_') ID of
            the course.
          * video_outline: The URI to get the list of all videos that the user
            can access in the course.

        * created: The date the course was created.
        * is_active: Whether the course is currently active. Possible values
          are true or false.
        * mode: The type of certificate registration for this course (honor or
          certified).
        * url: URL to the downloadable version of the certificate, if exists.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    # Django Rest Framework v3 ships a default pagination class that wraps
    # the list payload in a dict under a "results" key.  The pre-existing
    # API contract returns a bare list, so pagination is switched off here.
    pagination_class = None

    def get_queryset(self):
        # Newest enrollments first: ordering by '-created' is equivalent to
        # the previous order_by('created').reverse().
        active_enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True,
        ).order_by('-created')

        # Only expose enrollments whose course still exists (has an
        # overview) and is available on mobile for this user.
        visible = []
        for enrollment in active_enrollments:
            overview = enrollment.course_overview
            if overview and is_mobile_available_for_user(self.request.user, overview):
                visible.append(enrollment)
        return visible
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """
    Redirect to the currently-logged-in user's info page
    """
    # "user-detail" is the URL name of the UserDetail view above; send the
    # caller to their own detail endpoint.
    return redirect("user-detail", username=request.user.username)
| IndonesiaX/edx-platform | lms/djangoapps/mobile_api/users/views.py | Python | agpl-3.0 | 11,148 | [
"VisIt"
] | 4453e7413b08f17b921468122de7810d8de2c8e45a314ed030f015b890a948a7 |
# $HeadURL: $
''' ResourceManagementDB
Module that provides basic methods to access the ResourceManagementDB.
'''
from datetime import datetime
import sys
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.ResourceStatusSystem.Utilities import MySQLWrapper
__RCSID__ = '$Id: $'
class ResourceManagementDB( object ):
'''
Class that defines the tables for the ResourceManagementDB on a python dictionary.
'''
# Written PrimaryKey as list on purpose !!
_tablesDB = {}
_tablesDB[ 'AccountingCache' ] = { 'Fields' :
{
#'AccountingCacheID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'PlotType' : 'VARCHAR(16) NOT NULL',
'PlotName' : 'VARCHAR(64) NOT NULL',
'Result' : 'TEXT NOT NULL',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Name', 'PlotType', 'PlotName' ]
}
_tablesDB[ 'DowntimeCache' ] = { 'Fields' :
{
'DowntimeID' : 'VARCHAR(64) NOT NULL',
'Element' : 'VARCHAR(32) NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'StartDate' : 'DATETIME NOT NULL',
'EndDate' : 'DATETIME NOT NULL',
'Severity' : 'VARCHAR(32) NOT NULL',
'Description' : 'VARCHAR(512) NOT NULL',
'Link' : 'VARCHAR(255) NOT NULL',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL',
'GOCDBServiceType' : 'VARCHAR(32) NOT NULL'
},
'PrimaryKey' : [ 'DowntimeID' ]
}
_tablesDB[ 'GGUSTicketsCache' ] = { 'Fields' :
{
'GocSite' : 'VARCHAR(64) NOT NULL',
'Link' : 'VARCHAR(1024) NOT NULL',
'OpenTickets' : 'INTEGER NOT NULL DEFAULT 0',
'Tickets' : 'VARCHAR(1024) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'GocSite' ]
}
_tablesDB[ 'JobCache' ] = { 'Fields' :
{
'Site' : 'VARCHAR(64) NOT NULL',
'Timespan' : 'INTEGER NOT NULL',
'Checking' : 'INTEGER NOT NULL DEFAULT 0',
'Completed' : 'INTEGER NOT NULL DEFAULT 0',
'Done' : 'INTEGER NOT NULL DEFAULT 0',
'Failed' : 'INTEGER NOT NULL DEFAULT 0',
'Killed' : 'INTEGER NOT NULL DEFAULT 0',
'Matched' : 'INTEGER NOT NULL DEFAULT 0',
'Received' : 'INTEGER NOT NULL DEFAULT 0',
'Running' : 'INTEGER NOT NULL DEFAULT 0',
'Staging' : 'INTEGER NOT NULL DEFAULT 0',
'Stalled' : 'INTEGER NOT NULL DEFAULT 0',
'Waiting' : 'INTEGER NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Site', 'Timespan' ]
}
_tablesDB[ 'PilotCache' ] = { 'Fields' :
{
'CE' : 'VARCHAR(64) NOT NULL',
'Timespan' : 'INTEGER NOT NULL',
'Scheduled' : 'INTEGER NOT NULL DEFAULT 0',
'Waiting' : 'INTEGER NOT NULL DEFAULT 0',
'Submitted' : 'INTEGER NOT NULL DEFAULT 0',
'Running' : 'INTEGER NOT NULL DEFAULT 0',
'Done' : 'INTEGER NOT NULL DEFAULT 0',
'Aborted' : 'INTEGER NOT NULL DEFAULT 0',
'Cancelled' : 'INTEGER NOT NULL DEFAULT 0',
'Deleted' : 'INTEGER NOT NULL DEFAULT 0',
'Failed' : 'INTEGER NOT NULL DEFAULT 0',
'Held' : 'INTEGER NOT NULL DEFAULT 0',
'Killed' : 'INTEGER NOT NULL DEFAULT 0',
'Stalled' : 'INTEGER NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'CE', 'Timespan' ]
}
_tablesDB[ 'PolicyResult' ] = { 'Fields' :
{
'Element' : 'VARCHAR(32) NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'PolicyName' : 'VARCHAR(64) NOT NULL',
'StatusType' : 'VARCHAR(16) NOT NULL DEFAULT ""',
'Status' : 'VARCHAR(16) NOT NULL',
'Reason' : 'VARCHAR(512) NOT NULL DEFAULT "Unspecified"',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Element', 'Name', 'StatusType', 'PolicyName' ]
}
_tablesDB[ 'SpaceTokenOccupancyCache' ] = { 'Fields' :
{
'Endpoint' : 'VARCHAR( 64 ) NOT NULL',
'Token' : 'VARCHAR( 64 ) NOT NULL',
'Total' : 'DOUBLE NOT NULL DEFAULT 0',
'Guaranteed' : 'DOUBLE NOT NULL DEFAULT 0',
'Free' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Endpoint', 'Token' ]
}
_tablesDB[ 'TransferCache' ] = { 'Fields' :
{
'SourceName' : 'VARCHAR( 64 ) NOT NULL',
'DestinationName' : 'VARCHAR( 64 ) NOT NULL',
'Metric' : 'VARCHAR( 16 ) NOT NULL',
'Value' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'SourceName', 'DestinationName', 'Metric' ]
}
_tablesDB[ 'UserRegistryCache' ] = { 'Fields' :
{
'Login' : 'VARCHAR(16)',
'Name' : 'VARCHAR(64) NOT NULL',
'Email' : 'VARCHAR(64) NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Login' ]
}
_tablesDB[ 'VOBOXCache' ] = { 'Fields' :
{
'Site' : 'VARCHAR( 64 ) NOT NULL',
'System' : 'VARCHAR( 64 ) NOT NULL',
'ServiceUp' : 'INTEGER NOT NULL DEFAULT 0',
'MachineUp' : 'INTEGER NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'Site', 'System' ]
}
_tablesDB[ 'ErrorReportBuffer' ] = { 'Fields' :
{
'ID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'ElementType' : 'VARCHAR(32) NOT NULL',
'Reporter' : 'VARCHAR(64) NOT NULL',
'ErrorMessage' : 'VARCHAR(512) NOT NULL',
'Operation' : 'VARCHAR(64) NOT NULL',
'Arguments' : 'VARCHAR(512) NOT NULL DEFAULT ""',
'DateEffective' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'ID' ]
}
_tablesLike = {}
_tablesLike[ 'PolicyResultWithID' ] = { 'Fields' :
{
'ID' : 'INT UNSIGNED AUTO_INCREMENT NOT NULL',
'Element' : 'VARCHAR(32) NOT NULL',
'Name' : 'VARCHAR(64) NOT NULL',
'PolicyName' : 'VARCHAR(64) NOT NULL',
'StatusType' : 'VARCHAR(16) NOT NULL DEFAULT ""',
'Status' : 'VARCHAR(8) NOT NULL',
'Reason' : 'VARCHAR(512) NOT NULL DEFAULT "Unspecified"',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'
},
'PrimaryKey' : [ 'ID' ]
}
_likeToTable = {
'PolicyResultLog' : 'PolicyResultWithID',
'PolicyResultHistory' : 'PolicyResultWithID',
}
def __init__( self, maxQueueSize = 10, mySQL = None, checkTables = False ):
'''
Constructor, accepts any DB or mySQL connection, mostly used for testing
purposes.
'''
self._tableDict = self.__generateTables()
if mySQL is not None:
self.database = mySQL
else:
self.database = DB( 'ResourceManagementDB',
'ResourceStatus/ResourceManagementDB', maxQueueSize )
if checkTables:
result = self._createTables( self._tablesDict )
if not result['OK']:
error = 'Failed to check/create tables'
self.log.fatal( 'ResourceManagementDB: %s' % error )
sys.exit( error )
if result['Value']:
self.log.info( "ResourceManagementDB: created tables %s" % result['Value'] )
## SQL Methods ###############################################################
def insert( self, params, meta ):
  '''
  Insert a new row into the table named by meta[ 'table' ] ( filled
  automatically by the Client ).

  :Parameters:
    **params** - `dict`
      column -> value mapping for the new row ( must match table columns ! ).
    **meta** - `dict`
      query metadata; must contain at least the `table` key with the proper
      table name.

  :return: S_OK() || S_ERROR()
  '''
  utcNow = datetime.utcnow().replace( microsecond = 0 )
  # Timestamp columns passed explicitly as None are stamped with "now".
  for timeColumn in ( 'lastCheckTime', 'dateEffective' ):
    if timeColumn in params and params[ timeColumn ] is None:
      params[ timeColumn ] = utcNow
  return MySQLWrapper.insert( self, params, meta )
def update( self, params, meta ):
  '''
  Update the row(s) selected through the table primary keys with the values
  given in params. Selection can be tuned through meta; the `table` key is
  mandatory ( filled automatically by the Client ).

  :Parameters:
    **params** - `dict`
      column -> value mapping ( must match table columns ! ).
    **meta** - `dict`
      query metadata; must contain at least the `table` key with the proper
      table name.

  :return: S_OK() || S_ERROR()
  '''
  # An explicit None for lastCheckTime means "stamp with the current UTC time".
  if params.get( 'lastCheckTime', False ) is None:
    params[ 'lastCheckTime' ] = datetime.utcnow().replace( microsecond = 0 )
  return MySQLWrapper.update( self, params, meta )
def select( self, params, meta ):
'''
Uses arguments to build conditional SQL statement ( WHERE ... ). If the
sql statement desired is more complex, you can use kwargs to interact with
the MySQL buildCondition parser and generate a more sophisticated query.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
# Plain delegation: all SELECT logic ( column mapping, WHERE building )
# lives in MySQLWrapper.
return MySQLWrapper.select( self, params, meta )
def delete( self, params, meta ):
'''
Uses arguments to build conditional SQL statement ( WHERE ... ). If the
sql statement desired is more complex, you can use kwargs to interact with
the MySQL buildCondition parser and generate a more sophisticated query.
There is only one forbidden query, with all parameters None ( this would
mean a query of the type `DELETE * from TableName` ). The usage of kwargs
is the same as in the get function.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
# The all-parameters-None guard described above is enforced by the wrapper.
return MySQLWrapper.delete( self, params, meta )
## Extended SQL methods ######################################################
def addOrModify( self, params, meta ):
'''
Using the PrimaryKeys of the table, it looks for the record in the database.
If it is there, it is updated, if not, it is inserted as a new entry.
:Parameters:
**params** - `dict`
arguments for the mysql query ( must match table columns ! ).
**meta** - `dict`
metadata for the mysql query. It must contain, at least, `table` key
with the proper table name.
:return: S_OK() || S_ERROR()
'''
selectQuery = self.select( params, meta )
if not selectQuery[ 'OK' ]:
return selectQuery
isUpdate = False
if selectQuery[ 'Value' ]:
# Pseudo - code
# for all column not being PrimaryKey and not a time column:
# if one or more column different than params if not None:
# we update dateTime as well
columns = selectQuery[ 'Columns' ]
values = selectQuery[ 'Value' ]
if len( values ) != 1:
return S_ERROR( 'More than one value returned on addOrModify, please report !!' )
selectDict = dict( zip( columns, values[ 0 ] ) )
newDateEffective = None
for key, value in params.items():
if key in ( 'lastCheckTime', 'dateEffective' ):
continue
if value is None:
continue
# params keys are camelCase while DB columns are CapitalCase, hence the
# first-letter upper-casing before comparing against the selected row.
if value != selectDict[ key[0].upper() + key[1:] ]:
# At least one non-time column changed: refresh dateEffective.
newDateEffective = datetime.utcnow().replace( microsecond = 0 )
break
# NOTE(review): when nothing changed, newDateEffective stays None and is
# written into params; update() only re-stamps lastCheckTime, so the None
# is passed through to the SQL layer -- confirm MySQLWrapper treats it as
# "leave the column untouched".
if 'dateEffective' in params:
params[ 'dateEffective' ] = newDateEffective
userQuery = self.update( params, meta )
isUpdate = True
else:
userQuery = self.insert( params, meta )
# This part only applies to PolicyResult table
logResult = self._logRecord( params, meta, isUpdate )
if not logResult[ 'OK' ]:
return logResult
return userQuery
# FIXME: this method looks unused. Maybe can be removed from the code.
def addIfNotThere( self, params, meta ):
  '''
  Insert params as a new row unless a row matching the table PrimaryKeys
  already exists; in that case the ( non-empty ) select result is returned
  untouched.

  :Parameters:
    **params** - `dict`
      arguments for the mysql query ( must match table columns ! ).
    **meta** - `dict`
      metadata for the mysql query. It must contain, at least, `table` key
      with the proper table name.

  :return: S_OK() || S_ERROR()
  '''
  existing = self.select( params, meta )
  # Errors and non-empty results are propagated unchanged.
  if not existing[ 'OK' ] or existing[ 'Value' ]:
    return existing
  return self.insert( params, meta )
## Auxiliar methods ##########################################################
def getTable( self, tableName ):
  '''
  Return the schema dictionary describing tableName, or S_ERROR when the
  table is not part of the schema.
  '''
  try:
    return S_OK( self._tableDict[ tableName ] )
  except KeyError:
    return S_ERROR( '%s is not on the schema' % tableName )
def getTablesList( self ):
'''
Returns a list of the table names in the schema.
'''
# NOTE(review): under Python 3, dict.keys() is a view rather than a list;
# callers expecting a real list should wrap it.
return S_OK( self._tableDict.keys() )
## Protected methods #########################################################
def _logRecord( self, params, meta, isUpdate ):
'''
Method that records every change on a LogTable.
Only changes on the PolicyResult table are mirrored ( into PolicyResultLog );
anything else is a no-op returning S_OK().
'''
if not ( 'table' in meta and meta[ 'table' ] == 'PolicyResult' ):
return S_OK()
if isUpdate:
# This looks little bit like a non-sense. If we were updating, we may have
# not passed a complete set of parameters, so we have to get all them from the
# database :/. It costs us one more query.
updateRes = self.select( params, meta )
if not updateRes[ 'OK' ]:
return updateRes
params = dict( zip( updateRes[ 'Columns' ], updateRes[ 'Value' ][ 0 ] ))
# Writes to PolicyResult"Log"
# NOTE(review): this mutates the caller's meta dict in place, so after this
# call meta[ 'table' ] is 'PolicyResultLog' on the caller's side too.
meta[ 'table' ] += 'Log'
logRes = self.insert( params, meta )
return logRes
## Private methods ###########################################################
def __createTables( self, tableName = None ):
'''
Writes the schema in the database. If no tableName is given, all tables
are written in the database. If a table is already in the schema, it is
skipped to avoid problems trying to create a table that already exists.
'''
tables = {}
if tableName is None:
tables.update( self._tableDict )
elif tableName in self._tableDict:
tables = { tableName : self._tableDict[ tableName ] }
else:
return S_ERROR( '"%s" is not a known table' % tableName )
res = self.database._createTables( tables )
if not res[ 'OK' ]:
return res
# Human readable S_OK message
if res[ 'Value' ] == 0:
res[ 'Value' ] = 'No tables created'
else:
res[ 'Value' ] = 'Tables created: %s' % ( ','.join( tables.keys() ) )
return res
def __generateTables( self ):
'''
Method used to transform the class variables into instance variables,
for safety reasons.
'''
# Avoids copying object.
tables = {}
tables.update( self._tablesDB )
for tableName, tableLike in self._likeToTable.items():
tables[ tableName ] = self._tablesLike[ tableLike ]
return tables
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| miloszz/DIRAC | ResourceStatusSystem/DB/ResourceManagementDB.py | Python | gpl-3.0 | 20,465 | [
"DIRAC"
] | 15f85c1329f927bf0fb9d8d3b033439e3ed9a192d828b025bd7fddbd8ce71302 |
#!/usr/local/bin/python -i
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# Change log:
# 8/05, Steve Plimpton (SNL): original version
# 12/09, David Hart (SNL): except hook for Tkinter no-display error
# 5/11, David Hart (SNL): began list of excludes for no-display machines
# ToDo list:
# Help strings:
version = "2 Jul 2014"
intro = """
Pizza.py (%s), a toolkit written in Python
type ? for help, CTRL-D to quit
"""
help = """
pizza.py switch arg(s) switch arg(s) ...
-s silent (else print start-up help)
-t log dump raster load only these tools
-x raster rasmol load all tools except these
-f mine.py arg1 arg2 run script file with args
-c "vec = range(100)" run Python command
-q quit (else interactive)
Everything typed at the ">" prompt is a Python command
Additional commands available at ">" prompt:
? print help message
?? one-line for each tool and script
? raster list tool commands or script syntax
?? energy.py full documentation of tool or script
!ls -l shell command
@cd .. cd to a new directory
@log tmp.log log all commands typed so far to file
@run block.py arg1 arg2 run script file with args
@time d = dump("*.dump") time a command
Tools:
"""
# -------------------------------------------------------------------------
# modules needed by pizza.py
import sys, commands, os, string, exceptions, glob, re
from time import clock
# readline not available in all Pythons
try:
import readline
readline_flag = 1
except ImportError, exception:
print "readline option not available"
readline_flag = 0
# create global Tk root if Tkinter is loaded
# used by all tools that do GUIs via Tkinter
nodisplay = False
try:
import Tkinter
tkroot = Tkinter.Tk()
tkroot.withdraw()
except ImportError, exception:
nodisplay = True
pass
except Exception, exception:
nodisplay = True
pass
# -------------------------------------------------------------------------
# error trap that enables special commands at interactive prompt
def trap(type,value,tback):
global argv
# only check SyntaxErrors
if not isinstance(value,exceptions.SyntaxError):
sys.__excepthook__(type,value,tback)
return
# special commands at top level only, not in indented text entry
if value.text[0].isspace():
sys.__excepthook__(type,value,tback)
return
# ? = top-level help
# ?? = one-line description of each tool and script
# ? name = one-line for each tool command or script purpose/syntax
# ?? name = entire documentation for tool or script
# name with no .py suffix is tool, name with .py suffix is script
if value.text[0] == "?":
words = value.text.split()
if len(words) == 1 and words[0] == "?":
print intro[1:] % version
print help[1:]," ",
for tool in tools: print tool,
print
elif len(words) == 1 and words[0] == "??":
for tool in tools:
exec "oneline = oneline_%s" % tool
print "%-11s%s" % (tool,oneline)
print
scripts = []
for dir in PIZZA_SCRIPTS[1:]:
list = glob.glob("%s/*.py" % dir)
list.sort()
scripts += list
for script in scripts:
filename = os.path.basename(script)
lines = open(script,'r').readlines()
flag = 0
for line in lines:
if line.find("Purpose:") >= 0:
flag = 1
break
if flag: doc = line[line.find("Purpose:")+8:]
else: doc = " not available\n"
print "%-20s%s" % (filename,doc),
elif len(words) == 2 and words[0] == "?":
if words[1][-3:] == ".py":
fileflag = 0
for dir in PIZZA_SCRIPTS:
filename = "%s/%s" % (dir,words[1])
if os.path.isfile(filename):
fileflag = 1
lineflag = 0
lines = open(filename,'r').readlines()
for line in lines:
if line.find("# Purpose:") >= 0: print line[2:],
if line.find("# Syntax:") >= 0:
lineflag = 1
break
if not lineflag: print "%s has no Syntax line" % words[1]
else: print line[2:],
break
if not fileflag:
print "%s is not a recognized script" % words[1]
else:
if words[1] in tools:
exec "txt = docstr_%s" % words[1]
txt = re.sub("\n\s*\n","\n",txt)
txt = re.sub("\n .*","",txt)
exec "print oneline_%s" % words[1]
print txt
else:
print "%s is not a recognized tool" % words[1]
elif len(words) == 2 and words[0] == "??":
if words[1][-3:] == ".py":
fileflag = 0
for dir in PIZZA_SCRIPTS:
filename = "%s/%s" % (dir,words[1])
if os.path.isfile(filename):
fileflag = 1
lines = open(filename,'r').readlines()
for line in lines:
if len(line.strip()) == 0: continue
if line[0] == '#': print line,
else: break
break
if not fileflag:
print "%s is not a recognized script" % words[1]
else:
if words[1] in tools:
exec "print oneline_%s" % words[1]
exec "print docstr_%s" % words[1]
else:
print "%s is not a recognized class" % words[1]
return
# shell command like !ls, !ls -l
if value.text[0] == "!":
os.system(value.text[1:])
return
# @ commands = @cd, @log, @run, @time
# for run and time, use namespace in execfile and exec commands
# else variables defined in script/command
# won't be set in top-level Pizza.py
if value.text[0] == "@":
words = value.text.split()
if words[0][1:] == "cd":
os.chdir(words[1])
return
elif words[0][1:] == "log":
if readline_flag == 0:
print "cannot use @log without readline module"
return
f = open(words[1],"w")
print >>f,"# pizza.py log file\n"
nlines = readline.get_current_history_length()
for i in xrange(1,nlines):
print >>f,readline.get_history_item(i)
f.close()
return
elif words[0][1:] == "run":
argv = words[1:]
file = argv[0]
flag = 0
for dir in PIZZA_SCRIPTS:
fullfile = dir + '/' + file
if os.path.exists(fullfile):
flag = 1
print "Executing file:",fullfile
execfile(fullfile,namespace)
break
if not flag: print "Could not find file",file
return
elif words[0][1:] == "time":
cmd = string.join(words[1:])
t1 = clock()
exec cmd in namespace
t2 = clock()
print "CPU time = ",t2-t1
return
# unrecognized command, let system handle error
sys.__excepthook__(type,value,tback)
# -------------------------------------------------------------------------
# process command-line switches
# store scripts and commands in tasks list
silent = 0
yes_tools = []
no_tools = []
tasks = []
quitflag = 0
iarg = 1
while (iarg < len(sys.argv)):
if (sys.argv[iarg][0] != '-'):
print "ERROR: arg is not a switch: %s" % (sys.argv[iarg])
sys.exit()
if (sys.argv[iarg] == "-s"):
silent = 1
iarg += 1
elif (sys.argv[iarg] == "-t"):
jarg = iarg + 1
while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'):
yes_tools.append(sys.argv[jarg])
jarg += 1
iarg = jarg
elif (sys.argv[iarg] == "-x"):
jarg = iarg + 1
while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'):
no_tools.append(sys.argv[jarg])
jarg += 1
iarg = jarg
# allow for "--" as arg to script and not Pizza.py arg
elif (sys.argv[iarg] == "-f"):
jarg = iarg + 1
list = []
while (jarg < len(sys.argv) and
(sys.argv[jarg][0] != '-' or
(len(sys.argv[jarg]) >= 3 and sys.argv[jarg][0:2] == "--"))):
list.append(sys.argv[jarg])
jarg += 1
task = ("script",list)
tasks.append(task)
iarg = jarg
elif (sys.argv[iarg] == "-c"):
jarg = iarg + 1
list = []
while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'):
list.append(sys.argv[jarg])
jarg += 1
task = ("command",list)
tasks.append(task)
iarg = jarg
elif (sys.argv[iarg] == "-q"):
quitflag = 1
iarg += 1
else:
print "ERROR: unknown switch: %s" % (sys.argv[iarg])
sys.exit()
# print intro message
if not silent: print intro[1:] % version,
# error test on m,x command-line switches
if len(yes_tools) > 0 and len(no_tools) > 0:
print "ERROR: cannot use -t and -x switches together"
sys.exit()
# -------------------------------------------------------------------------
# tools = list of tool names to import
# if -t switch was used, tools = just those files
# else scan for *.py files in all dirs in PIZZA_TOOLS list
# and then Pizza.py src dir (sys.path[0])
if not silent: print "Loading tools ..."
if not silent and nodisplay: print "Display not available ... no GUIs"
try: from DEFAULTS import PIZZA_TOOLS
except: PIZZA_TOOLS = []
PIZZA_TOOLS = map(os.path.expanduser,PIZZA_TOOLS)
PIZZA_TOOLS.append(sys.path[0])
if len(yes_tools) > 0: tools = yes_tools
else:
tools = []
for dir in PIZZA_TOOLS:
tools += glob.glob(dir + "/*.py")
for i in range(len(tools)):
tools[i] = os.path.basename(tools[i])
tools[i] = tools[i][:-3]
# remove duplicate entries, reverse enables removing all but first entry
tools.reverse()
for tool in tools:
while tools.count(tool) > 1: tools.remove(tool)
tools.reverse()
# remove tools in EXCLUDE list and command-line -x list
try: from DEFAULTS import PIZZA_EXCLUDE
except: PIZZA_EXCLUDE = []
for tool in PIZZA_EXCLUDE:
if tool in tools: tools.remove(tool)
for tool in no_tools:
if tool in tools: tools.remove(tool)
# add PIZZA_TOOLS dirs to front of module search path (sys.path)
# import each tool as a Python module and its documentation strings
# restore sys.path
sys.path = PIZZA_TOOLS + sys.path
failed = []
for tool in tools:
#print "loading tool '%s'"%tool
if nodisplay and tool in ['gl']:
failed.append(tool)
continue
try:
exec "from %s import %s" % (tool,tool)
exec "from %s import oneline as oneline_%s" % (tool,tool)
exec "from %s import docstr as docstr_%s" % (tool,tool)
except Exception, exception:
print "%s tool did not load:" % tool
print " ",exception
failed.append(tool)
for dir in PIZZA_TOOLS: sys.path = sys.path[1:]
# final list of tools: remove tools where import failed, sort them
for tool in failed: tools.remove(tool)
tools.sort()
# add current working dir to sys.path so user can import own modules
# cwd isn't in sys.path when Pizza.py is launched
sys.path.insert(0,'')
# -------------------------------------------------------------------------
# PIZZA_SCRIPTS = list of dirs to look in to find scripts
try: from DEFAULTS import PIZZA_SCRIPTS
except: PIZZA_SCRIPTS = []
PIZZA_SCRIPTS = map(os.path.expanduser,PIZZA_SCRIPTS)
PIZZA_SCRIPTS.insert(0,'.')
PIZZA_SCRIPTS.append(sys.path[1][:-3] + "scripts") # path for pizza.py
# run specified script files and commands in order specified
# put arguments in argv so script can access them
# check list of PIZZA_SCRIPTS dirs to find script file
# catch errors so pizza.py will continue even if script is bad
# traceback logic prints where in the script the error occurred
for task in tasks:
if task[0] == "script":
argv = task[1]
file = argv[0]
try:
flag = 0
for dir in PIZZA_SCRIPTS:
fullfile = dir + '/' + file
if os.path.exists(fullfile):
print "Executing file:",fullfile
execfile(fullfile)
flag = 1
break
if not flag: print "Could not find file",file
except StandardError, exception:
(type,value,tback) = sys.exc_info()
print type,value,tback
type = str(type)
type = type[type.find(".")+1:]
print "%s with value: %s" % (type,value)
tback = tback.tb_next
while tback:
print "error on line %d of file %s" % \
(tback.tb_lineno,tback.tb_frame.f_code.co_filename)
tback = tback.tb_next
elif task[0] == "command":
argv = task[1]
cmd = ""
for arg in argv: cmd += arg + " "
exec cmd
# -------------------------------------------------------------------------
# store global namespace
# swap in a new exception handler
# change interactive prompts
namespace = sys.modules['__main__'].__dict__
sys.excepthook = trap
sys.ps1 = "> "
sys.ps2 = ". "
# should now go interactive if launched with "python -i"
# unless -q switch used
if quitflag > 0: sys.exit()
| slitvinov/Pizza.py | src/pizza.py | Python | gpl-2.0 | 13,329 | [
"RasMol"
] | 7b23044bc86bf40499994c6674460685b8863036d3990a8a71d5dcfc122154b1 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import abc
import math
class BinaryHeapADT(object):
    """
    Abstract data type of a binary heap backed by two parallel, 1-indexed
    lists (index 0 holds a None sentinel so parent/child index arithmetic
    stays simple).

    :param keys: priority keys used for ordering.
    :type keys: list
    :param values: payload associated, positionally, with each key.
    :type values: list
    """
    # NOTE(review): __metaclass__ is the Python-2 spelling; under Python 3 it
    # is inert, so abstract-method enforcement only happens on Python 2.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def insert(self, key, value):
        """Insert a new (key, value) entry into the heap."""
        return

    @abc.abstractmethod
    def pop(self):
        """Extract and remove the root of the heap."""
        return

    def parent_of(self, i):
        """Index of the parent of node i (1-indexed layout)."""
        return int(math.floor(i / 2))

    def children_of(self, i):
        """(left, right) child indices of node i."""
        return (2 * i, 2 * i + 1)

    @abc.abstractmethod
    def ensure(self):
        """Ensure the heap property is maintained."""
        return

    def __init__(self, keys, values):
        # Normalize both arrays to the 1-indexed layout with a None sentinel
        # in slot 0, then let the concrete class establish heap order.
        _keys = keys or [None]
        _values = values or [None]
        if _keys[0] is not None:
            _keys = [None] + _keys
        if _values[0] is not None:
            _values = [None] + _values
        self._keys = _keys
        self._values = _values
        self.ensure()


class MinBinaryHeap(BinaryHeapADT):
    """Min-(binary)-heap: the root always holds the smallest key."""

    def _size(self):
        """Number of real entries (slot 0 is the sentinel)."""
        return len(self._keys) - 1

    def _swap(self, i, j):
        """Exchange entries i and j in both parallel arrays."""
        self._keys[i], self._keys[j] = self._keys[j], self._keys[i]
        self._values[i], self._values[j] = self._values[j], self._values[i]

    def _sift_down(self, i):
        """Push the entry at index i down until both children are >= it."""
        n = self._size()
        while True:
            left, right = self.children_of(i)
            smallest = i
            if left <= n and self._keys[left] < self._keys[smallest]:
                smallest = left
            if right <= n and self._keys[right] < self._keys[smallest]:
                smallest = right
            if smallest == i:
                return
            self._swap(i, smallest)
            i = smallest

    def min_heapify(self):
        """Restore the min-heap property over the whole array.

        FIX: the original compared key *values* against array *indices*
        (e.g. ``while smallest <= len(self._keys)-1`` and
        ``if smallest == x_index``) and even indexed the key array with a key
        value, so it corrupted rather than restored the heap, and crashed on
        an empty heap. A bottom-up sift-down pass is O(n) and correct for
        every caller: ensure() needs a full build, and pop() only needs a
        root sift, which this subsumes.
        """
        for i in range(self._size() // 2, 0, -1):
            self._sift_down(i)

    def ensure(self):
        """Visit the entire heap to ensure the min-heap property holds."""
        self.min_heapify()

    def pop(self):
        """Remove and return (key, value) with the smallest key.

        FIX: the original raised IndexError on an empty heap; None is now
        returned instead, mirroring MaxBinaryHeap.pop().
        """
        if self._size() < 1:
            return None
        min_key, min_value = self._keys[1], self._values[1]
        # Move the last entry to the root, shrink, then sift it down.
        self._keys[1] = self._keys[-1]
        self._values[1] = self._values[-1]
        del self._keys[-1]
        del self._values[-1]
        self._sift_down(1)
        return min_key, min_value

    def insert(self, key, value):
        """Append (key, value) and bubble it up to its heap position.

        :param key: the key for priority sorting comparison.
        :param value: the value associated with the key.
        """
        self._keys.append(key)
        self._values.append(value)
        i = len(self._keys) - 1
        parent = self.parent_of(i)
        while i > 1 and self._keys[i] < self._keys[parent]:
            self._swap(i, parent)
            i, parent = parent, self.parent_of(parent)
class MaxBinaryHeap(object):
    """Binary max-heap over a 1-indexed list (slot 0 is an unused sentinel)."""

    def __init__(self):
        self._data = [None]

    def get_size(self):
        """Number of stored items; the first slot is always reserved."""
        return len(self._data) - 1

    def parent_of(self, i):
        """Index of the parent of node i."""
        return int(math.floor(i / 2))

    def children_of(self, i):
        """(left, right) child indices of node i."""
        return 2 * i, 2 * i + 1

    def parent_is_smaller(self, x):
        """True when x's parent holds a smaller value (heap order violated)."""
        return self._data[x] > self._data[self.parent_of(x)]

    def swap(self, x_index, p_index):
        """Exchange the items stored at the two indices."""
        data = self._data
        data[x_index], data[p_index] = data[p_index], data[x_index]

    def insert(self, x):
        """Append x and bubble it up until max-heap order is restored."""
        self._data.append(x)
        child = self.get_size()
        while child > 1 and self.parent_is_smaller(child):
            parent = self.parent_of(child)
            self.swap(child, parent)
            child = parent

    def max_heapify(self, i):
        """Sift the item at index i down to its proper place (iteratively)."""
        size = self.get_size()
        while True:
            left, right = self.children_of(i)
            largest = i
            if left <= size and self._data[left] > self._data[largest]:
                largest = left
            if right <= size and self._data[right] > self._data[largest]:
                largest = right
            if largest == i:
                return
            self.swap(i, largest)
            i = largest

    def pop(self):
        """Remove and return the maximum item, or None when the heap is empty."""
        if self.get_size() < 1:
            return None
        # Move the max to the end, pop it off, then restore order from root.
        self.swap(1, self.get_size())
        top = self._data.pop()
        self.max_heapify(1)
        return top
| yeukhon/algorithm | algorithm/heap/heap.py | Python | mpl-2.0 | 6,640 | [
"VisIt"
] | 1c097ce3190f3467b0f977b16565f4aab24c48e17cac278de7eff84b647cd9cf |
from .. import query_processor
from nose.tools import assert_equal, assert_not_equal
from unittest import expectedFailure, skip
import ast
import parser
import token
import symbol
def match(pattern, data, vars=None):
    """Structurally match *data* against *pattern*.

    A ``list`` inside *pattern* is a capture slot: its first element names a
    variable and the corresponding *data* value is stored under that name in
    *vars*. Tuples are matched element-wise (recursively); anything else is
    compared with ``==``.

    Returns ``(matched, vars)`` where ``matched`` is truthy on success
    (the historical interface mixes ``1``/``0`` and booleans; kept as-is).
    """
    if vars is None:
        # Fresh dict per call -- never use a mutable default argument.
        vars = {}
    if type(pattern) is list:
        vars[pattern[0]] = data
        return 1, vars
    if type(pattern) is not tuple:
        return (pattern == data), vars
    if len(data) != len(pattern):
        return 0, vars
    # FIX: the original iterated with the Python-2-only map(None, ...) zip
    # idiom (a TypeError on Python 3) and shadowed the outer pattern/data
    # names; lengths are equal at this point, so zip() is an exact substitute.
    # FIX: `same` is now initialised, so matching two empty tuples no longer
    # raises NameError.
    same = 1
    for sub_pattern, sub_data in zip(pattern, data):
        same, vars = match(sub_pattern, sub_data, vars)
        if not same:
            break
    return same, vars
@skip
def test_rewrite_notin_precedence():
# Skipped test: exercises precedence-preserving rewriting of "not in"
# inside boolean expressions by query_processor.RewriteChangeNotInPrescedence.
code1 = "a and b not in c"
code2 = "(a and b) not in c"
code3 = "a and (b not in c)"
# NOTE(review): code4 is defined but never parsed or asserted against.
code4 = "(b not in c) and a"
rw = query_processor.RewriteChangeNotInPrescedence()
tree1 = ast.parse(code1)
tree2 = ast.parse(code2)
tree3 = ast.parse(code3)
tree1_rw = ast.parse(code1)
tree2_rw = ast.parse(code2)
tree3_rw = ast.parse(code3)
rw.visit(tree1_rw)
rw.visit(tree2_rw)
rw.visit(tree3_rw)
# Expectation: the rewrite turns "a and b not in c" into "(a and b) not in c"
# while leaving the explicitly-parenthesised variants untouched.
assert_not_equal(ast.dump(tree1), ast.dump(tree2))
assert_equal(ast.dump(tree2), ast.dump(tree2_rw))
assert_equal(ast.dump(tree1_rw), ast.dump(tree2))
assert_equal(ast.dump(tree3), ast.dump(tree3_rw))
assert_equal(ast.dump(tree1), ast.dump(tree3_rw))
| demianw/tract_querier | tract_querier/tests/test_query_rewrite.py | Python | bsd-3-clause | 1,399 | [
"VisIt"
] | a18aebcc3bcba19036e0eaaa1b1872875f118dcfde20a63f699fbaa0247eca64 |
import datetime
from utils import Fixture, runtests
from heinzel.core import models
from heinzel.core.utils import datetime_localize
class Car(models.Model):
    """Test model exercising field defaults, datetime handling and IPv6 storage."""
    # ``initial`` supplies the value used when none is provided at creation.
    name = models.TextField(max_length=100, initial="Bug")
    depreciation_total = models.FloatField(initial=0.0)
    # A callable initial — presumably evaluated at instance-creation time
    # (TestDatetimeField relies on this); confirm against DatetimeField.
    build_date = models.DatetimeField(initial=datetime.datetime.now)
    last_checkup = models.DatetimeField()
    # in the future, cars will have their own IP address, says everybody.
    ip_address = models.IPv6Field()
# Make the Car model known to the framework's model registry.
models.register([Car])
class TestFieldInitial(Fixture):
    """A freshly saved Car picks up the TextField's declared initial value."""
    def runTest(self):
        car = Car().save()[0]
        self.assert_(car.name == "Bug")
class TestFloatFieldInitial(Fixture):
    """A freshly saved Car picks up the FloatField's declared initial of 0.0."""
    def runTest(self):
        saved = Car().save()[0]
        self.assert_(saved.depreciation_total == 0.0)
class TestDatetimeField(Fixture):
    """DatetimeField behaviour: implicit localization, database round-trips,
    and callable ``initial`` values."""

    def runTest(self):
        checkup_time = datetime.datetime.now()
        build_time = datetime.datetime(1975, 3, 12, 9, 53, 57)
        jag = Car(
            name="Jaguar",
            build_date=build_time,
            last_checkup=checkup_time
        )
        jag.save()

        def same_moment(dt1, dt2):
            return dt1 == dt2

        # jag.build_date was implicitly localized on assignment while
        # build_time is still naive: comparing naive and aware datetimes
        # raises TypeError, and the two are distinct objects.
        self.assertRaises(TypeError, same_moment, jag.build_date, build_time)
        self.assert_(jag.build_date is not build_time)

        # Once localized, the values represent the same moments and compare equal.
        aware_build_time = datetime_localize(build_time)
        self.assert_(jag.build_date == aware_build_time)
        self.assert_(jag.last_checkup == datetime_localize(checkup_time))

        # The value survives a round-trip through the database.
        reloaded = Car.objects.get(id=jag.id)
        self.assert_(reloaded.last_checkup == datetime_localize(checkup_time))

        # A callable ``initial`` is evaluated at construction time, so a new
        # car's build_date must fall strictly between these two probes.
        before = datetime_localize(datetime.datetime.now())
        corv = Car(name="Corvette")
        after = datetime_localize(datetime.datetime.now())
        self.assert_(before < corv.build_date < after)
class TestIPv6Field(Fixture):
    """IPv6Field stores and round-trips an IPv6 address string.

    Fix: removed the unused ``import socket`` from the original body.
    """
    def runTest(self):
        c = Car(ip_address="::1")  # localhost in IPv6
        c.save()
        compc = Car.objects.get(id=c.id)
        self.assert_(compc.ip_address == "::1")
if __name__ == "__main__":
    # Run the full field test suite when this module is executed directly.
    runtests(
        tests=(
            TestFieldInitial,
            TestFloatFieldInitial,
            TestDatetimeField,
            TestIPv6Field,
        ),
        verbosity=3,
    )
"Jaguar"
] | 301fc140f95381d08e42fcad342d5536e2347a3afdea64614c5e7ba198ecc563 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""Defines Renderer objects that convert AST (from Reader) into an output format."""
import os
import re
import logging
import traceback
import codecs
import shutil
import moosetree
import MooseDocs
from ..common import exceptions, mixins, report_error, Storage
from ..tree import html, latex, pages
LOG = logging.getLogger(__name__)
class Renderer(mixins.ConfigObject, mixins.ComponentObject):
    """
    Base renderer for converting AST to an output format.

    Subclasses set METHOD (the RenderComponent method name invoked per token)
    and implement getRoot() to supply the root of the output tree.
    """
    __TRANSLATOR_METHODS__ = ['init',
                              'initPage',
                              'render',
                              'write',
                              'preExecute', 'postExecute',
                              'preRender', 'postRender',
                              'preWrite', 'postWrite']

    #:[str] The name of the method to call on RendererComponent objects.
    METHOD = None

    def __init__(self, **kwargs):
        mixins.ConfigObject.__init__(self, 'renderer', **kwargs)
        mixins.ComponentObject.__init__(self)
        self.__functions = dict()  # token name -> render function to call

    def add(self, name, component):
        """
        Associate a RenderComponent object with a token type.

        Inputs:
            name[str]: The token name (e.g., "String") to associate with the supplied component.
            component[RenderComponent]: The component to execute with the associated token type.
        """
        component.setRenderer(self)
        self.addComponent(component)
        self.__functions[name] = self._method(component)

    def getRoot(self):
        """
        Return the rendered content root node.

        Called by the Translator prior to beginning rendering.
        """
        raise NotImplementedError()

    def render(self, parent, token, page):
        """
        Convert the AST defined in the token input into a output node of parent.

        Inputs:
            parent[tree.base.NodeBase]: A tree object that the AST shall be converted to.
            token[tree.token]: The AST node to convert.
            page[pages.Page]: The page being rendered; used for error reporting.
        """
        try:
            func = self.__getFunction(token)
            el = func(parent, token, page) if func else parent
        except Exception as e:
            # Rendering is best-effort: log the failure (with source location
            # when the token carries one) and continue with other tokens.
            el = None
            if token.info is not None:
                line = token.info.line
                src = token.info[0]
            else:
                line = None
                src = ''
            msg = report_error(e, page.source, line, src, traceback.format_exc(), 'RENDER ERROR')
            LOG.error(msg)

        # Recurse into children, attaching them under the node just created;
        # a render failure (el is None) prunes that subtree.
        if el is not None:
            for child in token.children:
                self.render(el, child, page)

    def init(self):
        """
        Called after Translator is set, prior to initializing pages.
        """
        pass

    def initPage(self, page):
        """
        Called for each Page object during initialization.
        """
        pass

    def preExecute(self):
        """
        Called by Translator prior to beginning conversion.
        """
        pass

    def postExecute(self):
        """
        Called by Translator after all conversion is complete.
        """
        pass

    def preRender(self, page, result):
        """
        Called by Translator prior to rendering.

        Inputs:
            page[pages.Source]: The source object representing the content
            result[tree.base.NodeBase]: The root node of the result tree
        """
        pass

    def postRender(self, page, result):
        """
        Called by Translator after rendering.

        Inputs:
            page[pages.Source]: The source object representing the content
            result[tree.base.NodeBase]: The root node of the result tree
        """
        pass

    def preWrite(self, page, result):
        """
        Called before the renderer writes content.

        Inputs:
            page[pages.Source]: The source object representing the content
            result[tree.base.NodeBase]: The root node of the result tree
        """
        pass

    def postWrite(self, page):
        """
        Called after renderer has written content.

        Inputs:
            page[pages.Source]: The source object representing the content
        """
        pass

    def write(self, page, result=None):
        """
        Write the supplied results using to the destination defined by the page.

        This is called by the Translator object.
        """
        if isinstance(page, pages.Source):
            # Rendered content: serialize the result tree as UTF-8 text.
            self._create_directory(page.destination)
            LOG.debug('WRITE %s-->%s', page.source, page.destination)
            with codecs.open(page.destination, 'w', encoding='utf-8') as fid:
                fid.write(result.write())
        elif isinstance(page, pages.File):
            # Static files are copied verbatim.
            self._create_directory(page.destination)
            LOG.debug('COPY: %s-->%s', page.source, page.destination)
            if not os.path.exists(page.source):
                LOG.error('Unknown file: %s', page.source)
            else:
                shutil.copyfile(page.source, page.destination)
        elif isinstance(page, pages.Directory):
            self._create_directory(page.destination)
        elif isinstance(page, pages.Text):
            pass  # in-memory content, nothing to write
        else:
            LOG.error('Unknown Node type: %s', type(page))

    def _method(self, component):
        """
        Return the desired method to call on the RenderComponent object.

        Inputs:
            component[RenderComponent]: Object to use for locating desired method for renderering.

        Raises MooseDocsException when METHOD is unset or the component does
        not implement it.
        """
        if self.METHOD is None:
            msg = "The Reader class of type {} must define the METHOD class member."
            raise exceptions.MooseDocsException(msg, type(self))
        elif not hasattr(component, self.METHOD):
            msg = "The component object {} does not have a {} method."
            raise exceptions.MooseDocsException(msg, type(component), self.METHOD)
        return getattr(component, self.METHOD)

    def _create_directory(self, location):
        """Helper for creating a directory."""
        # NOTE(review): taken under the executioner lock — presumably because
        # multiple pages may be written concurrently; confirm in executioner.
        with self.translator.executioner._lock:
            dirname = os.path.dirname(location)
            if dirname and not os.path.isdir(dirname):
                LOG.debug('CREATE DIR %s', dirname)
                os.makedirs(dirname)

    def __getFunction(self, token):
        """
        Return the desired function for the supplied token object.

        Inputs:
            token[tree.token]: token for which the associated RenderComponent function is desired.
        """
        return self.__functions.get(token.name, None)
class HTMLRenderer(Renderer):
    """
    Converts AST into HTML.
    """
    METHOD = 'createHTML'
    EXTENSION = '.html'

    @staticmethod
    def defaultConfig():
        """
        Return the default configuration.
        """
        config = Renderer.defaultConfig()
        config['google_analytics'] = (False, "Enable Google Analytics.")
        config['favicon'] = (None, "The location of the website favicon.")
        config['extra-css'] = ([], "List of additional CSS files to include.")
        return config

    def __init__(self, *args, **kwargs):
        Renderer.__init__(self, *args, **kwargs)
        # CSS/JS files included on every page, keyed by (name, context) where
        # context is one of 'css', 'head_javascript' or 'javascript'.
        self.__global_files = dict()

        if self.get('google_analytics', False):
            self.addJavaScript('google_analytics', 'js/google_analytics.js')

    def getRoot(self):
        """Return the result node for inserting rendered html nodes."""
        root = html.Tag(None, '!DOCTYPE html', close=False)
        head = html.Tag(root, 'head')
        html.Tag(head, 'meta', charset="UTF-8", close=False)
        return html.Tag(root, 'body')

    def addJavaScript(self, name, filename, page=None, head=False, **kwargs):
        """
        Add a javascript dependency. Do not attempt to call this function to add a global renderer
        file, i.e., with `page=None`, from within the read/tokenize/render/write methods.
        """
        key = (name, 'head_javascript' if head else 'javascript')
        # Add a global script to be included in all HTML pages, otherwise add a per-page script
        if page is None:
            self.__global_files[key] = (filename, kwargs)
        else:
            page.attributes.setdefault('renderer_files', dict())[key] = (filename, kwargs)

    def addCSS(self, name, filename, page=None, **kwargs):
        """
        Add a CSS dependency. Do not attempt to call this function to add a global renderer file,
        i.e., with `page=None`, from within the read/tokenize/render/write methods.
        """
        key = (name, 'css')
        # Add a global style sheet to be included in all HTML pages, otherwise add a per-page sheet
        if page is None:
            self.__global_files[key] = (filename, kwargs)
        else:
            page.attributes.setdefault('renderer_files', dict())[key] = (filename, kwargs)

    def postRender(self, page, result):
        """Insert CSS/JS dependencies into html node tree."""
        def rel(path):
            """Helper to create relative paths for js/css dependencies."""
            if path.startswith('http'):
                return path
            return os.path.relpath(path, os.path.dirname(page.local))

        # get the parent nodes to tag
        root = result.root
        head = moosetree.find(root, lambda n: n.name == 'head')
        body = moosetree.find(root, lambda n: n.name == 'body')

        favicon = self.get('favicon')
        if favicon:
            html.Tag(head, 'link', rel="icon", type="image/x-icon", href=rel(favicon), \
                     sizes="16x16 32x32 64x64 128x128")

        # Add the extra-css, this is done here to make sure it shows up last
        # (per-page files override globals with the same key via the merge).
        files = {**self.__global_files, **page.get('renderer_files', dict())}
        for i, css in enumerate(self.get('extra-css')):
            files[('extra-css-{}'.format(i), 'css')] = (css, {})

        # Sorting the (name, context) keys by context groups 'css' entries
        # before the javascript entries; the stable sort preserves insertion
        # order within each context.
        for (key, context) in sorted(files, key=(lambda f: f[1])):
            name, kwargs = files.pop((key, context))
            if context == 'css':
                html.Tag(head, 'link', href=rel(name), type="text/css", rel="stylesheet", **kwargs)
            elif context == 'head_javascript':
                html.Tag(head, 'script', type="text/javascript", src=rel(name), **kwargs)
            elif context == 'javascript':
                html.Tag(body.parent, 'script', type="text/javascript", src=rel(name), **kwargs)
class MaterializeRenderer(HTMLRenderer):
    """
    Convert AST into HTML using the materialize javascript library (http://materializecss.com).
    """
    METHOD = 'createMaterialize'

    @staticmethod
    def defaultConfig():
        """Return the default configuration (inherited from HTMLRenderer)."""
        return HTMLRenderer.defaultConfig()

    def __init__(self, *args, **kwargs):
        HTMLRenderer.__init__(self, *args, **kwargs)
        self.__index = False  # page index created

        # Style sheets and scripts required by the Materialize output on
        # every page; registration order determines emission order.
        self.addCSS('materialize', "contrib/materialize/materialize.min.css",
                    media="screen,projection")
        self.addCSS('prism', "contrib/prism/prism.min.css")
        self.addCSS('moose', "css/moose.css")

        self.addJavaScript('jquery', "contrib/jquery/jquery.min.js", head=True)
        self.addJavaScript('materialize', "contrib/materialize/materialize.min.js")
        self.addJavaScript('clipboard', "contrib/clipboard/clipboard.min.js")
        self.addJavaScript('prism', "contrib/prism/prism.min.js")
        self.addJavaScript('init', "js/init.js")

    def update(self, **kwargs):
        """
        Update the default configuration with the supplied values.

        Override of the ConfigObject method, retained as a hook for checking
        the type of configuration items.
        """
        HTMLRenderer.update(self, **kwargs)

    def getRoot(self):
        """Build the Materialize page scaffolding and return the content node."""
        body = HTMLRenderer.getRoot(self)
        page = html.Tag(body, 'div', class_='page-wrap')
        html.Tag(page, 'header')
        main = html.Tag(page, 'main', class_='main')
        container = html.Tag(main, 'div', class_="container")
        row = html.Tag(container, 'div', class_="row")
        return html.Tag(row, 'div', class_="moose-content")

    def _method(self, component):
        """
        Locate the render method, falling back to the HTMLRenderer method when
        the Materialize-specific one is not implemented.

        Inputs:
            component[RenderComponent]: Object to use for locating desired method for renderering.
        """
        for attr in (self.METHOD, HTMLRenderer.METHOD):
            method = getattr(component, attr, None)
            if method is not None:
                return method
        msg = "The component object {} does not have a {} method."
        raise exceptions.MooseDocsException(msg, type(component), self.METHOD)
class LatexRenderer(Renderer):
    """
    Renderer for converting AST to LaTeX.
    """
    METHOD = 'createLatex'
    EXTENSION = '.tex'

    def __init__(self, *args, **kwargs):
        self._packages = dict()   # package name -> (args, kwargs)
        self._preamble = list()   # arbitrary preamble nodes
        self._commands = dict()   # command name -> NewDocumentCommand node
        Renderer.__init__(self, *args, **kwargs)

    def getRoot(self):
        """Return the root node of the LaTeX output tree."""
        return latex.LatexBase(None, None)

    def addNewCommand(self, cmd, content):
        """
        Add a NewDocumentCommand to latex preamble.
        """
        # The argument count is the highest placeholder (#1, #2, ...) that the
        # command body references; zero when none appear.
        arg_count = max(
            (int(m.group('num')) for m in re.finditer(r'#(?P<num>[0-9]+)', content)),
            default=0)
        spec = [latex.Brace(string=cmd, escape=False),
                latex.Brace(string='m' * arg_count)]
        self._commands[cmd] = latex.Command(None, 'NewDocumentCommand', args=spec,
                                            escape=False, string=content, start='\n')

    def getNewCommands(self):
        """Return the dict of new commands."""
        return self._commands

    def addPackage(self, pkg, *args, **kwargs):
        """
        Add a LaTeX package to the list of packages for rendering (see pdf.py)
        """
        self._packages[pkg] = (args, kwargs)

    def getPackages(self):
        """Return the set of packages and settings."""
        return self._packages

    def addPreamble(self, node):
        """
        Add a string to the preamble (see pdf.py).
        """
        self._preamble.append(node)

    def getPreamble(self):
        """Return the list of preamble strings."""
        return self._preamble
class RevealRenderer(HTMLRenderer):
    """
    Convert AST into an HTML slideshow using the reveal.js library
    (https://revealjs.com).

    Fix: the original docstring was copied from MaterializeRenderer and
    incorrectly described the materialize library.
    """
    METHOD = 'createReveal'

    @staticmethod
    def defaultConfig():
        """
        Return the default configuration.
        """
        config = HTMLRenderer.defaultConfig()
        config['theme'] = ('simple', "The CSS theme to use (simple).")
        return config

    def __init__(self, *args, **kwargs):
        HTMLRenderer.__init__(self, *args, **kwargs)
        # reveal.js style sheets and scripts required on every page.
        self.addCSS('reveal', "contrib/reveal/reveal.css")
        self.addCSS('reveal_theme', "contrib/reveal/{}.css".format(self.get('theme')), id_="theme")
        self.addCSS('reveal_css', "css/reveal_moose.css")
        self.addCSS('prism', "contrib/prism/prism.min.css")

        self.addJavaScript('reveal', "contrib/reveal/reveal.js")
        self.addJavaScript('prism', "contrib/prism/prism.min.js")
        self.addJavaScript('notes', "contrib/reveal/notes.js")
        self.addJavaScript('reveal_init', "js/reveal_init.js")

    def getRoot(self):
        """Return the 'slides' container into which sections are rendered."""
        body = HTMLRenderer.getRoot(self)
        div = html.Tag(body, 'div', class_='reveal')
        slides = html.Tag(div, 'div', class_='slides')
        return slides

    def _method(self, component):
        """
        Locate the Reveal render method, falling back to the HTMLRenderer
        method when the Reveal-specific one is not implemented.
        """
        if hasattr(component, self.METHOD):
            return getattr(component, self.METHOD)
        elif hasattr(component, HTMLRenderer.METHOD):
            return getattr(component, HTMLRenderer.METHOD)
        else:
            msg = "The component object {} does not have a {} method."
            raise exceptions.MooseDocsException(msg, type(component), self.METHOD)
| harterj/moose | python/MooseDocs/base/renderers.py | Python | lgpl-2.1 | 16,591 | [
"MOOSE"
] | 66f8c65c0740f66b21d44cf84300631080a062949079b0c2c115239dd44c3f8c |
# mako/codegen.py
# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module
source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
from mako import compat
# Version stamp embedded in generated modules — presumably used to detect and
# regenerate stale compiled templates; confirm against the module loader.
MAGIC_NUMBER = 9

# names which are hardwired into the
# template and are not accessed via the
# context itself
RESERVED_NAMES = set(['context', 'loop', 'UNDEFINED'])
def compile(node,
            uri,
            filename=None,
            default_filters=None,
            buffer_filters=None,
            imports=None,
            future_imports=None,
            source_encoding=None,
            generate_magic_comment=True,
            disable_unicode=False,
            strict_undefined=False,
            enable_loop=True,
            reserved_names=frozenset()):
    """Generate module source code given a parsetree node,
    uri, and optional source filename"""

    # On Py2K, keep "source_encoding" as a bytestring, since it will be
    # embedded into the generated source and must not coerce the result to a
    # unicode object when running in "disable_unicode" mode.
    if not compat.py3k and isinstance(source_encoding, compat.text_type):
        source_encoding = source_encoding.encode(source_encoding)

    output = util.FastEncodingBuffer()
    options = _CompileContext(uri,
                              filename,
                              default_filters,
                              buffer_filters,
                              imports,
                              future_imports,
                              source_encoding,
                              generate_magic_comment,
                              disable_unicode,
                              strict_undefined,
                              enable_loop,
                              reserved_names)
    _GenerateRenderMethod(PythonPrinter(output), options, node)
    return output.getvalue()
class _CompileContext(object):
    """Plain bag of compilation options passed through code generation.

    Attributes mirror the keyword arguments of :func:`compile` one-for-one
    and are read by the code-generation visitor.
    """
    def __init__(self,
                 uri,
                 filename,
                 default_filters,
                 buffer_filters,
                 imports,
                 future_imports,
                 source_encoding,
                 generate_magic_comment,
                 disable_unicode,
                 strict_undefined,
                 enable_loop,
                 reserved_names):
        self.uri = uri
        self.filename = filename
        self.default_filters = default_filters
        self.buffer_filters = buffer_filters
        self.imports = imports
        self.future_imports = future_imports
        self.source_encoding = source_encoding
        self.generate_magic_comment = generate_magic_comment
        self.disable_unicode = disable_unicode
        self.strict_undefined = strict_undefined
        self.enable_loop = enable_loop
        self.reserved_names = reserved_names
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
def __init__(self, printer, compiler, node):
self.printer = printer
self.last_source_line = -1
self.compiler = compiler
self.node = node
self.identifier_stack = [None]
self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
if self.in_def:
name = "render_%s" % node.funcname
args = node.get_argument_expressions()
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
defs = None
pagetag = None
if node.is_block and not node.is_anonymous:
args += ['**pageargs']
else:
defs = self.write_toplevel()
pagetag = self.compiler.pagetag
name = "render_body"
if pagetag is not None:
args = pagetag.body_decl.get_argument_expressions()
if not pagetag.body_decl.kwargs:
args += ['**pageargs']
cached = eval(pagetag.attributes.get('cached', 'False'))
self.compiler.enable_loop = self.compiler.enable_loop or eval(
pagetag.attributes.get(
'enable_loop', 'False')
)
else:
args = ['**pageargs']
cached = False
buffered = filtered = False
if args is None:
args = ['context']
else:
args = [a for a in ['context'] + args]
self.write_render_callable(
pagetag or node,
name, args,
buffered, filtered, cached)
if defs is not None:
for node in defs:
_GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
def write_toplevel(self):
"""Traverse a template structure for module-level directives and
generate the start of module-level code.
"""
inherit = []
namespaces = {}
module_code = []
self.compiler.pagetag = None
class FindTopLevel(object):
def visitInheritTag(s, node):
inherit.append(node)
def visitNamespaceTag(s, node):
namespaces[node.name] = node
def visitPageTag(s, node):
self.compiler.pagetag = node
def visitCode(s, node):
if node.ismodule:
module_code.append(node)
f = FindTopLevel()
for n in self.node.nodes:
n.accept_visitor(f)
self.compiler.namespaces = namespaces
module_ident = set()
for n in module_code:
module_ident = module_ident.union(n.declared_identifiers())
module_identifiers = _Identifiers(self.compiler)
module_identifiers.declared = module_ident
# module-level names, python code
if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- coding:%s -*-" %
self.compiler.source_encoding)
if self.compiler.future_imports:
self.printer.writeline("from __future__ import %s" %
(", ".join(self.compiler.future_imports),))
self.printer.writeline("from mako import runtime, filters, cache")
self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
self.printer.writeline("__M_dict_builtin = dict")
self.printer.writeline("__M_locals_builtin = locals")
self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
self.printer.writeline("_modified_time = %r" % time.time())
self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop)
self.printer.writeline(
"_template_filename = %r" % self.compiler.filename)
self.printer.writeline("_template_uri = %r" % self.compiler.uri)
self.printer.writeline(
"_source_encoding = %r" % self.compiler.source_encoding)
if self.compiler.imports:
buf = ''
for imp in self.compiler.imports:
buf += imp + "\n"
self.printer.writeline(imp)
impcode = ast.PythonCode(
buf,
source='', lineno=0,
pos=0,
filename='template defined imports')
else:
impcode = None
main_identifiers = module_identifiers.branch(self.node)
module_identifiers.topleveldefs = \
module_identifiers.topleveldefs.\
union(main_identifiers.topleveldefs)
module_identifiers.declared.add("UNDEFINED")
if impcode:
module_identifiers.declared.update(impcode.declared_identifiers)
self.compiler.identifiers = module_identifiers
self.printer.writeline("_exports = %r" %
[n.name for n in
main_identifiers.topleveldefs.values()]
)
self.printer.write("\n\n")
if len(module_code):
self.write_module_code(module_code)
if len(inherit):
self.write_namespaces(namespaces)
self.write_inherit(inherit[-1])
elif len(namespaces):
self.write_namespaces(namespaces)
return list(main_identifiers.topleveldefs.values())
def write_render_callable(self, node, name, args, buffered, filtered,
cached):
"""write a top-level render callable.
this could be the main render() method or that of a top-level def."""
if self.in_def:
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_toplevel(%s)" % decorator)
self.printer.writelines(
"def %s(%s):" % (name, ','.join(args)),
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writeline("context._push_buffer()")
self.identifier_stack.append(
self.compiler.identifiers.branch(self.node))
if (not self.in_def or self.node.is_block) and '**pageargs' in args:
self.identifier_stack[-1].argument_declared.add('pageargs')
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0
):
self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
','.join([
"%s=%s" % (x, x) for x in
self.identifiers.argument_declared
]))
self.write_variable_declares(self.identifiers, toplevel=True)
for n in self.node.nodes:
n.accept_visitor(self)
self.write_def_finish(self.node, buffered, filtered, cached)
self.printer.writeline(None)
self.printer.write("\n\n")
if cached:
self.write_cache_decorator(
node, name,
args, buffered,
self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
def write_namespaces(self, namespaces):
"""write the module-level namespace-generating callable."""
self.printer.writelines(
"def _mako_get_namespace(context, name):",
"try:",
"return context.namespaces[(__name__, name)]",
"except KeyError:",
"_mako_generate_namespaces(context)",
"return context.namespaces[(__name__, name)]",
None, None
)
self.printer.writeline("def _mako_generate_namespaces(context):")
for node in namespaces.values():
if 'import' in node.attributes:
self.compiler.has_ns_imports = True
self.write_source_comment(node)
if len(node.nodes):
self.printer.writeline("def make_namespace():")
export = []
identifiers = self.compiler.identifiers.branch(node)
self.in_def = True
class NSDefVisitor(object):
def visitDefTag(s, node):
s.visitDefOrBase(node)
def visitBlockTag(s, node):
s.visitDefOrBase(node)
def visitDefOrBase(s, node):
if node.is_anonymous:
raise exceptions.CompileException(
"Can't put anonymous blocks inside "
"<%namespace>",
**node.exception_kwargs
)
self.write_inline_def(node, identifiers, nested=False)
export.append(node.funcname)
vis = NSDefVisitor()
for n in node.nodes:
n.accept_visitor(vis)
self.printer.writeline("return [%s]" % (','.join(export)))
self.printer.writeline(None)
self.in_def = False
callable_name = "make_namespace()"
else:
callable_name = "None"
if 'file' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.TemplateNamespace(%r,"
" context._clean_inheritance_tokens(),"
" templateuri=%s, callables=%s, "
" calling_uri=_template_uri)" %
(
node.name,
node.parsed_attributes.get('file', 'None'),
callable_name,
)
)
elif 'module' in node.parsed_attributes:
self.printer.writeline(
"ns = runtime.ModuleNamespace(%r,"
" context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri,"
" module=%s)" %
(
node.name,
callable_name,
node.parsed_attributes.get('module', 'None')
)
)
else:
self.printer.writeline(
"ns = runtime.Namespace(%r,"
" context._clean_inheritance_tokens(),"
" callables=%s, calling_uri=_template_uri)" %
(
node.name,
callable_name,
)
)
if eval(node.attributes.get('inheritable', "False")):
self.printer.writeline("context['self'].%s = ns" % (node.name))
self.printer.writeline(
"context.namespaces[(__name__, %s)] = ns" % repr(node.name))
self.printer.write("\n")
if not len(namespaces):
self.printer.writeline("pass")
self.printer.writeline(None)
def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
# need that arent in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union(
[c.funcname for c in identifiers.closuredefs.values()])
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
if self.compiler.enable_loop:
has_loop = "loop" in to_write
to_write.discard("loop")
else:
has_loop = False
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, 'has_ns_imports', False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.items():
if 'import' in ns.attributes:
self.printer.writeline(
"_mako_get_namespace(context, %r)."\
"_populate(_import_ns, %r)" %
(
ident,
re.split(r'\s*,\s*', ns.attributes['import'])
))
if has_loop:
self.printer.writeline(
'loop = __M_loop = runtime.LoopStack()'
)
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" %
(ident, ident)
)
else:
if getattr(self.compiler, 'has_ns_imports', False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)" %
(ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None, None
)
else:
self.printer.writeline(
"%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
(ident, ident, ident))
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" %
ident,
None
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the
corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context._locals(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline(
"return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
"""write a locally-available def callable inside an enclosing def."""
namedecls = node.get_argument_expressions()
decorator = node.decorator
if decorator:
self.printer.writeline(
"@runtime._decorate_inline(context, %s)" % decorator)
self.printer.writeline(
"def %s(%s):" % (node.funcname, ",".join(namedecls)))
filtered = len(node.filter_args.args) > 0
buffered = eval(node.attributes.get('buffered', 'False'))
cached = eval(node.attributes.get('cached', 'False'))
self.printer.writelines(
# push new frame, assign current frame to __M_caller
"__M_caller = context.caller_stack._push_frame()",
"try:"
)
if buffered or filtered or cached:
self.printer.writelines(
"context._push_buffer()",
)
identifiers = identifiers.branch(node, nested=nested)
self.write_variable_declares(identifiers)
self.identifier_stack.append(identifiers)
for n in node.nodes:
n.accept_visitor(self)
self.identifier_stack.pop()
self.write_def_finish(node, buffered, filtered, cached)
self.printer.writeline(None)
if cached:
self.write_cache_decorator(node, node.funcname,
namedecls, False, identifiers,
inline=True, toplevel=False)
    def write_def_finish(self, node, buffered, filtered, cached,
                         callstack=True):
        """write the end section of a rendering function, either outermost or
        inline.

        this takes into account if the rendering function was filtered,
        buffered, etc. and closes the corresponding try: block if any, and
        writes code to retrieve captured content, apply filters, send proper
        return value."""
        if not buffered and not cached and not filtered:
            # plain def: nothing was captured; rendering functions
            # conventionally return the empty string
            self.printer.writeline("return ''")
            if callstack:
                self.printer.writelines(
                    "finally:",
                    "context.caller_stack._pop_frame()",
                    None
                )
        if buffered or filtered or cached:
            if buffered or cached:
                # in a caching scenario, don't try to get a writer
                # from the context after popping; assume the caching
                # implementation might be using a context with no
                # extra buffers
                self.printer.writelines(
                    "finally:",
                    "__M_buf = context._pop_buffer()"
                )
            else:
                self.printer.writelines(
                    "finally:",
                    "__M_buf, __M_writer = context._pop_buffer_and_writer()"
                )
            if callstack:
                self.printer.writeline("context.caller_stack._pop_frame()")
            s = "__M_buf.getvalue()"
            if filtered:
                # wrap the captured output in the def's own filter expressions
                s = self.create_filter_callable(node.filter_args.args, s,
                                                False)
            self.printer.writeline(None)
            if buffered and not cached:
                # apply the compiler-wide buffer filters as well
                s = self.create_filter_callable(self.compiler.buffer_filters,
                                                s, False)
            if buffered or cached:
                self.printer.writeline("return %s" % s)
            else:
                self.printer.writelines(
                    "__M_writer(%s)" % s,
                    "return ''"
                )
    def write_cache_decorator(self, node_or_pagetag, name,
                              args, buffered, identifiers,
                              inline=False, toplevel=False):
        """write a post-function decorator to replace a rendering
        callable with a cached version of itself.

        Rebinds ``name`` to a wrapper that consults the cache plugin at
        render time, keeping the undecorated original as ``__M_<name>``.
        """
        self.printer.writeline("__M_%s = %s" % (name, name))
        cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
                                                         repr(name))
        cache_args = {}
        if self.compiler.pagetag is not None:
            # <%page>-level cache_* attributes act as defaults ...
            cache_args.update(
                (
                    pa[6:],
                    self.compiler.pagetag.parsed_attributes[pa]
                )
                for pa in self.compiler.pagetag.parsed_attributes
                if pa.startswith('cache_') and pa != 'cache_key'
            )
        # ... overridden by the tag's own cache_* attributes
        cache_args.update(
            (
                pa[6:],
                node_or_pagetag.parsed_attributes[pa]
            ) for pa in node_or_pagetag.parsed_attributes
            if pa.startswith('cache_') and pa != 'cache_key'
        )
        if 'timeout' in cache_args:
            # NOTE(review): eval() of template-author attribute input
            cache_args['timeout'] = int(eval(cache_args['timeout']))
        self.printer.writeline("def %s(%s):" % (name, ','.join(args)))
        # form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
        pass_args = [
            '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
            for a in args
        ]
        self.write_variable_declares(
            identifiers,
            toplevel=toplevel,
            limit=node_or_pagetag.undeclared_identifiers()
        )
        if buffered:
            s = "context.get('local')."\
                "cache._ctx_get_or_create("\
                "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % \
                (cachekey, name, ','.join(pass_args),
                 ''.join(["%s=%s, " % (k, v)
                          for k, v in cache_args.items()]),
                 name
                 )
            # apply buffer_filters
            s = self.create_filter_callable(self.compiler.buffer_filters, s,
                                            False)
            self.printer.writelines("return " + s, None)
        else:
            self.printer.writelines(
                "__M_writer(context.get('local')."
                "cache._ctx_get_or_create("
                "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" %
                (cachekey, name, ','.join(pass_args),
                 ''.join(["%s=%s, " % (k, v)
                          for k, v in cache_args.items()]),
                 name,
                 ),
                "return ''",
                None
            )
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters
present in the given filter names, adjusting for the global
'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
elif self.compiler.disable_unicode:
return filters.NON_UNICODE_ESCAPES.get(name, name)
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
def visitExpression(self, node):
self.write_source_comment(node)
if len(node.escapes) or \
(
self.compiler.pagetag is not None and
len(self.compiler.pagetag.filter_args.args)
) or \
len(self.compiler.default_filters):
s = self.create_filter_callable(node.escapes_code.args,
"%s" % node.text, True)
self.printer.writeline("__M_writer(%s)" % s)
else:
self.printer.writeline("__M_writer(%s)" % node.text)
    def visitControlLine(self, node):
        """Generate code for a ``%`` control line (if/for/while/end...)."""
        if node.isend:
            # close the indented block; loop-context loops also need the
            # finally: that exits the runtime `loop` manager
            self.printer.writeline(None)
            if node.has_loop_context:
                self.printer.writeline('finally:')
                self.printer.writeline("loop = __M_loop._exit()")
                self.printer.writeline(None)
        else:
            self.write_source_comment(node)
            if self.compiler.enable_loop and node.keyword == 'for':
                # may rewrite the iterable to provide the `loop` variable
                text = mangle_mako_loop(node, self.printer)
            else:
                text = node.text
            self.printer.writeline(text)
            children = node.get_children()
            # this covers the three situations where we want to insert a pass:
            # 1) a ternary control line with no children,
            # 2) a primary control line with nothing but its own ternary
            #    and end control lines, and
            # 3) any control line with no content other than comments
            if not children or (
                    compat.all(isinstance(c, (parsetree.Comment,
                                              parsetree.ControlLine))
                               for c in children) and
                    compat.all((node.is_ternary(c.keyword) or c.isend)
                               for c in children
                               if isinstance(c, parsetree.ControlLine))):
                self.printer.writeline("pass")
def visitText(self, node):
self.write_source_comment(node)
self.printer.writeline("__M_writer(%s)" % repr(node.content))
def visitTextTag(self, node):
filtered = len(node.filter_args.args) > 0
if filtered:
self.printer.writelines(
"__M_writer = context._push_writer()",
"try:",
)
for n in node.nodes:
n.accept_visitor(self)
if filtered:
self.printer.writelines(
"finally:",
"__M_buf, __M_writer = context._pop_buffer_and_writer()",
"__M_writer(%s)" %
self.create_filter_callable(
node.filter_args.args,
"__M_buf.getvalue()",
False),
None
)
    def visitCode(self, node):
        """Emit an inline <% %> python block verbatim.

        Module-level (``<%! %>``) blocks are handled elsewhere and are
        skipped here.
        """
        if not node.ismodule:
            self.write_source_comment(node)
            self.printer.write_indented_block(node.text)
            if not self.in_def and len(self.identifiers.locally_assigned) > 0:
                # if we are the "template" def, fudge locally
                # declared/modified variables into the "__M_locals" dictionary,
                # which is used for def calls within the same template,
                # to simulate "enclosing scope"
                self.printer.writeline(
                    '__M_locals_builtin_stored = __M_locals_builtin()')
                self.printer.writeline(
                    '__M_locals.update(__M_dict_builtin([(__M_key,'
                    ' __M_locals_builtin_stored[__M_key]) for __M_key in'
                    ' [%s] if __M_key in __M_locals_builtin_stored]))' %
                    ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri, %s)" %
(node.parsed_attributes['file'], args))
else:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri)" %
(node.parsed_attributes['file']))
def visitNamespaceTag(self, node):
pass
def visitDefTag(self, node):
pass
def visitBlockTag(self, node):
if node.is_anonymous:
self.printer.writeline("%s()" % node.funcname)
else:
nameargs = node.get_argument_expressions(include_defaults=False)
nameargs += ['**pageargs']
self.printer.writeline("if 'parent' not in context._data or "
"not hasattr(context._data['parent'], '%s'):"
% node.funcname)
self.printer.writeline(
"context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
# TODO: we can put namespace-specific checks here, such
# as ensure the given namespace will be imported,
# pre-import the namespace, etc.
self.visitCallTag(node)
    def visitCallTag(self, node):
        """Generate code for a <%call> (or namespace call) tag.

        Emits a ``ccall(caller)`` closure containing a ``body()`` function
        plus any <%def>s nested inside the tag, pushes a caller namespace
        onto the runtime caller stack, writes the call expression, and
        pops the caller again in a ``finally`` block.
        """
        self.printer.writeline("def ccall(caller):")
        export = ['body']
        callable_identifiers = self.identifiers.branch(node, nested=True)
        body_identifiers = callable_identifiers.branch(node, nested=False)
        # we want the 'caller' passed to ccall to be used
        # for the body() function, but for other non-body()
        # <%def>s within <%call> we want the current caller
        # off the call stack (if any)
        body_identifiers.add_declared('caller')
        self.identifier_stack.append(body_identifiers)
        # first pass: emit the nested defs/blocks into the ccall closure
        # (note: `s` is the DefVisitor instance, `self` the outer compiler)
        class DefVisitor(object):
            def visitDefTag(s, node):
                s.visitDefOrBase(node)
            def visitBlockTag(s, node):
                s.visitDefOrBase(node)
            def visitDefOrBase(s, node):
                self.write_inline_def(node, callable_identifiers, nested=False)
                if not node.is_anonymous:
                    export.append(node.funcname)
                # remove defs that are within the <%call> from the
                # "closuredefs" defined in the body, so they dont render twice
                if node.funcname in body_identifiers.closuredefs:
                    del body_identifiers.closuredefs[node.funcname]
        vis = DefVisitor()
        for n in node.nodes:
            n.accept_visitor(vis)
        self.identifier_stack.pop()
        bodyargs = node.body_decl.get_argument_expressions()
        self.printer.writeline("def body(%s):" % ','.join(bodyargs))
        # TODO: figure out best way to specify
        # buffering/nonbuffering (at call time would be better)
        buffered = False
        if buffered:
            self.printer.writelines(
                "context._push_buffer()",
                "try:"
            )
        self.write_variable_declares(body_identifiers)
        self.identifier_stack.append(body_identifiers)
        # second pass: emit the tag content as the body() function
        for n in node.nodes:
            n.accept_visitor(self)
        self.identifier_stack.pop()
        self.write_def_finish(node, buffered, False, False, callstack=False)
        self.printer.writelines(
            None,
            "return [%s]" % (','.join(export)),
            None
        )
        self.printer.writelines(
            # push on caller for nested call
            "context.caller_stack.nextcaller = "
            "runtime.Namespace('caller', context, "
            "callables=ccall(__M_caller))",
            "try:")
        self.write_source_comment(node)
        self.printer.writelines(
            "__M_writer(%s)" % self.create_filter_callable(
                [], node.expression, True),
            "finally:",
            "context.caller_stack.nextcaller = None",
            None
        )
class _Identifiers(object):
    """tracks the status of identifier names as template code is rendered.

    Instances form a tree mirroring the template's def/block nesting:
    each child is created via :meth:`branch` and inherits the declared
    names visible from its parent scope.
    """

    def __init__(self, compiler, node=None, parent=None, nested=False):
        if parent is not None:
            # if we are the branch created in write_namespaces(),
            # we don't share any context from the main body().
            if isinstance(node, parsetree.NamespaceTag):
                self.declared = set()
                self.topleveldefs = util.SetLikeDict()
            else:
                # things that have already been declared
                # in an enclosing namespace (i.e. names we can just use)
                self.declared = set(parent.declared).\
                    union([c.name for c in parent.closuredefs.values()]).\
                    union(parent.locally_declared).\
                    union(parent.argument_declared)
                # if these identifiers correspond to a "nested"
                # scope, it means whatever the parent identifiers
                # had as undeclared will have been declared by that parent,
                # and therefore we have them in our scope.
                if nested:
                    self.declared = self.declared.union(parent.undeclared)
                # top level defs that are available
                self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
        else:
            self.declared = set()
            self.topleveldefs = util.SetLikeDict()
        self.compiler = compiler
        # things within this level that are referenced before they
        # are declared (e.g. assigned to)
        self.undeclared = set()
        # things that are declared locally.  some of these things
        # could be in the "undeclared" list as well if they are
        # referenced before declared
        self.locally_declared = set()
        # assignments made in explicit python blocks.
        # these will be propagated to
        # the context of local def calls.
        self.locally_assigned = set()
        # things that are declared in the argument
        # signature of the def callable
        self.argument_declared = set()
        # closure defs that are defined in this level
        self.closuredefs = util.SetLikeDict()
        self.node = node
        if node is not None:
            # walk the node to populate the sets above
            node.accept_visitor(self)
        illegal_names = self.compiler.reserved_names.intersection(
            self.locally_declared)
        if illegal_names:
            raise exceptions.NameConflictError(
                "Reserved words declared in template: %s" %
                ", ".join(illegal_names))

    def branch(self, node, **kwargs):
        """create a new Identifiers for a new Node, with
        this Identifiers as the parent."""
        return _Identifiers(self.compiler, node, self, **kwargs)

    @property
    def defs(self):
        # all defs visible at this level, both top-level and closure
        return set(self.topleveldefs.union(self.closuredefs).values())

    def __repr__(self):
        return "Identifiers(declared=%r, locally_declared=%r, "\
            "undeclared=%r, topleveldefs=%r, closuredefs=%r, "\
            "argumentdeclared=%r)" %\
            (
                list(self.declared),
                list(self.locally_declared),
                list(self.undeclared),
                [c.name for c in self.topleveldefs.values()],
                [c.name for c in self.closuredefs.values()],
                self.argument_declared)

    def check_declared(self, node):
        """update the state of this Identifiers with the undeclared
        and declared identifiers of the given node."""
        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)
        for ident in node.declared_identifiers():
            self.locally_declared.add(ident)

    def add_declared(self, ident):
        """mark an identifier as declared, clearing any undeclared state."""
        self.declared.add(ident)
        if ident in self.undeclared:
            self.undeclared.remove(ident)

    def visitExpression(self, node):
        self.check_declared(node)

    def visitControlLine(self, node):
        self.check_declared(node)

    def visitCode(self, node):
        # module-level (<%! %>) blocks do not contribute template locals
        if not node.ismodule:
            self.check_declared(node)
            self.locally_assigned = self.locally_assigned.union(
                node.declared_identifiers())

    def visitNamespaceTag(self, node):
        # only traverse into the sub-elements of a
        # <%namespace> tag if we are the branch created in
        # write_namespaces()
        if self.node is node:
            for n in node.nodes:
                n.accept_visitor(self)

    def _check_name_exists(self, collection, node):
        # register the def/block name, raising on a genuine redefinition
        existing = collection.get(node.funcname)
        collection[node.funcname] = node
        if existing is not None and \
                existing is not node and \
                (node.is_block or existing.is_block):
            raise exceptions.CompileException(
                "%%def or %%block named '%s' already "
                "exists in this template." %
                node.funcname, **node.exception_kwargs)

    def visitDefTag(self, node):
        if node.is_root() and not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)
        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)
        # visit defs only one level deep
        if node is self.node:
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)
            for n in node.nodes:
                n.accept_visitor(self)

    def visitBlockTag(self, node):
        # named blocks may not be nested inside defs or <%call> tags
        if node is not self.node and \
                not node.is_anonymous:
            if isinstance(self.node, parsetree.DefTag):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of def '%s'"
                    % (node.name, self.node.name), **node.exception_kwargs)
            elif isinstance(self.node,
                            (parsetree.CallTag, parsetree.CallNamespaceTag)):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of <%%call> tag"
                    % (node.name, ), **node.exception_kwargs)
        for ident in node.undeclared_identifiers():
            if ident != 'context' and \
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)
        if not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
            self.undeclared.add(node.funcname)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        for n in node.nodes:
            n.accept_visitor(self)

    def visitTextTag(self, node):
        for ident in node.undeclared_identifiers():
            if ident != 'context' and \
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

    def visitIncludeTag(self, node):
        self.check_declared(node)

    def visitPageTag(self, node):
        # <%page> arguments become argument-declared names for the body
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        self.check_declared(node)

    def visitCallNamespaceTag(self, node):
        self.visitCallTag(node)

    def visitCallTag(self, node):
        if node is self.node:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and\
                        ident not in self.declared.union(self.locally_declared):
                    self.undeclared.add(ident)
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)
            for n in node.nodes:
                n.accept_visitor(self)
        else:
            # don't descend into a foreign <%call>; just record its
            # undeclared references
            for ident in node.undeclared_identifiers():
                if ident != 'context' and\
                        ident not in self.declared.union(self.locally_declared):
                    self.undeclared.add(ident)
# matches "for <target-list> in <expr>:" control lines, capturing the loop
# target(s) (group 1, optionally parenthesized/comma-separated) and the
# iterable expression (group 2) so mangle_mako_loop() can rewrite the
# iterable as the runtime `loop` context manager
_FOR_LOOP = re.compile(
    r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*'
    r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):'
)
def mangle_mako_loop(node, printer):
    """converts a for loop into a context manager wrapped around a for loop
    when access to the `loop` variable has been detected in the for loop body

    Returns the (possibly rewritten) control-line text.  When a rewrite
    occurs, the closing node is flagged with ``has_loop_context`` so the
    matching ``finally`` is emitted at the end of the loop.
    """
    detector = LoopVariable()
    node.accept_visitor(detector)
    if not detector.detected:
        # body never references `loop`; leave the line untouched
        return node.text
    node.nodes[-1].has_loop_context = True
    match = _FOR_LOOP.match(node.text)
    if not match:
        raise SyntaxError("Couldn't apply loop context: %s" % node.text)
    printer.writelines(
        'loop = __M_loop._enter(%s)' % match.group(2),
        'try:'
        # 'with __M_loop(%s) as loop:' % match.group(2)
    )
    return 'for %s in loop:' % match.group(1)
class LoopVariable(object):
    """A node visitor which looks for the name 'loop' within undeclared
    identifiers.

    After visiting, ``detected`` is True if ``loop`` appeared among the
    undeclared identifiers of any visited node or its descendants.
    """

    def __init__(self):
        self.detected = False

    def _loop_reference_detected(self, node):
        # stop descending as soon as a reference is found
        if 'loop' in node.undeclared_identifiers():
            self.detected = True
            return
        for child in node.get_children():
            child.accept_visitor(self)

    def visitControlLine(self, node):
        self._loop_reference_detected(node)

    def visitCode(self, node):
        self._loop_reference_detected(node)

    def visitExpression(self, node):
        self._loop_reference_detected(node)
| mcollins12321/anita | venv/lib/python2.7/site-packages/mako/codegen.py | Python | mit | 49,250 | [
"VisIt"
] | 4230cfcebed8cf6b0fe6171b6d3c0574269fbe3826f4229ad129a835424e97f2 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Carlos Hernandez, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# Portions copyright (c) 2012 Stanford University and the Authors.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit. Those portions are Copyright 2008-2012 Stanford University
# and Peter Eastman, and distributed under the following license:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
from __future__ import print_function, division
import os
from datetime import date
import gzip
import numpy as np
import xml.etree.ElementTree as etree
from copy import copy
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.core.topology import Topology
from mdtraj.utils import ilen, cast_indices, in_units_of, open_maybe_zipped
from mdtraj.formats.registry import FormatRegistry
from mdtraj.core import element as elem
from mdtraj.utils import six
from mdtraj import version
import warnings
if six.PY3:
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import (uses_relative, uses_netloc, uses_params)
else:
from urllib2 import urlopen
from urlparse import urlparse
from urlparse import uses_relative, uses_netloc, uses_params
# Ugly hack -- we don't always issue UserWarning in Py2, but we need to in
# this module
warnings.filterwarnings('always', category=UserWarning, module=__name__)
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
__all__ = ['load_pdb', 'PDBTrajectoryFile']
##############################################################################
# Code
##############################################################################
def _is_url(url):
    """Return True if *url* parses with a recognized URL scheme.

    from pandas/io.common.py Copyright 2014 Pandas Developers
    Used under the BSD licence
    """
    try:
        scheme = urlparse(url).scheme
    except (AttributeError, TypeError):
        # non-string input cannot be a URL
        return False
    return scheme in _VALID_URLS
@FormatRegistry.register_loader('.pdb')
@FormatRegistry.register_loader('.pdb.gz')
def load_pdb(filename, stride=None, atom_indices=None, frame=None,
             no_boxchk=False, standard_names=True):
    """Load a RCSB Protein Data Bank file from disk.

    Parameters
    ----------
    filename : str
        Path to the PDB file on disk. The string could be a URL. Valid URL
        schemes include http and ftp.
    stride : int, default=None
        Only read every stride-th model from the file
    atom_indices : array_like, default=None
        If not None, then read only a subset of the atoms coordinates from the
        file. These indices are zero-based (not 1 based, as used by the PDB
        format). So if you want to load only the first atom in the file, you
        would supply ``atom_indices = np.array([0])``.
    frame : int, default=None
        Use this option to load only a single frame from a trajectory on disk.
        If frame is None, the default, the entire trajectory will be loaded.
        If supplied, ``stride`` will be ignored.
    no_boxchk : bool, default=False
        By default, a heuristic check based on the particle density will be
        performed to determine if the unit cell dimensions are absurd. If the
        particle density is >1000 atoms per nm^3, the unit cell will be
        discarded. This is done because all PDB files from RCSB contain a CRYST1
        record, even if there are no periodic boundaries, and dummy values are
        filled in instead. This check will filter out those false unit cells and
        avoid potential errors in geometry calculations. Set this variable to
        ``True`` in order to skip this heuristic check.
    standard_names : bool, default=True
        If True, non-standard atomnames and residuenames are standardized to conform
        with the current PDB format version. If set to false, this step is skipped.

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.

    Examples
    --------
    >>> import mdtraj as md
    >>> pdb = md.load_pdb('2EQQ.pdb')
    >>> print(pdb)
    <mdtraj.Trajectory with 20 frames, 423 atoms at 0x110740a90>

    See Also
    --------
    mdtraj.PDBTrajectoryFile : Low level interface to PDB files
    """
    from mdtraj import Trajectory
    if not isinstance(filename, six.string_types):
        raise TypeError('filename must be of type string for load_pdb. '
                        'you supplied %s' % type(filename))

    atom_indices = cast_indices(atom_indices)
    filename = str(filename)
    with PDBTrajectoryFile(filename, standard_names=standard_names) as f:
        atom_slice = slice(None) if atom_indices is None else atom_indices
        if frame is not None:
            # keep the leading axis so coords stays 3-dimensional
            coords = f.positions[[frame], atom_slice, :]
        else:
            coords = f.positions[::stride, atom_slice, :]
        assert coords.ndim == 3, 'internal shape error'
        n_frames = len(coords)

        topology = f.topology
        if atom_indices is not None:
            topology = topology.subset(atom_indices)

        if f.unitcell_angles is not None and f.unitcell_lengths is not None:
            # a single CRYST1 record applies to every model in the file
            unitcell_lengths = np.array([f.unitcell_lengths] * n_frames)
            unitcell_angles = np.array([f.unitcell_angles] * n_frames)
        else:
            unitcell_lengths = None
            unitcell_angles = None

        # convert from angstroms (PDB convention) to the Trajectory unit
        in_units_of(coords, f.distance_unit, Trajectory._distance_unit, inplace=True)
        in_units_of(unitcell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)

    time = np.arange(len(coords))
    if frame is not None:
        # NOTE(review): len(coords) == 1 here, so np.arange(1) * frame is
        # always [0]; if the intent is to report the original frame index,
        # this should presumably be `time += frame` -- confirm upstream.
        time *= frame
    elif stride is not None:
        time *= stride

    traj = Trajectory(xyz=coords, time=time, topology=topology,
                      unitcell_lengths=unitcell_lengths,
                      unitcell_angles=unitcell_angles)

    if not no_boxchk and traj.unitcell_lengths is not None:
        # Only one CRYST1 record is allowed, so only do this check for the first
        # frame. Some RCSB PDB files do not *really* have a unit cell, but still
        # have a CRYST1 record with a dummy definition. These boxes are usually
        # tiny (e.g., 1 A^3), so check that the particle density in the unit
        # cell is not absurdly high. Standard water density is ~55 M, which
        # yields a particle density ~100 atoms per cubic nm. It should be safe
        # to say that no particle density should exceed 10x that.
        particle_density = traj.top.n_atoms / traj.unitcell_volumes[0]
        if particle_density > 1000:
            warnings.warn('Unlikely unit cell vectors detected in PDB file likely '
                          'resulting from a dummy CRYST1 record. Discarding unit '
                          'cell vectors.', category=UserWarning)
            traj._unitcell_lengths = traj._unitcell_angles = None

    return traj
@FormatRegistry.register_fileobject('.pdb')
@FormatRegistry.register_fileobject('.pdb.gz')
class PDBTrajectoryFile(object):
"""Interface for reading and writing Protein Data Bank (PDB) files
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
standard_names : bool, default=True
If True, non-standard atomnames and residuenames are standardized to conform
with the current PDB format version. If set to false, this step is skipped.
Attributes
----------
positions : np.ndarray, shape=(n_frames, n_atoms, 3)
topology : mdtraj.Topology
closed : bool
Notes
-----
When writing pdb files, mdtraj follows the PDB3.0 standard as closely as
possible. During *reading* however, we try to be more lenient. For instance,
we will parse common nonstandard atom names during reading, and convert them
into the standard names. The replacement table used by mdtraj is at
{mdtraj_source}/formats/pdb/data/pdbNames.xml.
See Also
--------
mdtraj.load_pdb : High-level wrapper that returns a ``md.Trajectory``
"""
distance_unit = 'angstroms'
_residueNameReplacements = {}
_atomNameReplacements = {}
_chain_names = [chr(ord('A') + i) for i in range(26)]
    def __init__(self, filename, mode='r', force_overwrite=True, standard_names=True):
        """Open a PDB file for reading or writing.

        In read mode the whole file (local path, URL, optionally
        gzip-compressed) is parsed immediately via ``_read_models()``.
        In write mode the file is opened and header/footer bookkeeping
        flags are initialized; nothing is written until ``write()``.
        """
        self._open = False
        self._file = None
        self._topology = None
        self._positions = None
        self._mode = mode
        self._last_topology = None
        self._standard_names = standard_names

        if mode == 'r':
            # make sure the nonstandard-name translation tables are loaded
            PDBTrajectoryFile._loadNameReplacementTables()

            if _is_url(filename):
                # remote file: download, decompressing .gz content in memory
                self._file = urlopen(filename)
                if filename.lower().endswith('.gz'):
                    if six.PY3:
                        self._file = gzip.GzipFile(fileobj=self._file)
                    else:
                        self._file = gzip.GzipFile(fileobj=six.StringIO(
                            self._file.read()))
                if six.PY3:
                    # normalize to a text stream for the parser
                    self._file = six.StringIO(self._file.read().decode('utf-8'))
            else:
                self._file = open_maybe_zipped(filename, 'r')

            self._read_models()
        elif mode == 'w':
            self._header_written = False
            self._footer_written = False
            self._file = open_maybe_zipped(filename, 'w', force_overwrite)
        else:
            raise ValueError("invalid mode: %s" % mode)

        self._open = True
    def write(self, positions, topology, modelIndex=None, unitcell_lengths=None,
              unitcell_angles=None, bfactors=None):
        """Write a PDB file to disk

        Parameters
        ----------
        positions : array_like
            The list of atomic positions to write.
        topology : mdtraj.Topology
            The Topology defining the model to write.
        modelIndex : {int, None}
            If not None, the model will be surrounded by MODEL/ENDMDL records
            with this index
        unitcell_lengths : {tuple, None}
            Lengths of the three unit cell vectors, or None for a non-periodic system
        unitcell_angles : {tuple, None}
            Angles between the three unit cell vectors, or None for a non-periodic system
        bfactors : array_like, default=None, shape=(n_atoms,)
            Save bfactors with pdb file. Should contain a single number for
            each atom in the topology

        Raises
        ------
        ValueError
            If the file is not open for writing, the number of positions
            does not match the topology, any position is NaN/infinite, or
            a bfactor falls outside (-10, 100).
        """
        if not self._mode == 'w':
            raise ValueError('file not opened for writing')
        if not self._header_written:
            # header (REMARK/CRYST1) is emitted once, before the first model
            self._write_header(unitcell_lengths, unitcell_angles)
            self._header_written = True

        if ilen(topology.atoms) != len(positions):
            raise ValueError('The number of positions must match the number of atoms')
        if np.any(np.isnan(positions)):
            raise ValueError('Particle position is NaN')
        if np.any(np.isinf(positions)):
            raise ValueError('Particle position is infinite')

        self._last_topology = topology  # Hack to save the topology of the last frame written, allows us to output CONECT entries in write_footer()

        if bfactors is None:
            bfactors = ['{0:5.2f}'.format(0.0)] * len(positions)
        else:
            # the 5.2f B-factor field can only represent (-10, 100)
            if (np.max(bfactors) >= 100) or (np.min(bfactors) <= -10):
                raise ValueError("bfactors must be in (-10, 100)")
            bfactors = ['{0:5.2f}'.format(b) for b in bfactors]

        atomIndex = 1
        posIndex = 0
        if modelIndex is not None:
            print("MODEL     %4d" % modelIndex, file=self._file)
        for (chainIndex, chain) in enumerate(topology.chains):
            # chain IDs cycle A..Z if there are more than 26 chains
            chainName = self._chain_names[chainIndex % len(self._chain_names)]
            residues = list(chain.residues)
            for (resIndex, res) in enumerate(residues):
                # residue names are truncated to the 3-column PDB field
                if len(res.name) > 3:
                    resName = res.name[:3]
                else:
                    resName = res.name
                for atom in res.atoms:
                    # short names of light elements get a leading space so the
                    # atom name starts in column 14, per PDB convention
                    if len(atom.name) < 4 and atom.name[:1].isalpha() and (atom.element is None or len(atom.element.symbol) < 2):
                        atomName = ' '+atom.name
                    elif len(atom.name) > 4:
                        atomName = atom.name[:4]
                    else:
                        atomName = atom.name
                    coords = positions[posIndex]
                    if atom.element is not None:
                        symbol = atom.element.symbol
                    else:
                        symbol = ' '
                    # serial wraps at 100000 and resSeq at 10000 to stay
                    # inside their fixed-width columns
                    line = "ATOM  %5d %-4s %3s %1s%4d    %s%s%s  1.00 %5s      %-4s%-2s  " % (
                        atomIndex % 100000, atomName, resName, chainName,
                        (res.resSeq) % 10000, _format_83(coords[0]),
                        _format_83(coords[1]), _format_83(coords[2]),
                        bfactors[posIndex], atom.segment_id[:4], symbol[-2:])
                    assert len(line) == 80, 'Fixed width overflow detected'
                    print(line, file=self._file)
                    posIndex += 1
                    atomIndex += 1
                if resIndex == len(residues)-1:
                    # close each chain with a TER record (consumes a serial)
                    print("TER   %5d      %3s %s%4d" % (atomIndex, resName, chainName, res.resSeq), file=self._file)
                    atomIndex += 1
        if modelIndex is not None:
            print("ENDMDL", file=self._file)
def _write_header(self, unitcell_lengths, unitcell_angles, write_metadata=True):
"""Write out the header for a PDB file.
Parameters
----------
unitcell_lengths : {tuple, None}
The lengths of the three unitcell vectors, ``a``, ``b``, ``c``
unitcell_angles : {tuple, None}
The angles between the three unitcell vectors, ``alpha``,
``beta``, ``gamma``
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if unitcell_lengths is None and unitcell_angles is None:
return
if unitcell_lengths is not None and unitcell_angles is not None:
if not len(unitcell_lengths) == 3:
raise ValueError('unitcell_lengths must be length 3')
if not len(unitcell_angles) == 3:
raise ValueError('unitcell_angles must be length 3')
else:
raise ValueError('either unitcell_lengths and unitcell_angles'
'should both be spefied, or neither')
box = list(unitcell_lengths) + list(unitcell_angles)
assert len(box) == 6
if write_metadata:
print("REMARK 1 CREATED WITH MDTraj %s, %s" % (version.version, str(date.today())), file=self._file)
print("CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1 " % tuple(box), file=self._file)
def _write_footer(self):
if not self._mode == 'w':
raise ValueError('file not opened for writing')
# Identify bonds that should be listed as CONECT records.
standardResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS', 'LEU', 'MET', 'PRO', 'THR', 'TYR',
'ARG', 'ASP', 'GLN', 'GLY', 'ILE', 'LYS', 'PHE', 'SER', 'TRP', 'VAL',
'A', 'G', 'C', 'U', 'I', 'DA', 'DG', 'DC', 'DT', 'DI', 'HOH']
conectBonds = []
if self._last_topology is not None:
for atom1, atom2 in self._last_topology.bonds:
if atom1.residue.name not in standardResidues or atom2.residue.name not in standardResidues:
conectBonds.append((atom1, atom2))
elif atom1.name == 'SG' and atom2.name == 'SG' and atom1.residue.name == 'CYS' and atom2.residue.name == 'CYS':
conectBonds.append((atom1, atom2))
if len(conectBonds) > 0:
# Work out the index used in the PDB file for each atom.
atomIndex = {}
nextAtomIndex = 0
prevChain = None
for chain in self._last_topology.chains:
for atom in chain.atoms:
if atom.residue.chain != prevChain:
nextAtomIndex += 1
prevChain = atom.residue.chain
atomIndex[atom] = nextAtomIndex
nextAtomIndex += 1
# Record which other atoms each atom is bonded to.
atomBonds = {}
for atom1, atom2 in conectBonds:
index1 = atomIndex[atom1]
index2 = atomIndex[atom2]
if index1 not in atomBonds:
atomBonds[index1] = []
if index2 not in atomBonds:
atomBonds[index2] = []
atomBonds[index1].append(index2)
atomBonds[index2].append(index1)
# Write the CONECT records.
for index1 in sorted(atomBonds):
bonded = atomBonds[index1]
while len(bonded) > 4:
print("CONECT%5d%5d%5d%5d" % (index1, bonded[0], bonded[1], bonded[2]), file=self._file)
del bonded[:4]
line = "CONECT%5d" % index1
for index2 in bonded:
line = "%s%5d" % (line, index2)
print(line, file=self._file)
print("END", file=self._file)
self._footer_written = True
@classmethod
def set_chain_names(cls, values):
"""Set the cycle of chain names used when writing PDB files
When writing PDB files, PDBTrajectoryFile translates each chain's
index into a name -- the name is what's written in the file. By
default, chains are named with the letters A-Z.
Parameters
----------
values : list
A list of chacters (strings of length 1) that the PDB writer will
cycle through to choose chain names.
"""
for item in values:
if not isinstance(item, six.string_types) and len(item) == 1:
raise TypeError('Names must be a single character string')
cls._chain_names = values
    @property
    def positions(self):
        """The cartesian coordinates of all of the atoms in each frame. Available when a file is opened in mode='r'

        Populated by _read_models(); presumably shaped
        (n_frames, n_atoms, 3) -- confirm against atom.get_position().
        """
        return self._positions
    @property
    def topology(self):
        """The topology from this PDB file. Available when a file is opened in mode='r'

        Built by _read_models() from the chains/residues/atoms of the parsed
        PdbStructure.
        """
        return self._topology
    @property
    def unitcell_lengths(self):
        """The unitcell lengths (3-tuple) in this PDB file. May be None.

        Set in _read_models() from pdb.get_unit_cell_lengths().
        """
        return self._unitcell_lengths
    @property
    def unitcell_angles(self):
        """The unitcell angles (3-tuple) in this PDB file. May be None.

        Set in _read_models() from pdb.get_unit_cell_angles().
        """
        return self._unitcell_angles
    @property
    def closed(self):
        """Whether the file is closed (i.e. close() has been called)."""
        return not self._open
def close(self):
"Close the PDB file"
if self._mode == 'w' and not self._footer_written:
self._write_footer()
if self._open:
self._file.close()
self._open = False
    def _read_models(self):
        """Parse every MODEL of the file: build self._topology, stack all
        frame coordinates into self._positions, read the unit cell, and
        create standard/disulfide/CONECT bonds.

        Raises
        ------
        ValueError
            If the file is not open for reading, or if the MODELs do not
            all contain the same number of atoms.
        """
        if not self._mode == 'r':
            raise ValueError('file not opened for reading')
        self._topology = Topology()
        pdb = PdbStructure(self._file, load_all_models=True)
        # serial number -> topology Atom, used below to resolve CONECT records
        atomByNumber = {}
        for chain in pdb.iter_chains():
            c = self._topology.add_chain()
            for residue in chain.iter_residues():
                resName = residue.get_name()
                # Optionally normalize residue/atom names to the standard
                # (replacement tables are loaded from pdbNames.xml elsewhere).
                if resName in PDBTrajectoryFile._residueNameReplacements and self._standard_names:
                    resName = PDBTrajectoryFile._residueNameReplacements[resName]
                r = self._topology.add_residue(resName, c, residue.number, residue.segment_id)
                if resName in PDBTrajectoryFile._atomNameReplacements and self._standard_names:
                    atomReplacements = PDBTrajectoryFile._atomNameReplacements[resName]
                else:
                    atomReplacements = {}
                for atom in residue.atoms:
                    atomName = atom.get_name()
                    if atomName in atomReplacements:
                        atomName = atomReplacements[atomName]
                    atomName = atomName.strip()
                    element = atom.element
                    if element is None:
                        # Element missing from the file: fall back to the
                        # name-based heuristic.
                        element = PDBTrajectoryFile._guess_element(atomName, residue.name, len(residue))
                    newAtom = self._topology.add_atom(atomName, element, r, serial=atom.serial_number)
                    atomByNumber[atom.serial_number] = newAtom
        # load all of the positions (from every model)
        _positions = []
        for model in pdb.iter_models(use_all_models=True):
            coords = []
            for chain in model.iter_chains():
                for residue in chain.iter_residues():
                    for atom in residue.atoms:
                        coords.append(atom.get_position())
            _positions.append(coords)
        # Every MODEL must have identical atom counts or stacking would fail.
        if not all(len(f) == len(_positions[0]) for f in _positions):
            raise ValueError('PDB Error: All MODELs must contain the same number of ATOMs')
        self._positions = np.array(_positions)
        ## The atom positions read from the PDB file
        self._unitcell_lengths = pdb.get_unit_cell_lengths()
        self._unitcell_angles = pdb.get_unit_cell_angles()
        self._topology.create_standard_bonds()
        self._topology.create_disulfide_bonds(self.positions[0])
        # Add bonds based on CONECT records.
        # NOTE(review): only the last model's CONECT records are consulted;
        # presumably they are identical across models -- confirm upstream.
        connectBonds = []
        for connect in pdb.models[-1].connects:
            i = connect[0]
            for j in connect[1:]:
                if i in atomByNumber and j in atomByNumber:
                    connectBonds.append((atomByNumber[i], atomByNumber[j]))
        if len(connectBonds) > 0:
            # Only add bonds that don't already exist.
            existingBonds = set(self._topology.bonds)
            for bond in connectBonds:
                if bond not in existingBonds and (bond[1], bond[0]) not in existingBonds:
                    self._topology.add_bond(bond[0], bond[1])
                    existingBonds.add(bond)
    @staticmethod
    def _loadNameReplacementTables():
        """Load the list of atom and residue name replacements.

        Parses data/pdbNames.xml once (guarded by the emptiness check) and
        fills the class-level tables:
          - _residueNameReplacements: alternate residue name -> canonical name
          - _atomNameReplacements: canonical residue name -> {alt atom name:
            canonical atom name}
        """
        if len(PDBTrajectoryFile._residueNameReplacements) == 0:
            tree = etree.parse(os.path.join(os.path.dirname(__file__), 'data', 'pdbNames.xml'))
            allResidues = {}
            proteinResidues = {}
            nucleicAcidResidues = {}
            # First pass: collect the generic atom-name tables declared under
            # the special pseudo-residues 'All', 'Protein' and 'Nucleic'.
            for residue in tree.getroot().findall('Residue'):
                name = residue.attrib['name']
                if name == 'All':
                    PDBTrajectoryFile._parseResidueAtoms(residue, allResidues)
                elif name == 'Protein':
                    PDBTrajectoryFile._parseResidueAtoms(residue, proteinResidues)
                elif name == 'Nucleic':
                    PDBTrajectoryFile._parseResidueAtoms(residue, nucleicAcidResidues)
            # 'All' entries apply to both protein and nucleic tables.
            for atom in allResidues:
                proteinResidues[atom] = allResidues[atom]
                nucleicAcidResidues[atom] = allResidues[atom]
            # Second pass: per-residue tables, seeded from the generic table
            # chosen by the residue's 'type' attribute.
            for residue in tree.getroot().findall('Residue'):
                name = residue.attrib['name']
                for id in residue.attrib:
                    # 'name' and every 'alt*' attribute are aliases for this
                    # canonical residue name.
                    if id == 'name' or id.startswith('alt'):
                        PDBTrajectoryFile._residueNameReplacements[residue.attrib[id]] = name
                if 'type' not in residue.attrib:
                    atoms = copy(allResidues)
                elif residue.attrib['type'] == 'Protein':
                    atoms = copy(proteinResidues)
                elif residue.attrib['type'] == 'Nucleic':
                    atoms = copy(nucleicAcidResidues)
                else:
                    atoms = copy(allResidues)
                PDBTrajectoryFile._parseResidueAtoms(residue, atoms)
                PDBTrajectoryFile._atomNameReplacements[name] = atoms
@staticmethod
def _guess_element(atom_name, residue_name, residue_length):
"Try to guess the element name"
upper = atom_name.upper()
if upper.startswith('CL'):
element = elem.chlorine
elif upper.startswith('NA'):
element = elem.sodium
elif upper.startswith('MG'):
element = elem.magnesium
elif upper.startswith('BE'):
element = elem.beryllium
elif upper.startswith('LI'):
element = elem.lithium
elif upper.startswith('K'):
element = elem.potassium
elif upper.startswith('ZN'):
element = elem.zinc
elif residue_length == 1 and upper.startswith('CA'):
element = elem.calcium
# TJL has edited this. There are a few issues here. First,
# parsing for the element is non-trivial, so I do my best
# below. Second, there is additional parsing code in
# pdbstructure.py, and I am unsure why it doesn't get used
# here...
elif residue_length > 1 and upper.startswith('CE'):
element = elem.carbon # (probably) not Celenium...
elif residue_length > 1 and upper.startswith('CD'):
element = elem.carbon # (probably) not Cadmium...
elif residue_name in ['TRP', 'ARG', 'GLN', 'HIS'] and upper.startswith('NE'):
element = elem.nitrogen # (probably) not Neon...
elif residue_name in ['ASN'] and upper.startswith('ND'):
element = elem.nitrogen # (probably) not ND...
elif residue_name == 'CYS' and upper.startswith('SG'):
element = elem.sulfur # (probably) not SG...
else:
try:
element = elem.get_by_symbol(atom_name[0])
except KeyError:
try:
symbol = atom_name[0:2].strip().rstrip("AB0123456789").lstrip("0123456789")
element = elem.get_by_symbol(symbol)
except KeyError:
element = None
return element
@staticmethod
def _parseResidueAtoms(residue, map):
for atom in residue.findall('Atom'):
name = atom.attrib['name']
for id in atom.attrib:
map[atom.attrib[id]] = name
    def __del__(self):
        # Guarantee cleanup (footer written, handle closed) even if the
        # caller never calls close() explicitly.
        self.close()
    def __enter__(self):
        # Context-manager support: "with PDBTrajectoryFile(...) as f:"
        return self
    def __exit__(self, *exc_info):
        # Close on context exit; returns None, so exceptions propagate.
        self.close()
    def __len__(self):
        "Number of frames in the file"
        # Frame positions are only populated by _read_models() in mode='r'.
        if str(self._mode) != 'r':
            raise NotImplementedError('len() only available in mode="r" currently')
        if not self._open:
            raise ValueError('I/O operation on closed file')
        return len(self._positions)
def _format_83(f):
"""Format a single float into a string of width 8, with ideally 3 decimal
places of precision. If the number is a little too large, we can
gracefully degrade the precision by lopping off some of the decimal
places. If it's much too large, we throw a ValueError"""
if -999.999 < f < 9999.999:
return '%8.3f' % f
if -9999999 < f < 99999999:
return ('%8.3f' % f)[:8]
raise ValueError('coordinate "%s" could not be represnted '
'in a width-8 field' % f)
| jchodera/mdtraj | mdtraj/formats/pdb/pdbfile.py | Python | lgpl-2.1 | 29,713 | [
"MDTraj",
"OpenMM"
] | 5206c9592812beb5ce0410c3cb054b35901538c32a5bb49db619222e3dd67fda |
#!/usr/bin/env python2
'''
Output the efficiency of a sim from a pext.py output.
All units are SI.
This script is garbage. Consider not using it.
Usage:
quantities.py [options] <input>
Options:
--2D -2 Calculate 2D quantities instead.
--E-cut=ECUT -e ECUT Cutoff at this energy in MeV. [default: 0.0]
--I=I -I I Use this intensity in W/cm^2. [default: 3e18]
--W=SPOTSIZE -w SPOTSIZE Set the spotsize meters. [default: 2.26e-6]
--T=FWHM -t FWHM Set the Full-Width Half-Max in seconds. [default: 30e-15]
--angle=ANGLE -a ANGLE Restrict the angle. In 2D, this is just phi; in 3D, it is solid angle.
--L=L -l L Set the wavelength. [default: 800e-9]
'''
import numpy as np;
e0 = 8.85418782e-12;  # vacuum permittivity [F/m]
c = 2.99792458e8;  # speed of light in vacuum [m/s]
e = 1.60217657e-19  # elementary charge [C]
def totalKE(d, ecut=0, anglecut=None, return_bools=False):
    '''
    Sum the energy crossing a pext plane.

    Parameters and Keywords
    -----------------------
    d            -- record array/dict with 'KE' and 'q' fields, plus
                    'phi' (2D) or 'theta'/'phi' (3D) for angular cuts
    ecut         -- keep only particles with KE strictly above this (eV)
    anglecut     -- None, or a (angle, dimension) pair where dimension is
                    "2D" or "3D"; falsey disables the angular restriction
    return_bools -- also return the boolean mask of kept particles

    Returns the total energy (and optionally the selection mask).
    '''
    keep = d['KE'] > ecut
    if anglecut:
        angle, dim = anglecut
        half_width = angle / 180 * np.pi
        if dim == '2D':
            # Planar cut: keep particles whose phi lies within the cone
            # around the backward direction.
            keep &= np.abs(d['phi']) > np.pi - half_width
        elif dim == '3D':
            # Solid-angle cut around the -x direction.
            keep &= np.cos(half_width) < -np.sin(d['theta']) * np.cos(d['phi'])
        else:
            raise ValueError("anglecut is not None, '2D' or '3D'")
    # Weight each particle's energy by the magnitude of its charge.
    total = (np.abs(d['q'][keep] * 1e-6) * d['KE'][keep]).sum()
    if return_bools:
        return total, keep
    return total
def laserE(E_0, T, w, dim="3D"):
    '''
    Total energy in a Gaussian laser pulse.

    Parameters and Keywords
    -----------------------
    E_0 -- peak electric field
    T   -- FWHM duration of the pulse
    w   -- spot size
    dim -- spatial dimension: "2D", "3D", or a falsey value (same as "3D")

    Returns the laser energy.
    '''
    # Peak intensity of the field: I = c*eps0*E^2 / 2.
    peak_intensity = (c * e0 * E_0**2) / 2
    if dim == "2D":
        # NOTE(review): the trailing 1e-2 looks like a per-cm unit fudge
        # inherited from the original -- preserved as-is.
        return w * np.sqrt(np.pi / 2) * peak_intensity * T * 1e-2
    elif not dim or dim == "3D":
        return w**2 * (np.pi / 2) * peak_intensity * T
    else:
        raise ValueError("dim is not None, '2D' or '3D'")
if __name__ == "__main__":
    from docopt import docopt;
    opts = docopt(__doc__,help=True);
    #E_0 = float(opts['--E_0']);
    I = float(opts['--I']);
    # Peak field from intensity: I = (1/2) c eps0 E^2, with the 1e4 factor
    # converting W/cm^2 -> W/m^2.
    E_0 = np.sqrt(2*I*1e4/(c*e0));
    # Energy cutoff supplied in MeV on the command line; convert to eV.
    ecut = float(opts['--E-cut'])*1e6;
    w = float(opts['--W']);
    T = float(opts['--T']);
    d = np.load(opts['<input>'],allow_pickle=True);
    dim = "2D" if opts['--2D'] else "3D";
    if opts['--angle']:
        angle = float(opts['--angle']);
        angleopt = (angle,dim);
    else:
        angleopt = None;
    # Particle energy (J) and the mask of particles that passed the cuts.
    KE, good = totalKE(d, ecut, angleopt, return_bools=True);
    LE = laserE(E_0, T, w, dim=dim);
    # Charge of the kept particles; 1e6 scales to pC (pC/cm in 2D).
    totalq = d['q'][good].sum()*1e6;
    print('total charge: {:e} {}'.format(totalq,'pC/cm' if opts['--2D'] else 'pC'));
    print("total energy: {:e} J".format(KE));
    print('pulse energy: {:e} J'.format(LE));
    print('efficiency is {:e}'.format(KE/LE));
| noobermin/sharks-butts | bin/quantities.py | Python | mit | 3,257 | [
"Gaussian"
] | c020911895e818c1dd0afda40af1fe3fa1bcacd61a9d7d8dc59ee6334a810b18 |
# The following is a Python translation of a MATLAB file originally written principally by Mike Tipping
# as part of his SparseBayes software library. Initially published on GitHub on July 21st, 2015.
# SB2_CONTROLSETTINGS Set parameters to control the SPARSEBAYES algorithm
#
# CONTROLS = SB2_CONTROLSETTINGS
#
# OUTPUT ARGUMENTS:
#
# CONTROLS A structure whose fields control various aspects of the
# running of the SPARSEBAYES algorithm.
#
# .ZeroFactor Small number equivalent to zero for Q^2-S
# .MinDeltaLogAlpha Termination criterion for changes in log-alpha
# .MinDeltaLogBeta Termination criterion for changes in log-beta
#
# .PriorityAddition Prefer "addition" operations
# .PriorityDeletion Prefer "deletion" operations
#
# .BetaUpdateStart How many "fast start" beta updates
# .BetaUpdateFrequency
# How regularly to update beta after the above
# .BetaMaxFactor Minimum value control for noise estimate
#
# .PosteriorModeFrequency
# How regularly to re-find the posterior mode
#
# .BasisAlignmentTest Test for redundant basis vectors?
# .AlignmentMax Basis redundancy criterion
#
# NOTES:
#
# The various definitions in the file are effectively "fixed" and not
# modified elsewhere.
#
# The interested user may wish to experiment with the operation of the
# SPARSEBAYES algorithm by modifying the values the file directly. See the
# inline comments for hints on the various control settings.
#
#
# Copyright 2009, Vector Anomaly Ltd
#
# This file is part of the SPARSEBAYES library for Matlab (V2.0).
#
# SPARSEBAYES is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SPARSEBAYES is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with SPARSEBAYES in the accompanying file "licence.txt"; if not, write to
# the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA
#
# Contact the author: m a i l [at] m i k e t i p p i n g . c o m
#
def SB2_ControlSettings():
    """Return the dict of fixed parameters that control the SPARSEBAYES
    inference algorithm.

    Keys (see the header comment above for full explanations):
      ZeroFactor, MinDeltaLogAlpha, MinDeltaLogBeta -- tolerances
      PriorityAddition, PriorityDeletion            -- action preferences
      BetaUpdateStart, BetaUpdateFrequency,
      BetaMaxFactor                                 -- Gaussian noise updates
      PosteriorModeFrequency                        -- posterior re-finding
      BasisAlignmentTest, AlignmentMax              -- basis redundancy test
    """
    # Basis vectors with inner product above 1 - alignment_zero are
    # considered redundant.
    alignment_zero = 1e-3
    return {
        # TOLERANCES: Q^2-S factors below ZeroFactor count as zero; the two
        # MinDelta values are the termination criteria for log-alpha and
        # (in the Gaussian case) log-beta changes.
        'ZeroFactor': 1e-12,
        'MinDeltaLogAlpha': 1e-3,
        'MinDeltaLogBeta': 1e-6,
        # ADD/DELETE: preferring deletion tends to give sparser models and
        # slightly faster runs; both flags may be True simultaneously.
        'PriorityAddition': False,
        'PriorityDeletion': True,
        # (GAUSSIAN) NOISE: update beta every iteration for the first
        # BetaUpdateStart iterations, then every BetaUpdateFrequency; the
        # noise variance is clamped to variance-of-targets / BetaMaxFactor.
        'BetaUpdateStart': 10,
        'BetaUpdateFrequency': 5,
        'BetaMaxFactor': 1e6,
        # POSTERIORMODE: alpha updates between posterior-mode recomputations
        # in the non-Gaussian case (1 = recompute every iteration).
        'PosteriorModeFrequency': 1,
        # REDUNDANT BASIS: whether to test for aligned basis vectors, and
        # the maximum permitted alignment.
        'BasisAlignmentTest': True,
        'AlignmentMax': 1 - alignment_zero,
    }
| jhallock7/SparseBayes-Python | SB2_ControlSettings.py | Python | gpl-2.0 | 5,159 | [
"Gaussian"
] | d8bc9cc9e01f9564b117bd4748c17ce55c340a4e5f2eaba8083dd3b47fd094c0 |
import numpy as np
from gpaw.lfc import LocalizedFunctionsCollection as LFC
from gpaw.grid_descriptor import GridDescriptor
from gpaw.atom.radialgd import EquidistantRadialGridDescriptor
from gpaw.spline import Spline
from gpaw.setup import Setup
# Test setup: a single Gaussian-shaped localized function per angular
# momentum l = 0..lmax, placed at the corner of a cubic box.
rc = 2.0          # cutoff radius of the localized functions
a = 2.5 * rc      # box edge length
n = 64            # grid points per axis
lmax = 2
b = 8.0           # Gaussian width parameter
m = (lmax + 1)**2  # total number of (l, m) channels
gd = GridDescriptor([n, n, n], [a, a, a])
r = np.linspace(0, rc, 200)
g = np.exp(-(r / rc * b)**2)  # Gaussian radial shape on [0, rc]
splines = [Spline(l=l, rmax=rc, f_g=g) for l in range(lmax + 1)]
c = LFC(gd, [splines])
c.set_positions([(0, 0, 0)])
psi = gd.zeros(m)
d0 = c.dict(m)
# NOTE(review): key 0 is presumably only present on the rank that owns the
# atom -- hence the membership guard; confirm against LFC.dict().
if 0 in d0:
    d0[0] = np.identity(m)
# Expand one localized function into each of the m grid arrays.
c.add(psi, d0)
d1 = c.dict(m, derivative=True)
# Grid-based derivative integrals <phi_i | nabla | psi>.
c.derivative(psi, d1)
class TestSetup(Setup):
    # Minimal stand-in for a full Setup: only the class attributes consumed
    # by get_derivative_integrals are provided.
    l_j = range(lmax + 1)  # presumably angular momentum per radial function -- confirm
    nj = lmax + 1          # number of radial functions
    ni = m                 # total number of partial waves
    def __init__(self):
        # Deliberately bypass Setup.__init__; the base initializer is not
        # needed for this test.
        pass
# Reference result computed on a radial grid for comparison with the
# grid-based derivative d1.
rgd = EquidistantRadialGridDescriptor(r[1], len(r))
g = [np.exp(-(r / rc * b)**2) * r**l for l in range(lmax + 1)]
d2 = TestSetup().get_derivative_integrals(rgd, g, np.zeros_like(g))
# Same rank-ownership guard as for d0/d1 above.  (Python 2 print statement.)
if 0 in d1:
    print abs(d1[0] - d2).max()
    assert abs(d1[0] - d2).max() < 2e-6
| robwarm/gpaw-symm | gpaw/test/nabla.py | Python | gpl-3.0 | 1,055 | [
"GPAW"
] | 345809cd72316036d328d4b640c836bde3f9319b8e574411280a9439bc1e4d7a |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""this module contains some utilities to navigate in the tree or to
extract information from it
:author: Sylvain Thenault
:copyright: 2003-2007 LOGILAB S.A. (Paris, FRANCE)
:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
:copyright: 2003-2007 Sylvain Thenault
:contact: mailto:thenault@gmail.com
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
__docformat__ = "restructuredtext en"
from clonedigger.logilab.common.compat import enumerate
from clonedigger.logilab.astng._exceptions import IgnoreChild
def extend_class(original, addons):
    """add methods and attributes defined in the addons class to the original
    class

    Attributes describing the addons class object itself -- its docstring,
    module name, and the implicit __dict__/__weakref__ descriptors that every
    new-style class carries -- are not copied.
    """
    # BUG FIX: the previous implementation called original.__dict__.update,
    # but the __dict__ of a new-style class is a read-only mapping proxy
    # with no update method; use setattr instead.
    skipped = ('__doc__', '__module__', '__dict__', '__weakref__')
    for name, value in addons.__dict__.items():
        if name not in skipped:
            setattr(original, name, value)
class ASTWalker(object):
    """Preorder tree walker that dispatches to a handler object.

    Entering a node calls the handler's ``visit_<classname>`` method and
    leaving it calls ``leave_<classname>`` (class names lowercased), falling
    back to ``visit_default`` / ``leave_default`` when the specific method is
    missing.  A visit callback may raise IgnoreChild to suppress descent
    into the node's children.
    """

    def __init__(self, handler):
        self.handler = handler
        self._cache = {}  # node class -> (enter callback, leave callback)

    def walk(self, node):
        """Walk the tree rooted at <node>, firing handler callbacks."""
        try:
            self.visit(node)
        except IgnoreChild:
            pass
        else:
            for child in node.getChildNodes():
                self.walk(child)
        # The leave callback fires even when children were skipped.
        self.leave(node)

    def get_callbacks(self, node):
        """Return (and cache) the (enter, leave) pair for this node's class."""
        klass = node.__class__
        callbacks = self._cache.get(klass)
        if callbacks is None:
            handler = self.handler
            lowered = klass.__name__.lower()
            enter = getattr(handler, 'visit_%s' % lowered,
                            getattr(handler, 'visit_default', None))
            leave = getattr(handler, 'leave_%s' % lowered,
                            getattr(handler, 'leave_default', None))
            callbacks = (enter, leave)
            self._cache[klass] = callbacks
        return callbacks

    def visit(self, node):
        """Fire the enter callback for <node>, if any is defined."""
        enter = self.get_callbacks(node)[0]
        if enter is not None:
            enter(node)

    def leave(self, node):
        """Fire the leave callback for <node>, if any is defined."""
        leave = self.get_callbacks(node)[1]
        if leave is not None:
            leave(node)
class LocalsVisitor(ASTWalker):
    """Visit a project by traversing each scoped node's locals.

    Acts as its own handler: callbacks are looked up on self via
    get_callbacks().  Each node is visited at most once.
    """

    def __init__(self):
        ASTWalker.__init__(self, self)
        self._visited = {}  # nodes already handled

    def visit(self, node):
        """launch the visit starting from the given node"""
        if node in self._visited:
            return
        self._visited[node] = 1
        enter, leave = self.get_callbacks(node)
        descend = True
        if enter is not None:
            try:
                enter(node)
            except IgnoreChild:
                descend = False
        if descend and hasattr(node, 'locals'):
            # NOTE(review): scoped astng nodes presumably expose their
            # locals through .values() -- confirm against the node classes.
            for child in list(node.values()):
                self.visit(child)
        if leave is not None:
            return leave(node)
def are_exclusive(stmt1, stmt2):
    """return true if the two given statements are mutually exclusive

    algorithm :
     1) index stmt1's parents
     2) climb among stmt2's parents until we find a common parent
     3) if the common parent is a If or TryExcept statement, look if nodes
        are in exclusive branches
    """
    from clonedigger.logilab.astng.nodes import If, TryExcept
    # index stmt1's parents
    stmt1_parents = {}
    children = {}
    node = stmt1.parent
    previous = stmt1
    while node:
        stmt1_parents[node] = 1
        children[node] = previous
        previous = node
        node = node.parent
    # climb among stmt2's parents until we find a common parent
    node = stmt2.parent
    previous = stmt2
    while node:
        if node in stmt1_parents:
            # if the common parent is a If or TryExcept statement, look if
            # nodes are in exclusive branchs
            if isinstance(node, If):
                # different children of the same If -> different branches
                if previous != children[node]:
                    return True
            elif isinstance(node, TryExcept):
                stmt1_previous = children[node]
                if previous is not stmt1_previous:
                    stmt1_branch, stmt1_num = _try_except_from_branch(node, stmt1_previous)
                    stmt2_branch, stmt2_num = _try_except_from_branch(node, previous)
                    # BUG FIX: this used to compare stmt1_branch against
                    # itself, so the differing-branch case never fired.
                    if stmt1_branch != stmt2_branch:
                        # body/else and body/except pairs are NOT exclusive:
                        # the body may partially execute before the handler
                        # or else-clause runs.
                        if not ((stmt2_branch == 'body' and stmt1_branch == 'else') or
                                (stmt1_branch == 'body' and stmt2_branch == 'else') or
                                (stmt2_branch == 'body' and stmt1_branch == 'except') or
                                (stmt1_branch == 'body' and stmt2_branch == 'except')):
                            return True
                    elif stmt1_num != stmt2_num:
                        # two distinct except handlers are exclusive
                        return True
            return False
        previous = node
        node = node.parent
    return False
def _try_except_from_branch(node, stmt):
if stmt is node.body:
return 'body', 1
if stmt is node.else_:
return 'else', 1
for i, block_nodes in enumerate(node.handlers):
if stmt in block_nodes:
return 'except', i
| shaleh/clonedigger | clonedigger/logilab/astng/utils.py | Python | gpl-3.0 | 6,714 | [
"VisIt"
] | 7360b20fd3c8f74b9da1ae93057c1f1cf412720733655e633c70cf09b65f3575 |
# proxy module
from __future__ import absolute_import
from mayavi.modules.volume import *
| enthought/etsproxy | enthought/mayavi/modules/volume.py | Python | bsd-3-clause | 90 | [
"Mayavi"
] | 158aa221092c2b058d0154ac421d59160cf87b4e83c5e63d4b32ecf32d53c141 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkGenericStreamTracer(SimpleVTKClassModuleBase):
    """Auto-generated DeVIDE module wrapping VTK's vtkGenericStreamTracer
    filter (see the generator note at the top of this file).
    """
    def __init__(self, module_manager):
        # Delegate everything to the generic wrapper base: input may be a
        # vtkGenericDataSet or vtkDataSet, output is vtkPolyData, and the
        # module's docstring is replaced by the VTK class documentation.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkGenericStreamTracer(), 'Processing.',
            ('vtkGenericDataSet', 'vtkDataSet'), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
"VTK"
] | 2923d16e3ad4230550793191b51505a73644a566eac6fb4f097e84d56850c000 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyEspressopp(CMakePackage):
    """ESPResSo++ is an extensible, flexible, fast and parallel simulation
    software for soft matter research. It is a highly versatile software
    package for the scientific simulation and analysis of coarse-grained
    atomistic or bead-spring models as they are used in soft matter research
    """
    homepage = "https://espressopp.github.io"
    url = "https://github.com/espressopp/espressopp/tarball/v1.9.4.1"

    version('develop', git='https://github.com/espressopp/espressopp.git', branch='master')
    version('1.9.5', '13a93c30b07132b5e5fa0d828aa17d79')
    version('1.9.4.1', '0da74a6d4e1bfa6a2a24fca354245a4f')
    version('1.9.4', 'f2a27993a83547ad014335006eea74ea')

    # Optional documentation targets (built via extra make targets below).
    variant('ug', default=False, description='Build user guide')
    variant('pdf', default=False, description='Build user guide in pdf format')
    variant('dg', default=False, description='Build developer guide')

    depends_on("cmake@2.8:", type='build')
    depends_on("mpi")
    depends_on("boost+serialization+filesystem+system+python+mpi", when='@1.9.4:')
    extends("python")
    depends_on("python@2:2.8")
    # Note the differing mpi4py minimums per release.
    depends_on("py-mpi4py@2.0.0:", when='@1.9.4', type=('build', 'run'))
    depends_on("py-mpi4py@1.3.1:", when='@1.9.4.1:', type=('build', 'run'))
    depends_on("fftw")
    # Documentation toolchain, only pulled in by the doc variants.
    depends_on("py-sphinx", when="+ug", type='build')
    depends_on("py-sphinx", when="+pdf", type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-matplotlib', when="+ug", type='build')
    depends_on('py-matplotlib', when="+pdf", type='build')
    depends_on("texlive", when="+pdf", type='build')
    depends_on("doxygen", when="+dg", type='build')

    def cmake_args(self):
        """Use the Spack-provided mpi4py/boost and skip user rc files."""
        return [
            '-DEXTERNAL_MPI4PY=ON',
            '-DEXTERNAL_BOOST=ON',
            '-DWITH_RC_FILES=OFF'
        ]

    def build(self, spec, prefix):
        """Build the library, then any requested documentation targets.

        The doc targets are not parallel-safe, hence parallel=False.
        """
        with working_dir(self.build_directory):
            make()
            if '+ug' in spec:
                make("ug", parallel=False)
            if '+pdf' in spec:
                make("ug-pdf", parallel=False)
            if '+dg' in spec:
                make("doc", parallel=False)
| EmreAtes/spack | var/spack/repos/builtin/packages/py-espressopp/package.py | Python | lgpl-2.1 | 3,452 | [
"ESPResSo"
] | d48eb4448c6b29fc01f96d411bbc4569873b3b0e0000f1ccd48ed0f8510ac615 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.