text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""Probit regression class and diagnostics."""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import numpy.linalg as la
import scipy.optimize as op
from scipy.stats import norm, chisqprob
import scipy.sparse as SP
import user_output as USER
import summary_output as SUMMARY
from utils import spdot, spbroadcast
__all__ = ["Probit"]
class BaseProbit(object):
    """
    Probit class to do all the computations

    Parameters
    ----------
    x           : array
                  nxk array of independent variables (assumed to be aligned with y)
    y           : array
                  nx1 array of dependent binary variable
    w           : W
                  PySAL weights instance or spatial weights sparse matrix
                  aligned with y
    optim       : string
                  Optimization method.
                  Default: 'newton' (Newton-Raphson).
                  Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem      : string
                  Method to calculate the scale of the marginal effects.
                  Default: 'phimean' (Mean of individual marginal effects)
                  Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter     : int
                  Maximum number of iterations until optimizer stops

    Attributes
    ----------
    x           : array
                  Two dimensional array with n rows and one column for each
                  independent (exogenous) variable, including the constant
    y           : array
                  nx1 array of dependent variable
    betas       : array
                  kx1 array with estimated coefficients
    predy       : array
                  nx1 array of predicted y values
    n           : int
                  Number of observations
    k           : int
                  Number of variables
    vm          : array
                  Variance-covariance matrix (kxk)
    z_stat      : list of tuples
                  z statistic; each tuple contains the pair (statistic,
                  p-value), where each is a float
    xmean       : array
                  Mean of the independent variables (kx1)
    predpc      : float
                  Percent of y correctly predicted
    logl        : float
                  Log-Likelihood of the estimation
    scalem      : string
                  Method to calculate the scale of the marginal effects.
    scale       : float
                  Scale of the marginal effects.
    slopes      : array
                  Marginal effects of the independent variables (k-1x1)
                  Note: Disregards the presence of dummies.
    slopes_vm   : array
                  Variance-covariance matrix of the slopes (k-1xk-1)
    LR          : tuple
                  Likelihood Ratio test of all coefficients = 0
                  (test statistics, p-value)
    Pinkse_error: float
                  Lagrange Multiplier test against spatial error correlation.
                  Implemented as presented in [Pinkse2004]_
    KP_error    : float
                  Moran's I type test against spatial error correlation.
                  Implemented as presented in [Kelejian2001]_
    PS_error    : float
                  Lagrange Multiplier test against spatial error correlation.
                  Implemented as presented in [Pinkse1998]_
    warning     : boolean
                  if True Maximum number of iterations exceeded or gradient
                  and/or function calls not changing.

    Examples
    --------
    >>> import numpy as np
    >>> import pysal
    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T
    >>> x = np.hstack((np.ones(y.shape),x))
    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    >>> w.transform='r'
    >>> model = BaseProbit((y>40).astype(float), x, w=w)
    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])

    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])

    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]
    """

    def __init__(self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100):
        self.y = y
        self.x = x
        self.n, self.k = x.shape
        self.optim = optim
        self.scalem = scalem
        self.w = w
        self.maxiter = maxiter
        par_est, self.warning = self.par_est()
        self.betas = np.reshape(par_est[0], (self.k, 1))
        # par_est[1] is the minimized *negative* log-likelihood.
        self.logl = -float(par_est[1])

    def _cache_ref(self):
        """Return the per-instance result cache, creating it on first use.

        All derived quantities below are computed lazily and memoized in this
        dict.  The original implementation duplicated the same
        try/except AttributeError/KeyError dance in every single property;
        centralizing it here removes that boilerplate without changing the
        caching semantics (setters still simply overwrite the cached entry).
        """
        try:
            return self._cache
        except AttributeError:
            self._cache = {}
            return self._cache

    @property
    def vm(self):
        """Variance-covariance matrix (kxk): negative inverse of the hessian at betas."""
        cache = self._cache_ref()
        if 'vm' not in cache:
            cache['vm'] = -la.inv(self.hessian(self.betas))
        return cache['vm']

    @vm.setter
    def vm(self, val):
        self._cache_ref()['vm'] = val

    @property
    def z_stat(self):
        """List of (z statistic, two-sided p-value) tuples, one per coefficient."""
        cache = self._cache_ref()
        if 'z_stat' not in cache:
            variance = self.vm.diagonal()
            zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance)
            cache['z_stat'] = [(z, norm.sf(abs(z)) * 2) for z in zStat]
        return cache['z_stat']

    @z_stat.setter
    def z_stat(self, val):
        self._cache_ref()['z_stat'] = val

    @property
    def slopes_std_err(self):
        """Standard errors of the marginal effects (sqrt of slopes_vm diagonal)."""
        cache = self._cache_ref()
        if 'slopes_std_err' not in cache:
            cache['slopes_std_err'] = np.sqrt(self.slopes_vm.diagonal())
        return cache['slopes_std_err']

    @slopes_std_err.setter
    def slopes_std_err(self, val):
        self._cache_ref()['slopes_std_err'] = val

    @property
    def slopes_z_stat(self):
        """List of (z statistic, two-sided p-value) tuples for the marginal effects."""
        cache = self._cache_ref()
        if 'slopes_z_stat' not in cache:
            zStat = self.slopes.reshape(len(self.slopes),) / self.slopes_std_err
            cache['slopes_z_stat'] = [(z, norm.sf(abs(z)) * 2) for z in zStat]
        return cache['slopes_z_stat']

    @slopes_z_stat.setter
    def slopes_z_stat(self, val):
        self._cache_ref()['slopes_z_stat'] = val

    @property
    def xmean(self):
        """Column means of x as a kx1 array (handles dense or sparse x)."""
        cache = self._cache_ref()
        if 'xmean' not in cache:
            try:
                cache['xmean'] = np.reshape(sum(self.x) / self.n, (self.k, 1))
            except Exception:
                # Sparse x: the row-sum is itself a sparse matrix and must be
                # densified before it can be reshaped.
                cache['xmean'] = np.reshape(sum(self.x).toarray() / self.n, (self.k, 1))
        return cache['xmean']

    @xmean.setter
    def xmean(self, val):
        self._cache_ref()['xmean'] = val

    @property
    def xb(self):
        """Linear index x * betas (nx1)."""
        cache = self._cache_ref()
        if 'xb' not in cache:
            cache['xb'] = spdot(self.x, self.betas)
        return cache['xb']

    @xb.setter
    def xb(self, val):
        self._cache_ref()['xb'] = val

    @property
    def predy(self):
        """Predicted probabilities Phi(x * betas) (nx1)."""
        cache = self._cache_ref()
        if 'predy' not in cache:
            cache['predy'] = norm.cdf(self.xb)
        return cache['predy']

    @predy.setter
    def predy(self, val):
        self._cache_ref()['predy'] = val

    @property
    def predpc(self):
        """Percent of y correctly predicted (|y - predy| <= 0.5 counts as a hit)."""
        cache = self._cache_ref()
        if 'predpc' not in cache:
            # Vectorized form of the original element-wise loop: an
            # observation is counted as correctly predicted when the
            # absolute prediction error does not exceed 0.5.
            hits = np.abs(self.y - self.predy) <= 0.5
            cache['predpc'] = float(100.0 * np.sum(hits) / self.n)
        return cache['predpc']

    @predpc.setter
    def predpc(self, val):
        self._cache_ref()['predpc'] = val

    @property
    def phiy(self):
        """Standard normal density evaluated at the linear index, phi(x * betas)."""
        cache = self._cache_ref()
        if 'phiy' not in cache:
            cache['phiy'] = norm.pdf(self.xb)
        return cache['phiy']

    @phiy.setter
    def phiy(self, val):
        self._cache_ref()['phiy'] = val

    @property
    def scale(self):
        """Scale of the marginal effects, per the scalem method chosen at init."""
        cache = self._cache_ref()
        if 'scale' not in cache:
            if self.scalem == 'phimean':
                cache['scale'] = float(1.0 * np.sum(self.phiy) / self.n)
            elif self.scalem == 'xmean':
                cache['scale'] = float(norm.pdf(np.dot(self.xmean.T, self.betas)))
        return cache['scale']

    @scale.setter
    def scale(self, val):
        self._cache_ref()['scale'] = val

    @property
    def slopes(self):
        """Marginal effects of the non-constant variables (k-1 x 1)."""
        cache = self._cache_ref()
        if 'slopes' not in cache:
            # betas[0] is the constant; it has no marginal effect.
            cache['slopes'] = self.betas[1:] * self.scale
        return cache['slopes']

    @slopes.setter
    def slopes(self, val):
        self._cache_ref()['slopes'] = val

    @property
    def slopes_vm(self):
        """Variance-covariance matrix of the slopes (k-1 x k-1), delta method."""
        cache = self._cache_ref()
        if 'slopes_vm' not in cache:
            x = self.xmean
            b = self.betas
            dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
            full_vm = (self.scale ** 2) * \
                np.dot(np.dot(dfdb, self.vm), dfdb.T)
            cache['slopes_vm'] = full_vm[1:, 1:]
        return cache['slopes_vm']

    @slopes_vm.setter
    def slopes_vm(self, val):
        self._cache_ref()['slopes_vm'] = val

    @property
    def LR(self):
        """Likelihood Ratio test of all coefficients = 0: (statistic, p-value)."""
        cache = self._cache_ref()
        if 'LR' not in cache:
            # Restricted log-likelihood uses only the sample proportion P.
            P = 1.0 * np.sum(self.y) / self.n
            LR = float(
                -2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
            cache['LR'] = (LR, chisqprob(LR, self.k))
        return cache['LR']

    @LR.setter
    def LR(self, val):
        self._cache_ref()['LR'] = val

    @property
    def u_naive(self):
        """Naive residuals y - predy (nx1)."""
        cache = self._cache_ref()
        if 'u_naive' not in cache:
            cache['u_naive'] = self.y - self.predy
        return cache['u_naive']

    @u_naive.setter
    def u_naive(self, val):
        self._cache_ref()['u_naive'] = val

    @property
    def u_gen(self):
        """Generalized (standardized) residuals phi * u_naive / (Phi * (1 - Phi))."""
        cache = self._cache_ref()
        if 'u_gen' not in cache:
            Phi_prod = self.predy * (1 - self.predy)
            cache['u_gen'] = self.phiy * (self.u_naive / Phi_prod)
        return cache['u_gen']

    @u_gen.setter
    def u_gen(self, val):
        self._cache_ref()['u_gen'] = val

    @property
    def Pinkse_error(self):
        """Pinkse (2004) LM test against spatial error correlation: (stat, p-value)."""
        cache = self._cache_ref()
        if 'Pinkse_error' not in cache:
            # sp_tests computes all three spatial diagnostics in one pass.
            cache['Pinkse_error'], cache['KP_error'], cache['PS_error'] = sp_tests(self)
        return cache['Pinkse_error']

    @Pinkse_error.setter
    def Pinkse_error(self, val):
        self._cache_ref()['Pinkse_error'] = val

    @property
    def KP_error(self):
        """Kelejian-Prucha (2001) Moran's I type test: (stat, p-value)."""
        cache = self._cache_ref()
        if 'KP_error' not in cache:
            cache['Pinkse_error'], cache['KP_error'], cache['PS_error'] = sp_tests(self)
        return cache['KP_error']

    @KP_error.setter
    def KP_error(self, val):
        self._cache_ref()['KP_error'] = val

    @property
    def PS_error(self):
        """Pinkse-Slade (1998) LM test against spatial error correlation: (stat, p-value)."""
        cache = self._cache_ref()
        if 'PS_error' not in cache:
            cache['Pinkse_error'], cache['KP_error'], cache['PS_error'] = sp_tests(self)
        return cache['PS_error']

    @PS_error.setter
    def PS_error(self, val):
        self._cache_ref()['PS_error'] = val

    def par_est(self):
        """Maximize the log-likelihood with the optimizer chosen at init.

        Returns
        -------
        tuple : (optimizer output, warning flag).  The first two entries of
                the optimizer output are the parameter estimates and the
                minimized negative log-likelihood; the warning flag is True
                when the optimizer reported non-convergence.
        """
        # OLS coefficients provide the starting values.
        start = np.dot(la.inv(spdot(self.x.T, self.x)),
                       spdot(self.x.T, self.y))
        flogl = lambda par: -self.ll(par)
        if self.optim == 'newton':
            fgrad = lambda par: self.gradient(par)
            fhess = lambda par: self.hessian(par)
            par_hat = newton(flogl, start, fgrad, fhess, self.maxiter)
            warn = par_hat[2]
        else:
            # scipy minimizers need the gradient of the *negative* ll.
            fgrad = lambda par: -self.gradient(par)
            if self.optim == 'bfgs':
                par_hat = op.fmin_bfgs(
                    flogl, start, fgrad, full_output=1, disp=0)
                warn = par_hat[6]
            if self.optim == 'ncg':
                fhess = lambda par: -self.hessian(par)
                par_hat = op.fmin_ncg(
                    flogl, start, fgrad, fhess=fhess, full_output=1, disp=0)
                warn = par_hat[5]
        if warn > 0:
            warn = True
        else:
            warn = False
        return par_hat, warn

    def ll(self, par):
        """Probit log-likelihood evaluated at parameter vector par."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1  # +1/-1 coding of the binary response
        qxb = q * spdot(self.x, beta)
        return sum(np.log(norm.cdf(qxb)))

    def gradient(self, par):
        """Gradient (score) of the log-likelihood at par, as a 1d array of length k."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        qxb = q * spdot(self.x, beta)
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        return spdot(lamb.T, self.x)[0]

    def hessian(self, par):
        """Hessian of the log-likelihood at par (kxk)."""
        beta = np.reshape(np.array(par), (self.k, 1))
        q = 2 * self.y - 1
        xb = spdot(self.x, beta)
        qxb = q * xb
        lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
        return spdot(self.x.T, spbroadcast(self.x, -lamb * (lamb + xb)))
class Probit(BaseProbit):
    """
    Classic non-spatial Probit and spatial diagnostics. The class includes a
    printout that formats all the results and tests in a nice format.

    The diagnostics for spatial dependence currently implemented are:

    * Pinkse Error [Pinkse2004]_
    * Kelejian and Prucha Moran's I [Kelejian2001]_
    * Pinkse & Slade Error [Pinkse1998]_

    Parameters
    ----------
    x           : array
                  nxk array of independent variables (assumed to be aligned with y)
    y           : array
                  nx1 array of dependent binary variable
    w           : W
                  PySAL weights instance aligned with y
    optim       : string
                  Optimization method.
                  Default: 'newton' (Newton-Raphson).
                  Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
    scalem      : string
                  Method to calculate the scale of the marginal effects.
                  Default: 'phimean' (Mean of individual marginal effects)
                  Alternative: 'xmean' (Marginal effects at variables mean)
    maxiter     : int
                  Maximum number of iterations until optimizer stops
    name_y      : string
                  Name of dependent variable for use in output
    name_x      : list of strings
                  Names of independent variables for use in output
    name_w      : string
                  Name of weights matrix for use in output
    name_ds     : string
                  Name of dataset for use in output

    Attributes
    ----------
    x           : array
                  Two dimensional array with n rows and one column for each
                  independent (exogenous) variable, including the constant
    y           : array
                  nx1 array of dependent variable
    betas       : array
                  kx1 array with estimated coefficients
    predy       : array
                  nx1 array of predicted y values
    n           : int
                  Number of observations
    k           : int
                  Number of variables
    vm          : array
                  Variance-covariance matrix (kxk)
    z_stat      : list of tuples
                  z statistic; each tuple contains the pair (statistic,
                  p-value), where each is a float
    xmean       : array
                  Mean of the independent variables (kx1)
    predpc      : float
                  Percent of y correctly predicted
    logl        : float
                  Log-Likelihood of the estimation
    scalem      : string
                  Method to calculate the scale of the marginal effects.
    scale       : float
                  Scale of the marginal effects.
    slopes      : array
                  Marginal effects of the independent variables (k-1x1)
    slopes_vm   : array
                  Variance-covariance matrix of the slopes (k-1xk-1)
    LR          : tuple
                  Likelihood Ratio test of all coefficients = 0
                  (test statistics, p-value)
    Pinkse_error: float
                  Lagrange Multiplier test against spatial error correlation.
                  Implemented as presented in [Pinkse2004]_
    KP_error    : float
                  Moran's I type test against spatial error correlation.
                  Implemented as presented in [Kelejian2001]_
    PS_error    : float
                  Lagrange Multiplier test against spatial error correlation.
                  Implemented as presented in [Pinkse1998]_
    warning     : boolean
                  if True Maximum number of iterations exceeded or gradient
                  and/or function calls not changing.
    name_y      : string
                  Name of dependent variable for use in output
    name_x      : list of strings
                  Names of independent variables for use in output
    name_w      : string
                  Name of weights matrix for use in output
    name_ds     : string
                  Name of dataset for use in output
    title       : string
                  Name of the regression method used

    Examples
    --------

    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.

    >>> import numpy as np
    >>> import pysal

    Open data on Columbus neighborhood crime (49 areas) using pysal.open().
    This is the DBF associated with the Columbus shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.

    >>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')

    Extract the CRIME column (crime) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept. Since we want to run a probit model and for this
    example we use the Columbus data, we also need to transform the continuous
    CRIME variable into a binary variable. As in [McMillen1992]_, we define
    y = 1 if CRIME > 40.

    >>> y = np.array([dbf.by_col('CRIME')]).T
    >>> y = (y>40).astype(float)

    Extract HOVAL (home values) and INC (income) vectors from the DBF to be used as
    independent variables in the regression. Note that PySAL requires this to
    be an nxj numpy array, where j is the number of independent variables (not
    including a constant). By default this class adds a vector of ones to the
    independent variables passed in.

    >>> names_to_extract = ['INC', 'HOVAL']
    >>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T

    Since we want to the test the probit model for spatial dependence, we need to
    specify the spatial weights matrix that includes the spatial configuration of
    the observations into the error component of the model. To do that, we can open
    an already existing gal file or create a new one. In this case, we will use
    ``columbus.gal``, which contains contiguity relationships between the
    observations in the Columbus dataset we are using throughout this example.
    Note that, in order to read the file, not only to open it, we need to
    append '.read()' at the end of the command.

    >>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()

    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. In PySAL, this
    can be easily performed in the following way:

    >>> w.transform='r'

    We are all set with the preliminaries, we are good to run the model. In this
    case, we will need the variables and the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.

    >>> model = Probit(y, x, w=w, name_y='crime', name_x=['income','home value'], name_ds='columbus', name_w='columbus.gal')

    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them.

    >>> np.around(model.betas, decimals=6)
    array([[ 3.353811],
           [-0.199653],
           [-0.029514]])

    >>> np.around(model.vm, decimals=6)
    array([[ 0.852814, -0.043627, -0.008052],
           [-0.043627,  0.004114, -0.000193],
           [-0.008052, -0.000193,  0.00031 ]])

    Since we have provided a spatial weigths matrix, the diagnostics for
    spatial dependence have also been computed. We can access them and their
    p-values individually:

    >>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
    >>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
    >>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
    >>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
    [['Pinkse_error' '3.131719' '0.076783']
     ['KP_error' '1.721312' '0.085194']
     ['PS_error' '2.558166' '0.109726']]

    Or we can easily obtain a full summary of all the results nicely formatted and
    ready to be printed simply by typing 'print model.summary'
    """

    def __init__(
        self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100,
        vm=False, name_y=None, name_x=None, name_w=None, name_ds=None,
        spat_diag=False):

        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        # Identity check ('is not None') instead of '!=': equality on a
        # weights object could invoke W.__ne__ and is not the intent here.
        if w is not None:
            USER.check_weights(w, y)
            # A weights matrix implies the user wants the spatial diagnostics.
            spat_diag = True
            ws = w.sparse
        else:
            ws = None
        x_constant = USER.check_constant(x)
        BaseProbit.__init__(self, y=y, x=x_constant, w=ws,
                            optim=optim, scalem=scalem, maxiter=maxiter)
        self.title = "CLASSIC PROBIT ESTIMATOR"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.Probit(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def newton(flogl, start, fgrad, fhess, maxiter):
    """
    Newton-Raphson maximization of a log-likelihood.

    Parameters
    ----------
    flogl   : lambda
              Function to calculate the (negative) log-likelihood
    start   : array
              kx1 array of starting values
    fgrad   : lambda
              Function to calculate the gradient
    fhess   : lambda
              Function to calculate the hessian
    maxiter : int
              Maximum number of iterations until optimizer stops

    Returns
    -------
    tuple : (estimates, flogl evaluated at the estimates, warning flag;
             1 when maxiter was reached without convergence)
    """
    warn = 0
    n_iter = 0
    est = start
    # Convergence measure g'H^{-1}g; seeded above tolerance to enter the loop.
    crit = 1
    while n_iter < maxiter and crit >= 1e-04:
        inv_neg_hess = -la.inv(fhess(est))
        grad = fgrad(est).reshape(start.shape)
        step = np.dot(inv_neg_hess, grad)
        est = est + step
        n_iter += 1
        crit = np.dot(grad.T, step)
    if n_iter == maxiter:
        warn = 1
    return (est, flogl(est), warn)
def sp_tests(reg):
    """
    Calculates tests for spatial dependence in Probit models

    Parameters
    ----------
    reg : regression object
          output instance from a probit model

    Returns
    -------
    tuple : (Pinkse LM error test, Kelejian-Prucha Moran's I, Pinkse-Slade
             test); each is an array [statistic, p-value].

    Raises
    ------
    Exception : when the regression object carries no weights matrix.
    """
    if reg.w != None:
        # Accept either a PySAL W (expose .sparse) or an already-sparse matrix.
        try:
            w = reg.w.sparse
        except:
            w = reg.w
        Phi = reg.predy
        phi = reg.phiy
        # Pinkse_error:
        # LM statistic built from the generalized residuals u_gen.
        Phi_prod = Phi * (1 - Phi)
        u_naive = reg.u_naive
        u_gen = reg.u_gen
        sig2 = np.sum((phi * phi) / Phi_prod) / reg.n
        LM_err_num = np.dot(u_gen.T, (w * u_gen)) ** 2
        trWW = np.sum((w * w).diagonal())
        trWWWWp = trWW + np.sum((w * w.T).diagonal())
        LM_err = float(1.0 * LM_err_num / (sig2 ** 2 * trWWWWp))
        LM_err = np.array([LM_err, chisqprob(LM_err, 1)])
        # KP_error:
        # Moran's I flavoured statistic on the naive residuals.
        moran = moran_KP(reg.w, u_naive, Phi_prod)
        # Pinkse-Slade_error:
        # LM statistic on residuals standardized by their variance.
        u_std = u_naive / np.sqrt(Phi_prod)
        ps_num = np.dot(u_std.T, (w * u_std)) ** 2
        trWpW = np.sum((w.T * w).diagonal())
        ps = float(ps_num / (trWW + trWpW))
        # chi-square instead of bootstrap.
        ps = np.array([ps, chisqprob(ps, 1)])
    else:
        raise Exception, "W matrix must be provided to calculate spatial tests."
    return LM_err, moran, ps
def moran_KP(w, u, sig2i):
    """
    Calculates the Moran's I flavoured test of Kelejian and Prucha (2001)
    on probit residuals, with a two-sided standard-normal p-value.

    Parameters
    ----------
    w     : W
            PySAL weights instance aligned with y (or an already-sparse
            weights matrix)
    u     : array
            nx1 array of naive residuals
    sig2i : array
            nx1 array of individual variance

    Returns
    -------
    array : [statistic, p-value]
    """
    # PySAL W objects expose the sparse matrix through .sparse; anything
    # else is assumed to already be sparse.
    if hasattr(w, 'sparse'):
        w = w.sparse
    numerator = np.dot(u.T, (w * u))
    # Diagonal matrix of the individual variances.
    sig_diag = SP.lil_matrix(w.get_shape())
    sig_diag.setdiag(sig2i.flat)
    sig_diag = sig_diag.asformat('csr')
    we = w * sig_diag
    denominator = np.sqrt(np.sum((we * we + (w.T * sig_diag) * we).diagonal()))
    stat = float(1.0 * numerator / denominator)
    return np.array([stat, norm.sf(abs(stat)) * 2.])
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    _test()
    # Worked example: classic probit on the Columbus crime data with
    # spatial diagnostics (mirrors the doctests above).
    import numpy as np
    import pysal
    dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([dbf.by_col('CRIME')]).T
    var_x = ['INC', 'HOVAL']
    x = np.array([dbf.by_col(name) for name in var_x]).T
    w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    w.transform = 'r'  # row-standardize the weights
    probit1 = Probit(
        (y > 40).astype(float), x, w=w, name_x=var_x, name_y="CRIME",
        name_ds="Columbus", name_w="columbus.dbf")
    print probit1.summary
|
pastephens/pysal
|
pysal/spreg/probit.py
|
Python
|
bsd-3-clause
| 34,383
|
[
"COLUMBUS"
] |
01072466a3b3bf22f38f5cd735b17aae45ac0836086d320e99a68ac767d66be0
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Building the density matrix from data parsed by cclib."""
import logging
import random
import numpy
from .calculationmethod import Method
class Density(Method):
    """Calculate the density matrix from parsed MO coefficients."""

    def __init__(self, data, progress=None, loglevel=logging.INFO,
                 logname="Density"):

        # Call the __init__ method of the superclass.
        super(Density, self).__init__(data, progress, loglevel, logname)

    def __str__(self):
        """Return a string representation of the object."""
        # Bug fix: the original format string had no %s placeholder, so the
        # '%' operator raised TypeError whenever str() was called.
        return "Density matrix of %s" % (self.data)

    def __repr__(self):
        """Return a representation of the object."""
        return 'Density matrix("%s")' % (self.data)

    def calculate(self, fupdate=0.05):
        """Calculate the density matrix.

        Builds self.density with shape (nspins, nbasis, nbasis) by summing
        the outer products of the occupied MO coefficient vectors; for
        restricted calculations the single spin block is doubled to account
        for double occupation.  Returns True on success, False when a
        required parsed attribute is missing.
        """

        # Do we have the needed info in the data object?
        if not hasattr(self.data, "mocoeffs"):
            self.logger.error("Missing mocoeffs")
            return False
        if not hasattr(self.data,"nbasis"):
            self.logger.error("Missing nbasis")
            return False
        if not hasattr(self.data,"homos"):
            self.logger.error("Missing homos")
            return False

        self.logger.info("Creating attribute density: array[3]")
        size = self.data.nbasis
        unrestricted = (len(self.data.mocoeffs) == 2)

        #determine number of steps, and whether process involves beta orbitals
        nstep = self.data.homos[0] + 1
        if unrestricted:
            self.density = numpy.zeros([2, size, size], "d")
            nstep += self.data.homos[1] + 1
        else:
            self.density = numpy.zeros([1, size, size], "d")

        #intialize progress if available
        if self.progress:
            self.progress.initialize(nstep)

        step = 0
        for spin in range(len(self.data.mocoeffs)):
            for i in range(self.data.homos[spin] + 1):
                if self.progress and random.random() < fupdate:
                    self.progress.update(step, "Density Matrix")
                # Outer product of the i-th occupied MO with itself.
                col = numpy.reshape(self.data.mocoeffs[spin][i], (size, 1))
                colt = numpy.reshape(col, (1, size))
                tempdensity = numpy.dot(col, colt)
                self.density[spin] = numpy.add(self.density[spin],
                                               tempdensity)
                step += 1

        if not unrestricted: #multiply by two to account for second electron
            self.density[0] = numpy.add(self.density[0], self.density[0])

        if self.progress:
            self.progress.update(nstep, "Done")

        return True #let caller know we finished density
|
Schamnad/cclib
|
src/cclib/method/density.py
|
Python
|
bsd-3-clause
| 2,968
|
[
"cclib"
] |
c4fcb2b7651d2f0d717b056513db8ce3e50bb5c4048ccb57a343c4ee78148369
|
import numpy as np
def collapse(j, data):
    """Collapse samples that share an index into a single row.

    Returns the sorted unique values of *j* and, for each one, the sum of
    the entries of *data* whose index equals it.
    """
    keys = np.unique(j)
    sums = np.array([data[j == key].sum() for key in keys])
    return keys, sums
def nearest(coords, shape, **kwargs):
    """Nearest-neighbour sampler.

    Keeps coordinates that are not all-NaN and lie within half a voxel of
    the (z, y, x)-shaped volume bounds, rounds them to the closest voxel,
    and returns (row indices, flat voxel indices, unit weights).
    """
    in_bounds = ~(np.isnan(coords).all(1))
    # Columns of coords are (x, y, z); shape is (z, y, x).
    limits = (shape[2], shape[1], shape[0])
    for axis in range(3):
        inside = np.logical_and(coords[:, axis] > -.5,
                                coords[:, axis] < limits[axis] + .5)
        in_bounds = np.logical_and(in_bounds, inside)
    voxels = coords[in_bounds].round().astype(int)
    flat = np.ravel_multi_index(voxels.T[::-1], shape, mode='clip')
    return np.nonzero(in_bounds)[0], flat, np.ones((in_bounds.sum(),))
def trilinear(coords, shape, **kwargs):
    """Trilinear sampler: each coordinate is spread over its 8 surrounding
    voxels with weights from the standard trilinear interpolation formula.

    Returns (row indices, flat voxel indices, weights), 8 entries per valid
    coordinate; weights for one coordinate sum to 1.
    """
    #trilinear interpolation equation from http://paulbourke.net/miscellaneous/interpolation/
    valid = ~(np.isnan(coords).all(1))
    # modf splits into fractional parts (x, y, z) and the integral floor.
    (x, y, z), floor = np.modf(coords[valid].T)
    floor = floor.astype(int)
    ceil = floor + 1
    # Clamp negative fractional parts (from coordinates below 0) to zero.
    x[x < 0] = 0
    y[y < 0] = 0
    z[z < 0] = 0
    # The 8 corner indices in (z, y, x) order; iABC uses ceil on the axes
    # flagged by the corresponding bit of the name (x, y, z respectively).
    i000 = np.array([floor[2], floor[1], floor[0]])
    i100 = np.array([floor[2], floor[1], ceil[0]])
    i010 = np.array([floor[2], ceil[1], floor[0]])
    i001 = np.array([ ceil[2], floor[1], floor[0]])
    i101 = np.array([ ceil[2], floor[1], ceil[0]])
    i011 = np.array([ ceil[2], ceil[1], floor[0]])
    i110 = np.array([floor[2], ceil[1], ceil[0]])
    i111 = np.array([ ceil[2], ceil[1], ceil[0]])
    # Matching trilinear weights for each corner.
    v000 = (1-x)*(1-y)*(1-z)
    v100 = x*(1-y)*(1-z)
    v010 = (1-x)*y*(1-z)
    v110 = x*y*(1-z)
    v001 = (1-x)*(1-y)*z
    v101 = x*(1-y)*z
    v011 = (1-x)*y*z
    v111 = x*y*z
    # Repeat each valid row index once per corner and flatten everything.
    i = np.tile(np.nonzero(valid)[0], [1, 8]).ravel()
    j = np.hstack([i000, i100, i010, i001, i101, i011, i110, i111])
    data = np.vstack([v000, v100, v010, v001, v101, v011, v110, v111]).ravel()
    return i, np.ravel_multi_index(j, shape, mode='clip'), data
def distance_func(func, coords, shape, renorm=True, mp=True):
    """Generates masks for separable distance functions.

    *func* is applied independently to the per-axis distances between each
    coordinate and every voxel centre; the final weight of a voxel is the
    product of the three axis weights.  Returns stacked (i, j, data)
    triplets, optionally renormalized so each coordinate's weights sum to 1.
    """
    nZ, nY, nX = shape
    # Per-axis signed distances: one row per voxel index, one column per coordinate.
    dx = coords[:,0] - np.atleast_2d(np.arange(nX)).T
    dy = coords[:,1] - np.atleast_2d(np.arange(nY)).T
    dz = coords[:,2] - np.atleast_2d(np.arange(nZ)).T
    Lx, Ly, Lz = func(dx), func(dy), func(dz)
    # Nonzero kernel support per axis (voxel index, coordinate index).
    ix, jx = np.nonzero(Lx)
    iy, jy = np.nonzero(Ly)
    iz, jz = np.nonzero(Lz)
    ba = np.broadcast_arrays
    def func(v):
        # Build the full 3-D support of coordinate v as the outer product of
        # its per-axis supports, with weights vx*vy*vz.
        mx, my, mz = ix[jx == v], iy[jy == v], iz[jz == v]
        idx, idy, idz = [i.ravel() for i in ba(*np.ix_(mx, my, mz))]
        vx, vy, vz = [i.ravel() for i in ba(*np.ix_(Lx[mx, v], Ly[my, v], Lz[mz, v]))]
        i = v * np.ones((len(idx,)))
        j = np.ravel_multi_index((idz, idy, idx), shape, mode='clip')
        data = vx*vy*vz
        if renorm:
            data /= data.sum()
        return i, j, data
    if mp:
        # Parallel map from the package-level mp helper.
        from .. import mp
        ijdata = mp.map(func, range(len(coords)))
    else:
        #ijdata = map(func, range(len(coords)))
        ijdata = [func(x) for x in range(len(coords))]
    return np.hstack(ijdata)
def gaussian(coords, shape, sigma=1, window=3, **kwargs):
    """Gaussian-weighted sampler (placeholder — not yet implemented).

    The signature mirrors the other samplers so callers can switch between
    them uniformly.  The original body contained an unreachable inner kernel
    and distance_func call after the raise; that dead code has been removed.
    """
    raise NotImplementedError("gaussian sampler is not implemented yet")
def lanczos(coords, shape, window=3, **kwargs):
    """Lanczos-windowed sinc sampler.

    Builds the separable kernel L(x) = sinc(x) * sinc(x / window) for
    |x| < window (zero outside) and hands it to distance_func.
    """
    def _kernel(x):
        out = np.zeros_like(x)
        sel = np.abs(x) < window
        selx = x[sel]
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = (np.sin(np.pi * selx) * np.sin(np.pi * selx / window) *
                    (window / (np.pi ** 2 * selx ** 2)))
        # Bug fix: at x == 0 the expression above is 0 * inf = NaN; the
        # analytic limit of the Lanczos kernel at 0 is exactly 1.
        vals[selx == 0] = 1.0
        out[sel] = vals
        return out
    return distance_func(_kernel, coords, shape, **kwargs)
|
gallantlab/pycortex
|
cortex/mapper/samplers.py
|
Python
|
bsd-2-clause
| 3,673
|
[
"Gaussian"
] |
7f530ccfed5877222fe93158d9119cfc60ac12c168f15aa404954e8d11458208
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Run from the directory containing this script so all relative paths resolve.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Manifest keys every module must define.  ('copyright' was listed twice in
# the original; the duplicate only caused redundant validation.)
required_module_keys = ['name', 'version', 'moduleid', 'description',
                        'copyright', 'license', 'platform', 'minsdk']
# Fallback values injected when the manifest leaves these keys empty.
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Resolve the Titanium SDK location from the xcconfig values.

    Expands a leading ``~`` first, then any environment variables embedded
    in the configured TITANIUM_SDK path.
    """
    raw = config['TITANIUM_SDK']
    expanded = os.path.expanduser(raw)
    return os.path.expandvars(expanded)
def replace_vars(config, token):
    """Expand ``$(KEY)`` placeholders in *token* using values from *config*.

    Scanning stops at the first unknown key or unterminated placeholder,
    leaving the remaining text untouched (same behavior as before).
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1:
            break
        key = token[idx + 2:idx2]
        # FIX: dict.has_key() was removed in Python 3; `in` works in both 2 and 3.
        if key not in config:
            break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse ./titanium.xcconfig (``KEY = VALUE`` lines) into a dict.

    ``//`` comment lines are skipped and ``$(...)`` references in values are
    expanded against keys parsed earlier in the same file.
    FIX: the file handle was previously never closed; use a context manager.
    """
    with open(os.path.join(cwd, 'titanium.xcconfig')) as f:
        contents = f.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//':
            continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render every file in ./documentation through markdown into HTML.

    Returns a list of one-entry ``{filename: html}`` dicts, or None when the
    documentation directory does not exist.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    try:
        # Prefer markdown2; fall back to the classic markdown package.
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # ignoreFiles is a module-level list defined later in this file;
        # it is in scope here because this only runs at call time.
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the bundled CommonJS module and splice the generated asset
    routing code into the ObjC assets file; also writes metadata.json with
    the exported symbols. No-op if the main JS asset is missing.
    """
    js_file = os.path.join(cwd,'assets','com.joseandro.uniqueids.js')
    if not os.path.exists(js_file): return
    # Titanium SDK tool, importable only after main() extends sys.path.
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # ObjC snippets below are spliced verbatim into the module assets file.
    root_asset_content = """
%s

	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s

	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','ComJoseandroUniqueidsModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the packager with exit status 1."""
    print msg
    sys.exit(1)
def warn(msg):
    """Print a non-fatal warning prefixed with [WARN]."""
    print "[WARN] %s" % msg
def validate_license():
    """Warn if ./LICENSE still contains the template placeholder text."""
    c = open(os.path.join(cwd,'LICENSE')).read()
    if c.find(module_license_default)!=-1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse ./manifest into a dict and sanity-check it.

    Dies (exit 1) on a missing file or missing required key; warns when a
    required key still carries its template default. Returns (manifest, path).
    """
    path = os.path.join(cwd, 'manifest')
    # FIX: check existence *before* opening; the old order opened first, so a
    # missing manifest raised IOError instead of the intended die() message.
    if not os.path.exists(path):
        die("missing %s" % path)
    manifest = {}
    # FIX: close the handle when done (was left open before).
    with open(path) as f:
        for line in f.readlines():
            line = line.strip()
            if line[0:1] == '#':
                continue
            if line.find(':') < 0:
                continue
            # FIX: split only on the first ':' so values containing colons
            # (URLs, copyright strings) no longer raise ValueError.
            key, value = line.split(':', 1)
            manifest[key.strip()] = value.strip()
    for key in required_module_keys:
        # dict.has_key() is Python-2-only; `in` behaves identically.
        if key not in manifest:
            die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# File names never copied into the module zip (VCS/editor droppings, build leftovers).
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
# Directory names pruned from os.walk traversal during packaging.
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignoreExt=[]):
    """Recursively add *dir* to the open zipfile *zf* under ``basepath/dir``.

    Entries listed in the module-level ignoreDirs/ignoreFiles lists are
    skipped, as are files whose extension appears in *ignoreExt*.
    """
    if not os.path.exists(dir):
        return
    for root, dirs, files in os.walk(dir):
        # Prune ignored directories in place so os.walk never descends into them.
        dirs[:] = [d for d in dirs if d not in ignoreDirs]
        for fname in files:
            if fname in ignoreFiles:
                continue
            ext = os.path.splitext(fname)[1]
            if ext in ignoreExt:
                continue
            src = os.path.join(root, fname)
            dest = src.replace(dir, '%s/%s' % (basepath, dir), 1)
            zf.write(src, dest)
def glob_libfiles():
    """Return the paths of all Release-configuration static libs under build/."""
    return [lib for lib in glob.glob('build/**/*.a') if 'Release-' in lib]
def build_module(manifest,config):
    """Build the native module for device and simulator, then merge the
    resulting static libraries into one universal build/lib<moduleid>.a.
    Dies if either xcodebuild invocation fails.
    """
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # Simulator architecture pass so the merged lib works in both environments.
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Layout inside the zip mirrors modules/iphone/<moduleid>/<version>/ and
    bundles the manifest, merged static lib, rendered docs, assets, example,
    platform files, LICENSE, module.xcconfig and metadata.json (if present).
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # Start from a clean archive each run.
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            # Each doc is a one-entry {source-filename: html} dict (Python 2 iteritems).
            for file, html in doc.iteritems():
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    # Compiled .js is excluded from assets/platform (shipped via the lib splice).
    zip_dir(zf,'assets',modulepath,['.pyc','.js'])
    zip_dir(zf,'example',modulepath,['.pyc'])
    zip_dir(zf,'platform',modulepath,['.pyc','.js'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Packaging pipeline: validate inputs, locate the Titanium SDK (so its
    # python tooling becomes importable), then compile, build and zip.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
joseandro/TitaniumiOSUniqueIDs
|
build.py
|
Python
|
mit
| 6,810
|
[
"VisIt"
] |
be35b7771e10ac8e10cdd4910abfc638f78e654755bf87974ee906e59204a9ba
|
"""
This file is a mess, it is a merge of random stuff that is in galaxy.util and
stuff that was in lwr.util. This should be reworked to only contain stuff in
galaxy.util and the rest should be moved into galaxy.util.lwr_io or something
like that.
"""
import os
import platform
import stat
try:
import grp
except ImportError:
grp = None
import errno
from subprocess import Popen
from tempfile import NamedTemporaryFile
from logging import getLogger
log = getLogger(__name__)
BUFFER_SIZE = 4096
def enum(**enums):
    """Build an ad-hoc enumeration class whose attributes are the given
    keyword arguments (idiom from Stack Overflow question 36932:
    http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python).
    """
    members = dict(enums)
    return type('Enum', (), members)
def copy_to_path(object, path):
    """Stream the contents of a file-like *object* into the file at *path*
    (binary mode); the destination handle is closed by the helper.
    """
    destination = open(path, 'wb')
    _copy_and_close(object, destination)
def _copy_and_close(object, output):
    """Copy *object* to *output* in BUFFER_SIZE chunks, always closing *output*."""
    try:
        chunk = object.read(BUFFER_SIZE)
        while chunk:
            output.write(chunk)
            chunk = object.read(BUFFER_SIZE)
    finally:
        # Close even if a read/write failed part way through.
        output.close()
def copy_to_temp(object):
    """Spill a file-like *object* into a new named temp file and return the
    path. The temp file is not auto-deleted; the caller owns cleanup.
    """
    handle = NamedTemporaryFile(delete=False)
    _copy_and_close(object, handle)
    return handle.name
def execute(command_line, working_directory, stdout, stderr):
    """Launch *command_line* through the shell in *working_directory*,
    wiring stdout/stderr to the given targets, and return the Popen handle.

    On POSIX the child is moved into its own process group so signals sent
    to the parent do not reach it.
    """
    detach = None
    if platform.system() != 'Windows':
        detach = os.setpgrp
    return Popen(args=command_line,
                 shell=True,
                 cwd=working_directory,
                 stdout=stdout,
                 stderr=stderr,
                 preexec_fn=detach)
def is_in_directory(file, directory, local_path_module=os.path):
    """Return True if *file* lies inside *directory* (or equals it).

    Heavily inspired by similar method in Galaxy's BaseJobRunner class.

    FIX: the original compared ``commonprefix([file, directory])`` with the
    directory, but commonprefix is purely character-based, so ``/a/bc`` was
    wrongly reported as inside ``/a/b``. We now require the match to end on
    a path-separator boundary.
    """
    # Make both absolute.
    directory = local_path_module.abspath(directory)
    file = local_path_module.abspath(file)
    if file == directory:
        return True
    # Ensure a trailing separator so '/a/bc' does not match '/a/b'.
    if not directory.endswith(local_path_module.sep):
        directory += local_path_module.sep
    return file.startswith(directory)
in_directory = is_in_directory  # For compat. w/Galaxy.
def umask_fix_perms(path, umask, unmasked_perms, gid=None):
    """
    umask-friendly permissions fixing

    Applies ``unmasked_perms & ~umask`` to *path* via chmod and, when *gid*
    is given, tries to chown the file into that group. Every failure is
    logged and swallowed (best-effort; never raises).
    NOTE: Python 2 ``except X, e`` syntax throughout.
    """
    perms = unmasked_perms & ~umask
    try:
        st = os.stat(path)
    except OSError, e:
        # Cannot even stat the path; nothing more to do.
        log.exception('Unable to set permissions or group on %s' % path)
        return
    # fix modes
    if stat.S_IMODE(st.st_mode) != perms:
        try:
            os.chmod(path, perms)
        except Exception, e:
            log.warning('Unable to honor umask (%s) for %s, tried to set: %s but mode remains %s, error was: %s' % (oct(umask),
                                                                                                                    path,
                                                                                                                    oct(perms),
                                                                                                                    oct(stat.S_IMODE(st.st_mode)),
                                                                                                                    e))
    # fix group
    if gid is not None and st.st_gid != gid:
        try:
            # -1 leaves the owning uid untouched.
            os.chown(path, -1, gid)
        except Exception, e:
            try:
                # Resolve gids to names for a friendlier message; grp may be
                # None on non-POSIX platforms, hence the fallback below.
                desired_group = grp.getgrgid(gid)
                current_group = grp.getgrgid(st.st_gid)
            except:
                desired_group = gid
                current_group = st.st_gid
            log.warning('Unable to honor primary group (%s) for %s, group remains %s, error was: %s' % (desired_group,
                                                                                                        path,
                                                                                                        current_group,
                                                                                                        e))
def xml_text(root, name=None):
    """Return the text content of an element.

    With *name*, first try it as an attribute of *root*, then as a child
    element; without it, use *root* itself. Newlines inside the text are
    removed and the result stripped. Returns '' when nothing is found.
    """
    if name is None:
        elem = root
    else:
        # Attribute lookup wins over a child element of the same name.
        attr_val = root.get(name)
        if attr_val:
            return attr_val
        elem = root.find(name)
    if elem is not None and elem.text:
        return ''.join(elem.text.splitlines()).strip()
    return ''
# asbool implementation pulled from PasteDeploy
# Accepted spellings for boolean config strings (matched case-insensitively
# after stripping whitespace; see asbool below).
truthy = frozenset(['true', 'yes', 'on', 'y', 't', '1'])
falsy = frozenset(['false', 'no', 'off', 'n', 'f', '0'])
def asbool(obj):
    """Coerce a config value to bool.

    Strings must spell one of the recognized true/false words (see the
    module-level truthy/falsy sets); anything else raises ValueError.
    Non-strings fall through to plain bool().
    """
    if not isinstance(obj, basestring):
        return bool(obj)
    text = obj.strip().lower()
    if text in truthy:
        return True
    if text in falsy:
        return False
    raise ValueError("String is not true/false: %r" % text)
def force_symlink(source, link_name):
    """Create symlink *link_name* -> *source*, replacing an existing entry.

    Only EEXIST is handled (remove-and-retry); any other OSError propagates.
    NOTE: Python 2 ``except OSError, e`` syntax.
    """
    try:
        os.symlink(source, link_name)
    except OSError, e:
        if e.errno == errno.EEXIST:
            # Not atomic: a brief window exists where link_name is absent.
            os.remove(link_name)
            os.symlink(source, link_name)
        else:
            raise e
def listify( item, do_strip=False ):
    """
    Make a single item a single item list, or return a list if passed a
    list. Passing a None returns an empty list.

    Comma-containing strings are split on ',' (optionally stripping each
    token); any other truthy value is wrapped in a one-element list.
    """
    if not item:
        return []
    if isinstance( item, list ):
        return item
    if isinstance( item, basestring ) and item.count( ',' ):
        parts = item.split( ',' )
        if do_strip:
            return [token.strip() for token in parts]
        return parts
    return [ item ]
|
jmchilton/lwr
|
galaxy/util/__init__.py
|
Python
|
apache-2.0
| 5,762
|
[
"Galaxy"
] |
7ece96b2417df56404c1cc7305890160453cb152d282cee42be1ee04a1dac995
|
#!/usr/bin/env python
from __future__ import absolute_import
#----------------------------------------------------------------------
# Copyright (c) 2012-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
Omni AM API Call Handler
Handle calls to AM API functions
"""
from copy import copy
import datetime
import dateutil.parser
import json
import logging
import os
import pprint
import re
import string
import zlib
from .util import OmniError, NoSliceCredError, RefusedError, naiveUTC, AMAPIError
from .util.dossl import _do_ssl
from .util.abac import get_abac_creds, save_abac_creds, save_proof, is_ABAC_framework
from .util import credparsing as credutils
from .util.handler_utils import _listaggregates, validate_url, _get_slice_cred, _derefAggNick, \
_derefRSpecNick, _get_user_urn, \
_print_slice_expiration, _construct_output_filename, \
_getRSpecOutput, _writeRSpec, _printResults, _load_cred, _lookupAggNick, \
expires_from_rspec, expires_from_status
from .util.json_encoding import DateTimeAwareJSONEncoder, DateTimeAwareJSONDecoder
from .xmlrpc import client as xmlrpcclient
from .util.files import *
from .util.credparsing import *
from ..geni.util.tz_util import tzd
from ..geni.util import rspec_util, urn_util
class BadClientException(Exception):
    ''' Internal only exception thrown if AM speaks wrong AM API version'''
    def __init__(self, client, msg):
        # client: the xmlrpc client wrapper that failed the version check
        self.client = client
        # validMsg: human-readable explanation of what the AM does support
        self.validMsg = msg
class AMCallHandler(object):
'''Dispatch AM API calls to aggregates'''
    def __init__(self, framework, config, opts):
        """Store the control framework, parsed config and CLI options.

        GetVersion results and XMLRPC clients are created lazily later.
        """
        self.framework = framework
        self.logger = config['logger']
        self.omni_config = config['omni']
        self.config = config
        self.opts = opts # command line options as parsed
        self.GetVersionCache = None # The cache of GetVersion info in memory
        self.clients = None # XMLRPC clients for talking to AMs
        # ABAC is only enabled when the selected framework config supplies
        # both the credential directory and a log path; otherwise disable it.
        if self.opts.abac:
            aconf = self.config['selected_framework']
            if 'abac' in aconf and 'abac_log' in aconf:
                self.abac_dir = aconf['abac']
                self.abac_log = aconf['abac_log']
            else:
                self.logger.error("ABAC requested (--abac) and no abac= or abac_log= in omni_config: disabling ABAC")
                self.opts.abac= False
                self.abac_dir = None
                self.abac_log = None
    def _handle(self, args):
        ''' Actually dispatch calls - only those that don't start with an underscore'''
        if len(args) == 0:
            self._raise_omni_error('Insufficient number of arguments - Missing command to run')

        call = args[0].lower()
        # disallow calling private methods
        # NOTE(review): this path returns None while the normal path returns a
        # (message, value) tuple -- callers must tolerate both.
        if call.startswith('_'):
            return
        if not hasattr(self,call):
            self._raise_omni_error('Unknown function: %s' % call)

        # Extract the slice name arg and put it in an option
        self.opts.sliceName = self._extractSliceArg(args)

        # Try to auto-correct API version
        msg = self._correctAPIVersion(args)
        if msg is None:
            msg = ""

        # Dispatch to the public handler method; each returns (message, value).
        (message, val) = getattr(self,call)(args[1:])
        if message is None:
            message = ""
        return (msg+message, val)
    # Pull any slice name arg out of the args and return it, else None
    # Ignore it for createsliver or allocate as no slice exists yet in these cases
    def _extractSliceArg(self, args):
        """Return the slice-name argument (args[1]) when the command takes
        one, else None (also None for blank or missing slice names)."""
        if args is None or len(args) == 0:
            return None
        call = args[0].lower().strip()
        # Skip createsliver and allocate and provision because the whole idea is to add a new AM here - so the CH doesn't know
        if call in ('getversion', 'listimages', 'deleteimage', 'createsliver', 'allocate', 'provision'): # createimage?
            return None
        elif len(args) > 1:
            ret = args[1].strip()
            if ret == "":
                return None
            self.logger.debug("Found slice name %s", ret)
            return ret
        # Implicit None when no second argument was supplied.
def _correctAPIVersion(self, args):
'''Switch AM API versions if the AMs all or mostly speak something else. But be conservative.'''
cmd = None
if len(args) > 0:
cmd = args[0].strip().lower()
# FIXME: Keep this check in sync with createsliver
if cmd is not None and cmd == 'createsliver' and (not self.opts.aggregate or len(self.opts.aggregate) == 0):
# the user must supply an aggregate.
msg = 'Missing -a argument: specify an aggregate where you want the reservation.'
self._raise_omni_error(msg)
configVer = str(self.opts.api_version) # turn int into a string
(clients, message) = self._getclients()
numClients = len(clients)
# If we know the method we are calling takes exactly 1 AM and we have more
# than one here, bail. Note that later we remove bad AMs, so this is an imperfect check.
# createimage takes exactly 1 client
# FIXME: Keep this check in sync with createimage
if cmd is not None and cmd == 'createimage' and numClients > 1:
self._raise_omni_error("CreateImage snapshots a particular machine: specify exactly 1 AM URL with '-a'")
liveVers = {}
versions = {}
retmsg = "" # Message to put at start of result summary
i = -1 # Index of client in clients list
badcIs = [] # Indices of bad clients to remove from list later
for client in clients:
i = i + 1
(thisVer, message) = self._get_this_api_version(client)
if thisVer is None:
# Not a valid client
numClients = numClients - 1
badcIs.append(i) # Mark this client to be removed from the list later
if message and message.strip() != '':
# Extract out of the message the real error
# raise that as an omni error
# FIXME: If messages change in dossl this won't work
if "Operation timed out" in message:
message = "Aggregate %s unreachable: %s" % (client.str, message[message.find("Operation timed out"):])
elif "Unknown socket error" in message:
message = "Aggregate %s unreachable: %s" % (client.str, message[message.find("Unknown socket error"):])
elif "Server does not trust" in message:
message = "Aggregate %s does not trust your certificate: %s" % (client.str, message[message.find("Server does not trust"):])
elif "Your user certificate" in message:
message = "Cannot contact aggregates: %s" % (message[message.find("Your user certificate"):])
else:
message = 'Unknown error'
if self.numOrigClients == 1:
self._raise_omni_error(message)
msg = "Removing %s from list of aggregates to contact. %s " % (client.str, message)
self.logger.warn(msg)
retmsg += msg
if retmsg.endswith(' ') or retmsg.endswith('.'):
retmsg += "\n"
elif not retmsg.endswith('\n'):
retmsg += ".\n"
continue
thisVer = str(thisVer) # turn int into a string
liveVers[thisVer] = liveVers.get(thisVer, 0) + 1 # hash is by strings
(thisVersions, message) = self._get_api_versions(client)
# Ticket 242: be robust to malformed geni_api_versions
if thisVersions and isinstance(thisVersions, dict):
for version in thisVersions.keys(): # version is a string
# self.logger.debug("%s supports %d at %s", client.url, int(version), thisVersions[version])
versions[version] = versions.get(version, 0) + 1 # hash by strings
# self.logger.debug("%d spoken by %d", int(version), versions[version])
else:
#self.logger.debug("Incrementing counter of clients that speak %r somewhere", thisVer)
versions[thisVer] = versions.get(thisVer, 0) + 1
# End of loop over clients
# Remove the bad clients now (not while looping over this same list)
i = -1
newcs = []
for i in range(len(self.clients)):
if i in badcIs:
# self.logger.debug("Skipping client %s" % self.clients[i].url)
continue
# self.logger.debug("Saving client %s" % self.clients[i].url)
newcs.append(self.clients[i])
self.clients = newcs
if len(self.clients) == 0:
self._raise_omni_error(retmsg + "\nNo Aggregates left to operate on.")
# If we didn't get any AMs, bail early
if len(liveVers.keys()) == 0:
return retmsg
# If all the AMs talk the desired version here, great
if liveVers.has_key(configVer) and liveVers[configVer] == numClients:
self.logger.debug("Config version spoken here by all AMs")
return retmsg
# If all the AMs talk the desired version somewhere, fine. We'll switch URLs later.
if versions.has_key(configVer) and versions[configVer] == numClients:
self.logger.debug("Config version spoken somewhere by all AMs")
return retmsg
# Some AM does not talk the desired version
self.logger.warn("You asked to use AM API %s, but the AM(s) you are contacting do not all speak that version.", configVer)
# If all AMs speak the same (different) version at the current URL, use that
if len(liveVers.keys()) == 1:
newVer = int(liveVers.keys()[0])
msg = "At the URLs you are contacting, all your AMs speak AM API v%d. " % newVer
self.logger.warn(msg)
if self.opts.devmode:
self.logger.warn("Would switch AM API versions, but in dev mode, so continuing...")
elif self.opts.explicitAPIVersion:
retmsg = ("Your AMs do not all speak requested API v%s. " % configVer) + msg
msg = "Continuing with your requested API version %s, but consider next time calling Omni with '-V%d'." % (configVer, newVer)
retmsg += msg + "\n"
self.logger.warn(msg)
else:
retmsg = ("Your AMs do not all speak requested API v%s. " % configVer) + msg
msg = "Switching to AM API v%d. Next time call Omni with '-V%d'." % (newVer, newVer)
retmsg += msg + "\n"
self.logger.warn(msg)
self.opts.api_version = newVer
return retmsg
# If the configured version is spoken somewhere by a majority of AMs, use it
if versions.has_key(configVer) and float(versions[configVer]) >= float(numClients)/float(2):
self.logger.debug("Config version spoken somewhere by a majority of AMs")
#self.logger.debug("numClients/2 = %r", float(numClients)/float(2))
self.logger.info("Sticking with API version %s, even though only %d of %d AMs support it", configVer, versions[configVer], numClients)
return retmsg
self.logger.warn("Configured API version %s is not supported by most of your AMs", configVer)
# We could now prefer the version that the most AMs talk at the current URL - particularly if that
# is the configVer
# Or a version that the most AMs talk at another URL (could be all) - particularly if that is the
# configVer again
# So we need to find the version that the most AMs support.
# Sort my versions array by value
from operator import itemgetter
sortedVersions = sorted(versions.iteritems(), key=itemgetter(1), reverse=True)
sortedLiveVersions = sorted(liveVers.iteritems(), key=itemgetter(1), reverse=True)
mostLive = sortedLiveVersions[0][0]
mostAnywhere = sortedVersions[0][0]
if mostLive == configVer or (liveVers.has_key(configVer) and liveVers[configVer] == liveVers[mostLive]):
# The configured API version is what is spoken at the most AMs at the current URL
self.logger.debug("Config version is the most common live version")
configSup = versions.get(configVer, 0)
self.logger.info("Sticking with API version %d, even though only %d of %d AMs support it", configVer, configSup, numClients)
return retmsg
if liveVers[mostLive] == numClients:
newVer = int(mostLive)
msg = "At the URLs you are contacting, all your AMs speak AM API v%d. " % newVer
self.logger.warn(msg)
if self.opts.devmode:
self.logger.warn("Would switch AM API version, but continuing...")
elif self.opts.explicitAPIVersion:
retmsg = "Most of your AMs do not support requested API version %d. " + msg
msg = "Continuing with your requested API version %s, but consider next time calling Omni with '-V%d'." % (configVer, newVer)
retmsg += msg + "\n"
self.logger.warn(msg)
else:
msg = "Switching to AM API v%d. Next time call Omni with '-V%d'." % (newVer, newVer)
retmsg += msg + "\n"
self.logger.warn(msg)
self.opts.api_version = newVer
return retmsg
if mostAnywhere == configVer or (versions.has_key(configVer) and versions[configVer] == versions[mostAnywhere]):
# The configured API version is what is spoken at the most AMs at _some_ URL
self.logger.debug("Config version is the most common anywhere version")
self.logger.info("Sticking with API version %s, even though only %d of %d AMs support it", configVer, versions[configVer], numClients)
return retmsg
# If we get here, the configured version is not the most popular, nor supported by most AMs
# IE, something else is more popular
if versions[mostAnywhere] == numClients:
# The most popular anywhere API version is spoken by all AMs
newVer = int(mostAnywhere)
if self.opts.devmode:
self.logger.warn("Would switch AM API version to %d, which is supported by all your AMs, but continuing...")
elif self.opts.explicitAPIVersion:
msg = "Continuing with your requested API version %s (even though it is not well supported by your AMs), but consider next time calling Omni with '-V%d' (which all your AMs support). " % (configVer, newVer)
self.logger.warn(msg)
retmsg = msg + "\n"
else:
retmsg = "Your requested AM API version is not well supported by your AMs. "
msg = "Switching to AM API v%d, which is supported by all your AMs. Next time call Omni with '-V%d'." % (newVer, newVer)
retmsg += msg + "\n"
self.logger.warn(msg)
self.opts.api_version = newVer
return retmsg
if float(liveVers[mostLive]) >= float(numClients)/float(2):
# The most popular live API version is spoken by a majority of AMs
newVer = int(mostLive)
if self.opts.devmode:
self.logger.warn("Would switch AM API version to %d, which is running at a majority of your AMs, but continuing...", newVer)
elif self.opts.explicitAPIVersion:
msg = "Continuing with your requested API version %s (which is not well supported by your AMs), but consider next time calling Omni with '-V%d' (which most of your AMs are running). " % (configVer, newVer)
self.logger.warn(msg)
retmsg = msg + "\n"
else:
msg = "Switching to AM API v%d, which is running at a majority of your AMs. Next time call Omni with '-V%d'." % (newVer, newVer)
retmsg = msg + "\n"
self.logger.warn(msg)
self.opts.api_version = newVer
return retmsg
if float(versions[mostAnywhere]) >= float(numClients)/float(2):
# The most popular anywhere API version is spoken by a majority of AMs
newVer = int(mostAnywhere)
if self.opts.devmode:
self.logger.warn("Would switch AM API version to %d, which is supported by a majority of your AMs, but continuing...", newVer)
elif self.opts.explicitAPIVersion:
msg = "Continuing with your requested API version %s, but consider next time calling Omni with '-V%d' (which is supported by most of your AMs). " % (configVer, newVer)
retmsg = msg + "\n"
self.logger.warn(msg)
else:
msg = "Switching to AM API v%d, which is supported by a majority of your AMs. Next time call Omni with '-V%d'." % (newVer, newVer)
retmsg = msg + "\n"
self.logger.warn(msg)
self.opts.api_version = newVer
return retmsg
# No API version is supported by a majority of AMs
if versions.has_key(configVer) and versions[configVer] > 0:
# Somebody speaks the desired version. Use that
self.logger.debug("Config ver is supported _somewhere_ at least")
return retmsg
# No AM speaks the desired API version. No version is supported by a majority of AMs
# Go with the most popular live version? The most popular anywhere version?
newVer = int(mostAnywhere)
if self.opts.devmode:
self.logger.warn("Would switch AM API version to %d, the most commonly supported version, but continuing...", newVer)
elif self.opts.explicitAPIVersion:
msg = "Continuing with your requested API version %s, but consider next time calling Omni with '-V%d' (which is the most common version your AMs support). " % (configVer, newVer)
retmsg = msg + "\n"
self.logger.warn(msg)
else:
msg = "Switching to AM API v%d, the most commonly supported version. Next time call Omni with '-V%d'." % (newVer, newVer)
retmsg = msg + "\n"
self.logger.warn(msg)
self.opts.api_version = newVer
return retmsg
    # ------- AM API methods and direct support methods follow

    # FIXME: This method manipulates the message. Need to separate Dev/Exp
    # Also, it marks whether it used the cache through the message. Is there a better way?
    # Helper indicates a function to get one of the getversion return attributes called this,
    # So make messages indicate not 'getversion' but that we were trying to get an attribute
    def _do_getversion(self, client, helper=False):
        '''Pull GetVersion for this AM from cache; otherwise actually call GetVersion if this
        AM wasn't in the cache, the options say not to use the cache, or the cache is too old.

        If we actually called GetVersion:
        Construct full error message including string version of code/output slots.
        Then cache the result.
        If we got the result from the cache, set the message to say so.

        Returns (version-struct-or-None, message-string).
        '''
        cachedVersion = None
        if not self.opts.noGetVersionCache:
            cachedVersion = self._get_cached_getversion(client)
        # FIXME: What if cached entry had an error? Should I retry then?
        # Refetch when caching is disabled, nothing was cached, or the cached
        # entry predates the user-specified oldest acceptable timestamp.
        if self.opts.noGetVersionCache or cachedVersion is None or (self.opts.GetVersionCacheOldestDate and cachedVersion['timestamp'] < self.opts.GetVersionCacheOldestDate):
            self.logger.debug("Actually calling GetVersion")
            if self.opts.noGetVersionCache:
                self.logger.debug(" ... opts.noGetVersionCache set")
            elif cachedVersion is None:
                self.logger.debug(" ... cachedVersion was None")
            failMsg = "GetVersion at %s" % (str(client.str))
            if helper:
                failMsg = "Check AM properties at %s" % (str(client.str))
            # AM API v2+ GetVersion accepts an options struct; only pass it
            # when it is non-empty.
            if self.opts.api_version >= 2:
                options = self._build_options("GetVersion", None, None)
                if len(options.keys()) == 0:
                    (thisVersion, message) = _do_ssl(self.framework, None, failMsg, client.GetVersion)
                else:
                    (thisVersion, message) = _do_ssl(self.framework, None, failMsg, client.GetVersion, options)
            else:
                (thisVersion, message) = _do_ssl(self.framework, None, failMsg, client.GetVersion)

            # This next line is experimenter-only maybe?
            message = _append_geni_error_output(thisVersion, message)

            # Cache result, even on error (when we note the error message)
            self._cache_getversion(client, thisVersion, message)
        else:
            self.logger.debug("Pulling GetVersion from cache")
            thisVersion = cachedVersion['version']
            message = "From cached result from %s" % cachedVersion['timestamp']
        return (thisVersion, message)
    def _do_getversion_output(self, thisVersion, client, message):
        '''Write GetVersion output to a file or log depending on options.
        Return a string to print that we saved it to a file, if that's what we did.
        '''
        # FIXME only print 'peers' on verbose? (Or is peers gone now?)
        # FIXME: Elsewhere we use json.dumps - should we do so here too?
        # This is more concise and looks OK - leave it for now
        pp = pprint.PrettyPrinter(indent=4)
        prettyVersion = pp.pformat(thisVersion)
        # Build the header and the short AM label used in messages; prefer
        # the nickname when one is configured.
        if client.nick:
            header = "AM %s URN: %s (url: %s) has version:" % (client.nick, client.urn, client.url)
            if self.opts.devmode:
                amstr = "%s (%s, %s)" % (client.nick, client.urn, client.url)
            else:
                amstr = client.nick
        else:
            header = "AM URN: %s (url: %s) has version:" % (client.urn, client.url)
            amstr = "%s (%s)" % (client.urn, client.url)
        if message:
            header += " (" + message + ")"
        filename = None
        if self.opts.output:
            # Create filename
            filename = _construct_output_filename(self.opts, None, client.url, client.urn, "getversion", ".json", 1)
            self.logger.info("Writing result of getversion at AM %s to file '%s'", amstr, filename)
        # Create File
        # This logs or prints, depending on whether filename is None
        _printResults(self.opts, self.logger, header, prettyVersion, filename)

        # FIXME: include filename in summary: always? only if 1 aggregate?
        if filename:
            return "Saved getversion at AM %s to file '%s'.\n" % (amstr, filename)
        else:
            return ""
    def _save_getversion_cache(self):
        '''Write GetVersionCache object to file as JSON (creating it and directories if needed)'''
        #client url->
        #  timestamp (a datetime.datetime)
        #  version struct, including code/value/etc as appropriate
        #  urn
        #  url
        #  lasterror
        if self.opts.noCacheFiles:
            self.logger.debug("Per option noCacheFiles, not saving GetVersion cache")
            return
        # Ensure the cache file's parent directory exists before writing.
        fdir = os.path.dirname(self.opts.getversionCacheName)
        if fdir and fdir != "":
            if not os.path.exists(fdir):
                os.makedirs(fdir)
        try:
            with open(self.opts.getversionCacheName, 'w') as f:
                json.dump(self.GetVersionCache, f, cls=DateTimeAwareJSONEncoder)
            self.logger.debug("Wrote GetVersionCache to %s", self.opts.getversionCacheName)
        except Exception, e:
            # Best effort: a failed save is logged but never fatal.
            self.logger.error("Failed to write GetVersion cache: %s", e)
    def _load_getversion_cache(self):
        '''Load GetVersion cache from JSON encoded file, if any'''
        self.GetVersionCache = {}
        if self.opts.noCacheFiles:
            self.logger.debug("Per option noCacheFiles, not loading get version cache")
            return
        #client url->
        #  timestamp (a datetime.datetime)
        #  version struct, including code/value/etc as appropriate
        #  urn
        #  url
        #  lasterror
        # Missing or empty cache file: keep the fresh empty dict from above.
        if not os.path.exists(self.opts.getversionCacheName) or os.path.getsize(self.opts.getversionCacheName) < 1:
            return
        try:
            with open(self.opts.getversionCacheName, 'r') as f:
                self.GetVersionCache = json.load(f, encoding='ascii', cls=DateTimeAwareJSONDecoder)
            self.logger.debug("Read GetVersionCache from %s", self.opts.getversionCacheName)
        except Exception, e:
            # Best effort: an unreadable cache is logged and treated as empty.
            self.logger.error("Failed to read GetVersion cache: %s", e)
    # FIXME: This saves every time we add to the cache. Is that right?
    def _cache_getversion(self, client, thisVersion, error=None):
        '''Add to Cache the GetVersion output for this AM.
        If this was an error, don't over-write any existing good result, but record the error message
        This methods both loads and saves the cache from file.
        '''
        # url, urn, timestamp, apiversion, rspecversions (type version, type version, ..), credtypes (type version, ..), single_alloc, allocate, last error and message
        res = {}
        if error and error.startswith(" (PG log ur"):
            # If the only error string is the pointer to the PG log url, treat this as no error
            error = None
        if error:
            # On error, pretend this is old, to force refetch
            res['timestamp'] = datetime.datetime.min
        else:
            res['timestamp'] = datetime.datetime.utcnow()
        res['version'] = thisVersion
        # Key metadata: prefer the URN, fall back to the URL, then a placeholder.
        if client is not None and client.urn is not None and str(client.urn).strip() != "":
            res['urn'] = client.urn
        elif client is not None and client.url is not None:
            res['urn'] = client.url
        else:
            res['urn'] = "unspecified_AM_URN"
        if client is not None and client.url is not None:
            res['url'] = client.url
        else:
            res['url'] = "unspecified_AM_URL"
        res['error'] = error
        if self.GetVersionCache is None:
            # Read the file as serialized JSON
            self._load_getversion_cache()
        if error:
            # On error, leave existing data alone - just record the last error
            if self.GetVersionCache.has_key(client.url):
                self.GetVersionCache[client.url]['lasterror'] = error
            self.logger.debug("Added GetVersion error output to cache for %s: %s", client.url, error)
        else:
            self.GetVersionCache[client.url] = res
            self.logger.debug("Added GetVersion success output to cache for %s", client.url)
        # Write the file as serialized JSON
        self._save_getversion_cache()
def _get_cached_getversion(self, client):
'''Get GetVersion from cache or this AM, if any.'''
if self.GetVersionCache is None:
self._load_getversion_cache()
if self.GetVersionCache is None:
return None
self.logger.debug("Checking cache for %s", client.url)
if isinstance(self.GetVersionCache, dict) and self.GetVersionCache.has_key(client.url):
# FIXME: Could check that the cached URN is same as the client urn?
return self.GetVersionCache[client.url]
# FIXME: Is this too much checking/etc for developers?
# See _check_valid_return_struct: lots of overlap, but this checks the top-level geni_api
# FIXME: The return from the cache doesn't really need to be rechecked, does it? Or will that not happen?
# Helper indicates a function to get one of the getversion return attributes called this,
# So make messages indicate not 'getversion' but that we were trying to get an attribute
def _do_and_check_getversion(self, client, helper=False):
'''Do GetVersion (possibly from cache), then check return for errors,
constructing a good message.
Basically, add return checks to _do_getversion'''
op = "getversion"
# if helper:
# op = "check AM properties"
message = None
(thisVersion, message) = self._do_getversion(client, helper)
if thisVersion is None:
# error
message = "AM %s failed %s (empty): %s" % (client.str, op, message)
return (None, message)
elif not isinstance(thisVersion, dict):
# error
message = "AM %s failed %s (returned %s): %s" % (client.str, op, thisVersion, message)
return (None, message)
elif not thisVersion.has_key('geni_api'):
# error
message = "AM %s failed %s (no geni_api at top: %s): %s" % (client.str, op, thisVersion, message)
return (None, message)
elif thisVersion['geni_api'] == 1:
# No more checking to do - return it as is
return (thisVersion, message)
elif not thisVersion.has_key('value'):
message = "AM %s failed %s (no value: %s): %s" % (client.str, op, thisVersion, message)
return (None, message)
elif not thisVersion.has_key('code'):
message = "AM %s failed %s (no code: %s): %s" % (client.str, op, thisVersion, message)
return (None, message)
elif not thisVersion['code'].has_key('geni_code'):
message = "AM %s failed %s (no geni_code: %s): %s" % (client.str, op, thisVersion, message)
# error
return (None, message)
elif thisVersion['code']['geni_code'] != 0:
# error
# This next line is experimenter-only maybe?
message = "AM %s failed %s: %s" % (client.str, op, _append_geni_error_output(thisVersion, message))
return (None, message)
elif not isinstance(thisVersion['value'], dict):
message = "AM %s failed %s (non dict value %s): %s" % (client.str, op, thisVersion['value'], message)
return (None, message)
# OK, we have a good result
return (thisVersion, message)
# This is the real place that ends up calling GetVersion
# FIXME: As above: this loses the code/output slots and any other top-level slots.
# Maybe only for experimenters?
# Helper indicates a function to get one of the getversion return attributes called this,
# So make messages indicate not 'getversion' but that we were trying to get an attribute
def _get_getversion_value(self, client, helper=False):
'''Do GetVersion (possibly from cache), check error returns to produce a message,
pull out the value slot (dropping any code/output).'''
message = None
# We cache results by URL
if not hasattr(self, 'gvValueCache'):
self.gvValueCache = dict()
if self.gvValueCache.has_key(client.url):
return self.gvValueCache[client.url]
(thisVersion, message) = self._do_and_check_getversion(client, helper)
if thisVersion is None:
# error - return what the error check had
return (thisVersion, message)
elif thisVersion['geni_api'] == 1:
versionSpot = thisVersion
else:
versionSpot = thisVersion['value']
self.gvValueCache[client.url] = (versionSpot, message)
return (versionSpot, message)
# Helper indicates a function to get one of the getversion return attributes called this,
# So make messages indicate not 'getversion' but that we were trying to get an attribute
def _get_getversion_key(self, client, key, helper=False):
'''Pull the given key from the GetVersion value object'''
if key is None or key.strip() == '':
return (None, "no key specified")
(versionSpot, message) = self._get_getversion_value(client, helper)
if versionSpot is None:
return (None, message)
elif not versionSpot.has_key(key):
message2 = "AM %s getversion has no key %s" % (client.str, key)
if message:
message = message2 + "; " + message
else:
message = message2
return (None, message)
else:
return (versionSpot[key], message)
def _get_this_api_version(self, client):
'''Get the supported API version for this AM (from GetVersion)'''
(res, message) = self._get_getversion_key(client, 'geni_api', helper=True)
if res is None:
self.logger.debug("Couldn't get api version supported from GetVersion: %s" % message)
# Return is an int API version
return (res, message)
def _get_api_versions(self, client):
'''Get the supported API versions and URLs for this AM (from GetVersion)'''
(res, message) = self._get_getversion_key(client, 'geni_api_versions', helper=True)
if res is None:
msg = "Couldnt get api versions supported from GetVersion: %s" % message
(thisVer, msg2) = self._get_getversion_key(client, 'geni_api', helper=True)
if thisVer and thisVer < 2:
self.logger.debug(msg)
else:
self.logger.warning(msg)
# Return is a dict: Int API version -> string URL of AM
return (res, message)
def _get_advertised_rspecs(self, client):
'''Get the supported advertisement rspec versions for this AM (from GetVersion)'''
(ads, message) = self._get_getversion_key(client, 'ad_rspec_versions', helper=True)
if ads is None:
if message and "has no key" in message:
(ads, message) = self._get_getversion_key(client, 'geni_ad_rspec_versions', helper=True)
if ads is None:
self.logger.warning("Couldnt get Advertised supported RSpec versions from GetVersion so can't do ListResources: %s" % message)
# Return is array of dicts with type, version, schema, namespace, array of extensions
return (ads, message)
def _get_request_rspecs(self, client):
'''Get the supported request rspec versions for this AM (from GetVersion)'''
(ads, message) = self._get_getversion_key(client, 'request_rspec_versions', helper=True)
if ads is None:
if message and "has no key" in message:
(ads, message) = self._get_getversion_key(client, 'geni_request_rspec_versions', helper=True)
if ads is None:
self.logger.warning("Couldnt get Request supported RSpec versions from GetVersion: %s" % message)
# Return is array of dicts with type, version, schema, namespace, array of extensions
return (ads, message)
def _get_cred_versions(self, client):
'''Get the supported credential types for this AM (from GetVersion)'''
(res, message) = self._get_getversion_key(client, 'geni_credential_types', helper=True)
if res is None:
self.logger.warning("Couldnt get credential types supported from GetVersion: %s" % message)
# Return is array of dicts: geni_type, geni_version
return (res, message)
def _get_singlealloc_style(self, client):
'''Get the supported single_allocation for this AM (from GetVersion)'''
(res, message) = self._get_getversion_key(client, 'geni_single_allocation', helper=True)
if res is None:
self.logger.debug("Couldnt get single_allocation mode supported from GetVersion; will use default of False: %s" % message)
res = False
# return is boolean
return (res, message)
def _get_alloc_style(self, client):
'''Get the supported geni_allocate allocation style for this AM (from GetVersion)'''
(res, message) = self._get_getversion_key(client, 'geni_allocate', helper=True)
if res is None:
self.logger.debug("Couldnt get allocate style supported from GetVersion; will use default of 'geni_single': %s" % message)
res = 'geni_single'
# Return is string: geni_single, geni_disjoint, or geni_many
return (res, message)
    def _api_call(self, client, msg, op, args):
        '''Make the AM API Call, after first checking that the AM we are talking
        to is of the right API version.

        client: AM client object; msg: error-message prefix for _do_ssl;
        op: AM API method name; args: positional args for that method.
        Returns ((result, message), client) - the _do_ssl return plus the
        (possibly corrected) client actually used.
        Raises BadClientException when the AM is unreachable or speaks a
        different API version than self.opts.api_version.'''
        # _checkValidClient returns the API version the AM speaks, a possibly
        # corrected client (None if unusable), and a descriptive message.
        (ver, newc, validMsg) = self._checkValidClient(client)
        if newc is None:
            # if the error reason is just that the client is not
            # reachable then clean up the error message
            if "Operation timed out" in validMsg:
                validMsg = "Aggregate %s unreachable: %s" % (client.str, validMsg[validMsg.find("Operation timed out"):])
            elif "Unknown socket error" in validMsg:
                validMsg = "Aggregate %s unreachable: %s" % (client.str, validMsg[validMsg.find("Unknown socket error"):])
            elif "Server does not trust" in validMsg:
                validMsg = "Aggregate %s does not trust your certificate: %s" % (client.str, validMsg[validMsg.find("Server does not trust"):])
            elif "Your user certificate" in validMsg:
                validMsg = "Cannot contact aggregates: %s" % (validMsg[validMsg.find("Your user certificate"):])
            # Theoretically could remove bad client here. But nothing uses the clients list after an _api_call
            # And removing it here is dangerous if we're inside a loop over the clients
            raise BadClientException(client, validMsg)
        elif newc.url != client.url:
            # _checkValidClient redirected us to a different URL for this AM
            if ver != self.opts.api_version:
                # New URL AND different API version: refuse; tell user the right -V option
                self.logger.error("AM %s doesn't speak API version %d. Try the AM at %s and tell Omni to use API version %d, using the option '-V%d'.", client.str, self.opts.api_version, newc.url, ver, ver)
                raise BadClientException(client, validMsg)
#                self.logger.warn("Changing API version to %d. Is this going to work?", ver)
#                # FIXME: changing the api_version is not a great idea if
#                # there are multiple clients. Push this into _checkValidClient
#                # and only do it if there is one client.
#
#                # FIXME: changing API versions means unwrap or wrap cred, maybe change the op name, ...
#                # This may work for getversion, but likely not for other methods!
#                self.opts.api_version = ver
            else:
                pass
            # Theoretically could remove bad client here and add the correct one. But nothing uses the clients list after an _api_call
            # And removing it here is dangerous if we're inside a loop over the clients
            # Use the corrected client (same API version, corrected URL) for this call
            client = newc
        elif ver != self.opts.api_version:
            # Same URL but the AM speaks a different API version than requested
            self.logger.error("AM %s doesn't speak API version %d. Tell Omni to use API version %d, using the option '-V%d'.", client.str, self.opts.api_version, ver, ver)
            raise BadClientException(client, validMsg)

        self.logger.debug("Doing SSL/XMLRPC call to %s invoking %s", client.url, op)
#        self.logger.debug("Doing SSL/XMLRPC call to %s invoking %s with args %r", client.url, op, args)
        return _do_ssl(self.framework, None, msg, getattr(client, op), *args), client
# FIXME: Must still factor dev vs exp
# For experimenters: If exactly 1 AM, then show only the value slot, formatted nicely, printed to STDOUT.
# If it fails, show only why
# If saving to file, print out 'saved to file <foo>', or the error if it failed
# If querying multiple, then print a header for each before printing to STDOUT, otherwise like above.
# For developers, maybe leave it like this? Print whole struct not just the value?
    def getversion(self, args):
        """AM API GetVersion
        Get basic information about the aggregate and how to talk to it.

        Aggregates queried:
        - Each URL given in an -a argument or URL listed under that given
        nickname in omni_config, if provided, ELSE
        - List of URLs given in omni_config aggregates option, if provided, ELSE
        - List of URNs and URLs provided by the selected clearinghouse
        - Note that --useSliceAggregates is not honored as no slice name is provided.

        Output directing options:
        -o Save result (JSON format) in per-Aggregate files
        -p (used with -o) Prefix for resulting version information filenames
        --outputfile If supplied, use this output file name: substitute the AM for any %a
        If not saving results to a file, they are logged.
        If --tostdout option, then instead of logging, print to STDOUT.

        Omni caches getversion results for use elsewhere. This method skips the local cache.
        --ForceUseGetVersionCache will force it to look at the cache if possible
        --GetVersionCacheAge <#> specifies the # of days old a cache entry can be, before Omni re-queries the AM, default is 7
        --GetVersionCacheName <path> is the path to the GetVersion cache, default is ~/.gcf/get_version_cache.json

        --devmode causes Omni to continue on bad input, if possible
        -V# specifies the AM API version to attempt to speak
        -l to specify a logging config file
        --logoutput <filename> to specify a logging output filename

        Sample usage:
        omni.py -a http://myaggregate/url -V2 getversion

        Returns (retVal, version): retVal is a human-readable summary string,
        version maps each queried client.url to its raw GetVersion struct
        (None on failure for that AM).
        """
        ### Method specific arg handling
        # Ensure GetVersion skips the cache, unless commandline option forces the cache
        if not self.opts.useGetVersionCache:
            self.opts.noGetVersionCache = True

        # Start basic loop over clients
        retVal = ""
        version = {}
        (clients, message) = self._getclients()
        numClients = len(clients)
        successCnt = 0
        for client in clients:
            # Pulls from cache or caches latest, error checks return
            # getversion output should be the whole triple
            (thisVersion, message) = self._do_and_check_getversion(client)
            if self.opts.devmode:
                # In dev mode, dump the full raw return struct for debugging
                pp = pprint.PrettyPrinter(indent=4)
                prettyVersion = pp.pformat(thisVersion)
                self.logger.debug("AM %s raw getversion:\n%s", client.url, prettyVersion)
            # Strip the code/value/output wrapper down to the value slot
            thisVersionValue, message = self._retrieve_value(thisVersion, message, self.framework)

            # Method specific result handling
            version[ client.url ] = thisVersion

            # Per client result outputs:
            if version[client.url] is None:
                # FIXME: SliverStatus sets these to False. Should this for consistency?
                self.logger.warn("URN: %s (url:%s) GetVersion call failed: %s\n" % (client.urn, client.url, message) )
                retVal += "Cannot GetVersion at %s: %s\n" % (client.str, message)
            else:
                successCnt += 1
                # Logs/prints/saves the result per output-directing options
                retVal += self._do_getversion_output(thisVersionValue, client, message)
        # End of loop over clients

        ### Method specific all-results handling, printing
        if numClients==0:
            retVal += "No aggregates to query. %s\n\n" % message
        else:
            if self.numOrigClients>1:
                # FIXME: If I have a message from getclients, want it here?
                if "From Cache" in message:
                    retVal += "\nGot version for %d out of %d aggregates using GetVersion cache\n" % (successCnt,self.numOrigClients)
                else:
                    retVal += "\nGot version for %d out of %d aggregates\n" % (successCnt,self.numOrigClients)
            else:
                # Single aggregate: report success/failure by name
                if successCnt == 1:
                    retVal += "\nGot version for %s\n" % clients[0].str
                else:
                    retVal += "\nFailed to get version for %s\n" % clients[0].str
                if "From Cache" in message:
                    retVal += message + "\n"
        return (retVal, version)
# ------- End of GetVersion stuff
    def _selectRSpecVersion(self, slicename, client, mymessage, options):
        '''Helper for Describe and ListResources and Provision to set the rspec_version option, based on a single AMs capabilities.
        Uses -t argument: If user specified an RSpec type and version, then only
        use AMs that support that type/version (default is GENI 3).
        Return dict with API version appropriate key specifying RSpec type/version
        to request, plus a message describing results.
        Raise a BadClientException if the AM cannot support the given RSpect type
        or didn't advertise what it supports.'''

        # If the user specified a specific rspec type and version,
        # then we ONLY get rspecs from each AM that is capable
        # of talking that type&version.
        # Note an alternative would have been to let the AM just
        # do whatever it likes to do if
        # you ask it to give you something it doesn't understand.
        if self.opts.rspectype:
            rtype = self.opts.rspectype[0]
            rver = self.opts.rspectype[1]
            self.logger.debug("Will request RSpecs only of type %s and version %s", rtype, rver)

            # Note this call uses the GetVersion cache, if available
            # If got a slicename, use request rspecs to better match manifest support
            if not slicename:
                (ad_rspec_version, message) = self._get_advertised_rspecs(client)
            else:
                (ad_rspec_version, message) = self._get_request_rspecs(client)
            if ad_rspec_version is None:
                # AM did not advertise its supported RSpec formats
                if message:
                    if mymessage != "":
                        mymessage += ". "
                    mymessage = mymessage + message
                self.logger.debug("AM %s failed to advertise supported RSpecs", client.str)
                # Allow developers to call an AM that fails to advertise
                if not self.opts.devmode:
                    # Skip this AM/client
                    raise BadClientException(client, mymessage)
                else:
                    self.logger.debug("... but continuing")
                    ad_rspec_version = ()

            self.logger.debug("Got %d supported RSpec versions", len(ad_rspec_version))
            # foreach item in the list that is the val
            match = False
            hasGENI3 = False
            hasPG2 = False
            for availversion in ad_rspec_version:
                if not (availversion.has_key('type') and availversion.has_key('version')):
                    self.logger.warning("AM getversion rspec_version entry malformed: no type or no version")
                    continue

                # version is also a string
                if str(availversion['type']).lower().strip() == rtype.lower().strip() and str(availversion['version']).lower().strip() == str(rver).lower().strip():
                    # success
                    self.logger.debug("Found a matching supported type/ver: %s/%s", availversion['type'], availversion['version'])
                    match = True
                    # Adopt the AM's exact spelling of type/version
                    rtype=availversion['type']
                    rver=availversion['version']
                    break
                # Track whether common fallback formats are available, to suggest below
                if str(availversion['type']).lower().strip() == 'geni' and str(availversion['version']).lower().strip() == '3':
                    hasGENI3 = True
                elif str(availversion['type']).lower().strip() == 'protogeni' and str(availversion['version']).lower().strip() == '2':
                    hasPG2 = True

            # if no success
            if match == False:
                # if user did not explicitly specify this version, then maybe try to get the RSpec with another format
                if not self.opts.explicitRSpecVersion:
                    # if only 1 version is supported, use it
                    if len(ad_rspec_version) == 1:
                        ver = ad_rspec_version[0]
                        if ver.has_key('type') and ver.has_key('version'):
                            self.logger.warning("AM doesn't support default RSpec version %s %s. Returning RSpec in only supported format. Next time at this AM, call Omni with '-t %s %s'.", rtype, rver, ver['type'], ver['version'])
                            rtype=ver['type']
                            rver=ver['version']
                    # if this is an ad, and default_ad_rspec is set, use that
                    # FIXME: Maybe do this even for manifests?
                    elif not slicename:
                        (default_ad, message) = self._get_getversion_key(client, 'default_ad_rspec')
                        if default_ad and default_ad.has_key('type') and default_ad.has_key('version'):
                            self.logger.warning("AM doesn't support default RSpec version %s %s. Returning RSpec in AM specified default Ad format. Next time at this AM, call Omni with '-t %s %s'.", rtype, rver, default_ad['type'], default_ad['version'])
                            rtype=default_ad['type']
                            rver=default_ad['version']
                    # more than 1 format advertised but no default.
                else:
                    # User explicitly picked this version that is not supported
                    # FIXME: Could or should we pick PGv2 if GENIv3 not there, and vice versa?
                    # return error showing ad_rspec_versions
                    pp = pprint.PrettyPrinter(indent=4)
                    self.logger.warning("AM cannot provide Rspec in requested version (%s %s) at AM %s. This AM only supports: \n%s", rtype, rver, client.str, pp.pformat(ad_rspec_version))
                    tryOthersMsg = "";
                    if hasGENI3:
                        tryOthersMsg = ". Try calling Omni with '-t GENI 3' for GENI v3 RSpecs."
                    elif hasPG2:
                        tryOthersMsg = ". Try calling Omni with '-t ProtoGENI 2' for ProtoGENI v2 RSpecs."
                    else:
                        tryOthersMsg = ". Try calling Omni with '-t <another supported RSpec format>'."
                    if mymessage != "" and not mymessage.endswith('.'):
                        mymessage += ". "
                    if not self.opts.devmode:
                        mymessage = mymessage + "Skipped AM %s that didnt support required RSpec format %s %s" % (client.str, rtype, rver)
                        mymessage = mymessage + tryOthersMsg
                        # Skip this AM/client
                        raise BadClientException(client, mymessage)
                    else:
                        mymessage = mymessage + "AM %s didnt support required RSpec format %s %s, but continuing" % (client.str, rtype, rver)

            #--- API version differences:
            if self.opts.api_version == 1:
                options['rspec_version'] = dict(type=rtype, version=rver)
            else:
                options['geni_rspec_version'] = dict(type=rtype, version=rver)

        #--- Dev mode should not force supplying this option maybe?
        # This elif is only if no rspec type option was supplied - which you can't really do at this point
        elif self.opts.api_version >= 2:
            # User did not specify an rspec type but did request version 2.
            # Make an attempt to do the right thing, otherwise bail and tell the user.
            if not slicename:
                (ad_rspec_version, message) = self._get_advertised_rspecs(client)
            else:
                (ad_rspec_version, message) = self._get_request_rspecs(client)
            if ad_rspec_version is None:
                if message:
                    if mymessage != "" and not mymessage.endswith('.'):
                        mymessage += ". "
                    mymessage = mymessage + message
                self.logger.debug("AM %s failed to advertise supported RSpecs", client.str)
                # Allow developers to call an AM that fails to advertise
                if not self.opts.devmode:
                    # Skip this AM/client
                    raise BadClientException(client, mymessage)
            # NOTE(review): in devmode ad_rspec_version may still be None here,
            # which would raise on len() below - confirm intended.
            if len(ad_rspec_version) == 1:
                # there is only one advertisement, so use it.
                options['geni_rspec_version'] = dict(type=ad_rspec_version[0]['type'],
                                                     version=ad_rspec_version[0]['version'])
            # FIXME: if there is a default_ad_rspec and this is for ads, use that?
            else:
                # FIXME: Could we pick GENI v3 if there, else PG v2?
                # Inform the user that they have to pick.
                ad_versions = [(x['type'], x['version']) for x in ad_rspec_version]
                self.logger.warning("Please use the -t option to specify the desired RSpec type for AM %s as one of %r", client.str, ad_versions)
                if mymessage != "" and not mymessage.endswith('.'):
                    mymessage += ". "
                mymessage = mymessage + "AM %s supports multiple RSpec versions: %r" % (client.str, ad_versions)
                if not self.opts.devmode:
                    # Skip this AM/client
                    raise BadClientException(client, mymessage)
        return (options, mymessage)
    # End of _selectRSpecVersion
def _maybeDecompressRSpec(self, options, rspec):
'''Helper to decompress an RSpec string if necessary'''
if rspec is None or rspec.strip() == "":
return rspec
if options.get('geni_compressed', False):
try:
rspec = zlib.decompress(rspec.decode('base64'))
except Exception, e:
if rspec and rspec_util.is_rspec_string(rspec, None, None, logger=self.logger):
self.logger.debug("AM returned uncompressed RSpec when compressed was requested")
else:
self.logger.error("Failed to decompress RSpec: %s", e);
self.logger.debug("RSpec begins: '%s'", rspec[:min(40, len(rspec))])
# In experimenter mode, maybe notice if the rspec appears compressed anyhow and try to decompress?
elif not self.opts.devmode and rspec and not rspec_util.is_rspec_string(rspec, None, None, logger=self.logger):
try:
rspec2 = zlib.decompress(rspec.decode('base64'))
if rspec2 and rspec_util.is_rspec_string(rspec2, None, None, logger=self.logger):
rspec = rspec2
except Exception, e:
pass
return rspec
def _listresources(self, args):
"""Support method for doing AM API ListResources. Queries resources on various aggregates.
Takes an optional slicename.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
If you specify a required Ad RSpec type and version (both strings. Use the -t option)
then it skips any AM that doesn't advertise (in GetVersion) that it supports that format.
Note that -t GENI 3 is the default.
Returns a dictionary of rspecs with the following format:
rspecs[(urn, url)] = return struct, containing a decompressed rspec
AND a string describing the result.
On error the dictionary is None and the message explains.
Decompress the returned RSpec if necessary
--arbitrary-option: supply arbitrary thing for testing
-V# API Version #
--devmode: Continue on error if possible
--no-compress: Request the returned RSpec not be compressed (default is to compress)
--available: Return Ad of only available resources
"""
# rspecs[(urn, url)] = decompressed rspec
rspecs = {}
options = {}
# Pass in a dummy option for testing that is actually ok
# FIXME: Omni should have a standard way for supplying additional options. Something like extra args
# of the form Name=Value
# Then a standard helper function could be used here to split them apart
if self.opts.arbitrary_option:
options['arbitrary_option'] = self.opts.arbitrary_option
#--- Maybe dev mode gets both user and slice creds? Somehow let caller decide?
#-- AM API v2-3 differences here:
# An optional slice name might be specified.
# FIXME: This should be done by caller so this method takes slicename that may be null
slicename = None
if len(args) > 0:
slicename = args[0].strip()
if self.opts.api_version >= 3 and slicename is not None and slicename != "":
if not self.opts.devmode:
self._raise_omni_error("In AM API version 3, use 'describe' to list contents of a slice, not 'listresources'. Otherwise specify the -V2 argument to use AM API v2, if the AM supports it.")
else:
self.logger.warn("Got a slice name to v3+ ListResources, but continuing...")
options['geni_compressed'] = self.opts.geni_compressed
# Get the credential for this query
if slicename is None or slicename == "":
options['geni_available'] = self.opts.geni_available
slicename = None
cred = None
if self.opts.api_version >= 3:
(cred, message) = self.framework.get_user_cred_struct()
else:
(cred, message) = self.framework.get_user_cred()
if cred is None:
# Per AM API Change Proposal AD, allow no user cred to get an ad
self.logger.debug("No user credential, but this is now allowed for getting Ads....")
else:
(slicename, urn, cred, retVal, slice_exp) = self._args_to_slicecred(args, 1, "listresources")
if cred is None or cred == "":
# Dev mode allow doing the call anyhow
if not self.opts.devmode:
return (None, prstr)
self.logger.info('Gathering resources reserved for slice %s.' % slicename)
options['geni_slice_urn'] = urn
# We now have a credential
#----
# Query each aggregate for resources
successCnt = 0
mymessage = ""
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients == 0:
if message != "":
mymessage = "No aggregates available to query: %s" % message
else:
# FIXME: What if got a message and still got some aggs?
if message != "":
self.logger.debug("Got %d AMs but also got an error message: %s", numClients, message)
creds = _maybe_add_abac_creds(self.framework, cred)
creds = self._maybe_add_creds_from_files(creds)
# Connect to each available GENI AM to list their resources
for client in clientList:
if creds is None or len(creds) == 0:
self.logger.debug("Have null or empty credential list in call to ListResources!")
rspec = None
(ver, newc, validMsg) = self._checkValidClient(client)
if newc is None:
if validMsg and validMsg != '':
if not mymessage:
mymessage = ""
else:
if not mymessage.endswith('.'):
mymessage += ".\n"
else:
mymyessage += "\n"
if "Operation timed out" in validMsg:
validMsg = validMsg[validMsg.find("Operation timed out"):]
elif "Unknown socket error" in validMsg:
validMsg = validMsg[validMsg.find("Unknown socket error"):]
elif "Server does not trust" in validMsg:
validMsg = validMsg[validMsg.find("Server does not trust"):]
elif "Your user certificate" in validMsg:
validMsg = validMsg[validMsg.find("Your user certificate"):]
mymessage += "Skipped AM %s: %s" % (client.str, validMsg)
# Theoretically could remove this client from clients list, but currently
# nothing uses client list after this, so no need.
# Plus, editing the client list inside the loop is bad
continue
elif newc.url != client.url:
if ver != self.opts.api_version:
if numClients == 1:
self._raise_omni_error("Can't do ListResources: AM %s speaks only AM API v%d, not %d. Try calling Omni with the -V%d option." % (client.str, ver, self.opts.api_version, ver))
self.logger.warn("AM %s doesn't speak API version %d. Try the AM at %s and tell Omni to use API version %d, using the option '-V%d'.", client.str, self.opts.api_version, newc.url, ver, ver)
if not mymessage:
mymessage = ""
else:
if not mymessage.endswith('.'):
mymessage += ".\n"
else:
mymyessage += "\n"
mymessage += "Skipped AM %s: speaks only API v%d, not %d. Try -V%d option." % (client.str, ver, self.opts.api_version, ver)
# Theoretically could remove this client from clients list, but currently
# nothing uses client list after this, so no need.
# Plus, editing the client list inside the loop is bad
continue
# raise BadClientException(client, mymessage)
# self.logger.warn("Changing API version to %d. Is this going to work?", ver)
# # FIXME: changing the api_version is not a great idea if
# # there are multiple clients. Push this into _checkValidClient
# # and only do it if there is one client.
#1 self.opts.api_version = ver
else:
self.logger.debug("Using new AM url %s but same API version %d", newc.url, ver)
# Theoretically could remove this client from clients list, but currently
# nothing uses client list after this, so no need.
# Plus, editing the client list inside the loop is bad
# Also note I'm not adding the new corrected client here
client = newc
elif ver != self.opts.api_version:
if numClients == 1:
self._raise_omni_error("Can't do ListResources: AM %s speaks only AM API v%d, not %d. Try calling Omni with the -V%d option." % (client.str, ver, self.opts.api_version, ver))
self.logger.warn("AM %s speaks API version %d, not %d. Rerun with option '-V%d'.", client.str, ver, self.opts.api_version, ver)
if not mymessage:
mymessage = ""
else:
if not mymessage.endswith('.'):
mymessage += ".\n"
else:
mymessage += "\n"
mymessage += "Skipped AM %s: speaks only API v%d, not %d. Try -V%d option." % (client.str, ver, self.opts.api_version, ver)
# Theoretically could remove this client from clients list, but currently
# nothing uses client list after this, so no need.
# Plus, editing the client list inside the loop is bad
continue
self.logger.debug("Connecting to AM: %s at %s", client.urn, client.url)
#---
# In Dev mode, just use the requested type/version - don't check what is supported
try:
(options, mymessage) = self._selectRSpecVersion(slicename, client, mymessage, options)
except BadClientException, bce:
if not mymessage:
mymessage = ""
else:
if not mymessage.endswith('.'):
mymessage += ".\n"
else:
mymessage += "\n"
if bce.validMsg and bce.validMsg != '':
mymessage += bce.validMsg
if not mymessage.endswith('.'):
mymessage += ". "
# mymessage += "AM %s doesn't advertise matching RSpec versions" % client.url
self.logger.warn(message + "... continuing with next AM")
# Theoretically could remove this client from clients list, but currently
# nothing uses client list after this, so no need.
# Plus, editing the client list inside the loop is bad
continue
options = self._build_options("ListResources", slicename, options)
# Done constructing options to ListResources
#-----
self.logger.debug("Doing listresources with %d creds, options %r", len(creds), options)
(resp, message) = _do_ssl(self.framework, None, ("List Resources at %s" % (client.url)), client.ListResources, creds, options)
# Decompress the RSpec before sticking it in retItem
if resp and (self.opts.api_version == 1 or (self.opts.api_version > 1 and isinstance(resp, dict) and resp.has_key('value') and isinstance(resp['value'], str))):
if self.opts.api_version > 1:
origRSpec = resp['value']
else:
origRSpec = resp
rspec = self._maybeDecompressRSpec(options, origRSpec)
if rspec and rspec != origRSpec:
self.logger.debug("Decompressed RSpec")
if rspec and rspec_util.is_rspec_string( rspec, None, None, logger=self.logger ):
successCnt += 1
doPretty = (slicename is not None) # True on Manifests
if doPretty and rspec.count('\n') > 10:
# Are there newlines in the manifest already? Then set it false. Good enough.
doPretty = False
elif not doPretty and rspec.count('\n') <= 10:
# Are there no newlines in the Ad? Then set it true to make the ad prettier,
# but usually don't bother. FOAM ads are messy otherwise.
doPretty = True
rspec = rspec_util.getPrettyRSpec(rspec, doPretty)
else:
self.logger.warn("Didn't get a valid RSpec!")
if mymessage != "":
if mymessage.endswith('.'):
mymessage += ' '
else:
mymessage += ". "
mymessage += "No resources from AM %s: %s" % (client.str, message)
if self.opts.api_version > 1:
resp['value']=rspec
else:
resp = rspec
else:
self.logger.warn("No resource listing returned!")
self.logger.debug("Return struct missing proper rspec in value element!")
if mymessage != "":
if mymessage.endswith('.'):
mymessage += ' '
else:
mymessage += ". "
mymessage += "No resources from AM %s: %s" % (client.str, message)
# Return for tools is the full code/value/output triple
rspecs[(client.urn, client.url)] = resp
# End of loop over clients
if self.numOrigClients > 0:
if slicename:
self.logger.info( "Listed reserved resources on %d out of %d possible aggregates." % (successCnt, self.numOrigClients))
else:
self.logger.info( "Listed advertised resources at %d out of %d possible aggregates." % (successCnt, self.numOrigClients))
return (rspecs, mymessage)
# End of _listresources
    def listresources(self, args):
        """GENI AM API ListResources
        Call ListResources on 1+ aggregates and prints the rspec to stdout or to a file.
        Optional argument for API v1&2 is a slice name, making the request for a manifest RSpec.
        Note that the slice name argument is only supported in AM API v1 or v2.
        For listing contents of a slice in APIv3+, use describe().

        Aggregates queried:
        - If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
        '''and''' any aggregates specified with the `-a` option.
        - Only supported at some clearinghouses, and the list of aggregates is only advisory
        - Each URL given in an -a argument or URL listed under that given
        nickname in omni_config, if provided, ELSE
        - List of URLs given in omni_config aggregates option, if provided, ELSE
        - List of URNs and URLs provided by the selected clearinghouse

        -t <type version>: Default "GENI 3". Specify a required RSpec type and version to return.
        It skips any AM that doesn't advertise (in GetVersion) that it supports that format.

        Returns a dictionary of rspecs with the following format:
        API V1&2:
        rspecs[(urn, url)] = decompressed rspec
        API V3+:
        rspecs[url] = return struct containing a decompressed rspec

        Output directing options:
        -o Save result RSpec (XML format) in per-Aggregate files
        -p (used with -o) Prefix for resulting rspec filenames
        --outputfile If supplied, use this output file name: substitute the AM for any %a,
        and %s for any slicename
        If not saving results to a file, they are logged.
        If --tostdout option, then instead of logging, print to STDOUT.
        File names will indicate the slice name, file format, and
        which aggregate is represented.
        e.g.: myprefix-myslice-rspec-localhost-8001.xml

        --slicecredfile says to use the given slicecredfile if it exists.

        If a slice name is supplied, then resources for that slice only
        will be displayed. In this case, the slice credential is usually
        retrieved from the Slice Authority. But
        with the --slicecredfile option it is read from that file, if it exists.

        --arbitrary-option: supply arbitrary thing for testing
        -V# API Version #
        --devmode: Continue on error if possible
        --no-compress: Request the returned RSpec not be compressed (default is to compress)
        --available: Return Ad of only available resources
        -l to specify a logging config file
        --logoutput <filename> to specify a logging output filename

        Sample usage:
        Call AM API v2 ListResources at 1 Aggregate for 1 slice, getting the manifest RSpec
        omni.py -a http://myaggregate/url -V2 listresources myslice
        Call AM API v3 ListResources at 1 Aggregate, getting the Ad RSpec
        omni.py -a http://myaggregate/url -V3 listresources
        Do AM API v3 ListResources from 1 aggregate saving the results in a specific file,
        with the aggregate name (constructed from the URL) inserted into the filename:
        omni.py -a http://myaggregate/url -V3 -o --outputfile AdRSpecAt%a.xml listresources
        """
        #--- API version specific
        # An optional slice name might be specified.
        slicename = None
        if len(args) > 0:
            slicename = args[0].strip()

        # In AM API v3+ a slice argument is an error (use describe instead),
        # unless devmode says to push on anyway.
        if self.opts.api_version >= 3 and slicename is not None and slicename != "":
            if not self.opts.devmode:
                self._raise_omni_error("In AM API version 3, use 'describe' to list contents of a slice, not 'listresources'. Otherwise specify the -V2 argument to use AM API v2, if the AM supports it.")
            else:
                self.logger.warn("Got a slice name to v3+ ListResources, but continuing...")
        #---

        # check command line args
        if self.opts.output:
            self.logger.info("Saving output to a file.")

        # Query the various aggregates for resources
        # rspecs[(urn, url)] = decompressed rspec
        (rspecs, message) = self._listresources( args )
        numAggs = self.numOrigClients

        # handle empty case: build a human-readable explanation and return it
        # with an empty dict so callers can distinguish "no resources" cleanly.
        if not rspecs or rspecs == {}:
            if slicename:
                prtStr = "Got no resources on slice %s"%slicename
            else:
                prtStr = "Got no resources"
            if message is not None:
                prtStr = prtStr + ". " + message
            else:
                prtStr += " (no reason given)"
            self.logger.info( prtStr )
            return prtStr, {}

        # Loop over RSpecs and print them
        returnedRspecs = {}   # return item: keyed (urn,url) for v1, url for v2+
        rspecCtr = 0          # count of AMs that returned a non-empty RSpec
        savedFileDesc = ""    # accumulated human-readable list of saved files
        for ((urn,url), rspecStruct) in rspecs.items():
            # Prefer the omni_config nickname for the AM in messages, fall back to its URN
            amNick = _lookupAggNick(self, urn)
            if amNick is None:
                amNick = urn
            self.logger.debug("Getting RSpec items for AM urn %s (%s)", urn, url)
            # Extract the bare RSpec string from the (possibly code/value/output) struct
            rspecOnly, message = self._retrieve_value( rspecStruct, message, self.framework)
            # v1 returns just the rspec keyed by (urn,url); v2+ returns the full struct keyed by url
            if self.opts.api_version < 2:
                returnedRspecs[(urn,url)] = rspecOnly
            else:
                returnedRspecs[url] = rspecStruct

            # Write/log the RSpec per the output options; filename is None if not saved to file
            retVal, filename = _writeRSpec(self.opts, self.logger, rspecOnly, slicename, urn, url, None, len(rspecs))
            if filename:
                # Keep savedFileDesc readable: separate entries with a space
                if not savedFileDesc.endswith(' ') and savedFileDesc != "" and not savedFileDesc.endswith('\n'):
                    savedFileDesc += " "
                savedFileDesc += "Saved listresources RSpec from '%s' (url '%s') to file %s; " % (amNick, url, filename)

            if rspecOnly and rspecOnly != "":
                rspecCtr += 1
                if slicename:
                    # Try to parse the new sliver expiration from the rspec and print it in the result summary.
                    # Use a helper function in handler_utils that can be used elsewhere.
                    manExpires = expires_from_rspec(rspecOnly, self.logger)
                    if manExpires is not None:
                        prstr = "Reservation at %s in slice %s expires at %s (UTC)." % (amNick, slicename, manExpires)
                        self.logger.info(prstr)
                        # Punctuate/space savedFileDesc before appending the expiration note
                        if not savedFileDesc.endswith('.') and savedFileDesc != "" and not savedFileDesc.endswith('; '):
                            savedFileDesc += '.'
                        if not savedFileDesc.endswith(' ') and savedFileDesc != '':
                            savedFileDesc += " "
                        savedFileDesc += prstr
                    else:
                        self.logger.debug("Got None sliver expiration from manifest")
        # End of loop over rspecs

        self.logger.debug("rspecCtr %d", rspecCtr)

        # Create RETURNS
        # FIXME: If numAggs is 1 then retVal should just be the rspec?
        #--- AM API specific:
        if slicename:
            retVal = "Queried resources for slice %s from %d of %d aggregate(s)."%(slicename, rspecCtr, numAggs)
        #---
        else:
            retVal = "Queried resources from %d of %d aggregate(s)." % (rspecCtr, numAggs)

        if numAggs > 0:
            retVal +="\n"
            if len(returnedRspecs.keys()) > 0:
                if self.opts.output:
                    retVal += "Wrote rspecs from %d aggregate(s)" % numAggs
                    retVal +=" to %d file(s)"% len(rspecs)
                    if savedFileDesc != "":
                        if not retVal.endswith("\n"):
                            retVal += "\n"
                        retVal += savedFileDesc
            else:
                retVal +="No Rspecs succesfully parsed from %d aggregate(s)." % numAggs
            if message:
                retVal += message

        retItem = returnedRspecs

        return retVal, retItem
    # End of listresources
    def describe(self, args):
        """GENI AM API v3 Describe()
        Retrieve a manifest RSpec describing the resources contained by the named entities,
        e.g. a single slice or a set of the slivers in a slice. This listing and description
        should be sufficiently descriptive to allow experimenters to use the resources.
        For listing contents of a slice in APIv1 or 2, or to get the Ad
        of available resources at an AM, use ListResources().

        Argument is a slice name, naming the slice whose contents will be described.
        Lists contents and state on 1+ aggregates and prints the result to stdout or to a file.

        --sliver-urn / -u option: each specifies a sliver URN to describe. If specified,
        only the listed slivers will be described. Otherwise, all slivers in the slice will be described.

        Return is (1) A string describing the result to print, and (2) a dictionary by AM URL of the full
        code/value/output return struct from the AM.

        Aggregates queried:
        - If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
        '''and''' any aggregates specified with the `-a` option.
        - Only supported at some clearinghouses, and the list of aggregates is only advisory
        - Each URL given in an -a argument or URL listed under that given
        nickname in omni_config, if provided, ELSE
        - List of URLs given in omni_config aggregates option, if provided, ELSE
        - List of URNs and URLs provided by the selected clearinghouse

        Output directing options:
        -o writes output to file instead of stdout; single file per aggregate.
        -p gives filename prefix for each output file
        --outputfile If supplied, use this output file name: substitute the AM for any %a,
        and %s for any slicename
        If not saving results to a file, they are logged.
        If --tostdout option, then instead of logging, print to STDOUT.
        File names will indicate the slice name, file format, and
        which aggregate is represented.
        e.g.: myprefix-myslice-rspec-localhost-8001.json

        -t <type version>: Specify a required manifest RSpec type and version to return.
        It skips any AM that doesn't advertise (in GetVersion)
        that it supports that format. Default is "GENI 3".
        --slicecredfile says to use the given slicecredfile if it exists.

        --arbitrary-option: supply arbitrary thing for testing
        -V# API Version #
        --devmode: Continue on error if possible
        --no-compress: Request the returned RSpec not be compressed (default is to compress)
        -l to specify a logging config file
        --logoutput <filename> to specify a logging output filename

        Sample usage:
        Describe at 1 Aggregate, getting the Manifest RSpec
        in GENI v3 RSpec format
        omni.py -a http://myaggregate/url -V3 describe myslice

        Describe from 2 aggregates, saving the results in a specific file,
        with the aggregate name (constructed from the URL) inserted into the filename:
        omni.py -a http://myaggregate/url -a http://another/aggregate -V3 -o --outputfile AdRSpecAt%a.xml describe myslice

        Describe 2 slivers from a particular aggregate
        omni.py -a http://myaggregate/url -V3 describe myslice -u urn:publicid:IDN:myam+sliver+sliver1 -u urn:publicid:IDN:myam+sliver+sliver2
        """
        # Describe only exists in AM API v3+; in devmode, warn and try anyway.
        if self.opts.api_version < 3:
            if self.opts.devmode:
                self.logger.warn("Trying Describe with AM API v%d...", self.opts.api_version)
            else:
                self._raise_omni_error("Describe is only available in AM API v3+. Use ListResources with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)

        # get the slice name and URN or raise an error
        (name, urn, slice_cred,
         retVal, slice_exp) = self._args_to_slicecred(args, 1, "Describe")

        options = {}
        options['geni_compressed'] = self.opts.geni_compressed

        # Pass in a dummy option for testing that is actually ok
        # FIXME: Omni should have a standard way for supplying additional options. Something like extra args
        # of the form Name=Value
        # Then a standard helper function could be used here to split them apart
        if self.opts.arbitrary_option:
            options['arbitrary_option'] = self.opts.arbitrary_option

        successCnt = 0
        retItem = {}      # per-AM-URL full code/value/output return struct
        args = []
        creds = []
        slivers = []
        urnsarg = []
        # Query status at each client
        (clientList, message) = self._getclients()
        numClients = len(clientList)
        if numClients > 0:
            self.logger.info('Describe Slice %s:' % urn)

            # Build credentials (slice cred plus any ABAC / file-supplied creds)
            creds = _maybe_add_abac_creds(self.framework, slice_cred)
            creds = self._maybe_add_creds_from_files(creds)

            # If -u sliver URNs were given, describe only those slivers
            urnsarg, slivers = self._build_urns(urn)

            # Add the options dict
            options = self._build_options('Describe', name, options)
        else:
            prstr = "No aggregates available to describe slice at: %s" % message
            retVal += prstr + "\n"
            self.logger.warn(prstr)

        # Human-readable description of what is being described, for messages
        descripMsg = "slice %s" % urn
        if len(slivers) > 0:
            descripMsg = "%d slivers in slice %s" % (len(slivers), urn)

        op = 'Describe'
        msg = "Describe %s at " % (descripMsg)
        for client in clientList:
            args = [urnsarg, creds]
            try:
                # Do per client check for rspec version to use and properly fill in geni_rspec_version
                mymessage = ""
                (options, mymessage) = self._selectRSpecVersion(name, client, mymessage, options)
                args.append(options)
                self.logger.debug("Doing describe of %s, %d creds, options %r", descripMsg, len(creds), options)
                ((status, message), client) = self._api_call(client,
                                                             msg + str(client.url),
                                                             op, args)
                # Fold any RSpec-version-selection warning into the AM's message
                if mymessage.strip() != "":
                    if message is None or message.strip() == "":
                        message = ""
                    message = mymessage + ". " + message
            except BadClientException as bce:
                # AM speaks the wrong API version or no matching RSpec version
                if bce.validMsg and bce.validMsg != '':
                    retVal += bce.validMsg + ". "
                else:
                    retVal += "Describe skipping AM %s. No matching RSpec version or wrong AM API version - check logs" % (client.str)
                if numClients == 1:
                    self._raise_omni_error("\nDescribe failed: " + retVal)
                continue

            # FIXME: Factor this next chunk into helper method?
            # Decompress the RSpec before sticking it in retItem
            rspec = None
            if status and isinstance(status, dict) and status.has_key('value') and isinstance(status['value'], dict) and status['value'].has_key('geni_rspec'):
                rspec = self._maybeDecompressRSpec(options, status['value']['geni_rspec'])
                if rspec and rspec != status['value']['geni_rspec']:
                    self.logger.debug("Decompressed RSpec")
                if rspec and rspec_util.is_rspec_string( rspec, None, None, logger=self.logger ):
                    rspec = rspec_util.getPrettyRSpec(rspec)
                else:
                    self.logger.warn("Didn't get a valid RSpec!")
                # Replace the (possibly compressed) rspec in the struct with the readable one
                status['value']['geni_rspec'] = rspec
            else:
                self.logger.warn("Got no resource listing from AM %s", client.str)
                self.logger.debug("Return struct missing geni_rspec element!")

            # Return for tools is the full code/value/output triple
            retItem[client.url] = status

            # Get the dict describe result out of the result (accounting for API version diffs, ABAC)
            (status, message) = self._retrieve_value(status, message, self.framework)

            if not status:
                fmt = "\nFailed to Describe %s at AM %s: %s\n"
                if message is None or message.strip() == "":
                    message = "(no reason given)"
                retVal += fmt % (descripMsg, client.str, message)
                continue # go to next AM

            # Cross-check requested slivers against what came back
            missingSlivers = self._findMissingSlivers(status, slivers)
            if len(missingSlivers) > 0:
                self.logger.warn("%d slivers from request missing in result?!", len(missingSlivers))
                self.logger.debug("%s", missingSlivers)

            sliverFails = self._didSliversFail(status)
            for sliver in sliverFails.keys():
                self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])

            # Format the RSpec (header/content) for output
            (header, rspeccontent, rVal) = _getRSpecOutput(self.logger, rspec, name, client.urn, client.url, message, slivers)
            self.logger.debug(rVal)
            if status and isinstance(status, dict) and status.has_key('geni_rspec') and rspec and rspeccontent:
                status['geni_rspec'] = rspeccontent

            if not isinstance(status, dict):
                # malformed describe return
                self.logger.warn('Malformed describe result from AM %s. Expected struct, got type %s.' % (client.str, status.__class__.__name__))
                # FIXME: Add something to retVal that the result was malformed?
                if isinstance(status, str):
                    prettyResult = str(status)
                else:
                    prettyResult = pprint.pformat(status)
            else:
                prettyResult = json.dumps(status, ensure_ascii=True, indent=2)

            #header="<!-- Describe %s at AM URL %s -->" % (descripMsg, client.url)
            filename = None

            if self.opts.output:
                filename = _construct_output_filename(self.opts, name, client.url, client.urn, "describe", ".json", numClients)
                #self.logger.info("Writing result of describe for slice: %s at AM: %s to file %s", name, client.url, filename)
            _printResults(self.opts, self.logger, header, prettyResult, filename)
            if filename:
                retVal += "Saved description of %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
            # Only count it as success if no slivers were missing
            if len(missingSlivers) == 0 and len(sliverFails.keys()) == 0:
                successCnt+=1
            else:
                retVal += " - with %d slivers missing and %d slivers with errors. \n" % (len(missingSlivers), len(sliverFails.keys()))

        # FIXME: Return the status if there was only 1 client?
        if numClients > 0:
            retVal += "Found description of slivers on %d of %d possible aggregates." % (successCnt, self.numOrigClients)
        self.logger.debug("Describe return: \n" + json.dumps(retItem, indent=2))
        return retVal, retItem
    # End of describe
def createsliver(self, args):
"""AM API CreateSliver call
CreateSliver <slicename> <rspec file>
Return on success the manifest RSpec
For use in AM API v1+2 only. For AM API v3+, use allocate(), provision, and performoperationalaction().
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
-a Contact only the aggregate at the given URL, or with the given
nickname that translates to a URL in your omni_config
- Note that `--useSliceAggregates` is not honored, as the desired
aggregate usually has no resources in this slice yet.
Output directing options:
-o writes output to file instead of stdout; single file per aggregate.
-p gives filename prefix for each output file
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-manifest-localhost-8001.xml
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
--slicecredfile Read slice credential from given file, if it exists
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
omni_config users section is used to get a set of SSH keys that
should be loaded onto the remote node to allow SSH login, if the
remote resource and aggregate support this.
Note you likely want to check SliverStatus to ensure your resource
comes up.
And check the sliver expiration time: you may want to call RenewSliver.
"""
if self.opts.api_version >= 3:
if self.opts.devmode:
self.logger.warn("Trying CreateSliver with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("CreateSliver is only available in AM API v1 or v2. Use Allocate, then Provision, then PerformOperationalAction in AM API v3+, or use the -V2 option to use AM API v2 if the AM supports it.")
# check command line args
if not self.opts.aggregate or len(self.opts.aggregate) == 0:
# the user must supply an aggregate.
# FIXME: Note this check is now duplicated in _correctAPIVersion
msg = 'Missing -a argument: specify an aggregate where you want the reservation.'
# FIXME: parse the AM to reserve at from a comment in the RSpec
# Calling exit here is a bit of a hammer.
# Maybe there's a gentler way.
self._raise_omni_error(msg)
elif self.clients and len(self.clients) > 1:
self.logger.warn("Multiple clients supplied - only the first will be used. ('%s')" % self.clients[0].str)
elif not self.clients and len(self.opts.aggregate) > 1:
self.logger.warn("Multiple -a arguments received - only the first will be used. ('%s')" % self.opts.aggregate[0])
self.opts.aggregate = [self.opts.aggregate[0]]
# prints slice expiration. Warns or raises an Omni error on problems
(slicename, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 2, "CreateSliver", "and a request rspec filename")
# Load up the user's request rspec
rspecfile = None
if not (self.opts.devmode and len(args) < 2):
rspecfile = args[1]
if rspecfile is None: # FIXME: If file type arg, check the file exists: os.path.isfile(rspecfile)
#--- Dev mode should allow missing RSpec
msg = 'File of resources to request missing: %s' % rspecfile
if self.opts.devmode:
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
try:
# read the rspec into a string, and add it to the rspecs dict
rspec = _derefRSpecNick(self, rspecfile)
except Exception, exc:
#--- Should dev mode allow this?
msg = "Unable to read rspec file '%s': %s" % (rspecfile, str(exc))
if self.opts.devmode:
rspec = ""
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
# Test if the rspec is really json containing an RSpec, and pull out the right thing
rspec = self._maybeGetRSpecFromStruct(rspec)
# FIXME: We could try to parse the RSpec right here, and get the AM URL or nickname
# out of the RSpec
(clientList, message) = self._getclients()
if (clientList is None or len(clientList) == 0):
retVal += "CreateSliver failed: No aggregates at which to make reservation"
if message != '':
retVal += ": %" % message
self._raise_omni_error(retVal)
client = clientList[0]
url = client.url
clienturn = client.urn
result = None
self.logger.info("Creating sliver(s) from rspec file %s for slice %s", rspecfile, urn)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
# Copy the user config and read the keys from the files into the structure
slice_users = self._get_users_arg(slicename)
if not slice_users or len(slice_users) == 0:
self.logger.warn("No users or SSH keys supplied; you will not be able to SSH in to any compute resources")
op = "CreateSliver"
options = self._build_options(op, slicename, None)
args = [urn, creds, rspec, slice_users]
#--- API version diff:
if self.opts.api_version >= 2:
# Add the options dict
args.append(options)
#---
msg = "Create Sliver %s at %s" % (urn, client.str)
self.logger.debug("Doing createsliver with urn %s, %d creds, rspec of length %d starting '%s...', users struct %s..., options %r", urn, len(creds), len(rspec), rspec[:min(100, len(rspec))], str(slice_users)[:min(180, len(str(slice_users)))], options)
try:
((result, message), client) = self._api_call(client, msg, op,
args)
url = client.url
client.urn = clienturn
except BadClientException as bce:
self._raise_omni_error("Cannot CreateSliver at %s: The AM speaks the wrong API version, not %d. %s" % (client.str, self.opts.api_version, bce.validMsg))
# Get the manifest RSpec out of the result (accounting for API version diffs, ABAC)
(result, message) = self._retrieve_value(result, message, self.framework)
if result:
self.logger.info("Got return from CreateSliver for slice %s at %s:", slicename, client.str)
if rspec_util.is_rspec_string( result, None, None, logger=self.logger ):
result = rspec_util.getPrettyRSpec(result)
(retVal, filename) = _writeRSpec(self.opts, self.logger, result, slicename, clienturn, url, message)
if filename:
self.logger.info("Wrote result of createsliver for slice: %s at AM: %s to file %s", slicename, client.str, filename)
retVal += '\n Saved createsliver results to %s. ' % (filename)
manExpires = None
if result and "<rspec" in result and "expires" in result:
# Try to parse the new sliver expiration from the rspec and print it in the result summary.
# Use a helper function in handler_utils that can be used elsewhere.
manExpires = expires_from_rspec(result, self.logger)
if manExpires is not None:
prstr = "Reservation at %s in slice %s expires at %s (UTC)." % (client.str, slicename, manExpires)
self.logger.info(prstr)
if not (retVal.endswith('.') or retVal.endswith('. ')):
retVal += '.'
retVal += " " + prstr
else:
self.logger.debug("Got None sliver expiration from manifest")
# record new slivers in the SA database if able to do so
try:
if not self.opts.noExtraCHCalls:
agg_urn = self._getURNForClient(client)
exp = slice_exp
if manExpires:
exp = manExpires
self.framework.create_sliver_info(result, urn,
url, exp, None, agg_urn)
else:
self.logger.debug("Per commandline option, not reporting new sliver to clearinghouse")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME: Info only?
self.logger.warn('Error recording new slivers in SA database')
self.logger.debug(e)
# import traceback
# self.logger.debug(traceback.format_exc())
# raise e
# FIXME: When Tony revises the rspec, fix this test
if result and '<RSpec' in result and 'type="SFA"' in result:
# Figure out the login name
# We could of course do this for the user.
prstr = "Please run the omni sliverstatus call on your slice %s to determine your login name to PL resources." % slicename
self.logger.info(prstr)
if not retVal.endswith('.'):
retVal += '.'
retVal += " " + prstr
else:
prStr = "Failed CreateSliver for slice %s at %s." % (slicename, client.str)
if message is None or message.strip() == "":
message = "(no reason given)"
if message:
prStr += " %s" % message
self.logger.warn(prStr)
retVal = prStr
return retVal, result
# End of createsliver
def allocate(self, args):
"""
GENI AM API Allocate <slice name> <rspec file name>
For use with AM API v3+ only. Otherwise, use CreateSliver.
Allocate resources as described in a request RSpec argument to a slice with
the named URN. On success, one or more slivers are allocated, containing
resources satisfying the request, and assigned to the given slice.
Clients must Renew or Provision slivers before the expiration time
(given in the return struct), or the aggregate will automatically Delete them.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
- Note that `--useSliceAggregates` is not honored, as the desired
aggregate usually has no resources in this slice yet.
Note that if multiple aggregates are supplied, the same RSpec will be submitted to each.
Aggregates should ignore parts of the Rspec requesting specific non-local resources (bound requests), but each
aggregate should attempt to satisfy all unbound requests. Note also that allocate() calls
are always all-or-nothing: if the aggregate cannot give everything requested, it gives nothing.
--end-time: Request that new slivers expire at the given time.
The aggregates may allocate the resources, but not be able to grant the requested
expiration time.
Note that per the AM API expiration times will be timezone aware.
Unqualified times are assumed to be in UTC.
Note that the expiration time cannot be past your slice expiration
time (see renewslice).
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-allocate-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
Sample usage:
Basic allocation of resources at 1 AM into myslice
omni.py -V3 -a http://myaggregate/url allocate myslice my-request-rspec.xml
Allocate resources into 2 AMs, requesting a specific sliver end time, save results into specificly named files that include an AM name calculated from the AM URL,
using the slice credential saved in the given file
omni.py -V3 -a http://myaggregate/url -a http://myother/aggregate --end-time 20120909 -o --outputfile myslice-manifest-%a.json --slicecredfile mysaved-myslice-slicecred.xml allocate myslice my-request-rspec.xml
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Allocation with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Allocate is only available in AM API v3+. Use CreateSliver with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)
(slicename, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 2,
"Allocate",
"and a request rspec filename")
# Load up the user's request rspec
rspecfile = None
if not (self.opts.devmode and len(args) < 2):
rspecfile = args[1]
if rspecfile is None: # FIXME: If file type arg, check the file exists: os.path.isfile(rspecfile)
# Dev mode should allow missing RSpec
msg = 'File of resources to request missing: %s' % rspecfile
if self.opts.devmode:
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
try:
# read the rspec into a string, and add it to the rspecs dict
rspec = _derefRSpecNick(self, rspecfile)
except Exception, exc:
msg = "Unable to read rspec file '%s': %s" % (rspecfile, str(exc))
if self.opts.devmode:
rspec = ""
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
# Test if the rspec is really json containing an RSpec, and
# pull out the right thing
rspec = self._maybeGetRSpecFromStruct(rspec)
# Build args
options = self._build_options('Allocate', slicename, None)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
args = [urn, creds, rspec, options]
descripMsg = "slivers in slice %s" % urn
op = 'Allocate'
self.logger.debug("Doing Allocate with urn %s, %d creds, rspec starting: \'%s...\', and options %s", urn, len(creds), rspec[:min(40, len(rspec))], options)
successCnt = 0
retItem = dict()
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients == 0:
msg = "No aggregate specified to submit allocate request to. Use the -a argument."
if self.opts.devmode:
# warn
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
elif numClients > 1:
# info - mention unbound bits will be repeated
self.logger.info("Multiple aggregates will get the same request RSpec; unbound requests will be attempted at multiple aggregates.")
# Do the command for each client
for client in clientList:
self.logger.info("Allocate %s at %s:", descripMsg, client.str)
try:
((result, message), client) = self._api_call(client,
("Allocate %s at %s" % (descripMsg, client.url)),
op,
args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nAllocate failed: " + retVal)
continue
# Make the RSpec more pretty-printed
rspec = None
if result and isinstance(result, dict) and result.has_key('value') and isinstance(result['value'], dict) and result['value'].has_key('geni_rspec'):
rspec = result['value']['geni_rspec']
if rspec and rspec_util.is_rspec_string( rspec, None, None, logger=self.logger ):
rspec = rspec_util.getPrettyRSpec(rspec)
result['value']['geni_rspec'] = rspec
else:
self.logger.debug("No valid RSpec returned!")
else:
self.logger.debug("Return struct missing geni_rspec element!")
# Pull out the result and check it
retItem[ client.url ] = result
(realresult, message) = self._retrieve_value(result, message, self.framework)
badSlivers = self._getSliverAllocStates(realresult, 'geni_allocated')
for sliver in badSlivers.keys():
self.logger.warn("Sliver %s in wrong state! Expected %s, got %s?!", sliver, 'geni_allocated', badSlivers[sliver])
# FIXME: Is the alloc reported here as a failure if some slivers in wrong state?
if realresult:
# Success (maybe partial?)
(header, rspeccontent, rVal) = _getRSpecOutput(self.logger, rspec, slicename, client.urn, client.url, message)
self.logger.debug(rVal)
if realresult and isinstance(realresult, dict) and realresult.has_key('geni_rspec') and rspec and rspeccontent:
realresult['geni_rspec'] = rspeccontent
if isinstance(realresult, dict):
# Hmm. The rspec content looks OK here. But the
# json.dumps seems to screw it up? Quotes get
# double escaped.
prettyResult = json.dumps(realresult, ensure_ascii=True, indent=2)
else:
prettyResult = pprint.pformat(realresult)
# Save out the result
# header="<!-- Allocate %s at AM URL %s -->" % (descripMsg, client.url)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, slicename, client.url, client.urn, "allocate", ".json", numClients)
#self.logger.info("Writing result of allocate for slice: %s at AM: %s to file %s", slicename, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved allocation of %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
else:
retVal += "Allocated %s at %s. \n" % (descripMsg, client.str)
# Check the new sliver expirations
(orderedDates, sliverExps) = self._getSliverExpirations(realresult)
# None case
if len(orderedDates) == 1:
self.logger.info("All slivers expire on %r", orderedDates[0].isoformat())
elif len(orderedDates) == 2:
self.logger.info("%d slivers expire on %r, the rest (%d) on %r", len(sliverExps[orderedDates[0]]), orderedDates[0].isoformat(), len(sliverExps[orderedDates[0]]), orderedDates[1].isoformat())
elif len(orderedDates) == 0:
msg = " 0 Slivers reported allocated!"
self.logger.warn(msg)
retVal += msg
else:
self.logger.info("%d slivers expire on %r, %d on %r, and others later", len(sliverExps[orderedDates[0]]), orderedDates[0].isoformat(), len(sliverExps[orderedDates[0]]), orderedDates[1].isoformat())
if len(orderedDates) > 0:
if len(orderedDates) == 1:
retVal += " All slivers expire on: %s" % orderedDates[0].isoformat()
else:
retVal += " First sliver expiration: %s" % orderedDates[0].isoformat()
self.logger.debug("Allocate %s result: %s" % (descripMsg, prettyResult))
successCnt += 1
else:
# Failure
if message is None or message.strip() == "":
message = "(no reason given)"
retVal += "Allocation of %s at %s failed: %s.\n" % (descripMsg, client.str, message)
self.logger.warn(retVal)
# FIXME: Better message?
# Done with allocate call loop over clients
if numClients == 0:
retVal += "No aggregates at which to allocate %s. %s\n" % (descripMsg, message)
elif numClients > 1:
retVal += "Allocated %s at %d out of %d aggregates.\n" % (descripMsg, successCnt, self.numOrigClients)
elif successCnt == 0:
retVal += "Allocate %s failed at %s" % (descripMsg, clientList[0].url)
self.logger.debug("Allocate Return: \n%s", json.dumps(retItem, indent=2))
return retVal, retItem
# end of allocate
def provision(self, args):
"""
GENI AM API Provision <slice name>
For use with AM API v3+ only. Otherwise, use CreateSliver.
Request that the named geni_allocated slivers be made geni_provisioned,
instantiating or otherwise realizing the resources, such that they have a
valid geni_operational_status and may possibly be made geni_ready for
experimenter use. This operation is synchronous, but may start a longer process,
such as creating and imaging a virtual machine.
Clients must Renew or use slivers before the expiration time
(given in the return struct), or the aggregate will automatically Delete them.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
- Note that `--useSliceAggregates` is not honored, as the desired
aggregate usually has no resources in this slice yet.
-t <type version>: Specify a required manifest RSpec type and version to return.
It skips any AM that doesn't advertise (in GetVersion)
that it supports that format. Default is "GENI 3".
--end-time: Request that new slivers expire at the given time.
The aggregates may provision the resources, but not be able to grant the requested
expiration time.
Note that per the AM API expiration times will be timezone aware.
Unqualified times are assumed to be in UTC.
Note that the expiration time cannot be past your slice expiration
time (see renewslice).
--sliver-urn / -u option: each specifies a sliver URN to provision. If specified,
only the listed slivers will be provisioned. Otherwise, all slivers in the slice will be provisioned.
--best-effort: If supplied, slivers that can be provisioned, will be; some slivers
may not be provisioned, in which case check the geni_error return for that sliver.
If not supplied, then if any slivers cannot be provisioned, the whole call fails
and sliver allocation states do not change.
Note that some aggregates may require provisioning all slivers in the same state at the same
time, per the geni_single_allocation GetVersion return.
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-provision-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
omni_config users section is used to get a set of SSH keys that
should be loaded onto the remote node to allow SSH login, if the
remote resource and aggregate support this.
Sample usage:
Basic provision of allocated resources at 1 AM into myslice
omni.py -V3 -a http://myaggregate/url provision myslice
Provision resources in 2 AMs, requesting a specific sliver end time, save results into specificly named files that include an AM name calculated from the AM URL,
and slice name, using the slice credential saved in the given file. Provision in best effort mode: provision as much as possible
omni.py -V3 -a http://myaggregate/url -a http://myother/aggregate --end-time 20120909 -o --outputfile %s-provision-%a.json --slicecredfile mysaved-myslice-slicecred.xml --best-effort provision myslice
Provision allocated resources in specific slivers
omni.py -V3 -a http://myaggregate/url -u urn:publicid:IDN+myam+sliver+1 -u urn:publicid:IDN+myam+sliver+2 provision myslice
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Provision with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Provision is only available in AM API v3+. Use CreateSliver with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)
# Build up args, options
op = "Provision"
(slicename, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 1,
op)
# Copy the user config and read the keys from the files into the structure
slice_users = self._get_users_arg(slicename)
# If there are slice_users, include that option
options = {}
if slice_users and len(slice_users) > 0:
options['geni_users'] = slice_users
else:
self.logger.warn("No users or SSH keys supplied; you will not be able to SSH in to any compute resources")
options = self._build_options(op, slicename, options)
urnsarg, slivers = self._build_urns(urn)
descripMsg = "slivers in slice %s" % urn
if len(slivers) > 0:
descripMsg = "%d slivers in slice %s" % (len(slivers), urn)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
# Get Clients
successCnt = 0
retItem = dict()
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients == 0:
msg = "No aggregate specified to submit provision request to. Use the -a argument."
if message and message.strip() != "":
msg += " " + message
if self.opts.devmode:
# warn
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
elif numClients > 1 and len(slivers) > 0:
# All slivers will go to all AMs. If not best effort, AM may fail the request if its
# not a local sliver.
# # FIXME: Could partition slivers by AM URN?
msg = "Will do %s %s at all %d AMs - some aggregates may fail the request if given slivers not from that aggregate." % (op, descripMsg, numClients)
if self.opts.geni_best_effort:
self.logger.info(msg)
else:
self.logger.warn(msg + " Consider running with --best-effort in future.")
# Loop over clients doing operation
for client in clientList:
args = [urnsarg, creds]
self.logger.info("%s %s at %s", op, descripMsg, client.str)
try:
mymessage = ""
(options, mymessage) = self._selectRSpecVersion(slicename, client, mymessage, options)
args.append(options)
self.logger.debug("Doing Provision at %s with urns %s, %d creds, options %s", client.str, urnsarg, len(creds), options)
((result, message), client) = self._api_call(client,
("Provision %s at %s" % (descripMsg, client.url)),
op,
args)
if mymessage.strip() != "":
if message is None or message.strip() == "":
message = ""
message = mymessage + ". " + message
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nProvision failed: " + retVal)
continue
# Make the RSpec more pretty-printed
if result and isinstance(result, dict) and result.has_key('value') and isinstance(result['value'], dict) and result['value'].has_key('geni_rspec'):
rspec = result['value']['geni_rspec']
if rspec and rspec_util.is_rspec_string( rspec, None, None, logger=self.logger ):
rspec = rspec_util.getPrettyRSpec(rspec)
result['value']['geni_rspec'] = rspec
else:
self.logger.debug("No valid RSpec returned!")
else:
self.logger.debug("Return struct missing geni_rspec element!")
# Pull out the result
retItem[ client.url ] = result
(realresult, message) = self._retrieve_value(result, message, self.framework)
badSlivers = self._getSliverAllocStates(realresult, 'geni_provisioned')
for sliver in badSlivers.keys():
self.logger.warn("Sliver %s in wrong state! Expected %s, got %s?!", sliver, 'geni_provisioned', badSlivers[sliver])
# FIXME: Is the alloc reported here as a failure if some slivers in wrong state?
if realresult:
# Success
missingSlivers = self._findMissingSlivers(realresult, slivers)
if len(missingSlivers) > 0:
self.logger.warn("%d slivers from request missing in result?!", len(missingSlivers))
self.logger.debug("Slivers requested missing in result: %s", missingSlivers)
sliverFails = self._didSliversFail(realresult)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
# record new slivers in SA database if possible
try:
if not self.opts.noExtraCHCalls:
agg_urn = self._getURNForClient(client)
# Get the slivers actually returned
ret_slivers = self._getSliverResultList(realresult)
self.framework.create_sliver_info(None, urn,
client.url,
slice_exp,
ret_slivers, agg_urn)
else:
self.logger.debug("Per commandline option, not reporting new sliver(s) to clearinghouse")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME: Info only?
self.logger.warn('Error recording new slivers in SA database')
self.logger.debug(e)
# Print out the result
if isinstance(realresult, dict):
prettyResult = json.dumps(realresult, ensure_ascii=True, indent=2)
else:
prettyResult = pprint.pformat(realresult)
header="<!-- Provision %s at AM %s -->" % (descripMsg, client.str)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, slicename, client.url, client.urn, "provision", ".json", numClients)
#self.logger.info("Writing result of provision for slice: %s at AM: %s to file %s", name, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved provision of %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
else:
retVal += "Provisioned %s at %s. \n" % (descripMsg, client.str)
if len(missingSlivers) > 0:
retVal += " - but with %d slivers from request missing in result?! \n" % len(missingSlivers)
if len(sliverFails.keys()) > 0:
retVal += " = but with %d slivers reporting errors. \n" % len(sliverFails.keys())
# Check sliver expiration
(orderedDates, sliverExps) = self._getSliverExpirations(realresult)
# None case
if len(orderedDates) == 1:
self.logger.info("All slivers expire on %r", orderedDates[0].isoformat())
elif len(orderedDates) == 2:
self.logger.info("%d slivers expire on %r, the rest (%d) on %r", len(sliverExps[orderedDates[0]]), orderedDates[0].isoformat(), len(sliverExps[orderedDates[0]]), orderedDates[1].isoformat())
elif len(orderedDates) == 0:
msg = " 0 Slivers reported results!"
self.logger.warn(msg)
retVal += msg
else:
self.logger.info("%d slivers expire on %r, %d on %r, and others later", len(sliverExps[orderedDates[0]]), orderedDates[0].isoformat(), len(sliverExps[orderedDates[0]]), orderedDates[1].isoformat())
if len(orderedDates) > 0:
if len(orderedDates) == 1:
retVal += " All slivers expire on: %s" % orderedDates[0].isoformat()
else:
retVal += " First sliver expiration: %s" % orderedDates[0].isoformat()
self.logger.debug("Provision %s result: %s" % (descripMsg, prettyResult))
if len(missingSlivers) == 0 and len(sliverFails.keys()) == 0:
successCnt += 1
else:
# Failure
if message is None or message.strip() == "":
message = "(no reason given)"
retVal = "Provision of %s at %s failed: %s" % (descripMsg, client.str, message)
self.logger.warn(retVal)
retVal += "\n"
# Done loop over clients
if numClients == 0:
retVal += "No aggregates at which to provision %s. %s\n" % (descripMsg, message)
elif numClients > 1:
retVal += "Provisioned %s at %d out of %d aggregates.\n" % (descripMsg, successCnt, self.numOrigClients)
elif successCnt == 0:
retVal += "Provision %s failed at %s" % (descripMsg, clientList[0].url)
self.logger.debug("Provision Return: \n%s", json.dumps(retItem, indent=2))
return retVal, retItem
# end of provision
def performoperationalaction(self, args):
""" Alias of "poa" which is an implementation of v3 PerformOperationalAction.
"""
return self.poa( args )
def poa(self, args):
"""
GENI AM API PerformOperationalAction <slice name> <action name>
For use with AM API v3+ only. Otherwise, use CreateSliver.
Perform the named operational action on the named slivers or slice, possibly changing
the geni_operational_status of the named slivers. E.G. 'start' a VM. For valid
operations and expected states, consult the state diagram advertised in the
aggregate's advertisement RSpec.
Common `poa` Actions:
Some actions are well known and supported at many aggregates and
resource types. Always check the Ad RSpec for an aggregate to verify
what is supported.
- `geni_start`: Make the resources ready for use (like booting
machines). No options needed
- `geni_restart`: For example, reboot a machine. No options required.
- `geni_stop`: Stop a resource (e.g. shut it down). No options
needed.
- `geni_update_users`: Refresh the set of user accounts and installed
SSH keys on the resource. Takes the option `geni_users`. This action
creates any users specified that do not already exist, and sets the
SSH keys for all users per the list of keys specified - including
removing keys not explicitly listed. The `geni_users` option can be
supplied using the `--optionsfile` argument. If not supplied that
way, then users are read from the omni_config or clearinghouse slice
members, as documented under `createsliver`.
Clients must Renew or use slivers before the expiration time
(given in the return struct), or the aggregate will automatically Delete them.
--sliver-urn / -u option: each specifies a sliver URN on which to perform the given action. If specified,
only the listed slivers will be acted on. Otherwise, all slivers in the slice will be acted on.
Note though that actions are state and resource type specific, so the action may not apply everywhere.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
--slicecredfile Read slice credential from given file, if it exists
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
--best-effort: If supplied, slivers that can be acted on, will be; some slivers
may not be acted on successfully, in which case check the geni_error return for that sliver.
If not supplied, then if any slivers cannot be changed, the whole call fails
and sliver states do not change.
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-poa-geni_start-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
Sample usage:
Do geni_start on slivers in myslice
omni.py -V3 -a http://myaggregate poa myslice geni_start
Do geni_start on 2 slivers in myslice, but continue if 1 fails, and save results to the named file
omni.py -V3 -a http://myaggregate poa --best-effort -o --outputfile %s-start-%a.json -u urn:publicid:IDN+myam+sliver+1 -u urn:publicid:IDN+myam+sliver+2 myslice geni_start
"""
# PerformOperationalAction is a v3 call; v2 gets a warning (most AMs will fail it),
# devmode proceeds anyway, anything else is an error.
if self.opts.api_version < 3:
if self.opts.api_version == 2:
self.logger.info("Running PerformOperationalAction even though you are using AM API v2 - will fail at most AMs.")
elif self.opts.devmode:
self.logger.warn("Trying PerformOperationalAction with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("PerformOperationalAction is only available in AM API v3+. Use CreateSliver with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)
# Build up args, options
op = "PerformOperationalAction"
(slicename, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 2,
op,
"and an action to perform")
# Second positional argument is the operational action name (e.g. 'geni_start').
action = args[1]
if action is None or action.strip() == "":
if self.opts.devmode:
action = ""
self.logger.warn("poa: No action specified....")
else:
self._raise_omni_error("PerformOperationalAction requires an arg of the name of the action to perform")
# check common action typos
# FIXME: Auto correct?
if not self.opts.devmode:
if action.lower() == "start":
self.logger.warn("Action: '%s'. Did you mean 'geni_start'?" % action)
elif action.lower() == "stop":
self.logger.warn("Action: '%s'. Did you mean 'geni_stop'?" % action)
elif action.lower() == "restart":
self.logger.warn("Action: '%s'. Did you mean 'geni_restart'?" % action)
options = self._build_options(op, slicename, None)
# If the action is geni_update_users and we got no geni_users option yet, then call _get_users_arg.
# If we did get a geni_users, then we use that.
# _get_users_arg will check slice members and the omni config (per options)
if action.lower() == 'geni_update_users':
if options and options.has_key('geni_users'):
self.logger.debug("Got geni_users option from optionsfile")
else:
if not options:
options = {}
users = self._get_users_arg(slicename)
if users and len(users) > 0:
options['geni_users'] = users
else:
self.logger.info("No users or keys supplied for geni_update_users")
# Expand the slice URN into sliver URNs if -u/--sliver-urn options were given.
urnsarg, slivers = self._build_urns(urn)
descripMsg = "%s on slivers in slice %s" % (action, urn)
if len(slivers) > 0:
descripMsg = "%s on %d slivers in slice %s" % (action, len(slivers), urn)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
args = [urnsarg, creds, action, options]
self.logger.debug("Doing POA with urns %s, action %s, %d creds, and options %s", urnsarg, action, len(creds), options)
# Get clients
successCnt = 0
retItem = dict()
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients == 0:
msg = "No aggregate specified to submit %s request to. Use the -a argument." % op
if self.opts.devmode:
# warn
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
elif numClients > 1 and len(slivers) > 0:
# All slivers will go to all AMs. If not best effort, AM may fail the request if its
# not a local sliver.
# # FIXME: Could partition slivers by AM URN?
msg = "Will do %s %s at all %d AMs - some aggregates may fail the request if given slivers not from that aggregate." % (op, descripMsg, numClients)
if self.opts.geni_best_effort:
self.logger.info(msg)
else:
self.logger.warn(msg + " Consider running with --best-effort in future.")
# Do poa action on each client
for client in clientList:
self.logger.info("%s %s at %s", op, descripMsg, client.str)
try:
((result, message), client) = self._api_call(client,
("PerformOperationalAction %s at %s" % (descripMsg, client.url)),
op,
args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nPerformOperationalAction failed: " + retVal)
continue
# Record the raw return for this AM, then unpack the actual value from the
# AM API code/value/output struct.
retItem[ client.url ] = result
(realresult, message) = self._retrieve_value(result, message, self.framework)
if realresult is None:
# Failure
if message is None or message.strip() == "":
message = "(no reason given)"
msg = "PerformOperationalAction %s at %s failed: %s \n" % (descripMsg, client.str, message)
retVal += msg
self.logger.warn(msg)
else:
# Success
missingSlivers = self._findMissingSlivers(realresult, slivers)
if len(missingSlivers) > 0:
self.logger.warn("%d slivers from request missing in result?!", len(missingSlivers))
self.logger.debug("%s", missingSlivers)
sliverFails = self._didSliversFail(realresult)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
# Save result
# dict results pretty-print as JSON; any other shape falls back to
# pprint and a .txt output extension.
ftype = ".json"
if isinstance(realresult, dict):
prettyResult = json.dumps(realresult, ensure_ascii=True, indent=2)
# Some POAs return a top level geni_credential
# Save it off separately for convenience
if realresult.has_key('geni_credential'):
# Un-escape literal "\n" sequences in the credential before saving.
cred = realresult['geni_credential'].replace("\\n", "\n")
fname = _maybe_save_slicecred(self, slicename + '-sharedlan', cred)
if fname is not None:
prstr = "Saved shared LAN credential to file '%s'" % fname
retVal += prstr + "\n"
self.logger.info(prstr)
else:
ftype = ".txt"
prettyResult = pprint.pformat(realresult)
# Some POAs return a credential per sliver
# Save those as separate files for readability
if isinstance(realresult, list):
for sliver in realresult:
sliverurn = ''
cred = None
if isinstance(sliver, dict):
if sliver.has_key('geni_sliver_urn'):
sliverurn = sliver['geni_sliver_urn']
if sliver.has_key('geni_credential'):
cred = sliver['geni_credential'].replace("\\n", "\n")
if cred is not None:
fname = _maybe_save_slicecred(self, slicename + '-' + sliverurn + '-sharedlan', cred)
if fname is not None:
prstr = "Saved shared LAN %s credential to file '%s'" % (sliverurn, fname)
retVal += prstr + "\n"
self.logger.info(prstr)
header="PerformOperationalAction result for %s at AM %s:" % (descripMsg, client.str)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, slicename, client.url, client.urn, "poa-" + action, ftype, numClients)
#self.logger.info("Writing result of poa %s at AM: %s to file %s", descripMsg, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
retVal += "PerformOperationalAction %s was successful." % descripMsg
if len(missingSlivers) > 0:
retVal += " - with %d missing slivers?!" % len(missingSlivers)
if len(sliverFails.keys()) > 0:
retVal += " - with %d slivers reporting errors!" % len(sliverFails.keys())
if filename:
retVal += " Saved results at AM %s to file %s. \n" % (client.str, filename)
elif len(prettyResult) < 120:
retVal += ' ' + prettyResult + '\n'
else:
retVal += ' \n'
# Only count this AM as a full success if nothing was missing and no sliver errored.
if len(missingSlivers) == 0 and len(sliverFails.keys()) == 0:
successCnt += 1
# Done loop over clients
# Summarize results across all aggregates.
self.logger.debug("POA %s result: %s", descripMsg, json.dumps(retItem, indent=2))
if numClients == 0:
retVal += "No aggregates at which to PerformOperationalAction %s. %s\n" % (descripMsg, message)
elif numClients > 1:
retVal += "Performed Operational Action %s at %d out of %d aggregates.\n" % (descripMsg, successCnt, self.numOrigClients)
elif successCnt == 0:
retVal += "PerformOperationalAction %s failed at %s" % (descripMsg, clientList[0].url)
return retVal, retItem
# end of poa
def renewsliver(self, args):
"""AM API RenewSliver <slicename> <new expiration time in UTC
or with a timezone>
For use in AM API v1&2. Use renew() in AM API v3+.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
Note that per the AM API expiration times will be timezone aware.
Unqualified times are assumed to be in UTC.
Note that the expiration time cannot be past your slice expiration
time (see renewslice). Some aggregates will
not allow you to _shorten_ your sliver expiration time.
Times are in UTC or supply an explicit timezone, and
should be quoted if they contain spaces or forward slashes.
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
--alap: Renew slivers as long as possible (up to the slice
expiration / time requested). Default is False - either renew
to the requested time, or fail.
"""
if self.opts.api_version >= 3:
if self.opts.devmode:
self.logger.warn("Trying RenewSliver with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("RenewSliver is only available in AM API v1 or v2. Use Renew, or specify the -V2 option to use AM API v2, if the AM supports it.")
# Gather arguments, options
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 2, "RenewSliver", "and new expiration time in UTC")
if len(args) >= 2:
ds = args[1]
else:
ds = None
# noSec=True so that fractional seconds are dropped
(time, time_with_tz, time_string) = self._datetimeFromString(ds, slice_exp, name, noSec=True)
self.logger.info('Renewing Sliver %s until %s (UTC)' % (name, time_with_tz))
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
op = "RenewSliver"
options = self._build_options(op, name, None)
args = [urn, creds, time_string]
#--- AM API version specific
if self.opts.api_version >= 2:
# Add the options dict
args.append(options)
self.logger.debug("Doing renewsliver with urn %s, %d creds, time %s, options %r", urn, len(creds), time_string, options)
# Run renew at each client
successCnt = 0
successList = []
failList = []
(clientList, message) = self._getclients()
numClients = len(clientList)
msg = "Renew Sliver %s on " % (urn)
for client in clientList:
try:
((res, message), client) = self._api_call(client,
msg + str(client.url),
op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nRenewSliver failed: " + retVal)
continue
outputstr = None
if self.opts.alap:
# Get the output from the res - it will have the new
# sliver expiration
if isinstance(res, dict) and res.has_key('output') and res['output'] is not None and str(res['output']).strip() != "":
outputstr = str(res['output']).strip()
# Get the boolean result out of the result (accounting for API version diffs, ABAC)
(res, message) = self._retrieve_value(res, message, self.framework)
if not res:
prStr = "Failed to renew sliver %s at %s (got result '%s')" % (urn, (client.str if client.nick else client.urn), res)
if message != "":
if not prStr.endswith('.'):
prStr += '.'
prStr += " " + message
else:
prStr += " (no reason given)"
if numClients == 1:
retVal += prStr + "\n"
self.logger.warn(prStr)
failList.append( client.url )
else:
newExp = time_with_tz.isoformat()
gotALAP = False
if self.opts.alap:
if not outputstr or outputstr.strip() == "":
self.logger.info("Querying AM for actual sliver expiration...")
# Call sliverstatus
# If result haskey 'pg_expires' then make that
# outputstr
# elif haskey geni_resources and that haskey
# orca_expires then make that outputstr
# use same creds but diff args & options
try:
args2 = [urn, creds]
options2 = self._build_options('SliverStatus', name, None)
# API version specific
if self.opts.api_version >= 2:
# Add the options dict
args2.append(options2)
message2 = ""
status = None
((status, message2), client2) = self._api_call(client,
'SliverStatus of %s at %s' % (urn, str(client.url)),
'SliverStatus', args2)
# Get the dict status out of the result (accounting for API version diffs, ABAC)
(status, message1) = self._retrieve_value(status, message2, self.framework)
exps = expires_from_status(status, self.logger)
if len(exps) > 1:
# More than 1 distinct sliver expiration found
# FIXME: Sort and take first?
exps = exps.sort()
self.logger.debug("Found %d different expiration times. Using first", len(exps))
outputstr = exps[0].isoformat()
elif len(exps) == 0:
self.logger.debug("Failed to parse a sliver expiration from status")
else:
outputstr = exps[0].isoformat()
except Exception, e:
self.logger.debug("Failed SliverStatus to get real expiration: %s", e)
if outputstr:
try:
newExpO = dateutil.parser.parse(str(outputstr), tzinfos=tzd)
newExpO = naiveUTC(newExpO)
newExpO_tz = newExpO.replace(tzinfo=dateutil.tz.tzutc())
newExp = newExpO_tz.isoformat()
if abs(time - newExpO) > datetime.timedelta.resolution:
gotALAP = True
self.logger.debug("Got new sliver expiration from output field. Orig %s != new %s", time, newExpO)
except:
self.logger.debug("Failed to parse a time from the RenewSliver output - assume got requested time. Output: '%s'", outputstr)
else:
self.logger.debug("Could not determine actual sliver expiration after renew alap")
prStr = "Renewed sliver %s at %s until %s (UTC)" % (urn, (client.str if client.nick else client.urn), newExp)
if gotALAP:
prStr = prStr + " (not requested %s UTC), which was as long as possible for this AM" % time_with_tz.isoformat()
elif self.opts.alap and not outputstr:
prStr = prStr + " (or as long as possible at this AM)"
self.logger.info(prStr)
if not self.opts.noExtraCHCalls:
try:
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
sliver_urns = self.framework.list_sliverinfo_urns(urn, agg_urn)
# We only get here if the framework implements list_sliverinfo_urns
if not sliver_urns:
sliver_urns = []
# Use sliverstatus to augment the list of slivers in this slice at this AM
# This way we catch slivers that were never recorded.
# Only do this if we have 0 slivers, to limit times we incur the expense of
# an extra AM API call.
if len(sliver_urns) == 0:
st = None
streal = None
try:
args2 = [urn, creds]
ops = self._build_options('SliverStatus', name, None)
args2.append(ops)
((st, m), c) = self._api_call(client,
"Sliverstatus of %s at %s" % (urn, agg_urn),
'SliverStatus', args2)
(streal, m2) = self._retrieve_value(st, m, self.framework)
#self.logger.debug("Got st %s", streal)
except Exception, e:
self.logger.debug("Failed Sliverstatus to list slivers after renew of %s at %s: %s", urn, agg_urn, e)
if streal and isinstance(streal, dict) and streal.has_key('geni_resources'):
for s in streal['geni_resources']:
#self.logger.debug("Got s %s", s)
if s.has_key('geni_urn') and urn_util.is_valid_urn_bytype(s['geni_urn'], 'sliver'):
slice_auth = slice_urn[0 : slice_urn.find('slice+')]
surn = s['geni_urn']
if not surn in sliver_urns:
sliver_urns.append(surn)
elif s.has_key('geni_urn'):
surn = s['geni_urn']
if surn is None:
surn = ""
surn = surn.strip()
if surn.startswith(urn) and agg_urn is not None and agg_urn != "" and ("foam" in agg_urn or "al2s" in agg_urn):
# Work around a FOAM/AL2S bug producing bad sliver URNs
# See http://groups.geni.net/geni/ticket/1294
if not surn in sliver_urns:
sliver_urns.append(surn)
self.logger.debug("Malformed sliver URN '%s'. Assuming this is OK anyhow at this FOAM based am: %s. See http://groups.geni.net/geni/ticket/1294", surn, agg_urn)
# End of loop over status return elems
for sliver_urn in sliver_urns:
self.framework.update_sliver_info(agg_urn, urn, sliver_urn,
newExp)
else:
self.logger.info("Not updating recorded sliver expirations - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME: Only info?
self.logger.warn('Error updating sliver record in SA database')
self.logger.debug(e)
import traceback
self.logger.debug(traceback.format_exc())
else:
self.logger.debug("Per commandline option, not updating sliver info record at clearinghouse")
if numClients == 1:
retVal += prStr + "\n"
successCnt += 1
successList.append( client.url )
if numClients == 0:
retVal += "No aggregates on which to renew slivers for slice %s. %s\n" % (urn, message)
elif numClients > 1:
if self.opts.alap:
# FIXME: Say more about where / how long it was renewed?
retVal += "Renewed slivers on %d out of %d aggregates for slice %s until %s (UTC) or as long as possible\n" % (successCnt, self.numOrigClients, urn, time_with_tz)
else:
retVal += "Renewed slivers on %d out of %d aggregates for slice %s until %s (UTC)\n" % (successCnt, self.numOrigClients, urn, time_with_tz)
return retVal, (successList, failList)
# End of renewsliver
def renew(self, args):
"""AM API Renew <slicename> <new expiration time in UTC
or with a timezone>
For use with AM API v3+. Use RenewSliver() in AM API v1&2.
This command will renew your resources at each aggregate up to the
specified time. This time must be less than or equal to the time
available to the slice (see `print_slice_expiration` and
`renewslice`). Times are in UTC or supply an explicit timezone, and
should be quoted if they contain spaces or forward slashes.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
Note that per the AM API expiration times will be timezone aware.
Unqualified times are assumed to be in UTC.
Note that the expiration time cannot be past your slice expiration
time (see renewslice). Some aggregates will
not allow you to _shorten_ your sliver expiration time.
--sliver-urn / -u option: each specifies a sliver URN to renew. If specified,
only the listed slivers will be renewed. Otherwise, all slivers in the slice will be renewed.
--best-effort: If supplied, slivers that can be renewed, will be; some slivers
may not be renewed, in which case check the geni_error return for that sliver.
If not supplied, then if any slivers cannot be renewed, the whole call fails
and sliver expiration times do not change.
When renewing multiple slivers, note that slivers in the geni_allocated state are treated
differently than slivers in the geni_provisioned state, and typically are restricted
to shorter expiration times. Users are recommended to supply the geni_best_effort option,
and to consider operating on only slivers in the same state.
Note that some aggregates may require renewing all slivers in the same state at the same
time, per the geni_single_allocation GetVersion return.
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-renew-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
--alap: Renew slivers as long as possible (up to the slice
expiration / time requested). Default is False - either renew
to the requested time, or fail.
Sample usage:
Renew slivers in slice myslice to the given time; fail the call if all slivers cannot be renewed to this time
omni.py -V3 -a http://myaggregate/url renew myslice 20120909
Renew slivers in slice myslice to the given time; any slivers that cannot be renewed to this time, stay as they were, while others are renewed
omni.py -V3 -a http://myaggregate/url --best-effort renew myslice "2012/09/09 12:00"
Renew the given sliver in myslice at this AM to the given time and write the result struct to the given file
omni.py -V3 -a http://myaggregate/url -o --outputfile %s-renew-%a.json -u urn:publicid:IDN+myam+sliver+1 renew myslice 20120909
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Renew with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Renew is only available in AM API v3+. Use RenewSliver with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)
# Gather options,args
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred,
retVal, slice_exp) = self._args_to_slicecred(args, 2,
"Renew",
"and new expiration time in UTC")
time = datetime.datetime.max
if len(args) >= 2:
ds = args[1]
else:
ds = None
# noSec=True so that fractional seconds are dropped
(time, time_with_tz, time_string) = self._datetimeFromString(ds, slice_exp, name, noSec=True)
self.logger.info('Renewing Slivers in slice %s until %s (UTC)' % (name, time_with_tz))
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
urnsarg, slivers = self._build_urns(urn)
descripMsg = "slivers in slice %s" % urn
if len(slivers) > 0:
descripMsg = "%d slivers in slice %s" % (len(slivers), urn)
op = 'Renew'
args = [urnsarg, creds, time_string]
# Add the options dict
options = self._build_options(op, name, None)
args.append(options)
self.logger.debug("Doing renew with urns %s, %d creds, time %s, options %r", urnsarg, len(creds), time_string, options)
# Call renew at each client
successCnt = 0
(clientList, message) = self._getclients()
numClients = len(clientList)
retItem = dict()
msg = "Renew %s at " % (descripMsg)
for client in clientList:
try:
((res, message), client) = self._api_call(client, msg + client.url, op,
args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nRenew failed: " + retVal)
continue
retItem[client.url] = res
# Get the boolean result out of the result (accounting for API version diffs, ABAC)
(res, message) = self._retrieve_value(res, message, self.framework)
if res is None:
prStr = "Failed to renew %s at %s" % (descripMsg, (client.str if client.nick else client.urn))
if message != "":
prStr += ": " + message
else:
prStr += " (no reason given)"
if numClients == 1:
retVal += prStr + "\n"
self.logger.warn(prStr)
else:
prStr = "Renewed %s at %s until %s (UTC)" % (descripMsg, (client.str if client.nick else client.urn), time_with_tz.isoformat())
self.logger.info(prStr)
# Look inside return. Did all slivers we asked about report results?
# For each that did, did any fail?
missingSlivers = self._findMissingSlivers(res, slivers)
if len(missingSlivers) > 0:
msg = " - but %d slivers from request missing in result?!" % len(missingSlivers)
self.logger.warn(msg)
self.logger.debug("%s", missingSlivers)
prStr += msg
sliverFails = self._didSliversFail(res)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
if len(sliverFails.keys()) > 0:
prStr += " - with %d slivers reporting errors!" % len(sliverFails.key())
(orderedDates, sliverExps) = self._getSliverExpirations(res, time)
if len(orderedDates) == 1 and orderedDates[0] == time:
self.logger.info("All slivers expire as requested on %r", time_with_tz.isoformat())
elif len(orderedDates) == 1:
self.logger.warn("Slivers expire on %r, not as requested %r", orderedDates[0].isoformat(), time_with_tz.isoformat())
# self.logger.warn("timedelta: %r", time - orderedDates[0])
elif len(orderedDates) == 0:
msg = " 0 Slivers reported results!"
self.logger.warn(msg)
retVal += msg
else:
firstTime = None
firstCount = 0
if sliverExps.has_key(time):
expectedCount = sliverExps[time]
else:
expectedCount = 0
for time in orderedDates:
if time == requestedExpiration or time - requestedExpiration < datetime.timedelta.resolution:
continue
firstTime = time
firstCount = len(sliverExps[time])
break
self.logger.warn("Slivers do not all expire as requested: %d as requested (%r), but %d expire on %r, and others at %d other times", expectedCount, time_with_tz.isoformat(), firstCount, firstTime.isoformat(), len(orderedDates) - 2)
if not self.opts.noExtraCHCalls:
# record results in SA database
try:
agg_urn = self._getURNForClient(client)
slivers = self._getSliverResultList(res)
for sliver in slivers:
if isinstance(sliver, dict) and \
sliver.has_key('geni_sliver_urn') and \
sliver.has_key('geni_expires'):
# Exclude slivers with
# geni_allocation_status of geni_allocated - they
# are not yet in the DB
if sliver.has_key('geni_allocation_status') and \
sliver['geni_allocation_status'] == 'geni_allocated':
self.logger.debug("Not recording updated sliver that is only allocated: %s", sliver)
continue
# FIXME: Exclude slivers in sliverFails (had errors)?
if sliver['geni_sliver_urn'] in sliverFails.keys():
self.logger.debug("Not recording sliver that had renew error: %s", sliver)
continue
self.framework.update_sliver_info \
(agg_urn, urn, sliver['geni_sliver_urn'], sliver['geni_expires'])
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME: Info only?
self.logger.warn('Error updating sliver record in SA database')
self.logger.debug(e)
else:
self.logger.debug("Per commandline option, not updating sliver record at clearinghouse")
# Save results
if isinstance(res, dict):
prettyResult = json.dumps(res, ensure_ascii=True, indent=2)
else:
prettyResult = pprint.pformat(res)
header="Renewed %s at AM %s" % (descripMsg, client.str)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, name, client.url, client.urn, "renewal", ".json", numClients)
#self.logger.info("Writing result of renew for slice: %s at AM: %s to file %s", name, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved renewal on %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
if numClients == 1:
retVal += prStr + "\n"
if len(sliverFails.keys()) == 0 and len(missingSlivers) == 0:
successCnt += 1
# End of loop over clients
if numClients == 0:
retVal += "No aggregates on which to renew slivers for slice %s. %s\n" % (urn, message)
elif numClients > 1:
retVal += "Renewed slivers on %d out of %d aggregates for slice %s until %s (UTC)\n" % (successCnt, self.numOrigClients, urn, time_with_tz)
self.logger.debug("Renew Return: \n%s", json.dumps(retItem, indent=2))
return retVal, retItem
# End of renew
def sliverstatus(self, args):
"""AM API SliverStatus <slice name>
For use in AM API v1&2; use status() in API v3+.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-status-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
"""
if self.opts.api_version >= 3:
if self.opts.devmode:
self.logger.warn("Trying SliverStatus with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("SliverStatus is only available in AM API v1 or v2. Use Status, or specify the -V2 option to use AM API v2, if the AM supports it.")
# Build up args, options
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 1, "SliverStatus")
successCnt = 0
retItem = {}
args = []
creds = []
op = 'SliverStatus'
# Query status at each client
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients > 0:
self.logger.info('Status of Slice %s:' % urn)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
args = [urn, creds]
options = self._build_options(op, name, None)
# API version specific
if self.opts.api_version >= 2:
# Add the options dict
args.append(options)
self.logger.debug("Doing sliverstatus with urn %s, %d creds, options %r", urn, len(creds), options)
else:
prstr = "No aggregates available to get slice status at: %s" % message
retVal += prstr + "\n"
self.logger.warn(prstr)
msg = "%s of %s at " % (op, urn)
# Call SliverStatus on each client
for client in clientList:
try:
((rawstatus, message), client) = self._api_call(client,
msg + str(client.url),
op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nSliverStatus failed: " + retVal)
continue
rawResult = rawstatus
amapiError = None
status = None
try:
# Get the dict status out of the result (accounting for API version diffs, ABAC)
(status, message) = self._retrieve_value(rawstatus, message, self.framework)
except AMAPIError, amapiError:
# Would raise an AMAPIError.
# But that loses the side-effect of deleting any sliverinfo records.
# So if we're doing those, hold odd on raising the error
if self.opts.noExtraCHCalls:
raise amapiError
else:
self.logger.debug("Got AMAPIError retrieving value from sliverstatus. Hold it until we do any sliver info processing")
if status:
if not isinstance(status, dict):
# malformed sliverstatus return
self.logger.warn('Malformed sliver status from AM %s. Expected struct, got type %s.' % (client.str, status.__class__.__name__))
# FIXME: Add something to retVal that the result was malformed?
if isinstance(status, str):
prettyResult = str(status)
else:
prettyResult = pprint.pformat(status)
else:
try:
prettyResult = json.dumps(status, ensure_ascii=True, indent=2)
except Exception, jde:
self.logger.debug("Failed to parse status as JSON: %s", jde)
prettyResult = pprint.pformat(status)
if status.has_key('geni_status'):
msg = "Slice %s at AM %s has overall SliverStatus: %s"% (name, client.str, status['geni_status'])
self.logger.info(msg)
retVal += msg + ".\n "
# FIXME: Do this even if many AMs?
exps = expires_from_status(status, self.logger)
if len(exps) > 1:
# More than 1 distinct sliver expiration found
# FIXME: Sort and take first?
exps = exps.sort()
outputstr = exps[0].isoformat()
msg = "Resources in slice %s at AM %s expire at %d different times. First expiration is %s UTC" % (name, client.str, len(exps), outputstr)
elif len(exps) == 0:
self.logger.debug("Failed to parse a sliver expiration from status")
msg = None
else:
outputstr = exps[0].isoformat()
msg = "Resources in slice %s at AM %s expire at %s UTC" % (name, client.str, outputstr)
if msg:
self.logger.info(msg)
retVal += msg + ".\n "
# #634: Get the sliverinfo
# Then sync these up: create an entry if there isn't one, or update it with the correct expiration
if not self.opts.noExtraCHCalls:
try:
# Get the Agg URN for this client
agg_urn = self._getURNForClient(client)
self.logger.debug("Syncing sliver_info records with CH....")
if urn_util.is_valid_urn(agg_urn):
# Extract sliver_urn / expiration pairs from sliverstatus
# But this is messy. An AM might report a sliver in the top level geni_urn.
# Or it might report multiple geni_resources, and the URN in each geni_urn might be the slivers.
# For PG and GRAM and EG, look for geni_urn under geni_resources
# At DCN, the geni_urn under geni_resources is what I want, although the URN type says 'slice'
poss_slivers = []
if status.has_key('geni_resources'):
for resource in status['geni_resources']:
if resource and isinstance(resource, dict) and resource.has_key('geni_urn'):
gurn = resource['geni_urn']
if urn_util.is_valid_urn(gurn):
poss_slivers.append(gurn.strip())
# self.logger.debug("AM poss_slivers: %s", str(poss_slivers))
# Grab the first expiration. In APIv2 that's the only real one.
if isinstance(exps, list):
if len(exps) > 0:
expI = exps[0]
else:
expI = None
else:
expI = exps
# I'd like to be able to tell the SA to delete all slivers registered for
# this slice/AM, but the API says sliver_urn is required
slivers_by_am = self.framework.list_sliver_infos_for_slice(urn)
if slivers_by_am is None or not slivers_by_am.has_key(agg_urn):
# CH has no slivers. So all slivers the AM reported must be sent to the CH
# FIXME: status should be a list of structs which each has a geni_urn or geni_sliver_urn
# So it could be status['geni_resources']. Mostly I think that works.
s_es = []
if status.has_key('geni_resources'):
s_es = status['geni_resources']
self.logger.debug("CH listed 0 sliver_info records, so creating them all from status info")
# Create an entry
self.framework.create_sliver_info(None, urn,
client.url,
expI,
s_es, agg_urn)
else:
# No struct of slivers to report
pass
else:
# Need to reconcile the CH list and the AM list
ch_slivers = slivers_by_am[agg_urn]
self.logger.debug("Reconciling %d CH sliver infos against %d AM reported slivers", len(ch_slivers.keys()), len(poss_slivers))
# For each CH sliver, if not in poss_slivers, then remove it
# Else if expirations differ, update it
for sliver in ch_slivers.keys():
chexpo = None
if ch_slivers[sliver].has_key('SLIVER_INFO_EXPIRATION'):
chexp = ch_slivers[sliver]['SLIVER_INFO_EXPIRATION']
chexpo = naiveUTC(dateutil.parser.parse(chexp, tzinfos=tzd))
if sliver not in poss_slivers:
self.logger.debug("CH lists sliver '%s' that is not in AM list; delete", sliver)
# CH reported a sliver not reported by the AM. Delete it
self.framework.delete_sliver_info(sliver)
else:
if chexpo is None or (expI is not None and abs(chexpo - expI) > datetime.timedelta.resolution):
self.logger.debug("CH sliver %s expiration %s != AM exp %s; update at CH", sliver, str(chexpo), str(expI))
# update the recorded expiration time to be accurate
self.framework.update_sliver_info(agg_urn, urn, sliver,
expI)
else:
# CH has what we have
# self.logger.debug("CH agrees about expiration of %s: %s", sliver, expI)
pass
# Then for each AM sliver, if not in ch_slivers, add it
sliver_statusstruct = []
for amsliver in poss_slivers:
if amsliver not in ch_slivers.keys():
self.logger.debug("AM lists sliver %s not reported by CH", amsliver)
# AM reported a sliver not reported by the CH
if status.has_key('geni_resources'):
s_es = status['geni_resources']
for resource in status['geni_resources']:
if resource and isinstance(resource, dict) and resource.has_key('geni_urn'):
gurn = resource['geni_urn']
if gurn.strip() == amsliver:
sliver_statusstruct.append(resource)
break
if len(sliver_statusstruct) > 0:
self.logger.debug("Creating %s sliver records at CH", len(sliver_statusstruct))
# Create an entry for each sliver that was missing
self.framework.create_sliver_info(None, urn,
client.url,
expI,
sliver_statusstruct, agg_urn)
# End of else block to reconcile CH vs AM sliver lists
else:
self.logger.debug("Not syncing slivers with CH - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME: info only?
self.logger.warn('Error syncing slivers with SA database')
self.logger.debug(e)
else:
self.logger.debug("Per commandline option, not syncing slivers with clearinghouse")
# End of block to sync sliver_info with CH
# End of block to handle status is a dict
# Save/print out result
header="Sliver status for Slice %s at AM %s" % (urn, client.str)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, name, client.url, client.urn, "sliverstatus", ".json", numClients)
#self.logger.info("Writing result of sliverstatus for slice: %s at AM: %s to file %s", name, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved sliverstatus on %s at AM %s to file %s. \n" % (name, client.str, filename)
retItem[ client.url ] = status
successCnt+=1
else:
# #634:
# delete any sliver_infos for this am/slice
# However, not all errors mean there are no slivers here.
# Based on testing 8/2014, all AMs return code 2 or code 12 if there are no slivers here
# so that it's safe to delete any sliver_info records.
# Use code 15 too as that seems reasonable.
# SEARCHFAILED (12), EXPIRED (15)
# EG uses ERROR (2), but that's too general so avoid that one
doDelete = False
code = -1
if rawResult is not None and isinstance(rawResult, dict) and rawResult.has_key('code') and isinstance(rawResult['code'], dict) and 'geni_code' in rawResult['code']:
code = rawResult['code']['geni_code']
if code==12 or code==15:
doDelete=True
if doDelete and not self.opts.noExtraCHCalls:
self.logger.debug("SliverStatus failed with an error that suggests no slice at this AM - delete all sliverinfo records: %s", message)
# delete sliver info from SA database
try:
# Get the Agg URN for this client
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
# I'd like to be able to tell the SA to delete all slivers registered for
# this slice/AM, but the API says sliver_urn is required
sliver_urns = self.framework.list_sliverinfo_urns(urn, agg_urn)
for sliver_urn in sliver_urns:
self.framework.delete_sliver_info(sliver_urn)
else:
self.logger.debug("Not ensuring with CH that AM %s slice %s has no slivers - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
self.logger.info('Error ensuring slice has no slivers recorded in SA database at this AM')
self.logger.debug(e)
else:
if self.opts.noExtraCHCalls:
self.logger.debug("Per commandline option, not ensuring clearinghouse lists no slivers for this slice.")
else:
self.logger.debug("Based on return error code, (%d), not deleting any slivers here.", code)
if amapiError is not None:
self.logger.debug("Having processed the sliverstatus return, now raise the AMAPI Error")
raise amapiError
# FIXME: Put the message error in retVal?
# FIXME: getVersion uses None as the value in this case. Be consistent
retItem[ client.url ] = False
if message is None or message.strip() == "":
if status is None:
message = "(no reason given, missing result)"
elif status == False:
message = "(no reason given, False result)"
elif status == 0:
message = "(no reason given, 0 result)"
else:
message = "(no reason given, empty result)"
retVal += "\nFailed to get SliverStatus on %s at AM %s: %s\n" % (name, client.str, message)
# End of loop over clients
# FIXME: Return the status if there was only 1 client?
if numClients > 0:
retVal += "Returned status of slivers on %d of %d possible aggregates." % (successCnt, self.numOrigClients)
return retVal, retItem
# End of sliverstatus
def status(self, args):
"""AM API Status <slice name>
For use in AM API v3+. See sliverstatus for the v1 and v2 equivalent.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
--sliver-urn / -u option: each specifies a sliver URN to get status on. If specified,
only the listed slivers will be queried. Otherwise, all slivers in the slice will be queried.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-poa-geni_start-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
Sample usage:
Get status on the slice at given aggregate
omni.py -V3 -a http://aggregate/url status myslice
Get status on specific slivers and save the result to a file
omni.py -V3 -a http://aggregate/url -o --outputfile %s-status-%a.json -u urn:publicid:IDN+myam+sliver+1 -u urn:publicid:IDN+myam+sliver+2 status myslice
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Status with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Status is only available in AM API v3+. Use SliverStatus with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)
# Build up args, options
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred,
retVal, slice_exp) = self._args_to_slicecred(args, 1, "Status")
successCnt = 0
retItem = {}
args = []
creds = []
# Get clients
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients > 0:
self.logger.info('Status of Slice %s:' % urn)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
urnsarg, slivers = self._build_urns(urn)
args = [urnsarg, creds]
# Add the options dict
options = self._build_options('Status', name, None)
args.append(options)
self.logger.debug("Doing status with urns %s, %d creds, options %r", urnsarg, len(creds), options)
else:
prstr = "No aggregates available to get slice status at: %s" % message
retVal += prstr + "\n"
self.logger.warn(prstr)
descripMsg = "slivers in slice %s" % urn
if len(slivers) > 0:
descripMsg = "%d slivers in slice %s" % (len(slivers), urn)
# Do Status at all clients
op = 'Status'
msg = "Status of %s at " % (descripMsg)
for client in clientList:
try:
((status, message), client) = self._api_call(client,
msg + str(client.url),
op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nStatus failed: " + retVal)
continue
retItem[client.url] = status
# Get the dict status out of the result (accounting for API version diffs, ABAC)
(status, message) = self._retrieve_value(status, message, self.framework)
if not status:
# #634:
# delete any sliver_infos for this am/slice
# However, not all errors mean there are no slivers here.
# Based on testing 8/2014, all AMs return code 2 or code 12 if there are no slivers here
# so that it's safe to delete any sliver_info records.
# Use code 15 too as that seems reasonable.
# Use SEARCHFAILED (12), EXPIRED (15)
# EG uses ERROR (2), but that will show up in other places. So avoid that one.
# Also note that if not geni_best_effort
# that a failure may mean only part failed
doDelete = False
raw = retItem[client.url]
code = -1
if raw is not None and isinstance(raw, dict) and raw.has_key('code') and isinstance(raw['code'], dict) and 'geni_code' in raw['code']:
code = raw['code']['geni_code']
# Technically if geni_best_effort and got this failure, then all slivers are bad
# But that's only true if the AM honors geni_best_effort, which it may not
# So only assume they're all bad if we didn't request any specific slivers.
if len(slivers) == 0:
if code==12 or code==15:
doDelete=True
if not self.opts.noExtraCHCalls:
if doDelete:
self.logger.debug("Status failed with an error that suggests no slice at this AM or requested slivers not at this AM - delete all/requested sliverinfo records: %s", message)
# delete sliver info from SA database
try:
if len(slivers) > 0:
self.logger.debug("Status failed - assuming all %d sliver URNs asked about are invalid and not at this AM - delete from CH", len(slivers))
for sliver in slivers:
self.framework.delete_sliver_info(sliver)
else:
self.logger.debug("Status failed: assuming this slice has 0 slivers at this AM. Ensure CH lists none.")
# Get the Agg URN for this client
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
# I'd like to be able to tell the SA to delete all slivers registered for
# this slice/AM, but the API says sliver_urn is required
sliver_urns = self.framework.list_sliverinfo_urns(urn, agg_urn)
for sliver_urn in sliver_urns:
self.framework.delete_sliver_info(sliver_urn)
else:
self.logger.debug("Not ensuring with CH that AM %s slice %s has no slivers - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
self.logger.info('Error ensuring slice has no slivers recorded in SA database at this AM')
self.logger.debug(e)
else:
self.logger.debug("Given AM return code (%d) and # requested slivers (%d), not telling CH to not list these slivers.", code, len(slivers))
else:
self.logger.debug("Per commandline option, not ensuring clearinghouse lists no slivers for this slice.")
# FIXME: Put the message error in retVal?
# FIXME: getVersion uses None as the value in this case. Be consistent
fmt = "\nFailed to get Status on %s at AM %s: %s\n"
if message is None or message.strip() == "":
message = "(no reason given)"
retVal += fmt % (descripMsg, client.str, message)
continue
# End of block to handle got no good status (got an error)
missingSlivers = self._findMissingSlivers(status, slivers)
if len(missingSlivers) > 0:
self.logger.warn("%d slivers from request missing in result?!", len(missingSlivers))
self.logger.debug("%s", missingSlivers)
# Summarize result
retcnt = len(slivers) # Num slivers reporting results
if retcnt > 0:
retcnt = retcnt - len(missingSlivers)
else:
retcnt = len(self._getSliverResultList(status))
retVal += "Retrieved Status on %d slivers in slice %s at %s:\n" % (retcnt, urn, client.str)
sliverFails = self._didSliversFail(status)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
# Summarize sliver expiration
(orderedDates, sliverExps) = self._getSliverExpirations(status, None)
if len(orderedDates) == 1:
msg = "All slivers expire on %r." % orderedDates[0].isoformat()
self.logger.info(msg)
elif len(orderedDates) == 0:
msg = "0 Slivers reported results!"
self.logger.warn(msg)
else:
firstTime = orderedDates[0]
firstCount = len(sliverExps[firstTime])
msg = "Slivers expire on %d times, next is %d at %r, and others at %d other times." % (len(orderedDates), firstCount, firstTime.isoformat(), len(orderedDates) - 1)
self.logger.info(msg)
retVal += " " + msg + "\n"
# Summarize overall status
# Get all statuses in a hash (value is count)
alloc_statuses, op_statuses = self._getSliverStatuses(status)
# If only 1 sliver, get its allocation and operational status
# if alloc or operational status same for all slivers, say so
# Else say '%d slivers have %d different statuses
# if op state includes geni_failed or geni_pending_allocation, say so
# If alloc state includes geni_unallocated, say so
statusMsg = ' '
if len(alloc_statuses) == 1:
if retcnt == 1:
statusMsg += "Sliver is "
else:
statusMsg += "All slivers are "
statusMsg += "in allocation state %s.\n" % alloc_statuses.keys()[0]
else:
statusMsg += " %d slivers have %d different allocation statuses" % (retcnt, len(alloc_statuses.keys()))
if 'geni_unallocated' in alloc_statuses:
statusMsg += "; some are geni_unallocated.\n"
else:
if not statusMsg.endswith('.'):
statusMsg += '.'
statusMsg += "\n"
if len(op_statuses) == 1:
if retcnt == 1:
statusMsg += " Sliver is "
else:
statusMsg += " All slivers are "
statusMsg += "in operational state %s.\n" % op_statuses.keys()[0]
else:
statusMsg = " %d slivers have %d different operational statuses" % (retcnt, len(op_statuses.keys()))
if 'geni_failed' in op_statuses:
statusMsg += "; some are geni_failed"
if 'geni_pending_allocation' in op_statuses:
statusMsg += "; some are geni_pending_allocation"
else:
if not statusMsg.endswith('.'):
statusMsg += '.'
statusMsg += "\n"
statusMsg += "\n"
# Resulting text added to retVal (below). But do this even if lots AMs? Or only if limited # of AMs?
# Print or save out result
if not isinstance(status, dict):
# malformed status return
self.logger.warn('Malformed status from AM %s. Expected struct, got type %s.' % (client.str, status.__class__.__name__))
# FIXME: Add something to retVal that the result was malformed?
if isinstance(status, str):
prettyResult = str(status)
else:
prettyResult = pprint.pformat(status)
else:
prettyResult = json.dumps(status, ensure_ascii=True, indent=2)
header="Status for %s at AM %s" % (descripMsg, client.str)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, name, client.url, client.urn, "status", ".json", numClients)
#self.logger.info("Writing result of status for slice: %s at AM: %s to file %s", name, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved status on %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
if len(missingSlivers) > 0:
retVal += " - %d slivers missing from result!? \n" % len(missingSlivers)
if len(sliverFails.keys()) > 0:
retVal += " - %d slivers failed?! \n" % len(sliverFails.keys())
retVal += statusMsg
if len(missingSlivers) == 0 and len(sliverFails.keys()) == 0:
successCnt+=1
# Now sync up slivers with CH
if not self.opts.noExtraCHCalls:
# ensure have agg_urn
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
slivers_by_am = None # Slivers in this slice by AM CH reports
try:
slivers_by_am = self.framework.list_sliver_infos_for_slice(urn)
# Gather info on what the AM reported
resultValue = self._getSliverResultList(status)
status_structs = {} # dict by URN of sliver status structs
expirations = {} # dict by URN of sliver expiration string
if len(resultValue) == 0:
self.logger.debug("Result value not a list or empty")
else:
for sliver in resultValue:
if not isinstance(sliver, dict):
self.logger.debug("entry in result list was not a dict")
continue
if not sliver.has_key('geni_sliver_urn') or str(sliver['geni_sliver_urn']).strip() == "":
self.logger.debug("entry in result had no 'geni_sliver_urn'")
else:
slivurn = sliver['geni_sliver_urn']
status_structs[slivurn] = sliver
if not sliver.has_key('geni_expires'):
self.logger.debug("Sliver %s missing 'geni_expires'", slivurn)
expirations[slivurn] = slice_exp # Assume sliver expires at slice expiration if not specified
continue
expirations[slivurn] = sliver['geni_expires']
# Finished building status_structs and expirations
statuses = self._getSliverAllocStates(status) # Dict by URN of sliver alloc state
resultSlivers = statuses.keys()
if slivers_by_am is None or not slivers_by_am.has_key(agg_urn):
# CH has no slivers. So all
# slivers the AM reported must be sent
# to the CH
if len(resultSlivers) > 0:
self.logger.debug("CH missing %d slivers at AM - report those that are provisioned", len(resultSlivers))
for sliver in resultSlivers:
if not statuses.has_key(sliver):
self.logger.debug("No %s key in statuses? %s", sliver, statuses)
elif statuses[sliver] == 'geni_provisioned':
if not expirations.has_key(sliver):
self.logger.debug("No %s key in expirations? %s", sliver, expirations)
expO = None
else:
expO = self._datetimeFromString(expirations[sliver])[1]
if not status_structs.has_key(sliver):
self.logger.debug("status_structs missing %s: %s", sliver, status_structs)
else:
# self.logger.debug("Will create sliver. slice: %s, AMURL: %s, expiration: %s, status_struct: %s, AMURN: %s", urn, client.url, expO, status_structs[sliver], agg_urn)
self.framework.create_sliver_info(None, urn,
client.url,
expO,
[status_structs[sliver]], agg_urn)
# else this sliver should not (yet) be recorded at the CH
else:
# Need to reconcile the CH list and the AM list
ch_slivers = slivers_by_am[agg_urn]
# missingSlivers: delete CH record for each
# FIXME: If self.opts.geni_best_effort could an AM not return an entry for a sliver
# you don't have permission to see or something? I don't think I'll
# worry about this now.
if len(missingSlivers) > 0:
self.logger.debug("Ensure %d missing slivers not reported by CH", len(missingSlivers))
for missing in missingSlivers:
if missing in ch_slivers.keys():
self.framework.delete_sliver_info(missing)
# Else AM didn't list it and neither did CH
# sliverFails: If the failed sliver says it is provisioned, it should be at the CH
# If the failed sliver is not provisioned, then it should not be at the CH (yet)
for fail in sliverFails:
if statuses[fail] == 'geni_provisioned' and fail not in ch_slivers.keys():
expO = self._datetimeFromString(expirations[fail])[1]
self.logger.debug("Recording failed but provisioned sliver %s at CH (error: %s)", fail, sliverFails[fail])
self.framework.create_sliver_info(None, urn,
client.url,
expO,
[status_structs[fail]], agg_urn)
elif statuses[fail] != 'geni_provisioned' and fail in ch_slivers.keys():
# The AM says the sliver is gone or not yet provisioned: Delete
self.logger.debug("Deleting CH record of failed and not provisioned sliver %s (error: %s, expiration: %s)", fail, sliverFails[fail], expirations[fail])
self.framework.delete_sliver_info(fail)
else:
# Do nothing with this failed sliver - just note it
if fail in ch_slivers.keys():
self.logger.debug("Not changing existing CH record of sliver %s that failed: %s", fail, sliverFails[fail])
else:
self.logger.debug("Not adding new CH record of sliver %s that failed: %s", fail, sliverFails[fail])
# End of block to handle failed slivers (had a geni_error)
# Any in CH not in result (and if we asked for slivers, also in list
# we asked for) - Delete
# Plus any in CH and result that are not geni_provisioned, delete
for ch_sliver in ch_slivers.keys():
if ch_sliver not in resultSlivers:
if len(slivers) == 0 or ch_sliver in slivers:
self.logger.debug("Deleting CH record of sliver not at AM: %s", ch_sliver)
self.framework.delete_sliver_info(ch_sliver)
elif statuses[ch_sliver] != 'geni_provisioned':
self.logger.debug("Deleting CH record of not provisioned sliver %s (expiration: %s)", ch_sliver, expirations[ch_sliver])
self.framework.delete_sliver_info(ch_sliver)
# All other slivers in result (not in sliverFails):
for sliver in resultSlivers:
if statuses[sliver] == 'geni_provisioned' and sliver not in sliverFails.keys():
if sliver not in ch_slivers.keys():
expO = self._datetimeFromString(expirations[sliver])[1]
self.logger.debug("Recording AM reported sliver %s at CH", sliver)
self.framework.create_sliver_info(None, urn,
client.url,
expO,
[status_structs[sliver]], agg_urn)
else:
# Now dealing with slivers listed by AM and CH, and provisioned at AM, and not failed
chexpo = None
if ch_slivers[sliver].has_key('SLIVER_INFO_EXPIRATION'):
chexp = ch_slivers[sliver]['SLIVER_INFO_EXPIRATION']
chexpo = naiveUTC(dateutil.parser.parse(chexp, tzinfos=tzd))
expO, expT, _ = self._datetimeFromString(expirations[sliver])
if chexpo is None or (expO is not None and abs(chexpo - expO) > datetime.timedelta.resolution):
self.logger.debug("CH sliver %s expiration %s != AM exp %s; update at CH", sliver, str(chexpo), str(expO))
# update the recorded expiration time to be accurate
self.framework.update_sliver_info(agg_urn, urn, sliver,
expT)
# else CH/AM agree on the time. Nothing to do
# Else the sliver is not yet provisioned or failed. Should already have been handled
# End of loop over slivers in result
# End of block where CH lists slivers in the slice for this AM
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
self.logger.info('Error ensuring CH lists same slivers as at this AM')
self.logger.debug(e)
else:
self.logger.debug("Not syncing slivers with CH - no valid AM URN known")
else:
self.logger.debug("Per commandline option, not syncing slivers with clearinghouse.")
# End of loop over clients
# FIXME: Return the status if there was only 1 client?
if numClients > 0:
retVal += "Returned status of slivers on %d of %d possible aggregates." % (successCnt, self.numOrigClients)
self.logger.debug("Status result: " + json.dumps(retItem, indent=2))
return retVal, retItem
# End of status
def deletesliver(self, args):
"""AM API DeleteSliver <slicename>
For use in AM API v1&2; Use Delete() for v3+
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
"""
if self.opts.api_version >= 3:
if self.opts.devmode:
self.logger.warn("Trying DeleteSliver with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("DeleteSliver is only available in AM API v1 or v2. Use Delete, or specify the -V2 option to use AM API v2, if the AM supports it.")
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 1, "DeleteSliver")
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
args = [urn, creds]
op = 'DeleteSliver'
options = self._build_options(op, name, None)
#--- API version specific
if self.opts.api_version >= 2:
# Add the options dict
args.append(options)
self.logger.debug("Doing deletesliver with urn %s, %d creds, options %r", urn, len(creds), options)
successList = []
failList = []
successCnt = 0
(clientList, message) = self._getclients()
numClients = len(clientList)
msg = "%s %s at " % (op, urn)
# Connect to each available GENI AM
## The AM API does not cleanly state how to deal with
## aggregates which do not have a sliver in this slice. We
## know at least one aggregate (PG) returns an Exception in
## this case.
## FIX ME: May need to look at handling of this more in the future.
## Also, if the user supplied the aggregate list, a failure is
## more interesting. We can figure out what the error strings
## are at the various aggregates if they don't know about the
## slice and make those more quiet. Finally, we can try
## sliverstatus at places where it fails to indicate places
## where you still have resources.
for client in clientList:
try:
((rawres, message), client) = self._api_call(client,
msg + str(client.url),
op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nDeleteSliver failed: " + retVal)
continue
amapiError = None
res = None
try:
# Get the boolean result out of the result (accounting for API version diffs, ABAC)
(res, message) = self._retrieve_value(rawres, message, self.framework)
except AMAPIError, amapiError:
# Would raise an AMAPIError.
# But that loses the side-effect of deleting any sliverinfo records.
# So if we're doing those, hold odd on raising the error
if self.opts.noExtraCHCalls:
raise amapiError
else:
self.logger.debug("Got AMAPIError retrieving value from deletesliver. Hold it until we do any sliver info processing")
if res:
prStr = "Deleted sliver %s at %s" % (urn,
(client.str if client.nick else client.urn))
if not self.opts.noExtraCHCalls:
# delete sliver info from SA database
try:
# Get the Agg URN for this client
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
# I'd like to be able to tell the SA to delete all slivers registered for
# this slice/AM, but the API says sliver_urn is required
sliver_urns = self.framework.list_sliverinfo_urns(urn, agg_urn)
for sliver_urn in sliver_urns:
self.framework.delete_sliver_info(sliver_urn)
else:
self.logger.debug("Not reporting to CH that slivers were deleted - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME: info only?
self.logger.warn('Error noting sliver deleted in SA database')
self.logger.debug(e)
else:
self.logger.debug("Per commandline option, not reporting sliver deleted to clearinghouse")
if numClients == 1:
retVal = prStr
self.logger.info(prStr)
successCnt += 1
successList.append( client.url )
else:
doDelete = False
code = -1
if rawres is not None and isinstance(rawres, dict) and rawres.has_key('code') and isinstance(rawres['code'], dict) and 'geni_code' in rawres['code']:
code = rawres['code']['geni_code']
if code==12 or code==15:
doDelete=True
if doDelete and not self.opts.noExtraCHCalls:
self.logger.debug("DeleteSliver failed with an error that suggests no slice at this AM - delete all sliverinfo records: %s", message)
# delete sliver info from SA database
try:
# Get the Agg URN for this client
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
# I'd like to be able to tell the SA to delete all slivers registered for
# this slice/AM, but the API says sliver_urn is required
sliver_urns = self.framework.list_sliverinfo_urns(urn, agg_urn)
for sliver_urn in sliver_urns:
self.framework.delete_sliver_info(sliver_urn)
else:
self.logger.debug("Not ensuring with CH that AM %s slice %s has no slivers - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
self.logger.info('Error ensuring slice has no slivers recorded in SA database at this AM')
self.logger.debug(e)
else:
if self.opts.noExtraCHCalls:
self.logger.debug("Per commandline option, not ensuring clearinghouse lists no slivers for this slice.")
else:
self.logger.debug("Based on return error code, (%d), not deleting any sliver infos here.", code)
if amapiError is not None:
self.logger.debug("Having processed the deletesliver return, now raise the AMAPI Error")
raise amapiError
prStr = "Failed to delete sliver %s at %s (got result '%s')" % (urn, (client.str if client.nick else client.urn), res)
if message is None or message.strip() == "":
message = "(no reason given)"
if not prStr.endswith('.'):
prStr += '.'
prStr += " " + message
self.logger.warn(prStr)
if numClients == 1:
retVal = prStr
failList.append( client.url )
if numClients == 0:
retVal = "No aggregates specified on which to delete slivers. %s" % message
elif numClients > 1:
retVal = "Deleted slivers on %d out of a possible %d aggregates" % (successCnt, self.numOrigClients)
return retVal, (successList, failList)
# End of deletesliver
def delete(self, args):
"""AM API Delete <slicename>
For use in AM API v3+. Use DeleteSliver for API v1&2.
Delete the named slivers, making them geni_unallocated. Resources are stopped
if necessary, and both de-provisioned and de-allocated. No further AM API
operations may be performed on slivers that have been deleted.
See deletesliver.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
--sliver-urn / -u option: each specifies a sliver URN to delete. If specified,
only the listed slivers will be deleted. Otherwise, all slivers in the slice will be deleted.
--best-effort: If supplied, slivers that can be deleted, will be; some slivers
may not be deleted, in which case check the geni_error return for that sliver.
If not supplied, then if any slivers cannot be deleted, the whole call fails
and slivers do not change.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-delete-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
Sample usage:
Delete all slivers in the slice at specific aggregates
omni.py -V3 -a http://aggregate/url -a http://another/url delete myslice
Delete slivers in slice myslice; any slivers that cannot be deleted, stay as they were, while others are deleted
omni.py -V3 -a http://myaggregate/url --best-effort delete myslice
Delete the given sliver in myslice at this AM and write the result struct to the given file
omni.py -V3 -a http://myaggregate/url -o --outputfile %s-delete-%a.json -u urn:publicid:IDN+myam+sliver+1 delete myslice
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Delete with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Delete is only available in AM API v3+. Use DeleteSliver with AM API v%d, or specify -V3 to use AM API v3." % self.opts.api_version)
# Gather options, args
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred,
retVal, slice_exp) = self._args_to_slicecred(args, 1, "Delete")
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
urnsarg, slivers = self._build_urns(urn)
descripMsg = "slivers in slice %s" % urn
if len(slivers) > 0:
descripMsg = "%d slivers in slice %s" % (len(slivers), urn)
args = [urnsarg, creds]
# Add the options dict
options = self._build_options('Delete', name, None)
args.append(options)
self.logger.debug("Doing delete with urns %s, %d creds, options %r",
urnsarg, len(creds), options)
successCnt = 0
(clientList, message) = self._getclients()
numClients = len(clientList)
# Connect to each available GENI AM
## The AM API does not cleanly state how to deal with
## aggregates which do not have a sliver in this slice. We
## know at least one aggregate (PG) returns an Exception in
## this case.
## FIX ME: May need to look at handling of this more in the future.
## Also, if the user supplied the aggregate list, a failure is
## more interesting. We can figure out what the error strings
## are at the various aggregates if they don't know about the
## slice and make those more quiet. Finally, we can try
## to call status at places where it fails to indicate places
## where you still have resources.
op = 'Delete'
msg = "Delete of %s at " % (descripMsg)
retItem = {}
for client in clientList:
try:
((result, message), client) = self._api_call(client,
msg + str(client.url),
op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nDelete failed: " + retVal)
continue
retItem[client.url] = result
(realres, message) = self._retrieve_value(result, message, self.framework)
someSliversFailed = False
badSlivers = self._getSliverAllocStates(realres, 'geni_unallocated')
for sliver in badSlivers.keys():
self.logger.warn("Sliver %s in wrong state! Expected %s, got %s?!", sliver, 'geni_unallocated', badSlivers[sliver])
# FIXME: This really might be a case where sliver in wrong state means the call failed?!
someSliversFailed = True
missingSlivers = self._findMissingSlivers(realres, slivers)
if len(missingSlivers) > 0:
self.logger.debug("Slivers from request missing in result: %s", missingSlivers)
sliverFails = self._didSliversFail(realres)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
if realres is not None:
if not self.opts.noExtraCHCalls:
# record results in SA database
try:
sliversDict = self._getSliverResultList(realres)
for sliver in sliversDict:
if isinstance(sliver, dict) and \
sliver.has_key('geni_sliver_urn'):
# Note that the sliver may not be in the DB if you delete after allocate
# FIXME: Exclude any slivers that are not geni_unallocated?
# That is, what happens if you call delete only with specific slivers,
# and do not delete all the slivers. Will the others be returned?
# I think the others are not _supposed to be returned....
# FIXME: If the user asked to delete everything in this slice
# at this AM, should I use list_slivers to delete everything
# the CH knows in this slice at this AM, in case something got missed?
# FIXME: Exclude slivers in sliverFails (had errors)?
if sliver['geni_sliver_urn'] in sliverFails.keys():
self.logger.debug("Skipping noting delete of failed sliver %s", sliver)
continue
self.logger.debug("Recording sliver %s deleted", sliver)
self.framework.delete_sliver_info \
(sliver['geni_sliver_urn'])
else:
self.logger.debug("Skipping noting delete of malformed sliver %s", sliver)
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
# FIXME Info?
self.logger.warn('Error noting sliver deleted in SA database')
self.logger.debug(e)
else:
self.logger.debug("Per commandline option, not reporting sliver deleted to clearinghouse")
prStr = "Deleted %s at %s" % (descripMsg,
(client.str if client.nick else client.urn))
if someSliversFailed:
prStr += " - but %d slivers are not fully de-allocated; check the return! " % len(badSlivers.keys())
if len(missingSlivers) > 0:
prStr += " - but %d slivers from request missing in result!? " % len(missingSlivers)
if len(sliverFails.keys()) > 0:
prStr += " = but %d slivers failed! " % len(sliverFails.keys())
if numClients == 1:
retVal = prStr + "\n"
self.logger.info(prStr)
# Construct print / save out result
if not isinstance(realres, list):
# malformed describe return
self.logger.warn('Malformed delete result from AM %s. Expected list, got type %s.' % (client.str, realres.__class__.__name__))
# FIXME: Add something to retVal saying that the result was malformed?
if isinstance(realres, str):
prettyResult = str(realres)
else:
prettyResult = pprint.pformat(realres)
else:
prettyResult = json.dumps(realres, ensure_ascii=True, indent=2)
header="Deletion of %s at AM %s" % (descripMsg, client.str)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, name, client.url, client.urn, "delete", ".json", numClients)
#self.logger.info("Writing result of delete for slice: %s at AM: %s to file %s", name, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved deletion of %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
if len(sliverFails.keys()) == 0:
successCnt += 1
else:
doDelete = False
raw = retItem[client.url]
code = -1
if raw is not None and isinstance(raw, dict) and raw.has_key('code') and isinstance(raw['code'], dict) and 'geni_code' in raw['code']:
code = raw['code']['geni_code']
# Technically if geni_best_effort and got this failure, then all slivers are bad
# But that's only true if the AM honors geni_best_effort, which it may not
# So only assume they're all bad if we didn't request any specific slivers.
if len(slivers) == 0:
if code==12 or code==15:
doDelete=True
if not self.opts.noExtraCHCalls:
if doDelete:
self.logger.debug("Delete failed with an error that suggests no slice at this AM or requested slivers not at this AM - delete all/requested sliverinfo records: %s", message)
# delete sliver info from SA database
try:
if len(slivers) > 0:
self.logger.debug("Delete failed - assuming all %d sliver URNs asked about are invalid and not at this AM - delete from CH", len(slivers))
for sliver in slivers:
self.framework.delete_sliver_info(sliver)
else:
self.logger.debug("Delete failed: assuming this slice has 0 slivers at this AM. Ensure CH lists none.")
# Get the Agg URN for this client
agg_urn = self._getURNForClient(client)
if urn_util.is_valid_urn(agg_urn):
# I'd like to be able to tell the SA to delete all slivers registered for
# this slice/AM, but the API says sliver_urn is required
sliver_urns = self.framework.list_sliverinfo_urns(urn, agg_urn)
for sliver_urn in sliver_urns:
self.framework.delete_sliver_info(sliver_urn)
else:
self.logger.debug("Not ensuring with CH that AM %s slice %s has no slivers - no valid AM URN known")
except NotImplementedError, nie:
self.logger.debug('Framework %s doesnt support recording slivers in SA database', self.config['selected_framework']['type'])
except Exception, e:
self.logger.info('Error ensuring slice has no slivers recorded in SA database at this AM')
self.logger.debug(e)
else:
self.logger.debug("Given AM return code (%d) and # requested slivers (%d), not telling CH to not list these slivers.", code, len(slivers))
else:
self.logger.debug("Per commandline option, not ensuring clearinghouse lists no slivers for this slice.")
if message is None or message.strip() == "":
message = "(no reason given)"
prStr = "Failed to delete %s at %s: %s" % (descripMsg, (client.str if client.nick else client.urn), message)
self.logger.warn(prStr)
if numClients == 1:
retVal = prStr
# loop over all clients
if numClients == 0:
retVal = "No aggregates specified on which to delete slivers. %s" % message
elif numClients > 1:
retVal = "Deleted slivers on %d out of a possible %d aggregates" % (successCnt, self.numOrigClients)
self.logger.debug("Delete result: " + json.dumps(retItem, indent=2))
return retVal, retItem
# End of delete
def shutdown(self, args):
    """AM API Shutdown <slicename>
    Slice name could be a full URN, but is usually just the slice name portion.
    Note that PLC Web UI lists slices as <site name>_<slice name>
    (e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
    Slice credential is usually retrieved from the Slice Authority. But
    with the --slicecredfile option it is read from that file, if it exists.
    Aggregates queried:
    - If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
    '''and''' any aggregates specified with the `-a` option.
    - Only supported at some clearinghouses, and the list of aggregates is only advisory
    - Each URL given in an -a argument or URL listed under that given
    nickname in omni_config, if provided, ELSE
    - List of URLs given in omni_config aggregates option, if provided, ELSE
    - List of URNs and URLs provided by the selected clearinghouse
    -V# API Version #
    --devmode: Continue on error if possible
    -l to specify a logging config file
    --logoutput <filename> to specify a logging output filename

    Returns (summary string, (successList, failList)) for API v1/v2,
    or (summary string, dict of AM URL -> raw result) for API v3+.
    """
    # prints slice expiration. Warns or raises an Omni error on problems
    (name, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 1, "Shutdown")

    creds = _maybe_add_abac_creds(self.framework, slice_cred)
    creds = self._maybe_add_creds_from_files(creds)

    args = [urn, creds]
    op = "Shutdown"
    options = self._build_options(op, name, None)
    if self.opts.api_version >= 2:
        # Add the options dict (v1 Shutdown takes no options argument)
        args.append(options)
    self.logger.debug("Doing shutdown with urn %s, %d creds, options %r", urn, len(creds), options)

    #Call shutdown on each AM
    successCnt = 0
    successList = []          # AM URLs where shutdown succeeded
    failList = []             # AM URLs where shutdown failed
    retItem = dict()          # raw per-AM results, keyed by AM URL
    (clientList, message) = self._getclients()
    numClients = len(clientList)
    msg = "Shutdown %s on " % (urn)
    for client in clientList:
        try:
            ((res, message), client) = self._api_call(client, msg + client.url, op, args)
        except BadClientException, bce:
            # Client unreachable or wrong API version: note it and move on
            # (or abort outright if it was the only aggregate).
            if bce.validMsg and bce.validMsg != '':
                retVal += bce.validMsg + ". "
            else:
                retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
            if numClients == 1:
                self._raise_omni_error("\nShutdown Failed: " + retVal)
            continue
        retItem[client.url] = res
        # Get the boolean result out of the result (accounting for API version diffs, ABAC)
        (res, message) = self._retrieve_value(res, message, self.framework)
        if res:
            prStr = "Shutdown Sliver %s at AM %s" % (urn, (client.str if client.nick else client.urn))
            self.logger.info(prStr)
            if numClients == 1:
                retVal = prStr
            successCnt+=1
            successList.append( client.url )
        else:
            prStr = "Failed to shutdown sliver %s at AM %s" % (urn, (client.str if client.nick else client.urn))
            if message is None or message.strip() == "":
                message = "(no reason given)"
            if not prStr.endswith('.'):
                prStr += '.'
            prStr += " " + message
            self.logger.warn(prStr)
            if numClients == 1:
                retVal = prStr
            failList.append( client.url )
    # Build the overall summary string for multi-AM / no-AM cases.
    if numClients == 0:
        retVal = "No aggregates specified on which to shutdown slice %s. %s" % (urn, message)
    elif numClients > 1:
        retVal = "Shutdown slivers of slice %s on %d of %d possible aggregates" % (urn, successCnt, self.numOrigClients)
    # Return shape differs by API version for backwards compatibility.
    if self.opts.api_version < 3:
        return retVal, (successList, failList)
    else:
        return retVal, retItem
# End of shutdown
def update(self, args):
"""
GENI AM API Update <slice name> <rspec file name>
For use with AM API v3+ only, and only at some AMs.
Technically adopted for AM API v4, but may be implemented by v3 AMs. See http://groups.geni.net/geni/wiki/GAPI_AM_API_DRAFT/Adopted#ChangeSetC:Update
Update resources as described in a request RSpec argument in a slice with
the named URN. Update the named slivers if specified, or all slivers in the slice at the aggregate.
On success, new resources in the RSpec will be allocated in new slivers, existing resources in the RSpec will
be updated, and slivers requested but missing in the RSpec will be deleted.
Return a string summarizing results, and a dictionary by AM URL of the return value from the AM.
After update, slivers that were geni_allocated remain geni_allocated (unless they were left
out of the RSpec, indicating they should be deleted, which is then immediate). Slivers that were
geni_provisioned or geni_updating will be geni_updating.
Clients must Renew or Provision any new (geni_updating) slivers before the expiration time
(given in the return struct), or the aggregate will automatically revert the changes
(delete new slivers or revert changed slivers to their original state).
Slivers that were geni_provisioned that you do not include in the RSpec will be deleted,
but only after calling Provision.
Slivers that were geni_allocated or geni_updating are immediately changed.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
Note that if multiple aggregates are supplied, the same RSpec will be submitted to each.
Aggregates should ignore parts of the Rspec requesting specific non-local resources (bound requests), but each
aggregate should attempt to satisfy all unbound requests.
--sliver-urn / -u option: each specifies a sliver URN to update. If specified,
only the listed slivers will be updated. Otherwise, all slivers in the slice will be updated.
--best-effort: If supplied, slivers that can be updated, will be; some slivers
may not be updated, in which case check the geni_error return for that sliver.
If not supplied, then if any slivers cannot be updated, the whole call fails
and sliver states do not change.
Note that some aggregates may require updating all slivers in the same state at the same
time, per the geni_single_allocation GetVersion return.
--end-time: Request that new slivers expire at the given time.
The aggregates may update the resources, but not be able to grant the requested
expiration time.
Note that per the AM API expiration times will be timezone aware.
Unqualified times are assumed to be in UTC.
Note that the expiration time cannot be past your slice expiration
time (see renewslice).
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-update-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
Sample usage:
Basic update of resources at 1 AM into myslice
omni.py -V3 -a http://myaggregate/url update myslice my-request-rspec.xml
Update resources in 2 AMs, requesting a specific sliver end time, save results into specificly named files that include an AM name calculated from the AM URL,
using the slice credential saved in the given file
omni.py -V3 -a http://myaggregate/url -a http://myother/aggregate --end-time 20120909 -o --outputfile myslice-manifest-%a.json --slicecredfile mysaved-myslice-slicecred.xml update myslice my-update-rspec.xml
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Update with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Update is only available in AM API v3+. Specify -V3 to use AM API v3.")
(slicename, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 2,
"Update",
"and a request rspec filename")
# Load up the user's request rspec
rspecfile = None
if not (self.opts.devmode and len(args) < 2):
rspecfile = args[1]
if rspecfile is None: # FIXME: If file type arg, check the file exists: os.path.isfile(rspecfile)
# Dev mode should allow missing RSpec
msg = 'File of resources to request missing: %s' % rspecfile
if self.opts.devmode:
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
try:
# read the rspec into a string, and add it to the rspecs dict
rspec = _derefRSpecNick(self, rspecfile)
except Exception, exc:
msg = "Unable to read rspec file '%s': %s" % (rspecfile, str(exc))
if self.opts.devmode:
rspec = ""
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
# Test if the rspec is really json containing an RSpec, and
# pull out the right thing
rspec = self._maybeGetRSpecFromStruct(rspec)
# Build args
op = 'Update'
options = self._build_options(op, slicename, None)
urnsarg, slivers = self._build_urns(urn)
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
args = [urnsarg, creds, rspec, options]
descripMsg = "slivers in slice %s" % urn
if len(slivers) > 0:
descripMsg = "%d slivers in slice %s" % (len(slivers), urn)
self.logger.debug("Doing Update with urns %s, %d creds, rspec starting: \'%s...\', and options %s", urnsarg, len(creds), rspec[:min(40, len(rspec))], options)
successCnt = 0
retItem = dict()
(clientList, message) = self._getclients()
numClients = len(clientList)
if numClients == 0:
msg = "No aggregate specified to submit update request to. Use the -a argument."
if self.opts.devmode:
# warn
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
elif numClients > 1:
# info - mention unbound bits will be repeated
self.logger.info("Multiple aggregates will get the same request RSpec; unbound requests will be attempted at multiple aggregates.")
if len(slivers) > 0:
# All slivers will go to all AMs. If not best effort, AM may fail the request if its
# not a local sliver.
# # FIXME: Could partition slivers by AM URN?
msg = "Will do %s %s at all %d AMs - some aggregates may fail the request if given slivers not from that aggregate." % (op, descripMsg, numClients)
if self.opts.geni_best_effort:
self.logger.info(msg)
else:
self.logger.warn(msg + " Consider running with --best-effort in future.")
# Do the command for each client
for client in clientList:
self.logger.info("%s %s at %s:", op, descripMsg, client.str)
try:
((result, message), client) = self._api_call(client,
("%s %s at %s" % (op, descripMsg, client.url)),
op,
args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nUpdate failed: " + retVal)
continue
# Make the RSpec more pretty-printed
rspec = None
if result and isinstance(result, dict) and result.has_key('value') and isinstance(result['value'], dict) and result['value'].has_key('geni_rspec'):
rspec = result['value']['geni_rspec']
if rspec and rspec_util.is_rspec_string( rspec, None, None, logger=self.logger ):
rspec = rspec_util.getPrettyRSpec(rspec)
result['value']['geni_rspec'] = rspec
else:
self.logger.debug("No valid RSpec returned!")
else:
self.logger.debug("Return struct missing geni_rspec element!")
# Pull out the result and check it
retItem[ client.url ] = result
(realresult, message) = self._retrieve_value(result, message, self.framework)
if realresult:
# Success (maybe partial?)
missingSlivers = self._findMissingSlivers(realresult, slivers)
if len(missingSlivers) > 0:
self.logger.warn("%d slivers from request missing in result?!", len(missingSlivers))
self.logger.debug("Slivers requested missing in result: %s", missingSlivers)
sliverFails = self._didSliversFail(realresult)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
(header, rspeccontent, rVal) = _getRSpecOutput(self.logger, rspec, slicename, client.urn, client.url, message)
self.logger.debug(rVal)
if realresult and isinstance(realresult, dict) and realresult.has_key('geni_rspec') and rspec and rspeccontent:
realresult['geni_rspec'] = rspeccontent
if isinstance(realresult, dict):
# Hmm. The rspec content looks OK here. But the
# json.dumps seems to screw it up? Quotes get
# double escaped.
prettyResult = json.dumps(realresult, ensure_ascii=True, indent=2)
else:
prettyResult = pprint.pformat(realresult)
# Save out the result
# header="<!-- Update %s at AM URL %s -->" % (descripMsg, client.url)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, slicename, client.url, client.urn, "update", ".json", numClients)
#self.logger.info("Writing result of update for slice: %s at AM: %s to file %s", slicename, client.url, filename)
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved update of %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
else:
retVal += "Updated %s at %s. \n" % (descripMsg, client.str)
if len(missingSlivers) > 0:
retVal += " - but with %d slivers from request missing in result?! \n" % len(missingSlivers)
if len(sliverFails.keys()) > 0:
retVal += " = but with %d slivers reporting errors. \n" % len(sliverFails.keys())
# Check the new sliver expirations
(orderedDates, sliverExps) = self._getSliverExpirations(realresult)
# None case
if len(orderedDates) == 1:
self.logger.info("All slivers expire on %r", orderedDates[0].isoformat())
elif len(orderedDates) == 2:
self.logger.info("%d slivers expire on %r, the rest (%d) on %r", len(sliverExps[orderedDates[0]]), orderedDates[0].isoformat(), len(sliverExps[orderedDates[0]]), orderedDates[1].isoformat())
elif len(orderedDates) == 0:
msg = " 0 Slivers reported updated!"
self.logger.warn(msg)
retVal += msg
else:
self.logger.info("%d slivers expire on %r, %d on %r, and others later", len(sliverExps[orderedDates[0]]), orderedDates[0].isoformat(), len(sliverExps[orderedDates[0]]), orderedDates[1].isoformat())
if len(orderedDates) > 0:
if len(orderedDates) == 1:
retVal += " All slivers expire on: %s" % orderedDates[0].isoformat()
else:
retVal += " First sliver expiration: %s" % orderedDates[0].isoformat()
self.logger.debug("Update %s result: %s" % (descripMsg, prettyResult))
if len(missingSlivers) == 0 and len(sliverFails.keys()) == 0:
successCnt += 1
else:
# Failure
if message is None or message.strip() == "":
message = "(no reason given)"
retVal += "Update of %s at %s failed: %s.\n" % (descripMsg, client.str, message)
self.logger.warn(retVal)
# FIXME: Better message?
# Done with update call loop over clients
if numClients == 0:
retVal += "No aggregates at which to update %s. %s\n" % (descripMsg, message)
elif numClients > 1:
retVal += "Updated %s at %d out of %d aggregates.\n" % (descripMsg, successCnt, self.numOrigClients)
elif successCnt == 0:
retVal += "Update %s failed at %s" % (descripMsg, clientList[0].url)
self.logger.debug("Update Return: \n%s", json.dumps(retItem, indent=2))
return retVal, retItem
# end of update
# Cancel(urns, creds, options)
# return (like for Describe - see how that is handled):
# rspec
# slice urn
# slivers list
# urn
# expires
# alloc status
# op status
# error
# options may include geni_best_effort
def cancel(self, args):
"""
GENI AM API Cancel <slice name>
For use with AM API v3+ only, and only at some AMs.
Technically adopted for AM API v4, but may be implemented by v3 AMs. See http://groups.geni.net/geni/wiki/GAPI_AM_API_DRAFT/Adopted#ChangeSetC:Update
Cancel an Update or Allocate of what is reserved in this slice. For geni_allocated slivers,
this method acts like Delete. For geni_updating slivers, returns the slivers to the geni_provisioned state and
the operational state and properties from before the call to Update.
Return a string summarizing results, and a dictionary by AM URL of the return value from the AM.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
--sliver-urn / -u option: each specifies a sliver URN whose update or allocate to cancel. If specified,
only the listed slivers will be cancelled. Otherwise, all slivers in the slice will be cancelled.
--best-effort: If supplied, slivers whose update or allocation can be cancelled, will be; some sliver
changes may not be cancelled, in which case check the geni_error return for that sliver.
If not supplied, then if any slivers cannot be cancelled, the whole call fails
and sliver states do not change.
Note that some aggregates may require updating all slivers in the same state at the same
time, per the geni_single_allocation GetVersion return.
Output directing options:
-o Save result in per-Aggregate files
-p (used with -o) Prefix for resulting files
--outputfile If supplied, use this output file name: substitute the AM for any %a,
and %s for any slicename
If not saving results to a file, they are logged.
If --tostdout option, then instead of logging, print to STDOUT.
File names will indicate the slice name, file format, and
which aggregate is represented.
e.g.: myprefix-myslice-update-localhost-8001.json
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
Sample usage:
Basic cancel of changes at 1 AM in myslice
omni.py -V3 -a http://myaggregate/url cancel myslice
Cancel changes in 2 AMs, save results into specificly named files that include an AM name calculated from the AM URL,
using the slice credential saved in the given file
omni.py -V3 -a http://myaggregate/url -a http://myother/aggregate -o --outputfile myslice-status-%a.json --slicecredfile mysaved-myslice-slicecred.xml cancel myslice
"""
if self.opts.api_version < 3:
if self.opts.devmode:
self.logger.warn("Trying Cancel with AM API v%d...", self.opts.api_version)
else:
self._raise_omni_error("Cancel is only available in AM API v3+. Specify -V3 to use AM API v3.")
(slicename, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 1,
"Cancel")
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
urnsarg, slivers = self._build_urns(urn)
descripMsg = "changes in slice %s" % urn
if len(slivers) > 0:
descripMsg = "changes in %d slivers in slice %s" % (len(slivers), urn)
args = [urnsarg, creds]
# Add the options dict
options = self._build_options('Delete', slicename, None)
args.append(options)
self.logger.debug("Doing cancel with urns %s, %d creds, options %r",
urnsarg, len(creds), options)
successCnt = 0
(clientList, message) = self._getclients()
numClients = len(clientList)
# Connect to each available GENI AM
## The AM API does not cleanly state how to deal with
## aggregates which do not have a sliver in this slice. We
## know at least one aggregate (PG) returns an Exception in
## this case.
## FIX ME: May need to look at handling of this more in the future.
## Also, if the user supplied the aggregate list, a failure is
## more interesting. We can figure out what the error strings
## are at the various aggregates if they don't know about the
## slice and make those more quiet. Finally, we can try
## to call status at places where it fails to indicate places
## where you still have resources.
op = 'Cancel'
msg = "Cancel of %s at " % (descripMsg)
retItem = {}
for client in clientList:
try:
((result, message), client) = self._api_call(client,
msg + str(client.url),
op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nCancel failed: " + retVal)
continue
# FIXME: Factor this next chunk into helper method?
# Decompress the RSpec before sticking it in retItem
rspec = None
if result and isinstance(result, dict) and result.has_key('value') and isinstance(result['value'], dict) and result['value'].has_key('geni_rspec'):
rspec = self._maybeDecompressRSpec(options, result['value']['geni_rspec'])
if rspec and rspec != result['value']['geni_rspec']:
self.logger.debug("Decompressed RSpec")
if rspec and rspec_util.is_rspec_string( rspec, None, None, logger=self.logger ):
rspec = rspec_util.getPrettyRSpec(rspec)
else:
self.logger.warn("Didn't get a valid RSpec!")
result['value']['geni_rspec'] = rspec
else:
self.logger.warn("Got no results from AM %s", client.str)
self.logger.debug("Return struct missing geni_rspec element!")
# Return for tools is the full code/value/output triple
retItem[client.url] = result
# Get the dict cancel result out of the result (accounting for API version diffs, ABAC)
(result, message) = self._retrieve_value(result, message, self.framework)
if not result:
fmt = "\nFailed to Cancel %s at AM %s: %s\n"
if message is None or message.strip() == "":
message = "(no reason given)"
retVal += fmt % (descripMsg, client.str, message)
continue # go to next AM
else:
retVal += "\nCancelled %s at AM %s" % (descripMsg, client.str)
sliverStateErrors = 0
# FIXME: geni_unallocated or geni_provisioned are both possible new alloc states
# So querying sliver alloc states would have to get all states and then complain if any are not
# either of those states.
sliverStates = self._getSliverAllocStates(result)
for sliver in sliverStates.keys():
# self.logger.debug("Sliver %s state: %s", sliver, sliverStates[sliver])
if sliverStates[sliver] not in ['geni_unallocated', 'geni_provisioned']:
self.logger.warn("Sliver %s in wrong state! Expected %s, got %s?!", sliver, 'geni_unallocated ir geni_provisioned', sliverStates[sliver])
# FIXME: This really might be a case where sliver in wrong state means the call failed?!
sliverStateErrors += 1
missingSlivers = self._findMissingSlivers(result, slivers)
if len(missingSlivers) > 0:
self.logger.warn("%d slivers from request missing in result", len(missingSlivers))
self.logger.debug("%s", missingSlivers)
sliverFails = self._didSliversFail(result)
for sliver in sliverFails.keys():
self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
(header, rspeccontent, rVal) = _getRSpecOutput(self.logger, rspec, slicename, client.urn, client.url, message, slivers)
self.logger.debug(rVal)
if result and isinstance(result, dict) and result.has_key('geni_rspec') and rspec and rspeccontent:
result['geni_rspec'] = rspeccontent
if not isinstance(result, dict):
# malformed cancel return
self.logger.warn('Malformed cancel result from AM %s. Expected struct, got type %s.' % (client.str, result.__class__.__name__))
# FIXME: Add something to retVal that the result was malformed?
if isinstance(result, str):
prettyResult = str(result)
else:
prettyResult = pprint.pformat(result)
else:
prettyResult = json.dumps(result, ensure_ascii=True, indent=2)
#header="<!-- Cancel %s at AM URL %s -->" % (descripMsg, client.url)
filename = None
if self.opts.output:
filename = _construct_output_filename(self.opts, slicename, client.url, client.urn, "cancel", ".json", numClients)
#self.logger.info("Writing result of cancel for slice: %s at AM: %s to file %s", slicename, client.url, filename)
else:
self.logger.info("Result of Cancel: ")
_printResults(self.opts, self.logger, header, prettyResult, filename)
if filename:
retVal += "Saved result of cancel %s at AM %s to file %s. \n" % (descripMsg, client.str, filename)
# Only count it as success if no slivers were missing
if len(missingSlivers) == 0 and len(sliverFails.keys()) == 0 and sliverStateErrors == 0:
successCnt+=1
else:
retVal += " - with %d sliver(s) missing and %d sliver(s) with errors and %d sliver(s) in wrong state. \n" % (len(missingSlivers), len(sliverFails.keys()), sliverStateErrors)
# loop over all clients
if numClients == 0:
retVal = "No aggregates specified at which to cancel changes. %s" % message
elif numClients > 1:
retVal = "Cancelled changes at %d out of a possible %d aggregates" % (successCnt, self.numOrigClients)
self.logger.debug("Cancel result: " + json.dumps(retItem, indent=2))
return retVal, retItem
# End of cancel
# End of AM API operations
#######
# Non AM API operations at aggregates
def snapshotimage(self, args):
    """Alias for createimage: snapshot a sliver's disk at a ProtoGENI AM."""
    return self.createimage(args)
def createimage(self, args):
    '''ProtoGENI's createimage function: snapshot the disk for a
    single sliver (node), giving it the given image name.
    See http://www.protogeni.net/trac/protogeni/wiki/ImageHowTo

    args: slice name (or URN), imageName, and optionally makePublic.
    The single sliver whose disk is snapshotted comes from the -u /
    --sliver-urn option; exactly one AM must be specified with -a.
    Returns (retVal, retItem): a human-readable summary string, and the
    raw XML-RPC result from the AM.'''
    # args: sliceURNOrName, sliverURN, imageName, makePublic=True
    # Plus the AM(s) to invoke this at
    # sliver urn from the -U argument
    # So really we just need slice name, imageName, and makePublic
    # This is a PG function, and only expected to work at recent
    # PG AMs
    # # imagename is alphanumeric
    # # note this method returns quick; the experimenter gets an
    # # email later when it is done. In the interval, don't change
    # # anything
    # # Note that if you re-use the name, you replace earlier
    # # content
    # # makePublic is whether the image is available to others;
    # # default is True
    # # sliverURN is the urn of the sliver from the manifest RSpec
    # # whose disk image you are snapshotting
    # prints slice expiration. Warns or raises an Omni error on problems
    (name, sliceURN, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 2, "snapshotImage",
                                                                              "and an image name and optionally makePublic")
    # Extract the single sliver URN. More than one, complain
    urnsarg, slivers = self._build_urns(sliceURN)
    if len(slivers) != 1:
        self._raise_omni_error("CreateImage requires exactly one sliver URN: the sliver containing the node to snapshot.")
    sliverURN = slivers[0]
    if sliverURN:
        sliverURN = sliverURN.strip()
    # Validate the sliver URN; in devmode warn and continue instead of raising
    if not urn_util.is_valid_urn_bytype(sliverURN, 'sliver', self.logger):
        if not self.opts.devmode:
            self._raise_omni_error("Sliver URN invalid: %s" % sliverURN)
        else:
            self.logger.warn("Sliver URN invalid but continuing: %s", sliverURN)
    # Extract the imageName from args
    if len(args) >= 2:
        imageName = args[1]
    else:
        imageName = None
    if not imageName:
        self._raise_omni_error("CreateImage requires a name for the image (alphanumeric)")
    # FIXME: Check that image name is alphanumeric?
    import re
    if not re.match("^[a-zA-Z0-9]+$", imageName):
        if not self.opts.devmode:
            self._raise_omni_error("Image name must be alphanumeric: %s" % imageName)
        else:
            self.logger.warning("Image name must be alphanumeric, but continuing: %s" % imageName)
    # Extract makePublic from args, if present
    makePublic = True
    if len(args) >= 3:
        makePublicString = args[2]
        # 0 or f or false or no, case insensitive, means False
        makePublic = makePublicString.lower() not in ('0', 'f', 'false', 'no')
    if makePublic:
        publicString = "public"
    else:
        publicString = "private"
    creds = _maybe_add_abac_creds(self.framework, slice_cred)
    creds = self._maybe_add_creds_from_files(creds)
    op = "CreateImage"
    options = self._build_options(op, name, None)
    # 'global' is the PG option name for the public/private flag
    options['global'] = makePublic
    args = (sliceURN, imageName, sliverURN, creds, options)
    retItem = None
    retVal = ""
    (clientList, message) = self._getclients()
    numClients = len(clientList)
    if numClients != 1:
        self._raise_omni_error("CreateImage snapshots a particular machine: specify exactly 1 AM URL with '-a'")
    # FIXME: Note this is already checked in _correctAPIVersion
    client = clientList[0]
    msg = "Create %s Image %s of sliver %s on " % (publicString, imageName, sliverURN)
    self.logger.debug("Doing createimage with slice %s, image %s, sliver %s, %d creds, options %r", sliceURN, imageName, sliverURN, len(creds), options)
    # FIXME: Confirm that AM is PG, complain if not?
    # pgeni gives an error 500 (Protocol Error). PLC gives error
    # 13 (invalid method)
    try:
        ((res, message), client) = self._api_call(client, msg + client.url, op, args)
    except BadClientException, bce:
        # AM unreachable or speaks the wrong API version; with only one AM this is fatal
        if bce.validMsg and bce.validMsg != '':
            retVal += bce.validMsg + ". "
        else:
            retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
        self._raise_omni_error("\nCreateImage Failed: " + retVal)
    # self.logger.debug("Doing SSL/XMLRPC call to %s invoking %s with args %r", client.url, op, args)
    # (res, message) = _do_ssl(self.framework, None, msg + client.url, getattr(client, op), *args)
    self.logger.debug("raw result: %r" % res)
    retItem = res
    # Unwrap the code/value/output triple (accounting for API version diffs, ABAC)
    (realres, message) = self._retrieve_value(res, message, self.framework)
    if not realres:
        # fail
        prStr = "Failed to %s%s" % (msg, client.str)
        if message is None or message.strip() == "":
            message = "(no reason given)"
        if not prStr.endswith('.'):
            prStr += '.'
        prStr += " " + message
        self.logger.warn(prStr)
        retVal += prStr + "\n"
    else:
        # success
        prStr = "Snapshotting disk on %s at %s, creating %s image %s" % (sliverURN, client.str, publicString, res['value'])
        self.logger.info(prStr)
        retVal += prStr
    return retVal, retItem
def deleteimage(self, args):
'''ProtoGENI's deleteimage function: Delete the named disk image.
Takes an image urn. Optionally supply the URN of the image creator, if that is not you,
as a second argument.
Note that you should invoke this at the AM where you created the image - other AMs will return
a SEARCHFAILED error.
See http://www.protogeni.net/trac/protogeni/wiki/ImageHowTo'''
image_urn = None
creator_urn = None
# Ensure we got a disk image URN
if len(args) < 1:
if not self.opts.devmode:
self._raise_omni_error("Missing image URN argument to deleteimage")
else:
self.logger.warn("Missing image URN argument to deleteimage but continuing")
if len(args) >= 1:
image_urn = args[0]
self.logger.info("DeleteImage using image_urn %r", image_urn)
# Validate that this looks like an image URN
if image_urn and not urn_util.is_valid_urn_bytype(image_urn, 'image', self.logger):
if not self.opts.devmode:
self._raise_omni_error("Image URN invalid: %s" % image_urn)
else:
self.logger.warn("Image URN invalid but continuing: %s", image_urn)
if len(args) > 1:
creator_urn = args[1]
if creator_urn:
creator_urn = creator_urn.strip()
if creator_urn == "":
creator_urn = None
if creator_urn:
self.logger.info("Deleteimage using creator_urn %s", creator_urn)
# If we got a creator urn option
# Validate it looks like a user urn
if not urn_util.is_valid_urn_bytype(creator_urn, 'user', self.logger):
if not self.opts.devmode:
self._raise_omni_error("Creator URN invalid: %s" % creator_urn)
else:
self.logger.warn("Creator URN invalid but continuing: %s", image_urn)
# get the user credential
cred = None
message = "(no reason given)"
if self.opts.api_version >= 3:
(cred, message) = self.framework.get_user_cred_struct()
else:
(cred, message) = self.framework.get_user_cred()
if cred is None:
# Dev mode allow doing the call anyhow
self.logger.error('Cannot deleteimage: Could not get user credential: %s', message)
if not self.opts.devmode:
return ("Could not get user credential: %s" % message, dict())
else:
self.logger.info('... but continuing')
cred = ""
creds = _maybe_add_abac_creds(self.framework, cred)
creds = self._maybe_add_creds_from_files(creds)
op = "DeleteImage"
options = self._build_options(op, None, None)
# put creator_urn in the options list if we got one
if creator_urn:
options['creator_urn'] = creator_urn
args = (image_urn, creds, options)
retItem = dict()
retVal = ""
# Return value is an XML-RPC boolean 1 (True) on success. Else it uses the AM API to return an error code.
# EG a SEARCHFAILED "No such image" if the local AM does not have this image.
(clientList, message) = self._getclients()
numClients = len(clientList)
# FIXME: Insist on only 1 AM (user has to know which AM has this image)? Or let user try a bunch of AMs
# and just see where it works?
msg = "Delete Image %s" % (image_urn)
if creator_urn:
msg += " created by %s" % (creator_urn)
msg += " on "
self.logger.debug("Doing deleteimage with image_urn %s, %d creds, options %r",
image_urn, len(creds), options)
prStr = None
success = False
for client in clientList:
# FIXME: Confirm that AM is PG, complain if not?
# pgeni gives an error 500 (Protocol Error). PLC gives error
# 13 (invalid method)
try:
((res, message), client) = self._api_call(client, msg + client.url, op, args)
except BadClientException, bce:
if bce.validMsg and bce.validMsg != '':
retVal += bce.validMsg + ". "
else:
retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
if numClients == 1:
self._raise_omni_error("\nDeleteImage Failed: " + retVal)
continue
self.logger.debug("deleteimage raw result: %r" % res)
retItem[client.url] = res
(realres, message) = self._retrieve_value(res, message, self.framework)
if not realres:
# fail
prStr = "Failed to %s%s" % (msg, client.str)
if message is None or message.strip() == "":
message = "(no reason given)"
if not prStr.endswith('.'):
prStr += '.'
prStr += " " + message
self.logger.warn(prStr)
#retVal += prStr + "\n"
else:
# success
success = True
prStr = "Deleted image %s at %s" % (image_urn, client.str)
self.logger.info(prStr)
retVal += prStr
if not self.opts.devmode:
# Only expect 1 AM to have this, so quit once we find it
break
if numClients == 0:
retVal = "Specify at least one aggregate at which to try to delete image %s. %s" % (image_urn, message)
elif not success:
retVal = "Failed to delete image %s at any of %d aggregates. Last error: %s" % (image_urn, self.numOrigClients, prStr)
return retVal, retItem
def listimages(self, args):
    '''ProtoGENI's ListImages function: List the disk images created by the given user.
    Takes a user urn or name. If no user is supplied, uses the caller's urn.
    Gives a list of all images created by that user, including the URN
    for deleting the image. Return is a list of structs containing the url and urn of the image.
    Note that you should invoke this at the AM where the images were created.
    See http://www.protogeni.net/trac/protogeni/wiki/ImageHowTo
    Output directing options:
    -o Save result in per-Aggregate files
    -p (used with -o) Prefix for resulting files
    --outputfile If supplied, use this output file name: substitute the AM for any %a
    If not saving results to a file, they are logged.
    If --tostdout option is supplied (not -o), then instead of logging, print to STDOUT.
    File names will indicate the user and which aggregate is represented.
    e.g.: myprefix-imageowner-listimages-localhost-8001.json

    Returns (retVal, retItem): a human-readable summary string, and a dict
    keyed by AM URL of the raw result from each AM tried.
    '''
    creator_urn = None
    # If we got a creator argument, use it
    if len(args) >= 1:
        creator_urn = args[0]
    self.logger.debug("ListImages got creator_urn %r", creator_urn)
    # get the user credential (struct form for APIv3+)
    cred = None
    message = "(no reason given by SA)"
    if self.opts.api_version >= 3:
        (cred, message) = self.framework.get_user_cred_struct()
    else:
        (cred, message) = self.framework.get_user_cred()
    if cred is None:
        # Dev mode allow doing the call anyhow
        self.logger.error('Cannot listimages: Could not get user credential: %s', message)
        if not self.opts.devmode:
            return ("Could not get user credential: %s" % message, dict())
        else:
            self.logger.info('... but continuing')
            cred = ""
    creds = _maybe_add_abac_creds(self.framework, cred)
    creds = self._maybe_add_creds_from_files(creds)
    # Work out the caller's (invoker's) URN and authority from the credential,
    # so we can default creator_urn and sanity-check authorities below
    invoker_authority = None
    if cred:
        invoker_urn = credutils.get_cred_owner_urn(self.logger, cred)
        if urn_util.is_valid_urn(invoker_urn):
            iURN = urn_util.URN(None, None, None, invoker_urn)
            invoker_authority = urn_util.string_to_urn_format(iURN.getAuthority())
            self.logger.debug("Got invoker %s with authority %s", invoker_urn, invoker_authority)
        if not creator_urn:
            creator_urn = invoker_urn
    # Validate that this looks like an user URN; if the arg was a bare user
    # name, try to build a full URN using the invoker's authority
    if creator_urn and not urn_util.is_valid_urn_bytype(creator_urn, 'user', self.logger):
        self.logger.debug("Creator_urn %s not valid", creator_urn)
        if invoker_authority:
            test_urn = urn_util.URN(invoker_authority, "user", creator_urn, None)
            if urn_util.is_valid_urn_bytype(test_urn.urn, 'user', self.logger):
                self.logger.info("Inferred creator urn %s from name %s", test_urn, creator_urn)
                creator_urn = test_urn.urn
            else:
                self.logger.debug("test urn using invoker_authority was invalid")
                if not self.opts.devmode:
                    self._raise_omni_error("Creator URN invalid: %s" % creator_urn)
                else:
                    self.logger.warn("Creator URN invalid but continuing: %s", creator_urn)
        else:
            self.logger.debug("Had no invoker authority")
            if not self.opts.devmode:
                self._raise_omni_error("Creator URN invalid: %s" % creator_urn)
            else:
                self.logger.warn("Creator URN invalid but continuing: %s", creator_urn)
    if urn_util.is_valid_urn(creator_urn) and cred:
        urn = urn_util.URN(None, None, None, creator_urn)
        # Compare creator_urn with invoker urn: must be same SA
        creator_authority = urn_util.string_to_urn_format(urn.getAuthority())
        if creator_authority != invoker_authority:
            if not self.opts.devmode:
                return ("Cannot listimages: Given creator %s not from same SA as you (%s)" % (creator_urn, invoker_authority), dict())
            else:
                self.logger.warn("Cannot listimages but continuing: Given creator %s not from same SA as you (%s)" % (creator_urn, invoker_authority))
    self.logger.info("ListImages using creator_urn %r", creator_urn)
    op = "ListImages"
    options = self._build_options(op, None, None)
    args = (creator_urn, creds, options)
    retItem = dict()
    retVal = ""
    # Return value is a list of structs on success. Else it uses the AM API to return an error code.
    # EG a SEARCHFAILED "No such image" if the local AM does not have this image.
    (clientList, message) = self._getclients()
    numClients = len(clientList)
    msg = "List Images created by %s on " % (creator_urn)
    self.logger.debug("Doing listimages with creator_urn %s, %d creds, options %r",
                      creator_urn, len(creds), options)
    prStr = None
    success = False
    for client in clientList:
        # FIXME: Confirm that AM is PG, complain if not?
        # pgeni gives an error 500 (Protocol Error). PLC gives error
        # 13 (invalid method)
        try:
            ((res, message), client) = self._api_call(client, msg + client.url, op, args)
        except BadClientException, bce:
            # AM unreachable or wrong API version: fatal if it was the only AM, else skip it
            if bce.validMsg and bce.validMsg != '':
                retVal += bce.validMsg + ". "
            else:
                retVal += "Skipped aggregate %s. (Unreachable? Doesn't speak AM API v%d? Check the log messages, and try calling 'getversion' to check AM status and API versions supported.).\n" % (client.str, self.opts.api_version)
            if numClients == 1:
                self._raise_omni_error("\nListImages Failed: " + retVal)
            continue
        self.logger.debug("listimages raw result: %r" % res)
        retItem[client.url] = res
        # Unwrap the code/value/output triple (accounting for API version diffs, ABAC)
        (realres, message) = self._retrieve_value(res, message, self.framework)
        if realres is None or realres == 0:
            # fail at this AM; log but keep trying other AMs
            prStr = "Failed to %s%s" % (msg, client.str)
            if message is None or message.strip() == "":
                message = "(no reason given)"
            if not prStr.endswith('.'):
                prStr += '.'
            prStr += " " + message
            self.logger.warn(prStr)
            #retVal += prStr + "\n"
        else:
            # success
            success = True
            prettyResult = json.dumps(realres, ensure_ascii=True, indent=2)
            # Save/print out result
            imgCnt = len(realres)
            header="Found %d images created by %s at %s" % (imgCnt, creator_urn, client.str)
            filename = None
            if self.opts.output:
                creator_name = urn_util.nameFromURN(creator_urn)
                filename = _construct_output_filename(self.opts, creator_name, client.url, client.urn, "listimages", ".json", numClients)
            _printResults(self.opts, self.logger, header, prettyResult, filename)
            if filename:
                prStr = "Saved list of images created by %s at AM %s to file %s. \n" % (creator_urn, client.str, filename)
            elif numClients == 1:
                prStr = "Images created by %s at %s:\n%s" % (creator_urn, client.str, prettyResult)
            else:
                imgCnt = len(realres)
                prStr = "Found %d images created by %s at %s. \n" % (imgCnt, creator_urn, client.str)
            retVal += prStr
    if numClients == 0:
        retVal = "Specify at least one valid aggregate at which to try to list images created by %s. %s" % (creator_urn, message)
    elif not success:
        retVal = "Failed to list images created by %s at any of %d aggregates. Last error: %s" % (creator_urn, self.numOrigClients, prStr)
    return retVal, retItem
def print_sliver_expirations(self, args):
'''Print the expiration of any slivers in the given slice.
Return is a string, and a struct by AM URL of the list of sliver expirations.
Slice name could be a full URN, but is usually just the slice name portion.
Note that PLC Web UI lists slices as <site name>_<slice name>
(e.g. bbn_myslice), and we want only the slice name part here (e.g. myslice).
Slice credential is usually retrieved from the Slice Authority. But
with the --slicecredfile option it is read from that file, if it exists.
--sliver-urn / -u option: each specifies a sliver URN to get status on. If specified,
only the listed slivers will be queried. Otherwise, all slivers in the slice will be queried.
Aggregates queried:
- If `--useSliceAggregates`, each aggregate recorded at the clearinghouse as having resources for the given slice,
'''and''' any aggregates specified with the `-a` option.
- Only supported at some clearinghouses, and the list of aggregates is only advisory
- Each URL given in an -a argument or URL listed under that given
nickname in omni_config, if provided, ELSE
- List of URLs given in omni_config aggregates option, if provided, ELSE
- List of URNs and URLs provided by the selected clearinghouse
-V# API Version #
--devmode: Continue on error if possible
-l to specify a logging config file
--logoutput <filename> to specify a logging output filename
'''
# for each AM,
# do sliverstatus or listresources as appropriate and get the sliver expiration,
# and print that
# prints slice expiration. Warns or raises an Omni error on problems
(name, urn, slice_cred, retVal, slice_exp) = self._args_to_slicecred(args, 1, "print_sliver_expirations")
creds = _maybe_add_abac_creds(self.framework, slice_cred)
creds = self._maybe_add_creds_from_files(creds)
(clientList, message) = self._getclients()
numClients = len(clientList)
retItem = {}
for client in clientList:
# What kind of AM is this? Which function do I call?
# For now, always use status or sliverstatus
# Known AMs all do something in status.
# Of known AMs, FOAM and GRAM do not do manifest, and ION not yet
sliverstatus = True
(ver, msg) = self._get_this_api_version(client)
if not ver:
self.logger.debug("Error getting API version. Assume 2. Msg: %s", msg)
else:
self.logger.debug("%s does API v%d", client.str, ver)
if ver >= 3:
sliverstatus = False
# Call the function
if sliverstatus:
args = [urn, creds]
options = self._build_options('SliverStatus', name, None)
if self.opts.api_version >= 2:
# Add the options dict
args.append(options)
self.logger.debug("Doing sliverstatus with urn %s, %d creds, options %r", urn, len(creds), options)
msg = None
status = None
try:
((status, message), client) = self._api_call(client,
"SliverStatus of %s at %s" % (urn, str(client.url)),
'SliverStatus', args)
# Get the dict status out of the result (accounting for API version diffs, ABAC)
(status, message) = self._retrieve_value(status, message, self.framework)
except Exception, e:
self.logger.debug("Failed to get sliverstatus to get sliver expiration from %s: %s", client.str, e)
retItem[client.url] = None
# Parse the expiration and print / add to retVal
if status and isinstance(status, dict):
exps = expires_from_status(status, self.logger)
if len(exps) > 1:
# More than 1 distinct sliver expiration found
# Sort and take first
exps = exps.sort()
outputstr = exps[0].isoformat()
msg = "Resources in slice %s at AM %s expire at %d different times. First expiration is %s UTC" % (name, client.str, len(exps), outputstr)
elif len(exps) == 0:
msg = "Failed to get sliver expiration from %s" % client.str
self.logger.debug("Failed to parse a sliver expiration from status")
else:
outputstr = exps[0].isoformat()
msg = "Resources in slice %s at AM %s expire at %s UTC" % (name, client.str, outputstr)
retItem[client.url] = exps
else:
retItem[client.url] = None
msg = "Malformed or failed to get status from %s, cannot find sliver expiration" % client.str
if message is None or message.strip() == "":
if status is None:
message = "(no reason given, missing result)"
elif status == False:
message = "(no reason given, False result)"
elif status == 0:
message = "(no reason given, 0 result)"
else:
message = "(no reason given, empty result)"
# FIXME: If this is PG and code 12, then be nicer here.
if message:
msg += " %s" % message
if message and "protogeni AM code: 12: No slice or aggregate here" in message:
# PG says this AM has no resources here
msg = "No resources at %s in slice %s" % (client.str, name)
self.logger.debug("AM %s says: %s", client.str, message)
if msg:
self.logger.info(msg)
retVal += msg + ".\n "
else:
# Doing APIv3
urnsarg, slivers = self._build_urns(urn)
args = [urnsarg, creds]
# Add the options dict
options = self._build_options('Status', name, None)
args.append(options)
self.logger.debug("Doing status with urns %s, %d creds, options %r", urnsarg, len(creds), options)
descripMsg = "slivers in slice %s" % urn
if len(slivers) > 0:
descripMsg = "%d slivers in slice %s" % (len(slivers), urn)
msg = None
status = None
try:
((status, message), client) = self._api_call(client,
"Status of %s at %s" % (urn, str(client.url)),
'Status', args)
# Get the dict status out of the result (accounting for API version diffs, ABAC)
(status, message) = self._retrieve_value(status, message, self.framework)
except Exception, e:
self.logger.debug("Failed to get status to get sliver expiration from %s: %s", client.str, e)
retItem[client.url] = None
if not status:
retItem[client.url] = None
if message and "protogeni AM code: 12: No such slice here" in message:
# PG says this AM has no resources here
msg = "No resources at %s in slice %s" % (client.str, name)
self.logger.debug("AM %s says: %s", client.str, message)
self.logger.info(msg)
retVal += msg + ".\n "
else:
# FIXME: Put the message error in retVal?
# FIXME: getVersion uses None as the value in this case. Be consistent
fmt = "\nFailed to get Status on %s at AM %s: %s\n"
if message is None or message.strip() == "":
message = "(no reason given)"
retVal += fmt % (descripMsg, client.str, message)
continue
# Summarize sliver expiration
(orderedDates, sliverExps) = self._getSliverExpirations(status, None)
retItem[client.url] = orderedDates
if len(orderedDates) == 1:
msg = "Resources in slice %s at AM %s expire at %s UTC" % (name, client.str, orderedDates[0])
elif len(orderedDates) == 0:
msg = "0 Slivers reported results!"
else:
firstTime = orderedDates[0]
firstCount = len(sliverExps[firstTime])
msg = "Resources in slice %s at AM %s expire at %d different times. First expiration is %s UTC (%d slivers), and other slivers at %d different times." % (name, client.str, len(orderedDates), firstTime, firstCount, len(orderedDates) - 1)
if msg:
self.logger.info(msg)
retVal += msg + ".\n "
# End of block to handle APIv3 AM
# End of loop over AMs
if numClients == 0:
retVal = "No aggregates specified on which to get sliver expirations in slice %s. %s" % (name, message)
elif numClients > 1:
soonest = None
for client in clientList:
thisAM = retItem[client.url]
if thisAM and len(thisAM) > 0:
nextTime = thisAM[0]
if nextTime:
if soonest is None or nextTime < soonest[0]:
soonest = (nextTime, client.str)
if soonest:
retVal += "First resources expire at %s (UTC) at AM %s.\n" % (soonest[0], soonest[1])
return retVal, retItem
# End of print_sliver_expirations
#######
# Helper functions follow
def _checkValidClient(self, client):
    '''Confirm this AM speaks the right AM API version.
    Return the API version spoken by this AM, and a client object to talk to it.
    In particular, the returned client may be different, if the AM you asked about advertised
    a different URL as supporting your desired API Version.
    Check for None client to indicate an error, so you can bail.'''
    # Use the GetVersion cache
    # Make sure the client we are talking to speaks the expected AM API (or claims to)
    # What else would this do? See if it is reachable? We'll do that elsewhere
    cver, message = self._get_this_api_version(client)
    if isinstance(cver, str):
        # Some AMs report the version as a string; normalize to int for comparisons below
        self.logger.warn("AM %s reported a string API version %s", client.url, cver)
        cver = int(cver)
    configver = self.opts.api_version
    if cver and cver == configver:
        # AM speaks the requested version - use it as-is
        return (cver, client, None)
    elif not cver:
        # Could not determine any API version for this AM: connection/cert trouble.
        self.logger.debug("_checkValidClient got message %s", message)
        # Trim the error message down to just the interesting suffix
        if "Operation timed out" in message:
            message = message[message.find("Operation timed out"):]
        elif "Unknown socket error" in message:
            message = message[message.find("Unknown socket error"):]
        elif "Server does not trust" in message:
            message = message[message.find("Server does not trust"):]
        elif "Your user certificate" in message:
            message = message[message.find("Your user certificate"):]
        self.logger.debug("Got no api_version from getversion at %s? %s" % (client.url, message))
        msg = "Error contacting %s: %s" % (client.url, message)
        self.logger.warn(msg)
        if not self.opts.devmode:
            self.logger.warn("... skipping this aggregate")
            return (0, None, msg + " ... skipped this aggregate")
        else:
            self.logger.warn("... but continuing with requested version and client")
            return (configver, client, msg + " ... but continued with requested version and client")
    # This AM doesn't speak the desired API version - see if there's an alternate
    svers, message = self._get_api_versions(client)
    if svers:
        if str(configver) in svers:
            # AM advertises the requested version at a different URL
            msg = "Requested API version %d, but AM %s uses version %d. Same aggregate talks API v%d at a different URL: %s" % (configver, client.url, cver, configver, svers[str(configver)])
            self.logger.warn(msg)
            # do a makeclient with the corrected URL and return that client?
            if not self.opts.devmode:
                try:
                    newclient = make_client(svers[str(configver)], self.framework, self.opts)
                except Exception as e:
                    self.logger.debug(" - but that URL appears invalid: '%s'" % e)
                    self.logger.warn(" -- Cannot connect to that URL, skipping this aggregate")
                    retmsg = "Skipped AM %s: it claims to speak API v%d at a broken URL (%s)." % (client.url, configver, svers[str(configver)])
                    return (configver, None, retmsg)
                newclient.urn = client.urn # Wrong urn?
                newclient.nick = _lookupAggNick(self, newclient.url)
                if newclient.nick:
                    if self.opts.devmode:
                        newclient.str = "%s (%s)" % (newclient.nick, newclient.url)
                    else:
                        newclient.str = newclient.nick
                else:
                    newclient.str = newclient.url
                # Recurse on the alternate URL to confirm it really speaks configver
                (ver, c, msg2) = self._checkValidClient(newclient)
                # BUG FIX: test c for None BEFORE dereferencing c.url; the recursive
                # call returns c=None on failure, which previously raised AttributeError here
                if c is not None and ver == configver and c.url == newclient.url:
                    self.logger.info("Switching AM URL to match requested version")
                    return (ver, c, "Switched AM URL from %s to %s to speak AM API v%d as requested" % (client.url, c.url, configver))
                else:
                    self.logger.warn("... skipping this aggregate - failed to get a connection to the AM URL with the right version")
                    return (configver, None, "Skipped AM %s: failed to get a connection to %s which supports APIv%d as requested" % (client.url, newclient.url, configver))
            else:
                self.logger.warn("... but continuing with requested version and client")
                return (configver, client, msg + ", but continued with URL and version as requested")
        else:
            # AM advertises some versions, but not the requested one
            if len(svers.keys()) == 1:
                msg = "Requested API version %d, but AM %s only speaks version %d. Try running Omni with -V%d." % (configver, client.url, cver, cver)
                retmsg = msg
            else:
                msg = "Requested API version %d, but AM %s uses version %d. This aggregate does not talk your requested version. It advertises: %s. Try running Omni with -V<one of the advertised versions>." % (configver, client.url, cver, pprint.pformat(svers.keys()))
                retmsg = "Requested API version %d, but AM %s uses version %d. Try running Omni with -V%s" % (configver, client.url, cver, pprint.pformat(svers.keys()))
            self.logger.warn(msg)
            # FIXME: If we're continuing, change api_version to be correct, or we will get errors
            if not self.opts.devmode:
                # self.logger.warn("Changing to use API version %d", cver)
                self.logger.warn("... skipping this aggregate")
                retmsg += " Skipped this aggregate"
                return (cver, None, retmsg)
            else:
                # FIXME: Pick out the max API version supported at this client, and use that?
                self.logger.warn("... but continuing with requested version and client")
                return (configver, client, retmsg + " Continued with URL as requested.")
    else:
        # No version advertisement at all from this AM
        msg = "Requested API version %d, but AM %s advertises only version %d. Try running Omni with -V%d." % (configver, client.url, cver, cver)
        self.logger.warn(msg)
        # FIXME: If we're continuing, change api_version to be correct, or we will get errors
        if not self.opts.devmode:
            # self.logger.warn("Changing to use API version %d", cver)
            self.logger.warn("... skipping this aggregate")
            return (cver, None, msg + " ... skipped this Aggregate")
        else:
            self.logger.warn("... but continuing with requested version and client")
            return (configver, client, msg + " ... but continued with URL as requested")
    #self.logger.warn("... skipping this aggregate")
    #return (cver, None, msg)
    # Shouldn't get here...
    self.logger.warn("Cannot validate client ... skipping this aggregate")
    return (cver, None, ("Could not validate AM %s .. skipped" % client.str))
# End of _checkValidClient
def _maybeGetRSpecFromStruct(self, rspec):
'''RSpec might be string of JSON, in which case extract the
XML out of the struct.'''
if rspec is None:
self._raise_omni_error("RSpec is empty")
if "'geni_rspec'" in rspec or "\"geni_rspec\"" in rspec or '"geni_rspec"' in rspec:
try:
rspecStruct = json.loads(rspec, encoding='ascii', cls=DateTimeAwareJSONDecoder, strict=False)
if rspecStruct and isinstance(rspecStruct, dict) and rspecStruct.has_key('geni_rspec'):
rspec = rspecStruct['geni_rspec']
if rspec is None:
self._raise_omni_error("Malformed RSpec: 'geni_rspec' empty in JSON struct")
except Exception, e:
import traceback
msg = "Failed to read RSpec from JSON text %s: %s" % (rspec[:min(60, len(rspec))], e)
self.logger.debug(traceback.format_exc())
if self.opts.devmode:
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
# If \" in rspec then make that "
rspec = string.replace(rspec, "\"", '"')
# If \n in rspec then remove that
rspec = string.replace(rspec, "\\n", " ")
# rspec = string.replace(rspec, '\n', ' ')
return rspec
def _get_users_arg(self, sliceName):
    '''Get the users argument for SSH public keys to install.
    These keys are used in createsliver, provision, and poa geni_update_users.
    Keys may come from the clearinghouse list of slice members, and from the omni_config 'users' section.
    Commandline options enable/disable each source. The set of users and keys is unioned.'''
    # Return is a list of dicts
    # Each dict has 2 keys: 'urn' and 'keys'
    # 'keys' is a list of strings - the value of the keys
    slice_users = []
    # First get slice members & keys from the CH
    if self.opts.useSliceMembers and not self.opts.noExtraCHCalls:
        self.logger.debug("Getting users and SSH keys from the Clearinghouse list of slice members")
        sliceMembers = []
        mess = None
        try:
            # Return is a list of dicts with 'URN', 'EMAIL', and 'KEYS' (which is a list of keys or None)
            (sliceMembers, mess) = self.framework.get_members_of_slice(sliceName)
            if not sliceMembers:
                self.logger.debug("Got empty sliceMembers list for slice %s: %s", sliceName, mess)
        except Exception, e:
            # Best-effort: a CH failure here must not abort the whole operation
            self.logger.debug("Failed to get list of slice members for slice %s: %s", sliceName, e)
        if sliceMembers:
            for member in sliceMembers:
                # Require both URN and KEYS keys in each member record
                if not (member.has_key('URN') and member.has_key('KEYS')):
                    self.logger.debug("Skipping malformed member %s", member)
                    continue
                found = False
                # Union this member's keys into any existing entry with the same URN
                for user in slice_users:
                    if user['urn'] == member['URN']:
                        found = True
                        if member['KEYS'] is None:
                            self.logger.debug("CH had no keys for member %s", member['URN'])
                            break
                        for mkey in member['KEYS']:
                            # Skip duplicate keys (compare stripped)
                            if mkey.strip() in user['keys']:
                                continue
                            else:
                                user['keys'].append(mkey.strip())
                                self.logger.debug("Adding a CH key for member %s", member['URN'])
                        # Done unioning keys for existing user
                        break
                # Done searching for existing user
                if not found:
                    # First time we see this URN: create a fresh entry
                    nmember = dict()
                    nmember['urn'] = member['URN']
                    nmember['keys'] = []
                    if member['KEYS'] is not None:
                        for key in member['KEYS']:
                            nmember['keys'].append(key.strip())
                    slice_users.append(nmember)
            # Done looping of slice members
        # Done if got sliceMembers
        self.logger.debug("From Clearinghouse got %d users whose SSH keys will be set", len(slice_users))
    # Done block to fetch users from CH
    else:
        if self.opts.useSliceMembers and self.opts.noExtraCHCalls:
            self.logger.debug("Per config not doing extra Clearinghouse calls, including looking up slice members")
        elif not self.opts.useSliceMembers:
            self.logger.debug("Did not request to get slice members' SSH keys")
    if not self.opts.ignoreConfigUsers:
        self.logger.debug("Reading users and keys to install from your omni_config")
        # Copy the user config and read the keys from the files into the structure
        slice_users2 = copy(self.config['users'])
        if len(slice_users) == 0 and len(slice_users2) == 0:
            self.logger.warn("No users defined. No keys will be uploaded to support SSH access.")
            return slice_users
        for user in slice_users2:
            newkeys = []
            required = ['urn', 'keys']
            for req in required:
                #--- Dev vs Exp: allow this in dev mode:
                if not req in user:
                    msg = "%s in omni_config is not specified for user %s" % (req,user)
                    if self.opts.devmode:
                        self.logger.warn(msg)
                    else:
                        self._raise_omni_error(msg)
                #---
            if user.has_key('keys'):
                # 'keys' in the config is a comma-separated list of file paths;
                # read each file's contents as the actual public key
                for key in user['keys'].split(','):
                    try:
                        newkeys.append(file(os.path.expanduser(key.strip())).read().strip())
                    except Exception, exc:
                        self.logger.error("Failed to read user key from %s: %s" %(user['keys'], exc))
                user['keys'] = newkeys
                if len(newkeys) == 0:
                    uStr = ""
                    if user.has_key('urn'):
                        uStr = user['urn']
                    self.logger.warn("Empty keys for user %s", uStr)
                else:
                    uStr = ""
                    if user.has_key('urn'):
                        uStr = "User %s " % user['urn']
                    self.logger.debug("%sNewkeys: %r...", uStr, str(newkeys)[:min(160, len(str(newkeys)))])
            # Now merge this into the list from above
            found = False
            for member in slice_users:
                # Users without a 'urn' match the (at most one) member without a 'urn'
                if not user.has_key('urn'):
                    if not member.has_key('urn'):
                        found = True
                elif member.has_key('urn') and user['urn'] == member['urn']:
                    found = True
                if found:
                    if user.has_key('keys'):
                        if not member.has_key('keys'):
                            member['keys'] = []
                        for key in user['keys']:
                            if key.strip() not in member['keys']:
                                member['keys'].append(key.strip())
                    break
            if not found:
                slice_users.append(user)
        # Done looping over users defined in the omni_config
        self.logger.debug("After reading omni_config, %d users will have SSH keys set", len(slice_users))
    else:
        self.logger.debug("Requested to ignore omni_config users and SSH keys")
    # if len(slice_users) < 1:
    # self.logger.warn("No user keys found to be uploaded")
    return slice_users
# End of _get_users_arg
def _retrieve_value(self, result, message, framework):
    '''Extract ABAC proof and creds from the result if any.
    Then pull the actual value out, checking for errors.
    Returned message includes a string representation of any error code/output.
    Returns (value, message).
    '''
    # Existing code is inconsistent on whether it is if code or elif code.
    # IE is the whole code struct shoved inside the success thing maybe?
    if not result:
        # Falsy result: annotate the message with which flavor of "nothing" we got
        self.logger.debug("Raw result from AM API call was %s?!", result)
        if not message or message.strip() == "":
            message = "(no reason given)"
        if result is None:
            message += " (missing result)"
        elif result == False:
            message += " ('False' result)"
        elif result == 0:
            message += " ('0' result)"
        else:
            message += " (empty result)"
        return (result, message)
    value = result
    # If ABAC return is a dict with proof and the regular return
    if isinstance(result, dict):
        if is_ABAC_framework(framework):
            if 'proof' in result:
                save_proof(framework.abac_log, result['proof'])
                # XXX: may not need to do delete the proof dict entry
                # This was only there for SliverStatus, where the return is already a dict
                del result['proof']
            if 'abac_credentials' in result:
                save_abac_creds(result['abac_credentials'],
                framework.abac_dir)
        # For ListR and CreateS
        if 'manifest' in result:
            value = result['manifest']
        # For Renew, Delete, Shutdown
        elif 'success' in result:
            value = result['success']
        # FIXME Should that be if 'code' or elif 'code'?
        # FIXME: See _check_valid_return_struct
        if 'code' in result and isinstance(result['code'], dict) and 'geni_code' in result['code']:
            # AM API v2+
            if result['code']['geni_code'] == 0:
                # Success: the real payload is under 'value'
                value = result['value']
                if value is None:
                    self.logger.warn("Result claimed success but value is empty!")
                if result['code'].has_key('am_code'):
                    if not message or message.strip() == "":
                        message = "(no reason given)"
                    amtype = ""
                    if result['code'].has_key('am_type'):
                        amtype = result['code']['am_type']
                    message += " (AM return code %s:%d)" % (amtype, result['code']['am_code'])
                # If this is pg then include the pg log urn/url in
                # the message even on success when in debug mode
                # But problem: callers swallow the message if it
                # looks like success. So log this at info.
                # The result here is that this is logged only on
                # success, not on error.
                msg = _get_pg_log(result)
                if not message and msg != "":
                    message = ""
                if msg != "":
                    # # Force this log URL to be logged even if we're at WARN log level? That's noisy
                    # if not self.logger.isEnabledFor(logging.INFO):
                    # self.logger.warn(msg)
                    # else:
                    self.logger.info(msg)
                    # FIXME: This may cause pg_log to be included in result summary even in success
                    # message += msg
            # FIXME: More complete error code handling!
            elif self.opts.raiseErrorOnV2AMAPIError and result['code']['geni_code'] != 0 and self.opts.api_version == 2:
                # Allow scripts to get an Error raised if any
                # single AM returns a failure error code.
                # note it means any other AMs do not get processed
                # FIXME: AMAPIError needs a nice printable string
                self._raise_omni_error(message, AMAPIError, result)
            else:
                # Non-zero geni_code: append the error details and clear the value
                message = _append_geni_error_output(result, message)
                value = None
        else:
            # No code in result
            if self.opts.api_version > 1:
                # This happens doing getversion at a v1 AM.
                if isinstance(result, dict) and result.has_key('geni_api') and result['geni_api'] == 1:
                    pass
                else:
                    self.logger.warn("Result had no code!")
    else:
        # Not a dict response. Value is result in itself
        if self.opts.api_version > 1:
            self.logger.warn("Result was not a dict!")
    return (value, message)
# End of _retrieve_value
def _args_to_slicecred(self, args, num_args, methodname, otherargstring=""):
    '''Confirm got the specified number of arguments. First arg is taken as slice name.
    Try to get the slice credential. Check it for expiration and print the expiration date.
    Raise an OmniError on error, unless in devmode, when we just log a warning.
    Returns (name, urn, slice_cred, retVal, slice_exp).
    '''
    #- pull slice name
    #- get urn
    #- get slice_cred
    #- check expiration
    #- get printout of expiration
    #- if orca_id reset urn
    #- return name, urn, slice_cred, retVal, slice_exp
    #users: SliverStatus, CreateSliver, Describe, renewSliver, DeleteSliver,
    # Callers that expect no args get sentinel values back
    if num_args < 1:
        return ("", "", "", "", datetime.datetime.max)
    # If we had no args or not enough
    if len(args) == 0 or len(args) < num_args or (len(args) >=1 and (args[0] == None or args[0].strip() == "")):
        msg = '%s requires arg of slice name %s' % (methodname, otherargstring)
        if self.opts.devmode:
            self.logger.warn(msg + ", but continuing...")
            # Only bail out with sentinels if the slice name itself is missing
            if len(args) == 0 or (len(args) >=1 and (args[0] == None or args[0].strip() == "")):
                return ("", "", "", "", datetime.datetime.max)
        else:
            self._raise_omni_error(msg)
    name = args[0]
    # FIXME: catch errors getting slice URN to give prettier error msg?
    urn = self.framework.slice_name_to_urn(name)
    # Get a slice cred, handle it being None
    (slice_cred, message) = _get_slice_cred(self, urn)
    # Unwrap the slice cred if it is wrapped and this is an API < 3
    if self.opts.api_version < 3 and slice_cred is not None:
        slice_cred = get_cred_xml(slice_cred)
        if slice_cred is None:
            message = "No valid SFA slice credential returned"
    if slice_cred is None:
        msg = 'Cannot do %s for %s: Could not get slice credential: %s' % (methodname, urn, message)
        if self.opts.devmode:
            # In devmode continue with an empty credential
            slice_cred = ""
            self.logger.warn(msg + ", but continuing....")
        else:
            self._raise_omni_error(msg, NoSliceCredError)
    # FIXME: Check that the returned slice_cred is actually for the given URN?
    # Or maybe do that in _get_slice_cred?
    # Check for an expired slice
    slice_exp = None
    expd = True
    if not self.opts.devmode or slice_cred != "":
        expd, slice_exp = self._has_slice_expired(slice_cred)
    if slice_exp is None:
        slice_exp = datetime.datetime.min
    if expd:
        msg = 'Cannot do %s for slice %s: Slice has expired at %s' % (methodname, urn, slice_exp.isoformat())
        if self.opts.devmode:
            self.logger.warn(msg + ", but continuing...")
        else:
            self._raise_omni_error(msg)
    retVal = ""
    if not self.opts.devmode or slice_cred != "":
        retVal = _print_slice_expiration(self, urn, slice_cred) + "\n"
    if self.opts.orca_slice_id:
        # ORCA slices are addressed by their ORCA id instead of the URN
        self.logger.info('Using ORCA slice id %r', self.opts.orca_slice_id)
        urn = self.opts.orca_slice_id
    return name, urn, slice_cred, retVal, slice_exp
# End of _args_to_slicecred
def _raise_omni_error( self, msg, err=OmniError, triple=None ):
    '''Log msg (with the optional result triple appended) and raise it as err.'''
    log_msg = msg
    if triple is not None:
        log_msg += " " + str(triple)
    self.logger.error( log_msg )
    if triple is None:
        raise err(msg)
    else:
        raise err(msg, triple)
def _has_slice_expired(self, sliceCred):
"""Return (boolean, expiration datetime) whether given slicecred (string) has expired)"""
if sliceCred is None:
return (True, None)
sliceexp = credutils.get_cred_exp(self.logger, sliceCred)
sliceexp = naiveUTC(sliceexp)
now = datetime.datetime.utcnow()
if sliceexp <= now:
return (True, sliceexp)
return (False, sliceexp)
def _getclients(self):
"""Create XML-RPC clients for each aggregate (from commandline,
else from config file, else from framework)
Return them as a sequence.
Each client has a urn and url. See handler_utils._listaggregates for details.
"""
if self.clients is not None:
return (self.clients, "")
self.clients = []
self.numOrigClients = 0
(aggs, message) = _listaggregates(self)
if aggs == {} and message != "":
self.logger.warn('No aggregates found: %s', message)
return (self.clients, message)
if message == "From CH":
self.logger.info("Acting on all aggregates from the clearinghouse - this may take time")
for (urn, url) in aggs.items():
client = make_client(url, self.framework, self.opts)
client.urn = urn
client.nick = _lookupAggNick(self, url)
clstr = client.url
if client.nick:
if self.opts.devmode:
clstr = "%s (%s)" % (client.nick, client.url)
else:
clstr = client.nick
client.str = clstr
self.clients.append(client)
self.numOrigClients = len(self.clients)
return (self.clients, message)
def _build_urns(self, slice_urn):
'''Build up the URNs argument, using given slice URN and the option sliver-urn, if present.
Only gather sliver URNs if they are valid.
If no sliver URNs supplied, list is the slice URN.
If sliver URNs were supplied but all invalid, raise an error.
Return the urns list for the arg, plus a separate list of the valid slivers.'''
urns = list()
slivers = list()
# FIXME: Check that all URNs are for same AM?
if self.opts.slivers and len(self.opts.slivers) > 0:
for sliver in self.opts.slivers:
if not urn_util.is_valid_urn_bytype(sliver, 'sliver', self.logger):
self.logger.warn("Supplied sliver URN %s - not a valid sliver URN.", sliver)
if self.opts.devmode:
urns.append(sliver)
slivers.append(sliver)
else:
self.logger.warn("... skipping")
else:
# self.logger.debug("Adding sliver URN %s", sliver)
urns.append(sliver)
slivers.append(sliver)
if len(urns) == 0:
# Error - got no slivers to operate on
msg = "No valid sliver URNs found, from %d supplied." % len(self.opts.slivers)
if self.opts.devmode:
self.logger.warn(msg)
else:
self._raise_omni_error(msg)
elif len(urns) == 0:
urns.append(slice_urn)
return urns, slivers
# slicename included just to pass on to datetimeFromString
def _build_options(self, op, slicename, options):
    '''Add geni_best_effort and geni_end_time and geni_start_time to options if supplied, applicable.
    Also merges in geni_cancelled, geni_speaking_for, geni_extend_alap, and any
    name/value pairs from a JSON options file. Returns the (possibly new) options dict,
    or None for API v1 calls other than ListResources.'''
    # API v1 takes no options argument except on ListResources
    if self.opts.api_version == 1 and op != 'ListResources':
        return None
    if not options or options is None:
        options = {}
    if self.opts.api_version >= 3 and self.opts.geni_end_time:
        # geni_end_time applies to Allocate/Provision/Update (or anything in devmode)
        if op in ('Allocate', 'Provision', 'Update') or self.opts.devmode:
            if self.opts.devmode and not op in ('Allocate', 'Provision', 'Update'):
                self.logger.warn("Got geni_end_time for method %s but using anyhow", op)
            time = datetime.datetime.max
            try:
                # noSec=True so that fractional seconds are dropped
                (time, time_with_tz, time_string) = self._datetimeFromString(self.opts.geni_end_time, name=slicename, noSec=True)
                options["geni_end_time"] = time_string
            except Exception, exc:
                msg = 'Couldnt parse geni_end_time from %s: %r' % (self.opts.geni_end_time, exc)
                self.logger.warn(msg)
                if self.opts.devmode:
                    # In devmode send the unparsed string through unchanged
                    self.logger.info(" ... passing raw geni_end_time")
                    options["geni_end_time"] = self.opts.geni_end_time
    if self.opts.api_version >= 3 and self.opts.geni_start_time:
        # geni_start_time only applies to Allocate (or anything in devmode)
        if op in ('Allocate') or self.opts.devmode:
            if self.opts.devmode and not op in ('Allocate'):
                self.logger.warn("Got geni_start_time for method %s but using anyhow", op)
            time = datetime.datetime.min
            try:
                # noSec=True so that fractional seconds are dropped
                (time, time_with_tz, time_string) = self._datetimeFromString(self.opts.geni_start_time, name=slicename, noSec=True)
                options['geni_start_time'] = time_string
            except Exception, exc:
                msg = 'Couldnt parse geni_start_time from %s: %r' % (self.opts.geni_start_time, exc)
                self.logger.warn(msg)
                if self.opts.devmode:
                    self.logger.info(" ... passing raw geni_start_time")
                    options["geni_start_time"] = self.opts.geni_start_time
    if self.opts.api_version >= 3 and self.opts.geni_best_effort:
        # FIXME: What about Describe? Status?
        if op in ('Provision', 'Renew', 'Delete', 'PerformOperationalAction', 'Cancel'):
            options["geni_best_effort"] = self.opts.geni_best_effort
        elif self.opts.devmode:
            self.logger.warn("Got geni_best_effort for method %s but using anyhow", op)
            options["geni_best_effort"] = self.opts.geni_best_effort
    # For Update. See http://groups.geni.net/geni/wiki/GAPI_AM_API_DRAFT/Adopted#ChangestoDescribe
    if self.opts.api_version >= 3 and self.opts.cancelled and op == 'Describe':
        options["geni_cancelled"] = self.opts.cancelled
    elif self.opts.devmode and self.opts.cancelled:
        self.logger.warn("Got cancelled option for method %s but using anhow", op)
        options["geni_cancelled"] = self.opts.cancelled
    # To support Speaks For, allow specifying the URN of the user
    # the tool is speaking for.
    if self.opts.speaksfor:
        options["geni_speaking_for"] = self.opts.speaksfor
    if self.opts.api_version > 1 and self.opts.alap:
        # Renew "as long as possible" instead of failing on too-late dates
        if op in ('Renew', 'RenewSliver'):
            options["geni_extend_alap"] = self.opts.alap
        elif self.opts.devmode:
            self.logger.warn("Got geni_extend_alap option for method %s that doesn't take it, but using anyhow", op)
            options["geni_extend_alap"] = self.opts.alap
    # To support all the methods that take arbitrary options,
    # allow specifying a JSON format file that specifies
    # name/value pairs, with values of various types.
    # Note that options here may over-ride other options.
    # Sample options file content:
    #{
    # "option_name_1": "value",
    # "option_name_2": {"complicated_dict" : 37},
    # "option_name_3": 67
    #}
    if self.opts.optionsfile:
        if not (os.path.exists(self.opts.optionsfile) and os.path.getsize(self.opts.optionsfile) > 0):
            msg = "Options file %s doesn't exist or is not readable" % self.opts.optionsfile
            if self.opts.devmode:
                self.logger.warn(msg)
            else:
                self._raise_omni_error(msg)
        try:
            optionsStruct = None
            with open(self.opts.optionsfile, 'r') as optsfp:
                # , encoding='ascii', cls=DateTimeAwareJSONDecoder, strict=False)
                optionsStruct = json.load(optsfp)
            self.logger.debug("options read from file: %s", optionsStruct)
            if optionsStruct and isinstance(optionsStruct, dict) and len(optionsStruct.keys()) > 0:
                # File options override anything set above
                for name, value in optionsStruct.iteritems():
                    self.logger.debug("Adding option %s=%s", name, value)
                    options[name] = value
        except Exception, e:
            import traceback
            msg = "Failed to read options from JSON-format file %s: %s" % (self.opts.optionsfile, e)
            self.logger.debug(traceback.format_exc())
            if self.opts.devmode:
                self.logger.warn(msg)
            else:
                self._raise_omni_error(msg)
    return options
def _getSliverResultList(self, resultValue):
'''Pull the list of sliver-specific results from the input'''
# resultValue could be a list of dicts with keys geni_sliver_urn and geni_error (Delete, poa, Renew)
# OR dict containing the key geni_slivers, which is then the above list (Status, Provision, Describe
# Note allocate does not return the geni_error key - otherwise it is like status/provision)
if not resultValue:
self.logger.debug("Result value empty")
return list()
if isinstance(resultValue, dict):
if resultValue.has_key('geni_slivers'):
resultValue = resultValue['geni_slivers']
else:
self.logger.debug("Result value had no 'geni_slivers' key")
return list()
if not isinstance(resultValue, list) or len(resultValue) == 0:
self.logger.debug("Result value not a list or empty")
return list()
return resultValue
def _getSliverStatuses(self, resultValue):
'''Summarize the allocation and operational statuses in a list of 2 hashes by state name'''
op_statuses = dict()
alloc_statuses = dict()
resultValue = self._getSliverResultList(resultValue)
if len(resultValue) == 0:
self.logger.debug("Result value not a list or empty")
for sliver in resultValue:
sliverUrn = ''
if not isinstance(sliver, dict):
self.logger.debug("entry in result list was not a dict")
continue
if not sliver.has_key('geni_sliver_urn') or str(sliver['geni_sliver_urn']).strip() == "":
self.logger.debug("entry in result had no 'geni_sliver_urn'")
else:
sliverUrn = sliver['geni_sliver_urn']
if not sliver.has_key('geni_allocation_status') or str(sliver['geni_allocation_status']).strip() == "":
self.logger.debug("Sliver %s had no allocation status", sliverUrn)
else:
aStat = sliver['geni_allocation_status']
if aStat in alloc_statuses:
alloc_statuses[aStat] += 1
else:
alloc_statuses[aStat] = 1
if not sliver.has_key('geni_operational_status') or str(sliver['geni_operational_status']).strip() == "":
self.logger.debug("Sliver %s had no operational status", sliverUrn)
else:
oStat = sliver['geni_operational_status']
if oStat in op_statuses:
op_statuses[oStat] += 1
else:
op_statuses[oStat] = 1
return (alloc_statuses, op_statuses)
def _didSliversFail(self, resultValue):
'''Take a result value, and return a dict of slivers that had a geni_error: URN->geni_error'''
# Used by Describe, Renew, Provision, Status, PerformOperationalAction, Delete
# sliverFails = self._didSliversFail(realresult)
# for sliver in sliverFails.keys():
# self.logger.warn("Sliver %s reported error: %s", sliver, sliverFails[sliver])
# # FIXME: Add to retVal?
# # Then add fact that sliverFails is not empty to test on whether the call succeded overall or not
result = dict()
resultValue = self._getSliverResultList(resultValue)
if len(resultValue) == 0:
self.logger.debug("Result value not a list or empty")
return result
for sliver in resultValue:
if not isinstance(sliver, dict):
self.logger.debug("entry in result list was not a dict")
continue
if not sliver.has_key('geni_sliver_urn') or str(sliver['geni_sliver_urn']).strip() == "":
self.logger.debug("entry in result had no 'geni_sliver_urn'")
continue
# sliver['geni_error'] = 'testing' # TESTING CODE
if sliver.has_key('geni_error') and sliver['geni_error'] is not None and str(sliver['geni_error']).strip() != "":
self.logger.debug("Sliver %s had error %s", sliver['geni_sliver_urn'], sliver['geni_error'])
result[sliver['geni_sliver_urn']] = sliver['geni_error']
return result
def _findMissingSlivers(self, resultValue, requestedSlivers):
'''Return list of sliver URNs in requested list but with no entry in resultValue'''
# Used by Describe, Renew, Provision, Status, PerformOperationalAction, Delete
# missingSlivers = self._findMissingSlivers(realresult, slivers)
# if len(missingSlivers) > 0:
# self.logger.warn("%d slivers from request missing in result!?", len(missingSlivers))
# self.logger.debug("%s", missingSlivers)
# # Then add missingSlivers being non-empty to test for overall success
result = list()
if not requestedSlivers or len(requestedSlivers) == 0:
return result
resultValue = self._getSliverResultList(resultValue)
if len(resultValue) == 0:
self.logger.debug("Result value not a list or empty")
return result
retSlivers = list()
# get URNs from resultValue
for sliver in resultValue:
if not isinstance(sliver, dict):
self.logger.debug("entry in result list was not a dict")
continue
if not sliver.has_key('geni_sliver_urn') or str(sliver['geni_sliver_urn']).strip() == "":
self.logger.debug("entry in result had no 'geni_sliver_urn'")
continue
retSlivers.append(sliver['geni_sliver_urn'])
for request in requestedSlivers:
if not request or str(request).strip() == "":
continue
# if request not in resultValue, then add it to the return
if request not in retSlivers:
result.append(request)
return result
def _getSliverExpirations(self, resultValue, requestedExpiration=None):
'''Get any slivers with a listed expiration different than the supplied date.
If supplied is None, then gets all sliver expirationtimes.
Return is a dict(sliverURN)->expiration'''
# Called by Renew, Allocate(requested=None), Provision(requested=None)
# (orderedDates, sliverExps) = self._getSliverExpirations(realresult, requestedExpiration/None)
# None case
# if len(orderedDates) == 1:
# self.logger.info("All slivers expire on %s", orderedDates[0])
# elif len(orderedDates) == 2:
# self.logger.info("%d slivers expire on %s, the rest (%d) on %s", len(sliverExps[orderedDates[0]]), orderedDates[0], len(sliverExps[orderedDates[0]]), orderedDates[1])
# else:
# self.logger.info("%d slivers expire on %s, %d on %s, and others later", len(sliverExps[orderedDates[0]]), orderedDates[0], len(sliverExps[orderedDates[0]]), orderedDates[1])
# retVal += " First sliver expiration: %s" % orderedDates[0]
# Renew/specific time case
# (orderedDates, sliverExps) = self._getSliverExpirations(realresult, requestedExpiration/None)
# if len(orderedDates) == 1 and orderedDates[0] == requestedExpiration:
# self.logger.info("All slivers expire as requested on %s", requestedExpiration)
# elif len(orderedDates) == 1:
# self.logger.warn("Slivers expire on %s, not as requested %s", orderedDates[0], requestedExpiration)
# else:
# firstTime = None
# firstCount = 0
# if sliverExps.has_key(requestedExpiration):
# expectedCount = sliverExps[requestedExpiration]
# else:
# expectedCount = 0
# for time in orderedDates:
# if time == requestedExpiration:
# continue
# firstTime = time
# firstCount = len(sliverExps[time])
# break
# self.logger.warn("Slivers do not all expire as requested: %d as requested (%s), but %d expire on %s, and others at %d other times", expectedCount, requestedExpiration, firstCount, firstTime, len(orderedDates) - 2)
if requestedExpiration is None:
requestedExpiration = datetime.datetime.max
result = dict()
resultValue = self._getSliverResultList(resultValue)
if len(resultValue) == 0:
self.logger.debug("Result value not a list or empty")
return [], result
for sliver in resultValue:
if not isinstance(sliver, dict):
self.logger.debug("entry in result list was not a dict")
continue
if not sliver.has_key('geni_sliver_urn') or str(sliver['geni_sliver_urn']).strip() == "":
self.logger.debug("entry in result had no 'geni_sliver_urn'")
continue
if not sliver.has_key('geni_expires'):
self.logger.debug("Sliver %s missing 'geni_expires'", sliver['geni_sliver_urn'])
continue
sliver_expires = sliver['geni_expires']
if isinstance(sliver_expires, str):
(sliver_expires, sliver_expires_with_tz, timestring) = self._datetimeFromString(sliver_expires)
if requestedExpiration != datetime.datetime.max and sliver_expires != requestedExpiration:
self.logger.warn("Sliver %s doesn't expire when requested! Expires at %r, not at %r", sliver['geni_sliver_urn'], sliver['geni_expires'], requestedExpiration.isoformat())
if sliver_expires not in result.keys():
thisTime = list()
result[sliver_expires] = thisTime
result[sliver_expires].append(sliver['geni_sliver_urn'])
orderedDates = result.keys()
orderedDates.sort()
return (orderedDates, result)
def _getSliverAllocStates(self, resultValue, expectedState=None):
    '''Get the Allocation state of slivers if the state is not the expected one, or all
    states if the expected arg is omitted.
    Return is a dict of sliverURN->actual allocation state.'''
    # Called by Allocate, Provision, Delete:
    # badSlivers = self._getSliverAllocStates(realresult, 'geni_allocated'/'geni_provisioned')
    # for sliver in badSlivers.keys():
    #     self.logger.warn("Sliver %s in wrong state! Expected %s, got %s", sliver, 'geni_allocated'/'geni_provisioned', badSlivers[sliver])
    # FIXME: Put that in return value?
    result = dict()
    if not resultValue:
        return result
    resultValue = self._getSliverResultList(resultValue)
    if len(resultValue) == 0:
        self.logger.debug("Result value not a list or empty")
        return result
    for sliver in resultValue:
        if not isinstance(sliver, dict):
            self.logger.debug("entry in result list was not a dict")
            continue
        # 'in' instead of dict.has_key (deprecated; removed in Python 3)
        if 'geni_sliver_urn' not in sliver or str(sliver['geni_sliver_urn']).strip() == "":
            self.logger.debug("entry in result had no 'geni_sliver_urn'")
            continue
        if 'geni_allocation_status' not in sliver:
            self.logger.debug("Sliver %s missing 'geni_allocation_status'", sliver['geni_sliver_urn'])
            result[sliver['geni_sliver_urn']] = ""
            # BUG FIX: previously execution fell through to read the missing
            # 'geni_allocation_status' key, raising KeyError. Skip this sliver.
            continue
        if sliver['geni_allocation_status'] != expectedState:
            result[sliver['geni_sliver_urn']] = sliver['geni_allocation_status']
    return result
# name arg: if present then we assume you are trying to
# renew/create slivers with the given time - so raise an error if
# the time is invalid
# When noSec is true, fractional seconds are trimmed from the parsed time. Avoid problems at PG servers.
def _datetimeFromString(self, dateString, slice_exp = None, name=None, noSec=False):
    '''Get time, time_with_tz, time_string from the given string. Log/etc appropriately
    if given a slice expiration to limit by.
    If given a slice name or slice expiration, insist that the given time is a valid
    time for requesting sliver expirations.
    Generally, use time_with_tz for comparisons and time_string to print or send in API Call.

    :param dateString: the date/time string to parse (may be None; in devmode parsing is still attempted)
    :param slice_exp: optional slice-expiration datetime; the parsed time must not exceed it
    :param name: optional slice name; when present, a time at/before "now" is an error (outside devmode)
    :param noSec: when True, trim fractional seconds from the parsed time (avoids problems at PG servers)
    :return: (naive UTC datetime, tz-aware UTC datetime, RFC3339 time string)
    '''
    # Default to the far future when nothing parses.
    time = datetime.datetime.max
    try:
        if dateString is not None or self.opts.devmode:
            time = dateutil.parser.parse(dateString, tzinfos=tzd)
            if noSec:
                time2 = time.replace(microsecond=0)
                if (time2 != time):
                    self.logger.debug("Trimmed fractional seconds from %s to get %s", dateString, time2)
                    time = time2
    except Exception, exc:
        msg = "Couldn't parse time from '%s': %s" % (dateString, exc)
        if self.opts.devmode:
            # devmode: keep going with datetime.max
            self.logger.warn(msg)
        else:
            self._raise_omni_error(msg)
    # Convert to naive UTC time if necessary for ease of comparison
    try:
        time = naiveUTC(time)
    except Exception, exc:
        if self.opts.devmode:
            pass
        else:
            self.logger.warn("Failed to convert '%s' to naive UTC: %r", dateString, exc)
            raise
    if slice_exp:
        # Compare requested time with slice expiration time
        if not name:
            name = "<unspecified>"
        if time > slice_exp:
            msg = 'Cannot request or renew sliver(s) in %s until %s UTC because it is after the slice expiration time %s UTC' % (name, time, slice_exp)
            if self.opts.devmode:
                self.logger.warn(msg + ", but continuing...")
            else:
                self._raise_omni_error(msg)
        else:
            self.logger.debug('Slice expires at %s UTC, at or after requested time %s UTC' % (slice_exp, time))
    if time <= datetime.datetime.utcnow():
        if name is not None and not self.opts.devmode:
            # Syseng ticket 3011: User typo means their sliver expires.
            # Instead raise an error
            self._raise_omni_error("Cannot request or renew sliver(s) in %s to now or the past (%s UTC <= %s UTC)" % (name, time, datetime.datetime.utcnow()))
#            self.logger.info('Sliver(s) in %s will be set to expire now' % name)
#            time = datetime.datetime.utcnow()
        elif name is not None and self.opts.devmode:
            self.logger.warn("Will request or renew sliver(s) in %s to now or the past (%s UTC <= %s UTC)" % (name, time, datetime.datetime.utcnow()))
    # Add UTC TZ, to have an RFC3339 compliant datetime, per the AM API
    time_with_tz = time.replace(tzinfo=dateutil.tz.tzutc())
    # Note that the time arg includes UTC offset as needed
    time_string = time_with_tz.isoformat()
    if self.opts.no_tz:
        # The timezone causes an error in older sfa
        # implementations as deployed in mesoscale GENI. Strip
        # off the timezone if the user specifies --no-tz
        self.logger.info('Removing timezone at user request (--no-tz)')
        time_string = time_with_tz.replace(tzinfo=None).isoformat()
    return time, time_with_tz, time_string
# end of datetimeFromString
def _maybe_add_creds_from_files(self, creds):
    '''Return the given credential list (created if None), extended with any
    credentials loaded from files named via the --cred commandline option.'''
    if creds is None:
        creds = []
    # Nothing to add unless --cred was supplied at least once.
    extra_files = self.opts.cred
    if not extra_files:
        return creds
    for credfile in extra_files:
        # _load_cred returns the credential wrapped as needed, or a false value.
        # FIXME: Wrapping code needs updates to mark speaksfor?
        loaded = _load_cred(self, credfile)
        if not loaded:
            continue
        self.logger.info("Adding credential %s to arguments", credfile)
        creds.append(loaded)
    return creds
def _getURNForClient(self, client):
    '''Return the aggregate URN for the given client connection.

    Tries, in order: the URN stored on the client object; the aggregate's
    GetVersion advertisement ('geni_urn', or 'urn' for SFA AMs); and finally
    a clearinghouse lookup by URL (skipped under --noExtraCHCalls).
    Returns None if client or client.url is missing; may return a URN that
    is not valid if no better source answered.'''
    if client is None or client.url is None:
        return None
    agg_urn = client.urn
    if not urn_util.is_valid_urn(agg_urn):
        # Check if get_version has a geni_urn and use that?
        (gvurn, gvmess) = self._get_getversion_key(client, 'geni_urn', helper=True)
        (gvuurn, gvmess) = self._get_getversion_key(client, 'urn', helper=True) # For SFA AMs
        if urn_util.is_valid_urn(gvurn):
            agg_urn = gvurn
        elif urn_util.is_valid_urn(gvuurn):
            agg_urn = gvuurn
        elif not self.opts.noExtraCHCalls:
            # Else, ask the CH
            try:
                turn = self.framework.lookup_agg_urn_by_url(client.url)
                if urn_util.is_valid_urn(turn):
                    return turn
            except Exception, e:
                # Best effort: fall through and return whatever we have
                self.logger.debug("Error asking CH for URN to match URL %s: %s", client.url, e)
        else:
            self.logger.debug("Didn't look up AM urn at CH per commandline option")
    return agg_urn
# End of AMHandler
def make_client(url, framework, opts):
    """ Create an xmlrpc client, skipping the client cert if not opts.ssl"""
    warnprefix = "WARN: "
    err = validate_url(url)
    if err is not None:
        # Prefer the framework's logger when it has one.
        logger = framework.logger if hasattr(framework, 'logger') else logging.getLogger("omni")
        if err.startswith(warnprefix):
            # A "WARN: "-prefixed result is non-fatal: log it and continue.
            logger.warn(err[len(warnprefix):])
        else:
            logger.error(err)
            raise OmniError(err)
    if opts.ssl:
        tmp_client = xmlrpcclient.make_client(url, framework.key, framework.cert, opts.verbosessl, opts.ssltimeout)
    else:
        tmp_client = xmlrpcclient.make_client(url, None, None)
    # Tag the client with bookkeeping attributes used elsewhere.
    tmp_client.url = str(url)
    tmp_client.urn = ""
    tmp_client.nick = None
    return tmp_client
def _maybe_add_abac_creds(framework, cred):
    '''Construct creds list. If using ABAC then creds are ABAC creds. Else creds are the user cred or slice cred
    as supplied, as normal.'''
    # Start from the ABAC credentials when the framework is ABAC-based,
    # otherwise from an empty list.
    creds = get_abac_creds(framework.abac_dir) if is_ABAC_framework(framework) else []
    if cred:
        creds.append(cred)
    return creds
# FIXME: Use this frequently in experimenter mode, for all API calls
def _check_valid_return_struct(client, resultObj, message, call):
    '''Basic check that any API method returned code/value/output struct,
    producing a message with a proper error message.

    Returns (resultObj, message) when the struct is well-formed and
    geni_code == 0; otherwise (None, message) with message describing
    what was malformed or the aggregate's error output.'''
    if resultObj is None:
        # error
        message = "AM %s failed %s (empty): %s" % (client.str, call, message)
        return (None, message)
    elif not isinstance(resultObj, dict):
        # error
        message = "AM %s failed %s (returned %s): %s" % (client.str, call, resultObj, message)
        return (None, message)
    elif 'value' not in resultObj:
        message = "AM %s failed %s (no value: %s): %s" % (client.str, call, resultObj, message)
        return (None, message)
    elif 'code' not in resultObj:
        message = "AM %s failed %s (no code: %s): %s" % (client.str, call, resultObj, message)
        return (None, message)
    elif not isinstance(resultObj['code'], dict) or 'geni_code' not in resultObj['code']:
        # Robustness: a non-dict 'code' previously crashed (has_key on a
        # non-dict); now it is reported like a missing geni_code.
        message = "AM %s failed %s (no geni_code: %s): %s" % (client.str, call, resultObj, message)
        # error
        return (None, message)
    elif resultObj['code']['geni_code'] != 0:
        # error
        # This next line is experimenter-only maybe?
        message = "AM %s failed %s: %s" % (client.str, call, _append_geni_error_output(resultObj, message))
        return (None, message)
    else:
        return (resultObj, message)
# FIXMEFIXME: Use this lots places
# FIXME: How factor this for Dev/Exp?
def _append_geni_error_output(retStruct, message):
    '''Add to given error message the code and output if code != 0.

    Builds a parenthesized summary from retStruct['code'] (geni_code,
    am_type/am_code), retStruct['output'], and any ProtoGENI log pointer,
    and appends it to message (or returns it alone if message was empty).'''
    # If return is a dict
    # 'in' instead of dict.has_key (deprecated; removed in Python 3)
    if isinstance(retStruct, dict) and 'code' in retStruct:
        message2 = ""
        if isinstance(retStruct['code'], int):
            # A bare int where the API requires a dict of codes
            if retStruct['code'] != 0:
                message2 = "Malformed error from Aggregate: code " + str(retStruct['code'])
        elif isinstance(retStruct['code'], dict):
            if 'geni_code' in retStruct['code'] and retStruct['code']['geni_code'] != 0:
                message2 = "Error from Aggregate: code " + str(retStruct['code']['geni_code'])
            amType = ""
            if 'am_type' in retStruct['code']:
                amType = retStruct['code']['am_type']
            if 'am_code' in retStruct['code'] and retStruct['code']['am_code'] != 0 and retStruct['code']['am_code'] is not None and str(retStruct['code']['am_code']).strip() != "":
                if message2 != "":
                    if not message2.endswith('.'):
                        message2 += '.'
                    message2 += " "
                message2 += "%s AM code: %s" % (amType, str(retStruct['code']['am_code']))
        if 'output' in retStruct and retStruct['output'] is not None and str(retStruct['output']).strip() != "":
            message2 += ": %s" % retStruct['output']
        # Append any PG log urn/url - this shows up in Result Summary
        # on errors
        message2 += _get_pg_log(retStruct)
        if message2 != "":
            if not message2.endswith('.'):
                message2 += '.'
        if message is not None and message.strip() != "" and message2 != "":
            if not message2.endswith('.'):
                message2 += '.'
            message += " (%s)" % message2
        else:
            message = message2
    return message
def _get_pg_log(retStruct):
    '''Pull out the PG log URN and URL, if present.

    Returns a parenthesized string naming the ProtoGENI error URL (preferred)
    or error-log URN from retStruct['code'], or "" for non-protogeni results.'''
    msg = ""
    # 'in' instead of dict.has_key (deprecated; removed in Python 3)
    if 'code' in retStruct and isinstance(retStruct['code'], dict) and 'am_type' in retStruct['code'] and retStruct['code']['am_type'] == 'protogeni':
        if 'protogeni_error_url' in retStruct['code']:
            msg += " (PG log url - look here for details on any failures: %s)" % retStruct['code']['protogeni_error_url']
        elif 'protogeni_error_log' in retStruct['code']:
            msg = " (PG log urn: %s)" % retStruct['code']['protogeni_error_log']
    return msg
|
plantigrade/geni-tools
|
src/gcf/omnilib/amhandler.py
|
Python
|
mit
| 362,512
|
[
"ORCA"
] |
d6d199e1d17955bbb02f6804b779485c37e0b6189de6e4e44e15161dae2c8987
|
""" :mod: SRM2Storage
=================
.. module: python
:synopsis: SRM v2 interface to StorageElement
"""
# # imports
import os
import re
import time
import errno
from types import StringType, StringTypes, ListType, IntType
from stat import S_ISREG, S_ISDIR, S_IMODE, ST_MODE, ST_SIZE
# # from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Resources.Utilities.Utils import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.File import getSize
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
# # RCSID
__RCSID__ = "$Id$"
class SRM2Storage( StorageBase ):
  """ .. class:: SRM2Storage

  SRM v2 interface to StorageElement using lcg_util and gfal
  """

  def __init__( self, storageName, protocol, path, host, port, spaceToken, wspath ):
    """ c'tor

    :param self: self reference
    :param str storageName: SE name
    :param str protocol: protocol to use
    :param str path: base path for vo files
    :param str host: SE host
    :param int port: port to use to communicate with :host:
    :param str spaceToken: space token
    :param str wspath: location of SRM on :host:
    """
    self.log = gLogger.getSubLogger( "SRM2Storage", True )
    self.isok = True
    # # placeholder for gfal reference, filled in lazily by __importExternals
    self.gfal = None
    # # placeholder for lcg_util reference
    self.lcg_util = None
    # # save c'tor params
    self.protocolName = 'SRM2'
    self.name = storageName
    self.protocol = protocol
    self.path = path
    self.host = host
    self.port = port
    self.wspath = wspath
    self.spaceToken = spaceToken
    self.cwd = self.path
    # # init base class
    StorageBase.__init__( self, self.name, self.path )
    # # stage limit - 12h
    self.stageTimeout = gConfig.getValue( '/Resources/StorageElements/StageTimeout', 12 * 60 * 60 )
    # # 1 file timeout
    self.fileTimeout = gConfig.getValue( '/Resources/StorageElements/FileTimeout', 30 )
    # # nb of surls per gfal call
    self.filesPerCall = gConfig.getValue( '/Resources/StorageElements/FilesPerCall', 20 )
    # # gfal timeout
    self.gfalTimeout = gConfig.getValue( "/Resources/StorageElements/GFAL_Timeout", 100 )
    # # gfal long timeout
    self.gfalLongTimeOut = gConfig.getValue( "/Resources/StorageElements/GFAL_LongTimeout", 1200 )
    # # gfal retry on errno.ECONN
    self.gfalRetry = gConfig.getValue( "/Resources/StorageElements/GFAL_Retry", 3 )
    # # set checksum type, by default this is 0 (GFAL_CKSM_NONE)
    self.checksumType = gConfig.getValue( "/Resources/StorageElements/ChecksumType", 0 )
    # enum gfal_cksm_type, all in lcg_util
    # GFAL_CKSM_NONE = 0,
    # GFAL_CKSM_CRC32,
    # GFAL_CKSM_ADLER32,
    # GFAL_CKSM_MD5,
    # GFAL_CKSM_SHA1
    # GFAL_CKSM_NULL = 0
    self.checksumTypes = { None : 0, "CRC32" : 1, "ADLER32" : 2,
                           "MD5" : 3, "SHA1" : 4, "NONE" : 0, "NULL" : 0 }
    if self.checksumType:
      if str( self.checksumType ).upper() in self.checksumTypes:
        gLogger.debug( "SRM2Storage: will use %s checksum check" % self.checksumType )
        # str() here for consistency with the membership test above, so a
        # non-string config value cannot pass the test and then crash on .upper()
        self.checksumType = self.checksumTypes[ str( self.checksumType ).upper() ]
      else:
        # BUG FIX: the format string had a %s placeholder but no argument,
        # so the warning printed a literal '%s' instead of the bad value
        gLogger.warn( "SRM2Storage: unknown checksum type %s, checksum check disabled" % self.checksumType )
        # # GFAL_CKSM_NONE
        self.checksumType = 0
    else:
      # # invert and get name
      self.log.debug( "SRM2Storage: will use %s checksum" % dict( zip( self.checksumTypes.values(),
                                                                       self.checksumTypes.keys() ) )[self.checksumType] )
    # setting some variables for use with lcg_utils
    self.nobdii = 1
    self.defaulttype = 2
    self.voName = None
    ret = getProxyInfo( disableVOMS = True )
    if ret['OK'] and 'group' in ret['Value']:
      self.voName = getVOForGroup( ret['Value']['group'] )
    self.verbose = 0
    self.conf_file = 'ignored'
    self.insecure = 0
    self.defaultLocalProtocols = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
    self.MAX_SINGLE_STREAM_SIZE = 1024 * 1024 * 10  # 10 MB ???
    self.MIN_BANDWIDTH = 0.5 * ( 1024 * 1024 )  # 0.5 MB/s ???
def __importExternals( self ):
  """ Import lcg_util plus gfalthr (preferred) or plain gfal (fallback).

  Caches the modules on self.lcg_util / self.gfal so repeated calls are no-ops.

  :param self: self reference
  :return: S_OK, or S_ERROR if lcg_util (or both gfal flavours) cannot be imported
  """
  # Already imported on an earlier call - nothing to do.
  if ( self.lcg_util ) and ( self.gfal ):
    return S_OK()
  # # get lcg_util
  try:
    import lcg_util
    self.log.debug( "Using lcg_util version %s from %s" % ( lcg_util.lcg_util_version(),
                                                            lcg_util.__file__ ) )
  except ImportError, error:
    errStr = "__importExternals: Failed to import lcg_util"
    gLogger.exception( errStr, "", error )
    return S_ERROR( errStr )
  # # and gfalthr
  try:
    import gfalthr as gfal
    self.log.debug( 'Using gfalthr version %s from %s' % ( gfal.gfal_version(),
                                                           gfal.__file__ ) )
  except ImportError, error:
    self.log.warn( "__importExternals: Failed to import gfalthr: %s." % error )
    # # so gfal maybe?
    try:
      import gfal
      self.log.debug( "Using gfal version %s from %s" % ( gfal.gfal_version(),
                                                          gfal.__file__ ) )
    except ImportError, error:
      errStr = "__importExternals: Failed to import gfal"
      gLogger.exception( errStr, "", error )
      return S_ERROR( errStr )
  self.lcg_util = lcg_util
  self.gfal = gfal
  return S_OK()
################################################################################
#
# The methods below are for manipulating the client
#
################################################################################
def resetWorkingDirectory( self ):
  """ Set the current working directory back to the storage base path.

  :param self: self reference
  """
  self.cwd = self.path
def changeDirectory( self, directory ):
  """ cd to :directory: (appended to the current working directory)

  :param self: self reference
  :param str directory: dir path, relative; any leading slashes are stripped
  """
  # Unconditional lstrip is equivalent to the old "if directory[0] == '/'"
  # guard for non-empty input, and also survives an empty string
  # (which previously raised IndexError).
  directory = directory.lstrip( '/' )
  self.cwd = '%s/%s' % ( self.cwd, directory )
def getCurrentURL( self, fileName ):
  """ Obtain the current file URL from the current working directory and the filename

  :param self: self reference
  :param str fileName: path on storage
  :return: S_OK( full SURL string ) or S_ERROR when the URL cannot be formatted
  """
  # # strip leading / if fileName arg is present
  fileName = fileName.lstrip( "/" ) if fileName else fileName
  try:
    fullUrl = "%s://%s:%s%s%s/%s" % ( self.protocol, self.host, self.port, self.wspath, self.cwd, fileName )
    # Drop the trailing slash left over when fileName is empty
    fullUrl = fullUrl.rstrip( "/" )
    return S_OK( fullUrl )
  except TypeError, error:
    return S_ERROR( "Failed to create URL %s" % error )
def isPfnForProtocol( self, pfn ):
  """ Tell whether PFN :pfn: uses this storage's protocol.

  :param self: self reference
  :param str pfn: PFN
  :return: S_OK( bool ) or the pfnparse error
  """
  parsed = pfnparse( pfn )
  if not parsed['OK']:
    return parsed
  return S_OK( parsed['Value']['Protocol'] == self.protocol )
def getProtocolPfn( self, pfnDict, withPort ):
  """ construct SURL using :self.host:, :self.protocol: and optionally :self.port: and :self.wspath:

  :param self: self reference
  :param dict pfnDict: pfn dict (mutated in place)
  :param bool withPort: include port and web-service path information
  """
  # For srm2 keep the file name and path
  pfnDict['Protocol'] = self.protocol
  pfnDict['Host'] = self.host
  # Anchor the path under this storage's base path if it is not already.
  if not pfnDict['Path'].startswith( self.path ):
    pfnDict['Path'] = os.path.join( self.path, pfnDict['Path'].strip( '/' ) )
  pfnDict['Port'] = self.port if withPort else ''
  pfnDict['WSUrl'] = self.wspath if withPort else ''
  return pfnunparse( pfnDict )
################################################################################
#
# The methods below are URL manipulation methods
#
################################################################################
def getPFNBase( self, withPort = False ):
  """ This will get the pfn base. This is then appended with the LFN in DIRAC convention.

  :param self: self reference
  :param bool withPort: flag to include port
  """
  if withPort:
    base = 'srm://%s:%s%s' % ( self.host, self.port, self.path )
  else:
    base = 'srm://%s%s' % ( self.host, self.path )
  return S_OK( base )
def getUrl( self, path, withPort = True ):
  """ get SRM PFN for :path: with optional port info

  :param self: self reference
  :param str path: file path
  :param bool withPort: toggle port info
  """
  parsed = pfnparse( path )
  if not parsed["OK"]:
    self.log.error( "getUrl: %s" % parsed["Message"] )
    return parsed
  pfnDict = parsed['Value']
  # Anchor the path under this storage's base path if needed.
  if not pfnDict['Path'].startswith( self.path ):
    pfnDict['Path'] = os.path.join( self.path, pfnDict['Path'].strip( '/' ) )
  pfnDict['Protocol'] = 'srm'
  pfnDict['Host'] = self.host
  pfnDict['Port'] = self.port if withPort else ''
  pfnDict['WSUrl'] = self.wspath if withPort else ''
  return pfnunparse( pfnDict )
def getParameters( self ):
  """ gets all the storage specific parameters pass when instantiating the storage

  :param self: self reference
  """
  parameters = {}
  parameters["StorageName"] = self.name
  parameters["ProtocolName"] = self.protocolName
  parameters["Protocol"] = self.protocol
  parameters["Host"] = self.host
  parameters["Path"] = self.path
  parameters["Port"] = self.port
  parameters["SpaceToken"] = self.spaceToken
  parameters["WSUrl"] = self.wspath
  return S_OK( parameters )
#############################################################
#
# These are the methods for directory manipulation
#
######################################################################
#
# This has to be updated once the new gfal_makedir() becomes available
# TODO: isn't it there? when somebody made above comment?
#
def createDirectory( self, path ):
  """ mkdir -p path on storage

  :param self: self reference
  :param str path: path (or list/dict of paths) to create
  """
  urls = checkArgumentFormat( path )
  if not urls['OK']:
    return urls
  urls = urls['Value']
  successful = {}
  failed = {}
  self.log.debug( "createDirectory: Attempting to create %s directories." % len( urls ) )
  for url in urls:
    # Trailing slashes are irrelevant for mkdir.
    res = self.__makeDirs( url.rstrip( '/' ) )
    if not res['OK']:
      self.log.error( "createDirectory: Failed to create directory on storage.",
                      "\n%s: \n%s" % ( url, res['Message'] ) )
      failed[url] = res['Message']
    else:
      self.log.debug( "createDirectory: Successfully created directory on storage: %s" % url )
      successful[url] = True
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __makeDir( self, path ):
  """ Create a directory by uploading (and then removing) a small marker file.

  :param self: self reference
  :param str path: directory to create
  """
  # Reusable one-byte source file in the local temp area.
  tmpRoot = os.environ.get( 'TMPDIR', os.environ.get( 'TMP', '/tmp' ) )
  srcFile = os.path.join( tmpRoot, 'dirac_directory' )
  if not os.path.exists( srcFile ):
    with open( srcFile, 'w' ) as dfile:
      dfile.write( " " )
  # Timestamped destination name avoids collisions between concurrent calls.
  destFile = os.path.join( path, 'dirac_directory.%s' % time.time() )
  res = self.__putFile( srcFile, destFile, 0, checkExists = False )
  if res['OK']:
    # The upload created the directory; drop the marker file again.
    self.__executeOperation( destFile, 'removeFile' )
  return res
def __makeDirs( self, path ):
  """ Recursively create :path: and any missing parent directories.

  :param self: self reference
  :param str path: dir name
  """
  res = self.__executeOperation( path, 'exists' )
  if not res['OK']:
    return res
  if res['Value']:
    # Already there - nothing to do.
    return S_OK()
  # Make sure the parent exists first, recursing as needed.
  parent = os.path.dirname( path )
  res = self.__executeOperation( parent, 'exists' )
  if not res['OK']:
    return res
  if not res['Value']:
    res = self.__makeDirs( parent )
    if not res['OK']:
      return res
  return self.__makeDir( path )
################################################################################
#
# The methods below use the new generic methods for executing operations
#
################################################################################
def removeFile( self, path ):
  """ rm path on storage

  :param self: self reference
  :param str path: file path (or list/dict of paths)
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by SURL
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "removeFile: Performing the removal of %s file(s)" % len( urls ) )
  wrapped = self.__gfaldeletesurls_wrapper( urls )
  if not wrapped["OK"]:
    self.log.error( "removeFile: %s" % wrapped["Message"] )
    return wrapped
  wrapped = wrapped['Value']
  failed = wrapped['Failed']
  successful = {}
  for entry in wrapped['AllResults']:
    # Entries without a surl carry no usable result.
    if not entry.get( 'surl' ):
      continue
    pathSURL = self.getUrl( entry['surl'] )
    if not pathSURL["OK"]:
      self.log.error( "removeFile: %s" % pathSURL["Message"] )
      failed[ entry['surl'] ] = pathSURL["Message"]
      continue
    pathSURL = pathSURL['Value']
    status = entry['status']
    if status == 0:
      self.log.debug( "removeFile: Successfully removed file: %s" % pathSURL )
      successful[pathSURL] = True
    elif status == 2:
      # This is the case where the file doesn't exist.
      self.log.debug( "removeFile: File did not exist, successfully removed: %s" % pathSURL )
      successful[pathSURL] = True
    else:
      errStr = "removeFile: Failed to remove file."
      errMessage = entry['ErrorMessage']
      self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
      failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getTransportURL( self, path, protocols = False ):
  """ obtain the tURLs for the supplied path and protocols

  :param self: self reference
  :param str path: path on storage
  :param mixed protocols: protocols to use (string, list, or False for the defaults)
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by SURL
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  if not protocols:
    protocols = self.__getProtocols()
    if not protocols['OK']:
      return protocols
    listProtocols = protocols['Value']
  elif isinstance( protocols, StringTypes ):
    # isinstance with StringTypes also accepts unicode strings (as delivered
    # by XML-RPC); the old "type( protocols ) == StringType" rejected them.
    listProtocols = [protocols]
  elif isinstance( protocols, ListType ):
    listProtocols = protocols
  else:
    return S_ERROR( "getTransportURL: Must supply desired protocols to this plug-in." )
  self.log.debug( "getTransportURL: Obtaining tURLs for %s file(s)." % len( urls ) )
  resDict = self.__gfalturlsfromsurls_wrapper( urls, listProtocols )
  if not resDict["OK"]:
    self.log.error( "getTransportURL: %s" % resDict["Message"] )
    return resDict
  resDict = resDict['Value']
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "getTransportURL: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      # gfal status: 0 = OK, 2 = no such file, anything else = error.
      if urlDict['status'] == 0:
        self.log.debug( "getTransportURL: Obtained tURL for file. %s" % pathSURL )
        successful[pathSURL] = urlDict['turl']
      elif urlDict['status'] == 2:
        errMessage = "getTransportURL: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "getTransportURL: Failed to obtain turls."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def prestageFile( self, path, lifetime = 86400 ):
  """ Issue prestage request for file

  gfal status codes handled below: 0 = request issued, 1 = already staged,
  22 = request queued, 2 = file does not exist.

  :param self: self reference
  :param str path: PFN path
  :param int lifetime: prestage lifetime in seconds (default 24h)
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by SURL;
           Successful values are the SRM request IDs
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "prestageFile: Attempting to issue stage requests for %s file(s)." % len( urls ) )
  resDict = self.__gfal_prestage_wrapper( urls, lifetime )
  if not resDict["OK"]:
    self.log.error( "prestageFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "prestageFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        self.log.debug( "prestageFile: Issued stage request for file %s." % pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 1:
        self.log.debug( "prestageFile: File found to be already staged.", pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 22:
        self.log.debug( "prestageFile: Stage request for file %s queued.", pathSURL )
        successful[pathSURL] = urlDict['SRMReqID']
      elif urlDict['status'] == 2:
        errMessage = "prestageFile: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "prestageFile: Failed issue stage request."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def prestageFileStatus( self, path ):
  """ Monitor prestage request for files

  gfal status codes handled below: 1 = staged, 0 = not yet staged,
  2 = file does not exist.

  :param self: self reference
  :param str path: PFN path (or list/dict of paths)
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by SURL;
           Successful values are True (staged) / False (not staged)
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "prestageFileStatus: Attempting to get status "
                  "of stage requests for %s file(s)." % len( urls ) )
  resDict = self.__gfal_prestagestatus_wrapper( urls )
  if not resDict["OK"]:
    self.log.error( "prestageFileStatus: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  allResults = resDict['AllResults']
  successful = {}
  for urlDict in allResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "prestageFileStatus: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 1:
        self.log.debug( "SRM2Storage.prestageFileStatus: File found to be staged %s." % pathSURL )
        successful[pathSURL] = True
      elif urlDict['status'] == 0:
        self.log.debug( "SRM2Storage.prestageFileStatus: File not staged %s." % pathSURL )
        successful[pathSURL] = False
      elif urlDict['status'] == 2:
        errMessage = "SRM2Storage.prestageFileStatus: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "SRM2Storage.prestageFileStatus: Failed get prestage status."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getFileMetadata( self, path ):
  """ Get metadata associated to the file

  :param self: self reference
  :param str path: path (or list/dict of paths) to query
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by the
           caller-supplied path; Successful values are stat-like dicts.
           Returns S_ERROR outright if a result entry carries no surl.
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = {}
  failed = {}
  # Map each SURL back to the caller-supplied path, so the results can be
  # keyed on the original input rather than the expanded SURL.
  for url in res['Value']:
    pathSURL = self.getUrl( url )
    if not pathSURL['OK']:
      self.log.error( "getFileMetadata: %s" % pathSURL["Message"] )
      failed[ url ] = pathSURL["Message"]
    else:
      urls[pathSURL['Value'] ] = url
  self.log.debug( "getFileMetadata: Obtaining metadata for %s file(s)." % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "getFileMetadata: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed.update( resDict['Failed'] )
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      # Get back the input value for that surl
      path = urls[self.getUrl( urlDict['surl'] )['Value']]
      if urlDict['status'] == 0:
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['File']:
          successful[path] = statDict
        else:
          errStr = "getFileMetadata: Supplied path is not a file."
          self.log.error( errStr, path )
          failed[path] = errStr
      elif urlDict['status'] == 2:
        errMessage = "getFileMetadata: File does not exist."
        self.log.error( errMessage, path )
        failed[path] = errMessage
      else:
        errStr = "SRM2Storage.getFileMetadata: Failed to get file metadata."
        errMessage = "%s: %s" % ( path, urlDict['ErrorMessage'] )
        self.log.error( errStr, errMessage )
        failed[path] = "%s %s" % ( errStr, urlDict['ErrorMessage'] )
    else:
      errStr = "getFileMetadata: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def isFile( self, path ):
  """Check if the given path exists and it is a file

  :param self: self reference
  :param str path: path (or list/dict of paths) to check
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by SURL;
           Successful values are True (file) / False (exists, not a file).
           Returns S_ERROR outright if a result entry carries no surl.
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "isFile: Checking whether %s path(s) are file(s)." % len( urls ) )
  resDict = self.__gfal_ls_wrapper( urls, 0 )
  if not resDict["OK"]:
    self.log.error( "isFile: %s" % resDict["Message"] )
    return resDict
  resDict = resDict["Value"]
  failed = resDict['Failed']
  listOfResults = resDict['AllResults']
  successful = {}
  for urlDict in listOfResults:
    if urlDict.get( 'surl' ):
      pathSURL = self.getUrl( urlDict['surl'] )
      if not pathSURL["OK"]:
        self.log.error( "isFile: %s" % pathSURL["Message"] )
        failed[ urlDict['surl'] ] = pathSURL["Message"]
        continue
      pathSURL = pathSURL['Value']
      if urlDict['status'] == 0:
        statDict = self.__parse_file_metadata( urlDict )
        if statDict['File']:
          successful[pathSURL] = True
        else:
          self.log.debug( "isFile: Path is not a file: %s" % pathSURL )
          successful[pathSURL] = False
      elif urlDict['status'] == 2:
        errMessage = "isFile: File does not exist."
        self.log.error( errMessage, pathSURL )
        failed[pathSURL] = errMessage
      else:
        errStr = "isFile: Failed to get file metadata."
        errMessage = urlDict['ErrorMessage']
        self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
        failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    else:
      errStr = "isFile: Returned element does not contain surl."
      self.log.fatal( errStr, self.name )
      return S_ERROR( errStr )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def pinFile( self, path, lifetime = 86400 ):
  """ Pin a file with a given lifetime

  :param self: self reference
  :param str path: PFN path
  :param int lifetime: pin lifetime in seconds (default 24h)
  :return: S_OK( { 'Failed' : ..., 'Successful' : ... } ) keyed by SURL;
           Successful values are the SRM request IDs
  """
  res = checkArgumentFormat( path )
  if not res['OK']:
    return res
  urls = res['Value']
  self.log.debug( "pinFile: Attempting to pin %s file(s)." % len( urls ) )
  wrapped = self.__gfal_pin_wrapper( urls, lifetime )
  if not wrapped["OK"]:
    self.log.error( "pinFile: %s" % wrapped["Message"] )
    return wrapped
  wrapped = wrapped["Value"]
  failed = wrapped['Failed']
  successful = {}
  for entry in wrapped['AllResults']:
    # Entries without a surl carry no usable result.
    if not entry.get( 'surl' ):
      continue
    pathSURL = self.getUrl( entry['surl'] )
    if not pathSURL["OK"]:
      self.log.error( "pinFile: %s" % pathSURL["Message"] )
      failed[ entry['surl'] ] = pathSURL["Message"]
      continue
    pathSURL = pathSURL['Value']
    status = entry['status']
    if status == 0:
      self.log.debug( "pinFile: Issued pin request for file %s." % pathSURL )
      successful[pathSURL] = entry['SRMReqID']
    elif status == 2:
      errMessage = "pinFile: File does not exist."
      self.log.error( errMessage, pathSURL )
      failed[pathSURL] = errMessage
    else:
      errStr = "pinFile: Failed issue pin request."
      errMessage = entry['ErrorMessage']
      self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
      failed[pathSURL] = "%s %s" % ( errStr, errMessage )
  return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def releaseFile( self, path ):
    """ Release a pinned file

    :param self: self reference
    :param str path: PFN path (or list/dict of paths accepted by checkArgumentFormat)
    :return: S_OK( { 'Failed' : { pfn : errorMessage }, 'Successful' : { pfn : SRM request id } } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "releaseFile: Attempting to release %s file(s)." % len( urls ) )
    resDict = self.__gfal_release_wrapper( urls )
    if not resDict["OK"]:
      self.log.error( "releaseFile: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    allResults = resDict['AllResults']
    successful = {}
    for urlDict in allResults:
      # entries without a 'surl' key are skipped (same convention as pinFile)
      if urlDict.get( 'surl' ):
        pathSURL = self.getUrl( urlDict['surl'] )
        if not pathSURL["OK"]:
          self.log.error( "releaseFile: %s" % pathSURL["Message"] )
          failed[ urlDict['surl'] ] = pathSURL["Message"]
          continue
        pathSURL = pathSURL['Value']
        # gfal status 0 == release request accepted, 2 == no such file
        if urlDict['status'] == 0:
          self.log.debug( "releaseFile: Issued release request for file %s." % pathSURL )
          successful[pathSURL] = urlDict['SRMReqID']
        elif urlDict['status'] == 2:
          errMessage = "releaseFile: File does not exist."
          self.log.error( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "releaseFile: Failed issue release request."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( errMessage, pathSURL ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def exists( self, path ):
    """ Check if the given path exists.

    :param self: self reference
    :param str path: path (or list/dict of paths) on the storage
    :return: S_OK( { 'Failed' : { pfn : errorMessage }, 'Successful' : { pfn : bool } } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.exists: Checking the existance of %s path(s)" % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "exists: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        pathSURL = self.getUrl( urlDict["surl"] )
        if not pathSURL["OK"]:
          self.log.error( "SRM2Storage.exists: %s" % pathSURL["Message"] )
          failed[ urlDict["surl"] ] = pathSURL["Message"]
          continue
        pathSURL = pathSURL["Value"]
        # gfal status 0 == path found, 2 == no such path; anything else is an error.
        # Note "does not exist" is a *successful* answer (False), not a failure.
        if urlDict['status'] == 0:
          self.log.debug( "SRM2Storage.exists: Path exists: %s" % pathSURL )
          successful[pathSURL] = True
        elif urlDict['status'] == 2:
          self.log.debug( "SRM2Storage.exists: Path does not exist: %s" % pathSURL )
          successful[pathSURL] = False
        else:
          errStr = "SRM2Storage.exists: Failed to get path metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        # a result element without a surl means the gfal answer is unusable
        errStr = "SRM2Storage.exists: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def getFileSize( self, path ):
    """Get the physical size of the given file

    :param self: self reference
    :param str path: path (or list/dict of paths) on the storage
    :return: S_OK( { 'Failed' : { pfn : errorMessage }, 'Successful' : { pfn : size in B } } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.getFileSize: Obtaining the size of %s file(s)." % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "getFileSize: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        pathSURL = self.getUrl( urlDict['surl'] )
        if not pathSURL["OK"]:
          self.log.verbose( "getFileSize: %s" % pathSURL["Message"] )
          failed[ urlDict['surl'] ] = pathSURL["Message"]
          continue
        pathSURL = pathSURL['Value']
        # gfal status 0 == stat OK, 2 == no such file; anything else is an error
        if urlDict['status'] == 0:
          statDict = self.__parse_file_metadata( urlDict )
          # only plain files have a size; directories are reported as failures here
          if statDict['File']:
            successful[pathSURL] = statDict['Size']
          else:
            errStr = "SRM2Storage.getFileSize: Supplied path is not a file."
            self.log.verbose( errStr, pathSURL )
            failed[pathSURL] = errStr
        elif urlDict['status'] == 2:
          errMessage = "SRM2Storage.getFileSize: File does not exist."
          self.log.verbose( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "SRM2Storage.getFileSize: Failed to get file metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.verbose( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        errStr = "SRM2Storage.getFileSize: Returned element does not contain surl."
        self.log.error( errStr, self.name )
        return S_ERROR( errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putFile( self, path, sourceSize = 0 ):
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for dest_url, src_file in urls.items():
# Create destination directory
res = self.__executeOperation( os.path.dirname( dest_url ), 'createDirectory' )
if not res['OK']:
failed[dest_url] = res['Message']
else:
res = self.__putFile( src_file, dest_url, sourceSize )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def __putFile( self, src_file, dest_url, sourceSize, checkExists = True ):
    """ put :src_file: to :dest_url:

    :param self: self reference
    :param str src_file: file path in local fs, or an srm: url for SE-to-SE replication
    :param str dest_url: destination url on storage
    :param int sourceSize: :src_file: size in B (mandatory when replicating from srm)
    :param bool checkExists: when True, remove any pre-existing destination file first
    :return: S_OK( destination size in B ) on success, S_ERROR otherwise
    """
    if checkExists:
      # Pre-transfer check
      res = self.__executeOperation( dest_url, 'exists' )
      if not res['OK']:
        self.log.debug( "__putFile: Failed to find pre-existance of destination file." )
        return res
      if res['Value']:
        res = self.__executeOperation( dest_url, 'removeFile' )
        if not res['OK']:
          self.log.debug( "__putFile: Failed to remove remote file %s." % dest_url )
        else:
          self.log.debug( "__putFile: Removed remote file %s." % dest_url )
    dsttype = self.defaulttype
    src_spacetokendesc = ''
    dest_spacetokendesc = self.spaceToken
    # an srm: source means third-party replication; otherwise upload from the local fs
    if re.search( 'srm:', src_file ):
      src_url = src_file
      srctype = 2
      if not sourceSize:
        return S_ERROR( "__putFile: For file replication the source file size must be provided." )
    else:
      if not os.path.exists( src_file ):
        errStr = "__putFile: The source local file does not exist."
        self.log.error( errStr, src_file )
        return S_ERROR( errStr )
      sourceSize = getSize( src_file )
      if sourceSize == -1:
        errStr = "__putFile: Failed to get file size."
        self.log.error( errStr, src_file )
        return S_ERROR( errStr )
      src_url = 'file:%s' % src_file
      srctype = 0
      if sourceSize == 0:
        errStr = "__putFile: Source file is zero size."
        self.log.error( errStr, src_file )
        return S_ERROR( errStr )
    # timeout scales with size at the minimal expected bandwidth, plus a fixed margin
    timeout = int( sourceSize / self.MIN_BANDWIDTH + 300 )
    # use parallel streams only for large files
    if sourceSize > self.MAX_SINGLE_STREAM_SIZE:
      nbstreams = 4
    else:
      nbstreams = 1
    self.log.info( "__putFile: Executing transfer of %s to %s using %s streams" % ( src_url, dest_url, nbstreams ) )
    # run the copy in a child process so a hung gfal/lcg call cannot block this one
    res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url,
                      srctype, dsttype, nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
    if not res['OK']:
      # Remove the failed replica, just in case
      result = self.__executeOperation( dest_url, 'removeFile' )
      if result['OK']:
        self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
      else:
        self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
      return res
    res = res['Value']
    if not res['OK']:
      # Remove the failed replica, just in case
      result = self.__executeOperation( dest_url, 'removeFile' )
      if result['OK']:
        self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
      else:
        self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
      return res
    errCode, errStr = res['Value']
    if errCode == 0:
      self.log.info( '__putFile: Successfully put file to storage.' )
      # # checksum check? return!
      if self.checksumType:
        return S_OK( sourceSize )
      # # else compare sizes
      res = self.__executeOperation( dest_url, 'getFileSize' )
      if res['OK']:
        destinationSize = res['Value']
        if sourceSize == destinationSize :
          self.log.debug( "__putFile: Post transfer check successful." )
          return S_OK( destinationSize )
      errorMessage = "__putFile: Source and destination file sizes do not match."
      self.log.error( errorMessage, src_url )
    else:
      errorMessage = "__putFile: Failed to put file to storage."
      if errCode > 0:
        # positive error codes are errno values -- append the system error text
        errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
      self.log.error( errorMessage, errStr )
    # transfer or verification failed: clean up the remnant before reporting the error
    res = self.__executeOperation( dest_url, 'removeFile' )
    if res['OK']:
      self.log.debug( "__putFile: Removed remote file remnant %s." % dest_url )
    else:
      self.log.debug( "__putFile: Unable to remove remote file remnant %s." % dest_url )
    return S_ERROR( errorMessage )
  def __lcg_cp_wrapper( self, src_url, dest_url, srctype, dsttype, nbstreams,
                        timeout, src_spacetokendesc, dest_spacetokendesc ):
    """ lcg_util.lcg_cp wrapper

    Runs the actual copy through the lcg_util C binding and sanity-checks the
    returned ( errCode, errStr ) pair, since a misbehaving binding can return
    malformed values.

    :param self: self reference
    :param str src_url: source SURL
    :param str dest_url: destination SURL
    :param srctype: source SE type
    :param dsttype: destination SE type
    :param int nbstreams: nb of streams used for trasnfer
    :param int timeout: timeout in seconds
    :param str src_spacetokendesc: source space token
    :param str dest_spacetokendesc: destination space token
    :return: S_OK( ( errCode, errStr ) ) or S_ERROR on exception / malformed output
    """
    try:
      errCode, errStr = self.lcg_util.lcg_cp4( src_url,
                                               dest_url,
                                               self.defaulttype,
                                               srctype,
                                               dsttype,
                                               self.nobdii,
                                               self.voName,
                                               nbstreams,
                                               self.conf_file,
                                               self.insecure,
                                               self.verbose,
                                               timeout,
                                               src_spacetokendesc,
                                               dest_spacetokendesc,
                                               self.checksumType )
      # defensive checks: the C binding has been seen returning lists / non-strings
      if type( errCode ) != IntType:
        self.log.error( "__lcg_cp_wrapper: Returned errCode was not an integer",
                        "%s %s" % ( errCode, type( errCode ) ) )
        if type( errCode ) == ListType:
          msg = []
          for err in errCode:
            msg.append( '%s of type %s' % ( err, type( err ) ) )
          self.log.error( "__lcg_cp_wrapper: Returned errCode was List:\n" , "\n".join( msg ) )
        return S_ERROR( "__lcg_cp_wrapper: Returned errCode was not an integer" )
      if type( errStr ) not in StringTypes:
        self.log.error( "__lcg_cp_wrapper: Returned errStr was not a string",
                        "%s %s" % ( errCode, type( errStr ) ) )
        return S_ERROR( "__lcg_cp_wrapper: Returned errStr was not a string" )
      return S_OK( ( errCode, errStr ) )
    except Exception, error:
      self.log.exception( "__lcg_cp_wrapper", "", error )
      return S_ERROR( "Exception while attempting file upload" )
def getFile( self, path, localPath = False ):
""" make a local copy of a storage :path:
:param self: self reference
:param str path: path on storage
:param mixed localPath: if not specified, os.getcwd()
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for src_url in urls:
fileName = os.path.basename( src_url )
if localPath:
dest_file = "%s/%s" % ( localPath, fileName )
else:
dest_file = "%s/%s" % ( os.getcwd(), fileName )
res = self.__getFile( src_url, dest_file )
if res['OK']:
successful[src_url] = res['Value']
else:
failed[src_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def __getFile( self, src_url, dest_file ):
    """ do a real copy of storage file :src_url: to local fs under :dest_file:

    :param self: self reference
    :param str src_url: SE url to cp
    :param str dest_file: local fs path
    :return: S_OK( local size in B ) on success, S_ERROR otherwise
    """
    # make sure the destination directory exists and the target file does not
    if not os.path.exists( os.path.dirname( dest_file ) ):
      os.makedirs( os.path.dirname( dest_file ) )
    if os.path.exists( dest_file ):
      self.log.debug( "__getFile: Local file already exists %s. Removing..." % dest_file )
      os.remove( dest_file )
    srctype = self.defaulttype
    src_spacetokendesc = self.spaceToken
    dsttype = 0
    dest_spacetokendesc = ''
    dest_url = 'file:%s' % dest_file
    # remote size is needed for both the timeout heuristic and the post-transfer check
    res = self.__executeOperation( src_url, 'getFileSize' )
    if not res['OK']:
      return S_ERROR( res['Message'] )
    remoteSize = res['Value']
    # timeout scales with size at the minimal expected bandwidth, plus a fixed margin
    timeout = int( remoteSize / self.MIN_BANDWIDTH * 4 + 300 )
    nbstreams = 1
    self.log.info( "__getFile: Using %d streams" % nbstreams )
    self.log.info( "__getFile: Executing transfer of %s to %s" % ( src_url, dest_url ) )
    # run the copy in a child process so a hung gfal/lcg call cannot block this one
    res = pythonCall( ( timeout + 10 ), self.__lcg_cp_wrapper, src_url, dest_url, srctype, dsttype,
                      nbstreams, timeout, src_spacetokendesc, dest_spacetokendesc )
    if not res['OK']:
      return res
    res = res['Value']
    if not res['OK']:
      return res
    errCode, errStr = res['Value']
    if errCode == 0:
      self.log.debug( '__getFile: Got a file from storage.' )
      localSize = getSize( dest_file )
      if localSize == remoteSize:
        self.log.debug( "__getFile: Post transfer check successful." )
        return S_OK( localSize )
      errorMessage = "__getFile: Source and destination file sizes do not match."
      self.log.error( errorMessage, src_url )
    else:
      errorMessage = "__getFile: Failed to get file from storage."
      if errCode > 0:
        # positive error codes are errno values -- append the system error text
        errStr = "%s %s" % ( errStr, os.strerror( errCode ) )
      self.log.error( errorMessage, errStr )
    # remove the incomplete local copy before reporting the failure
    if os.path.exists( dest_file ):
      self.log.debug( "__getFile: Removing local file %s." % dest_file )
      os.remove( dest_file )
    return S_ERROR( errorMessage )
  def __executeOperation( self, url, method ):
    """ executes the requested :method: with the supplied url

    Unwraps the standard { 'Failed' : ..., 'Successful' : ... } bulk result of a
    single-url call into a plain S_OK( value ) / S_ERROR( message ).

    :param self: self reference
    :param str url: SE url
    :param str method: fcn name
    :return: S_OK with the single-url result, or S_ERROR with its failure reason
    """
    fcn = None
    if hasattr( self, method ) and callable( getattr( self, method ) ):
      fcn = getattr( self, method )
    if not fcn:
      return S_ERROR( "Unable to invoke %s, it isn't a member funtion of SRM2Storage" % method )
    res = fcn( url )
    if not res['OK']:
      return res
    elif url not in res['Value']['Successful']:
      if url not in res['Value']['Failed']:
        # the result may be keyed under a rewritten url (e.g. a normalised SURL);
        # fall back to the first entry of whichever result dict is populated
        if res['Value']['Failed'].values():
          return S_ERROR( res['Value']['Failed'].values()[0] )
        elif res['Value']['Successful'].values():
          return S_OK( res['Value']['Successful'].values()[0] )
        else:
          self.log.error( 'Wrong Return structure', str( res['Value'] ) )
          return S_ERROR( 'Wrong Return structure' )
      return S_ERROR( res['Value']['Failed'][url] )
    return S_OK( res['Value']['Successful'][url] )
############################################################################################
#
# Directory based methods
#
  def isDirectory( self, path ):
    """ isdir on storage path

    :param self: self reference
    :param str path: SE path (or list/dict of paths)
    :return: S_OK( { 'Failed' : { url : errorMessage }, 'Successful' : { url : bool } } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.isDirectory: Checking whether %s path(s) are directory(ies)" % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "isDirectory: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if urlDict.get( 'surl' ):
        dirSURL = self.getUrl( urlDict['surl'] )
        if not dirSURL["OK"]:
          self.log.error( "isDirectory: %s" % dirSURL["Message"] )
          failed[ urlDict['surl'] ] = dirSURL["Message"]
          continue
        dirSURL = dirSURL['Value']
        # gfal status 0 == stat OK, 2 == no such path; anything else is an error.
        # Unlike exists(), a missing path is reported as a failure here.
        if urlDict['status'] == 0:
          statDict = self.__parse_file_metadata( urlDict )
          if statDict['Directory']:
            successful[dirSURL] = True
          else:
            self.log.debug( "SRM2Storage.isDirectory: Path is not a directory: %s" % dirSURL )
            successful[dirSURL] = False
        elif urlDict['status'] == 2:
          self.log.debug( "SRM2Storage.isDirectory: Supplied path does not exist: %s" % dirSURL )
          failed[dirSURL] = 'Directory does not exist'
        else:
          errStr = "SRM2Storage.isDirectory: Failed to get file metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( dirSURL, errMessage ) )
          failed[dirSURL] = "%s %s" % ( errStr, errMessage )
      else:
        errStr = "SRM2Storage.isDirectory: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def getDirectoryMetadata( self, path ):
    """ get the metadata for the directory :path:

    :param self: self reference
    :param str path: SE path (or list/dict of paths)
    :return: S_OK( { 'Failed' : { url : errorMessage }, 'Successful' : { url : metadata dict } } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "getDirectoryMetadata: Attempting to obtain metadata for %s directories." % len( urls ) )
    resDict = self.__gfal_ls_wrapper( urls, 0 )
    if not resDict["OK"]:
      self.log.error( "getDirectoryMetadata: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if "surl" in urlDict and urlDict["surl"]:
        pathSURL = self.getUrl( urlDict['surl'] )
        if not pathSURL["OK"]:
          self.log.error( "getDirectoryMetadata: %s" % pathSURL["Message"] )
          failed[ urlDict['surl'] ] = pathSURL["Message"]
          continue
        pathSURL = pathSURL['Value']
        # gfal status 0 == stat OK, 2 == no such path; anything else is an error
        if urlDict['status'] == 0:
          statDict = self.__parse_file_metadata( urlDict )
          # only directories are valid here; files are reported as failures
          if statDict['Directory']:
            successful[pathSURL] = statDict
          else:
            errStr = "SRM2Storage.getDirectoryMetadata: Supplied path is not a directory."
            self.log.error( errStr, pathSURL )
            failed[pathSURL] = errStr
        elif urlDict['status'] == 2:
          errMessage = "SRM2Storage.getDirectoryMetadata: Directory does not exist."
          self.log.error( errMessage, pathSURL )
          failed[pathSURL] = errMessage
        else:
          errStr = "SRM2Storage.getDirectoryMetadata: Failed to get directory metadata."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        errStr = "SRM2Storage.getDirectoryMetadata: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def getDirectorySize( self, path ):
""" Get the size of the directory on the storage
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "SRM2Storage.getDirectorySize: Attempting to get size of %s directories." % len( urls ) )
res = self.listDirectory( urls )
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for directory, dirDict in res['Value']['Successful'].items():
directorySize = 0
directoryFiles = 0
filesDict = dirDict['Files']
for fileDict in filesDict.itervalues():
directorySize += fileDict['Size']
directoryFiles += 1
self.log.debug( "SRM2Storage.getDirectorySize: Successfully obtained size of %s." % directory )
subDirectories = len( dirDict['SubDirs'] )
successful[directory] = { 'Files' : directoryFiles, 'Size' : directorySize, 'SubDirs' : subDirectories }
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def listDirectory( self, path ):
    """ List the contents of the directory on the storage

    :param self: self reference
    :param str path: SE directory path (or list/dict of paths)
    :return: S_OK( { 'Failed' : ..., 'Successful' : { url : { 'SubDirs' : {...}, 'Files' : {...} } } } )
    """
    res = checkArgumentFormat( path )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.listDirectory: Attempting to list %s directories." % len( urls ) )
    # only genuine directories are passed on to the lsdir call
    res = self.isDirectory( urls )
    if not res['OK']:
      return res
    failed = res['Value']['Failed']
    directories = {}
    for url, isDirectory in res['Value']['Successful'].items():
      if isDirectory:
        directories[url] = False
      else:
        errStr = "SRM2Storage.listDirectory: Directory does not exist."
        self.log.error( errStr, url )
        failed[url] = errStr
    resDict = self.__gfal_lsdir_wrapper( directories )
    if not resDict["OK"]:
      self.log.error( "listDirectory: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed.update( resDict['Failed'] )
    listOfResults = resDict['AllResults']
    successful = {}
    for urlDict in listOfResults:
      if "surl" in urlDict and urlDict["surl"]:
        pathSURL = self.getUrl( urlDict['surl'] )
        if not pathSURL["OK"]:
          self.log.error( "listDirectory: %s" % pathSURL["Message"] )
          failed[ urlDict['surl'] ] = pathSURL["Message"]
          continue
        pathSURL = pathSURL['Value']
        if urlDict['status'] == 0:
          successful[pathSURL] = {}
          self.log.debug( "SRM2Storage.listDirectory: Successfully listed directory %s" % pathSURL )
          subPathDirs = {}
          subPathFiles = {}
          if "subpaths" in urlDict:
            subPaths = urlDict['subpaths']
            # Parse the subpaths for the directory
            for subPathDict in subPaths:
              subPathSURL = self.getUrl( subPathDict['surl'] )['Value']
              # NOTE(review): status 22 entries are only logged and then skipped,
              # like any entry that is neither a file nor a directory -- confirm intended
              if subPathDict['status'] == 22:
                self.log.error( "File found with status 22", subPathDict )
              elif subPathDict['status'] == 0:
                statDict = self.__parse_file_metadata( subPathDict )
                if statDict['File']:
                  subPathFiles[subPathSURL] = statDict
                elif statDict['Directory']:
                  subPathDirs[subPathSURL] = statDict
          # Keep the infomation about this path's subpaths
          successful[pathSURL]['SubDirs'] = subPathDirs
          successful[pathSURL]['Files'] = subPathFiles
        else:
          errStr = "SRM2Storage.listDirectory: Failed to list directory."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
      else:
        errStr = "SRM2Storage.listDirectory: Returned element does not contain surl."
        self.log.fatal( errStr, self.name )
        return S_ERROR( errStr )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def putDirectory( self, path ):
""" cp -R local SE
puts a local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param str path: local fs path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
successful = {}
failed = {}
self.log.debug( "SRM2Storage.putDirectory: Attemping to put %s directories to remote storage." % len( urls ) )
for destDir, sourceDir in urls.items():
res = self.__putDir( sourceDir, destDir )
if res['OK']:
if res['Value']['AllPut']:
self.log.debug( "SRM2Storage.putDirectory: Successfully put directory to remote storage: %s" % destDir )
successful[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "SRM2Storage.putDirectory: Failed to put entire directory to remote storage.", destDir )
failed[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
self.log.error( "SRM2Storage.putDirectory: Completely failed to put directory to remote storage.", destDir )
failed[destDir] = { "Files" : 0, "Size" : 0 }
return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putDir( self, src_directory, dest_directory ):
""" Black magic contained within...
"""
filesPut = 0
sizePut = 0
# Check the local directory exists
if not os.path.isdir( src_directory ):
errStr = "SRM2Storage.__putDir: The supplied directory does not exist."
self.log.error( errStr, src_directory )
return S_ERROR( errStr )
# Get the local directory contents
contents = os.listdir( src_directory )
allSuccessful = True
directoryFiles = {}
for fileName in contents:
localPath = '%s/%s' % ( src_directory, fileName )
remotePath = '%s/%s' % ( dest_directory, fileName )
if not os.path.isdir( localPath ):
directoryFiles[remotePath] = localPath
else:
res = self.__putDir( localPath, remotePath )
if not res['OK']:
errStr = "SRM2Storage.__putDir: Failed to put directory to storage."
self.log.error( errStr, res['Message'] )
else:
if not res['Value']['AllPut']:
pathSuccessful = False
filesPut += res['Value']['Files']
sizePut += res['Value']['Size']
if directoryFiles:
res = self.putFile( directoryFiles )
if not res['OK']:
self.log.error( "SRM2Storage.__putDir: Failed to put files to storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesPut += 1
sizePut += fileSize
if res['Value']['Failed']:
allSuccessful = False
return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut } )
def getDirectory( self, path, localPath = False ):
""" Get a local copy in the current directory of a physical file specified by its path
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
self.log.debug( "SRM2Storage.getDirectory: Attempting to get local copies of %s directories." % len( urls ) )
for src_dir in urls:
dirName = os.path.basename( src_dir )
if localPath:
dest_dir = "%s/%s" % ( localPath, dirName )
else:
dest_dir = "%s/%s" % ( os.getcwd(), dirName )
res = self.__getDir( src_dir, dest_dir )
if res['OK']:
if res['Value']['AllGot']:
self.log.debug( "SRM2Storage.getDirectory: Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "SRM2Storage.getDirectory: Failed to get entire directory.", src_dir )
failed[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
self.log.error( "SRM2Storage.getDirectory: Completely failed to get local copy of directory.", src_dir )
failed[src_dir] = {'Files':0, 'Size':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getDir( self, srcDirectory, destDirectory ):
""" Black magic contained within...
"""
filesGot = 0
sizeGot = 0
# Check the remote directory exists
res = self.__executeOperation( srcDirectory, 'isDirectory' )
if not res['OK']:
self.log.error( "SRM2Storage.__getDir: Failed to find the supplied source directory.", srcDirectory )
return res
if not res['Value']:
errStr = "SRM2Storage.__getDir: The supplied source path is not a directory."
self.log.error( errStr, srcDirectory )
return S_ERROR( errStr )
# Check the local directory exists and create it if not
if not os.path.exists( destDirectory ):
os.makedirs( destDirectory )
# Get the remote directory contents
res = self.__getDirectoryContents( srcDirectory )
if not res['OK']:
errStr = "SRM2Storage.__getDir: Failed to list the source directory."
self.log.error( errStr, srcDirectory )
filesToGet = res['Value']['Files']
subDirs = res['Value']['SubDirs']
allSuccessful = True
res = self.getFile( filesToGet.keys(), destDirectory )
if not res['OK']:
self.log.error( "SRM2Storage.__getDir: Failed to get files from storage.", res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesGot += 1
sizeGot += fileSize
if res['Value']['Failed']:
allSuccessful = False
for subDir in subDirs:
subDirName = os.path.basename( subDir )
localPath = '%s/%s' % ( destDirectory, subDirName )
res = self.__getDir( subDir, localPath )
if res['OK']:
if not res['Value']['AllGot']:
allSuccessful = True
filesGot += res['Value']['Files']
sizeGot += res['Value']['Size']
return S_OK( { 'AllGot' : allSuccessful, 'Files' : filesGot, 'Size' : sizeGot } )
def removeDirectory( self, path, recursive = False ):
""" Remove a directory
"""
if recursive:
return self.__removeDirectoryRecursive( path )
else:
return self.__removeDirectory( path )
  def __removeDirectory( self, directory ):
    """ This function removes the directory on the storage

    :param self: self reference
    :param mixed directory: SE path (or list/dict of paths)
    :return: S_OK( { 'Failed' : { url : errorMessage }, 'Successful' : { url : True } } )
    """
    res = checkArgumentFormat( directory )
    if not res['OK']:
      return res
    urls = res['Value']
    self.log.debug( "SRM2Storage.__removeDirectory: Attempting to remove %s directories." % len( urls ) )
    resDict = self.__gfal_removedir_wrapper( urls )
    if not resDict["OK"]:
      self.log.error( "__removeDirectory: %s" % resDict["Message"] )
      return resDict
    resDict = resDict["Value"]
    failed = resDict['Failed']
    allResults = resDict['AllResults']
    successful = {}
    for urlDict in allResults:
      if "surl" in urlDict:
        pathSURL = urlDict['surl']
        # status 0 == removed; status 2 (missing) also counts as success
        if urlDict['status'] == 0:
          self.log.debug( "__removeDirectory: Successfully removed directory: %s" % pathSURL )
          successful[pathSURL] = True
        elif urlDict['status'] == 2:
          # This is the case where the file doesn't exist.
          self.log.debug( "__removeDirectory: Directory did not exist, sucessfully removed: %s" % pathSURL )
          successful[pathSURL] = True
        else:
          errStr = "removeDirectory: Failed to remove directory."
          errMessage = urlDict['ErrorMessage']
          self.log.error( errStr, "%s: %s" % ( pathSURL, errMessage ) )
          failed[pathSURL] = "%s %s" % ( errStr, errMessage )
    return S_OK( { 'Failed' : failed, 'Successful' : successful } )
  def __removeDirectoryRecursive( self, directory ):
    """ Recursively removes the directory and sub dirs. Repeatedly calls itself to delete recursively.

    :param self: self reference
    :param mixed directory: SE path (or list/dict of paths)
    :return: S_OK( { 'Failed' : { url : counters }, 'Successful' : { url : counters } } )
             where counters is { 'FilesRemoved' : int, 'SizeRemoved' : int }
    """
    res = checkArgumentFormat( directory )
    if not res['OK']:
      return res
    urls = res['Value']
    successful = {}
    failed = {}
    self.log.debug( "SRM2Storage.__removeDirectory: Attempting to recursively remove %s directories." % len( urls ) )
    for directory in urls:
      self.log.debug( "SRM2Storage.removeDirectory: Attempting to remove %s" % directory )
      res = self.__getDirectoryContents( directory )
      # counters accumulate even on partial failure, so callers can report progress
      resDict = {'FilesRemoved':0, 'SizeRemoved':0}
      if not res['OK']:
        failed[directory] = resDict
      else:
        filesToRemove = res['Value']['Files']
        subDirs = res['Value']['SubDirs']
        # Remove all the files in the directory
        res = self.__removeDirectoryFiles( filesToRemove )
        resDict['FilesRemoved'] += res['FilesRemoved']
        resDict['SizeRemoved'] += res['SizeRemoved']
        allFilesRemoved = res['AllRemoved']
        # Remove all the sub-directories
        res = self.__removeSubDirectories( subDirs )
        resDict['FilesRemoved'] += res['FilesRemoved']
        resDict['SizeRemoved'] += res['SizeRemoved']
        allSubDirsRemoved = res['AllRemoved']
        # If all the files and sub-directories are removed then remove the directory
        allRemoved = False
        if allFilesRemoved and allSubDirsRemoved:
          self.log.debug( "SRM2Storage.removeDirectory: Successfully removed all files and sub-directories." )
          res = self.__removeDirectory( directory )
          if res['OK']:
            if directory in res['Value']['Successful']:
              self.log.debug( "SRM2Storage.removeDirectory: Successfully removed the directory %s." % directory )
              allRemoved = True
        # Report the result
        if allRemoved:
          successful[directory] = resDict
        else:
          failed[directory] = resDict
    return S_OK ( { 'Failed' : failed, 'Successful' : successful } )
def __getDirectoryContents( self, directory ):
""" ls of storage element :directory:
:param self: self reference
:param str directory: SE path
"""
directory = directory.rstrip( '/' )
errMessage = "SRM2Storage.__getDirectoryContents: Failed to list directory."
res = self.__executeOperation( directory, 'listDirectory' )
if not res['OK']:
self.log.error( errMessage, res['Message'] )
return S_ERROR( errMessage )
surlsDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
filesToRemove = dict( [ ( url, surlsDict[url]['Size'] ) for url in surlsDict ] )
return S_OK ( { 'Files' : filesToRemove, 'SubDirs' : subDirsDict.keys() } )
def __removeDirectoryFiles( self, filesToRemove ):
""" rm files from SE
:param self: self reference
:param dict filesToRemove: dict with surls as keys
"""
resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
if len( filesToRemove ) > 0:
res = self.removeFile( filesToRemove.keys() )
if res['OK']:
for removedSurl in res['Value']['Successful']:
resDict['FilesRemoved'] += 1
resDict['SizeRemoved'] += filesToRemove[removedSurl]
if res['Value']['Failed']:
resDict['AllRemoved'] = False
self.log.debug( "SRM2Storage.__removeDirectoryFiles:",
"Removed %s files of size %s bytes." % ( resDict['FilesRemoved'], resDict['SizeRemoved'] ) )
return resDict
def __removeSubDirectories( self, subDirectories ):
""" rm -rf sub-directories
:param self: self reference
:param dict subDirectories: dict with surls as keys
"""
resDict = { 'FilesRemoved' : 0, 'SizeRemoved' : 0, 'AllRemoved' : True }
if len( subDirectories ) > 0:
res = self.__removeDirectoryRecursive( subDirectories )
if res['OK']:
for removedSubDir, removedDict in res['Value']['Successful'].items():
resDict['FilesRemoved'] += removedDict['FilesRemoved']
resDict['SizeRemoved'] += removedDict['SizeRemoved']
self.log.debug( "SRM2Storage.__removeSubDirectories:",
"Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
removedDict['SizeRemoved'],
removedSubDir ) )
for removedSubDir, removedDict in res['Value']['Failed'].items():
resDict['FilesRemoved'] += removedDict['FilesRemoved']
resDict['SizeRemoved'] += removedDict['SizeRemoved']
self.log.debug( "SRM2Storage.__removeSubDirectories:",
"Removed %s files of size %s bytes from %s." % ( removedDict['FilesRemoved'],
removedDict['SizeRemoved'],
removedSubDir ) )
if len( res['Value']['Failed'] ) != 0:
resDict['AllRemoved'] = False
return resDict
@staticmethod
def __parse_stat( stat ):
""" get size, ftype and mode from stat struct
:param stat: stat struct
"""
statDict = { 'File' : False, 'Directory' : False }
if S_ISREG( stat[ST_MODE] ):
statDict['File'] = True
statDict['Size'] = stat[ST_SIZE]
if S_ISDIR( stat[ST_MODE] ):
statDict['Directory'] = True
statDict['Mode'] = S_IMODE( stat[ST_MODE] )
return statDict
def __parse_file_metadata( self, urlDict ):
""" parse and save bits and pieces of metadata info
:param self: self reference
:param urlDict: gfal call results
"""
statDict = self.__parse_stat( urlDict['stat'] )
if statDict['File']:
statDict.setdefault( "Checksum", "" )
if "checksum" in urlDict and ( urlDict['checksum'] != '0x' ):
statDict["Checksum"] = urlDict["checksum"]
if 'locality' in urlDict:
urlLocality = urlDict['locality']
if re.search( 'ONLINE', urlLocality ):
statDict['Cached'] = 1
else:
statDict['Cached'] = 0
if re.search( 'NEARLINE', urlLocality ):
statDict['Migrated'] = 1
else:
statDict['Migrated'] = 0
statDict['Lost'] = 0
if re.search( 'LOST', urlLocality ):
statDict['Lost'] = 1
statDict['Unavailable'] = 0
if re.search( 'UNAVAILABLE', urlLocality ):
statDict['Unavailable'] = 1
return statDict
def __getProtocols( self ):
""" returns list of protocols to use at a given site
:warn: priority is given to a protocols list defined in the CS
:param self: self reference
"""
sections = gConfig.getSections( '/Resources/StorageElements/%s/' % ( self.name ) )
if not sections['OK']:
return sections
protocolsList = []
for section in sections['Value']:
path = '/Resources/StorageElements/%s/%s/ProtocolName' % ( self.name, section )
if gConfig.getValue( path, '' ) == self.protocolName:
protPath = '/Resources/StorageElements/%s/%s/ProtocolsList' % ( self.name, section )
siteProtocols = gConfig.getValue( protPath, [] )
if siteProtocols:
self.log.debug( 'Found SE protocols list to override defaults:', ', '.join( siteProtocols, ) )
protocolsList = siteProtocols
if not protocolsList:
self.log.debug( "SRM2Storage.getTransportURL: No protocols provided, using defaults." )
protocolsList = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
if not protocolsList:
return S_ERROR( "SRM2Storage.getTransportURL: No local protocols defined and no defaults found" )
return S_OK( protocolsList )
  def getCurrentStatus(self):
    """ Get the current status (lcg_stmd), needed for RSS

    Queries the space token metadata of this SE through lcg_util.lcg_stmd
    and returns the first metadata record.

    :return: S_OK( space token metadata dict ) or S_ERROR
    """
    res = self.__importExternals()
    if not res['OK']:
      return S_ERROR("Cannot import externals")
    if not self.spaceToken:
      return S_ERROR("Space token not defined for SE")
    #make the endpoint
    endpoint = 'httpg://%s:%s%s' % ( self.host, self.port, self.wspath )
    endpoint = endpoint.replace( '?SFN=', '' )
    # 10 second wrapper timeout around the synchronous lcg_stmd call
    res = pythonCall(10, self.lcg_util.lcg_stmd, self.spaceToken, endpoint, 1, 0)
    if not res['OK']:
      return res
    status, resdict, errmessage = res['Value']
    if status != 0:
      return S_ERROR("lcg_util.lcg_stmd failed: %s" % errmessage)
    return S_OK(resdict[0])
#######################################################################
#
# These methods wrap the gfal functionality with the accounting. All these are based on __gfal_operation_wrapper()
#
#######################################################################
def __gfal_lsdir_wrapper( self, urls ):
""" This is a hack because the structures returned by the different SEs are different
"""
step = 200
gfalDict = {}
gfalDict['defaultsetype'] = 'srmv2'
gfalDict['no_bdii_check'] = 1
gfalDict['srmv2_lslevels'] = 1
gfalDict['srmv2_lscount'] = step
failed = {}
successful = []
for url in urls:
allResults = []
gfalDict['surls'] = [url]
gfalDict['nbfiles'] = 1
gfalDict['timeout'] = self.gfalLongTimeOut
allObtained = False
iteration = 0
while not allObtained:
gfalDict['srmv2_lsoffset'] = iteration * step
iteration += 1
res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
# gDataStoreClient.addRegister( res['AccountingOperation'] )
if not res['OK']:
if re.search( '\[SE\]\[Ls\]\[SRM_FAILURE\]', res['Message'] ):
allObtained = True
else:
failed[url] = res['Message']
else:
results = res['Value']
tempStep = step
if len( results ) == 1:
for result in results:
if 'subpaths' in result:
results = result['subpaths']
tempStep = step - 1
elif re.search( result['surl'], url ):
results = []
allResults.extend( results )
if len( results ) < tempStep:
allObtained = True
successful.append( { 'surl' : url, 'status' : 0, 'subpaths' : allResults } )
# gDataStoreClient.commit()
return S_OK( { "AllResults" : successful, "Failed" : failed } )
  def __gfal_ls_wrapper( self, urls, depth ):
    """ gfal_ls wrapper

    :param self: self reference
    :param dict urls: urls to check (surls as keys)
    :param int depth: srmv2_lslevels (0 or 1)
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    gfalDict['no_bdii_check'] = 1
    gfalDict['srmv2_lslevels'] = depth
    allResults = []
    failed = {}
    # NOTE: `urls` is rebound below to each chunk of at most filesPerCall surls
    listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_ls', gfalDict )
      # gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        # a failed chunk fails every url in it
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfal_prestage_wrapper( self, urls, lifetime ):
    """ gfal_prestage wrapper

    :param self: self refefence
    :param dict urls: urls to prestage (surls as keys)
    :param int lifetime: prestage lifetime
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    gfalDict['no_bdii_check'] = 1
    gfalDict['srmv2_spacetokendesc'] = self.spaceToken
    gfalDict['srmv2_desiredpintime'] = lifetime
    gfalDict['protocols'] = self.defaultLocalProtocols
    allResults = []
    failed = {}
    # NOTE: `urls` is rebound below to each chunk of at most filesPerCall surls
    listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      # prestage requests are asynchronous: use the (longer) stage timeout
      gfalDict['timeout'] = self.stageTimeout
      res = self.__gfal_operation_wrapper( 'gfal_prestage',
                                           gfalDict,
                                           timeout_sendreceive = self.fileTimeout * len( urls ) )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        # a failed chunk fails every url in it
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfalturlsfromsurls_wrapper( self, urls, listProtocols ):
    """ This is a function that can be reused everywhere to perform the gfal_turlsfromsurls

    :param self: self reference
    :param dict urls: surls to resolve into turls (surls as keys)
    :param list listProtocols: transfer protocols to negotiate
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    gfalDict['no_bdii_check'] = 1
    gfalDict['protocols'] = listProtocols
    gfalDict['srmv2_spacetokendesc'] = self.spaceToken
    allResults = []
    failed = {}
    # NOTE: `urls` is rebound below to each chunk of at most filesPerCall surls
    listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_turlsfromsurls', gfalDict )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        # a failed chunk fails every url in it
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfaldeletesurls_wrapper( self, urls ):
    """ This is a function that can be reused everywhere to perform the gfal_deletesurls

    :param self: self reference
    :param dict urls: surls to delete (surls as keys)
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    gfalDict['no_bdii_check'] = 1
    allResults = []
    failed = {}
    # NOTE: `urls` is rebound below to each chunk of at most filesPerCall surls
    listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_deletesurls', gfalDict )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        # a failed chunk fails every url in it
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfal_removedir_wrapper( self, urls ):
    """ This is a function that can be reused everywhere to perform the gfal_removedir

    :param self: self reference
    :param dict urls: directory surls to remove (surls as keys)
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    gfalDict['no_bdii_check'] = 1
    gfalDict['srmv2_spacetokendesc'] = self.spaceToken
    allResults = []
    failed = {}
    # NOTE: `urls` is rebound below to each chunk of at most filesPerCall surls
    listOfLists = breakListIntoChunks( urls.keys(), self.filesPerCall )
    for urls in listOfLists:
      gfalDict['surls'] = urls
      gfalDict['nbfiles'] = len( urls )
      gfalDict['timeout'] = self.fileTimeout * len( urls )
      res = self.__gfal_operation_wrapper( 'gfal_removedir', gfalDict )
      gDataStoreClient.addRegister( res['AccountingOperation'] )
      if not res['OK']:
        # a failed chunk fails every url in it
        for url in urls:
          failed[url] = res['Message']
      else:
        allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfal_pin_wrapper( self, urls, lifetime ):
    """ gfal_pin wrapper

    :param self: self reference
    :param dict urls: dict { url : srmRequestID }
    :param int lifetime: pin lifetime in seconds
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    # NOTE: unlike the other wrappers, no_bdii_check is 0 here
    gfalDict['no_bdii_check'] = 0
    gfalDict['srmv2_spacetokendesc'] = self.spaceToken
    gfalDict['srmv2_desiredpintime'] = lifetime
    allResults = []
    failed = {}
    # group the urls by the SRM request they belong to: gfal can only
    # carry one request id per call
    srmRequestFiles = {}
    for url, srmRequestID in urls.items():
      if srmRequestID not in srmRequestFiles:
        srmRequestFiles[srmRequestID] = []
      srmRequestFiles[srmRequestID].append( url )
    # NOTE: `urls` is rebound to each request's url list, then to each chunk
    for srmRequestID, urls in srmRequestFiles.items():
      listOfLists = breakListIntoChunks( urls, self.filesPerCall )
      for urls in listOfLists:
        gfalDict['surls'] = urls
        gfalDict['nbfiles'] = len( urls )
        gfalDict['timeout'] = self.fileTimeout * len( urls )
        res = self.__gfal_operation_wrapper( 'gfal_pin', gfalDict, srmRequestID = srmRequestID )
        gDataStoreClient.addRegister( res['AccountingOperation'] )
        if not res['OK']:
          # a failed chunk fails every url in it
          for url in urls:
            failed[url] = res['Message']
        else:
          allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfal_prestagestatus_wrapper( self, urls ):
    """ gfal_prestagestatus wrapper

    :param self: self reference
    :param dict urls: dict { url : srmRequestID } (note: the grouping by
        request id is done internally, the input is keyed by url)
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    # NOTE: unlike most other wrappers, no_bdii_check is 0 here
    gfalDict['no_bdii_check'] = 0
    gfalDict['srmv2_spacetokendesc'] = self.spaceToken
    allResults = []
    failed = {}
    # group the urls by the SRM request they belong to: gfal can only
    # carry one request id per call
    srmRequestFiles = {}
    for url, srmRequestID in urls.items():
      if srmRequestID not in srmRequestFiles:
        srmRequestFiles[srmRequestID] = []
      srmRequestFiles[srmRequestID].append( url )
    # NOTE: `urls` is rebound to each request's url list, then to each chunk
    for srmRequestID, urls in srmRequestFiles.items():
      listOfLists = breakListIntoChunks( urls, self.filesPerCall )
      for urls in listOfLists:
        gfalDict['surls'] = urls
        gfalDict['nbfiles'] = len( urls )
        gfalDict['timeout'] = self.fileTimeout * len( urls )
        res = self.__gfal_operation_wrapper( 'gfal_prestagestatus', gfalDict, srmRequestID = srmRequestID )
        gDataStoreClient.addRegister( res['AccountingOperation'] )
        if not res['OK']:
          # a failed chunk fails every url in it
          for url in urls:
            failed[url] = res['Message']
        else:
          allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
  def __gfal_release_wrapper( self, urls ):
    """ gfal_release wrapper

    :param self: self reference
    :param dict urls: dict { url : srmRequestID }
    :return: S_OK( { 'AllResults' : [...], 'Failed' : { url : message } } )
    """
    gfalDict = {}
    gfalDict['defaultsetype'] = 'srmv2'
    # NOTE: unlike most other wrappers, no_bdii_check is 0 here
    gfalDict['no_bdii_check'] = 0
    allResults = []
    failed = {}
    # group the urls by the SRM request they belong to: gfal can only
    # carry one request id per call
    srmRequestFiles = {}
    for url, srmRequestID in urls.items():
      if srmRequestID not in srmRequestFiles:
        srmRequestFiles[srmRequestID] = []
      srmRequestFiles[srmRequestID].append( url )
    # NOTE: `urls` is rebound to each request's url list, then to each chunk
    for srmRequestID, urls in srmRequestFiles.items():
      listOfLists = breakListIntoChunks( urls, self.filesPerCall )
      for urls in listOfLists:
        gfalDict['surls'] = urls
        gfalDict['nbfiles'] = len( urls )
        gfalDict['timeout'] = self.fileTimeout * len( urls )
        res = self.__gfal_operation_wrapper( 'gfal_release', gfalDict, srmRequestID = srmRequestID )
        gDataStoreClient.addRegister( res['AccountingOperation'] )
        if not res['OK']:
          # a failed chunk fails every url in it
          for url in urls:
            failed[url] = res['Message']
        else:
          allResults.extend( res['Value'] )
    # gDataStoreClient.commit()
    return S_OK( { "AllResults" : allResults, "Failed" : failed } )
def __gfal_operation_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
""" gfal fcn call wrapper
:param self: self reference
:param str operation: gfal fcn name
:param dict gfalDict: gfal dict passed to create gfal object
:param srmRequestID: srmRequestID
:param int timeout_sendreceive: gfal sendreceive timeout in seconds
"""
# Create an accounting DataOperation record for each operation
oDataOperation = self.__initialiseAccountingObject( operation, self.name, gfalDict['nbfiles'] )
oDataOperation.setStartTime()
start = time.time()
res = self.__importExternals()
if not res['OK']:
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', 0. )
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
res['AccountingOperation'] = oDataOperation
return res
# # timeout for one gfal_exec call
timeout = gfalDict['timeout'] if not timeout_sendreceive else timeout_sendreceive
# # pythonCall timeout ( const + timeout * ( 2 ** retry )
pyTimeout = 300 + ( timeout * ( 2 ** self.gfalRetry ) )
res = pythonCall( pyTimeout, self.__gfal_wrapper, operation, gfalDict, srmRequestID, timeout_sendreceive )
end = time.time()
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', end - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
res['AccountingOperation'] = oDataOperation
return res
res = res['Value']
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
res['AccountingOperation'] = oDataOperation
return res
def __gfal_wrapper( self, operation, gfalDict, srmRequestID = None, timeout_sendreceive = None ):
""" execute gfal :operation:
1. create gfalObject from gfalDict
2. set srmRequestID
3. call __gfal_exec
4. get gfal ids
5. get gfal results
6. destroy gfal object
:param self: self reference
:param str operation: fcn to call
:param dict gfalDict: gfal config dict
:param srmRequestID: srm request id
:param int timeout_sendrecieve: timeout for gfal send request and recieve results in seconds
"""
gfalObject = self.__create_gfal_object( gfalDict )
if not gfalObject["OK"]:
return gfalObject
gfalObject = gfalObject['Value']
if srmRequestID:
res = self.__gfal_set_ids( gfalObject, srmRequestID )
if not res['OK']:
return res
res = self.__gfal_exec( gfalObject, operation, timeout_sendreceive )
if not res['OK']:
return res
gfalObject = res['Value']
res = self.__gfal_get_ids( gfalObject )
if not res['OK']:
newSRMRequestID = srmRequestID
else:
newSRMRequestID = res['Value']
res = self.__get_results( gfalObject )
if not res['OK']:
return res
resultList = []
pfnRes = res['Value']
for myDict in pfnRes:
myDict['SRMReqID'] = newSRMRequestID
resultList.append( myDict )
self.__destroy_gfal_object( gfalObject )
return S_OK( resultList )
@staticmethod
def __initialiseAccountingObject( operation, se, files ):
""" create DataOperation accounting object
:param str operation: operation performed
:param str se: destination SE name
:param int files: nb of files
"""
import DIRAC
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'gfal'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
#######################################################################
#
# The following methods provide the interaction with gfal functionality
#
#######################################################################
def __create_gfal_object( self, gfalDict ):
""" create gfal object by calling gfal.gfal_init
:param self: self reference
:param dict gfalDict: gfal params dict
"""
self.log.debug( "SRM2Storage.__create_gfal_object: Performing gfal_init." )
errCode, gfalObject, errMessage = self.gfal.gfal_init( gfalDict )
if not errCode == 0:
errStr = "SRM2Storage.__create_gfal_object: Failed to perform gfal_init."
if not errMessage:
errMessage = os.strerror( self.gfal.gfal_get_errno() )
self.log.error( errStr, errMessage )
return S_ERROR( "%s%s" % ( errStr, errMessage ) )
else:
self.log.debug( "SRM2Storage.__create_gfal_object: Successfully performed gfal_init." )
return S_OK( gfalObject )
def __gfal_set_ids( self, gfalObject, srmRequestID ):
""" set :srmRequestID:
:param self: self reference
:param gfalObject: gfal object
:param str srmRequestID: srm request id
"""
self.log.debug( "SRM2Storage.__gfal_set_ids: Performing gfal_set_ids." )
errCode, gfalObject, errMessage = self.gfal.gfal_set_ids( gfalObject, None, 0, str( srmRequestID ) )
if not errCode == 0:
errStr = "SRM2Storage.__gfal_set_ids: Failed to perform gfal_set_ids."
if not errMessage:
errMessage = os.strerror( errCode )
self.log.error( errStr, errMessage )
return S_ERROR( "%s%s" % ( errStr, errMessage ) )
else:
self.log.debug( "SRM2Storage.__gfal_set_ids: Successfully performed gfal_set_ids." )
return S_OK( gfalObject )
  def __gfal_exec( self, gfalObject, method, timeout_sendreceive = None ):
    """
    Execute gfal :method: on :gfalObject:, retrying with a doubled
    sendreceive timeout whenever the call fails with ECOMM.

    In gfal, for every method (synchronous or asynchronous), you can define a sendreceive timeout and a connect timeout.
    The connect timeout sets the maximum amount of time a client accepts to wait before establishing a successful TCP
    connection to SRM (default 60 seconds).
    The sendreceive timeout, allows a client to set the maximum time the send
    of a request to SRM can take (normally all send operations return immediately unless there is no free TCP buffer)
    and the maximum time to receive a reply (a token for example). Default 0, i.e. no timeout.
    The srm timeout for asynchronous requests default to 3600 seconds

    gfal_set_timeout_connect (int value)
    gfal_set_timeout_sendreceive (int value)
    gfal_set_timeout_bdii (int value)
    gfal_set_timeout_srm (int value)

    :param self: self reference
    :param gfalObject: gfal object to execute the method on
    :param str method: name of the gfal function to invoke
    :param int timeout_sendreceive: initial sendreceive timeout in seconds
    :return: S_OK( gfal object ) or S_ERROR
    """
    self.log.debug( "SRM2Storage.__gfal_exec(%s): Starting" % method )
    # resolve the gfal function to call by name
    fcn = None
    if hasattr( self.gfal, method ) and callable( getattr( self.gfal, method ) ):
      fcn = getattr( self.gfal, method )
    if not fcn:
      return S_ERROR( "Unable to invoke %s for gfal, it isn't a member function" % method )
    # # retry
    retry = self.gfalRetry if self.gfalRetry else 1
    # # initial timeout
    timeout = timeout_sendreceive if timeout_sendreceive else self.gfalTimeout
    # # errCode, errMessage, errNo
    errCode, errMessage, errNo = 0, "", 0
    while retry:
      retry -= 1
      self.gfal.gfal_set_timeout_sendreceive( timeout )
      errCode, gfalObject, errMessage = fcn( gfalObject )
      if errCode == -1:
        errNo = self.gfal.gfal_get_errno()
      # communication error: double the timeout and retry; any other
      # outcome (success or a different error) ends the loop
      if errCode == -1 and errNo == errno.ECOMM:
        timeout *= 2
        self.log.debug( "SRM2Storage.__gfal_exec(%s): got ECOMM, extending timeout to %s s" % ( method, timeout ) )
        continue
      else:
        break
    if errCode:
      errStr = "SRM2Storage.__gfal_exec(%s): Execution failed." % method
      if not errMessage:
        errMessage = os.strerror( errNo ) if errNo else "UNKNOWN ERROR"
      self.log.error( errStr, errMessage )
      return S_ERROR( "%s %s" % ( errStr, errMessage ) )
    self.log.debug( "SRM2Storage.__gfal_exec(%s): Successfully invoked." % method )
    return S_OK( gfalObject )
def __get_results( self, gfalObject ):
""" retrive gfal results
:param self: self reference
:param gfalObject: gfal object
"""
self.log.debug( "SRM2Storage.__get_results: Performing gfal_get_results" )
numberOfResults, gfalObject, listOfResults = self.gfal.gfal_get_results( gfalObject )
if numberOfResults <= 0:
errStr = "SRM2Storage.__get_results: Did not obtain results with gfal_get_results."
self.log.error( errStr )
return S_ERROR( errStr )
else:
self.log.debug( "SRM2Storage.__get_results: Retrieved %s results from gfal_get_results." % numberOfResults )
for result in listOfResults:
if result['status'] != 0:
if result['explanation']:
errMessage = result['explanation']
elif result['status'] > 0:
errMessage = os.strerror( result['status'] )
result['ErrorMessage'] = errMessage
return S_OK( listOfResults )
  def __gfal_get_ids( self, gfalObject ):
    """ get srmRequestToken

    :param self: self reference
    :param gfalObject: gfalObject
    :return: S_OK( srm request token ) or S_ERROR when none was returned
    """
    self.log.debug( "SRM2Storage.__gfal_get_ids: Performing gfal_get_ids." )
    # the srm1 request/file ids are legacy and deliberately ignored here
    numberOfResults, gfalObject, _srm1RequestID, _srm1FileIDs, srmRequestToken = self.gfal.gfal_get_ids( gfalObject )
    if numberOfResults <= 0:
      errStr = "SRM2Storage.__gfal_get_ids: Did not obtain SRM request ID."
      self.log.error( errStr )
      return S_ERROR( errStr )
    else:
      self.log.debug( "SRM2Storage.__get_gfal_ids: Retrieved SRM request ID %s." % srmRequestToken )
      return S_OK( srmRequestToken )
  def __destroy_gfal_object( self, gfalObject ):
    """ del gfal object by calling gfal.gfal_internal_free

    :param self: self reference
    :param gfalObject: gfalObject
    :return: S_OK() always (gfal_internal_free's result is not checked)
    """
    self.log.debug( "SRM2Storage.__destroy_gfal_object: Performing gfal_internal_free." )
    self.gfal.gfal_internal_free( gfalObject )
    return S_OK()
|
sposs/DIRAC
|
Resources/Storage/SRM2Storage.py
|
Python
|
gpl-3.0
| 88,519
|
[
"DIRAC"
] |
05e874c4b0313a203d6308f5b80455263da86646797ba22af8fee7eb9493cd20
|
"""
This module contains the CharmmMatcher class. It is used to apply
atom names from known topologies to the molecule by using a graph-based
representation of each molecule.
Author: Robin Betz
Copyright (C) 2019 Robin Betz
"""
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307, USA.
from __future__ import print_function
import logging
import networkx as nx
from networkx.algorithms import isomorphism
from vmd import atomsel
from dabble import DabbleError
from dabble.param import MoleculeMatcher
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# CLASSES #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class Patch(object):
    """
    Represents a patch applied to part of the protein.

    As patches can be applied to one or more residues, it allows
    unlimited segids and resids inside. Two patches compare equal when
    they have the same name and the same set of (segid, resid) targets,
    regardless of insertion order.
    """

    def __init__(self, name, segids=None, resids=None):
        self.name = name
        self.segids = segids if segids is not None else []
        self.resids = resids if resids is not None else []

    def __repr__(self):
        pairs = ["%s:%s" % pair for pair in zip(self.segids, self.resids)]
        return "%s %s" % (self.name, " ".join(pairs))

    def __eq__(self, other):
        if not isinstance(other, Patch):
            return False
        if self.name != other.name:
            return False
        return sorted(zip(self.segids, self.resids)) == \
               sorted(zip(other.segids, other.resids))

    def __hash__(self):
        return hash(repr(self))

    def add_patch(self, segid, resid):
        """Register one more (segid, resid) target for this patch."""
        self.segids.append(segid)
        self.resids.append(resid)

    def targets(self):
        """Return the patch targets as [(segid, resid), ...] strings."""
        # TODO keep this up to date w psfgen
        return [(str(seg), str(res))
                for seg, res in zip(self.segids, self.resids)]
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class CharmmMatcher(MoleculeMatcher):
"""
Represents a collection of graphs of all residues defined in the
topology files. Can pass in VMD molecules to be checked and atom
names corrected
Attributes:
topologies (list of str): The topology files this molecule graph
knows about, from parent class
known_res (dict str resname -> networkx graph): The molecule graphs,
from parent class
nodenames (dict name -> element): Translates atom names to
elements, from parent class
known_pres (dict tuple (str resname, patchname) -> networkx graph)
patches (dict patchname -> str instructions): Known patches
"""
#==========================================================================
    def __init__(self, topologies):
        """
        Initializes a graph parser with the given rtf files
        as known molecules.

        Args:
            topologies (list of str): CHARMM topology (rtf/str) files to
                parse into residue and patch graphs
        """
        self.patches = {}
        self.known_pres = {}

        # Parent assigns and parses topologies
        super(CharmmMatcher, self).__init__(topologies=topologies)

        # Assign elements to all known residues
        for graph in self.known_res.values():
            self._assign_elements(graph)

        # Create dictionary of patched amino acids that we know about
        for res in [s for s in self.known_res.keys()
                    if s in self.AMINO_ACIDS]:
            # Also add the +C -N linkages in amino acids. There's no reliable
            # way to add these using the info in the psf files (impropers and
            # cmap terms mess up other molecule types) so we just add them in
            # this function call by name
            _define_bond(self.known_res[res], "+N", "C", patch=False)
            _define_bond(self.known_res[res], "-C", "N", patch=False)
            # Assign elements again, in case some extraresidue atoms were
            # added by the amino acid linkage bonds. Patches take care
            # of their own element assignment for new atoms later.
            self._assign_elements(self.known_res[res])
            # Pre-compute every valid (residue, patch) combination graph so
            # patched residues can later be found by plain isomorphism checks
            for patch in self.patches:
                applied = self._apply_patch(res, patch)
                if applied:
                    self.known_pres[(res, patch)] = applied
#=========================================================================
# Public methods #
#=========================================================================
    def get_patches(self, selection):
        """
        Obtains names and patch info for a modified residue in a selection.
        Identifies which amino acid is patched by finding which amino acid
        this one is a maximal subgraph of. Then, builds a library of graphs
        representing all valid patches applied to this amino acid.

        Note that this does NOT handle multiple-residue patches such as
        disulfides!

        Args:
            selection (VMD atomsel): Selection that is patched

        Returns:
            (str, str, dict) resname matched, patch name applied,
              name translation dictionary; (None, None, None) when no
              patched residue graph matches
        """
        resname = selection.resname[0]
        rgraph = self.parse_vmd_graph(selection)[0]

        # Check this residue against all possible patches applied to the
        # known amino acids, looking for a full-graph isomorphism
        for names in self.known_pres:
            graph = self.known_pres[names]
            matcher = isomorphism.GraphMatcher(rgraph, graph, \
                        node_match=super(CharmmMatcher, self)._check_atom_match)
            if matcher.is_isomorphic():
                logger.info("Detected patch %s", names[1])
                match = next(matcher.match())
                _, atomnames = self._get_names_from_match(graph, match)
                return (names[0], names[1], atomnames)

        # Nothing matched: dump the residue graph to help debugging
        logger.error("Couldn't find a patch for resname '%s'."
                     "Dumping as 'rgraph.dot'", resname)
        self.write_dot(rgraph, "rgraph.dot")
        return (None, None, None)
#=========================================================================
    def get_disulfide(self, selstring, molid): #pylint: disable=too-many-locals
        """
        Checks if the selection corresponds to a cysteine in a disulfide bond.
        Sets the patch line appropriately and matches atom names using
        a subgraph match to the normal cysteine residue

        Args:
            selstring (str): Selection to check
            molid (int): VMD molecule of entire system (needed for disu partner)

        Returns:
            (str, Patch, dict) resname matched, patch object for psfgen,
              name translation dictionary; (None, None, None) when the
              selection is not a disulfide-bonded cysteine

        Raises:
            DabbleError: if the third external bond partner is not a sulfur
        """
        selection = atomsel(selstring, molid=molid)

        # Check for the 3 join atoms corresponding to the disulfide bonds
        rgraph, _ = self.parse_vmd_graph(selection)
        externs = self.get_extraresidue_atoms(selection)
        if len(externs) != 3:
            return (None, None, None)

        # Check that it is a cysteine in some way shape or form
        # ie that it this residue is a subgraph of a cysteine
        truncated = nx.Graph(rgraph)
        truncated.remove_nodes_from([n for n in rgraph.nodes() if \
                                     rgraph.node[n]["residue"] != "self"])
        matches = {}
        for matchname in self.AMINO_ACIDS:
            graph = self.known_res.get(matchname)
            if not graph:
                continue

            matcher = isomorphism.GraphMatcher(graph, truncated, \
                        node_match=super(CharmmMatcher, self)._check_atom_match)
            if matcher.subgraph_is_isomorphic():
                matches[matchname] = matcher.match()

        if not matches:
            return (None, None, None)
        # Pick the largest amino acid this residue is a subgraph of
        matchname = max(matches.keys(), key=(lambda x: len(self.known_res[x])))
        if matchname != "CYS":
            return (None, None, None)

        # Invert mapping so it's idx->name. It's currently backwards
        # because of the need to find a subgraph.
        atomnames = dict((v, k) for (k, v) in next(matches[matchname]).items())

        # Now we know it's a cysteine in a disulfide bond
        # Identify which resid and fragment corresponds to the other cysteine
        partners = [n for n in externs if \
                    atomsel("index %d" % n, molid=molid).element[0] == "S"]
        if not partners:
            raise DabbleError("3 bonded Cys %d isn't a valid disulfide!"
                              % selection.resid[0])
        osel = atomsel("index %d" % partners[0], molid=molid)

        # Order so same DISU isn't listed twice
        # (lower fragment first; ties broken by lower resid)
        fr1 = osel.fragment[0]
        fr2 = selection.fragment[0]
        if fr1 < fr2:
            first = osel
            second = selection
        elif fr1 > fr2:
            first = selection
            second = osel
        else:
            if osel.resid[0] < selection.resid[0]:
                first = osel
                second = selection
            else:
                first = selection
                second = osel

        patch = Patch(name="DISU",
                      segids=[
                          self.get_protein_segname(molid, first.fragment[0]),
                          self.get_protein_segname(molid, second.fragment[0])
                      ],
                      resids=[first.resid[0],
                              second.resid[0]])
        return (matchname, patch, atomnames)
#=========================================================================
def get_names(self, selection, print_warning=False):
"""
Returns at atom name matching up dictionary.
Does the generic moleculematcher algorithm then checks that only
one resname matched since for CHARMM there is no concept
of a unit and only one named residue is defined per topology.
Args:
selection (VMD atomsel): Selection to rename
print_warning (bool): Debug output
Returns:
(str) resname matched
(dict int->str) translation dictionary from index to atom name
Raises:
ValueError if more than one residue name is matched
"""
(resnames, atomnames) = super(CharmmMatcher, self).get_names(selection,
print_warning)
if not resnames:
return (None, None)
# Set the resname correctly after checking only one resname
# matched since this is charmm
resname = set(resnames.values())
if len(resname) > 1:
raise DabbleError("More than one residue name was returned as "
"belonging to a single residue in CHARMM matching."
" Not sure how this happened; something is really "
"really wrong. Residue was: %s:%d" %
(selection.resname[0],
selection.resid[0]))
return (resname.pop(), atomnames)
#==========================================================================
def get_protein_segname(self, molid, fragment):
"""
Gets the segment name from a given protein fragment.
Sometimes fragment numbers for protein can be quite large if there
are a lot of other molecules in the system. This method returns a
consistent segment name for a given protein fragment.
"""
allfrags = sorted(set(atomsel("resname %s" % " ".join(self.AMINO_ACIDS),
molid=molid).fragment))
return "P%d" % allfrags.index(fragment)
#=========================================================================
# Private methods #
#=========================================================================
    def _parse_topology(self, filename): #pylint: disable=too-many-branches
        """
        Parses a topology file and pulls out the defined residues into
        graph representation.
        First pulls out atom types that are defined and updates nodenames,
        then pulls out defined residues and updates known_res.
        Also pulls out known patches as it goes.

        Args:
            filename (str): The file to parse

        Returns:
            True if successful

        Raises:
            DabbleError if topology file is malformed in various ways
        """
        resname = ""
        data = ""       # Accumulated rtf lines for the residue/patch being read
        patch = False   # Whether the block being read is a PRES (patch)

        with open(filename, 'r') as fileh:
            for line in fileh:
                # Remove comments except "special" graphmatcher directives
                # This directive is only really used to parse the bond on NMA
                # that attaches to the previous residue, in order for its extra
                # connection to be properly registered since chamber fails
                # if a connection is listed twice
                if "!GraphMatcher:" in line:
                    line = line.replace("!GraphMatcher:", "")
                if "!" in line:
                    line = line[:line.index("!")]
                if not line:
                    continue
                tokens = [i.strip() for i in line.split()]
                if not tokens:
                    continue

                # A new RESI/PRES header closes the previous block: flush the
                # accumulated text into patches or known_res before moving on
                if data and (tokens[0] == "RESI" or tokens[0] == "PRES"):
                    if patch:
                        self.patches[resname] = data
                    else:
                        self.known_res[resname] = self._rtf_to_graph(data, resname)
                    data = ""

                # Handle new residue definition
                if tokens[0] == "RESI":
                    resname = tokens[1]
                    # Only warn for too long str files
                    if len(resname) > 4 and filename.split('.')[-1] == "str":
                        raise DabbleError("Residue name '%s' too long for psfgen"
                                          " to parse. Max is 4 characters!"
                                          % resname)
                    patch = False
                    if self.known_res.get(resname):
                        logging.info("Skipping duplicate residue %s", resname)
                        # TODO define as a different residue name???
                        # Currently reads in first file's definition, ignores others
                        resname = "_skip"

                # PRES is a patch
                elif tokens[0] == "PRES":
                    resname = tokens[1]
                    if len(resname) > 10:
                        raise DabbleError("Patch name '%s' too long for psfgen"
                                          " to parse. Max is 10 characters."
                                          % resname)
                    patch = True
                    if self.patches.get(resname):
                        # NOTE(review): resname[1:] drops the first character of
                        # the reported name — presumably a leftover from when
                        # patch names carried a '_' prefix; confirm intent.
                        logging.warning("Skipping duplicate patch %s", resname[1:])

                # Check for atom definitions
                elif tokens[0] == "MASS":
                    if self.nodenames.get(tokens[2]):
                        # NOTE(review): this branch uses 'logger' while others
                        # use 'logging' — confirm a module-level logger exists.
                        logger.info("Skipping duplicate type %s", tokens[2])
                    else:
                        self.nodenames[tokens[2]] = \
                                MoleculeMatcher.get_element(float(tokens[3]))

                # Any other line inside a residue block is accumulated verbatim
                elif resname and resname != "_skip":
                    data += ' '.join(tokens) + '\n'

        # Write out final residue
        if data:
            if patch:
                self.patches[resname] = data
            else:
                self.known_res[resname] = self._rtf_to_graph(data, resname)

        return True
#=========================================================================
    def _rtf_to_graph(self, data, resname, patch=None): #pylint: disable=too-many-branches
        """
        Parses rtf text to a graph representation. If a graph to patch
        is provided, then patches that graph with this rtf data.

        Args:
            data (str): The rtf data for this residue or patch
            resname (str): Residue name, from earlier parsing
            patch (networkx graph): The graph to apply patches to,
                or None if just parsing a residue. Will not be modified.

        Returns:
            (networkx graph): Graph representation of molecule, or None
                if it could not be converted (invalid patch)

        Raises:
            DabbleError/ValueError if rtf data is malformed in various ways
        """
        # They changed the copy keyword after version 2.1 so that
        # graph attributes can have more names
        if nx.__version__ >= "2.1":
            graph = nx.Graph(incoming_graph_data=patch)
        else:
            graph = nx.Graph(data=patch)

        for line in data.splitlines():
            tokens = [i.strip().upper() for i in line.split()]

            # Atoms mean add node to current residue
            if tokens[0] == "ATOM":
                # Patches can change atom type
                # Technically re-adding the node will just change the type and
                # not add a duplicate, but this is more correct and clear.
                if tokens[1] in graph.nodes():
                    graph.node[tokens[1]]["type"] = tokens[2]
                else:
                    graph.add_node(tokens[1], type=tokens[2],
                                   atomname=tokens[1],
                                   residue="self",
                                   patched=bool(patch))

            # Bond or double means add edge to residue graph
            elif tokens[0] == "BOND" or tokens[0] == "DOUBLE":
                # Bond terms come in pairs, so the token count must be odd
                # (keyword plus an even number of atom names)
                if len(tokens) % 2 == 0:
                    raise DabbleError("Unequal number of atoms in bond terms\n"
                                      "Line was:\n%s" % line)
                for txn in range(1, len(tokens), 2):
                    node1 = tokens[txn]
                    node2 = tokens[txn+1]
                    if not _define_bond(graph, node1, node2, bool(patch)):
                        if patch:
                            # Bond references an atom missing from the target
                            # residue, so this patch does not apply to it
                            return None
                        raise DabbleError("Could not bond atoms '%s' - '%s' "
                                          "when parsing rtf file.\n"
                                          "Line was:\n%s"
                                          % (node1, node2, line))

            # Check for atom definitions
            elif tokens[0] == "MASS":
                if self.nodenames.get(tokens[2]):
                    # NOTE(review): uses 'logger' while _parse_topology mostly
                    # uses 'logging' — confirm a module-level logger exists.
                    logger.info("Skipping duplicate type %s", tokens[2])
                else:
                    self.nodenames[tokens[2]] = \
                            MoleculeMatcher.get_element(float(tokens[3]))

            # Patches can delete atoms
            elif tokens[0] == "DELETE" or tokens[0] == "DELE":
                if not patch:
                    raise ValueError("DELETE only supported in patches!\n"
                                     "Line was:\n%s" % line)

                # Sometimes delete has a number in front of the atom name
                try:
                    if tokens[1] == "ATOM":
                        if tokens[2][0].isdigit():
                            tokens[2] = tokens[2][1:]
                        graph.remove_node(tokens[2])
                    elif tokens[1] == "BOND":
                        if tokens[2][0].isdigit():
                            tokens[2] = tokens[2][1:]
                        if tokens[3][0].isdigit():
                            tokens[3] = tokens[3][1:]
                        graph.remove_edge(tokens[2], tokens[3])

                # Atom or bond did not exist, ie this patch is invalid
                except nx.NetworkXError:
                    return None

        # Assign resname to all atoms
        nx.set_node_attributes(graph, name="resname", values=resname)

        # If we didn't patch, set the whole residue to unpatched atom attribute
        # If we are patching, new atoms will have that attribute set when
        # they are added.
        if not patch:
            nx.set_node_attributes(graph, name="patched", values=False)

        return graph
#=========================================================================
def _apply_patch(self, matchname, patch):
"""
Applies a patch to a graph, returning a modified graph
Args:
matchname (str): The key of the graph to modify
patch (str): The patch to apply
Returns:
networkx graph that's the patched residue, or None
if the patch did not apply correctly
"""
patched = self._rtf_to_graph(self.patches.get(patch),
resname=matchname,
patch=self.known_res[matchname])
if not patched:
return None
self._assign_elements(patched)
return patched
#=========================================================================
    def _assign_elements(self, graph):
        """
        Assigns elements to parsed in residues. Called after all
        topology files are read in. Element "Any" is assigned
        to atoms from other residues (+- atoms), since these are only
        defined by name.

        Args:
            graph (networkx graph): The graph to assign elements to

        Raises:
            DabbleError if an atom type can't be assigned an element
        """
        # Now that all atom and mass lines are read, get the element for each atom
        for node, data in graph.nodes(data=True):
            if data.get('residue') != "self":
                # Extraresidue (+/-) atoms carry no type; match by name only
                element = "Any"
            else:
                element = self.nodenames.get(data.get('type'))
            if not element:
                # Dump the offending graph for debugging before raising
                self.write_dot(graph, "invalid_type.dot")
                raise DabbleError("Unknown atom type %s, name '%s'.\nDumping "
                                  "graph as invalid_type.dot"
                                  % (data.get("type"), node))
            data['element'] = element
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# FUNCTIONS #
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _define_bond(graph, node1, node2, patch):
    """
    Process a bond defined in a psf file and adds it to the graph.
    Checks for + or - in bonded atom name and sets the node "residue"
    attribute accordingly if it is present.

    Args:
        graph (networkx graph): Graph to add bond to
        node1 (str): Atom name from psf file of first atom
        node2 (str): Atom name from psf file of second atom
        patch (bool): If this bond is defined by a patch

    Returns:
        (bool) If the bond could be defined
    """
    # If both atoms are extraresidue, refuse to define the bond
    # and just silently continue. This helps deal with impropers
    if all("+" in _ or "-" in _ for _ in [node1, node2]):
        return True

    # Sanity check and process atom names; extraresidue placeholders are
    # created on demand, ordinary names must already exist in the graph
    for n in [node1, node2]:
        if "+" in n:
            graph.add_node(n, atomname="+", type="", residue="+", patched=patch)
        elif "-" in n:
            graph.add_node(n, atomname="-", type="", residue="-", patched=patch)
        elif n not in graph.nodes():
            # Bond references an atom that was never defined (or was deleted)
            return False

    # If we are applying a patch and there are extraresidue atoms attached
    # to the atom we are applying a bond to, delete the extraresidue atom.
    # It can be added back later if it was actually needed.
    neighbor_joins = []
    if graph.node[node1]["patched"] and not graph.node[node2]["patched"]:
        neighbor_joins = [e[1] for e in graph.edges(nbunch=[node2]) \
                          if graph.node[e[1]]["residue"] != "self" and \
                          not graph.node[e[1]]["patched"]]
    elif graph.node[node2]["patched"] and not graph.node[node1]["patched"]:
        neighbor_joins = [e[1] for e in graph.edges(nbunch=[node1]) \
                          if graph.node[e[1]]["residue"] != "self" and \
                          not graph.node[e[1]]["patched"]]
    graph.remove_nodes_from(neighbor_joins)

    graph.add_edge(node1, node2, patched=patch)
    return True
#=========================================================================
def _prune_joins(graph):
    """
    Prunes _join elements that have been fulfilled by the addition of
    this patch.

    DEPRECATED! But a useful function for detecting fulfilled +- joins
    that match by element so I'm keeping it.
    Pruning now done in _define_bond

    Args:
        graph (networkx graph): The residue to prune
    """
    unpatched = [n for n in graph.nodes() if not graph.node[n]["patched"]]
    for uun in unpatched:
        # Extraresidue (+/-) placeholder neighbors that predate the patch
        neighbor_joins = [e[1] for e in graph.edges(nbunch=[uun]) if \
                          graph.node[e[1]]["residue"] != "self" and \
                          not graph.node[e[1]]["patched"]]
        for nei in neighbor_joins:
            # Drop the placeholder when a patched neighbor of the same
            # element now satisfies this join
            if any(graph.node[e[1]]["element"] == graph.node[nei]["element"] for \
                   e in graph.edges(nbunch=[uun]) if \
                   graph.node[e[1]]["patched"]):
                graph.remove_node(nei)
#=========================================================================
|
Eigenstate/dabble
|
dabble/param/charmmmatcher.py
|
Python
|
gpl-2.0
| 26,364
|
[
"CHARMM",
"VMD"
] |
e22e062d448ffb0f4f1ca42b052dc465d025fbd354aaa1b2ae8d491c948a4e12
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
********************************************************************
**ParticleAccess** - abstract base class for analysis/measurement/io
********************************************************************
"""
from espresso import pmi
from _espresso import ParticleAccess
class ParticleAccessLocal(ParticleAccess):
    """Abstract local base class for analysis/measurement/io observables."""
    def perform_action(self):
        # Run only on workers belonging to the active PMI CPU group
        # (or on every rank when no PMI communicator is configured)
        if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.perform_action(self)
# On the controller, expose ParticleAccess as a PMI proxy that forwards
# 'perform_action' calls to the worker-side objects
if pmi.isController :
    class ParticleAccess(object):
        """Abstract base class"""
        __metaclass__ = pmi.Proxy  # Python 2 metaclass hook used by PMI
        pmiproxydefs = dict(
            pmicall = [ 'perform_action' ]
            )
|
BackupTheBerlios/espressopp
|
src/ParticleAccess.py
|
Python
|
gpl-3.0
| 1,591
|
[
"ESPResSo"
] |
b8cca7e1ba34af30697c2082753d807a53b77315ef7646916d1e851cae731eca
|
from __future__ import absolute_import
from __future__ import division
from six import text_type
from typing import Any, Dict, List, Tuple, Optional, Sequence, Callable, Union
from django.db import connection
from django.db.models.query import QuerySet
from django.template import RequestContext, loader
from django.core import urlresolvers
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from jinja2 import Markup as mark_safe
from zerver.decorator import has_request_variables, REQ, zulip_internal
from zerver.models import get_realm, UserActivity, UserActivityInterval, Realm
from zerver.lib.timestamp import timestamp_to_datetime
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import time
import re
import pytz
from six.moves import filter
from six.moves import map
from six.moves import range
from six.moves import zip
eastern_tz = pytz.timezone('US/Eastern')
from zproject.jinja2 import render_to_response
def make_table(title, cols, rows, has_row_class=False):
    # type: (str, List[str], List[Any], bool) -> str
    """Render rows/cols into the ad-hoc query HTML table template.

    When has_row_class is False, each row is a plain list of cells and is
    wrapped into the dict shape (cells, row_class) the template expects.
    """
    if not has_row_class:
        rows = [dict(cells=row, row_class=None) for row in rows]

    table_data = dict(title=title, cols=cols, rows=rows)
    return loader.render_to_string(
        'analytics/ad_hoc_query.html',
        dict(data=table_data)
    )
def dictfetchall(cursor):
    # type: (connection.cursor) -> List[Dict[str, Any]]
    """Return all rows from a database cursor as dicts keyed by column name."""
    column_names = [col[0] for col in cursor.description]
    result = []
    for row in cursor.fetchall():
        result.append(dict(zip(column_names, row)))
    return result
def get_realm_day_counts():
    # type: () -> Dict[str, Dict[str, str]]
    """Build per-realm HTML cells of daily human message counts for the
    last week, keyed by realm domain."""
    # Count non-bot messages per realm per day of age (0 = today .. 7),
    # excluding mirror/monitoring clients
    query = '''
        select
            r.domain,
            (now()::date - pub_date::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            pub_date > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.domain,
            age
        order by
            r.domain,
            age
    '''
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
    for row in rows:
        counts[row['domain']][row['age']] = row['cnt']

    result = {}
    for domain in counts:
        # Missing days count as zero messages
        raw_cnts = [counts[domain].get(age, 0) for age in range(8)]
        min_cnt = min(raw_cnts)
        max_cnt = max(raw_cnts)

        def format_count(cnt):
            # type: (int) -> str
            # Highlight this realm's best/worst days; the closure reads the
            # current iteration's min_cnt/max_cnt and is consumed immediately
            if cnt == min_cnt:
                good_bad = 'bad'
            elif cnt == max_cnt:
                good_bad = 'good'
            else:
                good_bad = 'neutral'
            return '<td class="number %s">%s</td>' % (good_bad, cnt)

        cnts = ''.join(map(format_count, raw_cnts))
        result[domain] = dict(cnts=cnts)
    return result
def realm_summary_table(realm_minutes):
    # type: (Dict[str, float]) -> str
    """Render the per-realm summary table: active users, at-risk users,
    user/bot counts, engagement hours, and recent message history.

    Args:
        realm_minutes: per-domain online minutes, as produced by
            user_activity_intervals()

    Returns:
        Rendered HTML for the summary table, with a 'Total' row appended.
    """
    query = '''
        SELECT
            realm.domain,
            coalesce(user_counts.active_user_count, 0) active_user_count,
            coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND not is_bot
            ) user_profile_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND is_bot
            ) bot_count
        FROM zerver_realm realm
        LEFT OUTER JOIN
            (
                SELECT
                    up.realm_id realm_id,
                    count(distinct(ua.user_profile_id)) active_user_count
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        'send_message_backend',
                        '/api/v1/send_message',
                        '/json/update_pointer',
                        '/json/users/me/pointer'
                    )
                AND
                    last_visit > now() - interval '1 day'
                AND
                    not is_bot
                GROUP BY realm_id
            ) user_counts
            ON user_counts.realm_id = realm.id
        LEFT OUTER JOIN
            (
                SELECT
                    realm_id,
                    count(*) at_risk_count
                FROM (
                    SELECT
                        realm.id as realm_id,
                        up.email
                    FROM zerver_useractivity ua
                    JOIN zerver_userprofile up
                        ON up.id = ua.user_profile_id
                    JOIN zerver_realm realm
                        ON realm.id = up.realm_id
                    WHERE up.is_active
                    AND (not up.is_bot)
                    AND
                        ua.query in (
                            '/json/send_message',
                            'send_message_backend',
                            '/api/v1/send_message',
                            '/json/update_pointer',
                            '/json/users/me/pointer'
                        )
                    GROUP by realm.id, up.email
                    HAVING max(last_visit) between
                        now() - interval '7 day' and
                        now() - interval '1 day'
                ) as at_risk_users
                GROUP BY realm_id
            ) at_risk_counts
            ON at_risk_counts.realm_id = realm.id
        WHERE EXISTS (
                SELECT *
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        '/api/v1/send_message',
                        'send_message_backend',
                        '/json/update_pointer',
                        '/json/users/me/pointer'
                    )
                AND
                    up.realm_id = realm.id
                AND
                    last_visit > now() - interval '2 week'
        )
        ORDER BY active_user_count DESC, domain ASC
        '''

    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row['history'] = counts[row['domain']]['cnts']
        except KeyError:
            # Realm sent no (counted) messages in the last week; was a
            # bare except that could mask real errors
            row['history'] = ''

    # augment data with realm_minutes
    total_hours = 0.0
    for row in rows:
        domain = row['domain']
        minutes = realm_minutes.get(domain, 0.0)
        hours = minutes / 60.0
        total_hours += hours
        row['hours'] = str(int(hours))
        try:
            row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
        except ZeroDivisionError:
            # No active users; leave hours_per_user unset (was a bare except)
            pass

    # formatting: turn raw domains into links to per-realm pages
    for row in rows:
        row['domain'] = realm_activity_link(row['domain'])

    # Count active sites (5+ active users)
    def meets_goal(row):
        # type: (Dict[str, int]) -> bool
        return row['active_user_count'] >= 5

    num_active_sites = len(list(filter(meets_goal, rows)))

    # create totals
    total_active_user_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    total_at_risk_count = 0
    for row in rows:
        total_active_user_count += int(row['active_user_count'])
        total_user_profile_count += int(row['user_profile_count'])
        total_bot_count += int(row['bot_count'])
        total_at_risk_count += int(row['at_risk_count'])

    rows.append(dict(
        domain='Total',
        active_user_count=total_active_user_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours),
        at_risk_count=total_at_risk_count,
    ))

    content = loader.render_to_string(
        'analytics/realm_summary_table.html',
        dict(rows=rows, num_active_sites=num_active_sites)
    )
    return content
def user_activity_intervals():
    # type: () -> Tuple[mark_safe, Dict[str, float]]
    """Summarize per-user online duration over the last 24 hours.

    Returns:
        (preformatted HTML report, per-domain online minutes)
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)

    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)

    all_intervals = UserActivityInterval.objects.filter(
        end__gte=day_start,
        start__lte=day_end
    ).select_related(
        'user_profile',
        'user_profile__realm'
    ).only(
        'start',
        'end',
        'user_profile__email',
        'user_profile__realm__domain'
    ).order_by(
        'user_profile__realm__domain',
        'user_profile__email'
    )

    by_domain = lambda row: row.user_profile.realm.domain
    by_email = lambda row: row.user_profile.email

    realm_minutes = {}

    # groupby relies on the order_by above clustering rows by domain, email
    for domain, realm_intervals in itertools.groupby(all_intervals, by_domain):
        realm_duration = timedelta(0)
        output += '<hr>%s\n' % (domain,)
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clip each interval to the 24-hour window before summing
                start = max(day_start, interval.start)
                end = min(day_end, interval.end)
                duration += end - start

            total_duration += duration
            realm_duration += duration
            output += " %-*s%s\n" % (37, email, duration)

        realm_minutes[domain] = realm_duration.total_seconds() / 60

    output += "\nTotal Duration: %s\n" % (total_duration,)
    output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
    output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
    content = mark_safe('<pre>' + output + '</pre>')
    return content, realm_minutes
def sent_messages_report(realm):
    # type: (str) -> str
    """Render a table of human vs bot messages per day for one realm over
    the last two weeks."""
    title = 'Recently sent messages for ' + realm

    cols = [
        'Date',
        'Humans',
        'Bots'
    ]

    # generate_series produces one row per day so days with no messages
    # still appear; the two outer joins attach human and bot counts
    query = '''
        select
            series.day::date,
            humans.cnt,
            bots.cnt
        from (
            select generate_series(
                (now()::date - interval '2 week'),
                now()::date,
                interval '1 day'
            ) as day
        ) as series
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.domain = %s
            and
                (not up.is_bot)
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) humans on
            series.day = humans.pub_date
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.domain = %s
            and
                up.is_bot
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) bots on
            series.day = bots.pub_date
    '''
    cursor = connection.cursor()
    cursor.execute(query, [realm, realm])
    rows = cursor.fetchall()
    cursor.close()

    return make_table(title, cols, rows)
def ad_hoc_queries():
    # type: () -> List[Dict[str, str]]
    """Run the canned usage reports (mobile, desktop, integrations) and
    return one dict per report with rendered 'content' and its 'title'."""
    def get_page(query, cols, title):
        # type: (str, List[str], str) -> Dict[str, str]
        # Execute a raw SQL report and render it as an HTML table
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        rows = list(map(list, rows))
        cursor.close()

        def fix_rows(i, fixup_func):
            # type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
            for row in rows:
                row[i] = fixup_func(row[i])

        # Linkify realm domains and pretty-print timestamp columns in place
        for i, col in enumerate(cols):
            if col == 'Domain':
                fix_rows(i, realm_activity_link)
            elif col in ['Last time', 'Last visit']:
                fix_rows(i, format_date_for_activity_reports)

        content = make_table(title, cols, rows)

        return dict(
            content=content,
            title=title
        )

    pages = []

    ###

    for mobile_type in ['Android', 'ZulipiOS']:
        title = '%s usage' % (mobile_type,)

        query = '''
            select
                realm.domain,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like '%s'
            group by domain, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by domain, up.id, client.name
        ''' % (mobile_type,)

        cols = [
            'Domain',
            'User id',
            'Name',
            'Hits',
            'Last time'
        ]

        pages.append(get_page(query, cols, title))

    ###

    title = 'Desktop users'

    query = '''
        select
            realm.domain,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by domain, client.name
        having max(last_visit) > now() - interval '2 week'
        order by domain, client.name
    '''

    cols = [
        'Domain',
        'Client',
        'Hits',
        'Last time'
    ]

    pages.append(get_page(query, cols, title))

    ###

    title = 'Integrations by domain'

    # Integration hits are either the standard send-message endpoints from
    # non-official clients, or any query containing 'external'
    query = '''
        select
            realm.domain,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by domain, client_name
        having max(last_visit) > now() - interval '2 week'
        order by domain, client_name
    '''

    cols = [
        'Domain',
        'Client',
        'Hits',
        'Last time'
    ]

    pages.append(get_page(query, cols, title))

    ###

    title = 'Integrations by client'

    query = '''
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.domain,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, domain
        having max(last_visit) > now() - interval '2 week'
        order by client_name, domain
    '''

    cols = [
        'Client',
        'Domain',
        'Hits',
        'Last time'
    ]

    pages.append(get_page(query, cols, title))

    return pages
@zulip_internal
@has_request_variables
def get_activity(request):
    # type: (HttpRequest) -> HttpResponse
    """Render the internal /activity dashboard: realm counts, per-user
    online durations, and the canned ad-hoc usage reports."""
    duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
    counts_content = realm_summary_table(realm_minutes) # type: str
    data = [
        ('Counts', counts_content),
        ('Durations', duration_content),
    ]
    for page in ad_hoc_queries():
        data.append((page['title'], page['content']))

    title = 'Activity'

    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title=title, is_home=True),
        request=request
    )
def get_user_activity_records_for_realm(realm, is_bot):
    # type: (str, bool) -> QuerySet
    """Fetch activity records for all active users (or bots) in a realm,
    ordered by email then most recent visit first."""
    fields = [
        'user_profile__full_name',
        'user_profile__email',
        'query',
        'client__name',
        'count',
        'last_visit',
    ]
    return UserActivity.objects.filter(
        user_profile__realm__domain=realm,
        user_profile__is_active=True,
        user_profile__is_bot=is_bot
    ).order_by(
        "user_profile__email", "-last_visit"
    ).select_related(
        'user_profile', 'client'
    ).only(*fields)
def get_user_activity_records_for_email(email):
    # type: (str) -> List[QuerySet]
    """Fetch all activity records for one user email, most recent visit first."""
    fields = [
        'user_profile__full_name',
        'query',
        'client__name',
        'count',
        'last_visit'
    ]
    return UserActivity.objects.filter(
        user_profile__email=email
    ).order_by(
        "-last_visit"
    ).select_related(
        'user_profile', 'client'
    ).only(*fields)
def raw_user_activity_table(records):
    # type: (List[QuerySet]) -> str
    """Render every activity record verbatim as a 'Raw Data' HTML table."""
    cols = [
        'query',
        'client',
        'count',
        'last_visit'
    ]
    rows = [
        [
            record.query,
            record.client.name,
            record.count,
            format_date_for_activity_reports(record.last_visit),
        ]
        for record in records
    ]
    return make_table('Raw Data', cols, rows)
def get_user_activity_summary(records):
    # type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    """Aggregate one user's activity records into per-category totals and
    last-visit times, keyed by category ('use', 'send', client name, ...)."""
    #: `Any` used above should be `Union(int, datetime)`.
    #: However current version of `Union` does not work inside other function.
    #: We could use something like:
    # `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
    #: but that would require this long `Union` to carry on throughout inner functions.
    summary = {} # type: Dict[str, Dict[str, Any]]

    def update(action, record):
        # type: (str, QuerySet) -> None
        # Accumulate the hit count and keep the most recent visit per key
        if action not in summary:
            summary[action] = dict(
                count=record.count,
                last_visit=record.last_visit
            )
        else:
            summary[action]['count'] += record.count
            summary[action]['last_visit'] = max(
                summary[action]['last_visit'],
                record.last_visit
            )

    if records:
        summary['name'] = records[0].user_profile.full_name

    for record in records:
        client = record.client.name
        query = record.query

        update('use', record)

        if client == 'API':
            # API hits on webhook endpoints are attributed to the integration
            m = re.match('/api/.*/external/(.*)', query)
            if m:
                client = m.group(1)
                # NOTE(review): this updates the integration's key here AND
                # again at the bottom of the loop, double-counting such hits —
                # confirm whether that is intended.
                update(client, record)

        if client.startswith('desktop'):
            update('desktop', record)
        if client == 'website':
            update('website', record)
        if ('send_message' in query) or re.search('/api/.*/external/.*', query):
            update('send', record)
        if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
            update('pointer', record)
        update(client, record)

    return summary
def format_date_for_activity_reports(date):
    # type: (Optional[datetime]) -> str
    """Render an aware datetime in US/Eastern as 'YYYY-MM-DD HH:MM', or ''
    when no date is given."""
    if not date:
        return ''
    return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
def user_activity_link(email):
    # type: (str) -> mark_safe
    """Return an HTML anchor linking to the per-user activity page."""
    url = urlresolvers.reverse('analytics.views.get_user_activity',
                               kwargs=dict(email=email))
    return mark_safe('<a href="%s">%s</a>' % (url, email))
def realm_activity_link(realm):
    # type: (str) -> mark_safe
    """Return an HTML anchor linking to the per-realm activity page."""
    url = urlresolvers.reverse('analytics.views.get_realm_activity',
                               kwargs=dict(realm=realm))
    return mark_safe('<a href="%s">%s</a>' % (url, realm))
def realm_client_table(user_summaries):
    # type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
    """Render a realm-wide table of client usage: one row per (user, client),
    sorted by most recent visit."""
    exclude_keys = [
        # Aggregate/meta keys in a user summary that are not real client names
        'internal',
        'name',
        'use',
        'send',
        'pointer',
        'website',
        'desktop',
    ]
    rows = []
    for email, user_summary in user_summaries.items():
        email_link = user_activity_link(email)
        name = user_summary['name']
        for k, v in user_summary.items():
            if k in exclude_keys:
                continue
            client = k
            count = v['count']
            last_visit = v['last_visit']
            row = [
                format_date_for_activity_reports(last_visit),
                client,
                name,
                email_link,
                count,
            ]
            rows.append(row)

    # Formatted dates sort lexically, so this is newest-first
    rows = sorted(rows, key=lambda r: r[0], reverse=True)

    cols = [
        'Last visit',
        'Client',
        'Name',
        'Email',
        'Count',
    ]

    title = 'Clients'

    return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
    # type: (Dict[str, Dict[str, Any]]) -> str
    """Render one user's per-category activity summary as an HTML table,
    newest activity first."""
    rows = []
    for category, stats in user_summary.items():
        # 'name' is metadata, not an activity category
        if category == 'name':
            continue
        rows.append([
            format_date_for_activity_reports(stats['last_visit']),
            category,
            stats['count'],
        ])

    rows.sort(key=lambda r: r[0], reverse=True)

    cols = [
        'last_visit',
        'client',
        'count',
    ]
    return make_table('User Activity', cols, rows)
def realm_user_summary_table(all_records, admin_emails):
    # type: (List[QuerySet], Set[text_type]) -> Tuple[Dict[str, Dict[str, Any]], str]
    """Build per-user activity summaries for a realm and render the 'Summary'
    table.

    Args:
        all_records: activity records already ordered by email (required by
            itertools.groupby below)
        admin_emails: emails to flag with the 'admin' row class

    Returns:
        (per-email summary dicts, rendered HTML table)
    """
    user_records = {}

    def by_email(record):
        # type: (QuerySet) -> str
        return record.user_profile.email

    for email, records in itertools.groupby(all_records, by_email):
        user_records[email] = get_user_activity_summary(list(records))

    def get_last_visit(user_summary, k):
        # type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
        if k in user_summary:
            return user_summary[k]['last_visit']
        else:
            return None

    def get_count(user_summary, k):
        # type: (Dict[str, Dict[str, str]], str) -> str
        if k in user_summary:
            return user_summary[k]['count']
        else:
            return ''

    def is_recent(val):
        # type: (Optional[datetime]) -> bool
        # "Recent" means within the last five minutes
        age = datetime.now(val.tzinfo) - val # type: ignore # datetie.now tzinfo bug.
        return age.total_seconds() < 5 * 60

    rows = []
    for email, user_summary in user_records.items():
        email_link = user_activity_link(email)
        sent_count = get_count(user_summary, 'send')
        cells = [user_summary['name'], email_link, sent_count]
        row_class = ''
        for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
            visit = get_last_visit(user_summary, field)
            if field == 'use':
                # Row classes hang off the 'use' column: highlight users seen
                # in the last five minutes and realm administrators
                if visit and is_recent(visit):
                    row_class += ' recently_active'
                if email in admin_emails:
                    row_class += ' admin'
            val = format_date_for_activity_reports(visit)
            cells.append(val)
        row = dict(cells=cells, row_class=row_class)
        rows.append(row)

    def by_used_time(row):
        # type: (Dict[str, Sequence[str]]) -> str
        # Cell 3 is the formatted 'use' timestamp; lexical sort is newest-first
        return row['cells'][3]

    rows = sorted(rows, key=by_used_time, reverse=True)

    cols = [
        'Name',
        'Email',
        'Total sent',
        'Heard from',
        'Message sent',
        'Pointer motion',
        'Desktop',
        'ZulipiOS',
        'Android'
    ]

    title = 'Summary'

    content = make_table(title, cols, rows, has_row_class=True)
    return user_records, content
@zulip_internal
def get_realm_activity(request, realm):
    # type: (HttpRequest, str) -> HttpResponse
    """Render the internal per-realm activity page: human/bot summaries,
    client usage, and recent message history."""
    data = [] # type: List[Tuple[str, str]]
    all_user_records = {} # type: Dict[str, Any]

    try:
        admins = get_realm(realm).get_admin_users()
    except Realm.DoesNotExist:
        return HttpResponseNotFound("Realm %s does not exist" % (realm,))

    admin_emails = {admin.email for admin in admins}

    for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
        all_records = list(get_user_activity_records_for_realm(realm, is_bot))

        user_records, content = realm_user_summary_table(all_records, admin_emails)
        all_user_records.update(user_records)

        data += [(page_title, content)]

    page_title = 'Clients'
    content = realm_client_table(all_user_records)
    data += [(page_title, content)]

    page_title = 'History'
    content = sent_messages_report(realm)
    data += [(page_title, content)]

    # Link to the external graphite dashboard for this realm; dots in the
    # domain must become underscores in the stat name
    fix_name = lambda realm: realm.replace('.', '_')

    realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
    realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (fix_name(realm),)

    title = realm
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, realm_link=realm_link, title=title),
        request=request
    )
@zulip_internal
def get_user_activity(request, email):
    # type: (HttpRequest, str) -> HttpResponse
    """Render the internal per-user activity page: summary table followed by
    the raw activity records."""
    records = get_user_activity_records_for_email(email)

    data = [] # type: List[Tuple[str, str]]
    user_summary = get_user_activity_summary(records)
    content = user_activity_summary_table(user_summary)

    data += [('Summary', content)]

    content = raw_user_activity_table(records)
    data += [('Info', content)]

    title = email
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title=title),
        request=request
    )
|
umkay/zulip
|
analytics/views.py
|
Python
|
apache-2.0
| 28,114
|
[
"VisIt"
] |
369731c1ffecc0cc1d7d9fc7117465bcf78f928c627c211bc97b3ca1bf9d7afd
|
from data import *
import numpy as np
import sys, math
""" fix_image_flags.py
- reads in LAMMPS data file
- looks for bonds that span periodic boundaries
- adds image flags
- writes new LAMMPS data file
note:
- completely ignores existing image flags
- assumes 'atom_style full'
- requires pizza.py (pizza.sandia.gov)
usage:
- python fix_image_flags.py <initial_data_file> <new_data_file>
"""
def sgn(a):
    """Return the sign of a: -1 for negative values, +1 otherwise.

    Note that sgn(0) == 1 (zero counts as positive); the image-flag
    propagation in correct_flag() relies on this convention.
    """
    # Clearer than the original bool-arithmetic int((a<0)*-1 + (a>=0)).
    return -1 if a < 0 else 1
# Check that all bonds are no more than 'threshold' units long
def distance_check(i, j, flags, positions, box, threshold=3):
    """Print a warning if the unwrapped bond i-j is longer than threshold.

    positions holds wrapped coordinates, flags holds integer image flags and
    box the three box lengths; the distance is computed on the unwrapped
    coordinates pos + flag * box_length, per dimension.
    """
    r = 0.
    for ii in range(3):
        r += ((positions[i][ii]+flags[i][ii]*box[ii])-(positions[j][ii]+flags[j][ii]*box[ii]))**2
    if math.sqrt(r) > threshold:
        # BUG FIX: was a Python-2-only print statement; print() with a single
        # pre-formatted string works in both Python 2 and 3.
        print('bond %s:%s, |r| = %s' % (i, j, math.sqrt(r)))
    return
# computes r_parent - r_child, in unwrapped co-ordinates, and sets flag of child
def correct_flag(parent, child, flags, positions, box):
    """Propagate the parent's periodic-image flag to the child atom.

    For each axis: if the wrapped separation exceeds half the box length the
    bond crosses the periodic boundary, so the child's flag is the parent's
    shifted by +/-1 in the direction of the separation; otherwise the flag is
    copied unchanged.
    """
    for axis in range(3):
        delta = positions[parent][axis] - positions[child][axis]
        shift = 0
        if abs(delta) > 0.5 * box[axis]:
            # bond wraps around the boundary on this axis
            shift = -1 if delta < 0 else 1
        flags[child][axis] = flags[parent][axis] + shift
    return
def main(infile, outfile):
    """Read a LAMMPS data file, rebuild periodic image flags, write a new file.

    Flags are propagated outward from one seed atom per molecule by a
    breadth-first walk over the bond graph, then all bond lengths are sanity
    checked in unwrapped co-ordinates.
    """
    d = data(infile)
    # Get box co-ordinates and lengths
    box = [d.headers['xlo xhi'], d.headers['ylo yhi'], d.headers['zlo zhi']]
    box_len = [b[1]-b[0] for b in box]
    # Get atomic positions (assumes 'atom_style full': columns 4:7 are x, y, z)
    positions = [np.array([a for a in ali.strip().split()[4:7]],dtype=float) for ali in d.sections['Atoms']]
    for pi in positions:
        for i in range(3):
            # Assume all initial co-ordinates are unwrapped
            try:
                assert pi[i] >= box[i][0] and pi[i] <= box[i][1]
            except:
                print 'atom outside box:', pi, i, box[i]
                sys.exit()
    natoms = len(positions)
    # Convert bonds to list
    bonds = [[int(b) for b in bli.strip().split()[2:4]] for bli in d.sections['Bonds']]
    # Compute neighbours for every atom
    atom_neighbours = [[] for i in range(natoms)]
    for bi, bj in bonds:
        atom_neighbours[bi-1].append(bj-1)
        atom_neighbours[bj-1].append(bi-1)
    # Set initial flags to 0
    flags = [[0,0,0] for i in range(natoms)]
    # Record the first atom seen for each molecule id; these seed the BFS below
    mol_ids = {}
    for i in range(len(d.sections['Atoms'])):
        li = d.sections['Atoms'][i]
        mol_no = int(li.strip().split()[1])
        if mol_no not in mol_ids.keys():
            mol_ids[mol_no] = i
    #parent = [0]
    parent = mol_ids.values()
    steps = 0
    # Breadth-first walk over the bond graph: fix each child's flag from its
    # parent, then clear the parent's neighbour list so atoms are visited once.
    while True:
        temp = []
        for p in parent:
            #ptype = d.sections['Atoms'][p].strip().split()[-1]
            for child in atom_neighbours[p]:
                # fix the flag
                correct_flag(p, child, flags, positions, box_len)
                # append child to temp for next iteration
                temp.append(child)
            # kill atom_neighbours
            atom_neighbours[p] = []
        parent = temp
        steps +=1
        if np.size(temp) == 0:
            break
    print 'doing distance check...'
    for bi, bj in bonds:
        distance_check(bi-1, bj-1, flags, positions, box_len)
    # Append flags to atom section in lammps data object
    for i in range(natoms):
        line = d.sections['Atoms'][i]
        l = [li for li in line.strip().split() if '#' not in li]
        assert len(l) == 7 or len(l) == 10 # atom-num, mol-num, atom-type, charge, x, y, z, (image flags)
        l = l[:7] + [str(f) for f in flags[i]]
        nl = "%-10s %8s %8s %10s %15s %15s %15s %8s %8s %8s\n" % tuple(l)
        d.sections['Atoms'][i] = nl
    d.write(outfile)
if __name__ == "__main__":
    # BUG FIX: `assert` is stripped under `python -O`, so it must not be used
    # for CLI argument validation; exit with a usage message instead.
    if len(sys.argv) != 3:
        sys.exit('usage: python fix_image_flags.py <initial_data_file> <new_data_file>')
    infile = sys.argv[1]
    outfile = sys.argv[2]
    main(infile, outfile)
|
musab-k/fix-image-flags
|
fix_image_flags.py
|
Python
|
mit
| 3,997
|
[
"LAMMPS"
] |
074b013615c1204d705bf5c2684729cb9d08ae764a0fe571412f41cf24432e41
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.misc import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
    """
    The coefficients for the FIR low-pass filter producing Daubechies wavelets.

    p>=1 gives the order of the zero at f=1/2.
    There are 2p filter coefficients.

    Parameters
    ----------
    p : int
        Order of the zero at f=1/2, can have values from 1 to 34.

    Returns
    -------
    daub : ndarray
        The 2p filter coefficients.

    Raises
    ------
    ValueError
        If p < 1 or p >= 35.
    """
    sqrt = np.sqrt
    if p < 1:
        raise ValueError("p must be at least 1.")
    if p == 1:
        c = 1 / sqrt(2)
        return np.array([c, c])
    elif p == 2:
        f = sqrt(2) / 8
        c = sqrt(3)
        return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
    elif p == 3:
        # closed-form solution for p = 3
        tmp = 12 * sqrt(10)
        z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
        z1c = np.conj(z1)
        f = sqrt(2) / 8
        d0 = np.real((1 - z1) * (1 - z1c))
        a0 = np.real(z1 * z1c)
        a1 = 2 * np.real(z1)
        return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
                                  a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # BUG FIX: removed an unreachable inner `if p < 35 / else` — the outer
        # elif already guarantees p < 35, so the alternative polynomial branch
        # was dead code.
        # NOTE(review): `comb` comes from scipy.misc at the top of this file;
        # in modern scipy it lives at scipy.special.comb.
        # construct polynomial and factor it
        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
        yj = np.roots(P)
        # for each root, compute two z roots, select the one with |z|>1
        # Build up final polynomial
        c = np.poly1d([1, 1])**p
        q = np.poly1d([1])
        for k in range(p - 1):
            yval = yj[k]
            part = 2 * sqrt(yval * (yval - 1))
            const = 1 - 2 * yval
            z1 = const + part
            if (abs(z1)) < 1:
                z1 = const - part
            q = q * [1, -z1]
        q = c * np.real(q)
        # Normalize result
        q = q / np.sum(q) * sqrt(2)
        return q.c[::-1]
    else:
        raise ValueError("Polynomial factorization does not work "
                         "well for p too large.")
def qmf(hk):
    """
    Return the high-pass quadrature-mirror filter for a low-pass filter.

    Parameters
    ----------
    hk : array_like
        Coefficients of the low-pass filter.

    Returns
    -------
    ndarray
        Coefficients of the corresponding high-pass filter: `hk` reversed,
        with every other coefficient negated.
    """
    signs = np.array([(-1) ** k for k in range(len(hk))])
    return hk[::-1] * signs
def wavedec(amn, hk):
    # Stub: wavelet decomposition is not implemented yet.  The high-pass
    # filter is derived from hk but currently unused.
    gk = qmf(hk)
    return NotImplemented
def cascade(hk, J=7):
    """
    Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.

    Parameters
    ----------
    hk : array_like
        Coefficients of low-pass filter.
    J : int, optional
        Values will be computed at grid points ``K/2**J``. Default is 7.

    Returns
    -------
    x : ndarray
        The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
        ``len(hk) = len(gk) = N+1``.
    phi : ndarray
        The scaling function ``phi(x)`` at `x`:
        ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray, optional
        The wavelet function ``psi(x)`` at `x`:
        ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
        `psi` is only returned if `gk` is not None.

    Raises
    ------
    ValueError
        If J is too large for the filter length, or smaller than 1.

    Notes
    -----
    The algorithm uses the vector cascade algorithm described by Strang and
    Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
    and slices for quick reuse. Then inserts vectors into final vector at the
    end.
    """
    N = len(hk) - 1

    if (J > 30 - np.log2(N + 1)):
        raise ValueError("Too many levels.")
    if (J < 1):
        raise ValueError("Too few levels.")

    # construct matrices needed
    nn, kk = np.ogrid[:N, :N]
    s2 = np.sqrt(2)
    # append a zero so that take works
    thk = np.r_[hk, 0]
    gk = qmf(hk)
    tgk = np.r_[gk, 0]

    indx1 = np.clip(2 * nn - kk, -1, N + 1)
    indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
    m = np.zeros((2, 2, N, N), 'd')
    m[0, 0] = np.take(thk, indx1, 0)
    m[0, 1] = np.take(thk, indx2, 0)
    m[1, 0] = np.take(tgk, indx1, 0)
    m[1, 1] = np.take(tgk, indx2, 0)
    m *= s2

    # construct the grid of points
    # BUG FIX: np.float was a deprecated alias for the builtin float and has
    # been removed in modern NumPy; use the builtin directly.
    x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
    phi = 0 * x
    psi = 0 * x

    # find phi0, and phi1
    lam, v = eig(m[0, 0])
    ind = np.argmin(np.absolute(lam - 1))
    # a dictionary with a binary representation of the
    # evaluation points x < 1 -- i.e. position is 0.xxxx
    v = np.real(v[:, ind])
    # need scaling function to integrate to 1 so find
    # eigenvector normalized to sum(v,axis=0)=1
    sm = np.sum(v)
    if sm < 0:  # need scaling function to integrate to 1
        v = -v
        sm = -sm
    bitdic = {}
    bitdic['0'] = v / sm
    bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
    step = 1 << J
    phi[::step] = bitdic['0']
    phi[(1 << (J - 1))::step] = bitdic['1']
    psi[::step] = np.dot(m[1, 0], bitdic['0'])
    psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])

    # descend down the levels inserting more and more values
    # into bitdic -- store the values in the correct location once we
    # have computed them -- stored in the dictionary
    # for quicker use later.
    prevkeys = ['1']
    for level in range(2, J + 1):
        newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
        fac = 1 << (J - level)
        for key in newkeys:
            # convert key to number
            num = 0
            for pos in range(level):
                if key[pos] == '1':
                    num += (1 << (level - 1 - pos))
            pastphi = bitdic[key[1:]]
            ii = int(key[0])
            temp = np.dot(m[0, ii], pastphi)
            bitdic[key] = temp
            phi[num * fac::step] = temp
            psi[num * fac::step] = np.dot(m[1, ii], pastphi)
        prevkeys = newkeys

    return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
    """
    Complex Morlet wavelet.

    Parameters
    ----------
    M : int
        Length of the wavelet.
    w : float
        Omega0. Default is 5
    s : float
        Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
    complete : bool
        Whether to use the complete or the standard version.

    Returns
    -------
    morlet : (M,) ndarray

    See Also
    --------
    scipy.signal.gausspulse

    Notes
    -----
    The standard version::
        pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
    This commonly used wavelet is often referred to simply as the
    Morlet wavelet.  Note that this simplified version can cause
    admissibility problems at low values of w.
    The complete version::
        pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
    The complete version of the Morlet wavelet, with a correction
    term to improve admissibility. For w greater than 5, the
    correction term is negligible.
    Note that the energy of the return wavelet is not normalised
    according to s.
    The fundamental frequency of this wavelet in Hz is given
    by ``f = 2*s*w*r / M`` where r is the sampling rate.
    """
    # BUG FIX: linspace/pi/exp were taken from the deprecated scipy top-level
    # namespace (removed in modern scipy); use the numpy equivalents.
    x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
    output = np.exp(1j * w * x)

    if complete:
        # admissibility correction term
        output -= np.exp(-0.5 * (w**2))

    output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)

    return output
def ricker(points, a):
    """
    Return a Ricker wavelet, also known as the "Mexican hat wavelet".

    It models the function ``A (1 - x^2/a^2) exp(-x^2/(2 a^2))`` with
    ``A = 2/(sqrt(3a) pi^{1/4})``, sampled at `points` integer-spaced
    positions centred around 0.

    Parameters
    ----------
    points : int
        Number of points in the returned vector.
    a : scalar
        Width parameter of the wavelet.

    Returns
    -------
    vector : (N,) ndarray
        Array of length `points` in shape of ricker curve.

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> vec2 = signal.ricker(100, 4.0)
    >>> len(vec2)
    100
    >>> plt.plot(vec2)
    >>> plt.show()
    """
    amp = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
    width_sq = a**2
    xdata = np.arange(0, points) - (points - 1.0) / 2
    xsq = xdata**2
    polynomial = (1 - xsq / width_sq)
    envelope = np.exp(-xsq / (2 * width_sq))
    return amp * polynomial * envelope
def cwt(data, wavelet, widths):
    """
    Continuous wavelet transform.

    Convolves `data` with `wavelet(length, width)` for every width in
    `widths`; the wavelet length is capped at ``len(data)``.

    Parameters
    ----------
    data : (N,) ndarray
        data on which to perform the transform.
    wavelet : function
        Wavelet function taking (length, width) and returning a vector of
        that length; see `ricker` for a conforming example.
    widths : (M,) sequence
        Widths to use for transform.

    Returns
    -------
    cwt : (M, N) ndarray
        One row of ``convolve(data, wavelet(...), mode='same')`` per width.

    Examples
    --------
    >>> from scipy import signal
    >>> sig = np.random.rand(20) - 0.5
    >>> cwtmatr = signal.cwt(sig, signal.ricker, np.arange(1, 11))
    """
    out = np.empty((len(widths), len(data)))
    for row, width in enumerate(widths):
        npoints = min(10 * width, len(data))
        out[row] = convolve(data, wavelet(npoints, width), mode='same')
    return out
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/scipy/signal/wavelets.py
|
Python
|
mit
| 10,266
|
[
"Gaussian"
] |
91387ceb7aa672db763418a8658ad4ee8325296d3c00be34b700f9a04471ada1
|
#!/usr/bin/env python2
# Copyright (C) 2015-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# ESPResSo++ Python script for an example of pressure tensor calculation #
# layerwise according to the Irving-Kirkwood method                      #
# #
###########################################################################
"""
Initial configuration file is 'lennard_jones.xyz' (equilibrated lennard-jones fluid).
"""
import espressopp
import mpi4py.MPI as MPI
# --- simulation parameters -------------------------------------------------
# skin for Verlet list
skin = 0.3
# LJ cutoff
rc = 2.5
# integration step
dt = 0.005
# read a configuration from a file
pid, type, xpos, ypos, zpos, xvel, yvel, zvel, Lx, Ly, Lz = \
espressopp.tools.readxyz('lennard_jones.xyz')
# number of particles
NPart = len(xpos)
# system box size
box = (Lx, Ly, Lz)
# create a basic system
system = espressopp.System()
# specify a random number generator
system.rng = espressopp.esutil.RNG()
# use orthorhombic periodic boundary conditions
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
comm = MPI.COMM_WORLD
# split the box over the available MPI ranks
nodeGrid = espressopp.tools.decomp.nodeGrid(comm.size,box,rc,skin)
# calculate a 3D subgrid to speed up verlet list builds and communication
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
# create a domain decomposition particle storage with the specified nodeGrid and cellGrid
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
print "number of particles = ", NPart
print "box = ", box
print "nodeGrid = ", nodeGrid
print "cellGrid = ", cellGrid
print "skin = ", skin
print "cutoff = ", rc
print "timestep = ", dt
print "setting up system ..."
# add the particles from the file to the storage of the system
properties = ['id', 'type', 'pos', 'v']
particles = []
for i in range(NPart):
  part = [pid[i], type[i], espressopp.Real3D(xpos[i], ypos[i], zpos[i]), espressopp.Real3D(xvel[i], yvel[i], zvel[i])]
  particles.append(part)
  # add particles in chunks of 1000 particles, this is faster than adding each single particle
  if i % 1000 == 0:
    system.storage.addParticles(particles, *properties)
    # distribute particles to their nodes
    system.storage.decompose()
    particles = []
# flush the remaining (incomplete) chunk and redistribute once more
system.storage.addParticles(particles, *properties)
system.storage.decompose()
# setup the Lennard-Jones interaction, we use Verlet-List to loop over all interactions
vl = espressopp.VerletList(system, cutoff = rc)
potLJ = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=rc)
interLJ = espressopp.interaction.VerletListLennardJones(vl)
interLJ.setPotential(type1=0, type2=0, potential=potLJ)
system.addInteraction(interLJ)
# use a velocity Verlet integration scheme
integrator = espressopp.integrator.VelocityVerlet(system)
# set the integration step
integrator.dt = dt
integrator.run(1)
print "system setup finished"
print 'Calculating pressure...'
n = int(10) # 10 layers in z direction
h0 = Lz / float(n) # z coordinate of initial layer
dh = 3. # area around the layer for kinetic part of the pressure tensor
# three observables: total tensor, one layer at a time, and all layers at once
pressure_tensor = espressopp.analysis.PressureTensor(system)
pressure_tensor_l = espressopp.analysis.PressureTensorLayer(system, h0, dh)
pressure_tensor_ml = espressopp.analysis.PressureTensorMultiLayer(system, n, dh)
n_measurements = 10 # result will be averaged over 10 measurements
print 'result will be averaged over ', n_measurements, ' measurements'
pij_layers1 = []
pij_layers2 = []
Pijtot = espressopp.Tensor(0.0)
for i in range(n_measurements):
    integrator.run(10)
    print 'measurement Nr: %d of %d' % ( i+1, n_measurements )
    # compute the total pressure tensor
    Pijtot += espressopp.Tensor( pressure_tensor.compute() )
    # layerwise
    pij_aux = pressure_tensor_ml.compute()
    for j in range(n):
        pressure_tensor_l.h0 = j * h0
        # first measurement appends; later measurements accumulate in place
        if(j>= len( pij_layers1 ) ):
            pij_layers1.append( espressopp.Tensor( pressure_tensor_l.compute() ) )
            pij_layers2.append( pij_aux[j] )
        else:
            pij_layers1[j] += espressopp.Tensor( pressure_tensor_l.compute() )
            pij_layers2[j] += pij_aux[j]
# averaging
Pijtot /= float(n_measurements)
for i in range(n):
    pij_layers1[i] /= float(n_measurements)
    pij_layers2[i] /= float(n_measurements)
print '\ntotal pressure tensor'
print ' Pxx Pyy Pzz Pxy Pxz Pyz'
fmt1 = '%8.4f %8.4f %8.4f %8.4f %8.4f %8.4f'
print(fmt1 % (Pijtot[0], Pijtot[1], Pijtot[2], Pijtot[3], Pijtot[4], Pijtot[5]))
print '\nPressure tensor by PressureTensorLayer (caculated for each layer separatelly).'
print 'layer number z coord of layer pressure tensor'
for i in range(n):
    print ('%4d %7.3f ' % (i, i * h0)) , pij_layers1[i]
print '\nPressure tensor by PressureTensorMultiLayer (caculated for each layer at once).'
print 'layer number z coord of layer pressure tensor'
for i in range(n):
    print ('%4d %7.3f ' % (i, i * h0)) , pij_layers2[i]
print 'done'
print 'both functions should give the same result'
|
kkreis/espressopp
|
examples/local_pressure_tensor/local_pressure_tensor.py
|
Python
|
gpl-3.0
| 6,109
|
[
"ESPResSo"
] |
8ed33e805b06f59a174326cf840299f307f24c98398a1129b4cb4c7f05073b69
|
"""
ex20170209_SIMEX1.py
Build Model SIMEX from Godley & Lavoie (Chapter 3, section 3.7.1.
Uses the model builder from the sfc_models.gl_book sub-package.
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sfc_models
from sfc_models.gl_book.chapter3 import SIMEX1, SIM
from sfc_models.examples.Quick2DPlot import Quick2DPlot
# The next line of code sets the name of the output files based on the code file's name.
# This means that if you paste this code into a new file, get a new log name.
sfc_models.register_standard_logs('output', __file__)
# Build and solve Model SIMEX (expected income variant of SIM).
builder_SIMEX = SIMEX1(country_code='C1', use_book_exogenous=True)
model = builder_SIMEX.build_model()
model.main()
# NOTE: Running two models messes up file output...
# Build and solve Model SIM for comparison against SIMEX.
builder_SIM = SIM(country_code='C1', use_book_exogenous=True)
model_SIM = builder_SIM.build_model()
model_SIM.main()
# Only plot the first 20 periods.
model.TimeSeriesCutoff = 20
model_SIM.TimeSeriesCutoff = 20
time = model.GetTimeSeries('k')
Y_SIMEX = model.GetTimeSeries('GOOD__SUP_GOOD')
Y_SIM = model_SIM.GetTimeSeries('GOOD__SUP_GOOD')
income = model.GetTimeSeries('HH__AfterTax')
expected_income = model.GetTimeSeries('HH__EXP_AfterTax')
F_SIMEX = model.GetTimeSeries('HH__F')
F_SIM = model_SIM.GetTimeSeries('HH__F')
Quick2DPlot(time, Y_SIMEX, 'Output (Y) - Model SIMEX', filename='intro_5_4_1.png')
q = Quick2DPlot([time, time], [expected_income, income], 'Household Income in Model SIMEX', run_now=False,
filename='intro_5_4_2.png')
q.Legend = ['Expected', 'Realised']
q.DoPlot()
q = Quick2DPlot([time, time], [Y_SIMEX, Y_SIM], 'Output (Y)', run_now=False, filename='intro_5_4_3.png')
q.Legend = ['Model SIMEX', 'Model SIM']
q.DoPlot()
q = Quick2DPlot([time, time], [F_SIMEX, F_SIM], 'Household Financial Assets', run_now=False, filename='intro_5_4_4.png')
q.Legend = ['Model SIMEX', 'Model SIM']
q.DoPlot()
|
brianr747/SFC_models
|
sfc_models/examples/scripts/intro_5_04_SIMEX1.py
|
Python
|
apache-2.0
| 2,355
|
[
"Brian"
] |
3115292d57794d894844f40d824214b730b7c5d344fc546f6894a867bb93971c
|
import unittest, logging
from pyquante2 import molecule, rhf, uhf, rohf, cuhf, h2, h2o, lih, li, oh, ch4, basisset
from pyquante2.ints.integrals import libint_twoe_integrals, twoe_integrals_compressed
from pyquante2.geo.molecule import read_xyz
from pyquante2.scf.iterators import SCFIterator, AveragingIterator, USCFIterator, ROSCFIterator
class test_scf(unittest.TestCase):
    """reference energies obtained from NWCHEM 6.5"""

    def _converge(self, geo, basis, energy, places,
                  hamiltonian_factory=rhf, iterator_factory=SCFIterator):
        # Shared driver: build the basis set, converge the SCF iteration and
        # compare the final energy against the reference value.
        bfs = basisset(geo, basis)
        iterator = iterator_factory(hamiltonian_factory(bfs))
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, energy, places)

    def test_h2(self):
        self._converge(h2, 'sto-3g', -1.117099435262, 7)

    def test_h2_631(self):
        self._converge(h2, '6-31gss', -1.131333590574, 7)

    def test_lih(self):
        self._converge(lih, 'sto-3g', -7.860746149768, 6)

    def test_lih_averaging(self):
        self._converge(lih, 'sto-3g', -7.860746149768, 6,
                       iterator_factory=AveragingIterator)

    def test_h4(self):
        h4 = molecule([
            (1, 0.00000000, 0.00000000, 0.36628549),
            (1, 0.00000000, 0.00000000, -0.36628549),
            (1, 0.00000000, 4.00000000, 0.36628549),
            (1, 0.00000000, 4.00000000, -0.36628549),
        ],
            units='Angstrom')
        # This is not quite equal to 2x the h2 energy, but very close
        self._converge(h4, 'sto-3g', -2.234185358600, 7)

    def test_h2o(self):
        self._converge(h2o, 'sto-3g', -74.959857776754, 5)

    def test_h2o_averaging(self):
        self._converge(h2o, 'sto-3g', -74.959857776754, 5,
                       iterator_factory=AveragingIterator)

    def test_oh(self):
        self._converge(oh, 'sto-3g', -74.360233544941, 4,
                       hamiltonian_factory=uhf, iterator_factory=USCFIterator)

    def test_li(self):
        self._converge(li, 'sto-3g', -7.315525981280, 6,
                       hamiltonian_factory=uhf, iterator_factory=USCFIterator)
class test_libint_rhf(unittest.TestCase):
    """reference energies obtained from NWCHEM 6.5"""

    def _converge(self, geo, basis, energy, places):
        # Shared driver: RHF with libint two-electron integrals.
        bfs = basisset(geo, basis)
        hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
        iterator = SCFIterator(hamiltonian)
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, energy, places)

    def test_CH4(self):
        """CH4 symmetry Td"""
        self._converge(ch4, 'sto-3g', -39.726862723517, 6)

    def test_C2H2Cl2(self):
        """C2H2Cl2 symmetry C2H"""
        self._converge(read_xyz('./molfiles/C2H2Cl2.xyz'), 'sto-3g',
                       -967.533150337277, 4)

    def test_H2O_4(self):
        """H2O tetramer symmetry S4"""
        self._converge(read_xyz('./molfiles/H2O_4.xyz'), 'sto-3g',
                       -299.909789863537, 5)

    def test_BrF5(self):
        """BrF5 symmetry C4v"""
        self._converge(read_xyz('./molfiles/BrF5.xyz'), 'sto-3g',
                       -3035.015731331871, 4)

    def test_HBr(self):
        """HBr"""
        self._converge(read_xyz('./molfiles/HBr.xyz'), 'sto-3g',
                       -2545.887434128302, 4)

    def test_C8H8(self):
        """C8H8"""
        self._converge(read_xyz('./molfiles/C8H8.xyz'), 'sto-6g',
                       -306.765545547300, 5)

    def test_N8(self):
        """N8"""
        self._converge(read_xyz('./molfiles/N8.xyz'), 'cc-pvdz',
                       -434.992755329296, 5)
class test_unstable(unittest.TestCase):
    """Unstable RHF convergence.

    Different NWCHEM energy with and without autosym.
    Energies here are only checked to 0 decimal places because convergence
    is unstable for these systems.
    """
    def test_B12(self):
        """B12 symmetry Ih"""
        B12 = read_xyz('./molfiles/B12.xyz')
        bfs = basisset(B12,'sto-3g')
        hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
        iterator = SCFIterator(hamiltonian)
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, -290.579419642829, 0)
    def test_CrCO6(self):
        # FAIL
        """Cr(CO)6 symmetry Oh
        Reference: Whitaker, A.; Jeffery, J. W. Acta Cryst. 1967, 23, 977. DOI: 10.1107/S0365110X67004153
        """
        CrCO6 = read_xyz('./molfiles/CrCO6.xyz')
        bfs = basisset(CrCO6,'sto-3g')
        # NOTE: unlike its siblings, this case runs restricted open-shell HF.
        hamiltonian = rohf(bfs, twoe_factory=libint_twoe_integrals)
        iterator = ROSCFIterator(hamiltonian)
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, -1699.539642257497, 0)
    def test_C24(self):
        # FAIL
        """C24 symmetry Th"""
        C24 = read_xyz('./molfiles/C24.xyz')
        bfs = basisset(C24,'sto-3g')
        hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
        iterator = SCFIterator(hamiltonian)
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, -890.071915453874, 0)
class test_libint_uhf(unittest.TestCase):
    """reference energies obtained from NWCHEM 6.5"""

    def test_CF3(self):
        """CF3 radical"""
        mol = read_xyz('./molfiles/CF3.xyz')
        basis_fns = basisset(mol, 'sto-3g')
        scf = USCFIterator(uhf(basis_fns, twoe_factory=libint_twoe_integrals))
        scf.converge()
        self.assertTrue(scf.converged)
        self.assertAlmostEqual(scf.energy, -331.480688906400, 5)
class test_libint_rohf(unittest.TestCase):
    """reference energies obtained from NWCHEM 6.5"""

    def _converge(self, geo, basis, energy, places, **rohf_kwargs):
        # Shared driver: ROHF; extra keyword args are forwarded to rohf()
        # (e.g. twoe_factory=libint_twoe_integrals).
        bfs = basisset(geo, basis)
        iterator = ROSCFIterator(rohf(bfs, **rohf_kwargs))
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, energy, places)

    def test_CH3(self):
        """CH3 radical"""
        self._converge(read_xyz('./molfiles/CH3.xyz'), 'sto-3g', -38.9493, 5,
                       twoe_factory=libint_twoe_integrals)

    def test_CF3(self):
        """CF3 radical"""
        self._converge(read_xyz('./molfiles/CF3.xyz'), 'sto-3g',
                       -331.479340943449, 5,
                       twoe_factory=libint_twoe_integrals)

    def test_oh(self):
        self._converge(oh, 'sto-3g', -74.359151530162, 5)

    def test_N8(self):
        """N8"""
        self._converge(read_xyz('./molfiles/N8.xyz'), 'cc-pvdz',
                       -434.992755329296, 5,
                       twoe_factory=libint_twoe_integrals)
class test_libint_cuhf(unittest.TestCase):
    """use UHF energy reference"""

    def _converge(self, geo, basis, energy, places, **cuhf_kwargs):
        # Shared driver: constrained UHF; extra keyword args are forwarded
        # to cuhf() (e.g. twoe_factory=libint_twoe_integrals).
        bfs = basisset(geo, basis)
        iterator = USCFIterator(cuhf(bfs, **cuhf_kwargs))
        iterator.converge()
        self.assertTrue(iterator.converged)
        self.assertAlmostEqual(iterator.energy, energy, places)

    def test_CH3(self):
        """CH3 radical"""
        self._converge(read_xyz('./molfiles/CH3.xyz'), 'sto-3g',
                       -38.952023222533, 5,
                       twoe_factory=libint_twoe_integrals)

    def test_CF3(self):
        """CF3 radical"""
        self._converge(read_xyz('./molfiles/CF3.xyz'), 'sto-3g',
                       -331.480688906400, 5,
                       twoe_factory=libint_twoe_integrals)

    def test_oh(self):
        self._converge(oh, 'sto-3g', -74.360233544941, 4)
def runsuite(verbose=True):
    """Run the full SCF test suite.

    Pass verbose=False for quieter test-runner output.
    """
    # To use psyco, uncomment this line:
    #import psyco; psyco.full()
    verbosity = 2 if verbose else 1
    # Comment out this line for less output:
    logging.basicConfig(format="%(message)s",level=logging.DEBUG)
    # BUG FIX: the suites for test_scf..test_libint_rohf were built but only
    # the cuhf suite was actually run (debugging leftover); run all of them.
    loader = unittest.TestLoader()
    cases = [test_scf, test_libint_rhf, test_unstable,
             test_libint_uhf, test_libint_rohf, test_libint_cuhf]
    alltests = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case) for case in cases])
    unittest.TextTestRunner(verbosity=verbosity).run(alltests)
    # Running without verbosity is equivalent to replacing the above
    # two lines with the following:
    #unittest.main()
def debugsuite():
    """Profile a full test-suite run and print the 15 hottest functions."""
    import cProfile
    import pstats
    cProfile.run('runsuite()', 'prof')
    stats = pstats.Stats('prof')
    stats.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
    import sys
    # Pass -d on the command line to run the suite under the profiler.
    if "-d" in sys.argv:
        debugsuite()
    else:
        runsuite()
|
Konjkov/pyquante2
|
tests/test_scf.py
|
Python
|
bsd-3-clause
| 11,974
|
[
"NWChem"
] |
c7fc040a386fca983725821e03d9c0423bc4711eebb6796074821ba24c945d93
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.defects.generators import VacancyGenerator, \
SubstitutionGenerator, InterstitialGenerator, VoronoiInterstitialGenerator, \
SimpleChargeGenerator
class VacancyGeneratorTest(PymatgenTest):
    """Checks for VacancyGenerator on the VO2 test structure."""

    def test_vacancy_gen(self):
        structure = PymatgenTest.get_structure("VO2")
        vacancies = list(VacancyGenerator(structure))
        self.assertEqual(len(vacancies), 2)
        mults = {str(v.site.specie): v.multiplicity for v in vacancies}
        self.assertEqual(mults, {"O": 4, "V": 2})

    def test_vacancy_gen_charges(self):
        # Ensure correct BV charges are assigned
        structure = PymatgenTest.get_structure("VO2")
        for vac in VacancyGenerator(structure, include_bv_charge=True):
            specie = str(vac.site.specie)
            if specie == "V":
                self.assertEqual(vac.charge, -4)
            elif specie == "O":
                self.assertEqual(vac.charge, 2)
class SubstitutionGeneratorTest(PymatgenTest):
    """Tests for SubstitutionGenerator (antisites and substitutions) on VO2."""

    def test_substitution_gen(self):
        structure = PymatgenTest.get_structure("VO2")

        # Antisite: V placed on the other sublattice.
        antisites = list(SubstitutionGenerator(structure, "V"))
        self.assertEqual(len(antisites), 1)
        self.assertEqual(antisites[0].site.specie.symbol, 'V')
        self.assertEqual(antisites[0].multiplicity, 4)

        # The replaced lattice site (nearest site to the defect) must be O.
        neighbors = structure.get_sites_in_sphere(
            antisites[0].site.coords, 2, include_index=True)
        defindex = sorted(neighbors, key=lambda entry: entry[1])[0][2]
        self.assertEqual(structure[defindex].specie.symbol, 'O')

        # Substitutional S can land on either sublattice.
        subs = list(SubstitutionGenerator(structure, "S"))
        self.assertEqual(len(subs), 2)
        self.assertEqual(
            set(sub.name for sub in subs),
            set(['Sub_S_on_O_mult4', 'Sub_S_on_V_mult2']))
class InterstitialGeneratorTest(PymatgenTest):
    """Tests for InterstitialGenerator on VO2."""

    def test_int_gen(self):
        structure = PymatgenTest.get_structure("VO2")
        interstitials = list(InterstitialGenerator(structure, "Li"))
        self.assertEqual(len(interstitials), 4)
        self.assertEqual([i.multiplicity for i in interstitials], [8, 8, 4, 4])
        for inter in interstitials[:2]:
            self.assertEqual(str(inter.site.specie), "Li")
        self.assertArrayAlmostEqual(
            interstitials[0].site.coords, (0.9106, 0.3078, 0.3078), decimal=4)
        self.assertArrayAlmostEqual(
            interstitials[1].site.coords, (1.5177, 1.7444, 0.3078), decimal=4)
class VoronoiInterstitialGeneratorTest(PymatgenTest):
    """Tests for VoronoiInterstitialGenerator on VO2."""

    def test_int_gen(self):
        structure = PymatgenTest.get_structure("VO2")
        interstitials = list(VoronoiInterstitialGenerator(structure, "Li"))
        self.assertEqual(len(interstitials), 3)
        self.assertEqual([i.multiplicity for i in interstitials], [8, 8, 4])
        for inter in interstitials:
            self.assertEqual(str(inter.site.specie), "Li")
        self.assertArrayAlmostEqual(
            interstitials[0].site.coords, (1.5177146, 2.6784354, 3.9481299))
        self.assertArrayAlmostEqual(
            interstitials[1].site.coords, (1.7357692, 3.8392513, 3.8392513))
        self.assertArrayAlmostEqual(
            interstitials[2].site.coords, [2.2765713, 4.5150233, 2.2575138])
class SimpleChargeGeneratorTest(PymatgenTest):
    """Tests for SimpleChargeGenerator over a mixed set of defect types."""

    def test_charge_gen(self):
        structure = PymatgenTest.get_structure("VO2")

        # Assemble the set of defects to get charges for.
        defects = list(VacancyGenerator(structure))
        for sub_elt in ['V', 'O', 'S']:
            defects.extend(SubstitutionGenerator(structure, sub_elt))
        defects.extend(VoronoiInterstitialGenerator(structure, "H"))
        defect_list = list(set(defects))

        # Expected "simple" charge for each defect, keyed by defect name.
        true_charges = {
            'Vac_O_mult4': 2, 'Int_H_Voronoi1_mult8': 0,
            'Int_H_Voronoi2_mult8': 0, 'Vac_V_mult2': -4,
            'Sub_S_on_V_mult2': 0, 'Int_H_Voronoi3_mult4': 0,
            'Int_H_Voronoi4_mult4': 0, 'Sub_O_on_V_mult2': -2,
            'Sub_S_on_O_mult4': 0, 'Sub_V_on_O_mult4': 1,
        }
        for defect in defect_list:
            charged = list(SimpleChargeGenerator(defect))
            # Exactly one charged variant is produced per defect.
            self.assertEqual(len(charged), 1)
            self.assertEqual(true_charges[charged[0].name], charged[0].charge)
if __name__ == "__main__":
    # Allow running this module directly as a test script.
    unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/analysis/defects/tests/test_generators.py
|
Python
|
mit
| 5,141
|
[
"pymatgen"
] |
75a0ff9ba078c4660ce49389a564e9869f71784e090b332a54cf022b70f3df73
|
#!/usr/bin/env python3
"""Packaging configuration for chronoplot."""
from setuptools import find_packages, setup

setup(
    name='chronoplot',
    version='0.1.0',
    description='Timeline maker.',
    long_description='',
    keywords='',
    url='https://github.com/darkfeline/chronoplot',
    author='Allen Li',
    author_email='darkfeline@felesatra.moe',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Intended Audience :: End Users/Desktop',
        'Programming Language :: Python :: 3.5',
    ],
    # Sources live under src/ ("src layout").
    package_dir={'': 'src'},
    packages=find_packages('src'),
    entry_points={
        'console_scripts': [
            'chronoplot = chronoplot.main:main',
        ],
    },
)
|
darkfeline/chronoplot
|
setup.py
|
Python
|
gpl-3.0
| 786
|
[
"MOE"
] |
efe831c2a9c06911ba43e12a608f4fa038610b6bf23237e8e8b0acb6bcb9c189
|
"""
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from functools import partial, update_wrapper
import six
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import IntegrityError, transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.views.decorators.http import require_http_methods
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from django.utils.translation import ugettext as _
from social_auth.backends import get_backend
from social_auth.models import UserSocialAuth
from sudo.decorators import sudo_required
from sentry import newsletter
from sentry.models import (User, UserEmail, LostPasswordHash, Project, UserOption, Authenticator)
from sentry.security import capture_security_activity
from sentry.signals import email_verified
from sentry.web.decorators import login_required, signed_auth_required
from sentry.web.forms.accounts import (
AccountSettingsForm, AppearanceSettingsForm, RecoverPasswordForm, ChangePasswordRecoverForm,
EmailForm
)
from sentry.web.helpers import render_to_response
from sentry.utils import auth
logger = logging.getLogger('sentry.accounts')
@login_required
def login_redirect(request):
    """Send an authenticated user on to their post-login destination."""
    return HttpResponseRedirect(auth.get_login_redirect(request))
def expired(request, user):
    """Re-send a recovery email after an expired password-reset link."""
    password_hash = LostPasswordHash.for_user(user)
    password_hash.send_email(request)
    return render_to_response(
        'sentry/account/recover/expired.html',
        {'email': password_hash.user.email},
        request,
    )
def recover(request):
    """Password-recovery entry point: send a reset email for a username/email.

    POST attempts are rate-limited per client IP to blunt abuse of the
    email sender.
    """
    from sentry.app import ratelimiter

    extra = {
        'ip_address': request.META['REMOTE_ADDR'],
        'user_agent': request.META.get('HTTP_USER_AGENT'),
    }
    if request.method == 'POST' and ratelimiter.is_limited(
        'accounts:recover:{}'.format(extra['ip_address']),
        limit=5,
        window=60,  # 5 per minute should be enough for anyone
    ):
        # Bug fix: this warning used to sit *after* the return statement,
        # so rate-limited attempts were never logged.
        logger.warning('recover.rate-limited', extra=extra)
        return HttpResponse(
            'You have made too many password recovery attempts. Please try again later.',
            content_type='text/plain',
            status=429,
        )

    prefill = {'user': request.GET.get('email')}
    form = RecoverPasswordForm(request.POST or None, initial=prefill)
    extra['user_recovered'] = form.data.get('user')

    if form.is_valid():
        email = form.cleaned_data['user']
        password_hash = LostPasswordHash.for_user(email)
        password_hash.send_email(request)

        extra['passwordhash_id'] = password_hash.id
        extra['user_id'] = password_hash.user_id
        logger.info('recover.sent', extra=extra)
        context = {'email': password_hash.user.email}
        return render_to_response('sentry/account/recover/sent.html', context, request)

    if form._errors:
        logger.warning('recover.error', extra=extra)

    context = {'form': form}
    return render_to_response('sentry/account/recover/index.html', context, request)
def get_template(name, mode):
    """Build the account-page template path for *name* in the given *mode*
    ('recover' or 'set_password')."""
    return 'sentry/account/%s/%s.html' % (mode, name)
def recover_confirm(request, user_id, hash, mode='recover'):
    """Second recovery step: validate the emailed hash, then let the user
    set a new password and log them in."""
    try:
        password_hash = LostPasswordHash.objects.get(user=user_id, hash=hash)
        if not password_hash.is_valid():
            password_hash.delete()
            raise LostPasswordHash.DoesNotExist
        user = password_hash.user
    except LostPasswordHash.DoesNotExist:
        return render_to_response(get_template('failure', mode), {}, request)

    if request.method == 'POST':
        form = ChangePasswordRecoverForm(request.POST)
        if form.is_valid():
            with transaction.atomic():
                user.set_password(form.cleaned_data['password'])
                user.refresh_session_nonce(request)
                user.save()

            # Ugly way of doing this, but Django requires the backend be set
            user = authenticate(
                username=user.username,
                password=form.cleaned_data['password'],
            )
            login_user(request, user)

            # The hash is single-use; drop it once the password is changed.
            password_hash.delete()

            capture_security_activity(
                account=user,
                type='password-changed',
                actor=request.user,
                ip_address=request.META['REMOTE_ADDR'],
                send_email=True,
            )
            return login_redirect(request)
    else:
        form = ChangePasswordRecoverForm()

    return render_to_response(get_template('confirm', mode), {'form': form}, request)
# Set-password variation of password recovery: same view, different templates.
# NOTE(review): update_wrapper copies metadata from `recover`; presumably
# `recover_confirm` was intended — confirm before changing.
set_password_confirm = update_wrapper(
    partial(recover_confirm, mode='set_password'), recover)
@login_required
@require_http_methods(["POST"])
def start_confirm_email(request):
    """Send verification email(s), rate-limited per user.

    With 'primary-email' in the POST only the named address is verified;
    otherwise every unverified address on the account gets an email.
    """
    from sentry.app import ratelimiter

    if ratelimiter.is_limited(
        'auth:confirm-email:{}'.format(request.user.id),
        limit=10,
        window=60,  # 10 per minute should be enough for anyone
    ):
        return HttpResponse(
            'You have made too many email confirmation requests. Please try again later.',
            content_type='text/plain',
            status=429,
        )

    if 'primary-email' in request.POST:
        email = request.POST.get('email')
        try:
            email_to_send = UserEmail.objects.get(user=request.user, email=email)
        except UserEmail.DoesNotExist:
            msg = _('There was an error confirming your email.')
            level = messages.ERROR
        else:
            request.user.send_confirm_email_singular(email_to_send)
            msg = _('A verification email has been sent to %s.') % (email)
            level = messages.SUCCESS
        messages.add_message(request, level, msg)
        return HttpResponseRedirect(reverse('sentry-account-settings'))

    if request.user.has_unverified_emails():
        request.user.send_confirm_emails()
        unverified_emails = [e.email for e in request.user.get_unverified_emails()]
        msg = _('A verification email has been sent to %s.') % (', ').join(unverified_emails)
        for email in unverified_emails:
            logger.info(
                'user.email.start_confirm',
                extra={
                    'user_id': request.user.id,
                    'ip_address': request.META['REMOTE_ADDR'],
                    'email': email,
                }
            )
    else:
        msg = _('Your email (%s) has already been verified.') % request.user.email
    messages.add_message(request, messages.SUCCESS, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
def confirm_email(request, user_id, hash):
    """Landing page for an email-verification link; marks the address verified."""
    msg = _('Thanks for confirming your email')
    level = messages.SUCCESS
    try:
        email = UserEmail.objects.get(user=user_id, validation_hash=hash)
        if not email.hash_is_valid():
            raise UserEmail.DoesNotExist
    except UserEmail.DoesNotExist:
        # Keep the success message for users with nothing left to verify;
        # only surface an error when re-verification is actually possible.
        if request.user.is_anonymous() or request.user.has_unverified_emails():
            msg = _(
                'There was an error confirming your email. Please try again or '
                'visit your Account Settings to resend the verification email.'
            )
            level = messages.ERROR
    else:
        email.is_verified = True
        email.validation_hash = ''
        email.save()
        email_verified.send(email=email.email, sender=email)
        logger.info(
            'user.email.confirm',
            extra={
                'user_id': user_id,
                'ip_address': request.META['REMOTE_ADDR'],
                'email': email.email,
            }
        )
    messages.add_message(request, level, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def account_settings(request):
    """Display and update username, name and primary email for the user."""
    user = request.user

    form = AccountSettingsForm(
        user,
        request,
        request.POST or None,
        initial={
            'email': UserEmail.get_primary_email(user).email,
            'username': user.username,
            'name': user.name,
        },
    )
    if form.is_valid():
        old_email = user.email
        form.save()

        # Repoint notification settings that referenced the old primary email.
        alert_email = UserOption.objects.get_value(user=user, key='alert_email')
        if alert_email == old_email:
            UserOption.objects.set_value(user=user, key='alert_email', value=user.email)
        for option in UserOption.objects.filter(user=user, key='mail:email'):
            if option.value == old_email:
                option.value = user.email
                option.save()

        # TODO(dcramer): we should maintain validation here when we support
        # multiple email addresses
        if request.user.email != old_email:
            try:
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
                user.send_confirm_email_singular(user_email)
                msg = _('A confirmation email has been sent to %s.') % user_email.email
                messages.add_message(request, messages.SUCCESS, msg)

        messages.add_message(request, messages.SUCCESS, _('Your settings were saved.'))
        return HttpResponseRedirect(request.path)

    context = csrf(request)
    context.update(
        {
            'form': form,
            'page': 'settings',
            'has_2fa': Authenticator.objects.user_has_2fa(request.user),
            'AUTH_PROVIDERS': auth.get_auth_providers(),
            'email': UserEmail.get_primary_email(user),
            'has_newsletters': newsletter.is_enabled,
        }
    )
    return render_to_response('sentry/account/settings.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def twofactor_settings(request):
    """List the two-factor authentication interfaces for the current user."""
    interfaces = Authenticator.objects.all_interfaces_for_user(request.user, return_missing=True)

    if request.method == 'POST' and 'back' in request.POST:
        return HttpResponseRedirect(reverse('sentry-account-settings'))

    context = csrf(request)
    context.update(
        {
            'page': 'security',
            # A backup interface alone does not count as having 2FA enabled.
            'has_2fa': any(x.is_enrolled and not x.is_backup_interface for x in interfaces),
            'interfaces': interfaces,
            'has_newsletters': newsletter.is_enabled,
        }
    )
    return render_to_response('sentry/account/twofactor.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def avatar_settings(request):
    """Render the avatar settings page."""
    context = csrf(request)
    context.update(
        {
            'page': 'avatar',
            'AUTH_PROVIDERS': auth.get_auth_providers(),
            'has_newsletters': newsletter.is_enabled,
        }
    )
    return render_to_response('sentry/account/avatar.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def appearance_settings(request):
    """Edit per-user appearance options (language, timezone, clock, ...)."""
    from django.conf import settings

    options = UserOption.objects.get_all_values(user=request.user, project=None)

    form = AppearanceSettingsForm(
        request.user,
        request.POST or None,
        initial={
            'language': options.get('language') or request.LANGUAGE_CODE,
            'stacktrace_order': int(options.get('stacktrace_order', -1) or -1),
            'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
            'clock_24_hours': options.get('clock_24_hours') or False,
        }
    )
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)

    context = csrf(request)
    context.update(
        {
            'form': form,
            'page': 'appearance',
            'AUTH_PROVIDERS': auth.get_auth_providers(),
            'has_newsletters': newsletter.is_enabled,
        }
    )
    return render_to_response('sentry/account/appearance.html', context, request)
@csrf_protect
@never_cache
@signed_auth_required
@transaction.atomic
def email_unsubscribe_project(request, project_id):
    """Disable mail alerts for one project, reached via a signed link."""
    # For now we only support getting here from the signed link.
    if not request.user_from_signed_request:
        raise Http404()
    try:
        project = Project.objects.get(pk=project_id)
    except Project.DoesNotExist:
        raise Http404()

    if request.method == 'POST':
        if 'cancel' not in request.POST:
            UserOption.objects.set_value(
                user=request.user,
                key='mail:alert',
                value=0,
                project=project,
            )
        return HttpResponseRedirect(auth.get_login_url())

    context = csrf(request)
    context['project'] = project
    return render_to_response('sentry/account/email_unsubscribe_project.html', context, request)
@csrf_protect
@never_cache
@login_required
def list_identities(request):
    """Show the social-auth identities linked to the current user."""
    identity_list = list(UserSocialAuth.objects.filter(user=request.user))

    context = csrf(request)
    context.update(
        {
            'identity_list': identity_list,
            'page': 'identities',
            'AUTH_PROVIDERS': auth.get_auth_providers(),
            'has_newsletters': newsletter.is_enabled,
        }
    )
    return render_to_response('sentry/account/identities.html', context, request)
@csrf_protect
@never_cache
@login_required
def disconnect_identity(request, identity_id):
    """Unlink a social-auth identity from the current user (POST only)."""
    if request.method != 'POST':
        raise NotImplementedError

    try:
        auth = UserSocialAuth.objects.get(id=identity_id)
    except UserSocialAuth.DoesNotExist:
        return HttpResponseRedirect(reverse('sentry-account-settings-identities'))

    backend = get_backend(auth.provider, request, '/')
    if backend is None:
        raise Exception('Backend was not found for request: {}'.format(auth.provider))

    # stop this from bubbling up errors to social-auth's middleware
    # XXX(dcramer): IM SO MAD ABOUT THIS
    try:
        backend.disconnect(request.user, identity_id)
    except Exception as exc:
        import sys
        exc_tb = sys.exc_info()[2]
        six.reraise(Exception, exc, exc_tb)
        del exc_tb

    # XXX(dcramer): we experienced an issue where the identity still existed,
    # and given that this is a cheap query, lets error hard in that case
    assert not UserSocialAuth.objects.filter(
        user=request.user,
        id=identity_id,
    ).exists()

    backend_name = backend.AUTH_BACKEND.name
    messages.add_message(
        request, messages.SUCCESS, 'Your {} identity has been disconnected.'.format(
            settings.AUTH_PROVIDER_LABELS.get(backend_name, backend_name),
        )
    )
    logger.info(
        'user.identity.disconnect',
        extra={
            'user_id': request.user.id,
            'ip_address': request.META['REMOTE_ADDR'],
            'usersocialauth_id': identity_id,
        }
    )
    return HttpResponseRedirect(reverse('sentry-account-settings-identities'))
@csrf_protect
@never_cache
@login_required
def show_emails(request):
    """Manage a user's email addresses: add, remove, or promote to primary."""
    user = request.user
    emails = user.emails.all()
    email_form = EmailForm(user, request.POST or None)
    primary_email = UserEmail.get_primary_email(user)
    alt_emails = emails.exclude(email=primary_email.email)

    if 'remove' in request.POST:
        email = request.POST.get('email')
        UserEmail.objects.filter(user=user, email=email).delete()
        logger.info(
            'user.email.remove',
            extra={
                'user_id': user.id,
                'ip_address': request.META['REMOTE_ADDR'],
                'email': email,
            }
        )
        return HttpResponseRedirect(request.path)

    if 'primary' in request.POST:
        new_primary = request.POST['new_primary_email'].lower().strip()

        if User.objects.filter(
            Q(email__iexact=new_primary) | Q(username__iexact=new_primary)
        ).exclude(id=user.id).exists():
            messages.add_message(
                request, messages.ERROR, _("That email is already in use for another user")
            )
        elif new_primary != user.email:
            # Repoint notification settings that referenced the old primary email.
            alert_email = UserOption.objects.get_value(user=user, key='alert_email')
            if alert_email == user.email:
                UserOption.objects.set_value(user=user, key='alert_email', value=new_primary)
            for option in UserOption.objects.filter(user=user, key='mail:email'):
                if option.value == user.email:
                    option.value = new_primary
                    option.save()

            # If the username mirrored the email, keep them in sync.
            has_new_username = user.email == user.username

            user.email = new_primary

            msg = _('Your settings were saved')
            messages.add_message(request, messages.SUCCESS, msg)

            if has_new_username and not User.objects.filter(username__iexact=new_primary).exists():
                user.username = user.email

            user.save()
        return HttpResponseRedirect(request.path)

    if email_form.is_valid():
        alternative_email = email_form.cleaned_data['alt_email'].lower().strip()

        # check if this alternative email already exists for user
        if alternative_email and not UserEmail.objects.filter(
            user=user, email__iexact=alternative_email
        ).exists():
            # create alternative email for user
            try:
                with transaction.atomic():
                    new_email = UserEmail.objects.create(user=user, email=alternative_email)
            except IntegrityError:
                pass
            else:
                new_email.set_hash()
                new_email.save()
                user.send_confirm_email_singular(new_email)

                # Update newsletter subscription and mark as unverified
                newsletter.update_subscription(
                    user=user,
                    verified=False,
                )
                logger.info(
                    'user.email.add',
                    extra={
                        'user_id': user.id,
                        'ip_address': request.META['REMOTE_ADDR'],
                        'email': new_email.email,
                    }
                )
                msg = _('A confirmation email has been sent to %s.') % new_email.email
                messages.add_message(request, messages.SUCCESS, msg)

        messages.add_message(request, messages.SUCCESS, _('Your settings were saved.'))
        return HttpResponseRedirect(request.path)

    context = csrf(request)
    context.update(
        {
            'email_form': email_form,
            'primary_email': primary_email,
            'alt_emails': alt_emails,
            'page': 'emails',
            'AUTH_PROVIDERS': auth.get_auth_providers(),
            'has_newsletters': newsletter.is_enabled,
        }
    )
    return render_to_response('sentry/account/emails.html', context, request)
@csrf_protect
@never_cache
@login_required
def manage_subscriptions(request):
    """View (GET) and toggle (POST) newsletter subscriptions for the user."""
    user = request.user
    email = UserEmail.get_primary_email(user)

    if request.method == 'GET':
        context = csrf(request)
        context.update(
            {
                'page': 'subscriptions',
                'email': email,
                'AUTH_PROVIDERS': auth.get_auth_providers(),
                'has_newsletters': newsletter.is_enabled,
                'subscriptions': newsletter.get_subscriptions(user),
            }
        )
        return render_to_response('sentry/account/subscriptions.html', context, request)

    subscribed = request.POST.get('subscribed') == '1'
    try:
        list_id = int(request.POST.get('listId', ''))
    except ValueError:
        return HttpResponse('bad request', status=400)

    kwargs = {
        'list_id': list_id,
        'subscribed': subscribed,
        'verified': email.is_verified,
    }
    if subscribed:
        kwargs['subscribed_date'] = timezone.now()
    else:
        kwargs['unsubscribed_date'] = timezone.now()

    newsletter.create_or_update_subscription(user, **kwargs)
    return HttpResponse()
|
gencer/sentry
|
src/sentry/web/frontend/accounts.py
|
Python
|
bsd-3-clause
| 21,398
|
[
"VisIt"
] |
a600dcbddfc84c579130136d3bbb69d14d980c5522d3c4e8376d97459aeddb30
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest2
from openstack import profile
from rackspace import connection
class TestConnection(unittest2.TestCase):
    """Unit tests for rackspace.connection.Connection argument handling."""

    @mock.patch.object(profile.Profile, 'set_region')
    def test_with_region(self, mock_set_region):
        region = "BEN"
        connection.Connection(region=region, username="test", api_key="test")
        mock_set_region.assert_called_with(profile.Profile.ALL, region)

    @mock.patch("rackspaceauth.v2.APIKey")
    def test_auth_with_APIKey(self, mock_apikey):
        user = "brian"
        api_key = "123"
        connection.Connection(region="the moon", username=user, api_key=api_key)
        mock_apikey.assert_called_with(username=user, api_key=api_key)

    @mock.patch("rackspaceauth.v2.Password")
    def test_auth_with_Password(self, mock_password):
        user = "walter"
        password = "123"
        connection.Connection(region="the moon", username=user, password=password)
        mock_password.assert_called_with(username=user, password=password)

    @mock.patch("rackspaceauth.v2.Token")
    def test_auth_with_Token(self, mock_token):
        tenant = "everett"
        token = "123"
        connection.Connection(region="the moon", tenant_id=tenant, token=token)
        mock_token.assert_called_with(tenant_id=tenant, token=token)

    def test_auth_no_user_or_tenant(self):
        self.assertRaisesRegexp(
            ValueError,
            "username or tenant_id must be specified",
            connection.Connection, region="test")

    def test_auth_user_and_tenant(self):
        self.assertRaisesRegexp(
            ValueError,
            "username and tenant_id cannot be used together",
            connection.Connection, username="test", tenant_id="test",
            region="test")

    def test_auth_user_only(self):
        self.assertRaisesRegexp(
            ValueError,
            "Either api_key or password must be passed with username",
            connection.Connection, username="test", region="test")
|
briancurtin/rackspace-sdk-plugin
|
rackspace/tests/unit/test_connection.py
|
Python
|
apache-2.0
| 2,699
|
[
"Brian"
] |
42e152a84315dbdb8d7f194770dabf0d33a88abbbf8cc82558754ce2bd9fd582
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Trinity, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Balazs Hajgato (Vrije Universiteit Brussel)
@author: Robert Qiao (DeepThought HPC Service, Flinders University, Adelaide, Australia)
"""
import glob
import os
import re
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.environment import setvar
from easybuild.tools.filetools import apply_regex_substitutions
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_Trinity(EasyBlock):
"""Support for building/installing Trinity."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for Trinity."""
EasyBlock.__init__(self, *args, **kwargs)
self.build_in_installdir = True
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Trinity."""
extra_vars = {
'withsampledata': [False, "Include sample data", CUSTOM],
'bwapluginver': [None, "BWA pugin version", CUSTOM],
'RSEMmod': [False, "Enable RSEMmod", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def butterfly(self):
"""Install procedure for Butterfly."""
self.log.info("Begin Butterfly")
setvar("JAVA_TOOL_OPTIONS", "-Dfile.encoding=UTF8")
dst = os.path.join(self.cfg['start_dir'], 'Butterfly', 'src')
try:
os.chdir(dst)
except OSError as err:
raise EasyBuildError("Butterfly: failed to change to dst dir %s: %s", dst, err)
cmd = "ant"
run_cmd(cmd)
self.log.info("End Butterfly")
def chrysalis(self, run=True):
"""Install procedure for Chrysalis."""
make_flags = "COMPILER='%s' CPLUSPLUS='%s' CC='%s' " % (os.getenv('CXX'),
os.getenv('CXX'),
os.getenv('CC'))
make_flags += "OMP_FLAGS='%s' OMP_LINK='%s' " % (self.toolchain.get_flag('openmp'),
os.getenv('LIBS'))
make_flags += "OPTIM='-O1' SYS_OPT='-O2 %s' " % self.toolchain.get_flag('optarch')
make_flags += "OPEN_MP=yes UNSUPPORTED=yes DEBUG=no QUIET=yes"
if run:
self.log.info("Begin Chrysalis")
dst = os.path.join(self.cfg['start_dir'], 'Chrysalis')
try:
os.chdir(dst)
except OSError as err:
raise EasyBuildError("Chrysalis: failed to change to dst dir %s: %s", dst, err)
run_cmd("make clean")
run_cmd("make %s" % make_flags)
self.log.info("End Chrysalis")
else:
return make_flags
def inchworm(self, run=True):
"""Install procedure for Inchworm."""
make_flags = 'CXXFLAGS="%s %s"' % (os.getenv('CXXFLAGS'), self.toolchain.get_flag('openmp'))
version = LooseVersion(self.version)
if version >= LooseVersion('2.0') and version < LooseVersion('3.0'):
make_flags += ' CXX=%s' % os.getenv('CXX')
if run:
self.log.info("Begin Inchworm")
dst = os.path.join(self.cfg['start_dir'], 'Inchworm')
try:
os.chdir(dst)
except OSError as err:
raise EasyBuildError("Inchworm: failed to change to dst dir %s: %s", dst, err)
run_cmd('./configure --prefix=%s' % dst)
run_cmd("make install %s" % make_flags)
self.log.info("End Inchworm")
else:
return make_flags
def jellyfish(self):
"""use a seperate jellyfish source if it exists, otherwise, just install the bundled jellyfish"""
self.log.debug("begin jellyfish")
self.log.debug("startdir: %s", self.cfg['start_dir'])
cwd = os.getcwd()
glob_pat = os.path.join(self.cfg['start_dir'], "..", "jellyfish-*")
jellyfishdirs = glob.glob(glob_pat)
self.log.debug("glob pattern '%s' yields %s" % (glob_pat, jellyfishdirs))
if len(jellyfishdirs) == 1 and os.path.isdir(jellyfishdirs[0]):
jellyfishdir = jellyfishdirs[0]
# if there is a jellyfish directory
self.log.info("detected jellyfish directory %s, so using this source", jellyfishdir)
orig_jellyfishdir = os.path.join(self.cfg['start_dir'], 'trinity-plugins', 'jellyfish')
try:
# remove original symlink
os.unlink(orig_jellyfishdir)
except OSError as err:
self.log.warning("jellyfish plugin: failed to remove dir %s: %s" % (orig_jellyfishdir, err))
try:
# create new one
os.symlink(jellyfishdir, orig_jellyfishdir)
os.chdir(orig_jellyfishdir)
except OSError as err:
raise EasyBuildError("jellyfish plugin: failed to change dir %s: %s", orig_jellyfishdir, err)
run_cmd('./configure --prefix=%s' % orig_jellyfishdir)
cmd = "make CC='%s' CXX='%s' CFLAGS='%s'" % (os.getenv('CC'), os.getenv('CXX'), os.getenv('CFLAGS'))
run_cmd(cmd)
# the installstep is running the jellyfish script, this is a wrapper that will compile .lib/jellyfish
run_cmd("bin/jellyfish cite")
# return to original dir
try:
os.chdir(cwd)
except OSError:
raise EasyBuildError("jellyfish: Could not return to original dir %s", cwd)
elif jellyfishdirs:
raise EasyBuildError("Found multiple 'jellyfish-*' directories: %s", jellyfishdirs)
else:
self.log.info("no seperate source found for jellyfish, letting Makefile build shipped version")
self.log.debug("end jellyfish")
def kmer(self):
"""Install procedure for kmer (Meryl)."""
self.log.info("Begin Meryl")
dst = os.path.join(self.cfg['start_dir'], 'trinity-plugins', 'kmer')
try:
os.chdir(dst)
except OSError as err:
raise EasyBuildError("Meryl: failed to change to dst dir %s: %s", dst, err)
cmd = "./configure.sh"
run_cmd(cmd)
cmd = 'make -j 1 CCDEP="%s -MM -MG" CXXDEP="%s -MM -MG"' % (os.getenv('CC'), os.getenv('CXX'))
run_cmd(cmd)
cmd = 'make install'
run_cmd(cmd)
self.log.info("End Meryl")
def trinityplugin(self, plugindir, cc=None):
"""Install procedure for Trinity plugins."""
self.log.info("Begin %s plugin" % plugindir)
dst = os.path.join(self.cfg['start_dir'], 'trinity-plugins', plugindir)
try:
os.chdir(dst)
except OSError as err:
raise EasyBuildError("%s plugin: failed to change to dst dir %s: %s", plugindir, dst, err)
if not cc:
cc = os.getenv('CC')
cmd = "make CC='%s' CXX='%s' CFLAGS='%s'" % (cc, os.getenv('CXX'), os.getenv('CFLAGS'))
run_cmd(cmd)
self.log.info("End %s plugin" % plugindir)
def configure_step(self):
"""No configuration for Trinity."""
pass
def build_step(self):
"""No building for Trinity."""
pass
def install_step(self):
    """Custom install procedure for Trinity.

    Two code paths:
    * very old 2012 snapshots (before 2012-10-05): build each component
      (inchworm, chrysalis, kmer, butterfly, optional plugins) individually;
    * everything else: build jellyfish separately, patch the top-level
      Makefile with the component configure/make flags, then drive the
      whole build through a single top-level make.
    """
    version = LooseVersion(self.version)
    if version > LooseVersion('2012') and version < LooseVersion('2012-10-05'):
        # component-wise build for early 2012 snapshots
        self.inchworm()
        self.chrysalis()
        self.kmer()
        if version < LooseVersion('2.9'):
            self.butterfly()
        bwapluginver = self.cfg['bwapluginver']
        if bwapluginver:
            self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)
        if self.cfg['RSEMmod']:
            # RSEM-mod is built with the C++ compiler as CC
            self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))
    else:
        self.jellyfish()
        # collect the configure/make flags without actually running the builds
        inchworm_flags = self.inchworm(run=False)
        chrysalis_flags = self.chrysalis(run=False)
        cc = os.getenv('CC')
        cxx = os.getenv('CXX')
        # linker search paths for optional ncurses/zlib dependencies
        lib_flags = ""
        for lib in ['ncurses', 'zlib']:
            libroot = get_software_root(lib)
            if libroot:
                lib_flags += " -L%s/lib" % libroot
        if version >= LooseVersion('2.0') and version < LooseVersion('3.0'):
            # 2.x Makefile indents these variables with a leading space
            regex_subs = [
                (r'^( INCHWORM_CONFIGURE_FLAGS\s*=\s*).*$', r'\1%s' % inchworm_flags),
                (r'^( CHRYSALIS_MAKE_FLAGS\s*=\s*).*$', r'\1%s' % chrysalis_flags),
            ]
        else:
            regex_subs = [
                (r'^(INCHWORM_CONFIGURE_FLAGS\s*=\s*).*$', r'\1%s' % inchworm_flags),
                (r'^(CHRYSALIS_MAKE_FLAGS\s*=\s*).*$', r'\1%s' % chrysalis_flags),
                # pass compilers/flags down to the bundled rsem and fastool builds
                (r'(/rsem && \$\(MAKE\))\s*$',
                 r'\1 CC=%s CXX="%s %s" CFLAGS_EXTRA="%s"\n' % (cc, cxx, lib_flags, lib_flags)),
                (r'(/fastool && \$\(MAKE\))\s*$',
                 r'\1 CC="%s -std=c99" CFLAGS="%s ${CFLAGS}"\n' % (cc, lib_flags)),
            ]
        apply_regex_substitutions('Makefile', regex_subs)
        trinity_compiler = None
        comp_fam = self.toolchain.comp_family()
        if comp_fam in [toolchain.INTELCOMP]:
            trinity_compiler = "intel"
        elif comp_fam in [toolchain.GCC]:
            trinity_compiler = "gcc"
        else:
            raise EasyBuildError("Don't know how to set TRINITY_COMPILER for %s compiler", comp_fam)
        explicit_make_args = ''
        if version >= LooseVersion('2.0') and version < LooseVersion('3.0'):
            explicit_make_args = 'all plugins'
        cmd = "make TRINITY_COMPILER=%s %s" % (trinity_compiler, explicit_make_args)
        run_cmd(cmd)
        # butterfly is not included in standard build before v2.9.0
        if version < LooseVersion('2.9'):
            self.butterfly()
    # remove sample data if desired
    if not self.cfg['withsampledata']:
        try:
            shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))
        except OSError as err:
            raise EasyBuildError("Failed to remove sample data: %s", err)
def sanity_check_step(self):
    """Custom sanity check for Trinity: verify component binaries and dirs exist.

    Expected file layout (and the version separator in directory names)
    varies considerably across Trinity releases, hence the version ladder.
    """
    version = LooseVersion(self.version)
    # `sep` is computed but the install dir name below is hard-coded to
    # 'trinityrnaseq' — NOTE(review): sep appears unused in this chunk
    if version >= LooseVersion('2.0') and version < LooseVersion('2.3'):
        sep = '-'
    elif version >= LooseVersion('2.3') and version < LooseVersion('2.9'):
        sep = '-Trinity-v'
    elif version >= LooseVersion('2.9') and version < LooseVersion('3.0'):
        sep = '-v'
    else:
        sep = '_r'
    # Chrysalis: from 2.9 on it installs several binaries under Chrysalis/bin
    if version >= LooseVersion('2.9') and version < LooseVersion('2000'):
        chrysalis_bin = os.path.join('Chrysalis', 'bin')
        chrysalis_files = ['BubbleUpClustering',
                           'CreateIwormFastaBundle',
                           'QuantifyGraph',
                           'Chrysalis',
                           'GraphFromFasta',
                           'ReadsToTranscripts']
    elif version >= LooseVersion('2.8') and version < LooseVersion('2.9'):
        chrysalis_bin = os.path.join('Chrysalis', 'bin')
        chrysalis_files = ['Chrysalis']
    else:
        chrysalis_bin = 'Chrysalis'
        chrysalis_files = ['Chrysalis']
    chrysalis_bin_files = [os.path.join(chrysalis_bin, x) for x in chrysalis_files]
    # Inchworm
    inchworm_bin = os.path.join('Inchworm', 'bin')
    inchworm_files = ['inchworm']
    if version >= LooseVersion('2.9') and version < LooseVersion('2000'):
        inchworm_files.extend(['FastaToDeBruijn', 'fastaToKmerCoverageStats'])
    inchworm_bin_files = [os.path.join(inchworm_bin, x) for x in inchworm_files]
    path = 'trinityrnaseq'
    # directories that must exist
    dir_path = ['util']
    if version < LooseVersion('2.9'):
        dir_path.append(os.path.join('Butterfly', 'src', 'bin'))
    # these lists are definitely non-exhaustive, but better than nothing
    custom_paths = {
        'files': [os.path.join(path, x) for x in (inchworm_bin_files + chrysalis_bin_files)],
        'dirs': [os.path.join(path, x) for x in dir_path]
    }
    super(EB_Trinity, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
    """Custom tweaks for PATH variable for Trinity.

    Trinity installs its scripts in the unpacked source root rather than in
    bin/, so PATH and TRINITY_HOME must point at that directory.
    """
    guesses = super(EB_Trinity, self).make_module_req_guess()
    install_rootdir = os.path.basename(self.cfg['start_dir'].strip('/'))
    guesses.update({
        'PATH': [install_rootdir],
        'TRINITY_HOME': [install_rootdir],
    })
    return guesses
|
IGB-UIUC/easybuild
|
easyblocks/trinity.py
|
Python
|
gpl-3.0
| 14,491
|
[
"BWA"
] |
9988d74e32c9a74e58f480c0b25cf9a222ba6ffed4a3188237704eae6f3e0b92
|
# This file is part of PyEMMA.
#
# Copyright (c) 2014-2017 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pickle import Pickler, Unpickler, UnpicklingError
import numpy as np
import logging
# PyTables is an optional dependency (only needed for HDF5 support);
# tolerate any import failure, but — unlike the original bare ``except:`` —
# do not swallow SystemExit/KeyboardInterrupt.
try:
    import tables
except Exception:
    pass
logger = logging.getLogger(__name__)
__author__ = 'marscher'
def _blosc_opts(complevel=9, complib='blosc:lz4', shuffle=True):
shuffle = 2 if shuffle == 'bit' else 1 if shuffle else 0
compressors = ['blosclz', 'lz4', 'lz4hc', 'snappy', 'zlib', 'zstd']
complib = ['blosc:' + c for c in compressors].index(complib)
args = {
'compression': 32001,
'compression_opts': (0, 0, 0, 0, complevel, shuffle, complib)
}
if shuffle:
args['shuffle'] = False
return args
def _check_blosc_avail():
    """Probe whether the BLOSC compression filter is usable with h5py.

    Tries to create a tiny compressed dataset in a temporary file; returns
    the blosc option dict from :func:`_blosc_opts` on success, or an empty
    dict (after issuing a user warning) when the filter is unavailable.
    """
    import os
    import tempfile
    import h5py
    blosc_opts = _blosc_opts()
    fid, name = tempfile.mkstemp()
    try:
        # BUGFIX: mkstemp returns an *open* OS-level file descriptor which the
        # original leaked; close it right away (h5py re-opens the file by name).
        os.close(fid)
        with h5py.File(name, mode="w") as h5f:
            try:
                h5f.create_dataset('test', shape=(1, 1), **blosc_opts)
            except ValueError as ve:
                if 'Unknown compression filter' in str(ve):
                    import warnings
                    warnings.warn('BLOSC compression filter unavailable. '
                                  'Your resulting file may be large and not optimal to process.')
                    return {}
                else:  # unknown exception
                    raise
            else:
                return blosc_opts
    finally:
        # best-effort cleanup of the probe file
        try:
            os.unlink(name)
        except OSError:
            pass
# we cache this during runtime: the availability probe runs once at import
# time and every subsequent dataset creation reuses the result
_DEFAULT_BLOSC_OPTIONS = _check_blosc_avail()
class HDF5PersistentPickler(Pickler):
    """Pickler that stores numpy arrays out-of-band as datasets in a given
    HDF5 group, leaving only a persistent id in the pickle stream."""

    def __init__(self, group, file):
        # protocol 4: supports large objects and is available on all
        # Python 3 versions this targets
        super().__init__(file=file, protocol=4)
        self.group = group
        # ids of arrays already written, to avoid writing duplicates
        self._seen_ids = set()

    def dump(self, *args, **kwargs):
        # we temporarily patch mdtraj.Topology to save state to numpy array
        from unittest import mock
        from pyemma._base.serialization.mdtraj_helpers import getstate
        with mock.patch('mdtraj.Topology.__getstate__', getstate, create=True):
            super(HDF5PersistentPickler, self).dump(*args, **kwargs)

    def _store(self, array):
        """Write *array* into the HDF5 group keyed by its object id; return the id."""
        id_ = id(array)
        key = str(id_)
        # this actually makes no sense to check it here, however it is needed,
        # since there seems to be some race condition in h5py...
        if key in self.group:
            assert id_ in self._seen_ids
            return id_
        self._seen_ids.add(id_)
        self.group.create_dataset(name=key, data=array,
                                  chunks=True, **_DEFAULT_BLOSC_OPTIONS)
        return id_

    def persistent_id(self, obj):
        """Divert non-object-dtype ndarrays to HDF5 storage; everything else
        is pickled inline (returning None keeps default behavior)."""
        if (isinstance(obj, np.ndarray) and obj.dtype != np.object_
                and id(obj) not in self._seen_ids):
            # do not store empty arrays in hdf (more overhead)
            if not len(obj):
                return None
            array_id = self._store(obj)
            return 'np_array', array_id
        return None
class HDF5PersistentUnpickler(Unpickler):
    """Unpickler counterpart of HDF5PersistentPickler: resolves 'np_array'
    persistent ids against datasets in the given HDF5 group, and restricts
    which modules classes may be loaded from (a pickle-safety measure)."""

    # only classes from these top-level packages may be unpickled
    __allowed_packages = ('builtin',
                          'pyemma',
                          'mdtraj',
                          'numpy',
                          'scipy',
                          'bhmm',
                          )

    def __init__(self, group, file):
        super().__init__(file=file)
        self.group = group

    def persistent_load(self, pid):
        # This method is invoked whenever a persistent ID is encountered.
        # Here, pid is the type and the dataset id.
        type_tag, key_id = pid
        if type_tag == "np_array":
            # [:] materializes the dataset as an in-memory numpy array
            return self.group[str(key_id)][:]
        else:
            # Always raises an error if you cannot return the correct object.
            # Otherwise, the unpickler will think None is the object referenced
            # by the persistent ID.
            raise UnpicklingError("unsupported persistent object")

    def load(self, *args, **kwargs):
        # we temporarily patch mdtraj.Topology to load state from numpy array
        from unittest import mock
        from pyemma._base.serialization.mdtraj_helpers import setstate
        with mock.patch('mdtraj.Topology.__setstate__', setstate, create=True):
            return super(HDF5PersistentUnpickler, self).load(*args, **kwargs)

    @staticmethod
    def __check_allowed(module):
        # check if we are allowed to unpickle from these modules.
        i = module.find('.')
        if i > 0:
            package = module[:i]
        else:
            package = module
        if package not in HDF5PersistentUnpickler.__allowed_packages:
            raise UnpicklingError('{mod} not allowed to unpickle'.format(mod=module))

    def find_class(self, module, name):
        """Resolve a class, honoring the allow-list and the rename registry
        (which maps old pyemma class paths to their current locations)."""
        self.__check_allowed(module)
        from .util import class_rename_registry
        new_class = class_rename_registry.find_replacement_for_old('{}.{}'.format(module, name))
        if new_class:
            return new_class
        return super(HDF5PersistentUnpickler, self).find_class(module, name)
|
markovmodel/PyEMMA
|
pyemma/_base/serialization/pickle_extensions.py
|
Python
|
lgpl-3.0
| 5,874
|
[
"MDTraj"
] |
8b0d49335359acbaf15edda756ce4b0bad9620de367c36e56c9c3ca828c616cc
|
# encoding=utf-8
# Created by xupingmao on 2017/04/16
# @modified 2022/03/08 22:18:33
# @filename dao.py
"""资料的DAO操作集合
DAO层只做最基础的数据库交互,不做权限校验(空校验要做),业务状态检查之类的工作
一些表的说明
note_full:<note_id> = 笔记的内容,包含一些属性(部分属性比如访问时间、访问次数不是实时更新的)
note_index:<note_id> = 笔记索引,不包含内容
note_tiny:<user>:<note_id> = 用户维度的笔记索引
notebook:<user>:<note_id> = 用户维度的笔记本(项目)索引
token:<uuid> = 用于链接分享的令牌
note_history:<note_id>:<version> = 笔记的历史版本
note_comment:<note_id>:<timeseq> = 笔记的评论
comment_index:<user>:<timeseq> = 用户维度的评论索引
search_history:<user>:<timeseq> = 用户维度的搜索历史
note_public:<note_id> = 公开的笔记索引
"""
import time
import math
import re
import six
import web.db as db
import os
import xconfig
import xtables
import xutils
import xauth
import xmanager
import copy
import threading
import logging
from collections import Counter
from xutils import readfile, savetofile, sqlite3, Storage
from xutils import dateutil, cacheutil, Timer, dbutil, textutil, fsutil
from xutils import attrget
# 配置日志模块
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s|%(levelname)s|%(filename)s:%(lineno)d|%(message)s')
def register_note_table(name, description, check_user = False):
    """Register a note-category table in dbutil (thin convenience wrapper)."""
    dbutil.register_table(name, description, "note", check_user = check_user)
# table registrations: see the module docstring for the key layout of each
register_note_table("note_full", "笔记完整信息 <note_full:note_id>")
register_note_table("note_index", "笔记索引,不包含内容 <note_index:note_id>")
register_note_table("note_skey", "用户维度的skey索引 <note_skey:user:skey>")
register_note_table("notebook", "笔记分组", check_user = True)
register_note_table("token", "用于分享的令牌")
register_note_table("note_history", "笔记的历史版本")
register_note_table("note_tags", "笔记标签 <note_tags:user:note_id>")

# sharing relations (sender / receiver side)
register_note_table("note_share_from", "分享发送者关系表 <note_share_from:from_user:note_id>")
register_note_table("note_share_to", "分享接受关系表 <note_share_to:to_user:note_id>")

dbutil.register_table("search_history", "搜索历史")

# index of publicly shared notes, with secondary indexes used for sorting
register_note_table("note_public", "公共笔记索引")
dbutil.register_table_index("note_public", "hot_index")
dbutil.register_table_index("note_public", "share_time")

# per-user note index, with secondary indexes on name and creation time
register_note_table("note_tiny", "用户维度的笔记索引 <table:user:id>", check_user = True)
dbutil.register_table_index("note_tiny", "name")
dbutil.register_table_index("note_tiny", "ctime")
NOTE_DAO = xutils.DAO("note")

DB_PATH = xconfig.DB_PATH

# hard limits for logs and listings
MAX_EDIT_LOG = 500
MAX_VIEW_LOG = 500
MAX_STICKY_SIZE = 1000
MAX_SEARCH_SIZE = 1000
MAX_LIST_SIZE = 1000

# note type -> font-awesome icon CSS class (see build_note_info)
NOTE_ICON_DICT = {
    "group" : "fa-folder orange",
    "post" : "fa-file-word-o",  # deprecated
    "html" : "fa-file-word-o",  # deprecated
    "gallery" : "fa-photo",
    "list" : "fa-list",
    "plan" : "fa-calendar-check-o",

    # table-like types
    "csv" : "fa-table",  # deprecated
    "table" : "fa-table",  # deprecated
    "form" : "fa-table",  # under development
}

# guards timestamp-based id allocation when creating a note at a chosen
# date (see create_note_base)
CREATE_LOCK = threading.RLock()
class NoteSchema:
    """Documents the note record structure.

    The attribute values are human-readable (Chinese) field descriptions;
    this class is documentation only and is never instantiated as data.
    """

    # -- basic info --
    id = "主键ID"
    name = "笔记名称"
    ctime = "创建时间"
    mtime = "修改时间"
    atime = "访问时间"
    type = "类型"
    category = "所属分类"  # top-level book category
    size = "大小"
    parent_id = "父级节点ID"
    content = "纯文本内容"
    data = "富文本内容"
    is_deleted = "是否删除"
    archived = "是否归档"

    # -- access control --
    creator = "创建者"
    is_public = "是否公开"
    token = "分享token"

    # -- statistics --
    priority = "优先级"
    visited_cnt = "访问次数"
    orderby = "排序方式"
    hot_index = "热门指数"
def format_note_id(id):
    """Normalize a note id (int or str) to its canonical string form."""
    return "%s" % id
def format_date(date):
    """Turn "YYYY-MM-DD[ HH:MM:SS]" into "YYYY/MM/DD"; None passes through."""
    if date is None:
        return None
    day_part = date.partition(" ")[0]
    return day_part.replace("-", "/")
def get_root():
    """Build the synthetic root folder record (id 0, never persisted)."""
    root = Storage()
    root.name = "根目录"
    root.type = "group"
    root.size = None
    root.id = 0
    root.parent_id = 0
    root.content = ""
    root.priority = 0
    build_note_info(root)
    # override the default view URL computed by build_note_info
    root.url = "/note/group"
    return root
def get_default_group():
    """Build the synthetic "default" group record (notes without a folder)."""
    group = Storage()
    group.name = "默认分组"
    group.type = "group"
    group.size = None
    group.id = "default"
    group.parent_id = 0
    group.content = ""
    group.priority = 0
    build_note_info(group)
    # override the default view URL computed by build_note_info
    group.url = "/note/default"
    return group
def get_archived_group():
    """Build the synthetic "archived" group record."""
    group = Storage()
    group.name = "归档分组"
    group.type = "group"
    group.size = None
    group.id = "archived"
    group.parent_id = 0
    group.content = ""
    group.priority = 0
    build_note_info(group)
    # override the default view URL computed by build_note_info
    group.url = "/note/archived"
    return group
def get_note_public_table():
    """Return the dbutil table of publicly shared notes."""
    return dbutil.get_table("note_public")

def get_note_tiny_table(user_name):
    """Return the per-user note index table for *user_name*."""
    return dbutil.get_table("note_tiny", user_name = user_name)
def batch_query(id_list):
    """Fetch note index records by id; returns {id: note} (misses omitted)."""
    # NOTE(review): `creator` is computed but never used here
    creator = xauth.current_name()
    result = dict()
    for id in id_list:
        note = dbutil.get("note_index:%s" % id)
        if note:
            result[id] = note
            build_note_info(note)
    return result
def batch_query_list(id_list):
    """Fetch note index records by id; returns a list (misses omitted)."""
    # NOTE(review): `creator` is computed but never used here
    creator = xauth.current_name()
    result = []
    for id in id_list:
        note = dbutil.get("note_index:%s" % id)
        if note:
            build_note_info(note)
            result.append(note)
    return result
# In-place sort helpers. Compound orders rely on list.sort being stable:
# the *last* sort applied is the primary order.

def sort_by_name(notes):
    """Sort by note name, ascending."""
    notes.sort(key = lambda x: x.name)

def sort_by_name_desc(notes):
    notes.sort(key = lambda x: x.name, reverse = True)

def sort_by_name_priority(notes):
    # name order, with sticky (high-priority) notes floated to the top
    sort_by_name(notes)
    sort_by_priority(notes)

def sort_by_mtime_desc(notes):
    notes.sort(key = lambda x: x.mtime, reverse = True)

def sort_by_ctime_desc(notes):
    notes.sort(key = lambda x: x.ctime, reverse = True)

def sort_by_atime_desc(notes):
    notes.sort(key = lambda x: x.atime, reverse = True)

def sort_by_priority(notes):
    # sticky notes first
    notes.sort(key = lambda x: x.priority, reverse = True)

def sort_by_default(notes):
    # sort by name first
    sort_by_name(notes)
    # sticky notes first
    sort_by_priority(notes)
    # folders before plain notes
    sort_by_type(notes)

def sort_by_ctime_priority(notes):
    # newest first
    sort_by_ctime_desc(notes)
    # sticky notes first
    sort_by_priority(notes)
    # folders before plain notes
    sort_by_type(notes)

def sort_by_type(notes):
    # folders (type "group") before everything else
    notes.sort(key = lambda x: 0 if x.type == "group" else 1)

def sort_by_type_mtime_desc(notes):
    sort_by_mtime_desc(notes)
    sort_by_type(notes)

def sort_by_type_ctime_desc(notes):
    sort_by_ctime_desc(notes)
    sort_by_type(notes)

def sort_by_dtime_desc(notes):
    notes.sort(key = lambda x: x.dtime, reverse = True)

def sort_by_dtime_asc(notes):
    notes.sort(key = lambda x: x.dtime)

def sort_by_hot_index(notes):
    # treat a missing hot_index as 0
    notes.sort(key = lambda x: x.hot_index or 0, reverse = True)
# orderby keyword -> sort function (see sort_notes)
SORT_FUNC_DICT = {
    "name": sort_by_name,
    "name_desc": sort_by_name_desc,
    "name_priority": sort_by_name_priority,
    "mtime_desc": sort_by_mtime_desc,
    "ctime_desc": sort_by_ctime_desc,
    "ctime_priority": sort_by_ctime_priority,
    "atime_desc": sort_by_atime_desc,
    "type_mtime_desc": sort_by_type_mtime_desc,
    "type_ctime_desc": sort_by_type_ctime_desc,
    "dtime_desc": sort_by_dtime_desc,
    "dtime_asc" : sort_by_dtime_asc,
    "hot_index" : sort_by_hot_index,
    "default": sort_by_default,
}
def sort_notes(notes, orderby = "name"):
    """Sort *notes* in place by the named order (unknown names fall back to
    mtime_desc); also fills display fields via build_note_list_info first."""
    if orderby is None:
        orderby = "name"
    sort_func = SORT_FUNC_DICT.get(orderby, sort_by_mtime_desc)
    build_note_list_info(notes, orderby)
    sort_func(notes)
def build_note_list_info(notes, orderby = None):
    """Apply build_note_info to every note in the list."""
    for note in notes:
        build_note_info(note, orderby)
def build_note_info(note, orderby = None):
    """Fill derived/display fields on a note record in place.

    Sets the view URL, icon, string id, default values for counters and
    category, formatted dates, and the list badge text. Returns the same
    note (or None if given None).
    """
    if note is None:
        return None
    # note.url = "/note/view?id={}".format(note["id"])
    note.url = "/note/view/{}".format(note["id"])
    if note.priority is None:
        note.priority = 0
    if note.content is None:
        note.content = ''
    if note.data is None:
        note.data = ''
    # process icon
    note.icon = NOTE_ICON_DICT.get(note.type, "fa-file-text-o")
    note.id = str(note.id)
    if note.type in ("list", "csv"):
        # these types have no inline editor
        note.show_edit = False
    if note.visited_cnt is None:
        note.visited_cnt = 0
    if note.orderby is None:
        note.orderby = "ctime_priority"
    if note.category is None:
        note.category = "000"
    if note.hot_index is None:
        note.hot_index = 0
    if note.ctime != None:
        note.create_date = format_date(note.ctime)
    if note.mtime != None:
        note.update_date = format_date(note.mtime)
    # derive the deletion time for soft-deleted notes that predate dtime
    if note.is_deleted == 1 and note.dtime == None:
        note.dtime = note.mtime
    # badge shown in listings: hotness when sorting by it, else creation date
    if orderby == "hot_index":
        note.badge_info = "热度: %s" % note.hot_index
    if note.badge_info is None:
        note.badge_info = note.create_date
    return note
def convert_to_path_item(note):
    """Project a note onto the minimal fields needed for a breadcrumb entry."""
    return Storage(name = note.name, url = note.url, id = note.id,
                   type = note.type, priority = note.priority, is_public = note.is_public)
@xutils.timeit(name = "NoteDao.ListPath:leveldb", logfile = True)
def list_path(file, limit = 5):
    """Build the breadcrumb path from the root down to *file*.

    Walks up through parent_id at most *limit* levels; when the walk reaches
    a direct child of the root, a synthetic default/archived group may be
    inserted before the root itself.
    """
    pathlist = []
    while file is not None:
        pathlist.insert(0, convert_to_path_item(file))
        file.url = "/note/%s" % file.id
        if len(pathlist) >= limit:
            break
        if str(file.id) == "0":
            break
        # handle a direct child of the root
        if str(file.parent_id) == "0":
            if file.type != "group":
                # loose notes are displayed under the synthetic default group
                pathlist.insert(0, get_default_group())
            elif file.archived:
                pathlist.insert(0, get_archived_group())
            pathlist.insert(0, convert_to_path_item(get_root()))
            break
        file = get_by_id(file.parent_id, include_full = False)
    return pathlist
def get_full_by_id(id):
    """Fetch the full note record (including content/data) by id."""
    return dbutil.get("note_full:%s" % id)
@xutils.timeit(name = "NoteDao.GetById:leveldb", logfile = True)
def get_by_id(id, include_full = True):
    """Load a note by id.

    With include_full=False the lighter note_index record is preferred;
    otherwise the full record is loaded and refreshed with the (possibly
    newer) index fields. id 0/"0" yields the synthetic root; ""/None yields
    None.
    """
    if id == "" or id is None:
        return None
    if id == 0 or id == "0":
        return get_root()
    note_index = dbutil.get("note_index:%s" % id)
    if not include_full and note_index != None:
        build_note_info(note_index)
        return note_index
    note = get_full_by_id(id)
    if note and not include_full:
        # index record was missing: strip the heavy fields from the full one
        del note.content
        del note.data
    if note_index:
        # the index holds the authoritative copies of these fields
        note.name = note_index.name
        note.mtime = note_index.mtime
        note.atime = note_index.atime
        note.size = note_index.size
        note.tags = note_index.tags
        note.parent_id = note_index.parent_id
        note.visited_cnt = note_index.visited_cnt
        note.hot_index = note_index.hot_index
    build_note_info(note)
    return note
def get_by_id_creator(id, creator, db=None):
    """Load a note by id, but only if it belongs to *creator* (else None).

    NOTE(review): the `db` parameter is unused.
    """
    note = get_by_id(id)
    if note and note.creator == creator:
        return note
    return None
def get_by_token(token):
    """Resolve a share token to its note; None for unknown or non-note tokens."""
    token_info = dbutil.get("token:%s" % token)
    if token_info != None and token_info.type == "note":
        return get_by_id(token_info.id)
    return None
def get_by_user_skey(user_name, skey):
    """Look up a note via the per-user skey index; None when absent."""
    # '-' is normalized to '_' in the key encoding (see get_or_create_note)
    skey = skey.replace("-", "_")
    note_info = dbutil.get("note_skey:%s:%s" % (user_name, skey))
    if note_info != None:
        return get_by_id(note_info.note_id)
    else:
        return None
def save_note_skey(note):
    """Write the skey -> note_id index entry; no-op when the note has no skey."""
    if note.skey is None or note.skey == "":
        return
    key = "note_skey:%s:%s" % (note.creator, note.skey)
    dbutil.put(key, Storage(note_id = note.id))
def delete_note_skey(note):
    """Remove the skey index entry for *note*; no-op when the note has no skey."""
    skey = note.skey
    if skey is None or skey == "":
        return
    key = "note_skey:%s:%s" % (note.creator, note.skey)
    dbutil.delete(key)
def get_or_create_note(skey, creator):
    """Look up a note by skey, creating an empty markdown log note on demand.

    @param {string} skey     special key of the note, unique per user
    @param {string} creator  creator of the note
    @throws {exception} on creation failure (e.g. the name is already taken)
    """
    if skey is None or skey == "":
        return None
    # normalize '-' to '_' (matches the encoding used by get_by_user_skey)
    skey = skey.replace("-", "_")
    note = get_by_user_skey(creator, skey)
    if note != None:
        return note
    # make sure no other note already uses this name
    check_by_name(creator, skey)
    note_dict = Storage()
    note_dict.name = skey
    note_dict.skey = skey
    note_dict.creator = creator
    note_dict.content = ""
    note_dict.data = ""
    note_dict.type = "md"
    note_dict.sub_type = "log"
    note_dict.parent_id = "0"
    note_id = create_note(note_dict)
    return get_by_id(note_id)
def create_note_base(note_dict, date_str = None, note_id = None):
    """Persist a new note record and write its creation log; returns the id.

    Three id-allocation strategies:
    * note_id given           -> use it verbatim;
    * no date_str             -> dbutil timestamp-sequence id;
    * date_str ("YYYY.MM.DD") -> millisecond-timestamp id derived from that
      date, probing forward under CREATE_LOCK until an unused id is found.
    """
    # the real creation instant (ctime may be overridden by date_str below)
    ctime0 = dateutil.format_datetime()
    note_dict["ctime0"] = ctime0
    note_dict["atime"] = ctime0
    note_dict["mtime"] = ctime0
    note_dict["ctime"] = ctime0
    note_dict["version"] = 0
    if note_id is not None:
        # create the note with a caller-specified id
        note_dict["id"] = note_id
        put_note_to_db(note_id, note_dict)
        # record the creation log
        add_create_log(note_dict)
        return note_id
    elif date_str is None or date_str == "":
        # default rule: timestamp-sequence id
        note_id = dbutil.timeseq()
        note_dict["id"] = note_id
        put_note_to_db(note_id, note_dict)
        # record the creation log
        add_create_log(note_dict)
        return note_id
    else:
        # create at a caller-specified date
        date_str = date_str.replace(".", "-")
        if date_str == dateutil.format_date():
            # created today: keep full millisecond precision
            timestamp = int(time.time() * 1000)
        else:
            timestamp = int(dateutil.parse_date_to_timestamp(date_str) * 1000)
        try:
            # lock so two concurrent creates cannot claim the same id
            CREATE_LOCK.acquire()
            while True:
                note_id = "%020d" % timestamp
                note_dict["ctime"] = dateutil.format_datetime(timestamp/1000)
                old = get_by_id(note_id)
                if old is None:
                    note_dict["id"] = note_id
                    put_note_to_db(note_id, note_dict)
                    # record the creation log
                    add_create_log(note_dict)
                    return note_id
                else:
                    # id taken: probe the next millisecond
                    timestamp += 1
        finally:
            CREATE_LOCK.release()
def create_note(note_dict, date_str = None, note_id = None):
    """Create a note: fill defaults, persist it, and update derived state.

    @param {dict} note_dict  must contain at least "content" and "creator"
    @param {string} date_str optional "YYYY.MM.DD" creation date (see create_note_base)
    @param {string} note_id  optional explicit id
    @return the id of the newly created note
    """
    # deliberate KeyError if the mandatory fields are missing
    content = note_dict["content"]
    creator = note_dict["creator"]
    name = note_dict.get("name")
    note_type = note_dict.get("type")

    if "parent_id" not in note_dict:
        note_dict["parent_id"] = "0"
    if "priority" not in note_dict:
        note_dict["priority"] = 0
    if "data" not in note_dict:
        note_dict["data"] = ""

    # persist the base record (allocates the id)
    note_id = create_note_base(note_dict, date_str, note_id)

    # refresh the child count of the parent folder
    update_children_count(note_dict["parent_id"])

    # BUGFIX: the original compared the *builtin* ``type`` against "gallery",
    # which is never true, so gallery upload directories were never created.
    if note_type == "gallery":
        dirname = os.path.join(xconfig.UPLOAD_DIR, creator, str(note_id))
        xutils.makedirs(dirname)

    # refresh per-user statistics
    refresh_note_stat_async(creator)
    # bump the parent folder's mtime
    touch_note(note_dict["parent_id"])
    # store the skey index (no-op without skey)
    save_note_skey(note_dict)

    # finally announce the successful creation
    # BUGFIX: fire the note's actual type instead of the builtin ``type``
    xmanager.fire("note.add", dict(name = name, type = note_type, id = note_id))
    return note_id
def create_token(type, id):
    """Create and persist a share token for (type, id); returns the token uuid."""
    uuid = textutil.generate_uuid()
    token_info = Storage(type = type, id = id)
    dbutil.put("token:%s" % uuid, token_info)
    return uuid
def add_create_log(note):
    """Record a creation log entry for the note's creator (delegates to NOTE_DAO)."""
    NOTE_DAO.add_create_log(note.creator, note)

def add_visit_log(user_name, note):
    """Record a visit log entry for *user_name* (delegates to NOTE_DAO)."""
    NOTE_DAO.add_visit_log(user_name, note)
def remove_virtual_fields(note):
    """Strip derived, display-only fields (set by build_note_info) before
    persisting a note record."""
    for field in ("path", "url", "icon", "show_edit", "create_date"):
        del_dict_key(note, field)
def put_note_to_db(note_id, note):
    """Persist the full note record, refresh its indexes, and log the edit."""
    # NOTE(review): priority/mtime/atime are read but unused here
    priority = note.priority
    mtime = note.mtime
    creator = note.creator
    atime = note.atime

    # strip fields that must not be persisted
    remove_virtual_fields(note)

    # save the full record
    dbutil.put("note_full:%s" % note_id, note)

    # refresh the index records
    update_index(note)

    # append an edit-log entry
    NOTE_DAO.add_edit_log(creator, note)
def touch_note(note_id):
    """Bump a note's mtime to now and refresh its indexes (full record untouched)."""
    note = get_by_id(note_id)
    if note != None:
        note.mtime = dateutil.format_datetime()
        update_index(note)
def del_dict_key(dict, key):
    """Remove *key* from *dict* if present; silently ignore a missing key."""
    dict.pop(key, None)
def convert_to_index(note):
    """Derive an index record from a full note: copy, strip virtual and heavy
    fields (data/content), and normalize parent_id to a string."""
    note_index = Storage(**note)

    # drop display-only fields
    remove_virtual_fields(note_index)

    # drop the content fields — the index must stay small
    del_dict_key(note_index, 'data')
    del_dict_key(note_index, 'content')

    note_index.parent_id = str(note_index.parent_id)

    return note_index
def update_index(note):
    """Refresh all index records for a note: the global note_index, the
    per-user note_tiny index, the notebook index (for groups) and the
    public-note index (when is_public is set)."""
    id = note['id']
    note_id = format_note_id(id)

    if note_id == "0":
        # synthetic root — never indexed
        return

    note_index = convert_to_index(note)
    dbutil.put('note_index:%s' % id, note_index)

    # per-user note index
    note_tiny_db = get_note_tiny_table(note.creator)
    note_tiny_db.update_by_id(note_id, note_index)

    if note.type == "group":
        dbutil.put("notebook:%s:%s" % (note.creator, format_note_id(id)), note_index)

    if note.is_public != None:
        update_public_index(note)
def update_public_index(note):
    """Insert/refresh the note in the public index when it is public,
    otherwise remove it."""
    db = get_note_public_table()
    note_id = format_note_id(note.id)
    if note.is_public:
        note_index = convert_to_index(note)
        db.update_by_id(note_id, note_index)
    else:
        db.delete_by_id(note_id)
def update_note(note_id, **kw):
    """Update basic fields of a note; returns 1 on success, 0 if not found.

    Only the whitelisted keyword fields below are applied. Moving a note
    between folders is NOT supported here — use move_note.
    """
    # parent_id changes must go through move_note (it maintains folder counts)
    if "parent_id" in kw:
        raise Exception("[note.dao.update_note] can not update `parent_id`, please use `note.dao.move_note`")
    content = kw.get('content')
    data = kw.get('data')
    priority = kw.get('priority')
    name = kw.get("name")
    atime = kw.get("atime")
    is_public = kw.get("is_public")
    tags = kw.get("tags")
    orderby = kw.get("orderby")
    archived = kw.get("archived")
    size = kw.get("size")
    token = kw.get("token")
    visited_cnt = kw.get("visited_cnt")
    # NOTE(review): these two locals are never used
    old_parent_id = None
    new_parent_id = None

    note = get_by_id(note_id)
    if note is None:
        return 0

    if content:
        note.content = content
    if data:
        note.data = data
    if priority != None:
        note.priority = priority
    if name:
        note.name = name
    if atime:
        note.atime = atime

    # sharing-related updates
    if is_public != None:
        note.is_public = is_public
    if is_public == 1:
        note.share_time = dateutil.format_time()
    if is_public == 0:
        note.share_time = None

    if tags != None:
        note.tags = tags
    if orderby != None:
        note.orderby = orderby
    if archived != None:
        note.archived = archived
    if size != None:
        note.size = size
    if token != None:
        note.token = token
    if visited_cnt != None:
        note.visited_cnt = visited_cnt

    if note.version is None:
        note.version = 1

    old_version = note.version
    note.mtime = xutils.format_time()
    note.version += 1

    # a priority-only change does not bump the version
    if len(kw) == 1 and kw.get('priority') != None:
        note.version = old_version
    # a rename-only change does not bump the version
    if len(kw) == 1 and kw.get('name') != None:
        note.version = old_version

    put_note_to_db(note_id, note)
    return 1
def move_note(note, new_parent_id):
    """Move a note to another folder, maintaining both folders' child counts."""
    old_parent_id = note.parent_id
    note.parent_id = new_parent_id

    # content unchanged — only the index records need refreshing
    update_index(note)

    # refresh the child counts of both folders
    update_children_count(old_parent_id)
    update_children_count(new_parent_id)

    # bump the new parent's mtime
    touch_note(new_parent_id)
def update0(note):
    """Persist an already-modified note record (name/mtime/content/etc.),
    bumping version and timestamps; parent_id changes are NOT handled here.
    Silently returns when the note no longer exists."""
    current = get_by_id(note.id)
    if current is None:
        return
    # refresh version and timestamps
    current_time = xutils.format_datetime()
    note.version = current.version + 1
    note.mtime = current_time
    note.atime = current_time
    put_note_to_db(note.id, note)
def get_by_name(creator, name):
    """Find the creator's (non-deleted) note with exactly this name; None if absent."""
    def find_func(key, value):
        if value.is_deleted:
            return False
        return value.name == name
    db = get_note_tiny_table(creator)
    result = db.list(offset = 0, limit = 1, filter_func = find_func)
    if len(result) > 0:
        note = result[0]
        # re-load through get_by_id to obtain the merged full record
        return get_by_id(note.id)
    return None
def check_by_name(creator, name):
    """Raise if the creator already has a note with this name."""
    note_by_name = get_by_name(creator, name)
    if note_by_name != None:
        raise Exception("笔记【%s】已存在" % name)
def visit_note(user_name, id):
    """Record a visit: bump atime, visit counter and hotness, then log it.
    Silently returns for an unknown id."""
    note = get_by_id(id)
    if note is None:
        return
    note.atime = xutils.format_datetime()

    # bump the visit counter (visit_cnt mirrors visited_cnt)
    if note.visited_cnt is None:
        note.visited_cnt = 0
    note.visited_cnt += 1
    note.visit_cnt = note.visited_cnt

    # bump the hotness score
    if note.hot_index is None:
        note.hot_index = 0
    note.hot_index += 1

    update_index(note)
    add_visit_log(user_name, note)
def delete_note_physically(creator, note_id):
    """Hard-delete a note: remove the full record, both indexes and its
    history, then refresh the creator's statistics."""
    assert creator != None, "creator can not be null"
    assert note_id != None, "note_id can not be null"

    full_key = "note_full:%s" % note_id
    index_key = "note_index:%s" % note_id

    dbutil.delete(full_key)
    dbutil.delete(index_key)

    note_tiny_db = get_note_tiny_table(creator)
    note_tiny_db.delete_by_id(note_id)

    delete_history(note_id)

    # refresh per-user statistics
    refresh_note_stat_async(creator)
def delete_note(id):
    """Delete a note: first call soft-deletes (sets is_deleted/dtime and
    cleans up related records); calling it again on an already-deleted note
    performs the physical delete."""
    note = get_by_id(id)
    if note is None:
        return

    if note.is_deleted != 0:
        # already soft-deleted — perform the physical delete
        delete_note_physically(note.creator, note.id)
        return

    # soft delete
    note.mtime = xutils.format_datetime()
    note.dtime = xutils.format_datetime()
    note.is_deleted = 1
    put_note_to_db(id, note)

    # refresh the parent folder's child count
    update_children_count(note.parent_id)
    delete_tags(note.creator, id)

    # remove the notebook index entry
    book_key = "notebook:%s:%s" % (note.creator, format_note_id(id))
    dbutil.delete(book_key)

    # remove the skey index entry
    delete_note_skey(note)

    # remove the visit log
    NOTE_DAO.delete_visit_log(note.creator, note.id)

    # refresh per-user statistics
    refresh_note_stat(note.creator)
def update_children_count(parent_id, db=None):
    """Recompute and store a folder's child count in its size field.
    No-op for the root or an unknown parent.

    NOTE(review): the `db` parameter is unused.
    """
    if parent_id is None or parent_id == "" or parent_id == 0:
        return

    note = get_by_id(parent_id)
    if note is None:
        return

    creator = note.creator
    children_count = count_by_parent(creator, parent_id)
    note.size = children_count

    update_index(note)
def fill_parent_name(files):
    """Attach parent_name to each note by batch-loading all parents at once."""
    id_list = []
    for item in files:
        build_note_info(item)
        id_list.append(item.parent_id)

    note_dict = batch_query(id_list)
    for item in files:
        parent = note_dict.get(item.parent_id)
        if parent != None:
            item.parent_name = parent.name
        else:
            item.parent_name = None
def check_group_status(status):
    """Validate a notebook status filter.

    None is accepted (meaning "no filter"); otherwise the value must be one
    of "all", "active" or "archived", else an Exception is raised.
    """
    if status is None:
        return
    if status in ("all", "active", "archived"):
        return
    raise Exception("[check_group_status] invalid status: %s" % status)
@xutils.timeit(name = "NoteDao.ListGroup:leveldb", logfile = True)
def list_group(creator = None, orderby = "mtime_desc",
               skip_archived = False,
               status = "all",
               offset = 0, limit = None):
    """List the creator's notebooks (groups), filtered by archive status.

    Pagination (offset/limit) is applied after the full scan and sort.
    """
    check_group_status(status)

    # TODO use an index instead of a full prefix scan
    def list_group_func(key, value):
        if value.type != "group" or value.is_deleted != 0:
            return False
        if skip_archived and value.archived:
            return False
        if status == "archived":
            return value.archived
        if status == "active":
            return not value.archived
        return True

    notes = dbutil.prefix_list("notebook:%s" % creator, list_group_func)
    sort_notes(notes, orderby)
    if limit is not None:
        return notes[offset:offset + limit]
    return notes
def count_group(creator, status = None):
    """Count the creator's notebooks; a status filter falls back to listing."""
    check_group_status(status)
    if status is None:
        return dbutil.count_table("notebook:%s" % creator)
    return len(list_group(creator, status = status))
@xutils.timeit(name = "NoteDao.ListRootGroup:leveldb", logfile = True)
def list_root_group(creator = None, orderby = "name"):
    """List the creator's top-level (parent_id == 0) notebooks."""
    def list_root_group_func(key, value):
        return value.creator == creator and value.type == "group" and value.parent_id == 0 and value.is_deleted == 0
    notes = dbutil.prefix_list("notebook:%s" % creator, list_root_group_func)
    sort_notes(notes, orderby)
    return notes
def list_default_notes(creator, offset = 0, limit = 1000, orderby = "mtime_desc"):
    """List the creator's loose notes (directly under the root, not groups)."""
    # TODO use an index instead of a full prefix scan
    def list_default_func(key, value):
        if value.is_deleted:
            return False
        if value.type == "group":
            return False
        return value.creator == creator and str(value.parent_id) == "0"

    notes = dbutil.prefix_list("note_tiny:", list_default_func)
    sort_notes(notes, orderby)
    return notes[offset:offset+limit]
def list_public(offset, limit, orderby = "ctime_desc"):
    """Page through publicly shared notes, by hotness or share time (desc).

    Also lazily purges entries whose note was deleted after being shared.
    """
    if orderby == "hot":
        index_name = "hot_index"
    else:
        index_name = "share_time"

    db = dbutil.get_table("note_public")
    notes = db.list_by_index(index_name,
                             offset = offset, limit = limit, reverse = True)
    build_note_list_info(notes)
    for note in notes:
        if note.is_deleted:
            # stale entry: the note was deleted but stayed in the public index
            logging.warning("笔记已删除:%s,name:%s", note.id, note.name)
            db.delete_by_id(note.id)
    return notes
def count_public():
    """Count the publicly shared notes."""
    db = get_note_public_table()
    return db.count()
@xutils.timeit(name = "NoteDao.ListNote:leveldb", logfile = True, logargs=True)
def list_by_parent(creator, parent_id, offset = 0, limit = 1000,
                   orderby="name", skip_group = False, include_public = True):
    """List the notes inside a folder.

    include_public=True also yields other users' public notes in that folder.
    """
    if parent_id is None:
        raise Exception("list_by_parent: parent_id is None")
    # parent_id may be stored as int or str — compare as strings
    parent_id = str(parent_id)

    # TODO use an index instead of a full prefix scan
    def list_note_func(key, value):
        if value.is_deleted:
            return False
        if skip_group and value.type == "group":
            return False
        if str(value.parent_id) != parent_id:
            return False
        if include_public:
            return (value.is_public or value.creator == creator)
        else:
            return value.creator == creator

    db = get_note_tiny_table(creator)
    notes = db.list(offset = 0, limit = limit, filter_func = list_note_func)
    sort_notes(notes, orderby)
    return notes[offset:offset+limit]
def list_by_date(field, creator, date, orderby = "ctime_desc"):
    """List notes whose *field* (a date string like ctime) contains *date*.
    A None creator falls back to the "public" user namespace."""
    user = creator
    if user is None:
        user = "public"

    def list_func(key, value):
        if value.is_deleted:
            return False
        # substring match, so `date` may be a day, month or year prefix
        return date in getattr(value, field)

    files = dbutil.prefix_list("note_tiny:%s" % user, list_func)
    fill_parent_name(files)
    sort_notes(files, orderby)
    return files
@xutils.timeit(name = "NoteDao.CountNote", logfile=True, logargs=True, logret=True)
def count_by_creator(creator):
    """Count the creator's non-deleted notes, excluding groups (folders)."""
    def count_func(key, value):
        if value.is_deleted:
            return False
        # BUGFIX: the original compared the *builtin* ``type`` (always
        # != 'group'), so folders were wrongly included in the count.
        return value.creator == creator and value.type != 'group'
    return dbutil.prefix_count("note_tiny:%s" % creator, count_func)
def count_user_note(creator):
    """Alias of count_by_creator."""
    return count_by_creator(creator)
def count_ungrouped(creator):
    """Count the creator's notes that sit directly under the root (parent 0).

    BUGFIX: the original called *itself* with two arguments, which raises a
    TypeError on every call; the intended target is count_by_parent.
    """
    return count_by_parent(creator, 0)
@xutils.timeit(name = "NoteDao.CountNoteByParent", logfile = True, logargs = True, logret = True)
def count_by_parent(creator, parent_id):
    """Count the visible notes inside a folder.

    @param {string} creator          owner doing the counting
    @param {string/number} parent_id id of the parent folder
    """
    # TODO use an index instead of a full prefix scan
    def list_note_func(key, value):
        if value.is_deleted:
            return False
        # own notes plus other users' public notes in the same folder
        return (value.is_public or value.creator == creator) and str(value.parent_id) == str(parent_id)

    return dbutil.prefix_count("note_tiny", list_note_func)
@xutils.timeit(name = "NoteDao.CountDict", logfile = True, logargs = True, logret = True)
def count_dict(user_name):
    """Count dictionary entries (global table; `user_name` is currently unused)."""
    import xtables
    dict_table = xtables.get_dict_table()
    return dict_table.count()
@xutils.timeit(name = "NoteDao.FindPrev", logfile = True)
def find_prev_note(note, user_name):
    """Find the alphabetically previous sibling of `note`, or None."""
    target_parent = str(note.parent_id)
    target_name = note.name

    def is_prev(key, value):
        if value.is_deleted:
            return False
        return str(value.parent_id) == target_parent and value.name < target_name

    candidates = dbutil.prefix_list("note_tiny:%s" % user_name, is_prev)
    if not candidates:
        return None
    # the previous note has the largest name among those smaller than ours
    return max(candidates, key = lambda x: x.name)
@xutils.timeit(name = "NoteDao.FindNext", logfile = True)
def find_next_note(note, user_name):
    """Find the alphabetically next sibling of `note`, or None."""
    target_parent = str(note.parent_id)
    target_name = note.name

    def is_next(key, value):
        if value.is_deleted:
            return False
        return str(value.parent_id) == target_parent and value.name > target_name

    candidates = dbutil.prefix_list("note_tiny:%s" % user_name, is_next)
    if not candidates:
        return None
    # the next note has the smallest name among those larger than ours
    return min(candidates, key = lambda x: x.name)
def add_history(id, version, note):
    """Persist a snapshot of `note` keyed by (id, version); no-op without a version."""
    if version is None:
        return
    note['note_id'] = id
    history_key = "note_history:%s:%s" % (id, version)
    dbutil.put(history_key, note)
def list_history(note_id):
    """Return the history snapshots of a note, newest (by mtime) first."""
    records = dbutil.prefix_list("note_history:%s:" % note_id)
    records.sort(key = lambda x: x.mtime or "", reverse = True)
    return records
def delete_history(note_id, version = None):
    # Not implemented: history records are currently never pruned.
    # TODO(review): decide on a retention policy before implementing.
    pass
def get_history(note_id, version):
    """Load one history snapshot of a note, or None when it does not exist."""
    return dbutil.get("note_history:%s:%s" % (note_id, version))
def search_name(words, creator = None, parent_id = None, orderby = "hot_index"):
    """Search notes whose name contains ALL of `words` (case-insensitive).

    Visible notes are the creator's own plus public ones; an optional
    parent_id restricts the search to one folder.
    """
    assert isinstance(words, list)
    lowered = [word.lower() for word in words]

    parent_filter = parent_id
    if parent_filter != None and parent_filter != "":
        parent_filter = str(parent_filter)

    def matches(key, value):
        if value.is_deleted:
            return False
        if parent_filter != None and str(value.parent_id) != parent_filter:
            return False
        visible = (value.creator == creator or value.is_public)
        return visible and textutil.contains_all(value.name.lower(), lowered)

    db = get_note_tiny_table(creator)
    found = db.list(filter_func = matches,
                    offset = 0, limit = MAX_SEARCH_SIZE, fill_cache = False)
    # fill in derived display info, then order by relevance and stickiness
    build_note_list_info(found)
    sort_notes(found, orderby)
    sort_by_priority(found)
    return found
def search_content(words, creator=None, orderby = "hot_index"):
    """Full-text search over note contents for ALL of `words` (case-insensitive)."""
    assert isinstance(words, list)
    lowered = [word.lower() for word in words]

    def matches(key, value):
        if value.content is None:
            return False
        visible = (value.creator == creator or value.is_public)
        return visible and textutil.contains_all(value.content.lower(), lowered)

    found = dbutil.prefix_list("note_full", matches, 0, MAX_SEARCH_SIZE,
                               fill_cache = False)
    # fill in derived display info, then order by relevance
    build_note_list_info(found)
    sort_notes(found, orderby)
    return found
def search_public(words):
    """Search public notes whose NAME contains ALL of `words` (case-insensitive)."""
    assert isinstance(words, list)
    lowered = [word.lower() for word in words]

    def matches(key, value):
        if value.content is None:
            return False
        return value.is_public and textutil.contains_all(value.name.lower(), lowered)

    found = dbutil.prefix_list("note_full", matches, 0, MAX_SEARCH_SIZE)
    notes = [build_note_info(item) for item in found]
    sort_notes(notes)
    return notes
def check_and_remove_broken_notes(notes, user_name):
    """Drop notes whose full record is missing and purge them from storage.

    Returns only the healthy notes; broken ones are deleted (both via
    delete_note and directly from the tiny table) and the user's note
    statistics are refreshed when anything was removed.
    """
    result = []
    has_broken = False
    for note in notes:
        full = get_full_by_id(note.id)
        if full != None:
            result.append(note)
        else:
            logging.error("node=%s", note)
            delete_note(note.id)
            # if note_index is already gone, delete_note cannot remove the
            # tiny record, so delete it from the tiny table as well
            db = get_note_tiny_table(note.creator)
            db.delete(note)
            has_broken = True
    if has_broken:
        refresh_note_stat(user_name)
    return result
def count_removed(creator):
    """Count notes in `creator`'s recycle bin (deleted but still stored)."""
    def in_trash(key, value):
        return value.is_deleted and value.creator == creator
    return dbutil.prefix_count("note_tiny:%s" % creator, in_trash)
def list_removed(creator, offset, limit, orderby = None):
    """Page through `creator`'s deleted notes.

    @param {string} creator  owner of the recycle bin
    @param {int} offset      paging offset applied AFTER sorting
    @param {int} limit       page size
    @param {string} orderby  sort key passed to sort_notes

    Bug fix: the original passed `offset` to db.list AND sliced the sorted
    result with the same offset again, effectively skipping 2*offset
    entries.  The DB scan now starts at 0 and paging is applied exactly
    once, after sorting and broken-note cleanup.
    """
    def in_trash(key, value):
        return value.is_deleted and value.creator == creator
    db = get_note_tiny_table(creator)
    notes = db.list(filter_func = in_trash, offset = 0, limit = MAX_LIST_SIZE)
    notes = check_and_remove_broken_notes(notes, creator)
    sort_notes(notes, orderby)
    return notes[offset: offset + limit]
def document_filter_func(key, value):
    """Keep live notes of any document-like type."""
    doc_types = ("md", "text", "html", "post", "log", "plan")
    return value.type in doc_types and value.is_deleted == 0
def table_filter_func(key, value):
    """Keep live notes of a table-like type (csv or table)."""
    table_types = ("csv", "table")
    return value.type in table_types and value.is_deleted == 0
def get_filter_func(type, default_filter_func):
    """Map a query type to its canned list filter; fall back to the default."""
    if type in ("document", "doc"):
        return document_filter_func
    if type in ("csv", "table"):
        return table_filter_func
    return default_filter_func
def list_by_type(creator, type, offset, limit, orderby = "name", skip_archived = False):
    """List a user's notes of one type.

    @param {str} creator        note owner
    @param {str|None} type      note type ("all" matches everything)
    @param {int} offset         paging offset for the index scan
    @param {int} limit          maximum rows returned
    @param {str} orderby        sort key
    @param {bool} skip_archived whether to drop archived notes
    """
    assert type != None, "note.dao.list_by_type: type is None"

    def default_filter(key, value):
        if skip_archived and value.archived:
            return False
        if type != "all" and value.type != type:
            return False
        return value.is_deleted == 0

    filter_func = get_filter_func(type, default_filter)
    db = get_note_tiny_table(creator)
    notes = db.list_by_index("ctime", filter_func = filter_func,
                             offset = offset, limit = limit, reverse = True)
    sort_notes(notes, orderby)
    return notes
def count_by_type(creator, type):
    """Count a user's live notes of the given type."""
    def default_count(key, value):
        return value.type == type and value.creator == creator and value.is_deleted == 0
    filter_func = get_filter_func(type, default_count)
    return dbutil.prefix_count("note_tiny:%s" % creator, filter_func)
def list_sticky(creator, offset = 0, limit = 1000, orderby = "ctime_desc"):
    """Page through a user's sticky (priority > 0) live notes.

    @param {string} creator  note owner
    @param {int} offset      paging offset applied AFTER sorting
    @param {int} limit       page size
    @param {string} orderby  sort key passed to sort_notes

    Bug fix: the original passed `offset` to db.list AND sliced the sorted
    result with the same offset again, skipping 2*offset entries.  The DB
    scan now starts at 0; paging is applied once, after sorting.
    """
    def is_sticky(key, value):
        return value.priority > 0 and value.creator == creator and value.is_deleted == 0
    db = get_note_tiny_table(creator)
    notes = db.list(filter_func = is_sticky, offset = 0, limit = MAX_STICKY_SIZE)
    sort_notes(notes, orderby = orderby)
    return notes[offset:offset+limit]
def count_sticky(creator):
    """Count a user's sticky (priority > 0) live notes."""
    def is_sticky(key, value):
        return value.priority > 0 and value.creator == creator and value.is_deleted == 0
    return get_note_tiny_table(creator).count(filter_func = is_sticky)
def list_archived(creator, offset = 0, limit = 100):
    """List a user's archived, non-deleted notes."""
    def is_archived(key, value):
        return value.archived and value.creator == creator and value.is_deleted == 0
    notes = dbutil.prefix_list("note_tiny:%s" % creator, is_archived, offset, limit)
    sort_notes(notes)
    return notes
def get_tags(creator, note_id):
    """Return a note's tag list, or None when no tag record exists."""
    record = dbutil.get("note_tags:%s:%s" % (creator, note_id))
    if not record:
        return None
    return attrget(record, "tags")
def update_tags(creator, note_id, tags):
    """Write a note's tag record and keep the note index in sync."""
    tag_key = "note_tags:%s:%s" % (creator, note_id)
    dbutil.put(tag_key, Storage(note_id = note_id, tags = tags))
    note = get_by_id(note_id)
    if note is not None:
        note.tags = tags
        update_index(note)
def delete_tags(creator, note_id):
    """Remove the tag record of a note."""
    dbutil.delete("note_tags:%s:%s" % (creator, note_id))
def list_by_tag(user, tagname):
    """List the notes of `user` (or the public pool) carrying `tagname`."""
    owner = "public" if user is None else user

    def has_tag(key, value):
        if value.tags is None:
            return False
        return tagname in value.tags

    tag_records = dbutil.prefix_list("note_tags:%s" % owner, has_tag)
    notes = []
    for record in tag_records:
        note = get_by_id(record.note_id)
        if note is not None:
            notes.append(note)
    sort_notes(notes)
    return notes
def list_tag(user):
    """Aggregate tag usage counts for a user (or the public pool),
    sorted by frequency, most used first."""
    owner = "public" if user is None else user
    counter = dict()

    def accumulate(key, value):
        if value.tags is None:
            return False
        for tag in value.tags:
            counter[tag] = counter.get(tag, 0) + 1

    # prefix_count is used here purely to iterate every tag record;
    # the numeric result is discarded.
    dbutil.prefix_count("note_tags:%s" % owner, accumulate)
    tag_list = [Storage(name = tag, amount = counter[tag]) for tag in counter]
    tag_list.sort(key = lambda item: -item.amount)
    return tag_list
def list_by_func(creator, list_func, offset, limit):
    """Generic reverse-ordered note listing with a caller-provided filter."""
    notes = dbutil.prefix_list("note_tiny:%s" % creator, list_func,
                               offset, limit, reverse = True)
    build_note_list_info(notes)
    return notes
def add_search_history(user, search_key, category = "default", cost_time = 0):
    """Append one search record (time-sequenced key) for `user`."""
    record = Storage(key = search_key, category = category, cost_time = cost_time)
    dbutil.put("search_history:%s:%s" % (user, dbutil.timeseq()), record)
def list_search_history(user, limit = 1000, orderby = "time_desc"):
    """Return a user's most recent searches (`orderby` is currently unused)."""
    if user is None or user == "":
        return []
    return dbutil.prefix_list("search_history:%s" % user, reverse = True, limit = limit)
def clear_search_history(user_name):
    """Delete every search-history record belonging to `user_name`."""
    assert user_name != None
    assert user_name != ""
    history = dbutil.get_list_table("search_history", user_name = user_name)
    for record in history.iter(reverse = True, limit = -1):
        history.delete(record)
@xutils.async_func_deco()
def refresh_note_stat_async(user_name):
    """Refresh note statistics asynchronously (scheduled by xutils' async decorator)."""
    refresh_note_stat(user_name)
def refresh_note_stat(user_name):
    """Recompute all per-user note statistics, cache them under
    ``user_stat:<user>:note`` and return the stat object."""
    assert user_name != None, "[refresh_note_stat.assert] user_name != None"
    stat = Storage()
    # defensive guard: unreachable after the assert unless asserts are
    # disabled (python -O)
    if user_name is None:
        return stat
    stat.total = count_by_creator(user_name)
    stat.group_count = count_group(user_name)
    stat.doc_count = count_by_type(user_name, "doc")
    stat.gallery_count = count_by_type(user_name, "gallery")
    stat.list_count = count_by_type(user_name, "list")
    stat.table_count = count_by_type(user_name, "table")
    stat.plan_count = count_by_type(user_name, "plan")
    stat.log_count = count_by_type(user_name, "log")
    stat.sticky_count = count_sticky(user_name)
    stat.removed_count = count_removed(user_name)
    stat.dict_count = count_dict(user_name)
    stat.comment_count = NOTE_DAO.count_comment(user_name)
    dbutil.put("user_stat:%s:note" % user_name, stat)
    return stat
def get_note_stat(user_name):
    """Fetch cached note statistics, rebuilding them on a cache miss."""
    stat = dbutil.get("user_stat:%s:note" % user_name)
    return refresh_note_stat(user_name) if stat is None else stat
def get_gallery_path(note):
    """Resolve (creating it if needed) the image-gallery directory for a note.

    Prefers the new bucketed layout
    ``<upload>/<creator>/gallery/<last-2-digits>/<note_id>`` and falls back
    to the legacy layout ``<upload>/<creator>/<parent_id>/<note_id>``.
    """
    import xconfig
    # New layout adds one bucket level (100 buckets; the per-bucket limit
    # depends on the file system, at least 255 entries each, so at least
    # ~25k galleries per user - plenty for one user).
    note_id = str(note.id)
    if len(note_id) < 2:
        # left-pad very short ids so the 2-character bucket always exists
        second_dir = ("00" + note_id)[-2:]
    else:
        second_dir = note_id[-2:]
    standard_dir = os.path.join(xconfig.UPLOAD_DIR, note.creator, "gallery", second_dir, note_id)
    if os.path.exists(standard_dir):
        return standard_dir
    # TODO: handle the archived location
    # legacy layout
    fpath = os.path.join(xconfig.UPLOAD_DIR, note.creator, str(note.parent_id), note_id)
    if os.path.exists(fpath):
        # data migration to the new layout is handled by a separate tool
        return fpath
    # neither location exists yet - create the standard one
    fsutil.makedirs(standard_dir)
    return standard_dir
def get_virtual_group(user_name, name):
    """Build the synthetic "ungrouped" folder descriptor for a user.

    Raises for any name other than "ungrouped".
    """
    if name != "ungrouped":
        raise Exception("[get_virtual_group] invalid name: %s" % name)
    files = list_by_parent(user_name, 0, 0, 1000,
                           skip_group = True, include_public = False)
    group = Storage()
    group.name = "未分类笔记"
    group.url = "/note/default"
    group.size = len(files)
    group.icon = "fa-folder"
    group.priority = 1
    return group
def record_share_from_info(note, from_user, to_user):
    """Track, on the sharer's side, which users a note has been shared with."""
    share_from_key = "note_share_from:%s:%s" % (from_user, note.id)
    info = dbutil.get(share_from_key)
    if info == None:
        info = Storage(note_id = note.id, share_to_list = [])
    if to_user not in info.share_to_list:
        info.share_to_list.append(to_user)
    # write back unconditionally, matching the read-modify-write cycle
    dbutil.put(share_from_key, info)
def share_note_to(note, from_user, to_user):
    """Share `note` from `from_user` to `to_user`.

    Records the share on both sides (sharer list + receiver index) and is
    idempotent: an existing receiver-side record short-circuits.
    Raises when the target user does not exist.
    """
    # TODO: also record the share on the note record itself
    if not xauth.is_user_exist(to_user):
        raise Exception("[share_note_to] user not exist: %s" % to_user)
    record_share_from_info(note, from_user, to_user)
    share_to_key = "note_share_to:%s:%s" % (to_user, note.id)
    old = dbutil.get(share_to_key)
    if old is not None:
        # already shared - nothing to do
        return
    note_index = convert_to_index(note)
    note_index.share_time = dateutil.format_datetime()
    dbutil.put(share_to_key, note_index)
def get_share_from(from_user, note_id):
    """Return the sharer-side record of a note share, or None."""
    return dbutil.get("note_share_from:%s:%s" % (from_user, note_id))
def list_share_to(to_user, offset = 0, limit = None, orderby = None):
    """List the notes shared TO `to_user`, one page at a time."""
    page_size = xconfig.PAGE_SIZE if limit is None else limit
    notes = dbutil.prefix_list("note_share_to:%s" % to_user,
                               offset = offset, limit = page_size)
    sort_notes(notes, orderby = orderby)
    return notes
def get_share_to(to_user, note_id):
    """Return the receiver-side record of a note share, or None."""
    check_not_empty(to_user, "get_share_to.to_user")
    check_not_empty(note_id, "get_share_to.note_id")
    return dbutil.get("note_share_to:%s:%s" % (to_user, note_id))
def count_share_to(to_user):
    """Count the notes shared TO `to_user`."""
    check_not_empty(to_user, "count_share_to")
    return dbutil.count_table("note_share_to:%s" % to_user)
def check_not_empty(value, method_name):
    """Raise when `value` is None or an empty string.

    @param value        the value to validate
    @param method_name  caller name embedded in the error message

    Idiom fix: use `is None` instead of `== None` so the check cannot be
    confused by a custom __eq__ implementation.
    """
    if value is None or value == "":
        raise Exception("[%s] can not be empty" % method_name)
# ----------------------------------------------------------------------
# Export the DAO functions through xutils' function registry so that
# other modules can call them as "note.<name>" without importing this
# module directly.
# ----------------------------------------------------------------------

# write functions
xutils.register_func("note.create", create_note)
xutils.register_func("note.update", update_note)
xutils.register_func("note.update0", update0)
xutils.register_func("note.move", move_note)
xutils.register_func("note.visit", visit_note)
xutils.register_func("note.delete", delete_note)
xutils.register_func("note.touch", touch_note)
xutils.register_func("note.update_tags", update_tags)
xutils.register_func("note.create_token", create_token)
xutils.register_func("note.share_to", share_note_to)
xutils.register_func("note.delete_physically", delete_note_physically)

## internal index-maintenance hooks - not meant for external callers
xutils.register_func("note.update_index", update_index)
xutils.register_func("note.update_public_index", update_public_index)

# query functions
xutils.register_func("note.get_root", get_root)
xutils.register_func("note.get_default_group", get_default_group)
xutils.register_func("note.get_by_id", get_by_id)
xutils.register_func("note.get_by_token", get_by_token)
xutils.register_func("note.get_by_id_creator", get_by_id_creator)
xutils.register_func("note.get_by_name", get_by_name)
xutils.register_func("note.get_tags", get_tags)
xutils.register_func("note.get_or_create", get_or_create_note)
xutils.register_func("note.get_virtual_group", get_virtual_group)
xutils.register_func("note.search_name", search_name)
xutils.register_func("note.search_content", search_content)
xutils.register_func("note.search_public", search_public)
xutils.register_func("note.get_share_from", get_share_from)
xutils.register_func("note.get_share_to", get_share_to)
xutils.register_func("note.batch_query_list", batch_query_list)

# list functions
xutils.register_func("note.list_path", list_path)
xutils.register_func("note.list_group", list_group)
xutils.register_func("note.list_default_notes", list_default_notes)
xutils.register_func("note.list_root_group", list_root_group)
xutils.register_func("note.list_by_parent", list_by_parent)
xutils.register_func("note.list_by_date", list_by_date)
xutils.register_func("note.list_by_tag", list_by_tag)
xutils.register_func("note.list_by_type", list_by_type)
xutils.register_func("note.list_removed", list_removed)
xutils.register_func("note.list_sticky", list_sticky)
xutils.register_func("note.list_archived", list_archived)
xutils.register_func("note.list_tag", list_tag)
xutils.register_func("note.list_public", list_public)
xutils.register_func("note.list_by_func", list_by_func)
xutils.register_func("note.list_share_to", list_share_to)

# count functions
# NOTE(review): "note.count_recent_edit" is registered to count_user_note -
# looks like a legacy alias; confirm callers expect a plain note count here.
xutils.register_func("note.count_public", count_public)
xutils.register_func("note.count_recent_edit", count_user_note)
xutils.register_func("note.count_user_note", count_user_note)
xutils.register_func("note.count_ungrouped", count_ungrouped)
xutils.register_func("note.count_removed", count_removed)
xutils.register_func("note.count_by_type", count_by_type)
xutils.register_func("note.count_by_parent", count_by_parent)
xutils.register_func("note.count_group", count_group)
xutils.register_func("note.count_share_to", count_share_to)

# others
xutils.register_func("note.find_prev_note", find_prev_note)
xutils.register_func("note.find_next_note", find_next_note)

# history
xutils.register_func("note.add_history", add_history)
xutils.register_func("note.list_history", list_history)
xutils.register_func("note.get_history", get_history)
xutils.register_func("note.add_search_history", add_search_history)
xutils.register_func("note.list_search_history", list_search_history)
xutils.register_func("note.clear_search_history", clear_search_history)

# stat
xutils.register_func("note.get_note_stat", get_note_stat)
xutils.register_func("note.get_gallery_path", get_gallery_path)
xutils.register_func("note.refresh_note_stat_async", refresh_note_stat_async)
|
xupingmao/xnote
|
handlers/note/dao.py
|
Python
|
gpl-3.0
| 47,559
|
[
"VisIt"
] |
c5c3cd956517342e1660d591e558bc6400df7d5b6bb8caa739d5f1c354ab15ab
|
from __future__ import division
import numpy as np
import paraBEM
from paraBEM.pan2d import doublet_2_0, doublet_2_0_v, source_2_0, source_2_0_v
from paraBEM.vtk_export import VtkWriter
from paraBEM.utils import check_path
# 2D boundary-element heat-conduction test: three vertical panels with a
# mixed temperature/flux formulation, solved and exported to VTK.

# Panel corner points: one column of points at y = -10, one at y = +10.
p1 = paraBEM.PanelVector2(-1, -10)
p2 = paraBEM.PanelVector2(0, -10)
p3 = paraBEM.PanelVector2(1, -10)
p4 = paraBEM.PanelVector2(-1, 10)
p5 = paraBEM.PanelVector2(0, 10)
p6 = paraBEM.PanelVector2(1, 10)

# Three vertical panels at x = -1, 0, +1.
pan1 = paraBEM.Panel2([p4, p1])
pan2 = paraBEM.Panel2([p2, p5])
pan3 = paraBEM.Panel2([p3, p6])

# 3x3 influence matrix and right-hand side of the linear system.
mat = np.zeros([3, 3])
rhs = np.zeros([3])
panels = [pan1, pan2, pan3]

# Sketch of the setup (x positions, prescribed temperatures T and the
# conductivities l of the two gaps):
#      |    |   |
#  col |+  +|  +|
#      |    |   |
#  x  -1    0   1
#  T   0    ?   1
#  l      1   2

# panel1: temperature formulation
T1 = -10
T2 = 10
l1 = 1
l2 = 2
mat[0, 0] = source_2_0(panels[0].center, panels[0])
mat[0, 1] = source_2_0(panels[0].center, panels[1]) * (1 - l2 / l1)
mat[0, 2] = source_2_0(panels[0].center, panels[2])
rhs[0] += doublet_2_0(panels[0].center, panels[0]) * T1
rhs[0] += doublet_2_0(panels[0].center, panels[2]) * T2
rhs[0] += T1

# panel2: velocity (flux) formulation
mat[1, 0] = source_2_0_v(panels[1].center, panels[0]).dot(panels[1].n)
mat[1, 1] = source_2_0_v(panels[1].center, panels[1]).dot(panels[1].n) * (1 - l2 / l1) - 1
mat[1, 2] = source_2_0_v(panels[1].center, panels[2]).dot(panels[1].n)
rhs[1] += doublet_2_0_v(panels[1].center, panels[0]).dot(panels[1].n) * T1
rhs[1] += doublet_2_0_v(panels[1].center, panels[2]).dot(panels[1].n) * T2

# panel3: temperature formulation
mat[2, 0] = source_2_0(panels[2].center, panels[0])
mat[2, 1] = source_2_0(panels[2].center, panels[1]) * (1 - l2 / l1)
mat[2, 2] = source_2_0(panels[2].center, panels[2])
rhs[2] += doublet_2_0(panels[2].center, panels[0]) * T1
rhs[2] += doublet_2_0(panels[2].center, panels[2]) * T2
rhs[2] += T2

# Solve for the panel source strengths.
sol = np.linalg.solve(mat, rhs)
print(mat)
print(rhs)
print(sol)

# Evaluate temperature and flux on a regular 300x300 grid around the panels.
nx = 300
ny = 300
x_grid = np.linspace(-3, 3, nx)
y_grid = np.linspace(-3, 3, ny)
grid = [paraBEM.Vector2(x, y) for y in y_grid for x in x_grid]

# Temperature field: superposition of the doublet (boundary) and source
# (solved) contributions.
t_list = []
for point in grid:
    t = 0
    t -= doublet_2_0(point, pan1) * T1
    t -= doublet_2_0(point, pan3) * T2
    t += source_2_0(point, pan1) * sol[0]
    t += source_2_0(point, pan2) * sol[1] * (1 - l2 / l1)
    t += source_2_0(point, pan3) * sol[2]
    t_list.append(t)

# Heat-flux vector field; scaled by the conductivity of the gap the point
# falls into.
q_list = []
for point in grid:
    q = paraBEM.Vector2(0, 0)
    q -= doublet_2_0_v(point, pan1) * T1
    q -= doublet_2_0_v(point, pan3) * T2
    q += source_2_0_v(point, pan1) * sol[0]
    q += source_2_0_v(point, pan2) * sol[1] * (1 - l2 / l1)
    q += source_2_0_v(point, pan3) * sol[2]
    if point.x > -1 and point.x < 0:
        q *= l1
    if point.x > 0 and point.x < 1:
        q *= l2
    q_list.append(q)

# Export the structured grid with both fields to a VTK file.
writer = VtkWriter()
with open(check_path("results/heat_test.vtk"), "w") as _file:
    writer.structed_grid(_file, "element_2", [nx, ny, 1])
    writer.points(_file, grid)
    writer.data(_file, t_list, name="temperature", _type="SCALARS", data_type="POINT_DATA")
    writer.data(_file, q_list, name="q", _type="VECTORS", data_type="POINT_DATA")
|
looooo/paraBEM
|
examples/vtk/vtk_heat_test.py
|
Python
|
gpl-3.0
| 3,051
|
[
"VTK"
] |
15e0da7a6d8938770460bb02bb371bb292c7fde0572b5d72e33c8fb6fc62a732
|
from pathlib import Path
from os import chdir
import unittest
from math import sqrt
from nipype.interfaces.base import CommandLine
from qipype.commands import NewImage, Diff, ZShim
from qipype.fitting import ASL, ASE, ASESim, ASEDBV, ASEDBVSim
# Verbose flag forwarded to every interface invocation in the tests below.
vb = True
# Capture external tool output in one chunk instead of streaming it.
CommandLine.terminal_output = 'allatonce'
class Perfusion(unittest.TestCase):
    """Round-trip tests for the perfusion / oxygenation fitting interfaces.

    Each test simulates data from known parameter maps, runs the matching
    fitting interface, and asserts the recovered maps are close to the
    ground truth (via the Diff interface's normalized difference).
    """

    def setUp(self):
        # run every test inside a scratch directory
        Path('testdata').mkdir(exist_ok=True)
        chdir('testdata')

    def tearDown(self):
        chdir('../')

    def test_asl(self):
        # CASL sequence parameters used for both simulation and fitting
        seq = {'CASL': {'TR': 4.0, 'label_time': 3.0,
                        'post_label_delay': [0.3]}}
        asl_file = 'sim_asl.nii.gz'
        # two-volume image whose label/control ratio implies a known CBF
        NewImage(img_size=[32, 32, 32, 2], grad_dim=3, grad_vals=(1, 1.06), grad_steps=1,
                 out_file=asl_file, verbose=vb).run()
        NewImage(img_size=[32, 32, 32], fill=147.355,
                 out_file='ref_cbf.nii.gz', verbose=vb).run()
        ASL(sequence=seq, in_file=asl_file, verbose=vb).run()
        diff_CBF = Diff(in_file='CASL_CBF.nii.gz',
                        baseline='ref_cbf.nii.gz', verbose=vb).run()
        self.assertLessEqual(diff_CBF.outputs.out_diff, 0.1)

    def test_oef(self):
        # Use MultiEchoFlex as a proxy for ASE
        seq = {'MultiEcho': {'TR': 2.5,
                             'TE': [-0.07, -0.06, -0.05, -0.04, -0.03, -0.02, -0.01, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07]}}
        ase_file = 'sim_ase.nii.gz'
        img_sz = [32, 32, 32]
        noise = 0.001
        # ground-truth parameter maps: constant S0, gradients in dT/R2p/DBV
        NewImage(img_size=img_sz, grad_dim=0, fill=100.,
                 out_file='S0.nii.gz', verbose=vb).run()
        NewImage(img_size=img_sz, grad_dim=0, grad_vals=(-0.01, 0.01),
                 out_file='dT.nii.gz', verbose=vb).run()
        NewImage(img_size=img_sz, grad_dim=1, grad_vals=(1.0, 3.0),
                 out_file='R2p.nii.gz', verbose=vb).run()
        NewImage(img_size=img_sz, grad_dim=2, grad_vals=(0.005, 0.025),
                 out_file='DBV.nii.gz', verbose=vb).run()
        ASEDBVSim(sequence=seq, out_file=ase_file, noise=noise, verbose=vb,
                  S0_map='S0.nii.gz',
                  dT_map='dT.nii.gz',
                  R2p_map='R2p.nii.gz',
                  DBV_map='DBV.nii.gz').run()
        ASEDBV(sequence=seq, in_file=ase_file, verbose=vb).run()
        diff_R2p = Diff(in_file='ASE_R2p.nii.gz', baseline='R2p.nii.gz',
                        noise=noise, verbose=vb).run()
        diff_DBV = Diff(in_file='ASE_DBV.nii.gz', baseline='DBV.nii.gz',
                        noise=noise, verbose=vb).run()
        # DBV is harder to recover, hence the looser tolerance
        self.assertLessEqual(diff_R2p.outputs.out_diff, 20)
        self.assertLessEqual(diff_DBV.outputs.out_diff, 75)

    def test_oef_fixed_dbv(self):
        # Use MultiEchoFlex as a proxy for ASE
        seq = {'MultiEcho': {'TR': 2.5,
                             'TE': [-0.05, -0.04, -0.03, -0.02, -0.01, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05]}}
        ase_file = 'sim_ase.nii.gz'
        img_sz = [32, 32, 32]
        noise = 0.001
        DBV = 0.01
        NewImage(img_size=img_sz, grad_dim=0, fill=100.,
                 out_file='S0.nii.gz', verbose=vb).run()
        NewImage(img_size=img_sz, grad_dim=0, grad_vals=(-0.01, 0.01),
                 out_file='dT.nii.gz', verbose=vb).run()
        NewImage(img_size=img_sz, grad_dim=1, grad_vals=(1.0, 3.0),
                 out_file='R2p.nii.gz', verbose=vb).run()
        # simulate and fit with DBV pinned to the same fixed value
        ASESim(sequence=seq, out_file=ase_file,
               fix_DBV=DBV, noise=noise, verbose=vb,
               S0_map='S0.nii.gz',
               dT_map='dT.nii.gz',
               R2p_map='R2p.nii.gz').run()
        ASE(sequence=seq, in_file=ase_file, fix_DBV=DBV, verbose=vb).run()
        diff_R2p = Diff(in_file='ASE_R2p.nii.gz', baseline='R2p.nii.gz',
                        noise=noise, verbose=vb).run()
        self.assertLessEqual(diff_R2p.outputs.out_diff, 1.0)

    def test_zshim(self):
        nshims = 8
        sz = 32
        # root-sum-of-squares of the shim series is the expected combination
        ref_val = sqrt(sum([x**2 for x in range(1, nshims + 1)]))
        NewImage(out_file='zshim.nii.gz', img_size=[sz, sz, sz, nshims], grad_dim=3,
                 grad_vals=(1, nshims), grad_steps=7, verbose=vb).run()
        NewImage(out_file='zshim_ref.nii.gz', img_size=[sz, sz, sz],
                 fill=ref_val, verbose=vb).run()
        ZShim(in_file='zshim.nii.gz', zshims=nshims, verbose=vb).run()
        zdiff = Diff(in_file='zshim_zshim.nii.gz',
                     baseline='zshim_ref.nii.gz', noise=1, verbose=vb).run()
        self.assertLessEqual(zdiff.outputs.out_diff, 1.e-3)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
spinicist/QUIT
|
Python/Tests/test_perfusion.py
|
Python
|
mpl-2.0
| 4,560
|
[
"ASE"
] |
14993c722cf6d25309d808ba0511f3575406840e500cc5bdf98e168109f74932
|
from __future__ import unicode_literals
import os
from fabric.api import env, execute, task
from fabric.colors import red
from fabric.contrib.project import rsync_project
from fabric.utils import abort
from fabfile import run_local, cd, require_env, run, step
@task(default=True)
@require_env
def deploy():
    """Deploys frontend and backend code to the server if the checking step
    did not report any problems"""
    execute('check.deploy')
    # deploy code and styles without restarting in between,
    # then restart the server exactly once at the end
    execute('deploy.code', reload=False)
    execute('deploy.styles', reload=False)
    execute('deploy.restart_server')
def _deploy_styles_foundation5_gulp():
    """Build frontend assets with gulp and rsync the results to the server."""
    run_local('./node_modules/.bin/gulp build')
    for directory in ('bower_components', 'build'):
        rsync_project(
            local_dir='%(box_staticfiles)s/%(part)s' % dict(env, part=directory),
            remote_dir='%(box_domain)s/%(box_staticfiles)s/' % env,
            delete=True,
        )
def _deploy_styles_foundation5_grunt():
    """Build frontend assets with grunt and rsync the results to the server."""
    run_local('cd %(box_staticfiles)s && grunt build')
    for directory in ('bower_components', 'css'):
        rsync_project(
            local_dir='%(box_staticfiles)s/%(part)s' % dict(env, part=directory),
            remote_dir='%(box_domain)s/%(box_staticfiles)s/' % env,
            delete=True,
        )
def _deploy_styles_foundation4_bundler():
    """Compile compass stylesheets (compressed) and push them to the server."""
    run_local('bundle exec compass clean %(box_staticfiles)s')
    run_local('bundle exec compass compile -s compressed %(box_staticfiles)s')
    remote = '%(box_domain)s/%(box_staticfiles)s/' % env
    rsync_project(
        local_dir='%(box_staticfiles)s/stylesheets' % env,
        remote_dir=remote,
        delete=True,
    )
@task
@require_env
def styles(reload=True):
    """Compiles and compresses the CSS and deploys it to the server"""
    execute('check.deploy')
    step('\nBuilding and deploying assets...')
    # dispatch on whichever build tooling the project ships with
    if os.path.exists('gulpfile.js'):
        _deploy_styles_foundation5_gulp()
    elif os.path.exists('%(box_staticfiles)s/Gulpfile.js' % env):
        # NOTE(review): a Gulpfile triggering the *grunt*-based build looks
        # suspicious - confirm this mapping is intentional for legacy layouts
        _deploy_styles_foundation5_grunt()
    elif os.path.exists('%(box_staticfiles)s/config.rb' % env):
        _deploy_styles_foundation4_bundler()
    else:
        abort(red('I do not know how to deploy this frontend code.'))
    with cd('%(box_domain)s'):
        run('venv/bin/python manage.py collectstatic --noinput')
    if reload:
        execute('deploy.restart_server')
@task
@require_env
def code(reload=True):
    """Deploys the currently committed project state to the server, if there
    are no uncommitted changes on the server and the checking step did not
    report any problems"""
    execute('check.deploy')
    # XXX Maybe abort deployment if branch-to-be-deployed is not checked out?
    step('\nPushing changes...')
    run_local('git push origin %(box_branch)s')
    step('\nDeploying new code on server...')
    with cd('%(box_domain)s'):
        run('git fetch')
        run('git reset --hard origin/%(box_branch)s')
        # purge stale bytecode so deleted modules cannot be shadowed
        run('find . -name "*.pyc" -delete')
        # the local wheelhouse keeps installs fast and offline-capable
        run('venv/bin/pip install -r requirements/production.txt'
            ' --find-links file:///home/www-data/tmp/wheel/wheelhouse/')
        run('venv/bin/python manage.py migrate --noinput')
    if reload:
        execute('deploy.restart_server')
    execute('git.fetch_remote')
@task
@require_env
def restart_server():
    """Restart all supervised processes belonging to this site."""
    with cd('%(box_domain)s'):
        run('sctl restart %(box_domain)s:*')
|
lucacorsato/feincms-in-a-box
|
fbox/fabfile/deploy.py
|
Python
|
bsd-3-clause
| 3,356
|
[
"GULP"
] |
f5dc68a98f5c717f116bd7ea864ac8a327bbf2d40bceec001c7cbffd335a28b2
|
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
Implemented the following paper: DenseNet-BC
Gao Huang, Zhuang Liu, Kilian Q. Weinberger, Laurens van der Maaten. "Densely Connected Convolutional Networks"
Coded by Lin Xiong Mar-2, 2017
"""
import argparse,logging,os
import mxnet as mx
from symbol_densenet import DenseNet
# Configure root logging: INFO level, timestamped messages to the console.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
def multi_factor_scheduler(begin_epoch, epoch_size, step=None, factor=0.1):
    """Build an LR scheduler for the epoch milestones that are still ahead.

    @param begin_epoch epoch the (re)started training begins from
    @param epoch_size  number of batches per epoch
    @param step        epochs at which to decay the learning rate
                       (defaults to [30, 60, 90, 95, 115, 120])
    @param factor      multiplicative LR decay applied at each milestone
    @return an mx.lr_scheduler.MultiFactorScheduler, or None when every
            milestone lies at or before begin_epoch

    Fix: the default for `step` was a mutable list literal; use the
    None-sentinel idiom instead (caller-visible behavior is unchanged).
    """
    if step is None:
        step = [30, 60, 90, 95, 115, 120]
    # translate the remaining epoch milestones into absolute batch counts
    step_ = [epoch_size * (x - begin_epoch) for x in step if x - begin_epoch > 0]
    return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if len(step_) else None
def main():
    """Train DenseNet-BC on the dataset selected by ``args.data_type``.

    Reads all configuration from the module-level ``args`` namespace,
    builds the network symbol and the train/val record iterators, then
    runs ``model.fit``.

    Consistency fix: the depth -> per-stage-units table was duplicated
    verbatim in all three dataset branches; it is now a single mapping,
    as is the dataset -> class-count lookup.
    """
    # per-stage dense-block sizes for DenseNet-BC (Huang et al., 2017)
    depth_to_units = {
        121: [6, 12, 24, 16],
        169: [6, 12, 32, 32],
        201: [6, 12, 48, 32],
        161: [6, 12, 36, 24],
    }
    # number of target classes for each supported dataset
    data_type_to_classes = {
        "imagenet": 1000,
        "vggface": 2613,
        "msface": 79051,
    }
    if args.data_type not in data_type_to_classes:
        raise ValueError("do not support {} yet".format(args.data_type))
    if args.depth not in depth_to_units:
        raise ValueError("no experiments done on detph {}, you can do it youself".format(args.depth))
    args.num_classes = data_type_to_classes[args.data_type]
    units = depth_to_units[args.depth]
    symbol = DenseNet(units=units, num_stage=4,
                      # DenseNet-161 uses growth rate 48 regardless of the CLI flag
                      growth_rate=48 if args.depth == 161 else args.growth_rate,
                      num_class=args.num_classes,
                      data_type=args.data_type, reduction=args.reduction,
                      drop_out=args.drop_out, bottle_neck=True,
                      bn_mom=args.bn_mom, workspace=args.workspace)
    kv = mx.kvstore.create(args.kv_store)
    devs = mx.cpu() if args.gpus is None else [mx.gpu(int(i)) for i in args.gpus.split(',')]
    epoch_size = max(int(args.num_examples / args.batch_size / kv.num_workers), 1)
    begin_epoch = args.model_load_epoch if args.model_load_epoch else 0
    if not os.path.exists("./model"):
        os.mkdir("./model")
    model_prefix = "model/densenet-{}-{}-{}".format(args.data_type, args.depth, kv.rank)
    checkpoint = mx.callback.do_checkpoint(model_prefix)
    arg_params = None
    aux_params = None
    if args.retrain:
        # resume parameters from a previously saved checkpoint
        _, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.model_load_epoch)
    train = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(args.data_dir, "train.rec") if args.data_type == 'cifar10' else
                              os.path.join(args.data_dir, "train_256_q90.rec") if args.aug_level == 1
                              else os.path.join(args.data_dir, "train_480_q90.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        data_shape          = (3, 32, 32) if args.data_type == "cifar10" else (3, 224, 224),
        batch_size          = args.batch_size,
        pad                 = 4 if args.data_type == "cifar10" else 0,
        fill_value          = 127,  # only used when pad is valid
        rand_crop           = True,
        max_random_scale    = 1.0,  # 480 with imagenet and vggface, 384 with msface, 32 with cifar10
        min_random_scale    = 1.0 if args.data_type == "cifar10" else 1.0 if args.aug_level == 1 else 0.667,  # 256.0/480.0=0.533, 256.0/384.0=0.667
        max_aspect_ratio    = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 0.25,
        random_h            = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 36,  # 0.4*90
        random_s            = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50,  # 0.4*127
        random_l            = 0 if args.data_type == "cifar10" else 0 if args.aug_level == 1 else 50,  # 0.4*127
        max_rotate_angle    = 0 if args.aug_level <= 2 else 10,
        max_shear_ratio     = 0 if args.aug_level <= 2 else 0.1,
        rand_mirror         = True,
        shuffle             = True,
        num_parts           = kv.num_workers,
        part_index          = kv.rank)
    val = mx.io.ImageRecordIter(
        path_imgrec         = os.path.join(args.data_dir, "val.rec") if args.data_type == 'cifar10' else
                              os.path.join(args.data_dir, "val_256_q90.rec"),
        label_width         = 1,
        data_name           = 'data',
        label_name          = 'softmax_label',
        batch_size          = args.batch_size,
        data_shape          = (3, 32, 32) if args.data_type == "cifar10" else (3, 224, 224),
        rand_crop           = False,
        rand_mirror         = False,
        num_parts           = kv.num_workers,
        part_index          = kv.rank)
    model = mx.model.FeedForward(
        ctx                 = devs,
        symbol              = symbol,
        arg_params          = arg_params,
        aux_params          = aux_params,
        num_epoch           = 200 if args.data_type == "cifar10" else 125,
        begin_epoch         = begin_epoch,
        learning_rate       = args.lr,
        momentum            = args.mom,
        wd                  = args.wd,
        optimizer           = 'nag',
        initializer         = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2),
        lr_scheduler        = multi_factor_scheduler(begin_epoch, epoch_size, step=[220, 260, 280], factor=0.1)
                              if args.data_type == 'cifar10' else
                              multi_factor_scheduler(begin_epoch, epoch_size, step=[30, 60, 90, 95, 115, 120], factor=0.1),
        )
    model.fit(
        X                   = train,
        eval_data           = val,
        # top-5 accuracy only makes sense for the large-class datasets
        eval_metric         = ['acc'] if args.data_type == 'cifar10' else
                              ['acc', mx.metric.create('top_k_accuracy', top_k = 5)],
        kvstore             = kv,
        batch_end_callback  = mx.callback.Speedometer(args.batch_size, args.frequent),
        epoch_end_callback  = checkpoint)
if __name__ == "__main__":
    # Command-line front end for DenseNet-BC training; parses options,
    # sets up file logging, then hands off to main().
    parser = argparse.ArgumentParser(description="command for training DenseNet-BC")
    parser.add_argument('--gpus', type=str, default='0', help='the gpus will be used, e.g "0,1,2,3"')
    parser.add_argument('--data-dir', type=str, default='./data/imagenet/', help='the input data directory')
    parser.add_argument('--data-type', type=str, default='imagenet', help='the dataset type')
    parser.add_argument('--list-dir', type=str, default='./', help='the directory which contain the training list file')
    parser.add_argument('--lr', type=float, default=0.1, help='initialization learning reate')
    parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
    parser.add_argument('--bn-mom', type=float, default=0.9, help='momentum for batch normlization')
    parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
    parser.add_argument('--batch-size', type=int, default=256, help='the batch size')
    parser.add_argument('--growth-rate', type=int, default=32, help='the growth rate of DenseNet')
    parser.add_argument('--drop-out', type=float, default=0.2, help='the probability of an element to be zeroed')
    parser.add_argument('--reduction', type=float, default=0.5, help='the compression ratio for TransitionBlock')
    parser.add_argument('--workspace', type=int, default=512, help='memory space size(MB) used in convolution, if xpu '
                        ' memory is oom, then you can try smaller vale, such as --workspace 256')
    parser.add_argument('--depth', type=int, default=50, help='the depth of resnet')
    parser.add_argument('--num-classes', type=int, default=1000, help='the class number of your task')
    parser.add_argument('--aug-level', type=int, default=2, choices=[1, 2, 3],
                        help='level 1: use only random crop and random mirror\n'
                             'level 2: add scale/aspect/hsv augmentation based on level 1\n'
                             'level 3: add rotation/shear augmentation based on level 2')
    parser.add_argument('--num-examples', type=int, default=1281167, help='the number of training examples')
    parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
    parser.add_argument('--model-load-epoch', type=int, default=0,
                        help='load the model on an epoch using the model-load-prefix')
    parser.add_argument('--frequent', type=int, default=50, help='frequency of logging')
    parser.add_argument('--retrain', action='store_true', default=False, help='true means continue training')
    args = parser.parse_args()
    # Log to ./log/log-densenet-<dataset>-<depth>.log in addition to the
    # handlers already attached to `logger` (configured earlier in the file).
    # NOTE(review): exists()+mkdir is racy under concurrent launches --
    # os.makedirs with exist_ok would be safer; confirm target Python version.
    if not os.path.exists("./log"):
        os.mkdir("./log")
    hdlr = logging.FileHandler('./log/log-densenet-{}-{}.log'.format(args.data_type, args.depth))
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logging.info(args)
    main()
|
bruinxiong/densenet.mxnet
|
train_densenet.py
|
Python
|
apache-2.0
| 11,053
|
[
"Gaussian"
] |
eba56cd87cdc8c4c3669309ff0f799a54908cd2aeb0462fd8bc2796221a017ed
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Dynamic Programming algorithms for general usage.
This module contains classes which implement Dynamic Programming
algorithms that can be used generally.
"""
from Bio._py3k import range
class AbstractDPAlgorithms(object):
    """Abstract base class to calculate forward and backward probabilities.

    This class should not be instantiated directly, but should be used
    through a derived class which implements proper scaling of variables.

    It encapsulates the basic forward and backward algorithms, and lets
    derived classes deal with the numerical problems of multiplying many
    small probabilities.

    Derived classes must implement:

    o _forward_recursion -- Calculate the forward values in the recursion
    using some kind of technique for preventing underflow errors.

    o _backward_recursion -- Calculate the backward values in the recursion
    step using some technique to prevent underflow errors.
    """

    def __init__(self, markov_model, sequence):
        """Initialize to calculate forward and backward probabilities.

        Arguments:

        o markov_model -- The current Markov model we are working with.

        o sequence -- A training sequence containing a set of emissions.
        """
        self._mm = markov_model
        self._seq = sequence

    def _forward_recursion(self, cur_state, sequence_pos, forward_vars):
        """Calculate the forward recursion value (abstract).

        Subclasses apply their own underflow-prevention strategy here.
        """
        raise NotImplementedError("Subclasses must implement")

    def forward_algorithm(self):
        """Calculate sequence probability using the forward algorithm.

        This implements the forward algorithm, as described on p57-58 of
        Durbin et al.

        Returns:

        o A dictionary containing the forward variables. This has keys of the
        form (state letter, position in the training sequence), and values
        containing the calculated forward variable.

        o The calculated probability of the sequence.
        """
        # all of the different letters that the state path can be in
        state_letters = self._seq.states.alphabet.letters

        # -- initialize the algorithm
        #
        # NOTE: The index numbers are one less than what is given in Durbin
        # et al, since we are indexing the sequence going from 0 to
        # (Length - 1), not 1 to Length as in Durbin et al.
        forward_var = {}
        # f_{0}(0) = 1
        forward_var[(state_letters[0], -1)] = 1
        # f_{k}(0) = 0, for k > 0
        for k in range(1, len(state_letters)):
            forward_var[(state_letters[k], -1)] = 0

        # -- now do the recursion step over the training sequence
        # Recursion step: (i = 1 .. L)
        for i in range(len(self._seq.emissions)):
            for main_state in state_letters:
                # delegate to the subclass, which handles underflow;
                # None signals "no transition possible" and is skipped
                forward_value = self._forward_recursion(main_state, i,
                                                        forward_var)
                if forward_value is not None:
                    forward_var[(main_state, i)] = forward_value

        # -- termination step - calculate the probability of the sequence
        first_state = state_letters[0]
        seq_prob = 0
        for state_item in state_letters:
            # f_{k}(L)
            forward_value = forward_var[(state_item,
                                         len(self._seq.emissions) - 1)]
            # a_{k0}
            transition_value = self._mm.transition_prob[(state_item,
                                                         first_state)]
            seq_prob += forward_value * transition_value

        return forward_var, seq_prob

    def _backward_recursion(self, cur_state, sequence_pos, forward_vars):
        """Calculate the backward recursion value (abstract).

        Subclasses apply their own underflow-prevention strategy here.
        """
        raise NotImplementedError("Subclasses must implement")

    def backward_algorithm(self):
        """Calculate sequence probability using the backward algorithm.

        This implements the backward algorithm, as described on p58-59 of
        Durbin et al.

        Returns:

        o A dictionary containing the backwards variables. This has keys
        of the form (state letter, position in the training sequence),
        and values containing the calculated backward variable.
        """
        # all of the different letters that the state path can be in
        state_letters = self._seq.states.alphabet.letters

        # -- initialize the algorithm (same 0-based indexing note as in
        # forward_algorithm applies here)
        backward_var = {}
        # b_{k}(L) = a_{k0} for all k
        for state in state_letters:
            backward_var[(state, len(self._seq.emissions) - 1)] = \
                self._mm.transition_prob[(state, state_letters[0])]

        # -- recursion: loop over the training sequence backwards
        # Recursion step: (i = L - 1 ... 1)
        all_indexes = list(range(len(self._seq.emissions) - 1))
        all_indexes.reverse()
        for i in all_indexes:
            for main_state in state_letters:
                # None signals "no transition possible" and is skipped
                backward_value = self._backward_recursion(main_state, i,
                                                          backward_var)
                if backward_value is not None:
                    backward_var[(main_state, i)] = backward_value

        # skip the termination step to avoid recalculations -- you should
        # get sequence probabilities using the forward algorithm
        return backward_var
class ScaledDPAlgorithms(AbstractDPAlgorithms):
    """Implement forward and backward algorithms using a rescaling approach.

    This scales the f and b variables, so that they remain within a
    manageable numerical interval during calculations. This approach is
    described in Durbin et al. on p 78.

    This approach is a little more straightforward then log transformation
    but may still give underflow errors for some types of models. In these
    cases, the LogDPAlgorithms class should be used.
    """

    def __init__(self, markov_model, sequence):
        """Initialize the scaled approach to calculating probabilities.

        Arguments:

        o markov_model -- The current Markov model we are working with.

        o sequence -- A TrainingSequence object that must have a
        set of emissions to work with.
        """
        AbstractDPAlgorithms.__init__(self, markov_model, sequence)

        # Cache of scaling values keyed by sequence position, shared by
        # the forward and backward recursions so each s value is only
        # computed once.
        self._s_values = {}

    def _calculate_s_value(self, seq_pos, previous_vars):
        """Calculate the next scaling variable for a sequence position.

        This utilizes the approach of choosing s values such that the
        sum of all of the scaled f values is equal to 1.

        Arguments:

        o seq_pos -- The current position we are at in the sequence.

        o previous_vars -- All of the forward or backward variables
        calculated so far.

        Returns:

        o The calculated scaling variable for the sequence item.
        """
        # all of the different letters the state can have
        state_letters = self._seq.states.alphabet.letters

        # loop over all of the possible states, accumulating
        # sum_k e_k(x_i) * sum_j f_j(i-1) * a_{jk}
        s_value = 0
        for main_state in state_letters:
            emission = self._mm.emission_prob[(main_state,
                                               self._seq.emissions[seq_pos])]

            # now sum over all of the previous vars and transitions
            trans_and_var_sum = 0
            for second_state in self._mm.transitions_from(main_state):
                # the value of the previous f or b value
                var_value = previous_vars[(second_state, seq_pos - 1)]

                # the transition probability
                trans_value = self._mm.transition_prob[(second_state,
                                                        main_state)]

                trans_and_var_sum += (var_value * trans_value)

            s_value += (emission * trans_and_var_sum)

        return s_value

    def _forward_recursion(self, cur_state, sequence_pos, forward_vars):
        """Calculate the value of the forward recursion.

        Arguments:

        o cur_state -- The letter of the state we are calculating the
        forward variable for.

        o sequence_pos -- The position we are at in the training seq.

        o forward_vars -- The current set of forward variables

        Returns the scaled forward value, or None if the state has no
        incoming transitions.
        """
        # calculate the s value, if we haven't done so already (ie. during
        # a previous forward or backward recursion)
        if sequence_pos not in self._s_values:
            self._s_values[sequence_pos] = \
                self._calculate_s_value(sequence_pos, forward_vars)

        # e_{l}(x_{i})
        seq_letter = self._seq.emissions[sequence_pos]
        cur_emission_prob = self._mm.emission_prob[(cur_state, seq_letter)]
        # divide by the scaling value to keep the magnitudes bounded
        scale_emission_prob = (float(cur_emission_prob) /
                               float(self._s_values[sequence_pos]))

        # loop over all of the possible states at the position
        state_pos_sum = 0
        have_transition = 0
        for second_state in self._mm.transitions_from(cur_state):
            have_transition = 1

            # get the previous forward_var values
            # f_{k}(i - 1)
            prev_forward = forward_vars[(second_state, sequence_pos - 1)]

            # a_{kl}
            cur_trans_prob = self._mm.transition_prob[(second_state,
                                                       cur_state)]
            state_pos_sum += prev_forward * cur_trans_prob

        # if we have the possibility of having a transition
        # return the recursion value
        if have_transition:
            return (scale_emission_prob * state_pos_sum)
        else:
            return None

    def _backward_recursion(self, cur_state, sequence_pos, backward_vars):
        """Calculate the value of the backward recursion.

        Arguments:

        o cur_state -- The letter of the state we are calculating the
        forward variable for.

        o sequence_pos -- The position we are at in the training seq.

        o backward_vars -- The current set of backward variables

        Returns the scaled backward value, or None if the state has no
        transitions.
        """
        # calculate the s value, if we haven't done so already (ie. during
        # a previous forward or backward recursion)
        if sequence_pos not in self._s_values:
            self._s_values[sequence_pos] = \
                self._calculate_s_value(sequence_pos, backward_vars)

        # loop over all of the possible states at the position
        state_pos_sum = 0
        have_transition = 0
        for second_state in self._mm.transitions_from(cur_state):
            have_transition = 1
            # e_{l}(x_{i + 1})
            # NOTE(review): the emission here is looked up for cur_state,
            # whereas Durbin et al's backward recursion uses the emission
            # of the successor state (second_state) -- confirm this is
            # intentional before relying on these values.
            seq_letter = self._seq.emissions[sequence_pos + 1]
            cur_emission_prob = self._mm.emission_prob[(cur_state, seq_letter)]

            # get the previous backward_var value
            # b_{l}(i + 1)
            prev_backward = backward_vars[(second_state, sequence_pos + 1)]

            # the transition probability -- a_{kl}
            cur_transition_prob = self._mm.transition_prob[(cur_state,
                                                            second_state)]

            state_pos_sum += (cur_emission_prob * prev_backward *
                              cur_transition_prob)

        # if we have a probability for a transition, return it
        if have_transition:
            return (state_pos_sum / float(self._s_values[sequence_pos]))
        # otherwise we have no probability (ie. we can't do this transition)
        # and return None
        else:
            return None
class LogDPAlgorithms(AbstractDPAlgorithms):
    """Implement forward and backward algorithms using a log approach.

    This uses the approach of calculating the sum of log probabilities
    using a lookup table for common values.

    XXX This is not implemented yet!
    """

    def __init__(self, markov_model, sequence):
        # Placeholder: instantiating this class always raises.
        raise NotImplementedError("Haven't coded this yet...")
|
zjuchenyuan/BioWeb
|
Lib/Bio/HMM/DynamicProgramming.py
|
Python
|
mit
| 12,779
|
[
"Biopython"
] |
f8ddd80a1fe62a820ddfcb0ff5efff4837f0b66e41b708466f7cf0206ee60acf
|
"""
@name: PyHouse/src/Modules/_test/test_Irrigation.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2015-2017 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 31, 2015
@Summary:
"""
__updated__ = '2016-11-21'
from twisted.trial import unittest, reporter, runner
from Modules.Housing.Irrigation import test as I_test
class Z_Suite(unittest.TestCase):
    """Run the Irrigation package's own test suite and report the results."""

    def setUp(self):
        # Loader instance kept on the fixture, mirroring the other Z_Suite
        # runners in this project.
        self.m_test = runner.TestLoader()

    def test_Irrigation(self):
        suite = runner.TestLoader().loadPackage(I_test)
        result = reporter.Reporter()
        suite.run(result)
        result.done()
        print('\n====================\n*** test_Irrigation ***\n{}\n'.format(result))
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/_test/test_Irrigation.py
|
Python
|
mit
| 769
|
[
"Brian"
] |
2b4a4dc1693bdd291ea453b90bc5a8b32dcfa9d8ba551c36da202462c7d4b47e
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pathlib import Path
from copy import copy
from nose import tools as nt
from neurom.check.runner import CheckRunner
from neurom.exceptions import ConfigError
from pathlib import Path
# Location of the SWC test morphologies, resolved relative to this file.
SWC_PATH = Path(__file__).parent.parent.parent.parent / 'test_data/swc/'
NRN_PATH_0 = str(Path(SWC_PATH, 'Neuron.swc'))
NRN_PATH_1 = str(Path(SWC_PATH, 'Neuron_zero_length_sections.swc'))
NRN_PATH_2 = str(Path(SWC_PATH, 'Single_apical.swc'))
NRN_PATH_3 = str(Path(SWC_PATH, 'Single_basal.swc'))
NRN_PATH_4 = str(Path(SWC_PATH, 'Single_axon.swc'))
NRN_PATH_5 = str(Path(SWC_PATH, 'Single_apical_no_soma.swc'))

# CheckRunner configuration exercising every structural and neuron-level
# check; 'options' carries per-check threshold arguments.
CONFIG = {
    'checks': {
        'structural_checks': [
            'is_single_tree',
            'has_soma_points',
            'has_sequential_ids',
            'has_increasing_ids',
            'has_valid_soma',
            'has_valid_neurites'
        ],
        'neuron_checks': [
            'has_basal_dendrite',
            'has_axon',
            'has_apical_dendrite',
            'has_all_nonzero_segment_lengths',
            'has_all_nonzero_section_lengths',
            'has_all_nonzero_neurite_radii',
            'has_nonzero_soma_radius'
        ]
    },
    'options': {
        'has_nonzero_soma_radius': 0.0,
        "has_all_nonzero_neurite_radii": 0.007,
        "has_all_nonzero_segment_lengths": 0.01,
        "has_all_nonzero_section_lengths": [0.01]
    },
}

# Same configuration with colored output enabled.
# NOTE(review): copy() is shallow, so CONFIG_COLOR shares the nested
# 'checks' and 'options' dicts with CONFIG; only the added top-level
# 'color' key differs.
CONFIG_COLOR = copy(CONFIG)
CONFIG_COLOR['color'] = True
# Expected CheckRunner summaries, one per test morphology above.
# REF_0: complete neuron, everything passes.
REF_0 = {
    'files': {
        NRN_PATH_0: {
            "Is single tree": True,
            "Has soma points": True,
            "Has sequential ids": True,
            "Has increasing ids": True,
            "Has valid soma": True,
            "Has valid neurites": True,
            "Has basal dendrite": True,
            "Has axon": True,
            "Has apical dendrite": True,
            "Has all nonzero segment lengths": True,
            "Has all nonzero section lengths": True,
            "Has all nonzero neurite radii": True,
            "Has nonzero soma radius": True,
            "ALL": True
        }
    },
    "STATUS": "PASS"
}

# REF_1: zero-length sections fail the segment/section length checks.
REF_1 = {
    'files': {
        NRN_PATH_1: {
            "Is single tree": True,
            "Has soma points": True,
            "Has sequential ids": True,
            "Has increasing ids": True,
            "Has valid soma": True,
            "Has valid neurites": True,
            "Has basal dendrite": True,
            "Has axon": True,
            "Has apical dendrite": True,
            "Has all nonzero segment lengths": False,
            "Has all nonzero section lengths": False,
            "Has all nonzero neurite radii": True,
            "Has nonzero soma radius": True,
            "ALL": False
        }
    },
    "STATUS": "FAIL"
}

# REF_2: apical-only morphology lacks basal dendrite and axon.
REF_2 = {
    'files': {
        NRN_PATH_2: {
            "Is single tree": True,
            "Has soma points": True,
            "Has sequential ids": True,
            "Has increasing ids": True,
            "Has valid soma": True,
            "Has valid neurites": True,
            "Has basal dendrite": False,
            "Has axon": False,
            "Has apical dendrite": True,
            "Has all nonzero segment lengths": False,
            "Has all nonzero section lengths": True,
            "Has all nonzero neurite radii": True,
            "Has nonzero soma radius": True,
            "ALL": False
        }
    },
    "STATUS": "FAIL"
}

# REF_3: basal-only morphology; also fails the soma radius threshold.
REF_3 = {
    'files': {
        NRN_PATH_3: {
            "Is single tree": True,
            "Has soma points": True,
            "Has sequential ids": True,
            "Has increasing ids": True,
            "Has valid soma": True,
            "Has valid neurites": True,
            "Has basal dendrite": True,
            "Has axon": False,
            "Has apical dendrite": False,
            "Has all nonzero segment lengths": False,
            "Has all nonzero section lengths": True,
            "Has all nonzero neurite radii": True,
            "Has nonzero soma radius": False,
            "ALL": False
        }
    },
    "STATUS": "FAIL"
}

# REF_4: axon-only morphology lacks both dendrite types.
REF_4 = {
    'files': {
        NRN_PATH_4: {
            "Is single tree": True,
            "Has soma points": True,
            "Has sequential ids": True,
            "Has increasing ids": True,
            "Has valid soma": True,
            "Has valid neurites": True,
            "Has basal dendrite": False,
            "Has axon": True,
            "Has apical dendrite": False,
            "Has all nonzero segment lengths": False,
            "Has all nonzero section lengths": True,
            "Has all nonzero neurite radii": True,
            "Has nonzero soma radius": True,
            "ALL": False
        }
    },
    "STATUS": "FAIL"
}

# REF_5: no soma -- structural checks fail, so neuron checks are skipped.
REF_5 = {
    'files': {
        NRN_PATH_5: {
            "Is single tree": True,
            "Has soma points": False,
            "Has sequential ids": True,
            "Has increasing ids": True,
            "Has valid soma": False,
            "Has valid neurites": False,
            "ALL": False
        }
    },
    "STATUS": "FAIL"
}
def test_ok_neuron():
    """A complete neuron passes every configured check."""
    summary = CheckRunner(CONFIG).run(NRN_PATH_0)
    nt.assert_equal(summary, REF_0)
def test_ok_neuron_color():
    """Colored output does not change the summary content."""
    summary = CheckRunner(CONFIG_COLOR).run(NRN_PATH_0)
    nt.assert_equal(summary, REF_0)
def test_zero_length_sections_neuron():
    """Zero-length sections fail the length checks."""
    summary = CheckRunner(CONFIG).run(NRN_PATH_1)
    nt.assert_equal(summary, REF_1)
def test_single_apical_neuron():
    """Apical-only morphology fails the basal/axon presence checks."""
    summary = CheckRunner(CONFIG).run(NRN_PATH_2)
    nt.assert_equal(summary, REF_2)
def test_single_basal_neuron():
    """Basal-only morphology fails the axon/apical presence checks."""
    summary = CheckRunner(CONFIG).run(NRN_PATH_3)
    nt.assert_equal(summary, REF_3)
def test_single_axon_neuron():
    """Axon-only morphology fails both dendrite presence checks."""
    summary = CheckRunner(CONFIG).run(NRN_PATH_4)
    nt.assert_equal(summary, REF_4)
def test_single_apical_no_soma():
    """Missing soma fails the structural checks."""
    summary = CheckRunner(CONFIG).run(NRN_PATH_5)
    nt.assert_equal(summary, REF_5)
def test_directory_input():
    """Running on a directory checks every morphology inside it."""
    summary = CheckRunner(CONFIG).run(SWC_PATH)
    nt.eq_(summary['files'][NRN_PATH_0]['Has axon'], True)
    nt.eq_(summary['files'][NRN_PATH_2]['Has axon'], False)
@nt.raises(IOError)
def test_invalid_data_path_raises_IOError():
    """A nonexistent input path raises IOError."""
    _ = CheckRunner(CONFIG).run('foo/bar/baz')
def test__sanitize_config():
    """_sanitize_config rejects empty configs and fills in defaults."""
    # a config without a 'checks' section is rejected
    nt.assert_raises(ConfigError, CheckRunner._sanitize_config, {})

    # an empty 'checks' section is expanded to the minimal full config
    sanitized = CheckRunner._sanitize_config({'checks': {}})
    nt.eq_(sanitized, {'checks': {'structural_checks': [],
                                  'neuron_checks': []},
                       'options': {},
                       'color': False})

    # a fully specified config passes through unchanged
    nt.eq_(CONFIG, CheckRunner._sanitize_config(CONFIG))
|
wizmer/NeuroM
|
neurom/check/tests/test_runner.py
|
Python
|
bsd-3-clause
| 8,698
|
[
"NEURON"
] |
9d11eb001b03dd244e5b262861f359d2c58e2838ff24072740f39ce2df8b0fc5
|
from common import *
from ex.plott import *
from ex.pp import *
class GMM:
    '''Gaussian mixture model.

    Holds component weights, means and covariances, and provides
    sampling (GenData), EM fitting (Fit), persistence (Save/Load)
    and a 2D scatter visualization (Plot).
    '''
    def __init__(self, weights = None, means = None, covars = None):
        '''Initialize the parameters (all optional; leave unset when the
        model will be obtained via Fit or Load).

        weights: size [K] component weights
        means: size [K, dim] each row is a mean vector
        covars: size [K, dim, dim]; a scalar is expanded to
        scalar*I shared by all components, and a single [dim, dim]
        matrix is replicated across the K components
        '''
        if weights is not None:
            self.weights = arr(weights)
            self.means = arr(means)
            covars = arr(covars)
            if covars.ndim == 0:
                # scalar -> isotropic covariance shared by all components
                self.covars = repmat(eye(self.dim())*covars, (len(self.weights), 1, 1))
            elif covars.ndim == 2:
                # one matrix -> replicated for every component
                self.covars = repmat(covars, (len(self.weights), 1, 1))
            else:
                self.covars = covars
            check(len(self.weights) == len(self.means) and len(self.weights) == len(self.covars), 'parameter wrong')

    def dim(self):
        # dimensionality of the data space
        return self.means.shape[1]

    def GenData(self, n):
        '''Generate n samples from the current model.

        Returns (data, labels) where labels[i] is the index of the
        component that produced data[i].
        '''
        ncomp = len(self.weights)
        dim = self.dim()
        # normalize the weights so they form a valid distribution
        weights = float64(self.weights)/self.weights.sum()
        labels = randm(weights, n);
        data = zeros((n, dim))
        for c in range(ncomp):
            idx = (labels == c)
            data[idx] = random.multivariate_normal(self.means[c], self.covars[c], (idx.sum(),))
        return (data, labels)

    def Fit(self, X, K, options = None):
        '''Fit a K-component Gaussian mixture model to X via EM.

        X: [n, dim] data matrix. options keys: 'init', 'epsilon'
        (relative likelihood-gain stopping threshold), 'maxIter',
        'verbose'. Returns ((pi, mu, sigma), log-likelihood).
        '''
        init, epsilon, maxIter, verbose = GetOptions(
            options, 'init', 'kmeans',
            'epsilon', 1e-5, 'maxIter', 50, 'verbose', True)

        n, dim = X.shape
        K = int(K)

        # initialize by assigning points to K randomly chosen centers
        centers = X[RI(K, len(X))]
        cl = vq.vq(X, centers)[0]
        mu = zeros((K, dim))
        sigma = zeros((K, dim, dim))
        for k in range(K):
            mu[k], sigma[k] = MeanCov(X[cl == k])
        pi = ones(K)/K;

        # EM
        tic('gmm')
        l = zeros(maxIter)
        lnpdf = zeros((K, n))
        for k in range(K):
            lnpdf[k] = mvnpdf(X, mu[k], sigma[k], True)
        for iter in range(maxIter):
            # E step: responsibilities. The per-sample shift so the largest
            # exponent becomes 500 prevents underflow of small components
            # (exp(500) is still representable in float64); the constant
            # cancels in the column normalization below.
            gama = col(pi)*exp(lnpdf + (500 - lnpdf.max(0)))
            gama = Normalize(gama, 's1', 'col')[0]
            # M step: re-estimate weights, means, covariances
            pi = gama.sum(1)
            pi = pi/pi.sum()
            for k in range(K):
                mu[k], sigma[k] = MeanCov(X, gama[k])

            # vs = arr([trace(ss) for ss in sigma])
            # vthresh = vs.max()*1e-2
            # for k in range(K):
            # if vs[k] < vthresh:
            # log.warn('Reset collapsing component')
            # mu[k], sigma[k] = MeanCov(X[RI(len(X)/K, len(X))])
            # if iter > 0: l[iter - 1] = -inf

            # update the Gaussian pdf
            for k in range(K):
                lnpdf[k] = mvnpdf(X, mu[k], sigma[k], True)
            # compute likelihood and stop when the per-sample gain is small
            l[iter] = GMMLikelihood(lnpdf, (pi, mu, sigma)).sum()
            if verbose:
                log.info('--Iter = %d, L = %g, Time elapsed = %0.2f' % (iter, l[iter], toc('gmm', show = False)))
            if iter > 0 and (l[iter] - l[iter - 1])/n < epsilon:
                break

        self.means = mu
        self.covars = sigma
        self.weights = pi
        self.n = n
        self.L = l[iter]

        return ((pi, mu, sigma), l[iter])

    def Save(self, filename):
        '''Save the model to file (.pkl pickles the object, .mat saves
        the attribute dict).
        '''
        format=filename.split('.')[-1].lower()
        check(format in ['pkl','mat'], 'unknown file format')

        if format == 'pkl':
            SavePickle(filename, self)
        else:
            SaveMat(filename, self.__dict__)

    def Load(self, filename):
        '''Load a model from a .pkl or .mat file written by Save.'''
        format=filename.split('.')[-1].lower()
        check(format in ['pkl','mat'], 'unknown file format')

        if format == 'pkl':
            o=LoadPickles(filename)
            CopyAttributes(o, self)
        else:
            data=LoadMat(filename)
            CopyAttributes(data, self)

    def Plot(self, n = 1000):
        # scatter-plot n samples drawn from the model, colored by component
        data, labels = self.GenData(n)
        scatter(data[:,0], data[:,1], c = labels, edgecolors = 'none')
        draw()
def FitGMM_1(X, K = None, options = None):
    '''Run a single GMM fit with a fresh random seed; worker for FitGMM.

    Arguments may also arrive packed as one tuple (X, K, options) so the
    function can be dispatched directly as a parallel job.
    '''
    if K is None and istuple(X):
        X, K, options = X
    SeedRand()
    model = GMM()
    return model.Fit(X, K, options)
def FitGMM(X, K = None, options = None):
    '''Fit a K-component GMM with multiple random restarts, keeping the best.

    Arguments may also arrive packed as one tuple (X, K, options).
    Options: 'ntry' restarts, 'nproc' parallel workers, 'verbose'.

    Returns ((pi, mu, sigma), L) for the restart with the highest
    log-likelihood L.
    '''
    if istuple(X) and K is None:
        X, K, options = X

    ntry, nproc, verbose = GetOptions(
        options, 'ntry', 10, 'nproc', 1, 'verbose', True)

    log.info('GMM for {0} data. K = {1}'.format(
        X.shape, K))

    # run ntry independent fits (possibly in parallel) ...
    jobs = [(X, K, options)]*ntry
    R, L = unzip(ProcJobs(FitGMM_1, jobs, nproc))

    # ... and keep the one with the highest likelihood
    ii = argmax(L)
    L = L[ii]
    R = R[ii]

    return (R, L) # (pi, mu, sigma)
def GMMLikelihood(lnpdf, params):
    '''Per-sample log-likelihood under the mixture.

    lnpdf: [K, n] per-component log densities; params: (pi, mu, sigma).
    Computes ln(sum_k pi_k * pdf_k(x)) for each sample.
    '''
    pi, mu, sigma = params
    # logsafe is presumably a small epsilon guarding ln(0) -- defined in
    # the star-imported common module; confirm there.
    return ln(mul(row(pi), exp(lnpdf)) + logsafe)
def GMMBIC(params, L, n, rou = 1):
    '''Bayesian information criterion score for a fitted GMM.

    params: (pi, mu, sigma) parameter arrays; L: total log-likelihood;
    n: number of samples; rou: penalty weight. Higher scores are better.
    '''
    pi, mu, sigma = params
    # free parameters: K-1 independent weights plus every mean and
    # covariance entry
    n_free = pi.size - 1 + mu.size + sigma.size
    return L - 0.5*log2(n)*n_free*rou
def FitGMM_BICSearch(X, Ks, options = None):
    '''Select the number of components by maximizing BIC over candidate Ks.

    X: [n, dim] data; Ks: iterable of candidate component counts.
    Options: 'bic_coeff' penalty weight, 'nproc_bic' parallel workers.

    Returns (gmm, L, stat) where gmm is a GMM built from the winning
    parameters, L its log-likelihood, and stat a [len(Ks), 3] array of
    (K, L, BIC) rows.
    '''
    rou, nproc_bic = GetOptions(options, 'bic_coeff', 1, 'nproc_bic', 1)

    n, dim = X.shape
    log.info('BIC search for {0} data with {1} processes'.format(X.shape, nproc_bic))

    # fit one mixture per candidate K (possibly in parallel) and score each
    jobs = [(X, k, options) for k in Ks]
    RL = ProcJobs(FitGMM, jobs, nproc_bic)
    BICs = [GMMBIC(rl[0], rl[1], n, rou) for rl in RL]
    R, L = unzip(RL)

    stat = hstack((col(arr(Ks)), col(arr(L)), col(arr(BICs)))) # (K, L, BIC)

    # keep the K with the highest BIC
    ii = argmax(BICs)
    R = R[ii]
    L = L[ii]
    log.info('K = {0} selected'.format(Ks[ii]))

    gmm = GMM(R[0], R[1], R[2])
    return (gmm, L, stat)
if __name__ == '__main__':
    # Demo / smoke test (Python 2): sample from a known 3-component GMM,
    # refit it, and verify that BIC model selection recovers K = 3.
    InitLog()

    fig = figure(1)
    subplot(fig, 121)
    gmm = GMM([1, 1, 1], [[0, 1], [1, 0], [0, 0]], 0.01)
    gmm.Plot(10000)
    print 'Original model'
    print gmm.means
    print gmm.covars

    subplot(fig, 122)
    X, labels = gmm.GenData(1000)
    # epsilon set hugely negative so EM runs the full maxIter iterations
    gmm.Fit(X, 3, {'epsilon':-10000000})
    gmm.Plot(10000)
    print 'Fitted model'
    print gmm.means
    print gmm.covars

    model, L, stat = FitGMM_BICSearch(X, arange(2, 6),
                                      {'nproc_bic':1, 'verbose':False})
    print stat
    # index 1 in arange(2, 6) corresponds to K = 3, the true model order
    test(argmax(stat[:,2]) == 1, 'GMM BIC search')

    show()
|
excelly/xpy-ml
|
ex/ml/gmm.py
|
Python
|
apache-2.0
| 6,367
|
[
"Gaussian"
] |
3e77bc1dee0316624c94af618e8ddf2d78577dce91f41ca85a7c50c8fca1e09a
|
"""
Authentication example using Flask-Login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This provides a simple example of using Flask-Login as the authentication
framework which can guard access to certain API endpoints.
This requires the following Python libraries to be installed:
* Flask
* Flask-Login
* Flask-Restless
* Flask-SQLAlchemy
* Flask-WTF
To install them using ``pip``, do::
pip install Flask Flask-SQLAlchemy Flask-Restless Flask-Login Flask-WTF
To use this example, run this package from the command-line. If you are
using Python 2.7 or later::
python -m authentication
If you are using Python 2.6 or earlier::
python -m authentication.__main__
Attempts to access the URL of the API for the :class:`User` class at
``http://localhost:5000/api/user`` will fail with an :http:statuscode:`401`
because you have not yet logged in. To log in, visit
``http://localhost:5000/login`` and login with username ``example`` and
password ``example``. Once you have successfully logged in, you may now
make :http:get:`http://localhost:5000/api/user` requests.
:copyright: 2012 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>
:license: GNU AGPLv3+ or BSD
"""
import os
import os.path

from flask import Flask, render_template, redirect, url_for
from flask import flash
from flask.ext.login import current_user, login_user, LoginManager, UserMixin
from flask.ext.restless import APIManager, ProcessingException, NO_CHANGE
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.wtf import PasswordField, SubmitField, TextField, Form
# Step 0: the database in this example is at './test.sqlite'.
# It is removed on every start so the example always begins clean.
DATABASE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                        'test.sqlite')
if os.path.exists(DATABASE):
    os.unlink(DATABASE)

# Step 1: setup the Flask application.
# DEBUG/TESTING are example-only settings; SECRET_KEY signs the session
# cookie Flask-Login relies on (regenerated on each start, so sessions
# do not survive restarts).
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['TESTING'] = True
app.config['SECRET_KEY'] = os.urandom(24)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///%s' % DATABASE

# Step 2: initialize extensions.
db = SQLAlchemy(app)
api_manager = APIManager(app, flask_sqlalchemy_db=db)
login_manager = LoginManager()
login_manager.setup_app(app)
# Step 3: create the user database model.
class User(db.Model, UserMixin):
    """User account model; UserMixin supplies the Flask-Login hooks."""
    # Primary key, also used by Flask-Login as the session user id.
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.Unicode)
    # NOTE(review): stored in plaintext -- acceptable only in this demo.
    password = db.Column(db.Unicode)
# Step 4: create the database and add a test user.
# Runs at import time; the example logs in with example/example.
db.create_all()
user1 = User(username=u'example', password=u'example')
db.session.add(user1)
db.session.commit()
# Step 5: this is required for Flask-Login.
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: map a session-stored user id to a User row."""
    return User.query.get(userid)
# Step 6: create the login form.
class LoginForm(Form):
    """Simple username/password login form (Flask-WTF)."""
    username = TextField('username')
    password = PasswordField('password')
    submit = SubmitField('Login')
# Step 7: create endpoints for the application, one for index and one for login
@app.route('/', methods=['GET'])
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form and authenticate submitted credentials.

    On success the user is logged in via Flask-Login and redirected to
    the index page; on failure the form is re-rendered with a flashed
    error message. (``flash`` requires the import added at the top of
    the file -- the original referenced it without importing it, which
    raised NameError on a failed login.)
    """
    form = LoginForm()
    if form.validate_on_submit():
        # NOTE(review): demo only -- this compares plaintext passwords;
        # a real application must store and compare password hashes.
        username, password = form.username.data, form.password.data
        user = User.query.filter_by(username=username,
                                    password=password).first()
        if user is not None:
            login_user(user)
            return redirect(url_for('index'))
        flash('Username and password pair not found')
    return render_template('login.html', form=form)
# Step 8: create the API for User with the authentication guard.
def auth_func(**kw):
    """Preprocessor guard: reject unauthenticated API requests with 401."""
    if not current_user.is_authenticated():
        # Flask-Restless converts this into the HTTP error response.
        raise ProcessingException(description='Not Authorized', code=401)

# Guard both single-object and collection GETs on the User endpoint.
api_manager.create_api(User, preprocessors=dict(GET_SINGLE=[auth_func],
                                                GET_MANY=[auth_func]))

# Step 9: configure and run the application
app.run()
# Step 10: visit http://localhost:5000/api/user in a Web browser. You will
# receive a "Not Authorized" response.
#
# Step 11: visit http://localhost:5000/login and enter username "example" and
# password "example". You will then be logged in.
#
# Step 12: visit http://localhost:5000/api/user again. This time you will get a
# response showing the objects in the User table of the database.
|
ternaris/flask-restless
|
examples/server_configurations/authentication/__main__.py
|
Python
|
bsd-3-clause
| 4,489
|
[
"VisIt"
] |
10653be80f9ad50ea342367a406d8542fb81461b54fdf1be309e0dedf04d5511
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_op
short_description: execute arbitrary OP commands on PANW devices (e.g. show interface all)
description: This module will allow user to pass and execute any supported OP command on the PANW device.
author: "Ivan Bojer (@ivanbojer)"
version_added: "2.5"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
notes:
- Checkmode is NOT supported.
- Panorama is NOT supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device or Panorama management console being configured.
required: true
username:
description:
- Username credentials to use for authentication.
required: false
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
cmd:
description:
- The OP command to be performed.
required: true
'''
EXAMPLES = '''
- name: show list of all interfaces
panos_op:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
cmd: 'show interfaces all'
- name: show system info
panos_op:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
cmd: 'show system info'
'''
# Return-value documentation rendered by ansible-doc. The stdout_xml
# description previously said "JSON formatted string" (copy-paste from
# stdout); its sample and the module code show it is the raw XML.
RETURN = '''
stdout:
    description: output of the given OP command as JSON formatted string
    returned: success
    type: str
    sample: "{system: {app-release-date: 2017/05/01 15:09:12}}"
stdout_xml:
    description: output of the given OP command as XML formatted string
    returned: success
    type: str
    sample: "<response status=success><result><system><hostname>fw2</hostname>"
'''
from ansible.module_utils.basic import AnsibleModule
# Optional third-party dependencies (pan-python, pandevice, xmltodict).
# Their absence is detected here and reported through module.fail_json()
# inside main() instead of crashing at import time.
try:
    import pan.xapi
    from pan.xapi import PanXapiError
    import pandevice
    from pandevice import base
    from pandevice import firewall
    from pandevice import panorama
    import xmltodict
    import json
    HAS_LIB = True
except ImportError:
    HAS_LIB = False
def main():
    """Execute an arbitrary operational (OP) command on a PAN-OS device.

    Reads connection parameters and the command from the Ansible module
    arguments, runs the command through pandevice, and returns the result
    both as JSON (``stdout``) and raw XML (``stdout_xml``).
    """
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(no_log=True),
        username=dict(default='admin'),
        api_key=dict(no_log=True),
        cmd=dict(required=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_one_of=[['api_key', 'password']])
    if not HAS_LIB:
        module.fail_json(msg='Missing required libraries.')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    cmd = module.params['cmd']

    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)

    changed = False
    try:
        xml_output = device.op(cmd, xml=True)
        changed = True
    except PanXapiError as exc:
        if 'non NULL value' in exc.message:
            # The API wants the final free-form argument quoted;
            # rewrap the last token in double quotes and retry once.
            cmd_array = cmd.split()
            cmd_array[-1] = '"%s"' % cmd_array[-1]
            cmd2 = ' '.join(cmd_array)
            try:
                xml_output = device.op(cmd2, xml=True)
                changed = True
            except PanXapiError as exc:
                module.fail_json(msg=exc.message)
        else:
            # BUG FIX: previously any other API error fell through the
            # handler, leaving xml_output unbound and crashing below with
            # UnboundLocalError; fail cleanly with the API message instead.
            module.fail_json(msg=exc.message)

    obj_dict = xmltodict.parse(xml_output)
    json_output = json.dumps(obj_dict)

    module.exit_json(changed=changed, msg="Done", stdout=json_output, stdout_xml=xml_output)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
anryko/ansible
|
lib/ansible/modules/network/panos/_panos_op.py
|
Python
|
gpl-3.0
| 5,064
|
[
"Galaxy"
] |
a8c93f62a430ed52b8c60180612fe2bf8d9322391a9c211fb95ed2a9c35ef486
|
"""
bvp1.py solves the 1D Poisson equation using FEniCS
Copyright (C) 2013 Greg von Winckel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Solve the 1D Poisson equation -u''=-1 with u(0)=0, u'(1)=1
The exact solution is x^2/2
"""
from dolfin import *
# Number of elements
nel = 20

# Left end point
xmin = 0

# Right end point
xmax = 1

# Polynomial order of trial/test functions
p = 2

# Create mesh and function space
mesh = IntervalMesh(nel,xmin,xmax)

# Define function space for this mesh using Continuous Galerkin
# (Lagrange) functions of order p on each element
V = FunctionSpace(mesh,"CG",p)

# Define boundary value expression; it is only evaluated where the
# Dirichlet condition is applied (x=0, where it equals 0)
u0 = Expression("x[0]")
# This imposes a Dirichlet condition at the point x=0
def Dirichlet_boundary(x, on_boundary):
    """Flag mesh points that lie on the x=0 boundary (within round-off)."""
    return on_boundary and abs(x[0]) < 1e-14
# Enforce u=u0 at x=0
bc = DirichletBC(V,u0,Dirichlet_boundary)

# Define variational problem: find u in V with u(0)=0 such that
# (grad u, grad v) = (f, v) + <g, v>_boundary for all test functions v
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-1)
g = Constant(1)
# Bilinear form (stiffness) and linear form (load + Neumann term g at x=1)
a = inner(grad(u),grad(v))*dx
L = f*v*dx+g*v*ds

# Compute solution
u = Function(V)
solve(a == L, u, bc)

# plot solution
plot(u)

# Dump solution to file in VTK format
file = File("bvp1.pvd")
file << u

# hold plot
interactive()
|
gregvw/FEniCS-examples
|
bvp1.py
|
Python
|
mit
| 1,840
|
[
"VTK"
] |
907e4cf2de9091c5bb034abc5f7ebbf83b4020bbaf7062f1e1fd352b5b039d2b
|
#!/usr/bin/env python
########################################################################
# File : tornado-start-CS
# Author : Louis MARTIN
########################################################################
# Just run this script to start Tornado and CS service
# Use dirac.cfg (or other cfg given in the command line) to change port
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import sys
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
    """Configure and launch a Tornado server hosting the DIRAC Configuration Server."""
    # Must be defined BEFORE any dirac import
    os.environ['DIRAC_USE_TORNADO_IOLOOP'] = "True"
    # DIRAC imports are deliberately deferred so the environment variable
    # above takes effect before the framework initializes its IO loop.
    from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceSection
    from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
    from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
    from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
    from DIRAC.Core.Utilities.DErrno import includeExtensionErrors
    from DIRAC.Core.Tornado.Server.TornadoServer import TornadoServer
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    # A master CS must not refresh its configuration from itself.
    if gConfigurationData.isMaster():
        gRefresher.disable()
    localCfg = LocalConfiguration()
    localCfg.addMandatoryEntry("/DIRAC/Setup")
    localCfg.addDefaultEntry("/DIRAC/Security/UseServerCertificate", "yes")
    localCfg.addDefaultEntry("LogLevel", "INFO")
    localCfg.addDefaultEntry("LogColor", True)
    resultDict = localCfg.loadUserData()
    if not resultDict['OK']:
        gLogger.initialize("Tornado-CS", "/")
        gLogger.error("There were errors when loading configuration", resultDict['Message'])
        sys.exit(1)
    includeExtensionErrors()
    gLogger.initialize('Tornado-CS', "/")
    # get the specific master CS port
    try:
        csPort = int(gConfigurationData.extractOptionFromCFG('%s/Port' % getServiceSection('Configuration/Server')))
    except TypeError:
        # No Port option configured: extractOptionFromCFG returned None,
        # so fall back to the TornadoServer default port.
        csPort = None
    serverToLaunch = TornadoServer(services='Configuration/Server', port=csPort)
    serverToLaunch.startTornado()
# Script entry point (invoked by the DIRAC scripts machinery).
if __name__ == "__main__":
    main()
|
yujikato/DIRAC
|
src/DIRAC/Core/Tornado/scripts/tornado_start_CS.py
|
Python
|
gpl-3.0
| 2,120
|
[
"DIRAC"
] |
9aa2401c3369549f0cbd00a63c991cfca6ab20ace0d5195dcbc96a512fc6d871
|
"""
Backports. Mostly from scikit-learn
"""
import numpy as np
from scipy import linalg
from scipy.stats import (poisson, expon)
###############################################################################
# For scikit-learn < 0.14
def _pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex hermetian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = _pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
# Prefer scikit-learn's implementation when available (sklearn >= 0.14);
# fall back to the local backport above otherwise.
try:
    from sklearn.utils.extmath import pinvh
except ImportError:
    pinvh = _pinvh
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a spherical model"""
    expanded = covars.copy()
    # Promote a 1-D variance vector to a column, then broadcast the single
    # per-component variance across every feature dimension and delegate
    # to the diagonal implementation.
    if covars.ndim == 1:
        expanded = expanded[:, np.newaxis]
    if covars.shape[1] == 1:
        expanded = np.tile(expanded, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, expanded)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model"""
    n_samples, n_dim = X.shape
    inv_cov = pinvh(covars)
    # NOTE(review): the 0.1 added under the log looks like an ad-hoc guard
    # against det(covars) == 0; it biases the density -- confirm intent.
    norm_term = n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
    quad_term = (np.sum(X * np.dot(X, inv_cov), 1)[:, np.newaxis]
                 - 2 * np.dot(np.dot(X, inv_cov), means.T)
                 + np.sum(means * np.dot(means, inv_cov), 1))
    return -0.5 * (norm_term + quad_term)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds to a
        single data point.
    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components Gaussians.
        Each row corresponds to a single mean vector.
    covars : array_like
        List of n_components covariance parameters for each Gaussian. The shape
        depends on `covariance_type`:
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    covariance_type : string
        Type of the covariance parameters.  Must be one of
        'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian distributions.
    """
    # Dispatch to the specialized implementation for this covariance shape;
    # an unknown covariance_type raises KeyError.
    dispatch = {
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'diag': _log_multivariate_normal_density_diag,
        'full': _log_multivariate_normal_density_full,
    }
    return dispatch[covariance_type](X, means, covars)
def log_poisson_pmf(X, rates):
    """Log Poisson pmf of every sample in X under each rate, shape (n, len(rates))."""
    log_prob = np.empty((len(X), len(rates)))
    for col, rate in enumerate(rates):
        log_prob[:, col] = poisson.logpmf(X, rate)
    return log_prob
def log_exponential_density(X, rates):
    """Log exponential density of every sample in X under each rate."""
    log_prob = np.empty((len(X), len(rates)))
    for col, rate in enumerate(rates):
        # scipy parameterizes by scale = 1/rate.
        log_prob[:, col] = expon.logpdf(X, scale=1 / rate)
    return log_prob
# Prefer scikit-learn's implementation when available; fall back to the
# local backport otherwise.
try:
    from sklearn.mixture import log_multivariate_normal_density
except ImportError:
    # New in 0.14
    log_multivariate_normal_density = _log_multivariate_normal_density
def _distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
# BUG FIX: on a successful sklearn import the original only rebound the
# private ``_distribute...`` name and never defined the public
# ``distribute_covar_matrix_to_match_covariance_type``, so that name was
# a NameError whenever sklearn >= 0.14 was installed. Bind the public
# alias in both branches.
try:
    from sklearn.mixture import (
        _distribute_covar_matrix_to_match_covariance_type as
        distribute_covar_matrix_to_match_covariance_type)
except ImportError:
    # New in 0.14
    distribute_covar_matrix_to_match_covariance_type =\
        _distribute_covar_matrix_to_match_covariance_type
|
mvictor212/hmmlearn
|
hmmlearn/utils/fixes.py
|
Python
|
bsd-3-clause
| 7,426
|
[
"Gaussian"
] |
9f609b109925d29163a8efd49da967d091363e0c88ed03906d9ad64413f6bc1f
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2008 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
from hashlib import md5
import operator
from kiwi.datatypes import number
from stoqdrivers.enum import TaxType
from stoqlib.lib import latscii
from ecf.ecfdomain import ECFDocumentHistory
latscii.register_codec()
def _argtype_name(argtype):
    """Return the display name used in error messages for a field type."""
    if argtype == number:
        return 'number'
    return argtype.__name__
# See
# http://www.fazenda.gov.br/confaz/confaz/atos/atos_cotepe/2004/ac017_04.htm
# for a complete list of this:

# Two-letter CAT52 brand codes, keyed by stoqdrivers brand id.
BRAND_CODES = {
    'daruma': 'DR',
    'bematech': 'BE',
}
# Numeric CAT52 model codes, keyed by (brand, model).
MODEL_CODES = {
    ('daruma', 'FS345'): 4,
    ('bematech', 'MP25'): 1,
}

# Human-readable names emitted in the file headers.
BRAND_FULL_NAME = {
    'daruma': 'DARUMA AUTOMACAO',
    'bematech': 'BEMATECH',
}

MODEL_FULL_NAME = {
    ('daruma', 'FS345'): 'FS-345',
    ('bematech', 'MP25'): 'MP-20 FI',
}

# Maps ECFDocumentHistory document types to their CAT52 denominations
# (used by CATFile.add_other_document / register E16).
DOCUMENT_TYPES = {
    ECFDocumentHistory.TYPE_MEMORY_READ: 'MF',
    ECFDocumentHistory.TYPE_Z_REDUCTION: 'RZ',
    ECFDocumentHistory.TYPE_SUMMARY: 'LX',
}
class CATError(Exception):
    """Raised for errors while assembling a CAT52 file."""
class CATFile(object):
    """Builder for a CAT52 fiscal file for one ECF printer.

    Registers are accumulated with :meth:`add` (or the ``add_*``
    convenience methods, one per register type) and serialized in
    register-type order by :meth:`write`.
    """
    def __init__(self, printer):
        # Registers collected so far, serialized by write().
        self._registers = []
        self.printer = printer
        self.software_version = None
        self.brand = BRAND_FULL_NAME[self.printer.brand]
        self.model = MODEL_FULL_NAME[(self.printer.brand, self.printer.model)]
        # Sequential index for custom (ICMS) tax totalizers; see
        # add_fiscal_coupon_details.
        self._tax_counter = 1

    def add(self, register):
        """Add register to the file.
        @param register: a register
        @type register: :class:`CATRegister`
        """
        self._registers.append(register)

    # E00
    def add_software_house(self, soft_house, software_name, software_version):
        """
        @param soft_house: Information from the software house: cnpj,
           ie, im, name
        @param software_name: Name of the software
        @param software_version: Version of the software

        Acording to the cat52/07, coo, software_number, line01 and
        line02 should be filled with blanks
        """
        self.add(CATRegisterE00(serial_number=self.printer.device_serial,
                                additional_mf="",
                                user_number=self.printer.user_number,
                                ecf_type='ECF-IF',
                                ecf_brand=self.brand,
                                ecf_model=self.model,
                                coo=0,
                                software_number=0,
                                cnpj=soft_house.cnpj,
                                ie=soft_house.ie,
                                im=soft_house.im,
                                soft_house=soft_house.name,
                                soft_name=software_name,
                                soft_version=(self.software_version or
                                              software_version),
                                line01=" ",
                                line02=" "))

    # E01
    def add_ecf_identification(self, driver, company, initial_crz,
                               final_crz, start, end):
        """Add the ECF identification register (E01).
        @param driver:
        @type driver:
        @param initial_crz:
        @param final_crz:
        @param company:
        @type company: PersonCompany
        """
        today = datetime.datetime.today()
        self.add(CATRegisterE01(serial_number=self.printer.device_serial,
                                # VERIFY
                                additional_mf='',
                                ecf_type="ECF-IF",
                                ecf_brand=self.brand,
                                ecf_model=self.model,
                                ecf_sb_version=driver.get_firmware_version(),
                                ecf_sb_date=today.date(),
                                ecf_sb_hour=today.time(),
                                # VERIFY
                                ecf_number=self.printer.id,
                                user_cnpj=company.get_cnpj_number(),
                                # Application
                                command="APL",
                                initial_crz=initial_crz,
                                final_crz=final_crz,
                                initial_date=start,
                                final_date=end,
                                library_version='00.00.00',
                                cotepe="AC1704 01.00.00"))

    # E02
    def add_ecf_user_identification(self, company, total):
        """Add the ECF user identification register (E02).
        @param company:
        @param total:
        """
        full_address = (company.person.address.get_address_string() + ' ' +
                        company.person.address.get_details_string())
        self.add(CATRegisterE02(serial_number=self.printer.device_serial,
                                additional_mf='',
                                ecf_model=self.model,
                                user_cnpj=company.get_cnpj_number(),
                                user_ie=company.get_state_registry_number(),
                                user_name=company.person.name,
                                user_address=full_address,
                                register_date=self.printer.register_date.date(),
                                register_hour=self.printer.register_date.time(),
                                cro=self.printer.register_cro,
                                total=int(total * 100),  # 2 decimal positions
                                user_number=self.printer.user_number,
                                ))

    # E12
    def add_z_reduction(self, reduction):
        """Add one Z reduction register (E12)."""
        self.add(CATRegisterE12(serial_number=self.printer.device_serial,
                                additional_mf='',
                                ecf_model=self.model,
                                user_number=self.printer.user_number,
                                crz=reduction.crz,
                                coo=reduction.coupon_end,
                                cro=reduction.cro,
                                moviment_date=reduction.emission_date,
                                reduction_date=reduction.reduction_date.date(),
                                reduction_time=reduction.reduction_date.time(),
                                total=int(reduction.period_total * 100),
                                issqn_discount=False))

    # E13
    def add_z_reduction_details(self, reduction, tax, index):
        """Add one Z reduction detail register (E13), building the
        partial-totalizer code from the tax code/type."""
        # XXX: the totalizer index is quite confusing
        # Before paulista invoice, we didn't store the ISS value!
        # Ignore this as it doesn't have the expected values
        # NOTE(review): this compares ``tax.value`` (not ``tax.type``)
        # against 'ISS' -- confirm the intended attribute.
        if tax.value == 'ISS':
            return
        totalizer = ''
        if all(c.isdigit() for c in tax.code):
            type = 'T'
            if tax.type == 'ISS':
                type = 'S'
            totalizer = '%0*d%s%s' % (2, index, type, tax.code)
        elif tax.code[0] in 'INF':
            type = ''
            if tax.type == 'ISS':
                type = 'S'
            totalizer = '%s%d' % (tax.code[0], index)
        else:
            type = 'T'
            if tax.type == 'ISS':
                type = 'S'
            if tax.code == 'DESC':
                totalizer = 'D%s' % type
            elif tax.code == 'ACRE':
                totalizer = 'A%s' % type
            elif tax.code == 'CANC':
                totalizer = 'Can-%s' % type
        self.add(CATRegisterE13(serial_number=self.printer.device_serial,
                                additional_mf='',
                                ecf_model=self.model,
                                user_number=self.printer.user_number,
                                crz=reduction.crz,
                                partial_totalizer=totalizer,
                                value=tax.value,
                                ))

    # E14
    def add_fiscal_coupon(self, sale, client, fiscal_data):
        """Add one fiscal coupon register (E14) for a sale."""
        subtotal = (sale.total_amount -
                    sale.surcharge_value +
                    sale.discount_value)
        client_name = ''
        if client:
            client_name = client.person.name
        cpf_cnpj = 0
        if fiscal_data.document:
            # Keep only the digits of the client document.
            cpf_cnpj = int(''.join([c for c in fiscal_data.document
                                    if c.isdigit()]))
        self.add(CATRegisterE14(serial_number=self.printer.device_serial,
                                additional_mf='',
                                ecf_model=self.model,
                                user_number=self.printer.user_number,
                                document_counter=fiscal_data.document_counter,
                                coo=fiscal_data.coo,
                                emission_start=sale.confirm_date,
                                subtotal=subtotal,
                                discount=sale.discount_value,
                                discount_type='V',  # Value
                                surcharge=sale.surcharge_value,
                                surcharge_type='V',  # Value
                                total=sale.total_amount,
                                canceled=False,  # !!!
                                surcharge_canceled=0,
                                # stoqlib/domain/sale.py:489
                                discount_surcharge_order='A',
                                client_name=client_name,
                                client_cpf_cnpj=cpf_cnpj,
                                ))

    # E15
    def add_fiscal_coupon_details(self, sale, client, fiscal_data,
                                  item, iss_tax, sequence):
        """Add one fiscal coupon item register (E15)."""
        tax = item.sellable.get_tax_constant()
        partial_totalizer = ''
        if tax.tax_type == TaxType.SERVICE:  # ISS
            partial_totalizer = '01S%0*d' % (4, iss_tax)
        elif tax.tax_type == TaxType.NONE:  # Not taxed (nao tributado)
            partial_totalizer = 'N1'
        elif tax.tax_type == TaxType.EXEMPTION:  # Exempt (isento)
            partial_totalizer = 'I1'
        elif tax.tax_type == TaxType.SUBSTITUTION:  # Tax substitution
            partial_totalizer = 'F1'
        elif tax.tax_type == TaxType.CUSTOM:
            partial_totalizer = '%0*dT%0*d' % (2, self._tax_counter, 4,
                                               tax.tax_value * 100)
            self._tax_counter += 1
        self.add(CATRegisterE15(
            serial_number=self.printer.device_serial,
            additional_mf='',
            ecf_model=self.model,
            user_number=self.printer.user_number,
            coo=fiscal_data.coo,
            document_counter=fiscal_data.document_counter,
            item_number=sequence,
            item_code=item.sellable.code,
            item_description=item.get_description(),
            # precision = 2
            item_amount=item.quantity * 100,
            item_unit=(item.sellable.unit_description or 'un'),
            item_unitary_value=item.price * 100,
            # We don't offer discount,
            item_discount=0,
            # or surcharge for items,
            item_surcharge=0,
            # only for the subtotal
            # precison = 2
            item_total=item.price * item.quantity * 100,
            item_partial_totalizer=partial_totalizer,
            # Stoq does not store
            item_canceled='N',
            # canceled items
            item_canceled_amount=0,
            item_canceled_value=0,
            item_canceled_surcharge=0,
            round_or_trunc='A',  # !!! round ('A') or truncate ('T')
            amount_decimal_precision=2,  # !!!
            value_decimal_precision=2,  # !!!
            ))

    # E21
    def add_payment_method(self, sale, fiscal_data,
                           payment, returned_sale=None):
        """Add one payment register (E21) for a sale payment."""
        returned = 'N'
        returned_value = 0
        # If the sale was returned, all payments were given back to the client
        if sale.return_date:
            # Sales returned right after being emitted in the ECF dont have an
            # invoice number, and they should not get this far.
            # NOTE(review): asserting ``is None`` contradicts the comment
            # above -- confirm whether ``is not None`` was intended.
            invoice_number = returned_sale.invoice_number
            assert invoice_number is None, invoice_number
            returned = 'S'
            returned_value = payment.value
        self.add(CATRegisterE21(
            serial_number=self.printer.device_serial,
            additional_mf='',
            ecf_model=self.model,
            user_number=self.printer.user_number,
            coo=fiscal_data.coo,
            document_counter=fiscal_data.document_counter,
            gnf=0,
            payment_method=payment.method.get_description(),
            value=payment.value * 100,
            returned=returned,  # S/N/P
            returned_value=returned_value * 100,  # Only if P
            ))

    # E16
    def add_other_document(self, document):
        """Add a non-coupon document register (E16)."""
        self.add(CATRegisterE16(serial_number=self.printer.device_serial,
                                additional_mf='',
                                ecf_model=self.model,
                                user_number=self.printer.user_number,
                                coo=document.coo,
                                gnf=document.gnf,
                                grg=0,
                                cdc=0,
                                crz=document.crz or 0,
                                denomination=DOCUMENT_TYPES[document.type],
                                emission_date=document.emission_date.date(),
                                emission_hour=document.emission_date.time()))

    def write(self, filename=None, fp=None):
        """Writes out of the content of the file to a filename or fp

        @param filename: filename
        @param fp: file object, anything implementing write(data)
        """
        # Exactly one of filename/fp must be provided.
        if filename is None and fp is None:
            raise TypeError
        if filename is not None and fp is not None:
            raise TypeError
        if fp is None:
            fp = open(filename, 'wb')
        # CAT52 requires the registers ordered by type code (E00, E01, ...).
        self._registers.sort(key=operator.attrgetter('register_type'))
        data = ''
        for register in self._registers:
            data += register.get_string()
        # Trailing EAD register: MD5 digest of everything written so far.
        # NOTE(review): Python 2 semantics -- md5() over a str and
        # str.encode('latin1') would need adjusting for Python 3.
        md5sum = md5(data).hexdigest()
        ead = "EAD%s\r\n" % md5sum
        fp.write(data.encode('latin1'))
        fp.write(ead.encode('latin1'))
        fp.close()
class CATRegister(object):
    """Base class for a fixed-width CAT52 register line.

    Subclasses must set ``register_type`` (the line prefix, e.g. "E00")
    and ``register_fields``, a list of ``(name, width, type)`` tuples
    describing the field layout. All fields are passed as keyword
    arguments to the constructor and formatted to their fixed width.
    """
    # Overridden by subclasses: line prefix and field layout.
    register_type = None
    register_fields = None

    def __init__(self, *args, **kwargs):
        if not self.register_fields:
            raise TypeError
        if not self.register_type:
            raise TypeError
        if len(kwargs) != len(self.register_fields):
            raise TypeError('%s expected %d parameters but got %d' % (
                self.__class__.__name__, len(self.register_fields),
                len(kwargs)))
        # NOTE(review): this duplicate-argument check is dead code -- the
        # dict keys are whole ``(name, width, type)`` tuples (field[0] of
        # the zip pair), so ``key in sent_args`` can never be true; Python
        # already rejects duplicate keyword arguments anyway.
        sent_args = dict([(field[0], field[1:2])
                          for field in zip(self.register_fields,
                                           kwargs.values())])
        for key in kwargs:
            if key in sent_args:
                raise CATError("%s specified two times" % (key, ))

        # Formatted fixed-width string for each field, keyed by name.
        self._values = {}
        for (name, length, argtype) in self.register_fields:
            # Empty string means "blank field"; skip the type check.
            if kwargs[name] == "":
                pass
            elif not isinstance(kwargs[name], argtype):
                raise TypeError("argument %s should be of type %s but got %s" % (
                    name, _argtype_name(argtype), type(kwargs[name]).__name__))
            self._values[name] = self._arg_to_string(kwargs[name], length,
                                                     argtype)
            setattr(self, name, kwargs[name])

    #
    # Public API
    #

    def get_string(self):
        """Return the complete register line: type prefix, all formatted
        fields in declaration order, CRLF terminator.
        @returns:
        """
        values = []
        for (name, a, b) in self.register_fields:
            values.append(self._values[name])
        return '%s%s\r\n' % (self.register_type,
                             ''.join(values))

    #
    # Private
    #

    def _arg_to_string(self, value, length, argtype):
        # Format one value to its fixed-width string representation.
        if value == "":
            return ' ' * length

        if argtype == number:
            # If a value is higher than the maximum allowed,
            # set it to the maximum allowed value instead.
            max_value = (10 ** length) - 1
            if value > max_value:
                value = max_value
            str_value = str(value)
            # Return to int again, so in the formatting we add the correct
            # numbers of zeros.
            re_value = int(str_value.replace('.', ''))
            arg = '%0*d' % (length, re_value)
        elif argtype == basestring:
            # Accept normal strings, which are assumed to be UTF-8
            # (Python 2 only: basestring/unicode).
            if type(value) == str:
                value = unicode(value, 'utf-8')
            # Convert to latscii
            value = value.encode('ascii', 'replacelatscii')
            arg = '%-*s' % (length, value)
            # Chop strings which are too long
            if len(arg) > length:
                arg = arg[:length]
        elif argtype == datetime.date:
            # YYYYMMDD
            arg = value.strftime("%Y%m%d")
        elif argtype == datetime.time:
            # HHMMSS
            arg = value.strftime("%H%M%S")
        elif argtype == bool:
            arg = 'N'
            if value:
                arg = 'S'
        else:
            raise TypeError
        assert len(arg) <= length
        return arg
class CATRegisterE00(CATRegister):
    """Register E00 - Software House Identification
    """
    register_type = "E00"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('user_number', 2, number),
        ('ecf_type', 7, basestring),
        ('ecf_brand', 20, basestring),
        ('ecf_model', 20, basestring),
        ('coo', 6, number),
        ('software_number', 2, number),
        ('cnpj', 14, number),
        ('ie', 14, number),
        ('im', 14, number),
        ('soft_house', 40, basestring),
        ('soft_name', 40, basestring),
        ('soft_version', 10, basestring),
        ('line01', 42, basestring),
        ('line02', 42, basestring),
    ]
class CATRegisterE01(CATRegister):
    """Register E01 - ECF Identification
    """
    register_type = "E01"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_type', 7, basestring),
        ('ecf_brand', 20, basestring),
        ('ecf_model', 20, basestring),
        ('ecf_sb_version', 10, basestring),
        ('ecf_sb_date', 8, datetime.date),
        ('ecf_sb_hour', 6, datetime.time),
        ('ecf_number', 3, number),
        ('user_cnpj', 14, number),
        ('command', 3, basestring),
        ('initial_crz', 6, number),
        ('final_crz', 6, number),
        ('initial_date', 8, datetime.date),
        ('final_date', 8, datetime.date),
        ('library_version', 8, basestring),
        ('cotepe', 15, basestring),
    ]
class CATRegisterE02(CATRegister):
    """Register E02 - ECF User Identification
    """
    register_type = "E02"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_cnpj', 14, number),
        # XXX: This should be number, but acording to cat52, it is
        # string ?!?
        ('user_ie', 14, number),
        ('user_name', 40, basestring),
        ('user_address', 120, basestring),
        ('register_date', 8, datetime.date),
        ('register_hour', 6, datetime.time),
        ('cro', 6, number),
        ('total', 18, number),
        ('user_number', 2, number),
    ]
class CATRegisterE03(CATRegister):
    """Register E03 - Service Provider Identification
    """
    register_type = "E03"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('provider_number', 2, number),
        ('register_date', 8, datetime.date),
        ('register_hour', 6, datetime.time),
        ('provider_cnpj', 14, number),
        ('provider_ie', 14, basestring),
        ('total', 18, number)
    ]
class CATRegisterE04(CATRegister):
    """Register E04 - Previous Users list
    """
    register_type = "E04"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('register_date', 8, datetime.date),
        ('register_hour', 6, datetime.time),
        ('user_cnpj', 14, number),
        ('user_ie', 14, basestring),
        ('cro', 6, number),
        ('gt', 18, number)
    ]
class CATRegisterE05(CATRegister):
    """Register E05 - GT Encodings
    """
    register_type = "E05"
    # (field name, fixed width in characters, value type);
    # c0..c9 are the encoding characters for digits 0-9.
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_cnpj', 14, number),
        ('register_date', 8, datetime.date),
        ('register_hour', 6, datetime.time),
        ('c0', 1, basestring),
        ('c1', 1, basestring),
        ('c2', 1, basestring),
        ('c3', 1, basestring),
        ('c4', 1, basestring),
        ('c5', 1, basestring),
        ('c6', 1, basestring),
        ('c7', 1, basestring),
        ('c8', 1, basestring),
        ('c9', 1, basestring),
    ]
class CATRegisterE06(CATRegister):
    """Register E06 - Currency Symbols list
    """
    register_type = "E06"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_cnpj', 14, number),
        ('register_date', 8, datetime.date),
        ('register_hour', 6, datetime.time),
        ('currency_symbol', 4, basestring)
    ]
class CATRegisterE07(CATRegister):
    """Register E07 - Changes in ECF software version.
    """
    register_type = "E07"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('software_version', 10, basestring),
        ('register_date', 8, datetime.date)
    ]
class CATRegisterE08(CATRegister):
    """Register E08 - MFD devices list
    """
    register_type = "E08"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_cnpj', 14, number),
        ('mfd_number', 20, basestring)
    ]
class CATRegisterE09(CATRegister):
    """Register E09 - Technical Intervention list
    """
    register_type = "E09"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('cro', 6, number),
        ('register_date', 8, datetime.date),
        ('register_hour', 6, datetime.time),
        ('lost_mt_data', 1, bool)
    ]
class CATRegisterE10(CATRegister):
    # RFD - "Registro de fita-detalhe" (detail-tape register)
    """Register E10 - Issued RFD list
    """
    register_type = "E10"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('cfd', 6, number),
        ('emission_date', 8, datetime.date),
        ('initial_coo', 6, number),
        ('final_coo', 6, number),
        ('user_cnpj', 14, number)
    ]
class CATRegisterE11(CATRegister):
    """Register E11 - Current position of counters and totalizers
    """
    register_type = "E11"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('crz', 6, number),
        ('cro', 6, number),
        ('coo', 6, number),
        ('gnf', 6, number),
        ('ccf', 6, number),
        ('cvc', 6, number),
        ('cbp', 6, number),
        ('crg', 6, number),
        ('cmv', 6, number),
        ('cfd', 6, number),
        ('gt', 18, number),
        # Date that file was generated
        ('capture_date', 8, datetime.date),
        # Hour that file was generated
        ('capture_hour', 6, datetime.time)
    ]
class CATRegisterE12(CATRegister):
    """Register E12 - Z reduction list
    """
    register_type = "E12"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('crz', 6, number),
        ('coo', 6, number),
        ('cro', 6, number),
        ('moviment_date', 8, datetime.date),
        ('reduction_date', 8, datetime.date),
        ('reduction_time', 6, datetime.time),
        ('total', 14, number),
        ('issqn_discount', 1, bool),  # False, right now
    ]
class CATRegisterE13(CATRegister):
    """Register E13 - Z reduction details list
    """
    register_type = "E13"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('crz', 6, number),
        ('partial_totalizer', 7, basestring),  # See table 6.5.1.2
        ('value', 13, number),  # currency
    ]
class CATRegisterE14(CATRegister):
    """Register E14 - Fiscal Coupon/Fiscal Invoice list
    """
    register_type = "E14"
    # (field name, fixed width in characters, value type)
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('document_counter', 6, number),  # See documentation
        ('coo', 6, number),
        ('emission_start', 8, datetime.date),
        ('subtotal', 14, number),  # currency
        ('discount', 13, number),  # $ or %
        ('discount_type', 1, basestring),  # V = $ / P = %
        ('surcharge', 13, number),
        ('surcharge_type', 1, basestring),  # V = $ / P = %
        # Total after discount and surcharge
        ('total', 14, number),
        # If the document was canceled
        ('canceled', 1, bool),
        # surcharge canceled value
        ('surcharge_canceled', 13, number),
        # The order that the discount
        # and surchage were applied:
        # 'D' if discount came first or 'A'
        # otherwise
        ('discount_surcharge_order', 1, basestring),
        ('client_name', 40, basestring),
        # cpf or cnpj, depending on the client
        ('client_cpf_cnpj', 14, number),
    ]
class CATRegisterE15(CATRegister):
    """Register E15 - Fiscal Coupon/Fiscal Invoice details list
    """
    # One record per coupon line item; (field name, width, type) triples,
    # presumably a fixed-width layout per the CAT-52 spec — TODO confirm.
    register_type = "E15"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('coo', 6, number),
        ('document_counter', 6, number),  # See documentation
        ('item_number', 3, number),
        ('item_code', 14, basestring),
        ('item_description', 100, basestring),
        ('item_amount', 7, number),
        ('item_unit', 3, basestring),
        ('item_unitary_value', 8, number),
        ('item_discount', 8, number),
        ('item_surcharge', 8, number),
        ('item_total', 14, number),
        # See table 6.5.1.2. !!!
        ('item_partial_totalizer', 7, basestring),
        # S - yes / N - no / P - partial
        ('item_canceled', 1, basestring),
        # Only if partial cancelment
        ('item_canceled_amount', 7, number),
        # idem
        ('item_canceled_value', 13, number),
        # currency
        ('item_canceled_surcharge', 13, number),
        # A - round / T - trunc
        ('round_or_trunc', 1, basestring),
        # Number of decimal precision for the amount
        ('amount_decimal_precision', 1, number),
        # Number of decimal precision for the unitary
        # value
        ('value_decimal_precision', 1, number),
    ]
class CATRegisterE16(CATRegister):
    """Register E16 - Other documents
    """
    # (field name, width, type) triples; layout presumably defined by the
    # CAT-52 spec — TODO confirm against CATRegister.
    register_type = "E16"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('coo', 6, number),
        ('gnf', 6, number),  # See documentation
        ('grg', 6, number),
        ('cdc', 4, number),
        ('crz', 6, number),
        ('denomination', 2, basestring),  # See table
        ('emission_date', 8, datetime.date),
        ('emission_hour', 6, datetime.time),
    ]
class CATRegisterE17(CATRegister):
    """Register E17 - Z reduction detail - non-fiscal totalizers
    """
    # (field name, width, type) triples; layout presumably defined by the
    # CAT-52 spec — TODO confirm against CATRegister.
    register_type = "E17"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('crz', 6, number),
        ('non_fiscal_totalizer', 15, basestring),
        ('value', 13, number),
    ]
class CATRegisterE18(CATRegister):
    """Register E18 - Z reduction detail - payment methods and change.
    """
    # (field name, width, type) triples; layout presumably defined by the
    # CAT-52 spec — TODO confirm against CATRegister.
    register_type = "E18"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('crz', 6, number),
        ('description', 15, basestring),
        ('value', 13, number)
    ]
class CATRegisterE19(CATRegister):
    """Register E19 - Non-fiscal coupon
    """
    # (field name, width, type) triples; layout presumably defined by the
    # CAT-52 spec — TODO confirm against CATRegister.
    register_type = "E19"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('coo', 6, number),
        ('gnf', 6, number),
        ('emission_start', 8, datetime.date),
        ('subtotal', 14, number),
        ('subtotal_discount', 13, number),
        ('discount_type', 1, basestring),
        ('subtotal_surcharge', 13, number),
        ('surcharge_type', 1, basestring),
        ('total', 14, number),
        ('canceled', 1, bool),
        ('surcharge_canceled', 13, number),
        ('discount_surcharge_order', 1, basestring),
        ('client_name', 40, basestring),
        ('client_cpf_cnpj', 14, number)
    ]
class CATRegisterE20(CATRegister):
    """Register E20 - Non-fiscal document detail
    """
    # (field name, width, type) triples; layout presumably defined by the
    # CAT-52 spec — TODO confirm against CATRegister.
    register_type = "E20"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('coo', 6, number),
        ('gnf', 6, number),
        ('item_number', 3, number),
        ('denomination', 15, basestring),
        ('value', 13, number),
        ('item_discount', 13, number),
        ('item_surcharge', 13, number),
        ('total', 13, number),
        ('canceled', 1, basestring),
        ('item_canceled_surcharge', 13, number)
    ]
class CATRegisterE21(CATRegister):
    """Register E21 - Fiscal Coupon and Non-fiscal document - payment methods
    """
    # (field name, width, type) triples; layout presumably defined by the
    # CAT-52 spec — TODO confirm against CATRegister.
    register_type = "E21"
    register_fields = [
        ('serial_number', 20, basestring),
        ('additional_mf', 1, basestring),
        ('ecf_model', 20, basestring),
        ('user_number', 2, number),
        ('coo', 6, number),
        # ccf
        ('document_counter', 6, number),
        # See documentation - not appliable to stoq right now.
        ('gnf', 6, number),
        ('payment_method', 15, basestring),
        ('value', 13, number),
        # S/N/P
        ('returned', 1, basestring),
        ('returned_value', 13, number),
    ]
class CATRegisterEAD(CATRegister):
    """Register EAD - Digital Signature
    """
    # Single fixed-width field holding the file's digital signature.
    register_type = "EAD"
    register_fields = [
        ('digital_signature', 256, basestring)
    ]
|
andrebellafronte/stoq
|
plugins/ecf/cat52.py
|
Python
|
gpl-2.0
| 32,816
|
[
"VisIt"
] |
7aed4140e5d02fb83ce2246b1ff5881165151161418c492e6cd5469a23e6812e
|
import subprocess
import os
import threading
import sys
from glutton.utils import get_log, tmpfile, openmp_num_threads, rm_f
from glutton.blast import Blast
from glutton.job import BlastJob
from glutton.queue import WorkQueue
class All_vs_all_search(object):
    """Run blastx queries against a protein database via a work queue,
    assigning each query sequence to its best acceptable database hit.

    Results accumulate in ``gene_assignments`` mapping query id to either
    ``(subject id, strand)`` or ``None`` when no acceptable hit was found.
    """

    def __init__(self, batch_size=100):
        self.nucleotide = False
        self.min_hitidentity = None
        self.min_hitlength = None
        self.max_evalue = None
        self.batch_size = batch_size

        self.log = get_log()

        self.cleanup_files = []
        self.gene_assignments = {}

        # Guards gene_assignments / progress counters: job_callback runs on
        # worker threads (see process()).
        self.lock = threading.Lock()

        self.q = None
        self.total_jobs = 0
        self.complete_jobs = 0

    def _batch(self, x):
        """Yield items from iterable `x` in lists of at most `batch_size`.

        The final, possibly shorter, batch is yielded only if non-empty.

        BUGFIX: the original ended with ``raise StopIteration`` for an empty
        tail, which PEP 479 turns into a RuntimeError on Python 3.7+; the
        generator now simply returns instead.
        """
        tmp = []

        for i in x:
            tmp.append(i)
            if len(tmp) == self.batch_size:
                yield tmp
                tmp = []

        if tmp:
            yield tmp

    def process(self, db, queries, nucleotide, min_hitidentity, min_hitlength, max_evalue):
        """Build a blast db from `db`, align all `queries` against it and
        return the resulting gene_assignments dict."""
        self.nucleotide = nucleotide
        self.min_hitidentity = min_hitidentity
        self.min_hitlength = min_hitlength
        self.max_evalue = max_evalue

        # we need to deal with the index files here because
        # all of the blastx jobs need them
        self.cleanup_files += [db + i for i in [".phr", ".pin", ".psq"]]

        # creates db + {phr,pin,psq} in same dir as db
        self.log.info("creating blast db...")
        Blast.makedb(db)  # XXX THIS IS ALWAYS PROTEIN, BECAUSE WE WANT TO RUN BLASTX

        # queue up the jobs
        self.log.info("starting local alignments...")
        self.q = WorkQueue()

        self.total_jobs = len(queries)
        # start negative so the first _progress() call shows zero complete
        self.complete_jobs = -self.batch_size
        self._progress()

        for query in self._batch(queries):
            self.q.enqueue(BlastJob(self.job_callback, db, query, 'blastx'))

        self.log.debug("waiting for job queue to drain...")
        self.q.join()

        rm_f(self.cleanup_files)

        return self.gene_assignments

    def stop(self):
        """Stop the work queue (if started) and remove index files."""
        if self.q:
            self.q.stop()
        rm_f(self.cleanup_files)

    def get_intermediate_results(self):
        """Return whatever assignments have been collected so far."""
        return self.gene_assignments

    def _progress(self):
        """Advance the progress counter by one batch and redraw the line.

        Caller must hold self.lock (or be single-threaded setup code).
        """
        self.complete_jobs += self.batch_size
        if self.complete_jobs > self.total_jobs:
            self.complete_jobs = self.total_jobs

        sys.stderr.write("\rProgress: %d / %d blastx alignments " % (self.complete_jobs, self.total_jobs))
        if self.complete_jobs == self.total_jobs:
            sys.stderr.write("\n")
        sys.stderr.flush()

    def job_callback(self, job):
        """Record acceptable hits from a finished BlastJob.

        Called from worker threads; all shared-state mutation happens
        under self.lock.  BUGFIX: the original used bare acquire/release,
        which leaked the lock if anything in between raised — `with`
        guarantees release.
        """
        self.log.debug("%d blast results returned" % len(job.results))

        with self.lock:
            self._progress()

            if job.success():
                qlen = dict([(q.id, len(q)) for q in job.input])

                for br in job.results:
                    # strand inferred from query coordinate order
                    strand = '+' if br.qstart < br.qend else '-'

                    # keep only the first hit per query that clears the
                    # e-value / identity / length thresholds
                    if (br.qseqid in self.gene_assignments) or \
                       (self.max_evalue < br.evalue) or \
                       (self.min_hitidentity > br.pident) or \
                       (self.min_hitlength > br.length):
                        continue

                    self.gene_assignments[br.qseqid] = (br.sseqid, strand)

            # queries with no acceptable hit are explicitly marked None
            for q in job.input:
                if q.id not in self.gene_assignments:
                    self.gene_assignments[q.id] = None
|
ajm/glutton
|
glutton/localsearch.py
|
Python
|
gpl-3.0
| 3,613
|
[
"BLAST"
] |
00aec41fbe0db40aa6086c88b9c45d7962c79d32723778a3442b01190c0d6c65
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import shutil
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter, XTCReporter
from mdtraj.testing import eq
try:
from simtk.unit import nanometers, kelvin, picoseconds, femtoseconds
from simtk.openmm import LangevinIntegrator, Platform
from simtk.openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds, CheckpointReporter
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
    """End-to-end check that HDF5/NetCDF/DCD/XTC reporters attached to the
    same OpenMM simulation record identical trajectories.

    100 steps with a report interval of 2 -> 50 frames per file.
    """
    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
                                     nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    # Reference platform: deterministic and always available
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)

    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    xtcfile = os.path.join(tmpdir, 'traj.xtc')

    # all four reporters record every 2 steps
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
                            cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
                            velocities=True)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
    reporter3 = DCDReporter(dcdfile, 2)
    reporter4 = XTCReporter(xtcfile, 2)

    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.reporters.append(reporter4)
    simulation.step(100)

    # close before reading back so buffered frames are flushed
    reporter.close()
    reporter2.close()
    reporter3.close()
    reporter4.close()

    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, 22, 3))
        eq(got.velocities.shape, (50, 22, 3))
        eq(got.cell_lengths.shape, (50, 3))
        eq(got.cell_angles.shape, (50, 3))
        # timestep 2 fs = 0.002 ps, reported every 2 steps
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb')).top

    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        eq(cell_lengths.shape, (50, 3))
        eq(cell_angles.shape, (50, 3))
        eq(time, 0.002 * 2 * (1 + np.arange(50)))

    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
    netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))
    xtc_traj = md.load(xtcfile, top=get_fn('native.pdb'))

    # we don't have to convert units here, because md.load already
    # handles that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)
    eq(xtc_traj.time, netcdf_traj.time)

    eq(dcd_traj.xyz, hdf5_traj.xyz)
    # XTC stores reduced-precision coordinates, hence the looser tolerance
    eq(xtc_traj.xyz, dcd_traj.xyz, decimal=3)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_reporter_subset(tmpdir, get_fn):
    """Same cross-format reporter check as test_reporter, but with the
    atomSubset option restricting output to 5 atoms, in a periodic box.
    """
    pdb = PDBFile(get_fn('native2.pdb'))
    pdb.topology.setUnitCellDimensions([2, 2, 2])
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
                                     nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)

    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    xtcfile = os.path.join(tmpdir, 'traj.xtc')

    atomSubset = [0, 1, 2, 4, 5]

    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
                            cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
                            velocities=True, atomSubset=atomSubset)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
                               cell=True, atomSubset=atomSubset)
    reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)
    reporter4 = XTCReporter(xtcfile, 2, atomSubset=atomSubset)

    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.reporters.append(reporter4)
    simulation.step(100)

    reporter.close()
    reporter2.close()
    reporter3.close()
    reporter4.close()

    # NOTE(review): `t` does not appear to be used after this point —
    # looks like leftover setup; confirm before removing.
    t = md.load(get_fn('native.pdb'))
    t.restrict_atoms(atomSubset)

    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, len(atomSubset), 3))
        eq(got.velocities.shape, (50, len(atomSubset), 3))
        # HDF5 stores the box in nm (the 2 nm cube set above)
        eq(got.cell_lengths, 2 * np.ones((50, 3)))
        eq(got.cell_angles, 90 * np.ones((50, 3)))
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology

    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        # 20 here vs 2 above: NetCDF presumably stores lengths in angstroms
        # (2 nm = 20 A) — consistent with the raw-file checks differing
        eq(cell_lengths, 20 * np.ones((50, 3)))
        eq(cell_angles, 90 * np.ones((50, 3)))
        eq(time, 0.002 * 2 * (1 + np.arange(50)))
        eq(xyz.shape, (50, len(atomSubset), 3))

    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=hdf5_traj)
    netcdf_traj = md.load(ncfile, top=hdf5_traj)
    xtc_traj = md.load(xtcfile, top=hdf5_traj)

    # we don't have to convert units here, because md.load already handles that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)
    eq(xtc_traj.time, netcdf_traj.time)

    eq(dcd_traj.xyz, hdf5_traj.xyz)
    eq(xtc_traj.xyz, hdf5_traj.xyz)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_xtc_reporter_append(tmpdir, get_fn):
    """Verify XTCReporter(append=True) continues an existing trajectory:
    run 10 steps, checkpoint, then restart and append 10 more; the first
    5 frames of the combined file must equal the pre-restart copy.
    """
    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
                                     nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)

    tmpdir = str(tmpdir)
    xtcfile = os.path.join(tmpdir, 'traj.xtc')
    xtcfile_cp = os.path.join(tmpdir, 'traj_cp.xtc')
    checkpoint = os.path.join(tmpdir, 'checkpoint.chk')

    # first leg: 10 steps -> 5 frames, plus an OpenMM checkpoint
    reporter = XTCReporter(xtcfile, 2)
    simulation.reporters.append(reporter)
    simulation.reporters.append(CheckpointReporter(checkpoint, 10))
    simulation.step(10)
    reporter.close()
    # preserve the first leg for later comparison
    shutil.copyfile(xtcfile, xtcfile_cp)

    # rebuild simulation from scratch and resume from the checkpoint
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
                                     nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.loadCheckpoint(checkpoint)

    # second leg appends to the same file
    reporter = XTCReporter(xtcfile, 2, append=True)
    simulation.reporters.append(reporter)
    simulation.step(10)
    reporter.close()

    xtc_traj = md.load(xtcfile, top=get_fn('native.pdb'))
    xtc_traj_cp = md.load(xtcfile_cp, top=get_fn('native.pdb'))
    eq(xtc_traj.xyz[:5], xtc_traj_cp.xyz)
    eq(xtc_traj.n_frames, 10)
    eq(xtc_traj_cp.n_frames, 5)
    eq(xtc_traj.time[:5], xtc_traj_cp.time)
|
dwhswenson/mdtraj
|
tests/test_reporter.py
|
Python
|
lgpl-2.1
| 10,148
|
[
"MDTraj",
"OpenMM"
] |
8dd56a4aa407f9bc48782c944a8b52129ee62115ef21866e6e8b5cbaf1af15c2
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, pprint, time, uuid, re
from cookielib import Cookie
from threading import current_thread
from PyQt4.Qt import (QObject, QNetworkAccessManager, QNetworkDiskCache,
QNetworkProxy, QNetworkProxyFactory, QEventLoop, QUrl, pyqtSignal,
QDialog, QVBoxLayout, QSize, QNetworkCookieJar, Qt, pyqtSlot, QPixmap)
from PyQt4.QtWebKit import QWebPage, QWebSettings, QWebView, QWebElement
from calibre import USER_AGENT, prints, get_proxies, get_proxy_info, prepare_string_for_xml
from calibre.constants import ispy3, cache_dir
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.logging import ThreadSafeLog
from calibre.gui2 import must_use_qt
from calibre.web.jsbrowser.forms import FormsMixin, default_timeout
class Timeout(Exception):
    # Raised when a page load / wait exceeds the allowed time.
    pass


class LoadError(Exception):
    # Raised when a page load completes unsuccessfully.
    pass


class ElementNotFound(ValueError):
    # Raised when a CSS selector matches no element.
    pass


class NotAFile(ValueError):
    pass
class WebPage(QWebPage):  # {{{

    """Headless QWebPage that routes JS dialogs to Python callbacks and
    tracks DOM readiness via an injected JS->Python bridge object.
    """

    def __init__(self, log,
            confirm_callback=None,
            prompt_callback=None,
            user_agent=USER_AGENT,
            enable_developer_tools=False,
            parent=None):
        QWebPage.__init__(self, parent)
        self.log = log
        self.user_agent = user_agent if user_agent else USER_AGENT
        self.confirm_callback = confirm_callback
        self.prompt_callback = prompt_callback
        # forward downloads/unknown MIME types to on_unsupported_content
        self.setForwardUnsupportedContent(True)
        self.unsupportedContent.connect(self.on_unsupported_content)
        settings = self.settings()
        if enable_developer_tools:
            settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
        QWebSettings.enablePersistentStorage(os.path.join(cache_dir(),
                'webkit-persistence'))
        QWebSettings.setMaximumPagesInCache(0)
        # random name so page JS cannot guess/clobber the bridge object
        self.bridge_name = 'b' + uuid.uuid4().get_hex()
        # the JS window object is wiped on navigation; re-inject each time
        self.mainFrame().javaScriptWindowObjectCleared.connect(
                self.add_window_objects)
        self.dom_loaded = False

    def add_window_objects(self):
        """Re-expose this object to page JS and hook DOMContentLoaded."""
        self.dom_loaded = False
        mf = self.mainFrame()
        mf.addToJavaScriptWindowObject(self.bridge_name, self)
        mf.evaluateJavaScript('document.addEventListener( "DOMContentLoaded", %s.content_loaded, false )' % self.bridge_name)

    def load_url(self, url):
        """Start loading url; dom_loaded flips True once the DOM is ready."""
        self.dom_loaded = False
        url = QUrl(url)
        self.mainFrame().load(url)
        self.ready_state  # Without this, DOMContentLoaded does not fire for file:// URLs

    @pyqtSlot()
    def content_loaded(self):
        # Invoked from page JS via the bridge when DOMContentLoaded fires.
        self.dom_loaded = True

    def userAgentForUrl(self, url):
        # Same UA for every URL.
        return self.user_agent

    def javaScriptAlert(self, frame, msg):
        # With a GUI view, show the native dialog; headless, just log.
        if self.view() is not None:
            return QWebPage.javaScriptAlert(self, frame, msg)
        prints('JSBrowser alert():', unicode(msg))

    def javaScriptConfirm(self, frame, msg):
        # Headless confirm(): delegate to the callback, default True.
        if self.view() is not None:
            return QWebPage.javaScriptConfirm(self, frame, msg)
        if self.confirm_callback is not None:
            return self.confirm_callback(unicode(msg))
        return True

    def javaScriptConsoleMessage(self, msg, lineno, source_id):
        prints('JSBrowser msg():%s:%s:'%(unicode(source_id), lineno), unicode(msg))

    def javaScriptPrompt(self, frame, msg, default_value, *args):
        """Headless prompt(): delegate to prompt_callback.

        Py2/Py3 Qt bindings differ: py3 returns (ok, value); py2 writes the
        value into the out-parameter in args[0] and returns ok.
        """
        if self.view() is not None:
            return QWebPage.javaScriptPrompt(self, frame, msg, default_value,
                    *args)
        if self.prompt_callback is None:
            return (False, default_value) if ispy3 else False
        value = self.prompt_callback(unicode(msg), unicode(default_value))
        ok = value is not None
        if ispy3:
            return ok, value
        if ok:
            result = args[0]
            result.clear()
            result.append(value)
        return ok

    @pyqtSlot(result=bool)
    def shouldInterruptJavaScript(self):
        # Headless: always interrupt runaway scripts.
        if self.view() is not None:
            return QWebPage.shouldInterruptJavaScript(self)
        return True

    def on_unsupported_content(self, reply):
        reply.abort()
        self.log.warn('Unsupported content, ignoring: %s'%reply.url())

    @property
    def ready_state(self):
        # Current document.readyState ('loading'/'interactive'/'complete').
        return unicode(self.mainFrame().evaluateJavaScript('document.readyState').toString())

    @pyqtSlot(QPixmap)
    def transfer_image(self, img):
        # Bridge slot: page JS hands an element's rendered image back to us.
        self.saved_img = img

    def get_image(self, qwe_or_selector):
        """Return the image for the element (or first selector match) as a
        QPixmap, fetched synchronously through the JS bridge.

        Raises ValueError if the selector matches nothing.
        """
        qwe = qwe_or_selector
        if not isinstance(qwe, QWebElement):
            qwe = self.mainFrame().findFirstElement(qwe)
            if qwe.isNull():
                raise ValueError('Failed to find element with selector: %r'
                        % qwe_or_selector)
        self.saved_img = QPixmap()
        qwe.evaluateJavaScript('%s.transfer_image(this)' % self.bridge_name)
        try:
            return self.saved_img
        finally:
            del self.saved_img
# }}}
class ProxyFactory(QNetworkProxyFactory):  # {{{

    """Resolve a QNetworkProxy per URL scheme from the user's configured
    proxies, falling back to Qt's default proxy for unknown schemes."""

    def __init__(self, log):
        QNetworkProxyFactory.__init__(self)
        self.proxies = {}
        for scheme, proxy_string in get_proxies().iteritems():
            scheme = scheme.lower()
            built = self._build_proxy(scheme, proxy_string, log)
            if built is not None:
                self.proxies[scheme] = built
        self.default_proxy = QNetworkProxy(QNetworkProxy.DefaultProxy)

    def _build_proxy(self, scheme, proxy_string, log):
        # Parse the proxy spec; skip entries missing host or port.
        info = get_proxy_info(scheme, proxy_string)
        if info is None:
            return None
        host, port = info['hostname'], info['port']
        if not host or not port:
            return None
        log.debug('JSBrowser using proxy:', pprint.pformat(info))
        kind = {'socks5':QNetworkProxy.Socks5Proxy}.get(scheme,
                QNetworkProxy.HttpProxy)
        proxy = QNetworkProxy(kind, host, port)
        user, password = info['username'], info['password']
        if user:
            proxy.setUser(user)
        if password:
            proxy.setPassword(password)
        return proxy

    def queryProxy(self, query):
        scheme = unicode(query.protocolTag()).lower()
        return [self.proxies.get(scheme, self.default_proxy)]
# }}}
class NetworkAccessManager(QNetworkAccessManager):  # {{{

    """QNetworkAccessManager with disk cache, proxy support, permissive SSL,
    request/reply debug logging and cookielib-compatible cookie export.
    """

    # Map Qt operation enum values to human-readable names for logging.
    OPERATION_NAMES = {getattr(QNetworkAccessManager, '%sOperation'%x) :
            x.upper() for x in ('Head', 'Get', 'Put', 'Post', 'Delete',
                'Custom')
    }
    # Used to bounce reply reporting back onto the main thread (see
    # on_finished()).
    report_reply_signal = pyqtSignal(object)

    def __init__(self, log, disk_cache_size=50, parent=None):
        QNetworkAccessManager.__init__(self, parent)
        self.reply_count = 0
        self.log = log
        if disk_cache_size > 0:
            # disk_cache_size is in MB; the temp dir is deleted on exit
            self.cache = QNetworkDiskCache(self)
            self.cache.setCacheDirectory(PersistentTemporaryDirectory(prefix='disk_cache_'))
            self.cache.setMaximumCacheSize(int(disk_cache_size * 1024 * 1024))
            self.setCache(self.cache)
        self.sslErrors.connect(self.on_ssl_errors)
        self.pf = ProxyFactory(log)
        self.setProxyFactory(self.pf)
        self.finished.connect(self.on_finished)
        self.cookie_jar = QNetworkCookieJar()
        self.setCookieJar(self.cookie_jar)
        self.main_thread = current_thread()
        # Queued connection so report_reply always runs on the main thread
        self.report_reply_signal.connect(self.report_reply, type=Qt.QueuedConnection)

    def on_ssl_errors(self, reply, errors):
        # Accept all SSL certificates (no verification).
        reply.ignoreSslErrors()

    def createRequest(self, operation, request, data):
        """Log the outgoing request (method, URL, headers, body preview)
        then delegate to the base implementation."""
        url = unicode(request.url().toString())
        operation_name = self.OPERATION_NAMES[operation]
        debug = []
        debug.append(('Request: %s %s' % (operation_name, url)))
        for h in request.rawHeaderList():
            try:
                d = '  %s: %s' % (h, request.rawHeader(h))
            except:
                d = '  %r: %r' % (h, request.rawHeader(h))
            debug.append(d)
        if data is not None:
            # peek() does not consume the body
            raw = data.peek(1024)
            try:
                raw = raw.decode('utf-8')
            except:
                raw = repr(raw)
            debug.append('  Request data: %s'%raw)
        self.log.debug('\n'.join(debug))
        return QNetworkAccessManager.createRequest(self, operation, request,
                data)

    def on_finished(self, reply):
        if current_thread() is not self.main_thread:
            # This method was called in a thread created by Qt. The python
            # interpreter may not be in a safe state, so dont do anything
            # more. This signal is queued which means the reply wont be
            # reported unless someone spins the event loop. So far, I have only
            # seen this happen when doing Ctrl+C in the console.
            self.report_reply_signal.emit(reply)
        else:
            self.report_reply(reply)

    def report_reply(self, reply):
        """Count the reply and log its outcome (error or headers)."""
        reply_url = unicode(reply.url().toString())
        self.reply_count += 1
        err = reply.error()
        if err:
            # deliberate cancellations are only debug-level noise
            l = self.log.debug if err == reply.OperationCanceledError else self.log.warn
            l("Reply error: %s - %d (%s)" % (reply_url, err, unicode(reply.errorString())))
        else:
            debug = []
            debug.append("Reply successful: %s" % reply_url)
            for h in reply.rawHeaderList():
                try:
                    d = '  %s: %s' % (h, reply.rawHeader(h))
                except:
                    d = '  %r: %r' % (h, reply.rawHeader(h))
                debug.append(d)
            self.log.debug('\n'.join(debug))

    def py_cookies(self):
        """Yield the jar's cookies converted to cookielib.Cookie objects,
        so they can be reused with Python HTTP libraries."""
        for c in self.cookie_jar.allCookies():
            name, value = map(bytes, (c.name(), c.value()))
            domain = bytes(c.domain())
            initial_dot = domain_specified = domain.startswith(b'.')
            secure = bool(c.isSecure())
            path = unicode(c.path()).strip().encode('utf-8')
            expires = c.expirationDate()
            is_session_cookie = False
            if expires.isValid():
                expires = expires.toTime_t()
            else:
                # no valid expiry => session cookie
                expires = None
                is_session_cookie = True
            path_specified = True
            if not path:
                path = b'/'
                path_specified = False
            c = Cookie(0,  # version
                    name, value,
                    None,  # port
                    False,  # port specified
                    domain, domain_specified, initial_dot, path,
                    path_specified,
                    secure, expires, is_session_cookie,
                    None,  # Comment
                    None,  # Comment URL
                    {}  # rest
            )
            yield c
# }}}
class LoadWatcher(QObject):  # {{{

    """One-shot observer of a page load: ``is_loading`` flips to False when
    the page's loadFinished signal fires, and ``loaded_ok`` records whether
    the load succeeded."""

    def __init__(self, page, parent=None):
        QObject.__init__(self, parent)
        self.is_loading, self.loaded_ok = True, None
        page.loadFinished.connect(self)
        self.page = page

    def __call__(self, ok):
        # Record the outcome, then detach so we fire at most once and do
        # not keep the page alive.
        self.loaded_ok, self.is_loading = ok, False
        self.page.loadFinished.disconnect(self)
        self.page = None
# }}}
class BrowserView(QDialog):  # {{{

    """Simple 1024x768 dialog that shows the given QWebPage in a QWebView,
    for visually inspecting an otherwise headless browser session."""

    def __init__(self, page, parent=None):
        QDialog.__init__(self, parent)
        self.l = l = QVBoxLayout(self)
        self.setLayout(l)
        self.webview = QWebView(self)
        l.addWidget(self.webview)
        self.resize(QSize(1024, 768))
        self.webview.setPage(page)
# }}}
class Browser(QObject, FormsMixin):
'''
Browser (WebKit with no GUI).
This browser is NOT thread safe. Use it in a single thread only! If you
need to run downloads in parallel threads, use multiple browsers (after
copying the cookies).
'''
def __init__(self,
# Logging. If None, uses a default log, which does not output
# debugging info
log=None,
# Receives a string and returns True/False. By default, returns
# True for all strings
confirm_callback=None,
# Prompt callback. Receives a msg string and a default value
# string. Should return the user input value or None if the user
# canceled the prompt. By default returns None.
prompt_callback=None,
# User agent to be used
user_agent=USER_AGENT,
# The size (in MB) of the on disk cache. Note that because the disk
# cache cannot be shared between different instances, we currently
# use a temporary dir for the cache, which is deleted on
# program exit. Set to zero to disable cache.
disk_cache_size=50,
# Enable Inspect element functionality
enable_developer_tools=False,
# Verbosity
verbosity=0,
# The default timeout (in seconds)
default_timeout=30
):
must_use_qt()
QObject.__init__(self)
FormsMixin.__init__(self)
if log is None:
log = ThreadSafeLog()
if verbosity:
log.filter_level = log.DEBUG
self.log = log
self.default_timeout = default_timeout
self.page = WebPage(log, confirm_callback=confirm_callback,
prompt_callback=prompt_callback, user_agent=user_agent,
enable_developer_tools=enable_developer_tools,
parent=self)
self.nam = NetworkAccessManager(log, disk_cache_size=disk_cache_size, parent=self)
self.page.setNetworkAccessManager(self.nam)
@property
def user_agent(self):
return self.page.user_agent
def _wait_for_load(self, timeout, url=None):
timeout = self.default_timeout if timeout is default_timeout else timeout
loop = QEventLoop(self)
start_time = time.time()
end_time = start_time + timeout
lw = LoadWatcher(self.page, parent=self)
while lw.is_loading and end_time > time.time():
if not loop.processEvents():
time.sleep(0.01)
if lw.is_loading:
raise Timeout('Loading of %r took longer than %d seconds'%(
url, timeout))
return lw.loaded_ok
def _wait_for_replies(self, reply_count, timeout):
final_time = time.time() + (self.default_timeout if timeout is default_timeout else timeout)
loop = QEventLoop(self)
while (time.time() < final_time and self.nam.reply_count <
reply_count):
loop.processEvents()
time.sleep(0.1)
if self.nam.reply_count < reply_count:
raise Timeout('Waiting for replies took longer than %d seconds' %
timeout)
def run_for_a_time(self, timeout):
final_time = time.time() + timeout
loop = QEventLoop(self)
while (time.time() < final_time):
if not loop.processEvents():
time.sleep(0.1)
def wait_for_element(self, selector, timeout=default_timeout):
timeout = self.default_timeout if timeout is default_timeout else timeout
start_time = time.time()
while self.css_select(selector) is None:
self.run_for_a_time(0.1)
if time.time() - start_time > timeout:
raise Timeout('DOM failed to load in %.1g seconds' % timeout)
return self.css_select(selector)
def visit(self, url, timeout=default_timeout):
'''
Open the page specified in URL and wait for it to complete loading.
Note that when this method returns, there may still be javascript
that needs to execute (this method returns when the loadFinished()
signal is called on QWebPage). This method will raise a Timeout
exception if loading takes more than timeout seconds.
Returns True if loading was successful, False otherwise.
'''
self.current_form = None
self.page.load_url(url)
return self._wait_for_load(timeout, url)
def back(self, wait_for_load=True, timeout=default_timeout):
'''
Like clicking the back button in the browser. Waits for loading to complete.
This method will raise a Timeout exception if loading takes more than timeout seconds.
Returns True if loading was successful, False otherwise.
'''
self.page.triggerAction(self.page.Back)
if wait_for_load:
return self._wait_for_load(timeout)
def stop(self):
'Stop loading of current page'
self.page.triggerAction(self.page.Stop)
def stop_scheduled_refresh(self):
'Stop any scheduled page refresh/reloads'
self.page.triggerAction(self.page.StopScheduledPageRefresh)
def reload(self, bypass_cache=False):
action = self.page.ReloadAndBypassCache if bypass_cache else self.page.Reload
self.page.triggerAction(action)
@property
def dom_ready(self):
return self.page.dom_loaded
def wait_till_dom_ready(self, timeout=default_timeout, url=None):
timeout = self.default_timeout if timeout is default_timeout else timeout
start_time = time.time()
while not self.dom_ready:
if time.time() - start_time > timeout:
raise Timeout('Loading of %r took longer than %d seconds'%(
url, timeout))
self.run_for_a_time(0.1)
def start_load(self, url, timeout=default_timeout, selector=None):
'''
Start the loading of the page at url and return once the DOM is ready,
sub-resources such as scripts/stylesheets/images/etc. may not have all
loaded.
'''
self.current_form = None
self.page.load_url(url)
if selector is not None:
self.wait_for_element(selector, timeout=timeout, url=url)
else:
self.wait_till_dom_ready(timeout=timeout, url=url)
def click(self, qwe_or_selector, wait_for_load=True, ajax_replies=0, timeout=default_timeout):
'''
Click the :class:`QWebElement` pointed to by qwe_or_selector.
:param wait_for_load: If you know that the click is going to cause a
new page to be loaded, set this to True to have
the method block until the new page is loaded
:para ajax_replies: Number of replies to wait for after clicking a link
that triggers some AJAX interaction
'''
initial_count = self.nam.reply_count
qwe = qwe_or_selector
if not isinstance(qwe, QWebElement):
qwe = self.css_select(qwe)
if qwe is None:
raise ElementNotFound('Failed to find element with selector: %r'
% qwe_or_selector)
js = '''
var e = document.createEvent('MouseEvents');
e.initEvent( 'click', true, true );
this.dispatchEvent(e);
'''
qwe.evaluateJavaScript(js)
if ajax_replies > 0:
reply_count = initial_count + ajax_replies
self._wait_for_replies(reply_count, timeout)
elif wait_for_load and not self._wait_for_load(timeout):
raise LoadError('Clicking resulted in a failed load')
def click_text_link(self, text_or_regex, selector='a[href]',
wait_for_load=True, ajax_replies=0, timeout=default_timeout):
target = None
for qwe in self.page.mainFrame().findAllElements(selector):
src = unicode(qwe.toPlainText())
if hasattr(text_or_regex, 'match') and text_or_regex.search(src):
target = qwe
break
if src.lower() == text_or_regex.lower():
target = qwe
break
if target is None:
raise ElementNotFound('No element matching %r with text %s found'%(
selector, text_or_regex))
return self.click(target, wait_for_load=wait_for_load,
ajax_replies=ajax_replies, timeout=timeout)
def css_select(self, selector, all=False):
if all:
return tuple(self.page.mainFrame().findAllElements(selector).toList())
ans = self.page.mainFrame().findFirstElement(selector)
if ans.isNull():
ans = None
return ans
    def get_image(self, qwe_or_selector):
        '''
        Return the image identified by qwe_or_selector as a QPixmap. If no such
        image exists, the returned pixmap will be null (check with isNull()).
        '''
        # Delegates entirely to the page object.
        return self.page.get_image(qwe_or_selector)
    def get_cached(self, url):
        '''
        Return the raw data for ``url`` from the network cache as a
        bytestring, or None if the url is not present in the cache.
        '''
        iod = self.nam.cache.data(QUrl(url))
        if iod is not None:
            try:
                return bytes(bytearray(iod.readAll()))
            finally:
                # Ensure the IODevice is closed right away, so that the
                # underlying file can be deleted if the space is needed,
                # otherwise on windows the file stays locked
                iod.close()
                del iod
    def wait_for_resources(self, urls, timeout=default_timeout):
        '''
        Wait up to ``timeout`` seconds for the given urls to appear in the
        network cache. Returns a dict mapping url -> raw data for every
        resource that was found; urls that never appeared are absent.
        '''
        timeout = self.default_timeout if timeout is default_timeout else timeout
        start_time = time.time()
        ans = {}
        urls = set(urls)

        def get_resources():
            # Move every url whose data is now cached from the pending set
            # into the answer dict.
            for url in tuple(urls):
                raw = self.get_cached(url)
                if raw is not None:
                    ans[url] = raw
                    urls.discard(url)

        # Poll the cache while the page is still loading and time remains.
        while urls and time.time() - start_time < timeout and not self.load_completed:
            get_resources()
            if urls:
                self.run_for_a_time(0.1)
        if urls:
            # Final sweep: resources may have landed in the cache after the
            # last poll (e.g. once the page finished loading).
            get_resources()
        return ans
    @property
    def load_completed(self):
        # True when the page reports a finished load. NOTE(review): the set
        # accepts both 'complete' and 'completed' — presumably to cover an
        # alternate ready-state spelling; confirm against the page class.
        return self.page.ready_state in {'complete', 'completed'}
    def get_resource(self, url, rtype='img', use_cache=True, timeout=default_timeout):
        '''
        Download a resource (image/stylesheet/script). The resource is
        downloaded by visiting a simple HTML page that contains only that
        resource. The resource is then returned from the cache (therefore, to
        use this method you must not disable the cache). If use_cache is True
        then the cache is queried before loading the resource. This can result
        in a stale object if the resource has changed on the server, however,
        it is a big performance boost in the common case, by avoiding a
        roundtrip to the server. The resource is returned as a bytestring or None
        if it could not be loaded.

        :param rtype: one of 'img', 'link' or 'script'.
        :raises RuntimeError: if the cache is disabled.
        :raises ValueError: for an unknown ``rtype``.
        '''
        if not hasattr(self.nam, 'cache'):
            raise RuntimeError('Cannot get resources when the cache is disabled')
        if use_cache:
            ans = self.get_cached(url)
            if ans is not None:
                return ans
        try:
            # Build a minimal page referencing only the requested resource,
            # so that loading the page forces the resource into the cache.
            tag = {
                'img': '<img src="%s">',
                'link': '<link href="%s"></link>',
                'script': '<script src="%s"></script>',
            }[rtype] % prepare_string_for_xml(url, attribute=True)
        except KeyError:
            raise ValueError('Unknown resource type: %s' % rtype)
        self.page.mainFrame().setHtml(
            '''<!DOCTYPE html><html><body><div>{0}</div></body></html>'''.format(tag))
        self._wait_for_load(timeout)
        # Implicitly returns None when the resource still is not cached.
        ans = self.get_cached(url)
        if ans is not None:
            return ans
    def download_file(self, url_or_selector_or_qwe, timeout=60):
        '''
        Download unsupported content: i.e. files the browser cannot handle
        itself or files marked for saving as files by the website. Useful if
        you want to download something like an epub file after authentication.

        You can pass in either the url to the file to be downloaded, or a
        selector that points to an element to be clicked on the current page
        which will cause the file to be downloaded.

        :raises NotAFile: if the target is content the browser handles itself.
        :raises Timeout: if the download does not finish within ``timeout``.
        '''
        # ans holds: [download started?, Timeout error or None, data chunks]
        ans = [False, None, []]
        loop = QEventLoop(self)
        start_time = time.time()
        end_time = start_time + timeout
        # Temporarily take over unsupported-content handling from the page.
        self.page.unsupportedContent.disconnect(self.page.on_unsupported_content)
        try:
            def download(reply):
                if ans[0]:
                    reply.abort()  # We only handle the first unsupported download
                    return
                ans[0] = True
                while not reply.isFinished() and end_time > time.time():
                    # Pump the event loop so the network reply makes progress.
                    if not loop.processEvents():
                        time.sleep(0.01)
                    raw = bytes(bytearray(reply.readAll()))
                    if raw:
                        ans[-1].append(raw)
                if not reply.isFinished():
                    ans[1] = Timeout('Loading of %r took longer than %d seconds'%(url_or_selector_or_qwe, timeout))
                # Drain any data still buffered in the reply.
                ans[-1].append(bytes(bytearray(reply.readAll())))
            self.page.unsupportedContent.connect(download)
            if hasattr(url_or_selector_or_qwe, 'rstrip') and re.match('[a-z]+://', url_or_selector_or_qwe) is not None:
                # We have a URL
                self.page.mainFrame().load(QUrl(url_or_selector_or_qwe))
            else:
                # Assume a selector (or element) on the current page.
                self.click(url_or_selector_or_qwe, wait_for_load=False)
            lw = LoadWatcher(self.page)
            # Wait for the download to start (or the page load to end).
            while not ans[0] and lw.is_loading and end_time > time.time():
                if not loop.processEvents():
                    time.sleep(0.01)
            if not ans[0]:
                raise NotAFile('%r does not point to a downloadable file. You can only'
                        ' use this method to download files that the browser cannot handle'
                        ' natively. Or files that are marked with the '
                        ' content-disposition: attachment header' % url_or_selector_or_qwe)
            if ans[1] is not None:
                raise ans[1]
            return b''.join(ans[-1])
        finally:
            # Restore the page's default unsupported-content handler.
            self.page.unsupportedContent.disconnect()
            self.page.unsupportedContent.connect(self.page.on_unsupported_content)
    def show_browser(self):
        '''
        Show the currently loaded web page in a window. Useful for debugging.
        Blocks until the window is closed.
        '''
        view = BrowserView(self.page)
        view.exec_()
    @property
    def cookies(self):
        '''
        Return all the cookies set currently as :class:`Cookie` objects.
        Returns expired cookies as well.
        '''
        # The network access manager owns the cookie jar.
        return list(self.nam.py_cookies())
    @property
    def html(self):
        # Serialization of the current page's DOM as a unicode HTML string.
        return unicode(self.page.mainFrame().toHtml())
    def blank(self):
        # Navigate to about:blank to release the current page. The tiny
        # timeout means we do not wait for the load to finish; the resulting
        # Timeout is expected and swallowed deliberately.
        try:
            self.visit('about:blank', timeout=0.01)
        except Timeout:
            pass
    def close(self):
        # Stop any in-progress loads, drop the current page and release the
        # network resources. The browser object is unusable afterwards.
        self.stop()
        self.blank()
        self.stop()
        # Swap in a fresh, unused disk cache so the old cache's files can be
        # cleaned up, then drop all references to page/network objects.
        self.nam.setCache(QNetworkDiskCache())
        self.nam.cache = None
        self.nam = self.page = None
def __enter__(self):
pass
    def __exit__(self, *args):
        # Always close the browser on context exit. Returning None (falsy)
        # means any in-flight exception is propagated, not suppressed.
        self.close()
|
nozuono/calibre-webserver
|
src/calibre/web/jsbrowser/browser.py
|
Python
|
gpl-3.0
| 27,188
|
[
"VisIt"
] |
12a88fb12bcab16139106867466d2f59270624f6b11eb2ab42e69ded80e5a02d
|
#!/usr/bin/env python3
# Copyright (C) 2015-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# #
# ESPResSo++ Python script for tabulated GROMACS simulation #
# #
########################################################################
# This example reads in a gromacs water system (tabulated interactions) treated with reaction field and runs a path-integral (PI) simulation using the pathintegral.py script
# ! WARNINING ! this is still an experimental feature!!
#
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import logging
import copy
import math
from espressopp import Real3D, Int3D
from espressopp.tools import gromacs
from espressopp.tools import decomp
from espressopp.tools import timers
from espressopp.tools import pathintegral
def genTabPotentials(tabfilesnb):
    """Convert GROMACS .xvg table files to espressopp .tab files and build
    Tabulated potentials for each pair of particle types.

    Relies on the module-level globals sigma, epsilon, c6, c12 and rc.
    Returns a dict mapping "T1_T2" (e.g. "O_O") -> Tabulated potential.
    """
    potentials = {}
    for fg in tabfilesnb:
        fe = fg.split(".")[0]+".tab" # name of espressopp file
        gromacs.convertTable(fg, fe, sigma, epsilon, c6, c12)
        pot = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=rc)
        # NOTE(review): type names are taken from fixed character positions
        # of the filename ("table_X_Y.xvg" -> chars 6 and 8); this breaks
        # for multi-character type names — verify the naming convention.
        t1, t2 = fg[6], fg[8] # type 1, type 2
        potentials.update({t1+"_"+t2: pot})
        print("created", t1, t2, fe)
    return potentials
# simulation parameters (nvt = False is nve)
steps = 100
# number of analysis/output intervals; NOTE(review): steps/1 yields a float
# under Python 3, hence the int(...) conversions at the loop below.
check = steps/1
rc = 0.9 # Verlet list cutoff
skin = 0.02
timestep = 0.0002

# parameters to convert GROMACS tabulated potential file
sigma = 1.0
epsilon = 1.0
c6 = 1.0
c12 = 1.0

# GROMACS setup files
grofile = "conf.gro"
topfile = "topol.top"

# this calls the gromacs parser for processing the top file (and included files) and the conf file
# The variables at the beginning defaults, types, etc... can be found by calling
# gromacs.read(grofile,topfile) without return values. It then prints out the variables to be unpacked
defaults, types, atomtypes, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, resname, resid, Lx, Ly, Lz =gromacs.read(grofile,topfile)

######################################################################
##  IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE  ##
######################################################################
#types, bonds, angles, dihedrals, x, y, z, vx, vy, vz, Lx, Ly, Lz = gromacs.read(grofile,topfile)
#defaults, types, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, Lx, Ly, Lz= gromacs.read(grofile,topfile)
num_particles = len(x)
density = num_particles / (Lx * Ly * Lz)
size = (Lx, Ly, Lz)

# basic system setup: RNG, orthorhombic box, domain decomposition storage
sys.stdout.write('Setting up simulation ...\n')
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin

comm = MPI.COMM_WORLD
nodeGrid = decomp.nodeGrid(comm.size,size,rc,skin)
cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)

# setting up GROMACS interaction stuff
# create a force capped Lennard-Jones interaction that uses a verlet list
verletlist = espressopp.VerletList(system, rc)
#interaction = espressopp.interaction.VerletListLennardJonesGromacs(verletlist)

# add particles to the system and then decompose
props = ['id', 'pos', 'v', 'type', 'mass', 'q']
allParticles = []
for pid in range(num_particles):
    part = [pid + 1, Real3D(x[pid], y[pid], z[pid]),
            Real3D(0, 0, 0), types[pid], masses[pid], charges[pid]]
    allParticles.append(part)
system.storage.addParticles(allParticles, *props)
#system.storage.decompose()

# set up LJ interaction according to the parameters read from the .top file
#ljinteraction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist,rc)

########## tabulated nb interactions ############
tabfilesnb = ["table_O_O.xvg", "table_H_O.xvg", "table_H_H.xvg"]
potentials = genTabPotentials(tabfilesnb)
tabulatedinteraction = espressopp.interaction.VerletListTabulated(verletlist)
# type indices: 0 = O, 1 = H (per the table files above)
tabulatedinteraction.setPotential(0, 0, potentials["O_O"])
tabulatedinteraction.setPotential(0, 1, potentials["H_O"])
tabulatedinteraction.setPotential(1, 1, potentials["H_H"])
system.addInteraction(tabulatedinteraction)

# set up angle interactions according to the parameters read from the .top file
angleinteractions=gromacs.setAngleInteractions(system, angletypes, angletypeparams)

# set up bonded interactions according to the parameters read from the .top file
bondedinteractions=gromacs.setBondedInteractions(system, bondtypes, bondtypeparams)

# exlusions, i.e. pairs of atoms not considered for the non-bonded part. Those are defined either by bonds which automatically generate an exclusion. Or by the nregxcl variable
verletlist.exclude(exclusions)

# langevin thermostat
langevin = espressopp.integrator.LangevinThermostat(system)
langevin.gamma = 2.0
langevin.temperature = 2.4942 # kT in gromacs units
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.addExtension(langevin)
integrator.dt = timestep

# create path integral representation of the system with P beads
# the thermostat is set to an elevated temperature of T'=TP. Therefore
# the non-bonded energies do not need to be rescaled
pathintegral.createPathintegralSystem(allParticles, props, types, system, exclusions, integrator, langevin, rc, P=16, disableVVL=True)

system.storage.decompose()

# particle count changed: each physical particle is now represented by P beads
num_particles = int(espressopp.analysis.NPart(system).compute())

# print simulation parameters
print('')
print('number of particles =', num_particles)
print('density = %.4f' % (density))
print('rc =', rc)
print('dt =', integrator.dt)
print('skin =', system.skin)
print('steps =', steps)
print('NodeGrid = %s' % (nodeGrid,))
print('CellGrid = %s' % (cellGrid,))
print('')

# analysis observables
configurations = espressopp.analysis.Configurations(system)
configurations.gather()
temperature = espressopp.analysis.Temperature(system)
# NOTE(review): pressure and pressureTensor are created but never used below.
pressure = espressopp.analysis.Pressure(system)
pressureTensor = espressopp.analysis.PressureTensor(system)

print("i*timestep,Eb, EAng, ETab, Ek, Etotal T")
fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n'
# NOTE(review): outfile is never closed at the end of the script.
outfile = open("esp.dat", "w")
start_time = time.process_time()

espressopp.tools.psfwrite("system.psf", system)
espressopp.tools.fastwritexyz("traj.xyz", system, append=False, scale=10)

# main loop: alternate measurement/output and integration
for i in range(int(check)):
    T = temperature.compute()
    #P = pressure.compute()
    Eb = 0
    EAng = 0
    ETab=0
    for bd in list(bondedinteractions.values()): Eb+=bd.computeEnergy()
    for ang in list(angleinteractions.values()): EAng+=ang.computeEnergy()
    ETab= tabulatedinteraction.computeEnergy()
    T = temperature.compute()
    Ek = 0.5 * T * (3 * num_particles)
    Etotal = Ek+Eb+EAng+ETab
    print((fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T)), end='')
    outfile.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T))
    espressopp.tools.fastwritexyz("traj.xyz", system, append=True, scale=10)
    integrator.run(int(steps//check))

# print timings and neighbor list information
end_time = time.process_time()
timers.show(integrator.getTimers(), precision=2)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPU time = %.1f\n' % (end_time - start_time))
|
espressopp/espressopp
|
examples/pathintegral_water/water.py
|
Python
|
gpl-3.0
| 8,159
|
[
"ESPResSo",
"Gromacs"
] |
4ac73773c31dd34bfcd04cfdea62e8a3567e8b5d9b24185d7a81f8372ecf0a2f
|
# Public API of this module: histogram-based thresholding algorithms.
__all__ = ['threshold_adaptive',
           'threshold_otsu',
           'threshold_yen',
           'threshold_isodata',
           'threshold_li', ]
import numpy as np
import scipy.ndimage
from ..exposure import histogram
from .._shared.utils import assert_nD
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Applies an adaptive threshold to an array.

    Also known as local or dynamic thresholding where the threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a
    given function using the 'generic' method.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Uneven size of pixel neighborhood which is used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Method used to determine adaptive threshold for local neighbourhood in
        weighted mean image.

        * 'generic': use custom function (see `param` parameter)
        * 'gaussian': apply gaussian filter (see `param` parameter for custom\
                      sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter

        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        cval is the value when mode is equal to 'constant'.
        Default is 'reflect'.
    param : {int, function}, optional
        Either specify sigma for 'gaussian' method or function object for
        'generic' method. This functions takes the flat array of local
        neighbourhood as a single argument and returns the calculated
        threshold for the centre pixel.

    Returns
    -------
    threshold : (N, M) ndarray
        Thresholded binary image

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported names.

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    assert_nD(image, 2)
    thresh_image = np.zeros(image.shape, 'double')
    if method == 'generic':
        scipy.ndimage.generic_filter(image, param, block_size,
                                     output=thresh_image, mode=mode)
    elif method == 'gaussian':
        if param is None:
            # automatically determine sigma which covers > 99% of distribution
            sigma = (block_size - 1) / 6.0
        else:
            sigma = param
        scipy.ndimage.gaussian_filter(image, sigma, output=thresh_image,
                                      mode=mode)
    elif method == 'mean':
        mask = 1. / block_size * np.ones((block_size,))
        # separation of filters to speedup convolution
        scipy.ndimage.convolve1d(image, mask, axis=0, output=thresh_image,
                                 mode=mode)
        scipy.ndimage.convolve1d(thresh_image, mask, axis=1,
                                 output=thresh_image, mode=mode)
    elif method == 'median':
        scipy.ndimage.median_filter(image, block_size, output=thresh_image,
                                    mode=mode)
    else:
        # Previously an unknown method fell through silently, leaving
        # thresh_image all-zero and producing a meaningless result.
        raise ValueError("Invalid method specified. Please use 'generic', "
                         "'gaussian', 'mean' or 'median'.")

    return image > (thresh_image - offset)
def threshold_otsu(image, nbins=256):
    """Return threshold value based on Otsu's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.

    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh
    """
    counts, bin_centers = histogram(image.ravel(), nbins)
    counts = counts.astype(float)

    # Cumulative pixel counts of the low class (<= split) and the high
    # class (> split), for every candidate split point.
    weight_lo = np.cumsum(counts)
    weight_hi = np.cumsum(counts[::-1])[::-1]

    # Corresponding cumulative class means.
    mean_lo = np.cumsum(counts * bin_centers) / weight_lo
    mean_hi = (np.cumsum((counts * bin_centers)[::-1]) / weight_hi[::-1])[::-1]

    # Inter-class variance for each split. The arrays are clipped by one
    # element so the last low-class entry pairs with the first high-class
    # entry above it (no valid split exists past the final bin).
    between_var = (weight_lo[:-1] * weight_hi[1:] *
                   (mean_lo[:-1] - mean_hi[1:]) ** 2)

    # The best threshold maximizes the inter-class variance.
    return bin_centers[:-1][np.argmax(between_var)]
def threshold_yen(image, nbins=256):
    """Return threshold value based on Yen's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.

    References
    ----------
    .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
           for Automatic Multilevel Thresholding" IEEE Trans. on Image
           Processing, 4(3): 370-378
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_yen(image)
    >>> binary = image <= thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)

    # A constant integer image yields a single-bin histogram; the only
    # sensible threshold is that single value.
    if bin_centers.size == 1:
        return bin_centers[0]

    # Probability mass function and its cumulative forms.
    pmf = hist.astype(np.float32) / hist.sum()
    cum_p = np.cumsum(pmf)
    cum_p_sq = np.cumsum(pmf ** 2)
    # Cumulative sum of squares taken from the high end of the histogram.
    rev_cum_p_sq = np.cumsum(pmf[::-1] ** 2)[::-1]

    # Yen's criterion; the high-class array is shifted by one so that the
    # P1[:-1] terms avoid the -inf that a zero tail would produce (the
    # ImageJ implementation replaces those values by zero instead).
    crit = np.log(((cum_p_sq[:-1] * rev_cum_p_sq[1:]) ** -1) *
                  (cum_p[:-1] * (1.0 - cum_p[:-1])) ** 2)
    return bin_centers[crit.argmax()]
def threshold_isodata(image, nbins=256, return_all=False):
    """Return threshold value(s) based on ISODATA method.

    Histogram-based threshold, known as Ridler-Calvard method or inter-means.
    Threshold values returned satisfy the following equality:

    `threshold = (image[image <= threshold].mean() +`
    `image[image > threshold].mean()) / 2.0`

    That is, returned thresholds are intensities that separate the image into
    two groups of pixels, where the threshold intensity is midway between the
    mean intensities of these groups.

    For integer images, the above equality holds to within one; for floating-
    point images, the equality holds to within the histogram bin-width.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    return_all : bool, optional
        If False (default), return only the lowest threshold that satisfies
        the above equality. If True, return all valid thresholds.

    Returns
    -------
    threshold : float or int or array
        Threshold value(s).

    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
    .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [4] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)

    # image only contains one unique value
    if len(bin_centers) == 1:
        if return_all:
            return bin_centers
        else:
            return bin_centers[0]

    hist = hist.astype(np.float32)

    # csuml and csumh contain the count of pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively
    csuml = np.cumsum(hist)
    csumh = np.cumsum(hist[::-1])[::-1] - hist

    # intensity_sum contains the total pixel intensity from each bin
    intensity_sum = hist * bin_centers

    # l and h contain average value of all pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively.
    # Note that since exp.histogram does not include empty bins at the low or
    # high end of the range, csuml and csumh are strictly > 0, except in the
    # last bin of csumh, which is zero by construction.
    # So no worries about division by zero in the following lines, except
    # for the last bin, but we can ignore that because no valid threshold
    # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
    # errors.
    csumh[-1] = 1
    # (l = low-class running mean, h = high-class running mean.)
    l = np.cumsum(intensity_sum) / csuml
    h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh

    # isodata finds threshold values that meet the criterion t = (l + m)/2
    # where l is the mean of all pixels <= t and h is the mean of all pixels
    # > t, as calculated above. So we are looking for places where
    # (l + m) / 2 equals the intensity value for which those l and m figures
    # were calculated -- which is, of course, the histogram bin centers.
    # We only require this equality to be within the precision of the bin
    # width, of course.
    all_mean = (l + h) / 2.0
    bin_width = bin_centers[1] - bin_centers[0]

    # Look only at thresholds that are below the actual all_mean value,
    # for consistency with the threshold being included in the lower pixel
    # group. Otherwise can get thresholds that are not actually fixed-points
    # of the isodata algorithm. For float images, this matters less, since
    # there really can't be any guarantees anymore anyway.
    distances = all_mean - bin_centers
    thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]

    if return_all:
        return thresholds
    else:
        return thresholds[0]
def threshold_li(image):
    """Return threshold value based on adaptation of Li's Minimum Cross
    Entropy method.

    Parameters
    ----------
    image : array
        Input image. The input is not modified.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities more than
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
           Pattern Recognition, 26(4): 617-625
    .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
           Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165
           http://citeseer.ist.psu.edu/sezgin04survey.html
    .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_li(image)
    >>> binary = image > thresh
    """
    # Requires positive image (because of log(mean)), so shift by the
    # minimum. Bug fix: work on a shifted *copy* — the original code
    # subtracted the offset in place, mutating the caller's array.
    offset = image.min()
    imrange = image.max() - offset
    image = image - offset

    # Can not use fixed tolerance for float image
    tolerance = 0.5 * imrange / 256.0

    # Calculate the mean gray-level and use it as the initial estimate
    mean = image.mean()
    new_thresh = mean
    old_thresh = new_thresh + 2 * tolerance

    # Bug fix: for a constant image (tolerance == 0) the loop below never
    # runs, which previously left `threshold` unbound (NameError). The only
    # sensible threshold there is the constant value itself.
    threshold = new_thresh

    # Stop the iterations when the difference between the
    # new and old threshold values is less than the tolerance
    while abs(new_thresh - old_thresh) > tolerance:
        old_thresh = new_thresh
        threshold = old_thresh + tolerance   # range
        # Calculate the means of background and object pixels
        mean_back = image[image <= threshold].mean()
        mean_obj = image[image > threshold].mean()

        temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))

        if temp < 0:
            new_thresh = temp - tolerance
        else:
            new_thresh = temp + tolerance

    # Undo the shift so the threshold is in the original intensity scale.
    return threshold + offset
|
bennlich/scikit-image
|
skimage/filters/thresholding.py
|
Python
|
bsd-3-clause
| 13,942
|
[
"Gaussian"
] |
d6c611cce65d5bc0caf76306d205bb7fb6d66796fde25834677ddaccbaf5efab
|
"""
Example Kernels
---------------
Figure 6.2
A comparison of the three kernels used for density estimation in figure 6.3:
the Gaussian kernel (eq. 6.2), the top-hat kernel (eq. 6.3), and the
exponential kernel (eq. 6.4).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt

#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

#------------------------------------------------------------
# Compute Kernels.
x = np.linspace(-5, 5, 10000)
# NOTE(review): dx (the grid spacing) is computed but never used below;
# presumably a leftover from a normalization check.
dx = x[1] - x[0]

# Gaussian kernel (eq. 6.2): standard normal density
gauss = (1. / np.sqrt(2 * np.pi)) * np.exp(-0.5 * x ** 2)

# Exponential kernel (eq. 6.4)
exp = 0.5 * np.exp(-abs(x))

# Top-hat kernel (eq. 6.3): constant 1/2 on [-1, 1], zero elsewhere
tophat = 0.5 * np.ones_like(x)
tophat[abs(x) > 1] = 0

#------------------------------------------------------------
# Plot the kernels
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111)

ax.plot(x, gauss, '-', c='black', lw=3, label='Gaussian')
ax.plot(x, exp, '-', c='#666666', lw=2, label='Exponential')
ax.plot(x, tophat, '-', c='#999999', lw=1, label='Top-hat')

ax.legend(loc=1)

ax.set_xlabel('$u$')
ax.set_ylabel('$K(u)$')

ax.set_xlim(-5, 5)
ax.set_ylim(0, 0.6001)

plt.show()
|
eramirem/astroML
|
book_figures/chapter6/fig_kernels.py
|
Python
|
bsd-2-clause
| 1,770
|
[
"Gaussian"
] |
7e79e65bee0b56f42c60bc6007c00d98e0311944b4653183cd2942f5e9b29de9
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import socket
import os
import llnl.util.tty as tty
from os import environ as env
def cmake_cache_entry(name, value, vtype="PATH"):
    """
    Helper that creates CMake cache entry strings used in
    'host-config' files.

    Parameters
    ----------
    name : str
        CMake variable name.
    value : str
        Value of the cache entry.
    vtype : str, optional
        CMake cache entry type (default "PATH", e.g. "BOOL" or "STRING").
        The default preserves the previous hard-coded behavior, so all
        existing two-argument callers are unaffected.
    """
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Ascent(Package):
    """Ascent is an open source many-core capable lightweight in situ
    visualization and analysis infrastructure for multi-physics HPC
    simulations."""

    homepage = "https://github.com/Alpine-DAV/ascent"
    url = "https://github.com/Alpine-DAV/ascent"

    maintainers = ['cyrush']

    version('develop',
            git='https://github.com/Alpine-DAV/ascent.git',
            branch='develop',
            submodules=True)

    ###########################################################################
    # package variants
    ###########################################################################
    # NOTE(review): several variant descriptions below say "Conduit" —
    # presumably copied from the conduit package; verify the wording.
    variant("shared", default=True, description="Build Conduit as shared libs")
    variant("cmake", default=True,
            description="Build CMake (if off, attempt to use cmake from PATH)")
    variant("mpi", default=True, description="Build Ascent MPI Support")

    # variants for python support
    variant("python", default=True, description="Build Conduit Python support")

    # variants for runtime features
    variant("vtkh", default=True,
            description="Build VTK-h filter and rendering support")
    variant("tbb", default=True, description="Build tbb support")
    variant("cuda", default=False, description="Build cuda support")
    variant("adios", default=True, description="Build Adios filter support")

    # variants for dev-tools (docs, etc)
    variant("doc", default=False, description="Build Conduit's documentation")

    ###########################################################################
    # package dependencies
    ###########################################################################
    depends_on("cmake", when="+cmake")
    depends_on("conduit")

    #######################
    # Python
    #######################
    # we need a shared version of python b/c linking with static python lib
    # causes duplicate state issues when running compiled python modules.
    # NOTE(review): this dependency is unconditional even though a "python"
    # variant exists — confirm it should not be guarded with when="+python".
    depends_on("python+shared")
    extends("python", when="+python")
    # TODO: blas and lapack are disabled due to build
    # issues Cyrus experienced on OSX 10.11.6
    depends_on("py-numpy~blas~lapack", when="+python", type=('build', 'run'))

    #######################
    # MPI
    #######################
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+python+mpi")

    #############################
    # TPLs for Runtime Features
    #############################
    depends_on("vtkh", when="+vtkh")
    depends_on("vtkh+cuda", when="+vtkh+cuda")
    depends_on("adios", when="+adios")

    #######################
    # Documentation related
    #######################
    depends_on("py-sphinx", when="+python+doc", type='build')
def install(self, spec, prefix):
    """
    Configure, build, and install Ascent using a spack-generated
    cmake host-config file.
    """
    with working_dir('spack-build', create=True):
        host_cfg_fname = self.create_host_config(spec, prefix)
        if "+shared" in spec:
            cmake_args = list(std_cmake_args)
        else:
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            cmake_args = [arg for arg in std_cmake_args
                          if arg.count("RPATH") == 0]
        cmake_args += ["-C", host_cfg_fname, "../src"]
        cmake(*cmake_args)
        make()
        make("install")
        # TODO also copy host_cfg_fname into install
def create_host_config(self, spec, prefix):
    """
    This method creates a 'host-config' file that specifies
    all of the options used to configure and build ascent.

    Returns the absolute path of the generated cmake cache file.
    """
    #######################
    # Compiler Info
    #######################
    c_compiler = env["SPACK_CC"]
    cpp_compiler = env["SPACK_CXX"]
    f_compiler = None
    if self.compiler.fc:
        # even if this is set, it may not exist so do one more sanity check
        if os.path.isfile(env["SPACK_FC"]):
            f_compiler = env["SPACK_FC"]
    #######################################################################
    # By directly fetching the names of the actual compilers we appear
    # to doing something evil here, but this is necessary to create a
    # 'host config' file that works outside of the spack install env.
    #######################################################################
    sys_type = spec.architecture
    # if on llnl systems, we can use the SYS_TYPE
    if "SYS_TYPE" in env:
        sys_type = env["SYS_TYPE"]
    ##############################################
    # Find and record what CMake is used
    ##############################################
    if "+cmake" in spec:
        cmake_exe = spec['cmake'].command.path
    else:
        # fall back to whatever cmake is on PATH
        cmake_exe = which("cmake")
        if cmake_exe is None:
            msg = 'failed to find CMake (and cmake variant is off)'
            raise RuntimeError(msg)
        cmake_exe = cmake_exe.path
    # host-config name encodes host, system type and compiler
    host_cfg_fname = "%s-%s-%s.cmake" % (socket.gethostname(),
                                         sys_type,
                                         spec.compiler)
    cfg = open(host_cfg_fname, "w")
    cfg.write("##################################\n")
    cfg.write("# spack generated host-config\n")
    cfg.write("##################################\n")
    cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
    cfg.write("##################################\n\n")
    # Include path to cmake for reference
    cfg.write("# cmake from spack \n")
    cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
    #######################
    # Compiler Settings
    #######################
    cfg.write("#######\n")
    cfg.write("# using %s compiler spec\n" % spec.compiler)
    cfg.write("#######\n\n")
    cfg.write("# c compiler used by spack\n")
    cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
    cfg.write("# cpp compiler used by spack\n")
    cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
    cfg.write("# fortran compiler used by spack\n")
    if f_compiler is not None:
        cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "ON"))
        cfg.write(cmake_cache_entry("CMAKE_Fortran_COMPILER", f_compiler))
    else:
        cfg.write("# no fortran compiler found\n\n")
        cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "OFF"))
    #######################################################################
    # Core Dependencies
    #######################################################################
    #######################
    # Conduit
    #######################
    cfg.write("# conduit from spack \n")
    cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))
    #######################################################################
    # Optional Dependencies
    #######################################################################
    #######################
    # Python
    #######################
    cfg.write("# Python Support\n")
    if "+python" in spec:
        cfg.write("# Enable python module builds\n")
        cfg.write(cmake_cache_entry("ENABLE_PYTHON", "ON"))
        cfg.write("# python from spack \n")
        cfg.write(cmake_cache_entry("PYTHON_EXECUTABLE",
                                    spec['python'].command.path))
        # install module to standard style site packages dir
        # so we can support spack activate
        cfg.write(cmake_cache_entry("PYTHON_MODULE_INSTALL_PREFIX",
                                    site_packages_dir))
    else:
        cfg.write(cmake_cache_entry("ENABLE_PYTHON", "OFF"))
    # docs need both sphinx (via the python variant) and doxygen
    if "+doc" in spec:
        cfg.write(cmake_cache_entry("ENABLE_DOCS", "ON"))
        cfg.write("# sphinx from spack \n")
        sphinx_build_exe = join_path(spec['py-sphinx'].prefix.bin,
                                     "sphinx-build")
        cfg.write(cmake_cache_entry("SPHINX_EXECUTABLE", sphinx_build_exe))
        cfg.write("# doxygen from uberenv\n")
        doxygen_exe = spec['doxygen'].command.path
        cfg.write(cmake_cache_entry("DOXYGEN_EXECUTABLE", doxygen_exe))
    else:
        cfg.write(cmake_cache_entry("ENABLE_DOCS", "OFF"))
    #######################
    # MPI
    #######################
    cfg.write("# MPI Support\n")
    if "+mpi" in spec:
        cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
        cfg.write(cmake_cache_entry("MPI_C_COMPILER", spec['mpi'].mpicc))
        cfg.write(cmake_cache_entry("MPI_CXX_COMPILER",
                                    spec['mpi'].mpicxx))
        cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER",
                                    spec['mpi'].mpifc))
    else:
        cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))
    #######################
    # CUDA
    #######################
    cfg.write("# CUDA Support\n")
    if "+cuda" in spec:
        cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
    else:
        cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))
    #######################
    # VTK-h
    #######################
    cfg.write("# vtk-h support \n")
    if "+vtkh" in spec:
        cfg.write("# tbb from spack\n")
        cfg.write(cmake_cache_entry("TBB_DIR", spec['tbb'].prefix))
        cfg.write("# vtk-m from spack\n")
        cfg.write(cmake_cache_entry("VTKM_DIR", spec['vtkm'].prefix))
        cfg.write("# vtk-h from spack\n")
        cfg.write(cmake_cache_entry("VTKH_DIR", spec['vtkh'].prefix))
    else:
        cfg.write("# vtk-h not built by spack \n")
    #######################
    # Adios
    #######################
    cfg.write("# adios support\n")
    if "+adios" in spec:
        cfg.write(cmake_cache_entry("ADIOS_DIR", spec['adios'].prefix))
    else:
        cfg.write("# adios not built by spack \n")
    cfg.write("##################################\n")
    cfg.write("# end spack generated host-config\n")
    cfg.write("##################################\n")
    cfg.close()
    # report the absolute path so the file is usable from anywhere
    host_cfg_fname = os.path.abspath(host_cfg_fname)
    tty.info("spack generated conduit host-config file: " + host_cfg_fname)
    return host_cfg_fname
|
skosukhin/spack
|
var/spack/repos/builtin/packages/ascent/package.py
|
Python
|
lgpl-2.1
| 12,381
|
[
"VTK"
] |
0067fbd938d8af41542ccf3e7b9e5a31b97a66bc8cd24af7ba101179c6fc9ec6
|
# GromacsWrapper plugins
# Copyright (c) 2009-2012 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`analysis.plugins` -- Plugin Modules
=========================================
Classes for :class:`gromacs.analysis.core.Simulation` that provide
code to analyze trajectory data.
New analysis plugins should follow the API sketched out in
:mod:`gromacs.analysis.core`; see an example for use there.
List of plugins
---------------
Right now the number of plugins is limited. Feel free to contribute your own by
sending it to the `package author`_. You will be acknowledged in the list below.
.. _`package author`: oliver.beckstein@asu.edu
.. table:: Plugins for analysis.
========================== ========= ========================================
plugin author description
========================== ========= ========================================
:class:`CysAccessibility` [#OB]_ estimate accessibility of Cys
residues by water
:class:`HelixBundle` [#OB]_ g_bundle analysis of helices
:class:`Distances` [#OB]_ time series of distances
:class:`MinDistances` [#OB]_ time series of shortest distances
:class:`COM` [#OB]_ time series of centres of mass
:class:`Dihedrals` [#OB]_ analysis of dihedral angles
:class:`RMSF` [#OB]_ calculate root mean square fluctuations
:class:`RMSD` [#OB]_ calculate root mean square distance
:class:`Energy` [#OB]_ terms from the energy file
:class:`HBonds` [#OB]_ hydrogen bond analysis, in particular
hydrogen bond existence
========================== ========= ========================================
.. table:: Plugins for trajectory manipulation and status queries.
========================== ========= ========================================
plugin author description
========================== ========= ========================================
:class:`Trajectories` [#OB]_ write xy-fitted trajectories
:class:`FitCompact` [#OB]_ write fitted trajectories
:class:`StripWater` [#OB]_ remove solvent (and optionally fit to
reference)
:class:`ProteinOnly`       [#OB]_    remove all atoms except the Protein
(and optionally fit to reference)
:class:`Ls` [#OB]_ simple :program:`ls` (for testing)
========================== ========= ========================================
.. rubric:: Footnotes
.. [#OB] Oliver Beckstein <oliver.beckstein@asu.edu>
Plugin classes
--------------
.. autoclass:: CysAccessibility
:members:
.. autoclass:: HelixBundle
:members:
.. autoclass:: Distances
:members:
.. autoclass:: MinDistances
:members:
.. autoclass:: COM
:members:
.. autoclass:: Dihedrals
:members:
.. autoclass:: RMSF
:members:
.. autoclass:: RMSD
:members:
.. autoclass:: Energy
:members:
.. autoclass:: HBonds
:members:
.. autoclass:: Trajectories
:members:
.. autoclass:: FitCompact
:members:
.. autoclass:: StripWater
:members:
.. autoclass:: ProteinOnly
:members:
.. autoclass:: Ls
:members:
Developer notes
---------------
In principle all that needs to be done to automatically load plugins
is to add their name to :data:`__plugins__`. See the source code for
further comments and how the auto loading of plugins is done.
.. autodata:: __plugins__
.. autodata:: __plugin_classes__
"""
__docformat__ = "restructuredtext en"

#: All available plugin names are listed here. Because this is used to
#: automatically set up imports a module file *must* be named like the
#: plugin class it contains but in all lower case. For example, the
#: *Distances* plugin class is contained in the module *distances* (the
#: file ``plugins/distances.py``).
__plugins__ = ['CysAccessibility', 'Distances', 'MinDistances', 'Dihedrals',
               'COM', 'RMSF', 'RMSD', 'Energy', 'HelixBundle', 'HBonds',
               'Trajectories', 'FitCompact', 'StripWater', 'ProteinOnly', 'Ls',
               ]

__all__ = []
__all__.extend(__plugins__)

# 1. Load all modules
#    (module is plugin name in lower case)
#    (note: '__import__(m.lower(), fromlist=[m])' does not work like
#    'from m.lower() import m', hence the explicit globals/locals form)
_modules = {p: __import__(p.lower(), globals(), locals()) for p in __plugins__}

# 2. Get the classes
#: Gives access to all available plugin classes (or use the module __dict__)
__plugin_classes__ = {p: M.__dict__[p] for p,M in _modules.items()}

# 3. add to the name space (bind classes to names)
globals().update(__plugin_classes__)

# the module mapping is only needed during this import dance
del _modules
|
jandom/GromacsWrapper
|
gromacs/analysis/plugins/__init__.py
|
Python
|
gpl-3.0
| 4,957
|
[
"Gromacs"
] |
8b6850a2d6e1dbbd43fa97a206ecdf988f0d11496651810dfd8bd3b5a724b6e9
|
"""
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Chris Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
from mne.viz import centers_to_edges
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.

sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

n_times = 1024  # Just over 1 second epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)

# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum())  # Ramping
data = noise + signal

# NOTE(review): threshold is in the raw simulation units (randn scale), so
# with this value nothing is actually rejected -- confirm that is the intent.
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
# one event per simulated epoch, spaced n_times samples apart
for k in range(n_epochs):
    events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
                     reject=reject)
epochs.average().plot()
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.

freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3.  # Define our color limits.

###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**

n_cycles = freqs / 2.
time_bandwidth = 2.0  # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
           title='Sim: Least smoothing, most variance')

###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**

n_cycles = freqs  # Increase time-window length to 1 second.
time_bandwidth = 4.0  # Same frequency-smoothing as (1), 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
           title='Sim: Less frequency smoothing, more time smoothing')

###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**

n_cycles = freqs / 2.
time_bandwidth = 8.0  # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
                       time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
           title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and, the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors. In this case,
# we control the spectral / temporal resolution by specifying different widths
# of the gaussian window using the ``width`` parameter.

fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
    power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
    power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
               colorbar=False)
    ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()

###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using morlet wavelets, which are a sinusoidal wave
# with a gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.

fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
    power = tfr_morlet(epochs, freqs=freqs,
                       n_cycles=n_cycles, return_itc=False)
    power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
               axes=ax, show=False, colorbar=False)
    # label fixed vs. frequency-scaled cycle counts differently
    n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
    ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.

n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
                   n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
              title='Using Morlet wavelets and EpochsTFR', show=False)

###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_freqs, n_times)``.
# (NOTE(review): with ``output='avg_power'`` the epochs axis is averaged away,
# which is why ``power[0]`` below indexes a channel -- confirm against the
# tfr_array_morlet docs.)

power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
                         freqs=freqs, n_cycles=n_cycles,
                         output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
x, y = centers_to_edges(epochs.times * 1000, freqs)
mesh = ax.pcolormesh(x, y, power[0], cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
|
rkmaddox/mne-python
|
examples/time_frequency/time_frequency_simulated.py
|
Python
|
bsd-3-clause
| 8,475
|
[
"Gaussian"
] |
948472db6ec443845ca68ce537296ff24282e848f9f0fc358e655df351187d53
|
'''
@file : testFile14.py
@author (A) : Madhu Kumar Dadi.
@project : Social List
@function :
test14(parsedTag,postag) : Searches parsedTag on google for popularity and precision at 10 and 20 urls
@parsedTag : hashtag that is searched on google
@postag : pos tags of the parsedTag
return : a list containing google search popularity and precision at 10 and 20 urls of the parsedTag
This work is licensed under the
Creative Commons Attribution-NonCommercial-ShareAlike 4.0
International License. To view a copy of this license,
visit http://creativecommons.org/licenses/by-nc-sa/4.0/.
'''
import searchWeb
import stemming
import stemming.porter2
import urllib2 as ulib
from wordsegment import segment
import re
from socialListSettings import socialListProxy,socialListHttp_Proxy,socialListHttps_Proxy
# def checkall(postags,parsedSociallists):
# j = 0
# urlsfile = open("urls.txt","w")
# for line in parsedSociallists:
# print line,
# nounpart = []
# k = 0
# splitline = line.split()
# for x in postags[j]:
# if (x is 'M' or x is '^' or x is 'Z'):
# nounpart.append(splitline[k])
# k += 1
# while True:
# try:
# googledata = searchWeb.searchgoogle(line)
# break
# except:
# continue
# urlsfile.write(line+"\n"+str(googledata)+"\n")
# count = 0
# if " ".join(nounpart) == "":
# j+=1
# print "2"
# continue
# i = 1
# for site in googledata:
# try:
# if searchWeb.searchforstring(site,nounpart):
# count += 1
# except:
# print "",
# i += 1
# if i > 10:
# break
# if count > 5:
# print "1"
# else:
# print "0"
# j += 1
def test14(parsedTag, postag):
    """Search ``parsedTag`` on google for popularity and precision@10/@20.

    @parsedTag : hashtag (already segmented into words) searched on google
    @postag    : POS tags of ``parsedTag``, one tag per word
    return     : list [popularity(0/1), precision@10, precision@20];
                 a leading 2 is prepended when no noun-like part is found.
    """
    nounpart = []
    k = 0
    ret = []
    splitline = parsedTag.split()
    # keep only the noun-like tokens; compare with '==', not 'is'
    # (identity comparison against string literals is unreliable)
    for x in postag:
        if x == 'M' or x == '^' or x == 'Z':
            nounpart.append(splitline[k])
        k += 1
    if " ".join(nounpart) == "":
        ret.append(2)
    # retry the google search until it succeeds (transient network errors)
    while True:
        try:
            googledata = searchWeb.searchgoogle(parsedTag)
            # gets all the urls for the hashtag on google search
            break
        except Exception:
            continue
    # popularity: count in how many of the first 10 results the noun
    # parts occur
    count = 0
    i = 1
    for site in googledata:
        try:
            if searchWeb.searchforstring(site, nounpart):
                # checks if the hashtag noun parts are popular by counting
                # the number of websites they are present
                count += 1
        except Exception:
            pass
        i += 1
        if i > 10:
            break
    ret.append(1 if count > 5 else 0)
    # stem the hashtag words for the precision checks below
    stems = []
    for n in parsedTag.split():
        stems.append(stemming.porter2.stem(n))
    seg = " ".join(stems)
    if socialListProxy:
        proxy = ulib.ProxyHandler({'http': socialListHttp_Proxy,
                                   'https': socialListHttps_Proxy})
        opener = ulib.build_opener(proxy)
        ulib.install_opener(opener)
    counter = 0
    total = 0
    for site in googledata:
        req = ulib.Request(site, headers={'User-Agent': "Mozilla/5.0"})
        site = segment(site)
        stemmed_url = []
        for j in site:
            stemmed_url.append(stemming.porter2.stem(j))
        site = " ".join(stemmed_url)
        try:
            content = ulib.urlopen(req)
            x = re.findall(r"<\S*?title\S*?>(.*?)<\S*?/\S*?title\S*?>",
                           content.read())
            # searches for a match of hashtag in the title and url of
            # every page
            t = []
            for s in x:
                t.append(stemming.porter2.stem(s))
            t = " ".join(t)
            if (seg in site) or (seg in t):
                counter += 1
            total += 1
        except Exception:
            pass
        if total == 10:
            ret.append("%.4f" % (float(counter) / total))
        if total == 20:
            ret.append("%.4f" % (float(counter) / total))
            break
    # fewer than 10/20 fetchable urls: fall back to fixed denominators
    if total < 10:
        ret.append("%.4f" % (float(counter) / 10.0))
        ret.append("%.4f" % (float(counter) / 20.0))
    elif total < 20:
        ret.append("%.4f" % (float(counter) / 20.0))
    return ret
|
SummerProject16/project
|
CMUTweetTagger/testFile14.py
|
Python
|
cc0-1.0
| 3,538
|
[
"VisIt"
] |
73cade9e883765263b79190fb000de2e0c7fd71d85435c5dd86703a5c926efaa
|
##
# Copyright (C) 2012 Jasper Snoek, Hugo Larochelle and Ryan P. Adams
#
# This code is written for research and educational purposes only to
# supplement the paper entitled
# "Practical Bayesian Optimization of Machine Learning Algorithms"
# by Snoek, Larochelle and Adams
# Advances in Neural Information Processing Systems, 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import gp
import sys
import util
import tempfile
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.stats as sps
import scipy.optimize as spo
import cPickle
from Locker import *
def init(expt_dir, arg_string):
    """Spearmint entry point: build a GPEIOptChooser from an argument string."""
    kwargs = util.unpack_args(arg_string)
    return GPEIOptChooser(expt_dir, **kwargs)
"""
Chooser module for the Gaussian process expected improvement (EI)
acquisition function where points are sampled densely in the unit
hypercube and then a subset of the points are optimized to maximize EI
over hyperparameter samples. Slice sampling is used to sample
Gaussian process hyperparameters.
"""
class GPEIOptChooser:
def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
             pending_samples=100, noiseless=False, burnin=100,
             grid_subset=20):
    """Configure the chooser's GP model and persistent-state file paths.

    expt_dir        -- experiment directory; state pickle and stats file live here
    covar           -- name of a covariance function defined in the gp module
    mcmc_iters      -- slice-sampling iterations per call to next()
    pending_samples -- number of fantasy draws for pending experiments
    noiseless       -- if truthy, model observations as (nearly) noise-free
    burnin          -- MCMC burn-in iterations performed on first use
    grid_subset     -- number of candidate points optimized over EI
    """
    self.cov_func = getattr(gp, covar)  # covariance function looked up by name
    self.locker = Locker()              # file lock guarding the state pickle
    self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
    self.stats_file = os.path.join(expt_dir,
                                   self.__module__ + "_hyperparameters.txt")
    self.mcmc_iters = int(mcmc_iters)
    self.burnin = int(burnin)
    self.needs_burnin = True
    self.pending_samples = pending_samples
    self.D = -1  # input dimensionality; -1 means "not yet initialized"
    self.hyper_iters = 1
    # Number of points to optimize EI over
    self.grid_subset = int(grid_subset)
    self.noiseless = bool(int(noiseless))
    self.hyper_samples = []  # (mean, noise, amp2, ls) tuples from MCMC

    self.noise_scale = 0.1  # horseshoe prior
    self.amp2_scale = 1     # zero-mean log normal prior
    self.max_ls = 10        # top-hat prior on length scales
def dump_hypers(self):
    """Persist current GP hyperparameters to the state pickle (under the
    file lock) and write all MCMC samples, plus their mean, to a
    human-readable stats file."""
    sys.stderr.write("Waiting to lock hyperparameter pickle...")
    self.locker.lock_wait(self.state_pkl)
    sys.stderr.write("...acquired\n")

    # Write the hyperparameters out to a Pickle.
    fh = tempfile.NamedTemporaryFile(mode='w', delete=False)
    cPickle.dump({ 'dims'  : self.D,
                   'ls'    : self.ls,
                   'amp2'  : self.amp2,
                   'noise' : self.noise,
                   'mean'  : self.mean },
                 fh)
    fh.close()

    # Use an atomic move for better NFS happiness.
    cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
    os.system(cmd)  # TODO: Should check system-dependent return status.

    self.locker.unlock(self.state_pkl)

    # Write the hyperparameters out to a human readable file as well
    fh = open(self.stats_file, 'w')
    fh.write('Mean Noise Amplitude <length scales>\n')
    fh.write('-----------ALL SAMPLES-------------\n')
    # running mean accumulated with weight 1/len over all samples
    meanhyps = 0*np.hstack(self.hyper_samples[0])
    for i in self.hyper_samples:
        hyps = np.hstack(i)
        meanhyps += (1/float(len(self.hyper_samples)))*hyps
        for j in hyps:
            fh.write(str(j) + ' ')
        fh.write('\n')
    fh.write('-----------MEAN OF SAMPLES-------------\n')
    for j in meanhyps:
        fh.write(str(j) + ' ')
    fh.write('\n')
    fh.close()
def _real_init(self, dims, values):
    """Lazy initialization, performed on the first call to next():
    restore hyperparameters from the state pickle when it exists,
    otherwise derive initial values from the observed data."""
    sys.stderr.write("Waiting to lock hyperparameter pickle...")
    self.locker.lock_wait(self.state_pkl)
    sys.stderr.write("...acquired\n")

    if os.path.exists(self.state_pkl):
        # resume from a previous run; skip burn-in in that case
        fh = open(self.state_pkl, 'r')
        state = cPickle.load(fh)
        fh.close()

        self.D = state['dims']
        self.ls = state['ls']
        self.amp2 = state['amp2']
        self.noise = state['noise']
        self.mean = state['mean']
        self.needs_burnin = False
    else:
        # Input dimensionality.
        self.D = dims

        # Initial length scales.
        self.ls = np.ones(self.D)

        # Initial amplitude.
        self.amp2 = np.std(values)

        # Initial observation noise.
        self.noise = 1e-3

        # Initial mean.
        self.mean = np.mean(values)

    # Save hyperparameter samples
    self.hyper_samples.append((self.mean, self.noise, self.amp2,
                               self.ls))

    self.locker.unlock(self.state_pkl)
def cov(self, x1, x2=None):
    """Amplitude-scaled covariance between point sets.

    With a single argument, return the self-covariance of x1 with a
    small diagonal jitter added for numerical (Cholesky) stability;
    otherwise return the cross-covariance between x1 and x2.
    """
    if x2 is not None:
        return self.amp2 * self.cov_func(self.ls, x1, x2)
    jitter = 1e-6 * np.eye(x1.shape[0])
    return self.amp2 * (self.cov_func(self.ls, x1, None) + jitter)
# Given a set of completed 'experiments' in the unit hypercube with
# corresponding objective 'values', pick from the next experiment to
# run according to the acquisition function.
def next(self, grid, values, durations,
         candidates, pending, complete):
    """Pick the next experiment to run by maximizing expected improvement.

    Returns either an index into ``candidates`` or, when an optimized
    off-grid point wins, a tuple ``(numcand, point)``.
    """
    # Don't bother using fancy GP stuff at first.
    if complete.shape[0] < 2:
        return int(candidates[0])

    # Perform the real initialization.
    if self.D == -1:
        self._real_init(grid.shape[1], values[complete])

    # Grab out the relevant sets.
    comp = grid[complete,:]
    cand = grid[candidates,:]
    pend = grid[pending,:]
    vals = values[complete]
    numcand = cand.shape[0]

    # Spray a set of candidates around the min so far
    best_comp = np.argmin(vals)
    cand2 = np.vstack((np.random.randn(10,comp.shape[1])*0.001 +
                       comp[best_comp,:], cand))

    if self.mcmc_iters > 0:
        # Possibly burn in.
        if self.needs_burnin:
            for mcmc_iter in xrange(self.burnin):
                self.sample_hypers(comp, vals)
                sys.stderr.write("BURN %d/%d] mean: %.2f amp: %.2f "
                                 "noise: %.4f min_ls: %.4f max_ls: %.4f\n"
                                 % (mcmc_iter+1, self.burnin, self.mean,
                                    np.sqrt(self.amp2), self.noise,
                                    np.min(self.ls), np.max(self.ls)))
            self.needs_burnin = False

        # Sample from hyperparameters.
        # Adjust the candidates to hit ei peaks
        self.hyper_samples = []
        for mcmc_iter in xrange(self.mcmc_iters):
            self.sample_hypers(comp, vals)
            sys.stderr.write("%d/%d] mean: %.2f amp: %.2f noise: %.4f "
                             "min_ls: %.4f max_ls: %.4f\n"
                             % (mcmc_iter+1, self.mcmc_iters, self.mean,
                                np.sqrt(self.amp2), self.noise,
                                np.min(self.ls), np.max(self.ls)))
        self.dump_hypers()

        b = []  # optimization bounds
        for i in xrange(0, cand.shape[1]):
            b.append((0, 1))

        # keep only the grid_subset most promising points for local search
        overall_ei = self.ei_over_hypers(comp,pend,cand2,vals)
        inds = np.argsort(np.mean(overall_ei,axis=1))[-self.grid_subset:]
        cand2 = cand2[inds,:]

        # locally optimize each surviving candidate w.r.t. mean EI
        for i in xrange(0, cand2.shape[0]):
            sys.stderr.write("Optimizing candidate %d/%d\n" %
                             (i+1, cand2.shape[0]))
            ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei_over_hypers,
                                    cand2[i,:].flatten(), args=(comp,vals),
                                    bounds=b, disp=0)
            cand2[i,:] = ret[0]
        cand = np.vstack((cand, cand2))

        overall_ei = self.ei_over_hypers(comp,pend,cand,vals)
        best_cand = np.argmax(np.mean(overall_ei, axis=1))

        # indices >= numcand refer to optimized off-grid points
        if (best_cand >= numcand):
            return (int(numcand), cand[best_cand,:])

        return int(candidates[best_cand])
    else:
        # Optimize hyperparameters
        self.optimize_hypers(comp, vals)
        sys.stderr.write("mean: %.2f amp: %.2f noise: %.4f "
                         "min_ls: %.4f max_ls: %.4f\n"
                         % (self.mean, np.sqrt(self.amp2), self.noise,
                            np.min(self.ls), np.max(self.ls)))

        # Optimize over EI
        b = []  # optimization bounds
        for i in xrange(0, cand.shape[1]):
            b.append((0, 1))

        for i in xrange(0, cand2.shape[0]):
            ret = spo.fmin_l_bfgs_b(self.grad_optimize_ei,
                                    cand2[i,:].flatten(), args=(comp,vals,True),
                                    bounds=b, disp=0)
            cand2[i,:] = ret[0]
        cand = np.vstack((cand, cand2))

        ei = self.compute_ei(comp, pend, cand, vals)
        best_cand = np.argmax(ei)

        if (best_cand >= numcand):
            return (int(numcand), cand[best_cand,:])

        return int(candidates[best_cand])
# Compute EI over hyperparameter samples
def ei_over_hypers(self, comp, pend, cand, vals):
    """Evaluate EI at every candidate under each stored hyperparameter
    sample; returns an (n_cand, mcmc_iters) array, one column per sample.
    Note: the chooser's current hypers are left set to the last sample."""
    overall_ei = np.zeros((cand.shape[0], self.mcmc_iters))
    for col in xrange(self.mcmc_iters):
        # install this hyperparameter sample, then score the candidates
        self.mean, self.noise, self.amp2, self.ls = self.hyper_samples[col]
        overall_ei[:, col] = self.compute_ei(comp, pend, cand, vals)
    return overall_ei.copy()
# Adjust points by optimizing EI over a set of hyperparameter samples
def grad_optimize_ei_over_hypers(self, cand, comp, vals, compute_grad=True):
    """Sum EI (and, optionally, its gradient) over all stored
    hyperparameter samples, restoring the chooser's current
    hyperparameters before returning."""
    summed_ei = 0
    summed_grad_ei = np.zeros(cand.shape).flatten()
    # stash the current hypers -- the loop below clobbers them
    saved_state = (self.mean, self.noise, self.amp2, self.ls.copy())

    for hyper in self.hyper_samples:
        self.mean, self.noise, self.amp2, self.ls = hyper
        if compute_grad:
            ei, g_ei = self.grad_optimize_ei(cand, comp, vals, compute_grad)
            summed_grad_ei = summed_grad_ei + g_ei
        else:
            ei = self.grad_optimize_ei(cand, comp, vals, compute_grad)
        summed_ei += ei

    # restore the stashed hyperparameters
    self.mean, self.noise, self.amp2, self.ls = saved_state

    if compute_grad:
        return (summed_ei, summed_grad_ei)
    return summed_ei
# Adjust points based on optimizing their ei
def grad_optimize_ei(self, cand, comp, vals, compute_grad=True):
    """Expected improvement at ``cand`` given completed points/values.

    With ``compute_grad`` False, returns the per-candidate EI vector.
    With ``compute_grad`` True, returns ``(-sum(EI), gradient)`` -- the
    negated sum makes this usable as an objective for fmin_l_bfgs_b.
    """
    best = np.min(vals)
    cand = np.reshape(cand, (-1, comp.shape[1]))

    # The primary covariances for prediction.
    comp_cov = self.cov(comp)
    cand_cross = self.cov(comp, cand)

    # Compute the required Cholesky.
    obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
    obsv_chol = spla.cholesky(obsv_cov, lower=True)

    # gradient of the covariance, looked up by naming convention
    # ('grad_<covar>' must exist in the gp module)
    cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
    cand_cross_grad = cov_grad_func(self.ls, comp, cand)

    # Predictive things.
    # Solve the linear systems.
    alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
    beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

    # Predict the marginal means and variances at candidates.
    func_m = np.dot(cand_cross.T, alpha) + self.mean
    func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)

    # Expected improvement
    func_s = np.sqrt(func_v)
    u = (best - func_m) / func_s
    ncdf = sps.norm.cdf(u)
    npdf = sps.norm.pdf(u)
    ei = func_s*( u*ncdf + npdf)

    if not compute_grad:
        return ei

    # Gradients of ei w.r.t. mean and variance
    g_ei_m = -ncdf
    g_ei_s2 = 0.5*npdf / func_s

    # Apply covariance function
    # NOTE(review): np.squeeze assumes a single candidate point here --
    # confirm callers only pass one point when compute_grad is True.
    grad_cross = np.squeeze(cand_cross_grad)

    grad_xp_m = np.dot(alpha.transpose(),grad_cross)
    grad_xp_v = np.dot(-2*spla.cho_solve(
        (obsv_chol, True),cand_cross).transpose(), grad_cross)

    grad_xp = 0.5*self.amp2*(grad_xp_m*g_ei_m + grad_xp_v*g_ei_s2)
    ei = -np.sum(ei)

    return ei, grad_xp.flatten()
def compute_ei(self, comp, pend, cand, vals):
    """Expected improvement at candidate points under the current GP
    hyperparameters, optionally marginalizing over pending experiments.

    Parameters
    ----------
    comp : array
        Inputs of completed experiments.
    pend : array
        Inputs of pending (in-flight) experiments; may be empty.
    cand : array
        Candidate inputs at which EI is evaluated.
    vals : array
        Observed objective values at `comp`.

    Returns
    -------
    array
        Per-candidate EI. With pending experiments, their outcomes are
        fantasized `self.pending_samples` times and the EI is averaged
        over those fantasies.
    """
    if pend.shape[0] == 0:
        # If there are no pending, don't do anything fancy.
        # Current best.
        best = np.min(vals)
        # The primary covariances for prediction.
        comp_cov = self.cov(comp)
        cand_cross = self.cov(comp, cand)
        # Compute the required Cholesky.
        obsv_cov = comp_cov + self.noise*np.eye(comp.shape[0])
        obsv_chol = spla.cholesky( obsv_cov, lower=True )
        # Solve the linear systems.
        alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
        beta = spla.solve_triangular(obsv_chol, cand_cross, lower=True)
        # Predict the marginal means and variances at candidates.
        func_m = np.dot(cand_cross.T, alpha) + self.mean
        func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
        # Expected improvement
        func_s = np.sqrt(func_v)
        u = (best - func_m) / func_s
        ncdf = sps.norm.cdf(u)
        npdf = sps.norm.pdf(u)
        ei = func_s*( u*ncdf + npdf)
        return ei
    else:
        # If there are pending experiments, fantasize their outcomes.
        # Create a composite vector of complete and pending.
        comp_pend = np.concatenate((comp, pend))
        # Compute the covariance and Cholesky decomposition.
        comp_pend_cov = (self.cov(comp_pend) +
                         self.noise*np.eye(comp_pend.shape[0]))
        comp_pend_chol = spla.cholesky(comp_pend_cov, lower=True)
        # Compute submatrices.
        pend_cross = self.cov(comp, pend)
        pend_kappa = self.cov(pend)
        # Use the sub-Cholesky.
        obsv_chol = comp_pend_chol[:comp.shape[0],:comp.shape[0]]
        # Solve the linear systems.
        alpha = spla.cho_solve((obsv_chol, True), vals - self.mean)
        beta = spla.cho_solve((obsv_chol, True), pend_cross)
        # Finding predictive means and variances.
        pend_m = np.dot(pend_cross.T, alpha) + self.mean
        pend_K = pend_kappa - np.dot(pend_cross.T, beta)
        # Take the Cholesky of the predictive covariance.
        pend_chol = spla.cholesky(pend_K, lower=True)
        # Make predictions: draw pending_samples joint fantasies of the
        # pending outcomes from the predictive Gaussian.
        pend_fant = np.dot(pend_chol, npr.randn(pend.shape[0],self.pending_samples)) + self.mean
        # Include the fantasies.
        fant_vals = np.concatenate(
            (np.tile(vals[:,np.newaxis],
                     (1,self.pending_samples)), pend_fant))
        # Compute bests over the fantasies.
        bests = np.min(fant_vals, axis=0)
        # Now generalize from these fantasies.
        cand_cross = self.cov(comp_pend, cand)
        # Solve the linear systems.
        alpha = spla.cho_solve((comp_pend_chol, True),
                               fant_vals - self.mean)
        beta = spla.solve_triangular(comp_pend_chol, cand_cross,
                                     lower=True)
        # Predict the marginal means and variances at candidates.
        func_m = np.dot(cand_cross.T, alpha) + self.mean
        func_v = self.amp2*(1+1e-6) - np.sum(beta**2, axis=0)
        # Expected improvement, broadcast over (candidates x fantasies).
        func_s = np.sqrt(func_v[:,np.newaxis])
        u = (bests[np.newaxis,:] - func_m) / func_s
        ncdf = sps.norm.cdf(u)
        npdf = sps.norm.pdf(u)
        ei = func_s*( u*ncdf + npdf)
        # Average EI over the fantasized pending outcomes.
        return np.mean(ei, axis=1)
def sample_hypers(self, comp, vals):
    """Draw one slice-sampling update of all GP hyperparameters and
    record the resulting (mean, noise, amp2, ls) tuple."""
    if not self.noiseless:
        self._sample_noisy(comp, vals)
    else:
        # Pin the noise to a tiny jitter; sample mean/amplitude only.
        self.noise = 1e-3
        self._sample_noiseless(comp, vals)
    self._sample_ls(comp, vals)

    current = (self.mean, self.noise, self.amp2, self.ls)
    self.hyper_samples.append(current)
def _sample_ls(self, comp, vals):
    """Slice-sample the covariance length scales, one component at a
    time (compwise), under the GP marginal likelihood."""
    n = comp.shape[0]

    def ls_logprob(ls):
        # Reject length scales outside the allowed box [0, max_ls].
        if np.any(ls < 0) or np.any(ls > self.max_ls):
            return -np.inf
        cov = self.amp2 * (self.cov_func(ls, comp, None)
                           + 1e-6 * np.eye(n))
        cov += self.noise * np.eye(n)
        chol = spla.cholesky(cov, lower=True)
        resid = vals - self.mean
        solve = spla.cho_solve((chol, True), resid)
        # Gaussian log marginal likelihood (up to constants).
        return -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(resid, solve)

    self.ls = util.slice_sample(self.ls, ls_logprob, compwise=True)
def _sample_noisy(self, comp, vals):
    """Jointly slice-sample the GP mean, amplitude and noise level
    (non-compwise) under the marginal likelihood with priors."""
    n = comp.shape[0]

    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        noise = hypers[2]
        # This is pretty hacky, but keeps things sane: restrict the
        # mean to the observed value range, scales to non-negative.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf
        if amp2 < 0 or noise < 0:
            return -np.inf
        cov = amp2 * (self.cov_func(self.ls, comp, None)
                      + 1e-6 * np.eye(n))
        cov += noise * np.eye(n)
        chol = spla.cholesky(cov, lower=True)
        resid = vals - mean
        solve = spla.cho_solve((chol, True), resid)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(resid, solve)
        # Horseshoe prior on the noise level.
        lp += np.log(np.log(1 + (self.noise_scale / noise) ** 2))
        # Log-normal prior on the amplitude.
        lp -= 0.5 * (np.log(amp2) / self.amp2_scale) ** 2
        return lp

    start = np.array([self.mean, self.amp2, self.noise])
    hypers = util.slice_sample(start, logprob, compwise=False)
    self.mean = hypers[0]
    self.amp2 = hypers[1]
    self.noise = hypers[2]
def _sample_noiseless(self, comp, vals):
    """Slice-sample the GP mean and amplitude with the observation
    noise pinned to a small jitter (noiseless objective)."""
    n = comp.shape[0]
    fixed_noise = 1e-3

    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        # Restrict the mean to the observed value range and require a
        # non-negative amplitude; otherwise reject outright.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf
        if amp2 < 0:
            return -np.inf
        cov = amp2 * (self.cov_func(self.ls, comp, None)
                      + 1e-6 * np.eye(n))
        cov += fixed_noise * np.eye(n)
        chol = spla.cholesky(cov, lower=True)
        resid = vals - mean
        solve = spla.cho_solve((chol, True), resid)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(resid, solve)
        # Log-normal prior on the amplitude.
        lp -= 0.5 * (np.log(amp2) / self.amp2_scale) ** 2
        return lp

    start = np.array([self.mean, self.amp2, self.noise])
    hypers = util.slice_sample(start, logprob, compwise=False)
    self.mean = hypers[0]
    self.amp2 = hypers[1]
    self.noise = 1e-3
def optimize_hypers(self, comp, vals):
    """Fit hyperparameters by delegating to a helper GP's maximum
    likelihood optimizer, then adopt and persist the result."""
    helper = gp.GP(self.cov_func.__name__)
    helper.real_init(comp.shape[1], vals)
    helper.optimize_hypers(comp, vals)

    # Adopt the optimized hyperparameters.
    self.mean = helper.mean
    self.ls = helper.ls
    self.amp2 = helper.amp2
    self.noise = helper.noise

    # Save hyperparameter samples (a single optimized tuple) and dump.
    self.hyper_samples.append((self.mean, self.noise, self.amp2, self.ls))
    self.dump_hypers()
|
ninjin/spearmint-lite
|
GPEIOptChooser.py
|
Python
|
gpl-3.0
| 20,831
|
[
"Gaussian"
] |
7886097274459012a7c6b277611c96551bc9dd0141f6bdf0f9adfbca50691e1b
|
"""Equilibrate a water box with Langevin dynamics, then run a GHMC
integrator and print its per-step summary statistics."""
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems

# Widen pandas console output so the summary table prints unwrapped.
pd.set_option('display.width', 1000)

# Simulation settings.
n_steps = 3000
temperature = 300. * u.kelvin
collision_rate = 1.0 / u.picoseconds
timestep = 2.0 * u.femtoseconds
steps_per_hmc = 12
# NOTE(review): k_max and n_steps are defined but unused below --
# presumably leftovers from an XHMC variant of this script; confirm.
k_max = 2

# Build the periodic water box test system.
testsystem = testsystems.WaterBox(box_edge=3.18 * u.nanometers)  # Around 1060 molecules of water
system = testsystem.system
positions = testsystem.positions

# Equilibrate with a plain Langevin integrator at a small timestep.
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 0.25 * u.femtoseconds)
context = mm.Context(testsystem.system, integrator)
context.setPositions(testsystem.positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(5000)
positions = context.getState(getPositions=True).getPositions()

# Production: GHMC integrator seeded with the equilibrated positions.
integrator = hmc_integrators.GHMC2(temperature, steps_per_hmc, timestep, collision_rate)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)

# Step in batches of 10 and print the integrator's summary after each batch.
for i in range(1000):
    data = []
    for j in range(10):
        integrator.step(1)
        data.append(integrator.summary())
    data = pd.DataFrame(data)
    print(data)
|
kyleabeauchamp/HMCNotes
|
code/old/test_xhmc_compare_ghmc.py
|
Python
|
gpl-2.0
| 1,196
|
[
"OpenMM"
] |
994a5faa3d38fc53a8e0f3b86b7450616fb0161aa9287a76b0e33d3f06b681e6
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1364979192.67012
__CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013'
__CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/tvbrowser.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class tvbrowser(Template):
    """Cheetah-compiled template rendering a small XML status document
    (<e2simplexmlresult> with an e2state flag and e2statetext message).

    Autogenerated by the Cheetah template compiler from tvbrowser.tmpl --
    do not edit by hand; regenerate from the .tmpl source instead.
    NOTE(review): uses dict.has_key(), so this is Python 2-only code.
    """
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        # Forward only the keyword arguments Cheetah recognizes to the
        # instance initializer.
        super(tvbrowser, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        # Render the template into `trans`; with no transaction supplied,
        # buffer into a DummyTransaction and return the rendered string.
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        # Temporarily switch to the WebSafe output filter, instantiating
        # and caching it on first use.
        _orig_filter_89730115 = _filter
        filterName = u'WebSafe'
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2simplexmlresult>
\t<e2state>''')
        _v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11
        if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11.
        write(u'''</e2state>
\t<e2statetext>''')
        _v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15
        if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15.
        write(u'''</e2statetext>\t
</e2simplexmlresult>
''')
        # Restore the filter that was active before this template ran.
        _filter = self._CHEETAH__currentFilter = _orig_filter_89730115
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_tvbrowser= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the class if the compiler did not
# already bake it in.
if not hasattr(tvbrowser, '_initCheetahAttributes'):
    templateAPIClass = getattr(tvbrowser, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(tvbrowser)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=tvbrowser()).run()
|
pli3/Openwebif
|
plugin/controllers/views/web/tvbrowser.py
|
Python
|
gpl-2.0
| 5,220
|
[
"VisIt"
] |
3d844fb6ef1270c3825a91bebb098f2bc73954d78b2e818ea8aceb42642d9767
|
import multiprocessing as MP
import itertools as IT
import progressbar as PGB
import os, warnings
import numpy as NP
import healpy as HP
from astropy.table import Table
from astropy.io import fits, ascii
import h5py
import warnings
from astropy.coordinates import Angle, SkyCoord
from astropy import units
import scipy.constants as FCNST
from scipy.interpolate import interp1d, PchipInterpolator
import astroutils
import geometry as GEOM
import mathops as OPS
import lookup_operations as LKP
import constants as CNST
import foregrounds as FG
try:
from pygsm import GlobalSkyModel, GlobalSkyModel2016
except ImportError:
pygsm_found = False
else:
pygsm_found = True
#################################################################################
def healpix_smooth_and_udgrade_arg_splitter(args, **kwargs):
    """Unpack a tuple of positional arguments and forward to
    healpix_smooth_and_udgrade(); helper for multiprocessing map-style
    APIs that pass all positional arguments as a single tuple."""
    return healpix_smooth_and_udgrade(*args, **kwargs)
def healpix_smooth_and_udgrade(input_map, fwhm, nside, order_in='RING', verbose=True):
    """Smooth a healpix map with a beam of the given FWHM (radians, per
    healpy convention) and resample the result to the requested nside.

    order_in specifies the pixel ordering of input_map ('RING' default).
    """
    flat_map = input_map.ravel()
    smoothed = HP.smoothing(flat_map, fwhm, verbose=verbose)
    return HP.ud_grade(smoothed, nside, order_in=order_in)
#################################################################################
def retrieve_external_spectrum(spec_extfile, ind=None):
    """
    -------------------------------------------------------------------------
    Retrieve an externally stored spectrum (may include recursive calls)

    Inputs:

    spec_extfile
               [string] full path to filename which contains externally
               stored spectrum. Must be specified (no default)

    ind        [scalar, list or numpy array] Indices to select objects in
               the externally stored spectrum of the catalog or sky model.
               If set to None (default), all objects will be selected.

    Outputs:

    spectrum   [numpy array] Spectrum of the sky model at the specified
               sky locations. Has shape nobj x nfreq.
    -------------------------------------------------------------------------
    """
    # (The original try/except NameError guard on the parameter was
    # unreachable -- a bound parameter can never raise NameError -- and
    # has been removed.)
    if spec_extfile is None:
        raise TypeError('Input spec_extfile must be a string')
    if not isinstance(spec_extfile, str):
        raise TypeError('External filename spec_extfile must be a string')
    if ind is not None:
        if not isinstance(ind, (int, list, NP.ndarray)):
            raise TypeError('Input ind must be an integer, list or numpy array')
        # Use the builtin int dtype: the NP.int alias was removed in
        # NumPy 1.24 and raises AttributeError on modern NumPy.
        ind = NP.asarray(ind).astype(int)
        if NP.any(ind < 0):
            raise IndexError('Out of bound indices found in input ind')
    with h5py.File(spec_extfile, 'r') as fileobj:
        # NOTE(review): Dataset '.value' was removed in h5py 3.x; these
        # accesses would need to become dataset[()] on a modern h5py --
        # confirm against the project's pinned h5py version.
        nobj = fileobj['object/name'].value.size
        if ind is None:
            ind = NP.arange(nobj, dtype=int)
        elif NP.any(ind >= nobj):
            raise ValueError('Specified indices exceed maximum number of objects in the external file')
        spec_type = fileobj['header/spec_type'].value
        if spec_type != 'spectrum':
            raise ValueError('Attribute spec_type not set to "spectrum" in external file {0}'.format(spec_extfile))
        if 'spectral_info/spectrum' in fileobj:
            return fileobj['spectral_info/spectrum'].value[ind, :]
        elif 'spectral_info/spec_extfile' in fileobj:
            # The spectrum lives in yet another file; follow the chain.
            next_spec_extfile = fileobj['spectral_info/spec_extfile'].value
            return retrieve_external_spectrum(next_spec_extfile, ind=ind)  # Recursive call
        else:
            raise KeyError('Externally stored spectrum not found in {0}'.format(spec_extfile))
#################################################################################
def append_SkyModel_file(skymodfile, skymod, appendaxis, filemode='a'):
    """
    -----------------------------------------------------------------------------
    Append an instance of class SkyModel to an already existing file or create a
    new file.

    Inputs:

    skymodfile [string] Full path to HDF5 file (without .hdf5 extension) that
               contains saved information of an instance of class SkyModel. If
               it does not exist, it will be created. If it already exists,
               skymod -- an instance of class SkyModel will be appended to it.

    skymod     [instance of class SkyModel] Instance of class SkyModel that will
               be appended on to the skymodfile if it exists or will be saved
               to skymodfile if the file does not exist already

    appendaxis [string] Axis along which the specified skymod data has to be
               appended. Accepted values are 'freq' (append along frequency
               axis) or 'src' (append along source location axis). All other
               axes and attributes must match.

    filemode   [string] Mode in which the HDF5 must be opened. Accepted values
               are 'a' (Read/write if exists, create otherwise (default)) and
               'w' (Create file, truncate if exists).
    -----------------------------------------------------------------------------
    """
    # Validate inputs. (The original unreachable try/except NameError
    # guard on bound parameters has been removed.)
    if not isinstance(skymodfile, str):
        raise TypeError('Input skymodfile must be a string')
    if not isinstance(filemode, str):
        raise TypeError('Input filemode must be a string')
    filemode = filemode.lower()
    if filemode not in ['a', 'w']:
        # BUGFIX: original message said 'filemore'.
        raise ValueError('Invalid value specified for filemode')
    if not isinstance(skymod, SkyModel):
        raise TypeError('Input skymod must be an instance of class SkyModel')
    if not isinstance(appendaxis, str):
        raise TypeError('Input appendaxis must be a string')
    appendaxis = appendaxis.lower()
    if appendaxis not in ['src', 'freq']:
        raise ValueError('Invalid value specified for input appendaxis')

    if not os.path.isfile(skymodfile+'.hdf5'):
        # Nothing to append to: save the model as a fresh file.
        skymod.save(skymodfile, fileformat='hdf5')
        return

    with h5py.File(skymodfile+'.hdf5', filemode) as fileobj:
        # NOTE(review): Dataset '.value' accesses below require h5py < 3;
        # confirm against the project's pinned h5py version.
        # Verify the stored model is compatible with the one appended.
        hdr_group = fileobj['header']
        if hdr_group['spec_type'].value != skymod.spec_type:
            raise ValueError('The spectral type in the SkyModel instance and the skymodfile do not match')
        object_group = fileobj['object']
        if object_group.attrs['epoch'] != skymod.epoch:
            raise ValueError('The epochs in the SkyModel instance and the skymodfile do not match')
        if object_group.attrs['coords'] != skymod.coords:
            raise ValueError('The coordinate system in the SkyModel instance and the skymodfile do not match')
        src_shape_in_file = 'shape' in object_group
        src_shape_in_skymod = skymod.src_shape is not None
        if src_shape_in_file != src_shape_in_skymod:
            raise KeyError('src_shape is not consistent between the SkyModel instance and the skymodfile')
        # Dataset names for the two supported coordinate systems; note the
        # altaz location array is stored as (Alt, Az) columns.
        if skymod.coords == 'radec':
            lon = 'RA'
            lat = 'Dec'
        else:
            lon = 'Az'
            lat = 'Alt'
        spec_group = fileobj['spectral_info']
        if appendaxis == 'src':
            # Append objects: frequency axes must match exactly.
            if skymod.frequency.size != spec_group['freq'].size:
                raise IndexError('The frequencies in the skymodefile and the SkyModel instance do not match')
            if NP.any(NP.abs(skymod.frequency - spec_group['freq'].value) > 1e-14):
                raise ValueError('The frequencies in the skymodefile and the SkyModel instance do not match')
            object_group['name'].resize(object_group['name'].size+skymod.name.size, axis=0)
            object_group['name'][-skymod.name.size:] = skymod.name
            object_group[lon].resize(object_group[lon].size+skymod.location.shape[0], axis=0)
            object_group[lat].resize(object_group[lat].size+skymod.location.shape[0], axis=0)
            # BUGFIX: the original referenced a non-existent attribute
            # 'skymod.locations' here; the attribute is 'location'.
            if skymod.coords == 'radec':
                object_group[lon][-skymod.location.shape[0]:] = skymod.location[:,0]
                object_group[lat][-skymod.location.shape[0]:] = skymod.location[:,1]
            else:
                object_group[lon][-skymod.location.shape[0]:] = skymod.location[:,1]
                object_group[lat][-skymod.location.shape[0]:] = skymod.location[:,0]
            if src_shape_in_file:
                object_group['shape'].resize(object_group['shape'].shape[0]+skymod.src_shape.shape[0], axis=0)
                object_group['shape'][-skymod.src_shape.shape[0]:,:] = skymod.src_shape
            if skymod.spec_type == 'func':
                # Append the per-object functional spectral parameters.
                spec_group['func-name'].resize(spec_group['func-name'].size+skymod.spec_parms['name'].size, axis=0)
                spec_group['func-name'][-skymod.name.size:] = skymod.spec_parms['name']
                spec_group['freq'].resize(spec_group['freq'].size+skymod.spec_parms['freq-ref'].size, axis=0)
                spec_group['freq'][-skymod.name.size:] = skymod.spec_parms['freq-ref']
                spec_group['flux_density'].resize(spec_group['flux_density'].size+skymod.spec_parms['flux-scale'].size, axis=0)
                spec_group['flux_density'][-skymod.name.size:] = skymod.spec_parms['flux-scale']
                if ('spindex' in spec_group) and ('power-law-index' in skymod.spec_parms):
                    spec_group['spindex'].resize(spec_group['spindex'].size+skymod.spec_parms['power-law-index'].size, axis=0)
                    spec_group['spindex'][-skymod.name.size:] = skymod.spec_parms['power-law-index']
            else:
                # Full spectrum: append along the object (row) axis.
                spec_group['spectrum'].resize(spec_group['spectrum'].shape[0]+skymod.spectrum.shape[0], axis=0)
                spec_group['spectrum'][-skymod.name.size:,:] = skymod.spectrum
        else:
            # Append frequencies: object lists must match exactly.
            if skymod.name.size != object_group['name'].size:
                raise IndexError('The objects in the skymodefile and the SkyModel instance do not match')
            if NP.any(skymod.name != object_group['name'].value):
                raise ValueError('The objects in the skymodefile and the SkyModel instance do not match')
            if skymod.coords == 'radec':
                if NP.any(NP.abs(skymod.location - NP.hstack((object_group['RA'].value.reshape(-1,1), object_group['Dec'].value.reshape(-1,1)))) > 1e-14):
                    raise ValueError('The locations in the skymodefile and the SkyModel instance do not match')
            else:
                if NP.any(NP.abs(skymod.location - NP.hstack((object_group['Alt'].value.reshape(-1,1), object_group['Az'].value.reshape(-1,1)))) > 1e-14):
                    raise ValueError('The locations in the skymodefile and the SkyModel instance do not match')
            if src_shape_in_file:
                if NP.any(NP.abs(skymod.src_shape - object_group['shape'].value) > 1e-14):
                    # BUGFIX: original message wrongly said 'locations'.
                    raise ValueError('The source shapes in the skymodefile and the SkyModel instance do not match')
            spec_group['freq'].resize(spec_group['freq'].size+skymod.frequency.size, axis=0)
            spec_group['freq'][-skymod.frequency.size:] = skymod.frequency.ravel()
            if skymod.spec_type == 'spectrum':
                # Full spectrum: append along the frequency (column) axis.
                spec_group['spectrum'].resize(spec_group['spectrum'].shape[1]+skymod.spectrum.shape[1], axis=1)
                spec_group['spectrum'][:,-skymod.name.size:] = skymod.spectrum
#################################################################################
class SkyModel(object):
"""
-----------------------------------------------------------------------------
Class to manage sky model information.
Attributes:
astroutils_githash
[string] Git# of the AstroUtils version used to create/save
the SkyModel
name [scalar or vector] Name of the catalog. If scalar, will be
used for all sources in the sky model. If vector, will be
used for corresponding object. If vector, size must equal
the number of objects.
frequency [scalar or vector] Frequency range for which sky model is
applicable. Units in Hz.
location [numpy array or list of lists] Positions of the sources in
sky model. Each position is specified as a row (numpy array)
or a 2-element list which is input as a list of lists for all
the sources in the sky model
is_healpix [boolean] If True, it is a healpix map with ordering as
specified in attribute healpix_ordering. By default it is
set to False
healpix_ordering
[string] specifies the ordering of healpix pixels if
is_healpix is set to True in which case it is set to 'nest'
or 'ring'. If is_healpix is set to False, it is set to 'na'
(default) indicating 'not applicable'
spec_type [string] specifies the flux variation along the spectral
axis. Allowed values are 'func' and 'spectrum'. If set to
'func', values under spec_parms are applicable. If set to
'spectrum', values under key 'spectrum' are applicable.
spec_parms [dictionary] specifies spectral parameters applicable for
different spectral types. Only applicable if spec_type is
set to 'func'. It contains values in the following
keys:
'name' [string] Specifies name of the functional variation
of spectrum. Applicable when spec_type is set to
'func'. Allowed values are 'random', 'monotone',
'power-law', and 'tanh'. Default='power-law'
(with power law index set to 0). See member
functions for these function definitions.
'power-law-index'
[scalar numpy vector or list] Power law index for
each object (flux ~ freq^power_law_index). Will be
specified and applicable when value in key 'name'
is set to 'power-law'. Same size as the number of
object locations.
'freq-ref'
[scalar or numpy array or list] Reference or pivot
frequency as applicable. If a scalar, it is
identical at all object locations. If a list or
numpy array it must of size equal to the number of
objects, one value at each location. If
value under key 'name' is set to 'power-law', this
specifies the reference frequency at which the flux
density is specified under key 'flux-scale'. If
value under key 'name' is 'monotone', this specifies
the frequency at which the spectrum of the object
contains a spike and zero elsewhere. If value under
key 'name' is 'tanh', this specifies the frequency
at which the spectrum is mid-way between min and
max of the tanh function. This is not applicable
when value under key 'name' is set to 'random' or
'flat'.
'flux-scale'
[scalar or numpy array] Flux scale of the flux
densities at object locations. If a scalar, it is
common for all object locations. If it is a vector,
it has a size equal to the number of object
locations, one value for each object location. If
value in 'name' is set to 'power-law', this refers
to the flux density scale at the reference frequency
specified under key 'freq-ref'. If value under key
'name' is 'tanh', the flux density scale is half of
the value specified under this key.
'flux-offset'
[numpy vector] Flux density offset applicable after
applying the flux scale. Same units as the flux
scale. If a scalar, it is common for all object
locations. If it is a vector, it has a size equal
to the number of object locations, one value
for each object location. When value under the key
'name' is set to 'random', this amounts to setting
a mean flux density along the spectral axis.
'z-width'
[numpy vector] Characteristic redshift full-wdith
in the definition of tanh expression applicable to
global EoR signal.
spectrum [numpy array] Spectrum of the catalog. Will be applicable
if attribute spec_type is set to 'spectrum' or if spectrum
was computed using the member function. It will be of shape
nsrc x nchan. If not specified or set to None, an external
file must be specified under attribute 'spec_extfile'
spec_extfile [string] full path filename of external file containing saved
version of instance of class SkyModel which contains an
offline version of full spectrum data. This will be
applicable only if spec_type is set to 'spectrum'. If set to
None, full spectrum will be provided under attribute
'spectrum'
src_shape [3-column numpy array or list of 3-element lists] source
shape specified by major axis FWHM (first column), minor axis
FWHM (second column), and position angle (third column). The
major and minor axes and position angle are stored in degrees.
The number of rows must match the number of sources. Position
angle is in degrees east of north (same convention as local
azimuth)
epoch [string] Epoch appropriate for the coordinate system. Default
is 'J2000'
coords [string] Coordinate system used for the source positions in
the sky model. Currently accepted values are 'radec' (RA-Dec)
Member Functions:
__init__() Initialize an instance of class SkyModel
match() Match the source positions in an instance of class
SkyModel with another instance of the same class to a
specified angular radius using spherematch() in the geometry
module
subset() Provide a subset of the sky model using a list of indices onto
the existing sky model. Subset can be either in position or
frequency channels
generate_spectrum()
Generate and return a spectrum from functional spectral
parameters
load_external_spectrum()
Load full spectrum from external file
to_healpix() Convert catalog to a healpix format of given nside at
specified frequencies.
save() Save sky model to the specified output file
------------------------------------------------------------------------------
"""
##############################################################################
def __init__(self, init_file=None, init_parms=None, load_spectrum=False):
    """
    --------------------------------------------------------------------------
    Initialize an instance of class SkyModel

    Attributes initialized: astroutils_githash, name, frequency, location,
    epoch, coords, spec_type, spec_parms, spectrum, spec_extfile, src_shape,
    is_healpix, healpix_ordering

    Inputs:

    init_file   [string] Full path to a file containing a saved instance of
                class SkyModel. If not None, the instance is initialized
                from this file and init_parms is ignored.

    init_parms  [dictionary] Initialization parameters, used only when
                init_file is None. Mandatory keys: 'name', 'frequency',
                'location', 'spec_type' ('func' or 'spectrum'). Optional
                keys: 'is_healpix', 'healpix_ordering' ('nest' or 'ring'),
                'spec_parms' (functional spectral parameters when
                spec_type='func': 'name', 'power-law-index', 'freq-ref',
                'flux-scale', 'flux-offset', 'z-width'), 'spectrum' or
                'spec_extfile' (when spec_type='spectrum'), 'src_shape'
                (nsrc x 3: major FWHM, minor FWHM, position angle),
                'src_shape_units' (3-element list/tuple of strings),
                'epoch' (default 'J2000'), 'coords' (default 'radec').
                See the class docstring for detailed descriptions.

    load_spectrum
                [boolean] Applicable only when initializing from init_file
                with a full spectrum available. If True, load the full
                spectrum into attribute spectrum. If False (default), only
                record the external file path in attribute spec_extfile.
    --------------------------------------------------------------------------
    """
    if init_file is not None:
        # Restore a previously saved SkyModel from an HDF5 file
        with h5py.File(init_file, 'r') as fileobj:
            for key in ['header', 'object', 'spectral_info']:
                grp = fileobj[key]
                if key == 'header':
                    if 'AstroUtils#' in grp:
                        self.astroutils_githash = grp['AstroUtils#'].value
                    else:
                        self.astroutils_githash = astroutils.__githash__
                    self.spec_type = grp['spec_type'].value
                    self.is_healpix = False
                    self.healpix_ordering = 'na'
                    if 'is_healpix' in grp:
                        self.is_healpix = bool(grp['is_healpix'].value)
                    if self.is_healpix:
                        if 'healpix_ordering' in grp:
                            if grp['healpix_ordering'].value.lower() in ['nest', 'ring']:
                                self.healpix_ordering = grp['healpix_ordering'].value.lower()
                if key == 'object':
                    self.epoch = grp.attrs['epoch']
                    self.coords = grp.attrs['coords']
                    self.name = grp['name'].value
                    if self.coords == 'radec':
                        self.location = NP.hstack((grp['RA'].value.reshape(-1,1), grp['Dec'].value.reshape(-1,1)))
                    elif self.coords == 'altaz':
                        self.location = NP.hstack((grp['Alt'].value.reshape(-1,1), grp['Az'].value.reshape(-1,1)))
                    if 'shape' in grp:
                        self.src_shape = grp['shape'].value
                    else:
                        self.src_shape = None
                if key == 'spectral_info':
                    self.spec_extfile = None
                    self.spectrum = None
                    self.frequency = grp['freq'].value.reshape(1,-1)
                    self.spec_parms = {}
                    if self.spec_type == 'func':
                        self.spec_parms['name'] = grp['func-name'].value
                        self.spec_parms['freq-ref'] = grp['freq-ref'].value
                        self.spec_parms['flux-scale'] = grp['flux_density'].value
                        if 'flux_offset' in grp:
                            self.spec_parms['flux-offset'] = grp['flux_offset'].value
                        if 'spindex' in grp:
                            self.spec_parms['power-law-index'] = grp['spindex'].value
                    else:
                        if 'spec_extfile' in grp:
                            self.spec_extfile = grp['spec_extfile'].value
                        else:
                            # Spectrum lives in the init file itself
                            self.spec_extfile = init_file
                        if not isinstance(load_spectrum, bool):
                            load_spectrum = False
                        if load_spectrum:
                            self.spectrum = grp['spectrum'].value
    elif (init_parms is None):
        raise ValueError('In the absence of init_file, init_parms must be provided for initialization')
    else:
        if not isinstance(init_parms, dict):
            raise TypeError('Input init_parms must be a dictionary')
        try:
            name = init_parms['name']
            frequency = init_parms['frequency']
            location = init_parms['location']
            spec_type = init_parms['spec_type']
        except KeyError:
            raise KeyError('Catalog name, frequency, location, and spectral type must be provided.')

        self.astroutils_githash = astroutils.__githash__
        self.is_healpix = False
        self.healpix_ordering = 'na'
        if 'is_healpix' in init_parms:
            if isinstance(init_parms['is_healpix'], bool):
                self.is_healpix = init_parms['is_healpix']
        if 'healpix_ordering' in init_parms:
            if not isinstance(init_parms['healpix_ordering'], str):
                # Fixed message typo ("alue" -> "Value")
                raise TypeError('Value under key "healpix_ordering" must be a string')
            if init_parms['healpix_ordering'].lower() not in ['nest', 'ring']:
                raise ValueError('Invalid specification for value under key "healpix_ordering"')
            self.healpix_ordering = init_parms['healpix_ordering']

        spec_parms = init_parms.get('spec_parms', None)
        src_shape = init_parms.get('src_shape', None)
        src_shape_units = init_parms.get('src_shape_units', None)
        self.coords = init_parms.get('coords', 'radec')
        self.epoch = init_parms.get('epoch', 'J2000')

        if isinstance(name, (int, float, str)):
            self.name = NP.repeat(NP.asarray(name).reshape(-1), location.shape[0])
        elif isinstance(name, NP.ndarray):
            if name.size == 1:
                self.name = NP.repeat(NP.asarray(name).reshape(-1), location.shape[0])
            elif (name.size == location.shape[0]):
                self.name = name.reshape(-1)
            else:
                raise ValueError('Size of input "name" does not match number of objects')
        else:
            raise TypeError('Catalog name must be a integer, float, string or numpy array')

        if isinstance(spec_type, str):
            if spec_type in ['func', 'spectrum']:
                self.spec_type = spec_type
            else:
                raise ValueError('Spectrum specification in spec_type must be "func" or "spectrum"')
        else:
            raise TypeError('Spectrum specification in spec_type must be a string')

        if isinstance(frequency, (int, float, NP.ndarray)):
            self.frequency = NP.asarray(frequency).reshape(1,-1)
        else:
            raise TypeError('Sky model frequency must be a integer, float, or numpy array')

        self.location = location
        self.spectrum = None
        self.spec_extfile = None
        if self.spec_type == 'spectrum':
            check_spectrum = False
            if 'spec_extfile' in init_parms:
                if init_parms['spec_extfile'] is None:
                    check_spectrum = True
                    # Fixed message typo ("skey" -> "key")
                    warnings.warn('No value specified under key "spec_extfile". Will check for value under key "spectrum"')
                elif isinstance(init_parms['spec_extfile'], str):
                    self.spec_extfile = init_parms['spec_extfile']
                    # Validate the external data up front, then discard it
                    spectrum = retrieve_external_spectrum(init_parms['spec_extfile'], ind=None)
                    if not isinstance(spectrum, NP.ndarray):
                        raise TypeError('Spectrum in external file is not a numpy array')
                    if spectrum.shape != (self.location.shape[0], self.frequency.size):
                        raise ValueError('Spectrum data in external file does not have compatible dimensions with number of objects and number of frequency channels')
                    del spectrum
                else:
                    raise TypeError('Value under spec_extfile must be a string')
            else:
                check_spectrum = True
            if check_spectrum:
                if 'spectrum' not in init_parms:
                    raise KeyError('Sky model spectrum not provided.')
                spectrum = init_parms['spectrum']
                if not isinstance(spectrum, NP.ndarray):
                    raise TypeError('Sky model spectrum must be a numpy array')
                if spectrum.shape != (self.location.shape[0], self.frequency.size):
                    raise ValueError('Sky model spectrum must have same number of rows as number of object locations and same number of columns as number of frequency channels')
                self.spectrum = spectrum
        else:
            # spec_type == 'func': fill in defaults for any missing parameters
            if spec_parms is None:
                spec_parms = {}
                spec_parms['name'] = NP.repeat('power-law', self.location.shape[0])
                spec_parms['power-law-index'] = NP.zeros(self.location.shape[0])
                spec_parms['freq-ref'] = NP.mean(self.frequency) + NP.zeros(self.location.shape[0])
                spec_parms['flux-scale'] = NP.ones(self.location.shape[0])
                spec_parms['flux-offset'] = NP.zeros(self.location.shape[0])
                spec_parms['z-width'] = NP.zeros(self.location.shape[0])
            elif not isinstance(spec_parms, dict):
                raise TypeError('Spectral parameters in spec_parms must be specified as a dictionary')
            if 'name' not in spec_parms:
                spec_parms['name'] = NP.repeat('power-law', self.location.shape[0])
            if isinstance(spec_parms['name'], (list, NP.ndarray)):
                spec_parms['name'] = NP.asarray(spec_parms['name'])
                if spec_parms['name'].size != self.location.shape[0]:
                    raise ValueError('Number of spectral functional names should match the number of object locations.')
                uniq_names = NP.unique(spec_parms['name'])
                for name in uniq_names:
                    if name not in ['random', 'monotone', 'power-law', 'tanh']:
                        raise ValueError('Spectral functional names must be from "random", "monotone", "power-law" and "tanh".')
            else:
                raise TypeError('Values under key "name" in dictionary spec_parms must be a list or numpy array of strings')
            if 'flux-scale' not in spec_parms:
                spec_parms['flux-scale'] = NP.ones(self.location.shape[0])
            else:
                if not isinstance(spec_parms['flux-scale'], (int,float,NP.ndarray)):
                    raise TypeError('Flux scale must be a scalar or numpy array')
                spec_parms['flux-scale'] = NP.asarray(spec_parms['flux-scale'])
                if spec_parms['flux-scale'].size == 1:
                    # Broadcast a scalar scale to one value per object
                    spec_parms['flux-scale'] = spec_parms['flux-scale'] + NP.zeros(self.location.shape[0])
                elif spec_parms['flux-scale'].size == self.location.shape[0]:
                    spec_parms['flux-scale'] = spec_parms['flux-scale'].ravel()
                else:
                    raise ValueError('Size of flux scale must be equal to the number of sky locations')
                if NP.any(spec_parms['flux-scale'] <= 0.0):
                    raise ValueError('Flux scale values must be positive')
            if 'flux-offset' not in spec_parms:
                spec_parms['flux-offset'] = NP.zeros(self.location.shape[0])
            if 'freq-ref' not in spec_parms:
                spec_parms['freq-ref'] = NP.mean(self.frequency) + NP.zeros(self.location.shape[0])
            elif NP.any(spec_parms['freq-ref'] <= 0.0):
                raise ValueError('Reference frequency values must be positive')
            if 'power-law-index' not in spec_parms:
                spec_parms['power-law-index'] = NP.zeros(self.location.shape[0])
            if 'z-width' not in spec_parms:
                spec_parms['z-width'] = NP.zeros(self.location.shape[0])
            elif NP.any(spec_parms['z-width'] < 0.0):
                raise ValueError('Characteristic redshift widths must not be negative')
            self.spec_parms = spec_parms

        if src_shape is not None:
            self.src_shape = NP.asarray(src_shape)
            if self.src_shape.shape[1] != 3:
                raise ValueError('Source shape must consist of three columns (major axis FWHM, minor axis FWHM, position angle) per source.')
            if src_shape_units is not None:
                if not isinstance(src_shape_units, (list, tuple)):
                    raise TypeError('Source shape units must be provided as a list or tuple')
                if len(src_shape_units) != 3:
                    raise ValueError('Source shape units must contain three elements.')
                # Normalize major axis FWHM to degrees
                if src_shape_units[0] == 'arcsec':
                    self.src_shape[:,0] = self.src_shape[:,0]/3.6e3
                elif src_shape_units[0] == 'arcmin':
                    self.src_shape[:,0] = self.src_shape[:,0]/60.0
                elif src_shape_units[0] == 'radian':
                    self.src_shape[:,0] = NP.degrees(self.src_shape[:,0])
                elif src_shape_units[0] != 'degree':
                    raise ValueError('major axis FWHM must be specified as "arcsec", "arcmin", "degree" or "radian"')
                # Normalize minor axis FWHM to degrees
                if src_shape_units[1] == 'arcsec':
                    self.src_shape[:,1] = self.src_shape[:,1]/3.6e3
                elif src_shape_units[1] == 'arcmin':
                    self.src_shape[:,1] = self.src_shape[:,1]/60.0
                elif src_shape_units[1] == 'radian':
                    self.src_shape[:,1] = NP.degrees(self.src_shape[:,1])
                elif src_shape_units[1] != 'degree':
                    # BUGFIX: original re-validated src_shape_units[0] here, so an
                    # invalid minor-axis unit string was silently accepted
                    raise ValueError('minor axis FWHM must be specified as "arcsec", "arcmin", "degree" or "radian"')
                # Normalize position angle to degrees
                if src_shape_units[2] == 'radian':
                    self.src_shape[:,2] = NP.degrees(self.src_shape[:,2])
                elif src_shape_units[2] != 'degree':
                    raise ValueError('position angle must be specified as "degree" or "radian" measured from north towards east.')
            # BUGFIX: use the normalized array; original called .shape on the raw
            # input, which fails when src_shape is given as a list of lists
            if self.src_shape.shape[0] != self.location.shape[0]:
                raise ValueError('Number of source shapes in src_shape must match the number of object locations')
        else:
            # BUGFIX: ensure the attribute always exists so that subset() and
            # save() can test it without raising AttributeError
            self.src_shape = None
#############################################################################
def match(self, other, matchrad=None, nnearest=0, maxmatches=-1):
"""
-------------------------------------------------------------------------
Match the source positions in an instance of class SkyModel with
another instance of the same class to a specified angular radius using
spherematch() in the geometry module
Inputs:
other [2-column numpy array instance of class SkyModel] Numpy
array with two columns specifying the source positions in
the other sky model or the other instance of class
SkyModel with which the current instance is to be
matched with
matchrad [scalar] Angular radius (in degrees) inside which matching
should occur. If not specified, if maxmatches is positive,
all the nearest maxmatches neighbours are found, and if
maxmatches is not positive, the nnearest-th nearest
neighbour specified by nnearest is found.
maxmatches [scalar] The maximum number of matches (all of the
maxmatches nearest neighbours) that lie within matchrad are
found. If matchrad is not specified, all the maxmatches
nearest neighbours are found. If maxmatches < 0, and matchrad
is not set, then the nnearest-th nearest neighbour is found
(which defaults to the nearest neighbour if nnearest <= 0)
nnearest [scalar] nnearest-th neighbour to be found. Used only when
maxmatches is not positive. If matchrad is set, the
specified neighbour is identified if found inside matchrad,
otherwise the nnearest-th neighbour is identified regardless
of the angular distance.
Outputs:
m1 [list] List of indices of matches in the current instance of
class SkyModel
m2 [list] List of indices of matches in the other instance of
class SkyModel
d12 [list] List of angular distances between the matched subsets
of the two sky models indexed by m1 and m2 respectively
-------------------------------------------------------------------------
"""
if not isinstance(other, (NP.ndarray, SkyModel)):
raise TypeError('"other" must be a Nx2 numpy array or an instance of class SkyModel.')
if isinstance(other, SkyModel):
if (self.epoch == other.epoch) and (self.coords == other.coords):
return GEOM.spherematch(self.location[:,0], self.location[:,1],
other.location[:,0],
other.location[:,1], matchrad,
nnearest, maxmatches)
else:
raise ValueError('epoch and/or sky coordinate type mismatch. Cannot match.')
else:
return GEOM.spherematch(self.location[:,0], self.location[:,1],
other[:,0], other[:,1], matchrad,
nnearest, maxmatches)
#############################################################################
def subset(self, outloc, axis='position', interp_method='pchip'):
"""
-------------------------------------------------------------------------
Provide a subset of the sky model using a list of output locations
onto the existing sky model. Subset can be either in position or
frequency channels
Inputs:
outloc [list or numpy array] Flattened list or numpy array of
output locations of sources (integer indices) or the spectral
frequencies (in frequency units) in the current instance of
class SkyModel. The output locations correspond to the axis
specified in input axis
axis [string] the axis to take the subset along. Currently
accepted values are 'position' (default) and 'spectrum'
which indicates the output locations are to be used along the
position or spectrum axis respectively to obtain the subset.
When spectral axis is specified by spec_type='func', then
there will be no slicing along the spectral axis and will
return the original instance.
interp_method
[string] Specifies the type of spectral interpolation if the
axis is set to 'spectrum'. Accepted values are 'nearest'
(nearest neighbor), 'linear', 'cubic' or 'pchip' (default)
Output: [instance of class SkyModel] An instance of class
SkyModel holding a subset of the sources in the current
instance of class SkyModel
-------------------------------------------------------------------------
"""
try:
outloc
except NameError:
return self
if axis not in ['position', 'spectrum']:
raise ValueError('input axis must be along position or spectrum')
if (outloc is None) or (len(outloc) == 0):
return self
else:
init_parms = {}
outloc = NP.asarray(outloc).ravel()
if axis == 'position':
if not outloc.dtype != NP.int:
raise TypeError('Output locations for position axis must be integer indices')
init_parms = {'name': NP.take(self.name, outloc), 'frequency': self.frequency, 'location': NP.take(self.location, outloc, axis=0), 'spec_type': self.spec_type, 'epoch': self.epoch, 'coords': self.coords}
if self.spec_type == 'spectrum':
if self.spectrum is not None:
init_parms['spectrum'] = NP.take(self.spectrum, outloc, axis=0)
elif self.spec_extfile is not None:
init_parms['spectrum'] = retrieve_external_spectrum(self.spec_extfile, ind=outloc)
else:
raise AttributeError('Neither attribute "spectrum" nor "spec_extfile" found in the instance')
if self.src_shape is not None:
init_parms['src_shape'] = NP.take(self.src_shape, outloc, axis=0)
else:
spec_parms = {}
spec_parms['name'] = NP.take(self.spec_parms['name'], outloc)
spec_parms['power-law-index'] = NP.take(self.spec_parms['power-law-index'], outloc)
spec_parms['freq-ref'] = NP.take(self.spec_parms['freq-ref'], outloc)
spec_parms['flux-scale'] = NP.take(self.spec_parms['flux-scale'], outloc)
spec_parms['flux-offset'] = NP.take(self.spec_parms['flux-offset'], outloc)
spec_parms['z-width'] = NP.take(self.spec_parms['z-width'], outloc)
init_parms['spec_parms'] = spec_parms
if self.src_shape is not None:
init_parms['src_shape'] = NP.take(self.src_shape, outloc, axis=0)
else:
init_parms = {'name': self.name, 'frequency': outloc, 'location': self.location, 'spec_type': self.spec_type, 'epoch': self.epoch, 'coords': self.coords}
if self.src_shape is not None:
init_parms['src_shape'] = self.src_shape
if self.spec_type == 'func':
init_parms['spec_parms'] = self.spec_parms
else:
init_parms['spectrum'] = self.generate_spectrum(ind=None, frequency=outloc, interp_method=interp_method)
return SkyModel(init_parms=init_parms, init_file=None)
#############################################################################
def generate_spectrum(self, ind=None, frequency=None, interp_method='pchip'):
    """
    -------------------------------------------------------------------------
    Generate and return a spectrum from functional spectral parameters

    Inputs:

    ind        [scalar, list or numpy array] Indices to select objects in
               the catalog or sky model. If set to None (default), all
               objects will be selected.

    frequency  [scalar or numpy array] Frequencies at which the spectrum at
               all object locations is to be created. Must be in same units
               as the attribute frequency and values under key 'freq-ref'
               of attribute spec_parms. If not provided (default=None), a
               spectrum is generated for all the frequencies specified in
               the attribute frequency and values under keys 'freq-ref' and
               'z-width' of attribute spec_parms.

    interp_method
               [string] Specified kind of interpolation to be used if
               self.spec_type is set to 'spectrum'. Accepted values are
               described in docstring of scipy.interpolate.interp1d()
               ['nearest', 'linear', 'cubic'] or
               scipy.interpolate.PchipInterpolator() ['pchip' (default)]
               or as a power law index specified by 'power-law'

    Outputs:

    spectrum   [numpy array] Spectrum of the sky model at the specified
               sky locations. Has shape nobj x nfreq.

    Power law calculation uses the convention,
    spectrum = flux_offset + flux_scale * (freq/freq0)**spindex

    Monotone places a delta function at the frequency channel closest to
    the reference frequency if it lies inside the frequency range,
    otherwise a zero spectrum is assigned.
    Thus spectrum = flux_scale * delta(freq-freq0)

    Random (currently only gaussian) places random fluxes in the spectrum
    spectrum = flux_offset + flux_scale * random_normal(freq.size)

    tanh spectrum is defined as (Bowman & Rogers 2010):
    spectrum = flux_scale * sqrt((1+z)/10) * 0.5 * [tanh((z-zr)/dz) + 1]
    where, flux_scale is typically 0.027 K, zr = reionization redshift
    when x_i = 0.5, and dz = redshift width (dz ~ 1)

    If the attribute spec_type is 'spectrum' the attribute spectrum is
    returned on the selected indices and requested spectral interpolation
    method
    -------------------------------------------------------------------------
    """
    # Normalize object selection: default to every object in the model
    if ind is None:
        ind = NP.arange(self.location.shape[0], dtype=NP.int)
    elif not isinstance(ind, (int,list,NP.ndarray)):
        raise TypeError('Input ind must be an integer, list or numpy array')
    else:
        ind = NP.asarray(ind).astype(NP.int)
        if NP.any(NP.logical_or(ind < 0, ind >= self.location.shape[0])):
            raise IndexError('Out of bound indices found in input ind')
    # Normalize requested frequencies: default to the model's own channels
    if frequency is not None:
        if isinstance(frequency, (int,float,NP.ndarray)):
            frequency = NP.asarray(frequency).ravel()
        else:
            raise ValueError('Input parameter frequency must be a scalar or a numpy array')
        if NP.any(frequency <= 0.0):
            raise ValueError('Input parameter frequency must contain positive values')
    else:
        frequency = NP.copy(self.frequency)
    if self.spec_type == 'func':
        spectrum = NP.empty((ind.size, frequency.size))
        spectrum.fill(NP.nan)
        # Group selected objects by their spectral-function name
        uniq_names, invind = NP.unique(self.spec_parms['name'][ind], return_inverse=True)
        if len(uniq_names) > 1:
            # NOTE(review): presumably OPS.binned_statistic returns IDL-style
            # reverse indices in ri (unlike scipy's version) -- verify in OPS
            counts, edges, bnum, ri = OPS.binned_statistic(invind, statistic='count', bins=range(len(uniq_names)))
        else:
            counts = len(invind)
            ri = range(counts)
        for i, name in enumerate(uniq_names):
            if len(uniq_names) > 1:
                # Reverse-index slice selects positions of group i within ind
                ind_to_be_filled = NP.asarray(ri[ri[i]:ri[i+1]])
            else:
                ind_to_be_filled = NP.asarray(ri)
            ind_to_be_used = ind[ind_to_be_filled]
            if name == 'random':
                # NOTE(review): when only one group is present, counts is a
                # scalar here, so counts[i] looks like it would fail -- confirm
                spectrum[ind_to_be_filled,:] = self.spec_parms['flux-offset'][ind_to_be_used].reshape(-1,1) + self.spec_parms['flux-scale'][ind_to_be_used].reshape(-1,1) * NP.random.randn(counts[i], frequency.size)
            if name == 'monotone': # Needs serious testing
                spectrum[ind_to_be_filled,:] = 0.0
                # Nearest channel to each reference frequency; sources whose
                # reference lies outside the band are dropped (remove_oob)
                inpind, refind, dNN = LKP.find_1NN(frequency, self.spec_parms['freq-ref'][ind_to_be_used], distance=frequency[1]-frequency[0], remove_oob=True)
                ind_match = ind_to_be_used[inpind]
                # NOTE(review): Python-2-style zip-based fancy indexing; also
                # ind_match indexes the full catalog while spectrum rows are
                # positions within ind -- looks inconsistent, verify
                ind2d = zip(ind_match, refind)
                spectrum[zip(*ind2d)] = self.spec_parms['flux-scale'][ind_match]
            if name == 'power-law':
                spectrum[ind_to_be_filled,:] = self.spec_parms['flux-offset'][ind_to_be_used].reshape(-1,1) + self.spec_parms['flux-scale'][ind_to_be_used].reshape(-1,1) * (frequency.reshape(1,-1)/self.spec_parms['freq-ref'][ind_to_be_used].reshape(-1,1))**self.spec_parms['power-law-index'][ind_to_be_used].reshape(-1,1)
            if name == 'tanh':
                # Redshift of each channel relative to the HI rest frequency
                z = CNST.rest_freq_HI/frequency.reshape(1,-1) - 1
                zr = CNST.rest_freq_HI/self.spec_parms['freq-ref'][ind_to_be_used].reshape(-1,1) - 1
                dz = self.spec_parms['z-width'][ind_to_be_used].reshape(-1,1)
                amp = self.spec_parms['flux-scale'][ind_to_be_used].reshape(-1,1) * NP.sqrt((1+z)/10)
                xh = 0.5 * (NP.tanh((z-zr)/dz) + 1)
                spectrum[ind_to_be_filled,:] = amp * xh
        return spectrum
    else:
        # spec_type == 'spectrum': interpolate the stored/external spectrum
        if not isinstance(interp_method, str):
            raise TypeError('Input interp_method must be a string')
        if NP.any(NP.logical_or(frequency < self.frequency.min(), frequency > self.frequency.max())):
            raise ValueError('Frequencies requested in output lie out of range of sky model frequencies and hence cannot be interpolated')
        if self.spectrum is not None:
            spectrum = NP.take(self.spectrum, ind, axis=0)
        elif self.spec_extfile is not None:
            spectrum = retrieve_external_spectrum(self.spec_extfile, ind=ind)
        else:
            raise AttributeError('Neither attribute "spectrum" nor "spec_extfile" found in the instance')
        # Short-circuit when the requested grid matches the stored grid
        if self.frequency.size == frequency.size:
            if NP.all(NP.abs(self.frequency - frequency) < 1e-10):
                return spectrum
        if interp_method.lower() == 'power-law':
            # Fit one spectral index per object and extrapolate from the
            # band-center channel
            spindex = FG.power_law_spectral_index(self.frequency.ravel(), spectrum)
            return spectrum[:,int(self.frequency.size/2)].reshape(-1,1) * (frequency.ravel()/self.frequency.ravel()[int(self.frequency.size/2)]).reshape(1,-1)**spindex.reshape(-1,1)
        else:
            if interp_method.lower() in ['nearest', 'linear', 'cubic']:
                interp_func = interp1d(self.frequency.ravel(), spectrum, axis=1, kind=interp_method)
            elif interp_method.lower() == 'pchip':
                interp_func = PchipInterpolator(self.frequency.ravel(), spectrum, axis=1)
            return interp_func(frequency)
#############################################################################
def load_external_spectrum(self):
"""
-------------------------------------------------------------------------
Load full spectrum from external file
-------------------------------------------------------------------------
"""
if self.spec_type == 'spectrum':
if self.spectrum is not None:
if self.spec_extfile is not None:
raise ValueError('Both attributes spectrum and spec_extfile are set. This will overwrite the existing values in attribute spectrum')
else:
warnings.warn('Attribute spec_extfile is not set. Continuing without any action.')
elif self.spec_extfile is not None:
self.spectrum = retrieve_external_spectrum(self.spec_extfile, ind=None)
if self.spectrum.shape != (self.location.shape[0], self.frequency.size):
raise ValueError('Dimensions of external spectrum incompatible with expected number of sources and spectral channels')
self.spec_extfile = None
else:
raise AttributeError('Neither attribute spectrum not spec_extfile is set')
else:
warnings.warn('Attribute spec_type is not spectrum. Continuing without any action.')
#############################################################################
def to_healpix(self, freq, nside, in_units='Jy', out_coords='equatorial',
               out_units='K', outfile=None, outfmt='fits'):
    """
    Convert the catalog to HEALPIX map(s) of the given nside at the
    specified frequencies.

    Parameters
    ----------
    freq : scalar or numpy array
        Frequencies at which HEALPIX output maps are generated.
    nside : int
        HEALPIX nside parameter for the output map(s).
    in_units : str
        Units of the input catalog: 'K' (temperature) or 'Jy' (flux
        density). Default='Jy'.
    out_coords : str
        Output coordinate system, 'galactic' or 'equatorial' (default).
    out_units : str
        Units of the output map: 'K' or 'Jy'. Default='K'.
    outfile : str or None
        Filename (full path; extension appended) to save the map(s) to.
    outfmt : str
        Output file format, 'fits' (default) or 'ascii'.

    Returns
    -------
    dict
        Documented contract: keys 'filename' (path of saved file or
        None) and 'spectrum' (npix x nchan array).
        NOTE(review): the FITS branch instead returns keys 'hlpxfile'
        and 'hlpxspec', and both save branches return outfile+outfmt
        without the '.' separator used when actually writing the file --
        inconsistent with this contract; confirm which keys callers use.
    """
    try:
        freq
    except NameError:
        # freq is a required positional parameter, so this fallback
        # (centre channel of self.frequency) appears unreachable
        freq = self.frequency.ravel()[self.frequency.size/2]
    else:
        if not isinstance(freq, (int,float,list,NP.ndarray)):
            raise TypeError('Input parameter freq must be a scalar or numpy array')
        else:
            freq = NP.asarray(freq).reshape(-1)
    try:
        nside
    except NameError:
        raise NameError('Input parameter nside not specified')
    else:
        if not isinstance(nside, int):
            raise TypeError('Input parameter nside must be an integer')
    if not isinstance(out_coords, str):
        raise TypeError('Input parameter out_coords must be a string')
    elif out_coords not in ['equatorial', 'galactic']:
        raise ValueError('Input parameter out_coords must be set to "equatorial" or "galactic"')
    if not isinstance(in_units, str):
        raise TypeError('in_units must be a string')
    elif in_units not in ['K', 'Jy']:
        raise ValueError('in_units must be "K" or "Jy"')
    if not isinstance(out_units, str):
        raise TypeError('out_units must be a string')
    elif out_units not in ['K', 'Jy']:
        raise ValueError('out_units must be "K" or "Jy"')
    if outfile is not None:
        if not isinstance(outfile, str):
            raise TypeError('outfile must be a string')
    if not isinstance(outfmt, str):
        raise TypeError('outfile format must be specified by a string')
    elif outfmt not in ['ascii', 'fits']:
        raise ValueError('outfile format must be "ascii" or "fits"')
    # Source positions are stored as ICRS RA/Dec; derive the requested
    # frame's spherical angles (theta = colatitude, phi = azimuth)
    ec = SkyCoord(ra=self.location[:,0], dec=self.location[:,1], unit='deg', frame='icrs')
    gc = ec.transform_to('galactic')
    if out_coords == 'galactic':
        phi = gc.l.radian
        theta = NP.pi/2 - gc.b.radian
    else:
        phi = ec.ra.radian
        theta = NP.pi/2 - ec.dec.radian
    outmap = NP.zeros((HP.nside2npix(nside), freq.size))
    pixarea = HP.nside2pixarea(nside)
    pix = HP.ang2pix(nside, theta, phi)
    spectrum = self.generate_spectrum(frequency=freq)
    if in_units != out_units:
        # Rayleigh-Jeans conversion between flux density and brightness
        # temperature over one pixel's solid angle
        if out_units == 'K':
            spectrum = (FCNST.c / freq.reshape(1,-1))**2 / (2*FCNST.k*pixarea) * spectrum * CNST.Jy # Convert into temperature
        else:
            spectrum = (freq.reshape(1,-1) / FCNST.c)**2 * (2*FCNST.k*pixarea) * spectrum / CNST.Jy # Convert into Jy
    # Group sources by output pixel; sources sharing a pixel are summed
    uniq_pix, uniq_pix_ind, pix_invind = NP.unique(pix, return_index=True, return_inverse=True)
    counts, binedges, binnums, ri = OPS.binned_statistic(pix_invind, statistic='count', bins=NP.arange(uniq_pix.size+1))
    ind_count_gt1, = NP.where(counts > 1)
    ind_count_eq1, = NP.where(counts == 1)
    # Pixels containing exactly one source: batch assignment
    upix_1count = []
    spec_ind = []
    for i in ind_count_eq1:
        ind = ri[ri[i]:ri[i+1]]   # reverse indices of sources in bin i
        upix_1count += [uniq_pix[i]]
        spec_ind += [ind]
    upix_1count = NP.asarray(upix_1count)
    spec_ind = NP.asarray(spec_ind).ravel()
    outmap[upix_1count,:] = spectrum[spec_ind,:]
    # Pixels containing multiple sources: sum their spectra
    for i in ind_count_gt1:
        upix = uniq_pix[i]
        ind = ri[ri[i]:ri[i+1]]
        outmap[upix,:] = NP.sum(spectrum[ind,:], axis=0)
    # Save the healpix spectrum to file
    if outfile is not None:
        if outfmt == 'fits':
            hdulist = []
            hdulist += [fits.PrimaryHDU()]
            hdulist[0].header['EXTNAME'] = 'PRIMARY'
            hdulist[0].header['NSIDE'] = nside
            # NOTE(review): 'UNTIS' looks like a typo for 'UNITS';
            # confirm downstream FITS readers before changing the key
            hdulist[0].header['UNTIS'] = out_units
            hdulist[0].header['NFREQ'] = freq.size
            # one image HDU per frequency channel
            for chan, f in enumerate(freq):
                hdulist += [fits.ImageHDU(outmap[:,chan], name='{0:.1f} MHz'.format(f/1e6))]
            hdu = fits.HDUList(hdulist)
            hdu.writeto(outfile+'.fits', clobber=True)
            return {'hlpxfile': outfile+outfmt, 'hlpxspec': outmap}
        else:
            # one ascii table column per frequency channel
            out_dict = {}
            colnames = []
            colfrmts = {}
            for chan, f in enumerate(freq):
                out_dict['{0:.1f}_MHz'.format(f/1e6)] = outmap[:,chan]
                colnames += ['{0:.1f}_MHz'.format(f/1e6)]
                colfrmts['{0:.1f}_MHz'.format(f/1e6)] = '%0.5f'
            tbdata = Table(out_dict, names=colnames)
            ascii.write(tbdata, output=outfile+'.txt', format='fixed_width_two_line', formats=colfrmts, bookend=False, delimiter='|', delimiter_pad=' ')
            return {'filename': outfile+outfmt, 'spectrum': outmap}
    else:
        return {'filename': outfile, 'spectrum': outmap}
############################################################################
def save(self, outfile, fileformat='hdf5', extspec_action=None):
    """
    Save the sky model to the specified output file.

    Parameters
    ----------
    outfile : str
        Output filename (full path) without extension; '.hdf5' or
        '.txt' is appended based on fileformat.
    fileformat : str
        'hdf5' (default) or 'ascii'.
    extspec_action : str or None
        If 'unload' and the attribute spectrum is present, the spectrum
        is written only to the output file and dropped from the
        instance, with spec_extfile pointed at the output file. If None
        (default) the spectrum also stays in memory. Applies only when
        spec_type is not 'func'.
    """
    try:
        outfile
    except NameError:
        raise NameError('outfile not specified')
    if fileformat not in ['hdf5', 'ascii']:
        raise ValueError('Output fileformat must be set to "hdf5" or "ascii"')
    if fileformat == 'hdf5':
        outfile = outfile + '.hdf5'
        with h5py.File(outfile, 'w') as fileobj:
            # Header group: provenance and layout flags
            hdr_group = fileobj.create_group('header')
            hdr_group['AstroUtils#'] = astroutils.__githash__
            hdr_group['spec_type'] = self.spec_type
            hdr_group['is_healpix'] = int(self.is_healpix)
            hdr_group['healpix_ordering'] = self.healpix_ordering
            # Object group: source names and sky positions
            object_group = fileobj.create_group('object')
            object_group.attrs['epoch'] = self.epoch
            object_group.attrs['coords'] = self.coords
            name_dset = object_group.create_dataset('name', (self.name.size,), maxshape=(None,), data=self.name, compression='gzip', compression_opts=9)
            if self.coords == 'radec':
                ra_dset = object_group.create_dataset('RA', (self.location.shape[0],), maxshape=(None,), data=self.location[:,0], compression='gzip', compression_opts=9)
                ra_dset.attrs['units'] = 'degrees'
                dec_dset = object_group.create_dataset('Dec', (self.location.shape[0],), maxshape=(None,), data=self.location[:,1], compression='gzip', compression_opts=9)
                dec_dset.attrs['units'] = 'degrees'
            elif self.coords == 'altaz':
                alt_dset = object_group.create_dataset('Alt', (self.location.shape[0],), maxshape=(None,), data=self.location[:,0], compression='gzip', compression_opts=9)
                alt_dset.attrs['units'] = 'degrees'
                az_dset = object_group.create_dataset('Az', (self.location.shape[0],), maxshape=(None,), data=self.location[:,1], compression='gzip', compression_opts=9)
                az_dset.attrs['units'] = 'degrees'
            else:
                raise ValueError('This coordinate system is not currently supported')
            if self.src_shape is not None:
                src_shape_dset = object_group.create_dataset('shape', self.src_shape.shape, maxshape=(None,self.src_shape.shape[1]), data=self.src_shape, compression='gzip', compression_opts=9)
                src_shape_dset.attrs['units'] = 'degrees'
            # Spectral group: functional parameters or the full spectrum
            spec_group = fileobj.create_group('spectral_info')
            freq_range_dset = spec_group.create_dataset('freq', (self.frequency.size,), maxshape=(None,), data=self.frequency.ravel(), compression='gzip', compression_opts=9)
            freq_range_dset.attrs['units'] = 'Hz'
            if self.spec_type == 'func':
                func_name_dset = spec_group.create_dataset('func-name', self.spec_parms['name'].shape, maxshape=(None,), data=self.spec_parms['name'])
                freq_ref_dset = spec_group.create_dataset('freq-ref', self.spec_parms['freq-ref'].shape, maxshape=(None,), data=self.spec_parms['freq-ref'], compression='gzip', compression_opts=9)
                freq_ref_dset.attrs['units'] = 'Hz'
                flux_dset = spec_group.create_dataset('flux_density', self.spec_parms['flux-scale'].shape, maxshape=(None,), data=self.spec_parms['flux-scale'], compression='gzip', compression_opts=9)
                flux_dset.attrs['units'] = 'Jy'
                if 'flux-offset' in self.spec_parms:
                    fluxoffset_dset = spec_group.create_dataset('flux_offset', self.spec_parms['flux-offset'].shape, maxshape=(None,), data=self.spec_parms['flux-offset'], compression='gzip', compression_opts=9)
                    fluxoffset_dset.attrs['units'] = 'Jy'
                if NP.all(self.spec_parms['name'] == 'power-law'):
                    spindex_dset = spec_group.create_dataset('spindex', self.spec_parms['power-law-index'].shape, maxshape=(None,), data=self.spec_parms['power-law-index'], compression='gzip', compression_opts=9)
            else:
                if self.spectrum is not None:
                    # Chunk one source's spectrum per chunk for fast
                    # per-row reads later
                    spectrum_dset = spec_group.create_dataset('spectrum', data=self.spectrum, chunks=(1,self.frequency.size), compression='gzip', compression_opts=9)
                    if extspec_action is not None:
                        if not isinstance(extspec_action, str):
                            raise TypeError('Input extspec_action must be a string')
                        if extspec_action.lower() not in ['unload']:
                            raise ValueError('Value specified for input extspec_action invalid')
                        if extspec_action.lower() == 'unload':
                            self.spectrum = None
                elif self.spec_extfile is None:
                    raise AttributeError('Neither attribute "spectrum" nor "spec_extfile" found in the instance')
                # Record this output file as the external spectrum source
                self.spec_extfile = outfile
                spec_group['spec_extfile'] = outfile
    else:
        outfile = outfile + '.txt'
        # Fixed-width column formats for the ascii table
        frmts = {}
        frmts['ID'] = '%s19'
        frmts['RA (deg)'] = '%10.6f'
        frmts['DEC (deg)'] = '%+10.6f'
        frmts['S (Jy)'] = '%8.3f'
        frmts['FREQ (MHz)'] = '%12.7f'
        data_dict = {}
        if self.coords == 'radec':
            # Build IAU-style source IDs from coordinates (B/J prefix
            # selected by epoch)
            if self.epoch == 'B1950':
                data_dict['ID'] = NP.char.replace('B'+NP.char.array(Angle(self.location[:,0],unit=units.degree).to_string(unit=units.hour,sep=':',alwayssign=False,pad=True,precision=2))+NP.char.array(Angle(self.location[:,1],unit=units.degree).to_string(unit=units.degree,sep=':',alwayssign=True,pad=True,precision=1)), ':', '')
            else:
                data_dict['ID'] = NP.char.replace('J'+NP.char.array(Angle(self.location[:,0],unit=units.degree).to_string(unit=units.hour,sep=':',alwayssign=False,pad=True,precision=2))+NP.char.array(Angle(self.location[:,1],unit=units.degree).to_string(unit=units.degree,sep=':',alwayssign=True,pad=True,precision=1)), ':', '')
            data_dict['RA (deg)'] = self.location[:,0]
            data_dict['DEC (deg)'] = self.location[:,1]
            if self.spec_type == 'func':
                data_dict['S (Jy)'] = self.spec_parms['flux-scale']
                data_dict['FREQ (MHz)'] = self.spec_parms['freq-ref']/1e6 # in MHz
                if NP.all(self.spec_parms['name'] == 'power-law'):
                    data_dict['SPINDEX'] = self.spec_parms['power-law-index']
                    frmts['SPINDEX'] = '%0.2f'
                    field_names = ['ID', 'RA (deg)', 'DEC (deg)', 'S (Jy)', 'FREQ (MHz)', 'SPINDEX']
                else:
                    field_names = ['ID', 'RA (deg)', 'DEC (deg)', 'S (Jy)', 'FREQ (MHz)']
            else:
                # No per-source flux column; record the centre frequency
                data_dict['FREQ (MHz)'] = self.frequency.flatten()[self.frequency.size/2] / 1e6 + NP.zeros(self.location.shape[0]) # MHz
                field_names = ['ID', 'RA (deg)', 'DEC (deg)', 'FREQ (MHz)']
        tbdata = Table(data_dict, names=field_names)
        ascii.write(tbdata, output=outfile, format='fixed_width_two_line', formats=frmts, delimiter=' ', delimiter_pad=' ', bookend=False)
################################################################################
def diffuse_radio_sky_model(outfreqs, gsmversion='gsm2008', nside=512, ind=None, outfile=None, parallel=False):
    """
    Generate a diffuse-radio-sky SkyModel from the PyGSM Global Sky
    Model (Oliveira-Costa et al. 2008, or the 2016 update).

    Parameters
    ----------
    outfreqs : list or numpy array
        Output frequencies. NOTE(review): the original docs said MHz,
        but the range check (10e6..5e12) and the gsm.generate(
        outfreqs/1e6) call show values are expected in Hz -- confirm.
    gsmversion : str
        'gsm2008' (default) or 'gsm2016'.
    nside : int
        HEALPIX nside of the output map(s). Default=512.
    ind : numpy array or None
        Sky pixel indices to keep; all pixels if None.
    outfile : str or None
        If set, the model is saved ('.hdf5' appended) and the spectrum
        unloaded onto that file.
    parallel : bool
        If True, parallelize smoothing/up-down-grading across the
        frequency axis; otherwise process channels serially.

    Returns
    -------
    skymod : SkyModel
        Contains the spectrum in attribute spectrum, unless outfile was
        provided in which case the spectrum is written externally and
        unloaded from the attribute.
    """
    if not pygsm_found:
        raise ImportError('Module PyGSM not found. Cannot proceed.')
    try:
        outfreqs
    except NameError:
        raise NameError('outfreqs must be specified')
    if outfreqs is None:
        raise ValueError('outfreqs cannot be NoneType')
    if not isinstance(outfreqs, (list, NP.ndarray)):
        raise TypeError('outfreqs must be a list or a numpy array')
    outfreqs = NP.asarray(outfreqs).reshape(-1)
    if NP.any(NP.logical_or(outfreqs < 10e6, outfreqs > 5e12)):
        raise ValueError('outfreqs must lie in the range [10MHz, 5THz]')
    if not isinstance(nside, int):
        raise TypeError('nside must be an integer')
    if not HP.isnsideok(nside):
        raise ValueError('nside must be valid')
    if ind is not None:
        if not isinstance(ind, NP.ndarray):
            raise TypeError('ind must be a numpy array')
    if outfile is not None:
        if not isinstance(outfile, str):
            raise TypeError('outfile must be a string')
    # NOTE(review): no else-branch here -- any other gsmversion value
    # leaves gsm unbound and raises NameError below; consider validating
    if gsmversion == 'gsm2008':
        gsm = GlobalSkyModel(freq_unit='MHz', basemap='haslam', interpolation='pchip') # in Kelvin
    elif gsmversion == 'gsm2016':
        gsm = GlobalSkyModel2016(freq_unit='MHz', unit='TRJ', resolution='hi', theta_rot=0, phi_rot=0) # in Kelvin
    map_cube = gsm.generate(outfreqs/1e6)
    if HP.npix2nside(map_cube.shape[1]) > nside:
        # Native resolution exceeds the request: smooth to the target
        # pixel scale before degrading
        fwhm = HP.nside2resol(nside)
        if not isinstance(parallel, bool):
            parallel = False
        print '\tComputing diffuse radio sky model...'
        if parallel:
            # one worker per frequency channel
            nproc = outfreqs.size
            list_split_maps = NP.array_split(map_cube, nproc, axis=0)
            list_fwhm = [fwhm] * nproc
            list_nside = [nside] * nproc
            list_ordering = ['RING'] * nproc
            list_verbose = [False] * nproc
            pool = MP.Pool(processes=nproc)
            list_outmaps = pool.map(healpix_smooth_and_udgrade_arg_splitter, IT.izip(list_split_maps, list_fwhm, list_nside, list_ordering, list_verbose))
            outmaps = NP.asarray(list_outmaps)
        else:
            outmaps = None
            progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} channels '.format(outfreqs.size), PGB.ETA()], maxval=outfreqs.size).start()
            for freqi, freq in enumerate(outfreqs):
                smooth_map = HP.smoothing(map_cube[freqi,:], fwhm=fwhm, verbose=False)
                outmap = HP.ud_grade(smooth_map, nside, order_in='RING')
                if outmaps is None:
                    outmaps = outmap.reshape(1,-1)
                else:
                    outmaps = NP.concatenate((outmaps, outmap.reshape(1,-1)))
                progress.update(freqi+1)
            progress.finish()
        print '\tCompleted estimating diffuse radio sky model.'
    elif HP.npix2nside(map_cube.shape[1]) < nside:
        # Native resolution below the request: upgrade without smoothing
        outmaps = None
        print '\tComputing diffuse radio sky model...'
        progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} channels'.format(outfreqs.size), PGB.ETA()], maxval=outfreqs.size).start()
        for freqi, freq in enumerate(outfreqs):
            outmap = HP.ud_grade(map_cube[freqi,:], nside, order_in='RING')
            if outmaps is None:
                outmaps = outmap.reshape(1,-1)
            else:
                outmaps = NP.concatenate((outmaps, outmap.reshape(1,-1)), axis=0)
            progress.update(freqi+1)
        progress.finish()
        print '\tCompleted estimating diffuse radio sky model.'
    else:
        outmaps = map_cube
    # Transpose to (npix, nchan) and convert temperature to Jy/pixel
    outmaps = outmaps.T
    pixarea = HP.nside2pixarea(nside) # Steradians
    outmaps = outmaps * (2.0 * FCNST.k * outfreqs.reshape(1,-1)**2 / FCNST.c**2) * pixarea / CNST.Jy # in Jy
    # Convert HEALPIX (RING) pixel angles to RA/Dec via galactic coords
    theta, phi = HP.pix2ang(nside, NP.arange(outmaps.shape[0]), nest=False)
    gc = SkyCoord(l=NP.degrees(phi)*units.degree, b=(90.0-NP.degrees(theta))*units.degree, frame='galactic')
    radec = gc.fk5
    ra = radec.ra.degree
    dec = radec.dec.degree
    if ind is not None:
        if NP.any(NP.logical_or(ind < 0, ind >= HP.nside2npix(nside))):
            raise IndexError('Specified indices outside allowed range')
        outmaps = outmaps[ind,:]
    npix = outmaps.shape[0]
    # Full pixel sets keep HEALPIX ordering; subsets are marked 'na'
    is_healpix = HP.isnpixok(npix)
    if is_healpix:
        healpix_ordering = 'ring'
    else:
        healpix_ordering = 'na'
    flux_unit = 'Jy'
    catlabel = NP.asarray([gsmversion]*npix)
    # Source "shape" set to one pixel resolution element
    majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(npix)
    minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(npix)
    spec_type = 'spectrum'
    spec_parms = {}
    skymod_init_parms = {'name': catlabel, 'frequency': outfreqs, 'location': NP.hstack((ra.reshape(-1,1), dec.reshape(-1,1))), 'is_healpix': is_healpix, 'healpix_ordering': healpix_ordering, 'spec_type': spec_type, 'spec_parms': spec_parms, 'spectrum': outmaps, 'src_shape': NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(npix).reshape(-1,1))), 'src_shape_units': ['degree','degree','degree']}
    skymod = SkyModel(init_parms=skymod_init_parms, init_file=None)
    if outfile is not None:
        skymod.save(outfile, fileformat='hdf5', extspec_action='unload')
    return skymod
################################################################################
|
nithyanandan/general
|
astroutils/catalog.py
|
Python
|
mit
| 88,092
|
[
"Gaussian"
] |
80f18084082b306adf0c376cfa99ad72a16e51b5489f585f52656f765bffc4eb
|
from os.path import dirname, join
from rabix.common.ref_resolver import from_url
from rabix.main import TEMPLATE_JOB, init_context
def test_remap_job():
    """Smoke-test loading the bwa-mem tool and building its app context.

    The input-remapping step itself is commented out below, so this only
    exercises from_url/init_context/from_dict on the bundled fixtures.
    """
    job = TEMPLATE_JOB
    tool = from_url(join(dirname(__file__), 'bwa-mem.json#tool'))
    context = init_context(tool)
    app = context.from_dict(tool)
    input_file = from_url(join(dirname(__file__), 'inputs.json'))
    startdir = './'
    # dot_update_dict(job['inputs'], get_inputs_from_file(app, input_file, startdir)[
    #     'inputs'])
    # print(job)

# Executed at import time, script-style
test_remap_job()
|
rabix/rabix
|
rabix/tests/test-mount/test.py
|
Python
|
agpl-3.0
| 539
|
[
"BWA"
] |
e8c1fc79a7315d1481c494aa67d5f30a70060293f05a1b59dc71589d5b6a4fc3
|
#! /usr/bin/env python
# --------------------------------------------------
# Plot temperature at 50 m, using the SGrid class
#
# Bjørn Ådlandsvik, <bjorn@imr.no>
# Institute of Marine Research
# Created: 2010-01-20
# --------------------------------------------------

# ---------
# Imports
# ---------

import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset

from roppy import SGrid
from roppy.mpl_util import landmask

# -----------------
# User settings
# -----------------

roms_file = "data/ocean_avg_example.nc"
var = "temp"     # name of variable in NetCDF file
tstep = 3        # 4th time step
depth = 50       # plot depth

# --------------------
# Extract the data
# --------------------

fid = Dataset(roms_file)
grd = SGrid(fid)

# Read the 3D temperature field
F = fid.variables[var][tstep, :, :, :]
long_name = fid.variables[var].long_name
# file handle left open on purpose: grd may read more fields lazily
# fid.close()

# ------------------
# Handle the data
# ------------------

# Interpolate temperature to the depth wanted
F = grd.zslice(F, depth)

# Mask away temperatures below bottom
F = np.ma.masked_where(grd.h < abs(depth), F)  # numpy masked array
# F[grd.h < depth] = np.nan

# ----------
# Plotting
# ----------

# Make a filled contour plot of the temperature values
plt.contourf(F)

# A slightly nicer colorbar matching the black isolines
plt.colorbar(drawedges=1)

# Draw black contour lines at the same isolevels
plt.contour(F, colors="black")

# Plot the land mask in grey
# Use keyword pcolor='pcolor' for savefig to eps or pdf
landmask(grd)
# landmask(grd, pcolor='pcolor')

# Fix aspect ratio, so that grid cells are squares
plt.axis("image")

# Display the plot
plt.show()
# plt.savefig('a.pdf')
|
bjornaa/roppy
|
examples/plot_temp50.py
|
Python
|
mit
| 1,684
|
[
"NetCDF"
] |
a1b2f021141cee7f92bd8fb231ce4d4d4edab0b2302214de6e108fcfbbd6a7b7
|
# compiler.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`~sqlalchemy.sql.compiler.SQLCompiler` - renders SQL
strings
:class:`~sqlalchemy.sql.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`~sqlalchemy.sql.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:module:`~sqlalchemy.ext.compiler`.
"""
import re
from sqlalchemy import schema, engine, util, exc
from sqlalchemy.sql import operators, functions, util as sql_util, visitors
from sqlalchemy.sql import expression as sql
# Identifiers matching a reserved word must be quoted when rendered.
RESERVED_WORDS = set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where'])

# Identifiers consisting only of these characters need no quoting;
# a leading digit or '$' forces quoting.
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in xrange(0, 10)]).union(['$'])

# Matches :name bind markers in text() fragments (not preceded by ':',
# a word character or a backslash); BIND_PARAMS_ESC matches the
# backslash-escaped form, which is rendered literally.
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)

# Render template for a bind parameter, keyed by DBAPI paramstyle.
BIND_TEMPLATES = {
    'pyformat':"%%(%(name)s)s",
    'qmark':"?",
    'format':"%%s",
    'numeric':":%(position)s",
    'named':":%(name)s"
}

# SQL text emitted for each operator function.
OPERATORS = {
    # binary
    operators.and_ : ' AND ',
    operators.or_ : ' OR ',
    operators.add : ' + ',
    operators.mul : ' * ',
    operators.sub : ' - ',
    # Py2K
    operators.div : ' / ',
    # end Py2K
    operators.mod : ' % ',
    operators.truediv : ' / ',
    operators.neg : '-',
    operators.lt : ' < ',
    operators.le : ' <= ',
    operators.ne : ' != ',
    operators.gt : ' > ',
    operators.ge : ' >= ',
    operators.eq : ' = ',
    operators.concat_op : ' || ',
    operators.between_op : ' BETWEEN ',
    operators.match_op : ' MATCH ',
    operators.in_op : ' IN ',
    operators.notin_op : ' NOT IN ',
    operators.comma_op : ', ',
    operators.from_ : ' FROM ',
    operators.as_ : ' AS ',
    operators.is_ : ' IS ',
    operators.isnot : ' IS NOT ',
    operators.collate : ' COLLATE ',
    # unary
    operators.exists : 'EXISTS ',
    operators.distinct_op : 'DISTINCT ',
    operators.inv : 'NOT ',
    # modifiers
    operators.desc_op : ' DESC',
    operators.asc_op : ' ASC',
}

# Render template per generic function class; '%(expr)s' receives the
# processed argument list.
FUNCTIONS = {
    functions.coalesce : 'coalesce%(expr)s',
    functions.current_date: 'CURRENT_DATE',
    functions.current_time: 'CURRENT_TIME',
    functions.current_timestamp: 'CURRENT_TIMESTAMP',
    functions.current_user: 'CURRENT_USER',
    functions.localtime: 'LOCALTIME',
    functions.localtimestamp: 'LOCALTIMESTAMP',
    functions.random: 'random%(expr)s',
    functions.sysdate: 'sysdate',
    functions.session_user :'SESSION_USER',
    functions.user: 'USER'
}

# Field-name mapping used by EXTRACT(); dialects may override a copy.
EXTRACT_MAP = {
    'month': 'month',
    'day': 'day',
    'year': 'year',
    'second': 'second',
    'hour': 'hour',
    'doy': 'doy',
    'minute': 'minute',
    'quarter': 'quarter',
    'dow': 'dow',
    'week': 'week',
    'epoch': 'epoch',
    'milliseconds': 'milliseconds',
    'microseconds': 'microseconds',
    'timezone_hour': 'timezone_hour',
    'timezone_minute': 'timezone_minute'
}
class _CompileLabel(visitors.Visitable):
    """A minimal stand-in for ``expression._Label``.

    Wraps a column-like element under a given label name without the
    overhead of constructing a full ``_Label``.
    """

    __visit_name__ = 'label'
    __slots__ = ('element', 'name')

    def __init__(self, col, name):
        self.name = name
        self.element = col

    @property
    def quote(self):
        # delegate quoting preference to the wrapped element
        return self.element.quote
class SQLCompiler(engine.Compiled):
    """Default implementation of Compiled.

    Compiles ClauseElements into SQL strings. Uses a similar visit
    paradigm as visitors.ClauseVisitor but implements its own traversal.
    """

    # mapping of EXTRACT() field names; dialects may override
    extract_map = EXTRACT_MAP

    # class-level defaults which can be set at the instance
    # level to define if this Compiled instance represents
    # INSERT/UPDATE/DELETE
    isdelete = isinsert = isupdate = False

    # holds the "returning" collection of columns if
    # the statement is CRUD and defines returning columns
    # either implicitly or explicitly
    returning = None

    # set to True classwide to generate RETURNING
    # clauses before the VALUES or WHERE clause (i.e. MSSQL)
    returning_precedes_values = False
def __init__(self, dialect, statement, column_keys=None, inline=False, **kwargs):
    """Construct a new ``DefaultCompiler`` object.

    dialect
      Dialect to be used

    statement
      ClauseElement to be compiled

    column_keys
      a list of column names to be compiled into an INSERT or UPDATE
      statement.

    inline
      if True (or if the statement itself carries an ``inline`` flag),
      INSERT/UPDATE defaults and sequences are compiled inline rather
      than pre-executed.
    """
    engine.Compiled.__init__(self, dialect, statement, **kwargs)

    self.column_keys = column_keys

    # compile INSERT/UPDATE defaults/sequences inlined (no pre-execute)
    self.inline = inline or getattr(statement, 'inline', False)

    # a dictionary of bind parameter keys to _BindParamClause instances.
    self.binds = {}

    # a dictionary of _BindParamClause instances to "compiled" names that are
    # actually present in the generated SQL
    self.bind_names = util.column_dict()

    # stack which keeps track of nested SELECT statements
    self.stack = []

    # relates label names in the final SQL to
    # a tuple of local column/label name, ColumnElement object (if any) and TypeEngine.
    # ResultProxy uses this for type processing and column targeting
    self.result_map = {}

    # true if the paramstyle is positional
    self.positional = self.dialect.positional
    if self.positional:
        # ordered list of bind names for positional paramstyles
        self.positiontup = []

    # render template for bind parameters, per the dialect's paramstyle
    self.bindtemplate = BIND_TEMPLATES[self.dialect.paramstyle]

    # an IdentifierPreparer that formats the quoting of identifiers
    self.preparer = self.dialect.identifier_preparer

    # effective maximum length for generated label names
    self.label_length = self.dialect.label_length or self.dialect.max_identifier_length

    # a map which tracks "anonymous" identifiers that are
    # created on the fly here
    self.anon_map = util.PopulateDict(self._process_anon)

    # a map which tracks "truncated" names based on dialect.label_length
    # or dialect.max_identifier_length
    self.truncated_names = {}
def is_subquery(self):
    """Return True when compilation is currently inside a nested SELECT."""
    # the outermost statement occupies one stack entry; anything beyond
    # that means we are inside an enclosed select
    return bool(self.stack[1:])
@property
def sql_compiler(self):
    # the compiler used for embedded SQL strings is this compiler itself
    return self
def construct_params(self, params=None, _group_number=None):
    """return a dictionary of bind parameter keys and values

    params, if given, supplies override values looked up by either the
    bindparam's original key or its compiled name; a required parameter
    with no value raises InvalidRequestError. Callable bind values are
    invoked here to produce the actual value.
    """
    if params:
        pd = {}
        for bindparam, name in self.bind_names.iteritems():
            # accept the caller's value under either the original key
            # or the compiled (possibly anonymized) name
            for paramname in (bindparam.key, name):
                if paramname in params:
                    pd[name] = params[paramname]
                    break
            else:
                if bindparam.required:
                    if _group_number:
                        raise exc.InvalidRequestError("A value is required for bind parameter %r, in parameter group %d" % (bindparam.key, _group_number))
                    else:
                        raise exc.InvalidRequestError("A value is required for bind parameter %r" % bindparam.key)
                elif util.callable(bindparam.value):
                    pd[name] = bindparam.value()
                else:
                    pd[name] = bindparam.value
        return pd
    else:
        # no overrides: use each bindparam's own (possibly callable) value
        pd = {}
        for bindparam in self.bind_names:
            if util.callable(bindparam.value):
                pd[self.bind_names[bindparam]] = bindparam.value()
            else:
                pd[self.bind_names[bindparam]] = bindparam.value
        return pd
params = property(construct_params, doc="""
Return the bind params for this compiled object.
""")
def default_from(self):
    """Called when a SELECT statement has no froms, and no FROM clause is to be appended.

    Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
    The base implementation appends nothing.
    """
    return ""
def visit_grouping(self, grouping, **kwargs):
    """Render a parenthesized (grouped) sub-expression."""
    return "(%s)" % self.process(grouping.element)
def visit_label(self, label, result_map=None, within_columns_clause=False):
    # only render labels within the columns clause
    # or ORDER BY clause of a select.  dialect-specific compilers
    # can modify this behavior.
    if within_columns_clause:
        # auto-generated label names may exceed the dialect's label
        # length and are replaced by a truncated unique form
        labelname = isinstance(label.name, sql._generated_label) and \
                self._truncated_identifier("colident", label.name) or label.name

        if result_map is not None:
            # expose the label for result-set type processing/targeting
            result_map[labelname.lower()] = \
                    (label.name, (label, label.element, labelname), label.element.type)

        # "<element> AS <labelname>"
        return self.process(label.element) + \
                    OPERATORS[operators.as_] + \
                    self.preparer.format_label(label, labelname)
    else:
        # outside the columns/ORDER BY clause just render the element
        return self.process(label.element)
def visit_column(self, column, result_map=None, **kwargs):
    """Render a column reference, schema/table-qualified when the
    column's table prefixes its columns."""
    name = column.name
    if not column.is_literal and isinstance(name, sql._generated_label):
        # truncate auto-generated names to the dialect's limits
        name = self._truncated_identifier("colident", name)

    if result_map is not None:
        result_map[name.lower()] = (name, (column, ), column.type)

    if column.is_literal:
        # literal_column(): escape rather than quote
        name = self.escape_literal_column(name)
    else:
        name = self.preparer.quote(name, column.quote)

    if column.table is None or not column.table.named_with_column:
        # unqualified: no table, or the table does not prefix its columns
        return name
    else:
        if column.table.schema:
            schema_prefix = self.preparer.quote_schema(
                                column.table.schema,
                                column.table.quote_schema) + '.'
        else:
            schema_prefix = ''
        tablename = column.table.name
        # anonymized alias names are truncated like generated labels
        tablename = isinstance(tablename, sql._generated_label) and \
                        self._truncated_identifier("alias", tablename) or tablename

        return schema_prefix + \
                self.preparer.quote(tablename, column.table.quote) + "." + name
def escape_literal_column(self, text):
    """provide escaping for the literal_column() construct.

    Doubles each '%' so that later %-style interpolation leaves the
    literal text intact.
    """
    # TODO: some dialects might need different behavior here
    return '%%'.join(text.split('%'))
def visit_fromclause(self, fromclause, **kwargs):
    # a plain FromClause renders simply as its name
    return fromclause.name
def visit_index(self, index, **kwargs):
    # an Index renders simply as its name
    return index.name
def visit_typeclause(self, typeclause, **kwargs):
    # delegate type rendering to the dialect's type compiler
    return self.dialect.type_compiler.process(typeclause.type)
def post_process_text(self, text):
    """Hook for dialects to post-process text() fragments; no-op here."""
    return text
def visit_textclause(self, textclause, **kwargs):
    """Render a text() construct, substituting :name bind markers and
    un-escaping backslash-escaped colons."""
    if textclause.typemap is not None:
        # expose declared result columns for type processing
        for colname, type_ in textclause.typemap.iteritems():
            self.result_map[colname.lower()] = (colname, None, type_)

    def do_bindparam(m):
        # replace :name with either the declared bindparam rendering or
        # a plain bind-parameter string
        name = m.group(1)
        if name in textclause.bindparams:
            return self.process(textclause.bindparams[name])
        else:
            return self.bindparam_string(name)

    # un-escape any \:params
    return BIND_PARAMS_ESC.sub(lambda m: m.group(1),
        BIND_PARAMS.sub(do_bindparam, self.post_process_text(textclause.text))
    )
def visit_null(self, null, **kwargs):
    # SQL literal for the null() construct
    return 'NULL'
def visit_clauselist(self, clauselist, **kwargs):
    """Render a list of clauses joined by the list's operator (a plain
    space when no operator is set); clauses rendering to None are
    dropped."""
    op = clauselist.operator
    joiner = " " if op is None else OPERATORS[op]
    rendered = (self.process(c) for c in clauselist.clauses)
    return joiner.join(r for r in rendered if r is not None)
def visit_case(self, clause, **kwargs):
    """Render a CASE expression, in either the "simple" form
    (CASE <value> WHEN ...) or the "searched" form (CASE WHEN ...)."""
    parts = ["CASE"]
    if clause.value is not None:
        # simple form: the compared value follows CASE
        parts.append(self.process(clause.value))
    for cond, result in clause.whens:
        parts.append("WHEN " + self.process(cond) + " THEN " + self.process(result))
    if clause.else_ is not None:
        parts.append("ELSE " + self.process(clause.else_))
    parts.append("END")
    return " ".join(parts)
def visit_cast(self, cast, **kwargs):
    """Render CAST(<expression> AS <type>)."""
    inner = self.process(cast.clause)
    typetext = self.process(cast.typeclause)
    return "CAST({0} AS {1})".format(inner, typetext)
def visit_extract(self, extract, **kwargs):
    """Render EXTRACT(<field> FROM <expr>), mapping the field name
    through self.extract_map (unknown fields pass through unchanged)."""
    mapped = self.extract_map.get(extract.field, extract.field)
    return "EXTRACT({0} FROM {1})".format(mapped, self.process(extract.expr))
def visit_function(self, func, result_map=None, **kwargs):
    """Render a SQL function call.

    A dialect may supply a visit_<funcname>_func method to override
    rendering of a particular function name.
    """
    if result_map is not None:
        result_map[func.name.lower()] = (func.name, None, func.type)
    disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
    if disp:
        return disp(func, **kwargs)
    else:
        # use the FUNCTIONS template for known classes, else
        # "<name>(<args>)", with any package prefixes dot-joined
        name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
        return ".".join(func.packagenames + [name]) % \
            {'expr':self.function_argspec(func, **kwargs)}
def function_argspec(self, func, **kwargs):
    """Render the parenthesized argument list of a function call."""
    return self.process(func.clause_expr, **kwargs)
def visit_compound_select(self, cs, asfrom=False, parens=True, **kwargs):
    """Render a compound select (UNION / INTERSECT / EXCEPT)."""
    entry = self.stack and self.stack[-1] or {}
    self.stack.append({'from':entry.get('from', None), 'iswrapper':True})

    # join the member selects with the compound keyword; each member
    # receives its index so the first can populate the result map
    text = (" " + cs.keyword + " ").join(
        (self.process(c, asfrom=asfrom, parens=False, compound_index=i)
         for i, c in enumerate(cs.selects))
    )

    group_by = self.process(cs._group_by_clause, asfrom=asfrom)
    if group_by:
        text += " GROUP BY " + group_by

    text += self.order_by_clause(cs)
    # LIMIT/OFFSET rendered only when one of them is present
    text += (cs._limit is not None or cs._offset is not None) and self.limit_clause(cs) or ""

    self.stack.pop(-1)
    if asfrom and parens:
        return "(" + text + ")"
    else:
        return text
def visit_unary(self, unary, **kw):
    """Render a unary expression: optional prefix operator, the
    operand, then an optional postfix modifier."""
    prefix = ""
    suffix = ""
    if unary.operator:
        prefix = OPERATORS[unary.operator]
    if unary.modifier:
        suffix = OPERATORS[unary.modifier]
    return prefix + self.process(unary.element, **kw) + suffix
def visit_binary(self, binary, **kwargs):
    """Render a binary expression by dispatching on its operator; the
    default rendering is "<left> <op> <right>"."""
    return self._operator_dispatch(binary.operator,
        binary,
        lambda opstr: self.process(binary.left) + opstr + self.process(binary.right),
        **kwargs
    )
def visit_like_op(self, binary, **kw):
    """Render a LIKE comparison, with optional ESCAPE clause."""
    escape = binary.modifiers.get("escape", None)
    text = '%s LIKE %s' % (self.process(binary.left), self.process(binary.right))
    if escape:
        text += " ESCAPE '%s'" % escape
    return text
def visit_notlike_op(self, binary, **kw):
    """Render a NOT LIKE comparison, with optional ESCAPE clause."""
    escape = binary.modifiers.get("escape", None)
    text = '%s NOT LIKE %s' % (self.process(binary.left), self.process(binary.right))
    if escape:
        text += " ESCAPE '%s'" % escape
    return text
def visit_ilike_op(self, binary, **kw):
    """Render a case-insensitive LIKE via lower() on both sides."""
    escape = binary.modifiers.get("escape", None)
    text = 'lower(%s) LIKE lower(%s)' % (self.process(binary.left), self.process(binary.right))
    if escape:
        text += " ESCAPE '%s'" % escape
    return text
def visit_notilike_op(self, binary, **kw):
    """Render a case-insensitive NOT LIKE via lower() on both sides."""
    escape = binary.modifiers.get("escape", None)
    text = 'lower(%s) NOT LIKE lower(%s)' % (self.process(binary.left), self.process(binary.right))
    if escape:
        text += " ESCAPE '%s'" % escape
    return text
def _operator_dispatch(self, operator, element, fn, **kw):
    """Dispatch rendering of *element* based on *operator*.

    A callable operator may be overridden by a visit_<opname> method
    on this compiler; otherwise *fn* is invoked with the operator's
    string form.
    """
    if util.callable(operator):
        disp = getattr(self, "visit_%s" % operator.__name__, None)
        if disp:
            return disp(element, **kw)
        else:
            return fn(OPERATORS[operator])
    else:
        # a plain-string operator is rendered verbatim, space-padded
        return fn(" " + operator + " ")
def visit_bindparam(self, bindparam, **kwargs):
    """Render a bind parameter, registering it in self.binds.

    Raises CompileError when the (possibly truncated) name collides
    with a different unique bind parameter, or with a parameter
    reserved for the crud (VALUES/SET) clause.
    """
    name = self._truncate_bindparam(bindparam)
    if name in self.binds:
        existing = self.binds[name]
        if existing is not bindparam:
            if existing.unique or bindparam.unique:
                raise exc.CompileError(
                        "Bind parameter '%s' conflicts with "
                        "unique bind parameter of the same name" % bindparam.key
                    )
            elif getattr(existing, '_is_crud', False):
                raise exc.CompileError(
                        "Bind parameter name '%s' is reserved "
                        "for the VALUES or SET clause of this insert/update statement."
                        % bindparam.key
                    )
    # register under both the original key and the truncated name
    self.binds[bindparam.key] = self.binds[name] = bindparam
    return self.bindparam_string(name)
def _truncate_bindparam(self, bindparam):
    """Return (and cache) the possibly length-truncated name to use
    for *bindparam*."""
    if bindparam in self.bind_names:
        return self.bind_names[bindparam]

    bind_name = bindparam.key
    # only auto-generated labels are subject to truncation
    if isinstance(bind_name, sql._generated_label):
        bind_name = self._truncated_identifier("bindparam", bind_name)

    # add to bind_names for translation
    self.bind_names[bindparam] = bind_name
    return bind_name
def _truncated_identifier(self, ident_class, name):
    """Return a length-safe version of *name*, cached per
    (ident_class, name).

    Names longer than self.label_length are cut and suffixed with a
    per-class hex counter to keep them unique.
    """
    if (ident_class, name) in self.truncated_names:
        return self.truncated_names[(ident_class, name)]
    # resolve any anonymous tokens before measuring the length
    anonname = name % self.anon_map
    if len(anonname) > self.label_length:
        counter = self.truncated_names.get(ident_class, 1)
        truncname = anonname[0:max(self.label_length - 6, 0)] + "_" + hex(counter)[2:]
        self.truncated_names[ident_class] = counter + 1
    else:
        truncname = anonname
    self.truncated_names[(ident_class, name)] = truncname
    return truncname
def _anonymize(self, name):
    # resolve %(<num> <name>)s anonymous tokens against the anon_map
    return name % self.anon_map
def _process_anon(self, key):
    """Generate (and count) a new anonymous label "<derived>_<n>" for
    an anon_map key of the form "<ident> <derived>"."""
    (ident, derived) = key.split(' ', 1)
    anonymous_counter = self.anon_map.get(derived, 1)
    self.anon_map[derived] = anonymous_counter + 1
    return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name):
    """Render a bind-parameter token per the dialect's paramstyle,
    tracking position order for positional styles."""
    if self.positional:
        self.positiontup.append(name)
        return self.bindtemplate % {'name':name, 'position':len(self.positiontup)}
    else:
        return self.bindtemplate % {'name':name}
def visit_alias(self, alias, asfrom=False, **kwargs):
    """Render an alias; as a FROM element it becomes
    "<original> AS <name>", otherwise just the aliased element."""
    if not asfrom:
        return self.process(alias.original, **kwargs)

    # auto-generated alias names may need length truncation
    if isinstance(alias.name, sql._generated_label):
        alias_name = self._truncated_identifier("alias", alias.name)
    else:
        alias_name = alias.name

    return self.process(alias.original, asfrom=True, **kwargs) + " AS " + \
        self.preparer.format_alias(alias, alias_name)
def label_select_column(self, select, column, asfrom):
    """label columns present in a select()."""
    # already-labeled columns pass through untouched
    if isinstance(column, sql._Label):
        return column
    # use_labels mode: apply the column's generated table-qualified label
    if select is not None and select.use_labels and column._label:
        return _CompileLabel(column, column._label)
    if \
        asfrom and \
        isinstance(column, sql.ColumnClause) and \
        not column.is_literal and \
        column.table is not None and \
        not isinstance(column.table, sql.Select):
        # plain table column rendered inside a FROM subquery: label by name
        return _CompileLabel(column, sql._generated_label(column.name))
    elif not isinstance(column,
                (sql._UnaryExpression, sql._TextClause, sql._BindParamClause)) \
            and (not hasattr(column, 'name') or isinstance(column, sql.Function)):
        # expressions without a usable name get an anonymous label
        return _CompileLabel(column, column.anon_label)
    else:
        return column
def visit_select(self, select, asfrom=False, parens=True,
                    iswrapper=False, compound_index=1, **kwargs):
    """Render a SELECT statement: column list, FROM, WHERE, GROUP BY,
    HAVING, ORDER BY, LIMIT/OFFSET and FOR UPDATE, tracking the
    correlation stack for nested selects."""
    entry = self.stack and self.stack[-1] or {}
    existingfroms = entry.get('from', None)

    froms = select._get_display_froms(existingfroms)

    correlate_froms = set(sql._from_objects(*froms))

    # TODO: might want to propagate existing froms for select(select(select))
    # where innermost select should correlate to outermost
    # if existingfroms:
    #     correlate_froms = correlate_froms.union(existingfroms)

    self.stack.append({'from':correlate_froms, 'iswrapper':iswrapper})

    # only the outermost (or wrapping) select populates the result map
    if compound_index==1 and not entry or entry.get('iswrapper', False):
        column_clause_args = {'result_map':self.result_map}
    else:
        column_clause_args = {}

    # the actual list of columns to print in the SELECT column list.
    inner_columns = [
        c for c in [
            self.process(
                self.label_select_column(select, co, asfrom=asfrom),
                within_columns_clause=True,
                **column_clause_args)
            for co in util.unique_list(select.inner_columns)
        ]
        if c is not None
    ]

    text = "SELECT "  # we're off to a good start !

    if select._prefixes:
        text += " ".join(self.process(x) for x in select._prefixes) + " "
    text += self.get_select_precolumns(select)
    text += ', '.join(inner_columns)

    if froms:
        text += " \nFROM "
        text += ', '.join(self.process(f, asfrom=True) for f in froms)
    else:
        text += self.default_from()

    if select._whereclause is not None:
        t = self.process(select._whereclause)
        if t:
            text += " \nWHERE " + t

    if select._group_by_clause.clauses:
        group_by = self.process(select._group_by_clause)
        if group_by:
            text += " GROUP BY " + group_by

    if select._having is not None:
        t = self.process(select._having)
        if t:
            text += " \nHAVING " + t

    if select._order_by_clause.clauses:
        text += self.order_by_clause(select)
    if select._limit is not None or select._offset is not None:
        text += self.limit_clause(select)
    if select.for_update:
        text += self.for_update_clause(select)

    self.stack.pop(-1)
    if asfrom and parens:
        return "(" + text + ")"
    else:
        return text
def get_select_precolumns(self, select):
    """Called when building a ``SELECT`` statement, position is just before
    column list.

    The generic implementation handles only the DISTINCT keyword.
    """
    if select._distinct:
        return "DISTINCT "
    return ""
def order_by_clause(self, select):
    """Render " ORDER BY ..." for *select*, or "" when the clause
    compiles to nothing."""
    rendered = self.process(select._order_by_clause)
    if not rendered:
        return ""
    return " ORDER BY " + rendered
def for_update_clause(self, select):
    """Render " FOR UPDATE" when the select requests it, else ""."""
    if select.for_update:
        return " FOR UPDATE"
    return ""
def limit_clause(self, select):
    """Render LIMIT/OFFSET.

    Emits "LIMIT -1" when only an OFFSET is given, since this generic
    syntax requires a LIMIT to precede OFFSET.
    """
    pieces = []
    if select._limit is not None:
        pieces.append(" \n LIMIT " + str(select._limit))
    if select._offset is not None:
        if select._limit is None:
            pieces.append(" \n LIMIT -1")
        pieces.append(" OFFSET " + str(select._offset))
    return "".join(pieces)
def visit_table(self, table, asfrom=False, **kwargs):
    """Render a table reference; only meaningful inside a FROM list,
    otherwise a table renders as an empty string."""
    if not asfrom:
        return ""
    name = self.preparer.quote(table.name, table.quote)
    schema = getattr(table, "schema", None)
    if schema:
        return self.preparer.quote_schema(schema, table.quote_schema) + "." + name
    return name
def visit_join(self, join, asfrom=False, **kwargs):
    """Render "<left> [LEFT OUTER] JOIN <right> ON <onclause>"."""
    return (self.process(join.left, asfrom=True) + \
        (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + \
        self.process(join.right, asfrom=True) + " ON " + self.process(join.onclause))
def visit_sequence(self, seq):
    # no sequence support in the generic compiler; dialects override
    return None
def visit_insert(self, insert_stmt):
    """Render an INSERT statement, including RETURNING placement and
    DEFAULT VALUES handling."""
    self.isinsert = True
    # _get_colparams also populates self.returning for implicit-returning PKs
    colparams = self._get_colparams(insert_stmt)

    if not colparams and \
            not self.dialect.supports_default_values and \
            not self.dialect.supports_empty_insert:
        raise exc.CompileError("The version of %s you are using does "
                                "not support empty inserts." %
                                self.dialect.name)

    preparer = self.preparer
    supports_default_values = self.dialect.supports_default_values

    text = "INSERT"

    prefixes = [self.process(x) for x in insert_stmt._prefixes]
    if prefixes:
        text += " " + " ".join(prefixes)

    text += " INTO " + preparer.format_table(insert_stmt.table)

    if colparams or not supports_default_values:
        text += " (%s)" % ', '.join([preparer.format_column(c[0])
                   for c in colparams])

    if self.returning or insert_stmt._returning:
        self.returning = self.returning or insert_stmt._returning
        returning_clause = self.returning_clause(insert_stmt, self.returning)

        if self.returning_precedes_values:
            text += " " + returning_clause

    if not colparams and supports_default_values:
        text += " DEFAULT VALUES"
    else:
        text += " VALUES (%s)" % \
                ', '.join([c[1] for c in colparams])

    if self.returning and not self.returning_precedes_values:
        # returning_clause was bound above: self.returning being truthy
        # implies the earlier branch executed
        text += " " + returning_clause

    return text
def visit_update(self, update_stmt):
    """Render an UPDATE statement with SET, WHERE and RETURNING."""
    # correlate WHERE-clause subqueries against the updated table
    self.stack.append({'from': set([update_stmt.table])})

    self.isupdate = True
    colparams = self._get_colparams(update_stmt)

    text = "UPDATE " + self.preparer.format_table(update_stmt.table)

    text += ' SET ' + \
            ', '.join(
                self.preparer.quote(c[0].name, c[0].quote) + '=' + c[1]
                for c in colparams
            )

    if update_stmt._returning:
        self.returning = update_stmt._returning
        if self.returning_precedes_values:
            text += " " + self.returning_clause(update_stmt, update_stmt._returning)

    if update_stmt._whereclause is not None:
        text += " WHERE " + self.process(update_stmt._whereclause)

    if self.returning and not self.returning_precedes_values:
        text += " " + self.returning_clause(update_stmt, update_stmt._returning)

    self.stack.pop(-1)

    return text
def _create_crud_bind_param(self, col, value, required=False):
    """Create and register a bind parameter reserved for the
    VALUES/SET clause of an INSERT/UPDATE, keyed on the column's key.

    Raises CompileError if the name is already taken in self.binds.
    """
    bindparam = sql.bindparam(col.key, value, type_=col.type, required=required)
    # mark it so visit_bindparam can detect collisions with user params
    bindparam._is_crud = True
    if col.key in self.binds:
        raise exc.CompileError(
                "Bind parameter name '%s' is reserved "
                "for the VALUES or SET clause of this insert/update statement."
                % col.key
            )
    self.binds[col.key] = bindparam
    return self.bindparam_string(self._truncate_bindparam(bindparam))
def _get_colparams(self, stmt):
    """create a set of tuples representing column/string pairs for use
    in an INSERT or UPDATE statement.

    Also generates the Compiled object's postfetch, prefetch, and returning
    column collections, used for default handling and ultimately
    populating the ResultProxy's prefetch_cols() and postfetch_cols()
    collections.
    """
    self.postfetch = []
    self.prefetch = []
    self.returning = []

    # no parameters in the statement, no parameters in the
    # compiled params - return binds for all columns
    if self.column_keys is None and stmt.parameters is None:
        return [
            (c, self._create_crud_bind_param(c, None, required=True))
            for c in stmt.table.columns
        ]

    # sentinel marking parameters that must be supplied at execution time
    required = object()

    # if we have statement parameters - set defaults in the
    # compiled params
    if self.column_keys is None:
        parameters = {}
    else:
        parameters = dict((sql._column_as_key(key), required)
                          for key in self.column_keys
                          if not stmt.parameters or key not in stmt.parameters)

    if stmt.parameters is not None:
        for k, v in stmt.parameters.iteritems():
            parameters.setdefault(sql._column_as_key(k), v)

    # create a list of column assignment clauses as tuples
    values = []

    # INSERTs that are not inline and have no explicit RETURNING need
    # their primary keys made available post-execute
    need_pks = self.isinsert and \
                not self.inline and \
                not stmt._returning

    implicit_returning = need_pks and \
                            self.dialect.implicit_returning and \
                            stmt.table.implicit_returning
    postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid

    # iterating through columns at the top to maintain ordering.
    # otherwise we might iterate through individual sets of
    # "defaults", "primary key cols", etc.
    for c in stmt.table.columns:
        if c.key in parameters:
            # explicitly supplied value: literal becomes a bind param,
            # SQL expression is compiled inline and the column postfetched
            value = parameters[c.key]
            if sql._is_literal(value):
                value = self._create_crud_bind_param(c, value, required=value is required)
            else:
                self.postfetch.append(c)
                value = self.process(value.self_group())
            values.append((c, value))
        elif self.isinsert:
            if c.primary_key and \
                need_pks and \
                (
                    implicit_returning or
                    not postfetch_lastrowid or
                    c is not stmt.table._autoincrement_column
                ):
                if implicit_returning:
                    # retrieve PK values via RETURNING where possible
                    if c.default is not None:
                        if c.default.is_sequence:
                            proc = self.process(c.default)
                            if proc is not None:
                                values.append((c, proc))
                            self.returning.append(c)
                        elif c.default.is_clause_element:
                            values.append((c, self.process(c.default.arg.self_group())))
                            self.returning.append(c)
                        else:
                            values.append((c, self._create_crud_bind_param(c, None)))
                            self.prefetch.append(c)
                    else:
                        self.returning.append(c)
                else:
                    # pre-execute the default (e.g. fire the sequence)
                    if (
                        c.default is not None and \
                        (
                            self.dialect.supports_sequences or
                            not c.default.is_sequence
                        )
                    ) or self.dialect.preexecute_autoincrement_sequences:
                        values.append((c, self._create_crud_bind_param(c, None)))
                        self.prefetch.append(c)
            elif c.default is not None:
                # non-PK (or non-needed-PK) column defaults
                if c.default.is_sequence:
                    proc = self.process(c.default)
                    if proc is not None:
                        values.append((c, proc))
                        if not c.primary_key:
                            self.postfetch.append(c)
                elif c.default.is_clause_element:
                    values.append((c, self.process(c.default.arg.self_group())))
                    if not c.primary_key:
                        # dont add primary key column to postfetch
                        self.postfetch.append(c)
                else:
                    values.append((c, self._create_crud_bind_param(c, None)))
                    self.prefetch.append(c)
            elif c.server_default is not None:
                if not c.primary_key:
                    self.postfetch.append(c)
        elif self.isupdate:
            # UPDATE path: apply onupdate defaults
            if c.onupdate is not None and not c.onupdate.is_sequence:
                if c.onupdate.is_clause_element:
                    values.append((c, self.process(c.onupdate.arg.self_group())))
                    self.postfetch.append(c)
                else:
                    values.append((c, self._create_crud_bind_param(c, None)))
                    self.prefetch.append(c)
            elif c.server_onupdate is not None:
                self.postfetch.append(c)
    return values
def visit_delete(self, delete_stmt):
    """Render a DELETE statement with WHERE and RETURNING."""
    # correlate WHERE-clause subqueries against the target table
    self.stack.append({'from': set([delete_stmt.table])})
    self.isdelete = True

    text = "DELETE FROM " + self.preparer.format_table(delete_stmt.table)

    if delete_stmt._returning:
        self.returning = delete_stmt._returning
        if self.returning_precedes_values:
            text += " " + self.returning_clause(delete_stmt, delete_stmt._returning)

    if delete_stmt._whereclause is not None:
        text += " WHERE " + self.process(delete_stmt._whereclause)

    if self.returning and not self.returning_precedes_values:
        text += " " + self.returning_clause(delete_stmt, delete_stmt._returning)

    self.stack.pop(-1)

    return text
def visit_savepoint(self, savepoint_stmt):
    """Render a SAVEPOINT statement."""
    return "SAVEPOINT " + self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
    """Render a ROLLBACK TO SAVEPOINT statement."""
    return "ROLLBACK TO SAVEPOINT " + self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
    """Render a RELEASE SAVEPOINT statement."""
    return "RELEASE SAVEPOINT " + self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(engine.Compiled):
    """Compiles DDL constructs (CREATE/DROP TABLE, INDEX, SEQUENCE and
    constraints) into strings for the target dialect."""
    @util.memoized_property
    def sql_compiler(self):
        # a statement compiler is needed to render embedded SQL
        # expressions (CHECK constraints, server defaults)
        return self.dialect.statement_compiler(self.dialect, self.statement)
    @property
    def preparer(self):
        return self.dialect.identifier_preparer
    def construct_params(self, params=None):
        # DDL statements carry no bind parameters
        return None
    def visit_ddl(self, ddl, **kwargs):
        """Render a literal DDL() statement, interpolating table,
        schema and fullname into its format string."""
        # table events can substitute table and schema name
        context = ddl.context
        if isinstance(ddl.target, schema.Table):
            context = context.copy()
            preparer = self.dialect.identifier_preparer
            path = preparer.format_table_seq(ddl.target)
            if len(path) == 1:
                table, sch = path[0], ''
            else:
                table, sch = path[-1], path[0]
            context.setdefault('table', table)
            context.setdefault('schema', sch)
            context.setdefault('fullname', preparer.format_table(ddl.target))
        return ddl.statement % context
    def visit_create_table(self, create):
        """Render CREATE TABLE with column specs, per-column
        constraints and table-level constraints."""
        table = create.element
        preparer = self.dialect.identifier_preparer
        text = "\n" + " ".join(['CREATE'] + \
            table._prefixes + \
            ['TABLE',
             preparer.format_table(table),
             "("])
        separator = "\n"
        # if only one primary key, specify it along with the column
        first_pk = False
        for column in table.columns:
            text += separator
            separator = ", \n"
            text += "\t" + self.get_column_specification(
                column,
                first_pk=column.primary_key and not first_pk
            )
            if column.primary_key:
                first_pk = True
            const = " ".join(self.process(constraint) for constraint in column.constraints)
            if const:
                text += " " + const
        const = self.create_table_constraints(table)
        if const:
            text += ", \n\t" + const
        text += "\n)%s\n\n" % self.post_create_table(table)
        return text
    def create_table_constraints(self, table):
        """Render the table-level constraints, primary key first."""
        # On some DB order is significant: visit PK first, then the
        # other constraints (engine.ReflectionTest.testbasic failed on FB2)
        constraints = []
        if table.primary_key:
            constraints.append(table.primary_key)
        constraints.extend([c for c in table.constraints if c is not table.primary_key])
        # skip constraints whose _create_rule declines, and use_alter
        # constraints on dialects that support ALTER
        return ", \n\t".join(p for p in
                    (self.process(constraint) for constraint in constraints
                     if (
                         constraint._create_rule is None or
                         constraint._create_rule(self))
                     and (
                         not self.dialect.supports_alter or
                         not getattr(constraint, 'use_alter', False)
                     )) if p is not None
            )
    def visit_drop_table(self, drop):
        return "\nDROP TABLE " + self.preparer.format_table(drop.element)
    def visit_create_index(self, create):
        """Render CREATE [UNIQUE] INDEX name ON table (cols)."""
        index = create.element
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s (%s)" \
            % (preparer.quote(self._validate_identifier(index.name, True), index.quote),
               preparer.format_table(index.table),
               ', '.join(preparer.quote(c.name, c.quote)
                         for c in index.columns))
        return text
    def visit_drop_index(self, drop):
        index = drop.element
        return "\nDROP INDEX " + \
            self.preparer.quote(self._validate_identifier(index.name, False), index.quote)
    def visit_add_constraint(self, create):
        preparer = self.preparer
        return "ALTER TABLE %s ADD %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )
    def visit_create_sequence(self, create):
        text = "CREATE SEQUENCE %s" % self.preparer.format_sequence(create.element)
        if create.element.increment is not None:
            text += " INCREMENT BY %d" % create.element.increment
        if create.element.start is not None:
            text += " START WITH %d" % create.element.start
        return text
    def visit_drop_sequence(self, drop):
        return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
    def visit_drop_constraint(self, drop):
        preparer = self.preparer
        return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
            self.preparer.format_table(drop.element.table),
            self.preparer.format_constraint(drop.element),
            drop.cascade and " CASCADE" or ""
        )
    def get_column_specification(self, column, **kwargs):
        """Render "<name> <type> [DEFAULT ...] [NOT NULL]" for a column."""
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(column.type)
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec
    def post_create_table(self, table):
        # hook for dialects to append text after the closing paren
        return ''
    def _validate_identifier(self, ident, truncate):
        """Truncate (with a uniquifying hex suffix) or validate *ident*
        against the dialect's max identifier length."""
        if truncate:
            if len(ident) > self.dialect.max_identifier_length:
                counter = getattr(self, 'counter', 0)
                self.counter = counter + 1
                return ident[0:self.dialect.max_identifier_length - 6] + "_" + hex(self.counter)[2:]
            else:
                return ident
        else:
            self.dialect.validate_identifier(ident)
            return ident
    def get_column_default_string(self, column):
        """Return the DDL string for a column's server default, or None."""
        if isinstance(column.server_default, schema.DefaultClause):
            if isinstance(column.server_default.arg, basestring):
                return "'%s'" % column.server_default.arg
            else:
                return self.sql_compiler.process(column.server_default.arg)
        else:
            return None
    def visit_check_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                self.preparer.format_constraint(constraint)
        sqltext = sql_util.expression_as_ddl(constraint.sqltext)
        text += "CHECK (%s)" % self.sql_compiler.process(sqltext)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_column_check_constraint(self, constraint):
        # inline (per-column) CHECK: sqltext rendered verbatim
        text = " CHECK (%s)" % constraint.sqltext
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_foreign_key_constraint(self, constraint):
        preparer = self.dialect.identifier_preparer
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                preparer.format_constraint(constraint)
        remote_table = list(constraint._elements.values())[0].column.table
        text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
                      for f in constraint._elements.values()),
            preparer.format_table(remote_table),
            ', '.join(preparer.quote(f.column.name, f.column.quote)
                      for f in constraint._elements.values())
        )
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_unique_constraint(self, constraint):
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % self.preparer.format_constraint(constraint)
        text += " UNIQUE (%s)" % (', '.join(self.preparer.quote(c.name, c.quote) for c in constraint))
        text += self.define_constraint_deferrability(constraint)
        return text
    def define_constraint_cascades(self, constraint):
        """Render ON DELETE / ON UPDATE actions, if configured."""
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete
        if constraint.onupdate is not None:
            text += " ON UPDATE %s" % constraint.onupdate
        return text
    def define_constraint_deferrability(self, constraint):
        """Render [NOT] DEFERRABLE / INITIALLY settings, if configured."""
        text = ""
        if constraint.deferrable is not None:
            if constraint.deferrable:
                text += " DEFERRABLE"
            else:
                text += " NOT DEFERRABLE"
        if constraint.initially is not None:
            text += " INITIALLY %s" % constraint.initially
        return text
class GenericTypeCompiler(engine.TypeCompiler):
    """Render SQL type names for DDL.

    Uppercase visit_* methods emit SQL-standard spellings; lowercase
    generic-type methods delegate to them.
    """
    def visit_CHAR(self, type_):
        return "CHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_NCHAR(self, type_):
        return "NCHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_FLOAT(self, type_):
        return "FLOAT"
    def visit_NUMERIC(self, type_):
        # precision/scale are optional and rendered only when present
        if type_.precision is None:
            return "NUMERIC"
        elif type_.scale is None:
            return "NUMERIC(%(precision)s)" % {'precision': type_.precision}
        else:
            return "NUMERIC(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale' : type_.scale}
    def visit_DECIMAL(self, type_):
        return "DECIMAL"
    def visit_INTEGER(self, type_):
        return "INTEGER"
    def visit_SMALLINT(self, type_):
        return "SMALLINT"
    def visit_BIGINT(self, type_):
        return "BIGINT"
    def visit_TIMESTAMP(self, type_):
        return 'TIMESTAMP'
    def visit_DATETIME(self, type_):
        return "DATETIME"
    def visit_DATE(self, type_):
        return "DATE"
    def visit_TIME(self, type_):
        return "TIME"
    def visit_CLOB(self, type_):
        return "CLOB"
    def visit_NCLOB(self, type_):
        return "NCLOB"
    def visit_VARCHAR(self, type_):
        return "VARCHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_NVARCHAR(self, type_):
        return "NVARCHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_BLOB(self, type_):
        return "BLOB"
    def visit_BINARY(self, type_):
        return "BINARY" + (type_.length and "(%d)" % type_.length or "")
    def visit_VARBINARY(self, type_):
        return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
    def visit_BOOLEAN(self, type_):
        return "BOOLEAN"
    def visit_TEXT(self, type_):
        return "TEXT"
    # -- generic types delegate to the SQL-standard renderers above --
    def visit_large_binary(self, type_):
        return self.visit_BLOB(type_)
    def visit_boolean(self, type_):
        return self.visit_BOOLEAN(type_)
    def visit_time(self, type_):
        return self.visit_TIME(type_)
    def visit_datetime(self, type_):
        return self.visit_DATETIME(type_)
    def visit_date(self, type_):
        return self.visit_DATE(type_)
    def visit_big_integer(self, type_):
        return self.visit_BIGINT(type_)
    def visit_small_integer(self, type_):
        return self.visit_SMALLINT(type_)
    def visit_integer(self, type_):
        return self.visit_INTEGER(type_)
    def visit_float(self, type_):
        return self.visit_FLOAT(type_)
    def visit_numeric(self, type_):
        return self.visit_NUMERIC(type_)
    def visit_string(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_unicode(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_text(self, type_):
        return self.visit_TEXT(type_)
    def visit_unicode_text(self, type_):
        return self.visit_TEXT(type_)
    def visit_enum(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_null(self, type_):
        raise NotImplementedError("Can't generate DDL for the null type")
    def visit_type_decorator(self, type_):
        # unwrap TypeDecorator to its per-dialect implementation type
        return self.process(type_.type_engine(self.dialect))
    def visit_user_defined(self, type_):
        return type_.get_col_spec()
class IdentifierPreparer(object):
    """Handle quoting and case-folding of identifiers based on options."""

    reserved_words = RESERVED_WORDS

    legal_characters = LEGAL_CHARACTERS

    illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS

    def __init__(self, dialect, initial_quote='"',
                    final_quote=None, escape_quote='"', omit_schema=False):
        """Construct a new ``IdentifierPreparer`` object.

        initial_quote
          Character that begins a delimited identifier.

        final_quote
          Character that ends a delimited identifier. Defaults to `initial_quote`.

        omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemae.
        """
        self.dialect = dialect
        self.initial_quote = initial_quote
        self.final_quote = final_quote or self.initial_quote
        self.escape_quote = escape_quote
        # an embedded quote char is escaped by doubling it
        self.escape_to_quote = self.escape_quote * 2
        self.omit_schema = omit_schema
        # memoization cache: identifier -> quoted (or unquoted) form
        self._strings = {}

    def _escape_identifier(self, value):
        """Escape an identifier.

        Subclasses should override this to provide database-dependent
        escaping behavior.
        """
        return value.replace(self.escape_quote, self.escape_to_quote)

    def _unescape_identifier(self, value):
        """Canonicalize an escaped identifier.

        Subclasses should override this to provide database-dependent
        unescaping behavior that reverses _escape_identifier.
        """
        return value.replace(self.escape_to_quote, self.escape_quote)

    def quote_identifier(self, value):
        """Quote an identifier.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.initial_quote + self._escape_identifier(value) + self.final_quote

    def _requires_quotes(self, value):
        """Return True if the given identifier requires quoting."""
        lc_value = value.lower()
        return (lc_value in self.reserved_words
                or value[0] in self.illegal_initial_characters
                or not self.legal_characters.match(unicode(value))
                or (lc_value != value))

    def quote_schema(self, schema, force):
        """Quote a schema.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.quote(schema, force)

    def quote(self, ident, force):
        # force=None: quote only when required, memoized;
        # force truthy: always quote; force falsy: never quote.
        if force is None:
            if ident in self._strings:
                return self._strings[ident]
            else:
                if self._requires_quotes(ident):
                    self._strings[ident] = self.quote_identifier(ident)
                else:
                    self._strings[ident] = ident
                return self._strings[ident]
        elif force:
            return self.quote_identifier(ident)
        else:
            return ident

    def format_sequence(self, sequence, use_schema=True):
        """Prepare a quoted sequence name, optionally schema-qualified."""
        name = self.quote(sequence.name, sequence.quote)
        if not self.omit_schema and use_schema and sequence.schema is not None:
            name = self.quote_schema(sequence.schema, sequence.quote) + "." + name
        return name

    def format_label(self, label, name=None):
        return self.quote(name or label.name, label.quote)

    def format_alias(self, alias, name=None):
        return self.quote(name or alias.name, alias.quote)

    def format_savepoint(self, savepoint, name=None):
        return self.quote(name or savepoint.ident, savepoint.quote)

    def format_constraint(self, constraint):
        return self.quote(constraint.name, constraint.quote)

    def format_table(self, table, use_schema=True, name=None):
        """Prepare a quoted table and schema name."""
        if name is None:
            name = table.name
        result = self.quote(name, table.quote)
        if not self.omit_schema and use_schema and getattr(table, "schema", None):
            result = self.quote_schema(table.schema, table.quote_schema) + "." + result
        return result

    def format_column(self, column, use_table=False, name=None, table_name=None):
        """Prepare a quoted column name."""
        if name is None:
            name = column.name
        if not getattr(column, 'is_literal', False):
            if use_table:
                return self.format_table(column.table, use_schema=False, name=table_name) + "." + self.quote(name, column.quote)
            else:
                return self.quote(name, column.quote)
        else:
            # literal textual elements get stuck into ColumnClause alot, which shouldnt get quoted
            if use_table:
                return self.format_table(column.table, use_schema=False, name=table_name) + "." + name
            else:
                return name

    def format_table_seq(self, table, use_schema=True):
        """Format table name and schema as a tuple."""
        # Dialects with more levels in their fully qualified references
        # ('database', 'owner', etc.) could override this and return
        # a longer sequence.
        if not self.omit_schema and use_schema and getattr(table, 'schema', None):
            return (self.quote_schema(table.schema, table.quote_schema),
                    self.format_table(table, use_schema=False))
        else:
            return (self.format_table(table, use_schema=False), )

    @util.memoized_property
    def _r_identifiers(self):
        # regex matching dotted chains of (possibly quoted) identifiers
        initial, final, escaped_final = \
            [re.escape(s) for s in
             (self.initial_quote, self.final_quote,
              self._escape_identifier(self.final_quote))]
        r = re.compile(
            r'(?:'
            r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
            r'|([^\.]+))(?=\.|$))+' %
            { 'initial': initial,
              'final': final,
              'escaped': escaped_final })
        return r

    def unformat_identifiers(self, identifiers):
        """Unpack 'schema.table.column'-like strings into components."""
        r = self._r_identifiers
        return [self._unescape_identifier(i)
                for i in [a or b for a, b in r.findall(identifiers)]]
|
obeattie/sqlalchemy
|
lib/sqlalchemy/sql/compiler.py
|
Python
|
mit
| 55,170
|
[
"VisIt"
] |
81b91b7b6f11b3a4d3b98f9dfa1a24075ca777f7657206c0d60d118cb2f40da5
|
# encoding: utf-8
r"""Modules contains information about the aux array used in the multi-layer
swe computations."""
import numpy as np
# Define aux array indices
bathy_index = 0      # aux[0]: bathymetry b(x)
wind_index = 1       # aux[1]: wind field
h_hat_index = [2,3]  # aux[2:4]: h_hat layer depths for the linearized solver (top, bottom)
kappa_index = 4      # aux[4]: kappa field (not written/read in this module)
# ==============================================
# = Sets values of h_hat for linearized solver =
# ==============================================
def set_h_hat(state, jump_location, eta_left, eta_right):
    """Fill the h_hat aux fields with initial layer depths for use by
    the linearized Riemann solver.

    Cells with x < *jump_location* use the surfaces in *eta_left*,
    the rest use *eta_right*; each is (top surface, internal surface).
    Where the internal surface lies at or below the bathymetry, the
    bottom layer is dry (depth 0) and the top layer reaches the bed.
    """
    b = state.aux[bathy_index, :]
    top, bottom = h_hat_index

    for (i, x) in enumerate(state.grid.dimensions[0].centers):
        if x < jump_location:
            eta = eta_left
        else:
            eta = eta_right
        if eta[1] > b[i]:
            # two wet layers
            state.aux[top, i] = eta[0] - eta[1]
            state.aux[bottom, i] = eta[1] - b[i]
        else:
            # bottom layer dry
            state.aux[top, i] = eta[0] - b[i]
            state.aux[bottom, i] = 0.0
# ==================
# = Wind functions =
# ==================
def set_no_wind(state):
    """Set wind field to zero"""
    # zero out the wind entry of the aux array over the whole grid
    state.aux[wind_index,...] = 0.0
def set_oscillatory_wind(state,A=5.0,N=2.0,omega=2.0,t_length=10.0):
    """Assign an oscillatory (standing-wave) wind field to state.

    :Input:
     - *state* (:class:pyclaw.state.State)
     - *A* (float) wind amplitude
     - *N* (float) number of spatial half-waves across the domain
     - *omega* (float) temporal frequency
     - *t_length* (float) period scaling of the temporal oscillation
    """
    domain_width = state.grid.upper[0] - state.grid.lower[0]
    cell_centers = state.grid.dimensions[0].centers
    spatial = np.sin(np.pi * N * cell_centers / domain_width)
    temporal = np.sin(2.0 * np.pi * omega / t_length * state.t)
    state.aux[wind_index,:] = A * spatial * temporal
# ========================
# = Bathymetry functions =
# ========================
def set_jump_bathymetry(state,jump_location,depths):
    """
    Set bathymetry representing a jump from depths[0] to depths[1] at
    jump_location.

    This works for 1 and 2 dimensions assuming that the x-dimension is the
    first available in the grid object.
    """
    cell_centers = state.grid.dimensions[0].centers
    # depths[0] strictly left of the jump, depths[1] at and beyond it.
    state.aux[bathy_index,...] = np.where(cell_centers < jump_location,
                                          depths[0], depths[1])
def set_sloped_shelf_bathymetry(state,x0,x1,basin_depth,shelf_depth):
    r"""
    Set bathymetry to a simple shelf with a slope coming up from the basin.

                   (x1,shelf_depth)  *-----------
                                    /
                                   /
                                  /
                      ___________* (x0,basin_depth)

    This works for 1 and 2 dimensions assuming that the x-dimension is the
    first available in the grid object.
    """
    x = state.grid.dimensions[0].centers
    # Linear interpolation between (x0, basin_depth) and (x1, shelf_depth).
    slope = (basin_depth - shelf_depth) / (x0 - x1) * (x - x0) + basin_depth

    bathy = (x < x0) * basin_depth
    bathy = bathy + ((x0 <= x) & (x < x1)) * slope
    bathy = bathy + (x1 <= x) * shelf_depth
    state.aux[bathy_index,...] = bathy
def set_gaussian_bathymetry(state,depth,A,sigma,x0):
    r"""Set bathymetry to a gaussian sill of height A centered at x0."""
    x = state.grid.dimensions[0].centers
    sill = A * np.exp(-(x - x0)**2 / sigma**2)
    # Flat bottom at -depth with the sill rising on top of it.
    state.aux[bathy_index,...] = sill - depth
|
mandli/multilayer-examples
|
1d/multilayer/aux.py
|
Python
|
mit
| 3,435
|
[
"Gaussian"
] |
f403006ee43e0d8f8389e8ff07b5c1286a0912a009276d58b82a4c0838162db7
|
from __future__ import print_function
from datetime import datetime
import logging
import re
from nameparser import HumanName
from openelex.base.transform import Transform, registry
from openelex.models import Candidate, Contest, Office, Party, RawResult, Result
from openelex.lib.text import ocd_type_id
from openelex.lib.insertbuffer import BulkInsertBuffer
# Instantiate logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Field names copied verbatim from RawResult onto the canonical records.
meta_fields = ['source', 'election_id', 'state',]

# Fields used to build Contest records.
contest_fields = meta_fields + ['start_date',
                                'end_date',
                                'election_type',
                                'primary_type',
                                'result_type',
                                'special',
                               ]
# Fields used to build Candidate records.
candidate_fields = meta_fields + ['full_name', 'given_name',
                                  'family_name', 'additional_name']
# Fields used to build Result records.
result_fields = meta_fields + ['reporting_level', 'jurisdiction',
                               'votes', 'total_votes', 'vote_breakdowns']

# This module transforms Washington state results only.
STATE = 'WA'
class BaseTransform(Transform):
    """
    Base class that encapsulates shared functionality for other Washington
    transforms.

    Provides cached lookups of Office, Party and Contest records, plus
    helpers that normalize the raw office and party strings found in
    Washington results.
    """

    # Maps raw party strings, as they appear in WA raw results, to
    # canonical party abbreviations.  'UK' (unknown) marks minor parties
    # with no recognized abbreviation.
    PARTY_MAP = {
        # Unaffiliated
        'Nonpartisan': 'UN',
        '(States No Party Preference)': 'UN',
        '(Prefers Non Partisan Party)': 'UN',
        '(Prefers Neither Party)': 'UN',
        '(Prefers Non-partisan Party)': 'UN',
        # Independent
        'Independent Candidates': 'I',
        '(Prefers Independent Party)': 'I',
        '(Prefers ReganIndependent Party)': 'I',
        '(Prefers Independent - No Party)': 'I',
        '(Prefers Independent Dem. Party)': 'I',
        '(Prefers Centrist Party)': 'I',
        '(Prefers Independent No Party)': 'I',
        '(Prefers Independent Dem Party)': 'I',
        '(Prefers Independent-Gop Party)': 'I',
        '(Prefers Prog Independent Party)': 'I',
        '(Prefers Indep Republican Party)': 'I',
        # Republican
        # (duplicate keys for '(Prefers Republican Party)' and
        # '(Prefers GOP Party)' removed; the values were identical)
        'Republican': 'R',
        'Republican Party Nominees': 'R',
        '(Prefers Republican Party)': 'R',
        '(Prefers G.O.P. Party)': 'R',
        '(Prefers (G.O.P.) Party)': 'R',
        '(Prefers G O P Party)': 'R',
        '(Prefers R Party)': 'R',
        '(Prefers Cut Taxes G.O.P. Party)': 'R',
        '(Prefers Grand Old Party)': 'R',
        '(Prefers (R) Problemfixer Party)': 'R',
        '(Prefers GOP Party)': 'R',
        '(Prefers Conservative Party)': 'R',
        '(Prefers Gop Party)': 'R',
        '(Prefers (R) Hope&change Party)': 'R',
        # Democrat
        'Democrat': 'D',
        'Democratic Party Nominees': 'D',
        '(Prefers Democratic Party)': 'D',
        '(Prefers Progressive Dem. Party)': 'D',
        '(Prefers Progressive Party)': 'D',
        '(Prefers True Democratic Party)': 'D',
        '(Prefers Progressive Dem Party)': 'D',
        '(Prefers Demo Party)': 'D',
        '(Prefers Prolife Democrat Party)': 'D',
        '(Prefers F.D.R. Democrat Party)': 'D',
        '(Prefers Democracy Indep. Party)': 'D',
        '(Prefers Democratic-Repub Party)': 'D',
        # Tea Party
        '(Prefers Tea Party)': 'TEA',
        # Libertarian
        '(Prefers Libertarian Party)': 'LIB',
        'Libertarian Party Nominees': 'LIB',
        # Green
        '(Prefers Green Party)': 'GRE',
        'Green Party Nominees': 'GRE',
        # Constitution
        '(Prefers Constitution Party)': 'CON',  # What's abbr for this?
        'Constitution Party Nominees': 'CON',
        # Party of Commons
        '(Prefers Party Of Commons Party)': 'COM',  # What's abbr for this?
        # Socialist
        # Not sure which this is
        'Socialism & Libertarian Party Nominees': 'SOC',
        'Socialist Workers Party Nominees': 'SOC',
        '(Prefers Socialist Altern Party)': 'SOC',
        # Etc
        # NOTE(review): key below is missing a closing paren -- confirm
        # it matches the raw source data before "fixing" it.
        '(Prefers Reform Party': 'REF',
        '(Prefers America\'s Third Party)': 'UK',
        '(Prefers Salmon Yoga Party)': 'UK',
        '(Prefers Lower Taxes Party)': 'UK',
        '(Prefers Bull Moose Party)': 'UK',
        '(Prefers Happiness Party)': 'UK',
        '(Prefers SeniorSide Party)': 'UK',
        'Justice Party Nominees': 'UK',
        '(Prefers The 99%% Party)': 'UK',
        '(Prefers Employmentwealth Party)': 'UK',
        '(Prefers The Human Rights Party)': 'UK',
        '(Prefers Neopopulist Party)': 'UK'
    }

    # Offices whose records are additionally distinguished by district.
    district_offices = set([
        'U.S. Senate',
        'U.S. House of Representatives',
        'State Senate',
        'State House of Representatives',
    ])

    def __init__(self):
        super(BaseTransform, self).__init__()
        # Per-run caches (office key / party abbrev / contest key) to
        # avoid repeated datastore round-trips.
        self._office_cache = {}
        self._party_cache = {}
        self._contest_cache = {}

    def get_raw_results(self):
        """Return an uncached queryset of all WA raw results."""
        return RawResult.objects.filter(state=STATE).no_cache()

    def get_contest_fields(self, raw_result):
        """Build the Contest constructor kwargs for a raw result."""
        fields = self._get_fields(raw_result, contest_fields)
        fields['office'] = self._get_office(raw_result)
        return fields

    def _get_fields(self, raw_result, field_names):
        """Copy the named attributes off raw_result into a dict."""
        return {k: getattr(raw_result, k) for k in field_names}

    def _get_office(self, raw_result):
        """Look up (with caching) the Office record for a raw result.

        Raises Office.DoesNotExist when no matching office exists.
        """
        office_query = {
            'state': STATE,
            'name': self._clean_office(raw_result.office)
        }

        # Presidential races are recorded against the 'US' pseudo-state.
        # BUG FIX: previously compared with `is`, which tests object
        # identity, not string equality, and is not guaranteed to match.
        if office_query['name'] == 'President':
            office_query['state'] = 'US'

        if office_query['name'] in self.district_offices:
            office_query['district'] = raw_result.district or ''

        key = Office.make_key(**office_query)
        try:
            return self._office_cache[key]
        except KeyError:
            try:
                office = Office.objects.get(**office_query)
                # Sanity check: the computed key must match the stored one.
                assert key == office.key
                self._office_cache[key] = office
                return office
            except Office.DoesNotExist:
                logger.error("\tNo office matching query {}".format(office_query))
                raise

    def get_party(self, raw_result, attr='party'):
        """Look up (with caching) the Party record for a raw result.

        Returns None when the raw value is empty or unrecognized.
        Raises Party.DoesNotExist when a recognized abbreviation has no
        corresponding Party record.
        """
        party = getattr(raw_result, attr)
        if not party:
            return None
        clean_abbrev = self._clean_party(party)
        if not clean_abbrev:
            return None
        try:
            return self._party_cache[clean_abbrev]
        except KeyError:
            try:
                party = Party.objects.get(abbrev=clean_abbrev)
                self._party_cache[clean_abbrev] = party
                return party
            except Party.DoesNotExist:
                logger.error("No party with abbreviation {}".format(clean_abbrev))
                raise

    def _clean_party(self, party):
        """Map a raw party string to its abbreviation, or None."""
        return self.PARTY_MAP.get(party)

    def _clean_office(self, office):
        """Normalize a raw office string to a canonical office name.

        Returns None when the office cannot be classified.  Order of the
        checks matters: house/senate are disambiguated into national vs.
        state variants via the local/national patterns.

        See: https://github.com/openelections/core/blob/dev/openelex/us/wa/load.py#L370
        """
        presidential_regex = re.compile('president', re.IGNORECASE)
        senate_regex = re.compile('(senate|senator)', re.IGNORECASE)
        house_regex = re.compile('(house|representative)', re.IGNORECASE)
        governor_regex = re.compile('governor', re.IGNORECASE)
        treasurer_regex = re.compile('treasurer', re.IGNORECASE)
        auditor_regex = re.compile('auditor', re.IGNORECASE)
        sos_regex = re.compile('secretary', re.IGNORECASE)
        lt_gov_regex = re.compile(r'(lt|Lt|Lieutenant)', re.IGNORECASE)
        ospi_regex = re.compile(
            'superintendent of public instruction',
            re.IGNORECASE)
        ag_regex = re.compile('attorney general', re.IGNORECASE)
        wcpl_regex = re.compile('commissioner of public lands', re.IGNORECASE)
        local_regex = re.compile(
            r'(\bState\b|Washington|Washington\s+State|Local|'
            'Legislative District)',
            re.IGNORECASE)
        national_regex = re.compile(
            r'(U\.S\.|\bUS\b|Congressional|National|United\s+States|U\.\s+S\.\s+)',
            re.IGNORECASE)

        if re.search(house_regex, office):
            if re.search(national_regex, office):
                return 'U.S. House of Representatives'
            elif re.search(local_regex, office):
                return 'State House of Representatives'
            else:
                return None
        elif re.search(governor_regex, office):
            return 'Governor'
        elif re.search(wcpl_regex, office):
            return 'Commissioner of Public Lands'
        elif re.search(senate_regex, office):
            if re.search(national_regex, office):
                return 'U.S. Senate'
            elif re.search(local_regex, office):
                return 'State Senate'
            else:
                return None
        elif re.search(lt_gov_regex, office):
            return 'Lieutenant Governor'
        elif re.search(ospi_regex, office):
            return 'Superintendent of Public Instruction'
        elif re.search(sos_regex, office):
            return 'Secretary of State'
        elif re.search(treasurer_regex, office):
            return 'Treasurer'
        elif re.search(auditor_regex, office):
            return 'Auditor'
        elif re.search(ag_regex, office):
            return 'Attorney General'
        elif re.search(presidential_regex, office):
            return 'President'
        else:
            return None

    def get_candidate_fields(self, raw_result):
        """Build the Candidate constructor kwargs for a raw result.

        Parses the candidate's name with nameparser, reconstructing the
        full name from the given/family parts when it is missing.
        """
        fields = self._get_fields(raw_result, candidate_fields)

        try:
            name = HumanName(raw_result.full_name)
        except TypeError:
            # full_name was None; build a name from its parts instead.
            name = HumanName("{} {}".format(raw_result.given_name,
                                            raw_result.family_name))

        fields['given_name'] = name.first
        fields['family_name'] = name.last
        if not fields['full_name']:
            fields['full_name'] = "{} {}".format(name.first, name.last)
        try:
            fields['additional_name'] = name.middle
            fields['suffix'] = name.suffix
        except Exception as e:
            # Best effort: middle name and suffix are optional.
            logger.error(e)
        return fields

    def get_contest(self, raw_result):
        """
        Returns the Contest model instance for a given RawResult.

        Caches the result in memory to reduce the number of calls to the
        datastore.
        """
        key = "%s-%s" % (raw_result.election_id, raw_result.contest_slug)
        try:
            return self._contest_cache[key]
        except KeyError:
            fields = self.get_contest_fields(raw_result)
            # 'source' is a per-raw-result field, not a contest selector.
            fields.pop('source')
            try:
                # Prefer the first match; fall back to get(), which raises
                # DoesNotExist, when the filter produced no rows.
                try:
                    contest = Contest.objects.filter(**fields)[0]
                except IndexError:
                    contest = Contest.objects.get(**fields)
            except Exception:
                logger.error("No contest matching fields %s", fields)
                raise
            self._contest_cache[key] = contest
            return contest
class CreateContestsTransform(BaseTransform):
    """Create one Contest record per unique (election_id, contest_slug)."""

    name = 'create_unique_contests'

    def __call__(self):
        contests = []
        seen = set()

        for result in self.get_raw_results():
            key = self._contest_key(result)
            if key not in seen:
                fields = self.get_contest_fields(result)
                fields['updated'] = fields['created'] = datetime.now()
                contests.append(Contest(**fields))
                seen.add(key)

        # Was a bare print(seen): route diagnostics through the logger.
        logger.debug("Contest keys seen: %s", seen)
        Contest.objects.insert(contests, load_bulk=False)
        logger.info("Created {} contests.".format(len(contests)))

    def reverse(self):
        """Delete all previously created WA contests."""
        old = Contest.objects.filter(state=STATE)
        logger.info('\tDeleting {} previously created contests'.format(old.count()))
        old.delete()

    def _contest_key(self, raw_result):
        """Uniqueness key for a raw result's contest."""
        return (raw_result.election_id, raw_result.contest_slug)
class CreateCandidatesTransform(BaseTransform):
    """Create one Candidate record per unique candidate per contest."""

    name = 'create_unique_candidates'

    def __call__(self):
        candidates = []
        seen = set()

        for rr in self.get_raw_results():
            key = (rr.election_id, rr.contest_slug, rr.candidate_slug)
            if key not in seen:
                fields = self.get_candidate_fields(rr)
                if not fields['full_name']:
                    # BUG FIX: was quit(fields) -- a debug exit that killed
                    # the whole process.  Fail loudly with an exception.
                    raise ValueError(
                        "Could not determine full name for candidate: "
                        "{}".format(fields))
                fields['contest'] = self.get_contest(rr)
                candidates.append(Candidate(**fields))
                seen.add(key)

        Candidate.objects.insert(candidates, load_bulk=False)
        logger.info("Created {} candidates.".format(len(candidates)))

    def reverse(self):
        """Delete all previously created WA candidates."""
        old = Candidate.objects.filter(state=STATE)
        logger.info("\tDeleting %d previously created candidates", old.count())
        old.delete()
class CreateResultsTransform(BaseTransform):
    """
    Create canonical Result records from WA raw results.

    Links each result to its (cached) Contest and Candidate records and
    normalizes jurisdiction, party, write-in and OCD ID values.
    """

    name = 'create_unique_results'

    auto_reverse = True

    def __init__(self):
        super(CreateResultsTransform, self).__init__()
        # Candidate cache keyed by
        # (election_id, contest_slug, candidate_slug).
        self._candidate_cache = {}

    def get_raw_results(self):
        return RawResult.objects.filter(state=STATE).no_cache()

    def get_results(self):
        """Return previously created Results for elections that have
        raw results."""
        election_ids = self.get_raw_results().distinct('election_id')
        return Result.objects.filter(election_id__in=election_ids)

    def __call__(self):
        results = self._create_results_collection()

        for rr in self.get_raw_results():
            fields = self._get_fields(rr, result_fields)
            fields['contest'] = self.get_contest(rr)
            fields['candidate'] = self.get_candidate(rr, extra={
                'contest': fields['contest'],
            })
            # The candidate's contest is authoritative.
            fields['contest'] = fields['candidate'].contest
            fields['raw_result'] = rr
            party = self.get_party(rr)
            if party:
                fields['party'] = party.abbrev
            fields['jurisdiction'] = self._strip_leading_zeros(rr.jurisdiction)
            fields = self._alter_result_fields(fields, rr)
            results.append(Result(**fields))

        self._create_results(results)

    def _alter_result_fields(self, fields, raw_result):
        """
        Hook to set or alter additional field values that will be
        passed to the Result constructor.
        """
        fields['write_in'] = self._parse_write_in(raw_result)
        fields['ocd_id'] = self._get_ocd_id(raw_result,
            jurisdiction=fields['jurisdiction'])
        return fields

    def _create_results_collection(self):
        """
        Creates the list-like object that will be used to hold the
        constructed Result instances.
        """
        return BulkInsertBuffer(Result)

    def _create_results(self, results):
        """
        Create the Result objects in the database.
        """
        results.flush()
        logger.info("Created %d results.", results.count())

    def reverse(self):
        old_results = self.get_results()
        logger.info("\tDeleting %d previously loaded results",
                    old_results.count())
        old_results.delete()

    def get_candidate(self, raw_result, extra=None):
        """
        Get the Candidate model for a RawResult.

        Keyword arguments:

        * extra - Dictionary of extra query parameters that will
                  be used to select the candidate.
        """
        # BUG FIX: `extra` was a mutable default argument ({}), which is
        # shared across all calls; use None and substitute a fresh dict.
        key = (raw_result.election_id, raw_result.contest_slug,
            raw_result.candidate_slug)
        try:
            return self._candidate_cache[key]
        except KeyError:
            fields = self.get_candidate_fields(raw_result)
            fields.update(extra or {})
            # 'source' is per-raw-result, not a candidate selector.
            del fields['source']
            try:
                candidate = Candidate.objects.get(**fields)
            except Candidate.DoesNotExist:
                logger.error("No candidate matching fields %s", fields)
                raise
            self._candidate_cache[key] = candidate
            return candidate

    def _parse_winner(self, raw_result):
        """
        Converts raw winner value into boolean.
        """
        # 'Y' marks winners in post-2002 contests; 1 in 2002 contests.
        return raw_result.winner == 'Y' or raw_result.winner == 1

    def _parse_write_in(self, raw_result):
        """
        Converts raw write-in value into boolean.
        """
        return (raw_result.write_in == 'Y'            # post-2002 contests
                or raw_result.family_name == 'zz998'  # 2002 contests
                or raw_result.write_in == "Write-In"
                or raw_result.full_name == "Other Write-Ins")

    def _get_ocd_id(self, raw_result, jurisdiction=None, reporting_level=None):
        """
        Returns the OCD ID for a RawResult's reporting level.

        Arguments:
            raw_result: the RawResult instance used to determine the OCD ID
            jurisdiction: the jurisdiction for which the OCD ID should be
                created.  Default is the raw result's jurisdiction field.
            reporting_level: the reporting level to reflect in the OCD ID.
                Default is raw_result.reporting_level.  Specifying this
                argument is useful if you want to use a RawResult's
                jurisdiction, but override the reporting level.
        """
        if reporting_level is None:
            reporting_level = raw_result.reporting_level
        if jurisdiction is None:
            jurisdiction = raw_result.jurisdiction
        juris_ocd = ocd_type_id(jurisdiction)

        # BUG FIX: these templates previously hard-coded state:md (copied
        # from the Maryland transform); derive the state from STATE.
        state = STATE.lower()
        if reporting_level == "county":
            return "ocd-division/country:us/state:%s/county:%s" % (state, juris_ocd)
        elif reporting_level == "state_legislative":
            return "ocd-division/country:us/state:%s/sldl:%s" % (state, juris_ocd)
        elif reporting_level == "precinct":
            # Precinct IDs nest under the county portion of the raw
            # result's own OCD ID.
            county_ocd_id = "/".join(raw_result.ocd_id.split('/')[:-1])
            return "%s/precinct:%s" % (county_ocd_id, juris_ocd)
        else:
            return None
# Register each transform under the 'wa' state key so the openelex CLI
# can discover and run them.
registry.register('wa', CreateContestsTransform)
registry.register('wa', CreateCandidatesTransform)
registry.register('wa', CreateResultsTransform)
|
openelections/openelections-core
|
openelex/us/wa/transform/__init__.py
|
Python
|
mit
| 19,231
|
[
"MOOSE"
] |
5ac2c70f7e6b28b8d260e174cb23abd7abf2fd1e999b3a17962edad6e9ae36f2
|
#!/usr/bin/python
"""
detangler.py - Copyright (c) 2012, Howard C. Shaw III
Licensed under the GNU GPL v3
detangler.py [filename] ...
Pass any number of filenames, detangler will extract all trees using BioPython,
and optimize them all simultaneously, minimizing on all combinations of trees.
"""
from sys import argv
import Bio
from Bio import Phylo
from StringIO import StringIO
from detangle import tree, node, process_trees
import argparse
if __name__=='__main__':
parser = argparse.ArgumentParser(description = 'Minimize tangling across multiple trees.')
#parser.add_argument('-t, --output-type', default='nexus', choices=['newick', 'nexus', 'phyloxml'])
parser.add_argument('-o, --output-filename', dest='output_filename', nargs='?', default='result.dat')
parser.add_argument('infiles', nargs='+')
args = parser.parse_args()
print args
bio_trees = {}
bio_tree_list = []
for filename in args.infiles:
""" Work out what format the file is, then parse the trees out """
f = open(filename, 'r')
first = f.readline()
if first.find('#nexus') > -1 or first.find('#NEXUS') > -1:
tree_type='nexus'
elif first.find('<') > -1:
tree_type='phyloxml'
else:
tree_type='newick'
f.close()
temp = list(Phylo.parse(filename,tree_type))
if not temp is None:
for i in temp:
bio_trees[i.name] = i
bio_tree_list.append(i)
trees = {}
tree_list = [ ]
""" At this point, bio_trees is full, now we need to convert these Phylo trees to the detangle
trees which are optimized for rotations. """
for i in bio_tree_list:
tr = tree()
tr.init_from_phylo(i)
trees[tr.name]=tr
tree_list.append(tr)
process_trees(tree_list, output_filename = args.output_filename)
|
TheGrum/Tangled
|
detangler.py
|
Python
|
gpl-3.0
| 1,884
|
[
"Biopython"
] |
11123e15e94fe5ea7dd25ead5453794ebdf45100f99c585757a063c5fe8b2da2
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Commonly-required utility methods needed by -- and potentially
customized by -- application and toolkit scripts. They have
been pulled out from the scripts because certain scripts had
gotten way too large as a result of including these methods."""
# CVS/SVN keyword metadata -- expanded by the version control system.
__id__        = "$Id$"
__version__   = "$Revision$"
__date__      = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs."
__license__   = "LGPL"
import pyatspi
import orca.debug as debug
import orca.script_utilities as script_utilities
import orca.settings as settings
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(script_utilities.Utilities):
    """Evolution-specific accessibility utilities.

    Specializes the generic script utilities with heuristics for
    Evolution's widget hierarchy (message bodies, the spell-check
    dialog, the Setup Assistant wizard, and calendar rows).
    """

    def __init__(self, script):
        """Creates an instance of the Utilities class.

        Arguments:
        - script: the script with which this instance is associated.
        """

        script_utilities.Utilities.__init__(self, script)

    #########################################################################
    #                                                                       #
    # Utilities for finding, identifying, and comparing accessibles         #
    #                                                                       #
    #########################################################################

    def isMessageBody(self, obj):
        """Returns True if obj is in the body of an email message.

        Arguments:
        - obj: the Accessible object of interest.
        """

        try:
            # Body text exposes the hypertext interface; anything that
            # doesn't is certainly not the message body.
            obj.queryHypertext()
            ancestor = obj.parent.parent
        except:
            return False
        else:
            # The accessible text objects in the header at the top
            # of the message also have STATE_MULTI_LINE. But they
            # are inside panels which are inside table cells; the
            # body text is not. See bug #567428.
            #
            return (obj.getState().contains(pyatspi.STATE_MULTI_LINE) \
                    and ancestor.getRole() != pyatspi.ROLE_TABLE_CELL)

    def isSpellingSuggestionsList(self, obj):
        """Returns True if obj is the list of spelling suggestions
        in the spellcheck dialog.

        Arguments:
        - obj: the Accessible object of interest.
        """

        # The list of spelling suggestions is a table whose parent is
        # a scroll pane. This in and of itself is not sufficiently
        # unique. What makes the spell check dialog unique is the
        # quantity of push buttons found. If we find this combination,
        # we'll assume its the spelling dialog.
        #
        if obj and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
            obj = obj.parent

        if not obj \
           or obj.getRole() != pyatspi.ROLE_TABLE \
           or obj.parent.getRole() != pyatspi.ROLE_SCROLL_PANE:
            return False

        topLevel = self.topLevelObject(obj.parent)
        if not self.isSameObject(topLevel, self._script.spellCheckDialog):
            # The group of buttons is found in a filler which is a
            # sibling of the scroll pane.
            #
            for sibling in obj.parent.parent:
                if sibling.getRole() == pyatspi.ROLE_FILLER:
                    buttonCount = 0
                    for child in sibling:
                        if child.getRole() == pyatspi.ROLE_PUSH_BUTTON:
                            buttonCount += 1
                    if buttonCount >= 5:
                        # Remember the dialog so later calls can take the
                        # cheap isSameObject() path above.
                        self._script.spellCheckDialog = topLevel
                        return True
        else:
            return True

        return False

    def isWizard(self, obj):
        """Returns True if this object is, or is within, a wizard.

        Arguments:
        - obj: the Accessible object
        """

        # The Setup Assistant is a frame whose child is a panel. That panel
        # holds a bunch of other panels, one for each stage in the wizard.
        # Only the active stage's panel has STATE_SHOWING. There is also
        # one child of ROLE_FILLER which holds the buttons.
        #
        window = self.topLevelObject(obj) or obj
        if window and window.getRole() == pyatspi.ROLE_FRAME \
           and window.childCount and window[0].getRole() == pyatspi.ROLE_PANEL:
            allPanels = panelsNotShowing = 0
            for child in window[0]:
                if child.getRole() == pyatspi.ROLE_PANEL:
                    allPanels += 1
                    if not child.getState().contains(pyatspi.STATE_SHOWING):
                        panelsNotShowing += 1

            # Wizard layout: exactly one stage panel showing, plus exactly
            # one non-panel child (the button filler).
            if allPanels - panelsNotShowing == 1 \
               and window[0].childCount - allPanels == 1:
                return True

        return False

    def isWizardNewInfoEvent(self, event):
        """Returns True if the event is judged to be the presentation of
        new information in a wizard. This method should be subclassed by
        application scripts as needed.

        Arguments:
        - event: the Accessible event being examined
        """

        if event.source.getRole() == pyatspi.ROLE_FRAME \
           and (event.type.startswith("window:activate") \
                or (event.type.startswith("object:state-changed:active") \
                    and event.detail1 == 1)):
            return self.isWizard(event.source)

        elif event.source.getRole() == pyatspi.ROLE_PANEL \
             and event.type.startswith("object:state-changed:showing") \
             and event.detail1 == 1 \
             and not self.isSameObject(event.source,
                                       self._script.lastSetupPanel):
            # A stage panel just became showing; make sure the hierarchy
            # matches the wizard layout before treating it as new info.
            rolesList = [pyatspi.ROLE_PANEL,
                         pyatspi.ROLE_PANEL,
                         pyatspi.ROLE_FRAME]
            if self.hasMatchingHierarchy(event.source, rolesList):
                return self.isWizard(event.source)

        return False

    def unrelatedLabels(self, root):
        """Returns a list containing all the unrelated (i.e., have no
        relations to anything and are not a fundamental element of a
        more atomic component like a combo box) labels under the given
        root. Note that the labels must also be showing on the display.

        Arguments:
        - root the Accessible object to traverse

        Returns a list of unrelated labels under the given root.
        """

        labels = script_utilities.Utilities.unrelatedLabels(self, root)
        # NOTE(review): labels is mutated (remove) while being iterated
        # with enumerate, so the element following each removed label is
        # skipped.  Confirm this is intentional before changing it.
        for i, label in enumerate(labels):
            if not label.getState().contains(pyatspi.STATE_SENSITIVE):
                labels.remove(label)
            else:
                try:
                    text = label.queryText()
                except:
                    pass
                else:
                    attr = text.getAttributes(0)
                    if attr[0]:
                        [charKeys, charDict] = \
                            self.stringToKeysAndDict(attr[0])
                        # Weight 700 == bold text.
                        if charDict.get('weight', '400') == '700':
                            if self.isWizard(root):
                                # We've passed the wizard info at the top,
                                # which is what we want to present. The rest
                                # is noise.
                                #
                                return labels[0:i]
                            else:
                                # This label is bold and thus serving as a
                                # heading. As such, it's not really unrelated.
                                #
                                labels.remove(label)
        return labels

    #########################################################################
    #                                                                       #
    # Utilities for working with the accessible text interface              #
    #                                                                       #
    #########################################################################

    def allSelectedText(self, obj):
        """Get all the text applicable text selections for the given object.
        If there is selected text, look to see if there are any previous
        or next text objects that also have selected text and add in their
        text contents.

        Arguments:
        - obj: the text object to start extracting the selected text from.

        Returns: all the selected text contents plus the start and end
        offsets within the text for the given object.
        """

        if not obj or not obj.parent:
            return ["", 0, 0]

        textContents = ""
        startOffset = 0
        endOffset = 0
        if obj.queryText().getNSelections() > 0:
            [textContents, startOffset, endOffset] = self.selectedText(obj)

        # Unfortunately, Evolution doesn't use the FLOWS_FROM and
        # FLOWS_TO relationships to easily allow us to get to previous
        # and next text objects. Instead we have to move up the
        # component hierarchy until we get to the object containing all
        # the panels (with each line containing a single text item).
        # We can then check in both directions to see if there is other
        # contiguous text that is selected. We also have to jump over
        # zero length (empty) text lines and continue checking on the
        # other side.
        #
        container = obj.parent.parent
        current = obj.parent.getIndexInParent()
        morePossibleSelections = True
        while morePossibleSelections:
            morePossibleSelections = False
            if (current-1) >= 0:
                prevPanel = container[current-1]
                try:
                    prevObj = prevPanel[0]
                    displayedText = self.substring(prevObj, 0, -1)
                    if len(displayedText) == 0:
                        # Empty line: skip it and keep scanning backwards.
                        current -= 1
                        morePossibleSelections = True
                    elif prevObj.queryText().getNSelections() > 0:
                        [newTextContents, start, end] = \
                            self.selectedText(prevObj)
                        textContents = newTextContents + " " + textContents
                        current -= 1
                        morePossibleSelections = True
                except:
                    pass

        # Now scan forward from the original line in the same fashion.
        current = obj.parent.getIndexInParent()
        morePossibleSelections = True
        while morePossibleSelections:
            morePossibleSelections = False
            if (current+1) < container.childCount:
                nextPanel = container[current+1]
                try:
                    nextObj = nextPanel[0]
                    displayedText = self.substring(nextObj, 0, -1)
                    if len(displayedText) == 0:
                        # Empty line: skip it and keep scanning forwards.
                        current += 1
                        morePossibleSelections = True
                    elif nextObj.queryText().getNSelections() > 0:
                        [newTextContents, start, end] = \
                            self.selectedText(nextObj)
                        textContents += " " + newTextContents
                        current += 1
                        morePossibleSelections = True
                except:
                    pass

        return [textContents, startOffset, endOffset]

    def hasTextSelections(self, obj):
        """Return an indication of whether this object has selected text.
        Note that it's possible that this object has no text, but is part
        of a selected text area. Because of this, we need to check the
        objects on either side to see if they are none zero length and
        have text selections.

        Arguments:
        - obj: the text object to start checking for selected text.

        Returns a [currentSelected, otherSelected] pair of booleans:
        whether this object has selected text, and whether adjacent
        text objects have selected text.
        """

        currentSelected = False
        otherSelected = False
        nSelections = obj.queryText().getNSelections()
        if nSelections:
            currentSelected = True
        else:
            otherSelected = False
            displayedText = self.substring(obj, 0, -1)
            if len(displayedText) == 0:
                # This line is empty; look at the neighboring lines for
                # selections, skipping over further empty lines.
                container = obj.parent.parent
                current = obj.parent.getIndexInParent()
                morePossibleSelections = True
                while morePossibleSelections:
                    morePossibleSelections = False
                    if (current-1) >= 0:
                        prevPanel = container[current-1]
                        prevObj = prevPanel[0]
                        try:
                            prevObjText = prevObj.queryText()
                        except:
                            prevObjText = None
                        if prevObj and prevObjText:
                            if prevObjText.getNSelections() > 0:
                                otherSelected = True
                            else:
                                displayedText = prevObjText.getText(0, -1)
                                if len(displayedText) == 0:
                                    current -= 1
                                    morePossibleSelections = True

                current = obj.parent.getIndexInParent()
                morePossibleSelections = True
                while morePossibleSelections:
                    morePossibleSelections = False
                    if (current+1) < container.childCount:
                        nextPanel = container[current+1]
                        nextObj = nextPanel[0]
                        try:
                            nextObjText = nextObj.queryText()
                        except:
                            nextObjText = None
                        if nextObj and nextObjText:
                            if nextObjText.getNSelections() > 0:
                                otherSelected = True
                            else:
                                displayedText = nextObjText.getText(0, -1)
                                if len(displayedText) == 0:
                                    current += 1
                                    morePossibleSelections = True

        return [currentSelected, otherSelected]

    #########################################################################
    #                                                                       #
    # Miscellaneous Utilities                                               #
    #                                                                       #
    #########################################################################

    def misspelledWordAndBody(self, suggestionsList, messagePanel):
        """Gets the misspelled word from the spelling dialog and the
        list of words from the message body.

        Arguments:
        - suggestionsList: the list of spelling suggestions from the
          spellcheck dialog
        - messagePanel: the panel containing the message being checked
          for spelling

        Returns [misspelledWord, messageBody]
        """

        misspelledWord, messageBody = "", []

        # Look for the "Suggestions for "xxxxx" label in the spell
        # checker dialog panel. Extract out the xxxxx. This will be
        # the misspelled word.
        #
        text = self.displayedLabel(suggestionsList) or ""
        words = text.split()
        for word in words:
            # The misspelled word is the quoted token in the label.
            if word[0] in ["'", '"']:
                misspelledWord = word[1:len(word) - 1]
                break

        if messagePanel != None:
            allTextObjects = self.descendantsWithRole(
                messagePanel, pyatspi.ROLE_TEXT)
            for obj in allTextObjects:
                for word in self.substring(obj, 0, -1).split():
                    messageBody.append(word)

        return [misspelledWord, messageBody]

    def speakBlankLine(self, obj):
        """Returns True if a blank line should be spoken.
        Otherwise, returns False.
        """

        # Get the AccessibleText interface.
        try:
            text = obj.queryText()
        except NotImplementedError:
            return False

        # Get the line containing the caret
        caretOffset = text.caretOffset
        line = text.getTextAtOffset(caretOffset, \
            pyatspi.TEXT_BOUNDARY_LINE_START)
        debug.println(debug.LEVEL_FINEST,
            "speakBlankLine: start=%d, end=%d, line=<%s>" % \
            (line[1], line[2], line[0]))

        # If this is a blank line, announce it if the user requested
        # that blank lines be spoken.
        if line[1] == 0 and line[2] == 0:
            return settings.speakBlankLines
        else:
            return False

    def timeForCalRow(self, row, noIncs):
        """Return a string equivalent to the time of the given row in
        the calendar day view. Each calendar row is equivalent to a
        certain time interval (from 5 minutes up to 1 hour), with time
        (row 0) starting at 12 am (midnight).

        Arguments:
        - row: the row number.
        - noIncs: the number of equal increments that the 24 hour period
          is divided into.

        Returns the time as a string.
        """

        # Relies on the module-level timeIncrements/hours/minutes tables.
        totalMins = timeIncrements[noIncs] * row

        if totalMins < 720:
            suffix = 'A.M.'
        else:
            totalMins -= 720
            suffix = 'P.M.'

        # NOTE(review): integer (floor) division assumes Python 2's `/`
        # semantics for ints -- confirm before any Python 3 port.
        hrs = hours[totalMins / 60]
        mins = minutes[totalMins % 60]

        return hrs + ' ' + mins + ' ' + suffix
# Values used to construct a time string for calendar appointments.
#
# Number of rows in the day view -> minutes covered by each row.
timeIncrements = {288: 5, 144: 10, 96: 15, 48: 30, 24: 60}

# Minute offset -> spoken string ('' when on the hour).
minutes = {0: '', 5: '5', 10: '10', 15: '15', 20: '20', 25: '25',
           30: '30', 35: '35', 40: '40', 45: '45', 50: '50', 55: '55'}

# Hour index (0 == 12 o'clock) -> spoken hour string.
hours = ['12', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/evolution/script_utilities.py
|
Python
|
gpl-3.0
| 19,166
|
[
"ORCA"
] |
dcdbb0b0b6338b3ca0b52a430681bebc0421f72dad1329d4a89f0ad4bc4236f0
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para somosmovies
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
# Channel-wide debug flag, read once from the add-on settings; guards the
# verbose per-item logging in the scraping functions below.
DEBUG = config.get_setting("debug")
def mainlist(item):
    """Build the channel root menu: the movies submenu and the series listing."""
    logger.info("[somosmovies.py] mainlist")
    entries = [
        Item(channel=item.channel, title="Películas", action="menupeliculas"),
        Item(channel=item.channel, title="Series", action="peliculas",
             url="http://www.somosmovies.com/search/label/Series?updated-max=&max-results=18"),
    ]
    return entries
def menupeliculas(item):
    """Build the movies submenu: latest releases plus genre/year/country filters."""
    logger.info("[somosmovies.py] menupeliculas")
    entries = []
    for rotulo, accion, direccion in (
            ("Novedades", "peliculas", "http://www.somosmovies.com"),
            ("Género", "generos", "http://www.somosmovies.com/"),
            ("Año", "anyos", "http://www.somosmovies.com/"),
            ("País", "paises", "http://www.somosmovies.com/")):
        entries.append(Item(channel=item.channel, title=rotulo, action=accion, url=direccion))
    return entries
def peliculas(item):
    """Scrape a listing page and return one Item per movie/series entry,
    plus a trailing "next page" Item when the pager link is present."""
    logger.info("[somosmovies.py] peliculas")
    itemlist=[]

    # Download the page
    data = scrapertools.cachePage(item.url)
    logger.info("data="+data)

    # Extract the entries. Sample of the markup being matched:
    '''
    <article CLASS='post crp'>
    <header><h3 CLASS='post-title entry-title item_name'>
    <a href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' title='Elysium (2013)'>Elysium (2013)</a>
    </h3>
    </header>
    <section CLASS='post-body entry-content clearfix'>
    <a href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' title='Elysium (2013)'><center>
    <img border="0" src="http://1.bp.blogspot.com/-J15zDm0KXVA/UoOmwu563kI/AAAAAAAALqw/zBww3WoCyEw/s1600/Poster.Elysium.2013.jpg" style="display: block; height: 400px; width: 312px;">
    </center>
    </a>
    <div CLASS='es-LAT'></div>
    <div CLASS='pie-post'>
    <div style='float:left'>
    <div class='fb-like' data-href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' data-layout='button_count' data-send='false' data-show-faces='false' data-width='120'></div>
    </div>
    </div>
    <div STYLE='clear: both;'></div>
    </section>
    </article>
    '''
    patron = "<article(.*?)</article>"
    matches = re.compile(patron,re.DOTALL).findall(data)

    for match in matches:
        logger.info("match="+match)
        scrapedtitle = scrapertools.get_match(match,"<a href='[^']+' title='([^']+)'")
        scrapedurl = urlparse.urljoin(item.url, scrapertools.get_match(match,"<a href='([^']+)' title='[^']+'") )
        scrapedplot = ""
        # Thumbnail is optional in the markup; fall back to empty on no match.
        try:
            scrapedthumbnail = urlparse.urljoin(item.url, scrapertools.get_match(match,'<img border="0" src="([^"]+)"') )
        except:
            scrapedthumbnail = ""
        # Optional language tag (e.g. "es-LAT" div class) appended to the title.
        try:
            idioma = scrapertools.get_match(match,"</center[^<]+</a[^<]+<div CLASS='([^']+)'></div>")
            scrapedtitle = scrapedtitle + " ("+idioma.upper()+")"
        except:
            pass
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to XBMC
        itemlist.append( Item(channel=item.channel, action="enlaces", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    # Extract the pager link, e.g.:
    #<a CLASS='blog-pager-older-link' href='http://www.somosmovies.com/search?updated-max=2012-08-22T23:10:00-05:00&max-results=16' id='Blog1_blog-pager-older-link' title='Siguiente Película'>Siguiente »</a>
    patronvideos = "<a CLASS='blog-pager-older-link' href='([^']+)' id='Blog1_blog-pager-older-link' title='Siguiente"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches)>0:
        #http://www.somosmovies.com/search/label/Peliculas?updated-max=2010-12-20T08%3A27%3A00-06%3A00&max-results=12
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        # Undo the percent-encoding of the timestamp colons in the pager URL.
        scrapedurl = scrapedurl.replace("%3A",":")
        itemlist.append( Item(channel=item.channel, action="peliculas", title=">> Página siguiente" , url=scrapedurl , folder=True) )

    return itemlist
def anyos(item):
    """Scrape the "Año" (year) sidebar section and return one Item per year link."""
    # FIX: the log tag previously said "[animeflv.py]" — a copy/paste leftover
    # from another channel; corrected to this channel's name.
    logger.info("[somosmovies.py] anyos")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    # Keep only the "Año" sidebar block.
    data = scrapertools.get_match(data,'<h2>Año</h2>(.*?)</ul')

    patron = "<a href='([^']+)'>([^<]+)</a>"
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl,scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=item.channel, action="peliculas" , title=title , url=url, thumbnail=thumbnail, plot=plot))

    return itemlist
def generos(item):
    """Scrape the "Género" (genre) sidebar section and return one Item per genre link."""
    # FIX: the log tag previously said "[animeflv.py]" — a copy/paste leftover
    # from another channel; corrected to this channel's name.
    logger.info("[somosmovies.py] generos")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    # Keep only the "Género" sidebar block.
    data = scrapertools.get_match(data,'<h2>Género</h2>(.*?)</ul')

    patron = "<a href='([^']+)'>([^<]+)</a>"
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl,scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=item.channel, action="peliculas" , title=title , url=url, thumbnail=thumbnail, plot=plot))

    return itemlist
def paises(item):
    """Scrape the "País" (country) sidebar section and return one Item per country link."""
    # FIX: the log tag previously said "[animeflv.py]" — a copy/paste leftover
    # from another channel; corrected to this channel's name.
    logger.info("[somosmovies.py] paises")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    # Keep only the "País" sidebar block.
    data = scrapertools.get_match(data,'<h2>País</h2>(.*?)</ul')

    patron = "<a href='([^']+)'>([^<]+)</a>"
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl,scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=item.channel, action="peliculas" , title=title , url=url, thumbnail=thumbnail, plot=plot))

    return itemlist
def enlaces(item):
    """Scrape the "Enlaces" (links) box of a detail page and return one Item
    per link group (episode or quality), to be resolved later by findvideos()."""
    logger.info("[somosmovies.py] enlaces")
    itemlist = []

    data = scrapertools.cachePage(item.url)

    # Markup samples of the three layouts this box can take.
    # Series with a plain episode list:
    '''
    <fieldset id="enlaces">
    <legend>Enlaces</legend><br />
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 1</b>: <small>30 Days Without an Accident</small></div><div class="tres"><a href="http://bit.ly/1aIiGdq" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/GY8PWg" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/15CGs8G" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/17RTYZl" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/ognvK7" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 2</b>: Infected</div><div class="tres"><a href="http://bit.ly/1fyubIg" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1a9voBA" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/19pmMpo" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1aYd0be" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/rI9OL7" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 3</b>: Isolation</div><div class="tres"><a href="http://bit.ly/1fyucfd" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/17UzXLX" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/17tmo9Y" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1eqtMEL" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/2f3Jj5" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 4</b>: Indifference</div><div class="tres"><a href="http://bit.ly/1aPKmwf" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/185vLcB" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1iJ5mGm" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1hadtPR" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/lYoQoo" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 5</b>: Internment</div><div class="tres"><a href="http://bit.ly/1aYcERL" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/HSRa1F" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1dilJZe" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1iG6sWi" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/0tHIKr" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 6</b>: Live Bait</div><div class="tres"><a href="http://bit.ly/17Z1EUf" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1ddc0Ym" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/I0GBKK" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1jx50TF" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/mgXyof" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 7</b>: Dead Weight</div><div class="tres"><a href="http://bit.ly/17UwbIi" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/17NZj1D" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1aTE4vw" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/IhQa8C" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/ZiSH47" target="_blank">TurboBit</a> <b style="font-style:italic;color:red;">Nuevo!</b></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 8</b>: Too Far Gone</div><div class="tres"><i style="font-style:italic">Disponible el 02 de Diciembre.</i></div>
    </div>
    </fieldset>
    '''
    # Series grouped by season:
    '''
    <fieldset id="enlaces">
    <h5 class='h5'>Season 1</h5>
    <div class="clearfix uno">
    <div class="dos"><b> Capítulo 1</b>: Yesterday's Jam</div><div class="tres"><a href="http://bit.ly/14OorEU" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/Z2uWNc" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/11nIqHi" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/XYo0jN" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 2</b>: Calamity Jen</div><div class="tres"><a href="http://bit.ly/XecqUq" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10algD1" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/YTsGe4" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/16xaKYZ" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 3</b>: Fifty-Fifty</div><div class="tres"><a href="http://bit.ly/12i5mq8" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10aljyA" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/12gnyo1" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/10xM8LC" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 4</b>: The Red Door</div><div class="tres"><a href="http://bit.ly/10al5Yg" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10wyHMz" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10rHP5P" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/10xM9PW" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 5</b>: The Haunting of Bill Crouse</div><div class="tres"><a href="http://bit.ly/10wyAjT" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/XecCmO" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/XYoPt0" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/14OpPXW" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 6</b>: Aunt Irma Visits</div><div class="tres"><a href="http://bit.ly/17dCeEj" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/12i5JRM" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10amVIA" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/17dDdUU" target="_blank">FreakShare</a></div>
    </div>
    <h5 class='h5'>Season 2</h5>
    <div class="clearfix uno">
    <div class="dos"><b> Capítulo 1</b>: The Work Outing</div><div class="tres"><a href="http://bit.ly/XOrCcl" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10wDjCe" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/12ibnDi" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/17dEXgU" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 2</b>: Return of the Golden Child</div><div class="tres"><a href="http://bit.ly/16p6Tvh" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/13SeTJq" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10zwtuf" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/XqnsZ7" target="_blank">FreakShare</a></div>
    '''
    # Movies listed by quality/format:
    '''
    <fieldset id="enlaces">
    <legend>Enlaces</legend><br />
    <div class="clearfix uno">
    <div class="dos">
    <b>AVI</b> <small>480p</small></div>
    <div class="tres">
    <a href="http://bit.ly/1dQbvlS" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/Nd96Hh" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1d3a534" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://goo.gl/TOipXB" target="_blank">TurboBit</a> <b class="sep">|</b> <a href="http://bit.ly/1oUWtPP" target="_blank">FreakShare</a>
    </div>
    </div>
    <div class="clearfix uno">
    <div class="dos">
    <b>MP4</b> <small>1080p</small></div>
    <div class="tres">
    <a href="http://bit.ly/1c40BEG" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/OcZDki" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1gjElZY" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://goo.gl/fc43B2" target="_blank">TurboBit</a> <b class="sep">|</b> <a href="http://bit.ly/1e9GxAq" target="_blank">FreakShare</a>
    </div>
    </div>
    </fieldset>
    '''

    # Keep only the links box
    data = scrapertools.get_match(data,'<fieldset id="enlaces"[^<]+<legend>Enlaces</legend>(.*?)</fieldset>')

    # One Item per "dos" group header; the raw header is stashed in "extra"
    # so findvideos() can locate the matching link row again.
    patron = '<div class="dos"[^<]+<b>([^<]+)</b>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for title in matches:
        itemlist.append( Item(channel=item.channel, action="findvideos" , title="Enlaces "+title.strip() , url=item.url, extra=title, thumbnail=item.thumbnail, plot=item.plot, folder=True))

    return itemlist
def findvideos(item):
    """Re-download the detail page and extract the individual server links for
    the group previously selected in enlaces() (identified via item.extra)."""
    logger.info("[somosmovies.py] findvideos")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    # Keep only the links box.
    data = scrapertools.get_match(data,'<fieldset id="enlaces"[^<]+<legend>Enlaces</legend>(.*?)</fieldset>')
    logger.info("[somosmovies.py] data="+data)

    # Sample of one link row being matched:
    '''
    <div class="dos"><b> Capítulo 10</b>: Mhysa <b style="color:red;font-style:italic">Nuevo!</b></div><div class="tres"><a href="http://bit.ly/19Zh0LG" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/11vxcOd" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/14tpgBb" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/17DxZUJ" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/16YykSk" target="_blank">FreakShare</a> <b class="sep">|</b> <a href="http://bit.ly/13vOcFA" target="_blank">Ver Online »</a></div>
    '''
    # Narrow down to the row whose header matches item.extra, then the
    # following <div> holding the server anchors.
    data = scrapertools.get_match(data,'<div class="dos"[^<]+<b>'+item.extra+'</b>.*?<div(.*?)</div')
    logger.info("[somosmovies.py] data="+data)

    # One playable Item per server anchor; server resolution happens in play().
    patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for url,title in matches:
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=item.thumbnail, plot=item.plot, server="", folder=False))

    return itemlist
def play(item):
    """Follow URL-shortener redirections (bit.ly, goo.gl, adf.ly/j.gs)
    recursively, then hand the final URL to the generic server detector."""
    logger.info("[somosmovies.py] play(item.url="+item.url+")")
    itemlist=[]

    # Each shortener branch rewrites item.url and recurses until no
    # known shortener remains in the URL.
    if "bit.ly" in item.url:
        logger.info("Acortador bit.ly")
        location = scrapertools.get_header_from_response(item.url,header_to_get="location")
        logger.info("[somosmovies.py] location="+location)
        item.url = location
        return play(item)

    if "goo.gl" in item.url:
        logger.info("Acortador goo.gl")
        item.url = scrapertools.get_header_from_response(item.url,header_to_get="location")
        return play(item)

    #adf.ly
    if "j.gs" in item.url:
        logger.info("Acortador j.gs (adfly)")
        from servers.decrypters import adfly
        item.url = adfly.get_long_url(item.url)
        return play(item)

    # No shortener left: let the generic detector build the playable items.
    from core import servertools
    itemlist = servertools.find_video_items(data=item.url)
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.folder = False
    return itemlist
|
Hernanarce/pelisalacarta
|
python/main-classic/channels/somosmovies.py
|
Python
|
gpl-3.0
| 18,309
|
[
"ADF"
] |
438724944d98b1618c2753bd84fab170e9f21cc6590abef94c4eace7b2bd1c6f
|
import os
import tempfile
import StringIO
import pwd
import subprocess
from cgi import FieldStorage
from galaxy import datatypes, util
from galaxy.util.odict import odict
from galaxy.datatypes import sniff
from galaxy.util.json import dumps
from galaxy.model.orm import eagerload_all
from galaxy.exceptions import ObjectInvalid
import logging
log = logging.getLogger( __name__ )
def persist_uploads( params ):
    """
    Turn any uploads in the submitted form to persisted files.

    For each entry in params['files']: a cgi.FieldStorage upload is hard-linked
    to a temp file and replaced by a {filename, local_filename} dict; a dict
    without 'local_filename' is rejected; a non-empty 'url_paste' is streamed
    to a file.  Returns the (mutated) params.
    """
    if 'files' in params:
        new_files = []
        for upload_dataset in params['files']:
            f = upload_dataset['file_data']
            if isinstance( f, FieldStorage ):
                assert not isinstance( f.file, StringIO.StringIO )
                assert f.file.name != '<fdopen>'
                # Hard-link the upload so it survives the request's temp cleanup.
                local_filename = util.mkstemp_ln( f.file.name, 'upload_file_data_' )
                f.file.close()
                upload_dataset['file_data'] = dict( filename=f.filename,
                                                    local_filename=local_filename )
            elif type( f ) == dict and 'local_filename' not in f:
                # BUG FIX: this previously read
                #     type( f ) == dict and 'filename' and 'local_filename' not in f
                # where the bare string 'filename' is always truthy, so it was a
                # no-op; the real requirement is that an already-persisted dict
                # carries a 'local_filename' key.
                raise Exception( 'Uploaded file was encoded in a way not understood by Galaxy.' )
            if upload_dataset['url_paste'] and upload_dataset['url_paste'].strip() != '':
                upload_dataset['url_paste'], is_multi_byte = datatypes.sniff.stream_to_file( StringIO.StringIO( upload_dataset['url_paste'] ), prefix="strio_url_paste_" )
            else:
                # Normalize empty/whitespace pastes to None for downstream checks.
                upload_dataset['url_paste'] = None
            new_files.append( upload_dataset )
        params['files'] = new_files
    return params
def handle_library_params( trans, params, folder_id, replace_dataset=None ):
    """
    Collect the library-upload context (target folder, optional replacement
    dataset, template + field contents, roles, message) into a Bunch.

    Arguments:
    - trans: the framework transaction (provides sa_session, app.model, security).
    - params: the already-parsed request parameters.
    - folder_id: encoded id of the destination LibraryFolder.
    - replace_dataset: optional LibraryDataset being replaced by this upload.
    """
    # FIXME: the received params has already been parsed by util.Params() by the time it reaches here,
    # so no complex objects remain.  This is not good because it does not allow for those objects to be
    # manipulated here.  The received params should be the original kwd from the initial request.
    library_bunch = util.bunch.Bunch()
    library_bunch.replace_dataset = replace_dataset
    library_bunch.message = params.get( 'ldda_message', '' )
    # See if we have any template field contents
    library_bunch.template_field_contents = {}
    template_id = params.get( 'template_id', None )
    library_bunch.folder = trans.sa_session.query( trans.app.model.LibraryFolder ).get( trans.security.decode_id( folder_id ) )
    # We are inheriting the folder's info_association, so we may have received inherited contents or we may have redirected
    # here after the user entered template contents ( due to errors ).
    if template_id not in [ None, 'None' ]:
        library_bunch.template = trans.sa_session.query( trans.app.model.FormDefinition ).get( template_id )
        # Pull each template field's submitted value out of the request params.
        for field in library_bunch.template.fields:
            field_name = field[ 'name' ]
            if params.get( field_name, False ):
                field_value = util.restore_text( params.get( field_name, '' ) )
                library_bunch.template_field_contents[ field_name ] = field_value
    else:
        library_bunch.template = None
    # Resolve the selected role ids into Role objects.
    library_bunch.roles = []
    for role_id in util.listify( params.get( 'roles', [] ) ):
        role = trans.sa_session.query( trans.app.model.Role ).get( role_id )
        library_bunch.roles.append( role )
    return library_bunch
def get_precreated_datasets( trans, params, data_obj, controller='root' ):
    """
    Get any precreated datasets (when using asynchronous uploads).

    params['async_datasets'] holds a comma-separated list of dataset ids;
    each is loaded as data_obj (an HDA or LDDA model class) and kept only
    if the current user/session is allowed to claim it.  Returns the list
    of permitted dataset objects.
    """
    rval = []
    async_datasets = []
    if params.get( 'async_datasets', None ) not in ["None", "", None]:
        async_datasets = params['async_datasets'].split(',')
    current_user_roles = trans.get_current_user_roles()
    for id in async_datasets:
        try:
            data = trans.sa_session.query( data_obj ).get( int( id ) )
        except:
            # Bad/missing id from the form: log and skip rather than abort.
            log.exception( 'Unable to load precreated dataset (%s) sent in upload form' % id )
            continue
        if data_obj is trans.app.model.HistoryDatasetAssociation:
            # Anonymous users may only claim datasets from their own session's
            # current history; logged-in users only from their own histories.
            if trans.user is None and trans.galaxy_session.current_history != data.history:
                log.error( 'Got a precreated dataset (%s) but it does not belong to anonymous user\'s current session (%s)' % ( data.id, trans.galaxy_session.id ) )
            elif data.history.user != trans.user:
                log.error( 'Got a precreated dataset (%s) but it does not belong to current user (%s)' % ( data.id, trans.user.id ) )
            else:
                rval.append( data )
        elif data_obj is trans.app.model.LibraryDatasetDatasetAssociation:
            # Library uploads additionally require add-item permission on the folder.
            if controller == 'library' and not trans.app.security_agent.can_add_library_item( current_user_roles, data.library_dataset.folder ):
                log.error( 'Got a precreated dataset (%s) but this user (%s) is not allowed to write to it' % ( data.id, trans.user.id ) )
            else:
                rval.append( data )
    return rval
def get_precreated_dataset( precreated_datasets, name ):
    """
    Return a dataset matching a name from the list of precreated (via async
    upload) datasets.  If there's more than one upload with the exact same
    name, we need to pop one (the first) so it isn't chosen next time.
    Returns None when no dataset carries that name.
    """
    for position, candidate in enumerate( precreated_datasets ):
        if candidate.name == name:
            # Remove-and-return so repeated lookups walk through duplicates.
            return precreated_datasets.pop( position )
    return None
def cleanup_unused_precreated_datasets( precreated_datasets ):
    """Mark every unclaimed precreated dataset as errored with an explanatory
    message, logging each one as it is cleaned up."""
    for unclaimed in precreated_datasets:
        log.info( 'Cleaned up unclaimed precreated dataset (%s).' % ( unclaimed.id ) )
        unclaimed.state = unclaimed.states.ERROR
        unclaimed.info = 'No file contents were available.'
def __new_history_upload( trans, uploaded_dataset, history=None, state=None ):
    """
    Create, flush and return a new HistoryDatasetAssociation for an upload,
    added to the given history (default: the transaction's current history)
    with the history's default permissions applied.
    """
    if not history:
        history = trans.history
    hda = trans.app.model.HistoryDatasetAssociation( name=uploaded_dataset.name,
                                                     extension=uploaded_dataset.file_type,
                                                     dbkey=uploaded_dataset.dbkey,
                                                     history=history,
                                                     create_dataset=True,
                                                     sa_session=trans.sa_session )
    # Callers may preset a state (e.g. error); otherwise the job is queued.
    if state:
        hda.state = state
    else:
        hda.state = hda.states.QUEUED
    trans.sa_session.add( hda )
    trans.sa_session.flush()
    history.add_dataset( hda, genome_build=uploaded_dataset.dbkey )
    # Apply the history's default access permissions to the new dataset.
    permissions = trans.app.security_agent.history_get_default_permissions( history )
    trans.app.security_agent.set_all_dataset_permissions( hda.dataset, permissions )
    trans.sa_session.flush()
    return hda
def __new_library_upload( trans, cntrller, uploaded_dataset, library_bunch, state=None ):
    """
    Create, flush and return a new LibraryDatasetDatasetAssociation for an
    upload into a library folder, creating intermediate subfolders, copying
    permissions, and handling any attached info template and access roles.

    Raises Exception if the user lacks permission to add to the folder.
    """
    current_user_roles = trans.get_current_user_roles()
    if not ( ( trans.user_is_admin() and cntrller in [ 'library_admin', 'api' ] ) or trans.app.security_agent.can_add_library_item( current_user_roles, library_bunch.folder ) ):
        # This doesn't have to be pretty - the only time this should happen is if someone's being malicious.
        raise Exception( "User is not authorized to add datasets to this library." )
    folder = library_bunch.folder
    if uploaded_dataset.get( 'in_folder', False ):
        # Create subfolders if desired
        for name in uploaded_dataset.in_folder.split( os.path.sep ):
            trans.sa_session.refresh( folder )
            # Reuse an existing subfolder of the same name if present.
            matches = filter( lambda x: x.name == name, active_folders( trans, folder ) )
            if matches:
                folder = matches[0]
            else:
                new_folder = trans.app.model.LibraryFolder( name=name, description='Automatically created by upload tool' )
                new_folder.genome_build = trans.app.genome_builds.default_value
                folder.add_folder( new_folder )
                trans.sa_session.add( new_folder )
                trans.sa_session.flush()
                trans.app.security_agent.copy_library_permissions( trans, folder, new_folder )
                folder = new_folder
    # Reuse the LibraryDataset being replaced, or create a fresh one.
    if library_bunch.replace_dataset:
        ld = library_bunch.replace_dataset
    else:
        ld = trans.app.model.LibraryDataset( folder=folder, name=uploaded_dataset.name )
        trans.sa_session.add( ld )
        trans.sa_session.flush()
        trans.app.security_agent.copy_library_permissions( trans, folder, ld )
    ldda = trans.app.model.LibraryDatasetDatasetAssociation( name=uploaded_dataset.name,
                                                             extension=uploaded_dataset.file_type,
                                                             dbkey=uploaded_dataset.dbkey,
                                                             library_dataset=ld,
                                                             user=trans.user,
                                                             create_dataset=True,
                                                             sa_session=trans.sa_session )
    trans.sa_session.add( ldda )
    # Callers may preset a state (e.g. error); otherwise the job is queued.
    if state:
        ldda.state = state
    else:
        ldda.state = ldda.states.QUEUED
    ldda.message = library_bunch.message
    trans.sa_session.flush()
    # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
    trans.app.security_agent.copy_library_permissions( trans, ld, ldda )
    if library_bunch.replace_dataset:
        # Copy the Dataset level permissions from replace_dataset to the new LibraryDatasetDatasetAssociation.dataset
        trans.app.security_agent.copy_dataset_permissions( library_bunch.replace_dataset.library_dataset_dataset_association.dataset, ldda.dataset )
    else:
        # Copy the current user's DefaultUserPermissions to the new LibraryDatasetDatasetAssociation.dataset
        trans.app.security_agent.set_all_dataset_permissions( ldda.dataset, trans.app.security_agent.user_get_default_permissions( trans.user ) )
    folder.add_library_dataset( ld, genome_build=uploaded_dataset.dbkey )
    trans.sa_session.add( folder )
    trans.sa_session.flush()
    # Point the LibraryDataset at its newly created "current" association.
    ld.library_dataset_dataset_association_id = ldda.id
    trans.sa_session.add( ld )
    trans.sa_session.flush()
    # Handle template included in the upload form, if any.  If the upload is not asynchronous ( e.g., URL paste ),
    # then the template and contents will be included in the library_bunch at this point.  If the upload is
    # asynchronous ( e.g., uploading a file ), then the template and contents will be included in the library_bunch
    # in the get_uploaded_datasets() method below.
    if library_bunch.template and library_bunch.template_field_contents:
        # Since information templates are inherited, the template fields can be displayed on the upload form.
        # If the user has added field contents, we'll need to create a new form_values and info_association
        # for the new library_dataset_dataset_association object.
        # Create a new FormValues object, using the template we previously retrieved
        form_values = trans.app.model.FormValues( library_bunch.template, library_bunch.template_field_contents )
        trans.sa_session.add( form_values )
        trans.sa_session.flush()
        # Create a new info_association between the current ldda and form_values
        # TODO: Currently info_associations at the ldda level are not inheritable to the associated LibraryDataset,
        # we need to figure out if this is optimal
        info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( ldda, library_bunch.template, form_values )
        trans.sa_session.add( info_association )
        trans.sa_session.flush()
    # If roles were selected upon upload, restrict access to the Dataset to those roles
    if library_bunch.roles:
        for role in library_bunch.roles:
            dp = trans.app.model.DatasetPermissions( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action, ldda.dataset, role )
            trans.sa_session.add( dp )
            trans.sa_session.flush()
    return ldda
def new_upload( trans, cntrller, uploaded_dataset, library_bunch=None, history=None, state=None ):
    """Dispatch an upload to the library handler when a library_bunch is
    supplied, otherwise to the history handler."""
    if not library_bunch:
        return __new_history_upload( trans, uploaded_dataset, history=history, state=state )
    return __new_library_upload( trans, cntrller, uploaded_dataset, library_bunch, state )
def get_uploaded_datasets( trans, cntrller, params, precreated_datasets, dataset_upload_inputs, library_bunch=None, history=None ):
    """
    Collect the datasets produced by each upload input, pairing each with a
    matching precreated dataset when one exists (async uploads) or creating a
    new HDA/LDDA otherwise, and attach the data object to each uploaded_dataset.
    Returns the list of uploaded_dataset objects.
    """
    uploaded_datasets = []
    for dataset_upload_input in dataset_upload_inputs:
        uploaded_datasets.extend( dataset_upload_input.get_uploaded_datasets( trans, params ) )
    for uploaded_dataset in uploaded_datasets:
        data = get_precreated_dataset( precreated_datasets, uploaded_dataset.name )
        if not data:
            data = new_upload( trans, cntrller, uploaded_dataset, library_bunch=library_bunch, history=history )
        else:
            # Claiming a precreated dataset: sync its metadata with the form values.
            data.extension = uploaded_dataset.file_type
            data.dbkey = uploaded_dataset.dbkey
            data.uuid = uploaded_dataset.uuid
            trans.sa_session.add( data )
            trans.sa_session.flush()
            if library_bunch:
                library_bunch.folder.genome_build = uploaded_dataset.dbkey
                trans.sa_session.add( library_bunch.folder )
                # Handle template included in the upload form, if any.  If the upload is asynchronous ( e.g., file upload ),
                # then the template and contents will be included in the library_bunch at this point.  If the upload is
                # not asynchronous ( e.g., URL paste ), then the template and contents will be included in the library_bunch
                # in the new_library_upload() method above.
                if library_bunch.template and library_bunch.template_field_contents:
                    # Since information templates are inherited, the template fields can be displayed on the upload form.
                    # If the user has added field contents, we'll need to create a new form_values and info_association
                    # for the new library_dataset_dataset_association object.
                    # Create a new FormValues object, using the template we previously retrieved
                    form_values = trans.app.model.FormValues( library_bunch.template, library_bunch.template_field_contents )
                    trans.sa_session.add( form_values )
                    trans.sa_session.flush()
                    # Create a new info_association between the current ldda and form_values
                    # TODO: Currently info_associations at the ldda level are not inheritable to the associated LibraryDataset,
                    # we need to figure out if this is optimal
                    info_association = trans.app.model.LibraryDatasetDatasetInfoAssociation( data, library_bunch.template, form_values )
                    trans.sa_session.add( info_association )
                    trans.sa_session.flush()
            else:
                if not history:
                    history = trans.history
                history.genome_build = uploaded_dataset.dbkey
        uploaded_dataset.data = data
    return uploaded_datasets
def create_paramfile( trans, uploaded_datasets ):
    """
    Create the upload tool's JSON "param" file.

    Writes one JSON line per uploaded dataset describing how the upload job
    should ingest it (composite vs. plain upload), optionally chowning files
    to the requesting user via the configured external_chown_script.
    Returns the path of the param file.
    """
    def _chown( path ):
        # Hand ownership of 'path' to the system user derived from the
        # requesting user's email local-part, via the external chown script.
        try:
            # get username from email/username
            pwent = pwd.getpwnam( trans.user.email.split('@')[0] )
            cmd = [ '/usr/bin/sudo', '-E', trans.app.config.external_chown_script, path, pwent[0], str( pwent[3] ) ]
            log.debug( 'Changing ownership of %s with: %s' % ( path, ' '.join( cmd ) ) )
            p = subprocess.Popen( cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
            stdout, stderr = p.communicate()
            assert p.returncode == 0, stderr
        except Exception, e:
            # Best-effort: a failed chown is logged, not fatal.
            log.warning( 'Changing ownership of uploaded file %s failed: %s' % ( path, str( e ) ) )
    # TODO: json_file should go in the working directory
    json_file = tempfile.mkstemp()
    json_file_path = json_file[1]
    json_file = os.fdopen( json_file[0], 'w' )
    for uploaded_dataset in uploaded_datasets:
        data = uploaded_dataset.data
        if uploaded_dataset.type == 'composite':
            # we need to init metadata before the job is dispatched
            data.init_meta()
            for meta_name, meta_value in uploaded_dataset.metadata.iteritems():
                setattr( data.metadata, meta_name, meta_value )
            trans.sa_session.add( data )
            trans.sa_session.flush()
            json = dict( file_type=uploaded_dataset.file_type,
                         dataset_id=data.dataset.id,
                         dbkey=uploaded_dataset.dbkey,
                         type=uploaded_dataset.type,
                         metadata=uploaded_dataset.metadata,
                         primary_file=uploaded_dataset.primary_file,
                         composite_file_paths=uploaded_dataset.composite_files,
                         composite_files=dict( [ ( k, v.__dict__ ) for k, v in data.datatype.get_composite_files( data ).items() ] ) )
        else:
            # Plain (non-composite) upload; these attributes are optional on
            # the uploaded_dataset, so fall back to safe defaults.
            try:
                is_binary = uploaded_dataset.datatype.is_binary
            except:
                is_binary = None
            try:
                link_data_only = uploaded_dataset.link_data_only
            except:
                link_data_only = 'copy_files'
            try:
                uuid_str = uploaded_dataset.uuid
            except:
                uuid_str = None
            json = dict( file_type=uploaded_dataset.file_type,
                         ext=uploaded_dataset.ext,
                         name=uploaded_dataset.name,
                         dataset_id=data.dataset.id,
                         dbkey=uploaded_dataset.dbkey,
                         type=uploaded_dataset.type,
                         is_binary=is_binary,
                         link_data_only=link_data_only,
                         uuid=uuid_str,
                         to_posix_lines=getattr(uploaded_dataset, "to_posix_lines", True),
                         space_to_tab=uploaded_dataset.space_to_tab,
                         in_place=trans.app.config.external_chown_script is None,
                         path=uploaded_dataset.path )
            # TODO: This will have to change when we start bundling inputs.
            # Also, in_place above causes the file to be left behind since the
            # user cannot remove it unless the parent directory is writable.
            if link_data_only == 'copy_files' and trans.app.config.external_chown_script:
                _chown( uploaded_dataset.path )
        json_file.write( dumps( json ) + '\n' )
    json_file.close()
    # The job runner's user must be able to read the param file too.
    if trans.app.config.external_chown_script:
        _chown( json_file_path )
    return json_file_path
def create_job( trans, params, tool, json_file_path, data_list, folder=None, history=None ):
    """
    Create and queue the upload job for the given datasets.

    :param trans: the Galaxy transaction (provides sa_session, app, user, history)
    :param params: tool parameters; serialized onto the job via params_to_strings
    :param tool: the upload tool instance
    :param json_file_path: path of the paramfile written by the caller
    :param data_list: output datasets (HDAs or LDDAs) to attach to the job
    :param folder: if given, outputs are library datasets in this folder
    :param history: target history when not uploading to a library
                    (defaults to trans.history)
    :returns: (job, output) where output maps 'output<i>' -> dataset
    """
    job = trans.app.model.Job()
    galaxy_session = trans.get_galaxy_session()
    if type( galaxy_session ) == trans.model.GalaxySession:
        job.session_id = galaxy_session.id
    if trans.user is not None:
        job.user_id = trans.user.id
    if folder:
        job.library_folder_id = folder.id
    else:
        if not history:
            history = trans.history
        job.history_id = history.id
    job.tool_id = tool.id
    job.tool_version = tool.version
    # flush now so the job gets a database id we can log and reference
    job.state = job.states.UPLOAD
    trans.sa_session.add( job )
    trans.sa_session.flush()
    log.info( 'tool %s created job id %d' % ( tool.id, job.id ) )
    trans.log_event( 'created job id %d' % job.id, tool_id=tool.id )
    for name, value in tool.params_to_strings( params, trans.app ).iteritems():
        job.add_parameter( name, value )
    job.add_parameter( 'paramfile', dumps( json_file_path ) )
    # all outputs of one upload job go to the same object store; remember the
    # store chosen for the first dataset and reuse it for the rest
    object_store_id = None
    for i, dataset in enumerate( data_list ):
        if folder:
            job.add_output_library_dataset( 'output%i' % i, dataset )
        else:
            job.add_output_dataset( 'output%i' % i, dataset )
        # Create an empty file immediately
        if not dataset.dataset.external_filename:
            dataset.dataset.object_store_id = object_store_id
            try:
                trans.app.object_store.create( dataset.dataset )
            except ObjectInvalid:
                raise Exception('Unable to create output dataset: object store is full')
            object_store_id = dataset.dataset.object_store_id
            trans.sa_session.add( dataset )
            # open( dataset.file_name, "w" ).close()
    job.object_store_id = object_store_id
    # only now mark the job runnable and hand it to the queue
    job.state = job.states.NEW
    job.set_handler(tool.get_job_handler(None))
    trans.sa_session.add( job )
    trans.sa_session.flush()
    # Queue the job for execution
    trans.app.job_queue.put( job.id, job.tool_id )
    trans.log_event( "Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
    output = odict()
    for i, v in enumerate( data_list ):
        output[ 'output%i' % i ] = v
    return job, output
def active_folders( trans, folder ):
    """Return all active (non-deleted) sub-folders of ``folder``, ordered by name.

    Copied from galaxy.web.controllers.library_common (importing from which
    causes circular import issues); much faster than going through the mapper,
    and eager-loads the permissions on each folder.
    """
    folder_model = trans.app.model.LibraryFolder
    query = trans.sa_session.query( folder_model )
    query = query.filter_by( parent=folder, deleted=False )
    # eager-load permission actions so they don't trigger one query per folder
    query = query.options( eagerload_all( "actions" ) )
    query = query.order_by( folder_model.table.c.name )
    return query.all()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/tools/actions/upload_common.py
|
Python
|
gpl-3.0
| 22,064
|
[
"Galaxy"
] |
ba8457a8becd10212ef89b90e8062030c3386a579e1be5824150bc5900fa5320
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""exceptions handling (raising, catching, exceptions classes) checker
"""
import sys
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
import astroid
from astroid import YES, Instance, unpack_infer
from pylint.checkers import BaseChecker
from pylint.checkers.utils import is_empty, is_raising, check_messages
from pylint.interfaces import IAstroidChecker
def infer_bases(klass):
    """Yield the non-inferable (YES) nodes found anywhere in klass's base tree.

    This doesn't use .ancestors(), because we need the non-inferable nodes
    (YES nodes), which can't be retrieved from .ancestors().
    """
    for base_node in klass.bases:
        try:
            inferred = base_node.infer().next()
        except astroid.InferenceError:
            continue
        if inferred is YES:
            yield inferred
        else:
            # recurse into the inferred class's own base classes
            for nested in infer_bases(inferred):
                yield nested
# True when running under Python 3 (selects which version-specific checks apply).
PY3K = sys.version_info >= (3, 0)
# Exception names considered too broad to catch; overridable through the
# 'overgeneral-exceptions' checker option.
OVERGENERAL_EXCEPTIONS = ('Exception',)
# message-id -> (template, symbolic-name, description[, version constraints])
MSGS = {
    'E0701': ('Bad except clauses order (%s)',
              'bad-except-order',
              'Used when except clauses are not in the correct order (from the '
              'more specific to the more generic). If you don\'t fix the order, '
              'some exceptions may not be catched by the most specific handler.'),
    'E0702': ('Raising %s while only classes, instances or string are allowed',
              'raising-bad-type',
              'Used when something which is neither a class, an instance or a \
              string is raised (i.e. a `TypeError` will be raised).'),
    'E0703': ('Exception context set to something which is not an '
              'exception, nor None',
              'bad-exception-context',
              'Used when using the syntax "raise ... from ...", '
              'where the exception context is not an exception, '
              'nor None.',
              {'minversion': (3, 0)}),
    'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
              'raising-non-exception',
              'Used when a new style class which doesn\'t inherit from \
              BaseException is raised.'),
    'E0711': ('NotImplemented raised - should raise NotImplementedError',
              'notimplemented-raised',
              'Used when NotImplemented is raised instead of \
              NotImplementedError'),
    'E0712': ('Catching an exception which doesn\'t inherit from BaseException: %s',
              'catching-non-exception',
              'Used when a class which doesn\'t inherit from \
              BaseException is used as an exception in an except clause.'),
    'W0701': ('Raising a string exception',
              'raising-string',
              'Used when a string exception is raised.'),
    'W0702': ('No exception type(s) specified',
              'bare-except',
              'Used when an except clause doesn\'t specify exceptions type to \
              catch.'),
    'W0703': ('Catching too general exception %s',
              'broad-except',
              'Used when an except catches a too general exception, \
              possibly burying unrelated errors.'),
    'W0704': ('Except doesn\'t do anything',
              'pointless-except',
              'Used when an except clause does nothing but "pass" and there is\
              no "else" clause.'),
    'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
              'nonstandard-exception',
              'Used when a custom exception class is raised but doesn\'t \
              inherit from the builtin "Exception" class.',
              {'maxversion': (3, 0)}),
    'W0711': ('Exception to catch is the result of a binary "%s" operation',
              'binary-op-exception',
              'Used when the exception to catch is of the form \
              "except A or B:". If intending to catch multiple, \
              rewrite as "except (A, B):"'),
    'W0712': ('Implicit unpacking of exceptions is not supported in Python 3',
              'unpacking-in-except',
              'Python3 will not allow implicit unpacking of exceptions in except '
              'clauses. '
              'See http://www.python.org/dev/peps/pep-3110/',
              {'maxversion': (3, 0)}),
    'W0713': ('Indexing exceptions will not work on Python 3',
              'indexing-exception',
              'Indexing exceptions will not work on Python 3. Use '
              '`exception.args[index]` instead.',
              {'maxversion': (3, 0)}),
}
# Name of the module holding the builtin exceptions (renamed in Python 3).
if sys.version_info < (3, 0):
    EXCEPTIONS_MODULE = "exceptions"
else:
    EXCEPTIONS_MODULE = "builtins"
class ExceptionsChecker(BaseChecker):
    """checks for
    * excepts without exception filter
    * type of raise argument : string, Exceptions, other values
    """
    __implements__ = IAstroidChecker
    # checker registration data
    name = 'exceptions'
    msgs = MSGS
    priority = -4
    options = (('overgeneral-exceptions',
                {'default' : OVERGENERAL_EXCEPTIONS,
                 'type' :'csv', 'metavar' : '<comma-separated class names>',
                 'help' : 'Exceptions that will emit a warning '
                          'when being caught. Defaults to "%s"' % (
                              ', '.join(OVERGENERAL_EXCEPTIONS),)}
               ),
              )
    @check_messages('raising-string', 'nonstandard-exception', 'raising-bad-type',
                    'raising-non-exception', 'notimplemented-raised', 'bad-exception-context')
    def visit_raise(self, node):
        """visit raise possibly inferring value"""
        # ignore empty raise
        if node.exc is None:
            return
        # Python 3 only: 'raise X from Y' -- Y must be an exception or None
        # (PEP 3134 exception chaining).
        if PY3K and node.cause:
            try:
                cause = node.cause.infer().next()
            except astroid.InferenceError:
                pass
            else:
                if isinstance(cause, astroid.Const):
                    # a constant other than None is never a valid context
                    if cause.value is not None:
                        self.add_message('bad-exception-context',
                                         node=node)
                elif (not isinstance(cause, astroid.Class) and
                      not inherit_from_std_ex(cause)):
                    self.add_message('bad-exception-context',
                                     node=node)
        expr = node.exc
        # check the raw expression first; if inconclusive, infer its value
        # and check that instead
        if self._check_raise_value(node, expr):
            return
        else:
            try:
                value = unpack_infer(expr).next()
            except astroid.InferenceError:
                return
            self._check_raise_value(node, value)
    def _check_raise_value(self, node, expr):
        """check for bad values, string exception and class inheritance

        Returns True when a definite verdict (good or bad) could be reached
        for ``expr``; False means the caller should try inference.
        """
        value_found = True
        if isinstance(expr, astroid.Const):
            value = expr.value
            if isinstance(value, str):
                # raising a plain string (Python 2 style string exception)
                self.add_message('raising-string', node=node)
            else:
                self.add_message('raising-bad-type', node=node,
                                 args=value.__class__.__name__)
        elif (isinstance(expr, astroid.Name) and \
                 expr.name in ('None', 'True', 'False')) or \
                isinstance(expr, (astroid.List, astroid.Dict, astroid.Tuple,
                                  astroid.Module, astroid.Function)):
            self.add_message('raising-bad-type', node=node, args=expr.name)
        elif ((isinstance(expr, astroid.Name) and expr.name == 'NotImplemented')
              or (isinstance(expr, astroid.CallFunc) and
                  isinstance(expr.func, astroid.Name) and
                  expr.func.name == 'NotImplemented')):
            self.add_message('notimplemented-raised', node=node)
        elif isinstance(expr, astroid.BinOp) and expr.op == '%':
            # '%'-formatting at raise time implies a string exception
            self.add_message('raising-string', node=node)
        elif isinstance(expr, (Instance, astroid.Class)):
            if isinstance(expr, Instance):
                expr = expr._proxied
            if (isinstance(expr, astroid.Class) and
                    not inherit_from_std_ex(expr) and
                    expr.root().name != BUILTINS_NAME):
                if expr.newstyle:
                    self.add_message('raising-non-exception', node=node)
                else:
                    self.add_message('nonstandard-exception', node=node)
            else:
                value_found = False
        else:
            value_found = False
        return value_found
    @check_messages('unpacking-in-except')
    def visit_excepthandler(self, node):
        """Visit an except handler block and check for exception unpacking."""
        # 'except E, (a, b):' -- removed in Python 3 (PEP 3110)
        if isinstance(node.name, (astroid.Tuple, astroid.List)):
            self.add_message('unpacking-in-except', node=node)
    @check_messages('indexing-exception')
    def visit_subscript(self, node):
        """ Look for indexing exceptions. """
        try:
            for infered in node.value.infer():
                if not isinstance(infered, astroid.Instance):
                    continue
                if inherit_from_std_ex(infered):
                    self.add_message('indexing-exception', node=node)
        except astroid.InferenceError:
            return
    @check_messages('bare-except', 'broad-except', 'pointless-except',
                    'binary-op-exception', 'bad-except-order',
                    'catching-non-exception')
    def visit_tryexcept(self, node):
        """check for empty except"""
        # classes already caught by earlier handlers of this same try/except
        exceptions_classes = []
        nb_handlers = len(node.handlers)
        for index, handler in enumerate(node.handlers):
            # single except doing nothing but "pass" without else clause
            if nb_handlers == 1 and is_empty(handler.body) and not node.orelse:
                self.add_message('pointless-except', node=handler.type or handler.body[0])
            if handler.type is None:
                if nb_handlers == 1 and not is_raising(handler.body):
                    self.add_message('bare-except', node=handler)
                # check if a "except:" is followed by some other
                # except
                elif index < (nb_handlers - 1):
                    msg = 'empty except clause should always appear last'
                    self.add_message('bad-except-order', node=node, args=msg)
            elif isinstance(handler.type, astroid.BoolOp):
                # 'except A or B:' -- almost certainly meant 'except (A, B):'
                self.add_message('binary-op-exception', node=handler, args=handler.type.op)
            else:
                try:
                    excs = list(unpack_infer(handler.type))
                except astroid.InferenceError:
                    continue
                for exc in excs:
                    # XXX skip other non class nodes
                    if exc is YES or not isinstance(exc, astroid.Class):
                        continue
                    exc_ancestors = [anc for anc in exc.ancestors()
                                     if isinstance(anc, astroid.Class)]
                    # an earlier handler catching an ancestor class shadows
                    # this more specific one
                    for previous_exc in exceptions_classes:
                        if previous_exc in exc_ancestors:
                            msg = '%s is an ancestor class of %s' % (
                                previous_exc.name, exc.name)
                            self.add_message('bad-except-order', node=handler.type, args=msg)
                    if (exc.name in self.config.overgeneral_exceptions
                            and exc.root().name == EXCEPTIONS_MODULE
                            and nb_handlers == 1 and not is_raising(handler.body)):
                        self.add_message('broad-except', args=exc.name, node=handler.type)
                    if (not inherit_from_std_ex(exc) and
                            exc.root().name != BUILTINS_NAME):
                        # try to see if the exception is based on a C based
                        # exception, by infering all the base classes and
                        # looking for inference errors
                        bases = infer_bases(exc)
                        fully_infered = all(inferit is not YES
                                            for inferit in bases)
                        if fully_infered:
                            self.add_message('catching-non-exception',
                                             node=handler.type,
                                             args=(exc.name, ))
                exceptions_classes += excs
def inherit_from_std_ex(node):
    """Return True if the given class node is a subclass of the builtin
    Exception/BaseException (i.e. of exceptions.Exception on Python 2).
    """
    is_builtin_exc_root = (node.name in ('Exception', 'BaseException')
                           and node.root().name == EXCEPTIONS_MODULE)
    if is_builtin_exc_root:
        return True
    # walk direct parents only; recursion covers the rest of the hierarchy
    return any(inherit_from_std_ex(parent)
               for parent in node.ancestors(recurs=False))
def register(linter):
    """required method to auto register this checker"""
    checker = ExceptionsChecker(linter)
    linter.register_checker(checker)
|
ack8006/Python-mode-klen
|
pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/exceptions.py
|
Python
|
lgpl-3.0
| 13,694
|
[
"VisIt"
] |
a2a2408e00b96f41a66f9582d1be056a8a6c1ef2eac2d2c3d1c477223568592c
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
# Metadata consumed by Ansible tooling (ansible-doc, module validation).
ANSIBLE_METADATA = {'status': ['stableinterface'],
                    'supported_by': 'core',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: return a list of files based on specific criteria
requirements: []
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
options:
age:
required: false
default: null
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
required: false
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
contains:
required: false
default: null
description:
- One or more regex patterns which should be matched against the file content
paths:
required: true
aliases: [ "name", "path" ]
description:
- List of paths of directories to search. All paths must be fully qualified.
file_type:
required: false
description:
- Type of file to select
- The 'link' and 'any' choices were added in version 2.3
choices: [ "file", "directory", "link", "any" ]
default: "file"
recurse:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
required: false
default: null
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
required: false
default: "mtime"
choices: [ "atime", "mtime", "ctime" ]
description:
- Choose the file property against which we compare age. Default is mtime.
hidden:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+
get_checksum:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to retrieve a file's sha1 checksum
use_regex:
required: false
default: "False"
choices: [ True, False ]
description:
- If false the patterns are file globs (shell) if true they are python regexes
'''
EXAMPLES = r'''
# Recursively find /tmp files older than 2 days
- find:
paths: "/tmp"
age: "2d"
recurse: yes
# Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
- find:
paths: "/tmp"
age: "4w"
size: "1m"
recurse: yes
# Recursively find /var/tmp files with last access time greater than 3600 seconds
- find:
paths: "/var/tmp"
age: "3600"
age_stamp: atime
recurse: yes
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
- find:
paths: "/var/tmp"
patterns: "*.old,*.log.gz"
size: "10m"
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
# Note that yaml double quotes require escaping backslashes but yaml single
# quotes do not.
- find:
paths: "/var/tmp"
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: "10m"
use_regex: True
'''
RETURN = '''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list of dictionaries
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
import os
import stat
import fnmatch
import time
import re
def pfilter(f, patterns=None, use_regex=False):
    '''Return True if basename ``f`` matches any of ``patterns``.

    Patterns are Python regexes when ``use_regex`` is true, shell globs
    otherwise.  A ``patterns`` of None imposes no restriction.
    '''
    if patterns is None:
        return True
    if use_regex:
        # regex patterns are anchored at the start (re.match semantics)
        return any(re.compile(p).match(f) is not None for p in patterns)
    return any(fnmatch.fnmatch(f, p) for p in patterns)
def agefilter(st, now, age, timestamp):
    '''Return True if the file passes the age criterion.

    Non-negative ``age`` selects files at least that old; negative ``age``
    selects files at most abs(age) old.  ``timestamp`` is one of
    "atime"/"mtime"/"ctime".  A None ``age`` imposes no restriction.
    '''
    if age is None:
        return True
    elapsed = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        return elapsed >= abs(age)
    return elapsed <= abs(age)
def sizefilter(st, size):
    '''Return True if the file passes the size criterion.

    Non-negative ``size`` selects files at least that big; negative ``size``
    selects files of at most abs(size) bytes.  None imposes no restriction.
    '''
    if size is None:
        return True
    if size >= 0:
        return st.st_size >= size
    return st.st_size <= -size
def contentfilter(fsname, pattern):
    '''Return True if some line of file ``fsname`` matches regex ``pattern``.

    ``pattern`` is matched from the start of each line (re.match semantics).
    A None pattern imposes no restriction.  Any error -- unreadable file,
    undecodable content, invalid pattern -- yields False, keeping the
    module's best-effort behaviour.
    '''
    if pattern is None:
        return True
    try:
        # 'with' guarantees the handle is closed even when reading raises,
        # which the previous open()/close() pattern leaked.
        with open(fsname) as f:
            prog = re.compile(pattern)
            for line in f:
                if prog.match(line):
                    return True
    # narrower than the old bare 'except:' -- no longer swallows
    # KeyboardInterrupt/SystemExit -- but still best-effort for I/O,
    # decoding and regex errors.
    except Exception:
        pass
    return False
def statinfo(st):
    '''Translate an os.stat()/os.lstat() result into a flat dict of
    file properties (mode string, type predicates, ownership, sizes,
    timestamps and per-class permission booleans).
    '''
    mode = st.st_mode
    info = {
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
    }
    # permission and setuid/setgid bits, exposed as booleans
    for key, bit in (('wusr', stat.S_IWUSR), ('rusr', stat.S_IRUSR),
                     ('xusr', stat.S_IXUSR), ('wgrp', stat.S_IWGRP),
                     ('rgrp', stat.S_IRGRP), ('xgrp', stat.S_IXGRP),
                     ('woth', stat.S_IWOTH), ('roth', stat.S_IROTH),
                     ('xoth', stat.S_IXOTH), ('isuid', stat.S_ISUID),
                     ('isgid', stat.S_ISGID)):
        info[key] = bool(mode & bit)
    return info
def main():
    """Entry point: walk the requested paths, apply all filters, and
    exit via module.exit_json with the matching files."""
    module = AnsibleModule(
        argument_spec = dict(
            paths = dict(required=True, aliases=['name','path'], type='list'),
            patterns = dict(default=['*'], type='list', aliases=['pattern']),
            contains = dict(default=None, type='str'),
            file_type = dict(default="file", choices=['file', 'directory', 'link', 'any'], type='str'),
            age = dict(default=None, type='str'),
            age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'),
            size = dict(default=None, type='str'),
            recurse = dict(default='no', type='bool'),
            hidden = dict(default="False", type='bool'),
            follow = dict(default="False", type='bool'),
            get_checksum = dict(default="False", type='bool'),
            use_regex = dict(default="False", type='bool'),
        ),
        supports_check_mode=True,
    )
    params = module.params
    filelist = []
    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            # missing unit suffix defaults to seconds (multiplier 1)
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")
    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")
    now = time.time()
    msg = ''
    # count of filesystem objects examined, reported as 'examined'
    looked = 0
    for npath in params['paths']:
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            ''' ignore followlinks for python version < 2.6 '''
            # NOTE(review): 'sys' is not imported in this file -- it appears to
            # rely on the wildcard 'from ansible.module_utils.basic import *'
            # at the bottom of the module; confirm before refactoring imports.
            for root,dirs,files in (sys.version_info < (2,6,0) and os.walk(npath)) or \
                                    os.walk( npath, followlinks=params['follow']):
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname=os.path.normpath(os.path.join(root, fsobj))
                    # skip dotfiles unless explicitly requested
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue
                    try:
                        st = os.lstat(fsname)
                    # NOTE(review): bare except -- also swallows KeyboardInterrupt;
                    # consider narrowing to OSError.
                    except:
                        msg+="%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue
                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # regular files additionally honour size and content filters
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and \
                           contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                # without recurse, stop after the top-level directory listing
                if not params['recurse']:
                    break
        else:
            msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
grimmjow8/ansible
|
lib/ansible/modules/files/find.py
|
Python
|
gpl-3.0
| 13,484
|
[
"Brian"
] |
a3fdd51ff3f2b0619a50cd76c361bf609ae7a148d7b7a0cbdbf046eff9574530
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
"""
This module contains the feature calculators that take time series as input and calculate the values of the feature.
There are two types of features:
1. feature calculators which calculate a single number (simple)
2. feature calculators which calculate a bunch of features for a list of parameters at once,
to use e.g. cached results (combiner). They return a list of (key, value) pairs for each input parameter.
They are specified using the "fctype" parameter of each feature calculator, which is added using the
set_property function. Only functions in this python module, which have a parameter called "fctype" are
seen by tsfresh as a feature calculator. Others will not be calculated.
Feature calculators of type combiner should return the concatenated parameters sorted
alphabetically ascending.
"""
import functools
import itertools
import warnings
from builtins import range
from collections import defaultdict
import matrixprofile as mp
import numpy as np
import pandas as pd
import stumpy
from matrixprofile.exceptions import NoSolutionPossible
from numpy.linalg import LinAlgError
from scipy.signal import cwt, find_peaks_cwt, ricker, welch
from scipy.stats import linregress
from statsmodels.tools.sm_exceptions import MissingDataError
from statsmodels.tsa.ar_model import AutoReg
from tsfresh.utilities.string_manipulation import convert_to_output_format
with warnings.catch_warnings():
# Ignore warnings of the patsy package
warnings.simplefilter("ignore", DeprecationWarning)
from statsmodels.tsa.stattools import acf, adfuller, pacf
# todo: make sure '_' works in parameter names in all cases, add a warning if not
def _roll(a, shift):
"""
Roll 1D array elements. Improves the performance of numpy.roll() by reducing the overhead introduced from the
flexibility of the numpy.roll() method such as the support for rolling over multiple dimensions.
Elements that roll beyond the last position are re-introduced at the beginning. Similarly, elements that roll
back beyond the first position are re-introduced at the end (with negative shift).
Examples
--------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=2)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=-2)
>>> array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=12)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
Benchmark
---------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit _roll(x, shift=2)
>>> 1.89 µs ± 341 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit np.roll(x, shift=2)
>>> 11.4 µs ± 776 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
:param a: the input array
:type a: array_like
:param shift: the number of places by which elements are shifted
:type shift: int
:return: shifted array with the same shape as a
:return type: ndarray
"""
if not isinstance(a, np.ndarray):
a = np.asarray(a)
idx = shift % len(a)
return np.concatenate([a[-idx:], a[:-idx]])
def _get_length_sequences_where(x):
"""
This method calculates the length of all sub-sequences where the array x is either True or 1.
Examples
--------
>>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
>>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True]
>>> _get_length_sequences_where(x)
>>> [1, 3, 1, 2]
:param x: An iterable containing only 1, True, 0 and False values
:return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues
contained, the list [0] is returned.
"""
if len(x) == 0:
return [0]
else:
res = [len(list(group)) for value, group in itertools.groupby(x) if value == 1]
return res if len(res) > 0 else [0]
def _estimate_friedrich_coefficients(x, m, r):
    """
    Coefficients of polynomial :math:`h(x)`, which has been fitted to
    the deterministic dynamics of Langevin model

    .. math::
        \\dot{x}(t) = h(x(t)) + \\mathcal{N}(0,R)

    As described by
        Friedrich et al. (2000): Physics Letters A 271, p. 217-222
        *Extracting model equations from experimental data*

    For short time-series this method is highly dependent on the parameters.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param m: order of polynomial to fit for estimating fixed points of dynamics
    :type m: int
    :param r: number of quantile bins to use for averaging
    :type r: float
    :return: coefficients of polynomial of deterministic dynamics, or a list of
        m+1 NaNs when binning or fitting fails
    :return type: ndarray
    """
    assert m > 0, "Order of polynomial need to be positive integer, found {}".format(m)
    # pair each signal value with the increment that immediately follows it
    df = pd.DataFrame({"signal": x[:-1], "delta": np.diff(x)})
    try:
        # bin the signal values into r equal-frequency (quantile) bins
        df["quantiles"] = pd.qcut(df.signal, r)
    except ValueError:
        # qcut raises e.g. when there are too few distinct values for r bins
        return [np.NaN] * (m + 1)
    quantiles = df.groupby("quantiles")
    # average signal and increment within each bin -> one (x, dx) point per bin
    result = pd.DataFrame(
        {"x_mean": quantiles.signal.mean(), "y_mean": quantiles.delta.mean()}
    )
    result.dropna(inplace=True)
    try:
        # fit h as a degree-m polynomial through the per-bin averages
        return np.polyfit(result.x_mean, result.y_mean, deg=m)
    except (np.linalg.LinAlgError, ValueError):
        return [np.NaN] * (m + 1)
def _aggregate_on_chunks(x, f_agg, chunk_len):
"""
Takes the time series x and constructs a lower sampled version of it by applying the aggregation function f_agg on
consecutive chunks of length chunk_len
:param x: the time series to calculate the aggregation of
:type x: numpy.ndarray
:param f_agg: The name of the aggregation function that should be an attribute of the pandas.Series
:type f_agg: str
:param chunk_len: The size of the chunks where to aggregate the time series
:type chunk_len: int
:return: A list of the aggregation function over the chunks
:return type: list
"""
return [
getattr(x[i * chunk_len : (i + 1) * chunk_len], f_agg)()
for i in range(int(np.ceil(len(x) / chunk_len)))
]
def _into_subchunks(x, subchunk_length, every_n=1):
"""
Split the time series x into subwindows of length "subchunk_length", starting every "every_n".
For example, the input data if [0, 1, 2, 3, 4, 5, 6] will be turned into a matrix
0 2 4
1 3 5
2 4 6
with the settings subchunk_length = 3 and every_n = 2
"""
len_x = len(x)
assert subchunk_length > 1
assert every_n > 0
# how often can we shift a window of size subchunk_length over the input?
num_shifts = (len_x - subchunk_length) // every_n + 1
shift_starts = every_n * np.arange(num_shifts)
indices = np.arange(subchunk_length)
indexer = np.expand_dims(indices, axis=0) + np.expand_dims(shift_starts, axis=1)
return np.asarray(x)[indexer]
def set_property(key, value):
    """
    Return a decorator that sets attribute ``key`` of the decorated function
    to ``value``.  When the function has a docstring and the property being
    set is "fctype", a note about the calculator type is appended to it.
    """
    def decorator(func):
        setattr(func, key, value)
        if func.__doc__ and key == "fctype":
            func.__doc__ += "\n\n *This function is of type: " + value + "*\n"
        return func
    return decorator
@set_property("fctype", "simple")
def variance_larger_than_standard_deviation(x):
    """
    Is the variance higher than the standard deviation?

    Since std = sqrt(var), this is equivalent to asking whether the variance
    of x is larger than 1.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: bool
    """
    variance = np.var(x)
    return variance > np.sqrt(variance)
@set_property("fctype", "simple")
def ratio_beyond_r_sigma(x, r):
    """
    Ratio of values that are more than r * std(x) (so r times sigma) away from the mean of x.

    :param x: the time series to calculate the feature of
    :type x: iterable
    :param r: the ratio to compare with
    :type r: float
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    distance_from_mean = np.abs(x - np.mean(x))
    return np.sum(distance_from_mean > r * np.std(x)) / x.size
@set_property("fctype", "simple")
def large_standard_deviation(x, r):
    """
    Does the time series have a *large* standard deviation?

    Boolean variable denoting whether std(x) exceeds 'r' times the value
    range (max minus min) of x:

    .. math::
        std(x) > r * (max(X)-min(X))

    According to a rule of thumb, the standard deviation should be a fourth
    of the range of the values.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param r: the percentage of the range to compare with
    :type r: float
    :return: the value of this feature
    :return type: bool
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    value_range = np.max(x) - np.min(x)
    return np.std(x) > r * value_range
@set_property("fctype", "combiner")
def symmetry_looking(x, param):
    """
    Check whether the distribution of x *looks symmetric*, i.e. whether

    .. math::

        | mean(X)-median(X)| < r * (max(X)-min(X))

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"r": x} with x (float) the percentage of the range to compare with
    :type param: list
    :return: the value of this feature
    :return type: bool
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    asymmetry = np.abs(np.mean(x) - np.median(x))
    value_range = np.max(x) - np.min(x)
    result = []
    for config in param:
        threshold = config["r"] * value_range
        result.append(("r_{}".format(config["r"]), asymmetry < threshold))
    return result
@set_property("fctype", "simple")
def has_duplicate_max(x):
    """
    Check whether the maximum value of x is observed more than once.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: bool
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    occurrences_of_max = np.sum(x == np.max(x))
    return occurrences_of_max >= 2
@set_property("fctype", "simple")
def has_duplicate_min(x):
    """
    Check whether the minimal value of x is observed more than once.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: bool
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    occurrences_of_min = np.sum(x == np.min(x))
    return occurrences_of_min >= 2
@set_property("fctype", "simple")
def has_duplicate(x):
    """
    Check whether any value in x occurs more than once.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: bool
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    # fewer unique values than elements means at least one repeats
    return np.unique(x).size != x.size
@set_property("fctype", "simple")
@set_property("minimal", True)
def sum_values(x):
    """
    Calculate the sum over the time series values.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    # an empty series sums to the plain int 0
    return np.sum(x) if len(x) > 0 else 0
@set_property("fctype", "combiner")
def agg_autocorrelation(x, param):
    """
    Descriptive statistics on the autocorrelation of the time series.

    Calculates the value of an aggregation function :math:`f_{agg}` (e.g. the
    variance or the mean) over the autocorrelation :math:`R(l)` for different
    lags. The autocorrelation :math:`R(l)` for lag :math:`l` is defined as

    .. math::

        R(l) = \\frac{1}{(n-l)\\sigma^2} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)

    where :math:`X_i` are the values of the time series, :math:`n` its length,
    and :math:`\\sigma^2` and :math:`\\mu` estimators for its variance and mean
    (see `Estimation of the Autocorrelation function
    <http://en.wikipedia.org/wiki/Autocorrelation#Estimation>`_).
    The aggregation function is applied to the vector of autocorrelations for
    lags 1 up to the requested "maxlag".

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"f_agg": x, "maxlag", n} with x str, the name of a numpy function
                  (e.g. "mean", "var", "std", "median") applied to the autocorrelations, and n an int,
                  the maximal number of lags to consider
    :type param: list
    :return: the value of this feature
    :return type: float
    """
    # above this length the fft-based autocorrelation estimate is faster
    THRESHOLD_TO_USE_FFT = 1250
    n = len(x)
    var = np.var(x)
    max_maxlag = max(config["maxlag"] for config in param)
    if n == 1 or np.abs(var) < 10 ** -10:
        # a (near-)constant or single-value series carries no autocorrelation signal
        a = [0] * len(x)
    else:
        # drop the trivial lag-0 coefficient (always 1)
        a = acf(x, adjusted=True, fft=n > THRESHOLD_TO_USE_FFT, nlags=max_maxlag)[1:]
    result = []
    for config in param:
        aggregator = getattr(np, config["f_agg"])
        feature_name = 'f_agg_"{}"__maxlag_{}'.format(config["f_agg"], config["maxlag"])
        result.append((feature_name, aggregator(a[: int(config["maxlag"])])))
    return result
@set_property("fctype", "combiner")
def partial_autocorrelation(x, param):
    """
    Calculates the value of the partial autocorrelation function at the given lag.

    The lag `k` partial autocorrelation of a time series
    :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation
    of :math:`x_t` and :math:`x_{t-k}`, adjusted for the intermediate variables
    :math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]). Following [2]:

    .. math::

        \\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
        {\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}

    It is said in [1] that "for an AR(p), the partial autocorrelations
    [ :math:`\\alpha_k` ] will be nonzero for `k<=p` and zero for `k>p`."
    With this property, it is used to determine the lag of an AR-Process.

    .. rubric:: References

    |  [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
    |  Time series analysis: forecasting and control. John Wiley & Sons.
    |  [2] https://onlinecourses.science.psu.edu/stat510/node/62

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
    :type param: list
    :return: the value of this feature
    :return type: float
    """
    max_demanded_lag = max(lag["lag"] for lag in param)
    n = len(x)
    if n <= 1:
        # too few observations to compute anything
        pacf_coeffs = [np.nan] * (max_demanded_lag + 1)
    else:
        # statsmodels' pacf limits the lag length to 50% of the sample size
        # (https://github.com/statsmodels/statsmodels/pull/6846)
        max_lag = min(max_demanded_lag, n // 2 - 1)
        if max_lag > 0:
            pacf_coeffs = list(pacf(x, method="ld", nlags=max_lag))
            # pad lags that could not be computed with NaN
            pacf_coeffs = pacf_coeffs + [np.nan] * max(0, (max_demanded_lag - max_lag))
        else:
            pacf_coeffs = [np.nan] * (max_demanded_lag + 1)
    return [("lag_{}".format(lag["lag"]), pacf_coeffs[lag["lag"]]) for lag in param]
@set_property("fctype", "combiner")
def augmented_dickey_fuller(x, param):
    """
    Does the time series have a unit root?

    The Augmented Dickey-Fuller test is a hypothesis test which checks whether
    a unit root is present in a time series sample. This feature calculator
    returns the value of the respective test statistic.

    See the statsmodels implementation for references and more details.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"attr": x, "autolag": y} with x str, either "teststat", "pvalue" or "usedlag"
                  and with y str, either of "AIC", "BIC", "t-stats" or None (See the documentation of adfuller() in
                  statsmodels).
    :type param: list
    :return: the value of this feature
    :return type: float
    """
    # cache the (expensive) test per autolag value, as several configs may share it
    @functools.lru_cache()
    def compute_adf(autolag):
        # adfuller raises for singular matrices (LinAlgError), too small
        # samples (ValueError) and inf/nan in the data (MissingDataError);
        # all map to "no result"
        try:
            return adfuller(x, autolag=autolag)
        except (LinAlgError, ValueError, MissingDataError):
            return np.NaN, np.NaN, np.NaN

    # position of each supported attribute in the adfuller result tuple
    attr_position = {"teststat": 0, "pvalue": 1, "usedlag": 2}
    res = []
    for config in param:
        autolag = config.get("autolag", "AIC")
        adf = compute_adf(autolag)
        index = 'attr_"{}"__autolag_"{}"'.format(config["attr"], autolag)
        position = attr_position.get(config["attr"])
        res.append((index, adf[position] if position is not None else np.NaN))
    return res
@set_property("fctype", "simple")
def abs_energy(x):
    """
    Returns the absolute energy of the time series, which is the sum over the squared values

    .. math::

        E = \\sum_{i=1,\\ldots, n} x_i^2

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    # dot product of x with itself equals the sum of squares
    return np.dot(x, x)
@set_property("fctype", "simple")
def cid_ce(x, normalize):
    """
    Estimate of the time series complexity [1] (a more complex time series has
    more peaks, valleys etc.). Computes

    .. math::

        \\sqrt{ \\sum_{i=1}^{n-1} ( x_{i} - x_{i-1})^2 }

    .. rubric:: References

    |  [1] Batista, Gustavo EAPA, et al (2014).
    |  CID: an efficient complexity-invariant distance for time series.
    |  Data Mining and Knowledge Discovery 28.3 (2014): 634-669.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param normalize: should the time series be z-transformed?
    :type normalize: bool
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    if normalize:
        s = np.std(x)
        if s == 0:
            # a constant series has zero complexity
            return 0.0
        x = (x - np.mean(x)) / s
    diffs = np.diff(x)
    return np.sqrt(np.dot(diffs, diffs))
@set_property("fctype", "simple")
def mean_abs_change(x):
    """
    Mean over the absolute differences between subsequent time series values:

    .. math::

        \\frac{1}{n-1} \\sum_{i=1,\\ldots, n-1} | x_{i+1} - x_{i}|

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    absolute_differences = np.abs(np.diff(x))
    return np.mean(absolute_differences)
@set_property("fctype", "simple")
def mean_change(x):
    """
    Mean over the differences between subsequent time series values. The sum
    telescopes, so this is

    .. math::

        \\frac{1}{n-1} \\sum_{i=1,\\ldots, n-1} x_{i+1} - x_{i} = \\frac{1}{n-1} (x_{n} - x_{1})

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    x = np.asarray(x)
    if len(x) <= 1:
        return np.NaN
    return (x[-1] - x[0]) / (len(x) - 1)
@set_property("fctype", "simple")
def mean_second_derivative_central(x):
    """
    Mean value of a central approximation of the second derivative:

    .. math::

        \\frac{1}{2(n-2)} \\sum_{i=1,\\ldots, n-1}  \\frac{1}{2} (x_{i+2} - 2 \\cdot x_{i+1} + x_i)

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    x = np.asarray(x)
    if len(x) <= 2:
        return np.NaN
    # the inner terms of the sum telescope, leaving only the boundary values
    return (x[-1] - x[-2] - x[1] + x[0]) / (2 * (len(x) - 2))
@set_property("fctype", "simple")
@set_property("minimal", True)
def median(x):
    """
    Returns the median of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    return np.median(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def mean(x):
    """
    Returns the mean of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    return np.mean(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def length(x):
    """
    Returns the length of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: int
    """
    return len(x)
@set_property("fctype", "simple")
@set_property("minimal", True)
def standard_deviation(x):
    """
    Returns the standard deviation of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    return np.std(x)
@set_property("fctype", "simple")
def variation_coefficient(x):
    """
    Returns the variation coefficient (standard deviation / mean, i.e. the
    relative spread around the mean) of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    center = np.mean(x)
    # undefined for a zero mean
    return np.std(x) / center if center != 0 else np.nan
@set_property("fctype", "simple")
@set_property("minimal", True)
def variance(x):
    """
    Returns the variance of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    return np.var(x)
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def skewness(x):
    """
    Returns the sample skewness of x (calculated with the adjusted
    Fisher-Pearson standardized moment coefficient G1).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, pd.Series):
        x = pd.Series(x)
    return x.skew()
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def kurtosis(x):
    """
    Returns the kurtosis of x (calculated with the adjusted Fisher-Pearson
    standardized moment coefficient G2).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, pd.Series):
        x = pd.Series(x)
    return x.kurtosis()
@set_property("fctype", "simple")
@set_property("minimal", True)
def root_mean_square(x):
    """
    Returns the root mean square (rms) of the time series.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if len(x) == 0:
        return np.NaN
    return np.sqrt(np.mean(np.square(x)))
@set_property("fctype", "simple")
def absolute_sum_of_changes(x):
    """
    Sum over the absolute value of consecutive changes in the series x:

    .. math::

        \\sum_{i=1, \\ldots, n-1} \\mid x_{i+1}- x_i \\mid

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    absolute_differences = np.abs(np.diff(x))
    return np.sum(absolute_differences)
@set_property("fctype", "simple")
def longest_strike_below_mean(x):
    """
    Returns the length of the longest consecutive subsequence in x that is
    smaller than the mean of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    if x.size == 0:
        return 0
    return np.max(_get_length_sequences_where(x < np.mean(x)))
@set_property("fctype", "simple")
def longest_strike_above_mean(x):
    """
    Returns the length of the longest consecutive subsequence in x that is
    bigger than the mean of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    if x.size == 0:
        return 0
    return np.max(_get_length_sequences_where(x > np.mean(x)))
@set_property("fctype", "simple")
def count_above_mean(x):
    """
    Returns the number of values in x that are higher than the mean of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    above = np.where(x > np.mean(x))[0]
    return above.size
@set_property("fctype", "simple")
def count_below_mean(x):
    """
    Returns the number of values in x that are lower than the mean of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    below = np.where(x < np.mean(x))[0]
    return below.size
@set_property("fctype", "simple")
def last_location_of_maximum(x):
    """
    Returns the relative last location of the maximum value of x.
    The position is calculated relatively to the length of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    x = np.asarray(x)
    if len(x) == 0:
        return np.NaN
    # argmax on the reversed series gives the distance from the end
    return 1.0 - np.argmax(x[::-1]) / len(x)
@set_property("fctype", "simple")
def first_location_of_maximum(x):
    """
    Returns the first location of the maximum value of x.
    The position is calculated relatively to the length of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    if len(x) == 0:
        return np.NaN
    return np.argmax(x) / len(x)
@set_property("fctype", "simple")
def last_location_of_minimum(x):
    """
    Returns the last location of the minimal value of x.
    The position is calculated relatively to the length of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    x = np.asarray(x)
    if len(x) == 0:
        return np.NaN
    # argmin on the reversed series gives the distance from the end
    return 1.0 - np.argmin(x[::-1]) / len(x)
@set_property("fctype", "simple")
def first_location_of_minimum(x):
    """
    Returns the first location of the minimal value of x.
    The position is calculated relatively to the length of x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    if len(x) == 0:
        return np.NaN
    return np.argmin(x) / len(x)
@set_property("fctype", "simple")
def percentage_of_reoccurring_values_to_all_values(x):
    """
    Returns the percentage of values that are present in the time series
    more than once:

        len(different values occurring more than once) / len(different values)

    The percentage is normalized to the number of unique values, in contrast
    to the percentage_of_reoccurring_datapoints_to_all_datapoints.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if len(x) == 0:
        return np.nan
    _, value_counts = np.unique(x, return_counts=True)
    if value_counts.shape[0] == 0:
        return 0
    return np.sum(value_counts > 1) / float(value_counts.shape[0])
@set_property("fctype", "simple")
@set_property("input", "pd.Series")
def percentage_of_reoccurring_datapoints_to_all_datapoints(x):
    """
    Returns the percentage of non-unique data points, i.e. points that occur
    at least a second time in the series:

        # of data points occurring more than once / # of all data points

    The ratio is normalized to the number of data points in the time series,
    in contrast to the percentage_of_reoccurring_values_to_all_values.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if len(x) == 0:
        return np.nan
    if not isinstance(x, pd.Series):
        x = pd.Series(x)
    counts_per_value = x.value_counts()
    n_reoccurring = counts_per_value[counts_per_value > 1].sum()
    # summing an empty selection may yield NaN; report 0 reoccurring points then
    if np.isnan(n_reoccurring):
        return 0
    return n_reoccurring / x.size
@set_property("fctype", "simple")
def sum_of_reoccurring_values(x):
    """
    Returns the sum of all values that are present in the time series
    more than once. For example,

        sum_of_reoccurring_values([2, 2, 2, 2, 1]) = 2

    as 2 is a reoccurring value and is counted once no matter how often it
    appears. This is in contrast to ``sum_of_reoccurring_data_points``, where
    each reoccurring value is counted as often as it is present in the data.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    unique, counts = np.unique(x, return_counts=True)
    # each value occurring at least twice contributes exactly once
    return np.sum(unique[counts > 1])
@set_property("fctype", "simple")
def sum_of_reoccurring_data_points(x):
    """
    Returns the sum of all data points that are present in the time series
    more than once. For example,

        sum_of_reoccurring_data_points([2, 2, 2, 2, 1]) = 8

    as 2 is a reoccurring value, so all 2's are summed up. This is in contrast
    to ``sum_of_reoccurring_values``, where each reoccurring value is counted
    only once.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    unique, counts = np.unique(x, return_counts=True)
    reoccurring = counts > 1
    # each value occurring at least twice contributes count * value
    return np.sum(unique[reoccurring] * counts[reoccurring])
@set_property("fctype", "simple")
def ratio_value_number_to_time_series_length(x):
    """
    Returns a factor which is 1 if all values in the time series occur only
    once, and below one otherwise. In principle, it just returns

        # unique values / # values

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    if x.size == 0:
        # ratio is undefined for an empty series
        return np.nan
    return np.unique(x).size / x.size
@set_property("fctype", "combiner")
def fft_coefficient(x, param):
    """
    Calculates the fourier coefficients of the one-dimensional discrete Fourier
    Transform for real input by fast fourier transformation algorithm:

    .. math::

        A_k =  \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0,
        \\ldots , n-1.

    The resulting coefficients are complex; this feature calculator can return
    the real part (attr=="real"), the imaginary part (attr=="imag), the absolute
    value (attr=""abs) and the angle in degrees (attr=="angle).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag",
                  "abs", "angle"]
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    assert (
        min([config["coeff"] for config in param]) >= 0
    ), "Coefficients must be positive or zero."
    assert {config["attr"] for config in param} <= {
        "imag",
        "real",
        "abs",
        "angle",
    }, 'Attribute must be "real", "imag", "angle" or "abs"'

    fft = np.fft.rfft(x)

    # map each attribute name to the corresponding extraction of a complex value
    extractors = {
        "real": lambda value: value.real,
        "imag": lambda value: value.imag,
        "abs": np.abs,
        "angle": lambda value: np.angle(value, deg=True),
    }

    res = []
    index = []
    for config in param:
        coeff, attr = config["coeff"], config["attr"]
        index.append('attr_"{}"__coeff_{}'.format(attr, coeff))
        # coefficients beyond the spectrum length cannot be computed
        res.append(extractors[attr](fft[coeff]) if coeff < len(fft) else np.NaN)
    return zip(index, res)
@set_property("fctype", "combiner")
def fft_aggregated(x, param):
    """
    Returns the spectral centroid (mean), variance, skew, and kurtosis of the absolute fourier transform spectrum.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"aggtype": s} where s str and in ["centroid", "variance",
        "skew", "kurtosis"]
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    assert {config["aggtype"] for config in param} <= {
        "centroid",
        "variance",
        "skew",
        "kurtosis",
    }, 'Attribute must be "centroid", "variance", "skew", "kurtosis"'

    def get_moment(y, moment):
        """
        Returns the (non centered) moment of the distribution y:
        E[y**moment] = \\sum_i[index(y_i)^moment * y_i] / \\sum_i[y_i]

        :param y: the discrete distribution from which one wants to calculate the moment
        :type y: pandas.Series or np.array
        :param moment: the moment one wants to calcalate (choose 1,2,3, ... )
        :type moment: int
        :return: the moment requested
        :return type: float
        """
        return y.dot(np.arange(len(y), dtype=float) ** moment) / y.sum()

    def get_centroid(y):
        """
        :param y: the discrete distribution from which one wants to calculate the centroid
        :type y: pandas.Series or np.array
        :return: the centroid of distribution y (aka distribution mean, first moment)
        :return type: float
        """
        return get_moment(y, 1)

    def get_variance(y):
        """
        :param y: the discrete distribution from which one wants to calculate the variance
        :type y: pandas.Series or np.array
        :return: the variance of distribution y
        :return type: float
        """
        return get_moment(y, 2) - get_centroid(y) ** 2

    def get_skew(y):
        """
        Calculates the skew as the third standardized moment.
        Ref: https://en.wikipedia.org/wiki/Skewness#Definition

        :param y: the discrete distribution from which one wants to calculate the skew
        :type y: pandas.Series or np.array
        :return: the skew of distribution y
        :return type: float
        """
        variance = get_variance(y)
        # In the limit of a dirac delta, skew should be 0 and variance 0. However, in the discrete limit,
        # the skew blows up as variance --> 0, hence return nan when variance is smaller than a resolution of 0.5:
        if variance < 0.5:
            return np.nan
        else:
            # third central moment m3 - 3*c*var - c^3, standardized by sigma^3
            return (
                get_moment(y, 3) - 3 * get_centroid(y) * variance - get_centroid(y) ** 3
            ) / get_variance(y) ** (1.5)

    def get_kurtosis(y):
        """
        Calculates the kurtosis as the fourth standardized moment.
        Ref: https://en.wikipedia.org/wiki/Kurtosis#Pearson_moments

        :param y: the discrete distribution from which one wants to calculate the kurtosis
        :type y: pandas.Series or np.array
        :return: the kurtosis of distribution y
        :return type: float
        """
        variance = get_variance(y)
        # In the limit of a dirac delta, kurtosis should be 3 and variance 0. However, in the discrete limit,
        # the kurtosis blows up as variance --> 0, hence return nan when variance is smaller than a resolution of 0.5:
        if variance < 0.5:
            return np.nan
        else:
            # Fourth central moment via the binomial expansion:
            # m4 - 4*c*m3 + 6*c^2*m2 - 3*c^4, standardized by sigma^4.
            # BUGFIX: the last term previously was "- 3 * get_centroid(y)"
            # (missing the fourth power), which is dimensionally inconsistent
            # with the other terms of the expansion.
            return (
                get_moment(y, 4)
                - 4 * get_centroid(y) * get_moment(y, 3)
                + 6 * get_moment(y, 2) * get_centroid(y) ** 2
                - 3 * get_centroid(y) ** 4
            ) / get_variance(y) ** 2

    calculation = dict(
        centroid=get_centroid,
        variance=get_variance,
        skew=get_skew,
        kurtosis=get_kurtosis,
    )
    fft_abs = np.abs(np.fft.rfft(x))
    res = [calculation[config["aggtype"]](fft_abs) for config in param]
    index = ['aggtype_"{}"'.format(config["aggtype"]) for config in param]
    return zip(index, res)
@set_property("fctype", "simple")
def number_peaks(x, n):
    """
    Calculates the number of peaks of at least support n in the time series x.
    A peak of support n is defined as a subsequence of x where a value occurs
    which is bigger than its n neighbours to the left and to the right.

    Hence in the sequence

    >>> x = [3, 0, 0, 4, 0, 0, 13]

    4 is a peak of support 1 and 2 because in the subsequences

    >>> [0, 4, 0]
    >>> [0, 0, 4, 0, 0]

    4 is still the highest value. Here, 4 is not a peak of support 3 because 13
    is the 3th neighbour to the right of 4 and its bigger than 4.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param n: the support of the peak
    :type n: int
    :return: the value of this feature
    :return type: float
    """
    # candidates: all values that have n neighbours on both sides
    candidates = x[n:-n]
    res = None
    for shift in range(1, n + 1):
        bigger_than_left = candidates > _roll(x, shift)[n:-n]
        res = bigger_than_left if res is None else res & bigger_than_left
        res &= candidates > _roll(x, -shift)[n:-n]
    return np.sum(res)
@set_property("fctype", "combiner")
def index_mass_quantile(x, param):
    """
    Calculates the relative index i of time series x where q% of the mass of x
    lies left of i. For example for q = 50% this feature calculator will return
    the mass center of the time series.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"q": x} with x float
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    x = np.asarray(x)
    abs_x = np.abs(x)
    total_mass = np.sum(abs_x)
    if total_mass == 0:
        # all values in x are zero or it has length 0
        return [("q_{}".format(config["q"]), np.NaN) for config in param]
    cumulative_mass = np.cumsum(abs_x) / total_mass
    res = []
    for config in param:
        q = config["q"]
        # first index where the cumulative mass reaches q, as a relative position
        relative_index = (np.argmax(cumulative_mass >= q) + 1) / len(x)
        res.append(("q_{}".format(q), relative_index))
    return res
@set_property("fctype", "simple")
def number_cwt_peaks(x, n):
    """
    Number of different peaks in x.

    To estimate the number of peaks, x is smoothed by a ricker wavelet for
    widths ranging from 1 to n. This feature calculator returns the number of
    peaks that occur at enough width scales and with sufficiently high
    Signal-to-Noise-Ratio (SNR).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param n: maximum width to consider
    :type n: int
    :return: the value of this feature
    :return type: int
    """
    widths = np.array(list(range(1, n + 1)))
    peak_positions = find_peaks_cwt(vector=x, widths=widths, wavelet=ricker)
    return len(peak_positions)
@set_property("fctype", "combiner")
def linear_trend(x, param):
    """
    Calculate a linear least-squares regression for the values of the time
    series versus the sequence from 0 to length of the time series minus one.
    This feature assumes the signal to be uniformly sampled; it will not use
    the time stamps to fit the model.

    Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope",
    "stderr"; see the documentation of linregress for more information.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"attr": x} with x an string, the attribute name of the regression model
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    # todo: we could use the index of the DataFrame here
    fit = linregress(range(len(x)), x)
    res = []
    for config in param:
        attr = config["attr"]
        res.append(('attr_"{}"'.format(attr), getattr(fit, attr)))
    return res
@set_property("fctype", "combiner")
def cwt_coefficients(x, param):
    """
    Calculates a Continuous wavelet transform for the Ricker wavelet, also
    known as the "Mexican hat wavelet", which is defined by

    .. math::
        \\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})

    where :math:`a` is the width parameter of the wavelet function.

    This feature calculator takes three different parameters: widths, coeff
    and w. The cwt is computed once per distinct widths array and the
    coefficient at position (w, coeff) is returned for each configuration.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    cwt_cache = {}
    res = []
    indices = []
    for config in param:
        widths = tuple(config["widths"])
        w = config["w"]
        coeff = config["coeff"]
        # compute the transform only once per distinct widths tuple
        if widths not in cwt_cache:
            cwt_cache[widths] = cwt(x, ricker, widths)
        transform = cwt_cache[widths]
        indices.append("coeff_{}__w_{}__widths_{}".format(coeff, w, widths))
        row = widths.index(w)
        # coefficients beyond the series length cannot be computed
        res.append(transform[row, coeff] if transform.shape[1] > coeff else np.NaN)
    return zip(indices, res)
@set_property("fctype", "combiner")
def spkt_welch_density(x, param):
    """
    Estimates the cross power spectral density of the time series x at
    different frequencies. The series is first shifted from the time domain to
    the frequency domain; the power spectrum at the requested coefficients is
    returned.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"coeff": x} with x int
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    freq, pxx = welch(x, nperseg=min(len(x), 256))
    coeff = [config["coeff"] for config in param]
    indices = ["coeff_{}".format(i) for i in coeff]
    if len(pxx) > np.max(coeff):
        # all requested coefficients are contained in the spectrum
        return zip(indices, pxx[coeff])
    # fewer spectrum bins than requested coefficients: keep the computable
    # ones and pad the remainder with NaN
    available = [coefficient for coefficient in coeff if len(pxx) > coefficient]
    missing_count = len(coeff) - len(available)
    return zip(indices, list(pxx[available]) + [np.NaN] * missing_count)
@set_property("fctype", "combiner")
def ar_coefficient(x, param):
    """
    Fit an unconditional maximum likelihood autoregressive AR(k) process to
    the series and return selected coefficients.

    The maximum lag k is given per parameter combination; the coefficient
    with index "coeff" of the fitted process is returned for each.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"coeff": x, "k": y} with x,y int
    :type param: list
    :return x: the different feature values
    :return type: pandas.Series
    """
    fitted_params_by_lag = {}
    series = list(x)
    results = {}
    for combination in param:
        max_lag = combination["k"]
        coeff_idx = combination["coeff"]
        name = "coeff_{}__k_{}".format(coeff_idx, max_lag)
        # Fit each AR(k) model only once and cache its parameters.
        if max_lag not in fitted_params_by_lag:
            try:
                model = AutoReg(series, lags=max_lag, trend="c")
                fitted_params_by_lag[max_lag] = model.fit().params
            except (ZeroDivisionError, LinAlgError, ValueError):
                fitted_params_by_lag[max_lag] = [np.nan] * max_lag
        params = fitted_params_by_lag[max_lag]
        if coeff_idx > max_lag:
            # Requested a coefficient beyond the model order.
            results[name] = np.nan
        elif coeff_idx < len(params):
            results[name] = params[coeff_idx]
        else:
            # Coefficient index valid for the order but missing in the fit.
            results[name] = 0
    return list(results.items())
@set_property("fctype", "simple")
def change_quantiles(x, ql, qh, isabs, f_agg):
    """
    First fixes a corridor given by the quantiles ql and qh of the distribution of x.
    Then calculates the average, absolute value of consecutive changes of the series x inside this corridor.

    Think about selecting a corridor on the y-Axis and only calculating the
    mean of the absolute change of the time series inside this corridor.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param ql: the lower quantile of the corridor
    :type ql: float
    :param qh: the higher quantile of the corridor
    :type qh: float
    :param isabs: should the absolute differences be taken?
    :type isabs: bool
    :param f_agg: the aggregator function that is applied to the differences in the bin
    :type f_agg: str, name of a numpy function (e.g. mean, var, std, median)
    :return: the value of this feature (0 if the corridor is empty or degenerate)
    :return type: float
    """
    # An inverted or empty corridor cannot contain any changes.
    if ql >= qh:
        return 0
    div = np.diff(x)
    if isabs:
        div = np.abs(div)
    # All values that originate from the corridor between the quantiles ql and qh will have the category 0,
    # other will be np.NaN
    try:
        bin_cat = pd.qcut(x, [ql, qh], labels=False)
        bin_cat_0 = bin_cat == 0
    except ValueError:  # Occurs when ql and qh are effectively equal, e.g. x is not long enough or is too categorical
        return 0
    # We only count changes that start and end inside the corridor.
    # _roll (module helper) shifts the mask by one, so ind[i] is True only
    # when both endpoints of the i-th difference lie inside the corridor.
    ind = (bin_cat_0 & _roll(bin_cat_0, 1))[1:]
    if np.sum(ind) == 0:
        return 0
    else:
        ind_inside_corridor = np.where(ind == 1)
        # f_agg names a numpy reduction, e.g. np.mean / np.var / np.std.
        aggregator = getattr(np, f_agg)
        return aggregator(div[ind_inside_corridor])
@set_property("fctype", "simple")
def time_reversal_asymmetry_statistic(x, lag):
    """
    Returns the time reversal asymmetry statistic.

    This function calculates the value of

    .. math::

        \\frac{1}{n-2lag} \\sum_{i=1}^{n-2lag} x_{i + 2 \\cdot lag}^2 \\cdot x_{i + lag} - x_{i + lag} \\cdot x_{i}^2

    which is :math:`\\mathbb{E}[L^2(X)^2 \\cdot L(X) - L(X) \\cdot X^2]`, where
    :math:`\\mathbb{E}` is the mean and :math:`L` is the lag operator.  It was
    proposed in [1] as a promising feature to extract from time series.

    .. rubric:: References

    | [1] Fulcher, B.D., Jones, N.S. (2014).
    | Highly comparative feature-based time-series classification.
    | Knowledge and Data Engineering, IEEE Transactions on 26, 3026-3037.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param lag: the lag that should be used in the calculation of the feature
    :type lag: int
    :return: the value of this feature (0 if the series is too short)
    :return type: float
    """
    n_obs = len(x)
    values = np.asarray(x)
    # The statistic needs x[i], x[i+lag] and x[i+2*lag] to all exist.
    if 2 * lag >= n_obs:
        return 0
    shifted_once = _roll(values, -lag)
    shifted_twice = _roll(values, 2 * -lag)
    terms = shifted_twice * shifted_twice * shifted_once - shifted_once * values * values
    return np.mean(terms[0 : (n_obs - 2 * lag)])
@set_property("fctype", "simple")
def c3(x, lag):
    """
    Uses c3 statistics to measure non linearity in the time series.

    This function calculates

    .. math::

        \\frac{1}{n-2lag} \\sum_{i=1}^{n-2lag} x_{i + 2 \\cdot lag} \\cdot x_{i + lag} \\cdot x_{i}

    i.e. :math:`\\mathbb{E}[L^2(X) \\cdot L(X) \\cdot X]`, where :math:`\\mathbb{E}`
    is the mean and :math:`L` is the lag operator.  Proposed in [1] as a
    measure of non linearity in the time series.

    .. rubric:: References

    | [1] Schreiber, T. and Schmitz, A. (1997).
    | Discrimination power of measures for nonlinearity in a time series
    | PHYSICAL REVIEW E, VOLUME 55, NUMBER 5

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param lag: the lag that should be used in the calculation of the feature
    :type lag: int
    :return: the value of this feature (0 if the series is too short)
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    length = x.size
    # Need x[i], x[i+lag] and x[i+2*lag] all inside the series.
    if 2 * lag >= length:
        return 0
    product = _roll(x, 2 * -lag) * _roll(x, -lag) * x
    return np.mean(product[0 : (length - 2 * lag)])
@set_property("fctype", "simple")
def mean_n_absolute_max(x, number_of_maxima):
    """
    Calculates the arithmetic mean of the n absolute maximum values of the time series.

    If the series is not strictly longer than ``number_of_maxima``, NaN is
    returned (there are not enough values to pick the maxima from).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param number_of_maxima: the number of maxima, which should be considered
    :type number_of_maxima: int
    :return: the value of this feature
    :return type: float
    :raises AssertionError: if ``number_of_maxima`` is not positive
    """
    # Fix: the previous error message claimed "not greater than 1" although
    # the condition actually checks against 0.
    assert (
        number_of_maxima > 0
    ), f" number_of_maxima={number_of_maxima} which is not greater than 0"
    if len(x) <= number_of_maxima:
        return np.nan
    # Sort by magnitude and average the largest ``number_of_maxima`` entries.
    n_absolute_maximum_values = np.sort(np.absolute(x))[-number_of_maxima:]
    return np.mean(n_absolute_maximum_values)
@set_property("fctype", "simple")
def binned_entropy(x, max_bins):
    """
    First bins the values of x into max_bins equidistant bins.
    Then calculates the value of

    .. math::

        - \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}

    where :math:`p_k` is the percentage of samples in bin :math:`k`.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param max_bins: the maximal number of bins
    :type max_bins: int
    :return: the value of this feature (NaN if x contains NaN)
    :return type: float
    """
    values = x if isinstance(x, (np.ndarray, pd.Series)) else np.asarray(x)
    # Entropy is undefined in the presence of NaNs.
    if np.isnan(values).any():
        return np.nan
    counts, _ = np.histogram(values, bins=max_bins)
    fractions = counts / values.size
    # Empty bins contribute nothing; replacing p=0 with p=1 makes p*log(p) vanish.
    fractions[fractions == 0] = 1.0
    return -np.sum(fractions * np.log(fractions))
# todo - include latex formula
# todo - check if vectorizable
@set_property("high_comp_cost", True)
@set_property("fctype", "simple")
def sample_entropy(x):
    """
    Calculate and return sample entropy of x.

    Sample entropy is -log(A / B), where B counts pairs of length-2 templates
    whose Chebyshev distance is within the tolerance (0.2 * std of x) and A
    counts the same for length-3 templates; self-matches are excluded.

    .. rubric:: References

    | [1] http://en.wikipedia.org/wiki/Sample_Entropy
    | [2] https://www.ncbi.nlm.nih.gov/pubmed/10843903?dopt=Abstract

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature (NaN if x contains NaN)
    :return type: float
    """
    x = np.array(x)
    # if one of the values is NaN, we can not compute anything meaningful
    if np.isnan(x).any():
        return np.nan
    m = 2 # common value for m, according to wikipedia...
    tolerance = 0.2 * np.std(
        x
    ) # 0.2 is a common value for r, according to wikipedia...
    # Split time series and save all templates of length m
    # Basically we turn [1, 2, 3, 4] into [1, 2], [2, 3], [3, 4]
    # (_into_subchunks is a helper defined elsewhere in this module)
    xm = _into_subchunks(x, m)
    # Now calculate the maximum distance between each of those pairs
    # np.abs(xmi - xm).max(axis=1)
    # and check how many are below the tolerance.
    # For speed reasons, we are not doing this in a nested for loop,
    # but with numpy magic.
    # Example:
    # if x = [1, 2, 3]
    # then xm = [[1, 2], [2, 3]]
    # so we will substract xm from [1, 2] => [[0, 0], [-1, -1]]
    # and from [2, 3] => [[1, 1], [0, 0]]
    # taking the abs and max gives us:
    # [0, 1] and [1, 0]
    # as the diagonal elements are always 0, we substract 1.
    B = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= tolerance) - 1 for xmi in xm])
    # Similar for computing A, but with templates of length m + 1
    xmp1 = _into_subchunks(x, m + 1)
    A = np.sum(
        [np.sum(np.abs(xmi - xmp1).max(axis=1) <= tolerance) - 1 for xmi in xmp1]
    )
    # Return SampEn
    # NOTE(review): when no similar template pairs exist, A / B is 0/0 and
    # this yields nan (with a runtime warning) — presumably acceptable upstream.
    return -np.log(A / B)
@set_property("fctype", "simple")
@set_property("high_comp_cost", True)
def approximate_entropy(x, m, r):
    """
    Implements a vectorized Approximate entropy algorithm.

    https://en.wikipedia.org/wiki/Approximate_entropy

    For short time-series this method is highly dependent on the parameters,
    but should be stable for N > 2000, see:

        Yentes et al. (2012) -
        *The Appropriate Use of Approximate Entropy and Sample Entropy with Short Data Sets*

    Other shortcomings and alternatives discussed in:

        Richman & Moorman (2000) -
        *Physiological time-series analysis using approximate entropy and sample entropy*

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param m: Length of compared run of data
    :type m: int
    :param r: Filtering level, must be positive
    :type r: float
    :return: Approximate entropy
    :return type: float
    :raises ValueError: if the scaled filtering level is negative
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    n_samples = x.size
    # The tolerance is taken relative to the spread of the data.
    r *= np.std(x)
    if r < 0:
        raise ValueError("Parameter r must be positive.")
    if n_samples <= m + 1:
        return 0
    def _phi(window):
        # All overlapping subsequences of the given window length.
        subsequences = np.array(
            [x[start : start + window] for start in range(n_samples - window + 1)]
        )
        # Chebyshev distance between every pair of subsequences; count how
        # many pairs fall within the tolerance r (self-matches included).
        within = (
            np.max(
                np.abs(subsequences[:, np.newaxis] - subsequences[np.newaxis, :]),
                axis=2,
            )
            <= r
        )
        counts = np.sum(within, axis=0) / (n_samples - window + 1)
        return np.sum(np.log(counts)) / (n_samples - window + 1.0)
    return np.abs(_phi(m) - _phi(m + 1))
@set_property("fctype", "simple")
def fourier_entropy(x, bins):
    """
    Calculate the binned entropy of the power spectral density of the time series
    (using the welch method).

    The spectrum is normalised by its maximum before binning, so the result
    depends only on the shape of the spectrum, not its absolute scale.

    Ref: https://hackaday.io/project/707-complexity-of-a-time-series/details
    Ref: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param bins: the number of equidistant bins passed on to binned_entropy
    :type bins: int
    :return: the value of this feature
    :return type: float
    """
    _, pxx = welch(x, nperseg=min(len(x), 256))
    return binned_entropy(pxx / np.max(pxx), bins)
@set_property("fctype", "simple")
def lempel_ziv_complexity(x, bins):
    """
    Calculate a complexity estimate based on the Lempel-Ziv compression
    algorithm.

    The time series is first discretised into the given number of equidistant
    bins and then scanned from left to right; every time a previously unseen
    sub-word is encountered it is added to a dictionary.  The number of
    dictionary entries divided by the length of the series is returned.

    For example, if the binned series looks like "100111", the different
    sub-words are 1, 0, 01 and 11, so the result is 4/6 = 0.66.

    Ref: https://github.com/Naereen/Lempel-Ziv_Complexity/blob/master/src/lempel_ziv_complexity.py

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param bins: the number of bins used to discretise the series
    :type bins: int
    :return: the value of this feature
    :return type: float
    """
    values = np.asarray(x)
    bin_edges = np.linspace(np.min(values), np.max(values), bins + 1)[1:]
    symbols = np.searchsorted(bin_edges, values, side="left")
    dictionary = set()
    total = len(symbols)
    start, length = 0, 1
    while start + length <= total:
        # Convert to tuple: slices of numpy arrays are not hashable.
        word = tuple(symbols[start : start + length])
        if word in dictionary:
            # Known sub-word: extend it by one symbol and retry.
            length += 1
        else:
            dictionary.add(word)
            start += length
            length = 1
    return len(dictionary) / total
@set_property("fctype", "simple")
def permutation_entropy(x, tau, dimension):
    """
    Calculate the permutation entropy.

    The series is chopped into overlapping windows of length ``dimension``
    starting every ``tau`` positions.  Each window is replaced by the
    permutation describing the ordinal ranking of its values, and the entropy
    (natural logarithm) of the permutation frequencies is returned.

    Ref: https://www.aptech.com/blog/permutation-entropy/
         Bandt, Christoph and Bernd Pompe.
         "Permutation entropy: a natural complexity measure for time series."
         Physical review letters 88 17 (2002): 174102.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param tau: the embedding time delay between consecutive windows
    :type tau: int
    :param dimension: the embedding dimension (window length)
    :type dimension: int
    :return: the value of this feature (NaN if no window fits)
    :return type: float
    """
    windows = _into_subchunks(x, dimension, tau)
    if len(windows) == 0:
        return np.nan
    # Double argsort maps each window to its ordinal permutation, see
    # https://stackoverflow.com/questions/54459554/numpy-find-index-in-sorted-array-in-an-efficient-way
    ranks = np.argsort(np.argsort(windows))
    # Frequency of each distinct permutation ...
    _, frequencies = np.unique(ranks, axis=0, return_counts=True)
    probabilities = frequencies / len(ranks)
    # ... and the entropy of those frequencies.
    return -np.sum(probabilities * np.log(probabilities))
@set_property("fctype", "simple")
def autocorrelation(x, lag):
    """
    Calculates the autocorrelation of the specified lag, according to the formula [1]

    .. math::

        \\frac{1}{(n-l)\\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu)

    where :math:`n` is the length of the time series :math:`X_i`, :math:`\\sigma^2` its variance and :math:`\\mu` its
    mean. `l` denotes the lag.

    .. rubric:: References

    [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param lag: the lag
    :type lag: int
    :return: the value of this feature (NaN if the series is too short or constant)
    :return type: float
    """
    # If a Series were used directly, the index-aligned product below would
    # effectively square the series, so work on the raw values.
    if isinstance(x, pd.Series):
        x = x.values
    # Fix: previously only len(x) < lag was rejected, so len(x) == lag slipped
    # through and divided by (len(x) - lag) == 0 below (0/0 with a warning).
    if len(x) <= lag:
        return np.nan
    # Slice the relevant subseries based on the lag
    y1 = x[: (len(x) - lag)]
    y2 = x[lag:]
    # Subtract the mean of the whole series x
    x_mean = np.mean(x)
    # The result is sometimes referred to as "covariation"
    sum_product = np.sum((y1 - x_mean) * (y2 - x_mean))
    # Return the normalized unbiased covariance; a (near-)constant series has
    # no meaningful autocorrelation.
    v = np.var(x)
    if np.isclose(v, 0):
        return np.nan
    return sum_product / ((len(x) - lag) * v)
@set_property("fctype", "simple")
def quantile(x, q):
    """
    Calculates the q quantile of x. This is the value of x greater than q% of the ordered values from x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param q: the quantile to calculate
    :type q: float
    :return: the value of this feature (NaN for an empty series)
    :return type: float
    """
    # The quantile of an empty series is undefined.
    if not len(x):
        return np.nan
    return np.quantile(x, q)
@set_property("fctype", "simple")
def number_crossing_m(x, m):
    """
    Calculates the number of crossings of x on m. A crossing is defined as two sequential values where the first value
    is lower than m and the next is greater, or vice-versa. If you set m to zero, you will get the number of zero
    crossings.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param m: the threshold for the crossing
    :type m: float
    :return: the value of this feature
    :return type: int
    """
    values = x if isinstance(x, (np.ndarray, pd.Series)) else np.asarray(x)
    # A crossing happens wherever the "above threshold" flag flips, see
    # https://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python
    above = values > m
    return np.where(np.diff(above))[0].size
@set_property("fctype", "simple")
@set_property("minimal", True)
def maximum(x):
    """
    Calculates the highest value of the time series x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    # numpy handles both ndarrays and plain sequences.
    return np.asarray(x).max()
@set_property("fctype", "simple")
@set_property("minimal", True)
def absolute_maximum(x):
    """
    Calculates the highest absolute value of the time series x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature (NaN for an empty series)
    :return type: float
    """
    # An empty series has no maximum; mirror the NaN convention.
    if len(x) == 0:
        return np.nan
    return np.abs(x).max()
@set_property("fctype", "simple")
@set_property("minimal", True)
def minimum(x):
    """
    Calculates the lowest value of the time series x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    return np.asarray(x).min()
@set_property("fctype", "simple")
def value_count(x, value):
    """
    Count occurrences of `value` in time series x.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param value: the value to be counted
    :type value: int or float
    :return: the count
    :rtype: int
    """
    series = x if isinstance(x, (np.ndarray, pd.Series)) else np.asarray(x)
    # NaN != NaN, so NaN occurrences cannot be counted via equality.
    if np.isnan(value):
        return np.isnan(series).sum()
    return series[series == value].size
@set_property("fctype", "simple")
def range_count(x, min, max):
    """
    Count observed values within the interval [min, max).

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param min: the inclusive lower bound of the range
    :type min: int or float
    :param max: the exclusive upper bound of the range
    :type max: int or float
    :return: the count of values within the range
    :rtype: int
    """
    # NOTE: ``min``/``max`` shadow the builtins, but the parameter names are
    # part of the public interface and must stay for keyword callers.
    in_range = (x >= min) & (x < max)
    return np.sum(in_range)
@set_property("fctype", "combiner")
def friedrich_coefficients(x, param):
    """
    Coefficients of polynomial :math:`h(x)`, which has been fitted to
    the deterministic dynamics of Langevin model

    .. math::

        \\dot{x}(t) = h(x(t)) + \\mathcal{N}(0,R)

    as described by [1].  For short time-series this method is highly
    dependent on the parameters.

    .. rubric:: References

    | [1] Friedrich et al. (2000): Physics Letters A 271, p. 217-222
    | *Extracting model equations from experimental data*

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"m": x, "r": y, "coeff": z} with x being positive integer,
                  the order of polynomial to fit, y positive float, the number of quantiles to
                  use for averaging, and z a positive integer selecting the returned coefficient
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    # Cache of fitted coefficients per (m, r): {m: {r: coefficients}}
    coefficient_cache = defaultdict(dict)
    # Results keyed like "coeff_3__m_10__r_2"
    results = {}
    for combination in param:
        order = combination["m"]
        n_quantiles = combination["r"]
        index = combination["coeff"]
        assert index >= 0, "Coefficients must be positive or zero. Found {}".format(
            index
        )
        # Fit the Friedrich polynomial only once per (m, r) pair.
        if order not in coefficient_cache or n_quantiles not in coefficient_cache[order]:
            coefficient_cache[order][n_quantiles] = _estimate_friedrich_coefficients(
                x, order, n_quantiles
            )
        key = "coeff_{}__m_{}__r_{}".format(index, order, n_quantiles)
        try:
            results[key] = coefficient_cache[order][n_quantiles][index]
        except IndexError:
            # Requested coefficient index beyond the fitted polynomial order.
            results[key] = np.nan
    return list(results.items())
@set_property("fctype", "simple")
def max_langevin_fixed_point(x, r, m):
    """
    Largest fixed point of dynamics :math:`argmax_x \\{h(x)=0\\}` estimated from polynomial :math:`h(x)`,
    which has been fitted to the deterministic dynamics of Langevin model

    .. math::

        \\dot{x}(t) = h(x(t)) + R \\mathcal{N}(0,1)

    as described by

        Friedrich et al. (2000): Physics Letters A 271, p. 217-222
        *Extracting model equations from experimental data*

    For short time-series this method is highly dependent on the parameters.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param m: order of polynomial to fit for estimating fixed points of dynamics
    :type m: int
    :param r: number of quantiles to use for averaging
    :type r: float
    :return: Largest fixed point of deterministic dynamics (NaN if the roots cannot be computed)
    :return type: float
    """
    # Fit the polynomial h via the module-level helper, then take the largest
    # real part among its roots as the dominant fixed point.
    coeff = _estimate_friedrich_coefficients(x, m, r)
    try:
        max_fixed_point = np.max(np.real(np.roots(coeff)))
    except (np.linalg.LinAlgError, ValueError):
        # np.roots fails e.g. when the fit produced degenerate coefficients.
        return np.nan
    return max_fixed_point
@set_property("fctype", "combiner")
def agg_linear_trend(x, param):
    """
    Calculates a linear least-squares regression for values of the time series that were aggregated over chunks versus
    the sequence from 0 up to the number of chunks minus one.

    This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.

    The parameter attr controls which of the characteristics are returned. Possible extracted attributes are "pvalue",
    "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more information.

    The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.

    Further, the aggregation function is controlled by "f_agg", which can be "max", "min", "mean" or "median".

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f an string and l an int
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    # todo: we could use the index of the DataFrame here
    # Cache of fitted regressions: {f_agg: {chunk_len: linregress result or NaN}}
    calculated_agg = defaultdict(dict)
    res_data = []
    res_index = []
    for parameter_combination in param:
        chunk_len = parameter_combination["chunk_len"]
        f_agg = parameter_combination["f_agg"]
        if f_agg not in calculated_agg or chunk_len not in calculated_agg[f_agg]:
            if chunk_len >= len(x):
                # Not enough data for more than one chunk; mark as not computable.
                calculated_agg[f_agg][chunk_len] = np.NaN
            else:
                # _aggregate_on_chunks (module helper) reduces each chunk with f_agg.
                aggregate_result = _aggregate_on_chunks(x, f_agg, chunk_len)
                lin_reg_result = linregress(
                    range(len(aggregate_result)), aggregate_result
                )
                calculated_agg[f_agg][chunk_len] = lin_reg_result
        attr = parameter_combination["attr"]
        # The chunk_len >= len(x) check is repeated here because the cached
        # entry is a bare NaN, which has no regression attributes to read.
        if chunk_len >= len(x):
            res_data.append(np.NaN)
        else:
            res_data.append(getattr(calculated_agg[f_agg][chunk_len], attr))
        res_index.append(
            'attr_"{}"__chunk_len_{}__f_agg_"{}"'.format(attr, chunk_len, f_agg)
        )
    return zip(res_index, res_data)
@set_property("fctype", "combiner")
def energy_ratio_by_chunks(x, param):
    """
    Calculates the sum of squares of chunk i out of N chunks expressed as a ratio with the sum of squares over the whole
    series.

    Takes as input parameters the number num_segments of segments to divide the series into and segment_focus
    which is the segment number (starting at zero) to return a feature on.

    If the length of the time series is not a multiple of the number of segments, the remaining data points are
    distributed on the bins starting from the first. For example, if your time series consists of 8 entries, the
    first two bins will contain 3 and the last two values, e.g. `[ 0., 1., 2.], [ 3., 4., 5.]` and `[ 6., 7.]`.

    Note that the answer for `num_segments = 1` is a trivial "1" but we handle this scenario
    in case somebody calls it. Sum of the ratios should be 1.0.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"num_segments": N, "segment_focus": i} with N, i both ints
    :return: the feature values
    :return type: list of tuples (index, data)
    """
    values = []
    labels = []
    total_energy = np.sum(x ** 2)
    for combination in param:
        n_chunks = combination["num_segments"]
        focus = combination["segment_focus"]
        assert focus < n_chunks
        assert n_chunks > 0
        if total_energy == 0:
            # A zero series carries no energy, so the ratio is undefined.
            values.append(np.nan)
        else:
            chunk = np.array_split(x, n_chunks)[focus]
            values.append(np.sum(chunk ** 2.0) / total_energy)
        labels.append(
            "num_segments_{}__segment_focus_{}".format(n_chunks, focus)
        )
    # Materialize as list for Python 3 compatibility with name handling
    return list(zip(labels, values))
@set_property("fctype", "combiner")
@set_property("input", "pd.Series")
@set_property("index_type", pd.DatetimeIndex)
def linear_trend_timewise(x, param):
    """
    Calculate a linear least-squares regression for the values of the time series versus the sequence from 0 to
    length of the time series minus one.

    This feature uses the datetime index of the series as the independent
    variable (converted to hours since the first timestamp), so the index
    must be of a datetime dtype.

    Possible extracted attributes are "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of
    linregress for more information.

    :param x: the time series to calculate the feature of. The index must be datetime.
    :type x: pandas.Series
    :param param: contains dictionaries {"attr": x} with x an string, the attribute name of the regression model
    :type param: list
    :return: the different feature values
    :return type: list
    """
    index = x.index
    # Elapsed time since the first observation, expressed in hours.
    elapsed_hours = np.asarray((index - index[0]).total_seconds() / float(3600))
    fit = linregress(elapsed_hours, x.values)
    return [
        ('attr_"{}"'.format(cfg["attr"]), getattr(fit, cfg["attr"]))
        for cfg in param
    ]
@set_property("fctype", "simple")
def count_above(x, t):
    """
    Returns the percentage of values in x that are higher than or equal to t
    (note the comparison is inclusive).

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param t: value used as threshold
    :type t: float
    :return: the value of this feature
    :return type: float
    """
    n_above = np.sum(x >= t)
    return n_above / len(x)
@set_property("fctype", "simple")
def count_below(x, t):
    """
    Returns the percentage of values in x that are lower than or equal to t
    (note the comparison is inclusive).

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param t: value used as threshold
    :type t: float
    :return: the value of this feature
    :return type: float
    """
    n_below = np.sum(x <= t)
    return n_below / len(x)
@set_property("fctype", "simple")
def benford_correlation(x):
    """
    Useful for anomaly detection applications [1][2]. Returns the correlation from first digit distribution when
    compared to the Newcomb-Benford's Law distribution [3][4].

    .. math::

        P(d)=\\log_{10}\\left(1+\\frac{1}{d}\\right)

    where :math:`P(d)` is the Newcomb-Benford distribution for the leading digit :math:`d \\in \\{1, ..., 9\\}`.

    .. rubric:: References

    | [1] A Statistical Derivation of the Significant-Digit Law, Theodore P. Hill, Statistical Science, 1995
    | [2] The significant-digit phenomenon, Theodore P. Hill, The American Mathematical Monthly, 1995
    | [3] The law of anomalous numbers, Frank Benford, Proceedings of the American philosophical society, 1938
    | [4] Note on the frequency of use of the different digits in natural numbers, Simon Newcomb, American Journal of
    | mathematics, 1881

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    values = np.asarray(x)
    # Leading decimal digit of every sample; NaN/inf are mapped to finite
    # numbers first via nan_to_num.
    leading_digits = np.array(
        [
            int(str(np.format_float_scientific(v))[:1])
            for v in np.abs(np.nan_to_num(values))
        ]
    )
    # Theoretical Newcomb-Benford distribution over digits 1..9.
    benford = np.array([np.log10(1 + 1 / d) for d in range(1, 10)])
    # Observed relative frequency of each leading digit.
    observed = np.array([(leading_digits == d).mean() for d in range(1, 10)])
    # Pearson correlation between the theoretical and observed distributions;
    # np.corrcoef returns a 2x2 matrix, the off-diagonal entry is the value.
    return np.corrcoef(benford, observed)[0, 1]
@set_property("fctype", "combiner")
def matrix_profile(x, param):
    """
    Calculates the 1-D Matrix Profile[1] and returns Tukey's Five Number Set plus the mean of that Matrix Profile.

    .. rubric:: References

    | [1] Yeh et.al (2016), IEEE ICDM

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries
        {"sample_pct": x, "threshold": y, "feature": z}
        with sample_pct and threshold being parameters of the matrixprofile
        package https://matrixprofile.docs.matrixprofile.org/api.html#matrixprofile-compute
        and feature being one of "min", "max", "mean", "median", "25", "75"
        and decides which feature of the matrix profile to extract
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    :raises ValueError: if an unknown "feature" name is requested
    """
    x = np.asarray(x)
    def _calculate_mp(**kwargs):
        """Calculate the matrix profile using the specified window, or the max subsequence if no window is specified"""
        try:
            if "windows" in kwargs:
                # "mp" is the profile array produced by mp.compute
                # (mp is the matrixprofile package, imported at module level).
                m_p = mp.compute(x, **kwargs)["mp"]
            else:
                # Take the last row of the pan-matrix-profile ("pmp") —
                # presumably the profile for the maximum window; confirm
                # against the matrixprofile documentation.
                m_p = mp.algorithms.maximum_subsequence(x, include_pmp=True, **kwargs)[
                    "pmp"
                ][-1]
            return m_p
        except NoSolutionPossible:
            # Sentinel: a single-NaN profile marks "could not be computed".
            return [np.nan]
    # The already calculated matrix profiles, keyed by their kwargs
    matrix_profiles = {}
    # The results
    res = {}
    for kwargs in param:
        kwargs = kwargs.copy()
        key = convert_to_output_format(kwargs)
        # "feature" is popped so the remaining kwargs identify the profile.
        feature = kwargs.pop("feature")
        featureless_key = convert_to_output_format(kwargs)
        if featureless_key not in matrix_profiles:
            matrix_profiles[featureless_key] = _calculate_mp(**kwargs)
        m_p = matrix_profiles[featureless_key]
        # Set all features to nan if Matrix Profile is nan (cannot be computed)
        if len(m_p) == 1:
            res[key] = np.nan
        # Handle all other Matrix Profile instances
        else:
            # Non-finite entries (inf/NaN) are excluded from all statistics.
            finite_indices = np.isfinite(m_p)
            if feature == "min":
                res[key] = np.min(m_p[finite_indices])
            elif feature == "max":
                res[key] = np.max(m_p[finite_indices])
            elif feature == "mean":
                res[key] = np.mean(m_p[finite_indices])
            elif feature == "median":
                res[key] = np.median(m_p[finite_indices])
            elif feature == "25":
                res[key] = np.percentile(m_p[finite_indices], 25)
            elif feature == "75":
                res[key] = np.percentile(m_p[finite_indices], 75)
            else:
                raise ValueError(f"Unknown feature {feature} for the matrix profile")
    return [(key, value) for key, value in res.items()]
@set_property("fctype", "combiner")
def query_similarity_count(x, param):
    """
    This feature calculator accepts an input query subsequence parameter,
    compares the query (under z-normalized Euclidean distance) to all
    subsequences within the time series, and returns a count of the number
    of times the query was found in the time series (within some predefined
    maximum distance threshold). Note that this feature will always return
    `np.nan` when no query subsequence is provided and so users will need
    to enable this feature themselves.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries
        {"query": Q, "threshold": thr, "normalize": norm}
        with `Q` (numpy.ndarray), the query subsequence to compare the
        time series against. If `Q` is omitted then `np.nan` is returned.
        Additionally, `thr` (float), the maximum
        z-normalized Euclidean distance threshold for which to
        increment the query similarity count. If `thr` is omitted
        then a default threshold of `thr=0.0` is used, which
        corresponds to finding exact matches to `Q`. Finally, for
        non-normalized (i.e., without z-normalization) Euclidean set
        `norm` (bool) to `False`.
    :type param: list
    :return x: the different feature values
    :return type: int
    """
    res = {}
    T = np.asarray(x).astype(float)
    for kwargs in param:
        key = convert_to_output_format(kwargs)
        normalize = kwargs.get("normalize", True)
        threshold = kwargs.get("threshold", 0.0)
        Q = kwargs.get("query", None)
        count = np.nan
        # Fix: the query must only be converted when it is present;
        # np.asarray(None).astype(float) raises a TypeError, so an omitted
        # query used to crash instead of yielding NaN as documented.
        if Q is not None:
            Q = np.asarray(Q).astype(float)
            # stumpy requires a query of at least 3 points.
            if Q.size >= 3:
                if normalize:
                    distance_profile = stumpy.core.mass(Q, T)
                else:
                    distance_profile = stumpy.core.mass_absolute(Q, T)
                count = np.sum(distance_profile <= threshold)
        res[key] = count
    return [(key, value) for key, value in res.items()]
|
blue-yonder/tsfresh
|
tsfresh/feature_extraction/feature_calculators.py
|
Python
|
mit
| 81,590
|
[
"ADF",
"DIRAC"
] |
5f86218a78f663cc5d8d7c27c88cc3142bd67cf12fe32e743d03e98d23d19d1d
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies,
compat_etree_Element,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
str_or_none,
strip_or_none,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url" attributes with the same semantics as videos
(see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.
        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.
        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.
        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in geo bypass context passed as first argument. It may
        contain following fields:
            countries:  List of geo unrestricted countries (similar
                        to _GEO_COUNTRIES)
            ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                        (similar to _GEO_IP_BLOCKS)
        """
        # Do nothing if a fake IP is already installed (e.g. by an earlier call).
        if not self._x_forwarded_for_ip:
            # Geo bypass mechanism is explicitly disabled by user
            if not self._downloader.params.get('geo_bypass', True):
                return
            if not geo_bypass_context:
                geo_bypass_context = {}
            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }
            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.
            # Path 1: bypassing based on IP block in CIDR notation
            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)
            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s as X-Forwarded-For.'
                        % self._x_forwarded_for_ip)
                # Path 1 succeeded; do not also try the country-code path.
                return
            # Path 2: bypassing based on country code
            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self._downloader.params.get('geo_bypass_country', None)
            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)
            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
                        % (self._x_forwarded_for_ip, country.upper()))
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            # Up to two attempts: the second happens only when a geo
            # restriction error triggered installation of a fake IP.
            for _ in range(2):
                try:
                    self.initialize()
                    ie_result = self._real_extract(url)
                    # Record the fake IP so downstream processing can reuse it.
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except ExtractorError:
            # Already a user-facing extractor error; re-raise unchanged.
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
    def __maybe_fake_ip_and_retry(self, countries):
        """Install a fake X-Forwarded-For IP for one of *countries* and report
        whether extraction should be retried. Only acts when geo bypass is
        enabled, no explicit country or fake IP is set, and countries is
        non-empty."""
        if (not self._downloader.params.get('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self._downloader.params.get('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
    def __check_blocked(self, content):
        """Raise ExtractorError when *content* is a known censorship/filtering
        interstitial page (Websense, Indian censorship, Russian RKN blocklist)
        rather than the requested resource."""
        first_block = content[:512]
        # Websense corporate filtering page.
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        # Indian government censorship page.
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        # Russian RKN blocklist page.
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read the body of *urlh*, decode it to text, honor the
        dump_intermediate_pages/write_pages debug options, and check for
        censorship interstitials. Returns the decoded page content."""
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            # base64 keeps the dump copy-paste safe regardless of content.
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            basen = '%s_%s' % (video_id, urlh.geturl())
            if len(basen) > 240:
                # Keep the filename under common filesystem limits while
                # staying unique via an md5 suffix.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown codec name from the page/header; fall back to UTF-8.
            content = webpage_bytes.decode('utf-8', 'replace')
        self.__check_blocked(content)
        return content
    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=5, encoding=None, data=None,
            headers={}, query={}, expected_status=None):
        """
        Return the data of the page as a string.
        Arguments:
        url_or_request -- plain text URL as a string or
            a compat_urllib_request.Request object
        video_id -- Video/playlist/item identifier (string)
        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractionError to be raised,
            otherwise a warning will be reported and extraction continued
        tries -- number of tries
        timeout -- sleep interval between tries
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows to accept failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
        """
        success = False
        try_count = 0
        # Retry loop: only IncompleteRead is retried, at most `tries` times;
        # on exit with success == True, `res` is guaranteed to be bound.
        while success is False:
            try:
                res = self._download_webpage_handle(
                    url_or_request, video_id, note, errnote, fatal,
                    encoding=encoding, data=data, headers=headers, query=query,
                    expected_status=expected_status)
                success = True
            except compat_http_client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            # Non-fatal download failure; propagate the False sentinel.
            return res
        else:
            content, _ = res
            return content
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
    @staticmethod
    def _meta_regex(prop):
        # Regex for a <meta> tag whose identifying attribute (itemprop, name,
        # property, id or http-equiv) equals *prop*; the tag's value is
        # captured in the named group 'content'.  Backreferences \1/\2 keep
        # the opening and closing quote characters consistent.
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
    def _og_search_thumbnail(self, html, **kargs):
        # og:image; non-fatal because a thumbnail is optional metadata.
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
    def _og_search_description(self, html, **kargs):
        # og:description; non-fatal because a description is optional metadata.
        return self._og_search_property('description', html, fatal=False, **kargs)
    def _og_search_title(self, html, **kargs):
        # og:title; fatal by default (kargs can override) since a title is required.
        return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
    def _og_search_url(self, html, **kargs):
        # og:url — the page's canonical URL per OpenGraph.
        return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
    def _dc_search_uploader(self, html):
        # Dublin Core creator meta tag, reported as the uploader.
        return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
    def _twitter_search_player(self, html):
        # Twitter card player URL (twitter:player meta tag).
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')
    def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
        """Find all JSON-LD blocks in *html*, parse them and merge recognized
        metadata via _json_ld; `default`/`fatal` kwargs behave as in
        _search_regex."""
        json_ld_list = list(re.finditer(JSON_LD_RE, html))
        default = kwargs.get('default', NO_DEFAULT)
        # JSON-LD may be malformed and thus `fatal` should be respected.
        # At the same time `default` may be passed that assumes `fatal=False`
        # for _search_regex. Let's simulate the same behavior here as well.
        fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
        json_ld = []
        for mobj in json_ld_list:
            json_ld_item = self._parse_json(
                mobj.group('json_ld'), video_id, fatal=fatal)
            if not json_ld_item:
                continue
            # Flatten: one script block may hold a single object or a list
            if isinstance(json_ld_item, dict):
                json_ld.append(json_ld_item)
            elif isinstance(json_ld_item, (list, tuple)):
                json_ld.extend(json_ld_item)
        if json_ld:
            json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
        if json_ld:
            return json_ld
        # Nothing usable extracted: honour default/fatal like _search_regex
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract JSON-LD')
        else:
            self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
            return {}
    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        """Convert parsed JSON-LD data (a string, dict, or list of dicts) into
        an info dict; only entries whose @type matches *expected_type* (when
        given) are considered.  Returns {} on unusable input."""
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        if isinstance(json_ld, dict):
            json_ld = [json_ld]
        # schema.org interaction @type suffix -> info dict count-field prefix
        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }
        def extract_interaction_statistic(e):
            # Fill <kind>_count fields in `info` from interactionStatistic;
            # the first value seen for a given kind wins.
            interaction_statistic = e.get('interactionStatistic')
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not isinstance(is_e, dict):
                    continue
                if is_e.get('@type') != 'InteractionCounter':
                    continue
                interaction_type = is_e.get('interactionType')
                if not isinstance(interaction_type, compat_str):
                    continue
                interaction_count = int_or_none(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                # interactionType is a URL; map its last path component
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count
        def extract_video_object(e):
            # Merge a schema.org VideoObject's fields into `info`
            assert e['@type'] == 'VideoObject'
            info.update({
                'url': url_or_none(e.get('contentUrl')),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                'filesize': float_or_none(e.get('contentSize')),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
            })
            extract_interaction_statistic(e)
        for e in json_ld:
            # Only top-level JSON-LD objects (those declaring @context) are walked
            if '@context' in e:
                item_type = e.get('@type')
                if expected_type is not None and expected_type != item_type:
                    continue
                if item_type in ('TVEpisode', 'Episode'):
                    episode_name = unescapeHTML(e.get('name'))
                    info.update({
                        'episode': episode_name,
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    if not info.get('title') and episode_name:
                        info['title'] = episode_name
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                        info.update({
                            'season': unescapeHTML(part_of_season.get('name')),
                            'season_number': int_or_none(part_of_season.get('seasonNumber')),
                        })
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type == 'Movie':
                    info.update({
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('dateCreated')),
                    })
                elif item_type in ('Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    extract_video_object(e)
                    # Without an expected_type keep scanning further entries
                    if expected_type is None:
                        continue
                    else:
                        break
                # An entry of another type may still embed a VideoObject
                video = e.get('video')
                if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                    extract_video_object(video)
                if expected_type is None:
                    continue
                else:
                    break
        # Drop fields that were not actually found
        return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
    def _sort_formats(self, formats, field_preference=None):
        """Sort *formats* in place from worst to best quality.  When
        *field_preference* (a list/tuple of field names) is given, ordering is
        purely lexicographic over those fields; otherwise a built-in heuristic
        key is used.  Raises ExtractorError when *formats* is empty."""
        if not formats:
            raise ExtractorError('No video formats found')
        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']
        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])
            if isinstance(field_preference, (list, tuple)):
                # Caller-specified ordering: missing values sort first
                # ('' for format_id, -1 for everything else)
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)
            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5
            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0
            # Tuple compared left-to-right; earlier fields dominate.
            # Missing numeric values sort first as -1, missing format_id as ''.
            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError:
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None, data=None, headers={}, query={}):
        """Download an Adobe f4m manifest and parse it into a list of format
        dicts; returns [] when the download fails non-fatally."""
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal, data=data, headers=headers, query=query)
        # _download_xml returns False (not an exception) on non-fatal failure
        if manifest is False:
            return []
        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        """Parse an f4m manifest XML element into a list of format dicts,
        recursing into nested f4m/m3u8 sub-manifests referenced by media
        entries.  DRM-protected and player-verification manifests yield []."""
        if not isinstance(manifest, compat_etree_Element) and not fatal:
            return []
        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []
        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats
        manifest_base_url = get_base_url(manifest)
        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)
        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        # An audio/* mime type marks the whole manifest as audio-only
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'
        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources.  See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                # With bootstrap info this is a downloadable stream manifest
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
            })
        return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
    def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
                            entry_protocol='m3u8', preference=None,
                            m3u8_id=None, live=False):
        """Parse an HLS playlist document into a list of format dicts.  A
        media playlist is returned as a single format; a master playlist is
        walked EXT-X-STREAM-INF by EXT-X-STREAM-INF, combining rendition
        group (EXT-X-MEDIA) information.  DRM-protected playlists yield []."""
        if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
            return []
        if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc):  # Apple FairPlay
            return []
        formats = []
        # Resolve a possibly-relative URI against the playlist URL
        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))
        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
        # 3. https://github.com/ytdl-org/youtube-dl/issues/18923
        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playist and vice versa.
        # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in master playlist thus we can
        # clearly detect media playlist with this criterion.
        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]
        groups = {}
        last_stream_inf = {}
        def extract_media(x_media_line):
            # Register an EXT-X-MEDIA rendition under its GROUP-ID; audio
            # renditions with their own URI become standalone formats.
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                format_id = []
                for v in (m3u8_id, group_id, name):
                    if v:
                        format_id.append(v)
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(media_url),
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                if media_type == 'AUDIO':
                    f['vcodec'] = 'none'
                formats.append(f)
        def build_stream_name():
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
            # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id
        # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
        # chance to detect video only formats when EXT-X-STREAM-INF tags
        # precede EXT-X-MEDIA tags in HLS manifest such as [3].
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                # A non-tag line is the URI of the stream described by the
                # preceding EXT-X-STREAM-INF attributes
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                stream_name = build_stream_name()
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected, for example, [2]
                # contains EXT-X-STREAM-INF tag which references AUDIO
                # rendition group but does not have CODECS and despite
                # referencing an audio group it represents a complete
                # (with audio and video) format. So, for such cases we will
                # ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                formats.append(f)
                # for DailyMotion
                progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                if progressive_uri:
                    http_f = f.copy()
                    del http_f['manifest_url']
                    http_f.update({
                        'format_id': f['format_id'].replace('hls-', 'http-'),
                        'protocol': 'http',
                        'url': progressive_uri,
                    })
                    formats.append(http_f)
                last_stream_inf = {}
        return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        """Download a SMIL document and parse it into a full info dict;
        returns {} when the (non-fatal) download fails."""
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        """Fetch a SMIL document as XML; False on non-fatal failure."""
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        """Assemble a full info dict (formats, subtitles, metadata,
        thumbnails) from a parsed SMIL document."""
        namespace = self._parse_smil_namespace(smil)
        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
        # The id is derived from the SMIL file name, not the passed video_id
        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        # First matching <meta> of each kind wins
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)
        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
    def _parse_smil_namespace(self, smil):
        # Extract the XML namespace from the root <smil> tag's Clark-notation
        # name ('{ns}smil'); None when the document is namespace-free.
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        """Turn the <video>/<audio> media entries of a SMIL document into a
        list of format dicts, dispatching on protocol/extension (RTMP, HLS,
        HDS, DASH, MSS, plain HTTP)."""
        base = smil_url
        # An explicit base URL in <head><meta> overrides the SMIL's own URL
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break
        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0
        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            # Skip duplicates of an already-seen source URL
            if not src or src in srcs:
                continue
            srcs.append(src)
            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base
            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue
            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()
            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                # A single-entry HLS result inherits this medium's metadata
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
        return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
    """Collect subtitle tracks from the <textstream> elements of a SMIL document.

    Returns a dict mapping language code to a list of {'url', 'ext'} dicts;
    streams without a src and duplicate URLs are skipped.
    """
    subtitles = {}
    seen_srcs = []
    for textstream in smil.findall(self._xpath_ns('.//textstream', namespace)):
        src = textstream.get('src')
        # Skip sourceless streams and de-duplicate by URL.
        if not src or src in seen_srcs:
            continue
        seen_srcs.append(src)
        ext = (textstream.get('ext')
               or mimetype2ext(textstream.get('type'))
               or determine_ext(src))
        lang = (textstream.get('systemLanguage')
                or textstream.get('systemLanguageName')
                or textstream.get('lang')
                or subtitles_lang)
        subtitles.setdefault(lang, []).append({
            'url': src,
            'ext': ext,
        })
    return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
    """Download an XSPF playlist and parse it into a list of entries.

    Returns [] when the download fails and fatal is False.
    """
    xspf = self._download_xml(
        # Fixed typo in the user-visible progress note ('xpsf' -> 'xspf').
        xspf_url, playlist_id, 'Downloading xspf playlist',
        'Unable to download xspf manifest', fatal=fatal)
    if xspf is False:
        return []
    return self._parse_xspf(
        xspf, playlist_id, xspf_url=xspf_url,
        xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
    """Parse an XSPF (XML Shareable Playlist Format) document into entries.

    Each <track> becomes one entry dict with title, description, thumbnail,
    duration and sorted formats built from its <location> children.
    """
    NS_MAP = {
        'xspf': 'http://xspf.org/ns/0/',
        's1': 'http://static.streamone.nl/player/ns/0',
    }

    def ns(path):
        # Shorthand for namespace-qualified XPath lookups.
        return xpath_with_ns(path, NS_MAP)

    entries = []
    for track in xspf_doc.findall(ns('./xspf:trackList/xspf:track')):
        title = xpath_text(track, ns('./xspf:title'), 'title', default=playlist_id)
        description = xpath_text(track, ns('./xspf:annotation'), 'description')
        thumbnail = xpath_text(track, ns('./xspf:image'), 'thumbnail')
        duration = float_or_none(
            xpath_text(track, ns('./xspf:duration'), 'duration'), 1000)

        formats = []
        for location in track.findall(ns('./xspf:location')):
            format_url = urljoin(xspf_base_url, location.text)
            if not format_url:
                continue
            formats.append({
                'url': format_url,
                'manifest_url': xspf_url,
                'format_id': location.get(ns('s1:label')),
                'width': int_or_none(location.get(ns('s1:width'))),
                'height': int_or_none(location.get(ns('s1:height'))),
            })
        self._sort_formats(formats)

        entries.append({
            'id': playlist_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        })
    return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict=None, data=None, headers=None, query=None):
    """Download an MPEG-DASH MPD manifest and parse it into formats.

    Returns [] when the download fails (and fatal is False) or yields no
    document.  Fixed: mutable default arguments ({} for formats_dict,
    headers and query) replaced with None sentinels.
    """
    res = self._download_xml_handle(
        mpd_url, video_id,
        note=note or 'Downloading MPD manifest',
        errnote=errnote or 'Failed to download MPD manifest',
        fatal=fatal, data=data, headers=headers or {}, query=query or {})
    if res is False:
        return []
    mpd_doc, urlh = res
    if mpd_doc is None:
        return []
    mpd_base_url = base_url(urlh.geturl())
    return self._parse_mpd_formats(
        mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
        formats_dict=formats_dict or {}, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict=None, mpd_url=None):
    """
    Parse formats from MPD manifest.
    References:
     1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
        http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
     2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP

    Fixes: (a) '$$' escapes in segment templates are now actually collapsed
    to a literal '$' (the original discarded the result of str.replace);
    (b) mutable default argument formats_dict={} replaced with None.
    """
    if formats_dict is None:
        formats_dict = {}
    # Live (dynamic) manifests are not supported.
    if mpd_doc.get('type') == 'dynamic':
        return []

    namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

    def _add_ns(path):
        return self._xpath_ns(path, namespace)

    def is_drm_protected(element):
        return element.find(_add_ns('ContentProtection')) is not None

    def extract_multisegment_info(element, ms_parent_info):
        ms_info = ms_parent_info.copy()

        # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
        # common attributes and elements. We will only extract relevant
        # for us.
        def extract_common(source):
            segment_timeline = source.find(_add_ns('SegmentTimeline'))
            if segment_timeline is not None:
                s_e = segment_timeline.findall(_add_ns('S'))
                if s_e:
                    ms_info['total_number'] = 0
                    ms_info['s'] = []
                    for s in s_e:
                        r = int(s.get('r', 0))
                        ms_info['total_number'] += 1 + r
                        ms_info['s'].append({
                            't': int(s.get('t', 0)),
                            # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                            'd': int(s.attrib['d']),
                            'r': r,
                        })
            start_number = source.get('startNumber')
            if start_number:
                ms_info['start_number'] = int(start_number)
            timescale = source.get('timescale')
            if timescale:
                ms_info['timescale'] = int(timescale)
            segment_duration = source.get('duration')
            if segment_duration:
                ms_info['segment_duration'] = float(segment_duration)

        def extract_Initialization(source):
            initialization = source.find(_add_ns('Initialization'))
            if initialization is not None:
                ms_info['initialization_url'] = initialization.attrib['sourceURL']

        segment_list = element.find(_add_ns('SegmentList'))
        if segment_list is not None:
            extract_common(segment_list)
            extract_Initialization(segment_list)
            segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
            if segment_urls_e:
                ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
        else:
            segment_template = element.find(_add_ns('SegmentTemplate'))
            if segment_template is not None:
                extract_common(segment_template)
                media = segment_template.get('media')
                if media:
                    ms_info['media'] = media
                initialization = segment_template.get('initialization')
                if initialization:
                    ms_info['initialization'] = initialization
                else:
                    extract_Initialization(segment_template)
        return ms_info

    mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
    formats = []
    for period in mpd_doc.findall(_add_ns('Period')):
        period_duration = parse_duration(period.get('duration')) or mpd_duration
        period_ms_info = extract_multisegment_info(period, {
            'start_number': 1,
            'timescale': 1,
        })
        for adaptation_set in period.findall(_add_ns('AdaptationSet')):
            if is_drm_protected(adaptation_set):
                continue
            adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
            for representation in adaptation_set.findall(_add_ns('Representation')):
                if is_drm_protected(representation):
                    continue
                representation_attrib = adaptation_set.attrib.copy()
                representation_attrib.update(representation.attrib)
                # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                mime_type = representation_attrib['mimeType']
                content_type = mime_type.split('/')[0]
                if content_type == 'text':
                    # TODO implement WebVTT downloading
                    pass
                elif content_type in ('video', 'audio'):
                    # Resolve BaseURL by walking up the element hierarchy
                    # until an absolute URL is assembled.
                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if base_url_e is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    f = {
                        'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                        'manifest_url': mpd_url,
                        'ext': mimetype2ext(mime_type),
                        'width': int_or_none(representation_attrib.get('width')),
                        'height': int_or_none(representation_attrib.get('height')),
                        'tbr': float_or_none(bandwidth, 1000),
                        'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                        'fps': int_or_none(representation_attrib.get('frameRate')),
                        'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                        'format_note': 'DASH %s' % content_type,
                        'filesize': filesize,
                        'container': mimetype2ext(mime_type) + '_dash',
                    }
                    f.update(parse_codecs(representation_attrib.get('codecs')))
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        # First of, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        t = t.replace('$RepresentationID$', representation_id)
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        # Finally collapse '$$' escapes into a literal '$'.
                        # Fixed: str.replace returns a new string and the
                        # original discarded the result, leaving '$$' intact.
                        t = t.replace('$$', '$')
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template
                        # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    elif 'segment_urls' in representation_ms_info:
                        # Segment URLs with no SegmentTimeline
                        # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                        # https://github.com/ytdl-org/youtube-dl/pull/14844
                        fragments = []
                        segment_duration = float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
                        for segment_url in representation_ms_info['segment_urls']:
                            fragment = {
                                location_key(segment_url): segment_url,
                            }
                            if segment_duration:
                                fragment['duration'] = segment_duration
                            fragments.append(fragment)
                        representation_ms_info['fragments'] = fragments
                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = base_url
                    # According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
                    # is not necessarily unique within a Period thus formats with
                    # the same `format_id` are quite possible. There are numerous examples
                    # of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
                    # https://github.com/ytdl-org/youtube-dl/issues/13919)
                    full_info = formats_dict.get(representation_id, {}).copy()
                    full_info.update(f)
                    formats.append(full_info)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
    return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers=None, query=None):
    """Download a Smooth Streaming (ISM) manifest and parse it into formats.

    Returns [] when the download fails (and fatal is False) or yields no
    document.  Fixed: mutable default arguments ({} for headers and query)
    replaced with None sentinels.
    """
    res = self._download_xml_handle(
        ism_url, video_id,
        note=note or 'Downloading ISM manifest',
        errnote=errnote or 'Failed to download ISM manifest',
        fatal=fatal, data=data, headers=headers or {}, query=query or {})
    if res is False:
        return []
    ism_doc, urlh = res
    if ism_doc is None:
        return []
    return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
    """
    Parse formats from ISM manifest.
    References:
     1. [MS-SSTR]: Smooth Streaming Protocol,
        https://msdn.microsoft.com/en-us/library/ff469518.aspx

    Fixed: when a fragment has no @d attribute, its duration is derived
    from the next fragment's @t; the original indexed the current 'c'
    element (stream_fragment) instead of the fragment list
    (stream_fragments), which always raised IndexError and fell back to
    the total duration.
    """
    # Live and DRM-protected streams are not supported.
    if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
        return []

    duration = int(ism_doc.attrib['Duration'])
    timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

    formats = []
    for stream in ism_doc.findall('StreamIndex'):
        stream_type = stream.get('Type')
        if stream_type not in ('video', 'audio'):
            continue
        url_pattern = stream.attrib['Url']
        stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
        stream_name = stream.get('Name')
        for track in stream.findall('QualityLevel'):
            fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
            # TODO: add support for WVC1 and WMAP
            if fourcc not in ('H264', 'AVC1', 'AACL'):
                self.report_warning('%s is not a supported codec' % fourcc)
                continue
            tbr = int(track.attrib['Bitrate']) // 1000
            # [1] does not mention Width and Height attributes. However,
            # they're often present while MaxWidth and MaxHeight are
            # missing, so should be used as fallbacks
            width = int_or_none(track.get('MaxWidth') or track.get('Width'))
            height = int_or_none(track.get('MaxHeight') or track.get('Height'))
            sampling_rate = int_or_none(track.get('SamplingRate'))

            track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
            track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)

            fragments = []
            fragment_ctx = {
                'time': 0,
            }
            stream_fragments = stream.findall('c')
            for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                if not fragment_ctx['duration']:
                    try:
                        # Derive duration from the start time of the next
                        # fragment in the list (fixed: was indexing the
                        # current element, not the list).
                        next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
                    except IndexError:
                        # Last fragment: extend to the total duration.
                        next_fragment_time = duration
                    fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                for _ in range(fragment_repeat):
                    fragments.append({
                        'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
                        'duration': fragment_ctx['duration'] / stream_timescale,
                    })
                    fragment_ctx['time'] += fragment_ctx['duration']

            format_id = []
            if ism_id:
                format_id.append(ism_id)
            if stream_name:
                format_id.append(stream_name)
            format_id.append(compat_str(tbr))

            formats.append({
                'format_id': '-'.join(format_id),
                'url': ism_url,
                'manifest_url': ism_url,
                'ext': 'ismv' if stream_type == 'video' else 'isma',
                'width': width,
                'height': height,
                'tbr': tbr,
                'asr': sampling_rate,
                'vcodec': 'none' if stream_type == 'audio' else fourcc,
                'acodec': 'none' if stream_type == 'video' else fourcc,
                'protocol': 'ism',
                'fragments': fragments,
                '_download_params': {
                    'duration': duration,
                    'timescale': stream_timescale,
                    'width': width or 0,
                    'height': height or 0,
                    'fourcc': fourcc,
                    'codec_private_data': track.get('CodecPrivateData'),
                    'sampling_rate': sampling_rate,
                    'channels': int_or_none(track.get('Channels', 2)),
                    'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                    'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                },
            })
    return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
    """Extract media entries from HTML5 <video>/<audio> (and amp-*) tags.

    Scans `webpage` for media tags, resolves their src/<source>/<track>
    children against `base_url` and returns a list of dicts with
    'formats', 'subtitles' and 'thumbnail' keys.
    """
    def absolute_url(item_url):
        # Resolve a possibly relative URL against the page URL.
        return urljoin(base_url, item_url)

    def parse_content_type(content_type):
        # Derive ext/codec hints from a MIME type like
        # 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"'.
        if not content_type:
            return {}
        ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
        if ctr:
            mimetype, codecs = ctr.groups()
            f = parse_codecs(codecs)
            f['ext'] = mimetype2ext(mimetype)
            return f
        return {}

    def _media_formats(src, cur_media_type, type_info={}):
        # NOTE(review): mutable default argument; it is only read here,
        # never mutated, so the shared dict is harmless but fragile.
        full_url = absolute_url(src)
        ext = type_info.get('ext') or determine_ext(full_url)
        if ext == 'm3u8':
            is_plain_url = False
            formats = self._extract_m3u8_formats(
                full_url, video_id, ext='mp4',
                entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                preference=preference, fatal=False)
        elif ext == 'mpd':
            is_plain_url = False
            formats = self._extract_mpd_formats(
                full_url, video_id, mpd_id=mpd_id, fatal=False)
        else:
            is_plain_url = True
            formats = [{
                'url': full_url,
                'vcodec': 'none' if cur_media_type == 'audio' else None,
            }]
        return is_plain_url, formats

    entries = []
    # amp-video and amp-audio are very similar to their HTML5 counterparts
    # so we will include them right here (see
    # https://www.ampproject.org/docs/reference/components/amp-video)
    # Self-closing tags have no inner content, hence the empty string.
    media_tags = [(media_tag, media_type, '')
                  for media_tag, media_type
                  in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
    media_tags.extend(re.findall(
        # We only allow video|audio followed by a whitespace or '>'.
        # Allowing more characters may end up in significant slow down (see
        # https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
        # http://www.porntrex.com/maps/videositemap.xml).
        r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
    for media_tag, media_type, media_content in media_tags:
        media_info = {
            'formats': [],
            'subtitles': {},
        }
        media_attributes = extract_attributes(media_tag)
        src = strip_or_none(media_attributes.get('src'))
        if src:
            _, formats = _media_formats(src, media_type)
            media_info['formats'].extend(formats)
        media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
        if media_content:
            for source_tag in re.findall(r'<source[^>]+>', media_content):
                s_attr = extract_attributes(source_tag)
                # data-video-src and data-src are non standard but seen
                # several times in the wild
                src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
                if not src:
                    continue
                f = parse_content_type(s_attr.get('type'))
                is_plain_url, formats = _media_formats(src, media_type, f)
                if is_plain_url:
                    # width, height, res, label and title attributes are
                    # all not standard but seen several times in the wild
                    labels = [
                        s_attr.get(lbl)
                        for lbl in ('label', 'title')
                        if str_or_none(s_attr.get(lbl))
                    ]
                    width = int_or_none(s_attr.get('width'))
                    height = (int_or_none(s_attr.get('height'))
                              or int_or_none(s_attr.get('res')))
                    if not width or not height:
                        # Fall back to parsing a '1280x720'-style label.
                        for lbl in labels:
                            resolution = parse_resolution(lbl)
                            if not resolution:
                                continue
                            width = width or resolution.get('width')
                            height = height or resolution.get('height')
                    # Take the first label that parses as a bitrate.
                    for lbl in labels:
                        tbr = parse_bitrate(lbl)
                        if tbr:
                            break
                    else:
                        tbr = None
                    f.update({
                        'width': width,
                        'height': height,
                        'tbr': tbr,
                        'format_id': s_attr.get('label') or s_attr.get('title'),
                    })
                    f.update(formats[0])
                    media_info['formats'].append(f)
                else:
                    media_info['formats'].extend(formats)
            for track_tag in re.findall(r'<track[^>]+>', media_content):
                track_attributes = extract_attributes(track_tag)
                kind = track_attributes.get('kind')
                if not kind or kind in ('subtitles', 'captions'):
                    src = strip_or_none(track_attributes.get('src'))
                    if not src:
                        continue
                    lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                    media_info['subtitles'].setdefault(lang, []).append({
                        'url': absolute_url(src),
                    })
        for f in media_info['formats']:
            f.setdefault('http_headers', {})['Referer'] = base_url
        if media_info['formats'] or media_info['subtitles']:
            entries.append(media_info)
    return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts=None):
    """Derive and extract both HDS (f4m) and HLS (m3u8) formats from an
    Akamai manifest URL.

    hosts: optional dict with 'hds'/'hls' host overrides.  Fixed: mutable
    default argument hosts={} replaced with a None sentinel.
    """
    if hosts is None:
        hosts = {}
    formats = []
    hdcore_sign = 'hdcore=3.7.0'
    # Map the HLS-style URL to its HDS counterpart.
    f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
    hds_host = hosts.get('hds')
    if hds_host:
        f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
    if 'hdcore=' not in f4m_url:
        f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
    f4m_formats = self._extract_f4m_formats(
        f4m_url, video_id, f4m_id='hds', fatal=False)
    for entry in f4m_formats:
        entry.update({'extra_param_to_segment_url': hdcore_sign})
    formats.extend(f4m_formats)
    # And back: the HDS-style URL to its HLS counterpart.
    m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
    hls_host = hosts.get('hls')
    if hls_host:
        m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
    formats.extend(self._extract_m3u8_formats(
        m3u8_url, video_id, 'mp4', 'm3u8_native',
        m3u8_id='hls', fatal=False))
    return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=None):
    """Probe the standard Wowza manifest endpoints (HLS/HDS/DASH/SMIL or
    plain RTMP/RTSP) derived from *url* and return all discovered formats.

    skip_protocols: optional list of protocol ids ('m3u8', 'f4m', 'dash',
    'smil', 'rtmp', 'rtsp') to leave out.  Fixed: mutable default
    argument skip_protocols=[] replaced with a None sentinel.
    """
    if skip_protocols is None:
        skip_protocols = []
    query = compat_urlparse.urlparse(url).query
    url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
    mobj = re.search(
        r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
    url_base = mobj.group('url')
    http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
    formats = []

    def manifest_url(manifest):
        # Build the manifest URL, preserving the original query string.
        m_url = '%s/%s' % (http_base_url, manifest)
        if query:
            m_url += '?%s' % query
        return m_url

    if 'm3u8' not in skip_protocols:
        formats.extend(self._extract_m3u8_formats(
            manifest_url('playlist.m3u8'), video_id, 'mp4',
            m3u8_entry_protocol, m3u8_id='hls', fatal=False))
    if 'f4m' not in skip_protocols:
        formats.extend(self._extract_f4m_formats(
            manifest_url('manifest.f4m'),
            video_id, f4m_id='hds', fatal=False))
    if 'dash' not in skip_protocols:
        formats.extend(self._extract_mpd_formats(
            manifest_url('manifest.mpd'),
            video_id, mpd_id='dash', fatal=False))
    if re.search(r'(?:/smil:|\.smil)', url_base):
        if 'smil' not in skip_protocols:
            rtmp_formats = self._extract_smil_formats(
                manifest_url('jwplayer.smil'),
                video_id, fatal=False)
            for rtmp_format in rtmp_formats:
                # Derive an RTSP variant from each RTMP format.
                rtsp_format = rtmp_format.copy()
                rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                del rtsp_format['play_path']
                del rtsp_format['ext']
                rtsp_format.update({
                    'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                    'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                    'protocol': 'rtsp',
                })
                formats.extend([rtmp_format, rtsp_format])
    else:
        for protocol in ('rtmp', 'rtsp'):
            if protocol not in skip_protocols:
                formats.append({
                    'url': '%s:%s' % (protocol, url_base),
                    'format_id': protocol,
                    'protocol': protocol,
                })
    return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
    """Locate a jwplayer(...).setup({...}) call in *webpage* and return its
    options as a dict, or None when absent or unparseable."""
    mobj = re.search(
        r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
        webpage)
    if not mobj:
        return None
    try:
        options = self._parse_json(
            mobj.group('options'), video_id=video_id,
            transform_source=transform_source)
    except ExtractorError:
        # Unparseable setup blob: behave as if nothing was found.
        return None
    return options if isinstance(options, dict) else None
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
    """Find JWPlayer setup data in *webpage* and parse it into an info dict."""
    data = self._find_jwplayer_data(
        webpage, video_id, transform_source=js_to_json)
    return self._parse_jwplayer_data(data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                         m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Turn a JWPlayer setup/config dict into an info dict.

    Returns the single entry when the playlist has exactly one item,
    otherwise a playlist result.  Note: mutates *jwplayer_data* in place
    while normalizing legacy layouts.
    """
    # JWPlayer backward compatibility: flattened playlists
    # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
    if 'playlist' not in jwplayer_data:
        jwplayer_data = {'playlist': [jwplayer_data]}

    entries = []

    # JWPlayer backward compatibility: single playlist item
    # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
    if not isinstance(jwplayer_data['playlist'], list):
        jwplayer_data['playlist'] = [jwplayer_data['playlist']]

    for video_data in jwplayer_data['playlist']:
        # JWPlayer backward compatibility: flattened sources
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
        if 'sources' not in video_data:
            video_data['sources'] = [video_data]

        this_video_id = video_id or video_data['mediaid']

        formats = self._parse_jwplayer_formats(
            video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
            mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

        subtitles = {}
        tracks = video_data.get('tracks')
        if tracks and isinstance(tracks, list):
            for track in tracks:
                if not isinstance(track, dict):
                    continue
                track_kind = track.get('kind')
                if not track_kind or not isinstance(track_kind, compat_str):
                    continue
                # Only caption/subtitle tracks become subtitles.
                if track_kind.lower() not in ('captions', 'subtitles'):
                    continue
                track_url = urljoin(base_url, track.get('file'))
                if not track_url:
                    continue
                subtitles.setdefault(track.get('label') or 'en', []).append({
                    'url': self._proto_relative_url(track_url)
                })

        entry = {
            'id': this_video_id,
            'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
            'description': clean_html(video_data.get('description')),
            'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
            'timestamp': int_or_none(video_data.get('pubdate')),
            'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
            'subtitles': subtitles,
        }
        # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
        # A lone YouTube URL is delegated to the YouTube extractor.
        if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
            entry.update({
                '_type': 'url_transparent',
                'url': formats[0]['url'],
            })
        else:
            self._sort_formats(formats)
            entry['formats'] = formats
        entries.append(entry)
    if len(entries) == 1:
        return entries[0]
    else:
        return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                            m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
    """Convert JWPlayer 'sources' entries into format dicts.

    Duplicate source URLs are dropped; HLS/DASH/SMIL sources are expanded
    via the corresponding manifest extractors.
    """
    urls = []
    formats = []
    for source in jwplayer_sources_data:
        if not isinstance(source, dict):
            continue
        source_url = urljoin(
            base_url, self._proto_relative_url(source.get('file')))
        # De-duplicate by resolved URL.
        if not source_url or source_url in urls:
            continue
        urls.append(source_url)
        source_type = source.get('type') or ''
        ext = mimetype2ext(source_type) or determine_ext(source_url)
        if source_type == 'hls' or ext == 'm3u8':
            formats.extend(self._extract_m3u8_formats(
                source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id=m3u8_id, fatal=False))
        elif source_type == 'dash' or ext == 'mpd':
            formats.extend(self._extract_mpd_formats(
                source_url, video_id, mpd_id=mpd_id, fatal=False))
        elif ext == 'smil':
            formats.extend(self._extract_smil_formats(
                source_url, video_id, fatal=False))
        # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
        elif source_type.startswith('audio') or ext in (
                'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
            formats.append({
                'url': source_url,
                'vcodec': 'none',
                'ext': ext,
            })
        else:
            height = int_or_none(source.get('height'))
            if height is None:
                # Often no height is provided but there is a label in
                # format like "1080p", "720p SD", or 1080.
                height = int_or_none(self._search_regex(
                    r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                    'height', default=None))
            a_format = {
                'url': source_url,
                'width': int_or_none(source.get('width')),
                'height': height,
                'tbr': int_or_none(source.get('bitrate')),
                'ext': ext,
            }
            if source_url.startswith('rtmp'):
                a_format['ext'] = 'flv'
                # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                # of jwplayer.flash.swf
                # Split "rtmp://host/app/mp4:path" into URL and play path.
                rtmp_url_parts = re.split(
                    r'((?:mp4|mp3|flv):)', source_url, 1)
                if len(rtmp_url_parts) == 3:
                    rtmp_url, prefix, play_path = rtmp_url_parts
                    a_format.update({
                        'url': rtmp_url,
                        'play_path': prefix + play_path,
                    })
                if rtmp_params:
                    a_format.update(rtmp_params)
            formats.append(a_format)
    return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
    """Coerce *v* to an int via int_or_none; on failure either raise
    ExtractorError (fatal=True) or emit a downloader warning.

    Extra keyword arguments are forwarded to int_or_none.
    Fixed: removed a stray debug print() of getattr(v, kwargs['get_attr'])
    that polluted stdout whenever get_attr was used.
    """
    res = int_or_none(v, **kwargs)
    if res is None:
        msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
        if fatal:
            raise ExtractorError(msg)
        else:
            self._downloader.report_warning(msg)
    return res
def _float(self, v, name, fatal=False, **kwargs):
    """Coerce *v* to a float via float_or_none; on failure either raise
    ExtractorError (fatal=True) or emit a downloader warning."""
    res = float_or_none(v, **kwargs)
    if res is not None:
        return res
    msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
    if fatal:
        raise ExtractorError(msg)
    self._downloader.report_warning(msg)
    return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                path='/', secure=False, discard=False, rest=None, **kwargs):
    """Store a cookie in the downloader's cookie jar.

    Fixed: mutable default argument rest={} replaced with a None sentinel
    (a fresh dict is created per call when omitted).
    """
    cookie = compat_cookiejar_Cookie(
        0, name, value, port, port is not None, domain, True,
        domain.startswith('.'), path, True, secure, expire_time,
        discard, None, None, rest if rest is not None else {})
    self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
    """ Return a compat_cookies.SimpleCookie with the cookies for the url """
    request = sanitized_Request(url)
    self._downloader.cookiejar.add_cookie_header(request)
    cookie_header = request.get_header('Cookie')
    return compat_cookies.SimpleCookie(cookie_header)
def _apply_first_set_cookie_header(self, url_handle, cookie):
    """
    Apply first Set-Cookie header instead of the last. Experimental.
    Some sites (e.g. [1-3]) may serve two cookies under the same name
    in Set-Cookie header and expect the first (old) one to be set rather
    than second (new). However, as of RFC6265 the newer one cookie
    should be set into cookie store what actually happens.
    We will workaround this issue by resetting the cookie to
    the first one manually.
    1. https://new.vk.com/
    2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
    3. https://learning.oreilly.com/
    """
    for header, cookies in url_handle.headers.items():
        if header.lower() != 'set-cookie':
            continue
        if sys.version_info[0] >= 3:
            # Python 3 decodes header bytes as latin-1; re-encode so the
            # UTF-8 decode below sees the original raw bytes.
            cookies = cookies.encode('iso-8859-1')
        cookies = cookies.decode('utf-8')
        # First match in the joined header value is the first cookie sent.
        cookie_value = re.search(
            r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
        if cookie_value:
            value, domain = cookie_value.groups()
            self._set_cookie(domain, cookie, value)
            break
def get_testcases(self, include_onlymatching=False):
    # Yield this extractor's test cases, tagging each with the extractor
    # name (class name minus the trailing 'IE').
    t = getattr(self, '_TEST', None)
    if t:
        assert not hasattr(self, '_TESTS'), \
            '%s has _TEST and _TESTS' % type(self).__name__
        tests = [t]
    else:
        tests = getattr(self, '_TESTS', [])
    for t in tests:
        # Skip URL-matching-only cases unless explicitly requested.
        if not include_onlymatching and t.get('only_matching', False):
            continue
        # NOTE: mutates the test dict in place.
        t['name'] = type(self).__name__[:-len('IE')]
        yield t
def is_suitable(self, age_limit):
    """ Test whether the extractor is generally suitable for the given
    age limit (i.e. pornographic sites are not, all others usually are) """
    saw_restricted = False
    for case in self.get_testcases(include_onlymatching=False):
        playlist = case.get('playlist', [])
        if playlist:
            case = playlist[0]
        limit = case.get('info_dict', {}).get('age_limit')
        if not age_restricted(limit, age_limit):
            # At least one test case clears the age gate.
            return True
        saw_restricted = True
    # All cases were restricted -> unsuitable; no cases at all -> suitable.
    return not saw_restricted
def extract_subtitles(self, *args, **kwargs):
    """Return subtitles when the user asked for them, otherwise {}."""
    params = self._downloader.params
    wanted = params.get('writesubtitles', False) or params.get('listsubtitles')
    if not wanted:
        return {}
    return self._get_subtitles(*args, **kwargs)
def _get_subtitles(self, *args, **kwargs):
    # Hook for subclasses; invoked by extract_subtitles().
    raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
    """ Merge subtitle items for one language. Items with duplicated URLs
    will be dropped. """
    seen_urls = {item['url'] for item in subtitle_list1}
    merged = list(subtitle_list1)
    for item in subtitle_list2:
        # Only drop items whose URL already appeared in list1.
        if item['url'] not in seen_urls:
            merged.append(item)
    return merged
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
    """ Merge two subtitle dictionaries, language by language. """
    # Start from dict1; per language, merge dict2's items, dropping
    # duplicate URLs via _merge_subtitle_items.
    ret = dict(subtitle_dict1)
    for lang in subtitle_dict2:
        ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
    return ret
def extract_automatic_captions(self, *args, **kwargs):
    """Return automatic captions when requested by the user, otherwise {}."""
    opts = self._downloader.params
    if not (opts.get('writeautomaticsub', False) or opts.get('listsubtitles')):
        return {}
    return self._get_automatic_captions(*args, **kwargs)
def _get_automatic_captions(self, *args, **kwargs):
    # Hook for subclasses; invoked by extract_automatic_captions().
    raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
    # Only mark videos watched when the option is enabled AND we have
    # either login credentials or a cookie file (otherwise there is no
    # account-side state to update).
    if (self._downloader.params.get('mark_watched', False)
            and (self._get_login_info()[0] is not None
                 or self._downloader.params.get('cookiefile') is not None)):
        self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
    # Hook for subclasses; invoked by mark_watched().
    raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
    """Headers routing geo-verification requests through the configured proxy."""
    proxy = self._downloader.params.get('geo_verification_proxy')
    return {'Ytdl-request-proxy': proxy} if proxy else {}
def _generic_id(self, url):
    # Fallback video id: last URL path component without extension, URL-decoded.
    return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
    # Fallback title: URL basename without extension, URL-decoded.
    return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # Prefix is empty (first result), a positive integer, or 'all'.
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        # Dispatch on the numeric/'all' prefix to decide how many results
        # to fetch, clamping at _MAX_RESULTS with a warning.
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)
        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
|
Orochimarufan/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 141,278
|
[
"VisIt"
] |
022b324a59b7970fe089bb17e473e491836853a6990af821db7e3b80e10acb3e
|
""" This module contains the function
to generate core-shell A@B nanoparticle with FCC structure.
"""
from __future__ import print_function
import random
import copy
from math import sqrt
from ase.neighborlist import NeighborList
from ase.cluster.cubic import FaceCenteredCubic
import numpy as np
from qsar import QSAR
def sphericalFCC(elem, latticeconstant, nlayers):
    r""" Generates spherical cluster of atoms of FCC metal

    Parameters
    ----------
    elem: string
        symbol of chemical element.
    latticeconstant: float
        lattice constant in Angstr.
    nlayers: int
        number of atomic layers passed to FaceCenteredCubic.
        Guards the radius of cluster.

    Returns
    -------
    ase.Atoms object

    Example
    --------
    >>> atoms = sphericalFCC('Ag', 4.09, 8)
    """
    # 1. generate cubical cluster
    surfaces = [(1, 0, 0)]
    layers = [nlayers]
    atoms = FaceCenteredCubic(elem, surfaces, layers, latticeconstant)
    atoms.center()
    # 2. cut all extra atoms from the cube to make it spherical.
    # The cube is symmetric after center(), so center/radius derived from
    # the x-axis apply to all axes.
    Xmin = atoms.positions[:, 0].min()
    Xmax = atoms.positions[:, 0].max()
    C = (Xmin + Xmax) / 2.0
    R = (Xmax - Xmin) / 2.0
    # Vectorized mask-and-delete (previously an O(n^2) per-atom loop);
    # consistent with cut_spherical_cluster().
    dists2 = np.sum((atoms.positions - C) ** 2, axis=1)
    rem = np.nonzero(dists2 > R ** 2)[0]
    if len(rem) > 0:
        del atoms[rem]
    return atoms
def cut_spherical_cluster(atoms, size):
    r""" Cuts spherical cluster from provided atoms object

    Parameters
    ----------
    atoms: ASE.Atoms object
        the original cluster to be cut off
    size: float
        the diameter of resulting cluster, in Angstrom

    Returns
    -------
    ase.Atoms object of resulted cluster

    Example
    --------
    >>> atoms = cut_spherical_cluster(atoms, 10) # 1nm cluster
    """
    pos = atoms.get_positions()
    # Geometric center of the bounding box, per axis.
    center = (np.min(pos, axis=0) + np.max(pos, axis=0)) / 2.0
    radius = size / 2.0
    sq_dists = np.sum((pos - center) ** 2, axis=1)
    outside = np.nonzero(sq_dists > radius ** 2)[0]
    if len(outside) > 0:
        del atoms[outside]
    else:
        print('Warning: no atoms were deleted by cut_spherical_cluster()')
    return atoms
def cut_elliptical_cluster(atoms, Dx, Dy, Dz):
    r""" Cuts 3D ellipsoidal cluster from provided atoms object

    Parameters
    ----------
    atoms: ASE.Atoms object
        the original cluster to be cut off
    Dx, Dy, Dz: float
        ellipse diameters along x, y, z, in Angstrom

    Returns
    -------
    ase.Atoms object of resulted cluster

    Example
    --------
    >>> atoms = cut_elliptical_cluster(atoms, 10, 10, 5)
    """
    atoms.center()
    pos = atoms.get_positions()
    # Geometric center of the bounding box, per axis.
    center = (np.min(pos, axis=0) + np.max(pos, axis=0)) / 2.0
    semi_axes = np.array([Dx / 2.0, Dy / 2.0, Dz / 2.0])
    # Normalized ellipsoid equation: points with value > 1 lie outside.
    dists = np.sum(((pos - center) / semi_axes) ** 2, axis=1)
    rem = np.nonzero(dists > 1)[0]
    if len(rem) > 0:
        del atoms[rem]
    else:
        # Bug fix: the warning previously named cut_spherical_cluster().
        print('Warning: no atoms were deleted by cut_elliptical_cluster()')
    return atoms
def CoreShellFCC(atoms, type_a, type_b, ratio, a_cell, n_depth=-1):
    r"""This routine generates cluster with ideal core-shell architecture,
    so that atoms of type_a are placed on the surface
    and atoms of type_b are forming the core of nanoparticle.
    The 'surface' of nanoparticle is defined as atoms
    with unfinished coordination shell.

    Parameters
    ----------
    atoms: ase.Atoms
        ase Atoms object, containing atomic cluster.
    type_a: string
        Symbol of chemical element to be placed on the shell.
    type_b: string
        Symbol of chemical element to be placed in the core.
    ratio: float
        Guards the number of shell atoms, type_a:type_b = ratio:(1-ratio)
    a_cell: float
        Parameter of FCC cell, in Angstrom.
        Required for calculation of neighbor distances in for infinite
        crystal.
    n_depth: int
        Number of layers of the shell formed by atoms ratio.
        Default value -1 is ignored and n_depth is calculated according
        ratio. If n_depth is set then value of ratio is ignored.

    Returns
    -------
    Function returns ASE atoms object which
    contains bimetallic core-shell cluster

    Notes
    -----
    The criterion of the atom beeing on the surface is incompletnes
    of it's coordination shell. For the most outer atoms the first
    coordination shell will be incomplete (coordination number
    is less then 12 for FCC), for the second layer --
    second coordination shell( CN1 + CN2 < 12 + 6) and so on.
    In this algorithm each layer is tagged by the number
    ('depth'), take care if used with other routines
    dealing with tags (add_adsorbate etc).
    First, atoms with unfinished first shell are replaced
    by atoms type_a, then second, and so on.
    The last depth surface layer is replaced by random
    to maintain given ratio value.

    Example
    --------
    >>> atoms = FaceCenteredCubic('Ag',
      [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7,8,7], 4.09)
    >>> atoms = CoreShellFCC(atoms, 'Pt', 'Ag', 0.6, 4.09)
    >>> view(atoms)
    """
    # 0 < ratio < 1
    target_x = ratio
    if n_depth != -1:
        target_x = 1  # needed to label all needed layeres

    def fill_by_tag(atoms, chems, tag):
        """Replaces all atoms within selected layer"""
        for i in range(0, len(atoms)):
            if atoms[i].tag == tag:
                chems[i] = type_a
        return
    # coord numbers for FCC:
    coord_nums = [1, 12, 6, 24, 12, 24, 8, 48, 6, 36, 24, 24, 24, 72, 48,
                  12, 48, 30, 72, 24]
    # coordination radii obtained from this array as R = sqrt(coord_radii)*a/2
    coord_radii = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 30,
                   32, 34, 36, 38, 40]
    ## generate FCC cluster ##
    #atoms = FaceCenteredCubic(type_b, surfaces, layers, a_cell)
    n_atoms = len(atoms)
    ## tag layers ##
    # Tag each surface layer with its depth: atoms whose cumulative
    # coordination number (up to shell n_shell) is below the ideal FCC
    # value have an incomplete shell and belong to layer n_shell.
    positions = [0]  # number of positions in layer
    n_tag = 0  # number of tags to check if there is enought layers
    n_shell = 0  # depth of the shell
    while (n_tag < n_atoms * target_x):
        n_shell += 1
        if (n_depth != -1)and(n_shell > n_depth):
            break
        # Neighbor cutoff = coordination radius of shell n_shell (+epsilon).
        neiblist = NeighborList(
            [
                a_cell / 2.0 * sqrt(coord_radii[n_shell]) / 2.0 + 0.0001
            ] * n_atoms,
            self_interaction=False, bothways=True
        )
        neiblist.update(atoms)
        for i in range(0, n_atoms):
            indeces, offsets = neiblist.get_neighbors(i)
            if (atoms[i].tag == 0):
                if (len(indeces) < sum(coord_nums[1:n_shell + 1])):
                    # coord shell is not full -> atom is on surface!
                    atoms[i].tag = n_shell
                    n_tag += 1
        # save the count of positions at each layer:
        positions.append(n_tag - sum(positions[0:n_shell]))
    ## populate layers ##
    chems = atoms.get_chemical_symbols()
    n_type_a = 0  # number of changes B -> A
    if (n_tag < n_atoms * target_x)and(n_depth == -1):
        # raise exception?
        return None
    else:
        n_filled = n_shell - 1  # number of totally filled layers
        ilayer = 1
        while (ilayer < n_filled + 1):
            fill_by_tag(atoms, chems, ilayer)
            n_type_a += positions[ilayer]
            ilayer += 1
        # Outermost tagged layer is filled at random to hit the target ratio.
        while (n_type_a < n_atoms * target_x)and(n_depth == -1):
            i = random.randint(0, n_atoms - 1)
            if (atoms[i].tag == n_shell):
                if (chems[i] == type_b):
                    chems[i] = type_a
                    n_type_a += 1
        atoms.set_chemical_symbols(chems)
    ## check number of atoms ##
    checkn_a = 0
    for element in chems:
        if element == type_a:
            checkn_a += 1
    assert n_type_a == checkn_a
    return atoms
def CoreShellCN(atoms, type_a, type_b, ratio, R_min = 1.5, CN_max=12, n_depth=-1):
    r"""This routine generates cluster with ideal core-shell architecture,
    so that atoms of type_a are placed on the surface
    and atoms of type_b are forming the core of nanoparticle.
    The 'surface' of nanoparticle is defined as atoms
    with unfinished first coordination shell.
    This algorithm *does not* requires explicit knowledge of
    far coordination shells parameters, as it was in CoreShellFCC(..)

    Parameters
    ----------
    atoms: ase.Atoms
        ase Atoms object, containing atomic cluster.
    type_a: string
        Symbol of chemical element to be placed on the shell.
    type_b: string
        Symbol of chemical element to be placed in the core.
    ratio: float
        Guards the number of shell atoms, type_a:type_b = ratio:(1-ratio)
    R_min: float
        Typical bond length. Neighboring atoms within this value are counted
        as coordination numbers.
        Default is 1.5.
    CN_max: float
        Maximum possible coordination number (bulk coordination number).
        Default is 12.
    n_depth: int
        Number of layers of the shell formed by atoms ratio.
        Default value -1 is ignored and n_depth is calculated according
        ratio. If n_depth is set then value of ratio is ignored.

    Returns
    -------
    Function returns ASE atoms object which
    contains bimetallic core-shell cluster

    Example
    --------
    >>> atoms = FaceCenteredCubic('Ag',
      [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7,8,7], 4.09)
    >>> atoms = CoreShellCN(atoms, 'Pt', 'Ag', 0.5)
    >>> view(atoms)
    """
    # 0 < ratio < 1
    target_x = ratio
    if n_depth != -1:
        target_x = 1
    n_atoms = len(atoms)
    n_a = (np.array(atoms.get_chemical_symbols()) == type_a).sum()
    #n_b = (atoms.get_chemical_symbols() == type_b).sum()
    #print n_a
    # NOTE(review): if no type_b atom ever has an unfinished shell the
    # loop below cannot make progress; confirm termination for the
    # intended inputs.
    n_shell = 0  # depth of the shell
    while (n_a < n_atoms * target_x):
        n_shell += 1
        print ("shell: ", n_shell)
        if (n_depth != -1)and(n_shell > n_depth):
            break
        neiblist = NeighborList( [ R_min ] * n_atoms,
                                 self_interaction=False, bothways=True )
        neiblist.update( atoms )
        for i in range( n_atoms ):
            indeces, offsets = neiblist.get_neighbors(i)
            if (atoms[i].symbol == type_b):
                # Count only same-type (core) neighbors toward the CN.
                CN_temp = 0
                for ii in indeces:
                    if atoms[ii].symbol == type_b:
                        CN_temp += 1
                #print "CN_temp: ", CN_temp
                if (CN_temp < CN_max):
                    # coord shell is not full, swap type to type_a!
                    atoms[i].tag = n_shell  # not swap yet, but mark
        # swap atom types now. Stop if target ratio achieved
        for atom in atoms:
            if (atom.tag > 0)&(atom.symbol == type_b):
                if n_a < n_atoms * target_x:
                    atom.symbol = type_a
                    n_a += 1
        #print "n_A: ", n_a
    # end while
    # check number of atoms
    checkn_a = 0
    for element in atoms.get_chemical_symbols():
        if element == type_a:
            checkn_a += 1
    #print "Should be equal: ", n_a, checkn_a
    assert n_a == checkn_a
    return atoms
def hollowCore(atoms, radius):
    """Deprecated alias kept for backward compatibility; use hollow_core()."""
    print('WARNING: hollowCore() is renamed to hollow_core()')
    return hollow_core(atoms, radius)
def hollow_core(atoms, radius):
    r""" Make an empty (hollow) core in the middle of atoms system

    Parameters
    ----------
    atoms: ase.Atoms
        ase Atoms object, containing atomic cluster.
    radius: float
        controls the size of the empty region in the center of cluster.

    Returns
    -------
    ase.Atoms object

    Example
    --------
    >>> atoms = FaceCenteredCubic('Ag',
      [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7,8,7], 4.09)
    >>> atoms = hollow_core(atoms, 5.1)
    >>> view(atoms)
    """
    assert radius > 0
    coords = atoms.positions
    # Center of the bounding box, per axis.
    middle = 0.5 * (coords.min(axis=0) + coords.max(axis=0))
    sq_dist = ((coords - middle) ** 2).sum(axis=1)
    inner = np.nonzero(sq_dist < radius ** 2)[0]
    if len(inner) > 0:
        del atoms[inner]
    else:
        print('Warning: no atoms were deleted by hollow_core()')
    return atoms
def randomize_biatom(atoms, type_a, type_b, ratio):
    """ replace randomly to acheive target conc """
    # Count current composition; any third element is an error.
    n_A = 0
    n_B = 0
    for atom in atoms:
        if atom.symbol == type_a:
            n_A += 1
        elif atom.symbol == type_b:
            n_B += 1
        else:
            raise Exception('Extra chemical element %s!'%atom.symbol)
    #print n_A, n_B
    N = len(atoms)
    #print "conc", n_A *1.0 / N
    r = random.Random()
    while n_A < ratio*N:  # add A atoms randomly
        index = r.randint(0, N-1)
        if (atoms[index].symbol != type_a):
            #print "changing atom #"+str(index)+" to "+type_a
            #prob = probability(dists[index]/Rmax, p)
            #print p
            # NOTE(review): extra 50% coin flip before accepting a swap;
            # it only slows convergence, presumably a leftover of a
            # probability-weighted scheme (see commented-out lines).
            if (r.randint(0, 1000) < 500):
                atoms[index].symbol = type_a
                n_A += 1
    return atoms
def randomize_biatom_13(atoms, type_a, type_b, ratio):
    """ replace randomly by clusters of 13 atoms
    to acheive target conc """
    # Count current composition; any third element is an error.
    n_A = 0
    n_B = 0
    for atom in atoms:
        if atom.symbol == type_a:
            n_A += 1
        elif atom.symbol == type_b:
            n_B += 1
        else:
            raise Exception('Extra chemical element %s!'%atom.symbol)
    #print n_A, n_B
    N = len(atoms)
    nl = NeighborList([1.5]*N, self_interaction=False, bothways=True)  # 2*1.5=3 Angstr. radius
    nl.update(atoms)
    #print "conc", n_A *1.0 / N
    r = random.Random()
    while n_A < ratio*N:  # add A atoms randomly
        index = r.randint(0, N-1)
        if (atoms[index].symbol != type_a):
            #print "changing atom #"+str(index)+" to "+type_a
            #if (r.randint(0, 1000) < 500):
            # Convert the picked atom and its first-shell neighbors
            # (up to ~13 atoms total), stopping at the target ratio.
            atoms[index].symbol = type_a
            n_A += 1
            indeces, offsets = nl.get_neighbors(index)
            for ia in indeces :
                if (atoms[ia].symbol != type_a)&(n_A < ratio*N):
                    atoms[ia].symbol = type_a
                    n_A += 1
    return atoms
def randomize_userfunc(atoms, new_type, user_func):
    """ replace host atoms randomly by new_type of atom
    by user function of probability distribution.
    Concentration is hidden in that function.
    Go throw all atoms of one type."""
    #TODO: backup atoms?
    N = len(atoms)
    qsar = QSAR(atoms)
    r = random.Random()
    # Distances of each atom from (presumably) the cluster center,
    # normalized by the largest one -- TODO confirm QSAR.atom_distances()
    # semantics.
    dists = qsar.atom_distances()
    Rmax = max(dists)
    for i_atom in range(N):
        #r.random() - random float in interval [0,1)
        x = dists[i_atom]/Rmax
        # user_func(x) is the replacement probability at relative radius x.
        if r.random() < user_func(x):
            atoms[i_atom].symbol = new_type
    return atoms
def intermetallideFCC(atoms, A, B, cellconstant):
    """
    Replace atoms type A by atom type B on alternating FCC sublattice
    sites (selected by the parity of the summed primitive-cell indices)
    to obtain an ordered intermetallic structure.
    """
    origin = atoms[0].position
    for atom in atoms:
        if atom.symbol != A:
            continue
        # Coordinates in half-cell units relative to the first atom.
        rel = (atom.position - origin) * 2.0 / cellconstant
        x, y, z = rel[0], rel[1], rel[2]
        # Integer indices along the FCC primitive lattice vectors.
        n1 = round(0.5 * (-x + y + z))
        n2 = round(0.5 * (x - y + z))
        n3 = round(0.5 * (x + y - z))
        # for simple cubic:
        # n1 = 0.5 * x; n2 = 0.5 * y; n3 = 0.5 * z
        if (n1 + n2 + n3) % 2 == 0:
            atom.symbol = B
    #TODO: check and show warning if no changes were made
    return atoms
def janus_z_particle(atoms, A, B, ratio):
    '''
    Two-sided (Janus) particle.
    A - base atom type
    B - atom type filling the lowest-z fraction `ratio` of sites
    '''
    z_coords = atoms.get_positions()[:, 2]
    n_b = int(np.round(len(atoms) * ratio))
    print('target sites: %i' % n_b)
    # Sort sites by z; the bottom n_b become B, the remainder A.
    order = np.argsort(z_coords)
    symbols = np.array(atoms.get_chemical_symbols())
    symbols[order[:n_b]] = B
    symbols[order[n_b:]] = A
    atoms.set_chemical_symbols(symbols)
    return atoms
def hop_shuffle(atoms, A, B, count=10, R=3.0):
    """
    Shuffle atoms in given structure
    by swapping atom types within first coordination shell

    Parameters
    ----------
    atoms: ase.Atoms
        ase Atoms object, containing atomic cluster.
    A, B: string
        symbols of atoms to swap
    count: integer
        number of shuffles
    R: float
        radius of coordination shell, were atoms will be swapped

    Returns
    -------
    Function returns ASE atoms object whith
    shuffled atoms
    """
    n_atoms = len(atoms)
    nswaps = 0
    neiblist = NeighborList([R] * n_atoms,
                            self_interaction=False,
                            bothways=True)
    neiblist.update(atoms)
    rnd = random.Random()
    while nswaps < count:
        i = rnd.randint(0, n_atoms - 1)
        indeces, offsets = neiblist.get_neighbors(i)
        # Bug fix: the second branch previously repeated the B test
        # (`elif atoms[i].symbol == B`), so A-typed picks were never
        # swapped. Handle both species symmetrically.
        if atoms[i].symbol == B:
            other = A
        elif atoms[i].symbol == A:
            other = B
        else:
            continue
        candidates = [ii for ii in indeces if atoms[ii].symbol == other]
        if len(candidates) > 0:
            j = random.choice(candidates)
            # Swap the two symbols and refresh the neighbor list.
            atoms[i].symbol, atoms[j].symbol = other, atoms[i].symbol
            nswaps += 1
            neiblist.update(atoms)
    return atoms
# Ad-hoc manual test drivers: build a cluster, apply one of the routines
# above (most are left commented out), and visualize the result.
if __name__ == '__main__':
    #
    from ase.visualize import view
    # test sphericalFCC
    #atoms = sphericalFCC('Ag', 4.09, 8)
    #view(atoms)
    #raw_input('press enter')
    # ~ atoms = FaceCenteredCubic('Ag', [(1, 0, 0)], [20], latticeconstant=4.09)
    #~ atoms = cut_spherical_cluster(atoms, 40)
    # ~ atoms = cut_elliptical_cluster(atoms, 40, 40, 24)
    # ~ atoms = hollow_core(atoms, radius=12)
    # ~ view(atoms)
    # ~ input('press enter')
    #
    atoms = FaceCenteredCubic(
        'Ag', [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7, 8, 7], 4.09)
    # test core shell
    #atoms = CoreShellFCC(atoms, 'Pt', 'Ag', ratio=0.6, a_cell=4.09) # ratio-based filling
    #atoms = sphericalFCC('Ag', 4.09, 8)
    #atoms = CoreShellFCC(atoms, 'Pt', 'Ag', ratio=0.0, a_cell=4.09, n_depth=1)
    #atoms = randomize_biatom(atoms, 'Pt', 'Ag', ratio=0.6)
    #atoms = randomize_biatom_13(atoms, 'Pt', 'Ag', ratio=0.6)
    #atoms = hollowCore(atoms, 5.1)
    #atoms = CoreShellCN( atoms, 'Pt', 'Ag', 0.5 )
    #atoms = intermetallideFCC( atoms, 'Ag', 'Pt', 4.09 )
    #atoms = hop_shuffle( atoms, 'Pt', 'Ag', count=10)
    # test janus
    atoms = janus_z_particle(atoms, 'Cu', 'Pt', 0.6)
    view(atoms)
|
lavakyan/ase-bimetall
|
coreshell.py
|
Python
|
gpl-2.0
| 20,123
|
[
"ASE",
"CRYSTAL"
] |
01f21c24122a9e35fa7a11aefd83c1edf07f22986ea85e06c5723e89ec965955
|
#!/usr/bin/env python
# Copyright 2012-2014 VPAC
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
import os
# Read the package version from VERSION.txt (first line, stripped).
with open('VERSION.txt', 'r') as f:
    version = f.readline().strip()
def fullsplit(path, result=None):
    """
    Split a pathname into all of its components (the inverse of
    os.path.join) in a platform-neutral way.
    """
    parts = [] if result is None else result
    head, tail = os.path.split(path)
    if head == '':
        return [tail] + parts
    if head == path:
        # An un-splittable root (e.g. '/'); stop recursing.
        return parts
    return fullsplit(head, [tail] + parts)
# Collect dotted package names: every directory under kgapplications that
# contains at least one file.
packages = []
for dirpath, dirnames, filenames in os.walk("kgapplications"):
    # Prune hidden directories in place so os.walk does not descend into
    # them. (Bug fix: the old `del dirnames[i]` inside an enumerate loop
    # skipped the entry following each deletion.)
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]
    if filenames:
        packages.append('.'.join(fullsplit(dirpath)))

# Extra dependencies needed only for the test suite.
tests_require = [
    "factory_boy",
]
setup(
    name="karaage-applications",
    version=version,
    url='https://github.com/Karaage-Cluster/karaage-applications',
    author='Brian May',
    author_email='brian@v3.org.au',
    description='Usage information for Karaage',
    packages=packages,
    license="GPL3+",
    long_description=open('README.rst').read(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: Django",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: GNU General Public License v3 "
        "or later (GPLv3+)",
        # Bug fix: a missing comma here implicitly concatenated this
        # classifier with the "Programming Language" one below, producing
        # one invalid classifier string.
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords="karaage cluster user administration",
    package_data={
        '': ['*.css', '*.html', '*.js', '*.png', '*.gif', '*.map', '*.txt'],
    },
    install_requires=[
        'karaage >= 3.1.2',
    ],
    tests_require=tests_require,
    extras_require={'tests': tests_require},
)
|
Karaage-Cluster/karaage-applications
|
setup.py
|
Python
|
gpl-3.0
| 2,622
|
[
"Brian"
] |
9756f8b5f4c602d67a2279f7c611e6f414b2fa6f8484ac67186e7951f3373fc0
|
#!/usr/bin/env python
import respirnet
import numpy as np
import networkx as nx
import sys
import argparse
'''
This class outputs a file (.gml) that contains a network for the pre-bot and botzinger complex based on the desired network parameters. This class allows you to vary the architecture based on the amount of intra population inhibition. Recommended range is gamma between 0 and 1. Zero will translate to a model similar to the half center oscillator and one will be unifrom inhibition across the network
INPUTS:
n0 - The number of nodes that are in population 1 (pre-bot)
n1 - The number of nodes that are in populatuon 2 (bot)
gamma - The amount of intra population inhibition
#####OPTIONAL Inputs####
pI - The probability of an inhibitory neuron being created
gE - The conductance for excitatory snyapses in nS
gI - The conductance for inhibitory synapses in nS
Return:
output - file name for the .gml file the graph will be saved to
TO-DO:
VERIFY INPUTS MATCH DESIRED (CHECK IF type= x takes care of it)
'''
def main(argv = None):
    """Parse command-line arguments, build the pre-Bot/Bot block-model
    graph with the requested amount of intra-population inhibition, and
    write it to <output>.gml.

    Returns None (argparse exits on bad arguments)."""
    if argv is None:
        argv = sys.argv
    parser = argparse.ArgumentParser(prog="genPreBotBot_gamma",
        description = 'Generates Graph based on Block Model with varying amounts of intra inhibition')
    parser.add_argument('n0', type = int, help='number of nodes in pop1')
    parser.add_argument('n1', type = int, help='number of nodes in pop2')
    parser.add_argument('gamma', type = float, help='the percentage of unifrom inhbiiton')
    parser.add_argument('output', help='output filename')
    parser.add_argument('-pI', type = float,
                        help = 'probability of inhibitory neuron',
                        default = .2)
    parser.add_argument('-gE', type=float,
                        help='conductance of excitatory (E) synapses, nS',
                        default = 2.5)
    parser.add_argument('-gI', type=float,
                        help='conductance of inhibitory (I) synapses, nS',
                        default = 2.5)
    args = parser.parse_args(argv[1:])
    n0 = args.n0
    n1 = args.n1
    # Multiply by a factor of 3/2 to maintain an average degree of 6 for each neuron
    gamma = 3*float(args.gamma) / 2
    output = args.output
    gE = args.gE
    gI = args.gI
    pI = args.pI
    # Each column of a matrix is responsible for the probability that index 1 connects to index 2
    # Excitatory connection probability matrix
    pMatE = np.array([ (3.0/(n0-1), 0.05/n1),
                       (0.05/n0, 3.0/(n1-1)) ])
    # Inhibitory connection probability matrix
    pMatI = np.array([ (gamma/(n0-1), (3.0-gamma)/n1),
                       ((3.0-gamma)/n0, gamma/(n1-1)) ])
    # Probability distribution for neuron types (?,Bursting,Tonic,Quiescent)
    pTypes = [0, 0.25, 0.45, 0.3]
    # Bug fix: Python-2-only `print x` statements replaced with the
    # single-argument print() call, which behaves identically on 2 and 3.
    print(pMatE)
    print(pMatI)
    g = respirnet.er_prebot_bot(n0,n1,pMatI, pMatE, pTypes, pI, gE, gI)
    nx.write_gml(g, output + '.gml')
# Run as a script: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    status = main()
    sys.exit(status)
|
kharris/prebotc-graph-model
|
graphs/genPreBotBot_gamma.py
|
Python
|
bsd-3-clause
| 3,156
|
[
"NEURON"
] |
8f0b19b0ec51abb38cb3b36896a3ebab8b0b6fb11079219bc2d4e559a8b3bc54
|
import ast
import logging
class InvalidSortError(Exception):
pass
def sort_parse(clause):
"""
Parses sort clause.
Sort Clause Identifier Rules:
Prefix + Identifier
Prefix Rules:
Is optional
Must start with a-z OR A-Z
Can contain a-z OR A-Z OR 0-9 OR _
Must end with .
Identifier Rules:
Is mandatory
Must start with a-z OR A-Z
Can contain a-z OR A-Z OR 0-9 OR _
:param clause: The clause to be parsed.
:type clause: str
:raises InvalidSortError: Raised if the `clause` is not valid.
:returns: list containing tuples of field name, and sort order.
:rtype: list
.. example::
>>> sort_parse("-prefix.identifier")
[('prefix', 'identifier', 'DESC')]
>>> sort_parse("a, +a.b")
[('a', 'ASC'), ('a', 'b', 'ASC')]
"""
try:
n = ast.parse(clause.strip(",") + ",", mode="eval").body
except SyntaxError as e:
raise InvalidSortError("Invalid sort: %s" % e)
if not isinstance(n, ast.Tuple):
raise InvalidSortError("Invalid condition: must evaluate to a boolean value")
return _SortEvaluator().visit(n)
class _SortEvaluator(ast.NodeVisitor):
def __init__(self):
self._sort_dir = None
self.reset_context()
self._log = logging.getLogger(__name__)
def reset_context(self):
self._sort_dir = "ASC"
def visit_Tuple(self, n):
sort = []
for elt in n.elts:
self.reset_context()
sort.append(tuple(self.visit(elt)) + (self._sort_dir,))
return sort
def visit_UnaryOp(self, n):
self.visit(n.op)
return self.visit(n.operand)
def visit_UAdd(self, n):
pass
def visit_USub(self, n):
self._sort_dir = "DESC"
def visit_Attribute(self, n):
self._log.info("Attribute <%s> <%s>", n.value, n.attr)
value = self.visit(n.value)
value.append(n.attr)
return value
def visit_Name(self, n):
return [n.id]
def generic_visit(self, n):
raise InvalidSortError(
"Invalid sort clause at Line <%d>, Col <%d>" % (n.lineno, n.col_offset)
)
def _main():
import sys
logging.basicConfig(level=logging.DEBUG)
logging.debug("Expression <%s>", sys.argv[1])
result = sort_parse(sys.argv[1])
logging.debug("Evaluation Result <%s>", result)
if __name__ == "__main__":
_main()
|
pegasus-isi/pegasus
|
packages/pegasus-python/src/Pegasus/service/_sort.py
|
Python
|
apache-2.0
| 2,470
|
[
"VisIt"
] |
268d163e80bf7acf46d8a0d494522b5b51896586f772f2ba6fd81c5b780bd0cf
|
from glob import glob
from subprocess import Popen, PIPE
from math import pow
from math import floor
#from rpy2.robjects.packages import importr
import os
import re
import sys
from tiffparser import TiffParser
def getDaymetData(tiffName, ulLat, ulLon, lrLat, lrLon, startYear=2013, endYear=2013, option='tmin'):
    """
    Download Daymet gridded data for the dem file
    startYear & endYear specify the period
    option is the measurement wanted from Daymet
    tiffName is the name of converted tiff file
    """
    # NOTE: Python 2 code (print statements, xrange). Requires wget and
    # GDAL command-line tools (gdalinfo, gdal_translate, gdalwarp) on PATH.
    if not os.path.exists('daymet'):
        os.makedirs('daymet')
    # Reference corner of the Daymet 2x2 degree tile grid and its tile id.
    stLat = 50
    stLon = -132
    stTile = 12625
    # Download Daymet data for the dem region
    #daymetR = importr("DaymetR")
    #daymetR.get_Daymet_tiles(ulLat, ulLon, lrLat, lrLon, startYear, endYear, option)
    # For test only
    # lrLat -= 2
    # lrLon += 2
    daymetCoords = dict()
    # Snap the bounding box to the 2-degree tile grid.
    ulLat = stLat + floor((ulLat - stLat) / 2) * 2
    ulLon = stLon + floor((ulLon - stLon) / 2) * 2
    lrLat = stLat + floor((lrLat - stLat) / 2) * 2
    lrLon = stLon + floor((lrLon - stLon) / 2) * 2
    # Tile ids: rows of 180 tiles, stepping 2 degrees per tile.
    ulTile = int(stTile + ((ulLat - stLat) / 2) * 180 + (ulLon - stLon) / 2)
    lrTile = int(stTile + ((lrLat - stLat) / 2) * 180 + (lrLon - stLon) / 2)
    baseURL = 'http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac/1219/tiles/'
    outPath = './daymet/%d_%d' % (ulTile, lrTile)
    if not os.path.exists(outPath):
        os.makedirs(outPath)
    # Fetch every tile in the snapped bounding box for every year.
    for i in range(int((ulLat - lrLat) / 2) + 1):
        for j in range(int((lrLon - ulLon) / 2) + 1):
            tarTile = ulTile - i * 180 + j
            for year in range(startYear, endYear + 1):
                os.system('wget %s/%d/%d_%d/%s.nc -O %s/%d_%d_%s.nc' % \
                    (baseURL, year, tarTile, year, option, outPath, tarTile, year, option))
    # Template argv for gdal_translate; slots 4-7 (corner coords) and the
    # last two entries (src, dst) are filled in per file below.
    cmdTrans = ['gdal_translate', '-of', 'GTiff', '-a_ullr','','','','',\
        '-a_srs', '+proj=lcc +datum=WGS84 +lat_1=25 n +lat_2=60n \
        +lat_0=42.5n +lon_0=100w','','']
    # convert downloaded nc file to geotiff
    for ncFile in glob('%s/*%s.nc' % (outPath, option)):
        print ncFile
        hint = ncFile.split('%s' % outPath)[1][1:]
        tile = int(hint.split('_')[0])
        year = int(hint.split('_')[1])
        print hint, tile, year
        # Extract corner coordinates only once; all tiles in the batch are
        # assumed to share them -- TODO confirm for multi-tile regions.
        if len(daymetCoords) == 0:
            cmdInfo = ['gdalinfo', 'NETCDF:"%s":lat' % ncFile]
            # Regular experssions for coords extraction
            lrCoords = re.compile(r"""Lower\s+Right\s+\(\s?(-?\d+\.\d+),\s(-?\d+\.\d+)\)""",\
                re.X | re.I)
            ulCoords = re.compile(r"""Upper\s+Left\s+\(\s?(-?\d+\.\d+),\s(-?\d+\.\d+)\)""",\
                re.X | re.I)
            # Execute the command
            process = Popen(cmdInfo, stdout=PIPE, shell=False)
            output, err = process.communicate()
            if process.returncode != 0:
                raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % \
                    (cmdInfo, process.returncode, output, err))
            # Check each line of ouput from end
            output = output.split('\n')
            for i in xrange(len(output) - 1, -1, -1):
                # match left right first
                match = lrCoords.search(output[i])
                if match:
                    print match.group()
                    daymetCoords['LR'] = (match.group(1), match.group(2))
                    # then match upper left
                    match = ulCoords.search(output[i - 3])
                    print match.group()
                    daymetCoords['UL'] = (match.group(1), match.group(2))
        # Transfer nc to geotiff
        cmdTrans[4] = daymetCoords['UL'][0]
        cmdTrans[5] = daymetCoords['UL'][1]
        cmdTrans[6] = daymetCoords['LR'][0]
        cmdTrans[7] = daymetCoords['LR'][1]
        cmdTrans[-2] = 'NETCDF:"%s":%s'%(ncFile, option)
        cmdTrans[-1] = '%s/%d_%d_%s.tif' %(outPath, tile, year, option)
        #print cmdTrans
        process = Popen(cmdTrans, stdout=PIPE, shell=False)
        output, err = process.communicate()
        if process.returncode != 0:
            raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % \
                (cmdTrans, process.returncode, output, err))
        # NOTE(review): the cleanup below launches cmdTrans again instead
        # of cmdClear, so the downloaded .nc file is never removed (and
        # 'rm -y' is not a valid rm flag anyway) -- looks like a bug.
        cmdClear = ['rm','-y', '%s' % ncFile]
        print '%s' % ncFile
        process = Popen(cmdTrans, stdout=PIPE, shell=False)
        output, err = process.communicate()
        if process.returncode != 0:
            raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % \
                (cmdClear, process.returncode, output, err))
    # Combine all tiles in target region for the year range
    if ulTile != lrTile:
        for year in range(startYear, endYear + 1):
            suffix = '%d_%s' % (year, option)
            cmdComb = ['gdalwarp', '-dstnodata', '-9999',\
                '{0}/*_{1}.tif'.format(outPath, suffix),
                '{0}/all_{1}.tif'.format(outPath, suffix)
                ]
            # os.system is used (not Popen) so the shell expands the glob.
            os.system('gdalwarp -dstnodata -9999 {0}/*_{1}.tif {0}/all_{1}.tif'.format(outPath, suffix))
            """
            process = Popen(cmdComb, stdout=PIPE, shell=False)
            output, err = process.communicate()
            if process.returncode != 0:
                raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % \
                    (cmdComb, process.returncode, output, err))
            """
def projDem(inTiff, ulx, uly, lrx, lry, outTiff):
    """Clip *inTiff* to the projection window (ulx, uly)-(lrx, lry) with
    gdal_translate, writing the result to *outTiff*.

    Raises RuntimeError (with command, status and captured output) when
    gdal_translate exits non-zero.
    """
    window = ['%s' % v for v in (ulx, uly, lrx, lry)]
    cmd = ['gdal_translate', '-projwin'] + window + ['%s' % inTiff, '%s' % outTiff]
    proc = Popen(cmd, stdout=PIPE, shell=False)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % \
            (cmd, proc.returncode, out, err))
def main():
    """
    Examine a default DEM file or a user-specified DEM file and
    download Daymet data for the area it covers.

    Command line (all optional, positional):
        argv[1] : input GeoTIFF (must end in '.tif' and exist)
        argv[2] : start year (1980-2013)
        argv[3] : end year   (1980-2013, >= start year)
        argv[4:]: measurement parameters ('tmin', 'tmax', 'prcp' or 'all')
    Exits with status 1 on any invalid argument.
    """
    # Default parameters
    inputTiff = 'pit_c.tif'
    params = ['tmin']
    startYr = 2013
    endYr = 2013
    supportedParam = ['tmin', 'tmax', 'prcp'];
    # allocate user specified parameters
    if len(sys.argv) == 2:
        if not sys.argv[1].endswith('.tif') or not os.path.exists(sys.argv[1]):
            print "File not exist or wrong type"
            sys.exit(1)
        else:
            inputTiff = sys.argv[1]
    elif len(sys.argv) >= 4:
        try:
            if not sys.argv[1].endswith('.tif') or not os.path.exists(sys.argv[1]):
                print "File not exist or wrong type"
                sys.exit(1)
            else:
                inputTiff = sys.argv[1]
            startYr = int(sys.argv[2])
            endYr = int(sys.argv[3])
        except ValueError:
            print "Invalid year parameters"
            sys.exit(1)
        if endYr < startYr or endYr > 2013 or startYr < 1980:
            print "Invalid year parameters:[1980 - 2013]"
            sys.exit(1)
        # NOTE(review): this inner block re-validates argv[1..3] exactly as
        # the enclosing `elif len(sys.argv) >= 4` branch just did — the
        # duplication is redundant but harmless; only the parameter-name
        # loop below is new work for the >4-argument case.
        if len(sys.argv) > 4:
            try:
                if not sys.argv[1].endswith('.tif') or not os.path.exists(sys.argv[1]):
                    print "File not exist or wrong type"
                    sys.exit(1)
                else:
                    inputTiff = sys.argv[1]
                startYr = int(sys.argv[2])
                endYr = int(sys.argv[3])
            except ValueError:
                print "Invalid year parameters"
                sys.exit(1)
            if endYr < startYr or endYr > 2013 or startYr < 1980:
                print "Invalid year parameters:[1980 - 2013]"
                sys.exit(1)
            # 'all' selects every supported parameter; otherwise every
            # name given must be one of the supported ones.
            for opt in sys.argv[4:]:
                if opt == 'all':
                    params = supportedParam
                    break
                elif opt not in supportedParam:
                    print "Invalid measurement parameters"
                    sys.exit(1)
            if params != supportedParam:
                params = sys.argv[4:]
    # Parse dem file
    demParser = TiffParser()
    demParser.loadTiff(inputTiff)
    # get coordinates
    coords = demParser.getDecimalCoords()
    # get projection coords
    projs = demParser.getProjCoords()
    # get converted name
    tiffName = demParser.getName()
    #print endYr, startYr
    # download daymet data
    print coords
    for opt in params:
        getDaymetData(tiffName, coords[1][0], coords[1][1], coords[0][0], coords[0][1], \
            startYr, endYr, opt)
    #print projs
    #print coords
    #projDem('na_dem.tif', projs[1][0], projs[1][1], projs[0][0], projs[0][1],\
    # tiffName + '_dem.tif')
    """
    # Clear netcaf data
    cmdClear = ['rm', '*.nc']
    process = Popen(cmdClear, stdout=PIPE, shell=False)
    output, err = process.communicate()
    if process.returncode != 0:
        raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % \
            (cmdClear, process.returncode, output, err))
    """
# Entry point: run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
Nocsaron/Sol
|
ua_hpc_version/sol/0.0.1/process_dem.py
|
Python
|
gpl-3.0
| 9,450
|
[
"NetCDF"
] |
6dec87157246715b5f46f29ac1aa944d441ab243bf6caf0853dfbb236ad50c03
|
"""
This module contains updated code for providing user updates on searches in Goat.
Not sure if this will work properly without threading, but will try; either way,
trying to separate user information for searches from the search itself, and
provide a better way to feed back the callback from a search to the main GUI
regardless of whether or not threading is used.
"""
import threading, queue, os
import time
from tkinter import *
from tkinter import ttk
from bin.initialize_goat import configs
from searches import new_search_runner, search_obj
from searches.hmmer import hmmer_build
from results import intermediate
from summaries import summary_obj, summarizer
from util.sequences import seqs_from_summary
from util.alignment import mafft
from queries import query_file
# NOTE(review): hard-coded, machine-specific absolute paths (one user's
# macOS install). These presumably should come from configuration —
# confirm before deploying on another machine.
blast_path = '/usr/local/ncbi/blast/bin'
hmmer_path = '/Users/cklinger/src/hmmer-3.1b1-macosx-intel/src'
tmp_dir = '/Users/cklinger/git/Goat/tmp'
class ProgressFrame(Frame):
    """Tk frame reporting progress of (optionally threaded) Goat searches.

    Shows a status label and a determinate ttk.Progressbar, spawns a worker
    thread for the selected search mode, and polls a queue.Queue from the Tk
    event loop (thread_consumer) until the worker posts 'Done'; it then fires
    the optional callback and destroys the parent window. The progress bar is
    advanced by the search runner calling increment_search_count().
    """
    def __init__(self, starting_sobj, mode, parent=None, threaded=True,
            other_widget=None, callback=None, callback_args=None,
            rev_search_name=None, keep_rev_output=None, **kwargs):
        Frame.__init__(self, parent)
        self.pack(expand=YES, fill=BOTH)
        self.start_sobj = starting_sobj
        self.mode = mode
        self.threaded = threaded
        self.other = other_widget
        self.callback = callback
        self.callback_args = callback_args
        # Some attributes are only applicable for analyses
        self.rev_name = rev_search_name
        self.rev_ko = keep_rev_output
        # NOTE(review): self.kwargs only exists when kwargs were given;
        # methods such as _run_full_blast_hmmer assume it is present.
        if kwargs:
            self.kwargs = {}
            for k,v in kwargs.items():
                self.kwargs[k] = v # store for later access
        # Some search modes require access to dbs
        self.qdb = configs['query_db']
        self.udb = configs['result_db']
        self.sdb = configs['search_db']
        # Make non-modal, i.e. un-closeable: swallow the window-close event
        # so the user cannot kill the window while a search is running
        self.parent = parent
        self.parent.protocol('WM_DELETE_WINDOW', lambda: None)
        # Add some information, keep a ref to change the text later
        self.search_info = Label(self, text='Searching databases using {}'.format(
            self.start_sobj.algorithm), anchor='center', justify='center')
        self.search_info.pack(expand=YES)
        # Determine the maximum number of searches to do
        self.num_todo = self.determine_max_searches(self.start_sobj, mode)
        # Code to add progress bar, update 'value' attr after each search
        self.p = ttk.Progressbar(self, # parent
                orient = HORIZONTAL,
                length = 200,
                mode = 'determinate', # specifies a set number of steps
                maximum = self.num_todo)
        self.p.pack()
        # Default starting value for the search bar
        self.num_finished = 1 # don't index from zero, first search is number 1
        # Add another label to report hard numbers in progress bar
        self.search_label = Label(self, text='Performing search {} of {}'.format(
            self.num_finished, self.num_todo), anchor='center', justify='center')
        self.search_label.pack(side=BOTTOM, expand=YES)
    def run(self):
        """start producer thread, consumer loop"""
        if self.threaded:
            configs['threads'].add_thread()
            # always create the queue
            self.queue = queue.Queue()
            if self.mode == 'racc':
                threading.Thread(target=self._run_racc_blast).start()
            elif self.mode == 'new':
                threading.Thread(target=self._run_fwd_search).start()
            elif self.mode == 'rev':
                threading.Thread(target=self._run_rev_search).start()
            elif self.mode == 'recip_blast':
                threading.Thread(target=self._run_recip_blast).start()
            elif self.mode == 'hmmer_blast':
                threading.Thread(target=self._run_hmmer_blast).start()
            elif self.mode == 'full_blast_hmmer':
                threading.Thread(target=self._run_full_blast_hmmer).start()
            # always start the thread consumer function
            self.thread_consumer()
        else:
            # NOTE(review): non-threaded path is not implemented yet
            pass
    def thread_consumer(self):
        """Checks the queue regularly for new results"""
        # Even if there are no results to grab, update status bar each time
        self.p['value'] = self.num_finished
        self.search_label['text'] = 'Performing search {} of {}'.format(
            self.num_finished, self.num_todo)
        try:
            done = self.queue.get(block=False)
        except(queue.Empty): # nothing to grab
            # re-schedule ourselves on the Tk event loop; 200 ms poll interval
            self.after(200, self.thread_consumer)
        # when finished
        else:
            if done:
                if self.callback:
                    if self.callback_args:
                        self.callback(*self.callback_args)
                    else:
                        self.callback()
                self.parent.destroy()
    def increment_search_count(self):
        """Increments counter after each search finishes"""
        #print("incrementing counter")
        if self.num_finished == self.num_todo:
            pass # don't increment past max
        else:
            self.num_finished += 1
    def add_file_to_delete(self, filepath):
        """Adds to callback_args, assumes it is a list"""
        if not filepath in self.callback_args:
            self.callback_args.append(filepath)
    def determine_max_searches(self, sobj, mode):
        """Determines the number of searches to run"""
        # NOTE(review): implicitly returns None for any other mode —
        # the callers above only use the modes handled here; confirm.
        if mode == 'racc' or mode == 'rev':
            return len(sobj.queries)
        elif mode == 'new' or mode == 'recip_blast':
            return (len(sobj.queries) * len(sobj.databases))
    ####################################
    # Actual code for running searches #
    ####################################
    def _run_racc_blast(self):
        """Runs blast searches for raccs, and then removes all info after"""
        runner = new_search_runner.SearchRunner(self.start_sobj, mode='racc',
                other_widget=self)
        runner.run()
        runner.parse()
        if self.threaded:
            self.queue.put('Done') # signal completion
    def _run_fwd_search(self):
        """Function called by the thread, runs each BLAST search"""
        runner = new_search_runner.SearchRunner(self.start_sobj, mode='new',
                other_widget=self)
        runner.run()
        runner.parse()
        if self.threaded:
            self.queue.put('Done')
    def _run_rev_search(self):
        """Calls Search Runner for reverse searches"""
        runner = new_search_runner.SearchRunner(self.start_sobj, mode='rev',
                other_widget=self)
        runner.run()
        runner.parse()
        if self.threaded:
            self.queue.put('Done')
    def _run_recip_blast(self):
        """
        Populates and runs a SearchRunner object for the forward BLAST, then obtains
        queries and a new sobj for the reverse search. Refreshes label and counter,
        then runs the SearchRunner object for the reverse BLAST. Commits all changes
        at the end. - Can we update the db from a thread?
        """
        # Run the forward search, parse output
        fwd_runner = new_search_runner.SearchRunner(self.start_sobj, mode='new',
                other_widget=self)
        fwd_runner.run()
        fwd_runner.parse()
        # Populate DBs with intermediate search queries
        intermediate.Search2Queries(self.start_sobj).populate_search_queries()
        # Get the relevant qids
        rev_queries = []
        for uid in self.start_sobj.list_results():
            uobj = self.udb[uid]
            for qid in uobj.list_queries():
                rev_queries.append(qid)
        # Reverse search swaps query/db types relative to the forward search
        rev_sobj = search_obj.Search(
            name = self.rev_name,
            algorithm = 'blast',
            q_type = self.start_sobj.db_type,
            db_type = self.start_sobj.q_type,
            queries = rev_queries,
            databases = [], # rev search
            keep_output = self.rev_ko,
            output_location = self.start_sobj.output_location)
        self.sdb.add_entry(rev_sobj.name, rev_sobj)
        # Reset label and counter
        self.search_info['text'] = 'Performing reverse search using blast'
        self.num_todo = self.determine_max_searches(rev_sobj, 'rev')
        self.p['maximum'] = self.num_todo
        self.num_finished = 1
        fwd_runner = new_search_runner.SearchRunner(rev_sobj, mode='rev',
                other_widget=self)
        fwd_runner.run()
        fwd_runner.parse()
        if self.threaded:
            self.queue.put('Done')
    def _run_hmmer_blast(self):
        """
        Populates and runs a SearchRunner object for the forward HMMer, then obtains
        queries and a new sobj for the reverse search. Refreshes label and counter,
        then runs the SearchRunner object for the reverse BLAST. Commits all changes
        at the end. - Can we update the db from a thread?
        """
        # Run the forward search, parse output
        fwd_runner = new_search_runner.SearchRunner(self.start_sobj, mode='new',
                other_widget=self)
        fwd_runner.run()
        fwd_runner.parse()
        # Populate DBs with intermediate search queries
        intermediate.Search2Queries(self.start_sobj).populate_search_queries()
        # Get the relevant qids
        rev_queries = []
        for uid in self.start_sobj.list_results():
            uobj = self.udb[uid]
            for qid in uobj.list_queries():
                rev_queries.append(qid)
        rev_sobj = search_obj.Search(
            name = self.rev_name,
            algorithm = 'blast',
            q_type = self.start_sobj.db_type,
            db_type = self.start_sobj.q_type,
            queries = rev_queries,
            databases = [], # rev search
            keep_output = self.rev_ko,
            output_location = self.start_sobj.output_location)
        self.sdb.add_entry(rev_sobj.name, rev_sobj)
        # Reset label and counter
        self.search_info['text'] = 'Performing reverse search using blast'
        self.num_todo = self.determine_max_searches(rev_sobj, 'rev')
        self.p['maximum'] = self.num_todo
        self.num_finished = 1
        rev_runner = new_search_runner.SearchRunner(rev_sobj, mode='rev',
                other_widget=self)
        rev_runner.run()
        rev_runner.parse()
        if self.threaded:
            self.queue.put('Done')
    def _run_full_blast_hmmer(self):
        """
        Populates and runs a SearchRunner object for the forward BLAST, runs a
        subsequent reverse BLAST, summarizes based on user input and then uses
        all "positive" hits to build a new MSA and subsequent HMM before running
        another forward HMMer/reverse BLAST combo.
        """
        # Run the forward search, parse output
        fwd_runner = new_search_runner.SearchRunner(self.start_sobj, mode='new',
                other_widget=self)
        fwd_runner.run()
        fwd_runner.parse()
        # Populate DBs with intermediate search queries
        intermediate.Search2Queries(self.start_sobj).populate_search_queries()
        # Get the relevant qids
        rev_queries = []
        for uid in self.start_sobj.list_results():
            uobj = self.udb[uid]
            for qid in uobj.list_queries():
                rev_queries.append(qid)
        rev_sobj = search_obj.Search(
            name = self.rev_name,
            algorithm = 'blast',
            q_type = self.start_sobj.db_type,
            db_type = self.start_sobj.q_type,
            queries = rev_queries,
            databases = [], # rev search
            keep_output = self.rev_ko,
            output_location = self.start_sobj.output_location)
        self.sdb.add_entry(rev_sobj.name, rev_sobj)
        # Reset label and counter
        self.search_info['text'] = 'Performing reverse search using blast'
        self.num_todo = self.determine_max_searches(rev_sobj, 'rev')
        self.p['maximum'] = self.num_todo
        self.num_finished = 1
        # Run reverse search, parse output
        rev_runner = new_search_runner.SearchRunner(rev_sobj, mode='rev',
                other_widget=self)
        rev_runner.run()
        rev_runner.parse()
        # Summarize fwd and rev search using cutoff criteria
        int_summary = summary_obj.Summary(
            fwd_search = self.start_sobj.name,
            fwd_qtype = self.start_sobj.q_type,
            fwd_dbtype = self.start_sobj.db_type,
            fwd_algorithm = self.start_sobj.algorithm,
            fwd_evalue_cutoff = self.kwargs['fwd_evalue'],
            fwd_max_hits = self.kwargs['fwd_hits'],
            rev_search = self.rev_name,
            rev_qtype = self.start_sobj.db_type,
            rev_dbtype = self.start_sobj.q_type,
            rev_algorithm = 'blast',
            rev_evalue_cutoff = self.kwargs['rev_evalue'],
            rev_max_hits = self.kwargs['rev_hits'],
            next_hit_evalue_cutoff = self.kwargs['next_evalue'])
        int_summarizer = summarizer.SearchSummarizer(int_summary)
        int_summarizer.summarize_two_results()
        mdb = configs['summary_db']
        mdb.add_entry(self.kwargs['summ_name'], int_summary)
        # Get sequences from summarized results - positive only!
        seq_writer = seqs_from_summary.SummarySeqWriter(
            basename = self.start_sobj.name,
            summary_obj = int_summary,
            target_dir = self.start_sobj.output_location,
            hit_type = 'positive',
            mode = 'all',
            add_query_to_file = True) # last one adds the query object seq as well
        seq_writer.run()
        # NOTE(review): the sleep() calls below appear to wait for external
        # tools to flush output files before the next step reads them
        time.sleep(1)
        # Now create MSAs and HMMs for all files
        msa_dict = {}
        for qid,filename in seq_writer.file_dict.items():
            msa_file = (filename.rsplit('.',1)[0]) + '.mfa'
            msa_dict[qid] = msa_file
            mafft.MAFFT(filename,msa_file).run('file')
        time.sleep(2)
        hmm_dict = {}
        for qid,msafile in msa_dict.items():
            hmm_file = (msafile.rsplit('.',1)[0]) + '.hmm'
            hmm_dict[qid] = hmm_file
            hmmer_build.HMMBuild(
                hmmbuild_path = hmmer_path,
                msa_filepath = msafile,
                hmm_out = hmm_file).run_from_file()
        time.sleep(2)
        # Create new intermediate queries for forward HMMer search and run
        qdb = configs['query_db']
        hmmer_queries = []
        for qid,hmmfile in hmm_dict.items():
            #print(qid)
            #print(hmmfile)
            #print()
            name,hmm_obj = query_file.HMMFile(
                hmmfile,self.start_sobj.db_type).get_query()
            hmm_obj.spec_qid = qid
            qobj = qdb[qid]
            hmm_obj.spec_record = qobj.record
            hmm_obj.add_query(qid,qobj) # copy from qdb to misc_qdb
            qdb[name] = hmm_obj # add to db
            hmmer_queries.append(name) # add to queries to search
        fwd_hmmer = search_obj.Search( # be explicit for clarity here
            name = self.kwargs['fwd_name'],
            algorithm = 'hmmer',
            q_type = self.start_sobj.q_type, # queries come from original DB/query type
            db_type = self.start_sobj.db_type, # target DB type is same as original db type
            queries = hmmer_queries, # fresh batch of queries for new fwd search
            databases = self.start_sobj.databases, # searching in all dbs again
            keep_output = self.kwargs['fwd_ko'], # specified for new search
            output_location = self.start_sobj.output_location)
        self.sdb.add_entry(fwd_hmmer.name, fwd_hmmer)
        # Reset label and counter again
        self.search_info['text'] = 'Performing forward search using hmmer'
        self.num_todo = self.determine_max_searches(fwd_hmmer, 'new')
        self.p['maximum'] = self.num_todo
        self.num_finished = 1
        # Run the forward search, parse output
        new_fwd_runner = new_search_runner.SearchRunner(fwd_hmmer, mode='new',
                other_widget=self)
        new_fwd_runner.run()
        new_fwd_runner.parse()
        # Again, get new queries from fwd hmmer search
        #print()
        #print("Getting rev2 search queries")
        intermediate.Search2Queries(fwd_hmmer).populate_search_queries()
        # Get the relevant qids
        rev_blast_queries = []
        for uid in fwd_hmmer.list_results():
            uobj = self.udb[uid]
            for qid in uobj.list_queries():
                rev_blast_queries.append(qid)
        #print(rev_blast_queries)
        rev_blast_sobj = search_obj.Search(
            name = self.kwargs['rev_name'],
            algorithm = 'blast',
            q_type = fwd_hmmer.db_type,
            db_type = fwd_hmmer.q_type,
            queries = rev_blast_queries,
            databases = [], # rev search
            keep_output = self.kwargs['rev_ko'],
            output_location = self.start_sobj.output_location)
        self.sdb.add_entry(rev_blast_sobj.name, rev_blast_sobj)
        # Reset label and counter
        #print("Performing rev2 blast search")
        self.search_info['text'] = 'Performing reverse search using blast'
        self.num_todo = self.determine_max_searches(rev_blast_sobj, 'rev')
        self.p['maximum'] = self.num_todo
        self.num_finished = 1
        rev_runner = new_search_runner.SearchRunner(rev_blast_sobj, mode='rev',
                other_widget=self)
        rev_runner.run()
        rev_runner.parse()
        # Finally, signal completion for callback DB commits
        if self.threaded:
            self.queue.put('Done')
|
chris-klinger/Goat
|
gui/searches/new_threaded_search.py
|
Python
|
gpl-3.0
| 17,902
|
[
"BLAST"
] |
97a720b460c62211869ff57fbc42d6d9df6f96fb0487545746593b8419ec684e
|
import math, scipy, numpy as np
class Lin_Alg:
    '''
    CLASS CONTAINING STATIC LINEAR ALGEBRA HELPER FUNCTIONS
    '''
    @staticmethod
    def update_mat_inv(M_inv, X, mu = None):
        '''
        FUNCTION TO GET DYNAMICALLY CHANGING MATRIX'S INVERSE
        (Sherman-Morrison rank-one update)
        ARGUMENTS: 3
            M_inv : Current matrix inverse
            X     : Vector whose outer product updates the matrix
            mu    : Optional scalar; when given, the update vectors become
                    u = mu * X and v = (1 - mu) * X
        RETURNS: NUMPY ARRAY
            Updated inverse of (M + u v^T)
        '''
        u = X
        v = X
        # Use 'is not None' rather than '!= None': '!=' on a numpy array
        # produces an element-wise array whose truth value is ambiguous,
        # and identity comparison is the correct None test anyway.
        if mu is not None:
            u = mu * X
            v = (1 - mu) * X
        # Sherman-Morrison:
        # (M + u v^T)^-1 = M^-1 - (M^-1 u v^T M^-1) / (1 + v^T M^-1 u)
        c = 1 / (1 + np.dot(np.dot(u, M_inv), v.transpose()))
        add = np.outer(u, v.transpose())
        inter = c * np.dot(np.dot(M_inv, add), M_inv)
        return M_inv - inter
'''
LINK FUNCTION FOR GLM
ARGUMENTS: 2
    name : Name of the link function ("logistic" or "identity")
    val  : Input value passed to the chosen link function
RETURNS: FLOAT
    Real-valued result of the link function (0 if the name is unknown)
'''
def link_func(name, val):
    """Apply the GLM link function selected by *name* to *val*.

    "identity" returns *val* unchanged, "logistic" applies the sigmoid,
    and any other name yields 0.
    """
    if name == "identity":
        return val
    if name == "logistic":
        return logistic(val)
    return 0
'''
FUNCTION TO GET SAMPLES FROM MULTIVARIATE GAUSSIAN
ARGUMENTS: 3
    mean : Mean of the Gaussian distribution
    covariance : Covariance of the Gaussian distribution
    samples : Number of samples to be drawn
RETURNS: NUMPY ARRAY
    Matrix of shape (samples, len(mean)) containing the drawn samples
'''
def gaussian(mean, covariance, samples):
    '''
    Draw *samples* points from a multivariate Gaussian by transforming
    standard-normal draws with an explicit matrix square root:
    result = x @ sqrtm(covariance) + mean, with x ~ N(0, I).
    '''
    # print "asked samples = ",samples
    nf = mean.shape[0]
    shape = [samples]
    final_shape = list(shape[:])
    final_shape.append(nf)
    # Standard-normal draws reshaped to (samples, nf)
    x = np.random.standard_normal(final_shape).reshape(-1, nf)
    # sqc = np.nan_to_num(np.sqrt(covariance))
    # Matrix square root of the covariance
    # NOTE(review): sqrtm may return a complex array if covariance is not
    # positive semi-definite — confirm inputs are PSD.
    sqc = scipy.linalg.sqrtm(covariance)
    print "covariance params = ", np.amax(sqc), np.amin(sqc)
    return np.dot(x, sqc) + mean
'''
LOGISTIC FUNCTION
ARGUMENTS: 1
val: input for logistic function
RETURNS: FLOAT
The output of logistic function
'''
def logistic(val):
    '''
    Numerically-stable logistic (sigmoid) function.
    ARGUMENTS: 1
        val: input for logistic function
    RETURNS: FLOAT
        1 / (1 + exp(-val))
    '''
    try:
        return 1 / (1 + math.exp(-val))
    # Catch only OverflowError (math.exp overflows for large negative val)
    # instead of a bare except that would also mask real bugs (TypeError, ...).
    except OverflowError:
        # Algebraically equivalent form; exp(val) underflows to 0.0 here,
        # giving the correct limit instead of overflowing.
        return math.exp(val) / (1 + math.exp(val))
def func(w, contexts, rewards):
    """Accumulate the logistic-likelihood gradient direction.

    For each context c_i: adds logistic(-w.c_i) * c_i when rewards[i] == 1,
    and -logistic(w.c_i) * c_i otherwise. Returns the summed vector of the
    same length as a single context.
    """
    grad = np.zeros(contexts[0].shape[0])
    for idx, ctx in enumerate(contexts):
        if rewards[idx] == 1:
            weight = logistic(-np.dot(w, ctx))
        else:
            weight = -logistic(np.dot(w, ctx))
        grad += weight * ctx
    return grad
def gd(w, Q, contexts, rewards, alpha):
    '''
    Projected gradient descent starting from w: each step combines the
    prior term alpha * Q (tw - w) with the likelihood term from func(),
    takes a step of size 1/sqrt(i+2), and re-normalizes tw to unit norm.
    Stops early once the step norm drops below 0.001 (or after 1000 steps).
    NOTE(review): the print statements are Python 2 syntax (debug output).
    '''
    feats = w.shape[0]
    # Working copy of w (float64)
    tw = np.zeros(feats)
    for i in range(feats):
        tw[i] = w[i]
    for i in range(1000):
        # Decaying step size
        eta = 1 / np.sqrt(float(i + 2))
        prior = alpha * np.dot(Q, (tw - w))
        likeli = func(w, contexts, rewards)
        grad = prior + likeli
        # if np.linalg.norm(grad) <= 0.001:
        # break
        # print tw
        tw = tw - eta * grad
        # print tw
        # print np.linalg.norm(tw)
        # Project back onto the unit sphere
        tw /= np.linalg.norm(tw)
        # print tw
        # print eta * grad
        # print likeli
        # print prior
        # print "==============="
        # print tw
        # print contexts[0]
        # print np.linalg.norm(contexts[0]), np.linalg.norm(tw), np.dot(tw, contexts[0])
        # print "==============="
        # print "==============="
        # Convergence test on the actual step taken
        if np.linalg.norm(eta * grad) < 0.001:
            print "i = ", i
            if i == 0:
                print grad
                print likeli
                print prior
                print np.linalg.norm(contexts[0]), np.linalg.norm(tw), np.dot(tw, contexts[0])
                print "==============="
            break
    return tw
def sgld(w, f, Q, contexts, rewards, alpha, N = 100.0):
    """Stochastic Gradient Langevin Dynamics sampling around w.

    Runs 100 fixed iterations with step size eta_i = 1/sqrt(i+2). Each step
    combines the Gaussian-prior gradient (alpha/2) * Q (tw - w) with the
    likelihood gradient from func() rescaled by N/len(contexts), and injects
    N(0, eta) noise. The parameter f is unused (kept for call compatibility).
    """
    # Independent float working copy of w
    tw = np.zeros(w.shape[0])
    tw[:] = w
    for step in range(100):
        eta = 1.0 / np.sqrt(float(step + 2))
        prior_grad = (alpha / 2.0) * np.dot(Q, (tw - w))
        likeli_grad = func(w, contexts, rewards) * N / float(len(contexts))
        noise = np.random.normal(0, eta)
        tw = tw + eta * (prior_grad - likeli_grad) / 2.0 + noise
    return tw
# class SGLD():
# def __init__(self, feats):
# self.f = np.zeros(feats)
#
# def sgld(self, w, Q, contexts, rewards, alpha, N = 100):
# n, f = contexts.shape
|
rakshify/GLM_MAB
|
code/policies/helper.py
|
Python
|
mit
| 4,433
|
[
"Gaussian"
] |
47eb377e373a5ba53a445193aacc4892f008f1094ca5fb167b18ef38eafe239c
|
from math import sqrt
import numpy as np
from sklearn.gaussian_process.kernels import Kernel as sk_Kernel
from sklearn.gaussian_process.kernels import ConstantKernel as sk_ConstantKernel
from sklearn.gaussian_process.kernels import DotProduct as sk_DotProduct
from sklearn.gaussian_process.kernels import Exponentiation as sk_Exponentiation
from sklearn.gaussian_process.kernels import ExpSineSquared as sk_ExpSineSquared
from sklearn.gaussian_process.kernels import Hyperparameter
from sklearn.gaussian_process.kernels import Matern as sk_Matern
from sklearn.gaussian_process.kernels import NormalizedKernelMixin as sk_NormalizedKernelMixin
from sklearn.gaussian_process.kernels import Product as sk_Product
from sklearn.gaussian_process.kernels import RationalQuadratic as sk_RationalQuadratic
from sklearn.gaussian_process.kernels import RBF as sk_RBF
from sklearn.gaussian_process.kernels import StationaryKernelMixin as sk_StationaryKernelMixin
from sklearn.gaussian_process.kernels import Sum as sk_Sum
from sklearn.gaussian_process.kernels import WhiteKernel as sk_WhiteKernel
class Kernel(sk_Kernel):
    """
    Base class for skopt.gaussian_process kernels.
    Supports computation of the gradient of the kernel with respect to X
    """
    def __add__(self, b):
        # Wrap plain numbers in ConstantKernel so `kernel + 2.0` works, and
        # return the skopt Sum (which implements gradient_x) rather than
        # sklearn's.
        if not isinstance(b, Kernel):
            return Sum(self, ConstantKernel(b))
        return Sum(self, b)
    def __radd__(self, b):
        if not isinstance(b, Kernel):
            return Sum(ConstantKernel(b), self)
        return Sum(b, self)
    def __mul__(self, b):
        # Same wrapping as __add__, producing the skopt Product.
        if not isinstance(b, Kernel):
            return Product(self, ConstantKernel(b))
        return Product(self, b)
    def __rmul__(self, b):
        if not isinstance(b, Kernel):
            return Product(ConstantKernel(b), self)
        return Product(b, self)
    def __pow__(self, b):
        return Exponentiation(self, b)
    def gradient_x(self, x, X_train):
        """
        Computes gradient of K(x, X_train) with respect to x

        Parameters
        ----------
        x: array-like, shape=(n_features,)
            A single test point.

        X_train: array-like, shape=(n_samples, n_features)
            Training data used to fit the gaussian process.

        Returns
        -------
        gradient_x: array-like, shape=(n_samples, n_features)
            Gradient of K(x, X_train) with respect to x.
        """
        # Subclasses must override.
        raise NotImplementedError
class RBF(Kernel, sk_RBF):
    def gradient_x(self, x, X_train):
        """Gradient of the squared-exponential kernel K(x, X_train) w.r.t. x."""
        # diff = (x - X) / length_scale
        # size = (n_train_samples, n_dimensions)
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = np.asarray(self.length_scale)
        diff = x - X_train
        diff /= length_scale
        # e = -exp(0.5 * \sum_{i=1}^d (diff ** 2))
        # size = (n_train_samples, 1)
        exp_diff_squared = np.sum(diff**2, axis=1)
        exp_diff_squared *= -0.5
        # np.exp(a, a) evaluates in place, avoiding a temporary array.
        exp_diff_squared = np.exp(exp_diff_squared, exp_diff_squared)
        exp_diff_squared = np.expand_dims(exp_diff_squared, axis=1)
        exp_diff_squared *= -1
        # gradient = (e * diff) / length_scale
        gradient = exp_diff_squared * diff
        gradient /= length_scale
        return gradient
class Matern(Kernel, sk_Matern):
    def gradient_x(self, x, X_train):
        """Gradient of the Matern kernel K(x, X_train) w.r.t. x.

        Handled analytically for nu in {0.5, 1.5, 2.5}.
        NOTE(review): for any other nu the method falls through and
        implicitly returns None — confirm callers restrict nu.
        """
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = np.asarray(self.length_scale)

        # diff = (x - X_train) / length_scale
        # size = (n_train_samples, n_dimensions)
        diff = x - X_train
        diff /= length_scale

        # dist_sq = \sum_{i=1}^d (diff ^ 2)
        # dist = sqrt(dist_sq)
        # size = (n_train_samples,)
        dist_sq = np.sum(diff**2, axis=1)
        dist = np.sqrt(dist_sq)

        if self.nu == 0.5:
            # e = -np.exp(-dist) / dist
            # size = (n_train_samples, 1)
            scaled_exp_dist = -dist
            scaled_exp_dist = np.exp(scaled_exp_dist, scaled_exp_dist)
            scaled_exp_dist *= -1

            # grad = (e * diff) / length_scale
            # For all i in [0, D) if x_i equals y_i.
            # 1. e -> -1
            # 2. (x_i - y_i) / \sum_{j=1}^D (x_i - y_i)**2 approaches 1.
            # Hence the gradient when for all i in [0, D),
            # x_i equals y_i is -1 / length_scale[i].
            gradient = -np.ones((X_train.shape[0], x.shape[0]))
            mask = dist != 0.0
            scaled_exp_dist[mask] /= dist[mask]
            scaled_exp_dist = np.expand_dims(scaled_exp_dist, axis=1)
            gradient[mask] = scaled_exp_dist[mask] * diff[mask]
            gradient /= length_scale
            return gradient

        elif self.nu == 1.5:
            # grad(fg) = f'g + fg'
            # where f = 1 + sqrt(3) * euclidean((X - Y) / length_scale)
            # where g = exp(-sqrt(3) * euclidean((X - Y) / length_scale))
            sqrt_3_dist = sqrt(3) * dist
            f = np.expand_dims(1 + sqrt_3_dist, axis=1)

            # When all of x_i equals y_i, f equals 1.0, (1 - f) equals
            # zero, hence from below
            # f * g_grad + g * f_grad (where g_grad = -g * f_grad)
            # -f * g * f_grad + g * f_grad
            # g * f_grad * (1 - f) equals zero.
            # sqrt_3_by_dist can be set to any value since diff equals
            # zero for this corner case.
            sqrt_3_by_dist = np.zeros_like(dist)
            nzd = dist != 0.0
            sqrt_3_by_dist[nzd] = sqrt(3) / dist[nzd]
            dist_expand = np.expand_dims(sqrt_3_by_dist, axis=1)

            f_grad = diff / length_scale
            f_grad *= dist_expand

            sqrt_3_dist *= -1
            exp_sqrt_3_dist = np.exp(sqrt_3_dist, sqrt_3_dist)
            g = np.expand_dims(exp_sqrt_3_dist, axis=1)
            g_grad = -g * f_grad

            # f * g_grad + g * f_grad (where g_grad = -g * f_grad)
            f *= -1
            f += 1
            return g * f_grad * f

        elif self.nu == 2.5:
            # grad(fg) = f'g + fg'
            # where f = (1 + sqrt(5) * euclidean((X - Y) / length_scale) +
            #            5 / 3 * sqeuclidean((X - Y) / length_scale))
            # where g = exp(-sqrt(5) * euclidean((X - Y) / length_scale))
            sqrt_5_dist = sqrt(5) * dist
            f2 = (5.0 / 3.0) * dist_sq
            f2 += sqrt_5_dist
            f2 += 1
            f = np.expand_dims(f2, axis=1)

            # For i in [0, D) if x_i equals y_i
            # f = 1 and g = 1
            # Grad = f'g + fg' = f' + g'
            # f' = f_1' + f_2'
            # Also g' = -g * f1'
            # Grad = f'g - g * f1' * f
            # Grad = g * (f' - f1' * f)
            # Grad = f' - f1'
            # Grad = f2' which equals zero when x = y
            # Since for this corner case, diff equals zero,
            # dist can be set to anything.
            nzd_mask = dist != 0.0
            nzd = dist[nzd_mask]
            # In-place reciprocal of the non-zero distances
            dist[nzd_mask] = np.reciprocal(nzd, nzd)

            dist *= sqrt(5)
            dist = np.expand_dims(dist, axis=1)
            diff /= length_scale
            f1_grad = dist * diff
            f2_grad = (10.0 / 3.0) * diff
            f_grad = f1_grad + f2_grad

            sqrt_5_dist *= -1
            g = np.exp(sqrt_5_dist, sqrt_5_dist)
            g = np.expand_dims(g, axis=1)
            g_grad = -g * f1_grad
            return f * g_grad + g * f_grad
class RationalQuadratic(Kernel, sk_RationalQuadratic):
    def gradient_x(self, x, X_train):
        """Gradient of the rational-quadratic kernel K(x, X_train) w.r.t. x."""
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        alpha = self.alpha
        length_scale = self.length_scale

        # diff = (x - X_train) / length_scale
        # size = (n_train_samples, n_dimensions)
        diff = x - X_train
        diff /= length_scale

        # dist = -(1 + (\sum_{i=1}^d (diff^2) / (2 * alpha)))** (-alpha - 1)
        # size = (n_train_samples,)
        scaled_dist = np.sum(diff**2, axis=1)
        scaled_dist /= (2 * self.alpha)
        scaled_dist += 1
        scaled_dist **= (-alpha - 1)
        scaled_dist *= -1

        # Column vector so it broadcasts over the feature dimension
        scaled_dist = np.expand_dims(scaled_dist, axis=1)
        diff_by_ls = diff / length_scale
        return scaled_dist * diff_by_ls
class ExpSineSquared(Kernel, sk_ExpSineSquared):
    def gradient_x(self, x, X_train):
        """Gradient of the periodic (exp-sine-squared) kernel w.r.t. x."""
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = self.length_scale
        periodicity = self.periodicity

        diff = x - X_train
        sq_dist = np.sum(diff**2, axis=1)
        dist = np.sqrt(sq_dist)

        pi_by_period = dist * (np.pi / periodicity)
        sine = np.sin(pi_by_period) / length_scale
        sine_squared = -2 * sine**2
        exp_sine_squared = np.exp(sine_squared)

        grad_wrt_exp = -2 * np.sin(2 * pi_by_period) / length_scale**2

        # When x_i -> y_i for all i in [0, D), the gradient becomes
        # zero. See https://github.com/MechCoder/Notebooks/blob/master/ExpSineSquared%20Kernel%20gradient%20computation.ipynb
        # for a detailed math explanation
        # grad_wrt_theta can be anything since diff is zero
        # for this corner case, hence we set to zero.
        grad_wrt_theta = np.zeros_like(dist)
        nzd = dist != 0.0
        grad_wrt_theta[nzd] = np.pi / (periodicity * dist[nzd])
        return np.expand_dims(
            grad_wrt_theta * exp_sine_squared * grad_wrt_exp, axis=1) * diff
class ConstantKernel(Kernel, sk_ConstantKernel):
    def gradient_x(self, x, X_train):
        """Gradient of a constant kernel w.r.t. x is identically zero."""
        # K(x, y) does not depend on x, so every partial derivative is 0;
        # shape and dtype follow X_train, matching the (n_samples, n_features)
        # contract of gradient_x.
        zeros = np.zeros_like(X_train)
        return zeros
class WhiteKernel(Kernel, sk_WhiteKernel):
    def gradient_x(self, x, X_train):
        """Gradient of the white-noise kernel w.r.t. x is identically zero."""
        # White noise contributes only on the diagonal (x == y); its value
        # does not vary with x, so the gradient is a zero array shaped like
        # X_train.
        zeros = np.zeros_like(X_train)
        return zeros
class Exponentiation(Kernel, sk_Exponentiation):
    def gradient_x(self, x, X_train):
        """Chain rule for an exponentiated kernel:
        d(K^p)/dx = p * K^(p-1) * dK/dx."""
        x_arr = np.asarray(x)
        train = np.asarray(X_train)
        p = self.exponent
        base = self.kernel
        # Base-kernel values against every training point, as a column
        # vector of shape (n_samples, 1) so it broadcasts over features.
        k_vals = base(np.expand_dims(x_arr, axis=0), train)[0]
        k_col = np.expand_dims(k_vals, axis=1)
        return p * k_col ** (p - 1) * base.gradient_x(x_arr, train)
class Sum(Kernel, sk_Sum):
    def gradient_x(self, x, X_train):
        """Gradient of k1 + k2 is the sum of the individual gradients."""
        grad_k1 = self.k1.gradient_x(x, X_train)
        grad_k2 = self.k2.gradient_x(x, X_train)
        return grad_k1 + grad_k2
class Product(Kernel, sk_Product):
    def gradient_x(self, x, X_train):
        """Product rule: grad(k1 * k2) = k1 * grad(k2) + k2 * grad(k1)."""
        x2d = np.expand_dims(np.asarray(x), axis=0)
        train = np.asarray(X_train)
        # Kernel values against each training point, as (n_samples, 1)
        # columns so they broadcast over the feature dimension.
        k1_col = np.expand_dims(self.k1(x2d, train)[0], axis=1)
        k2_col = np.expand_dims(self.k2(x2d, train)[0], axis=1)
        # NOTE(review): gradient_x receives the already-2d x here, mirroring
        # the original implementation; sub-kernels rely on broadcasting.
        return (k1_col * self.k2.gradient_x(x2d, train) +
                k2_col * self.k1.gradient_x(x2d, train))
class DotProduct(Kernel, sk_DotProduct):
    def gradient_x(self, x, X_train):
        # For a dot-product kernel, d(x . y)/dx = y, so the gradient
        # against each training point is the training point itself.
        return np.asarray(X_train)
class HammingKernel(sk_StationaryKernelMixin, sk_NormalizedKernelMixin,
                    Kernel):
    r"""
    The HammingKernel is used to handle categorical inputs.

    ``K(x_1, x_2) = exp(\sum_{j=1}^{d} -ls_j * (I(x_1j != x_2j)))``

    Parameters
    -----------
    * `length_scale` [float, array-like, shape=[n_features,], 1.0 (default)]
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    * `length_scale_bounds` [array-like, [1e-5, 1e5] (default)]
        The lower and upper bound on length_scale
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
        self.length_scale = length_scale
        self.length_scale_bounds = length_scale_bounds

    @property
    def hyperparameter_length_scale(self):
        # Anisotropic: one length scale per feature dimension.
        length_scale = self.length_scale
        anisotropic = np.iterable(length_scale) and len(length_scale) > 1
        if anisotropic:
            return Hyperparameter("length_scale", "numeric",
                                  self.length_scale_bounds,
                                  len(length_scale))
        return Hyperparameter(
            "length_scale", "numeric", self.length_scale_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        * `X` [array-like, shape=(n_samples_X, n_features)]
            Left argument of the returned kernel k(X, Y)

        * `Y` [array-like, shape=(n_samples_Y, n_features) or None(default)]
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        * `eval_gradient` [bool, False(default)]
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        * `K` [array-like, shape=(n_samples_X, n_samples_Y)]
            Kernel k(X, Y)

        * `K_gradient` [array-like, shape=(n_samples_X, n_samples_X, n_dims)]
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        length_scale = self.length_scale
        anisotropic = np.iterable(length_scale) and len(length_scale) > 1

        # Normalize length_scale to either a float or a float array
        if np.iterable(length_scale):
            if len(length_scale) > 1:
                length_scale = np.asarray(length_scale, dtype=float)
            else:
                length_scale = float(length_scale[0])
        else:
            length_scale = float(length_scale)

        X = np.atleast_2d(X)
        if anisotropic and X.shape[1] != len(length_scale):
            raise ValueError(
                "Expected X to have %d features, got %d" %
                (len(length_scale), X.shape[1]))

        n_samples, n_dim = X.shape

        Y_is_None = Y is None
        if Y_is_None:
            Y = X
        elif eval_gradient:
            raise ValueError("gradient can be evaluated only when Y != X")
        else:
            Y = np.atleast_2d(Y)

        # indicator[i, j, d] is True where X[i, d] != Y[j, d]
        indicator = np.expand_dims(X, axis=1) != Y
        kernel_prod = np.exp(-np.sum(length_scale * indicator, axis=2))

        # dK / d theta = (dK / dl) * (dl / d theta)
        # theta = log(l) => dl / d (theta) = e^theta = l
        # dK / d theta = l * dK / dl

        # dK / dL computation
        if anisotropic:
            grad = (-np.expand_dims(kernel_prod, axis=-1) *
                    np.array(indicator, dtype=np.float32))
        else:
            grad = -np.expand_dims(kernel_prod * np.sum(indicator, axis=2),
                                   axis=-1)

        grad *= length_scale

        if eval_gradient:
            return kernel_prod, grad
        return kernel_prod
|
scikit-optimize/scikit-optimize
|
skopt/learning/gaussian_process/kernels.py
|
Python
|
bsd-3-clause
| 14,794
|
[
"Gaussian"
] |
693247542d875f8ebbe6bd362348151e5a517a8f17bcd7703a4087ff2457e4a1
|
from __future__ import print_function, division, absolute_import
from mdtraj.utils.six import PY2
import abc
import re
import numpy as np
import warnings
from mdtraj import io
from msmbuilder.metrics.baseclasses import AbstractDistanceMetric, Vectorized
import inspect
class RedDimPNorm(Vectorized, AbstractDistanceMetric):
    """
    Distance metric computed in a reduced-dimensionality representation of
    the trajectory data. A transformation (projection) object must be
    generated by some other method and supplied here.
    """

    def __init__(self, proj_obj, abs_min=None, num_vecs=None,
                 expl_var=None, which=None, metric='euclidean', p=2):
        """
        Parameters:
        -----------
        proj_obj: object
            object that has at least a method called project that has a
            call signature like:

            OBJECT.project(trajectory=None, prep_trajectory=None, which=None)
                # trajectory is an instance of msmbuilder.Trajectory
                # prep_trajectory is a prepared msmbuilder.Trajectory (np.ndarray)
                # which is a np.ndarray of which eigenvectors to use

            additionally, there needs to be an attribute called 'vals' that
            holds the eigenvalues used to determine which vectors to project
            onto
        abs_min: float, optional
            keep all eigenvectors with eigenvalues greater than <abs_min>
        num_vecs: int, optional
            number of vectors to use (sorted by decreasing eigenvalue)
        expl_var: float, optional
            keep vectors (from highest eigenvalue) such that the sum of the
            kept eigenvalues divided by the sum of all the eigenvalues is
            greater than <expl_var>
        which: np.ndarray, optional
            indices of eigenvectors to use (sorted by decreasing eigenvalue)
        metric: str, optional
            metric to use in reduced space. See Vectorized for choices
            (default: euclidean)
        p : int, optional
            Exponent for the p-norm (default: 2)

        Remarks:
        --------
        Only one of abs_min, num_vecs, expl_var, and which is necessary. If more
        than one is provided, then the one that will be used is which ever
        is given in this order: (which, num_vecs, abs_min, expl_var)
        """
        self.proj_obj = proj_obj
        if not hasattr(self.proj_obj, 'project'):
            raise Exception("proj_obj must have a method called 'project'")
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec (same .args attribute) and fall back for Python 2.
        try:
            project_args = inspect.getfullargspec(self.proj_obj.project)
        except AttributeError:
            project_args = inspect.getargspec(self.proj_obj.project)
        # Bug fix: the original combined these tests with `or`, so the check
        # passed when only ONE of the three keywords was accepted,
        # contradicting the error message (and the documented contract)
        # that all three are required.
        if not ('trajectory' in project_args.args and
                'prep_trajectory' in project_args.args and
                'which' in project_args.args):
            raise Exception("proj_obj.project must take all of trajectory, "
                            "prep_trajectory, and which")
        self._set_which(abs_min=abs_min, num_vecs=num_vecs, expl_var=expl_var,
                        which=which)
        s = super(RedDimPNorm, self) if PY2 else super()
        s.__init__(metric, p)

    def _set_which(self, abs_min=None, num_vecs=None, expl_var=None, which=None):
        """
        Select which eigenvectors to project onto, in priority order:
        which > num_vecs > abs_min > expl_var.
        """
        if which is not None:
            self.which = np.array(which).flatten().astype(int)
        elif num_vecs is not None:
            self.which = np.arange(num_vecs)
        elif abs_min is not None:
            self.which = np.where(self.proj_obj.vals > abs_min)[0]
        elif expl_var is not None:
            tot_sum = self.proj_obj.vals.sum()
            if tot_sum <= 1e-8:
                # Guard against division by (near) zero below.
                raise Exception(
                    "total sum of eigenvalues is almost zero (or negative): %f" % tot_sum)
            self.which = np.where(np.cumsum(self.proj_obj.vals) / tot_sum < expl_var)[0]
        else:
            raise Exception("must pass one of (abs_min, num_vecs, which, or expl_var)")

    def prepare_trajectory(self, trajectory):
        """
        Project a trajectory onto the selected eigenvectors and return the
        reduced-dimensionality representation.
        """
        proj_trajectory = self.proj_obj.project(trajectory=trajectory, which=self.which)
        return proj_trajectory
|
msmbuilder/msmbuilder-legacy
|
MSMBuilder/metrics/projection.py
|
Python
|
gpl-2.0
| 4,136
|
[
"MDTraj"
] |
df9a9ecfc594cf3e905767e0ec4ef9b7acc0021a7de98f48faf40d85549f95f1
|
input_name = '../examples/multi_physics/thermo_elasticity_ess.py'
output_name = 'test_thermo_elasticity_ess.vtk'
from tests_basic import TestInput
class Test(TestInput):
    # Reuses the generic TestInput machinery unchanged; the module-level
    # input_name / output_name constants above tell it which problem file
    # to run and where to write the result.
    pass
|
RexFuzzle/sfepy
|
tests/test_input_thermo_elasticity_ess.py
|
Python
|
bsd-3-clause
| 180
|
[
"VTK"
] |
758a3fb3e535b0280de2c85dee98abfd9a5801f4fb6c07983b7826b3e7ddc4e0
|
r"""OS routines for Mac, DOS, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, mac, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, ntpath, or macpath
- os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Exactly one of the platform modules below exists as a builtin; re-export
# its public names from os, bind the matching path module as os.path, and
# record the platform name and line separator.
if 'posix' in _names:
    name = 'posix'
    linesep = '\n'
    from posix import *
    try:
        from posix import _exit
    except ImportError:
        pass
    import posixpath as path

    import posix
    __all__.extend(_get_exports_list(posix))
    del posix

elif 'nt' in _names:
    name = 'nt'
    linesep = '\r\n'
    from nt import *
    try:
        from nt import _exit
    except ImportError:
        pass
    import ntpath as path

    import nt
    __all__.extend(_get_exports_list(nt))
    del nt

elif 'os2' in _names:
    name = 'os2'
    linesep = '\r\n'
    from os2 import *
    try:
        from os2 import _exit
    except ImportError:
        pass
    # The EMX (GCC) port of OS/2 uses its own path flavour.
    if sys.version.find('EMX GCC') == -1:
        import ntpath as path
    else:
        import os2emxpath as path
        from _emx_link import link

    import os2
    __all__.extend(_get_exports_list(os2))
    del os2

elif 'mac' in _names:
    name = 'mac'
    linesep = '\r'
    from mac import *
    try:
        from mac import _exit
    except ImportError:
        pass
    import macpath as path

    import mac
    __all__.extend(_get_exports_list(mac))
    del mac

elif 'ce' in _names:
    name = 'ce'
    linesep = '\r\n'
    from ce import *
    try:
        from ce import _exit
    except ImportError:
        pass
    # We can use the standard Windows path.
    import ntpath as path

    import ce
    __all__.extend(_get_exports_list(ce))
    del ce

elif 'riscos' in _names:
    name = 'riscos'
    linesep = '\n'
    from riscos import *
    try:
        from riscos import _exit
    except ImportError:
        pass
    import riscospath as path

    import riscos
    __all__.extend(_get_exports_list(riscos))
    del riscos

else:
    raise ImportError, 'no os specific module found'

# Make "import os.path" resolve to the platform path module chosen above.
sys.modules['os.path'] = path
from os.path import curdir, pardir, sep, pathsep, defpath, extsep, altsep

del _names
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
    """makedirs(path [, mode=0777])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist.  This is
    recursive.

    """
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator; split again to get the real leaf.
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        # Create missing parents first, then the leaf itself.
        makedirs(head, mode)
    mkdir(name, mode)
def removedirs(name):
    """removedirs(path)

    Super-rmdir; remove a leaf directory and empty all intermediate
    ones.  Works like rmdir except that, if the leaf directory is
    successfully removed, directories corresponding to rightmost path
    segments will be pruned away until either the whole path is
    consumed or an error occurs.  Errors during this latter phase are
    ignored -- they generally mean that a directory was not empty.

    """
    rmdir(name)
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator; split again to reach the parent.
        head, tail = path.split(head)
    while head and tail:
        try:
            rmdir(head)
        except error:
            # Parent not empty (or not removable): stop pruning.
            break
        head, tail = path.split(head)
def renames(old, new):
    """renames(old, new)

    Super-rename; create directories as necessary and delete any left
    empty.  Works like rename, except creation of any intermediate
    directories needed to make the new pathname good is attempted
    first.  After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned way until either the
    whole path is consumed or a nonempty directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.

    """
    head, tail = path.split(new)
    if head and tail and not path.exists(head):
        makedirs(head)
    rename(old, new)
    head, tail = path.split(old)
    if head and tail:
        try:
            # Best effort: prune now-empty parents of the old location.
            removedirs(head)
        except error:
            pass

__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None):
    """Directory tree generator.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), yields a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory.  dirnames is a list of
    the names of the subdirectories in dirpath (excluding '.' and '..').
    filenames is a list of the names of the non-directory files in dirpath.
    Note that the names in the lists are just names, with no path components.
    To get a full path (which begins with top) to a file or directory in
    dirpath, do os.path.join(dirpath, name).

    If optional arg 'topdown' is true or not specified, the triple for a
    directory is generated before the triples for any of its subdirectories
    (directories are generated top down).  If topdown is false, the triple
    for a directory is generated after the triples for all of its
    subdirectories (directories are generated bottom up).

    When topdown is true, the caller can modify the dirnames list in-place
    (e.g., via del or slice assignment), and walk will only recurse into the
    subdirectories whose names remain in dirnames; this can be used to prune
    the search, or to impose a specific order of visiting.  Modifying
    dirnames when topdown is false is ineffective, since the directories in
    dirnames have already been generated by the time dirnames itself is
    generated.

    By default errors from the os.listdir() call are ignored.  If
    optional arg 'onerror' is specified, it should be a function; it
    will be called with one argument, an os.error instance.  It can
    report the error to continue with the walk, or raise the exception
    to abort the walk.  Note that the filename is available as the
    filename attribute of the exception object.

    Caution:  if you pass a relative pathname for top, don't change the
    current working directory between resumptions of walk.  walk never
    changes the current directory, and assumes that the client doesn't
    either.

    Example:

    from os.path import join, getsize
    for root, dirs, files in walk('python/Lib/email'):
        print root, "consumes",
        print sum([getsize(join(root, name)) for name in files]),
        print "bytes in", len(files), "non-directory files"
        if 'CVS' in dirs:
            dirs.remove('CVS')  # don't visit CVS directories
    """

    from os.path import join, isdir, islink

    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains.  os.path.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit.  That logic is copied here.
    try:
        # Note that listdir and error are globals in this module due
        # to earlier import-*.
        names = listdir(top)
    except error, err:
        if onerror is not None:
            onerror(err)
        return

    # Split the entries into subdirectories and plain files.
    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        path = join(top, name)
        if not islink(path):
            # Recurse into each non-symlink subdirectory.
            for x in walk(path, topdown, onerror):
                yield x
    if not topdown:
        yield top, dirs, nondirs

__all__.append("walk")
# Make sure os.environ exists, at least
try:
    environ
except NameError:
    # The platform module did not supply one; fall back to an empty mapping.
    environ = {}
def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process. """
    # Thin wrapper: the tuple of positional args becomes execv's argv.
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable file with argument list args and
    environment env, replacing the current process. """
    # The environment mapping rides along as the last positional argument.
    env = args[-1]
    execve(file, args[:-1], env)
def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process. """
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the current
    process. """
    # The environment mapping rides along as the last positional argument.
    env = args[-1]
    execvpe(file, args[:-1], env)
def execvp(file, args):
    """execp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env , replacing the
    current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args, env)

__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    # Shared implementation for execvp/execvpe: search $PATH for `file`
    # unless it already contains a directory component.
    from errno import ENOENT, ENOTDIR

    if env is not None:
        func = execve
        argrest = (args, env)
    else:
        func = execv
        argrest = (args,)
        env = environ

    head, tail = path.split(file)
    if head:
        # An explicit directory was given: do not search PATH.
        func(file, *argrest)
        return
    if 'PATH' in env:
        envpath = env['PATH']
    else:
        envpath = defpath
    PATH = envpath.split(pathsep)
    saved_exc = None
    saved_tb = None
    for dir in PATH:
        fullname = path.join(dir, file)
        try:
            func(fullname, *argrest)
        except error, e:
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure (not simple
            # file-not-found) so it can be re-raised after the search.
            if (e.errno != ENOENT and e.errno != ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise error, saved_exc, saved_tb
    # NOTE(review): if PATH is empty, `e` and `tb` are unbound here and
    # this raise itself fails with a NameError.
    raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
    # This will fail if there's no putenv
    putenv
except NameError:
    pass
else:
    import UserDict

    # Fake unsetenv() for Windows
    # not sure about os2 here but
    # I'm guessing they are the same.
    if name in ('os2', 'nt'):
        def unsetenv(key):
            # Setting to the empty string removes the variable on these
            # platforms.
            putenv(key, "")

    if name == "riscos":
        # On RISC OS, all env access goes through getenv and putenv
        from riscosenviron import _Environ
    elif name in ('os2', 'nt'):  # Where Env Var Names Must Be UPPERCASE
        # But we store them as upper case
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                data = self.data
                for k, v in environ.items():
                    data[k.upper()] = v
            def __setitem__(self, key, item):
                # Mirror every assignment into the real process environment.
                putenv(key, item)
                self.data[key.upper()] = item
            def __getitem__(self, key):
                return self.data[key.upper()]
            # __delitem__ depends on whether unsetenv is available.
            try:
                unsetenv
            except NameError:
                def __delitem__(self, key):
                    del self.data[key.upper()]
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key.upper()]
            def has_key(self, key):
                return key.upper() in self.data
            def __contains__(self, key):
                return key.upper() in self.data
            def get(self, key, failobj=None):
                return self.data.get(key.upper(), failobj)
            def update(self, dict):
                for k, v in dict.items():
                    self[k] = v
            def copy(self):
                return dict(self)
    else:  # Where Env Var Names Can Be Mixed Case
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                self.data = environ
            def __setitem__(self, key, item):
                # Mirror every assignment into the real process environment.
                putenv(key, item)
                self.data[key] = item
            def update(self, dict):
                for k, v in dict.items():
                    self[k] = v
            try:
                unsetenv
            except NameError:
                pass
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key]
            def copy(self):
                return dict(self)

    environ = _Environ(environ)
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default."""
    # environ is the (possibly putenv-backed) mapping set up above.
    return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Never let an exception escape from the forked child;
                # exit with a conventional "command not runnable" status.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid  # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error, "Not stopped, signaled or exited???"

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] isn't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)

if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The environment mapping rides along as the last positional arg.
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)

    __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])

if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.

    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)

    __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
    if not _exists("popen2"):
        def popen2(cmd, mode="t", bufsize=-1):
            # Note the swap: the popen2 module returns (stdout, stdin),
            # while os.popen2 returns (child_stdin, child_stdout).
            import popen2
            stdout, stdin = popen2.popen2(cmd, bufsize)
            return stdin, stdout
        __all__.append("popen2")

    if not _exists("popen3"):
        def popen3(cmd, mode="t", bufsize=-1):
            import popen2
            stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
            return stdin, stdout, stderr
        __all__.append("popen3")

    if not _exists("popen4"):
        def popen4(cmd, mode="t", bufsize=-1):
            import popen2
            stdout, stdin = popen2.popen4(cmd, bufsize)
            return stdin, stdout
        __all__.append("popen4")
import copy_reg as _copy_reg

# Register pickling support for the stat()/statvfs() result types, which
# are C types and are not picklable by default.
def _make_stat_result(tup, dict):
    return stat_result(tup, dict)

def _pickle_stat_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_stat_result, args)

try:
    _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
    pass

def _make_statvfs_result(tup, dict):
    return statvfs_result(tup, dict)

def _pickle_statvfs_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_statvfs_result, args)

try:
    _copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
                     _make_statvfs_result)
except NameError: # statvfs_result may not exist
    pass
|
OS2World/APP-INTERNET-torpak_2
|
Lib/os.py
|
Python
|
mit
| 21,064
|
[
"VisIt"
] |
fb137bff7a9a43ebf54ba75251caf71317c26e8950093eb97320c65ec0634a2e
|
# coding: utf-8
# In[ ]:
from Geometry import *
import numpy as np
#import matplotlib
import os
#os.environ['ETS_TOOLKIT'] = 'qt4'
#os.environ['QT_API'] = 'pyqt'
from PlotOctTree import mayaviPlot,plotOctTree
import astropy.units as au
import astropy.time as at
import astropy.coordinates as ac
#from ENUFrame import ENU
#import FermatPrincipleCartesian as fp
def mayaviPlot2(x, m, mBackground=None, maxNumPts=None):
    '''Do a density plot of the scalar samples ``m`` at the (N, 3) point
    cloud ``x``.  If ``mBackground`` is given, ``m - mBackground`` is
    plotted instead.

    NOTE(review): ``maxNumPts`` is accepted but currently unused.
    '''
    from mayavi.sources.api import VTKDataSource
    from mayavi import mlab
    from scipy.interpolate import griddata
    # Bounding box of the sample cloud, used to build a regular 128^3 grid.
    xmin, ymin, zmin = np.min(x[:, 0]), np.min(x[:, 1]), np.min(x[:, 2])
    xmax, ymax, zmax = np.max(x[:, 0]), np.max(x[:, 1]), np.max(x[:, 2])
    X, Y, Z = np.mgrid[xmin:xmax:128j, ymin:ymax:128j, zmin:zmax:128j]
    if mBackground is not None:
        data = m - mBackground
    else:
        data = m
    # Resample the scattered samples onto the regular grid for contouring.
    field = griddata((x[:, 0], x[:, 1], x[:, 2]), data,
                     (X.flatten(), Y.flatten(), Z.flatten()),
                     method='linear').reshape(X.shape)
    mlab.points3d(x[:, 0], x[:, 1], x[:, 2], data,
                  scale_mode='vector', scale_factor=10.)
    mlab.contour3d(X, Y, Z, field, contours=5, opacity=0.2)
    # NOTE(review): vmin/vmax are leftovers from the commented-out volume
    # rendering below and are currently unused.
    vmin = np.min(data)
    vmax = np.max(data)
    #l = mlab.pipeline.volume(mlab.pipeline.scalar_field(X,Y,Z,field),vmin=vmin, vmax=vmin + .5*(vmax-vmin))
    #l._volume_property.scalar_opacity_unit_distance = min((xmax-xmin)/4.,(ymax-ymin)/4.,(zmax-zmin)/4.)
    #l._volume_property.shade = False
    mlab.colorbar()
    mlab.axes()
    mlab.show()
def plot_wavefront(ne_tci, rays, save=False, animate=False):
    '''Volume-render the electron density held in ``ne_tci`` (an object
    exposing xvec/yvec/zvec grid vectors and get_shaped_array()) and
    overlay ray paths.

    Parameters
    ----------
    ne_tci : tricubic-interpolator-like container of the density field
    rays : list of dicts with "x", "y", "z", "s" arrays, or None
    save : if True (and rays given), skip the final movie-encoding step
    animate : if True, animate a point per ray marching along the rays
    '''
    # Bug fix: mlab was previously only imported inside mayaviPlot2's
    # local scope, so this function raised NameError at runtime.
    from mayavi import mlab
    xmin = ne_tci.xvec[0]
    xmax = ne_tci.xvec[-1]
    ymin = ne_tci.yvec[0]
    ymax = ne_tci.yvec[-1]
    zmin = ne_tci.zvec[0]
    zmax = ne_tci.zvec[-1]
    X, Y, Z = np.mgrid[xmin:xmax:len(ne_tci.xvec) * 1j,
                       ymin:ymax:len(ne_tci.yvec) * 1j,
                       zmin:zmax:len(ne_tci.zvec) * 1j]
    # Reshape the flat model vector back onto the grid.
    data = ne_tci.get_shaped_array()
    print(np.mean(data), np.max(data), np.min(data))
    l = mlab.pipeline.volume(mlab.pipeline.scalar_field(X, Y, Z, data))
    l._volume_property.scalar_opacity_unit_distance = min(
        (xmax - xmin) / 4., (ymax - ymin) / 4., (zmax - zmin) / 4.)
    l._volume_property.shade = False
    mlab.contour3d(X, Y, Z, data, contours=5, opacity=0.2)
    mlab.colorbar()

    def getWave(rays, idx):
        # Gather the idx-th point of every ray into parallel x/y/z arrays.
        xs = np.zeros(len(rays))
        ys = np.zeros(len(rays))
        zs = np.zeros(len(rays))
        for ridx, ray in enumerate(rays):
            xs[ridx] = ray['x'][idx]
            ys[ridx] = ray['y'][idx]
            zs[ridx] = ray['z'][idx]
        return xs, ys, zs

    if rays is not None:
        for ray in rays:
            mlab.plot3d(ray["x"], ray["y"], ray["z"], tube_radius=1.5)
        if animate:
            plt = mlab.points3d(*getWave(rays, 0), color=(1, 0, 0),
                                scale_mode='vector', scale_factor=10.)
            view = mlab.view()

            @mlab.animate(delay=100)
            def anim():
                nt = len(rays[0]["s"])
                f = mlab.gcf()
                save_frames = False  # flip to True to dump PNG frames
                while True:
                    for i in range(nt):
                        xs, ys, zs = getWave(rays, i)
                        plt.mlab_source.set(x=xs, y=ys, z=zs)
                        if save_frames:
                            mlab.savefig('figs/wavefronts/wavefront_{0:04d}.png'.format(i))
                        yield
            anim()
    mlab.show()
    if save and rays is not None:
        # NOTE(review): this early return means the ffmpeg encode below
        # only runs when save is False or rays is None, which looks
        # inverted - confirm the intended behavior.
        return
    import os
    os.system('ffmpeg -r 10 -f image2 -s 1900x1080 -i figs/wavefronts/wavefront_%04d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p figs/wavefronts/wavefront.mp4')
def plot_model(ne_tci, save=False):
    '''Plot the model contained in a tricubic interpolator (a convenient
    container for one) by delegating to plot_wavefront with no rays.'''
    plot_wavefront(ne_tci, None, save=save)
def generateModelFromOctree(octTree, numRays):
    '''Flatten an octTree into linear-inversion arrays.

    Parameters
    ----------
    octTree : tree whose voxels carry lineSegments (ray-id -> segment) and,
        optionally, an 'ne' property [kind, mean, variance]
    numRays : number of rays (rows of G)

    Returns
    -------
    G : (numRays, N) ray-path-length matrix
    Cm : (N,) prior variances (zeros when no 'ne' property is set)
    m : (N,) prior means (zeros when no 'ne' property is set)
    x : (N, 3) voxel centroids
    '''
    voxels = getAllDecendants(octTree)
    N = len(voxels)
    G = np.zeros([numRays, N])
    m = np.zeros(N)
    Cm = np.zeros(N)
    x = np.zeros([N, 3])
    # Robustness: an empty tree yields consistent empty arrays.
    if N == 0:
        return G, Cm, m, x
    has_ne = 'ne' in voxels[0].properties
    for i, vox in enumerate(voxels):
        for j in vox.lineSegments.keys():
            # Bug fix: the original only bounds-checked j in the no-'ne'
            # path; an out-of-range ray id would have raised IndexError in
            # the other path.
            if j < numRays:
                G[j, i] = vox.lineSegments[j].sep
        if has_ne:
            m[i] = vox.properties['ne'][1]
            Cm[i] = vox.properties['ne'][2]
        x[i, :] = vox.centroid
    return G, Cm, m, x
def electronDensity2RefractiveIndex(ne, frequency=120e6):
    '''Convert electron density to refractive index.

    Parameters
    ----------
    ne : float or array
        electron density in electrons / m^3
    frequency : float
        wave frequency in Hz (default 120 MHz)

    Returns
    -------
    n : refractive index, sqrt(1 - constant * ne / frequency**2)
    dndne : |dn/dne|, used by callers to propagate density variance
        (the true derivative is negative; only the magnitude is used)
    '''
    # 56.3**2 == (5.63e4)**2 / 1e6: plasma-frequency constant for ne in
    # electrons/m^3 (wp = 5.63e4*sqrt(ne/1e6) Hz, Lightman p. 226).  A
    # dead assignment of the un-scaled constant was removed here.
    constant = 56.3 * 56.3
    n = np.sqrt(1. - constant * ne / frequency**2)
    dndne = constant / frequency**2 / n / 2.
    return n, dndne
def setOctTreeElectronDensity(octTree, ne, neVar, frequency=120e6):
    '''Write per-voxel electron density onto the octTree.

    Assumes ne/neVar are ordered like getAllDecendants(octTree) and that
    neVar is the diagonal of the covariance.  Stores the intensive density
    ('ne') and the extensive electron number ('Ne' = density * volume),
    and resets each voxel's lineSegments.  Unit km^3.  The frequency
    argument is kept for interface symmetry but unused here.
    '''
    for idx, vox in enumerate(getAllDecendants(octTree)):
        vox.properties['ne'] = ['intensive', ne[idx], neVar[idx]]
        vox.properties['Ne'] = ['extensive', ne[idx] * vox.volume,
                                neVar[idx] * vox.volume]
        vox.lineSegments = {}
def setOctTreeElectronNumber(octTree, Ne, NeVar, frequency=120e6):
    '''Write per-voxel electron number onto the octTree.

    Assumes Ne/NeVar are ordered like getAllDecendants(octTree) and that
    NeVar is the diagonal of the covariance.  Stores the extensive 'Ne',
    the derived intensive density 'ne' (= Ne / volume), and a refractive
    index property 'n' at the given frequency; resets lineSegments.
    '''
    for idx, vox in enumerate(getAllDecendants(octTree)):
        density = Ne[idx] / vox.volume
        densityVar = NeVar[idx] / vox.volume
        vox.properties['ne'] = ['intensive', density, densityVar]
        vox.properties['Ne'] = ['extensive', Ne[idx], NeVar[idx]]
        n, dndne = electronDensity2RefractiveIndex(density, frequency)
        vox.properties['n'] = ['intensive', n, dndne * densityVar]
        vox.lineSegments = {}
def makeRaysFromSourceAndReciever(recievers=None, directions=None, sources=None,
                                  maxBaseline=100., height=1000.,
                                  numSources=15, numRecievers=10):
    """Build a Ray for every (reciever, direction) pair.

    recievers : list of (3,) arrays, randomly generated on a quarter-
        baseline square at z = -epsFloat when None
    directions : list of unit (3,) arrays; when None they are derived
        from `sources` if given, otherwise drawn randomly around
        alt=45deg, az=0 (+-5deg)
    sources : optional list of (3,) source positions used to derive
        directions (direction cosines)
    height : kept for interface compatibility (unused here)
    """
    if recievers is None:
        print("Generating {0} recievers".format(numRecievers))
        recievers = []
        for i in range(numRecievers):
            recievers.append(np.array([
                np.random.uniform(low=-maxBaseline / 4., high=maxBaseline / 4.),
                np.random.uniform(low=-maxBaseline / 4., high=maxBaseline / 4.),
                -epsFloat]))
    if directions is None:
        if sources is not None:
            # Bug fix: this branch was unreachable in the original because
            # the random-direction branch always filled `directions` first.
            print("Generating {} directions".format(len(sources)))
            directions = []
            for src in sources:
                # direction cosines
                directions.append(src / np.linalg.norm(src))
        else:
            print("Generating {0} sources".format(numSources))
            theta = np.pi / 4.
            phi = 0.
            directions = []
            for i in range(numSources):
                alt = theta + np.random.uniform(low=-5 * np.pi / 180.,
                                                high=5 * np.pi / 180.)
                az = phi + np.random.uniform(low=-5 * np.pi / 180.,
                                             high=5 * np.pi / 180.)
                z = np.sin(alt)
                x = np.cos(alt) * np.sin(az)
                y = np.cos(alt) * np.cos(az)
                directions.append(np.array([x, y, z]))
    rays = []
    count = 0
    for r in recievers:
        for d in directions:
            rays.append(Ray(r, d, id=count))
            count += 1
    return rays
def compute3dExponentialCovariance(sigma, L, x, load=False):
    '''Exponential covariance model C_ij = s_i * s_j * exp(-|x_i - x_j| / L).

    Parameters
    ----------
    sigma : scalar, or length-N array of per-point standard deviations
    L : correlation length (same units as x)
    x : (N, 3) array of positions
    load : if True, try to reuse a previously cached matrix for this N

    Side effect: the result is cached to covariance_<N>.npy.
    '''
    filename = "covariance_{0}.format".format(x.shape[0]) if False else "covariance_{0}.npy".format(x.shape[0])
    if load:
        try:
            Cm = np.load(filename)
            print("Loaded {0}".format(filename))
            return Cm
        except Exception:
            # Cache miss or unreadable file: fall through and recompute.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
    N = x.shape[0]
    Cm = np.zeros([N, N])
    if np.size(sigma) == N:
        # Per-point sigmas: fill the symmetric matrix one row-tail at a time.
        for i in range(N):
            d = np.linalg.norm(x[i, :] - x[i:, :], axis=1)
            Cm[i, i:] = sigma[i] * sigma[i:] * np.exp(d / (-L))
            Cm[i:, i] = Cm[i, i:]
    else:
        sigma2 = sigma * sigma
        for i in range(N):
            d = np.linalg.norm(x[i, :] - x[i:, :], axis=1)
            Cm[i, i:] = sigma2 * np.exp(d / (-L))
            Cm[i:, i] = Cm[i, i:]
    # Zero-out numerically negligible entries before caching.
    Cm[Cm < epsFloat] = 0.
    np.save(filename, Cm)
    return Cm
def ionosphereModel(x,dayTime=True,bump=False):
h = x[2]
Nf1 = 4*np.exp((h-300)/100.)/(1 + np.exp((h-300)/100.))**2
res = Nf1
if dayTime:#also E layer
Ne = 0.3*4*np.exp((h-85.)/50.)/(1 + np.exp((h-85.)/50.))**2
res += Ne
if bump:
res += 0.5*np.exp(-np.sum((x - np.array([30,30,600]))**2)/30.**2)
res += 0.2*np.exp(-np.sum((x - np.array([-30,-30,200]))**2)/30.**2)
res += 0.2*np.exp(-np.sum((x - np.array([-40,-40,600]))**2)/30.**2)
return res
def repartitionOctTree(octTree,rays, maxNum=3,minScale = 5.):
    '''Assuming a model has been set and a set of rays has been propagated,
    refine the grid such that no cell has more than ``maxNum`` rays passing through it.

    Voxels crossed by more than ``maxNum`` rays are subdivided, but only while
    each dimension stays above 2*minScale.  At most 6 refinement passes are
    performed.  Returns the (mutated) octTree.'''
    someRemain = True
    iter=0
    while someRemain:
        if iter > 5:
            # hard cap on refinement passes to bound run time
            break
        iter += 1
        # re-propagate all rays through the current partition
        cleanRays(octTree)
        for ray in rays:
            forwardRay(ray,octTree)
        #plotOctTreeXZ(octTree,ax=None)
        #mayaviPlot(x,m,mBackground=None,maxNumPts=None,octTree=None)
        G,Cm,m,x = generateModelFromOctree(octTree,len(rays))
        # C[j] = number of rays with a nonzero path through voxel j
        C = np.sum(G>0,axis=0)
        if np.max(C) <= maxNum:
            someRemain = False
            continue
        voxels = getAllDecendants(octTree)
        i = 0
        someRemain = False
        while i < len(C):
            if C[i] > maxNum:
                # only split voxels that would remain larger than minScale after halving
                if voxels[i].dx > 2*minScale and voxels[i].dy > 2*minScale and voxels[i].dz > 2*minScale :
                    subDivide(voxels[i])
                    someRemain = True
            i += 1
    #plotOctTree(octTree)
    #plotOctTreeXZ(octTree,ax=None)
    #plotOctTreeYZ(octTree,ax=None)
    return octTree
def constructIonosphereModel(height=1000.,maxBaseline=150.,rays = None,load=False):
    '''Build an octree ionosphere model seeded with ionosphereModel() densities.

    If ``rays`` is given, the octree extent is derived from the ray geometry
    (receivers plus ray/upper-plane intersections) and the grid is
    auto-partitioned so that no cell is crossed by too many rays.
    Otherwise a maxBaseline-wide grid of ``height`` km is built and refined
    by altitude.  When ``load`` is True a previously saved octree is reused
    if possible.  Returns the populated OctTree (or None if a ray misses
    the upper bounding plane).
    '''
    if rays is not None:
        fileName = "IonosphereOctTree_AutoPartition.npy"
        if load:
            try:
                # bugfix: was loadOctTree(filename) -> NameError (variable is
                # fileName), silently swallowed by the bare except, so the
                # cached octree was never actually loaded
                octTree = loadOctTree(fileName)
                return octTree
            except:
                pass
        recievers = np.zeros([len(rays),3])
        directions = np.zeros([len(rays),3])
        i = 0
        while i < len(rays):
            recievers[i,:] = rays[i].origin
            directions[i,:] = rays[i].dir
            i += 1
        #min and max of recievers
        minLim1 = np.min(recievers,axis=0)
        maxLim1 = np.max(recievers,axis=0)
        #min and max of sources
        upperPlane = Plane(np.array([0,0,height+ maxLim1[2]+epsFloat]),normal=([0,0,1]))
        points = []
        for ray in rays:
            res,point = intersectRayPlane(ray,upperPlane)
            if not res:
                print("ray misses uper plane?")
                return
            points.append(point)
        points = np.array(points)
        minLim2 = np.min(points,axis=0)
        maxLim2 = np.max(points,axis=0)
        # bounding box of receivers and their sky-plane piercings
        xmin = min(minLim2[0],minLim1[0])
        xmax = max(maxLim2[0],maxLim1[0])
        ymin = min(minLim2[1],minLim1[1])
        ymax = max(maxLim2[1],maxLim1[1])
        zmin = min(minLim2[2],minLim1[2])
        zmax = max(maxLim2[2],maxLim1[2])
        dx = 2*(np.abs(xmax) + np.abs(xmin))
        dy = 2*(np.abs(ymax) + np.abs(ymin))
        center = [0,0,height/2. + maxLim1[2]+epsFloat]
        #print(center,dx,dy,height)
        # 20% margin so rays near the edge stay inside the tree
        octTree = OctTree(center,dx=dx*1.2,dy=dy*1.2,dz=height)
        subDivide(octTree)
        #plotOctTreeXZ(octTree,ax=None)
        octTree = repartitionOctTree(octTree,rays, maxNum=5,minScale=30.)
        numVoxels = countDecendants(octTree)
        print("Generated an octtree with {0} voxels.".format(numVoxels))
        plotOctTreeXZ(octTree,ax=None)
        # seed each voxel with the analytic ionosphere density
        G,Cm,m,x = generateModelFromOctree(octTree,0)
        i = 0
        while i < x.shape[0]:
            m[i] = ionosphereModel(x[i,:],dayTime=True,bump=False)
            i += 1
        setOctTreeElectronDensity(octTree,m,np.ones_like(m)*0.05**2)
        saveOctTree(fileName,octTree)
        return octTree
    fileName = "ionosphereModel_5levels.npy"
    if load:
        try:
            # bugfix: was loadOctTree(filename) -> NameError swallowed by except
            octTree = loadOctTree(fileName)
            return octTree
        except:
            pass
    octTree = OctTree([0,0,height/2.],dx=maxBaseline,dy=maxBaseline,dz=height)
    #level 3 - all
    #subDivide(subDivide(octTree))
    subDivide(octTree)
    subDivide(subDivide(octTree))
    voxels = getAllDecendants(octTree)
    # bugfix: a stray `voxels = []` here discarded the leaf list, so the
    # altitude-dependent level-4/5 refinement below never ran
    for vox in voxels:
        #level 4 - 250 to 750
        if (vox.centroid[2] > 250) and (vox.centroid[2] < 750):
            subDivide(vox)
        #level 5 - 250 to 500
        if (vox.centroid[2] > 250) and (vox.centroid[2] < 500):
            subDivide(vox)
    # seed each voxel with the analytic ionosphere density (with bumps)
    G,Cm,m,x = generateModelFromOctree(octTree,0)
    i = 0
    while i < x.shape[0]:
        m[i] = ionosphereModel(x[i,:],dayTime=True,bump=True)
        i += 1
    setOctTreeElectronDensity(octTree,m,np.ones_like(m)*0.05**2)
    saveOctTree(fileName,octTree)
    #plotOctTreeXZ(octTree,ax=None)
    #plotOctTreeXZ(octTree,ax=None)
    #plotOctTree3D(octTree,model=m)
    return octTree
def gradientCheck(mprior,G):
    """Debug utility: compare the analytic Jacobian of d = G.(K*exp(mlog))
    with a finite-difference Jacobian, and show an image of the difference.

    Returns (J, Jexact) where J is the analytic Jacobian and Jexact the
    finite-difference one.  Blocks on plt.show().
    """
    # machine-epsilon trick; immediately overridden by the module-level epsFloat
    eps = 7./4. - 3./4. - 1.
    eps = epsFloat
    N = np.size(mprior)
    M = G.shape[0]
    # K*exp(mlog) reparameterization with mlog = log(mprior/K)
    K = np.mean(mprior)
    mlog = np.log(mprior/K)
    mForward = K*np.exp(mlog)
    g0 = G.dot(mForward)
    # analytic Jacobian wrt mlog: column j of G scaled by mForward[j] (row broadcast)
    J = G*mForward
    Jexact = np.zeros([M,N])
    i = 0
    while i < N:
        # perturb one log-parameter at a time, forward-difference the data
        mlog_old = mlog[i]
        mlog[i] += eps
        mForward = K*np.exp(mlog)
        g = G.dot(mForward)
        Jexact[:,i] = (g - g0)/eps
        #print(Jexact[:,i])
        mlog[i] = mlog_old
        i += 1
    import pylab as plt
    plt.imshow(J-Jexact)
    plt.colorbar()
    plt.show()
    return J,Jexact
def initHomogeneousModel(G,dobs):
    """Homogeneous starting model: total observed data over total ray coverage in G."""
    total_data = np.sum(dobs)
    total_coverage = np.sum(G)
    return total_data / total_coverage
def transformCov2Log(Cm_linear,K):
    '''Map a linear-space model covariance to log-space.

    For the log parameterization y = log(x/K), the lognormal moment relation
    gives Cm_log = log(1 + Cm_linear / K**2).  Inverse of transformCov2Linear.
    '''
    relative_cov = Cm_linear / K**2
    Cm_log = np.log(1 + relative_cov)
    return Cm_log
def transformCov2Linear(Cm_log,K):
    '''Map a log-space covariance back to linear space:
    Cm_linear = (exp(Cm_log) - 1) * K**2.  Inverse of transformCov2Log.
    '''
    growth = np.exp(Cm_log) - 1.
    return growth * K**2
def LinearSolution(dobs,G,Cd,Cmprior,mprior):
    '''Single-step linear Gaussian (least-squares) inversion for d = G.m.

    Computes the posterior mean and covariance:
        m  = mprior + Cm.G^t.(G.Cm.G^t + Cd)^-1 . (dobs - G.mprior)
        Cm = Cmprior - Cm.G^t.(G.Cm.G^t + Cd)^-1 . G.Cmprior
    Returns (m, Cm).
    '''
    print("Doing forward problem")
    predicted = G.dot(mprior)
    print("Calculating residuals:")
    misfit = dobs - predicted
    GT = G.transpose()
    print("Calculating smoothing matrix")
    # inverse of the data-space covariance G.Cm.G^t + Cd
    data_space_inv = np.linalg.inv(G.dot(Cmprior).dot(GT) + Cd)
    print("Calculating adjoint")
    gain = Cmprior.dot(GT).dot(data_space_inv)
    print("updating model")
    posterior_mean = mprior + gain.dot(misfit)
    print("updating covariance")
    posterior_cov = Cmprior - gain.dot(G).dot(Cmprior)
    return posterior_mean,posterior_cov
def SteepestDescent(octTree,rays,dobs,Cd,Cmprior,mprior):
    '''Assumes d = log(K*int(G * exp(m))) and that input is linear versions

    Iterative regularized steepest-descent update (10 fixed iterations),
    re-propagating the rays through the octree each iteration.
    NOTE(review): despite the docstring, the forward model used below is the
    linear d = G.m -- confirm which is intended.
    Returns (mn, Cm): final model and posterior covariance from the last
    linearization.
    '''
    def updater(x,G):
        # per-cell step size: 0.1 for cells crossed by at least one ray,
        # 0.01 for unconstrained cells
        eps = np.zeros(x.shape[0])
        i = 0
        while i< x.shape[0]:
            if np.sum(G[:,i]) > 0:
                eps[i] = 0.1
            else:
                eps[i] = 0.01
            i += 1
        return eps
    iter = 0
    mn = mprior
    # no-op self-assignment kept as-is
    Cmprior = Cmprior
    while iter < 10:
        #forward problem
        print("Setting octTree with model_{0}".format(iter))
        setOctTreeModel(octTree,mn,np.diag(Cmprior),propName='Ne',propType='extensive')
        print("Propagating {0} rays".format(len(rays)))
        for ray in rays:
            forwardRay(ray,octTree)
        print("Pulling ray propagations.")
        G,CmVar,mexact,x = generateModelFromOctree(octTree,len(rays),propName='Ne')
        print("Doing forward problem")
        d = G.dot(mn)
        print("Calculating residuals, Sum:")
        residuals = d - dobs
        print(np.sum(residuals**2))
        #print(residuals.shape)
        print("Calculating weighting residuals")
        # Cd^-1.(d - dobs) via pseudo-inverse
        weightedRes = np.linalg.pinv(Cd).dot(residuals)
        print(Cd,weightedRes)
        #print(weightedRes,np.linalg.solve(Cd,residuals))
        #next part should be changed
        #Gt.Cd^-1.(d-dobs)
        Gt = G.transpose()
        #smooth and adjoint
        print("Calculating adjoint")
        dm = Cmprior.dot(Gt).dot(weightedRes)
        print("updating model")
        # gradient step plus a prior-pull term (mn - mprior)
        mn = mn - updater(x,G)*(dm + mn - mprior)
        iter += 1
    print("updating covariance")
    print("Calculating smoothing matrix")
    smooth = np.linalg.pinv(G.dot(Cmprior).dot(Gt) + Cd)
    print("Calculating adjoint")
    adjoint = Cmprior.dot(Gt).dot(smooth)
    # posterior covariance from the final linearization
    Cm = Cmprior - adjoint.dot(G).dot(Cmprior)
    return mn,Cm
def BerrymanSol(G,dobs,mprior=None,mu = 0.0,Cd=None,Cm = None):
    '''Solve d=G.m minimizing misfit:
    (dobs-d)^t.W1.(dobs-d) + mu (m - mprior)^t.W2.(m-mprior)
    with the berryman choice of W1, W2.
    G is mxn, m - num rays, n - num cells

    Uses a truncated eigen-expansion of A^t.A (A the row/column weighted
    system), skipping modes with singular value < 1e-5 or NaN; mu acts as a
    damping added to each retained singular value.  Returns np.abs(model),
    forcing a nonnegative solution.
    NOTE(review): `sqrtm` is not defined in this function -- presumably
    scipy.linalg.sqrtm imported at module level; verify.
    '''
    # NOTE: `m` first holds the ray count, and is reused for the model vector at the end
    m = G.shape[0]
    n = G.shape[1]
    if Cd is not None:
        L = Cd + np.diag(np.sum(G,axis=1))
    else:
        #row sums, length of path i
        L = np.diag(np.sum(G,axis=1))
    if Cm is not None:
        C = np.linalg.pinv(Cm + np.diag(np.sum(G,axis=0)))
    else:
        #col sums, legnth of all rays in cell j (coverage)
        C = np.diag(np.sum(G,axis=0))
    Linv = np.linalg.pinv(L)
    Cinv = np.linalg.pinv(C)
    #m-vec choice
    u = np.ones(m)
    #n-vec
    v = Cinv.dot(G.transpose()).dot(u)
    #v = np.ones(n)
    # mean slowness-like scale from the data
    sigma0 = u.transpose().dot(dobs)/(u.transpose().dot(L).dot(u))
    if mprior is None:
        #weight for mean background m0 = (u^t.L.W3.dobs/u^t.L.W3.L.u)v
        #W3 = inv(L)
        #W3 = Linv
        #mprior = u.transpose().dot(L).dot(W3).dot(dobs)/(u.transpose().dot(L).dot(W3).dot(L).dot(u))*v
        mprior = sigma0*v
    #W1 = Linv
    #D = np.sqrt(C)
    #A = np.sqrt(W1).dot(G).dot(inv(D))
    # symmetric matrix square roots of the weights
    Linv12 = sqrtm(Linv)
    Cinv12 = sqrtm(Cinv)
    A = Linv12.dot(G).dot(Cinv12)
    AtA = A.transpose().dot(A)
    print("eigen val solve At.A",AtA)
    #sorted in ascending order
    sing,eigv = np.linalg.eigh(AtA)
    #Zj = xj^t.A^t.Linv12.dobs
    zb = sqrtm(C).dot(mprior)
    dz = np.zeros(n)
    adjoint = A.transpose().dot(Linv12).dot(dobs)
    # walk eigenpairs from largest to smallest, stop at rank deficiency
    i = len(sing) - 2
    while i >= 0:
        Zj = eigv[:,i].transpose().dot(adjoint)
        #print(Zj)
        if np.isnan(sing[i]) or sing[i] < 1e-5:
            print("rank: {0}".format(len(sing)-1-i))
            break
        dz += Zj*eigv[:,i]/(sing[i]+mu)
        i -= 1
    #compare with
    #zcomp = np.linalg.pinv(AtA).dot(Cinv12).dot(G.transpose()).dot(Linv).dot(dobs)
    z = dz + zb
    m = Cinv12.dot(z)
    return np.abs(m)
def MetropolisSolution(G,dobs,Cd,Cmprior,mprior):
    """Annealed component-wise Metropolis sampler for d = G.m.

    Outer loop lowers the temperature T = T0/(1+iter); inner loop proposes
    per-component updates drawn from |N(mML, Cmprior*T)| and accepts by the
    Metropolis ratio of Gaussian data likelihoods.
    Returns (mML, Cm): the last accepted model and the sample covariance of
    the collected states.
    NOTE(review): postDist.append(mCandidate) stores a reference to the same
    array that keeps being mutated in later iterations, so earlier entries
    alias the final state -- likely should append np.copy(mCandidate).
    """
    postDist = []
    iter = 0
    T0 = 1.
    # NOTE: `size` is unused
    size = 1000
    Cdinv_ = np.linalg.pinv(Cd)
    mML = mprior
    Cm = Cmprior
    while iter < 100:
        print("Metropolis iter-{0}".format(iter))
        # annealing: tighten the likelihood, shrink the proposal as iter grows
        T = T0/(1 + iter)
        Cdinv = Cdinv_/T
        Cmsample = Cmprior*T
        count = 0
        mCandidate = np.copy(mML)
        d = (G.dot(mCandidate) - dobs)
        Li = np.exp(-d.transpose().dot(Cdinv).dot(d)/2.)
        while count < 100:
            print("New sample batch: {0}".format(count))
            #sample prior
            msample = np.abs(np.random.multivariate_normal(mean=mML, cov = Cmsample))
            # posterior distribution
            #forward problems
            i = 0
            while i < len(mML):
                # component-wise proposal: swap in one sampled coordinate at a time
                mCandidate[i] = msample[i]
                d = (G.dot(mCandidate) - dobs)
                Lj = np.exp(-d.transpose().dot(Cdinv).dot(d)/2.)
                if Lj > Li:
                    Li = Lj
                    count += 1
                else:
                    if np.random.uniform() < Lj/Li:
                        Li = Lj
                        count += 1
                    else:
                        # reject: restore the previous value of this component
                        mCandidate[i] = mML[i]
                i += 1
            postDist.append(mCandidate)
            mML = mCandidate#np.mean(postDist,axis=0)
            iter += 1
    Cm = np.cov(postDist,rowvar=0)
    return mML,Cm
def metropolisPosteriorCovariance(G,dobs,Cd,CmlogPost,mlogPost,K):
    """Metropolis resampling of the log-space posterior N(mlogPost, CmlogPost).

    Draws candidates from the Gaussian log-posterior, maps them to linear
    space via m = K*exp(sample), and accepts by the Metropolis ratio of the
    Gaussian data likelihoods of d = G.m.  Collects `size` accepted samples.
    Returns (mML, Cm): sample mean and covariance in linear space.
    NOTE(review): mSamples, Cminv and T are computed but never used; the
    prior terms of the likelihood are commented out.
    """
    postDist = []
    size = 100
    Cdinv = np.linalg.pinv(Cd)
    Cminv = np.linalg.pinv(CmlogPost)
    mSamples = np.random.multivariate_normal(mean=mlogPost, cov = CmlogPost,size=size)
    T0 = 5
    i = 0
    count = 0
    # initial state and its likelihood
    mSample = np.random.multivariate_normal(mean=mlogPost, cov = CmlogPost)
    mi = K*np.exp(mSample)
    di = G.dot(mi) - dobs
    dm = mSample - mlogPost
    Li = np.exp(-di.transpose().dot(Cdinv).dot(di)/2.)# - dm.transpose().dot(Cminv).dot(dm)/2./T0)
    while count < size:
        #print (count)
        j = i+1
        while True:
            # annealing schedule (currently unused in the acceptance test)
            T = T0*7/(count+7)
            mSample = np.random.multivariate_normal(mean=mlogPost, cov = CmlogPost)
            mj = K*np.exp(mSample)
            dj = G.dot(mj) - dobs
            dm = mSample - mlogPost
            #print("d.Cd.d",dj.transpose().dot(Cdinv).dot(dj))
            Lj = np.exp(-dj.transpose().dot(Cdinv).dot(dj)/2.)# - dm.transpose().dot(Cminv).dot(dm)/2./T)
            #print(Li,Lj)
            if Lj > Li:
                # always accept an uphill move
                Li = Lj
                count += 1
                postDist.append(mj)
                i = j
                break
            else:
                # accept a downhill move with probability Lj/Li
                if np.random.uniform() < Lj/Li:
                    Li = Lj
                    count += 1
                    postDist.append(mj)
                    i = j
                    break
            j += 1
    Cm = np.cov(postDist,rowvar=0)
    mML = np.mean(postDist,axis=0)
    return mML,Cm
def LMSol(G,mprior,Cd,Cm,dobs,mu=1.,octTree=None):
    """Damped Gauss-Newton (Levenberg-Marquardt-like) inversion in log space.

    Works with the reparameterization m = K*exp(mlog), K = mean(mprior), so
    the Jacobian of d = G.m wrt mlog is J = G*diag(m).  Each iteration solves
    the regularized normal equations via
        dm = Cm_log.J^t.(mu*Cd + J.Cm_log.J^t)^-1 . (g - dobs)
    and steps mlog -= dm*C, where C is a per-cell coverage-based step scale.
    Stops when the relative step size drops below 1e-8 or after 10000 iters.
    Returns (K*exp(mlog), posterior covariance mapped back to linear space).
    NOTE(review): the octTree is None branch pops a blocking histogram window.
    """
    import pylab as plt
    K = np.mean(mprior)
    mlog = np.log(mprior/K)
    Cm_log = transformCov2Log(Cm,K)#np.log(1. + Cm/K**2)#transformCov2Log(Cm,mprior)
    #Cdinv = np.linalg.pinv(Cd)
    if octTree is not None:
        # step scale from ray coverage per unit cell size
        voxels = getAllDecendants(octTree)
        scale = np.zeros(np.size(mprior))
        i = 0
        while i < np.size(mprior):
            scale[i] = voxels[i].volume**(1./3.)
            i+= 1
        C = np.sum(G,axis=0)/scale
        C = C/float(np.max(C))
        # avoid zero step in uncovered cells
        C[C==0] = np.min(C[C>0])/2.
    else:
        # step scale from ray-crossing counts
        C = np.sum(G>0,axis=0)
        plt.hist(C,bins=40)
        plt.show()
        C = C/float(np.max(C))
        C[C==0] = np.min(C[C>0])/2.
    #C = np.sum(G,axis=0)
    #C = C/np.max(C)
    # NOTE: `res` holds the residual vector inside the loop, then is reused
    # as the scalar relative step size for the stopping test
    res = 1
    iter = 0
    while res > 1e-8 and iter < 10000:
        #forward transform
        #print(mlog)
        mForward = K*np.exp(mlog)
        g = G.dot(mForward)
        J = G*mForward
        #residuals g - dobs -> -dm
        res = g - dobs
        #A1 = J.transpose().dot(Cdinv)
        #Cmlog_inv = A1.dot(J) + mu*Cm_log
        #dm,resi,rank,s = np.linalg.lstsq(Cmlog_inv,A1.dot(res))
        #S = mu Cd + J.Cm.J^t
        #S = int Ri Rj k^2 exp(m(x) + m(x')) sigma^2 exp(-|x-x'|/L) + Cd
        #K int dV Cm(x,x') J(x') del(i)
        P1 = Cm_log.dot(J.transpose())
        smooth = np.linalg.pinv(mu*Cd + J.dot(P1))
        dm = P1.dot(smooth).dot(res)
        res = np.sum(dm**2)/np.sum(mlog**2)
        print("Iter-{0} res: {1}".format(iter,res))
        #converage learn propto length of rays in cells
        #print(dm)
        mlog -= dm*C
        iter += 1
    # posterior covariance from the last linearization, mapped back to linear space
    CmlogPost = Cm_log - P1.dot(smooth).dot(P1.transpose())
    cmlin = transformCov2Linear(CmlogPost,K)
    #print(CmlogPost)
    #mMl,cmlin = metropolisPosteriorCovariance(G,dobs,Cd,CmlogPost,mlog,K)
    #print(mMl - K*np.exp(mlog))
    #print(transformCov2Linear(CmlogPost,K) - cmlin)
    return K*np.exp(mlog), cmlin
def invertTEC(infoFile,data_folder,timeStart = 0, timeEnd = 0,array_file='arrays/lofar.hba.antenna.cfg',load=False):
    '''Invert the 3d tec from data.
    timeStart, timeEnd inclusive.
    Puts the data into an ENU system then rotates the up to the mean direction vector
    before partitioning the system.

    Relies on module-level aliases/helpers (at/ac/au from astropy, ENU, Ray,
    Plane, rot, InversionCoordSys, intersectRayPlane, octree helpers, ...).
    NOTE(review): the rotated rays are computed and saved, but the inversion
    below uses the unrotated `rays` -- confirm intended.
    '''
    import glob
    from RadioArray import RadioArray
    dataFile = "TecInversionData.npz"
    generate = True
    if load:
        print("Loading:",dataFile)
        try:
            # reuse a previously assembled ray/data set if available
            TecData = np.load(dataFile)
            data = TecData['data']
            rotatedRays = TecData['rotatedRays']
            rays = TecData['rays']
            generate = False
        except:
            pass
    if generate:
        # --- assemble rays and TEC data from the raw per-patch solution files ---
        print("creating radio array")
        radio_array = RadioArray(array_file)
        print("creating coord sys")
        coordSys = InversionCoordSys(radio_array)
        coordSysSet = False
        enu = ENU(location=radio_array.get_center().earth_location)
        print("ENU system set: {0}".format(enu))
        meanDirection = np.zeros(3)
        numRays = 0
        #get patch names and directions for dataset
        info = np.load(infoFile)
        patches = info['patches']
        numPatches = len(patches)
        radec = info['directions']
        print("Loaded {0} patches".format(numPatches))
        #get array stations (shoud fold this into radio_array. todo)
        stationLabels = np.genfromtxt(array_file, comments='#',usecols = (4),dtype=type(""))
        stationLocs = np.genfromtxt(array_file, comments='#',usecols = (0,1,2))
        numStations = len(stationLabels)
        print("Number of stations in array: {0}".format(numStations))
        #assume all times and antennas are same in each datafile
        recievers = []
        num_time = (timeEnd - timeStart + 1)
        print("Number of time stamps: {0}".format(num_time))
        #each time gives a different direction for each patch
        num_dirs = num_time*numPatches
        print("Number of directions: {0}".format(num_dirs))
        data = []
        rays = []
        stationIndices = []
        timeIndices = []
        patchIndices = []
        skyPlane = Plane([0,0,1000],normal=[0,0,1])
        skyProj = []
        skyProjCoords = []
        patch_idx = 0
        failed = 0
        rayId = 0
        while patch_idx < numPatches:
            patch = patches[patch_idx]
            rd = radec[patch_idx]
            # expect exactly one solution file per patch
            files = glob.glob("{0}/*_{1}_*.npz".format(data_folder,patch))
            if len(files) == 1:
                file = files[0]
            else:
                print('Could not find patch: {0}'.format(patch))
                patch_idx += 1
                continue
            print("Loading data file: {0}".format(file))
            try:
                d = np.load(file)
            except:
                print("Failed loading data file: {0}".format(file))
                failed += 1
                patch_idx += 1
                continue
            antennas = d['antennas']
            times = d['times'][timeStart:timeEnd+1]
            tecData = d['data'][timeStart:timeEnd+1,:]#times x antennas
            time_idx = 0
            while time_idx < num_time:
                #dir_idx = i*num_time + j
                time = at.Time(times[time_idx],format='gps',scale='tai')
                print("Processing time: {0}".format(time.isot))
                frame = ac.AltAz(location=radio_array.get_center().earth_location,obstime=time)
                if not coordSysSet:
                    # fix the inversion frame to the first processed patch/time
                    print("fixing coord sys to first patch")
                    fixedDir = coordSys.getDirection(rd.ra.deg,rd.dec.deg,time)
                    coordSys.setFixedFrame(fixedDir)
                    coordSysSet = True
                #print(coordSys.altaz.alt)
                rayFrame = Ray([0,0,0],fixedDir)
                pointPOS = rayFrame.eval(1000)#1000km
                skyPlane = Plane(pointPOS,normal=rayFrame.dir)
                # get direction of patch at time wrt fixed frame
                dir = ac.SkyCoord(rd.ra,rd.dec,frame='icrs').transform_to(frame)
                print("Patch Alt: {0} Az: {1}".format(dir.alt.deg,dir.az.deg))
                dir = dir.transform_to(enu)
                meanDirection += dir.cartesian.xyz.value
                numRays += 1
                print("Patch east: {0} north: {1} up: {2}".format(dir.east,dir.north,dir.up))
                #dir = coordSys.getComponents(rd.ra.deg,rd.dec.deg,time)
                #xaxis,yaxis,zaxis = coordSys.getAxes()
                ant_idx = 0#index in solution table
                while ant_idx < len(antennas):
                    ant = antennas[ant_idx]
                    #find index in stationLabels
                    labelIdx = 0
                    while labelIdx < numStations:
                        if stationLabels[labelIdx] == ant:
                            break
                        labelIdx += 1
                    if labelIdx >= numStations:
                        # NOTE(review): `continue` here without ant_idx += 1 makes
                        # this an infinite loop if a station is ever missing
                        print("Could not find {0} in available stations: {1}".format(ant,stationLabels))
                        continue
                    #ITRS WGS84
                    stationLoc = ac.SkyCoord(*stationLocs[labelIdx]*au.m,frame='itrs').transform_to(enu)
                    origin = stationLoc.cartesian.xyz.to(au.km).value#/wavelength enu system
                    #print(origin)
                    rays.append(Ray(origin,dir.cartesian.xyz.value,id = rayId))
                    rayId += 1
                    data.append(tecData[time_idx,ant_idx])
                    skyProj.append(data[-1])
                    res,point = intersectRayPlane(rays[-1],skyPlane)
                    skyProjCoords.append(point)
                    stationIndices.append(labelIdx)
                    timeIndices.append(time_idx)
                    patchIndices.append(patch_idx)
                    ant_idx += 1
                time_idx += 1
            patch_idx += 1
        #rotate the rays and stations so that the mean direction points up
        meanDirection /= numRays
        #
        axis = np.cross(np.array([0,0,1]),meanDirection)
        angle = np.arccos(meanDirection.dot(np.array([0,0,1])))
        R = rot(axis,-angle)
        rotatedRays = []
        id = 0
        for ray in rays:
            origin = R.dot(ray.origin)
            dir = R.dot(ray.dir)
            rotatedRays.append(Ray(origin,dir,id=id))
            id += 1
        #print (rotatedRays[-1])
        np.savez(dataFile,rays=rays,rotatedRays=rotatedRays,
                data=data,stationIndices=stationIndices,
                timeIndices=timeIndices,patchIndices=patchIndices)
    #rotated rays and data are now fit for inversion
    print("Constructing the ionosphere")
    octTree = constructIonosphereModel(height=2000.,rays = rays,load=False)
    cleanRays(octTree)
    for ray in rays:
        forwardRay(ray,octTree)
    print("Pulling ray propagations.")
    G,mVar,mprior,x = generateModelFromOctree(octTree,len(rays))
    dataBase = G.dot(mprior)
    # offset observed TEC by the prior forward data; the 1e3 factor presumably
    # converts the solution units to the model units -- TODO confirm
    data = dataBase + data*1e3
    #avgBase = np.mean(dataBase)
    #print("Avg Base:",avgBase)
    #print("Avg Data:",np.mean(data))
    #print(np.sum(data==0))
    #data += avgBase
    #generate simple initial starting point
    print("Setting a priori model")
    mexact = []
    i = 0
    while i < x.shape[0]:
        mexact.append(ionosphereModel(x[i,:],dayTime=True,bump=True))
        i += 1
    mexact = np.array(mexact)
    print("Computing model 3d exponential covariance")
    Cmprior = compute3dExponentialCovariance(np.sqrt(np.mean(mVar)),30.,x,load=False)
    #generate simple initial starting point
    #mprior = np.ones_like(mexact)*initHomogeneousModel(G,dobs)
    #mprior = np.random.multivariate_normal(mean=mexact, cov = Cmprior)
    print("Computing observation covariance")
    # Monte-Carlo estimate of Cd by pushing 10 prior samples through G
    dobs = []
    for i in range(10):
        dobs.append(G.dot(np.abs(np.random.multivariate_normal(mean=mexact, cov = Cmprior))))
    dobs = np.array(dobs)
    Cd = np.cov(dobs.transpose())
    dobs = data
    print("Solving for model from rays:")
    #m,Cm = LinearSolution(dobs,G,Cd,Cmprior,mprior)
    #m,Cm = MetropolisSolution(G,dobs,Cd,Cmprior,mprior)
    #m = BerrymanSol(G,dobs,mprior=None,Cd=Cd,Cm=None,mu=0.00)
    #m,Cm = SteepestDescent(octTree,rays,dobs,Cd,Cmprior,mprior)
    np.savez("invertTECData.npz",G=G,mprior=mprior,Cd=Cd,Cmprior=Cmprior,dobs=dobs,octTree=octTree,rays=rays)
    m,Cm = LMSol(G,mprior,Cd,Cmprior,dobs,mu=1.0,octTree=None)
    mayaviPlot2(x,m,mBackground=mprior)
    # resolution diagnostics: trace(R) ~ degrees of freedom resolved by the data
    CmCm = Cm.dot(np.linalg.inv(Cmprior))
    R = np.eye(CmCm.shape[0]) - CmCm
    print("Resolved by dataSet:{0}, resolved by a priori:{1}".format(np.trace(R),np.trace(CmCm)))
if __name__=='__main__':
    # Synthetic end-to-end test: build a model, simulate rays/data, invert with LMSol.
    np.random.seed(1234)
    #invertTEC("/Users/josh/ownCloud/ionosphere/tomography/SB120-129/WendysBootes.npz",
    #         "/Users/josh/ownCloud/ionosphere/tomography/SB120-129",
    #         timeStart = 0,
    #         timeEnd = 0,
    #         array_file='arrays/lofar.hba.antenna.cfg',load=True)
    if True:
        print("Constructing ionosphere model")
        maxBaseline = 150.
        height=1000.
        rays = makeRaysFromSourceAndReciever(maxBaseline = maxBaseline,height=height,numSources=15,numRecievers=30)
        octTree = constructIonosphereModel(maxBaseline=maxBaseline,height=height,rays = rays)
        cleanRays(octTree)
        print("Propagating {0} rays".format(len(rays)))
        for ray in rays:
            forwardRay(ray,octTree)
        print("Pulling ray propagations.")
        G,mVar,mexact,x = generateModelFromOctree(octTree,len(rays))
        print("Computing model 3d exponential covariance")
        Cmprior = compute3dExponentialCovariance(np.sqrt(np.mean(mVar)),30.,x)
        #generate simple initial starting point
        print("Setting a priori model")
        # a priori: night-time, bump-free profile (the "exact" model has bumps)
        mprior = []
        i = 0
        while i < x.shape[0]:
            mprior.append(ionosphereModel(x[i,:],dayTime=False,bump=False))
            i += 1
        mprior = np.array(mprior)
        #mprior = np.ones_like(mexact)*initHomogeneousModel(G,dobs)
        #mprior = np.random.multivariate_normal(mean=mexact, cov = Cmprior)
        print("Computing observation covariance")
        # Monte-Carlo estimate of Cd from 10 prior samples pushed through G
        dobs = []
        for i in range(10):
            dobs.append(G.dot(np.abs(np.random.multivariate_normal(mean=mexact, cov = Cmprior))))
        dobs = np.array(dobs)
        Cd = np.cov(dobs.transpose())
        # noiseless synthetic observations from the exact model
        dobs = G.dot(mexact)
        print("Solving for model from rays:")
        #m,Cm = LinearSolution(dobs,G,Cd,Cmprior,mprior)
        #m,Cm = MetropolisSolution(G,dobs,Cd,Cmprior,mprior)
        #m = BerrymanSol(G,dobs,mprior=None,Cd=Cd,Cm=None,mu=0.00)
        #m,Cm = SteepestDescent(octTree,rays,dobs,Cd,Cmprior,mprior)
        m,Cm = LMSol(G,mprior,Cd,Cmprior,dobs,mu=1.0,octTree=None)
        #smoothify and plot
        #s = fp.SmoothVoxel(octTree)
        #model = s.smoothifyOctTree()
        #fp.plotCube(model ,-octTree.dx/2.,octTree.dx/2.,-octTree.dy/2.,octTree.dy/2.,0.,1000.,N=128,dx=None,dy=None,dz=None)
        mayaviPlot2(x,m,mBackground=None)
        mayaviPlot2(x,mexact,mBackground=None)
        # resolution diagnostics
        CmCm = Cm.dot(np.linalg.inv(Cmprior))
        R = np.eye(CmCm.shape[0]) - CmCm
        print("Resolved by dataSet:{0}, resolved by a priori:{1}".format(np.trace(R),np.trace(CmCm)))
        plot=False
        if plot:
            import pylab as plt
            plt.plot(m,label='res')
            plt.plot(mexact,label='ex')
            plt.plot(mprior,label='pri')
            # highlight poorly covered cells (fewer than 3 crossing rays)
            C = np.sum(G>0,axis=0)
            C = C < 3
            plt.scatter(np.arange(len(m))[C],m[C])
            plt.legend(frameon=False)
            plt.show()
        plotOctTreeXZ(octTree,ax=None)
        plotOctTree3D(octTree,model=m,rays=False)
# In[ ]:
|
Joshuaalbert/IonoTomo
|
src/ionotomo/geometry/oct_trees/Inversion.py
|
Python
|
apache-2.0
| 38,631
|
[
"Mayavi"
] |
f088a8af0c766ad57658d515e746e92e3e7dbe9b8deee7f4c9a066949b0ceb89
|
# This Python file uses the following encoding: utf-8
# !/usr/local/bin/python3.4
####################################################
# This file is part of MULLPY.
#
# MULLPY is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MULLPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MULLPY. If not, see <http://www.gnu.org/licenses/>.
####################################################
import copy
import pickle
import os
from mullpy.statistics import Statistics
import numpy as np
from mullpy.classifier_info import ClassifiersInfo
from mullpy.auxiliar import AutoVivification
####################################################
class Classifier:
    """
    Base class with functionality common to the different kinds of classifiers.
    Every classifier inherits from this class.
    It gives subclasses:
    a) A generic learning algorithm (core_learning)
    b) Some math helper functions (gaussian, sign, sigmoid, tanh and derivatives)
    c) A build-real-outputs function (real_outputs)
    d) Pickle-based persistence (save_config_file / load_config_file)
    """
    ####################################################
    def gaussian(self, base, width, input):
        """Return a gaussian radial function value.

        Computes exp(-(input - base)^2 / (2*width)); `width` acts as the
        variance of the bell.
        Args:
            base: center of the bell
            width: spread (variance-like) parameter
            input: evaluation point
        Returns:
            gaussian output value
        """
        y = np.exp(-1 / width / 2 * np.power(input - base, 2))
        return y
    ####################################################
    def sign(self, x):
        """Return -1, 0 or 1 according to the sign of x.

        NOTE(review): returns None implicitly if no branch matches (e.g. NaN).
        """
        if x < 0:
            return -1
        elif x > 0:
            return 1
        elif x == 0:
            return 0
    ####################################################
    def sigmoid(self, x):
        """Implementation of the sigmoid function (slope parameter p fixed to 1)."""
        p = 1.0
        return 1.0 / (1.0 + np.exp(-p * x))
    ####################################################
    def dsigmoid(self, x):
        """
        Sigmoid derivative, expressed in terms of the sigmoid OUTPUT x:
        p * x * (1 - x).
        """
        p = 1.0
        return p * (x - np.power(x, 2))
    ####################################################
    def tanh(self, y):
        """
        Hyperbolic tangent activation function.
        """
        return np.tanh(y)
    ####################################################
    def dtanh(self, y):
        """
        Tanh derivative, expressed in terms of the tanh OUTPUT y: 1 - y^2.
        """
        return 1.0 - np.square(y)
    ####################################################
    def real_outputs(self, context, classifier_name, patterns):
        """
        Every kind of sub-classifier must have a real_outputs function which return a list of real outputs given
        by input pattern named predict.

        In deployment mode the whole pattern is the input; otherwise the last
        len(classes_names) columns are the targets and are stripped off.
        """
        classes_texts = context["classifiers"][classifier_name]["classes_names"]
        if "deployment" in context["execution_kind"]:
            # if context["execution_kind"] == "deployment_classification":
            len_inputs = len(patterns[0])
        else:
            len_inputs = len(patterns[0]) - len(classes_texts)
        return [self.predict(context, classifier_name, p[:len_inputs]) for p in patterns]
    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """
        Core learning. Proceed as a generic learning process for every classifier that has no itself core learning
        process.
        Parameters:
            a)context
            b)classifier name
            c) Kwargs:
                -) "ensemble_error": In the case of negative correlation learning (NCL) procedure,
                the ensemble call to each classifier to learn during 1 epoch.
                The NCL needs the ensemble output to be part of the learning process of each classifier.
        Core learning calls to the learning process of each classifier, controlling the number of epochs and the
        objective error determined. At this moment, the core learning process stop when the sum of the learning RMS
        and the test RMS rise the objective error. If the total error increases, the weights of the best error achieved
        is recorded, recovering it at the end of the process.
        If "plot_interactive" is marked to one, a graphic is plotted and saved into a file with the same name
        of the classifier.
        """
        statistics = Statistics()
        information = ClassifiersInfo()
        objective_error = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]["objective_error"]
        epochs = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]["epochs"]
        if epochs > 1:
            # snapshot for early-stopping rollback
            best_classifier = copy.deepcopy(context["classifiers"][classifier_name]["instance"])
        i = 0
        early_stopping = context["classifiers"][classifier_name]["learning_algorithm"]["early_stopping"]["activate"]
        if context["classifiers"][classifier_name]["learning_algorithm"]["early_stopping"]["activate"]:
            # weights for combining learning/validation error in the stop criterion
            learning_percent = context["classifiers"][classifier_name]["learning_algorithm"]["early_stopping"][
                "learning"]
            validation_percent = context["classifiers"][classifier_name]["learning_algorithm"]["early_stopping"][
                "validation"]
        best_error = 10000000.
        iteration_best_error = 0
        validation_error = 10000000.
        if context["plot_training"]["activate"]:
            learning_accumulated_error = np.zeros(int(epochs / context["plot_training"]["times"]))
            validation_accumulated_error = np.zeros(int(epochs / context["plot_training"]["times"]))
        while validation_error >= objective_error and i < epochs:
            # NOTE(review): when "ensemble_error" is in kwargs but
            # "classifier_error" is not, the first branch runs AND the else
            # branch runs, so self.learning is called twice per epoch.
            # Probably the second `if` was meant to be `elif`.
            if "ensemble_error" in kwargs:
                learning_error = self.learning(context, classifier_name, ensemble_error=kwargs["ensemble_error"],
                                               epoch=i)
            if len(kwargs.keys()) and "ensemble_error" in kwargs and "classifier_error" in kwargs:
                learning_error = self.learning(context, classifier_name, ensemble_error=kwargs["ensemble_error"],
                                               epoch=i)
            else:
                learning_error = self.learning(context, classifier_name, epoch=i)
            if context["interactive"]["activate"] == 1 and i % context["interactive"]["epochs"] == 0 and i > 0:
                # periodic RMS evaluation on learning and validation sets
                information.build_real_outputs(context, classifier_name, "learning")
                statistics.rms(classifier_name, context, information, "learning")
                learning_error = statistics.measures[classifier_name]["rms"]["learning"]
                information.build_real_outputs(context, classifier_name, "validation")
                statistics.rms(classifier_name, context, information, "validation")
                validation_error = statistics.measures[classifier_name]["rms"]["validation"]
                print("%s: epoch:%d, learning_error:%f, validation_error:%f" %
                      (classifier_name, i + 1, learning_error, validation_error))
            if context["plot_training"]["activate"] == 1 and i % context["plot_training"]["times"] == 0:
                if context["interactive"]["activate"] == 1 and i % context["interactive"]["epochs"] == 0:
                    # errors already refreshed above; just record them
                    learning_accumulated_error[int(i / context["plot_training"]["times"])] = learning_error
                    validation_accumulated_error[int(i / context["plot_training"]["times"])] = validation_error
                else:
                    information.build_real_outputs(context, classifier_name, "learning")
                    statistics.rms(classifier_name, context, information, "learning")
                    learning_error = statistics.measures[classifier_name]["rms"]["learning"]
                    information.build_real_outputs(context, classifier_name, "validation")
                    statistics.rms(classifier_name, context, information, "validation")
                    validation_error = statistics.measures[classifier_name]["rms"]["validation"]
                    learning_accumulated_error[int(i / context["plot_training"]["times"])] = learning_error
                    validation_accumulated_error[int(i / context["plot_training"]["times"])] = validation_error
            #To give also relevance to the learning error
            if early_stopping:
                stop_criteria_error = learning_percent * learning_error + validation_percent * validation_error
            if early_stopping and stop_criteria_error < best_error and context[
                "execution_kind"] == "learning" and epochs > 1:
                # remember the best snapshot seen so far
                iteration_best_error = i
                best_error = stop_criteria_error
                best_classifier = copy.deepcopy(context["classifiers"][classifier_name]["instance"])
            i += 1
        #Return the value of the last epoch plus one
        if context["execution_kind"] != "NClearning":
            if context["interactive"]["activate"]:
                if early_stopping:
                    print("Best error of classifier {0}:{1} on epoch {2}".format(
                        classifier_name,
                        str(best_error),
                        str(iteration_best_error + 1)))
                    context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
                        "epochs"] = iteration_best_error + 1
                    # NOTE(review): key "objetive_error" is misspelled (elsewhere
                    # "objective_error" is used) -- confirm which key readers expect
                    context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
                        "objetive_error"] = best_error
                    # roll back to the best snapshot
                    context["classifiers"][classifier_name]["instance"] = best_classifier
                else:
                    information.build_real_outputs(context, classifier_name, "learning")
                    statistics.rms(classifier_name, context, information, "learning")
                    learning_error = statistics.measures[classifier_name]["rms"]["learning"]
                    information.build_real_outputs(context, classifier_name, "validation")
                    statistics.rms(classifier_name, context, information, "validation")
                    validation_error = statistics.measures[classifier_name]["rms"]["validation"]
                    print("Final error of classifier {0}: learning={1}, validation={2}".format(
                        classifier_name,
                        str(learning_error),
                        str(validation_error)))
        if context["plot_training"]["activate"]:
            from mullpy.presentations import Presentation
            Presentation().learning_graphic(
                context,
                classifier_name,
                learning_accumulated_error,
                validation_accumulated_error)
    ####################################################
    def save_config_file(self, context, classifier_name):
        """
        Pickle the classifier's whole configuration dict to its config_file path.
        """
        w = context["classifiers"][classifier_name]
        f = open(context["classifiers"][classifier_name]["config_file"], 'wb')
        pickle.dump(w, f, pickle.HIGHEST_PROTOCOL)
        # try:
        #     pickle.load(f)
        # except:
        #     print("Error saving the file of the classifier {0}. Not pickable".format(
        #         context["classifiers"][classifier_name]["config_file"]))
        f.close()
    ####################################################
    def load_config_file(self, context, classifier_name):
        """
        Recover the classifier's configuration dict from its pickled config_file.

        On a pickle-protocol mismatch (ValueError) the stale file is removed
        and a NameError asks the caller to restart; any other failure raises
        a NameError as well.  Returns the unpickled dict.
        """
        f = open(context["classifiers"][classifier_name]["config_file"], 'rb')
        try:
            w = pickle.load(f)
        except ValueError:
            import os
            print("Found a different pickle protocol and proceeding to remove the old/new by the present protocol.")
            os.remove(context["classifiers"][classifier_name]["config_file"])
            error = "Error loading the file of the classifier {0}. Just restart the process".format(classifier_name)
            raise NameError(error)
        except:
            raise NameError("Error loading the file of the classifier {0}".format(classifier_name))
        f.close()
        return w
####################################################
# class Neurolab(Classifier):
# """
# """
#
#
# ####################################################
#
#
# class Kohonen(Neurolab):
# """
#
# """
#
# def func(self):
# pass
#
# ####################################################
#
#
# class Elman(Neurolab):
# """
#
# """
#
# def func(self):
# pass
#
# ####################################################
#
#
# class Perceptron(Neurolab):
# """
#
# """
#
# def func(self):
# pass
#
# ####################################################
#
#
# class Lvq(Neurolab):
# """
#
# """
#
# def func(self):
# pass
#
# ####################################################
#
#
# class Hopfield(Neurolab):
# """
#
# """
#
# def func(self):
# pass
#
# ####################################################
#
#
# class Hemming(Neurolab):
# """
#
# """
#
# def func(self):
# pass
####################################################
# class MLP_(Neurolab):
# """
#
# """
#
# def __init__(self, context, classifier_name):
# import neurolab as nl
#
# self.ni = len(context["patterns"].patterns[classifier_name][context["patterns_texts"][0]][0][0])
# def_layers = context["classifiers"][classifier_name]["configuration"]["neurons"]
# self.no = len(context["classifiers"][classifier_name]["classes_names"])
#
# def_layers.insert(0, self.ni)
# def_layers.append(self.no)
#
# initialization_weight_values = [[-1, 1]] * self.ni
# transfer_function_array = \
# [getattr(nl.trans, context["classifiers"][classifier_name]["classifier_kind"]["transfer_function"])()] * \
# len(def_layers)
#
# layers = []
# for i, nn in enumerate(def_layers):
# layer_ci = def_layers[i - 1] if i > 0 else self.ni
# l = nl.layer.Perceptron(layer_ci, nn, transfer_function_array[i])
# l.initf = nl.init.initnw
# layers.append(l)
# connect = [[i - 1] for i in range(len(layers) + 1)]
# from neurolab import core
#
# self.classifier = core.Net(initialization_weight_values,
# self.no,
# layers,
# connect,
# context["classifiers"][classifier_name]["learning_algorithm"]["kind"],
# context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
# "error_function"])
#
# ####################################################
# def core_learning(self, context, classifier_name, **kwargs):
# inputs = list(context["patterns"].patterns[classifier_name]["learning"][:, 0])
# outputs = list(context["patterns"].patterns[classifier_name]["learning"][:, 1])
# validation = context["patterns"].patterns[classifier_name]["validation"]
# epochs = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]["epochs"]
# goal = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]["objective_error"]
# if context["interactive"]["activate"]:
# show = context["interactive"]["epochs"]
# else:
# show = 0
#
# self.classifier.train(inputs, outputs, epochs=epochs, show=show, goal=goal)
#
# ####################################################
# def predict(self, context, classifier_name, inputs):
# return [y for x in self.classifier.sim([inputs]) for y in x]
####################################################
class MLP(Classifier):
    """Feed-forward multilayer perceptron backed by the neurolab library."""

    def __init__(self, context, classifier_name):
        import neurolab as nl

        classifier_def = context["classifiers"][classifier_name]
        self.no = len(classifier_def["classes_names"])
        first_set = context["patterns_texts"][0]
        # Pattern rows carry inputs followed by the one-hot outputs.
        self.ni = len(context["patterns"].patterns[classifier_name][first_set][0]) - self.no
        # Frame the configured hidden-layer sizes with input/output sizes
        # (mutates the configuration list in place, as callers expect).
        def_layers = classifier_def["configuration"]["neurons"]
        def_layers.insert(0, self.ni)
        def_layers.append(self.no)
        weight_ranges = [[-1, 1]] * self.ni
        self.classifier = nl.net.newff(weight_ranges, def_layers)

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Train the network on the learning pattern set."""
        classifier_def = context["classifiers"][classifier_name]
        if "features_names" in classifier_def:
            len_inputs = len(classifier_def["features_names"])
        else:
            len_inputs = self.ni
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        net_inputs = learning_set[:, range(len_inputs)]
        net_outputs = learning_set[:, range(len_inputs, self.no + len_inputs)]
        train_params = classifier_def["learning_algorithm"]["parameters"]
        show = context["interactive"]["epochs"] if context["interactive"]["activate"] else 0
        self.classifier.train(net_inputs,
                              net_outputs,
                              epochs=train_params["epochs"],
                              show=show,
                              goal=train_params["objective_error"])

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Simulate the network on one pattern and flatten the result row."""
        return [value for row in self.classifier.sim([inputs]) for value in row]
####################################################
class Cutoff(Classifier):
    """Hand-crafted threshold rule over a fixed subset of input features."""

    def __init__(self, context, classifier_name):
        # Nothing to configure: the rule is hard-coded in predict.
        pass

    def predict(self, context, classifier_name, inputs):
        """Return [0., 1.] when the feature sum is below the cutoff, else [1., 0.]."""
        # Features 3, 5, 6 and 7 are summed and compared against the fixed
        # cutoff of 29 — presumably domain-tuned; verify against the dataset.
        score = inputs[3] + inputs[5] + inputs[6] + inputs[7]
        return [0., 1.] if score < 29 else [1., 0.]

    def core_learning(self, context, classifier_name, **kwargs):
        # No training step for a fixed rule.
        pass
####################################################
class Sklearn(Classifier):
    """
    Generic functions shared by the wrappers around scikit-learn estimators.
    """

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """
        Common prediction entry point: delegate to the wrapped estimator's
        own predict_proba and flatten the resulting rows into one list.
        """
        return [value for row in self.classifier.predict_proba([inputs]) for value in row]

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """
        Common fit entry point. Each learning pattern row holds the input
        features followed by one column per class; split them and fit the
        estimator (some libraries, like SVM, need contiguous float data).
        """
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        features = learning_set[:, range(len_inputs)]
        targets = learning_set[:, range(len_inputs, total_len)]
        self.classifier.fit(features, targets)
####################################################
class RandomForestRegressor(Sklearn):
    """Wrapper around scikit-learn's RandomForestRegressor ensemble."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import RandomForestRegressor
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = RandomForestRegressor(
            n_estimators=params["n_estimators"],
            criterion=params["criterion"],
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_features=params["max_features"],
            bootstrap=params["bootstrap"],
            random_state=None,
            compute_importances=None)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the regression prediction(s) for a single input pattern."""
        # Fixed: removed a leftover debug print(inputs) that polluted stdout
        # on every single prediction.
        return [x for x in self.classifier.predict([inputs])]

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on scalar targets taken from the first output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[0] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)
####################################################
class ExtraTreesClassifier(Sklearn):
    """Wrapper around scikit-learn's ExtraTreesClassifier ensemble."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import ExtraTreesClassifier
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = ExtraTreesClassifier(
            n_estimators=params["n_estimators"],
            criterion=params["criterion"],
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_features=params["max_features"],
            bootstrap=params["bootstrap"],
            random_state=None,
            compute_importances=None)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the second-column probability for each per-output block."""
        probabilities = self.classifier.predict_proba([inputs])
        return [row[1] for block in probabilities for row in block]
####################################################
class ExtraTreesRegressor(Sklearn):
    """Wrapper around scikit-learn's ExtraTreesRegressor ensemble."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import ExtraTreesRegressor
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = ExtraTreesRegressor(
            n_estimators=params["n_estimators"],
            criterion=params["criterion"],
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_features=params["max_features"],
            bootstrap=params["bootstrap"],
            random_state=None,
            compute_importances=None)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the regression prediction(s) for a single input pattern."""
        # Fixed: the old `[y[1] for x in predict(...) for y in x]` cannot work
        # on the 1-D array returned for the scalar targets core_learning fits
        # with (iterating a scalar raises TypeError). Now consistent with
        # RandomForestRegressor and GradientBoostingRegressor.
        return [x for x in self.classifier.predict([inputs])]

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on scalar targets taken from the first output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[0] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)
####################################################
class RandomTreesEmbedding(Sklearn):
    """Wrapper around scikit-learn's RandomTreesEmbedding transformer."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import RandomTreesEmbedding
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = RandomTreesEmbedding(
            n_estimators=params["n_estimators"],
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            random_state=None)
####################################################
class RandomForestClassifier(Sklearn):
    """Wrapper around scikit-learn's RandomForestClassifier ensemble."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import RandomForestClassifier
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = RandomForestClassifier(
            n_estimators=params["n_estimators"],
            criterion=params["criterion"],
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_features=params["max_features"],
            bootstrap=params["bootstrap"],
            random_state=None,
            compute_importances=None)

    ############################
    def predict(self, context, classifier_name, inputs):
        """Return the second-column probability for each per-output block."""
        probabilities = self.classifier.predict_proba([inputs])
        return [row[1] for block in probabilities for row in block]
####################################################
class GradientBoostingClassifier(Sklearn):
    """Wrapper around scikit-learn's GradientBoostingClassifier."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import GradientBoostingClassifier
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = GradientBoostingClassifier(
            loss=params["loss"],
            learning_rate=params["learning_rate"],
            n_estimators=params["n_estimators"],
            subsample=params["subsample"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_depth=params["max_depth"],
            max_features=params["max_features"])

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on targets taken from the second output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[1] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)

    ############################
    def predict(self, context, classifier_name, inputs):
        """Return the class-membership probabilities for a single pattern."""
        return self.classifier.predict_proba([inputs])[0]
####################################################
class GradientBoostingRegressor(Sklearn):
    """Wrapper around scikit-learn's GradientBoostingRegressor."""

    def __init__(self, context, classifier_name):
        from sklearn.ensemble import GradientBoostingRegressor
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = GradientBoostingRegressor(
            loss=params["loss"],
            learning_rate=params["learning_rate"],
            n_estimators=params["n_estimators"],
            subsample=params["subsample"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_depth=params["max_depth"],
            max_features=params["max_features"],
        )

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the regression prediction(s) for a single input pattern."""
        return list(self.classifier.predict([inputs]))

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on scalar targets taken from the first output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[0] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)
####################################################
class RadiusNeighborsClassifier(Sklearn):
    """Wrapper around scikit-learn's RadiusNeighborsClassifier."""

    def __init__(self, context, classifier_name):
        from sklearn.neighbors import RadiusNeighborsClassifier
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = RadiusNeighborsClassifier(
            radius=params["radius"],
            weights=params["weights"],
            algorithm=params["algorithm"],
            leaf_size=params["leaf_size"],
            p=params["p"],
            metric=params["metric"])

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first predicted label of each output row."""
        return [row[0] for row in self.classifier.predict([inputs])]
####################################################
class KNeighborsClassifier(Sklearn):
    """Wrapper around scikit-learn's KNeighborsClassifier."""

    def __init__(self, context, classifier_name):
        from sklearn.neighbors import KNeighborsClassifier
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = KNeighborsClassifier(
            n_neighbors=params["n_neighbors"],
            weights=params["weights"],
            algorithm=params["algorithm"],
            leaf_size=params["leaf_size"],
            p=params["p"],
            metric=params["metric"])

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the predicted labels for a single pattern as a list."""
        return list(self.classifier.predict([inputs])[0])
####################################################
class KNeighborsRegressor(Sklearn):
    """Wrapper around scikit-learn's KNeighborsRegressor."""

    def __init__(self, context, classifier_name):
        from sklearn.neighbors import KNeighborsRegressor
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = KNeighborsRegressor(
            n_neighbors=params["n_neighbors"],
            weights=params["weights"],
            algorithm=params["algorithm"],
            leaf_size=params["leaf_size"],
            p=params["p"],
            metric=params["metric"])

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first predicted target of each output row."""
        return [row[0] for row in self.classifier.predict([inputs])]
####################################################
class Gaussian(Sklearn):
    """Wrapper around scikit-learn's GaussianProcess estimator."""

    def __init__(self, context, classifier_name):
        from sklearn import gaussian_process
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = gaussian_process.GaussianProcess(
            regr=params["regr"],
            corr=params["corr"],
            normalize=params["normalize"],
            beta0=params["beta0"],
            theta0=params["theta0"],
            thetaL=params["thetaL"],
            thetaU=params["thetaU"])

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the process prediction for a single pattern as a list."""
        return list(self.classifier.predict([inputs])[0])
####################################################
class SVR(Sklearn):
    """Wrapper around scikit-learn's epsilon-Support Vector Regression."""

    ####################################################
    def __init__(self, context, classifier_name):
        from sklearn.svm import SVR
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = SVR(
            C=params["C"],
            cache_size=params["cache_size"],
            coef0=params["coef0"],
            degree=params["degree"],
            gamma=params["gamma"],
            kernel=params["kernel"],
            max_iter=params["max_iter"],
            probability=True,
            shrinking=params["shrinking"],
            tol=params["tol"],
            verbose=params["verbose"]
        )

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the regression prediction(s) for a single pattern."""
        return list(self.classifier.predict([inputs]))

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on scalar targets taken from the first output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[0] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)
####################################################
class NuSVC(Sklearn):
    """Wrapper around scikit-learn's Nu-Support Vector Classification."""

    ####################################################
    def __init__(self, context, classifier_name):
        from sklearn.svm import NuSVC
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = NuSVC(
            nu=params["nu"],
            cache_size=params["cache_size"],
            coef0=params["coef0"],
            degree=params["degree"],
            gamma=params["gamma"],
            kernel=params["kernel"],
            max_iter=params["max_iter"],
            probability=params["probability"],
            shrinking=params["shrinking"],
            tol=params["tol"],
            verbose=params["verbose"]
        )

    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on targets taken from the second output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[1] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return class-membership probabilities for a single pattern."""
        return list(self.classifier.predict_proba([inputs])[0])
####################################################
class SVM(Sklearn):
    """Wrapper around scikit-learn's C-Support Vector Classification (SVC)."""

    ####################################################
    def __init__(self, context, classifier_name):
        from sklearn.svm import SVC
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = SVC(
            C=params["C"],
            cache_size=params["cache_size"],
            class_weight=params["class_weight"],
            coef0=params["coef0"],
            degree=params["degree"],
            gamma=params["gamma"],
            kernel=params["kernel"],
            max_iter=params["max_iter"],
            probability=params["probability"],
            shrinking=params["shrinking"],
            tol=params["tol"],
            verbose=params["verbose"]
        )

    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on targets taken from the second output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[1] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return class-membership probabilities for a single pattern."""
        return list(self.classifier.predict_proba([inputs])[0])
####################################################
class GaussianNB(Sklearn):
    """Wrapper around scikit-learn's Gaussian Naive Bayes classifier."""

    def __init__(self, context, classifier_name):
        from sklearn import naive_bayes
        self.classifier = naive_bayes.GaussianNB()

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on targets taken from the second output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[1] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return class-membership probabilities for a single pattern."""
        return list(self.classifier.predict_proba([inputs])[0])
####################################################
class BernoulliNB(Sklearn):
    """Wrapper around scikit-learn's Bernoulli Naive Bayes classifier."""

    def __init__(self, context, classifier_name):
        from sklearn import naive_bayes
        self.classifier = naive_bayes.BernoulliNB()

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on targets taken from the second output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[1] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return class-membership probabilities for a single pattern."""
        return list(self.classifier.predict_proba([inputs])[0])
####################################################
class MultinomialNB(Sklearn):
    """Wrapper around scikit-learn's Multinomial Naive Bayes classifier."""

    def __init__(self, context, classifier_name):
        from sklearn import naive_bayes
        self.classifier = naive_bayes.MultinomialNB()

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on targets taken from the second output column."""
        classes_len = len(context["classifiers"][classifier_name]["classes_names"])
        learning_set = context["patterns"].patterns[classifier_name]["learning"]
        total_len = len(learning_set[0])
        len_inputs = total_len - classes_len
        inputs = learning_set[:, range(len_inputs)]
        outputs = [row[1] for row in learning_set[:, range(len_inputs, total_len)]]
        self.classifier.fit(inputs, outputs)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return class-membership probabilities for a single pattern."""
        return list(self.classifier.predict_proba([inputs])[0])
####################################################
class DTClassifier(Sklearn):
    """
    A decision tree classifier.
    """

    ####################################################
    def __init__(self, context, classifier_name):
        from sklearn import tree
        # Shortcut to this classifier's configured hyper-parameters.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = tree.DecisionTreeClassifier(
            criterion=params["criterion"],
            splitter="best",
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_features=params["max_features"],
            random_state=None,
            compute_importances=None)

    def predict(self, context, classifier_name, inputs):
        """Return the second-column probability for each per-output block."""
        probabilities = self.classifier.predict_proba([inputs])
        return [row[1] for block in probabilities for row in block]
#######################################################################
class ETClassifier(Sklearn):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.
    """

    def __init__(self, context, classifier_name):
        from sklearn import tree
        # Positional arguments match the old ExtraTreeClassifier signature:
        # criterion, max_depth, min_samples_split, min_samples_leaf,
        # min_density, max_features.
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = tree.ExtraTreeClassifier(
            params["criterion"],
            params["max_depth"],
            params["min_samples_split"],
            params["min_samples_leaf"],
            params["min_density"],
            params["max_features"],
        )

    def core_learning(self, context, classifier_name, **kwargs):
        """
        Unlike the generic Sklearn fit, this pattern layout stores the input
        vector in column 0 and the one-hot outputs in column 1; the outputs
        are converted to single class indices before fitting.
        """
        inputs = context["patterns"].patterns[classifier_name]["learning"][:, 0]
        # Vectorized construction of the dense float16 matrix — replaces the
        # old element-by-element Python copy loop (same values; assumes all
        # rows have equal length, as the old code did via len(inputs[0])).
        new_inputs = np.array(list(inputs), dtype=np.float16)
        outputs = [np.nonzero(x)[0][0] for x in context["patterns"].patterns[classifier_name]["learning"][:, 1]]
        self.classifier.fit(new_inputs, outputs)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Flatten the per-output probability arrays into a single list."""
        return [y for x in self.classifier.predict_proba([inputs]) for y in x]
#######################################################################
class DTR(Sklearn):
    """Decision-tree regressor wrapper around sklearn."""

    def __init__(self, context, classifier_name):
        from sklearn import tree
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = tree.DecisionTreeRegressor(
            criterion=params["criterion"],
            max_depth=params["max_depth"],
            min_samples_split=params["min_samples_split"],
            min_samples_leaf=params["min_samples_leaf"],
            max_features=params["max_features"],
        )

    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on the learning patterns; the target is the first output column."""
        learning = context["patterns"].patterns[classifier_name]["learning"]
        n_classes = len(context["classifiers"][classifier_name]["classes_names"])
        pattern_len = len(learning[0])
        n_inputs = pattern_len - n_classes
        features = learning[:, range(n_inputs)]
        targets = [row[0] for row in learning[:, range(n_inputs, pattern_len)]]
        self.classifier.fit(features, targets)

    def predict(self, context, classifier_name, inputs):
        """Return the predicted value(s) for a single input pattern as a list."""
        return list(self.classifier.predict([inputs]))
#######################################################################
class ETRegressor(Sklearn):
    """Extremely randomized tree regressor wrapper around sklearn."""

    def __init__(self, context, classifier_name):
        from sklearn import tree
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        # Positional arguments, in the order the constructor expects them.
        self.classifier = tree.ExtraTreeRegressor(
            params["criterion"],
            params["max_depth"],
            params["min_samples_split"],
            params["min_samples_leaf"],
            params["min_density"],
            params["max_features"],
        )

    def predict(self, context, classifier_name, inputs):
        """Return the first prediction row as a plain list."""
        return list(self.classifier.predict([inputs])[0])
#######################################################################
class LM_LinearRegression(Sklearn):
    """Ordinary least-squares linear regression wrapper."""

    def __init__(self, context, classifier_name):
        from sklearn import linear_model
        self.classifier = linear_model.LinearRegression()

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first prediction row as a plain list."""
        return list(self.classifier.predict([inputs])[0])
#######################################################################
class ARDRegression(Sklearn):
    """Bayesian ARD (automatic relevance determination) regression wrapper."""

    def __init__(self, context, classifier_name):
        from sklearn.linear_model import ARDRegression
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = ARDRegression(
            n_iter=params["n_iter"],
            tol=params["tol"],
            alpha_1=params["alpha_1"],
            alpha_2=params["alpha_2"],
            lambda_1=params["lambda_1"],
            lambda_2=params["lambda_2"],
        )

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first prediction row as a plain list."""
        return list(self.classifier.predict([inputs])[0])
#######################################################################
class BayesianRidge(Sklearn):
    """Bayesian ridge regression wrapper."""

    def __init__(self, context, classifier_name):
        from sklearn.linear_model import BayesianRidge
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        # NOTE: these configuration keys are spelled "alpha1"/"lambda1"
        # (no underscore), unlike the sklearn keyword names.
        self.classifier = BayesianRidge(
            n_iter=params["n_iter"],
            tol=params["tol"],
            alpha_1=params["alpha1"],
            alpha_2=params["alpha2"],
            lambda_1=params["lambda1"],
            lambda_2=params["lambda2"],
            fit_intercept=params["fit_intercept"],
            normalize=params["normalize"],
        )

    ####################################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Fit on the learning patterns; the target is the first output column."""
        learning = context["patterns"].patterns[classifier_name]["learning"]
        n_classes = len(context["classifiers"][classifier_name]["classes_names"])
        pattern_len = len(learning[0])
        n_inputs = pattern_len - n_classes
        features = learning[:, range(n_inputs)]
        targets = [row[0] for row in learning[:, range(n_inputs, pattern_len)]]
        self.classifier.fit(features, targets)

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return predictions for one input pattern as a list."""
        return list(self.classifier.predict([inputs]))
#######################################################################
class Ridge(Sklearn):
    """Ridge (L2-regularised) regression wrapper."""

    def __init__(self, context, classifier_name):
        from sklearn.linear_model import Ridge
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = Ridge(
            alpha=params["alpha"],
            copy_X=True,
            solver=params["solver"],
            tol=params["tol"],
        )

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first prediction row as a plain list."""
        return list(self.classifier.predict([inputs])[0])
#######################################################################
class Lasso(Sklearn):
    """Lasso (L1-regularised) regression wrapper."""

    def __init__(self, context, classifier_name):
        from sklearn.linear_model import Lasso
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = Lasso(
            alpha=params["alpha"],
            copy_X=True,
            tol=params["tol"],
        )

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first prediction row as a plain list."""
        return list(self.classifier.predict([inputs])[0])
#######################################################################
class ElasticNet(Sklearn):
    """Elastic-net (combined L1/L2) regression wrapper."""

    def __init__(self, context, classifier_name):
        from sklearn.linear_model import ElasticNet
        params = context["classifiers"][classifier_name]["learning_algorithm"]["parameters"]
        self.classifier = ElasticNet(
            alpha=params["alpha"],
            l1_ratio=params["l1_ratio"],
            tol=params["tol"],
        )

    ####################################################
    def predict(self, context, classifier_name, inputs):
        """Return the first prediction row as a plain list."""
        return list(self.classifier.predict([inputs])[0])
#######################################################################
class NN(Classifier):
    """
    Feed-forward neural network classifier.

    Topology: one input layer (with a bias node), optional hidden layers
    taken from the configuration, and one output node per class.  Weights
    are trained by backpropagation (plain, NCL or RNCL variants).
    """
    ####################################################
    def __init__(self, context, classifier_name):
        """
        Construct a Neural network defined by context param or load it from a data file, giving:
        -Neurons as a set of number of hidden neurons
        """
        # Otherwise initialize with the manual values
        if type(context["classifiers"][classifier_name]["configuration"]["neurons"]) is not list:
            raise ValueError("Neurons must be a list in NN. Define the number of neurons for each layer")
        # Input width is derived from the pattern length minus the number
        # of classes; +1 for bias node.
        patterns = context["patterns"].patterns[classifier_name][context["patterns_texts"][0]]
        len_classes = len(context["classifiers"][classifier_name]["classes_names"])
        len_inputs = len(patterns[0]) - len_classes
        self.ni = len_inputs + 1  # input nodes, including the bias node
        self.no = len_classes  # output nodes, one per class
        if len(context["classifiers"][classifier_name]["configuration"]["neurons"]):
            # Hidden layers configured: input -> hidden... -> output.
            self.layers = [self.ni]
            for element in context["classifiers"][classifier_name]["configuration"]["neurons"]:
                self.layers.append(element)
            self.layers.append(self.no)
        else:
            # No hidden layers: direct input -> output topology.
            self.layers = [self.ni, self.no]
        # activations for nodes (initialised to 1, so the bias node stays 1)
        self.a = []
        for number_layer, elements in enumerate(self.layers):
            self.a.append(np.ones(elements, dtype="f"))
        self.w = []
        # One layer minus of weights than activations.  Weights drawn
        # uniformly from +-sqrt(6/(ni+no)) — NOTE(review): Glorot-style
        # bound but based on global ni+no rather than per-layer fan-in /
        # fan-out; confirm this is intended.
        low_interval_initialization = -np.sqrt(6. / (self.ni + self.no))
        high_interval_initialization = np.sqrt(6. / (self.ni + self.no))
        for number_layer, elements in zip(range(len(self.layers) - 1), self.layers):
            self.w.append(np.random.uniform(low_interval_initialization,
                                            high_interval_initialization,
                                            (elements, self.layers[number_layer + 1])))
        if context["classifiers"][classifier_name]["learning_algorithm"]["kind"] == "backpropagate":
            # last change in weights for momentum
            self.c = []
            for number_layer, elements in zip(range(len(self.layers) - 1), self.layers):
                self.c.append(np.zeros((elements, self.layers[number_layer + 1]), dtype="f"))
    ####################################################
    def predict(self, context, classifier_name, inputs):
        """
        Return the activation output of the NN class.
        """
        if len(inputs) != self.ni - 1:
            raise NameError('wrong number of inputs')
        # input activations; the last input node is untouched and keeps its
        # initial value of 1 (bias)
        self.a[0][0:self.ni - 1] = inputs
        trans_function = context["classifiers"][classifier_name]["classifier_kind"]["transfer_function"]
        # hidden and output activations: propagate layer by layer.  The
        # transfer function is resolved by name on this instance.
        for number_layer in range(len(self.layers) - 1):
            res = np.dot(np.transpose(self.w[number_layer]), self.a[number_layer])
            elements = self.layers[number_layer + 1]
            self.a[number_layer + 1][0:elements] = getattr(self, trans_function)(res)[0:elements]
        return self.a[-1]
    ####################################################
    def backpropagate(self, context, classifier_name,
                      targets,
                      n,
                      m,
                      penalty_term=0.0, ensemble_evaluation=0.0, alpha=0.0):
        """
        Adjust weights matrix to the targets with momentum and Learning rate.

        :param targets: desired outputs for the most recent forward pass
        :param n: learning rate
        :param m: momentum factor
        :param penalty_term: NCL penalty strength (NClearning only)
        :param ensemble_evaluation: ensemble output used by the NCL/RNCL variants
        :param alpha: additive error offset used by the RNCL variant
        :returns: summed half-squared error of the output layer
        """
        if len(targets) != self.no:
            raise NameError('wrong number of target values')
        # calculate error terms for output, depending on the learning variant
        if context["execution_kind"] == "learning":
            # error_o = np.square(targets - self.a[-1])
            error_o = targets - self.a[-1]
        else:
            if context["execution_kind"] == "NClearning":
                # negative-correlation learning: penalise agreement with the ensemble
                error_o = targets - self.a[-1] - penalty_term * (ensemble_evaluation - self.a[-1])
            elif context["execution_kind"] == "RNClearning":
                error_o = targets - self.a[-1] - (ensemble_evaluation - self.a[-1]) + alpha
            else:
                raise ValueError("Error in learning process. Execution_kind is not well defined")
        errors = error_o
        transfer_function = context["classifiers"][classifier_name]["classifier_kind"]["transfer_function"]
        # calculate error terms walking backwards through the layers;
        # "d<transfer_function>" is the derivative of the transfer function
        for number_layer, elements in zip(reversed(range(len(self.layers) - 1)), reversed(self.layers)):
            delta = getattr(self, "d" + transfer_function)(self.a[number_layer + 1]) * errors
            change = delta * np.reshape(self.a[number_layer], (self.a[number_layer].shape[0], 1))
            # weight update: learning rate * gradient + momentum * previous change
            self.w[number_layer] = self.w[number_layer] + n * change + m * self.c[number_layer]
            self.c[number_layer] = change
            if number_layer > 0:
                # propagate the error one layer down (uses the updated weights)
                errors = np.dot(self.w[number_layer], delta)
        # calculate error
        return sum(0.5 * error_o ** 2)
    ####################################################
    def learning(self, context, classifier_name, **kwargs):
        """
        Make an epoch of the NN classifier.

        :returns: mean per-pattern error over the learning set
        """
        # Due to the Negative correlation learning: the ensemble error may
        # be supplied by the caller; otherwise default it to the targets.
        if len(kwargs.keys()) < 2:
            kwargs = AutoVivification()
            kwargs["ensemble_error"] = []
        if type(kwargs["ensemble_error"]) != np.ndarray:
            kwargs["ensemble_error"] = np.array(context["patterns"].patterns[classifier_name]["learning"][:, 1])
        error = 0.0
        for i, pattern in enumerate(context["patterns"].patterns[classifier_name]["learning"]):
            # pattern layout: first ni-1 values are inputs, the rest targets
            inputs = pattern[:self.ni - 1]
            self.predict(context, classifier_name, inputs)
            targets = pattern[self.ni - 1:]
            error += self.learning_functions_scheduler(context, classifier_name, targets, kwargs["ensemble_error"][i])
        return error / float(len(context["patterns"].patterns[classifier_name]["learning"]))
    ####################################################
    def learning_functions_scheduler(self, context, classifier_name, targets, ensemble_evaluation):
        # Dispatch to the configured learning algorithm.  Only
        # backpropagation is wired up; any other kind returns None.
        if context["classifiers"][classifier_name]["learning_algorithm"]["kind"] == "backpropagate":
            return self.backpropagate(context, classifier_name, targets,
                                      context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
                                          "learning_rate"],
                                      context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
                                          "momentum"],
                                      context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
                                          "penalty_term"],
                                      # context["classifiers"][classifier_name]["learning_algorithm"]["parameters"][
                                      # "alpha"],
                                      ensemble_evaluation)
#######################################################################
class Hybrid(Classifier):
    """
    An Hybrid classifier is comprised of a set of layers arranged sequentially.
    The first layer receives the inputs from the data; the output is given by
    the last layer.  Each layer has to be a classifier defined individually.
    """

    def __init__(self, context, classifier_name):
        # No classifier instance of its own: all work is delegated to the layers.
        self.classifier = None

    ####################################
    def predict(self, context, classifier_name, inputs):
        """Feed `inputs` through the chain of layer classifiers.

        The first layer receives the raw inputs; every subsequent layer
        receives the previous layer's outputs.
        """
        outputs = inputs
        for layer in context["classifiers"][classifier_name]["classifier_kind"]["Hybrid"]:
            outputs = context["classifiers"][layer]["instance"].predict(outputs)
        return outputs

    ####################################
    def core_learning(self, context, classifier_name, **kwargs):
        """Train every layer of the hybrid, in order.

        BUG FIX: the loop variable used to shadow the `classifier_name`
        parameter (also used in the loop's iterable expression), which was
        fragile and confusing; it is now a distinct name so the hybrid's own
        name remains intact throughout.
        """
        for layer_name in context["classifiers"][classifier_name]["classifier_kind"]["Hybrid"]:
            context["classifiers"][layer_name]["instance"].core_learning(context, layer_name, **kwargs)
#######################################################################
class Grossberg(Classifier):
    """Grossberg output layer built on top of a SOM (predict/learning are stubs)."""

    def __init__(self, context, classifier_name):
        # create Grossberg output layer
        config = context["classifiers"][classifier_name]["configuration"]
        inputs = config["neurons"][0]
        weight_shape = list(config["neurons"])
        outputs = len(context["classifiers"][classifier_name]["classes_names"])
        grossberg_activation_func = \
            context["classifiers"][classifier_name]["classifier_kind"]["transfer_function"]["grossberg"]
        # Each output neuron receives a signal from every SOM element, so the
        # number of outputs is prepended to the weight-tensor shape.
        weight_shape.insert(0, outputs)
        self.w = np.random.uniform(-1, 1, tuple(weight_shape))

    def predict(self, context, classifier_name, inputs):
        # Placeholder: one flat zero activation per class.
        n_classes = len(context["classifiers"][classifier_name]["classes_names"])
        return [0.0 for _ in range(n_classes)]

    def learning(self, context, classifier_name):
        # Placeholder: learning is not implemented yet.
        return 0.0
#######################################################################
class Classifier_from_file(Classifier):
    """
    A classifier defined by the outputs on a file.

    Each config-file line holds the input values followed by the class
    outputs; `predict` is then a plain lookup keyed by the inputs'
    string representation.
    """

    def __init__(self):
        """
        Init the structure for the outputs.
        """
        # Maps str(inputs) -> list of output values read from the file.
        self.output = AutoVivification()

    def load_config_file(self, context, classifier_name):
        """Parse the config file into the ``self.output`` lookup table.

        Output values are remapped into the classifier's configured pattern
        range ("[-1,1]" or "[0,1]").

        :raises ValueError: if the configured range is neither of the two
            supported values.
        """
        if os.path.isfile(context["classifiers"][classifier_name]["config_file"]):
            n_classes = len(context["classifiers"][classifier_name]["classes_names"])
            pattern_range = context["classifiers"][classifier_name]["patterns"]["range"]
            # BUG FIX: the file handle was never closed; use a context manager.
            with open(context["classifiers"][classifier_name]["config_file"], "r") as f:
                for line in f:
                    res = list(
                        map(float, line.replace("\t", context["patterns_separator"]).replace("\n", "").split()))
                    inputs = res[:len(res) - n_classes]
                    raw_outputs = res[len(res) - n_classes:]
                    if pattern_range == "[-1,1]":
                        outputs = [-1.0 if x == 0 else x for x in raw_outputs]
                    elif pattern_range == "[0,1]":
                        outputs = [0.0 if x == -1.0 else x for x in raw_outputs]
                    else:
                        # BUG FIX: an unexpected range used to leave `outputs`
                        # unbound (or silently reuse the previous line's
                        # value); fail loudly instead.
                        raise ValueError("Unknown patterns range: %s" % pattern_range)
                    self.output[str(inputs)] = outputs

    ####################################################
    def predict(self, inputs):
        """Return the stored outputs for `inputs` (an AutoVivification lookup,
        so unknown inputs yield an empty branch rather than a KeyError)."""
        return self.output[str(inputs)]
#######################################################################
#######################################################################
#######################################################################
#######################################################################
|
enanablancaynumeros/mullpy
|
mullpy/classifiers.py
|
Python
|
mit
| 70,005
|
[
"Gaussian",
"NEURON"
] |
928c26c7fa29bec6a74060f069e5dcad416e3059ac340c5e225dbeea46cb3559
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Use callback method to save CASSCF orbitals in each iteration.
See also pyscf/examples/scf/24-callback.py
'''
import numpy
from pyscf import gto, scf, mcscf
# Hydrogen fluoride molecule in a cc-pVDZ basis.
mol = gto.M(
    atom = [
        ["F", (0., 0., 0.)],
        ["H", (0., 0., 1.6)],],
    basis = 'cc-pvdz')
# Restricted Hartree-Fock reference for the subsequent CASSCF calculation.
mf = scf.RHF(mol)
mf.kernel()
# 6 active orbitals, 4 alpha, 2 beta electrons
mc = mcscf.CASSCF(mf, 6, (4,2))
def save_mo_coeff(envs):
    """CASSCF callback: dump the current MO coefficients to a .npy file
    on every third macro iteration (imacro = 2, 5, 8, ...).

    :param dict envs: the solver's local environment, providing 'imacro',
        'imicro' and 'mo_coeff'.
    """
    macro = envs['imacro']
    micro = envs['imicro']
    if macro % 3 != 2:
        return
    fname = 'mcscf-mo-%d-%d.npy' % (macro+1, micro+1)
    print('Save MO of step %d-%d in file %s' % (macro+1, micro+1, fname))
    numpy.save(fname, envs['mo_coeff'])
# Register the callback so orbitals are saved during the CASSCF optimisation.
mc.callback = save_mo_coeff
mc.kernel()
# Read one of the saved orbitals for the initial guess for new calculation
mc = mcscf.CASSCF(mf, 6, (4,2))
mo = numpy.load('mcscf-mo-6-2.npy')
mc.kernel(mo)
|
gkc1000/pyscf
|
examples/mcscf/24-callback.py
|
Python
|
apache-2.0
| 943
|
[
"PySCF"
] |
f5de79d6061bd316eeff51e62a06db523356e5d13264b350a2019f9192a07802
|
#!/bin/env python
""" script to obtain release notes from DIRAC PRs
"""
from collections import defaultdict
from datetime import datetime, timedelta
import argparse
from pprint import pformat
import logging
import textwrap
import requests
try:
from GitTokens import GITHUBTOKEN
except ImportError:
raise ImportError(textwrap.dedent("""
***********************
Failed to import GITHUBTOKEN please!
Point the pythonpath to your GitTokens.py file which contains
your "Personal Access Token" for Github
I.e.:
Filename: GitTokens.py
Content:
```
GITHUBTOKEN = "e0b83063396fc632646603f113437de9"
```
(without the triple quotes)
***********************
"""),
)
SESSION = requests.Session()
SESSION.headers.update({'Authorization': "token %s " % GITHUBTOKEN})
logging.basicConfig(level=logging.WARNING, format='%(levelname)-5s - %(name)-8s: %(message)s')
LOGGER = logging.getLogger('GetReleaseNotes')
def req2Json(url, parameterDict=None, requestType='GET'):
    """Call the github API through the shared requests session.

    :param str url: full API url to call
    :param dict parameterDict: json payload for the request
    :param str requestType: HTTP verb, e.g. 'GET' or 'POST'
    :returns: decoded json response
    :raises RuntimeError: if the API does not answer 200/201
    """
    log = LOGGER.getChild("Requests")
    log.debug("Running %s with %s ", requestType, parameterDict)
    method = getattr(SESSION, requestType.lower())
    req = method(url, json=parameterDict)
    if req.status_code not in (200, 201):
        log.error("Unable to access API: %s", req.text)
        raise RuntimeError("Failed to access API")
    log.debug("Result obtained:\n %s", pformat(req.json()))
    return req.json()
def getCommands(*args):
    """Flatten arbitrarily nested tuples/lists of strings into one list.

    :param args: strings, or (nested) tuples/lists of strings
    :returns: flat list of strings
    """
    flattened = []
    for entry in args:
        if isinstance(entry, (tuple, list)):
            # Recurse into nested containers.
            flattened += getCommands(*entry)
        else:
            flattened.append(entry)
    return flattened
def checkRate():
    """Query github's rate-limit endpoint and log the remaining call budget."""
    rate = req2Json(url="https://api.github.com/rate_limit")
    remaining = rate['rate']['remaining']
    limit = rate['rate']['limit']
    LOGGER.getChild("Rate").info("Remaining calls to github API are %s of %s",
                                 remaining, limit)
def _parsePrintLevel(level):
"""Translate debug count to logging level."""
level = level if level <= 2 else 2
return [logging.WARNING,
logging.INFO,
logging.DEBUG,
][level]
def getFullSystemName(name):
    """Translate a DIRAC system abbreviation to its full name.

    Unknown names are returned unchanged.
    """
    # NOTE(review): 'StorageManagamentSystem' looks misspelled upstream;
    # kept as-is to preserve the produced release notes byte-for-byte.
    abbreviations = {
        'API': 'Interfaces',
        'AS': 'AccountingSystem',
        'CS': 'ConfigurationSystem',
        'Config': 'ConfigurationSystem',
        'Configuration': 'ConfigurationSystem',
        'DMS': 'DataManagementSystem',
        'DataManagement': 'DataManagementSystem',
        'FS': 'FrameworkSystem',
        'Framework': 'FrameworkSystem',
        'MS': 'MonitoringSystem',
        'Monitoring': 'MonitoringSystem',
        'RMS': 'RequestManagementSystem',
        'RequestManagement': 'RequestManagementSystem',
        'RSS': 'ResourceStatusSystem',
        'ResourceStatus': 'ResourceStatusSystem',
        'SMS': 'StorageManagamentSystem',
        'StorageManagement': 'StorageManagamentSystem',
        'TS': 'TransformationSystem',
        'TMS': 'TransformationSystem',
        'Transformation': 'TransformationSystem',
        'WMS': 'WorkloadManagementSystem',
        'Workload': 'WorkloadManagementSystem',
    }
    return abbreviations.get(name, name)
def parseForReleaseNotes(commentBody):
    """Extract the text between BEGINRELEASENOTES and ENDRELEASENOTES.

    :param str commentBody: full PR description text
    :returns: the enclosed note text, or '' when either tag is missing
    """
    startTag = "BEGINRELEASENOTES"
    endTag = "ENDRELEASENOTES"
    if startTag not in commentBody or endTag not in commentBody:
        return ''
    return commentBody.split(startTag)[1].split(endTag)[0]
def collateReleaseNotes(prs):
    """Put the release notes in the proper order.

    :param dict prs: branch -> {prID: dict(comment=..., mergeDate=...)}
    :returns: formatted release-notes string, one "[branch]" section per
        base branch, with changes grouped per system

    FIXME: Tag numbers could be obtained by getting the last tag with a name
    similar to the branch, will print out just the base branch for now.
    """
    releaseNotes = ""
    # BUG FIX: `iteritems()` is Python-2-only; `items()` iterates the same
    # pairs on Python 2 and keeps this function usable under Python 3.
    for baseBranch, pr in prs.items():
        releaseNotes += "[%s]\n\n" % baseBranch
        systemChangesDict = defaultdict(list)
        for prid, content in pr.items():
            notes = content['comment']
            system = ''
            for line in notes.splitlines():
                line = line.strip()
                if line.startswith("*"):
                    # "*System:" header line selects the current system bucket.
                    system = getFullSystemName(line.strip("*:").strip())
                elif line:
                    # Prefix "TAG: text" lines with the PR number.
                    splitline = line.split(":", 1)
                    if splitline[0] == splitline[0].upper() and len(splitline) > 1:
                        line = "%s: (#%s) %s" % (splitline[0], prid, splitline[1].strip())
                    systemChangesDict[system].append(line)
        for system, changes in systemChangesDict.items():
            if system:
                releaseNotes += "*%s\n\n" % system
            releaseNotes += "\n".join(changes)
            releaseNotes += "\n\n"
        releaseNotes += "\n"
    return releaseNotes
class GithubInterface(object):
    """Object to make calls to github API."""

    def __init__(self, owner='DiracGrid', repo='Dirac'):
        """Set default values to parse release notes for DIRAC.

        :param str owner: github organisation/user owning the repository
        :param str repo: repository name
        """
        self.owner = owner
        self.repo = repo
        # Branches whose merged PRs are collected by default.
        self.branches = ['Integration', 'rel-v6r19', 'rel-v6r20']
        self.openPRs = False
        # Only PRs merged on/after this date are kept (default: two weeks back).
        self.startDate = str(datetime.now() - timedelta(days=14))[:10]
        self.printLevel = logging.WARNING
        LOGGER.setLevel(self.printLevel)

    @property
    def _options(self):
        """Return options dictionary."""
        return dict(owner=self.owner, repo=self.repo)

    def parseOptions(self):
        """Parse the command line options."""
        log = LOGGER.getChild('Options')
        parser = argparse.ArgumentParser("Dirac Release Notes",
                                         formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument("--branches", action="store", default=self.branches,
                            dest="branches", nargs='+',
                            help="branches to get release notes for")
        parser.add_argument("--date", action="store", default=self.startDate, dest="startDate",
                            help="date after which PRs are checked, default (two weeks ago): %s" % self.startDate)
        parser.add_argument("--openPRs", action="store_true", dest="openPRs", default=self.openPRs,
                            help="get release notes for open (unmerged) PRs, for testing purposes")
        parser.add_argument("-d", "--debug", action="count", dest="debug", help="d, dd, ddd", default=0)
        parser.add_argument("-r", "--repo", action="store", dest="repo", help="Repository to check: [Group/]Repo",
                            default='DiracGrid/Dirac')
        parsed = parser.parse_args()
        # The -d count translates directly into the logging verbosity.
        self.printLevel = _parsePrintLevel(parsed.debug)
        LOGGER.setLevel(self.printLevel)
        self.branches = parsed.branches
        log.info('Getting PRs for: %s', self.branches)
        self.startDate = parsed.startDate
        log.info('Starting from: %s', self.startDate)
        self.openPRs = parsed.openPRs
        log.info('Also including openPRs?: %s', self.openPRs)
        # "--repo" may be "Repo" or "Owner/Repo".
        repo = parsed.repo
        repos = repo.split('/')
        if len(repos) == 1:
            self.repo = repo
        elif len(repos) == 2:
            self.owner = repos[0]
            self.repo = repos[1]
        else:
            raise RuntimeError("Cannot parse repo option: %s" % repo)

    def _github(self, action):
        """Return the url to perform actions on github.

        :param str action: command to use in the gitlab API, see documentation there
        :returns: url to be used
        """
        log = LOGGER.getChild('GitHub')
        options = dict(self._options)
        options["action"] = action
        ghURL = "https://api.github.com/repos/%(owner)s/%(repo)s/%(action)s" % options
        log.debug('Calling: %s', ghURL)
        return ghURL

    def getGithubPRs(self, state="open", mergedOnly=False, perPage=100):
        """Get all PullRequests from github.

        :param str state: state of the PRs, open/closed/all, default open
        :param bool mergedOnly: if PR has to be merged, only sensible for state=closed
        :param int perPage: github pagination size
        :returns: list of githubPRs
        """
        url = self._github("pulls?state=%s&per_page=%s" % (state, perPage))
        prs = req2Json(url=url)
        if not mergedOnly:
            return prs
        # only merged PRs
        prsToReturn = []
        for pr in prs:
            if pr.get('merged_at', None) is not None:
                prsToReturn.append(pr)
        return prsToReturn

    def getNotesFromPRs(self, prs):
        """Loop over prs, get base branch, get PR comment and collate into dictionary.

        :returns: dict of branch:dict(#PRID, dict(comment, mergeDate))
        """
        rawReleaseNotes = defaultdict(dict)
        for pr in prs:
            # Base label looks like "DiracGrid:branchname"; strip the owner.
            # NOTE(review): hard-codes the "DiracGrid:" prefix even when
            # --repo selects a different owner — confirm.
            baseBranch = pr['base']['label'][len("DiracGrid:"):]
            if baseBranch not in self.branches:
                continue
            comment = parseForReleaseNotes(pr['body'])
            prID = pr['number']
            # Unmerged PRs sort last ('9999-99-99') so they pass the date cut.
            mergeDate = pr.get('merged_at', None)
            mergeDate = mergeDate if mergeDate is not None else '9999-99-99'
            if mergeDate[:10] < self.startDate:
                continue
            rawReleaseNotes[baseBranch].update({prID: dict(comment=comment, mergeDate=mergeDate)})
        return rawReleaseNotes

    def getReleaseNotes(self):
        """Create the release notes and print them to stdout."""
        if self.openPRs:
            prs = self.getGithubPRs(state='open', mergedOnly=False)
        else:
            prs = self.getGithubPRs(state='closed', mergedOnly=True)
        prs = self.getNotesFromPRs(prs)
        releaseNotes = collateReleaseNotes(prs)
        # NOTE: Python-2 print statement; this script targets Python 2.
        print releaseNotes
        checkRate()
# Script entry point: parse CLI options, then print the collated release
# notes; any RuntimeError is reported and mapped to exit code 1.
if __name__ == "__main__":
    RUNNER = GithubInterface()
    try:
        RUNNER.parseOptions()
    except RuntimeError as e:
        LOGGER.error("Error during argument parsing: %s", e)
        exit(1)
    try:
        RUNNER.getReleaseNotes()
    except RuntimeError as e:
        LOGGER.error("Error during runtime: %s", e)
        exit(1)
|
andresailer/DIRAC
|
docs/Tools/GetReleaseNotes.py
|
Python
|
gpl-3.0
| 9,834
|
[
"DIRAC"
] |
3a4de5f8cc827262e3b1d060be8ce703db05c36bc19da642b23c1ea3c6f1a35b
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the SurveyQuestion.statistic FK column."""
        # Deleting field 'SurveyQuestion.statistic'
        db.delete_column(u'survey_surveyquestion', 'statistic_id')
def backwards(self, orm):
# Adding field 'SurveyQuestion.statistic'
db.add_column(u'survey_surveyquestion', 'statistic',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['statistics.Statistic'], null=True, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flow_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'survey.surveyquestion': {
'Meta': {'ordering': "['order', 'id']", 'unique_together': "[('survey', 'label')]", 'object_name': 'SurveyQuestion'},
'categories': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'designation': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '8'}),
'for_display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'question_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.surveyquestionresponse': {
'Meta': {'unique_together': "[('visit', 'question')]", 'object_name': 'SurveyQuestionResponse'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.SurveyQuestion']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Visit']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey']
|
myvoice-nigeria/myvoice
|
myvoice/survey/migrations/0005_auto__del_field_surveyquestion_statistic.py
|
Python
|
bsd-2-clause
| 12,627
|
[
"VisIt"
] |
781ddfd92160c9b669811fefb9ea2a32720c5775e612661de63dd79be66b863a
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import jinja2
import os
import tusers
# Jinja2 environment: templates are resolved relative to this file's
# directory, with autoescaping enabled for safe HTML output.
JINJA_ENVIRONMENT = jinja2.Environment(
    autoescape=True,
    extensions=['jinja2.ext.autoescape'],
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class MainHandler(webapp2.RequestHandler):
    """Serve the index page to logged-in users; send everyone else to login."""

    def get(self):
        user = tusers.get_current_user()
        if not user:
            # Anonymous visitor: bounce through the login flow and back here.
            self.redirect(tusers.create_login_url(self.request.uri))
            return
        context = {
            'user': user,
            'logout': tusers.create_logout_url('/'),
        }
        template = JINJA_ENVIRONMENT.get_template('view/index.html')
        self.response.write(template.render(context))
# WSGI application: routes the site root to MainHandler.
# NOTE(review): debug=True exposes stack traces to clients — presumably
# intended for development only; confirm before deploying.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
|
sarrionandia/tournatrack
|
main.py
|
Python
|
apache-2.0
| 1,267
|
[
"Brian"
] |
f0668d5cc9f44d844fbc2d9591eddd663c16fdfff10a6186a8095228b5ec8bca
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import decimal
import os
import pytest
import selenium
from selenium.webdriver.common.keys import Keys
from shuup.core.models import ShipmentStatus
from shuup.testing.browser_utils import (
click_element,
initialize_admin_browser_test,
move_to_element,
wait_until_appeared,
wait_until_condition,
)
from shuup.testing.factories import (
create_order_with_product,
get_default_product,
get_default_shop,
get_default_supplier,
)
from shuup.utils.django_compat import reverse
from shuup.utils.i18n import format_money
# Skip this whole module unless browser tests are explicitly enabled via env var.
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
@pytest.mark.django_db
def test_refunds(browser, admin_user, live_server, settings):
    """Admin refund flow: toolbar visibility, full refund, and the refund form view."""
    unit_price = decimal.Decimal("10")
    order = create_order_with_product(
        get_default_product(), get_default_supplier(), 10, unit_price, n_lines=10, shop=get_default_shop()
    )
    order2 = create_order_with_product(
        get_default_product(), get_default_supplier(), 10, unit_price, n_lines=10, shop=get_default_shop()
    )
    # The second order is paid up front so the refund view is reachable for it.
    order2.create_payment(order2.taxful_total_price)
    initialize_admin_browser_test(browser, live_server, settings)
    _test_toolbar_visibility(browser, live_server, order)
    _test_create_full_refund(browser, live_server, order)
    _test_refund_view(browser, live_server, order2)
def _check_create_refund_link(browser, order, present):
    """Wait until the create-refund link's presence matches *present*."""
    selector = "a[href='%s']" % reverse("shuup_admin:order.create-refund", kwargs={"pk": order.pk})
    wait_until_condition(browser, lambda b: b.is_element_present_by_css(selector) == present)
def _check_order_details_visible(browser):
    """Block until the order details panel has rendered."""
    wait_until_condition(browser, lambda b: b.is_element_present_by_id("order_details"))
def _test_toolbar_visibility(browser, live_server, order):
    """The create-refund link should only appear once the order has been paid."""
    detail_url = "%s%s" % (live_server, reverse("shuup_admin:order.detail", kwargs={"pk": order.pk}))

    # Unpaid order: no refund link yet.
    browser.visit(detail_url)
    _check_order_details_visible(browser)
    _check_create_refund_link(browser, order, False)

    # Pay in full and revisit: the link shows up.
    order.create_payment(order.taxful_total_price)
    browser.visit(detail_url)
    _check_order_details_visible(browser)
    _check_create_refund_link(browser, order, True)
def _test_create_full_refund(browser, live_server, order):
    """Issue a full refund via the dedicated page and verify the resulting order state."""
    zero_amount = format_money(order.shop.create_price("0.00"))
    total_amount = format_money(order.taxful_total_price)

    refund_url = reverse("shuup_admin:order.create-refund", kwargs={"pk": order.pk})
    browser.visit("%s%s" % (live_server, refund_url))
    wait_until_condition(browser, lambda b: b.is_text_present("Refunded: %s" % zero_amount))
    wait_until_condition(browser, lambda b: b.is_text_present("Remaining: %s" % total_amount))

    full_refund_url = reverse("shuup_admin:order.create-full-refund", kwargs={"pk": order.pk})
    click_element(browser, "a[href='%s']" % full_refund_url)
    wait_until_condition(browser, lambda b: b.is_text_present("Refund Amount: %s" % total_amount))
    click_element(browser, "#create-full-refund")

    # After the refund the link disappears and the order is fully settled.
    _check_create_refund_link(browser, order, False)
    _check_order_details_visible(browser)
    order.refresh_from_db()
    assert not order.taxful_total_price
    assert order.is_paid()
    assert not order.is_fully_shipped()
    assert not order.shipments.exists()
def _test_refund_view(browser, live_server, order):
    """Exercise the refund form: add an arbitrary-amount line plus a product line,
    submit, and verify the order ends up paid with zero remaining total.

    The select2 interactions here are timing-sensitive; the statement order and
    the retry hack below must be preserved.
    """
    url = reverse("shuup_admin:order.create-refund", kwargs={"pk": order.pk})
    browser.visit("%s%s" % (live_server, url))
    wait_until_condition(
        browser, lambda x: x.is_text_present("Refunded: %s" % format_money(order.shop.create_price("0.00")))
    )
    # Line selector options: blank + "arbitrary amount" + the 10 order lines.
    assert len(browser.find_by_css("#id_form-0-line_number option")) == 12  # blank + arbitrary amount + num lines
    try:
        click_element(browser, "#select2-id_form-0-line_number-container")
        wait_until_appeared(browser, "input.select2-search__field")
    except selenium.common.exceptions.TimeoutException as e:
        # For some reason first click happen before the element is not ready so
        # let's re-click when timeout happens. The actual functionality seem
        # to work nicely.
        click_element(browser, "#select2-id_form-0-line_number-container")
        wait_until_appeared(browser, "input.select2-search__field")
    wait_until_appeared(browser, ".select2-results__option[aria-selected='false']")
    browser.execute_script('$($(".select2-results__option")[1]).trigger({type: "mouseup"})')  # select arbitrary amount
    # Fill in a 900 arbitrary-amount refund line.
    wait_until_condition(browser, lambda x: len(x.find_by_css("#id_form-0-text")))
    wait_until_condition(browser, lambda x: len(x.find_by_css("#id_form-0-amount")))
    browser.find_by_css("#id_form-0-text").first.value = "test"
    browser.find_by_css("#id_form-0-amount").first.value = "900"
    move_to_element(browser, "#add-refund")
    click_element(browser, "#add-refund")
    # New line starts here...
    move_to_element(browser, "#add-refund")
    click_element(browser, "#select2-id_form-1-line_number-container")
    wait_until_appeared(browser, "input.select2-search__field")
    elem = browser.find_by_css("input.select2-search__field").first
    elem._element.send_keys("line 1")
    elem._element.send_keys(Keys.RETURN)
    # Selecting "line 1" prefills its amount (100.00) and quantity (10).
    assert decimal.Decimal(browser.find_by_css("#id_form-1-amount").first.value) == decimal.Decimal("100.00")
    assert int(decimal.Decimal(browser.find_by_css("#id_form-1-quantity").first.value)) == 10
    click_element(browser, "button[form='create_refund']")
    _check_create_refund_link(browser, order, True)  # can still refund quantity
    _check_order_details_visible(browser)
    order.refresh_from_db()
    assert not order.taxful_total_price
    assert order.is_paid()
    assert not order.is_fully_shipped()
|
shoopio/shoop
|
shuup_tests/browser/admin/test_refunds.py
|
Python
|
agpl-3.0
| 5,946
|
[
"VisIt"
] |
c71a6e8b823c1208c40278b1b8ea521694a32ca9cd70ed9013678f6b72271ff0
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def metric_accessors(ip,port):
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
# regression
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# r2
r21 = gbm.r2(train=True, valid=False, xval=False)
assert isinstance(r21, float)
r22 = gbm.r2(train=False, valid=True, xval=False)
assert isinstance(r22, float)
r23 = gbm.r2(train=False, valid=False, xval=True)
assert isinstance(r23, float)
r2 = gbm.r2(train=True, valid=True, xval=False)
assert "train" in r2.keys() and "valid" in r2.keys(), "expected training and validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["valid"]))
assert r2["valid"] == r22
r2 = gbm.r2(train=True, valid=False, xval=True)
assert "train" in r2.keys() and "xval" in r2.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["train"]), type(r2["xval"]))
assert r2["xval"] == r23
r2 = gbm.r2(train=True, valid=True, xval=True)
assert "train" in r2.keys() and "valid" in r2.keys() and "xval" in r2.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["train"], float) and isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(r2["train"]), type(r2["valid"]), type(r2["xval"]))
r2 = gbm.r2(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(r2, float)
assert r2 == r21
r2 = gbm.r2(train=False, valid=True, xval=True)
assert "valid" in r2.keys() and "xval" in r2.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert len(r2) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(r2.keys())
assert isinstance(r2["valid"], float) and isinstance(r2["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(r2["valid"]), type(r2["xval"]))
# mean_residual_deviance
mean_residual_deviance1 = gbm.mean_residual_deviance(train=True, valid=False, xval=False)
assert isinstance(mean_residual_deviance1, float)
mean_residual_deviance2 = gbm.mean_residual_deviance(train=False, valid=True, xval=False)
assert isinstance(mean_residual_deviance2, float)
mean_residual_deviance3 = gbm.mean_residual_deviance(train=False, valid=False, xval=True)
assert isinstance(mean_residual_deviance3, float)
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=False)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys(), "expected training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]))
assert mean_residual_deviance["valid"] == mean_residual_deviance2
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=False, xval=True)
assert "train" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["xval"]))
assert mean_residual_deviance["xval"] == mean_residual_deviance3
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
assert "train" in mean_residual_deviance.keys() and "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mean_residual_deviance, float)
assert mean_residual_deviance == mean_residual_deviance1
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=True, xval=True)
assert "valid" in mean_residual_deviance.keys() and "xval" in mean_residual_deviance.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert len(mean_residual_deviance) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mean_residual_deviance.keys())
assert isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
# binomial
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
distribution = "bernoulli"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col], x=train[predictors], validation_y=valid[response_col], validation_x=valid[predictors], nfolds=3, distribution=distribution, fold_assignment="Random")
# auc
auc1 = gbm.auc(train=True, valid=False, xval=False)
assert isinstance(auc1, float)
auc2 = gbm.auc(train=False, valid=True, xval=False)
assert isinstance(auc2, float)
auc3 = gbm.auc(train=False, valid=False, xval=True)
assert isinstance(auc3, float)
auc = gbm.auc(train=True, valid=True, xval=False)
assert "train" in auc.keys() and "valid" in auc.keys(), "expected training and validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["valid"]))
assert auc["valid"] == auc2
auc = gbm.auc(train=True, valid=False, xval=True)
assert "train" in auc.keys() and "xval" in auc.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["xval"]))
assert auc["xval"] == auc3
auc = gbm.auc(train=True, valid=True, xval=True)
assert "train" in auc.keys() and "valid" in auc.keys() and "xval" in auc.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(auc["train"]), type(auc["valid"]), type(auc["xval"]))
auc = gbm.auc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(auc, float)
assert auc == auc1
auc = gbm.auc(train=False, valid=True, xval=True)
assert "valid" in auc.keys() and "xval" in auc.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert len(auc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(auc.keys())
assert isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["valid"]), type(auc["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# giniCoef
giniCoef1 = gbm.giniCoef(train=True, valid=False, xval=False)
assert isinstance(giniCoef1, float)
giniCoef2 = gbm.giniCoef(train=False, valid=True, xval=False)
assert isinstance(giniCoef2, float)
giniCoef3 = gbm.giniCoef(train=False, valid=False, xval=True)
assert isinstance(giniCoef3, float)
giniCoef = gbm.giniCoef(train=True, valid=True, xval=False)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys(), "expected training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["valid"]))
assert giniCoef["valid"] == giniCoef2
giniCoef = gbm.giniCoef(train=True, valid=False, xval=True)
assert "train" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["train"]), type(giniCoef["xval"]))
assert giniCoef["xval"] == giniCoef3
giniCoef = gbm.giniCoef(train=True, valid=True, xval=True)
assert "train" in giniCoef.keys() and "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["train"], float) and isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(giniCoef["train"]), type(giniCoef["valid"]), type(giniCoef["xval"]))
giniCoef = gbm.giniCoef(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(giniCoef, float)
assert giniCoef == giniCoef1
giniCoef = gbm.giniCoef(train=False, valid=True, xval=True)
assert "valid" in giniCoef.keys() and "xval" in giniCoef.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert len(giniCoef) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(giniCoef.keys())
assert isinstance(giniCoef["valid"], float) and isinstance(giniCoef["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(giniCoef["valid"]), type(giniCoef["xval"]))
# F1
F11 = gbm.F1(train=True, valid=False, xval=False)
F12 = gbm.F1(train=False, valid=True, xval=False)
F13 = gbm.F1(train=False, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=False)
F1 = gbm.F1(train=True, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=True)
F1 = gbm.F1(train=False, valid=False, xval=False) # default: return training metrics
F1 = gbm.F1(train=False, valid=True, xval=True)
# F0point5
F0point51 = gbm.F0point5(train=True, valid=False, xval=False)
F0point52 = gbm.F0point5(train=False, valid=True, xval=False)
F0point53 = gbm.F0point5(train=False, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=False)
F0point5 = gbm.F0point5(train=True, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=True)
F0point5 = gbm.F0point5(train=False, valid=False, xval=False) # default: return training metrics
F0point5 = gbm.F0point5(train=False, valid=True, xval=True)
# F2
F21 = gbm.F2(train=True, valid=False, xval=False)
F22 = gbm.F2(train=False, valid=True, xval=False)
F23 = gbm.F2(train=False, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=False)
F2 = gbm.F2(train=True, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=True)
F2 = gbm.F2(train=False, valid=False, xval=False) # default: return training metrics
F2 = gbm.F2(train=False, valid=True, xval=True)
# accuracy
accuracy1 = gbm.accuracy(train=True, valid=False, xval=False)
accuracy2 = gbm.accuracy(train=False, valid=True, xval=False)
accuracy3 = gbm.accuracy(train=False, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=False)
accuracy = gbm.accuracy(train=True, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=True)
accuracy = gbm.accuracy(train=False, valid=False, xval=False) # default: return training metrics
accuracy = gbm.accuracy(train=False, valid=True, xval=True)
# error
error1 = gbm.error(train=True, valid=False, xval=False)
error2 = gbm.error(train=False, valid=True, xval=False)
error3 = gbm.error(train=False, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=False)
error = gbm.error(train=True, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=True)
error = gbm.error(train=False, valid=False, xval=False) # default: return training metrics
error = gbm.error(train=False, valid=True, xval=True)
# precision
precision1 = gbm.precision(train=True, valid=False, xval=False)
precision2 = gbm.precision(train=False, valid=True, xval=False)
precision3 = gbm.precision(train=False, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=False)
precision = gbm.precision(train=True, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=True)
precision = gbm.precision(train=False, valid=False, xval=False) # default: return training metrics
precision = gbm.precision(train=False, valid=True, xval=True)
# mcc
mcc1 = gbm.mcc(train=True, valid=False, xval=False)
mcc2 = gbm.mcc(train=False, valid=True, xval=False)
mcc3 = gbm.mcc(train=False, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=False)
mcc = gbm.mcc(train=True, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=True)
mcc = gbm.mcc(train=False, valid=False, xval=False) # default: return training metrics
mcc = gbm.mcc(train=False, valid=True, xval=True)
# max_per_class_error
max_per_class_error1 = gbm.max_per_class_error(train=True, valid=False, xval=False)
max_per_class_error2 = gbm.max_per_class_error(train=False, valid=True, xval=False)
max_per_class_error3 = gbm.max_per_class_error(train=False, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=False)
max_per_class_error = gbm.max_per_class_error(train=True, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=True)
max_per_class_error = gbm.max_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
max_per_class_error = gbm.max_per_class_error(train=False, valid=True, xval=True)
# confusion_matrix
confusion_matrix1 = gbm.confusion_matrix(train=True, valid=False, xval=False)
confusion_matrix2 = gbm.confusion_matrix(train=False, valid=True, xval=False)
confusion_matrix3 = gbm.confusion_matrix(train=False, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=False)
confusion_matrix = gbm.confusion_matrix(train=True, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=True)
confusion_matrix = gbm.confusion_matrix(train=False, valid=False, xval=False) # default: return training metrics
confusion_matrix = gbm.confusion_matrix(train=False, valid=True, xval=True)
# # plot
# plot1 = gbm.plot(train=True, valid=False, xval=False)
# plot2 = gbm.plot(train=False, valid=True, xval=False)
# plot3 = gbm.plot(train=False, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=False)
# plot = gbm.plot(train=True, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=True)
# plot = gbm.plot(train=False, valid=False, xval=False) # default: return training metrics
# plot = gbm.plot(train=False, valid=True, xval=True)
# # tpr
# tpr1 = gbm.tpr(train=True, valid=False, xval=False)
# tpr2 = gbm.tpr(train=False, valid=True, xval=False)
# tpr3 = gbm.tpr(train=False, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=False)
# tpr = gbm.tpr(train=True, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=True)
# tpr = gbm.tpr(train=False, valid=False, xval=False) # default: return training metrics
# tpr = gbm.tpr(train=False, valid=True, xval=True)
#
# # tnr
# tnr1 = gbm.tnr(train=True, valid=False, xval=False)
# tnr2 = gbm.tnr(train=False, valid=True, xval=False)
# tnr3 = gbm.tnr(train=False, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=False)
# tnr = gbm.tnr(train=True, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=True)
# tnr = gbm.tnr(train=False, valid=False, xval=False) # default: return training metrics
# tnr = gbm.tnr(train=False, valid=True, xval=True)
#
# # fnr
# fnr1 = gbm.fnr(train=True, valid=False, xval=False)
# fnr2 = gbm.fnr(train=False, valid=True, xval=False)
# fnr3 = gbm.fnr(train=False, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=False)
# fnr = gbm.fnr(train=True, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=True)
# fnr = gbm.fnr(train=False, valid=False, xval=False) # default: return training metrics
# fnr = gbm.fnr(train=False, valid=True, xval=True)
#
# # fpr
# fpr1 = gbm.fpr(train=True, valid=False, xval=False)
# fpr2 = gbm.fpr(train=False, valid=True, xval=False)
# fpr3 = gbm.fpr(train=False, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=False)
# fpr = gbm.fpr(train=True, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=True)
# fpr = gbm.fpr(train=False, valid=False, xval=False) # default: return training metrics
# fpr = gbm.fpr(train=False, valid=True, xval=True)
# multinomial
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
distribution = "multinomial"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = h2o.gbm(y=train[response_col],
x=train[predictors],
validation_y=valid[response_col],
validation_x=valid[predictors],
nfolds=3,
distribution=distribution,
fold_assignment="Random")
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in mse.keys() and "valid" in mse.keys(), "expected training and validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in mse.keys() and "xval" in mse.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in mse.keys() and "valid" in mse.keys() and "xval" in mse.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in mse.keys() and "xval" in mse.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(mse.keys())
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in logloss.keys() and "valid" in logloss.keys(), "expected training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in logloss.keys() and "xval" in logloss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in logloss.keys() and "valid" in logloss.keys() and "xval" in logloss.keys(), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in logloss.keys() and "xval" in logloss.keys(), "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(logloss.keys())
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# hit_ratio_table
hit_ratio_table1 = gbm.hit_ratio_table(train=True, valid=False, xval=False)
hit_ratio_table2 = gbm.hit_ratio_table(train=False, valid=True, xval=False)
hit_ratio_table3 = gbm.hit_ratio_table(train=False, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=False)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=False, xval=False) # default: return training metrics
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=True, xval=True)
# clustering
iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
km = h2o.kmeans(x=iris[0:4],
nfolds=3,
k=3)
# betweenss
betweenss1 = km.betweenss(train=True, valid=False, xval=False)
assert isinstance(betweenss1, float)
betweenss3 = km.betweenss(train=False, valid=False, xval=True)
assert isinstance(betweenss3, float)
betweenss = km.betweenss(train=True, valid=False, xval=True)
assert "train" in betweenss.keys() and "xval" in betweenss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert len(betweenss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(betweenss.keys())
assert isinstance(betweenss["train"], float) and isinstance(betweenss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(betweenss["train"]), type(betweenss["xval"]))
assert betweenss["xval"] == betweenss3
betweenss = km.betweenss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(betweenss, float)
assert betweenss == betweenss1
# totss
totss1 = km.totss(train=True, valid=False, xval=False)
assert isinstance(totss1, float)
totss3 = km.totss(train=False, valid=False, xval=True)
assert isinstance(totss3, float)
totss = km.totss(train=True, valid=False, xval=True)
assert "train" in totss.keys() and "xval" in totss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert len(totss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(totss.keys())
assert isinstance(totss["train"], float) and isinstance(totss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(totss["train"]), type(totss["xval"]))
assert totss["xval"] == totss3
totss = km.totss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(totss, float)
assert totss == totss1
# tot_withinss
tot_withinss1 = km.tot_withinss(train=True, valid=False, xval=False)
assert isinstance(tot_withinss1, float)
tot_withinss3 = km.tot_withinss(train=False, valid=False, xval=True)
assert isinstance(tot_withinss3, float)
tot_withinss = km.tot_withinss(train=True, valid=False, xval=True)
assert "train" in tot_withinss.keys() and "xval" in tot_withinss.keys(), "expected training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert len(tot_withinss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(tot_withinss.keys())
assert isinstance(tot_withinss["train"], float) and isinstance(tot_withinss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(tot_withinss["train"]), type(tot_withinss["xval"]))
assert tot_withinss["xval"] == tot_withinss3
tot_withinss = km.tot_withinss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(tot_withinss, float)
assert tot_withinss == tot_withinss1
# withinss
withinss1 = km.withinss(train=True, valid=False, xval=False)
withinss3 = km.withinss(train=False, valid=False, xval=True)
withinss = km.withinss(train=True, valid=False, xval=True)
withinss = km.withinss(train=False, valid=False, xval=False) # default: return training metrics
# centroid_stats
centroid_stats1 = km.centroid_stats(train=True, valid=False, xval=False)
centroid_stats3 = km.centroid_stats(train=False, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=True, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=False, valid=False, xval=False) # default: return training metrics
# size
size1 = km.size(train=True, valid=False, xval=False)
size3 = km.size(train=False, valid=False, xval=True)
size = km.size(train=True, valid=False, xval=True)
size = km.size(train=False, valid=False, xval=False) # default: return training metrics
# Script entry point: hand the test function to the H2O pyunit runner.
# NOTE(review): `tests`, `sys`, and `metric_accessors` are defined earlier
# in this file (outside this excerpt).
if __name__ == "__main__":
    tests.run_test(sys.argv, metric_accessors)
|
bospetersen/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_metric_accessors.py
|
Python
|
apache-2.0
| 39,151
|
[
"Gaussian"
] |
2fa3166140c9ca1b7534655ef655426eeff3f54bb37caefc7713d019e9bf8a9f
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
###############################################################################
#234567890123456789012345678901234567890123456789012345678901234567890123456789
#--------1---------2---------3---------4---------5---------6---------7---------
# ##### BEGIN COPYRIGHT BLOCK #####
#
# initial script copyright (c)2013, 2014 Alexander Nussbaumer
#
# ##### END COPYRIGHT BLOCK #####
# Add-on registration metadata; parsed by Blender's add-on manager before
# the module itself is imported, so it must remain a literal dict.
bl_info = {
    'name': "Future Pinball FPx format (.fpm/.fpl/.fpt)",
    'description': "Import Future Pinball Model, Library and Table files",
    'author': "Alexander Nussbaumer",
    'version': (0, 0, 201401111),
    'blender': (2, 68, 0),  # minimum supported Blender version
    'location': "File > Import",
    'warning': "",
    'wiki_url': "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
                "Scripts/Import-Export/FuturePinball_FPx",
    'tracker_url': "https://developer.blender.org/T36215",
    'category': "Import-Export"}
# KNOWN ISSUES & TODOs & MAYBEs (in a random order):
#
# - issue: material assignment is not consistent.
# models got multiple materials assigned.
# models got crystal material assigned instead texture.
# - issue: some images could not be loaded to blender.
# #DEBUG fpl images.load C:\Users\user\AppData\Local\Temp\__grab__fpl__\bulb_trigger_star_v2\Bulb-Trigger-Star-v2.bmp
# IMB_ibImageFromMemory: unknown fileformat (C:\Users\user\AppData\Local\Temp\__grab__fpl__\bulb_trigger_star_v2\Bulb-Trigger-Star-v2.bmp)
# #DEBUG fpl images.load C:\Users\user\AppData\Local\Temp\__grab__fpl__\gameover\GameOver.tga
# decodetarga: incomplete file, 7.9% missing
#
# - todo: delete all unused temporary scenes with its content.
# to shrink file size.
# - todo: create better light settings.
# should give nice results for "Blender Render" and "Cycles Render" render engine.
# - todo: create better material settings.
# handling texture, color, transparent, crystal, light, chrome.
# - todo: create camera + setup
# to see the whole table, playfield, backglass.
# - todo: make all materials and material textures as separate.
# to bypass cyclic textures at texture baking.
# - todo: cut holes to playfield and surfaces for mask models.
# using curves? - by separate model mask and add as curve - multiple curves to one mesh?
# using boolean? - by separate model mask and for each a boolean modifier?
# - todo: align models only on .fpt import not as currently on .fpm level.
# find a way to get a general method, to align model position alignment at .fpt level, not on .fpm level.
# (more hardcoding?)
# - todo: improve mark_as_ramp_end_point handling (see def create_ramp_curve_points).
#
# - maybe: add a pop-up message/dialog to inform the user, that the import process takes its time.
# progress bar/text - is there something like that available in blender?
# - maybe: light dome (baking ambient occlusion has some issues)
# - maybe: import image lists as image sequences (maybe for BGE usage far far later)
# - maybe: animation got lost by got rid of using dupli-groups
# copy the animations object-by-object and make them as NLA action strip (maybe for BGE usage far far later)
# - maybe: import sounds. (maybe for BGE usage far far later)
# - maybe: import music. (maybe for BGE usage far far later)
# - maybe: import VisualBasic script and transform to python script. (maybe for BGE usage far far later)
#
# - maybe: add possibility to export/write back future pinball model files (.fpm)
# import/handle/export collision data
# rewrite/extend cfb_spec.py for write IO
# rewrite/extend fpx_spec.py for write IO
# rewrite/extend lzo_spec.py for write IO
# To support reload properly, try to access a package var,
# if it's there, reload everything
# Support Blender's "Reload Scripts" (F8): if 'bpy' is already in this
# module's namespace we are being re-run, so force-reload the submodule
# instead of reusing the cached first import.
if 'bpy' in locals():
    import imp
    if 'io_scene_fpx.fpx_ui' in locals():
        imp.reload(io_scene_fpx.fpx_ui)
else:
    from io_scene_fpx.fpx_ui import (
            FpmImportOperator,
            FplImportOperator,
            FptImportOperator,
            )
# Blender API imports (module registration helpers and menu types).
from bpy.utils import (
        register_module,
        unregister_module,
        )
from bpy.types import (
        INFO_MT_file_export,
        INFO_MT_file_import,
        )
###############################################################################
# registration
def register():
    """Register the add-on's classes and add its File > Import menu entries."""
    ####################
    # F8 - key
    import imp
    # NOTE(review): `fpx_ui` is not visibly bound in this excerpt (only
    # io_scene_fpx.fpx_ui is imported above) -- confirm it is imported
    # elsewhere in this module, otherwise this reload raises NameError.
    imp.reload(fpx_ui)
    # F8 - key
    ####################
    fpx_ui.register()
    register_module(__name__)
    # menu entries appear in the order they are appended
    INFO_MT_file_import.append(FpmImportOperator.menu_func)
    INFO_MT_file_import.append(FplImportOperator.menu_func)
    INFO_MT_file_import.append(FptImportOperator.menu_func)
def unregister():
    """Undo register(): remove menu entries and unregister all classes.

    NOTE(review): `fpx_ui` is not visibly bound in this excerpt -- confirm
    it is imported elsewhere in this module.
    """
    fpx_ui.unregister()
    unregister_module(__name__)
    INFO_MT_file_import.remove(FpmImportOperator.menu_func)
    INFO_MT_file_import.remove(FplImportOperator.menu_func)
    INFO_MT_file_import.remove(FptImportOperator.menu_func)
###############################################################################
# global entry point
# Allow running the add-on file directly (e.g. from Blender's text editor).
if (__name__ == "__main__"):
    register()
###############################################################################
###############################################################################
#234567890123456789012345678901234567890123456789012345678901234567890123456789
#--------1---------2---------3---------4---------5---------6---------7---------
# ##### END OF FILE #####
|
Passtechsoft/TPEAlpGen
|
blender/release/scripts/addons_contrib/io_scene_fpx/__init__.py
|
Python
|
gpl-3.0
| 6,269
|
[
"CRYSTAL"
] |
0a6163ddfabfcde4340a182f060e1375cb690dd3fda8fd75b4faa2a3df5fbd9c
|
from __future__ import print_function
import numpy as np
from ..utils import Util2d, Util3d, Transient2d, MfList, \
HeadFile, CellBudgetFile, UcnFile, FormattedHeadFile
from ..mbase import BaseModel
from ..pakbase import Package
from . import NetCdf, netcdf
from . import shapefile_utils
# Format templates for netCDF "units" attributes: "{0}" is filled with the
# model length/grid unit and "{1}" with the model time unit via str.format().
# NOTE: the original dict listed "sy" twice ("" then "{1}/{1}"); only the
# later entry was effective, so just that one is kept here.
NC_UNITS_FORMAT = {"hk": "{0}/{1}", "ss": "1/{0}", "rech": "{0}/{1}",
                   "strt": "{0}",
                   "wel_flux": "{0}^3/{1}", "top": "{0}", "model_top": "{0}",
                   "botm": "{0}", "thickness": "{0}",
                   "ghb_cond": "{0}/{1}^2", "ghb_bhead": "{0}",
                   "transmissivity": "{0}^2/{1}",
                   "vertical_conductance": "{0}/{1}^2",
                   "primary_storage_coefficient": "1/{1}",
                   "horizontal_hydraulic_conductivity": "{0}/{1}",
                   "riv_cond": "1/{1}",
                   "riv_stage": "{0}", "riv_rbot": "{0}", "head": "{0}",
                   "drawdown": "{0}", "cell_by_cell_flow": "{0}^3/{1}",
                   "sy": "{1}/{1}",
                   "prsity": "{1}/{1}", "hani": "{0}/{0}", "al": "{0}/{0}",
                   "drn_elev": "{0}",
                   "drn_cond": "1/{1}", "dz": "{0}", "subsidence": "{0}",
                   "chd_shead": "{0}", "chd_ehead": "{0}",
                   "2D_cumulative_well_flux": "{0}^3/{1}",
                   "3D_cumulative_well_flux": "{0}^3/{1}", "vka": "{0}/{1}"}
# Map scalar types to netCDF precision strings.  The deprecated ``np.int``
# alias (removed in numpy >= 1.24) has been replaced with the builtin
# ``int`` it aliased, so the effective dict keys are unchanged.
# NOTE: np.int64 deliberately maps to "i4", matching the original mapping.
NC_PRECISION_TYPE = {np.float32: "f4", int: "i4", np.int64: "i4",
                     np.int32: "i4"}
# Human-readable ``long_name`` attributes for known variable names.
NC_LONG_NAMES = {"hk": "horizontal hydraulic conductivity",
                 "vka": "vertical hydraulic conductivity",
                 "sy": "specific yield",
                 "ss": "specific storage",
                 "rech": " recharge",
                 "strt": "starting heads",
                 "wel_flux": "well flux",
                 "top": "model top",
                 "botm": "layer bottom",
                 "thickness": "layer thickness",
                 "ghb_cond": "GHB boundary conductance",
                 "ghb_bhead": "GHB boundary head",
                 "riv_cond": "river bed conductance",
                 "riv_stage": "river stage",
                 "riv_rbot": "river bottom elevation",
                 "drn_elev": "drain elevation",
                 "drn_cond": "drain conductance",
                 "hani": "horizontal anisotropy",
                 "prsity": "porosity",
                 "sconc1": "starting concentration",
                 "ibound": "flow model active array",
                 "icbund": "transport model active array"
                 }
def get_var_array_dict(m):
    """Collect variable arrays from model *m* into a dict.

    NOTE(review): as written this always returns an empty dict --
    ``array_dict`` is assigned inside the loop but never stored in
    ``vdict``.  Looks like unfinished code; confirm intent before use.
    """
    vdict = {}
    # for vname in f.var_attr_dict.keys():
    #     vdict[vname] = f.nc.variables[vname][:]
    for attr in m:
        if hasattr(attr, "stress_period_data"):
            array_dict = attr.stress_period_data.array
    return vdict
def ensemble_helper(inputs_filename, outputs_filename, models, add_reals=True,
                    **kwargs):
    """Export an ensemble of model instances to netCDF.

    Assumes all models share the same dis and sr; only properties and
    boundary conditions differ.  The realization suffix for variable names
    is taken from ``m.name.split('.')[0].split('_')[-1]``.

    Parameters
    ----------
    inputs_filename : str or None
        netCDF filename for model inputs; skipped when None
    outputs_filename : str or None
        netCDF filename for model outputs; skipped when None
    models : list of BaseModel-derived instances
    add_reals : bool
        if True, each realization is appended as its own set of variables;
        if False, only **mean**/**stdev** summary variables are written

    Returns
    -------
    (f_in, f_out) : the input and output NetCdf instances (either may be
    None when the corresponding filename was None)
    """
    f_in, f_out = None, None
    # all realizations must share the same grid dimensions
    for m in models[1:]:
        assert m.get_nrow_ncol_nlay_nper() == models[
            0].get_nrow_ncol_nlay_nper()
    if inputs_filename is not None:
        f_in = models[0].export(inputs_filename, **kwargs)
        vdict = {}
        vdicts = [models[0].export(vdict, **kwargs)]
        i = 1
        for m in models[1:]:
            suffix = m.name.split('.')[0].split('_')[-1]
            vdict = {}
            m.export(vdict, **kwargs)
            vdicts.append(vdict)
            if add_reals:
                f_in.append(vdict, suffix=suffix)
            i += 1
        # per-variable mean/stdev across realizations; FILLVALUE and NaN
        # cells in the last realization mask the summary arrays too
        mean, stdev = {}, {}
        for vname in vdict.keys():
            alist = []
            for vd in vdicts:
                alist.append(vd[vname])
            alist = np.array(alist)
            mean[vname] = alist.mean(axis=0)
            stdev[vname] = alist.std(axis=0)
            mean[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
            stdev[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
            mean[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
            stdev[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
        # only append summary variables when there is more than one model
        if i >= 2:
            if not add_reals:
                # summaries-only: rebuild the file around the mean arrays
                f_in.write()
                f_in = NetCdf.empty_like(mean, output_filename=inputs_filename)
                f_in.append(mean, suffix="**mean**")
                f_in.append(stdev, suffix="**stdev**")
            else:
                f_in.append(mean, suffix="**mean**")
                f_in.append(stdev, suffix="**stdev**")
        f_in.add_global_attributes({"namefile": ''})
    if outputs_filename is not None:
        # same pattern as above, but driven by each model's output files
        f_out = output_helper(outputs_filename, models[0], models[0]. \
                              load_results(as_dict=True), **kwargs)
        vdict = {}
        vdicts = [output_helper(vdict, models[0], models[0]. \
                                load_results(as_dict=True), **kwargs)]
        i = 1
        for m in models[1:]:
            suffix = m.name.split('.')[0].split('_')[-1]
            oudic = m.load_results(as_dict=True)
            vdict = {}
            output_helper(vdict, m, oudic, **kwargs)
            vdicts.append(vdict)
            if add_reals:
                f_out.append(vdict, suffix=suffix)
            i += 1
        mean, stdev = {}, {}
        for vname in vdict.keys():
            alist = []
            for vd in vdicts:
                alist.append(vd[vname])
            alist = np.array(alist)
            mean[vname] = alist.mean(axis=0)
            stdev[vname] = alist.std(axis=0)
            mean[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
            stdev[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
            mean[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
            stdev[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
        if i >= 2:
            if not add_reals:
                f_out.write()
                f_out = NetCdf.empty_like(mean,
                                          output_filename=outputs_filename)
                f_out.append(mean, suffix="**mean**")
                f_out.append(stdev, suffix="**stdev**")
            else:
                f_out.append(mean, suffix="**mean**")
                f_out.append(stdev, suffix="**stdev**")
        f_out.add_global_attributes({"namefile": ''})
    return f_in, f_out
def _add_output_nc_variable(f, times, shape3d, out_obj, var_name, logger=None,
                            text='',
                            mask_vals=None, mask_array3d=None):
    """Extract a (time, layer, row, col) array from a flopy output-file
    object and add it to an export target.

    Parameters
    ----------
    f : NetCdf instance or dict
        export target.  For a dict, the raw 4D array is stored under the
        variable name and the dict is returned immediately.
    times : list of float
        totim values to extract (one time plane per entry)
    shape3d : tuple of int
        (nlay, nrow, ncol) of the model grid
    out_obj : flopy datafile instance (HeadFile, UcnFile, CellBudgetFile, ...)
    var_name : str
        base variable name; overridden by *text* when given
    logger : object, optional
        logger with log()/warn()/lraise() methods
    text : str or bytes
        budget-record text used for CellBudgetFile extraction
    mask_vals : iterable of float, optional
        sentinel values (e.g. hnoflo/hdry) converted to NaN.  Default empty.
    mask_array3d : bool ndarray, optional
        cells to mask (True -> NaN) when shapes match

    Returns
    -------
    dict target is returned with the array added; for a NetCdf target the
    variable is created in place (implicit None return, as before).
    """
    # avoid the shared mutable-default pitfall (was ``mask_vals=[]``)
    if mask_vals is None:
        mask_vals = []
    if logger:
        logger.log("creating array for {0}".format(
            var_name))
    array = np.zeros((len(times), shape3d[0], shape3d[1], shape3d[2]),
                     dtype=np.float32)
    # np.nan (the removed np.NaN alias is avoided) marks missing time planes
    array[:] = np.nan
    for i, t in enumerate(times):
        if t in out_obj.recordarray["totim"]:
            try:
                if text:
                    a = out_obj.get_data(totim=t, full3D=True, text=text)
                    if isinstance(a, list):
                        a = a[0]
                else:
                    a = out_obj.get_data(totim=t)
            except Exception as e:
                estr = "error getting data for {0} at time {1}:{2}".format(
                    var_name + text.decode().strip().lower(), t, str(e))
                if logger:
                    logger.warn(estr)
                else:
                    print(estr)
                continue
            if mask_array3d is not None and a.shape == mask_array3d.shape:
                a[mask_array3d] = np.nan
            try:
                array[i, :, :, :] = a.astype(np.float32)
            except Exception as e:
                estr = "error assigning {0} data to array for time {1}:{2}".format(
                    var_name + text.decode().strip().lower(), t, str(e))
                if logger:
                    logger.warn(estr)
                else:
                    print(estr)
                continue
    if logger:
        logger.log("creating array for {0}".format(
            var_name))
    # convert sentinel values (hnoflo/hdry) to NaN before computing stats
    for mask_val in mask_vals:
        array[np.where(array == mask_val)] = np.nan
    mx, mn = np.nanmax(array), np.nanmin(array)
    array[np.isnan(array)] = netcdf.FILLVALUE
    if isinstance(f, dict):
        if text:
            var_name = text.decode().strip().lower()
        f[var_name] = array
        return f
    units = None
    if var_name in NC_UNITS_FORMAT:
        units = NC_UNITS_FORMAT[var_name].format(
            f.grid_units, f.time_units)
    precision_str = "f4"
    if text:
        var_name = text.decode().strip().lower()
    attribs = {"long_name": var_name}
    attribs["coordinates"] = "time layer latitude longitude"
    attribs["min"] = mn
    attribs["max"] = mx
    if units is not None:
        attribs["units"] = units
    try:
        var = f.create_variable(var_name, attribs,
                                precision_str=precision_str,
                                dimensions=("time", "layer", "y", "x"))
    except Exception as e:
        estr = "error creating variable {0}:\n{1}".format(
            var_name, str(e))
        if logger:
            logger.lraise(estr)
        else:
            raise Exception(estr)
    try:
        var[:] = array
    except Exception as e:
        estr = "error setting array to variable {0}:\n{1}".format(
            var_name, str(e))
        if logger:
            logger.lraise(estr)
        else:
            raise Exception(estr)
def output_helper(f, ml, oudic, **kwargs):
    """Export model outputs using the model spatial reference info.

    Parameters
    ----------
    f : str, NetCdf, or dict
        a filename ending in ".nc", an existing NetCdf instance (whose
        time variable must match the common output times), or a dict to
        collect raw 4D arrays
    ml : BaseModel derived type
    oudic : dict {output_filename: flopy datafile/cellbudgetfile instance}

    Returns
    -------
    the export container *f* (NetCdf instance or dict)

    Note:
    ----
    casts down double precision to single precision for netCDF files
    """
    assert isinstance(ml, BaseModel)
    assert len(oudic.keys()) > 0
    logger = kwargs.pop("logger", None)
    stride = kwargs.pop("stride", 1)
    suffix = kwargs.pop("suffix", None)
    forgive = kwargs.pop("forgive", False)
    if len(kwargs) > 0 and logger is not None:
        str_args = ','.join(kwargs)
        logger.warn("unused kwargs: " + str_args)
    # round the totims in each output file instance so they line up
    # across files before comparing them
    for key, out in oudic.items():
        times = [float("{0:15.6f}".format(t)) for t in
                 out.recordarray["totim"]]
        out.recordarray["totim"] = times
    # collect the union of output times, preserving no duplicates
    times = []
    for filename, df in oudic.items():
        [times.append(t) for t in df.recordarray["totim"] if t not in times]
    assert len(times) > 0
    times.sort()
    # rectify times - only use times that are common to every output file
    common_times = []
    skipped_times = []
    for t in times:
        keep = True
        for filename, df in oudic.items():
            if t not in df.recordarray["totim"]:
                keep = False
                break
        if keep:
            common_times.append(t)
        else:
            skipped_times.append(t)
    assert len(common_times) > 0
    if len(skipped_times) > 0:
        if logger:
            logger.warn("the following output times are not common to all" + \
                        " output files and are being skipped:\n" + \
                        "{0}".format(skipped_times))
        else:
            print("the following output times are not common to all" + \
                  " output files and are being skipped:\n" + \
                  "{0}".format(skipped_times))
    # thin the common times by the requested stride
    times = [t for t in common_times[::stride]]
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, ml, time_values=times, logger=logger,
                   forgive=forgive)
    elif isinstance(f, NetCdf):
        # an existing netCDF must already carry exactly these times
        otimes = list(f.nc.variables["time"][:])
        assert otimes == times
    if isinstance(f, NetCdf) or isinstance(f, dict):
        shape3d = (ml.nlay, ml.nrow, ml.ncol)
        # build masks from inactive cells and dry/no-flow sentinel values
        mask_vals = []
        mask_array3d = None
        if ml.bas6:
            mask_vals.append(ml.bas6.hnoflo)
            mask_array3d = ml.bas6.ibound.array == 0
        if ml.bcf:
            mask_vals.append(ml.bcf.hdry)
        if ml.lpf:
            mask_vals.append(ml.lpf.hdry)
        # dispatch each output file to the 4D-array extractor by its type
        for filename, out_obj in oudic.items():
            filename = filename.lower()
            if isinstance(out_obj, UcnFile):
                _add_output_nc_variable(f, times, shape3d, out_obj,
                                        "concentration", logger=logger,
                                        mask_vals=mask_vals,
                                        mask_array3d=mask_array3d)
            elif isinstance(out_obj, HeadFile):
                _add_output_nc_variable(f, times, shape3d, out_obj,
                                        out_obj.text.decode(), logger=logger,
                                        mask_vals=mask_vals,
                                        mask_array3d=mask_array3d)
            elif isinstance(out_obj, FormattedHeadFile):
                _add_output_nc_variable(f, times, shape3d, out_obj,
                                        out_obj.text, logger=logger,
                                        mask_vals=mask_vals,
                                        mask_array3d=mask_array3d)
            elif isinstance(out_obj, CellBudgetFile):
                # one variable per budget record text
                var_name = "cell_by_cell_flow"
                for text in out_obj.textlist:
                    _add_output_nc_variable(f, times, shape3d, out_obj,
                                            var_name, logger=logger, text=text,
                                            mask_vals=mask_vals,
                                            mask_array3d=mask_array3d)
            else:
                # ("extention" typo is in the runtime message; left as-is)
                estr = "unrecognized file extention:{0}".format(filename)
                if logger:
                    logger.lraise(estr)
                else:
                    raise Exception(estr)
    else:
        if logger:
            logger.lraise("unrecognized export argument:{0}".format(f))
        else:
            raise NotImplementedError("unrecognized export argument" + \
                                      ":{0}".format(f))
    return f
def model_helper(f, ml, **kwargs):
    """Export an entire model to a netCDF file, shapefile, or dict.

    A ".nc" filename is first promoted to a NetCdf instance; a ".shp"
    filename is handed to the shapefile writer; a NetCdf or dict target
    receives each package's own export in turn.  The optional
    'package_names' kwarg restricts which packages are exported to a
    NetCdf target.
    """
    assert isinstance(ml, BaseModel)
    wanted = kwargs.get("package_names", None)
    if wanted is None:
        # default: every package attached to the model
        wanted = [p.name[0] for p in ml.packagelist]
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, ml, **kwargs)
    if isinstance(f, str) and f.lower().endswith(".shp"):
        # shapefile route: one attribute shapefile for the whole model
        shapefile_utils.model_attributes_to_shapefile(
            f, ml, package_names=wanted, **kwargs)
        return f
    if isinstance(f, NetCdf):
        for p in ml.packagelist:
            if p.name[0] in wanted:
                f = p.export(f, **kwargs)
            assert f is not None
        return f
    if isinstance(f, dict):
        for p in ml.packagelist:
            f = p.export(f, **kwargs)
        return f
    raise NotImplementedError("unrecognized export argument:{0}".format(f))
def package_helper(f, pak, **kwargs):
    """Export helper for a single Package instance.

    Parameters
    ----------
    f : str (filename ending in .nc or .shp), NetCdf, or dict
        export target; a ".nc" filename is converted to a NetCdf instance
    pak : Package
        the flopy package whose array attributes are exported

    Returns
    -------
    the export container (NetCdf or dict); the shapefile route returns
    None (matching the original control flow).
    """
    assert isinstance(pak, Package)
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, pak.parent)
    if isinstance(f, str) and f.lower().endswith(".shp"):
        shapefile_utils.model_attributes_to_shapefile(f, pak.parent,
                                                      package_names=pak.name,
                                                      **kwargs)
    elif isinstance(f, NetCdf) or isinstance(f, dict):
        attrs = dir(pak)
        # 'sr' and 'start_datetime' are bookkeeping attributes, not data
        if 'sr' in attrs:
            attrs.remove('sr')
        if 'start_datetime' in attrs:
            attrs.remove('start_datetime')
        for attr in attrs:
            if '__' in attr:
                continue
            a = pak.__getattribute__(attr)
            # dispatch each exportable attribute to its helper by type
            if isinstance(a, Util2d) and len(a.shape) == 2 and a.shape[1] > 0:
                try:
                    f = util2d_helper(f, a, **kwargs)
                except Exception:
                    # was a bare ``except:`` -- narrowed so SystemExit /
                    # KeyboardInterrupt are no longer swallowed
                    f.logger.warn(
                        "error adding {0} as variable".format(a.name))
            elif isinstance(a, Util3d):
                f = util3d_helper(f, a, **kwargs)
            elif isinstance(a, Transient2d):
                f = transient2d_helper(f, a, **kwargs)
            elif isinstance(a, MfList):
                f = mflist_helper(f, a, **kwargs)
            elif isinstance(a, list):
                for v in a:
                    if isinstance(v, Util3d):
                        f = util3d_helper(f, v, **kwargs)
        return f
    else:
        raise NotImplementedError("unrecognized export argument:{0}".format(f))
def generic_array_helper(f, array, var_name="generic_array",
                         dimensions=("time", "layer", "y", "x"),
                         precision_str="f4", units="unitless", **kwargs):
    """Write an arbitrary ndarray to a NetCdf export target.

    When *f* is a ".nc" filename, a 'model' kwarg (a BaseModel) is
    required to construct the NetCdf container first.  Optional kwargs
    'min', 'max' and 'long_name' set the corresponding attributes.
    """
    if isinstance(f, str) and f.lower().endswith(".nc"):
        assert "model" in kwargs.keys(), "creating a new netCDF using generic_array_helper requires a 'model' kwarg"
        assert isinstance(kwargs["model"], BaseModel)
        f = NetCdf(f, kwargs.pop("model"))
    assert array.ndim == len(dimensions), "generic_array_helper() " + \
                                          "array.ndim != dimensions"
    # translate dimension names into CF-style coordinate names
    dim_to_coord = {"time": "time", "layer": "layer",
                    "y": "latitude", "x": "longitude"}
    coords = ' '.join(dim_to_coord[d] for d in dimensions)
    low = kwargs.pop("min", -1.0e+9)
    high = kwargs.pop("max", 1.0e+9)
    label = kwargs.pop("long_name", var_name)
    if kwargs:
        f.logger.warn("generic_array_helper(): unrecognized kwargs:" +
                      ",".join(kwargs.keys()))
    attribs = {"long_name": label,
               "coordinates": coords,
               "units": units,
               "min": low,
               "max": high}
    if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
        raise Exception("error processing {0}: all NaNs".format(var_name))
    try:
        nc_var = f.create_variable(var_name, attribs,
                                   precision_str=precision_str,
                                   dimensions=dimensions)
    except Exception as e:
        estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
        f.logger.warn(estr)
        raise Exception(estr)
    try:
        nc_var[:] = array
    except Exception as e:
        estr = "error setting array to variable {0}:\n{1}".format(var_name,
                                                                  str(e))
        f.logger.warn(estr)
        raise Exception(estr)
    return f
def mflist_helper(f, mfl, **kwargs):
    """Export helper for MfList instances.

    Parameters
    ----------
    f : str (filename ending in .nc or .shp), NetCdf, or dict
        export target
    mfl : MfList instance
    kper : int, optional (shapefile route)
        single stress period to export; default exports all periods
    sparse : bool, optional (shapefile route)
        export the raw recarray instead of gridded arrays

    Returns
    -------
    the export container (NetCdf or dict); the shapefile route returns
    None (matching the original control flow).
    """
    assert isinstance(mfl, MfList) \
        , "mflist_helper only helps MfList instances"
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, mfl.model)
    if isinstance(f, str) and f.lower().endswith(".shp"):
        kper = kwargs.get("kper", None)
        sparse = kwargs.get("sparse", False)
        if mfl.sr is None:
            raise Exception("MfList.to_shapefile: SpatialReference not set")
        import flopy.utils.flopy_io as fio
        if kper is None:
            # FIX: dict.keys() is a view on python 3 with no .sort();
            # sorted() works on both python 2 and 3 (was ``keys.sort()``)
            keys = sorted(mfl.data.keys())
        else:
            keys = [kper]
        if not sparse:
            array_dict = {}
            for kk in keys:
                arrays = mfl.to_array(kk)
                for name, array in arrays.items():
                    for k in range(array.shape[0]):
                        # attribute name: <short name><layer><stress period>
                        n = fio.shape_attr_name(name, length=4)
                        aname = "{}{:03d}{:03d}".format(n, k + 1, int(kk) + 1)
                        array_dict[aname] = array[k]
            shapefile_utils.write_grid_shapefile(f, mfl.sr, array_dict)
        else:
            from ..export.shapefile_utils import recarray2shp
            # NOTE(review): called with no arguments -- looks like an
            # unfinished stub; confirm intended sparse-export behavior
            recarray2shp()
    elif isinstance(f, NetCdf) or isinstance(f, dict):
        base_name = mfl.package.name[0].lower()
        for name, array in mfl.masked_4D_arrays_itr():
            var_name = base_name + '_' + name
            if isinstance(f, dict):
                f[var_name] = array
                continue
            f.log("processing {0} attribute".format(name))
            units = None
            if var_name in NC_UNITS_FORMAT:
                units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
                                                         f.time_units)
            precision_str = NC_PRECISION_TYPE[mfl.dtype[name].type]
            if var_name in NC_LONG_NAMES:
                attribs = {"long_name": NC_LONG_NAMES[var_name]}
            else:
                attribs = {"long_name": var_name}
            attribs["coordinates"] = "time layer latitude longitude"
            attribs["min"] = np.nanmin(array)
            attribs["max"] = np.nanmax(array)
            if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
                raise Exception(
                    "error processing {0}: all NaNs".format(var_name))
            if units is not None:
                attribs["units"] = units
            try:
                var = f.create_variable(var_name, attribs,
                                        precision_str=precision_str,
                                        dimensions=("time", "layer", "y", "x"))
            except Exception as e:
                estr = "error creating variable {0}:\n{1}".format(var_name,
                                                                  str(e))
                f.logger.warn(estr)
                raise Exception(estr)
            array[np.isnan(array)] = f.fillvalue
            try:
                var[:] = array
            except Exception as e:
                estr = "error setting array to variable {0}:\n{1}".format(
                    var_name, str(e))
                f.logger.warn(estr)
                raise Exception(estr)
            f.log("processing {0} attribute".format(name))
        return f
    else:
        raise NotImplementedError("unrecognized export argument:{0}".format(f))
def transient2d_helper(f, t2d, **kwargs):
    """Export helper for Transient2d instances.

    Parameters
    ----------
    f : str (filename ending in .nc or .shp), NetCdf, or dict
        export target
    t2d : Transient2d instance
    min_valid : minimum valid value (values below become NaN/fill)
    max_valid : maximum valid value (values above become NaN/fill)

    Returns
    -------
    the export container (NetCdf or dict); the shapefile route returns
    None (matching the original control flow).
    """
    assert isinstance(t2d, Transient2d) \
        , "transient2d_helper only helps Transient2d instances"
    min_valid = kwargs.get("min_valid", -1.0e+9)
    max_valid = kwargs.get("max_valid", 1.0e+9)
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, t2d.model)
    if isinstance(f, str) and f.lower().endswith(".shp"):
        # one shapefile attribute per stress period
        array_dict = {}
        for kper in range(t2d.model.nper):
            u2d = t2d[kper]
            name = '{}_{:03d}'.format(
                shapefile_utils.shape_attr_name(u2d.name), kper + 1)
            array_dict[name] = u2d.array
        shapefile_utils.write_grid_shapefile(f, t2d.model.sr, array_dict)
    elif isinstance(f, NetCdf) or isinstance(f, dict):
        # mask rows/cols that have no active cell in any layer
        mask = None
        if t2d.model.bas6 is not None:
            ibnd = np.abs(t2d.model.bas6.ibound.array).sum(axis=0)
            mask = ibnd == 0
        elif t2d.model.btn is not None:
            ibnd = np.abs(t2d.model.btn.icbund.array).sum(axis=0)
            mask = ibnd == 0
        array = t2d.array
        with np.errstate(invalid="ignore"):
            # FIX: the deprecated np.int alias (== builtin int, removed in
            # numpy >= 1.24) was dropped; membership is unchanged.  np.nan
            # replaces the removed np.NaN alias throughout.
            if array.dtype not in [int, np.int32, np.int64]:
                if mask is not None:
                    array[:, 0, mask] = np.nan
                array[array <= min_valid] = np.nan
                array[array >= max_valid] = np.nan
                mx, mn = np.nanmax(array), np.nanmin(array)
            else:
                # integer arrays cannot hold NaN; use the fill value instead
                mx, mn = np.nanmax(array), np.nanmin(array)
                array[array <= min_valid] = netcdf.FILLVALUE
                array[array >= max_valid] = netcdf.FILLVALUE
        var_name = t2d.name_base.replace('_', '')
        if isinstance(f, dict):
            array[array == netcdf.FILLVALUE] = np.nan
            f[var_name] = array
            return f
        array[np.isnan(array)] = f.fillvalue
        units = "unitless"
        if var_name in NC_UNITS_FORMAT:
            units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
                                                     f.time_units)
        try:
            precision_str = NC_PRECISION_TYPE[t2d.dtype]
        except Exception:
            # t2d.dtype may be a numpy dtype instance rather than a type
            # (was a bare ``except:``; narrowed to Exception)
            precision_str = NC_PRECISION_TYPE[t2d.dtype.type]
        if var_name in NC_LONG_NAMES:
            attribs = {"long_name": NC_LONG_NAMES[var_name]}
        else:
            attribs = {"long_name": var_name}
        attribs["coordinates"] = "time layer latitude longitude"
        attribs["units"] = units
        attribs["min"] = mn
        attribs["max"] = mx
        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
            raise Exception("error processing {0}: all NaNs".format(var_name))
        try:
            var = f.create_variable(var_name, attribs,
                                    precision_str=precision_str,
                                    dimensions=("time", "layer", "y", "x"))
        except Exception as e:
            estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        try:
            var[:, 0] = array
        except Exception as e:
            estr = "error setting array to variable {0}:\n{1}".format(var_name,
                                                                      str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        return f
    else:
        raise NotImplementedError("unrecognized export argument:{0}".format(f))
def util3d_helper(f, u3d, **kwargs):
    """Export helper for Util3d instances.

    Parameters
    ----------
    f : string (filename) or existing export instance type (NetCdf only for now)
        ``*.nc`` filenames are wrapped in a NetCdf instance, ``*.shp``
        filenames are written as a shapefile with one attribute per layer,
        and a dict collects the masked raw array under the variable name.
    u3d : Util3d instance
    min_valid : minimum valid value (kwarg, default -1.0e+9)
    max_valid : maximum valid value (kwarg, default 1.0e+9)

    Returns
    -------
    The NetCdf instance or dict that received the data (the shapefile
    branch returns None).
    """
    assert isinstance(u3d, Util3d), "util3d_helper only helps Util3d instances"
    assert len(u3d.shape) == 3, "util3d_helper only supports 3D arrays"
    min_valid = kwargs.get("min_valid", -1.0e+9)
    max_valid = kwargs.get("max_valid", 1.0e+9)
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, u3d.model)
    if isinstance(f, str) and f.lower().endswith(".shp"):
        # shapefile export: one attribute per layer, named <attr>_<layer>
        array_dict = {}
        for ilay in range(u3d.model.nlay):
            u2d = u3d[ilay]
            name = '{}_{:03d}'.format(
                shapefile_utils.shape_attr_name(u2d.name), ilay + 1)
            array_dict[name] = u2d.array
        shapefile_utils.write_grid_shapefile(f, u3d.model.sr,
                                             array_dict)
    elif isinstance(f, NetCdf) or isinstance(f, dict):
        var_name = u3d.name[0].replace(' ', '_').lower()
        array = u3d.array
        # mask inactive cells (ibound/icbund == 0), except when exporting
        # the ibound/icbund arrays themselves
        mask = None
        if u3d.model.bas6 is not None and "ibound" not in var_name:
            mask = u3d.model.bas6.ibound.array == 0
        elif u3d.model.btn is not None and 'icbund' not in var_name:
            mask = u3d.model.btn.icbund.array == 0
        if mask is not None and array.shape != mask.shape:
            # e.g. vcont in bcf6 has fewer layers than the grid: pad the
            # missing layers with NaN so the array matches the model shape
            full_array = np.empty(mask.shape)
            full_array[:] = np.nan
            full_array[:array.shape[0]] = array
            array = full_array
        # runtime warning issued in some cases - need to track down cause
        # happens when NaN is already in array
        with np.errstate(invalid="ignore"):
            # BUG FIX: the old check compared against [int, np.int, np.int32,
            # np.int64]; np.int was removed in numpy 1.24, which makes that
            # list raise AttributeError.  np.issubdtype covers all integer
            # dtypes robustly.  np.NaN (removed in numpy 2.0) -> np.nan.
            if not np.issubdtype(array.dtype, np.integer):
                if mask is not None:
                    array[mask] = np.nan
                array[array <= min_valid] = np.nan
                array[array >= max_valid] = np.nan
                mx, mn = np.nanmax(array), np.nanmin(array)
            else:
                # integer arrays cannot hold NaN: mark invalid cells with
                # the netCDF fill value instead
                mx, mn = np.nanmax(array), np.nanmin(array)
                if mask is not None:
                    array[mask] = netcdf.FILLVALUE
                array[array <= min_valid] = netcdf.FILLVALUE
                array[array >= max_valid] = netcdf.FILLVALUE
                if u3d.model.bas6 is not None and "ibound" not in var_name:
                    array[u3d.model.bas6.ibound.array == 0] = netcdf.FILLVALUE
                elif u3d.model.btn is not None and 'icbund' not in var_name:
                    array[u3d.model.btn.icbund.array == 0] = netcdf.FILLVALUE
        if isinstance(f, dict):
            f[var_name] = array
            return f
        # NetCdf branch: replace NaNs with the file's fill value and create
        # the variable with basic attributes
        array[np.isnan(array)] = f.fillvalue
        units = "unitless"
        if var_name in NC_UNITS_FORMAT:
            units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
                                                     f.time_units)
        precision_str = NC_PRECISION_TYPE[u3d.dtype]
        if var_name in NC_LONG_NAMES:
            attribs = {"long_name": NC_LONG_NAMES[var_name]}
        else:
            attribs = {"long_name": var_name}
        attribs["coordinates"] = "layer latitude longitude"
        attribs["units"] = units
        attribs["min"] = mn
        attribs["max"] = mx
        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
            raise Exception("error processing {0}: all NaNs".format(var_name))
        try:
            var = f.create_variable(var_name, attribs,
                                    precision_str=precision_str,
                                    dimensions=("layer", "y", "x"))
        except Exception as e:
            estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        try:
            var[:] = array
        except Exception as e:
            estr = "error setting array to variable {0}:\n{1}".format(var_name,
                                                                      str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        return f
    else:
        raise NotImplementedError("unrecognized export argument:{0}".format(f))
def util2d_helper(f, u2d, **kwargs):
    """Export helper for Util2d instances.

    Parameters
    ----------
    f : string (filename) or existing export instance type (NetCdf only for now)
        ``*.nc`` filenames are wrapped in a NetCdf instance, ``*.shp``
        filenames are written as a shapefile, and a dict collects the
        masked raw array under the variable name.
    u2d : Util2d instance
    min_valid : minimum valid value (kwarg, default -1.0e+9)
    max_valid : maximum valid value (kwarg, default 1.0e+9)

    Returns
    -------
    The NetCdf instance or dict that received the data (the shapefile
    branch returns None).
    """
    assert isinstance(u2d, Util2d), "util2d_helper only helps Util2d instances"
    assert len(u2d.shape) == 2, "util2d_helper only supports 2D arrays"
    min_valid = kwargs.get("min_valid", -1.0e+9)
    max_valid = kwargs.get("max_valid", 1.0e+9)
    if isinstance(f, str) and f.lower().endswith(".nc"):
        f = NetCdf(f, u2d.model)
    if isinstance(f, str) and f.lower().endswith(".shp"):
        name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True)
        shapefile_utils.write_grid_shapefile(f, u2d.model.sr,
                                             {name: u2d.array})
        return
    elif isinstance(f, NetCdf) or isinstance(f, dict):
        # try to mask the array - assume layer 1 ibound is a good mask
        array = u2d.array
        with np.errstate(invalid="ignore"):
            # BUG FIX: the old check compared against [int, np.int, np.int32,
            # np.int64]; np.int was removed in numpy 1.24, which makes that
            # list raise AttributeError.  np.issubdtype covers all integer
            # dtypes robustly.  np.NaN (removed in numpy 2.0) -> np.nan.
            if not np.issubdtype(array.dtype, np.integer):
                if u2d.model.bas6 is not None and \
                                "ibound" not in u2d.name.lower():
                    array[u2d.model.bas6.ibound.array[0, :, :] == 0] = np.nan
                elif u2d.model.btn is not None and \
                                "icbund" not in u2d.name.lower():
                    array[u2d.model.btn.icbund.array[0, :, :] == 0] = np.nan
                array[array <= min_valid] = np.nan
                array[array >= max_valid] = np.nan
                mx, mn = np.nanmax(array), np.nanmin(array)
            else:
                # integer arrays cannot hold NaN: mark invalid cells with
                # the netCDF fill value instead
                mx, mn = np.nanmax(array), np.nanmin(array)
                array[array <= min_valid] = netcdf.FILLVALUE
                array[array >= max_valid] = netcdf.FILLVALUE
                if u2d.model.bas6 is not None and \
                                "ibound" not in u2d.name.lower():
                    array[u2d.model.bas6.ibound.array[0, :, :] == 0] = \
                        netcdf.FILLVALUE
                elif u2d.model.btn is not None and \
                                "icbund" not in u2d.name.lower():
                    array[u2d.model.btn.icbund.array[0, :, :] == 0] = \
                        netcdf.FILLVALUE
        var_name = u2d.name
        if isinstance(f, dict):
            f[var_name] = array
            return f
        # NetCdf branch: replace NaNs with the file's fill value and create
        # the variable with basic attributes
        array[np.isnan(array)] = f.fillvalue
        units = "unitless"
        if var_name in NC_UNITS_FORMAT:
            units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
                                                     f.time_units)
        precision_str = NC_PRECISION_TYPE[u2d.dtype]
        if var_name in NC_LONG_NAMES:
            attribs = {"long_name": NC_LONG_NAMES[var_name]}
        else:
            attribs = {"long_name": var_name}
        attribs["coordinates"] = "latitude longitude"
        attribs["units"] = units
        attribs["min"] = mn
        attribs["max"] = mx
        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
            raise Exception("error processing {0}: all NaNs".format(var_name))
        try:
            var = f.create_variable(var_name, attribs,
                                    precision_str=precision_str,
                                    dimensions=("y", "x"))
        except Exception as e:
            estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        try:
            var[:] = array
        except Exception as e:
            estr = "error setting array to variable {0}:\n{1}".format(var_name,
                                                                      str(e))
            f.logger.warn(estr)
            raise Exception(estr)
        return f
    else:
        raise NotImplementedError("unrecognized export argument:{0}".format(f))
|
bdestombe/flopy-1
|
flopy/export/utils.py
|
Python
|
bsd-3-clause
| 38,049
|
[
"NetCDF"
] |
084f395428ed599857bcab053e67da17354da8f2a684421cbc5c330bffe8ff8a
|
#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import msvcrt
import os
import ConfigParser
import sys
import argparse
import logging
import json
import urllib
import urllib2
import urlparse
import base64
import subprocess
import re
import getpass
import traceback
import locale
import binascii
import xml.etree.ElementTree
# Minimum supported Crucible server version; referenced in error messages
# when the server cannot be reached or identified.
MIN_CRUCIBLE_VERSION = '3.0.0'
# Script name as invoked, used in help and version output.
SCRIPT_NAME = os.path.basename(__file__)
SCRIPT_VERSION='faefac2e8d32053719c807c4cd3dfaf3'  # opaque build fingerprint shown by --version
# Review id / hex string shared across helpers via `global` statements.
global_review_id = None
global_hex_string = None
### subprocess wrappers
def check_output(*popenargs, **kwargs):
    """Execute a command and return its standard output as a byte string.

    Both stdout and stderr are captured; a non-zero exit status raises a
    CalledProcessError carrying the captured streams.
    """
    if 'stdout' in kwargs or 'stderr' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    logging.debug('Trying to execute %s', popenargs)
    proc = subprocess.Popen(*popenargs, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, **kwargs)
    out, err = proc.communicate()
    status = proc.poll()
    if not status:
        logging.debug('Finished executing, exit code 0\nstdout=%s\nstderr=%s', out, err)
        return out
    # failure: report the command as given in kwargs, else the first arg
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    logging.debug('Error executing, exit code %s\nstdout=%s\nstderr=%s', status, out, err)
    raise CalledProcessError(status, command, output=out, error=err)
class CalledProcessError(subprocess.CalledProcessError):
    """subprocess.CalledProcessError extended with the captured streams."""

    def __init__(self, returncode, cmd, output=None, error=None):
        subprocess.CalledProcessError.__init__(self, returncode, cmd)
        # keep stdout/stderr so callers can log what the command printed
        self.output = output
        self.error = error
class Console:
    """Small console helper that colorizes output when ANSI is available."""

    # ANSI escapes are disabled on Windows and when stdout is not a terminal
    NO_ANSI = sys.platform == 'win32' or not sys.stdout.isatty()
    GREEN = '\033[92m'
    RED = '\033[91m'
    ESCAPE = '\033[0m'

    @staticmethod
    def print(s, color=None):
        """Print *s*, wrapped in *color* escapes when colorizing is enabled."""
        colorize = bool(color) and not Console.NO_ANSI
        if colorize:
            print('%s%s%s' % (color, s, Console.ESCAPE))
        else:
            print(s)

    @staticmethod
    def error(s):
        """Print *s* in red (when ANSI is enabled)."""
        Console.print(s, color=Console.RED)

    @staticmethod
    def success(s):
        """Print *s* in green (when ANSI is enabled)."""
        Console.print(s, color=Console.GREEN)
class HTTPRedirectHandler(urllib2.HTTPRedirectHandler):
    """ Override HTTPRedirectHandler to make sure the request data is preserved on redirect """
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        # Build the follow-up request for a redirect response; unlike the
        # default handler, the original request body (req.data) is kept.
        m = req.get_method()
        # Redirect only GET/HEAD on 301/302/303/307 and POST on 301/302/303
        # (a POST must not be auto-redirected on 307); anything else is an error.
        if code in (301, 302, 303, 307) and m in ("GET", "HEAD") or code in (301, 302, 303) and m == "POST":
            newurl = newurl.replace(' ', '%20')
            # NOTE(review): `k.lower() not in 'content-length'` is a substring
            # test, so any header whose lowercased name is a substring of
            # "content-length" (e.g. "TE") is also dropped -- confirm this is
            # intentional (the same idiom appears in the stdlib handler).
            newheaders = dict((k, v) for k, v in req.headers.items() if k.lower() not in 'content-length')
            return urllib2.Request(newurl,
                                   headers=newheaders,
                                   data=req.data,
                                   origin_req_host=req.get_origin_req_host(),
                                   unverifiable=True)
        else:
            raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
class Configuration(object):
    """Represents the configuration of the current execution of the script.

    Options are layered: fill_from_defaults() -> fill_from_config_file() ->
    fill_from_args() -> fill_interactively(), later layers overriding
    earlier ones.
    """

    def __init__(self):
        self._url = None
        self.username = None
        self.password = None
        self.authtoken = None
        self._id = None
        self.title = None
        self.reviewers = []
        self.moderator = None
        # external tools used by the script, keyed by logical name
        self.executables = {
            'svn': 'svn',
        }
        self.repository = None
        self.last_project = None
        # True until fill_interactively() has completed once
        self.initial_fill = True
        self.no_anchor = False
        self.encoding = None
        self.diff_file = None
        self.patch_source = None
        self.new_patch_source = False
        self.review_id = None
        self.hex_string = None

    @property
    def url(self):
        """Crucible server base URL, normalized (stripped, no trailing '/')."""
        return self._url

    @url.setter
    def url(self, value):
        self._url = value.rstrip('/').strip() if value else None

    @property
    def id(self):
        """Project key or review id the script operates on (stripped)."""
        return self._id

    @id.setter
    def id(self, value):
        self._id = value.strip() if value else None

    def store_review_id(self, value):
        """Remember the id of the review being worked on."""
        self.review_id = value

    def store_hex_string(self, hex_string):
        """Remember the hex string associated with the review."""
        self.hex_string = hex_string

    def fill_from_defaults(self):
        """Apply the hard-coded default server URL and the OS username."""
        self.url = 'http://aunr-fisheye-01.ali.local:8060'
        self.username = getpass.getuser()
        return self

    def fill_from_config_file(self, config_file):
        """Fill unset options from the persisted ConfigFile state."""
        if not self.url:
            self.url = config_file.get_default_url()
        if not self.password:
            # only reuse a stored auth token that belongs to the same user
            stored_token, stored_user = config_file.get_token(self.url)
            if not self.username or self.username == stored_user:
                self.authtoken, self.username = stored_token, stored_user
        if not self.review_id:
            self.review_id = config_file.get_review_id(self.url)
            global global_review_id
            global_review_id = self.review_id
        if not self.hex_string:
            self.hex_string = config_file.get_hex_string(self.url)
            global global_hex_string
            global_hex_string = self.hex_string
        return self

    def fill_from_args(self, args):
        """Override options with the parsed command-line arguments."""
        self.url = args.server or self.url
        self.username = args.user or self.username
        self.password = args.password or self.password
        self.title = args.title or self.title
        args_dict = vars(args)
        self.id = args_dict[str('project/review')] or self.id
        if '@reviewer' in args_dict:
            # reviewers are given as "@name" positionals; strip the marker
            for reviewer in args_dict[str('@reviewer')]:
                self.reviewers.append(reviewer.lstrip('@').strip())
            logging.debug('Parsed reviewers: %s', self.reviewers)
        self.moderator = args.moderator.lstrip('@').strip() if args.moderator else self.moderator
        self.repository = args.repository or self.repository
        self.no_anchor = args.noanchor or self.no_anchor
        self.encoding = args.encoding or self.encoding
        self.diff_file = args.file or self.diff_file
        self.new_patch_source = args.newpatch or self.new_patch_source
        return self

    def validate(self, check_title=False, check_id=False):
        """Checks if all the required server options are set.

        Returns True when valid; otherwise prints what is missing and
        exits the process with status 1.
        """
        if self.url and (self.authtoken or (self.username and self.password)) and (not check_id or self.id) and (not check_title or self.title):
            return True
        if not self.url:
            Console.error('ERROR: Please specify a Crucible server')
            sys.stderr.write("ERROR: Please specify a Crucible server\n")
        if not self.authtoken and not self.username:
            Console.error('ERROR: Please specify a Crucible username')
            sys.stderr.write("ERROR: Please specify a Crucible username\n")
        if not self.authtoken and not self.password:
            Console.error('ERROR: Please specify a Crucible password')
            sys.stderr.write("ERROR: Please specify a Crucible password\n")
        if check_id and not self.id:
            Console.error('ERROR: Please specify a project or review id')
            sys.stderr.write("ERROR: Please specify a project or review id\n")
        if check_title and not self.title:
            Console.error('ERROR: Please specify a review title')
            sys.stderr.write("ERROR: Please specify a review title\n")
        sys.stderr.write(CommandLine().help_blurb())
        sys.exit(1)

    def get_input(self, prompt='Password: ', pwd=False):
        """Prompt for a line of input using Windows msvcrt getch().

        Echoes '*' per character when pwd=True.  NOTE: Windows-only
        (relies on msvcrt); handles backspace and Ctrl-C.
        """
        for c in prompt:
            msvcrt.putch(c)
        input = ""
        while 1:
            c = msvcrt.getch()
            if c == '\r' or c == '\n':
                break
            if c == '\003':
                # Ctrl-C
                raise KeyboardInterrupt
            if c == '\b':
                # backspace: drop the last char and erase it from the console
                if input == '':
                    pass
                else:
                    input = input[:-1]
                    msvcrt.putch('\b')
                    msvcrt.putch(" ")
                    msvcrt.putch('\b')
            else:
                input = input + c
                if pwd == True:
                    msvcrt.putch("*")
                else:
                    msvcrt.putch(c)
        msvcrt.putch('\r')
        msvcrt.putch('\n')
        return input

    def fill_interactively(self, get_title=False, get_id=False, get_reviewers=False, get_pwd=False):
        """Fills the required parameters by prompting the user if they're not specified"""
        # BUG FIX: the guard used to be `if sys.stdin.isatty():`, which
        # skipped prompting exactly when a tty WAS attached -- contradicting
        # its own debug message and the `if not sys.stdin.isatty()` pattern
        # used in choose_anchor()/choose_source().  Inverted to match.
        if not sys.stdin.isatty():
            logging.debug('Not prompting for parameters interactively because stdin is not a tty')
        else:
            if get_id:
                while not self.id:
                    self.id = self.get_input(prompt='Please specify a project to create the review in: ')
            if get_title:
                while not self.title:
                    self.title = self.get_input(prompt='Please specify the review title: ')
            if get_pwd or self.authtoken is None:
                while not self.password:
                    self.password = self.get_input(prompt='Please specify the password: ', pwd = True)
            if get_reviewers and not self.reviewers:
                # default reviewer when none supplied
                self.reviewers.append('aroraam')
        self.validate(check_title=get_title, check_id=get_id)
        self.initial_fill = False
        return self

    def choose_anchor(self, repositories):
        """Allows choosing an anchor repository if none detected"""
        if not sys.stdin.isatty():
            logging.debug('Not prompting for anchor interactively because stdin is not a tty')
            return
        # only enabled repositories are valid anchor targets
        repository_names = [str(repository.get('name'))
                            for repository in filter(lambda repository: repository.get('enabled'), repositories)]
        while not self.repository and not self.no_anchor:
            repository = raw_input("Please choose a repository to anchor to, or press Enter to skip anchoring: ")
            if not repository:
                self.no_anchor = True
            elif repository in repository_names:
                self.repository = repository
            else:
                print('The repository doesn\'t exist or is disabled')

    def choose_source(self, matching_patch_groups):
        """Allows choosing a source from a list of matching ones"""
        if len(matching_patch_groups) == 1:
            # exactly one candidate: use it without prompting
            print('Adding patch to existing one: %s. Use --newpatch to add as a new patch instead.' % matching_patch_groups[0]['displayName'])
            self.patch_source = matching_patch_groups[0]['sourceName']
        elif len(matching_patch_groups) > 1:
            if not sys.stdin.isatty():
                logging.debug('Not prompting for source interactively because stdin is not a tty')
                return
            print('Found %s patches to add to:' % len(matching_patch_groups))
            i = 1
            print('0. Create a new patch')
            for patch_group in matching_patch_groups:
                print('%s. Add to %s' % (i, patch_group['displayName']))
                i += 1
            # loop until the user picks a valid index (0 = new patch)
            while not self.patch_source and not self.new_patch_source:
                try:
                    choice = int(raw_input('Pick patch to add to [0-%s]: ' % len(matching_patch_groups)))
                    if 0 < choice <= len(matching_patch_groups):
                        self.new_patch_source = False
                        self.patch_source = matching_patch_groups[choice - 1]['sourceName']
                    elif choice == 0:
                        self.new_patch_source = True
                        self.patch_source = None
                except ValueError:
                    pass
class ConfigFile:
    """Reads and persists script settings in ~/.atlassian/crucible.conf.

    The DEFAULT section stores the default server URL; one section per
    server URL stores the auth token, user, review id and hex string.
    """
    userConfigPath = os.path.expanduser('~/.atlassian/crucible.conf')
    DEFAULT_SECTION = 'DEFAULT'
    EXECUTABLES_SECTION = 'executables'
    URL = 'url'
    TOKEN = 'authtoken'
    USER = 'user'
    REVIEW_ID = 'review_id'
    HEX_STRING = 'hex_string'

    def __init__(self):
        self.config_parser = ConfigParser.RawConfigParser()
        # a missing file is fine: read() silently skips absent paths
        self.config_parser.read([self.userConfigPath])

    def _get(self, section, key):
        """Return the option value, or None when the section/option is absent."""
        if self.config_parser.has_option(section, key):
            return self.config_parser.get(section, key)
        return None

    def get_default_url(self):
        """Return the stored default server URL, if any."""
        return self._get(self.DEFAULT_SECTION, self.URL)

    def store_configuration(self, configuration):
        """Persist url, token, review id and hex string, then save to disk."""
        if self._get(self.DEFAULT_SECTION, self.URL) != configuration.url:
            self.config_parser.set(self.DEFAULT_SECTION, self.URL, configuration.url)
            print('Saved the default server URL %s to %s' % (configuration.url, self.userConfigPath))
        self.store_token(configuration.url, configuration.username, configuration.authtoken)
        self.store_review_id(configuration.url, configuration.review_id)
        self.store_hex_string(configuration.url, configuration.hex_string)
        self.save()

    def get_token(self, url):
        """Return (token, user) for *url*, or (None, None) when unknown."""
        if not self.config_parser.has_section(url):
            return None, None
        return self._get(url, self.TOKEN), self._get(url, self.USER)

    def store_token(self, url, user, token):
        """Store the auth token/user for *url* if they changed."""
        if self.get_token(url) == (token, user):
            return
        if not self.config_parser.has_section(url):
            self.config_parser.add_section(url)
        self.config_parser.set(url, self.TOKEN, token)
        self.config_parser.set(url, self.USER, user)
        print ('Saved an authentication token for %s to %s' % (url, self.userConfigPath))

    def get_review_id(self, url):
        """Return the stored review id for *url*, or None."""
        if not self.config_parser.has_section(url):
            return None
        return self._get(url, self.REVIEW_ID)

    def store_review_id(self, section, review_id):
        """Store the review id under *section* (creating it if needed)."""
        if not self.config_parser.has_section(section):
            self.config_parser.add_section(section)
        self.config_parser.set(section, self.REVIEW_ID, review_id)

    def get_hex_string(self, url):
        """Return the stored hex string for *url*, or None."""
        if not self.config_parser.has_section(url):
            return None
        return self._get(url, self.HEX_STRING)

    def store_hex_string(self, section, hex_string):
        """Store the hex string under *section* (creating it if needed)."""
        if not self.config_parser.has_section(section):
            self.config_parser.add_section(section)
        self.config_parser.set(section, self.HEX_STRING, hex_string)

    def save(self):
        """Write the config file, creating it with owner-only permissions."""
        try:
            config_dir = os.path.dirname(self.userConfigPath)
            if not os.path.exists(config_dir):
                os.makedirs(config_dir)
            fd = os.open(self.userConfigPath, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
            with os.fdopen(fd, 'wb') as configfile:
                self.config_parser.write(configfile)
        except IOError as e:
            print('Error saving the configuration file %s - %s' % (self.userConfigPath, e))
            logging.debug(traceback.format_exc())
class CommandLine:
    """Handles parsing the commandline parameters"""
    def __init__(self):
        # RawTextHelpFormatter keeps the hand-formatted epilog text intact;
        # add_help=False so the custom HelpAction below can be used instead.
        self.parser = argparse.ArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter,
            description='Creates reviews in Atlassian Crucible from the command line',
            add_help=False,
            epilog='''
EXAMPLE USAGE:
%(executable)s
run interactively, try to get a patch from SCM, prompt for review details and create a new review\n
cat diff | %(executable)s CR -m "Review title"
take the patch from the output of first command, and create a review in project CR with title "Review title"\n
%(executable)s CR @ted @matt --moderator @john
try to get a patch from SCM, create a review in project CR add ted and matt as reviewers, and john as moderator\n
%(executable)s CR-120
update the review CR-120, adding the current patch from SCM\n
%(executable)s -r repository1
create a review and anchor it to repository1, instead of trying to detect the repository automatically\n
''' % {'executable': SCRIPT_NAME})
        # Custom help action that re-labels argparse's default section headers
        # before printing the help text.
        class HelpAction(argparse._HelpAction):
            def __call__(self, parser, namespace, values, option_string=None):
                print(parser.format_help()
                      .replace('usage:', 'USAGE:')
                      .replace('positional arguments:', 'POSITIONAL ARGUMENTS:')
                      .replace('optional arguments:', 'OPTIONAL ARGUMENTS:'))
                parser.exit()
        # Positionals: the target project/review plus any number of @reviewer names.
        self.parser.add_argument('project/review', type=str, nargs='?', default=None, help='the name of the project to create the review in or the id of a review to update\n\n')
        self.parser.add_argument('@reviewer', type=str, nargs='*', default=[], help='the usernames of the reviewers to be added')
        self.parser.add_argument('-h', '--help', action=HelpAction, help='show this help message and exit\n\n')
        self.parser.add_argument('-m', '--title', type=str, help="the title for the review\n\n")
        self.parser.add_argument('-M', '--moderator', type=str, help="the moderator for the review\n\n")
        self.parser.add_argument('-r', '--repository', type=str, help='the repository to anchor to\n\n')
        self.parser.add_argument('-f', '--file', type=str, help='get the diff from the specified file\n\n')
        self.parser.add_argument('-s', '--server', type=str, help="the url of the Crucible server to connect to\n\n")
        self.parser.add_argument('-u', '--user', type=str, help="the Crucible username to create the review as\n\n")
        self.parser.add_argument('-p', '--password', type=str, help="the Crucible user password\n\n")
        self.parser.add_argument('-n', '--noanchor', action='store_const', const=True, help='don\'t try to detect the repository to anchor the patch\n\n')
        self.parser.add_argument('-N', '--newpatch', action='store_const', const=True, help='add as a new patch instead of trying to an existing one\n\n')
        self.parser.add_argument('-e', '--encoding', type=str, help='the name of the encoding to use, see http://docs.python.org/2/library/codecs.html#standard-encodings\n\n')
        self.parser.add_argument('-d', '--debug', action='store_const', const=True, help="print debugging information\n\n")
        self.parser.add_argument('-v', '--version', action='version', version=('%s %s' % (SCRIPT_NAME, SCRIPT_VERSION)))
    def parse_args(self):
        """Parse sys.argv; enable debug logging and reject conflicting flags."""
        args = self.parser.parse_args()
        if args.debug:
            logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
        # --noanchor and --repository are mutually exclusive
        if args.noanchor and args.repository:
            Console.error('Please choose either --noanchor or --repository')
            print(self.help_blurb())
            sys.exit(1)
        return args
    def help_blurb(self):
        """Return the one-line pointer to --help shown after usage errors."""
        return 'Try \'%s --help\' for more information.' % os.path.basename(sys.argv[0])
class CrucibleRest:
"""Encapsulates Crucible REST endopoints"""
timeout = 30000
code_anchor_failed = 'PatchAnchorFailed'
code_content_too_large = 'ChangeSetContentTooLarge'
code_review_content_too_large = 'ReviewContentTooLarge'
http_handlers_none_on_errors = {
400: lambda http_error, error_body: None,
404: lambda http_error, error_body: None,
}
def __init__(self, configuration):
self.configuration = configuration
self.headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
self.repositories = None
def __build_headers(self, custom_headers, use_token):
headers = {}
for key, value in self.headers.items():
headers[key] = value
if not use_token and self.configuration.username and self.configuration.password:
headers['Authorization'] = 'Basic ' + base64.b64encode(
str('%s:%s') % (self.configuration.username, self.configuration.password)).strip(),
for key, value in custom_headers.items():
headers[key] = value
return headers
def __build_payload(self, data):
if not data:
return None
encodings = []
if self.configuration.encoding:
encodings.append(self.configuration.encoding)
else:
encodings.append('UTF-8')
default_encoding = locale.getdefaultlocale()[1]
if default_encoding and default_encoding not in encodings:
encodings.append(default_encoding)
if sys.stdin.encoding not in encodings:
encodings.append(sys.stdin.encoding)
payload = None
for encoding in encodings:
try:
if isinstance(data, unicode):
payload = data
if isinstance(data, str):
logging.debug('Trying to encode str as %s', encoding)
payload = unicode(data, encoding=encoding).encode('utf-8')
else:
logging.debug('Trying to encode json as %s', encoding)
payload = json.dumps(data, encoding=encoding)
break
except ValueError:
logging.debug('Encoding failed: %s', traceback.format_exc())
sys.stderr.write('Encoding failed:' + str(traceback.format_exc()) +'\n')
if not payload:
Console.error(
'Error encoding the request (tried %s), please specify an --encoding parameter' % ', '.join(
encodings))
sys.exit(1)
return payload
def __build_url(self, url, use_token):
joined_url = self.configuration.url + url
if not use_token:
return joined_url
else:
scheme, netloc, path, query, fragment = urlparse.urlsplit(joined_url)
query_dict = urlparse.parse_qs(query)
query_dict['FEAUTH'] = [self.configuration.authtoken]
return urlparse.urlunsplit((scheme, netloc, path, urllib.urlencode(query_dict, doseq=True), fragment))
def _log_request(self, headers, payload, resourceUrl):
resourceUrl = re.sub(r'FEAUTH=(.*)%3A([0-9]+)%3A([0-9a-f]+)', r'FEAUTH=\1%3A\2%3A++SANITIZED++', resourceUrl)
if payload:
payload = re.sub(r'password=(.*)([\s&]?)', r'password=++SANITIZED++\2', payload)
if headers and headers.get('Authorization'):
headers['Authorization'] = '++SANITIZED++'
logging.debug('RestRequest: %s - %s - %s', resourceUrl, headers, payload)
def _log_response(self, response):
if response:
response = re.sub(r'"token":"(.*):([0-9]+):([0-9a-f]+)"', r'"token":"\1:\2:++SANITIZED++"', response)
logging.debug('RestResponse: %s', response)
def _request(self, url, data=None, http_handlers = {}, use_token=True, custom_headers={}):
"""Executes a REST request the given rest resource, It's a POST if data is set, a GET otherwise"""
resourceUrl = self.__build_url(url, use_token)
headers = self.__build_headers(custom_headers, use_token)
request = urllib2.Request(url=resourceUrl, headers=headers)
payload = self.__build_payload(data)
self._log_request(headers, payload, resourceUrl)
try:
response = urllib2.build_opener(HTTPRedirectHandler()).open(request, data=payload, timeout=self.timeout).read()
except urllib2.HTTPError as error:
logging.debug('RestError: %s', error)
sys.stderr.write('RestError: '+ str(error)+'\n')
error_body = error.read()
try:
error_body = json.loads(error_body)
except ValueError:
pass
logging.debug('RestErrorBody: %s', error_body)
sys.stderr.write('RestErrorBody: '+ str(error_body)+'\n')
if error.code in http_handlers:
return http_handlers[error.code](error, error_body)
if error.code == 404:
error_msg = error_body['message'] if isinstance(error_body, dict) and 'message' in error_body \
else 'Please check that %s is a Crucible server and the version is at least %s' % (self.configuration.url, MIN_CRUCIBLE_VERSION)
Console.error('Error : %s' % error_msg)
sys.stderr.write('Error : ' + str(error_msg)+'\n')
elif error.code == 401:
Console.error('Authorization error: please check that your username and password are valid, and that you have the correct permissions.')
sys.stderr.write('Authorization error: please check that your username and password are valid, and that you have the correct permissions.\n')
elif error.code == 403:
error_msg = error_body['message'] if isinstance(error_body, dict) and 'message' in error_body else 'You do not have permission to perform this operation'
permissions_msg = 'Please contact your administrator if you need access'
Console.error('Error: %s\n%s' % (error_msg, permissions_msg))
sys.stderr.write('Error: '+ str(error_msg)+'\n'+str(permissions_msg)+'\n')
elif error.code == 500:
Console.error('Server Error: %s' % error_body['message'] if isinstance(error_body, dict) and 'message' in error_body else error_body)
sys.stderr.write('Server Error code 500 \n')
else:
Console.error('Received an unexpected response %s. Please check that %s is a Crucible server' % (error, self.configuration.url))
sys.stderr.write('Received an unexpected response '+str(error)+ ' Please check that '+ str(self.configuration.url) + ' is a Crucible server')
sys.exit(1)
except Exception as error:
logging.debug(traceback.format_exc())
sys.stderr.write(traceback.format_exc())
Console.error('Eror executing request %s - %s' % (resourceUrl, error))
sys.stderr.write('Error executing request ' + str(resourceUrl)+' '+ str(error))
sys.exit(1)
self._log_response(response)
if 200 in http_handlers:
return http_handlers[200](response)
try:
return json.loads(response)
except ValueError:
sys.stderr.write("response returned an error")
return response
def check_connection(self):
""" Makes sure we can connect and authenticate with Crucible"""
# if there's a token, check that it's still valid
server_info = None
if self.configuration.authtoken:
server_info = self._request(url="/rest-service-fecru/server-v1", http_handlers={
401: lambda error, error_body: None
})
if not server_info:
sys.stderr.write("Your login token has expired, please re-authenticate\n")
self.configuration.authtoken = None
self.configuration.fill_interactively(get_pwd = True)
# get a token if none defined at this point
if not self.configuration.authtoken:
logging.debug('No authtoken, trying to get one')
login_success, login_response = self._request(
url='/rest-service/auth-v1/login',
data=urllib.urlencode({'userName':self.configuration.username, 'password':self.configuration.password}),
http_handlers={
403: lambda error, error_body: (False, error_body),
200: lambda response: (True, json.loads(response))
},
use_token=False,
custom_headers={'Content-Type':'application/x-www-form-urlencoded'}
)
if not login_success:
message = login_response['error'] if isinstance(login_response, dict) and 'error' in login_response else 'Please check that the username and password provided are correct.'
Console.error('Error authenticating with Crucible. ' + message)
sys.stderr.write('Error authenticating with Crucible. ' + str(message) + '\n')
sys.exit(1)
self.configuration.authtoken = login_response['token']
server_info = server_info or self._request(url="/rest-service-fecru/server-v1")
if not server_info['isCrucible']:
Console.error('Connected successfully to %s but no Crucible license is present.' % self.configuration.url)
sys.stderr.write('Connected successfully to '+ str(self.configuration.url)+ ' but no Crucible license is present.')
sys.exit(1)
logging.debug('Connected successfully to %s - Crucible version %s', self.configuration.url, server_info['version']['releaseNumber'])
def completedReviewers(self):
global global_review_id
resp = self._request(url = "/rest-service/reviews-v1/" + global_review_id + "/reviewers/completed", custom_headers={'content-type':'application/json', 'accept':'application/json'})
response = resp["reviewer"]
for item in response:
isCompleted = item.get("completed")
if str(isCompleted) == "True":
return True
return False
def add_reviewers(self, review_id, reviewers):
"""Adds the configuration.reviewers to the review with the given id. Returns True if any reviewers were added"""
if not reviewers: return False
logging.debug('Adding reviewers: %s', ', '.join(reviewers))
reviewers_added = False
for reviewer in reviewers:
if reviewer:
reviewer_added, reviewer_msg = self._request("/rest-service/reviews-v1/%s/reviewers" % str(review_id), data=reviewer,
http_handlers={
404: lambda http_error, error_body: (False, 'No user \'%s\' found - not adding as a reviewer' % reviewer),
400: lambda http_error, error_body: (False, error_body['message'] if 'message' in error_body else 'Error adding reviewer %s' % reviewer ),
200: lambda response: (True, '')
})
if not reviewer_added:
print(reviewer_msg)
else:
reviewers_added = True
return reviewers_added
def handle_anchor_error(self, http_error, error_body):
    # Handler for HTTP 409 responses when creating/updating a review:
    # inspects the server error code and aborts with a user-facing message
    # for known conditions, otherwise just reports the conflict.
    if error_body.get('code') == CrucibleRest.code_anchor_failed:
        # The server could not anchor the patch to the configured repository.
        Console.error('Error: Failed to anchor the patch to repository %s, please check that the repository is the correct one' % self.configuration.repository)
        sys.stderr.write('Error: Failed to anchor the patch to repository '+ str(self.configuration.repository) + ' please check that the repository is the correct one')
        sys.exit(1)
    elif error_body.get('code') == CrucibleRest.code_content_too_large or error_body.get('code') == CrucibleRest.code_review_content_too_large:
        # Patch exceeds the server's configured size limit.
        sys.stderr.write('Error: The patch you\'re trying to upload is too large. ' + str(error_body.get('message')))
        Console.error('Error: The patch you\'re trying to upload is too large. ' + error_body.get('message'))
        sys.exit(1)
    else:
        # Unknown conflict cause: report it, but do not exit here.
        sys.stderr.write('Received a 409 Conflict response: ' + str(error_body['message']))
        Console.error('Received a 409 Conflict response: %s' % error_body['message'])
def create_review(self, patch, project):
    """Creates a new review from the patch, in the given project.

    Side effects: stores the new review id in the module-level
    global_review_id, may transition the review out of Draft, and reports
    progress on stderr.
    """
    logging.debug('Creating new review in project %s', project['key'])
    review_data = {"reviewData": {
        "projectKey": project['key'],
        "name": self.configuration.title,
        "description": project['defaultObjectives'] if 'defaultObjectives' in project else None,
        },
    }
    # Attach the patch payload (and anchor repository, when configured).
    review_data = self.add_patch_data(patch, request_dict=review_data)
    if self.configuration.moderator:
        if not project['moderatorEnabled']:
            print('Project %s doesn\'t have a moderator role enabled, not setting a moderator' % project['key'])
        else:
            review_data['reviewData']['moderator'] = {'userName':self.configuration.moderator}
    create_response = self._request("/rest-service/reviews-v1", review_data, http_handlers={
        409: self.handle_anchor_error
    })
    review_id = create_response['permaId']['id']
    # Remember the created review id for the rest of this run.
    global global_review_id
    global_review_id = review_id
    sys.stderr.write("\nReview created: "+ str(review_id) +"\n")
    review_state = create_response['state']
    reviewers_added = self.add_reviewers(review_id, self.configuration.reviewers)
    if reviewers_added or project['defaultReviewers']:
        # At least one reviewer is present, so start the review immediately.
        logging.debug('Starting review')
        approve_response = self._request('/rest-service/reviews-v1/%s/transition?action=action:approveReview' % review_id, data=' ', http_handlers={
            401: lambda http_error, error_body: print('You don\'t have permission to approve the review')
        })
        if approve_response:
            review_state = approve_response['state']
    else:
        print('No reviewers added, review will be left in Draft state')
    Console.success('Created review %(id)s (state: %(state)s) - %(url)s/cru/%(id)s'
                    % ({'id': review_id, 'state': review_state,'url':self.configuration.url}))
    sys.stderr.write('Created review '+ str(review_id) + ' state: ' + str(review_state) + ' - ' + str(self.configuration.url) + '/cru/'+str(review_id) + '\n')
def add_patch_data(self, patch, request_dict=None):
    """Insert the patch (and the anchor repository, when one is configured)
    into *request_dict* and return it.

    Fix: the original used a mutable default argument (request_dict={}),
    which is shared across calls and would leak keys between requests.
    """
    if request_dict is None:
        request_dict = {}
    request_dict['patch'] = patch
    if self.configuration.repository:
        request_dict['anchor'] = {'anchorRepository' : self.configuration.repository}
    return request_dict
def get_iterable_patchgroups(self, review_id, repository):
    """Iterate the review's patch groups whose first patch is anchored to *repository*."""
    groups = self._request('/rest-service/reviews-v1/%s/patch' % review_id)['patchGroup']
    def anchored_to_repo(group):
        first_patch = group['patches'][0]
        return 'anchor' in first_patch and first_patch['anchor'].get('anchorRepository') == repository
    return filter(anchored_to_repo, groups)
def add_to_review(self, patch, review):
    """Adds the patch and reviewers to the given review (id)."""
    #review_id = review['permaId']['id']
    review_id = review
    data = self.add_patch_data(patch)
    # When anchored and not forced to create a new patch source, let the
    # user pick an existing patch group to update instead.
    if 'anchor' in data and not self.configuration.new_patch_source:
        matching_patch_groups = self.get_iterable_patchgroups(review_id, data['anchor']['anchorRepository'])
        self.configuration.choose_source(matching_patch_groups)
        if self.configuration.patch_source:
            data['source'] = self.configuration.patch_source
    sys.stderr.write('Adding patch to review ' + str(review_id) + "\n")
    #raw_input('Please specify the review title: ')
    patch_response = self._request('/rest-service/reviews-v1/%s/patch' % review_id, data=data, http_handlers={
        409: self.handle_anchor_error
    })
    self.add_reviewers(review_id, self.configuration.reviewers)
    sys.stderr.write('Updated review: ' + str(review_id)+ ' state: ' + str(patch_response['state']) + ' url: ' + str(self.configuration.url) + '\n')
def get_review(self, id):
    """Return the ReviewData json for *id*, or None when it does not exist or the request fails."""
    url = '/rest-service/reviews-v1/%s' % id
    try:
        return self._request(url, http_handlers=CrucibleRest.http_handlers_none_on_errors)
    except StandardError:
        # Any request failure is treated as "no such review".
        return None
def get_project(self, id):
    """Return the ProjectData json for *id*, or None when it does not exist or the request fails."""
    url = '/rest-service/projects-v1/%s?excludeAllowedReviewers=true' % id
    try:
        return self._request(url, http_handlers=CrucibleRest.http_handlers_none_on_errors)
    except StandardError:
        # Any request failure is treated as "no such project".
        return None
def get_last_project(self):
    """Retrieve the id of the most recently visited project, or None on failure.

    Fix: replaced the un-idiomatic `if(len(recent_projects)) == 0` with a
    truthiness check; behavior is unchanged.
    """
    try:
        recent_projects = self._request('/rest-service-fecru/recently-visited-v1/projects')['project']
        if not recent_projects:
            return None
        return recent_projects[0]['entityId']
    except StandardError:
        logging.debug('Error getting last project: %s', traceback.format_exc())
        return None
def find_repository(self, source):
    """Tries to find the anchor repository for the source given.

    Returns the matching server-side repository name, or None when no
    enabled repository matches or the lookup fails.
    """
    try:
        for repository in self.get_repositories():
            logging.debug('Matching remote repository %s', repository['name'])
            # Delegate the actual URL matching to the SCM-specific source.
            if repository['enabled'] and source.matches_repository(repository):
                return repository['name']
    except StandardError:
        # Best-effort suggestion: failures only disable auto-detection.
        logging.debug('Error suggesting anchor repository: %s', traceback.format_exc())
    return None
def get_repositories(self):
    """Fetch (once) and memoize the server's repository list."""
    cached = self.repositories
    if cached:
        return cached
    self.repositories = self._request('/rest-service/repositories-v1')['repoData']
    return self.repositories
def is_script_update_available(self):
    """Ask the server whether a newer version of this script is available."""
    url = '/rest/review-cli/1.0/version/updateCheck?version=%s' % SCRIPT_VERSION
    return self._request(url)['isUpdateAvailable']
class PatchSource(object):
    """Base class for different ways to get a patch.

    Concrete SCM sources are discovered via ``PatchSource.__subclasses__()``
    in :meth:`load_patch`; each subclass detects its working-copy type and
    extracts a diff from it.
    """
    def __init__(self, configuration):
        self.configuration = configuration
        # Remote URLs detected for the local working copy; used to match a
        # server-side repository to anchor the patch to.
        self.paths = []
    def is_active(self):
        """Should return True if this is the source can be used"""
    def get_patch(self):
        """Should return the patch content"""
    def get_review(self):
        """A user visible name for the source"""
    def executable(self):
        """The binary to execute commands on the repository"""
    def matches_repository(self, repository_data):
        """Whether the source matches the given RepositoryData json"""
    def load_patch(self):
        """Queries different patch sources for a patch.

        Returns a ``(patch, source)`` tuple from the first active subclass
        that yields a patch, or ``(None, None)``.
        """
        for source_class in PatchSource.__subclasses__():
            logging.debug('Checking %s.is_active', source_class)
            source = source_class(self.configuration)
            if source.is_active():
                source._validate_executable()
                patch = source.get_patch()
                if patch:
                    logging.debug('Got %s bytes', len(patch))
                    # Only collect remote paths when a repository still needs
                    # to be auto-detected for anchoring.
                    if not self.configuration.no_anchor and not self.configuration.repository:
                        source.load_paths()
                        logging.debug('Loaded paths: %s', source.paths)
                    return patch, source
        return None, None
    def _validate_executable(self):
        # Wraps validate_executable() and turns execution failures into a
        # fatal, user-facing error.
        try:
            self.validate_executable()
        except (OSError, CalledProcessError) as e:
            Console.error('A %s repository was detected, but there was an error executing \'%s\': %s' % (self, self.configuration.executables[self.executable()], e))
            sys.stderr.write('A '+str(self)+ 'repository was detected, but there was an error executing\n')
            sys.exit(1)
    def validate_executable(self):
        # Subclasses may override with a cheap sanity command.
        pass
    def load_paths(self):
        """Loads the potential remote paths for the given repo type to be used when trying to detect a repository to anchor to"""
        pass
    def matches_url(self, url):
        # Compare host, port and path of *url* against each detected remote.
        requested_path = urlparse.urlsplit(url)
        logging.debug('Matching remote url: %s', requested_path.__str__())
        for path in self.paths:
            local_path = urlparse.urlsplit(path)
            logging.debug('Matching local url: %s', local_path.__str__())
            if requested_path.hostname == local_path.hostname and requested_path.port == local_path.port and requested_path.path == local_path.path:
                return True
        return False
    def find_metadata_dir(self, dirname):
        """Searches for the specified directory name starting with the current directory and going upwards the directory tree"""
        cwd = os.getcwd()
        if os.path.exists(os.path.join(cwd, dirname)):
            return True
        tmp = os.path.abspath(os.path.join(cwd, os.pardir))
        # Stop when the parent equals the current dir (filesystem root).
        while tmp is not None and tmp != cwd:
            cwd = tmp
            if os.path.exists(os.path.join(cwd, dirname)):
                return True
            tmp = os.path.abspath(os.path.join(cwd, os.pardir))
        return False
class SvnSource(PatchSource):
    """Gets a patch from a Subversion working copy via the `svn` CLI."""
    def is_active(self):
        # Active when a .svn metadata dir exists here or in any parent dir.
        return self.find_metadata_dir('.svn')
    def get_patch(self):
        """Return the output of `svn diff`, or None when the command failed."""
        try:
            return check_output([self.configuration.executables[self.executable()], 'diff'])
        except CalledProcessError as e:
            # Fix: CalledProcessError has no `.error` attribute; the captured
            # command output lives in `.output` (the original raised
            # AttributeError here instead of reporting the svn failure).
            print('svn diff returned error: %s' % e.output.strip())
            return None
    def __str__(self):
        return 'Subversion'
    def executable(self):
        return 'svn'
    def validate_executable(self):
        # Cheap sanity check that the configured svn binary runs at all.
        check_output([self.configuration.executables[self.executable()], 'help'])
    def load_paths(self):
        """Collect the working copy's remote URL from `svn info`."""
        info_output = check_output([self.configuration.executables[self.executable()], 'info']).splitlines()
        sys.stderr.write('load path')
        if info_output:
            for line in info_output:
                if line.startswith('URL:'):
                    line_split = line.split()
                    if len(line_split) > 1:
                        self.paths.append(line_split[1].strip())
                    break
    def matches_repository(self, repository_data):
        """True when the working copy URL is under the server repository URL."""
        if not repository_data['type'] == 'svn':
            return False
        remote_url = (repository_data['url'].rstrip('/') + '/' + repository_data['path']).rstrip('/')
        logging.debug('Matching remote url %s', remote_url)
        # Fix: guard against load_paths() having found no URL (the original
        # raised IndexError on self.paths[0] in that case).
        if not self.paths:
            return False
        return self.paths[0].startswith(remote_url)
# a function to get the uncompleted reviwers for a single review
def CheckDollarCharacter():
    """Scan .c/.cpp/.h/.xml files below this script's directory and list, on
    stderr, every file containing a hard coded '$' character.

    Fixes: file handles are now closed deterministically (the original
    leaked every opened handle), the redundant single-element `filePath`
    list is gone, and the stderr header is only printed when offending
    files were actually found.
    """
    offending_files = []
    for root, dirs, files in os.walk(os.path.dirname(os.path.abspath(__file__))):
        for file_name in files:
            if file_name.endswith((".c", ".cpp", ".h", ".xml")):
                full_path = os.path.join(root, file_name)
                with open(full_path) as handle:
                    for line in handle:
                        if '$' in line:
                            offending_files.append(str(file_name))
                            break
    if offending_files:
        sys.stderr.write("Please remove hard coded dollar character from following files:\n")
        for file_name in offending_files:
            sys.stderr.write(str(file_name) + "\n")
def CheckShowProgramModeEnabled():
    """Exit with status 1 when configuration.xml has ShowPrograms/Enabled set to 'true'.

    Searches below this script's directory for the first configuration.xml.
    Fix: silently returns when none is found (the original crashed with
    IndexError on confPath[0]).
    """
    conf_path = None
    for root, dirs, files in os.walk(os.path.dirname(os.path.abspath(__file__))):
        for file_name in files:
            if file_name.endswith("configuration.xml"):
                conf_path = os.path.join(root, file_name)
                break
        if conf_path:
            break
    if conf_path is None:
        return
    tree_root = xml.etree.ElementTree.parse(conf_path).getroot()
    for child in tree_root:
        # Matching on str(element) mirrors the original logic; the repr of an
        # Element contains its tag name, so this effectively tests the tag.
        if 'ShowPrograms' in str(child):
            for sub_element in list(child):
                if 'Enabled' in str(sub_element):
                    print(sub_element.text)
                    if sub_element.text == 'true':
                        sys.stderr.write("Please change show program enabled tag value to false in configuration.xml\nExiting...\n")
                        sys.exit(1)
def main():
    """Runs the script to create a review.

    Flow: validate project config, collect configuration, authenticate,
    obtain a patch from the local SCM, then either update the tracked
    review or create a new one. Exit codes: 0 = review complete (commit
    allowed), 1 = review pending or an error occurred.
    """
    #To check the show progam mode is enabled in the config file
    CheckShowProgramModeEnabled()
    #To check if any hard coded '$' is present in the code
    #CheckDollarCharacter()
    # read commandline
    command_line = CommandLine()
    args = command_line.parse_args()
    # parse/fill config: CLI args, then config file, then defaults, then
    # interactive prompts for anything still missing.
    config_file = ConfigFile()
    configuration = Configuration().fill_from_args(args).fill_from_config_file(config_file).fill_from_defaults().fill_interactively()
    # set up authentication, check for updates
    rest = CrucibleRest(configuration)
    rest.check_connection()
    config_file.store_configuration(configuration)
    if rest.is_script_update_available():
        print('An updated version of this script is available. Visit %s to download it' % (configuration.url + '/plugins/servlet/viewReviewCLI'))
    # get a patch from the first active SCM source
    patch, source = PatchSource(configuration).load_patch()
    if not patch:
        Console.error('Failed to get a patch%s. Please make sure to call the script from an SCM directory with local changes, or pipe in some input.'
                      % (' from %s' % source if source else ''))
        sys.stderr.write('Failed to get a patch\n')
        sys.exit(1)
    # create or update the review
    if not configuration.repository and not configuration.no_anchor:
        # Try to auto-detect which server-side repository to anchor to.
        detected_repository = rest.find_repository(source)
        if detected_repository:
            configuration.repository = detected_repository
            print('Detected Crucible repository %s. Use --repository or --noanchor options to override' % configuration.repository)
        else:
            sys.stderr.write('No matching FishEye repository detected\n')
            configuration.choose_anchor(rest.get_repositories())
    review = configuration.review_id
    if review != "None" and review != None:
        # A review is already being tracked in the stored configuration.
        isCompleteReview = bool(rest.completedReviewers())
        if isCompleteReview == True:
            # Review signed off: clear the stored id and allow the commit.
            sys.stderr.write("Commiting Changes\n")
            configuration.review_id = None
            config_file.store_configuration(configuration)
            sys.exit(0)
        else:
            # Re-upload the patch only when its content changed since the
            # last run (tracked via the hex encoding of the patch bytes).
            hex_string = binascii.hexlify(patch)
            global global_hex_string
            if hex_string != global_hex_string:
                global_hex_string = hex_string
                configuration.store_hex_string(hex_string)
                config_file.store_configuration(configuration)
                rest.add_to_review(patch, review)
            sys.stderr.write("Please get your changes reviewed before commit\n")
            sys.exit(1)
    # No tracked review yet: pick a project and create one.
    configuration.fill_interactively(get_id=True)
    project = rest.get_project(configuration.id)
    if project:
        configuration.fill_interactively(get_title=True, get_reviewers=True)
        sys.stderr.write("Creating Review...\n")
        rest.create_review(patch, project)
        # Persist the patch fingerprint and the new review id for next runs.
        hex_string = binascii.hexlify(patch)
        configuration.store_hex_string(hex_string)
        global global_review_id
        configuration.store_review_id(global_review_id)
        config_file.store_configuration(configuration)
        sys.stderr.write("Please get your changes reviewed for the review id: " + str(global_review_id)+"\n")
        sys.exit(1)
    sys.stderr.write('Failed to find a review or project with an id ' + str (configuration.id)+ ', make sure the id is correct.')
    sys.exit(1)
# Script entry point: create or refresh a Crucible review from local changes.
if __name__ == '__main__':
    main()
|
jalandra/cool_scripts
|
crucible.py
|
Python
|
gpl-3.0
| 47,518
|
[
"VisIt"
] |
f0e7e3d692c4b42eec18542e8ea02f5b9615b174ce9253da588f1324a6173c92
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 - 2019 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <adam.dybbroe@smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Make a CTTH composite."""
import argparse
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
from trollimage.xrimage import XRImage
from mesan_compositer import ctth_height
from satpy.composites import ColormapCompositor
from mesan_compositer import (ProjectException, LoadException)
from mesan_compositer.pps_msg_conversions import ctth_procflags2pps
from nwcsaf_formats.pps_conversions import ctth_convert_flags
from mesan_compositer.composite_tools import METOPS
from mesan_compositer.netcdf_io import ncCTTHComposite
from mesan_compositer import get_config
from mesan_compositer.composite_tools import (get_msglist,
get_ppslist,
get_weight_ctth)
import sys
import os
import tempfile
import shutil
from logging import handlers
import logging
LOG = logging.getLogger(__name__)
#: Default time format
_DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
#: Default log format
_DEFAULT_LOG_FORMAT = '[%(levelname)s: %(asctime)s : %(name)s] %(message)s'
def get_arguments():
    """Get command line arguments.

    Returns
    -------
    logging_conf_file : str or None
        File path of the logging.ini file.
    config_file : str
        File path of the application configuration file.
    tanalysis : datetime.datetime
        Observation/analysis time.
    area_id : str
        Area id.
    delta_t : datetime.timedelta
        Half-width of the time window around the analysis time.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--datetime', '-d', help='Date and time of observation - yyyymmddhh',
                        required=True)
    parser.add_argument('--time_window', '-t', help='Number of minutes before and after time window',
                        required=True)
    parser.add_argument('--area_id', '-a', help='Area id',
                        required=True)
    parser.add_argument('-c', '--config_file',
                        type=str,
                        dest='config_file',
                        required=True,
                        help="The file containing configuration parameters e.g. mesan_sat_config.yaml")
    parser.add_argument("-l", "--logging",
                        help="The path to the log-configuration file (e.g. './logging.ini')",
                        dest="logging_conf_file",
                        type=str,
                        required=False)
    parser.add_argument("-v", "--verbose",
                        help="print debug messages too",
                        action="store_true")
    args = parser.parse_args()
    tanalysis = datetime.strptime(args.datetime, '%Y%m%d%H')
    delta_t = timedelta(minutes=int(args.time_window))
    area_id = args.area_id
    # Refuse to run with the template config shipped with the package.
    if 'template' in args.config_file:
        print("Template file given as master config, aborting!")
        sys.exit()
    return args.logging_conf_file, args.config_file, tanalysis, area_id, delta_t
def ctth_pps(pps, areaid):
    """Load a PPS CTTH scene (data + geolocation files) and resample it onto *areaid*."""
    from satpy.scene import Scene
    datasets = ['ctth_alti', 'ctth_pres', 'ctth_tempe', 'ctth_quality',
                'ctth_conditions', 'ctth_status_flag']
    scene = Scene(filenames=[pps.uri, pps.geofilename], reader='nwcsaf-pps_nc')
    scene.load(datasets)
    return scene.resample(areaid, radius_of_influence=8000)
def ctth_msg(msg, areaid):
    """Load an MSG parallax-corrected CTTH scene and resample it onto *areaid*."""
    from satpy.scene import Scene
    datasets = ['ctth_alti', 'ctth_pres', 'ctth_tempe', 'ctth_quality', 'ctth_effective_cloudiness']
    scene = Scene(filenames=[msg.uri, ], reader='nwcsaf-msg2013-hdf5')
    scene.load(datasets)
    return scene.resample(areaid, radius_of_influence=20000)
class mesanComposite(object):
    """Master class for the Mesan cloud product composite generators."""
    def __init__(self, obstime, tdiff, areaid, **kwargs):
        """Initialize the Mesan Composite instance."""
        self.description = "Unknown composite"
        self.obstime = obstime
        self.timediff = tdiff
        # Scenes are accepted within +/- tdiff around the analysis time.
        self.time_window = (obstime - tdiff, obstime + tdiff)
        LOG.debug("Time window: " + str(self.time_window[0]) +
                  " - " + str(self.time_window[1]))
        self.polar_satellites = []
        self.msg_satellites = []
        self.msg_areaname = 'unknown'
        self.longitude = None
        self.latitude = None
        # A Satpy-scene area object:
        self.area = None
        self._options = {}
        self.pps_scenes = []
        self.msg_scenes = []
        # Product name per data source; overridden by subclasses.
        self.product_names = {'msg': 'unknown', 'pps': 'unknown'}
        self.composite = None
    def get_catalogue(self, product):
        """Get a list of meta-data for all input scenes to process.

        Get the meta data (start-time, satellite, orbit number etc) for all
        available satellite scenes (both polar and geostationary) within the
        time window specified. For the time being this catalouge generation
        will be done by simple file globbing. In a later stage this will be
        done by doing a DB search.
        *product* can be either 'cloudtype' or 'ctth'
        """
        from glob import glob
        # Get all polar satellite scenes:
        pps_dr_dir = self._options.get('pps_direct_readout_dir', None)
        LOG.debug('pps_dr_dir = ' + str(pps_dr_dir))
        pps_gds_dir = self._options.get('pps_metop_gds_dir')
        prodn = self.product_names['pps']
        dr_list = glob(
            os.path.join(pps_dr_dir, 'S_NWC_' + str(prodn) + '*.nc'))
        ppsdr = get_ppslist(dr_list, self.time_window,
                            satellites=self.polar_satellites)
        ppsgds = []
        if pps_gds_dir:
            now = datetime.utcnow()
            gds_list = glob(os.path.join(pps_gds_dir, '*' + str(prodn) + '*.nc'))
            if len(gds_list) > 0:
                LOG.info("Number of Metop GDS files in dir: " + str(len(gds_list)))
                # Metop GDS (global) files get the 'global' variant tag.
                ppsgds = get_ppslist(gds_list, self.time_window,
                                     satellites=METOPS, variant='global')
                tic = datetime.utcnow()
                LOG.info("Retrieve the metop-gds list took " +
                         str((tic - now).seconds) + " sec")
        self.pps_scenes = ppsdr + ppsgds
        self.pps_scenes.sort()
        LOG.info(str(len(self.pps_scenes)) + " Polar scenes located")
        for scene in self.pps_scenes:
            LOG.debug("Polar scene:\n" + str(scene))
        # Get all geostationary satellite scenes:
        msg_dir = self._options['msg_dir'] % {"number": "03"}
        if product == 'cloudtype':
            ext = self._options['msg_cty_file_ext']
            # SAFNWC_MSG2_CT___201206252345_EuropeCanary.h5
        elif product == 'ctth':
            ext = self._options['msg_ctth_file_ext']
            # SAFNWC_MSG2_CTTH_201206252345_EuropeCanary.h5
        # What about EuropeCanary and possible other areas!? FIXME!
        prodn = self.product_names['msg']
        msg_list = glob(
            os.path.join(msg_dir, '*_' + str(prodn) + '*' + str(ext)))
        self.msg_scenes = get_msglist(msg_list, self.time_window,
                                      self.msg_areaname)  # satellites=self.msg_satellites)
        self.msg_scenes.sort()
        LOG.info(str(len(self.msg_scenes)) + " MSG scenes located")
        for scene in self.msg_scenes:
            LOG.debug("Geo scene:\n" + str(scene))
class ctthComposite(mesanComposite):
    """The CTTH Composite generator class."""
    def __init__(self, obstime, tdiff, areaid, config_options, **kwargs):
        """Initialize the CTTH composite instance."""
        super(ctthComposite, self).__init__(obstime, tdiff, areaid, **kwargs)
        values = {"area": areaid, }
        if 'filename' in kwargs:
            self.filename = kwargs['filename']
        else:
            # Generate the filename from the observation time and the
            # specifcations in the config file:
            bname = obstime.strftime(
                config_options['ctth_composite_filename']) % values
            path = config_options['composite_output_dir']
            self.filename = os.path.join(path, bname)
        self.description = "Cloud Top Temperature and Height composite for Mesan"
        self._options = config_options
        self.pps_scenes = []
        self.msg_scenes = []
        self.polar_satellites = config_options['polar_satellites'].split()
        self.msg_satellites = config_options['msg_satellites'].split()
        self.msg_areaname = config_options['msg_areaname']
        self.areaid = areaid
        self.product_names = {'msg': 'CTTH', 'pps': 'CTTH'}
        self.composite = ncCTTHComposite()
    def get_catalogue(self, product='ctth'):
        """Get a list with meta-data for all inout scenes."""
        super(ctthComposite, self).get_catalogue(product)
    def make_composite(self):
        """Make the CTTH composite.

        Blends all located MSG and PPS scenes into one field per parameter,
        keeping, per pixel, the data with the highest quality weight.
        Returns False when no scenes were found, True otherwise.
        """
        # Reference time for time stamp in composite file
        # sec1970 = datetime(1970, 1, 1)
        import time
        comp_temperature = None
        comp_height = None
        comp_pressure = None
        if len(self.msg_scenes + self.pps_scenes) == 0:
            LOG.critical(
                "Cannot make ctth composite when no Scenes have been found!")
            return False
        # Loop over all polar scenes:
        is_MSG = False
        LOG.info(
            "CTTH composite - Loop over all polar and geostationary scenes:")
        for scene in self.msg_scenes + self.pps_scenes:
            LOG.info("Scene: " + str(scene))
            # Geostationary (Meteosat) scenes carry no 'orbit' attribute.
            if (scene.platform_name.startswith("Meteosat") and
                    not hasattr(scene, 'orbit')):
                is_MSG = True
                x_local = ctth_msg(scene, self.areaid)
                dummy, lat = x_local['ctth_alti'].area.get_lonlats()
                x_temperature = x_local['ctth_tempe'].data.compute()
                x_pressure = x_local['ctth_pres'].data.compute()
                x_height = x_local['ctth_alti'].data.compute()
                # convert msg flags to pps
                # fill_value = 0, fill with 65535 (same as pps flag fill value)
                # so that bit 0 is set -> unprocessed -> w=0
                # The weight for masked data is set further down
                x_flag = np.ma.filled(ctth_procflags2pps(x_local['ctth_quality'].data.compute()),
                                      fill_value=65535)
                x_id = 1 * np.ones(x_temperature.shape)
            else:
                is_MSG = False
                try:
                    x_local = ctth_pps(scene, self.areaid)
                except (ProjectException, LoadException) as err:
                    # NOTE(review): the two %s placeholders receive a single
                    # tuple argument here, so logging will report a string
                    # formatting error rather than the intended message.
                    LOG.critical("Couldn't load pps scene: %s\nException was: %s",
                                 (str(scene), str(err)))
                    continue
                # Temperature (K)', u'no_data_value': 255, u'intercept': 100.0,
                # u'gain': 1.0
                # LOG.debug("scale and offset: %s %s", str(x_local['ctth_tempe'].attrs['scale_factor']),
                #           str(x_local['ctth_tempe'].attrs['add_offset']))
                x_temperature = x_local['ctth_tempe'].data.compute()
                x_pressure = x_local['ctth_pres'].data.compute()
                x_height = x_local['ctth_alti'].data.compute()
                sflags = x_local['ctth_status_flag'].data.compute()
                cflags = x_local['ctth_conditions'].data.compute()
                # qflags = x_local['CTTH'].ctth_quality.data.filled(0)
                qflags = x_local['ctth_quality'].data.compute()
                # Convert the v2018 flag triple to the old-style flag word.
                oldflags = ctth_convert_flags(sflags, cflags, qflags)
                # fill_value = 65535 i.e bit 0 is set -> unprocessed -> w=0
                # x_flag = np.ma.filled(oldflags, fill_value=65535)
                x_flag = oldflags
                x_id = 0 * np.ones(x_temperature.shape)
                lat = 0 * np.ones(x_temperature.shape)
            # time identifier is seconds since 1970-01-01 00:00:00
            x_time = time.mktime(scene.timeslot.timetuple()) * \
                np.ones(x_temperature.shape)
            idx_MSG = is_MSG * np.ones(x_temperature.shape, dtype=np.bool)
            if comp_temperature is None:
                # initialize field with current CTTH
                comp_lon, comp_lat = x_local['ctth_alti'].area.get_lonlats()
                comp_temperature = x_temperature
                comp_pressure = x_pressure
                comp_height = x_height
                comp_flag = x_flag
                comp_time = x_time
                comp_id = x_id
                comp_w = get_weight_ctth(x_flag, lat,
                                         abs(self.obstime - scene.timeslot),
                                         idx_MSG)
                # fix to cope with unprocessed data
                # ii = (x_height.mask == True) | (x_height == 0)
                ii = np.isnan(x_height)
                comp_w[ii] = 0
            else:
                # compare with quality of current CTTH
                x_w = get_weight_ctth(x_flag, lat,
                                      abs(self.obstime - scene.timeslot),
                                      idx_MSG)
                # fix to cope with unprocessed data
                # ii = (x_height.mask == True) | (x_height == 0)
                ii = np.isnan(x_height)
                x_w[ii] = 0
                # replace info where current CTTH data is best
                ii = x_w > comp_w
                comp_temperature[ii] = x_temperature[ii]
                comp_pressure[ii] = x_pressure[ii]
                comp_height[ii] = x_height[ii]
                comp_flag[ii] = x_flag[ii]
                comp_w[ii] = x_w[ii]
                comp_time[ii] = x_time[ii]
                comp_id[ii] = x_id[ii]
            self.longitude = comp_lon
            self.latitude = comp_lat
            self.area = x_local['ctth_alti'].area
        composite = {"temperature": comp_temperature,
                     "height": comp_height,
                     "pressure": comp_pressure,
                     "flag": comp_flag,
                     "weight": comp_w,
                     "time": comp_time,
                     "id": comp_id.astype(np.uint8)}
        self.composite.store(composite, self.area)
        return True
    def write(self):
        """Write the composite to a netcdf file."""
        # Write into a temp file in the target dir, keep a time-stamped
        # copy, then rename to the canonical <filename>.nc.
        tmpfname = tempfile.mktemp(suffix=os.path.basename(self.filename),
                                   dir=os.path.dirname(self.filename))
        self.composite.write(tmpfname)
        now = datetime.utcnow()
        fname_with_timestamp = str(
            self.filename) + now.strftime('_%Y%m%d%H%M%S.nc')
        shutil.copy(tmpfname, fname_with_timestamp)
        os.rename(tmpfname, self.filename + '.nc')
        return
    def make_quicklooks(self):
        """Make quicklook images."""
        palette = ctth_height()
        # NOTE(review): str.strip('.nc') strips any leading/trailing '.',
        # 'n' or 'c' characters, not the '.nc' suffix - verify filenames.
        filename = self.filename.strip('.nc') + '_height.png'
        ctth_data = self.composite.height.data
        cloud_free = self.composite.height.data == 0
        # Scale meters into 500 m palette steps, reserving 0 for cloud free.
        ctth_data = ctth_data / 500.0 + 1
        ctth_data[cloud_free] = 0
        ctth_data = ctth_data.astype(np.uint8)
        cmap = ColormapCompositor('mesan_cloudheight_composite')
        colors, sqpal = cmap.build_colormap(palette, np.uint8, {})
        attrs = {'_FillValue': 0}
        xdata = xr.DataArray(ctth_data, dims=['y', 'x'], attrs=attrs).astype('uint8')
        pimage = XRImage(xdata)
        pimage.palettize(colors)
        pimage.save(filename)
# Entry point: parse CLI, configure logging, then build and write the
# CTTH composite plus quicklook images.
if __name__ == "__main__":
    (logfile, config_filename, time_of_analysis, areaid, delta_time_window) = get_arguments()
    if logfile:
        logging.config.fileConfig(logfile)
    # Mirror everything to stderr at DEBUG level regardless of file config.
    handler = logging.StreamHandler(sys.stderr)
    formatter = logging.Formatter(fmt=_DEFAULT_LOG_FORMAT,
                                  datefmt=_DEFAULT_TIME_FORMAT)
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    logging.getLogger('').addHandler(handler)
    logging.getLogger('').setLevel(logging.DEBUG)
    logging.getLogger('satpy').setLevel(logging.INFO)
    LOG = logging.getLogger('make_ctth_composite')
    # Report where mail notifications (if any SMTP handler is set up) go.
    log_handlers = logging.getLogger('').handlers
    for log_handle in log_handlers:
        if type(log_handle) is handlers.SMTPHandler:
            LOG.debug("Mail notifications to: %s", str(log_handle.toaddrs))
    OPTIONS = get_config(config_filename)
    ctth_comp = ctthComposite(time_of_analysis, delta_time_window, areaid, OPTIONS)
    ctth_comp.get_catalogue()
    ctth_comp.make_composite()
    ctth_comp.write()
    ctth_comp.make_quicklooks()
|
adybbroe/mesan_compositer
|
mesan_compositer/make_ctth_composite.py
|
Python
|
gpl-3.0
| 17,392
|
[
"NetCDF"
] |
b40724a55e36aaa1c5f92eaa05718c3de5dda2bcfc9d259b35ee4b98b2a22d8b
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
    '''
    Return the red, green and blue components for a
    color as doubles.
    '''
    components = [0.0, 0.0, 0.0]  # default: black
    vtk.vtkNamedColors().GetColorRGB(colorName, components)
    return components
#
# Demonstrate the use of implicit selection loop as well as closest point
# connectivity
#
# create pipeline
#
# Source geometry: a finely tessellated unit sphere.
sphere = vtk.vtkSphereSource()
sphere.SetRadius(1)
sphere.SetPhiResolution(100)
sphere.SetThetaResolution(100)
# Points on the sphere that define the closed selection loop.
selectionPoints = vtk.vtkPoints()
selectionPoints.InsertPoint(0, 0.07325, 0.8417, 0.5612)
selectionPoints.InsertPoint(1, 0.07244, 0.6568, 0.7450)
selectionPoints.InsertPoint(2, 0.1727, 0.4597, 0.8850)
selectionPoints.InsertPoint(3, 0.3265, 0.6054, 0.7309)
selectionPoints.InsertPoint(4, 0.5722, 0.5848, 0.5927)
selectionPoints.InsertPoint(5, 0.4305, 0.8138, 0.4189)
loop = vtk.vtkImplicitSelectionLoop()
loop.SetLoop(selectionPoints)
# Keep only geometry inside the implicit selection loop.
extract = vtk.vtkExtractGeometry()
extract.SetInputConnection(sphere.GetOutputPort())
extract.SetImplicitFunction(loop)
# Of the extracted pieces, keep the region closest to the first loop point.
connect = vtk.vtkConnectivityFilter()
connect.SetInputConnection(extract.GetOutputPort())
connect.SetExtractionModeToClosestPointRegion()
connect.SetClosestPoint(selectionPoints.GetPoint(0))
clipMapper = vtk.vtkDataSetMapper()
clipMapper.SetInputConnection(connect.GetOutputPort())
# Distinct back-face color so the inside of the surface is visible.
backProp = vtk.vtkProperty()
backProp.SetDiffuseColor(GetRGBColor('tomato'))
clipActor = vtk.vtkActor()
clipActor.SetMapper(clipMapper)
clipActor.GetProperty().SetColor(GetRGBColor('peacock'))
clipActor.SetBackfaceProperty(backProp)
# Create graphics stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(clipActor)
ren1.SetBackground(1, 1, 1)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(30)
ren1.GetActiveCamera().Elevation(30)
ren1.GetActiveCamera().Dolly(1.2)
ren1.ResetCameraClippingRange()
renWin.SetSize(400, 400)
renWin.Render()
# render the image
#
#iren.Start()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Common/DataModel/Testing/Python/SelectionLoop.py
|
Python
|
gpl-3.0
| 2,191
|
[
"VTK"
] |
6e334d589a8739ed0dfbcce03865a715a325f2c7db14c58a8a7abd7464c335f0
|
"""
============================================
Reading BEM surfaces from a forward solution
============================================
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
print(__doc__)

import mne
from mne.datasets import sample

# Locate the BEM solution file shipped with the MNE sample dataset.
data_path = sample.data_path()
fname = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'

surfaces = mne.read_bem_surfaces(fname, add_geom=True)

print("Number of surfaces : %d" % len(surfaces))

###############################################################################
# Show result
head_col = (0.95, 0.83, 0.83)  # light pink
skull_col = (0.91, 0.89, 0.67)
brain_col = (0.67, 0.89, 0.91)  # light blue
colors = [head_col, skull_col, brain_col]

# 3D source space
try:
    from enthought.mayavi import mlab
except ImportError:
    # Fall back to the modern Mayavi namespace. Fix: the original used a
    # bare `except:`, which also hid unrelated errors from the first import.
    from mayavi import mlab

mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
# One translucent triangular mesh per surface (head, skull, brain).
for c, surf in zip(colors, surfaces):
    points = surf['rr']
    faces = surf['tris']
    mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
                         color=c, opacity=0.3)
|
effigies/mne-python
|
examples/plot_read_bem_surfaces.py
|
Python
|
bsd-3-clause
| 1,129
|
[
"Mayavi"
] |
2268d04bb143fa57178e91f08dc9be1bffa08c8f02175ec45c568cefddfa3963
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 14:23:47 2015
@author: lpinello
"""
import os
import errno
import sys
import subprocess as sb
import glob
import argparse
import unicodedata
import string
import re
import multiprocessing
import logging
# Configure root logging to stderr for the whole CRISPRessoPooled run.
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)-5s @ %(asctime)s:\n\t %(message)s \n',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    stream=sys.stderr,
                    filemode="w"
                    )
# Short aliases used throughout the module.
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info

# Absolute path of the directory containing this module.
_ROOT = os.path.abspath(os.path.dirname(__file__))
####Support functions###
def get_data(path):
    """Return the absolute path of *path* inside this package's data directory."""
    return os.path.join(_ROOT, 'data', path)
def check_library(library_name):
    """Import and return the module *library_name*, or exit with a hint.

    On success the imported module object is returned.  If the module is
    missing, a critical message is logged (via the module-level ``error``
    alias) and the process exits with status 1.
    """
    try:
        return __import__(library_name)
    except ImportError:
        # FIX: was a bare `except:`, which also hid unrelated errors raised
        # while the module's own top-level code ran (and KeyboardInterrupt).
        # Only a genuinely missing module should produce the install hint.
        error('You need to install %s module to use CRISPRessoPooled!' % library_name)
        sys.exit(1)
#GENOME_LOCAL_FOLDER=get_data('genomes')
def force_symlink(src, dst):
    """Create the symlink ``dst -> src``, replacing any stale link.

    A no-op when *dst* already resolves to *src*; if *dst* exists but points
    elsewhere, it is removed and the link re-created.
    """
    already_linked = os.path.exists(dst) and os.path.samefile(src, dst)
    if already_linked:
        return
    try:
        os.symlink(src, dst)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # dst exists but points somewhere else: replace it.
            os.remove(dst)
            os.symlink(src, dst)
# Complement table for DNA characters; gap symbols ('-', '_') and the
# ambiguity code 'N' map to themselves. Lookup raises KeyError otherwise.
nt_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', '_': '_', '-': '-'}
def reverse_complement(seq):
    """Return the reverse complement of *seq* (input case-insensitive, output upper-case)."""
    complemented = [nt_complement[base] for base in reversed(seq.upper())]
    return ''.join(complemented)
def find_wrong_nt(sequence):
    """Return the distinct characters of *sequence* (upper-cased) that are not A/T/C/G/N."""
    valid_nts = set('ATCGN')
    return [nt for nt in set(sequence.upper()) if nt not in valid_nts]
def capitalize_sequence(x):
    """Return *x* upper-cased as a string; null values (None/NaN) pass through unchanged."""
    if pd.isnull(x):
        return x
    return str(x).upper()
def check_file(filename):
    """Raise Exception with a readable message when *filename* cannot be opened for reading."""
    try:
        open(filename).close()
    except IOError:
        raise Exception('I cannot open the file: '+filename)
def slugify(value): #adapted from the Django project
    """Normalize *value* into a filesystem/URL-safe slug.

    Steps: ASCII-fold via NFKD normalization, replace every character that is
    not a word character, whitespace or hyphen with '_', then collapse runs of
    hyphens/whitespace into a single '-'.

    NOTE(review): relies on the Python 2 ``unicode`` builtin (this script is
    Python 2 throughout — see the print statements in main()).
    """
    value = unicodedata.normalize('NFKD', unicode(value)).encode('ascii', 'ignore')
    value = unicode(re.sub('[^\w\s-]', '_', value).strip())
    value = unicode(re.sub('[-\s]+', '-', value))
    return str(value)
#the dependencies are bowtie2 and samtools
def which(program):
    """Locate *program* like the Unix ``which`` command.

    If *program* contains a directory component it is tested directly;
    otherwise each entry of $PATH is searched.  Returns the full path of the
    first regular, executable file found, or None.
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        if _executable(program):
            return program
    else:
        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if _executable(candidate):
                return candidate
    return None
def check_samtools():
    """Return True when ``samtools`` is on PATH; otherwise print install hints and return False."""
    if which('samtools'):
        return True
    sys.stdout.write('\nCRISPRessoPooled requires samtools')
    sys.stdout.write('\n\nPlease install it and add to your path following the instruction at: http://www.htslib.org/download/')
    return False
def check_bowtie2():
    """Return True when both ``bowtie2`` and ``bowtie2-inspect`` are on PATH; otherwise print install hints and return False."""
    have_aligner = which('bowtie2')
    have_inspect = which('bowtie2-inspect')
    if have_aligner and have_inspect:
        return True
    sys.stdout.write('\nCRISPRessoPooled requires Bowtie2!')
    sys.stdout.write('\n\nPlease install it and add to your path following the instruction at: http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml#obtaining-bowtie-2')
    return False
#this is overkilling to run for many sequences,
#but for few is fine and effective.
def get_align_sequence(seq,bowtie2_index):
    """Align a single sequence to *bowtie2_index* and return the mapping as raw text.

    Runs bowtie2 with the sequence supplied inline (``-c -U``) and
    post-processes the SAM output with awk to print one tab-separated line
    per alignment: chromosome, bpstart, bpend, strand, read name, sequence,
    qualities.  The awk body walks the CIGAR string ($6): soft clips ('S')
    widen bpstart/bpend, while insertions ('I') and hard clips ('H') are
    excluded when accumulating bpend; ``($2 % 32) >= 16`` tests the 0x10
    SAM flag bit to choose the '-' strand.  Returns the pipeline's stdout
    (bytes of the awk output; empty when nothing aligned).
    """
    cmd='''bowtie2 -x %s -c -U %s''' %(bowtie2_index,seq) + ''' |\
grep -v '@' | awk '{OFS="\t"; bpstart=$4; split ($6,a,"[MIDNSHP]"); n=0; bpend=bpstart;\
for (i=1; i in a; i++){\
n+=1+length(a[i]); \
if (substr($6,n,1)=="S"){\
bpstart-=a[i];\
if (bpend==$4)\
bpend=bpstart;\
} else if( (substr($6,n,1)!="I") && (substr($6,n,1)!="H") )\
bpend+=a[i];\
}if ( ($2 % 32)>=16) print $3,bpstart,bpend,"-",$1,$10,$11;else print $3,bpstart,bpend,"+",$1,$10,$11;}' '''
    p = sb.Popen(cmd, shell=True,stdout=sb.PIPE)
    return p.communicate()[0]
#if a reference index is provided align the reads to it
#extract region
def get_region_from_fa(chr_id,bpstart,bpend,uncompressed_reference):
    """Extract the sequence of region chr_id:bpstart-(bpend-1) from an indexed fasta via ``samtools faidx``."""
    coords = '%s:%d-%d' % (chr_id, bpstart, bpend - 1)
    faidx_cmd = "samtools faidx %s %s | grep -v ^\> | tr -d '\n'" % (uncompressed_reference, coords)
    extractor = sb.Popen(faidx_cmd, shell=True, stdout=sb.PIPE)
    return extractor.communicate()[0]
def get_n_reads_fastq(fastq_filename):
    """Return the number of reads in a fastq file (gzip-aware), computed as line count / 4."""
    decompress = 'z' if fastq_filename.endswith('.gz') else ''
    counter = sb.Popen(decompress + "cat < %s | wc -l" % fastq_filename, shell=True, stdout=sb.PIPE)
    total_lines = float(counter.communicate()[0])
    return int(total_lines / 4.0)
def get_n_aligned_bam(bam_filename):
    """Count primary aligned reads in a BAM (``-F 0x904`` drops unmapped, secondary and supplementary records)."""
    counter = sb.Popen("samtools view -F 0x904 -c %s" % bam_filename, shell=True, stdout=sb.PIPE)
    return int(counter.communicate()[0])
#get a clean name that we can use for a filename
# Whitelist of characters permitted in generated filenames.
validFilenameChars = "+-_.() %s%s" % (string.ascii_letters, string.digits)
def clean_filename(filename):
    """Return *filename* ASCII-folded and stripped of characters unsafe for the filesystem.

    NOTE(review): relies on the Python 2 ``unicode`` builtin, like slugify().
    """
    cleanedFilename = unicodedata.normalize('NFKD', unicode(filename)).encode('ASCII', 'ignore')
    return ''.join(c for c in cleanedFilename if c in validFilenameChars)
def get_avg_read_lenght_fastq(fastq_filename):
    """Return the mean read length of a fastq file (gzip-aware), truncated to int.

    The average is computed by an awk one-liner over every sequence line
    (line 2 of each 4-line fastq record).  ``int()`` will raise on empty or
    non-numeric pipeline output (e.g. an empty fastq makes awk divide by
    zero and emit nothing).
    """
    # FIX: the awk initializer block was labelled 'BN', a typo for 'BEGIN';
    # awk treated 'BN' as a never-matching pattern and the code only worked
    # because awk auto-initializes n and s to 0. Output is unchanged.
    cmd=('z' if fastq_filename.endswith('.gz') else '' ) +('cat < %s' % fastq_filename)+\
    r''' | awk 'BEGIN {n=0;s=0;} NR%4 == 2 {s+=length($0);n++;} END { printf("%d\n",s/n)}' '''
    p = sb.Popen(cmd, shell=True,stdout=sb.PIPE)
    return int(p.communicate()[0].strip())
def find_overlapping_genes(row, df_genes):
    """Annotate *row* with the genes whose transcripts overlap its interval.

    Parameters
    ----------
    row : pandas.Series with 'chr_id', 'bpstart' and 'bpend' fields.
    df_genes : pandas.DataFrame of UCSC gene annotations with 'chrom',
        'txStart', 'txEnd', 'name' and 'name2' columns.

    Returns the same row with a new 'gene_overlapping' field: a
    comma-separated list of 'name2 (name)' entries, empty string when no
    gene overlaps.  Intended for ``DataFrame.apply(..., axis=1)``.
    """
    # FIX: .loc with a boolean mask replaces the legacy .ix indexer, which
    # was deprecated in pandas 0.20 and removed in pandas 1.0.
    overlap_mask = ((df_genes.chrom == row.chr_id) &
                    (df_genes.txStart <= row.bpend) &
                    (row.bpstart <= df_genes.txEnd))
    genes_overlapping = []
    for idx_g, row_g in df_genes.loc[overlap_mask].iterrows():
        # row_g['name'] (not row_g.name) because Series.name is the index label.
        genes_overlapping.append('%s (%s)' % (row_g.name2, row_g['name']))
    row['gene_overlapping'] = ','.join(genes_overlapping)
    return row
# Import heavyweight third-party dependencies through check_library() so a
# missing package produces an actionable install message instead of a bare
# ImportError traceback.
pd=check_library('pandas')
np=check_library('numpy')
###EXCEPTIONS############################
class FlashException(Exception):
    """FLASH read-merging step failed (see main())."""

class TrimmomaticException(Exception):
    """Trimmomatic adapter-trimming step failed (see main())."""

class Bowtie2Exception(Exception):
    """Bowtie2-related failure."""

class AmpliconsNotUniqueException(Exception):
    """Input amplicon sequences are not all distinct."""

class AmpliconsNamesNotUniqueException(Exception):
    """Input amplicon names are not all distinct."""

class NoReadsAlignedException(Exception):
    """No reads aligned to any amplicon or genomic region."""

class DonorSequenceException(Exception):
    """Problem with the expected-HDR (donor) sequence."""

class AmpliconEqualDonorException(Exception):
    """The donor sequence is identical to the amplicon sequence."""

class SgRNASequenceException(Exception):
    """Problem with the provided sgRNA sequence."""

class NTException(Exception):
    """A sequence contains characters other than A/T/C/G/N (see find_wrong_nt checks)."""

class ExonSequenceException(Exception):
    """Problem with the provided coding (exon) subsequence."""
def main():
try:
print ' \n~~~CRISPRessoPooled~~~'
print '-Analysis of CRISPR/Cas9 outcomes from POOLED deep sequencing data-'
print r'''
) )
( _______________________ (
__)__ | __ __ __ __ __ | __)__
C\| \ ||__)/ \/ \| |_ | \ | C\| \
\ / || \__/\__/|__|__|__/ | \ /
\___/ |_______________________| \___/
'''
print'\n[Luca Pinello 2015, send bugs, suggestions or *green coffee* to lucapinello AT gmail DOT com]\n\n',
__version__ = re.search(
'^__version__\s*=\s*"(.*)"',
open(os.path.join(_ROOT,'CRISPRessoCORE.py')).read(),
re.M
).group(1)
print 'Version %s\n' % __version__
parser = argparse.ArgumentParser(description='CRISPRessoPooled Parameters',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r1','--fastq_r1', type=str, help='First fastq file', required=True,default='Fastq filename' )
parser.add_argument('-r2','--fastq_r2', type=str, help='Second fastq file for paired end reads',default='')
parser.add_argument('-f','--amplicons_file', type=str, help='Amplicons description file. In particular, this file, is a tab delimited text file with up to 5 columns (2 required):\
\nAMPLICON_NAME: an identifier for the amplicon (must be unique)\nAMPLICON_SEQUENCE: amplicon sequence used in the design of the experiment\n\
\nsgRNA_SEQUENCE (OPTIONAL): sgRNA sequence used for this amplicon without the PAM sequence. If more than one separate them by commas and not spaces. If not available enter NA.\
\nEXPECTED_AMPLICON_AFTER_HDR (OPTIONAL): expected amplicon sequence in case of HDR. If not available enter NA.\
\nCODING_SEQUENCE (OPTIONAL): Subsequence(s) of the amplicon corresponding to coding sequences. If more than one separate them by commas and not spaces. If not available enter NA.', default='')
parser.add_argument('-x','--bowtie2_index', type=str, help='Basename of Bowtie2 index for the reference genome', default='')
#tool specific optional
parser.add_argument('--gene_annotations', type=str, help='Gene Annotation Table from UCSC Genome Browser Tables (http://genome.ucsc.edu/cgi-bin/hgTables?command=start), \
please select as table "knowGene", as output format "all fields from selected table" and as file returned "gzip compressed"', default='')
parser.add_argument('-p','--n_processes',type=int, help='Specify the number of processes to use for the quantification.\
Please use with caution since increasing this parameter will increase significantly the memory required to run CRISPResso.',default=1)
parser.add_argument('--bowtie2_options_string', type=str, help='Override options for the Bowtie2 alignment command',default=' -k 1 --end-to-end -N 0 --np 0 ')
parser.add_argument('--min_reads_to_use_region', type=float, help='Minimum number of reads that align to a region to perform the CRISPResso analysis', default=1000)
#general CRISPResso optional
parser.add_argument('-q','--min_average_read_quality', type=int, help='Minimum average quality score (phred33) to keep a read', default=0)
parser.add_argument('-s','--min_single_bp_quality', type=int, help='Minimum single bp score (phred33) to keep a read', default=0)
parser.add_argument('--min_identity_score', type=float, help='Min identity score for the alignment', default=60.0)
parser.add_argument('-n','--name', help='Output name', default='')
parser.add_argument('-o','--output_folder', help='', default='')
parser.add_argument('--trim_sequences',help='Enable the trimming of Illumina adapters with Trimmomatic',action='store_true')
parser.add_argument('--trimmomatic_options_string', type=str, help='Override options for Trimmomatic',default=' ILLUMINACLIP:%s:0:90:10:0:true MINLEN:40' % get_data('NexteraPE-PE.fa'))
parser.add_argument('--min_paired_end_reads_overlap', type=int, help='Minimum required overlap length between two reads to provide a confident overlap. ', default=4)
parser.add_argument('--max_paired_end_reads_overlap', type=int, help='parameter for the flash merging step, this parameter is the maximum overlap length expected in approximately 90%% of read pairs. Please see the flash manual for more information.', default=100)
parser.add_argument('--hide_mutations_outside_window_NHEJ',help='This parameter allows to visualize only the mutations overlapping the cleavage site and used to classify a read as NHEJ. This parameter has no effect on the quanitification of the NHEJ. It may be helpful to mask a pre-existing and known mutations or sequencing errors outside the window used for quantification of NHEJ events.',action='store_true')
parser.add_argument('-w','--window_around_sgrna', type=int, help='Window(s) in bp around the cleavage position (half on on each side) as determined by the provide guide RNA sequence to quantify the indels. Any indels outside this window are excluded. A value of 0 disables this filter.', default=1)
parser.add_argument('--cleavage_offset', type=int, help="Cleavage offset to use within respect to the 3' end of the provided sgRNA sequence. Remember that the sgRNA sequence must be entered without the PAM. The default is -3 and is suitable for the SpCas9 system. For alternate nucleases, other cleavage offsets may be appropriate, for example, if using Cpf1 this parameter would be set to 1.", default=-3)
parser.add_argument('--exclude_bp_from_left', type=int, help='Exclude bp from the left side of the amplicon sequence for the quantification of the indels', default=15)
parser.add_argument('--exclude_bp_from_right', type=int, help='Exclude bp from the right side of the amplicon sequence for the quantification of the indels', default=15)
parser.add_argument('--hdr_perfect_alignment_threshold', type=float, help='Sequence homology %% for an HDR occurrence', default=98.0)
parser.add_argument('--ignore_substitutions',help='Ignore substitutions events for the quantification and visualization',action='store_true')
parser.add_argument('--ignore_insertions',help='Ignore insertions events for the quantification and visualization',action='store_true')
parser.add_argument('--ignore_deletions',help='Ignore deletions events for the quantification and visualization',action='store_true')
parser.add_argument('--needle_options_string',type=str,help='Override options for the Needle aligner',default=' -gapopen=10 -gapextend=0.5 -awidth3=5000')
parser.add_argument('--keep_intermediate',help='Keep all the intermediate files',action='store_true')
parser.add_argument('--dump',help='Dump numpy arrays and pandas dataframes to file for debugging purposes',action='store_true')
parser.add_argument('--save_also_png',help='Save also .png images additionally to .pdf files',action='store_true')
args = parser.parse_args()
crispresso_options=['window_around_sgrna','cleavage_offset','min_average_read_quality','min_single_bp_quality','min_identity_score',
'min_single_bp_quality','exclude_bp_from_left',
'exclude_bp_from_right',
'hdr_perfect_alignment_threshold','ignore_substitutions','ignore_insertions','ignore_deletions',
'needle_options_string',
'keep_intermediate',
'dump',
'save_also_png','hide_mutations_outside_window_NHEJ','n_processes',]
def propagate_options(cmd,options,args):
for option in options :
if option:
val=eval('args.%s' % option )
if type(val)==str:
cmd+=' --%s "%s"' % (option,str(val)) # this is for options with space like needle...
elif type(val)==bool:
if val:
cmd+=' --%s' % option
else:
cmd+=' --%s %s' % (option,str(val))
return cmd
info('Checking dependencies...')
if check_samtools() and check_bowtie2():
info('\n All the required dependencies are present!')
else:
sys.exit(1)
#check files
check_file(args.fastq_r1)
if args.fastq_r2:
check_file(args.fastq_r2)
if args.bowtie2_index:
check_file(args.bowtie2_index+'.1.bt2')
if args.amplicons_file:
check_file(args.amplicons_file)
if args.gene_annotations:
check_file(args.gene_annotations)
if args.amplicons_file and not args.bowtie2_index:
RUNNING_MODE='ONLY_AMPLICONS'
info('Only the Amplicon description file was provided. The analysis will be perfomed using only the provided amplicons sequences.')
elif args.bowtie2_index and not args.amplicons_file:
RUNNING_MODE='ONLY_GENOME'
info('Only the bowtie2 reference genome index file was provided. The analysis will be perfomed using only genomic regions where enough reads align.')
elif args.bowtie2_index and args.amplicons_file:
RUNNING_MODE='AMPLICONS_AND_GENOME'
info('Amplicon description file and bowtie2 reference genome index files provided. The analysis will be perfomed using the reads that are aligned ony to the amplicons provided and not to other genomic regions.')
else:
error('Please provide the amplicons description file (-f or --amplicons_file option) or the bowtie2 reference genome index file (-x or --bowtie2_index option) or both.')
sys.exit(1)
####TRIMMING AND MERGING
get_name_from_fasta=lambda x: os.path.basename(x).replace('.fastq','').replace('.gz','').replace("/","_")
if not args.name:
if args.fastq_r2!='':
database_id='%s_%s' % (get_name_from_fasta(args.fastq_r1),get_name_from_fasta(args.fastq_r2))
else:
database_id='%s' % get_name_from_fasta(args.fastq_r1)
else:
database_id=args.name
OUTPUT_DIRECTORY='CRISPRessoPooled_on_%s' % database_id
if args.output_folder:
OUTPUT_DIRECTORY=os.path.join(os.path.abspath(args.output_folder),OUTPUT_DIRECTORY)
_jp=lambda filename: os.path.join(OUTPUT_DIRECTORY,filename) #handy function to put a file in the output directory
try:
info('Creating Folder %s' % OUTPUT_DIRECTORY)
os.makedirs(OUTPUT_DIRECTORY)
info('Done!')
except:
warn('Folder %s already exists.' % OUTPUT_DIRECTORY)
log_filename=_jp('CRISPRessoPooled_RUNNING_LOG.txt')
logging.getLogger().addHandler(logging.FileHandler(log_filename))
with open(log_filename,'w+') as outfile:
outfile.write('[Command used]:\nCRISPRessoPooled %s\n\n[Execution log]:\n' % ' '.join(sys.argv))
if args.fastq_r2=='': #single end reads
#check if we need to trim
if not args.trim_sequences:
#create a symbolic link
symlink_filename=_jp(os.path.basename(args.fastq_r1))
force_symlink(os.path.abspath(args.fastq_r1),symlink_filename)
output_forward_filename=symlink_filename
else:
output_forward_filename=_jp('reads.trimmed.fq.gz')
#Trimming with trimmomatic
cmd='java -jar %s SE -phred33 %s %s %s >>%s 2>&1'\
% (get_data('trimmomatic-0.33.jar'),args.fastq_r1,
output_forward_filename,
args.trimmomatic_options_string.replace('NexteraPE-PE.fa','TruSeq3-SE.fa'),
log_filename)
#print cmd
TRIMMOMATIC_STATUS=sb.call(cmd,shell=True)
if TRIMMOMATIC_STATUS:
raise TrimmomaticException('TRIMMOMATIC failed to run, please check the log file.')
processed_output_filename=output_forward_filename
else:#paired end reads case
if not args.trim_sequences:
output_forward_paired_filename=args.fastq_r1
output_reverse_paired_filename=args.fastq_r2
else:
info('Trimming sequences with Trimmomatic...')
output_forward_paired_filename=_jp('output_forward_paired.fq.gz')
output_forward_unpaired_filename=_jp('output_forward_unpaired.fq.gz')
output_reverse_paired_filename=_jp('output_reverse_paired.fq.gz')
output_reverse_unpaired_filename=_jp('output_reverse_unpaired.fq.gz')
#Trimming with trimmomatic
cmd='java -jar %s PE -phred33 %s %s %s %s %s %s %s >>%s 2>&1'\
% (get_data('trimmomatic-0.33.jar'),
args.fastq_r1,args.fastq_r2,output_forward_paired_filename,
output_forward_unpaired_filename,output_reverse_paired_filename,
output_reverse_unpaired_filename,args.trimmomatic_options_string,log_filename)
#print cmd
TRIMMOMATIC_STATUS=sb.call(cmd,shell=True)
if TRIMMOMATIC_STATUS:
raise TrimmomaticException('TRIMMOMATIC failed to run, please check the log file.')
info('Done!')
#Merging with Flash
info('Merging paired sequences with Flash...')
cmd='flash %s %s --allow-outies --min-overlap %d --max-overlap %d -z -d %s >>%s 2>&1' %\
(output_forward_paired_filename,
output_reverse_paired_filename,
args.min_paired_end_reads_overlap,
args.max_paired_end_reads_overlap,
OUTPUT_DIRECTORY,log_filename)
FLASH_STATUS=sb.call(cmd,shell=True)
if FLASH_STATUS:
raise FlashException('Flash failed to run, please check the log file.')
info('Done!')
flash_hist_filename=_jp('out.hist')
flash_histogram_filename=_jp('out.histogram')
flash_not_combined_1_filename=_jp('out.notCombined_1.fastq.gz')
flash_not_combined_2_filename=_jp('out.notCombined_2.fastq.gz')
processed_output_filename=_jp('out.extendedFrags.fastq.gz')
#count reads
N_READS_INPUT=get_n_reads_fastq(args.fastq_r1)
N_READS_AFTER_PREPROCESSING=get_n_reads_fastq(processed_output_filename)
#load gene annotation
if args.gene_annotations:
info('Loading gene coordinates from annotation file: %s...' % args.gene_annotations)
try:
df_genes=pd.read_table(args.gene_annotations,compression='gzip')
df_genes.txEnd=df_genes.txEnd.astype(int)
df_genes.txStart=df_genes.txStart.astype(int)
df_genes.head()
except:
info('Failed to load the gene annotations file.')
if RUNNING_MODE=='ONLY_AMPLICONS' or RUNNING_MODE=='AMPLICONS_AND_GENOME':
#load and validate template file
df_template=pd.read_csv(args.amplicons_file,names=[
'Name','Amplicon_Sequence','sgRNA',
'Expected_HDR','Coding_sequence'],comment='#',sep='\t',dtype={'Name':str})
#remove empty amplicons/lines
df_template.dropna(subset=['Amplicon_Sequence'],inplace=True)
df_template.dropna(subset=['Name'],inplace=True)
df_template.Amplicon_Sequence=df_template.Amplicon_Sequence.apply(capitalize_sequence)
df_template.Expected_HDR=df_template.Expected_HDR.apply(capitalize_sequence)
df_template.sgRNA=df_template.sgRNA.apply(capitalize_sequence)
df_template.Coding_sequence=df_template.Coding_sequence.apply(capitalize_sequence)
if not len(df_template.Amplicon_Sequence.unique())==df_template.shape[0]:
raise Exception('The amplicons should be all distinct!')
if not len(df_template.Name.unique())==df_template.shape[0]:
raise Exception('The amplicon names should be all distinct!')
df_template=df_template.set_index('Name')
df_template.index=df_template.index.to_series().str.replace(' ','_')
for idx,row in df_template.iterrows():
wrong_nt=find_wrong_nt(row.Amplicon_Sequence)
if wrong_nt:
raise NTException('The amplicon sequence %s contains wrong characters:%s' % (idx,' '.join(wrong_nt)))
if not pd.isnull(row.sgRNA):
cut_points=[]
for current_guide_seq in row.sgRNA.strip().upper().split(','):
wrong_nt=find_wrong_nt(current_guide_seq)
if wrong_nt:
raise NTException('The sgRNA sequence %s contains wrong characters:%s' % (current_guide_seq, ' '.join(wrong_nt)))
offset_fw=args.cleavage_offset+len(current_guide_seq)-1
offset_rc=(-args.cleavage_offset)-1
cut_points+=[m.start() + offset_fw for \
m in re.finditer(current_guide_seq, row.Amplicon_Sequence)]+[m.start() + offset_rc for m in re.finditer(reverse_complement(current_guide_seq), row.Amplicon_Sequence)]
if not cut_points:
warn('\nThe guide sequence/s provided: %s is(are) not present in the amplicon sequence:%s! \nNOTE: The guide will be ignored for the analysis. Please check your input!' % (row.sgRNA,row.Amplicon_Sequence))
df_template.ix[idx,'sgRNA']=''
if RUNNING_MODE=='ONLY_AMPLICONS':
#create a fasta file with all the amplicons
amplicon_fa_filename=_jp('AMPLICONS.fa')
fastq_gz_amplicon_filenames=[]
with open(amplicon_fa_filename,'w+') as outfile:
for idx,row in df_template.iterrows():
if row['Amplicon_Sequence']:
outfile.write('>%s\n%s\n' %(clean_filename('AMPL_'+idx),row['Amplicon_Sequence']))
#create place-holder fastq files
fastq_gz_amplicon_filenames.append(_jp('%s.fastq.gz' % clean_filename('AMPL_'+idx)))
open(fastq_gz_amplicon_filenames[-1], 'w+').close()
df_template['Demultiplexed_fastq.gz_filename']=fastq_gz_amplicon_filenames
info('Creating a custom index file with all the amplicons...')
custom_index_filename=_jp('CUSTOM_BOWTIE2_INDEX')
sb.call('bowtie2-build %s %s >>%s 2>&1' %(amplicon_fa_filename,custom_index_filename,log_filename), shell=True)
#align the file to the amplicons (MODE 1)
info('Align reads to the amplicons...')
bam_filename_amplicons= _jp('CRISPResso_AMPLICONS_ALIGNED.bam')
aligner_command= 'bowtie2 -x %s -p %s %s -U %s 2>>%s | samtools view -bS - > %s' %(custom_index_filename,args.n_processes,args.bowtie2_options_string,processed_output_filename,log_filename,bam_filename_amplicons)
sb.call(aligner_command,shell=True)
N_READS_ALIGNED=get_n_aligned_bam(bam_filename_amplicons)
s1=r"samtools view -F 4 %s 2>>%s | grep -v ^'@'" % (bam_filename_amplicons,log_filename)
s2=r'''|awk '{ gzip_filename=sprintf("gzip >> OUTPUTPATH%s.fastq.gz",$3);\
print "@"$1"\n"$10"\n+\n"$11 | gzip_filename;}' '''
cmd=s1+s2.replace('OUTPUTPATH',_jp(''))
sb.call(cmd,shell=True)
info('Demultiplex reads and run CRISPResso on each amplicon...')
n_reads_aligned_amplicons=[]
for idx,row in df_template.iterrows():
info('\n Processing:%s' %idx)
n_reads_aligned_amplicons.append(get_n_reads_fastq(row['Demultiplexed_fastq.gz_filename']))
crispresso_cmd='CRISPResso -r1 %s -a %s -o %s --name %s' % (row['Demultiplexed_fastq.gz_filename'],row['Amplicon_Sequence'],OUTPUT_DIRECTORY,idx)
if n_reads_aligned_amplicons[-1]>args.min_reads_to_use_region:
if row['sgRNA'] and not pd.isnull(row['sgRNA']):
crispresso_cmd+=' -g %s' % row['sgRNA']
if row['Expected_HDR'] and not pd.isnull(row['Expected_HDR']):
crispresso_cmd+=' -e %s' % row['Expected_HDR']
if row['Coding_sequence'] and not pd.isnull(row['Coding_sequence']):
crispresso_cmd+=' -c %s' % row['Coding_sequence']
crispresso_cmd=propagate_options(crispresso_cmd,crispresso_options,args)
info('Running CRISPResso:%s' % crispresso_cmd)
sb.call(crispresso_cmd,shell=True)
else:
warn('Skipping amplicon [%s] since no reads are aligning to it\n'% idx)
df_template['n_reads']=n_reads_aligned_amplicons
df_template['n_reads_aligned_%']=df_template['n_reads']/float(N_READS_ALIGNED)*100
df_template.fillna('NA').to_csv(_jp('REPORT_READS_ALIGNED_TO_AMPLICONS.txt'),sep='\t')
if RUNNING_MODE=='AMPLICONS_AND_GENOME':
print 'Mapping amplicons to the reference genome...'
#find the locations of the amplicons on the genome and their strand and check if there are mutations in the reference genome
additional_columns=[]
for idx,row in df_template.iterrows():
fields_to_append=list(np.take(get_align_sequence(row.Amplicon_Sequence, args.bowtie2_index).split('\t'),[0,1,2,3,5]))
if fields_to_append[0]=='*':
info('The amplicon [%s] is not mappable to the reference genome provided!' % idx )
additional_columns.append([idx,'NOT_ALIGNED',0,-1,'+',''])
else:
additional_columns.append([idx]+fields_to_append)
info('The amplicon [%s] was mapped to: %s ' % (idx,' '.join(fields_to_append[:3]) ))
df_template=df_template.join(pd.DataFrame(additional_columns,columns=['Name','chr_id','bpstart','bpend','strand','Reference_Sequence']).set_index('Name'))
df_template.bpstart=df_template.bpstart.astype(int)
df_template.bpend=df_template.bpend.astype(int)
#Check reference is the same otherwise throw a warning
for idx,row in df_template.iterrows():
if row.Amplicon_Sequence != row.Reference_Sequence and row.Amplicon_Sequence != reverse_complement(row.Reference_Sequence):
warn('The amplicon sequence %s provided:\n%s\n\nis different from the reference sequence(both strands):\n\n%s\n\n%s\n' %(row.name,row.Amplicon_Sequence,row.Amplicon_Sequence,reverse_complement(row.Amplicon_Sequence)))
if RUNNING_MODE=='ONLY_GENOME' or RUNNING_MODE=='AMPLICONS_AND_GENOME':
###HERE we recreate the uncompressed genome file if not available###
#check you have all the files for the genome and create a fa idx for samtools
uncompressed_reference=args.bowtie2_index+'.fa'
#if not os.path.exists(GENOME_LOCAL_FOLDER):
# os.mkdir(GENOME_LOCAL_FOLDER)
if os.path.exists(uncompressed_reference):
info('The uncompressed reference fasta file for %s is already present! Skipping generation.' % args.bowtie2_index)
else:
#uncompressed_reference=os.path.join(GENOME_LOCAL_FOLDER,'UNCOMPRESSED_REFERENCE_FROM_'+args.bowtie2_index.replace('/','_')+'.fa')
info('Extracting uncompressed reference from the provided bowtie2 index since it is not available... Please be patient!')
cmd_to_uncompress='bowtie2-inspect %s > %s 2>>%s' % (args.bowtie2_index,uncompressed_reference,log_filename)
sb.call(cmd_to_uncompress,shell=True)
info('Indexing fasta file with samtools...')
#!samtools faidx {uncompressed_reference}
sb.call('samtools faidx %s 2>>%s ' % (uncompressed_reference,log_filename),shell=True)
#####CORRECT ONE####
#align in unbiased way the reads to the genome
if RUNNING_MODE=='ONLY_GENOME' or RUNNING_MODE=='AMPLICONS_AND_GENOME':
info('Aligning reads to the provided genome index...')
bam_filename_genome = _jp('%s_GENOME_ALIGNED.bam' % database_id)
aligner_command= 'bowtie2 -x %s -p %s %s -U %s 2>>%s| samtools view -bS - > %s' %(args.bowtie2_index,args.n_processes,args.bowtie2_options_string,processed_output_filename,log_filename,bam_filename_genome)
info(aligner_command)
sb.call(aligner_command,shell=True)
N_READS_ALIGNED=get_n_aligned_bam(bam_filename_genome)
#REDISCOVER LOCATIONS and DEMULTIPLEX READS
MAPPED_REGIONS=_jp('MAPPED_REGIONS/')
if not os.path.exists(MAPPED_REGIONS):
os.mkdir(MAPPED_REGIONS)
s1=r'''samtools view -F 0x0004 %s 2>>%s |''' % (bam_filename_genome,log_filename)+\
r'''awk '{OFS="\t"; bpstart=$4; bpend=bpstart; split ($6,a,"[MIDNSHP]"); n=0;\
for (i=1; i in a; i++){\
n+=1+length(a[i]);\
if (substr($6,n,1)=="S"){\
if (bpend==$4)\
bpstart-=a[i];\
else
bpend+=a[i];
}\
else if( (substr($6,n,1)!="I") && (substr($6,n,1)!="H") )\
bpend+=a[i];\
}\
if ( ($2 % 32)>=16)\
print $3,bpstart,bpend,"-",$1,$10,$11;\
else\
print $3,bpstart,bpend,"+",$1,$10,$11;}' | '''
s2=r''' sort -k1,1 -k2,2n | awk \
'BEGIN{chr_id="NA";bpstart=-1;bpend=-1; fastq_filename="NA"}\
{ if ( (chr_id!=$1) || (bpstart!=$2) || (bpend!=$3) )\
{\
if (fastq_filename!="NA") {close(fastq_filename); system("gzip "fastq_filename)}\
chr_id=$1; bpstart=$2; bpend=$3;\
fastq_filename=sprintf("__OUTPUTPATH__REGION_%s_%s_%s.fastq",$1,$2,$3);\
}\
print "@"$5"\n"$6"\n+\n"$7 >> fastq_filename;\
}' '''
cmd=s1+s2.replace('__OUTPUTPATH__',MAPPED_REGIONS)
info('Demultiplexing reads by location...')
sb.call(cmd,shell=True)
#gzip the missing ones
sb.call('gzip %s/*.fastq' % MAPPED_REGIONS,shell=True)
'''
The most common use case, where many different target sites are pooled into a single
high-throughput sequencing library for quantification, is not directly addressed by this implementation.
Potential users of CRISPResso would need to write their own code to generate separate input files for processing.
Importantly, this preprocessing code would need to remove any PCR amplification artifacts
(such as amplification of sequences from a gene and a highly similar pseudogene )
which may confound the interpretation of results.
This can be done by mapping of input sequences to a reference genome and removing
those that do not map to the expected genomic location, but is non-trivial for an end-user to implement.
'''
if RUNNING_MODE=='AMPLICONS_AND_GENOME':
files_to_match=glob.glob(os.path.join(MAPPED_REGIONS,'REGION*'))
n_reads_aligned_genome=[]
fastq_region_filenames=[]
for idx,row in df_template.iterrows():
info('Processing amplicon:%s' % idx )
#check if we have reads
fastq_filename_region=os.path.join(MAPPED_REGIONS,'REGION_%s_%s_%s.fastq.gz' % (row['chr_id'],row['bpstart'],row['bpend']))
if os.path.exists(fastq_filename_region):
N_READS=get_n_reads_fastq(fastq_filename_region)
n_reads_aligned_genome.append(N_READS)
fastq_region_filenames.append(fastq_filename_region)
files_to_match.remove(fastq_filename_region)
if N_READS>=args.min_reads_to_use_region:
info('\nThe amplicon [%s] has enough reads (%d) mapped to it! Running CRISPResso!\n' % (idx,N_READS))
crispresso_cmd='CRISPResso -r1 %s -a %s -o %s --name %s' % (fastq_filename_region,row['Amplicon_Sequence'],OUTPUT_DIRECTORY,idx)
if row['sgRNA'] and not pd.isnull(row['sgRNA']):
crispresso_cmd+=' -g %s' % row['sgRNA']
if row['Expected_HDR'] and not pd.isnull(row['Expected_HDR']):
crispresso_cmd+=' -e %s' % row['Expected_HDR']
if row['Coding_sequence'] and not pd.isnull(row['Coding_sequence']):
crispresso_cmd+=' -c %s' % row['Coding_sequence']
crispresso_cmd=propagate_options(crispresso_cmd,crispresso_options,args)
info('Running CRISPResso:%s' % crispresso_cmd)
sb.call(crispresso_cmd,shell=True)
else:
warn('The amplicon [%s] has not enough reads (%d) mapped to it! Skipping the execution of CRISPResso!' % (idx,N_READS))
else:
fastq_region_filenames.append('')
n_reads_aligned_genome.append(0)
warn("The amplicon %s doesn't have any read mapped to it!\n Please check your amplicon sequence." % idx)
df_template['Amplicon_Specific_fastq.gz_filename']=fastq_region_filenames
df_template['n_reads']=n_reads_aligned_genome
df_template['n_reads_aligned_%']=df_template['n_reads']/float(N_READS_ALIGNED)*100
if args.gene_annotations:
df_template=df_template.apply(lambda row: find_overlapping_genes(row, df_genes),axis=1)
df_template.fillna('NA').to_csv(_jp('REPORT_READS_ALIGNED_TO_GENOME_AND_AMPLICONS.txt'),sep='\t')
#write another file with the not amplicon regions
info('Reporting problematic regions...')
coordinates=[]
for region in files_to_match:
coordinates.append(os.path.basename(region).replace('.fastq.gz','').replace('.fastq','').split('_')[1:4]+[region,get_n_reads_fastq(region)])
df_regions=pd.DataFrame(coordinates,columns=['chr_id','bpstart','bpend','fastq_file','n_reads'])
df_regions=df_regions.convert_objects(convert_numeric=True)
df_regions.dropna(inplace=True) #remove regions in chrUn
df_regions.bpstart=df_regions.bpstart.astype(int)
df_regions.bpend=df_regions.bpend.astype(int)
df_regions['n_reads_aligned_%']=df_regions['n_reads']/float(N_READS_ALIGNED)*100
df_regions['Reference_sequence']=df_regions.apply(lambda row: get_region_from_fa(row.chr_id,row.bpstart,row.bpend,uncompressed_reference),axis=1)
if args.gene_annotations:
info('Checking overlapping genes...')
df_regions=df_regions.apply(lambda row: find_overlapping_genes(row, df_genes),axis=1)
if np.sum(np.array(map(int,pd.__version__.split('.')))*(100,10,1))< 170:
df_regions.sort('n_reads',ascending=False,inplace=True)
else:
df_regions.sort_values(by='n_reads',ascending=False,inplace=True)
df_regions.fillna('NA').to_csv(_jp('REPORTS_READS_ALIGNED_TO_GENOME_NOT_MATCHING_AMPLICONS.txt'),sep='\t',index=None)
if RUNNING_MODE=='ONLY_GENOME' :
#Load regions and build REFERENCE TABLES
info('Parsing the demultiplexed files and extracting locations and reference sequences...')
coordinates=[]
for region in glob.glob(os.path.join(MAPPED_REGIONS,'REGION*.fastq.gz')):
coordinates.append(os.path.basename(region).replace('.fastq.gz','').split('_')[1:4]+[region,get_n_reads_fastq(region)])
print 'C:',coordinates
df_regions=pd.DataFrame(coordinates,columns=['chr_id','bpstart','bpend','fastq_file','n_reads'])
print 'D:', df_regions
df_regions=df_regions.convert_objects(convert_numeric=True)
df_regions.dropna(inplace=True) #remove regions in chrUn
df_regions.bpstart=df_regions.bpstart.astype(int)
df_regions.bpend=df_regions.bpend.astype(int)
print df_regions
df_regions['sequence']=df_regions.apply(lambda row: get_region_from_fa(row.chr_id,row.bpstart,row.bpend,uncompressed_reference),axis=1)
df_regions['n_reads_aligned_%']=df_regions['n_reads']/float(N_READS_ALIGNED)*100
if args.gene_annotations:
info('Checking overlapping genes...')
df_regions=df_regions.apply(lambda row: find_overlapping_genes(row, df_genes),axis=1)
if np.sum(np.array(map(int,pd.__version__.split('.')))*(100,10,1))< 170:
df_regions.sort('n_reads',ascending=False,inplace=True)
else:
df_regions.sort_values(by='n_reads',ascending=False,inplace=True)
df_regions.fillna('NA').to_csv(_jp('REPORT_READS_ALIGNED_TO_GENOME_ONLY.txt'),sep='\t',index=None)
#run CRISPResso
#demultiplex reads in the amplicons and call crispresso!
info('Running CRISPResso on the regions discovered...')
for idx,row in df_regions.iterrows():
if row.n_reads > args.min_reads_to_use_region:
info('\nRunning CRISPResso on: %s-%d-%d...'%(row.chr_id,row.bpstart,row.bpend ))
crispresso_cmd='CRISPResso -r1 %s -a %s -o %s' %(row.fastq_file,row.sequence,OUTPUT_DIRECTORY)
crispresso_cmd=propagate_options(crispresso_cmd,crispresso_options,args)
info('Running CRISPResso:%s' % crispresso_cmd)
sb.call(crispresso_cmd,shell=True)
else:
info('Skipping region: %s-%d-%d , not enough reads (%d)' %(row.chr_id,row.bpstart,row.bpend, row.n_reads))
#write alignment statistics
with open(_jp('MAPPING_STATISTICS.txt'),'w+') as outfile:
outfile.write('READS IN INPUTS:%d\nREADS AFTER PREPROCESSING:%d\nREADS ALIGNED:%d' % (N_READS_INPUT,N_READS_AFTER_PREPROCESSING,N_READS_ALIGNED))
#write a file with basic quantification info for each sample
def check_output_folder(output_folder):
    """Return the path of the CRISPResso quantification report inside
    *output_folder*, or None when the report does not exist."""
    candidate = os.path.join(output_folder, 'Quantification_of_editing_frequency.txt')
    return candidate if os.path.exists(candidate) else None
def parse_quantification(quantification_file):
    """Parse CRISPResso's Quantification_of_editing_frequency.txt report.

    The report layout is fixed: one header line, then the Unmodified / NHEJ /
    HDR / Mixed HDR-NHEJ counts on consecutive lines, one separator line, and
    finally the total number of aligned reads.

    Returns a 5-tuple of floats:
    (N_UNMODIFIED, N_MODIFIED, N_REPAIRED, N_MIXED_HDR_NHEJ, N_TOTAL)
    """
    # Patterns are raw strings: "\d" in a plain string is an invalid escape
    # sequence (a warning and eventually an error in modern Python).
    with open(quantification_file) as infile:
        infile.readline()  # skip header line
        N_UNMODIFIED = float(re.findall(r"Unmodified:(\d+)", infile.readline())[0])
        N_MODIFIED = float(re.findall(r"NHEJ:(\d+)", infile.readline())[0])
        N_REPAIRED = float(re.findall(r"HDR:(\d+)", infile.readline())[0])
        N_MIXED_HDR_NHEJ = float(re.findall(r"Mixed HDR-NHEJ:(\d+)", infile.readline())[0])
        infile.readline()  # skip separator line
        N_TOTAL = float(re.findall(r"Total Aligned:(\d+) reads", infile.readline())[0])
    return N_UNMODIFIED, N_MODIFIED, N_REPAIRED, N_MIXED_HDR_NHEJ, N_TOTAL
quantification_summary=[]
if RUNNING_MODE=='ONLY_AMPLICONS' or RUNNING_MODE=='AMPLICONS_AND_GENOME':
df_final_data=df_template
else:
df_final_data=df_regions
for idx,row in df_final_data.iterrows():
if RUNNING_MODE=='ONLY_AMPLICONS' or RUNNING_MODE=='AMPLICONS_AND_GENOME':
folder_name='CRISPResso_on_%s' % slugify(idx)
else:
folder_name='CRISPResso_on_REGION_%s_%d_%d' %(row.chr_id,row.bpstart,row.bpend )
quantification_file=check_output_folder(_jp(folder_name))
if quantification_file:
N_UNMODIFIED,N_MODIFIED,N_REPAIRED,N_MIXED_HDR_NHEJ,N_TOTAL=parse_quantification(quantification_file)
quantification_summary.append([idx,N_UNMODIFIED/N_TOTAL*100,N_MODIFIED/N_TOTAL*100,N_REPAIRED/N_TOTAL*100,N_MIXED_HDR_NHEJ/N_TOTAL*100,N_TOTAL,row.n_reads])
else:
quantification_summary.append([idx,np.nan,np.nan,np.nan,np.nan,np.nan,row.n_reads])
warn('Skipping the folder %s, not enough reads or empty folder.'% folder_name)
df_summary_quantification=pd.DataFrame(quantification_summary,columns=['Name','Unmodified%','NHEJ%','HDR%', 'Mixed_HDR-NHEJ%','Reads_aligned','Reads_total'])
df_summary_quantification.fillna('NA').to_csv(_jp('SAMPLES_QUANTIFICATION_SUMMARY.txt'),sep='\t',index=None)
if RUNNING_MODE != 'ONLY_GENOME':
tot_reads_aligned = df_summary_quantification['Reads_aligned'].fillna(0).sum()
tot_reads = df_summary_quantification['Reads_total'].sum()
if RUNNING_MODE=='AMPLICONS_AND_GENOME':
this_bam_filename = bam_filename_genome
if RUNNING_MODE=='ONLY_AMPLICONS':
this_bam_filename = bam_filename_amplicons
#if less than 1/2 of reads aligned, find most common unaligned reads and advise the user
if tot_reads > 0 and tot_reads_aligned/tot_reads < 0.5:
warn('Less than half (%d/%d) of reads aligned. Finding most frequent unaligned reads.'%(tot_reads_aligned,tot_reads))
###
###this results in the unpretty messages being printed:
### sort: write failed: standard output: Broken pipe
### sort: write error
###
#cmd = "samtools view -f 4 %s | awk '{print $10}' | sort | uniq -c | sort -nr | head -n 10"%this_bam_filename
import signal
def default_sigpipe():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
cmd = "samtools view -f 4 %s | head -n 10000 | awk '{print $10}' | sort | uniq -c | sort -nr | head -n 10 | awk '{print $2}'"%this_bam_filename
# print("command is: "+cmd)
#p = sb.Popen(cmd, shell=True,stdout=sb.PIPE)
p = sb.Popen(cmd, shell=True,stdout=sb.PIPE,preexec_fn=default_sigpipe)
top_unaligned = p.communicate()[0]
top_unaligned_filename=_jp('CRISPRessoPooled_TOP_UNALIGNED.txt')
with open(top_unaligned_filename,'w') as outfile:
outfile.write(top_unaligned)
warn('Perhaps one or more of the given amplicon sequences were incomplete or incorrect. Below is a list of the most frequent unaligned reads (in the first 10000 unaligned reads). Check this list to see if an amplicon is among these reads.\n%s'%top_unaligned)
#cleaning up
if not args.keep_intermediate:
info('Removing Intermediate files...')
if args.fastq_r2!='':
files_to_remove=[processed_output_filename,flash_hist_filename,flash_histogram_filename,\
flash_not_combined_1_filename,flash_not_combined_2_filename]
else:
files_to_remove=[processed_output_filename]
if args.trim_sequences and args.fastq_r2!='':
files_to_remove+=[output_forward_paired_filename,output_reverse_paired_filename,\
output_forward_unpaired_filename,output_reverse_unpaired_filename]
if RUNNING_MODE=='ONLY_GENOME' or RUNNING_MODE=='AMPLICONS_AND_GENOME':
files_to_remove+=[bam_filename_genome]
if RUNNING_MODE=='ONLY_AMPLICONS':
files_to_remove+=[bam_filename_amplicons,amplicon_fa_filename]
for bowtie2_file in glob.glob(_jp('CUSTOM_BOWTIE2_INDEX.*')):
files_to_remove.append(bowtie2_file)
for file_to_remove in files_to_remove:
try:
if os.path.islink(file_to_remove):
#print 'LINK',file_to_remove
os.unlink(file_to_remove)
else:
os.remove(file_to_remove)
except:
warn('Skipping:%s' %file_to_remove)
info('All Done!')
print r'''
) )
( _______________________ (
__)__ | __ __ __ __ __ | __)__
C\| \ ||__)/ \/ \| |_ | \ | C\| \
\ / || \__/\__/|__|__|__/ | \ /
\___/ |_______________________| \___/
'''
sys.exit(0)
except Exception as e:
error('\n\nERROR: %s' % e)
sys.exit(-1)
if __name__ == '__main__':
main()
|
lucapinello/CRISPResso
|
CRISPResso/CRISPRessoPooledCORE.py
|
Python
|
agpl-3.0
| 50,699
|
[
"Bowtie"
] |
35f8fb15dca2d7a34dd2b7c730feb607a9112ae057f54c55d7268f8a58696f8e
|
"""Translate a Python AST to JavaScript
"""
import ast
import contextlib
import functools
import inspect
import textwrap
from collections import ChainMap
import ibis.expr.datatypes as dt
from .find import find_names
from .rewrite import rewrite
class SymbolTable(ChainMap):
    """ChainMap subclass implementing scope for the translator.

    Notes
    -----
    JavaScript requires declarations in strict mode, so to implement this we
    shove a "let" at the beginning of every variable name if it doesn't already
    exist in the current scope.
    """

    def __getitem__(self, key):
        # NOTE: lookup has a deliberate side effect -- the FIRST read of `key`
        # records it in the innermost scope and returns a "let"-prefixed
        # declaration; subsequent reads return the bare name, so a variable is
        # declared exactly once per scope in the generated JavaScript.
        if key not in self:
            self[key] = key
            return "let {}".format(key)
        return key
def indent(lines, spaces=4):
    """Indent `lines` by `spaces` spaces.

    Parameters
    ----------
    lines : Union[str, Iterable[str]]
        A string or iterable of strings to indent
    spaces : int
        The number of spaces to indent `lines`

    Returns
    -------
    indented_lines : str
    """
    # BUG FIX: the original assigned the normalized list to a dead variable
    # (`text = [lines]`) and then joined the raw input, so a multi-character
    # string argument was joined character-by-character
    # (indent("ab") -> "    a\n    b" instead of "    ab").
    if isinstance(lines, str):
        lines = [lines]
    text = "\n".join(lines)
    return textwrap.indent(text, " " * spaces)
def semicolon(f):
    """Decorate ``f`` so the string it returns is terminated with ";",
    turning a rendered JavaScript expression into a statement.

    Parameters
    ----------
    f : callable
    """

    @functools.wraps(f)
    def add_semicolon(*args, **kwargs):
        rendered = f(*args, **kwargs)
        return rendered + ";"

    return add_semicolon
# AST-to-AST rewrite rules applied before code generation: each maps a
# Python idiom onto its JavaScript equivalent.

@rewrite.register(ast.Call(func=ast.Name(id="print")))
def rewrite_print(node):
    # print(...) -> console.log(...)
    return ast.Call(
        func=ast.Attribute(
            value=ast.Name(id="console", ctx=ast.Load()), attr="log", ctx=ast.Load(),
        ),
        args=node.args,
        keywords=node.keywords,
    )


@rewrite.register(ast.Call(func=ast.Name(id="len")))
def rewrite_len(node):
    # len(x) -> x.length
    assert len(node.args) == 1
    return ast.Attribute(value=node.args[0], attr="length", ctx=ast.Load())


@rewrite.register(ast.Call(func=ast.Attribute(attr="append")))
def rewrite_append(node):
    # x.append(y) -> x.push(y)
    return ast.Call(
        func=ast.Attribute(value=node.func.value, attr="push", ctx=ast.Load()),
        args=node.args,
        keywords=node.keywords,
    )


@rewrite.register(
    ast.Call(func=ast.Attribute(value=ast.Name(id="Array"), attr="from_"))
)
def rewrite_array_from(node):
    # Array.from_(x) -> Array.from(x); the trailing underscore is needed on
    # the Python side because "from" is a Python keyword.
    return ast.Call(
        func=ast.Attribute(value=node.func.value, attr="from"),
        args=node.args,
        keywords=node.keywords,
    )
class PythonToJavaScriptTranslator:
constructor_map = {
"list": "Array",
"Array": "Array",
"Date": "Date",
"dict": "Object",
"Map": "Map",
"WeakMap": "WeakMap",
"str": "String",
"String": "String",
"set": "Set",
"Set": "Set",
"WeakSet": "WeakSet",
}
def __init__(self, function):
self.function = function
self.source = textwrap.dedent(inspect.getsource(function))
self.ast = ast.parse(self.source)
self.scope = SymbolTable()
self.current_function = None
self.current_class = None
self.is_generator = False
self.is_nested_definition = False
def compile(self):
return self.visit(self.ast)
def visit(self, node):
node = rewrite(node)
typename = node.__class__.__name__
method_name = "visit_{}".format(typename)
method = getattr(self, method_name, None)
if method is None:
raise NotImplementedError(
"{!r} nodes not yet implemented".format(method_name)
)
assert callable(method)
result = method(node)
return result
def visit_Name(self, node):
if self.current_class is not None and node.id == "self":
return "this"
return node.id
def visit_Yield(self, node):
self.is_generator = True
return "yield {}".format(self.visit(node.value))
def visit_YieldFrom(self, node):
self.is_generator = True
return "yield* {}".format(self.visit(node.value))
@semicolon
def visit_Assign(self, node):
    """Compile a single assignment statement.

    Only the ``target = value`` form with exactly one target is supported;
    tuple unpacking and chained assignment raise NotImplementedError.
    """
    try:
        (target,) = node.targets
    except ValueError:
        raise NotImplementedError("Only single assignment supported for now")
    if not isinstance(target, (ast.Name, ast.Subscript, ast.Attribute)):
        raise NotImplementedError(
            # BUG FIX: corrected the misspelling "assigment" in this message.
            "Only index, attribute, and variable name assignment "
            "supported, got {}".format(type(target).__name__)
        )
    is_name = isinstance(target, ast.Name)
    compiled_target = self.visit(target)
    # Subscript/attribute targets (and `this.*` inside a class body) must not
    # receive a "let" declaration, so pre-register them in the scope; plain
    # names fall through and get "let" on first use via
    # SymbolTable.__getitem__.
    if not is_name or (
        self.current_class is not None and compiled_target.startswith("this.")
    ):
        self.scope[compiled_target] = compiled_target
    return "{} = {}".format(self.scope[compiled_target], self.visit(node.value))

def translate_special_method(self, name):
    """Map Python special method names to their JavaScript equivalents."""
    return {"__init__": "constructor"}.get(name, name)
def visit_FunctionDef(self, node):
self.current_function = node
is_property_getter = any(
getattr(dec, "id", None) == "property" for dec in node.decorator_list
)
if self.current_class is None: # not a method
if is_property_getter:
raise TypeError("Functions cannot be properties, only methods can")
prefix = "function"
else:
if is_property_getter and self.is_generator:
raise TypeError("generator methods cannot be properties")
prefix = "get " * is_property_getter
with self.local_scope():
body = indent(map(self.visit, node.body))
if self.is_generator:
prefix += "* "
else:
prefix += " " * (self.current_class is None)
lines = [
prefix
+ self.translate_special_method(node.name)
+ "({}) {{".format(self.visit(node.args)),
body,
"}",
]
self.current_function = None
self.is_generator = False
return "\n".join(lines)
@semicolon
def visit_Return(self, node):
return "return {}".format(self.visit(node.value))
def visit_Add(self, node):
return "+"
def visit_Sub(self, node):
return "-"
def visit_Mult(self, node):
return "*"
def visit_Div(self, node):
return "/"
def visit_FloorDiv(self, node):
assert False, "should never reach FloorDiv"
def visit_Pow(self, node):
assert False, "should never reach Pow"
def visit_UnaryOp(self, node):
return "({}{})".format(self.visit(node.op), self.visit(node.operand))
def visit_USub(self, node):
return "-"
def visit_UAdd(self, node):
return "+"
def visit_BinOp(self, node):
    """Compile a binary operation, special-casing ``**`` and ``//`` which
    are rendered as Math.pow / Math.floor calls rather than operators."""
    lhs = self.visit(node.left)
    if isinstance(node.op, ast.Pow):
        return "Math.pow({}, {})".format(lhs, self.visit(node.right))
    if isinstance(node.op, ast.FloorDiv):
        return "Math.floor({} / {})".format(lhs, self.visit(node.right))
    return "({} {} {})".format(lhs, self.visit(node.op), self.visit(node.right))
def visit_Constant(self, node):
value = node.value
if value is None:
return "null"
if isinstance(value, bool):
return "true" if value else "false"
if isinstance(value, (int, float, str)):
return repr(value)
raise NotImplementedError(
"{!r} constants not yet implemented".format(value.__class__.__name__)
)
def visit_NameConstant(self, node):
value = node.value
if value is True:
return "true"
elif value is False:
return "false"
assert (
value is None
), "value is not True and is not False, must be None, got {}".format(value)
return "null"
def visit_Str(self, node):
return repr(node.s)
def visit_Num(self, node):
return repr(node.n)
def visit_List(self, node):
return "[{}]".format(", ".join(map(self.visit, node.elts)))
def visit_Tuple(self, node):
# tuples becomes lists in javascript
return "[{}]".format(", ".join(map(self.visit, node.elts)))
def visit_Dict(self, node):
return "{{{}}}".format(
", ".join(
"[{}]: {}".format(self.visit(key), self.visit(value))
for key, value in zip(node.keys, node.values)
)
)
@semicolon
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Starred(self, node):
return "...{}".format(self.visit(node.value))
def visit_Call(self, node):
thing_to_call = self.visit(node.func)
constructors = self.__class__.constructor_map
args = ", ".join(map(self.visit, node.args))
try:
thing_to_call = constructors[thing_to_call]
except KeyError:
format_string = "{}({})"
else:
format_string = "(new {}({}))"
return format_string.format(thing_to_call, args)
def visit_Attribute(self, node):
return "{}.{}".format(self.visit(node.value), node.attr)
def visit_For(self, node):
lines = [
"for (let {} of {}) {{".format(
self.visit(node.target), self.visit(node.iter)
)
]
with self.local_scope():
lines.append(indent(map(self.visit, node.body)))
lines.append("}")
return "\n".join(lines)
def visit_While(self, node):
lines = ["while ({}) {{".format(self.visit(node.test))]
with self.local_scope():
lines.append(indent(map(self.visit, node.body)))
lines.append("}")
return "\n".join(lines)
@semicolon
def visit_Break(self, node):
return "break"
@semicolon
def visit_Continue(self, node):
return "continue"
def visit_Eq(self, node):
return "==="
def visit_NotEq(self, node):
return "!=="
def visit_Or(self, node):
return "||"
def visit_And(self, node):
return "&&"
def visit_BoolOp(self, node):
return "({})".format(
" {} ".format(self.visit(node.op)).join(map(self.visit, node.values))
)
def visit_Lt(self, node):
return "<"
def visit_LtE(self, node):
return "<="
def visit_Gt(self, node):
return ">"
def visit_GtE(self, node):
return ">="
def visit_Compare(self, node):
    """Compile a (possibly chained) comparison.

    Python's ``a < b < c`` has no JavaScript equivalent, so the chain is
    expanded into pairwise comparisons joined with ``&&``:
    ``(a < b) && (b < c)``.
    """
    pieces = []
    lhs = node.left
    for operator, rhs in zip(node.ops, node.comparators):
        pieces.append(
            "({} {} {})".format(
                self.visit(lhs), self.visit(operator), self.visit(rhs)
            )
        )
        lhs = rhs
    return " && ".join(pieces)
@semicolon
def visit_AugAssign(self, node):
return "{} {}= {}".format(
self.visit(node.target), self.visit(node.op), self.visit(node.value),
)
def visit_Module(self, node):
return "\n\n".join(map(self.visit, node.body))
def visit_arg(self, node):
if self.current_class is not None and node.arg == "self":
return ""
return node.arg
def visit_arguments(self, node):
args = list(filter(None, map(self.visit, node.args[:])))
vararg = node.vararg
if vararg is not None:
args.append("...{}".format(vararg.arg))
return ", ".join(args)
def visit_Lambda(self, node):
args = node.args
generated_args = self.visit(args)
return "(({}) => {})".format(generated_args, self.visit(node.body))
@contextlib.contextmanager
def local_scope(self):
"""Assign symbols to local variables.
"""
self.scope = self.scope.new_child()
try:
yield self.scope
finally:
self.scope = self.scope.parents
def visit_If(self, node):
lines = ["if ({}) {{".format(self.visit(node.test))]
with self.local_scope():
lines.append(indent(map(self.visit, node.body)))
lines.append("}")
if node.orelse:
lines[-1] += " else {"
with self.local_scope():
lines.append(indent(map(self.visit, node.orelse)))
lines.append("}")
return "\n".join(lines)
def visit_IfExp(self, node):
return "({} ? {} : {})".format(
self.visit(node.test), self.visit(node.body), self.visit(node.orelse),
)
def visit_Index(self, node):
return self.visit(node.value)
def visit_Subscript(self, node):
return "{}[{}]".format(self.visit(node.value), self.visit(node.slice))
def visit_ClassDef(self, node):
    """Compile a Python class into an ES6 ``class`` declaration."""
    self.current_class = node
    bases = node.bases
    lines = ["class {}".format(node.name)]
    if bases:
        lines[-1] += " extends {}".format(", ".join(map(self.visit, bases)))
    lines[-1] += " {"
    lines.append(indent(map(self.visit, node.body)))
    lines.append("}")
    self.current_class = None
    # NOTE(review): this mutates the *class-level* constructor_map, so a
    # user-defined class becomes constructible (`new Name(...)`) for every
    # translator instance, not just this one -- shared mutable state.
    self.__class__.constructor_map[node.name] = node.name
    return "\n".join(lines)

def visit_Not(self, node):
    # Logical negation operator token.
    return "!"
def visit_ListComp(self, node):
    """Generate a curried lambda function

    [x + y for x, y in [[1, 4], [2, 5], [3, 6]]]

    becomes

    [[1, 4], [2, 5], [3, 6]]].map(([x, y]) => x + y)

    ``if`` clauses become a preceding ``.filter(...)`` call.
    """
    try:
        (generator,) = node.generators
    except ValueError:
        raise NotImplementedError("Only single loop comprehensions are allowed")
    names = find_names(generator.target)
    argslist = [ast.arg(arg=name.id, annotation=None) for name in names]
    # A single loop variable maps to a plain parameter list; multiple
    # variables are destructured with JS array-pattern syntax ([x, y]) => ...
    if len(names) <= 1:
        signature = ast.arguments(
            args=argslist,
            vararg=None,
            kwonlyargs=[],
            kw_defaults=[],
            kwarg=None,
            defaults=[],
        )
    else:
        signature = ast.List(elts=argslist, ctx=ast.Load())
    array = generator.iter
    # Partial application: same lambda signature reused for filter and map.
    lam_sig = functools.partial(ast.Lambda, args=signature)
    filters = generator.ifs
    if filters:
        # Multiple `if` clauses are AND-ed into a single predicate.
        filt = ast.BoolOp(op=ast.And(), values=filters)
        # array.filter
        method = ast.Attribute(value=array, attr="filter", ctx=ast.Load())
        # array.filter(func)
        array = ast.Call(func=method, args=[lam_sig(body=filt)], keywords=[])
    method = ast.Attribute(value=array, attr="map", ctx=ast.Load())
    mapped = ast.Call(func=method, args=[lam_sig(body=node.elt)], keywords=[])
    result = self.visit(mapped)
    return result

def visit_Delete(self, node):
    """Compile ``del a, b`` into one JS ``delete`` statement per target."""
    return "\n".join(
        "delete {};".format(self.visit(target)) for target in node.targets
    )
if __name__ == "__main__":
from . import udf
@udf(
input_type=[dt.double, dt.double, dt.int64],
output_type=dt.Array(dt.double),
strict=False,
)
def my_func(a, b, n):
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
@property
def area(self):
return self.width * self.height
@property
def perimeter(self):
return self.width * 2 + self.height * 2
def foobar(self, n):
for i in range(n):
yield i
def sum(values):
result = 0
for value in values:
result += value
console.log(result) # noqa: F821
return values.reduce(lambda a, b: a + b, 0)
def range(n):
i = 0
while i < n:
yield i
i += 1
some_stuff = [x + y for x, y in [[1, 4], [2, 5], [3, 6]] if 2 < x < 3]
some_stuff1 = [range(x) for x in [1, 2, 3]]
some_stuff2 = [x + y for x, y in [(1, 4), (2, 5), (3, 6)]]
print(some_stuff)
print(some_stuff1)
print(some_stuff2)
x = 1
y = 2
x = 3
values = []
for i in range(10): # noqa: F821
values.append(i)
i = 0
foo = 2
bar = lambda x: x # noqa: E731
bazel = lambda x: y # noqa: E731
while i < n:
foo = bar(bazel(10))
i += 1
console.log(i) # noqa: F821
foo = 2
if i == 10 and (y < 2 or i != 42):
y += 2
else:
y -= 2
z = 42.0
w = 3
w = not False
yyz = None
print(yyz) # noqa: F821
foobar = x < y < z < w # x < y and y < z
foobar = 1
baz = foobar // 3
console.log(baz) # noqa: F821
my_obj = {"a": 1, "b": 2} # noqa: F841
z = (x if y else b) + 2 + foobar
foo = Rectangle(1, 2)
nnn = len(values)
return [sum(values) - a + b * y ** -x, z, foo.width, nnn]
print(my_func.js)
|
ibis-project/ibis-bigquery
|
ibis_bigquery/udf/core.py
|
Python
|
apache-2.0
| 17,287
|
[
"VisIt"
] |
bb3be62ec5d12cef85f793d2aec14ec809a4dcdf69062dfc1ad0b8692cf69543
|
import copy
import cv2
from matplotlib import pyplot as plt
import numpy as np
import regional
from scipy import interpolate
from skimage import segmentation
from skimage.draw import ellipse
from skimage.feature import peak_local_max
class ImageProcModel():
# Amount to increase around a neuron when extracting a template
TEMPLATE_BUFFER = 5
def __init__(self):
pass
def _prep_images(self, images, visualize=False):
blur = lambda img, sigma: cv2.GaussianBlur(img, (0, 0), sigma, None, sigma, cv2.BORDER_DEFAULT)
global_mean = np.mean(images, axis=0).astype(np.float32)
blurred_global_mean = blur(global_mean, 10)
preped_images = np.subtract(images.astype(np.float32), blurred_global_mean)
if visualize:
plt.figure(figsize=(10,10))
plt.subplot(1, 2, 1)
plt.imshow(global_mean)
plt.axis('off')
plt.title("Global Mean Image")
plt.subplot(1, 2, 2)
plt.imshow(blurred_global_mean)
plt.axis('off')
plt.title("Blurred Global Mean Image")
plt.figure(figsize=(10,10))
plt.subplot(1,2,1)
plt.imshow(images[0])
plt.axis('off')
plt.title("Original Frame 0")
plt.subplot(1,2,2)
plt.imshow(preped_images[0])
plt.axis('off')
plt.title("Background Subtracted Frame 0")
return preped_images
def _extract_template(self, region, images, make_square=True, visualize=False):
"""
Extract a template for this region
"""
image_dims = images[0].shape
binary_mask = np.zeros(image_dims)
binary_mask[zip(*region.coordinates)] = 1
center_y, center_x = region.center
min_y, min_x, max_y, max_x = region.bbox
min_y = max(0, min_y - self.TEMPLATE_BUFFER)
max_y = min(image_dims[0], max_y + self.TEMPLATE_BUFFER)
min_x = max(0, min_x - self.TEMPLATE_BUFFER)
max_x = min(image_dims[1], max_x + self.TEMPLATE_BUFFER)
# it is nice to have square templates.
if make_square:
if center_x - min_x < max_x - center_x:
bbox_radius_w = center_x - min_x
else:
bbox_radius_w = max_x - center_x
if center_y - min_y < max_y - center_y:
bbox_radius_h = center_y - min_y
else:
bbox_radius_h = max_y - center_y
if bbox_radius_w < bbox_radius_h:
bbox_radius = bbox_radius_w
else:
bbox_radius = bbox_radius_h
x1 = center_x - bbox_radius
x2 = center_x + bbox_radius
y1 = center_y - bbox_radius
y2 = center_y + bbox_radius
# Lets try to make the diameter odd while we are at it
y2 = min(image_dims[0], y2 + 1)
x2 = min(image_dims[1], x2 + 1)
else:
x1 = min_x
x2 = max_x
y1 = min_y
y2 = max_y
neuron_templates = images[:, y1:y2, x1:x2]
template = np.mean(neuron_templates, axis=0)
if visualize:
neuron_mask = binary_mask[y1:y2, x1:x2]
activations = []
for template in neuron_templates:
masked_template = neuron_mask * template
activations.append(np.mean(masked_template))
high_activations_indices = np.argsort(activations).tolist()
high_activations_indices.reverse()
plt.figure()
plt.subplot(1,3,1)
plt.imshow(np.mean(neuron_templates, axis=0))
plt.axis('off')
plt.title("Mean Neuron Template")
plt.subplot(1,3,2)
plt.imshow(neuron_mask)
plt.axis('off')
plt.title("Neuron Mask")
plt.subplot(1,3,3)
plt.imshow(np.mean(neuron_templates[high_activations_indices[:20]], axis=0))
plt.axis('off')
plt.title("Mean Neuron Template from top 20 Activations")
plt.figure()
plt.plot(np.arange(len(activations)), activations)
plt.title("Activations per frame")
# show the frames with the "highest activations"
plt.figure(figsize=(15, 15))
for i, frame_index in enumerate(high_activations_indices[:5]):
plt.subplot(1, 5, i+1)
plt.imshow(neuron_templates[frame_index])
plt.axis('off')
plt.title("Frame %d" % (frame_index,))
# What do we want to return as the actual template?
return template
def train(self, images, regions, visualize=False):
    """Attach a mean-image template to every region.

    `images` is a stack of frames; each region in `regions` gains a
    `.template` attribute computed from the background-subtracted frames.
    """
    # compute a background image and subtract it from the frames.
    preped_images = self._prep_images(images, visualize=visualize)
    # NOTE(review): the deepcopy is commented out, so the caller's region
    # objects are stored and mutated in place -- confirm this is intended.
    self.regions = regions#copy.deepcopy(regions)
    for region in self.regions:
        region.template = self._extract_template(region, preped_images, make_square=True, visualize=visualize)
def add_to_training_set(self):
pass
def _compute_correlation(self, images, template):
    """Cross-correlate `template` against every frame and return a
    frame-sized correlation image, rescaled from [-1, 1] to [0, 1].

    The per-frame normalized cross-correlations are reduced with a
    pixel-wise max, then pasted into a zero image offset by half the
    template size so peaks line up with template centers.
    """
    xcorrs = [cv2.matchTemplate(image, template, cv2.TM_CCORR_NORMED) for image in images]
    # max, mean, median?
    xcorr = np.max(np.array(xcorrs), axis=0)
    # scale the correlation to be between 0 and 1
    f = interpolate.interp1d([-1, 1], [0, 1])
    xcorr_img = np.zeros(images[0].shape)
    # NOTE(review): `shape[0]/2` relies on Python 2 integer division; under
    # Python 3 it yields a float and the slice below would raise.
    y1 = template.shape[0]/2
    y2 = y1 + xcorr.shape[0]
    x1 = template.shape[1]/2
    x2 = x1 + xcorr.shape[1]
    xcorr_img[y1:y2,x1:x2] = f(xcorr)
    return xcorr_img

def _non_max_suppression(self, xcorr_img, threshold_abs=0.65, min_distance=10):
    # Peak picking: local maxima above `threshold_abs`, at least
    # `min_distance` pixels apart.
    return peak_local_max(xcorr_img, threshold_abs=threshold_abs, min_distance=min_distance)
def _construct_region_for_location(self, location, images, template, mean_image):
"""
Given a location, build a mask around it. Just do some simple segmenting
to construct the mask.
"""
image_dims = images[0].shape
template_dims = template.shape
template_area = template_dims[0] * template_dims[1]
template_half_height = template_dims[0] / 2
template_half_width = template_dims[1] / 2
y, x = location
min_x = max(0, x - template_half_width)
max_x = min(image_dims[1], x + template_half_width)
min_y = max(0, y - template_half_height)
max_y = min(image_dims[0], y + template_half_height)
neuron = mean_image[min_y:max_y, min_x:max_x]
neuron_area = neuron.shape[0] * neuron.shape[1]
f = interpolate.interp1d([np.min(neuron), np.max(neuron)], [-1, 1])
scaled_neuron = f(neuron)
seg = segmentation.felzenszwalb(scaled_neuron, scale=1000, sigma=1, min_size=int(.4 * template_area))
# Is this mask good to go?
seg_g2g = True
# if the mask is more than 75% percent of the window, its probably bad
if np.sum(seg) >= 0.75 * neuron_area:
seg_g2g = False
# did we even find a region?
if np.count_nonzero(seg) == 0:
seg_g2g = False
# did we find too many regions?
if np.unique(seg).shape[0] != 2: # we only want 0s and 1s
seg_g2g = False
# we didn't find a good segmentation
if not seg_g2g:
# Just make an ellipse?
# Rather than an ellipse, we should just use the template
cx = neuron.shape[1] / 2
cy = neuron.shape[0] / 2
rx = cx * 3 / 4
ry = cy * 3 / 4
seg = np.zeros(neuron.shape)
rr, cc = ellipse(cy, cx, ry, rx)
seg[rr, cc] = 1
mask = np.zeros(image_dims)
mask[min_y:max_y, min_x:max_x] = seg
coordinates = np.transpose(np.nonzero(mask)).tolist()
return regional.one(coordinates)
def test(self, images):
"""
Return neurons
Are the images already processed?
"""
preped_images = self._prep_images(images, visualize=False)
mean_image = np.mean(preped_images, axis=0)
new_regions = []
for region in self.regions:
template = region.template
xcorr_img = self._compute_correlation(preped_images, template)
peaks = self._non_max_suppression(xcorr_img, threshold_abs=0.7, min_distance=template.shape[0] / 2)
for location in peaks:
correlation = xcorr_img[location]
new_region = self._construct_region_for_location(location, preped_images, region.template, mean_image)
new_region.conf = correlation
new_regions.append(new_region)
return new_regions
def update(self):
pass
|
gvanhorn38/active_neurofinder
|
baselearners/imageproc/model.py
|
Python
|
mit
| 8,168
|
[
"NEURON"
] |
a3457af0410d3efacd3218b48fe2ea32324fc6d0986739922a5be2119fe99c00
|
## @namespace pygenn.genn_model
"""
This module provides the GeNNModel class to simplify working with pygenn module and
helper functions to derive custom model classes.
GeNNModel should be used to configure a model, build, load and
finally run it. Recording is done manually by pulling from the population of
interest and then copying the values from Variable.view attribute. Each
simulation step must be triggered manually by calling step_time function.
Example:
The following example shows in a (very) simplified manner how to build and
run a simulation using GeNNModel::
from pygenn.genn_model import GeNNModel
gm = GeNNModel("float", "test")
# add populations
neuron_pop = gm.add_neuron_population(_parameters_truncated_)
syn_pop = gm.add_synapse_population(_parameters_truncated_)
# build and load model
gm.build()
gm.load()
Vs = numpy.empty((simulation_length, population_size))
# Variable.view provides a view into a raw C array
# here a Variable call V (voltage) will be recorded
v_view = neuron_pop.vars["V"].view
# run a simulation for 1000 steps
    for i in range(1000):
# manually trigger one simulation step
gm.step_time()
# when you pull state from device, views of all variables
# are updated and show current simulated values
neuron_pop.pull_state_from_device()
        # finally, record voltage by copying from the view into the array.
Vs[i,:] = v_view
"""
# python imports
from collections import namedtuple, OrderedDict
from importlib import import_module
from os import path
from platform import system
from psutil import cpu_count
from subprocess import check_call # to call make
from textwrap import dedent
from warnings import warn
# 3rd party imports
import numpy as np
from six import iteritems, itervalues, string_types
# pygenn imports
from . import genn_wrapper
from .genn_wrapper import SharedLibraryModelNumpy as slm
from .genn_wrapper.Models import (Var, VarRef, VarInit, VarReference,
WUVarReference, VarVector, VarRefVector)
from .genn_wrapper.InitSparseConnectivitySnippet import Init as InitSparse
from .genn_wrapper.InitToeplitzConnectivitySnippet import Init as InitToeplitz
from .genn_wrapper.Snippet import (make_dpf, EGP, ParamVal, DerivedParam,
EGPVector, ParamValVector,
DerivedParamVector)
from .genn_wrapper.InitSparseConnectivitySnippet import make_cmlf, make_cksf
from .genn_wrapper.StlContainers import StringVector
from .genn_wrapper import VarLocation_HOST_DEVICE
from .genn_groups import (NeuronGroup, SynapseGroup,
CurrentSource, CustomUpdate)
from .model_preprocessor import prepare_snippet
# Loop through backends in preferential order
backend_modules = OrderedDict()
for b in ["CUDA", "SingleThreadedCPU", "OpenCL"]:
    # Try and import
    try:
        m = import_module(".genn_wrapper." + b + "Backend", "pygenn")
    # Ignore failed imports - likely due to non-supported backends
    except ImportError as ex:
        pass
    # Raise any other errors
    except:
        raise
    # Otherwise add to (ordered) dictionary
    else:
        backend_modules[b] = m

# Bundles a numpy dtype with the shared-library-model functions used to wrap
# existing device memory as numpy arrays (array and single-value variants).
GeNNType = namedtuple("GeNNType", ["np_dtype", "assign_ext_ptr_array", "assign_ext_ptr_single"])
class GeNNModel(object):
"""GeNNModel class
This class helps to define, build and run a GeNN model from python
"""
def __init__(self, precision="float", model_name="GeNNModel",
             backend=None, time_precision=None,
             genn_log_level=genn_wrapper.warning,
             code_gen_log_level=genn_wrapper.warning,
             backend_log_level=genn_wrapper.warning,
             **preference_kwargs):
    """Init GeNNModel

    Keyword args:
    precision          -- string precision as string ("float" or "double").
                          Defaults to "float".
    model_name         -- string name of the model. Defaults to "GeNNModel".
    backend            -- string specifying name of backend module to use
                          Defaults to None to pick 'best' backend for your system
    time_precision     -- string time precision as string ("float" or
                          "double"). Defaults to the value of `precision`.
    genn_log_level     -- Log level for GeNN
    code_gen_log_level -- Log level for GeNN code-generator
    backend_log_level  -- Log level for backend
    preference_kwargs  -- Additional keyword arguments to set in backend preferences structure

    Raises:
        ValueError -- if precision or time_precision is not "float" or "double"
    """
    # Based on time precision, create correct type
    # of SLM class and determine GeNN time type
    # **NOTE** all SLM uses its template parameter for is time variable
    self._time_precision = precision if time_precision is None else time_precision
    if self._time_precision == "float":
        self._slm = slm.SharedLibraryModelNumpy_f()
        genn_time_type = "TimePrecision_FLOAT"
    elif self._time_precision == "double":
        self._slm = slm.SharedLibraryModelNumpy_d()
        genn_time_type = "TimePrecision_DOUBLE"
    else:
        # BUG FIX: the message previously used field '{1}' with a single
        # .format() argument, raising IndexError instead of this ValueError.
        raise ValueError(
            "Supported time precisions are float and double, "
            "but '{0}' was given".format(self._time_precision))

    # Store precision in class and determine GeNN scalar type
    self._scalar = precision
    if precision == "float":
        genn_scalar_type = "GENN_FLOAT"
    elif precision == "double":
        genn_scalar_type = "GENN_DOUBLE"
    else:
        # BUG FIX: same '{1}' -> '{0}' format-field correction as above.
        raise ValueError(
            "Supported precisions are float and double, "
            "but '{0}' was given".format(precision))

    # Initialise GeNN logging
    genn_wrapper.init_logging(genn_log_level, code_gen_log_level)

    self._built = False
    self._loaded = False
    # NOTE: assigning use_backend invokes the property setter, which
    # resolves the backend module (or picks the best available one).
    self.use_backend = backend
    self._preferences = preference_kwargs
    self.backend_log_level = backend_log_level

    self._model = genn_wrapper.ModelSpecInternal()
    self._model.set_precision(getattr(genn_wrapper, genn_scalar_type))
    self._model.set_time_precision(getattr(genn_wrapper, genn_time_type))

    self.default_var_location = genn_wrapper.VarLocation_HOST_DEVICE
    self.model_name = model_name
    self.neuron_populations = {}
    self.synapse_populations = {}
    self.current_sources = {}
    self.custom_updates = {}
    self.dT = 0.1

    # Build dictionary containing conversions between GeNN C++ types and numpy types
    self.genn_types = {
        "float": GeNNType(np.float32, self._slm.assign_external_pointer_array_f, self._slm.assign_external_pointer_single_f),
        "double": GeNNType(np.float64, self._slm.assign_external_pointer_array_d, self._slm.assign_external_pointer_single_d),
        "int": GeNNType(np.int32, self._slm.assign_external_pointer_array_i, self._slm.assign_external_pointer_single_i),
        "unsigned int": GeNNType(np.uint32, self._slm.assign_external_pointer_array_ui, self._slm.assign_external_pointer_single_ui),
        "short": GeNNType(np.int16, self._slm.assign_external_pointer_array_s, self._slm.assign_external_pointer_single_s),
        "unsigned short": GeNNType(np.uint16, self._slm.assign_external_pointer_array_us, self._slm.assign_external_pointer_single_us),
        "char": GeNNType(np.int8, self._slm.assign_external_pointer_array_sc, self._slm.assign_external_pointer_single_sc),
        "unsigned char": GeNNType(np.uint8, self._slm.assign_external_pointer_array_uc, self._slm.assign_external_pointer_single_uc),
        "uint64_t": GeNNType(np.uint64, None, None),
        "int64_t": GeNNType(np.int64, None, None),
        "uint32_t": GeNNType(np.uint32, self._slm.assign_external_pointer_array_ui, self._slm.assign_external_pointer_single_ui),
        "int32_t": GeNNType(np.int32, self._slm.assign_external_pointer_array_i, self._slm.assign_external_pointer_single_i),
        "uint16_t": GeNNType(np.uint16, self._slm.assign_external_pointer_array_us, self._slm.assign_external_pointer_single_us),
        "int16_t": GeNNType(np.int16, self._slm.assign_external_pointer_array_s, self._slm.assign_external_pointer_single_s),
        "uint8_t": GeNNType(np.uint8, self._slm.assign_external_pointer_array_uc, self._slm.assign_external_pointer_single_uc),
        "int8_t": GeNNType(np.int8, self._slm.assign_external_pointer_array_sc, self._slm.assign_external_pointer_single_sc)}

    # Add "scalar" type to genn_types - pointing at float or double as appropriate
    if precision == "float":
        self.genn_types["scalar"] = self.genn_types["float"]
    else:
        self.genn_types["scalar"] = self.genn_types["double"]

    # For backward compatibility, if selected GPU is set, remove it from
    # preferences dictionary and add in underlying GeNN preferences
    selected_gpu = self._preferences.pop("selected_gpu", None)
    if selected_gpu is not None:
        self._preferences["deviceSelectMethod"] = self._backend_module.DeviceSelect_MANUAL
        self._preferences["preferences.manualDeviceID"] = selected_gpu
@property
def use_backend(self):
    """Name of the code-generation backend this model will use."""
    return self._backend_name
@use_backend.setter
def use_backend(self, backend):
    # Backend can only be chosen before code generation has happened
    if self._built:
        raise Exception("GeNN model already built")
    # If no backend is specified
    if backend is None:
        # Check we have managed to import at least one backend module
        assert len(backend_modules) > 0
        # Set name to first (i.e. best) backend and lookup module from dictionary
        self._backend_name = next(iter(backend_modules))
        self._backend_module = backend_modules[self._backend_name]
    else:
        # NOTE(review): an unknown backend name raises a bare KeyError here
        self._backend_name = backend
        self._backend_module = backend_modules[backend]
@property
def timing_enabled(self):
    """Whether GeNN kernel timing instrumentation is enabled."""
    return self._model.is_timing_enabled()
@timing_enabled.setter
def timing_enabled(self, timing):
    """Enable or disable kernel timing; only valid before building."""
    if not self._built:
        self._model.set_timing(timing)
    else:
        raise Exception("GeNN model already built")
@property
def batch_size(self):
    """Number of batches the model is simulated with."""
    return self._model.get_batch_size()
@batch_size.setter
def batch_size(self, batch_size):
    """Set the batch size; only valid before building."""
    if not self._built:
        self._model.set_batch_size(batch_size)
    else:
        raise Exception("GeNN model already built")
@property
def default_var_location(self):
    """Default variable location - defines
    where state variables are initialised"""
    # NOTE(review): getter is unimplemented - reading this property always
    # fails; only the setter below is usable
    assert False
    # return self._model.get_default
@default_var_location.setter
def default_var_location(self, location):
    # Location can only be changed before code generation
    if self._built:
        raise Exception("GeNN model already built")
    self._model.set_default_var_location(location)
@property
def default_sparse_connectivity_location(self):
    """Default sparse connectivity mode - where
    connectivity is initialised"""
    # **FIX** getter was declared as ``def ...(location):`` with no ``self``
    # parameter; a property getter must take the instance as its first
    # argument, otherwise accessing it fails before even reaching the body.
    # NOTE(review): getter remains unimplemented - only the setter is usable.
    assert False
    # return genn_wrapper.GeNNPreferences.cvar.defaultSparseConnectivityMode
@default_sparse_connectivity_location.setter
def default_sparse_connectivity_location(self, location):
    # Location can only be changed before code generation
    if self._built:
        raise Exception("GeNN model already built")
    self._model.set_default_sparse_connectivity_location(location)
@property
def model_name(self):
    """Name of the model"""
    return self._model.get_name()
@model_name.setter
def model_name(self, model_name):
    """Rename the model; only valid before building."""
    if not self._built:
        self._model.set_name(model_name)
    else:
        raise Exception("GeNN model already built")
@property
def t(self):
    """Current simulation time in ms."""
    return self._slm.get_time()
@t.setter
def t(self, t):
    """Overwrite the current simulation time."""
    self._slm.set_time(t)
@property
def timestep(self):
    """Current simulation timestep counter."""
    return self._slm.get_timestep()
@timestep.setter
def timestep(self, timestep):
    """Overwrite the current simulation timestep counter."""
    self._slm.set_timestep(timestep)
@property
def free_device_mem_bytes(self):
    """Bytes of free memory reported for the device by the loaded model."""
    # **FIX** removed stray trailing semicolon (non-idiomatic in Python)
    return self._slm.get_free_device_mem_bytes()
@property
def dT(self):
    """Simulation step size (in ms, per the ``t`` property)."""
    return self._model.get_dt()
@dT.setter
def dT(self, dt):
    """Set the step size; only valid before building."""
    if not self._built:
        self._model.set_dt(dt)
    else:
        raise Exception("GeNN model already built")
@property
def neuron_update_time(self):
    """Time spent in the neuron-update kernel (requires timing_enabled;
    units as reported by the shared library model - TODO confirm)."""
    return self._slm.get_neuron_update_time()
@property
def init_time(self):
    """Time spent in dense/neuron initialisation."""
    return self._slm.get_init_time()
@property
def presynaptic_update_time(self):
    """Time spent in the presynaptic-update kernel."""
    return self._slm.get_presynaptic_update_time()
@property
def postsynaptic_update_time(self):
    """Time spent in the postsynaptic-update kernel."""
    return self._slm.get_postsynaptic_update_time()
@property
def synapse_dynamics_time(self):
    """Time spent in the synapse-dynamics kernel."""
    return self._slm.get_synapse_dynamics_time()
@property
def init_sparse_time(self):
    """Time spent in sparse initialisation."""
    return self._slm.get_init_sparse_time()
def get_custom_update_time(self, name):
    """Return time spent in the named custom update (requires timing_enabled)."""
    return self._slm.get_custom_update_time(name)
def get_custom_update_transpose_time(self, name):
    """Return time spent in the named custom update's transpose pass."""
    return self._slm.get_custom_update_transpose_time(name)
def add_neuron_population(self, pop_name, num_neurons, neuron,
                          param_space, var_space):
    """Add a neuron population to the GeNN model
    Args:
    pop_name    -- name of the new population
    num_neurons -- number of neurons in the new population
    neuron      -- type of the NeuronModels class as string or instance of
                   neuron class derived from
                   ``pygenn.genn_wrapper.NeuronModels.Custom`` (see also
                   pygenn.genn_model.create_custom_neuron_class)
    param_space -- dict with param values for the NeuronModels class
    var_space   -- dict with initial variable values for the
                   NeuronModels class
    Returns:
    the newly created NeuronGroup
    Raises:
    Exception  -- if the model has already been built
    ValueError -- if a population with this name already exists
    """
    if self._built:
        raise Exception("GeNN model already built")
    if pop_name in self.neuron_populations:
        # **FIX** add the missing space so the message reads
        # "Neuron population 'X' already exists" rather than
        # "Neuron population 'X'already exists" (matches sibling methods)
        raise ValueError("Neuron population '{0}' "
                         "already exists".format(pop_name))
    n_group = NeuronGroup(pop_name, self)
    n_group.set_neuron(neuron, param_space, var_space)
    n_group.add_to(int(num_neurons))
    self.neuron_populations[pop_name] = n_group
    return n_group
def add_synapse_population(self, pop_name, matrix_type, delay_steps,
                           source, target, w_update_model, wu_param_space,
                           wu_var_space, wu_pre_var_space,
                           wu_post_var_space, postsyn_model,
                           ps_param_space, ps_var_space,
                           connectivity_initialiser=None):
    """Add a synapse population to the GeNN model
    Args:
    pop_name          -- name of the new population
    matrix_type       -- type of the matrix as string
    delay_steps       -- delay in number of steps
    source            -- source neuron group (name or NeuronGroup object)
    target            -- target neuron group (name or NeuronGroup object)
    w_update_model    -- type of the WeightUpdateModels class as string or
                         instance of weight update model class derived from
                         ``pygenn.genn_wrapper.WeightUpdateModels.Custom``
                         (see also
                         pygenn.genn_model.create_custom_weight_update_class)
    wu_param_space    -- dict with param values for the
                         WeightUpdateModels class
    wu_var_space      -- dict with initial values for
                         WeightUpdateModels state variables
    wu_pre_var_space  -- dict with initial values for
                         WeightUpdateModels presynaptic variables
    wu_post_var_space -- dict with initial values for
                         WeightUpdateModels postsynaptic variables
    postsyn_model     -- type of the PostsynapticModels class as string or
                         instance of postsynaptic model class derived from
                         ``pygenn.genn_wrapper.PostsynapticModels.Custom``
                         (see also
                         pygenn.genn_model.create_custom_postsynaptic_class)
    ps_param_space    -- dict with param values for the
                         PostsynapticModels class
    ps_var_space      -- dict with initial variable values for
                         the PostsynapticModels class
    connectivity_initialiser -- InitSparseConnectivitySnippet::Init
                         for connectivity
    Returns:
    the newly created SynapseGroup
    """
    if self._built:
        raise Exception("GeNN model already built")
    if pop_name in self.synapse_populations:
        raise ValueError("synapse population '{0}' "
                         "already exists".format(pop_name))
    # Resolve source/target which may be names or NeuronGroup objects
    src_grp = self._validate_neuron_group(source, "source")
    tgt_grp = self._validate_neuron_group(target, "target")
    # Build up the synapse group, then register it with the model
    syn_grp = SynapseGroup(pop_name, self)
    syn_grp.matrix_type = matrix_type
    syn_grp.set_connected_populations(src_grp, tgt_grp)
    syn_grp.set_weight_update(w_update_model, wu_param_space, wu_var_space,
                              wu_pre_var_space, wu_post_var_space)
    syn_grp.set_post_syn(postsyn_model, ps_param_space, ps_var_space)
    syn_grp.connectivity_initialiser = connectivity_initialiser
    syn_grp.add_to(delay_steps)
    self.synapse_populations[pop_name] = syn_grp
    return syn_grp
def add_slave_synapse_population(self, pop_name, master_pop, delay_steps,
                                 source, target, postsyn_model,
                                 ps_param_space, ps_var_space):
    """Add a 'slave' population to the GeNN model which shares
    weights and connectivity with a 'master' population
    Args:
    pop_name       -- name of the new population
    master_pop     -- master synapse group to share weights with
                      (either name or SynapseGroup object)
    delay_steps    -- delay in number of steps
    source         -- source neuron group (name or NeuronGroup object)
    target         -- target neuron group (name or NeuronGroup object)
    postsyn_model  -- type of the PostsynapticModels class as string or
                      instance of postsynaptic model class derived from
                      ``pygenn.genn_wrapper.PostsynapticModels.Custom``
                      (see also
                      pygenn.genn_model.create_custom_postsynaptic_class)
    ps_param_space -- dict with param values for the
                      PostsynapticModels class
    ps_var_space   -- dict with initial variable values for
                      the PostsynapticModels class
    Returns:
    the newly created SynapseGroup
    """
    if self._built:
        raise Exception("GeNN model already built")
    if pop_name in self.synapse_populations:
        raise ValueError("synapse population '{0}' "
                         "already exists".format(pop_name))
    # Resolve groups which may be passed as names or objects
    src_grp = self._validate_neuron_group(source, "source")
    tgt_grp = self._validate_neuron_group(target, "target")
    master_grp = self._validate_synapse_group(master_pop, "master_pop")
    # Create slave group bound to the master, set its postsynaptic model
    # (weights and connectivity come from the master) and register it
    syn_grp = SynapseGroup(pop_name, self, master_grp)
    syn_grp.set_connected_populations(src_grp, tgt_grp)
    syn_grp.set_post_syn(postsyn_model, ps_param_space, ps_var_space)
    syn_grp.add_to(delay_steps)
    self.synapse_populations[pop_name] = syn_grp
    return syn_grp
def add_current_source(self, cs_name, current_source_model, pop,
                       param_space, var_space):
    """Add a current source to the GeNN model
    Args:
    cs_name              -- name of the new current source
    current_source_model -- type of the CurrentSourceModels class as
                            string or instance of CurrentSourceModels
                            class derived from
                            ``pygenn.genn_wrapper.CurrentSourceModels.Custom``
                            (see also
                            pygenn.genn_model.create_custom_current_source_class)
    pop                  -- population into which the current source should
                            be injected (name or NeuronGroup object)
    param_space          -- dict with param values for the
                            CurrentSourceModels class
    var_space            -- dict with initial variable values for the
                            CurrentSourceModels class
    Returns:
    the newly created CurrentSource
    """
    if self._built:
        raise Exception("GeNN model already built")
    if cs_name in self.current_sources:
        raise ValueError("current source '{0}' "
                         "already exists".format(cs_name))
    # Resolve population which may be passed as a name or object
    target_pop = self._validate_neuron_group(pop, "pop")
    # Create the source, attach it to its population and register it
    src = CurrentSource(cs_name, self)
    src.set_current_source_model(current_source_model,
                                 param_space, var_space)
    src.add_to(target_pop)
    self.current_sources[cs_name] = src
    return src
def add_custom_update(self, cu_name, group_name, custom_update_model,
                      param_space, var_space, var_ref_space):
    """Add a custom update to the GeNN model
    Args:
    cu_name             -- name of the new custom update
    group_name          -- name of the custom update group this
                           update belongs to
    custom_update_model -- type of the CustomUpdateModel class as
                           string or instance of CustomUpdateModel
                           class derived from
                           ``pygenn.genn_wrapper.CustomUpdateModel.Custom``
                           (see also
                           pygenn.genn_model.create_custom_custom_update_class)
    param_space         -- dict with param values for the
                           CustomUpdateModel class
    var_space           -- dict with initial variable values for the
                           CustomUpdateModel class
    var_ref_space       -- dict with variable references for the
                           CustomUpdateModel class
    Returns:
    the newly created CustomUpdate
    Raises:
    Exception  -- if the model has already been built
    ValueError -- if a custom update with this name already exists
    """
    if self._built:
        raise Exception("GeNN model already built")
    # **FIX** duplicate-name check previously tested self.current_sources
    # (copy-paste from add_current_source), so clashing custom-update names
    # were never detected; check the custom_updates registry instead and
    # report the correct object kind in the error message
    if cu_name in self.custom_updates:
        raise ValueError("custom update '{0}' "
                         "already exists".format(cu_name))
    c_update = CustomUpdate(cu_name, self)
    c_update.set_custom_update_model(custom_update_model,
                                     param_space, var_space,
                                     var_ref_space)
    c_update.add_to(group_name)
    self.custom_updates[cu_name] = c_update
    return c_update
def build(self, path_to_model="./", force_rebuild=False):
    """Finalize and build a GeNN model
    Keyword args:
    path_to_model -- path where to place the generated model code.
                     Defaults to the local directory.
    force_rebuild -- should model be rebuilt even if
                     it doesn't appear to be required
    Returns:
    the memory-allocation result returned by genn_wrapper.generate_code
    Raises:
    Exception -- if the model has already been built
    """
    if self._built:
        raise Exception("GeNN model already built")
    self._path_to_model = path_to_model
    # Create output path (generated code goes in "<model name>_CODE")
    output_path = path.join(path_to_model, self.model_name + "_CODE")
    share_path = path.join(path.split(__file__)[0], "share")
    # Finalize model
    self._model.finalize()
    # Create suitable preferences object for backend
    preferences = self._backend_module.Preferences()
    # Set attributes on preferences object
    # (only keys the preferences object actually exposes are applied)
    for k, v in iteritems(self._preferences):
        if hasattr(preferences, k):
            setattr(preferences, k, v)
    # When using PyGeNN, always include model name in DLL
    preferences.includeModelNameInDLL = True
    # Create backend
    backend = self._backend_module.create_backend(self._model, output_path,
                                                  self.backend_log_level,
                                                  preferences)
    # Generate code
    mem_alloc = genn_wrapper.generate_code(self._model, backend,
                                           share_path, output_path, force_rebuild)
    # **YUCK** SWIG doesn't handle return objects returned by value very well so delete manually
    backend = None
    # Build code with the platform-appropriate toolchain
    if system() == "Windows":
        check_call(["msbuild", "/p:Configuration=Release", "/m", "/verbosity:minimal",
                    path.join(output_path, "runner.vcxproj")])
    else:
        # -j uses physical core count for parallel make
        check_call(["make", "-j", str(cpu_count(logical=False)), "-C", output_path])
    self._built = True
    return mem_alloc
def load(self, path_to_model="./", num_recording_timesteps=None):
    """Import the model as a shared library and initialize it.
    Keyword args:
    path_to_model           -- path where the generated model code was built
    num_recording_timesteps -- buffer size for the spike recording system;
                               required if the model uses recording
    Raises:
    Exception -- if already loaded, or recording is in use but
                 num_recording_timesteps was not given
    """
    if self._loaded:
        raise Exception("GeNN model already loaded")
    self._path_to_model = path_to_model
    self._slm.open(self._path_to_model, self.model_name, True)
    self._slm.allocate_mem()
    # If model uses recording system
    if self._model.is_recording_in_use():
        # Raise exception if recording timesteps is not set
        if num_recording_timesteps is None:
            raise Exception("Cannot use recording system without passing "
                            "number of recording timesteps to GeNNModel.load")
        # Allocate recording buffers
        self._slm.allocate_recording_buffers(num_recording_timesteps)
    # Loop through neuron populations and load any
    # extra global parameters required for initialization
    for pop_data in itervalues(self.neuron_populations):
        pop_data.load_init_egps()
    # Loop through synapse populations and load any
    # extra global parameters required for initialization
    for pop_data in itervalues(self.synapse_populations):
        pop_data.load_init_egps()
    # Loop through current sources
    for src_data in itervalues(self.current_sources):
        src_data.load_init_egps()
    # Loop through custom updates
    for cu_data in itervalues(self.custom_updates):
        cu_data.load_init_egps()
    # Initialize model (EGPs must already be loaded at this point)
    self._slm.initialize()
    # Loop through neuron populations
    for pop_data in itervalues(self.neuron_populations):
        pop_data.load(num_recording_timesteps)
    # Loop through synapse populations
    for pop_data in itervalues(self.synapse_populations):
        pop_data.load()
    # Loop through current sources
    for src_data in itervalues(self.current_sources):
        src_data.load()
    # Loop through custom updates
    for cu_data in itervalues(self.custom_updates):
        cu_data.load()
    # Now everything is set up call the sparse initialisation function
    self._slm.initialize_sparse()
    # Set loaded flag and built flag
    self._loaded = True
    self._built = True
def reinitialise(self):
    """Reinitialise the model to its original state without re-loading."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before reinitialising")
    # Re-run dense and neuron variable initialisation
    self._slm.initialize()
    # Reinitialise every group, in the same order the model was loaded:
    # neurons, synapses, current sources, then custom updates
    for registry in (self.neuron_populations, self.synapse_populations,
                     self.current_sources, self.custom_updates):
        for item in itervalues(registry):
            item.reinitialise()
    # Finally, re-run sparse variable initialisation
    self._slm.initialize_sparse()
def step_time(self):
    """Advance the simulation by one timestep."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before stepping")
    self._slm.step_time()
def custom_update(self, name):
    """Run the named custom update on the loaded model."""
    if self._loaded:
        self._slm.custom_update(name)
    else:
        raise Exception("GeNN model has to be loaded before performing custom update")
def pull_state_from_device(self, pop_name):
    """Copy the named population's state from device to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_state_from_device(pop_name)
def pull_spikes_from_device(self, pop_name):
    """Copy the named population's spikes from device to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_spikes_from_device(pop_name)
def pull_spike_events_from_device(self, pop_name):
    """Copy the named population's spike events from device to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_spike_events_from_device(pop_name)
def pull_current_spikes_from_device(self, pop_name):
    """Copy the named population's current-timestep spikes to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_current_spikes_from_device(pop_name)
def pull_current_spike_events_from_device(self, pop_name):
    """Copy the named population's current-timestep spike events to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_current_spike_events_from_device(pop_name)
def pull_connectivity_from_device(self, pop_name):
    """Copy the named population's connectivity from device to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_connectivity_from_device(pop_name)
def pull_var_from_device(self, pop_name, var_name):
    """Copy one named variable of a population from device to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling")
    self._slm.pull_var_from_device(pop_name, var_name)
def pull_extra_global_param_from_device(self, pop_name, egp_name, size=None):
    """Copy an extra global parameter of a population from device to host."""
    # Warn (then fall back to the legacy default) when no size is given;
    # NB: the warning is emitted even when the model isn't loaded yet
    if size is None:
        warn("The default of size=1 is very counter-intuitive and "
             "will be removed in future", DeprecationWarning)
        size = 1
    if self._loaded:
        self._slm.pull_extra_global_param(pop_name, egp_name, size)
    else:
        raise Exception("GeNN model has to be loaded before pulling")
def push_state_to_device(self, pop_name):
    """Copy the named population's state from host to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_state_to_device(pop_name)
def push_spikes_to_device(self, pop_name):
    """Copy the named population's spikes from host to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_spikes_to_device(pop_name)
def push_spike_events_to_device(self, pop_name):
    """Copy the named population's spike events from host to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_spike_events_to_device(pop_name)
def push_current_spikes_to_device(self, pop_name):
    """Copy the named population's current-timestep spikes to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_current_spikes_to_device(pop_name)
def push_current_spike_events_to_device(self, pop_name):
    """Copy the named population's current-timestep spike events to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_current_spike_events_to_device(pop_name)
def push_connectivity_to_device(self, pop_name):
    """Copy the named population's connectivity from host to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_connectivity_to_device(pop_name)
def push_var_to_device(self, pop_name, var_name):
    """Copy one named variable of a population from host to device."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pushing")
    self._slm.push_var_to_device(pop_name, var_name)
def push_extra_global_param_to_device(self, pop_name, egp_name, size=None):
    """Copy an extra global parameter of a population from host to device."""
    # Warn (then fall back to the legacy default) when no size is given;
    # NB: the warning is emitted even when the model isn't loaded yet
    if size is None:
        warn("The default of size=1 is very counter-intuitive and "
             "will be removed in future", DeprecationWarning)
        size = 1
    if self._loaded:
        self._slm.push_extra_global_param(pop_name, egp_name, size)
    else:
        raise Exception("GeNN model has to be loaded before pushing")
def pull_recording_buffers_from_device(self):
    """Copy the spike-recording buffers from device to host."""
    if not self._loaded:
        raise Exception("GeNN model has to be loaded before pulling recording buffers")
    if not self._model.is_recording_in_use():
        raise Exception("Cannot pull recording buffer if recording system is not in use")
    # Both preconditions hold - perform the transfer
    self._slm.pull_recording_buffers_from_device()
def end(self):
    """Free memory owned by the model's extra global parameters."""
    # **FIX** "custom_updates" was referenced as a bare name, which raises
    # NameError at runtime - it is an attribute of the model object
    for group in [self.neuron_populations, self.synapse_populations,
                  self.current_sources, self.custom_updates]:
        for g_name, g_dat in iteritems(group):
            for egp_name, egp_dat in iteritems(g_dat.extra_global_params):
                # if auto allocation is not enabled, let the user care
                # about freeing of the EGP
                if egp_dat.needsAllocation:
                    self._slm.free_extra_global_param(g_name, egp_name)
    # "normal" variables are freed when SharedLibraryModel is destroyed
def _validate_neuron_group(self, group, context):
    """Resolve *group* (a name or NeuronGroup) to a NeuronGroup.
    *context* names the parameter being validated, for error messages."""
    # Already a NeuronGroup - nothing to resolve
    if isinstance(group, NeuronGroup):
        return group
    # A name - look it up in the registry
    if isinstance(group, string_types):
        try:
            return self.neuron_populations[group]
        except KeyError:
            raise ValueError("'%s' neuron group '%s' not found" %
                             (context, group))
    # Anything else is a usage error
    raise ValueError("'%s' must be a NeuronGroup or string" % context)
def _validate_synapse_group(self, group, context):
    """Resolve *group* (a name or SynapseGroup) to a SynapseGroup.
    *context* names the parameter being validated, for error messages."""
    # Already a SynapseGroup - nothing to resolve
    if isinstance(group, SynapseGroup):
        return group
    # A name - look it up in the registry
    if isinstance(group, string_types):
        try:
            return self.synapse_populations[group]
        except KeyError:
            raise ValueError("'%s' synapse group '%s' not found" %
                             (context, group))
    # Anything else is a usage error
    raise ValueError("'%s' must be a SynapseGroup or string" % context)
def init_var(init_var_snippet, param_space):
    """This helper function creates a VarInit object
    to easily initialise a variable using a snippet.
    Args:
    init_var_snippet -- type of the InitVarSnippet class as string or
                        instance of class derived from
                        InitVarSnippet::Custom class.
    param_space      -- dict with param values for the InitVarSnippet class
    """
    # Resolve snippet instance, type and parameter values
    snippet, _, _, param_vals = prepare_snippet(init_var_snippet,
                                                param_space,
                                                genn_wrapper.InitVarSnippet)
    # **YUCK** VarInit (and GeNN) assume that the snippet will live forever but
    # as far as Python is concerned, snippet is never used again so it will be
    # destroyed. Disowning it here hands over its ownership to C++
    # **NOTE** this isn't the case with models as references to neuron and synapse
    # models are kept within NeuronGroup and SynapseGroup objects
    snippet.__disown__()
    return VarInit(snippet, param_vals)
def init_connectivity(init_sparse_connect_snippet, param_space):
    """This helper function creates a InitSparseConnectivitySnippet::Init
    object to easily initialise connectivity using a snippet.
    Args:
    init_sparse_connect_snippet -- type of the InitSparseConnectivitySnippet
                                   class as string or instance of class
                                   derived from
                                   InitSparseConnectivitySnippet::Custom.
    param_space                 -- dict with param values for the
                                   InitSparseConnectivitySnippet class
    """
    # Resolve snippet instance, type and parameter values
    snippet, _, _, param_vals = \
        prepare_snippet(init_sparse_connect_snippet, param_space,
                        genn_wrapper.InitSparseConnectivitySnippet)
    # **YUCK** GeNN assumes the snippet will live forever, but Python would
    # garbage-collect it here; disowning hands ownership over to C++
    # **NOTE** this isn't the case with models as references to neuron and
    # synapse models are kept within NeuronGroup and SynapseGroup objects
    snippet.__disown__()
    return InitSparse(snippet, param_vals)
def init_toeplitz_connectivity(init_toeplitz_connect_snippet, param_space):
    """This helper function creates a InitToeplitzConnectivitySnippet::Init
    object to easily initialise connectivity using a snippet.
    Args:
    init_toeplitz_connect_snippet -- type of the InitToeplitzConnectivitySnippet
                                     class as string or instance of class
                                     derived from its Custom class.
    param_space                   -- dict with param values for the
                                     InitToeplitzConnectivitySnippet class
    """
    # Resolve snippet instance, type and parameter values
    snippet, _, _, param_vals = \
        prepare_snippet(init_toeplitz_connect_snippet, param_space,
                        genn_wrapper.InitToeplitzConnectivitySnippet)
    # **YUCK** GeNN assumes the snippet will live forever, but Python would
    # garbage-collect it here; disowning hands ownership over to C++
    # **NOTE** this isn't the case with models as references to neuron and
    # synapse models are kept within NeuronGroup and SynapseGroup objects
    snippet.__disown__()
    return InitToeplitz(snippet, param_vals)
def create_var_ref(pop, var_name):
    """Create a Models::VarReference to a neuron or current source variable.
    Args:
    pop      -- population, either a NeuronGroup or CurrentSource object
    var_name -- name of variable in population to reference
    Returns a (reference, population) pair.
    """
    ref = genn_wrapper.create_var_ref(pop.pop, var_name)
    return (ref, pop)
def create_psm_var_ref(sg, var_name):
    """Create a Models::VarReference to a postsynaptic model variable.
    Args:
    sg       -- SynapseGroup object
    var_name -- name of postsynaptic model variable
                in synapse group to reference
    Returns a (reference, synapse group) pair.
    """
    ref = genn_wrapper.create_psmvar_ref(sg.pop, var_name)
    return (ref, sg)
def create_wu_pre_var_ref(sg, var_name):
    """Create a Models::VarReference to a presynaptic
    weight update model variable.
    Args:
    sg       -- SynapseGroup object
    var_name -- name of presynaptic weight update model
                variable in synapse group to reference
    Returns a (reference, synapse group) pair.
    """
    ref = genn_wrapper.create_wupre_var_ref(sg.pop, var_name)
    return (ref, sg)
def create_wu_post_var_ref(sg, var_name):
    """Create a Models::VarReference to a postsynaptic
    weight update model variable.
    Args:
    sg       -- SynapseGroup object
    var_name -- name of postsynaptic weight update model
                variable in synapse group to reference
    Returns a (reference, synapse group) pair.
    """
    ref = genn_wrapper.create_wupost_var_ref(sg.pop, var_name)
    return (ref, sg)
def create_wu_var_ref(g, var_name, tp_sg=None, tp_var_name=None):
    """Create a Models::WUVarReference to a weight update model variable.
    Args:
    g           -- SynapseGroup or CustomUpdate object
    var_name    -- name of weight update model variable
                   in synapse group to reference
    tp_sg       -- (optional) SynapseGroup object to
                   copy transpose of variable to
    tp_var_name -- (optional) name of weight update model variable in
                   transpose synapse group to copy transpose to
    Returns a (reference, synapse group) pair.
    """
    # When referencing a WU variable through a custom update, the
    # PyGeNN-level backreference is the update's underlying synapse group
    if isinstance(g, CustomUpdate):
        backref = g._synapse_group
    else:
        backref = g
    # Build the reference, optionally targeting a transpose destination
    if tp_sg is not None:
        ref = genn_wrapper.create_wuvar_ref(g.pop, var_name,
                                            tp_sg.pop, tp_var_name)
    else:
        ref = genn_wrapper.create_wuvar_ref(g.pop, var_name)
    return (ref, backref)
def create_custom_neuron_class(class_name, param_names=None,
                               var_name_types=None, derived_params=None,
                               sim_code=None, threshold_condition_code=None,
                               reset_code=None, support_code=None,
                               extra_global_params=None,
                               additional_input_vars=None,
                               is_auto_refractory_required=None,
                               custom_body=None):
    """This helper function creates a custom NeuronModel class.
    See also:
    create_custom_postsynaptic_class
    create_custom_weight_update_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name                  -- name of the new class
    Keyword args:
    param_names                 -- list of strings with param names
                                   of the model
    var_name_types              -- list of pairs of strings with variable
                                   names and types of the model
    derived_params              -- list of pairs, where the first member
                                   is string with name of the derived
                                   parameter and the second should be a
                                   functor returned by create_dpf_class
    sim_code                    -- string with the simulation code
    threshold_condition_code    -- string with the threshold condition code
    reset_code                  -- string with the reset code
    support_code                -- string with the support code
    extra_global_params         -- list of pairs of strings with names and
                                   types of additional parameters
    additional_input_vars       -- list of tuples with names and types as
                                   strings and initial values of additional
                                   local input variables
    is_auto_refractory_required -- does this model require auto-refractory
                                   logic to be generated?
    custom_body                 -- dictionary with additional attributes and
                                   methods of the new class
    """
    if custom_body is not None and not isinstance(custom_body, dict):
        raise ValueError("custom_body must be an isinstance of dict or None")
    body = {}
    # Each supplied code string becomes a getter returning the dedented code
    if sim_code is not None:
        body["get_sim_code"] = lambda self: dedent(sim_code)
    if threshold_condition_code is not None:
        body["get_threshold_condition_code"] = \
            lambda self: dedent(threshold_condition_code)
    if reset_code is not None:
        body["get_reset_code"] = lambda self: dedent(reset_code)
    if support_code is not None:
        body["get_support_code"] = lambda self: dedent(support_code)
    # Extra global parameters are (name, type) pairs
    if extra_global_params is not None:
        body["get_extra_global_params"] = \
            lambda self: EGPVector([EGP(name, egp_type)
                                    for name, egp_type in extra_global_params])
    # Additional input vars are (name, type, initial value) triples;
    # NB: deliberately skipped when the list is empty, not just None
    if additional_input_vars:
        body["get_additional_input_vars"] = \
            lambda self: ParamValVector([ParamVal(name, var_type, value)
                                         for name, var_type, value
                                         in additional_input_vars])
    if is_auto_refractory_required is not None:
        body["is_auto_refractory_required"] = \
            lambda self: is_auto_refractory_required
    # User-supplied attributes/methods override anything generated above
    if custom_body is not None:
        body.update(custom_body)
    return create_custom_model_class(
        class_name, genn_wrapper.NeuronModels.Custom, param_names,
        var_name_types, derived_params, body)
def create_custom_postsynaptic_class(class_name, param_names=None,
                                     var_name_types=None, derived_params=None,
                                     decay_code=None, apply_input_code=None,
                                     support_code=None, custom_body=None):
    """This helper function creates a custom PostsynapticModel class.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name       -- name of the new class
    Keyword args:
    param_names      -- list of strings with param names of the model
    var_name_types   -- list of pairs of strings with varible names and
                        types of the model
    derived_params   -- list of pairs, where the first member is string
                        with name of the derived parameter and the second
                        should be a functor returned by create_dpf_class
    decay_code       -- string with the decay code
    apply_input_code -- string with the apply input code
    support_code     -- string with the support code
    custom_body      -- dictionary with additional attributes and methods
                        of the new class
    """
    if not isinstance(custom_body, dict) and custom_body is not None:
        # **FIX** previously raised a bare ValueError() with no message;
        # use the same diagnostic as the sibling create_custom_* helpers
        raise ValueError("custom_body must be an isinstance of dict or None")
    body = {}
    # Each supplied code string becomes a getter returning the dedented code
    if decay_code is not None:
        body["get_decay_code"] = lambda self: dedent(decay_code)
    if apply_input_code is not None:
        body["get_apply_input_code"] = lambda self: dedent(apply_input_code)
    if support_code is not None:
        body["get_support_code"] = lambda self: dedent(support_code)
    # User-supplied attributes/methods override anything generated above
    if custom_body is not None:
        body.update(custom_body)
    return create_custom_model_class(
        class_name, genn_wrapper.PostsynapticModels.Custom, param_names,
        var_name_types, derived_params, body)
def create_custom_weight_update_class(class_name, param_names=None,
                                      var_name_types=None,
                                      pre_var_name_types=None,
                                      post_var_name_types=None,
                                      derived_params=None, sim_code=None,
                                      event_code=None, learn_post_code=None,
                                      synapse_dynamics_code=None,
                                      event_threshold_condition_code=None,
                                      pre_spike_code=None,
                                      post_spike_code=None,
                                      pre_dynamics_code=None,
                                      post_dynamics_code=None,
                                      sim_support_code=None,
                                      learn_post_support_code=None,
                                      synapse_dynamics_suppport_code=None,
                                      extra_global_params=None,
                                      is_pre_spike_time_required=None,
                                      is_post_spike_time_required=None,
                                      is_pre_spike_event_time_required=None,
                                      is_prev_pre_spike_time_required=None,
                                      is_prev_post_spike_time_required=None,
                                      is_prev_pre_spike_event_time_required=None,
                                      custom_body=None):
    """This helper function creates a custom WeightUpdateModel class.
    See also:
    create_custom_neuron_class
    create_custom_postsynaptic_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name          -- name of the new class
    Keyword args:
    param_names         -- list of strings with param names of the model
    var_name_types      -- list of (name, type) string pairs describing the
                           model state variables
    pre_var_name_types  -- list of (name, type) string pairs describing the
                           presynaptic variables
    post_var_name_types -- list of (name, type) string pairs describing the
                           postsynaptic variables
    derived_params      -- list of pairs, where the first member is string
                           with name of the derived parameter and the second
                           should be a functor returned by create_dpf_class
    sim_code, event_code, learn_post_code, synapse_dynamics_code,
    event_threshold_condition_code, pre_spike_code, post_spike_code,
    pre_dynamics_code, post_dynamics_code
                        -- code strings for the corresponding weight update
                           kernels/functions
    sim_support_code, learn_post_support_code,
    synapse_dynamics_suppport_code
                        -- support code strings (NOTE: the "suppport"
                           spelling mirrors the underlying GeNN API and is
                           kept for compatibility)
    extra_global_params -- list of (name, type) string pairs with additional
                           parameters
    is_*_required       -- booleans declaring whether the respective
                           (previous) spike / spike-like-event times are
                           needed in any weight update kernel
    custom_body         -- dictionary with additional attributes and
                           methods of the new class

    Raises:
    ValueError if custom_body is neither a dict nor None
    """
    if custom_body is not None and not isinstance(custom_body, dict):
        raise ValueError("custom_body must be an instance of dict or None")

    body = {}

    # Every code-string argument becomes a zero-argument accessor returning
    # the dedented source, so build them all from one table.
    code_accessors = (
        ("get_sim_code", sim_code),
        ("get_event_code", event_code),
        ("get_learn_post_code", learn_post_code),
        ("get_synapse_dynamics_code", synapse_dynamics_code),
        ("get_event_threshold_condition_code",
         event_threshold_condition_code),
        ("get_pre_spike_code", pre_spike_code),
        ("get_post_spike_code", post_spike_code),
        ("get_pre_dynamics_code", pre_dynamics_code),
        ("get_post_dynamics_code", post_dynamics_code),
        ("get_sim_support_code", sim_support_code),
        ("get_learn_post_support_code", learn_post_support_code),
        # "suppport" spelling matches the wrapped GeNN method name
        ("get_synapse_dynamics_suppport_code", synapse_dynamics_suppport_code),
    )
    for accessor_name, code in code_accessors:
        if code is not None:
            # Bind the current value via a default argument so each closure
            # keeps its own code string.
            body[accessor_name] = lambda self, _code=code: dedent(_code)

    if extra_global_params is not None:
        body["get_extra_global_params"] = \
            lambda self: EGPVector([EGP(egp[0], egp[1])
                                    for egp in extra_global_params])
    if pre_var_name_types is not None:
        body["get_pre_vars"] = \
            lambda self: VarVector([Var(*vn) for vn in pre_var_name_types])
    if post_var_name_types is not None:
        body["get_post_vars"] = \
            lambda self: VarVector([Var(*vn) for vn in post_var_name_types])

    # Boolean flags become zero-argument predicates.
    flag_accessors = (
        ("is_pre_spike_time_required", is_pre_spike_time_required),
        ("is_post_spike_time_required", is_post_spike_time_required),
        ("is_pre_spike_event_time_required", is_pre_spike_event_time_required),
        ("is_prev_pre_spike_time_required", is_prev_pre_spike_time_required),
        ("is_prev_post_spike_time_required",
         is_prev_post_spike_time_required),
        ("is_prev_pre_spike_event_time_required",
         is_prev_pre_spike_event_time_required),
    )
    for accessor_name, flag in flag_accessors:
        if flag is not None:
            body[accessor_name] = lambda self, _flag=flag: _flag

    if custom_body is not None:
        body.update(custom_body)
    return create_custom_model_class(
        class_name, genn_wrapper.WeightUpdateModels.Custom, param_names,
        var_name_types, derived_params, body)
def create_custom_current_source_class(class_name, param_names=None,
                                       var_name_types=None,
                                       derived_params=None,
                                       injection_code=None,
                                       extra_global_params=None,
                                       custom_body=None):
    """This helper function creates a custom CurrentSourceModel class.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_postsynaptic_class
    create_custom_init_var_snippet_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name -- name of the new class
    Keyword args:
    param_names -- list of strings with param names of the model
    var_name_types -- list of pairs of strings with variable names and
                      types of the model
    derived_params -- list of pairs, where the first member is string
                      with name of the derived parameter and the second
                      should be a functor returned by create_dpf_class
    injection_code -- string with the current injection code
    extra_global_params -- list of pairs of strings with names and types of
                           additional parameters
    custom_body -- dictionary with additional attributes and methods
                   of the new class

    Raises:
    ValueError if custom_body is neither a dict nor None
    """
    if not isinstance(custom_body, dict) and custom_body is not None:
        raise ValueError("custom_body must be an instance of dict or None")
    body = {}
    if injection_code is not None:
        body["get_injection_code"] = lambda self: dedent(injection_code)
    if extra_global_params is not None:
        body["get_extra_global_params"] = \
            lambda self: EGPVector([EGP(egp[0], egp[1])
                                    for egp in extra_global_params])
    if custom_body is not None:
        body.update(custom_body)
    return create_custom_model_class(
        class_name, genn_wrapper.CurrentSourceModels.Custom, param_names,
        var_name_types, derived_params, body)
def create_custom_custom_update_class(class_name, param_names=None,
                                      var_name_types=None,
                                      derived_params=None,
                                      var_refs=None,
                                      update_code=None,
                                      extra_global_params=None,
                                      custom_body=None):
    """This helper function creates a custom CustomUpdate class.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name -- name of the new class
    Keyword args:
    param_names -- list of strings with param names of the model
    var_name_types -- list of tuples of strings with variable names and
                      types of the variable
    derived_params -- list of tuples, where the first member is string
                      with name of the derived parameter and the second
                      should be a functor returned by create_dpf_class
    var_refs -- list of tuples of strings with names and types of the
                variables referenced by this custom update
    update_code -- string with the update code
    extra_global_params -- list of pairs of strings with names and types of
                           additional parameters
    custom_body -- dictionary with additional attributes and methods
                   of the new class

    Raises:
    ValueError if custom_body is neither a dict nor None
    """
    if not isinstance(custom_body, dict) and custom_body is not None:
        raise ValueError("custom_body must be an instance of dict or None")
    body = {}
    if update_code is not None:
        body["get_update_code"] = lambda self: dedent(update_code)
    if extra_global_params is not None:
        body["get_extra_global_params"] = \
            lambda self: EGPVector([EGP(egp[0], egp[1])
                                    for egp in extra_global_params])
    if var_refs is not None:
        body["get_var_refs"] = \
            lambda self: VarRefVector([VarRef(*v)
                                       for v in var_refs])
    if custom_body is not None:
        body.update(custom_body)
    return create_custom_model_class(
        class_name, genn_wrapper.CustomUpdateModels.Custom, param_names,
        var_name_types, derived_params, body)
def create_custom_model_class(class_name, base, param_names, var_name_types,
                              derived_params, custom_body):
    """This helper function completes a custom model class creation.
    This part is common for all model classes and is nearly useless on its
    own unless you specify custom_body.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_postsynaptic_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name -- name of the new class
    base -- base class
    param_names -- list of strings with param names of the model
    var_name_types -- list of pairs of strings with variable names and
                      types of the model
    derived_params -- list of pairs, where the first member is string with
                      name of the derived parameter and the second should
                      be a functor returned by create_dpf_class
    custom_body -- dictionary with attributes and methods of the new class
    """
    def init(self):
        base.__init__(self)

    attributes = {"__init__": init}
    if param_names is not None:
        attributes["get_param_names"] = \
            lambda self: StringVector(param_names)
    if var_name_types is not None:
        attributes["get_vars"] = \
            lambda self: VarVector([Var(*vn) for vn in var_name_types])
    if derived_params is not None:
        attributes["get_derived_params"] = \
            lambda self: DerivedParamVector(
                [DerivedParam(param[0], make_dpf(param[1]))
                 for param in derived_params])
    if custom_body is not None:
        attributes.update(custom_body)
    # Build the new type and return an *instance* of it
    return type(class_name, (base,), attributes)()
def create_dpf_class(dp_func):
    """Helper function to create derived parameter function class
    Args:
    dp_func -- a function which computes the derived parameter and takes
               two args "pars" (vector of double) and "dt" (double)
    """
    base = genn_wrapper.Snippet.DerivedParamFunc

    def init(self):
        base.__init__(self)

    def call(self, pars, dt):
        # Delegate the actual computation to the wrapped callable
        return dp_func(pars, dt)

    return type("", (base,), {"__init__": init, "__call__": call})
def create_cmlf_class(cml_func):
    """Helper function to create function class for calculating sizes of
    matrices initialised with sparse connectivity initialisation snippet
    Args:
    cml_func -- a function which computes the length and takes
                three args "num_pre" (unsigned int), "num_post"
                (unsigned int) and "pars" (vector of double)
    """
    base = genn_wrapper.InitSparseConnectivitySnippet.CalcMaxLengthFunc

    def init(self):
        base.__init__(self)

    def call(self, num_pre, num_post, pars):
        # Delegate the actual computation to the wrapped callable
        return cml_func(num_pre, num_post, pars)

    return type("", (base,), {"__init__": init, "__call__": call})
def create_cksf_class(cks_func):
    """Helper function to create function class for calculating sizes
    of kernels from connectivity initialiser parameters
    Args:
    cks_func -- a function which computes the kernel size and takes
                one arg "pars" (vector of double)
    """
    base = genn_wrapper.InitSparseConnectivitySnippet.CalcKernelSizeFunc

    def init(self):
        base.__init__(self)

    def call(self, pars):
        # Delegate the actual computation to the wrapped callable
        return cks_func(pars)

    return type("", (base,), {"__init__": init, "__call__": call})
def create_custom_init_var_snippet_class(class_name, param_names=None,
                                         derived_params=None,
                                         var_init_code=None,
                                         extra_global_params=None,
                                         custom_body=None):
    """This helper function creates a custom InitVarSnippet class.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_postsynaptic_class
    create_custom_current_source_class
    create_custom_sparse_connect_init_snippet_class
    Args:
    class_name -- name of the new class
    Keyword args:
    param_names -- list of strings with param names of the model
    derived_params -- list of pairs, where the first member is string with
                      name of the derived parameter and the second MUST be
                      an instance of the pygenn.genn_wrapper.DerivedParamFunc
                      class
    var_init_code -- string with the variable initialization code
    extra_global_params -- list of pairs of strings with names and
                           types of additional parameters
    custom_body -- dictionary with additional attributes and methods of
                   the new class

    Raises:
    ValueError if custom_body is neither a dict nor None
    """
    if custom_body is not None and not isinstance(custom_body, dict):
        raise ValueError("custom_body must be an instance of dict or None")

    body = {}
    if var_init_code is not None:
        body["get_code"] = lambda self: dedent(var_init_code)
    if extra_global_params is not None:
        body["get_extra_global_params"] = lambda self: EGPVector(
            [EGP(egp[0], egp[1]) for egp in extra_global_params])
    if custom_body is not None:
        body.update(custom_body)
    # Variable snippets have no state variables of their own, hence the
    # None passed for var_name_types.
    return create_custom_model_class(
        class_name, genn_wrapper.InitVarSnippet.Custom, param_names,
        None, derived_params, body)
def create_custom_sparse_connect_init_snippet_class(class_name,
                                                    param_names=None,
                                                    derived_params=None,
                                                    row_build_code=None,
                                                    row_build_state_vars=None,
                                                    col_build_code=None,
                                                    col_build_state_vars=None,
                                                    calc_max_row_len_func=None,
                                                    calc_max_col_len_func=None,
                                                    calc_kernel_size_func=None,
                                                    extra_global_params=None,
                                                    custom_body=None):
    """This helper function creates a custom
    InitSparseConnectivitySnippet class.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_postsynaptic_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    Args:
    class_name -- name of the new class
    Keyword args:
    param_names -- list of strings with param names of the model
    derived_params -- list of pairs, where the first member is string
                      with name of the derived parameter and the
                      second MUST be an instance of a class which
                      inherits from pygenn.genn_wrapper.DerivedParamFunc
    row_build_code -- string with row building initialization code
    row_build_state_vars -- list of (name, type, init) tuples of state
                            variables used across the row building loop
    col_build_code -- string with column building initialization code
    col_build_state_vars -- list of (name, type, init) tuples of state
                            variables used across the column building loop
    calc_max_row_len_func -- instance of class inheriting from
                             CalcMaxLengthFunc used to calculate maximum
                             row length of synaptic matrix
    calc_max_col_len_func -- instance of class inheriting from
                             CalcMaxLengthFunc used to calculate maximum
                             col length of synaptic matrix
    calc_kernel_size_func -- instance of class inheriting from
                             CalcKernelSizeFunc used to calculate kernel
                             dimensions
    extra_global_params -- list of pairs of strings with names and
                           types of additional parameters
    custom_body -- dictionary with additional attributes and
                   methods of the new class

    Raises:
    ValueError if custom_body is neither a dict nor None
    """
    if custom_body is not None and not isinstance(custom_body, dict):
        raise ValueError("custom_body must be an instance of dict or None")

    body = {}
    if row_build_code is not None:
        body["get_row_build_code"] = lambda self: dedent(row_build_code)
    if row_build_state_vars is not None:
        body["get_row_build_state_vars"] = lambda self: ParamValVector(
            [ParamVal(sv[0], sv[1], sv[2]) for sv in row_build_state_vars])
    if col_build_code is not None:
        body["get_col_build_code"] = lambda self: dedent(col_build_code)
    if col_build_state_vars is not None:
        body["get_col_build_state_vars"] = lambda self: ParamValVector(
            [ParamVal(sv[0], sv[1], sv[2]) for sv in col_build_state_vars])
    if calc_max_row_len_func is not None:
        body["get_calc_max_row_length_func"] = lambda self: make_cmlf(
            calc_max_row_len_func)
    if calc_max_col_len_func is not None:
        body["get_calc_max_col_length_func"] = lambda self: make_cmlf(
            calc_max_col_len_func)
    if calc_kernel_size_func is not None:
        body["get_calc_kernel_size_func"] = lambda self: make_cksf(
            calc_kernel_size_func)
    if extra_global_params is not None:
        body["get_extra_global_params"] = lambda self: EGPVector(
            [EGP(egp[0], egp[1]) for egp in extra_global_params])
    if custom_body is not None:
        body.update(custom_body)
    # Connectivity snippets have no state variables, hence None for
    # var_name_types.
    return create_custom_model_class(
        class_name, genn_wrapper.InitSparseConnectivitySnippet.Custom,
        param_names, None, derived_params, body)
def create_custom_toeplitz_connect_init_snippet_class(class_name,
                                                      param_names=None,
                                                      derived_params=None,
                                                      diagonal_build_code=None,
                                                      diagonal_build_state_vars=None,
                                                      calc_max_row_len_func=None,
                                                      calc_kernel_size_func=None,
                                                      extra_global_params=None,
                                                      custom_body=None):
    """This helper function creates a custom
    InitToeplitzConnectivitySnippet class.
    See also:
    create_custom_neuron_class
    create_custom_weight_update_class
    create_custom_postsynaptic_class
    create_custom_current_source_class
    create_custom_init_var_snippet_class
    Args:
    class_name -- name of the new class
    Keyword args:
    param_names -- list of strings with param names of the model
    derived_params -- list of pairs, where the first member is string
                      with name of the derived parameter and the
                      second MUST be an instance of a class which
                      inherits from pygenn.genn_wrapper.DerivedParamFunc
    diagonal_build_code -- string with diagonal building initialization
                           code
    diagonal_build_state_vars -- list of (name, type, init) tuples of
                                 state variables used across the diagonal
                                 building loop
    calc_max_row_len_func -- instance of class inheriting from
                             CalcMaxLengthFunc used to calculate maximum
                             row length of synaptic matrix
    calc_kernel_size_func -- instance of class inheriting from
                             CalcKernelSizeFunc used to calculate kernel
                             dimensions
    extra_global_params -- list of pairs of strings with names and
                           types of additional parameters
    custom_body -- dictionary with additional attributes and
                   methods of the new class

    Raises:
    ValueError if custom_body is neither a dict nor None
    """
    if custom_body is not None and not isinstance(custom_body, dict):
        raise ValueError("custom_body must be an instance of dict or None")

    body = {}
    if diagonal_build_code is not None:
        body["get_diagonal_build_code"] = lambda self: dedent(
            diagonal_build_code)
    if diagonal_build_state_vars is not None:
        body["get_diagonal_build_state_vars"] = lambda self: ParamValVector(
            [ParamVal(sv[0], sv[1], sv[2])
             for sv in diagonal_build_state_vars])
    if calc_max_row_len_func is not None:
        body["get_calc_max_row_length_func"] = lambda self: make_cmlf(
            calc_max_row_len_func)
    if calc_kernel_size_func is not None:
        body["get_calc_kernel_size_func"] = lambda self: make_cksf(
            calc_kernel_size_func)
    if extra_global_params is not None:
        body["get_extra_global_params"] = lambda self: EGPVector(
            [EGP(egp[0], egp[1]) for egp in extra_global_params])
    if custom_body is not None:
        body.update(custom_body)
    # Connectivity snippets have no state variables, hence None for
    # var_name_types.
    return create_custom_model_class(
        class_name, genn_wrapper.InitToeplitzConnectivitySnippet.Custom,
        param_names, None, derived_params, body)
|
genn-team/genn
|
pygenn/genn_model.py
|
Python
|
gpl-2.0
| 78,477
|
[
"NEURON"
] |
81f054a976bd86229712a46e649b84cd53b5b9dbe07c99e7034bd10c5c7ddd76
|
"""
Module responsible for translating variant data into GA4GH native
objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import random
import hashlib
import pysam
import ga4gh.protocol as protocol
import ga4gh.exceptions as exceptions
import ga4gh.datamodel as datamodel
def convertVCFPhaseset(vcfPhaseset):
    """
    Parses the VCF phaseset string
    """
    # Both a missing value (None) and the VCF "missing" marker "." map to
    # the default phaseset "*".
    if vcfPhaseset is None or vcfPhaseset == ".":
        return "*"
    return vcfPhaseset
def convertVCFGenotype(vcfGenotype, vcfPhaseset):
    """
    Parses the VCF genotype and VCF phaseset strings.

    Returns a (genotype, phaseset) pair where genotype is a list of
    allele indices ([-1] when the genotype is missing) and phaseset is
    None when no genotype string was supplied.
    """
    phaseset = None
    if vcfGenotype is not None:
        # Phased genotypes use "|" as the allele separator, unphased "/".
        delim = "|" if "|" in vcfGenotype else "/"
        phaseset = convertVCFPhaseset(vcfPhaseset)
        if "." in vcfGenotype:
            # Any missing allele marks the whole genotype as unknown.
            genotype = [-1]
        else:
            # Use an explicit list so the result is a concrete list under
            # Python 3 as well; the original map() call returns a lazy
            # iterator there, breaking callers that index or serialize it.
            genotype = [int(allele) for allele in vcfGenotype.split(delim)]
    else:
        genotype = [-1]
    return genotype, phaseset
class CallSet(datamodel.DatamodelObject):
    """
    Class representing a CallSet. A CallSet basically represents the
    metadata associated with a single VCF sample column.
    """
    compoundIdClass = datamodel.CallSetCompoundId

    def toProtocolElement(self):
        """
        Returns the representation of this CallSet as the corresponding
        ProtocolElement.
        """
        parent = self.getParentContainer()
        localId = self.getLocalId()
        gaCallSet = protocol.CallSet()
        gaCallSet.id = self.getId()
        # Timestamps are inherited from the owning variant set.
        gaCallSet.created = parent.getCreationTime()
        gaCallSet.updated = parent.getUpdatedTime()
        # The local id doubles as both the display name and the sample id.
        gaCallSet.name = localId
        gaCallSet.sampleId = localId
        gaCallSet.variantSetIds = [parent.getId()]
        return gaCallSet

    def getSampleName(self):
        """
        Returns the sample name for this CallSet.
        """
        return self.getLocalId()
class AbstractVariantSet(datamodel.DatamodelObject):
    """
    An abstract base class of a variant set
    """
    compoundIdClass = datamodel.VariantSetCompoundId

    def __init__(self, parentContainer, localId):
        super(AbstractVariantSet, self).__init__(parentContainer, localId)
        # Call sets are indexed three ways: by id, by sample name, and in
        # insertion order.
        self._callSetIdMap = {}
        self._callSetNameMap = {}
        self._callSetIds = []
        self._creationTime = None
        self._updatedTime = None
        self._referenceSetId = ""

    def getCreationTime(self):
        """
        Returns the creation time for this variant set.
        """
        return self._creationTime

    def getUpdatedTime(self):
        """
        Returns the time this variant set was last updated.
        """
        return self._updatedTime

    def addCallSet(self, sampleName):
        """
        Adds a CallSet for the specified sample name.
        """
        newCallSet = CallSet(self, sampleName)
        newCallSetId = newCallSet.getId()
        self._callSetIdMap[newCallSetId] = newCallSet
        self._callSetNameMap[sampleName] = newCallSet
        self._callSetIds.append(newCallSetId)

    def getCallSets(self):
        """
        Returns the list of CallSets in this VariantSet, in the order
        they were added.
        """
        return [self._callSetIdMap[callSetId]
                for callSetId in self._callSetIds]

    def getNumCallSets(self):
        """
        Returns the number of CallSets in this variant set.
        """
        return len(self._callSetIds)

    def getCallSetByName(self, name):
        """
        Returns a CallSet with the specified name, or raises a
        CallSetNameNotFoundException if it does not exist.
        """
        if name in self._callSetNameMap:
            return self._callSetNameMap[name]
        raise exceptions.CallSetNameNotFoundException(name)

    def getCallSetByIndex(self, index):
        """
        Returns the CallSet at the specified index in this VariantSet.
        """
        return self._callSetIdMap[self._callSetIds[index]]

    def getCallSet(self, id_):
        """
        Returns a CallSet with the specified id, or raises a
        CallSetNotFoundException if it does not exist.
        """
        if id_ in self._callSetIdMap:
            return self._callSetIdMap[id_]
        raise exceptions.CallSetNotFoundException(id_)

    def toProtocolElement(self):
        """
        Converts this VariantSet into its GA4GH protocol equivalent.
        """
        gaVariantSet = protocol.VariantSet()
        gaVariantSet.id = self.getId()
        gaVariantSet.name = self.getLocalId()
        gaVariantSet.datasetId = self.getParentContainer().getId()
        gaVariantSet.referenceSetId = self._referenceSetId
        gaVariantSet.metadata = self.getMetadata()
        return gaVariantSet

    def getNumVariants(self):
        """
        Returns the number of variants contained in this VariantSet.
        """
        raise NotImplementedError()

    def _createGaVariant(self):
        """
        Convenience method building a GA Variant pre-populated with the
        fields common to every variant in this variant set.
        """
        gaVariant = protocol.Variant()
        gaVariant.variantSetId = self.getId()
        gaVariant.created = self._creationTime
        gaVariant.updated = self._updatedTime
        return gaVariant

    def getVariantId(self, gaVariant):
        """
        Returns an ID string suitable for the specified GA Variant
        object in this variant set.
        """
        digest = self.hashVariant(gaVariant)
        compoundId = datamodel.VariantCompoundId(
            self.getCompoundId(), gaVariant.referenceName,
            gaVariant.start, digest)
        return str(compoundId)

    def getCallSetId(self, sampleName):
        """
        Returns the callSetId for the specified sampleName in this
        VariantSet.
        """
        return str(datamodel.CallSetCompoundId(
            self.getCompoundId(), sampleName))

    @classmethod
    def hashVariant(cls, gaVariant):
        """
        Produces an MD5 hash of the reference and alternate bases to
        uniquely identify the ga variant object.
        """
        hashInput = gaVariant.referenceBases + str(
            tuple(gaVariant.alternateBases))
        return hashlib.md5(hashInput).hexdigest()
class SimulatedVariantSet(AbstractVariantSet):
    """
    A variant set that doesn't derive from a data store.
    Used mostly for testing.
    """
    def __init__(
            self, parentContainer, localId, randomSeed=1, numCalls=1,
            variantDensity=1):
        super(SimulatedVariantSet, self).__init__(parentContainer, localId)
        self._randomSeed = randomSeed
        self._numCalls = numCalls
        for callIndex in range(numCalls):
            self.addCallSet("simCallSet_{}".format(callIndex))
        self._variantDensity = variantDensity
        timestamp = protocol.convertDatetime(datetime.datetime.now())
        self._creationTime = timestamp
        self._updatedTime = timestamp

    def getNumVariants(self):
        """
        Simulated variant sets report no stored variants.
        """
        return 0

    def getMetadata(self):
        """
        Returns the metadata for this simulated variant set.
        """
        # TODO Add simulated metadata.
        return []

    def getVariant(self, compoundId):
        """
        Regenerates the variant identified by the specified compound id
        by reseeding the generator with the variant's position.
        """
        position = int(compoundId.start)
        rng = random.Random()
        rng.seed(self._randomSeed + position)
        return self.generateVariant(
            compoundId.referenceName, position, rng)

    def getVariants(self, referenceName, startPosition, endPosition,
                    callSetIds=None):
        """
        Yields simulated variants over [startPosition, endPosition),
        emitting one at each position with probability variantDensity.
        """
        rng = random.Random()
        rng.seed(self._randomSeed)
        position = startPosition
        while position < endPosition:
            if rng.random() < self._variantDensity:
                # Reseed per position so the same variant is produced no
                # matter which range was queried.
                rng.seed(self._randomSeed + position)
                yield self.generateVariant(referenceName, position, rng)
            position += 1

    def generateVariant(self, referenceName, position, randomNumberGenerator):
        """
        Generate a random variant for the specified position using the
        specified random number generator. This generator should be seeded
        with a value that is unique to this position so that the same
        variant will always be produced regardless of the order it is
        generated in.
        """
        variant = self._createGaVariant()
        variant.names = []
        variant.referenceName = referenceName
        variant.start = position
        variant.end = position + 1  # SNPs only for now
        bases = ["A", "C", "G", "T"]
        refBase = randomNumberGenerator.choice(bases)
        variant.referenceBases = refBase
        altBase = randomNumberGenerator.choice(
            [candidate for candidate in bases if candidate != refBase])
        variant.alternateBases = [altBase]
        variant.calls = []
        for callSet in self.getCallSets():
            call = protocol.Call()
            call.callSetId = callSet.getId()
            # for now, the genotype is either [0,1], [1,1] or [1,0] with
            # equal probability; probably will want to do something more
            # sophisticated later.
            call.genotype = randomNumberGenerator.choice(
                [[0, 1], [1, 0], [1, 1]])
            # TODO What is a reasonable model for generating these
            # likelihoods? Are these log-scaled? Spec does not say.
            call.genotypeLikelihood = [-100, -100, -100]
            variant.calls.append(call)
        variant.id = self.getVariantId(variant)
        return variant
def _encodeValue(value):
if isinstance(value, (list, tuple)):
return [str(v) for v in value]
else:
return [str(value)]
# Unique sentinel retained for callers that need an "absent value" marker.
_nothing = object()


def isEmptyIter(it):
    """Return True iff the iterator is empty or exhausted"""
    # NOTE: consumes one element from a non-empty iterator.
    try:
        next(it)
    except StopIteration:
        return True
    return False
class HtslibVariantSet(datamodel.PysamDatamodelMixin, AbstractVariantSet):
"""
Class representing a single variant set backed by a directory of indexed
VCF or BCF files.
"""
    def __init__(self, parentContainer, localId, dataDir, backend):
        """
        Creates a variant set backed by the indexed VCF/BCF files found
        in the specified data directory.
        """
        super(HtslibVariantSet, self).__init__(parentContainer, localId)
        self._dataDir = dataDir
        self._setAccessTimes(dataDir)
        # Maps reference (chromosome) names to the file containing their
        # records; populated while scanning the data files below.
        self._chromFileMap = {}
        self._metadata = None
        self._scanDataFiles(dataDir, ['*.bcf', '*.vcf.gz'])
def _updateMetadata(self, variantFile):
"""
Updates the metadata for his variant set based on the specified
variant file, and ensures that it is consistent with already
existing metadata.
"""
metadata = self._getMetadataFromVcf(variantFile)
if self._metadata is None:
self._metadata = metadata
else:
if self._metadata != metadata:
raise exceptions.InconsistentMetaDataException(
variantFile.filename)
    def getNumVariants(self):
        """
        Returns the total number of variants in this VariantSet.
        """
        # TODO How do we get the number of records in a VariantFile?
        # Placeholder until a record count can be obtained; callers
        # currently receive 0 for file-backed variant sets.
        return 0
def _updateCallSetIds(self, variantFile):
"""
Updates the call set IDs based on the specified variant file.
"""
# If this is the first file, we add in the samples. If not, we check
# for consistency.
if len(self._callSetIdMap) == 0:
for sample in variantFile.header.samples:
self.addCallSet(sample)
else:
callSetIds = set([
self.getCallSetId(sample)
for sample in variantFile.header.samples])
if callSetIds != set(self._callSetIdMap.keys()):
raise exceptions.InconsistentCallSetIdException(
variantFile.filename)
    def openFile(self, filename):
        """
        Opens the specified VCF/BCF file as a pysam VariantFile.
        """
        return pysam.VariantFile(filename)
def _addDataFile(self, filename):
varFile = self.openFile(filename)
if varFile.index is None:
raise exceptions.NotIndexedException(filename)
for chrom in varFile.index:
# Unlike Tabix indices, CSI indices include all contigs defined
# in the BCF header. Thus we must test each one to see if
# records exist or else they are likely to trigger spurious
# overlapping errors.
chrom, _, _ = self.sanitizeVariantFileFetch(chrom)
if not isEmptyIter(varFile.fetch(chrom)):
if chrom in self._chromFileMap:
raise exceptions.OverlappingVcfException(filename, chrom)
self._updateMetadata(varFile)
self._updateCallSetIds(varFile)
self._chromFileMap[chrom] = filename
varFile.close()
    def _convertGaCall(self, recordId, name, pysamCall, genotypeData):
        """
        Converts the specified pysam call into a GA4GH Call object.

        recordId     -- id of the enclosing variant record (currently
                        unused here)
        name         -- sample name; must correspond to a known CallSet
        pysamCall    -- the pysam per-sample (FORMAT) data for this call
        genotypeData -- raw genotype string parsed from the VCF sample
                        column; workaround until pysam exposes phaseset
        """
        compoundId = self.getCallSetId(name)
        callSet = self.getCallSet(compoundId)
        call = protocol.Call()
        call.callSetId = callSet.getId()
        call.callSetName = callSet.getSampleName()
        call.sampleId = callSet.getSampleName()
        # TODO:
        # NOTE: THE FOLLOWING TWO LINES IS NOT THE INTENDED IMPLEMENTATION,
        ###########################################
        call.phaseset = None
        call.genotype, call.phaseset = convertVCFGenotype(
            genotypeData, call.phaseset)
        ###########################################
        # THEY SHOULD BE REPLACED BY THE FOLLOWING, ONCE NEW PYSAM
        # RELEASE SUPPORTS phaseset. AS WELL AS REMOVING genotypeData
        # FROM THE FUNCTION CALL
        ###########################################
        # call.genotype = list(pysamCall.allele_indices)
        # call.phaseset = pysamCall.phaseset
        ###########################################
        call.genotypeLikelihood = []
        # Copy remaining FORMAT fields: GL becomes genotypeLikelihood,
        # GT was already handled above, everything else goes into info.
        for key, value in pysamCall.iteritems():
            if key == 'GL' and value is not None:
                call.genotypeLikelihood = list(value)
            elif key != 'GT':
                call.info[key] = _encodeValue(value)
        return call
    def convertVariant(self, record, callSetIds):
        """
        Converts the specified pysam variant record into a GA4GH Variant
        object. Only calls for the specified list of callSetIds will
        be included.
        """
        variant = self._createGaVariant()
        variant.referenceName = record.contig
        if record.id is not None:
            # VCF allows multiple semicolon-separated names in the ID column
            variant.names = record.id.split(';')
        variant.start = record.start  # 0-based inclusive
        variant.end = record.stop  # 0-based exclusive
        variant.referenceBases = record.ref
        if record.alts is not None:
            variant.alternateBases = list(record.alts)
        # record.filter and record.qual are also available, when supported
        # by GAVariant.
        for key, value in record.info.iteritems():
            if value is not None:
                variant.info[key] = _encodeValue(value)
        # NOTE: THE LABELED LINES SHOULD BE REMOVED ONCE PYSAM SUPPORTS
        # phaseset
        # The "REMOVAL" lines re-parse the raw VCF sample columns from the
        # record's string form to recover genotype/phaseset information
        # that pysam does not yet expose.
        sampleData = record.__str__().split()[9:]  # REMOVAL
        variant.calls = []
        sampleIterator = 0  # REMOVAL
        for name, call in record.samples.iteritems():
            if self.getCallSetId(name) in callSetIds:
                genotypeData = sampleData[sampleIterator].split(
                    ":")[0]  # REMOVAL
                variant.calls.append(self._convertGaCall(
                    record.id, name, call, genotypeData))  # REPLACE
            sampleIterator += 1  # REMOVAL
        variant.id = self.getVariantId(variant)
        return variant
    def getVariant(self, compoundId):
        """
        Returns the variant identified by the specified compound ID.

        Fetches the single-position window [start, start + 1) from the
        backing variant file and returns the record whose hash matches
        the compound ID's md5.

        :raises exceptions.ObjectNotFoundException: if the reference name
            is unknown or no matching record exists at that position.
        """
        if compoundId.referenceName in self._chromFileMap:
            varFileName = self._chromFileMap[compoundId.referenceName]
        else:
            raise exceptions.ObjectNotFoundException(compoundId)
        start = int(compoundId.start)
        referenceName, startPosition, endPosition = \
            self.sanitizeVariantFileFetch(
                compoundId.referenceName, start, start + 1)
        cursor = self.getFileHandle(varFileName).fetch(
            referenceName, startPosition, endPosition)
        for record in cursor:
            variant = self.convertVariant(record, self._callSetIds)
            if (record.start == start and
                    compoundId.md5 == self.hashVariant(variant)):
                return variant
            elif record.start > start:
                # Records come back position-sorted, so once we are past
                # `start` the variant cannot be found.
                raise exceptions.ObjectNotFoundException()
        raise exceptions.ObjectNotFoundException(compoundId)
    def getVariants(self, referenceName, startPosition, endPosition,
                    callSetIds=None):
        """
        Returns an iterator over the specified variants. The parameters
        correspond to the attributes of a GASearchVariantsRequest object.

        If ``callSetIds`` is None, all call sets in this variant set are
        used; otherwise every requested ID must belong to this variant set.
        If ``referenceName`` has no backing file, nothing is yielded.
        """
        if callSetIds is None:
            callSetIds = self._callSetIds
        else:
            # Validate the requested call sets up front.
            for callSetId in callSetIds:
                if callSetId not in self._callSetIds:
                    raise exceptions.CallSetNotInVariantSetException(
                        callSetId, self.getId())
        if referenceName in self._chromFileMap:
            varFileName = self._chromFileMap[referenceName]
            referenceName, startPosition, endPosition = \
                self.sanitizeVariantFileFetch(
                    referenceName, startPosition, endPosition)
            cursor = self.getFileHandle(varFileName).fetch(
                referenceName, startPosition, endPosition)
            for record in cursor:
                yield self.convertVariant(record, callSetIds)
    def getMetadata(self):
        """Returns the list of VariantSetMetadata objects for this set."""
        return self._metadata
    def _getMetadataFromVcf(self, varFile):
        """
        Builds the list of VariantSetMetadata objects from the header of
        the specified pysam variant file: one entry for the VCF version
        plus one per FORMAT/INFO field (except FORMAT.GT).
        """
        # All the metadata is available via each varFile.header, including:
        #    records: header records
        #    version: VCF version
        #    samples -- not immediately needed
        #    contigs -- not immediately needed
        #    filters -- not immediately needed
        #    info
        #    formats
        def buildMetadata(
                key, type="String", number="1", value="", id="",
                description=""): # All input are strings
            # Helper to populate a single VariantSetMetadata object.
            metadata = protocol.VariantSetMetadata()
            metadata.key = key
            metadata.value = value
            metadata.id = id
            metadata.type = type
            metadata.number = number
            metadata.description = description
            return metadata
        ret = []
        header = varFile.header
        ret.append(buildMetadata(key="version", value=header.version))
        formats = header.formats.items()
        infos = header.info.items()
        # TODO: currently ALT field is not implemented through pysam
        # NOTE: contigs field is different between vcf files,
        # so it's not included in metadata
        # NOTE: filters is not included in metadata unless needed
        for prefix, content in [("FORMAT", formats), ("INFO", infos)]:
            for contentKey, value in content:
                attrs = dict(value.header.attrs)
                # TODO: refactor description at next pysam release
                # since description will be implemented as a member of
                # VariantMetadata
                description = attrs.get('Description', '').strip('"')
                key = "{0}.{1}".format(prefix, value.name)
                # Genotypes (GT) are handled separately, so skip them here.
                if key != "FORMAT.GT":
                    ret.append(buildMetadata(
                        key=key, type=value.type,
                        number="{}".format(value.number),
                        description=description))
        return ret
|
ekalosak/server
|
ga4gh/datamodel/variants.py
|
Python
|
apache-2.0
| 19,752
|
[
"pysam"
] |
6cc8f6c13ebfb35a5fb3b261f29d90e536dace6ffafb4c0f2d55a2cad92dc9c4
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python
# ---
# # Riemannian Optimization for Inference in MoG models
# The Mixture of Gaussians (MoG) model assumes that datapoints $\mathbf{x}_i\in\mathbb{R}^d$ follow a distribution described by the following probability density function:
#
# $p(\mathbf{x}) = \sum_{m=1}^M \pi_m p_\mathcal{N}(\mathbf{x};\mathbf{\mu}_m,\mathbf{\Sigma}_m)$ where $\pi_m$ is the probability that the data point belongs to the $m^\text{th}$ mixture component and $p_\mathcal{N}(\mathbf{x};\mathbf{\mu}_m,\mathbf{\Sigma}_m)$ is the probability density function of a multivariate Gaussian distribution with mean $\mathbf{\mu}_m \in \mathbb{R}^d$ and psd covariance matrix $\mathbf{\Sigma}_m \in \{\mathbf{M}\in\mathbb{R}^{d\times d}: \mathbf{M}\succeq 0\}$.
#
# As an example consider the mixture of three Gaussians with means
# $\mathbf{\mu}_1 = \begin{bmatrix} -4 \\ 1 \end{bmatrix}$,
# $\mathbf{\mu}_2 = \begin{bmatrix} 0 \\ 0 \end{bmatrix}$ and
# $\mathbf{\mu}_3 = \begin{bmatrix} 2 \\ -1 \end{bmatrix}$, covariances
# $\mathbf{\Sigma}_1 = \begin{bmatrix} 3 & 0 \\ 0 & 1 \end{bmatrix}$,
# $\mathbf{\Sigma}_2 = \begin{bmatrix} 1 & 1 \\ 1 & 3 \end{bmatrix}$ and
# $\mathbf{\Sigma}_3 = \begin{bmatrix} 0.5 & 0 \\ 0 & 0.5 \end{bmatrix}$
# and mixture probability vector $\boldsymbol{\pi}=\left[0.1, 0.6, 0.3\right]^\top$.
# Let's generate $N=1000$ samples of that MoG model and scatter plot the samples:
# +
import autograd.numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
# %matplotlib inline
# Number of data points
N = 1000
# Dimension of each data point
D = 2
# Number of clusters
K = 3
# Ground-truth mixture weights, means and covariances.
pi = [0.1, 0.6, 0.3]
mu = [np.array([-4, 1]), np.array([0, 0]), np.array([2, -1])]
Sigma = [
    np.array([[3, 0], [0, 1]]),
    np.array([[1, 1.0], [1, 3]]),
    0.5 * np.eye(2),
]
# Draw a component index per sample according to the mixture weights.
components = np.random.choice(K, size=N, p=pi)
samples = np.zeros((N, D))
# For each component, generate all needed samples
for k in range(K):
    # indices of current component in X
    indices = k == components
    # number of those occurrences
    n_k = indices.sum()
    if n_k > 0:
        samples[indices, :] = np.random.multivariate_normal(
            mu[k], Sigma[k], n_k
        )
# Scatter plot the samples, coloured by generating component.
colors = ["r", "g", "b", "c", "m"]
for k in range(K):
    indices = k == components
    plt.scatter(
        samples[indices, 0],
        samples[indices, 1],
        alpha=0.4,
        color=colors[k % K],
    )
plt.axis("equal")
plt.show()
# -
# Given a data sample the de facto standard method to infer the parameters is the [expectation maximisation](https://en.wikipedia.org/wiki/Expectation-maximization_algorithm) (EM) algorithm that, in alternating so-called E and M steps, maximises the log-likelihood of the data.
# In [arXiv:1506.07677](http://arxiv.org/pdf/1506.07677v1.pdf) Hosseini and Sra propose Riemannian optimisation as a powerful counterpart to EM. Importantly, they introduce a reparameterisation that leaves local optima of the log-likelihood unchanged while resulting in a geodesically convex optimisation problem over a product manifold $\prod_{m=1}^M\mathcal{PD}^{(d+1)\times(d+1)}$ of manifolds of $(d+1)\times(d+1)$ symmetric positive definite matrices.
# The proposed method is on par with EM and shows less variability in running times.
#
# The reparameterised optimisation problem for augmented data points $\mathbf{y}_i=[\mathbf{x}_i^\top, 1]^\top$ can be stated as follows:
#
# $$\min_{(\mathbf{S}_1, ..., \mathbf{S}_m, \boldsymbol{\nu}) \in \mathcal{D}}
# -\sum_{n=1}^N\log\left(
# \sum_{m=1}^M \frac{\exp(\nu_m)}{\sum_{k=1}^M\exp(\nu_k)}
# q_\mathcal{N}(\mathbf{y}_n;\mathbf{S}_m)
# \right)$$
#
# where
#
# * $\mathcal{D} := \left(\prod_{m=1}^M \mathcal{PD}^{(d+1)\times(d+1)}\right)\times\mathbb{R}^{M-1}$ is the search space
# * $\mathcal{PD}^{(d+1)\times(d+1)}$ is the manifold of symmetric positive definite
# $(d+1)\times(d+1)$ matrices
# * $\mathcal{\nu}_m = \log\left(\frac{\alpha_m}{\alpha_M}\right), \ m=1, ..., M-1$ and $\nu_M=0$
# * $q_\mathcal{N}(\mathbf{y}_n;\mathbf{S}_m) =
# 2\pi\exp\left(\frac{1}{2}\right)
# |\operatorname{det}(\mathbf{S}_m)|^{-\frac{1}{2}}(2\pi)^{-\frac{d+1}{2}}
# \exp\left(-\frac{1}{2}\mathbf{y}_i^\top\mathbf{S}_m^{-1}\mathbf{y}_i\right)$
#
# **Optimisation problems like this can easily be solved using Pymanopt – even without the need to differentiate the cost function manually!**
#
# So let's infer the parameters of our toy example by Riemannian optimisation using Pymanopt:
# +
import sys
sys.path.insert(0, "../..")
from autograd.scipy.special import logsumexp
import pymanopt
from pymanopt import Problem
from pymanopt.manifolds import Euclidean, Product, SymmetricPositiveDefinite
from pymanopt.solvers import SteepestDescent
# (1) Instantiate the manifold
manifold = Product([SymmetricPositiveDefinite(D + 1, k=K), Euclidean(K - 1)])
# (2) Define cost function
# The parameters must be contained in a list theta.
@pymanopt.function.Autograd(manifold)
def cost(S, v):
    """Negative log-likelihood of the reparameterised MoG model.

    S stacks the K augmented (D+1)x(D+1) SPD matrices; v holds the K-1
    free log-weight parameters (the last one is pinned to 0).
    """
    # Unpack parameters
    nu = np.append(v, 0)
    logdetS = np.expand_dims(np.linalg.slogdet(S)[1], 1)
    # Augment each sample with a trailing 1: y_i = [x_i^T, 1]^T.
    y = np.concatenate([samples.T, np.ones((1, N))], axis=0)
    # Calculate log_q
    y = np.expand_dims(y, 0)
    # 'Probability' of y belonging to each cluster
    log_q = -0.5 * (np.sum(y * np.linalg.solve(S, y), axis=1) + logdetS)
    # Softmax of nu yields the mixture weights alpha.
    alpha = np.exp(nu)
    alpha = alpha / np.sum(alpha)
    alpha = np.expand_dims(alpha, 1)
    # Per-sample log-likelihood via a numerically stable logsumexp.
    loglikvec = logsumexp(np.log(alpha) + log_q, axis=0)
    return -np.sum(loglikvec)
problem = Problem(manifold=manifold, cost=cost, verbosity=1)
# (3) Instantiate a Pymanopt solver
solver = SteepestDescent()
# let Pymanopt do the rest
Xopt = solver.solve(problem)
# -
# Once Pymanopt has finished the optimisation we can obtain the inferred parameters as follows:
def _split_params(S):
    """Recover (mean, covariance) from one augmented SPD block S."""
    mean = S[0:2, 2:3]
    covariance = S[:2, :2] - mean @ mean.T
    return mean, covariance

mu1hat, Sigma1hat = _split_params(Xopt[0][0])
mu2hat, Sigma2hat = _split_params(Xopt[0][1])
mu3hat, Sigma3hat = _split_params(Xopt[0][2])
# Undo the nu-parameterisation: append the pinned 0 and apply softmax.
unnormalized = np.exp(np.concatenate([Xopt[1], [0]], axis=0))
pihat = unnormalized / np.sum(unnormalized)
# And convince ourselves that the inferred parameters are close to the ground truth parameters.
#
# The ground truth parameters $\mathbf{\mu}_1, \mathbf{\Sigma}_1, \mathbf{\mu}_2, \mathbf{\Sigma}_2, \mathbf{\mu}_3, \mathbf{\Sigma}_3, \pi_1, \pi_2, \pi_3$:
print(mu[0])
print(Sigma[0])
print(mu[1])
print(Sigma[1])
print(mu[2])
print(Sigma[2])
print(pi[0])
print(pi[1])
print(pi[2])
# And the inferred parameters $\hat{\mathbf{\mu}}_1, \hat{\mathbf{\Sigma}}_1, \hat{\mathbf{\mu}}_2, \hat{\mathbf{\Sigma}}_2, \hat{\mathbf{\mu}}_3, \hat{\mathbf{\Sigma}}_3, \hat{\pi}_1, \hat{\pi}_2, \hat{\pi}_3$:
# NOTE: component order is arbitrary — the optimiser may permute clusters
# relative to the ground truth listing above.
print(mu1hat)
print(Sigma1hat)
print(mu2hat)
print(Sigma2hat)
print(mu3hat)
print(Sigma3hat)
print(pihat[0])
print(pihat[1])
print(pihat[2])
# Et voilà – this was a brief demonstration of how to do inference for MoG models by performing Manifold optimisation using Pymanopt.
# ## When Things Go Astray
#
# A well-known problem when fitting parameters of a MoG model is that one Gaussian may collapse onto a single data point resulting in singular covariance matrices (cf. e.g. p. 434 in Bishop, C. M. "Pattern Recognition and Machine Learning." 2001). This problem can be avoided by the following heuristic: if a component's covariance matrix is close to being singular we reset its mean and covariance matrix. Using Pymanopt this can be accomplished by using an appropriate line search rule (based on [LineSearchBackTracking](https://github.com/pymanopt/pymanopt/blob/master/pymanopt/solvers/linesearch.py)) -- here we demonstrate this approach:
class LineSearchMoG:
    """
    Back-tracking line-search that checks for close to singular matrices.

    Behaves like a standard Armijo back-tracking line-search, except that
    when the objective raises a LinAlgError (a component has collapsed to
    a rank-deficient matrix) the offending component(s) are re-initialised
    at a random manifold point instead of aborting the optimisation.
    """
    def __init__(
        self,
        contraction_factor=0.5,
        optimism=2,
        suff_decr=1e-4,
        maxiter=25,
        initial_stepsize=1,
    ):
        # Armijo back-tracking hyper-parameters.
        self.contraction_factor = contraction_factor
        self.optimism = optimism
        self.suff_decr = suff_decr
        self.maxiter = maxiter
        self.initial_stepsize = initial_stepsize
        # Objective value at the previously accepted point; used to guess
        # a good initial step size on subsequent searches.
        self._oldf0 = None
    def search(self, objective, manifold, x, d, f0, df0):
        """
        Function to perform backtracking line-search.
        Arguments:
            - objective
                objective function to optimise
            - manifold
                manifold to optimise over
            - x
                starting point on the manifold
            - d
                tangent vector at x (descent direction)
            - f0
                objective value at x
            - df0
                directional derivative at x along d
        Returns:
            - stepsize
                norm of the vector retracted to reach newx from x
            - newx
                next iterate suggested by the line-search
        """
        # Compute the norm of the search direction
        norm_d = manifold.norm(x, d)
        if self._oldf0 is not None:
            # Pick initial step size based on where we were last time.
            alpha = 2 * (f0 - self._oldf0) / df0
            # Look a little further
            alpha *= self.optimism
        else:
            alpha = self.initial_stepsize / norm_d
        alpha = float(alpha)
        # Make the chosen step and compute the cost there.
        newx, newf, reset = self._newxnewf(x, alpha * d, objective, manifold)
        step_count = 1
        # Backtrack while the Armijo criterion is not satisfied
        while (
            newf > f0 + self.suff_decr * alpha * df0
            and step_count <= self.maxiter
            and not reset
        ):
            # Reduce the step size
            alpha = self.contraction_factor * alpha
            # and look closer down the line
            newx, newf, reset = self._newxnewf(
                x, alpha * d, objective, manifold
            )
            step_count = step_count + 1
        # If we got here without obtaining a decrease, we reject the step.
        if newf > f0 and not reset:
            alpha = 0
            newx = x
        stepsize = alpha * norm_d
        self._oldf0 = f0
        return stepsize, newx
    def _newxnewf(self, x, d, objective, manifold):
        """Retract x along d and evaluate the objective at the new point.

        Returns (point, value, reset) where reset is True when singular
        components were detected and replaced by random re-initialisations.
        """
        newx = manifold.retr(x, d)
        try:
            newf = objective(newx)
        except np.linalg.LinAlgError:
            # Find the rank-deficient (collapsed) components ...
            replace = np.asarray(
                [
                    np.linalg.matrix_rank(newx[0][k, :, :])
                    != newx[0][0, :, :].shape[0]
                    for k in range(newx[0].shape[0])
                ]
            )
            # ... and reset them to a random point on the manifold.
            x[0][replace, :, :] = manifold.rand()[0][replace, :, :]
            return x, objective(x), True
        return newx, newf, False
|
pymanopt/pymanopt
|
examples/notebooks/mixture_of_gaussians.py
|
Python
|
bsd-3-clause
| 11,003
|
[
"Gaussian"
] |
6f6c55d3421f2514021c4dd7e47e0825e4599ebc52ca9d2a91184f049fd5a3a0
|
import abc
from collections import OrderedDict
from ctypes import POINTER, c_void_p, c_int, sizeof
from functools import reduce
from itertools import product
from operator import mul
from sympy import Integer
from devito.data import OWNED, HALO, NOPAD, LEFT, CENTER, RIGHT, default_allocator
from devito.ir.equations import DummyEq
from devito.ir.iet import (Call, Callable, Conditional, Expression, ExpressionBundle,
AugmentedExpression, Iteration, List, Prodder, Return,
make_efunc, FindNodes, Transformer)
from devito.ir.support import AFFINE, PARALLEL
from devito.mpi import MPI
from devito.symbolics import (Byref, CondNe, FieldFromPointer, FieldFromComposite,
IndexedPointer, Macro, subs_op_args)
from devito.tools import OrderedSet, dtype_to_mpitype, dtype_to_ctype, flatten, generator
from devito.types import Array, Dimension, Symbol, LocalObject, CompositeObject
__all__ = ['HaloExchangeBuilder', 'mpi_registry']
class HaloExchangeBuilder(object):
    """
    Build IET-based routines to implement MPI halo exchange.

    This is an abstract factory: ``__new__`` dispatches to the concrete
    builder registered in ``mpi_registry`` for the requested mode, and the
    ``_make_*``/``_call_*`` template methods below define the interface
    that concrete builders implement.
    """
    def __new__(cls, mode, **generators):
        obj = object.__new__(mpi_registry[mode])
        # Unique name generators
        obj._gen_msgkey = generators.get('msg', generator())
        obj._gen_commkey = generators.get('comm', generator())
        obj._gen_compkey = generators.get('comp', generator())
        # Caches, so that structurally identical Callables are built once.
        obj._cache_halo = OrderedDict()
        obj._cache_dims = OrderedDict()
        obj._objs = OrderedSet()
        obj._regions = OrderedDict()
        obj._msgs = OrderedDict()
        obj._efuncs = []
        return obj
    @property
    def efuncs(self):
        # All Callables generated so far.
        return self._efuncs
    @property
    def msgs(self):
        # All non-trivial MPIMsg objects generated so far.
        return [i for i in self._msgs.values() if i is not None]
    @property
    def regions(self):
        # All non-trivial MPIRegion objects generated so far.
        return [i for i in self._regions.values() if i is not None]
    @property
    def objs(self):
        return list(self._objs) + self.msgs + self.regions
    def make(self, hs):
        """
        Construct Callables and Calls implementing distributed-memory halo
        exchange for the HaloSpot ``hs``.
        """
        # Sanity check
        assert all(f.is_Function and f.grid is not None for f in hs.fmapper)
        for f, hse in hs.fmapper.items():
            # Build an MPIMsg, a data structure to be propagated across the
            # various halo exchange routines
            if (f, hse) not in self._msgs:
                key = self._gen_msgkey()
                msg = self._msgs.setdefault((f, hse), self._make_msg(f, hse, key))
            else:
                msg = self._msgs[(f, hse)]
            # Callables for send/recv/wait
            if (f.ndim, hse) not in self._cache_halo:
                self._make_all(f, hse, msg)
        msgs = [self._msgs[(f, hse)] for f, hse in hs.fmapper.items()]
        # Callable for poking the asynchronous progress engine
        key = self._gen_compkey()
        poke = self._make_poke(hs, key, msgs)
        if poke is not None:
            self._efuncs.append(poke)
        # Callable for compute over the CORE region
        callpoke = self._call_poke(poke)
        compute = self._make_compute(hs, key, msgs, callpoke)
        if compute is not None:
            self._efuncs.append(compute)
        # Callable for compute over the OWNED region
        region = self._make_region(hs, key)
        region = self._regions.setdefault(hs, region)
        callcompute = self._call_compute(hs, compute, msgs)
        remainder = self._make_remainder(hs, key, callcompute, region)
        if remainder is not None:
            self._efuncs.append(remainder)
        # Now build up the HaloSpot body, with explicit Calls to the constructed Callables
        haloupdates = []
        halowaits = []
        for i, (f, hse) in enumerate(hs.fmapper.items()):
            msg = self._msgs[(f, hse)]
            haloupdate, halowait = self._cache_halo[(f.ndim, hse)]
            haloupdates.append(self._call_haloupdate(haloupdate.name, f, hse, msg))
            if halowait is not None:
                halowaits.append(self._call_halowait(halowait.name, f, hse, msg))
        body = []
        body.append(HaloUpdateList(body=haloupdates))
        if callcompute is not None:
            body.append(callcompute)
        body.append(HaloWaitList(body=halowaits))
        if remainder is not None:
            body.append(self._call_remainder(remainder))
        return List(body=body)
    @abc.abstractmethod
    def _make_region(self, hs, key):
        """
        Construct an MPIRegion describing the HaloSpot's OWNED DataRegion.
        """
        return
    @abc.abstractmethod
    def _make_msg(self, f, hse, key):
        """
        Construct an MPIMsg, to propagate information such as buffers, sizes,
        offsets, ..., across the MPI Call stack.
        """
        return
    @abc.abstractmethod
    def _make_all(self, f, hse, msg):
        """
        Construct the Callables required to perform a halo update given a
        DiscreteFunction and a set of halo requirements.
        """
        return
    @abc.abstractmethod
    def _make_copy(self, f, hse, key, swap=False):
        """
        Construct a Callable performing a copy of:

            * an arbitrary convex region of ``f`` into a contiguous Array, OR
            * if ``swap=True``, a contiguous Array into an arbitrary convex
              region of ``f``.
        """
        return
    @abc.abstractmethod
    def _make_sendrecv(self, f, hse, key, **kwargs):
        """
        Construct a Callable performing, for a given DiscreteFunction, a halo exchange
        along given Dimension and DataSide.
        """
        return
    @abc.abstractmethod
    def _call_sendrecv(self, name, *args, **kwargs):
        """
        Construct a Call to ``sendrecv``, the Callable produced by
        :meth:`_make_sendrecv`.
        """
        return
    @abc.abstractmethod
    def _make_haloupdate(self, f, hse, key, **kwargs):
        """
        Construct a Callable performing, for a given DiscreteFunction, a halo exchange.
        """
        return
    @abc.abstractmethod
    def _call_haloupdate(self, name, f, hse, *args):
        """
        Construct a Call to ``haloupdate``, the Callable produced by
        :meth:`_make_haloupdate`.
        """
        return
    @abc.abstractmethod
    def _make_compute(self, hs, key, *args):
        """
        Construct a Callable performing computation over the CORE region, that is
        the region that does *not* require up-to-date halo values. The Callable
        body will essentially coincide with the HaloSpot body.
        """
        return
    @abc.abstractmethod
    def _call_compute(self, hs, *args):
        """
        Construct a Call to ``compute``, the Callable produced by :meth:`_make_compute`.
        """
        return
    @abc.abstractmethod
    def _make_poke(self, hs, key, msgs):
        """
        Construct a Callable poking the MPI engine for asynchronous progress (e.g.,
        by calling MPI_Test)
        """
        return
    @abc.abstractmethod
    def _call_poke(self, poke):
        """
        Construct a Call to ``poke``, the Callable produced by :meth:`_make_poke`.
        """
        return
    @abc.abstractmethod
    def _make_wait(self, f, hse, key, **kwargs):
        """
        Construct a Callable performing, for a given DiscreteFunction, a wait on
        a halo exchange along given Dimension and DataSide.
        """
        return
    @abc.abstractmethod
    def _make_halowait(self, f, hse, key, **kwargs):
        """
        Construct a Callable performing, for a given DiscreteFunction, a wait on
        a halo exchange.
        """
        return
    @abc.abstractmethod
    def _call_halowait(self, name, f, hse, *args):
        """
        Construct a Call to ``halowait``, the Callable produced by :meth:`_make_halowait`.
        """
        return
    @abc.abstractmethod
    def _make_remainder(self, hs, key, callcompute, *args):
        """
        Construct a Callable performing computation over the OWNED region, that is
        the region requiring up-to-date halo values.
        """
        return
    @abc.abstractmethod
    def _call_remainder(self, remainder):
        """
        Construct a Call to ``remainder``, the Callable produced by
        :meth:`_make_remainder`.
        """
        return
class BasicHaloExchangeBuilder(HaloExchangeBuilder):
    """
    A HaloExchangeBuilder making use of synchronous MPI routines only.

    Generates:

        haloupdate()
        compute()
    """
    def _make_msg(self, f, hse, key):
        # No MPIMsg needed -- everything is conveyed via explicit Call arguments.
        return
    def _make_all(self, f, hse, msg):
        # A dimension-generic twin of `f`, so the generated routines can be
        # reused by any Function sharing the same Dimensions.
        df = f.__class__.__base__(name='a', grid=f.grid, shape=f.shape_global,
                                  dimensions=f.dimensions)
        if f.dimensions not in self._cache_dims:
            key = "".join(str(d) for d in f.dimensions)
            sendrecv = self._make_sendrecv(df, hse, key, msg=msg)
            gather = self._make_copy(df, hse, key)
            scatter = self._make_copy(df, hse, key, swap=True)
            self._cache_dims[f.dimensions] = [sendrecv, gather, scatter]
            self._efuncs.extend([sendrecv, gather, scatter])
        key = self._gen_commkey()
        haloupdate = self._make_haloupdate(df, hse, key, msg=msg)
        halowait = self._make_halowait(df, hse, key, msg=msg)
        # Synchronous scheme: there is nothing to wait on.
        assert halowait is None
        self._cache_halo[(f.ndim, hse)] = (haloupdate, halowait)
        self._efuncs.append(haloupdate)
        self._objs.add(f.grid.distributor._obj_comm)
        self._objs.add(f.grid.distributor._obj_neighborhood)
        return haloupdate, halowait
    def _make_copy(self, f, hse, key, swap=False):
        # Buffer Dimensions span only the non-fixed (exchanged) Dimensions.
        buf_dims = []
        buf_indices = []
        for d in f.dimensions:
            if d not in hse.loc_indices:
                buf_dims.append(Dimension(name='buf_%s' % d.root))
                buf_indices.append(d.root)
        buf = Array(name='buf', dimensions=buf_dims, dtype=f.dtype, padding=0)
        # Symbolic offsets locating the copied region inside `f`.
        f_offsets = []
        f_indices = []
        for d in f.dimensions:
            offset = Symbol(name='o%s' % d.root)
            f_offsets.append(offset)
            f_indices.append(offset + (d.root if d not in hse.loc_indices else 0))
        if swap is False:
            eq = DummyEq(buf[buf_indices], f[f_indices])
            name = 'gather%s' % key
        else:
            eq = DummyEq(f[f_indices], buf[buf_indices])
            name = 'scatter%s' % key
        iet = Expression(eq)
        for i, d in reversed(list(zip(buf_indices, buf_dims))):
            # The -1 below is because an Iteration, by default, generates <=
            iet = Iteration(iet, i, d.symbolic_size - 1, properties=(PARALLEL, AFFINE))
        parameters = [buf] + list(buf.shape) + [f] + f_offsets
        return CopyBuffer(name, iet, parameters)
    def _make_sendrecv(self, f, hse, key, **kwargs):
        comm = f.grid.distributor._obj_comm
        buf_dims = [Dimension(name='buf_%s' % d.root) for d in f.dimensions
                    if d not in hse.loc_indices]
        # Separate gather (send) and scatter (recv) buffers.
        bufg = Array(name='bufg', dimensions=buf_dims, dtype=f.dtype, padding=0)
        bufs = Array(name='bufs', dimensions=buf_dims, dtype=f.dtype, padding=0)
        ofsg = [Symbol(name='og%s' % d.root) for d in f.dimensions]
        ofss = [Symbol(name='os%s' % d.root) for d in f.dimensions]
        fromrank = Symbol(name='fromrank')
        torank = Symbol(name='torank')
        gather = Call('gather%s' % key, [bufg] + list(bufg.shape) + [f] + ofsg)
        scatter = Call('scatter%s' % key, [bufs] + list(bufs.shape) + [f] + ofss)
        # The `gather` is unnecessary if sending to MPI.PROC_NULL
        gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather)
        # The `scatter` must be guarded as we must not alter the halo values along
        # the domain boundary, where the sender is actually MPI.PROC_NULL
        scatter = Conditional(CondNe(fromrank, Macro('MPI_PROC_NULL')), scatter)
        count = reduce(mul, bufs.shape, 1)
        rrecv = MPIRequestObject(name='rrecv')
        rsend = MPIRequestObject(name='rsend')
        recv = IrecvCall([bufs, count, Macro(dtype_to_mpitype(f.dtype)),
                          fromrank, Integer(13), comm, rrecv])
        send = IsendCall([bufg, count, Macro(dtype_to_mpitype(f.dtype)),
                          torank, Integer(13), comm, rsend])
        # Issue the Irecv first, then gather+Isend, then wait on both.
        waitrecv = Call('MPI_Wait', [rrecv, Macro('MPI_STATUS_IGNORE')])
        waitsend = Call('MPI_Wait', [rsend, Macro('MPI_STATUS_IGNORE')])
        iet = List(body=[recv, gather, send, waitsend, waitrecv, scatter])
        parameters = ([f] + list(bufs.shape) + ofsg + ofss + [fromrank, torank, comm])
        return SendRecv(key, iet, parameters, bufg, bufs)
    def _call_sendrecv(self, name, *args, **kwargs):
        return Call(name, flatten(args))
    def _make_haloupdate(self, f, hse, key, **kwargs):
        distributor = f.grid.distributor
        nb = distributor._obj_neighborhood
        comm = distributor._obj_comm
        sendrecv = self._cache_dims[f.dimensions][0]
        fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices}
        # Build a mapper `(dim, side, region) -> (size, ofs)` for `f`. `size` and
        # `ofs` are symbolic objects. This mapper tells what data values should be
        # sent (OWNED) or received (HALO) given dimension and side
        mapper = {}
        for d0, side, region in product(f.dimensions, (LEFT, RIGHT), (OWNED, HALO)):
            if d0 in fixed:
                continue
            sizes = []
            ofs = []
            for d1 in f.dimensions:
                if d1 in fixed:
                    ofs.append(fixed[d1])
                else:
                    meta = f._C_get_field(region if d0 is d1 else NOPAD, d1, side)
                    ofs.append(meta.offset)
                    sizes.append(meta.size)
            mapper[(d0, side, region)] = (sizes, ofs)
        body = []
        for d in f.dimensions:
            if d in fixed:
                continue
            # Names of the right/left neighbour fields in the neighborhood struct.
            name = ''.join('r' if i is d else 'c' for i in distributor.dimensions)
            rpeer = FieldFromPointer(name, nb)
            name = ''.join('l' if i is d else 'c' for i in distributor.dimensions)
            lpeer = FieldFromPointer(name, nb)
            if (d, LEFT) in hse.halos:
                # Sending to left, receiving from right
                lsizes, lofs = mapper[(d, LEFT, OWNED)]
                rsizes, rofs = mapper[(d, RIGHT, HALO)]
                args = [f, lsizes, lofs, rofs, rpeer, lpeer, comm]
                body.append(self._call_sendrecv(sendrecv.name, *args, **kwargs))
            if (d, RIGHT) in hse.halos:
                # Sending to right, receiving from left
                rsizes, rofs = mapper[(d, RIGHT, OWNED)]
                lsizes, lofs = mapper[(d, LEFT, HALO)]
                args = [f, rsizes, rofs, lofs, lpeer, rpeer, comm]
                body.append(self._call_sendrecv(sendrecv.name, *args, **kwargs))
        iet = List(body=body)
        parameters = [f, comm, nb] + list(fixed.values())
        return HaloUpdate(key, iet, parameters)
    def _call_haloupdate(self, name, f, hse, *args):
        comm = f.grid.distributor._obj_comm
        nb = f.grid.distributor._obj_neighborhood
        args = [f, comm, nb] + list(hse.loc_indices.values())
        return HaloUpdateCall(name, flatten(args))
    def _make_compute(self, *args):
        # No separate CORE-region computation in the synchronous scheme.
        return
    def _make_poke(self, *args):
        return
    def _call_poke(self, *args):
        return
    def _call_compute(self, hs, *args):
        return hs.body
    def _make_halowait(self, *args, **kwargs):
        return
    def _call_halowait(self, *args, **kwargs):
        return
    def _make_remainder(self, *args):
        return
    def _call_remainder(self, *args):
        return
class DiagHaloExchangeBuilder(BasicHaloExchangeBuilder):
    """
    Similar to a BasicHaloExchangeBuilder, but communications to diagonal
    neighbours are performed explicitly.

    Generates:

        haloupdate()
        compute()
    """
    def _make_haloupdate(self, f, hse, key, **kwargs):
        distributor = f.grid.distributor
        nb = distributor._obj_neighborhood
        comm = distributor._obj_comm
        sendrecv = self._cache_dims[f.dimensions][0]
        fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices}
        # Only retain the halos required by the Diag scheme
        # Note: `sorted` is only for deterministic code generation
        halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple))
        body = []
        for dims, tosides in halos:
            # OWNED data travels towards `tosides` ...
            mapper = OrderedDict(zip(dims, tosides))
            sizes = [f._C_get_field(OWNED, d, s).size for d, s in mapper.items()]
            torank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb)
            ofsg = [fixed.get(d, f._C_get_field(OWNED, d, mapper.get(d)).offset)
                    for d in f.dimensions]
            # ... while HALO data is received from the flipped sides.
            mapper = OrderedDict(zip(dims, [i.flip() for i in tosides]))
            fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb)
            ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset)
                    for d in f.dimensions]
            kwargs['haloid'] = len(body)
            body.append(self._call_sendrecv(sendrecv.name, f, sizes, ofsg, ofss,
                                            fromrank, torank, comm, **kwargs))
        iet = List(body=body)
        parameters = [f, comm, nb] + list(fixed.values())
        return HaloUpdate(key, iet, parameters)
class OverlapHaloExchangeBuilder(DiagHaloExchangeBuilder):
"""
A DiagHaloExchangeBuilder making use of asynchronous MPI routines to implement
computation-communication overlap.
Generates:
haloupdate()
compute_core()
halowait()
remainder()
"""
def _make_msg(self, f, hse, key):
# Only retain the halos required by the Diag scheme
halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple))
return MPIMsg('msg%d' % key, f, halos)
def _make_all(self, f, hse, msg):
df = f.__class__.__base__(name='a', grid=f.grid, shape=f.shape_global,
dimensions=f.dimensions)
if f.dimensions not in self._cache_dims:
key = "".join(str(d) for d in f.dimensions)
sendrecv = self._make_sendrecv(df, hse, key, msg=msg)
gather = self._make_copy(df, hse, key)
wait = self._make_wait(df, hse, key, msg=msg)
scatter = self._make_copy(df, hse, key, swap=True)
self._cache_dims[f.dimensions] = [sendrecv, gather, wait, scatter]
self._efuncs.extend([sendrecv, gather, wait, scatter])
key = self._gen_commkey()
haloupdate = self._make_haloupdate(df, hse, key, msg=msg)
halowait = self._make_halowait(df, hse, key, msg=msg)
self._cache_halo[(f.ndim, hse)] = (haloupdate, halowait)
self._efuncs.extend([haloupdate, halowait])
self._objs.add(f.grid.distributor._obj_comm)
self._objs.add(f.grid.distributor._obj_neighborhood)
return haloupdate, halowait
def _make_sendrecv(self, f, hse, key, msg=None):
comm = f.grid.distributor._obj_comm
bufg = FieldFromPointer(msg._C_field_bufg, msg)
bufs = FieldFromPointer(msg._C_field_bufs, msg)
ofsg = [Symbol(name='og%s' % d.root) for d in f.dimensions]
fromrank = Symbol(name='fromrank')
torank = Symbol(name='torank')
sizes = [FieldFromPointer('%s[%d]' % (msg._C_field_sizes, i), msg)
for i in range(len(f._dist_dimensions))]
gather = Call('gather%s' % key, [bufg] + sizes + [f] + ofsg)
# The `gather` is unnecessary if sending to MPI.PROC_NULL
gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather)
count = reduce(mul, sizes, 1)
rrecv = Byref(FieldFromPointer(msg._C_field_rrecv, msg))
rsend = Byref(FieldFromPointer(msg._C_field_rsend, msg))
recv = IrecvCall([bufs, count, Macro(dtype_to_mpitype(f.dtype)),
fromrank, Integer(13), comm, rrecv])
send = IsendCall([bufg, count, Macro(dtype_to_mpitype(f.dtype)),
torank, Integer(13), comm, rsend])
iet = List(body=[recv, gather, send])
parameters = ([f] + ofsg + [fromrank, torank, comm, msg])
return SendRecv(key, iet, parameters, bufg, bufs)
def _call_sendrecv(self, name, *args, msg=None, haloid=None):
# Drop `sizes` as this HaloExchangeBuilder conveys them through `msg`
# Drop `ofss` as this HaloExchangeBuilder only needs them in `wait()`,
# to collect and scatter the result of an MPI_Irecv
f, _, ofsg, _, fromrank, torank, comm = args
msg = Byref(IndexedPointer(msg, haloid))
return Call(name, [f] + ofsg + [fromrank, torank, comm, msg])
def _make_haloupdate(self, f, hse, key, msg=None):
iet = super(OverlapHaloExchangeBuilder, self)._make_haloupdate(f, hse, key,
msg=msg)
iet = iet._rebuild(parameters=iet.parameters + (msg,))
return iet
def _call_haloupdate(self, name, f, hse, msg):
call = super(OverlapHaloExchangeBuilder, self)._call_haloupdate(name, f, hse)
call = call._rebuild(arguments=call.arguments + (msg,))
return call
def _make_compute(self, hs, key, *args):
if hs.body.is_Call:
return None
else:
return make_efunc('compute%d' % key, hs.body, hs.arguments)
def _call_compute(self, hs, compute, *args):
if compute is None:
assert hs.body.is_Call
return hs.body._rebuild(dynamic_args_mapper=hs.omapper.core)
else:
return compute.make_call(dynamic_args_mapper=hs.omapper.core)
def _make_wait(self, f, hse, key, msg=None):
bufs = FieldFromPointer(msg._C_field_bufs, msg)
ofss = [Symbol(name='os%s' % d.root) for d in f.dimensions]
fromrank = Symbol(name='fromrank')
sizes = [FieldFromPointer('%s[%d]' % (msg._C_field_sizes, i), msg)
for i in range(len(f._dist_dimensions))]
scatter = Call('scatter%s' % key, [bufs] + sizes + [f] + ofss)
# The `scatter` must be guarded as we must not alter the halo values along
# the domain boundary, where the sender is actually MPI.PROC_NULL
scatter = Conditional(CondNe(fromrank, Macro('MPI_PROC_NULL')), scatter)
rrecv = Byref(FieldFromPointer(msg._C_field_rrecv, msg))
waitrecv = Call('MPI_Wait', [rrecv, Macro('MPI_STATUS_IGNORE')])
rsend = Byref(FieldFromPointer(msg._C_field_rsend, msg))
waitsend = Call('MPI_Wait', [rsend, Macro('MPI_STATUS_IGNORE')])
iet = List(body=[waitsend, waitrecv, scatter])
parameters = ([f] + ofss + [fromrank, msg])
return Callable('wait_%s' % key, iet, 'void', parameters, ('static',))
def _make_halowait(self, f, hse, key, msg=None):
    """
    Build the `halowait` Callable: one call to the cached `wait` routine per
    diagonal halo, each indexing its own entry in the `msg` array.
    """
    nb = f.grid.distributor._obj_neighborhood
    # The `wait` efunc previously generated and cached for these dimensions
    wait = self._cache_dims[f.dimensions][2]
    fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices}
    # Only retain the halos required by the Diag scheme
    # Note: `sorted` is only for deterministic code generation
    halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple))
    body = []
    for dims, tosides in halos:
        # Sides are flipped: the data we wait for comes *from* the neighbor
        # opposite to the one we send to
        mapper = OrderedDict(zip(dims, [i.flip() for i in tosides]))
        fromrank = FieldFromPointer(''.join(i.name[0] for i in mapper.values()), nb)
        ofss = [fixed.get(d, f._C_get_field(HALO, d, mapper.get(d)).offset)
                for d in f.dimensions]
        # `len(body)` doubles as the running halo index into the msg array
        msgi = Byref(IndexedPointer(msg, len(body)))
        body.append(Call(wait.name, [f] + ofss + [fromrank, msgi]))
    iet = List(body=body)
    parameters = [f] + list(fixed.values()) + [nb, msg]
    return Callable('halowait%d' % key, iet, 'void', parameters, ('static',))
def _call_halowait(self, name, f, hse, msg):
    """Build the call to the generated `halowait` routine."""
    nb = f.grid.distributor._obj_neighborhood
    return HaloWaitCall(name, [f] + list(hse.loc_indices.values()) + [nb, msg])
def _make_remainder(self, hs, key, callcompute, *args):
    """
    Build the `remainder` elemental function: re-invoke the compute Call once
    per OWNED sub-region, remapping the iteration bounds each time.
    """
    assert callcompute.is_Call
    body = [callcompute._rebuild(dynamic_args_mapper=i) for _, i in hs.omapper.owned]
    return make_efunc('remainder%d' % key, body)
def _call_remainder(self, remainder):
    """Build the call to the generated `remainder` routine."""
    efunc = remainder.make_call()
    return RemainderCall(efunc.name, efunc.arguments)
class Overlap2HaloExchangeBuilder(OverlapHaloExchangeBuilder):
    """
    A OverlapHaloExchangeBuilder with reduced Call overhead and increased code
    readability, achieved by supplying more values via Python-land-produced
    structs, which replace explicit Call arguments.
    Generates:
        haloupdate()
        compute_core()
        halowait()
        remainder()
    """
    def _make_region(self, hs, key):
        # One MPIRegion per HaloSpot, carrying the OWNED sub-regions still
        # to be computed after the halo exchange
        return MPIRegion('reg', key, hs.arguments, hs.omapper.owned)
    def _make_msg(self, f, hse, key):
        # Only retain the halos required by the Diag scheme
        halos = sorted(i for i in hse.halos if isinstance(i.dim, tuple))
        return MPIMsgEnriched('msg%d' % key, f, halos)
    def _make_all(self, f, hse, msg):
        # `df` mirrors `f`'s grid/shape/dimensions under a neutral name,
        # presumably so the generated efuncs are reusable across Functions
        # sharing the same dimensions -- TODO confirm
        df = f.__class__.__base__(name='a', grid=f.grid, shape=f.shape_global,
                                  dimensions=f.dimensions)
        if f.dimensions not in self._cache_dims:
            # Note: unlike the less smarter builders (superclasses), here we can
            # cache `haloupdate` and `halowait` on the `f.dimensions` too, as the
            # `hse` information is carried by `msg`, which is an MPIMsgEnriched
            key = self._gen_commkey()
            gather = self._make_copy(df, hse, key)
            scatter = self._make_copy(df, hse, key, swap=True)
            haloupdate = self._make_haloupdate(df, hse, key, msg=msg)
            halowait = self._make_halowait(df, hse, key, msg=msg)
            self._cache_dims[f.dimensions] = [gather, scatter, haloupdate, halowait]
            self._efuncs.extend([gather, scatter, haloupdate, halowait])
        else:
            _, _, haloupdate, halowait = self._cache_dims[f.dimensions]
        self._cache_halo[(f.ndim, hse)] = (haloupdate, halowait)
        self._objs.add(f.grid.distributor._obj_comm)
        return haloupdate, halowait
    def _make_haloupdate(self, f, hse, key, msg=None):
        """
        Build a `haloupdate` Callable that loops over all peers: for each one,
        gather into the send buffer (unless the peer is MPI_PROC_NULL), then
        post the MPI_Irecv/MPI_Isend pair. Buffer pointers, sizes, offsets and
        ranks are all read from the i-th entry of the `msg` struct array.
        """
        comm = f.grid.distributor._obj_comm
        fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices}
        dim = Dimension(name='i')
        msgi = IndexedPointer(msg, dim)
        bufg = FieldFromComposite(msg._C_field_bufg, msgi)
        bufs = FieldFromComposite(msg._C_field_bufs, msgi)
        fromrank = FieldFromComposite(msg._C_field_from, msgi)
        torank = FieldFromComposite(msg._C_field_to, msgi)
        sizes = [FieldFromComposite('%s[%d]' % (msg._C_field_sizes, i), msgi)
                 for i in range(len(f._dist_dimensions))]
        ofsg = [FieldFromComposite('%s[%d]' % (msg._C_field_ofsg, i), msgi)
                for i in range(len(f._dist_dimensions))]
        # Fixed (loc_indices) dimensions use the passed-in offset Symbols;
        # the remaining ones consume the struct-carried offsets in order
        ofsg = [fixed.get(d) or ofsg.pop(0) for d in f.dimensions]
        # The `gather` is unnecessary if sending to MPI.PROC_NULL
        gather = Call('gather%s' % key, [bufg] + sizes + [f] + ofsg)
        gather = Conditional(CondNe(torank, Macro('MPI_PROC_NULL')), gather)
        # Make Irecv/Isend
        count = reduce(mul, sizes, 1)
        rrecv = Byref(FieldFromComposite(msg._C_field_rrecv, msgi))
        rsend = Byref(FieldFromComposite(msg._C_field_rsend, msgi))
        recv = IrecvCall([bufs, count, Macro(dtype_to_mpitype(f.dtype)),
                          fromrank, Integer(13), comm, rrecv])
        send = IsendCall([bufg, count, Macro(dtype_to_mpitype(f.dtype)),
                          torank, Integer(13), comm, rsend])
        # The -1 below is because an Iteration, by default, generates <=
        ncomms = Symbol(name='ncomms')
        iet = Iteration([recv, gather, send], dim, ncomms - 1)
        parameters = ([f, comm, msg, ncomms]) + list(fixed.values())
        return HaloUpdate(key, iet, parameters)
    def _call_haloupdate(self, name, f, hse, msg):
        """Build the call to `haloupdate`; `msg.npeers` bounds the peer loop."""
        comm = f.grid.distributor._obj_comm
        args = [f, comm, msg, msg.npeers] + list(hse.loc_indices.values())
        return HaloUpdateCall(name, args)
    def _make_sendrecv(self, *args):
        # No standalone sendrecv: its job is folded into `haloupdate`
        return
    def _call_sendrecv(self, *args):
        return
    def _make_halowait(self, f, hse, key, msg=None):
        """
        Build a `halowait` Callable that loops over all peers: MPI_Wait on
        both requests, then scatter the received data (unless the sender is
        MPI_PROC_NULL), with all metadata read from the `msg` struct array.
        """
        fixed = {d: Symbol(name="o%s" % d.root) for d in hse.loc_indices}
        dim = Dimension(name='i')
        msgi = IndexedPointer(msg, dim)
        bufs = FieldFromComposite(msg._C_field_bufs, msgi)
        fromrank = FieldFromComposite(msg._C_field_from, msgi)
        sizes = [FieldFromComposite('%s[%d]' % (msg._C_field_sizes, i), msgi)
                 for i in range(len(f._dist_dimensions))]
        ofss = [FieldFromComposite('%s[%d]' % (msg._C_field_ofss, i), msgi)
                for i in range(len(f._dist_dimensions))]
        ofss = [fixed.get(d) or ofss.pop(0) for d in f.dimensions]
        # The `scatter` must be guarded as we must not alter the halo values along
        # the domain boundary, where the sender is actually MPI.PROC_NULL
        scatter = Call('scatter%s' % key, [bufs] + sizes + [f] + ofss)
        scatter = Conditional(CondNe(fromrank, Macro('MPI_PROC_NULL')), scatter)
        rrecv = Byref(FieldFromComposite(msg._C_field_rrecv, msgi))
        waitrecv = Call('MPI_Wait', [rrecv, Macro('MPI_STATUS_IGNORE')])
        rsend = Byref(FieldFromComposite(msg._C_field_rsend, msgi))
        waitsend = Call('MPI_Wait', [rsend, Macro('MPI_STATUS_IGNORE')])
        # The -1 below is because an Iteration, by default, generates <=
        ncomms = Symbol(name='ncomms')
        iet = Iteration([waitsend, waitrecv, scatter], dim, ncomms - 1)
        parameters = ([f] + list(fixed.values()) + [msg, ncomms])
        return Callable('halowait%d' % key, iet, 'void', parameters, ('static',))
    def _call_halowait(self, name, f, hse, msg):
        """Build the call to `halowait`."""
        args = [f] + list(hse.loc_indices.values()) + [msg, msg.npeers]
        return HaloWaitCall(name, args)
    def _make_wait(self, *args):
        # No standalone wait: its job is folded into `halowait`
        return
    def _call_wait(self, *args):
        return
    def _make_remainder(self, hs, key, callcompute, region):
        """
        Build the `remainder` elemental function: a single Iteration over the
        OWNED sub-regions, with bounds and arguments read from the i-th entry
        of the `region` struct array (instead of one unrolled Call each).
        """
        assert callcompute.is_Call
        dim = Dimension(name='i')
        region_i = IndexedPointer(region, dim)
        dynamic_args_mapper = {}
        for i in hs.arguments:
            if i.is_Dimension:
                # Dimensions remap both their min and max bounds
                dynamic_args_mapper[i] = (FieldFromComposite(i.min_name, region_i),
                                          FieldFromComposite(i.max_name, region_i))
            else:
                dynamic_args_mapper[i] = (FieldFromComposite(i.name, region_i),)
        iet = callcompute._rebuild(dynamic_args_mapper=dynamic_args_mapper)
        # The -1 below is because an Iteration, by default, generates <=
        iet = Iteration(iet, dim, region.nregions - 1)
        return make_efunc('remainder%d' % key, iet)
class Diag2HaloExchangeBuilder(Overlap2HaloExchangeBuilder):
    """
    A DiagHaloExchangeBuilder which uses preallocated buffers for comms
    as in Overlap2HaloExchange builder.
    Generates:
        haloupdate()
        halowait()
        compute()
    """
    def _make_compute(self, hs, key, *args):
        # No computation/communication overlap in this scheme
        return
    def _call_compute(self, hs, compute, *args):
        return
    # NOTE(review): `_make_remainder` is aliased to the *compute* maker
    # inherited by Overlap2 (which wraps `hs.body` in an efunc, or returns
    # None for Call bodies) -- presumably so the whole body is emitted as the
    # "remainder"; confirm this aliasing is intentional.
    _make_remainder = Overlap2HaloExchangeBuilder._make_compute
class FullHaloExchangeBuilder(Overlap2HaloExchangeBuilder):
    """
    An Overlap2HaloExchangeBuilder which generates explicit Calls to MPI_Test
    poking the MPI runtime to advance communication while computing.
    Generates:
        haloupdate()
        compute_core()
        halowait()
        remainder()
    """
    def _make_compute(self, hs, key, msgs, callpoke):
        """
        As in the superclass, but with a call to the "poke" routine injected
        before every ExpressionBundle, so communication is advanced while
        computing.
        """
        if hs.body.is_Call:
            return None
        else:
            # Prepend `callpoke` to each ExpressionBundle in the body
            mapper = {i: List(body=[callpoke, i]) for i in
                      FindNodes(ExpressionBundle).visit(hs.body)}
            iet = Transformer(mapper).visit(hs.body)
            return make_efunc('compute%d' % key, iet, hs.arguments)
    def _make_poke(self, hs, key, msgs):
        """
        Build the `pokempi` elemental function: MPI_Test every pending
        send/recv request of every msg, returning a flag that is non-zero
        only if *all* tested requests have completed.
        """
        lflag = Symbol(name='lflag')
        gflag = Symbol(name='gflag')
        # Init flags
        body = [Expression(DummyEq(lflag, 0)),
                Expression(DummyEq(gflag, 1))]
        # For each msg, build an Iteration calling MPI_Test on all peers
        for msg in msgs:
            dim = Dimension(name='i')
            msgi = IndexedPointer(msg, dim)
            rrecv = Byref(FieldFromComposite(msg._C_field_rrecv, msgi))
            testrecv = Call('MPI_Test', [rrecv, Byref(lflag), Macro('MPI_STATUS_IGNORE')])
            rsend = Byref(FieldFromComposite(msg._C_field_rsend, msgi))
            testsend = Call('MPI_Test', [rsend, Byref(lflag), Macro('MPI_STATUS_IGNORE')])
            # Accumulate: gflag &= lflag
            update = AugmentedExpression(DummyEq(gflag, lflag), '&')
            body.append(Iteration([testsend, update, testrecv, update],
                                  dim, msg.npeers - 1))
        body.append(Return(gflag))
        return make_efunc('pokempi%d' % key, List(body=body), retval='int')
    def _call_poke(self, poke):
        """Wrap the poke routine in a single-threaded, periodic Prodder."""
        return Prodder(poke.name, poke.parameters, single_thread=True, periodic=True)
# Maps the user-facing `mpi` configuration value to the HaloExchangeBuilder
# in charge of generating the halo-exchange code (True aliases 'basic')
mpi_registry = {
    True: BasicHaloExchangeBuilder,
    'basic': BasicHaloExchangeBuilder,
    'diag': DiagHaloExchangeBuilder,
    'diag2': Diag2HaloExchangeBuilder,
    'overlap': OverlapHaloExchangeBuilder,
    'overlap2': Overlap2HaloExchangeBuilder,
    'full': FullHaloExchangeBuilder
}
# Callable sub-hierarchy
class MPICallable(Callable):
    """A `static void` Callable, the base of all generated MPI routines."""
    def __init__(self, name, body, parameters):
        super(MPICallable, self).__init__(name, body, 'void', parameters, ('static',))
class CopyBuffer(MPICallable):
    """Marker Callable for the generated gather/scatter copy routines."""
    pass
class SendRecv(MPICallable):
    """The generated `sendrecv` routine; keeps handles on its two buffers."""
    def __init__(self, key, body, parameters, bufg, bufs):
        super(SendRecv, self).__init__('sendrecv%s' % key, body, parameters)
        self.bufg = bufg  # gather (send) buffer
        self.bufs = bufs  # scatter (recv) buffer
class HaloUpdate(MPICallable):
    """The generated `haloupdate` routine."""
    def __init__(self, key, body, parameters):
        super(HaloUpdate, self).__init__('haloupdate%s' % key, body, parameters)
# Call sub-hierarchy
class IsendCall(Call):
    """A Call to MPI_Isend."""
    def __init__(self, parameters):
        super(IsendCall, self).__init__('MPI_Isend', parameters)
class IrecvCall(Call):
    """A Call to MPI_Irecv."""
    def __init__(self, parameters):
        super(IrecvCall, self).__init__('MPI_Irecv', parameters)
class MPICall(Call):
    """Root of the MPI-specific Call sub-hierarchy."""
    pass
class HaloUpdateCall(MPICall):
    """Call to a generated haloupdate() routine."""
    pass
class HaloWaitCall(MPICall):
    """Call to a generated halowait() routine."""
    pass
class RemainderCall(MPICall):
    """Call to a generated remainder() routine."""
    pass
class MPIList(List):
    """Root of the MPI-specific List sub-hierarchy."""
    pass
class HaloUpdateList(MPIList):
    """Marker List grouping the nodes of a halo update."""
    pass
class HaloWaitList(MPIList):
    """Marker List grouping the nodes of a halo wait."""
    pass
# Types sub-hierarchy
class MPIStatusObject(LocalObject):
    """A local MPI_Status variable in the generated code."""
    dtype = type('MPI_Status', (c_void_p,), {})
    def __init__(self, name):
        self.name = name
    # Pickling support
    _pickle_args = ['name']
class MPIRequestObject(LocalObject):
    """A local MPI_Request variable in the generated code."""
    dtype = type('MPI_Request', (c_void_p,), {})
    def __init__(self, name):
        self.name = name
    # Pickling support
    _pickle_args = ['name']
class MPIMsg(CompositeObject):
    """
    A C-level array of structs, one entry per halo to exchange, carrying the
    send/recv buffers, their sizes, and the MPI_Request handles of the
    pending Isend/Irecv for a given Function.
    """
    _C_field_bufs = 'bufs'    # scatter (recv) buffer
    _C_field_bufg = 'bufg'    # gather (send) buffer
    _C_field_sizes = 'sizes'  # per-dimension buffer sizes
    _C_field_rrecv = 'rrecv'  # MPI_Request of the pending Irecv
    _C_field_rsend = 'rsend'  # MPI_Request of the pending Isend
    # MPI_Request may be int-sized or pointer-sized depending on the MPI
    # implementation; match whatever mpi4py was built against
    if MPI._sizeof(MPI.Request) == sizeof(c_int):
        c_mpirequest_p = type('MPI_Request', (c_int,), {})
    else:
        c_mpirequest_p = type('MPI_Request', (c_void_p,), {})
    fields = [
        (_C_field_bufs, c_void_p),
        (_C_field_bufg, c_void_p),
        (_C_field_sizes, POINTER(c_int)),
        (_C_field_rrecv, c_mpirequest_p),
        (_C_field_rsend, c_mpirequest_p),
    ]
    def __init__(self, name, function, halos):
        self._function = function
        self._halos = halos
        super(MPIMsg, self).__init__(name, 'msg', self.fields)
        # Required for buffer allocation/deallocation before/after jumping/returning
        # to/from C-land
        self._allocator = default_allocator()
        self._memfree_args = []
    def __del__(self):
        self._C_memfree()
    def _C_memfree(self):
        # Deallocate the MPI buffers
        for i in self._memfree_args:
            self._allocator.free(*i)
        self._memfree_args[:] = []
    def __value_setup__(self, dtype, value):
        # We eventually produce an array of `struct msg` that is as big as
        # the number of peers we have to communicate with
        return (dtype._type_*self.npeers)()
    @property
    def function(self):
        return self._function
    @property
    def halos(self):
        return self._halos
    @property
    def npeers(self):
        # One struct entry per halo
        return len(self._halos)
    def _arg_defaults(self, alias=None):
        """
        Populate each struct entry: compute the buffer size from the halo
        geometry, then allocate the gather/scatter buffers in C-land.
        """
        function = alias or self.function
        for i, halo in enumerate(self.halos):
            entry = self.value[i]
            # Buffer size for this peer
            shape = []
            for dim, side in zip(*halo):
                try:
                    shape.append(getattr(function._size_owned[dim], side.name))
                except AttributeError:
                    assert side is CENTER
                    shape.append(function._size_domain[dim])
            entry.sizes = (c_int*len(shape))(*shape)
            # Allocate the send/recv buffers
            size = reduce(mul, shape)
            ctype = dtype_to_ctype(function.dtype)
            entry.bufg, bufg_memfree_args = self._allocator._alloc_C_libcall(size, ctype)
            entry.bufs, bufs_memfree_args = self._allocator._alloc_C_libcall(size, ctype)
            # The `memfree_args` will be used to deallocate the buffer upon returning
            # from C-land
            self._memfree_args.extend([bufg_memfree_args, bufs_memfree_args])
        return {self.name: self.value}
    def _arg_values(self, args=None, **kwargs):
        # May be overridden by a runtime alias of `function`
        return self._arg_defaults(alias=kwargs.get(self.function.name, self.function))
    def _arg_apply(self, *args, **kwargs):
        # Free the C-allocated buffers once the Operator has run
        self._C_memfree()
    # Pickling support
    _pickle_args = ['name', 'function', 'halos']
class MPIMsgEnriched(MPIMsg):
    """
    An MPIMsg that additionally carries, per entry, the gather/scatter
    offsets and the source/destination ranks, so the generated code does not
    need them as explicit Call arguments.
    """
    _C_field_ofss = 'ofss'      # scatter offsets
    _C_field_ofsg = 'ofsg'      # gather offsets
    _C_field_from = 'fromrank'  # rank we receive from
    _C_field_to = 'torank'      # rank we send to
    fields = MPIMsg.fields + [
        (_C_field_ofss, POINTER(c_int)),
        (_C_field_ofsg, POINTER(c_int)),
        (_C_field_from, c_int),
        (_C_field_to, c_int)
    ]
    def _arg_defaults(self, alias=None):
        """
        As in MPIMsg (buffer allocation), plus fill in the peer ranks and
        the per-dimension gather/scatter offsets for each halo entry.
        """
        super(MPIMsgEnriched, self)._arg_defaults(alias)
        function = alias or self.function
        neighborhood = function.grid.distributor.neighborhood
        for i, halo in enumerate(self.halos):
            entry = self.value[i]
            # `torank` peer + gather offsets
            entry.torank = neighborhood[halo.side]
            ofsg = []
            for dim, side in zip(*halo):
                try:
                    ofsg.append(getattr(function._offset_owned[dim], side.name))
                except AttributeError:
                    assert side is CENTER
                    ofsg.append(function._offset_owned[dim].left)
            entry.ofsg = (c_int*len(ofsg))(*ofsg)
            # `fromrank` peer + scatter offsets
            # The sender sits on the opposite side, hence the flip
            entry.fromrank = neighborhood[tuple(i.flip() for i in halo.side)]
            ofss = []
            for dim, side in zip(*halo):
                try:
                    ofss.append(getattr(function._offset_halo[dim], side.flip().name))
                except AttributeError:
                    assert side is CENTER
                    # Note `_offset_owned`, and not `_offset_halo`, is *not* a bug here.
                    # If it's the CENTER we need, we can't use `_offset_halo[d].left`
                    # as otherwise we would be picking the corner
                    ofss.append(function._offset_owned[dim].left)
            entry.ofss = (c_int*len(ofss))(*ofss)
        return {self.name: self.value}
class MPIRegion(CompositeObject):
    """
    A C-level array of structs, one entry per OWNED sub-region, carrying the
    iteration bounds (min/max per Dimension) and the values of the other
    runtime arguments needed to compute that sub-region.
    """
    def __init__(self, prefix, key, arguments, owned):
        self._prefix = prefix
        self._key = key
        self._owned = owned
        # Sorting for deterministic codegen
        self._arguments = sorted(arguments, key=lambda i: i.name)
        name = "%s%d" % (prefix, key)
        pname = "region%d" % key
        fields = []
        for i in self.arguments:
            if i.is_Dimension:
                # A Dimension contributes both its min and max bound
                fields.append((i.min_name, c_int))
                fields.append((i.max_name, c_int))
            else:
                fields.append((i.name, c_int))
        super(MPIRegion, self).__init__(name, pname, fields)
    def __value_setup__(self, dtype, value):
        # We eventually produce an array of `struct region` that is as big as
        # the number of OWNED sub-regions we have to compute to complete a
        # halo update
        return (dtype._type_*self.nregions)()
    @property
    def arguments(self):
        return self._arguments
    @property
    def prefix(self):
        return self._prefix
    @property
    def key(self):
        return self._key
    @property
    def owned(self):
        return self._owned
    @property
    def nregions(self):
        return len(self.owned)
    def _arg_values(self, args, **kwargs):
        """
        Populate each struct entry by substituting the known runtime `args`
        into the symbolic bounds of the corresponding OWNED sub-region.
        """
        values = self._arg_defaults()
        for i, (_, mapper) in enumerate(self.owned):
            entry = values[self.name][i]
            for a in self.arguments:
                if a.is_Dimension:
                    a_m, a_M = mapper[a]
                    setattr(entry, a.min_name, subs_op_args(a_m, args))
                    setattr(entry, a.max_name, subs_op_args(a_M, args))
                else:
                    try:
                        setattr(entry, a.name, subs_op_args(mapper[a][0], args))
                    except AttributeError:
                        # Already a plain value, no substitution needed
                        setattr(entry, a.name, mapper[a][0])
        return values
    # Pickling support
    _pickle_args = ['prefix', 'key', 'arguments', 'owned']
|
opesci/devito
|
devito/mpi/routines.py
|
Python
|
mit
| 42,676
|
[
"VisIt"
] |
3b4bffe727ea2a34c3b9067489ccf1b685828077602cb1eaa0a8c1b9cdbdfa05
|
# Extract matrices from Gauusian .log files and write them to text.
# Copyright (C) 2013 Joshua J Goings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import numpy as np
import os
import sys
##########################################################
#
# FUNCTIONS
#
##########################################################
def make_symmetric(matrix):
    '''Mirror a lower-triangular matrix into a full symmetric matrix.

    matrix + matrix.T doubles the diagonal, so one copy of it is removed.
    '''
    mirrored = matrix + matrix.T
    return mirrored - np.diag(np.diag(matrix))
def triangle_fill(matrix, istrt, iend, jstrt, count, element):
    '''Fill a triangular wedge of `matrix` from the flat list `element`.

    Rows istrt..iend-1 are filled from column jstrt up to (and including)
    the diagonal, consuming elements starting at index `count`.
    Returns the matrix and the updated element index.
    '''
    for row in range(istrt, iend):
        for col in range(jstrt, row + 1):
            matrix[row, col] = element[count]
            count += 1
    return matrix, count
def block_fill(matrix, istrt, nbf, jstrt, jend, count, element):
    '''Fill a rectangular block of `matrix` from the flat list `element`.

    Rows istrt..nbf-1, columns jstrt..jend-1 are filled row by row,
    consuming elements starting at index `count`.
    Returns the matrix and the updated element index.
    '''
    for row in range(istrt, nbf):
        for col in range(jstrt, jend):
            matrix[row, col] = element[count]
            count += 1
    return matrix, count
def create_matrix(matrix, elements):
    ''' create lower triangular matrix from list of matrix elements
    indexed like so:
        [[0,0,0,...,0],
         [1,2,0,...,0],
         [3,4,5,...,0]]
    `matrix` is an (nbf, nbf) array of zeros, where nbf is the number of
    basis functions; `elements` is a list of matrix elements indexed like
    above, e.g. [0,1,2,3,...]
    Gaussian prints every 5 columns, so the mod5 accounts for this
    '''
    # Fix: nbf used to be read from a module-level global defined elsewhere
    # in the script; it is always the dimension of `matrix`, so derive it
    # locally and remove the hidden global dependence.
    nbf = matrix.shape[0]
    count = 0  # running index into `elements`
    # fill the main block, leaving remainder for triangle fill
    for i in range(0, nbf - nbf % 5, 5):
        matrix, count = triangle_fill(matrix, i, i + 5, i, count, elements)
        matrix, count = block_fill(matrix, i + 5, nbf, i, i + 5, count, elements)
    # finish filling the last triangle bit
    matrix, count = triangle_fill(matrix, nbf - nbf % 5, nbf, nbf - nbf % 5,
                                  count, elements)
    return matrix
'''
Get basis functions,
repulsion energy, set options
'''
# Determine logfile
if len(sys.argv) == 1:
    # Bug fix: the old message asked for "a folder containing .dat files",
    # but the expected argument is a Gaussian09 .log file (see usage below)
    print("Usage: gauss_parse <g09 .log file>")
    sys.exit(1)
g09file = sys.argv[1]
fileName, fileExtension = os.path.splitext(g09file)
# Output directory is the logfile name without its extension
if os.path.exists(fileName):
    sys.exit('Error: directory exists! Delete it and re-run gauss_parse')
else:
    os.makedirs(fileName)
np.set_printoptions(precision=2)
# Extract the number of basis functions from G09 .log file
# First pass over the logfile: detect which sections are present (so we know
# what to write at the end) and pick up nbf, enuc and the electron count
get_overlap = get_KE = get_PE = get_ERI = False
logfile = open(g09file,'r')
for text in logfile:
    words = text.split()
    if all(x in words for x in ['Overlap']):
        get_overlap = True
    if all(x in words for x in ['Kinetic', 'Energy']):
        get_KE = True
    if all(x in words for x in ['Potential', 'Energy']):
        get_PE = True
    if all(x in words for x in ['Dumping','Two-Electron','integrals']):
        get_ERI = True
    if all(x in words for x in ['primitive','gaussians,','basis','functions,']):
        nbf = int(words[0]) # number basis functions
    if all(x in words for x in ['nuclear','repulsion','energy','Hartrees.']):
        enuc = float(words[3]) # nuclear repulstion energy in Hartrees
    if all(x in words for x in ['alpha','beta','electrons']):
        nelec = int(words[0]) + int(words[3]) # number alpha elec + beta elec
logfile.close()
'''
Get and create overlap matrix
'''
logfile = open(g09file,'r')
data = logfile.read()
overlap_matrix = np.zeros((nbf,nbf))
# grab all text between "Overlap ***" and "*** Kinetic"
raw_overlap_string = re.findall(r'Overlap \*\*\*(.*?)\*\*\* Kinetic',data,re.DOTALL)
# Gaussian prints Fortran-style exponents ('D'); convert to Python's 'E'
raw_overlap_string = raw_overlap_string[0].replace('D','E')
raw_overlap_elements = raw_overlap_string.split()
matrix_elements = []
for overlap_value in raw_overlap_elements:
    # Only tokens containing an exponent are matrix elements; this skips
    # the row/column index labels in the printed table
    if 'E' in overlap_value:
        matrix_elements.append(overlap_value)
overlap = create_matrix(overlap_matrix,matrix_elements)
overlap = make_symmetric(overlap)
logfile.close()
'''
Get and create KE matrix
'''
logfile = open(g09file,'r')
data = logfile.read()
KE_matrix = np.zeros((nbf,nbf))
# grab all text between "Kinetic Energy ***" and "Entering OneElI"
# (NOTE(review): the trailing dots in 'OneElI...' are regex wildcards, not a
# literal ellipsis -- confirm against an actual g09 log)
raw_KE_string = re.findall(r'Kinetic Energy \*\*\*(.*?)Entering OneElI...',data,re.DOTALL)
raw_KE_string = raw_KE_string[0].replace('D','E')
raw_KE_elements = raw_KE_string.split()
matrix_elements = []
for KE_value in raw_KE_elements:
    # Keep only exponent-bearing tokens (skip index labels)
    if 'E' in KE_value:
        matrix_elements.append(KE_value)
KE = create_matrix(KE_matrix,matrix_elements)
KE = make_symmetric(KE)
logfile.close()
#print 'Kinetic Energy matrix: \n', KE
'''
Get and create PE matrix
'''
logfile = open(g09file,'r')
data = logfile.read()
PE_matrix = np.zeros((nbf,nbf))
# grab all text between "Potential Energy *****" and "****** Core Hamiltonian"
raw_PE_string = re.findall(r'Potential Energy \*\*\*\*\*(.*?)\*\*\*\*\*\* Core Hamiltonian',data,re.DOTALL)
# Fortran 'D' exponents -> Python 'E'
raw_PE_string = raw_PE_string[0].replace('D','E')
raw_PE_elements = raw_PE_string.split()
matrix_elements = []
for PE_value in raw_PE_elements:
    # Keep only exponent-bearing tokens (skip index labels)
    if 'E' in PE_value:
        matrix_elements.append(PE_value)
PE = create_matrix(PE_matrix,matrix_elements)
PE = make_symmetric(PE)
logfile.close()
#print 'Potential Energy matrix: \n', PE
'''
Get and create table of two electron integrals
'''
logfile = open(g09file,'r')
ERI_list = []
for text in logfile:
    words = text.split()
    # Bug fix: the original condition was
    #   if 'I=' and 'J=' and 'K=' and 'L=' in words:
    # which, by Python's operator rules, only evaluated `'L=' in words`
    # (the non-empty strings 'I='/'J='/'K=' are just truthy). Require all
    # four index tags so non-integral lines can't slip through.
    if all(tag in words for tag in ('I=', 'J=', 'K=', 'L=')):
        # Each line is: I= i J= j K= k L= l Int= value (Fortran 'D' exponent)
        ERI_list.append([int(words[1]),int(words[3]),int(words[5]),int(words[7]),float(words[9].replace('D','E'))])
ERI = np.array(ERI_list)
#print 'Electron repulsion integrals: \n', ERI
logfile.close()
'''
Write to file
'''
# Matrices are written only if their section was detected in the first pass;
# the scalar quantities (enuc, nbf, nelec) are always written
if get_overlap == True:
    np.savetxt(fileName + '/overlap.dat',overlap,fmt='%.8e',delimiter = ' ')
if get_KE == True:
    np.savetxt(fileName + '/kinetic_energy.dat',KE,fmt='%.8e',delimiter = ' ')
if get_PE == True:
    np.savetxt(fileName + '/potential_energy.dat',PE,fmt='%.8e',delimiter = ' ')
if get_ERI == True:
    # One row per integral: i j k l value
    np.savetxt(fileName + '/two_electron_ints.dat',ERI,fmt='%d %d %d %d %.8f',delimiter = ' ')
np.savetxt(fileName + '/nuclear_repulsion.dat',np.array([enuc]),fmt='%.8f')
np.savetxt(fileName + '/number_basis_functions.dat',np.array([nbf]),fmt='%d')
np.savetxt(fileName + '/number_electrons.dat',np.array([nelec]),fmt='%d')
|
jjgoings/gaussian_matrix_parser
|
gauss_parse.py
|
Python
|
gpl-3.0
| 6,566
|
[
"Gaussian"
] |
ebaefb0c78f9ae68ba276347e6a92e27a3fc2df68068b24ac9f09af99620b115
|
from __future__ import division
from rdkit import RDConfig
import unittest
from rdkit.DataManip.Metric import rdMetricMatrixCalc as rdmmc
import numpy
import random
from rdkit import DataStructs
def feq(v1, v2, tol2=1e-4):
    """Return True when v1 and v2 differ by at most tol2."""
    delta = v1 - v2
    return -tol2 <= delta <= tol2
class TestCase(unittest.TestCase):
    """Tests for rdMetricMatrixCalc distance/similarity matrix generation."""
    def setUp(self):
        pass
    def test0DistsArray(self):
        # Condensed distance matrix for the three points (0,0),(1,0),(1,1)
        exp = numpy.array([1., 1.414213, 1.0], 'd')
        # initialize a double array and check if get back the expected distances
        desc = numpy.zeros((3, 2), 'd')
        desc[1, 0] = 1.0
        desc[2, 0] = 1.0
        desc[2, 1] = 1.0
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
        # repeat with a float array
        desc = numpy.zeros((3, 2), 'f')
        desc[1, 0] = 1.0
        desc[2, 0] = 1.0
        desc[2, 1] = 1.0
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
        # finally with an integer array
        desc = numpy.zeros((3, 2), 'i')
        desc[1, 0] = 1
        desc[2, 0] = 1
        desc[2, 1] = 1
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
    # NOTE(review): name does not start with "test", so unittest never runs
    # this case -- presumably disabled deliberately; confirm
    def ctest1DistsListArray(self):
        exp = numpy.array([1., 1.414213, 1.0], 'd')
        desc = [numpy.array([0.0, 0.0], 'd'), numpy.array([1.0, 0.0], 'd'),
                numpy.array([1.0, 1.0], 'd')]
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
        # repeat the test with a list of numpy.arrays of floats
        desc = [numpy.array([0.0, 0.0], 'f'), numpy.array([1.0, 0.0], 'f'),
                numpy.array([1.0, 1.0], 'f')]
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
        # repeat the test with a list of numpy.arrays of ints
        desc = [numpy.array([0, 0], 'i'), numpy.array([1, 0], 'i'), numpy.array([1, 1], 'i')]
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
    def test2DistListList(self):
        exp = numpy.array([1., 1.414213, 1.0], 'd')
        desc = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
        #test with ints
        desc = [[0, 0], [1, 0], [1, 1]]
        dmat = rdmmc.GetEuclideanDistMat(desc)
        for i in range(numpy.shape(dmat)[0]):
            assert feq(dmat[i], exp[i])
    def test3Compare(self):
        # The same random data as a numpy array and as a list-of-lists must
        # yield identical distance matrices
        n = 30
        m = 5
        dscArr = numpy.zeros((n, m), 'd')
        for i in range(n):
            for j in range(m):
                dscArr[i, j] = random.random()
        dmatArr = rdmmc.GetEuclideanDistMat(dscArr)
        dscLL = []
        for i in range(n):
            row = []
            for j in range(m):
                row.append(dscArr[i, j])
            dscLL.append(row)
        dmatLL = rdmmc.GetEuclideanDistMat(dscLL)
        assert numpy.shape(dmatArr) == numpy.shape(dmatLL)
        # n*(n-1)//2 entries in a condensed distance matrix
        for i in range(n * (n - 1) // 2):
            assert feq(dmatArr[i], dmatLL[i])
    def test4ebv(self):
        # Tanimoto similarity + distance must sum to 1 for ExplicitBitVects
        n = 30
        m = 2048
        dm = 800
        lst = []
        for i in range(n):
            v = DataStructs.ExplicitBitVect(m)
            for j in range(dm):
                v.SetBit(random.randrange(0, m))
            lst.append(v)
        dMat = rdmmc.GetTanimotoDistMat(lst)
        sMat = rdmmc.GetTanimotoSimMat(lst)
        for i in range(n * (n - 1) // 2):
            assert feq(sMat[i] + dMat[i], 1.0)
    def test5sbv(self):
        # Same check as test4ebv, using SparseBitVects
        n = 30
        m = 2048
        dm = 800
        lst = []
        for i in range(n):
            v = DataStructs.SparseBitVect(m)
            for j in range(dm):
                v.SetBit(random.randrange(0, m))
            lst.append(v)
        dMat = rdmmc.GetTanimotoDistMat(lst)
        sMat = rdmmc.GetTanimotoSimMat(lst)
        for i in range(n * (n - 1) // 2):
            assert feq(sMat[i] + dMat[i], 1.0)
# Standard unittest entry point
if __name__ == '__main__':
    unittest.main()
|
rvianello/rdkit
|
Code/DataManip/MetricMatrixCalc/Wrap/testMatricCalc.py
|
Python
|
bsd-3-clause
| 3,864
|
[
"RDKit"
] |
ddd25a60dd3ca99edacbe4fdbd314571d5bee7aa4723de2388761643461e18f6
|
#
# game.py - core game functionality
#
# Space Travel
# Copyright (C) 2014 Eric Eveleigh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import random
import pygame.draw
from vector import Vector2D
import entity
import physics
import screen
STAR_VELOCITY_MEAN = -2000
STAR_SIZE = 6.0
class StarField(object):
    '''
    Attempts to appear as background
    of stars which have been flown by.

    Stars scroll leftwards; both speed and size scale with distance from
    the vertical centre of the screen, faking depth.
    '''
    class Star(object):
        '''
        Helper for StarField,
        stores state for one star
        '''
        def __init__(self):
            self.position = Vector2D()
            self.velocity = Vector2D()
            self.size = 3.0
            self.color = (255,255,255)
        def get_position(self):
            return self.position
        def get_position_horiz(self):
            '''
            get horizontal position
            '''
            return self.position.get_x()
        def set_position_horiz(self, x):
            '''
            set horizontal position
            '''
            self.position.set_x(x)
        def set_position_vert(self, y):
            '''
            set vertical position
            '''
            self.position.set_y(y)
        def set_velocity_horiz(self, x):
            '''
            set horizontal velocity
            '''
            self.velocity.set_x(x)
        def set_velocity_vert(self, y):
            '''
            set vertical velocity
            '''
            # Bug fix: this used to call set_x(), silently overwriting the
            # horizontal velocity instead of setting the vertical one
            self.velocity.set_y(y)
        def set_velocity(self, velocity):
            '''
            set velocity as Vector2D
            '''
            self.velocity.set_vect(velocity)
        def set_size(self, size):
            '''
            set how large the star appears
            '''
            self.size = size
        def get_size(self):
            '''
            get how large
            '''
            return self.size
        def set_color(self, color):
            self.color = color
        def get_color(self):
            return self.color
        def update(self, dt):
            '''
            just 'integrate' for position
            '''
            self.position.add(self.velocity.scaled(dt))
    def __init__(self, width, height, num_stars):
        self.width = width
        self.height = height
        self.num_stars = num_stars
        self.star_velocity = STAR_VELOCITY_MEAN
        '''
        Create the initial field
        '''
        self.stars = [] # raises exceptions when removed
        i = 0
        while i < self.num_stars:
            star = StarField.Star()
            self.distribute_horiz(star)
            self.distribute_vert(star)
            self.stars.append(star)
            i += 1
    def distribute_vert(self, star):
        '''
        Vertically position a star pseudo-randomly;
        Gaussian distributed. Stars near the vertical centre
        (|factor| near 0) are faster and larger.
        '''
        factor = float('inf') # something where fabs(factor) is > 1.0 basically
        '''
        The while loop is important to ensure
        that |factor| <= 1.0 (star on the screen)
        '''
        while math.fabs(factor) > 1.0:
            factor = random.gauss(0.0, 0.75)
        vertical = (factor * self.height/2) + self.height/2
        velocity = (1.0-math.fabs(factor)) * self.star_velocity
        star.set_size((1.0-math.fabs(factor)) * STAR_SIZE)
        star.set_position_vert(vertical)
        star.set_velocity(Vector2D(velocity, 0))
    def distribute_horiz(self, star):
        '''
        Put the star anywhere randomly
        horizontally on the screen.
        Uniformly distributed (presumably)
        '''
        horiz = random.random()*self.width
        star.set_position_horiz(horiz)
    def random_color(self, star):
        '''
        Because not all stars appear white;
        each channel is drawn from 127..254
        '''
        color = lambda: int(127 + random.random()*128)
        r = color()
        g = color()
        b = color()
        star.set_color((r,g,b))
    def wrap_star(self, star):
        '''
        Keep re-using the Stars we
        have: move the star back to the right edge with a fresh
        vertical position and color.
        '''
        star.set_position_horiz(self.width)
        self.distribute_vert(star)
        self.random_color(star)
    def update(self, dt):
        '''
        Progress the StarField
        '''
        for star in self.stars:
            star.update(dt)
            # Recycle stars that scrolled off the left edge
            if star.get_position_horiz() < 0:
                self.wrap_star(star)
    def draw(self, surface):
        '''
        Show all the Stars
        '''
        for star in self.stars:
            pos = star.get_position().get_int()
            pygame.draw.circle(surface, star.get_color(), pos, int(star.get_size()), 0)
# pygame.font must be initialized before the Font object below can be created
pygame.font.init()
INFO_DISTANCE_TEXT = "Distance Travelled: "
INFO_POINTS_TEXT = "Points: "
INFO_REGENS_TEXT = "Regens Left: "
INFO_SHIELD_TEXT = "Shield Time Left: "
INFO_WEAPON_TEXT = "Weapon Upgraded: "
INFODISPLAY_FONT = pygame.font.Font("fonts/NEW ACADEMY.ttf", 20)
INFO_TEXT_COLOR = (255,255,255)
class InfoDisplay(object):
    '''
    Displays game information on the screen
    as the game plays out.
    '''
    class Text(object):
        '''
        Helper for InfoDisplay: a "label: value" pair of rendered texts.
        '''
        def __init__(self, static_text, initial_value, visible=True):
            self.static = self.create_static(static_text)
            self.value = initial_value
            self.value_text = self.create_static(str(initial_value))
            self.visible = visible
        def set_visible(self, visible):
            self.visible = visible
        def get_visible(self):
            return self.visible
        def create_static(self, text):
            '''
            Create static, pre-rendered text
            '''
            return screen.RenderedText(INFODISPLAY_FONT, text, INFO_TEXT_COLOR, False, True)
        def set_value(self, value):
            '''
            set the value displayed (re-renders the value text)
            '''
            self.value = value
            self.value_text.set_text(str(value))
        def set_position(self, position):
            # The value text sits immediately to the right of the label
            self.static.set_position(position)
            self.value_text.set_position((position[0]+self.static.get_width(),position[1]))
        def get_height(self):
            return self.static.get_height()
        def get_width(self):
            '''
            Total width: label plus value.
            '''
            return self.static.get_width() + self.value_text.get_width()
        def get_with(self):
            # Backward-compatible alias for get_width() (historical typo,
            # kept so existing callers keep working)
            return self.get_width()
    def __init__(self, position):
        self.text_list = []
        self.set_position(position)
    def add_text(self, info_text):
        self.text_list.append(info_text)
        self.update_text_positions()
    def set_position(self, position):
        self.position = position
        self.update_text_positions()
    def update_text_positions(self):
        '''
        set the position of all added
        texts appropriately: visible texts are stacked vertically
        starting at self.position
        '''
        pos_x = self.position[0]
        pos_y = self.position[1]
        for text in self.text_list:
            if text.get_visible() == True:
                text.set_position((pos_x,pos_y))
                pos_y += text.get_height()
    def draw(self, surface):
        '''
        Show all info
        '''
        for text in self.text_list:
            if text.get_visible() == True:
                text.draw(surface)
HP_TEXT = "HP:"
class HPBar(object):
'''
Don't want cluttered code
so make classes
'''
def __init__(self, max_hp, width, height):
self.set_width(width)
self.set_height(height)
self.hp_value = 0
self.hp_max = float(max_hp)
def set_position(self, position):
'''
set position as x,y coordinate tuple
'''
self.position = position
self.hp_static.set_position(position)
def set_width(self, width):
self.width = width
def set_height(self, height):
self.height = height
self.font = pygame.font.Font("fonts/NEW ACADEMY.ttf", height)
self.hp_static = screen.RenderedText(self.font, HP_TEXT, (0,255,0), False, False, True, False, False, True)
def set_value(self, value):
self.hp_value = value
def draw(self, surface):
'''
Draw a nice HP bar
'''
ratio = self.hp_value / self.hp_max
width_hp = int(self.width * ratio)
pygame.draw.rect(surface, (255, 0, 0), pygame.Rect(self.position, (width_hp, self.height)))
pygame.draw.rect(surface, (127, 127, 127), pygame.Rect(self.position, (self.width, self.height)), 2)
self.hp_static.draw(surface)
# three difficulty modes
GAME_DIFF_EASY = 1
GAME_DIFF_MEDIUM = 2
GAME_DIFF_HARD = 3
# corresponding game settings: travel distance, respawn count, starting HP,
# and per-spawn-tick probabilities for each obstacle/powerup type
GAME_SETTINGS_EASY = {'distance': 1200, 'default_regens': 5, 'default_hp': 100, 'aster_prob': 1.0/5, 'hole_prob': 1.0/35, 'shield_prob': 1.0/10, 'weapon_prob': 1.0/20}
GAME_SETTINGS_MEDIUM = {'distance': 2400, 'default_regens': 4, 'default_hp': 100, 'aster_prob': 1.0/4, 'hole_prob': 1.0/25, 'shield_prob': 1.0/15, 'weapon_prob': 1.0/25}
GAME_SETTINGS_HARD = {'distance': 4800, 'default_regens': 3, 'default_hp': 100, 'aster_prob': 1.0/3, 'hole_prob': 1.0/10, 'shield_prob': 1.0/20, 'weapon_prob': 1.0/30 }
# game modes
GAME_MODE_NORMAL = 1 # fly until destination reached
GAME_MODE_ENDURANCE = 2 # fly until no regenerations left
GAME_TRAVEL_VELOCITY = 10.0 # 10 units of distance per second
GAME_SPAWN_PERIOD = 1.0 # how many seconds between spawning objects
GAME_SHOW_HISCORES = 26 # event injected to show hiscores
GAME_SHOW_TITLE = 27 # event to show titlescreen
class Game(object):
    '''
    Top-level game state: owns the player, all entities, the HUD widgets,
    spawning, scoring, and the win/lose flow.
    '''
class Settings(object):
'''
Helper for Game settings.
'''
def __init__(self, settings):
self.add(settings)
def add(self, settings):
for attr in settings:
setattr(self, attr, settings[attr])
    def __init__(self, screen_rect, difficulty, mode):
        # Resolve difficulty/mode into concrete settings first, since the
        # HUD and spawner construction below reads them.
        self.default_settings()
        self.set_settings({'difficulty': difficulty, 'mode': mode})
        self.screen_rect = screen_rect
        self.star_field = StarField(screen_rect.width, screen_rect.height, 10)
        self.dynamics = physics.Dynamics()
        self.infodisplay = InfoDisplay((20,20))
        self.populate_info_display()
        hp_width = 200
        hp_height = 40
        self.hp_bar = HPBar(self.settings.default_hp, hp_width, hp_height)
        self.hp_bar.set_position((hp_height*2, self.screen_rect.height-hp_height*2))
        self.entity_list = []
        # Useful to remove entities that fly off the screen too far
        self.despawn_rect = pygame.Rect(self.screen_rect)
        self.despawn_rect.width = self.screen_rect.width + 300
        self.despawn_rect.height = self.screen_rect.height + 100
        self.despawn_rect.center = self.screen_rect.center
        self.start_game()
    def default_settings(self):
        # Baseline used until an explicit difficulty/mode is applied.
        self.settings = Game.Settings({'difficulty': GAME_DIFF_MEDIUM, 'mode': GAME_MODE_NORMAL})
def set_settings(self, settings):
'''
Sets all the settings.
'''
if self.settings != None:
self.settings.add(settings)
else:
self.settings = Game.Settings(settings)
diff = self.settings.difficulty
if diff == GAME_DIFF_EASY:
self.settings.add(GAME_SETTINGS_EASY)
elif diff == GAME_DIFF_MEDIUM:
self.settings.add(GAME_SETTINGS_MEDIUM)
elif diff == GAME_DIFF_HARD:
self.settings.add(GAME_SETTINGS_HARD)
else:
self.settings.add(GAME_SETTINGS_EASY)
def populate_info_display(self):
self.distance_text = InfoDisplay.Text(INFO_DISTANCE_TEXT, 0, True)
self.add_info_text(self.distance_text)
self.points_text = InfoDisplay.Text(INFO_POINTS_TEXT, 0, True)
self.add_info_text(self.points_text)
self.regens_text = InfoDisplay.Text(INFO_REGENS_TEXT, 0, True)
self.add_info_text(self.regens_text)
self.shield_text = InfoDisplay.Text(INFO_SHIELD_TEXT, 0.0, False)
self.add_info_text(self.shield_text)
self.weapon_text = InfoDisplay.Text(INFO_WEAPON_TEXT, 0.0, False)
self.add_info_text(self.weapon_text)
    def add_info_text(self, text):
        # Thin wrapper so Game code never touches infodisplay directly.
        self.infodisplay.add_text(text)
def update_distance_display(self):
value = "%.2f" % self.distance_travelled
if self.settings.mode == GAME_MODE_NORMAL:
value += " of " + ("%.0f"%self.distance)
self.distance_text.set_value(value)
    def update_points_display(self):
        # Mirror the player's score into the HUD.
        self.points_text.set_value(self.player.get_points())
    def update_regens_display(self):
        # Mirror the remaining regenerations into the HUD.
        self.regens_text.set_value(self.player.get_regens_left())
def update_shield_display(self):
if self.player.has_shield() == True:
self.shield_text.set_value(("%.2f"%self.player.get_shield_timer()) + " seconds")
self.shield_text.set_visible(True)
else:
self.shield_text.set_visible(False)
self.infodisplay.update_text_positions()
def update_weapon_display(self):
if self.player.has_weapon_upgrade() == True:
self.weapon_text.set_value(str(self.player.get_weapon_upgrades()) + " times")
self.weapon_text.set_visible(True)
else:
self.weapon_text.set_visible(False)
self.infodisplay.update_text_positions()
    def start_game(self):
        # Reset all per-run state and refresh every HUD element.
        self.create_player()
        self.spawn_player()
        self.distance_travelled = 0
        self.distance = self.settings.distance
        self.spawn_timer = 1.0
        self.points = 0
        self.update_distance_display()
        self.update_points_display()
        self.update_regens_display()
        self.update_shield_display()
        self.update_weapon_display()
        self.shooting = False
        self.game_won = False
        self.game_over = False
def random_vertical_position(self):
return random.random() * self.screen_rect.height
def random_position(self):
position_x = self.screen_rect.width + 100
position_y = self.random_vertical_position()
position = Vector2D(position_x, position_y)
return position
def probability_event(self, probability):
rand = random.random()
if rand <= probability:
return True
return False
def random_float(self, min_, max_):
return min_ + random.random()*(max_-min_)
    def spawn_asteroid(self):
        '''
        Create an asteroid just off the right edge with randomized speed,
        heading and spin, and add it to the entity list.
        '''
        position = self.random_position()
        v_min = entity.ASTEROID_VELOCITY_MIN
        v_max = entity.ASTEROID_VELOCITY_MAX
        speed = self.random_float(v_min, v_max)
        # heading: leftwards, perturbed by up to +/- ASTEROID_DIRECTION_SPREAD
        velocity = Vector2D(-1.0, 0.0).rotate(self.random_float(-entity.ASTEROID_DIRECTION_SPREAD, entity.ASTEROID_DIRECTION_SPREAD)).scale(speed)
        ang_velocity = self.random_float(-entity.ASTEROID_DIRECTION_SPREAD, entity.ASTEROID_DIRECTION_SPREAD)
        ent = entity.Asteroid(entity.ASTEROID_HP, position, velocity, 0.0, ang_velocity)
        self.add_entity(ent)
    def spawn_hole(self):
        '''
        Create a black hole just off the right edge with randomized speed,
        heading and spin.
        '''
        position = self.random_position()
        v_min = entity.HOLE_VELOCITY_MIN
        v_max = entity.HOLE_VELOCITY_MAX
        speed = self.random_float(v_min, v_max)
        velocity = Vector2D(-1.0, 0.0).rotate(self.random_float(-entity.HOLE_DIRECTION_SPREAD, entity.HOLE_DIRECTION_SPREAD)).scale(speed)
        ang_velocity = self.random_float(-entity.HOLE_DIRECTION_SPREAD, entity.HOLE_DIRECTION_SPREAD)
        ent = entity.Hole(position, velocity, 0.0, ang_velocity)
        # holes should be drawn under other entities:
        self.add_entity_bottom(ent)
    def spawn_powerup(self, powerup_class):
        '''
        Spawn an instance of *powerup_class* drifting straight left at a
        random speed, starting just off the right edge.
        '''
        position = self.random_position()
        v_min = entity.POWERUP_VELOCITY_MIN
        v_max = entity.POWERUP_VELOCITY_MAX
        speed = self.random_float(v_min, v_max)
        velocity = Vector2D(-1.0, 0.0).scale(speed)
        ent = powerup_class(position, velocity, 0.0, 0.0)
        self.add_entity(ent)
def update_spawner(self, dt):
self.spawn_timer -= dt
if self.spawn_timer <= 0.0:
self.spawn_timer += GAME_SPAWN_PERIOD
settings = self.settings
spawn_asteroid = self.probability_event(settings.aster_prob)
spawn_hole = self.probability_event(settings.hole_prob)
spawn_shield = self.probability_event(settings.shield_prob)
spawn_weapon = self.probability_event(settings.weapon_prob)
if spawn_asteroid == True:
# spawn an asteroid
self.spawn_asteroid()
if spawn_hole == True:
# spawn a black hole
self.spawn_hole()
if spawn_shield == True:
# spawn a shield powerup
self.spawn_powerup(entity.ShieldPowerup)
if spawn_weapon == True:
# spawn a weapon powerup
self.spawn_powerup(entity.WeaponPowerup)
def update_distance(self, dt):
self.distance_travelled += GAME_TRAVEL_VELOCITY*dt
self.update_distance_display()
if self.settings.mode == GAME_MODE_NORMAL:
if self.distance_travelled >= self.distance:
self.distance_travelled = self.distance
self.game_is_over()
    def create_player(self):
        # New Player at a quarter of the screen width, vertically centered.
        self.player = entity.Player(self.settings.default_hp, self.settings.default_regens, Vector2D(self.screen_rect.width/4, self.screen_rect.height/2), Vector2D(0,0), 0.0)
    def spawn_player(self):
        # Reset the existing Player to full HP at the start position and
        # put it (back) into the entity list.
        self.player.respawn(self.settings.default_hp, Vector2D(self.screen_rect.width/4, self.screen_rect.height/2), Vector2D(0,0), 0.0)
        self.add_entity(self.player)
def wrap_player(self, entity):
'''
Keep player on the screen; this
simplifies things greatly.
'''
position = entity.get_position()
if position.get_x() < 0:
position.set_x(self.screen_rect.width)
elif position.get_x() > self.screen_rect.width:
position.set_x(0)
if position.get_y() < 0:
position.set_y(self.screen_rect.height)
elif position.get_y() > self.screen_rect.height:
position.set_y(0)
entity.set_position(position)
    def player_destroyed(self):
        '''
        Handle a player death: consume one regeneration, update the HUD,
        start the explosion animation, and end the game when no
        regenerations remain.
        '''
        self.player.lose_regen()
        self.update_regens_display()
        self.show_player_explosion()
        if self.player.get_regens_left() <= 0:
            self.game_is_over()
    def show_player_explosion(self):
        '''
        Start the player's explosion animation and remember it so the
        respawn logic can wait for it to finish.
        '''
        self.player_explosion = self.show_explosion(self.player)
    def is_player_finished_exploding(self):
        '''
        True once the current player explosion animation has completed.
        '''
        return self.player_explosion.finished()
    def show_explosion(self, ent):
        '''
        Spawn an Explosion entity matching *ent*'s motion and return it.
        '''
        expl = entity.Explosion(ent.get_position(), ent.get_velocity(), ent.get_orientation(), ent.get_ang_velocity())
        self.add_entity(expl)
        return expl
def add_entity(self, entity):
'''
inserts a new entity into the game
'''
if entity != None:
self.entity_list.append(entity)
def add_entity_bottom(self, entity):
'''
inserts a new entity below all others
'''
if entity != None:
self.entity_list.insert(0, entity)
    def key_down(self, key):
        '''
        Keyboard handler while playing: W/up thrusts, A/D or arrows turn,
        space shoots, K self-destructs. After game over, enter advances to
        the hiscores screen (scored modes) or back to the title screen.
        '''
        if self.game_over == False:
            if key == pygame.K_w or key == pygame.K_UP:
                self.player.accelerate(True)
            if key == pygame.K_RIGHT or key == pygame.K_d:
                self.player.turn_clockwise()
            if key == pygame.K_LEFT or key == pygame.K_a:
                self.player.turn_counterclockwise()
            if key == pygame.K_SPACE:
                self.shooting = True
            elif key == pygame.K_k: # kill yourself
                if self.player.get_alive() == True:
                    self.player.set_hp(0)
                    self.player.set_alive(False)
                    self.player_destroyed()
                    self.entity_list.remove(self.player)
                # disabled debug cheat, kept as an inert string literal:
                '''
                elif key == pygame.K_p: # win the game
                    self.distance_travelled = self.distance
                '''
        else:
            if key == pygame.K_RETURN:
                if self.settings.mode == GAME_MODE_NORMAL or self.settings.mode == GAME_MODE_ENDURANCE:
                    pygame.event.post(pygame.event.Event(GAME_SHOW_HISCORES))
                else:
                    pygame.event.post(pygame.event.Event(GAME_SHOW_TITLE))
    def key_up(self, key):
        '''
        Stop the action bound to *key* when it is released (thrust,
        turning, or shooting); ignored once the game is over.
        '''
        if self.game_over == False:
            if key == pygame.K_w or key == pygame.K_UP:
                self.player.accelerate(False)
            if key == pygame.K_RIGHT or key == pygame.K_d:
                self.player.turn_cw_stop()
            if key == pygame.K_LEFT or key == pygame.K_a:
                self.player.turn_ccw_stop()
            if key == pygame.K_SPACE:
                self.shooting = False
def player_fire_weapon(self):
'''
shoot the gun
'''
if self.shooting:
if self.player.can_shoot():
self.add_entity(self.player.shoot())
def remove_offscreen_entity(self, entity):
'''
Remove entities that have gone too
far off screen
-> Reduce lag
'''
if self.despawn_rect.colliderect(entity.get_bounding_rect()) == False:
self.entity_list.remove(entity)
return True
else:
return False
    # http://en.wikipedia.org/wiki/Newton's_law_of_universal_gravitation
    def hole_gravity_force(self, hole, entity):
        '''
        Apply Newtonian attraction between *hole* and *entity*: equal and
        opposite forces on both bodies. Non-collidable entities are
        unaffected. Caller must guarantee nonzero separation (see update).
        '''
        if entity.get_collidable() == False:
            return
        # this G is made up because the real value is much too small for this purpose
        G = 6.67 # x 10^-11
        # displacement vector pointing from the entity towards the hole
        disp = hole.get_position().addition(entity.get_position().reversed())
        r_squared = disp.norm_squared()
        r_hat = disp.scaled(1/math.sqrt(r_squared))
        # F = G * m1 * m2 / r^2, directed along r_hat
        force = r_hat.scaled(G * hole.get_mass() * entity.get_mass() / r_squared)
        entity.apply_force(force)
        hole.apply_force(force.reversed())
    def game_is_over(self):
        '''
        Enter the game-over state: capture the final score and distance,
        then pre-render the headline and the three result message lines
        that game_over_draw() shows each frame.
        '''
        self.game_over = True
        self.final_points = self.player.get_points()
        self.final_distance = self.distance_travelled
        game_over_font = pygame.font.Font("fonts/Rase-GPL-Bold.otf", 40)
        message_font = pygame.font.Font("fonts/NEW ACADEMY.ttf", 20)
        self.game_over_text = screen.RenderedText(game_over_font, "Game Over.", (0, 255, 0), True)
        self.game_over_text.set_position(((self.screen_rect.width/2, self.screen_rect.height/3)))
        # pick the messages according to mode and result
        if self.settings.mode == GAME_MODE_NORMAL:
            if self.distance_travelled >= self.distance:
                message1_text = "Winner, you have reached the destination!"
                message2_text = "You collected " + str(self.player.get_points()) + " points along the way."
                message3_text = "Press enter to record your score for everyone to see."
            else:
                message1_text = "What a pity, you didn't make it."
                message2_text = "You only had a distance of " + ("%.2f"%(self.distance-self.distance_travelled)) + " left too..."
                message3_text = "Press enter key to see everyone else who did better than you."
        elif self.settings.mode == GAME_MODE_ENDURANCE:
            message1_text = "You endured a distance of " + ("%.2f"%(self.distance_travelled)) + ". Congratulations."
            message2_text = "You also collected " + str(self.player.get_points()) + " points along the way."
            message3_text = "Press enter key to record your score."
        else:
            message1_text = "Well this is unusual, I don't know this game mode."
            message2_text = "Press enter key, anyway, to return to the title screen."
            message3_text = ""
        # stack the three message lines directly under the headline
        x = self.game_over_text.get_x()
        y = self.game_over_text.get_y() + self.game_over_text.get_height()
        self.game_over_message1 = screen.RenderedText(message_font, message1_text, (255, 255, 255), True)
        self.game_over_message1.set_position((x,y))
        y += self.game_over_message1.get_height()
        self.game_over_message2 = screen.RenderedText(message_font, message2_text, (255, 255, 255), True)
        self.game_over_message2.set_position((x,y))
        y += self.game_over_message2.get_height()
        self.game_over_message3 = screen.RenderedText(message_font, message3_text, (255, 255, 255), True)
        self.game_over_message3.set_position((x,y))
        y += self.game_over_message3.get_height()
    def get_final_points(self):
        '''
        Points total captured when the game ended.
        '''
        return self.final_points
    def get_final_distance(self):
        '''
        Distance travelled, captured when the game ended.
        '''
        return self.final_distance
    def get_game_mode(self):
        # Selected mode (one of the GAME_MODE_* constants).
        return self.settings.mode
    def get_game_difficulty(self):
        # Selected difficulty (one of the GAME_DIFF_* constants).
        return self.settings.difficulty
def get_game_won(self):
'''
get whether the game was won
'''
if self.settings.mode == GAME_MODE_NORMAL:
if self.get_final_distance() >= self.distance:
return True
else:
return False
elif self.settings.mode == GAME_MODE_ENDURANCE:
return True
else:
return False
    def game_over_draw(self, surface):
        '''
        Render the pre-built game-over headline and message lines
        (prepared by game_is_over()).
        '''
        self.game_over_text.draw(surface)
        self.game_over_message1.draw(surface)
        self.game_over_message2.draw(surface)
        self.game_over_message3.draw(surface)
def update(self, frametime):
'''
Update EVERYTHING
'''
self.star_field.update(frametime)
self.dynamics.resolve_collisions(self.entity_list, frametime)
for entity1 in self.entity_list:
entity1.update(frametime)
# remove Entitys that are destroyed;
# update powerup info if it was a powerup
if entity1.get_alive() == False:
if isinstance(entity1, entity.Player):
self.player_destroyed()
elif isinstance(entity1, entity.WeaponPowerup):
self.update_weapon_display()
elif isinstance(entity1, entity.Asteroid):
self.show_explosion(entity1)
self.update_points_display()
self.entity_list.remove(entity1)
continue # we don't need to do anything more with a dead Entity
# remove Entitys that are outside of
# the allowable region
if self.remove_offscreen_entity(entity1) == True:
continue
# do Entity type-specific updates
if isinstance(entity1, entity.Player):
self.wrap_player(entity1)
elif isinstance(entity1, entity.Hole):
# every entity1 is attracted to the hole
for entity2 in self.entity_list:
if entity2 == entity1:
continue # avoid divn by zero in hole_gravity_force (zero separation between ent and itself)
self.hole_gravity_force(entity1, entity2)
# update shield powerup display
self.update_shield_display()
# update HP bar, regardless of if dead or not
self.hp_bar.set_value(self.player.get_hp())
'''
Do the spawning and distance updates
'''
if self.game_over == False:
if self.player.get_alive() == True:
# spawn Asteroids, Holes and Powerups
# when the player is alive
self.update_spawner(frametime)
self.update_distance(frametime)
self.player_fire_weapon()
else:
# spawn the player when they finish exploding
if self.is_player_finished_exploding():
self.spawn_player()
def draw(self, surface):
'''
Draw the star field and all the
entities on top, basically.
draw info texts and hp bar as well
'''
surface.fill((0,0,0))
self.star_field.draw(surface)
for entity in self.entity_list:
entity.draw(surface)
if self.game_over == False:
self.hp_bar.draw(surface)
self.infodisplay.draw(surface)
else:
self.game_over_draw(surface)
|
mre521/space-travel
|
game.py
|
Python
|
gpl-3.0
| 31,233
|
[
"Gaussian"
] |
da904a72eda3e749d265368a353224fdfdac9c97503e83a933fd321cbf44582f
|
"""Create index file for atoms or distance pairs.
This index file can be used by for RMSD distance calculations, to specify
pairs of atoms for AtomPairsFeaturizer, or to specify particular atoms
for SuperposeFeaturizer.
The format of the index file is flat text, with each line containing either
1 or 2 0-based atom indices.
"""
# Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
from __future__ import print_function, division, absolute_import
import os
import itertools
import mdtraj as md
import numpy as np
from ..cmdline import Command, argument, argument_group, exttype
class AtomIndices(Command):
    # NOTE: deliberately no class docstring -- `description = __doc__` below
    # must resolve to the *module* docstring, and a class docstring would
    # shadow it inside this class namespace.
    _group = '0-Support'
    _concrete = True
    description = __doc__
    pdb = argument('-p', '--pdb', required=True, help='Path to PDB file')
    out = argument('-o', '--out', required=True, help='Path to output file',
                   type=exttype('.txt'))

    section1 = argument_group(description='Mode: Choose One')
    group1 = section1.add_mutually_exclusive_group(required=True)
    group1.add_argument('-d', '--distance-pairs', action='store_true', help='''
        Create a 2-dimensional index file with (N choose 2) rows and 2
        columns, where each row specifies a pair of indices. All (N choose 2)
        pairs of the selected atoms will be written.''')
    group1.add_argument('-a', '--atoms', action='store_true', help='''
        Create a 1-dimensional index file containing the indices of the
        selected atoms.''')

    section2 = argument_group(description='Selection Criteria: Choose One')
    group2 = section2.add_mutually_exclusive_group(required=True)
    group2.add_argument('--minimal', action='store_true', help='''Keep the
        atoms in protein residues with names CA, CB, C, N, O, (recommended).''')
    group2.add_argument('--heavy', action='store_true', help='''All
        non-hydrogen atoms that are not symmetry equivalent. By symmetry
        equivalent, we mean atoms identical under an exchange of labels. For
        example, heavy will exclude the two pairs of equivalent carbons (CD,
        CE) in a PHE ring.''')
    group2.add_argument('--alpha', action='store_true', help='''Only alpha
        carbons.''')
    group2.add_argument('--water', action='store_true', help='''Water oxygen
        atoms.''')
    group2.add_argument('--all', action='store_true', help='''Selection
        includes every atom.''')

    def __init__(self, args):
        self.args = args
        # refuse to clobber an existing output file
        if os.path.exists(args.out):
            self.error('IOError: file exists: %s' % args.out)
        self.pdb = md.load(os.path.expanduser(args.pdb))
        top = self.pdb.topology
        print('Loaded pdb containing (%d) chains, (%d) residues, (%d) atoms.' %
              (top.n_chains, top.n_residues, top.n_atoms))

    def start(self):
        # Map the mutually exclusive CLI flags onto mdtraj selection names;
        # flag names and mdtraj selection keywords coincide, checked in a
        # fixed precedence order.
        selection = None
        for flag in ('all', 'alpha', 'minimal', 'heavy', 'water'):
            if getattr(self.args, flag):
                selection = flag
                break
        if selection is None:
            raise RuntimeError()
        atom_indices = self.pdb.topology.select_atom_indices(selection)
        residue_ids = [self.pdb.topology.atom(i).residue.index for i in atom_indices]
        print('Selected (%d) atoms from (%d) unique residues.' % (
            len(atom_indices), len(np.unique(residue_ids))))
        if self.args.distance_pairs:
            out = np.array(list(itertools.combinations(atom_indices, 2)))
        elif self.args.atoms:
            out = np.array(atom_indices)
        else:
            raise RuntimeError
        np.savetxt(self.args.out, out, '%d')
|
msultan/msmbuilder
|
msmbuilder/commands/atom_indices.py
|
Python
|
lgpl-2.1
| 3,793
|
[
"MDTraj"
] |
ed7049d9b7ed7e24710795410b41f38630d6b4ca908e30d4b707bbf7e07fa527
|
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import cmd
import glob
import os
import platform
import time
import threading
import sys
import shutil
import subprocess
import codecs
import argparse
import locale
import logging
import traceback
import re
from appdirs import user_cache_dir, user_config_dir, user_data_dir
from serial import SerialException
from . import printcore
from .utils import install_locale, run_command, get_command_output, \
format_time, format_duration, RemainingTimeEstimator, \
get_home_pos, parse_build_dimensions, parse_temperature_report, \
setup_logging
install_locale('pronterface')
from .settings import Settings, BuildDimensionsSetting
from .power import powerset_print_start, powerset_print_stop
from printrun import gcoder
from .rpc import ProntRPC
from printrun.spoolmanager import spoolmanager
# Optional, Windows-only registry module; best effort, so the broad
# except is deliberate here.
if os.name == "nt":
    try:
        import winreg
    except:
        pass
# READLINE records whether any readline implementation is available so the
# console can disable tab completion gracefully.
READLINE = True
try:
    import readline
    try:
        readline.rl.mode.show_all_if_ambiguous = "on" # config pyreadline on windows
    except:
        pass
except:
    READLINE = False # neither readline module is available
# matches temperature report lines ("T:" at line start or mid-line)
tempreading_exp = re.compile("(^T:| T:)")
# bit flags for which periodic printer reports are active
REPORT_NONE = 0
REPORT_POS = 1
REPORT_TEMP = 2
REPORT_MANUAL = 4
DEG = "\N{DEGREE SIGN}" # degree symbol used in temperature displays
class Status:
    """Mutable snapshot of printer state: hotend/bed temperatures,
    their targets, and the current print job."""
    def __init__(self):
        # All readings start at zero until the first report arrives.
        self.extruder_temp = 0
        self.extruder_temp_target = 0
        self.bed_temp = 0
        self.bed_temp_target = 0
        self.print_job = None
        self.print_job_progress = 1.0

    def update_tempreading(self, tempstr):
        """Parse a temperature report line and refresh the cached values.

        Missing or empty fields leave the previous reading untouched.
        """
        temps = parse_temperature_report(tempstr)

        def hotend_field(slot):
            # Prefer the explicit first-tool key "T0", fall back to plain "T".
            for tool in ("T0", "T"):
                if tool in temps and temps[tool][slot]:
                    return float(temps[tool][slot])
            return None

        current = hotend_field(0)
        target = hotend_field(1)
        if current is not None:
            self.extruder_temp = current
        if target is not None:
            self.extruder_temp_target = target
        if "B" in temps and temps["B"][0]:
            self.bed_temp = float(temps["B"][0])
            setpoint = temps["B"][1]
            if setpoint:
                self.bed_temp_target = float(setpoint)

    @property
    def bed_enabled(self):
        # Nonzero reported bed temperature counts as "enabled".
        return self.bed_temp != 0

    @property
    def extruder_enabled(self):
        # Nonzero reported hotend temperature counts as "enabled".
        return self.extruder_temp != 0
class pronsole(cmd.Cmd):
    def __init__(self):
        cmd.Cmd.__init__(self)
        # Without a readline module, disable cmd.Cmd tab completion.
        if not READLINE:
            self.completekey = None
        self.status = Status()
        self.dynamic_temp = False
        self.compute_eta = None
        # background status-monitor thread state
        self.statuscheck = False
        self.status_thread = None
        self.monitor_interval = 3
        # printer core and its event callbacks
        self.p = printcore.printcore()
        self.p.recvcb = self.recvcb
        self.p.startcb = self.startcb
        self.p.endcb = self.endcb
        self.p.layerchangecb = self.layer_change_cb
        self.p.process_host_command = self.process_host_command
        self.recvlisteners = []
        self.in_macro = False
        self.p.onlinecb = self.online
        self.p.errorcb = self.logError
        self.fgcode = None
        self.filename = None
        self.rpc_server = None
        self.curlayer = 0
        # SD-card listing / printing state
        self.sdlisting = 0
        self.sdlisting_echo = 0
        self.sdfiles = []
        self.paused = False
        self.sdprinting = 0
        self.uploading = 0 # Unused, just for pronterface generalization
        # temperature presets by material name
        self.temps = {"pla": "185", "abs": "230", "off": "0"}
        self.bedtemps = {"pla": "60", "abs": "110", "off": "0"}
        self.percentdone = 0
        self.posreport = ""
        self.tempreadings = ""
        # counters of user-typed M114/M105 (incremented in precmd)
        self.userm114 = 0
        self.userm105 = 0
        self.m105_waitcycles = 0
        self.macros = {}
        self.rc_loaded = False
        self.processing_rc = False
        self.processing_args = False
        # settings object plus its change callbacks
        self.settings = Settings(self)
        self.settings._add(BuildDimensionsSetting("build_dimensions", "200x200x100+0+0+0+0+0+0", _("Build dimensions"), _("Dimensions of Build Platform\n & optional offset of origin\n & optional switch position\n\nExamples:\n XXXxYYY\n XXX,YYY,ZZZ\n XXXxYYYxZZZ+OffX+OffY+OffZ\nXXXxYYYxZZZ+OffX+OffY+OffZ+HomeX+HomeY+HomeZ"), "Printer"), self.update_build_dimensions)
        self.settings._port_list = self.scanserial
        self.settings._temperature_abs_cb = self.set_temp_preset
        self.settings._temperature_pla_cb = self.set_temp_preset
        self.settings._bedtemp_abs_cb = self.set_temp_preset
        self.settings._bedtemp_pla_cb = self.set_temp_preset
        self.update_build_dimensions(None, self.settings.build_dimensions)
        self.update_tcp_streaming_mode(None, self.settings.tcp_streaming_mode)
        self.monitoring = 0
        self.starttime = 0
        self.extra_print_time = 0
        self.silent = False
        # single-letter prefixes that are forwarded as raw G-code (see default())
        self.commandprefixes = 'MGT$'
        self.promptstrs = {"offline": "%(bold)soffline>%(normal)s ",
                           "fallback": "%(bold)s%(red)s%(port)s%(white)s PC>%(normal)s ",
                           "macro": "%(bold)s..>%(normal)s ",
                           "online": "%(bold)s%(green)s%(port)s%(white)s %(extruder_temp_fancy)s%(progress_fancy)s>%(normal)s "}
        self.spool_manager = spoolmanager.SpoolManager(self)
        self.current_tool = 0 # Keep track of the extruder being used
        # per-user cache/config/data directories (appdirs conventions)
        self.cache_dir = os.path.join(user_cache_dir("Printrun"))
        self.history_file = os.path.join(self.cache_dir,"history")
        self.config_dir = os.path.join(user_config_dir("Printrun"))
        self.data_dir = os.path.join(user_data_dir("Printrun"))
        self.lineignorepattern=re.compile("ok ?\d*$|.*busy: ?processing|.*busy: ?heating|.*Active Extruder: ?\d*$")
    # --------------------------------------------------------------
    # General console handling
    # --------------------------------------------------------------
    def postloop(self):
        # Always drop the printer connection when the command loop ends.
        self.p.disconnect()
        cmd.Cmd.postloop(self)
    def preloop(self):
        # Greet the user and install the initial (dynamic) prompt.
        self.log(_("Welcome to the printer console! Type \"help\" for a list of available commands."))
        self.prompt = self.promptf()
        cmd.Cmd.preloop(self)
    # We replace this function, defined in cmd.py .
    # Its default behavior with regards to Ctrl-C
    # and Ctrl-D doesn't make much sense...
    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.
        """
        self.preloop()
        if self.use_rawinput and self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey + ": complete")
                # Load command history from the cache dir, creating the
                # directory on first run.
                history = (self.history_file)
                if not os.path.exists(history):
                    if not os.path.exists(self.cache_dir):
                        os.makedirs(self.cache_dir)
                    history = os.path.join(self.cache_dir, "history")
                if os.path.exists(history):
                    readline.read_history_file(history)
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro) + "\n")
            stop = None
            while not stop:
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            # Ctrl-D exits cleanly instead of looping.
                            self.log("")
                            self.do_exit("")
                        except KeyboardInterrupt:
                            # Ctrl-C just aborts the current input line.
                            self.log("")
                            line = ""
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            line = ""
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                    # Persist history for the next session.
                    readline.write_history_file(self.history_file)
                except ImportError:
                    pass
def confirm(self):
y_or_n = input("y/n: ")
if y_or_n == "y":
return True
elif y_or_n != "n":
return self.confirm()
return False
    def log(self, *msg):
        # Join all arguments into one string and log at INFO level.
        msg = "".join(str(i) for i in msg)
        logging.info(msg)
    def logError(self, *msg):
        # Log at ERROR level and, when configured, run the user's
        # error_command hook with the message substituted for $m.
        msg = "".join(str(i) for i in msg)
        logging.error(msg)
        if not self.settings.error_command:
            return
        output = get_command_output(self.settings.error_command, {"$m": msg})
        if output:
            self.log("Error command output:")
            self.log(output.rstrip())
    def promptf(self):
        """A function to generate prompts so that we can do dynamic prompts. """
        # Pick the template matching the current state...
        if self.in_macro:
            promptstr = self.promptstrs["macro"]
        elif not self.p.online:
            promptstr = self.promptstrs["offline"]
        elif self.status.extruder_enabled:
            promptstr = self.promptstrs["online"]
        else:
            promptstr = self.promptstrs["fallback"]
        # ...and fill in its %(...)s placeholders, if any.
        if "%" not in promptstr:
            return promptstr
        else:
            specials = {}
            specials["extruder_temp"] = str(int(self.status.extruder_temp))
            specials["extruder_temp_target"] = str(int(self.status.extruder_temp_target))
            # drops the first 5 characters of the port name -- presumably a
            # "/dev/" prefix; TODO confirm for non-POSIX port names
            specials["port"] = self.settings.port[5:]
            if self.status.extruder_temp_target == 0:
                specials["extruder_temp_fancy"] = str(int(self.status.extruder_temp)) + DEG
            else:
                specials["extruder_temp_fancy"] = "%s%s/%s%s" % (str(int(self.status.extruder_temp)), DEG, str(int(self.status.extruder_temp_target)), DEG)
            # progress in tenths of a percent while printing from host or SD
            if self.p.printing:
                progress = int(1000 * float(self.p.queueindex) / len(self.p.mainqueue)) / 10
            elif self.sdprinting:
                progress = self.percentdone
            else:
                progress = 0.0
            specials["progress"] = str(progress)
            if self.p.printing or self.sdprinting:
                specials["progress_fancy"] = " " + str(progress) + "%"
            else:
                specials["progress_fancy"] = ""
            # ANSI color/style escape codes
            specials["red"] = "\033[31m"
            specials["green"] = "\033[32m"
            specials["white"] = "\033[37m"
            specials["bold"] = "\033[01m"
            specials["normal"] = "\033[00m"
            return promptstr % specials
    def postcmd(self, stop, line):
        """ A hook we override to generate prompts after
        each command is executed, for the next prompt.
        We also use it to send M105 commands so that
        temp info gets updated for the prompt."""
        if self.p.online and self.dynamic_temp:
            self.p.send_now("M105")
        self.prompt = self.promptf()
        return stop
    def kill(self):
        # Stop the background status thread and the RPC server, if running.
        self.statuscheck = False
        if self.status_thread:
            self.status_thread.join()
            self.status_thread = None
        if self.rpc_server is not None:
            self.rpc_server.shutdown()
    def write_prompt(self):
        # Emit the current prompt without waiting for input (used when
        # output interleaves with the prompt).
        sys.stdout.write(self.promptf())
        sys.stdout.flush()
    def help_help(self, l = ""):
        # "help help" just prints the general help listing.
        self.do_help("")
    def do_gcodes(self, l = ""):
        # The "gcodes" command shows the same text as its help entry.
        self.help_gcodes()
    def help_gcodes(self):
        # Help text for the "gcodes" pseudo-command.
        self.log("Gcodes are passed through to the printer as they are")
def precmd(self, line):
if line.upper().startswith("M114"):
self.userm114 += 1
elif line.upper().startswith("M105"):
self.userm105 += 1
return line
    def help_shell(self):
        # Help text for the "!" shell escape.
        self.log("Executes a python command. Example:")
        self.log("! os.listdir('.')")
    def do_shell(self, l):
        # SECURITY NOTE: executes arbitrary Python typed at the console.
        # That is the documented purpose of "!", but this console must
        # never be exposed to untrusted input.
        exec(l)
    def emptyline(self):
        """Called when an empty line is entered - do not remove"""
        # Overrides cmd.Cmd, which would otherwise repeat the last command.
        pass
    def default(self, l):
        # Fallback for unrecognized commands: lines starting with one of the
        # raw G-code prefixes (M/G/T/$) are uppercased and sent verbatim;
        # "@"-prefixed lines are sent as-is without uppercasing; anything
        # else falls through to cmd.Cmd's default handling.
        if l[0].upper() in self.commandprefixes.upper():
            if self.p and self.p.online:
                if not self.p.loud:
                    self.log("SENDING:" + l.upper())
                self.p.send_now(l.upper())
            else:
                self.logError(_("Printer is not online."))
            return
        elif l[0] == "@":
            if self.p and self.p.online:
                if not self.p.loud:
                    self.log("SENDING:" + l[1:])
                self.p.send_now(l[1:])
            else:
                self.logError(_("Printer is not online."))
            return
        else:
            cmd.Cmd.default(self, l)
def do_exit(self, l):
if self.status.extruder_temp_target != 0:
self.log("Setting extruder temp to 0")
self.p.send_now("M104 S0.0")
if self.status.bed_enabled:
if self.status.bed_temp_target != 0:
self.log("Setting bed temp to 0")
self.p.send_now("M140 S0.0")
self.log("Disconnecting from printer...")
if self.p.printing and l is not "force":
self.log(_("Are you sure you want to exit while printing?\n\
(this will terminate the print)."))
if not self.confirm():
return
self.log(_("Exiting program. Goodbye!"))
self.p.disconnect()
self.kill()
sys.exit()
    def help_exit(self):
        """Help text for the exit command."""
        self.log(_("Disconnects from the printer and exits the program."))
# --------------------------------------------------------------
# Macro handling
# --------------------------------------------------------------
def complete_macro(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.macros.keys() if i.startswith(text)]
elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
return [i for i in ["/D", "/S"] + self.completenames(text) if i.startswith(text)]
else:
return []
    def hook_macro(self, l):
        """Temporary replacement for onecmd while a macro is being entered.

        Indented lines are appended to the macro body; the first
        non-indented line ends the macro and is re-dispatched normally.
        """
        l = l.rstrip()
        ls = l.lstrip()
        ws = l[:len(l) - len(ls)]  # just leading whitespace
        if len(ws) == 0:
            self.end_macro()
            # pass the unprocessed line to regular command processor to not require empty line in .pronsolerc
            return self.onecmd(l)
        self.cur_macro_def += l + "\n"
    def end_macro(self):
        """Finish macro entry: restore onecmd, compile and register the macro.

        Installs do_<name>/help_<name> on the class so the macro behaves like
        a built-in command, and persists it to the rc file unless we are
        currently replaying the rc file or command-line arguments.
        """
        if "onecmd" in self.__dict__: del self.onecmd  # remove override
        self.in_macro = False
        self.prompt = self.promptf()
        if self.cur_macro_def != "":
            self.macros[self.cur_macro_name] = self.cur_macro_def
            macro = self.compile_macro(self.cur_macro_name, self.cur_macro_def)
            # default args bind the current macro/name for each generated method
            setattr(self.__class__, "do_" + self.cur_macro_name, lambda self, largs, macro = macro: macro(self, *largs.split()))
            setattr(self.__class__, "help_" + self.cur_macro_name, lambda self, macro_name = self.cur_macro_name: self.subhelp_macro(macro_name))
            if not self.processing_rc:
                self.log("Macro '" + self.cur_macro_name + "' defined")
                # save it
                if not self.processing_args:
                    macro_key = "macro " + self.cur_macro_name
                    macro_def = macro_key
                    if "\n" in self.cur_macro_def:
                        macro_def += "\n"
                    else:
                        macro_def += " "
                    macro_def += self.cur_macro_def
                    self.save_in_rc(macro_key, macro_def)
        else:
            self.logError("Empty macro - cancelled")
        del self.cur_macro_name, self.cur_macro_def
def compile_macro_line(self, line):
line = line.rstrip()
ls = line.lstrip()
ws = line[:len(line) - len(ls)] # just leading whitespace
if ls == "" or ls.startswith('#'): return "" # no code
if ls.startswith('!'):
return ws + ls[1:] + "\n" # python mode
else:
ls = ls.replace('"', '\\"') # need to escape double quotes
ret = ws + 'self.precmd("' + ls + '".format(*arg))\n' # parametric command mode
return ret + ws + 'self.onecmd("' + ls + '".format(*arg))\n'
def compile_macro(self, macro_name, macro_def):
if macro_def.strip() == "":
self.logError("Empty macro - cancelled")
return
macro = None
namespace={}
pycode = "def macro(self,*arg):\n"
if "\n" not in macro_def.strip():
pycode += self.compile_macro_line(" " + macro_def.strip())
else:
lines = macro_def.split("\n")
for l in lines:
pycode += self.compile_macro_line(l)
exec(pycode,namespace)
try:
macro=namespace['macro']
except:
pass
return macro
    def start_macro(self, macro_name, prev_definition = "", suppress_instructions = False):
        """Begin interactive multi-line macro entry for *macro_name*.

        Overrides onecmd with hook_macro so subsequent indented lines are
        captured into the macro body until a non-indented line ends it.
        """
        if not self.processing_rc and not suppress_instructions:
            self.logError("Enter macro using indented lines, end with empty line")
        self.cur_macro_name = macro_name
        self.cur_macro_def = ""
        self.onecmd = self.hook_macro  # override onecmd temporarily
        self.in_macro = False
        self.prompt = self.promptf()
    def delete_macro(self, macro_name):
        """Remove a user macro: drop its do_ method, forget it, and erase it
        from the rc file (unless replaying rc/args)."""
        if macro_name in self.macros.keys():
            delattr(self.__class__, "do_" + macro_name)
            del self.macros[macro_name]
            self.log("Macro '" + macro_name + "' removed")
            if not self.processing_rc and not self.processing_args:
                # empty definition removes the entry from the rc file
                self.save_in_rc("macro " + macro_name, "")
        else:
            self.logError("Macro '" + macro_name + "' is not defined")
    def do_macro(self, args):
        """'macro' command dispatcher.

        No args: list macros.  'macro <name> /d' deletes, 'macro <name> /s'
        shows, 'macro <name> <def>' defines a one-liner, 'macro <name>'
        starts multi-line entry (pre-filled if the macro already exists).
        """
        if args.strip() == "":
            self.print_topics("User-defined macros", [str(k) for k in self.macros.keys()], 15, 80)
            return
        arglist = args.split(None, 1)
        macro_name = arglist[0]
        # refuse to shadow a real command with a macro
        if macro_name not in self.macros and hasattr(self.__class__, "do_" + macro_name):
            self.logError("Name '" + macro_name + "' is being used by built-in command")
            return
        if len(arglist) == 2:
            macro_def = arglist[1]
            if macro_def.lower() == "/d":
                self.delete_macro(macro_name)
                return
            if macro_def.lower() == "/s":
                self.subhelp_macro(macro_name)
                return
            # single-line definition: register it immediately
            self.cur_macro_def = macro_def
            self.cur_macro_name = macro_name
            self.end_macro()
            return
        if macro_name in self.macros:
            self.start_macro(macro_name, self.macros[macro_name])
        else:
            self.start_macro(macro_name)
    def help_macro(self):
        """Help text for the macro command."""
        self.log("Define single-line macro: macro <name> <definition>")
        self.log("Define multi-line macro:  macro <name>")
        self.log("Enter macro definition in indented lines. Use {0} .. {N} to substitute macro arguments")
        self.log("Enter python code, prefixed with ! Use arg[0] .. arg[N] to substitute macro arguments")
        self.log("Delete macro:             macro <name> /d")
        self.log("Show macro definition:    macro <name> /s")
        self.log("'macro' without arguments displays list of defined macros")
    def subhelp_macro(self, macro_name):
        """Print the stored definition of a single macro."""
        if macro_name in self.macros.keys():
            macro_def = self.macros[macro_name]
            if "\n" in macro_def:
                self.log("Macro '" + macro_name + "' defined as:")
                self.log(self.macros[macro_name] + "----------------")
            else:
                self.log("Macro '" + macro_name + "' defined as: '" + macro_def + "'")
        else:
            self.logError("Macro '" + macro_name + "' is not defined")
# --------------------------------------------------------------
# Configuration handling
# --------------------------------------------------------------
    def set(self, var, str):
        """Set settings variable *var* to *str* and persist it to the rc file.

        NOTE(review): the second parameter shadows the builtin str() within
        this method; renaming it would change the keyword-argument
        interface, so it is only flagged here.
        """
        try:
            t = type(getattr(self.settings, var))
            value = self.settings._set(var, str)
            if not self.processing_rc and not self.processing_args:
                self.save_in_rc("set " + var, "set %s %s" % (var, value))
        except AttributeError:
            logging.debug(_("Unknown variable '%s'") % var)
        except ValueError as ve:
            # validators attach from_validator with a user-facing message
            if hasattr(ve, "from_validator"):
                self.logError(_("Bad value %s for variable '%s': %s") % (str, var, ve.args[0]))
            else:
                self.logError(_("Bad value for variable '%s', expecting %s (%s)") % (var, repr(t)[1:-1], ve.args[0]))
def do_set(self, argl):
args = argl.split(None, 1)
if len(args) < 1:
for k in [kk for kk in dir(self.settings) if not kk.startswith("_")]:
self.log("%s = %s" % (k, str(getattr(self.settings, k))))
return
if len(args) < 2:
# Try getting the default value of the setting to check whether it
# actually exists
try:
getattr(self.settings, args[0])
except AttributeError:
logging.warning("Unknown variable '%s'" % args[0])
return
self.set(args[0], args[1])
    def help_set(self):
        """Help text for the set command."""
        self.log("Set variable:   set <variable> <value>")
        self.log("Show variable:  set <variable>")
        self.log("'set' without arguments displays all variables")
    def complete_set(self, text, line, begidx, endidx):
        """Tab completion for 'set': setting names, then candidate values."""
        if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
            return [i for i in dir(self.settings) if not i.startswith("_") and i.startswith(text)]
        elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
            # the settings object supplies per-variable value completions
            return [i for i in self.settings._tabcomplete(line.split()[1]) if i.startswith(text)]
        else:
            return []
def load_rc(self, rc_filename):
self.processing_rc = True
try:
rc = codecs.open(rc_filename, "r", "utf-8")
self.rc_filename = os.path.abspath(rc_filename)
for rc_cmd in rc:
if not rc_cmd.lstrip().startswith("#"):
self.onecmd(rc_cmd)
rc.close()
if hasattr(self, "cur_macro_def"):
self.end_macro()
self.rc_loaded = True
finally:
self.processing_rc = False
    def load_default_rc(self):
        """Load the default configuration file, creating it if missing."""
        # Check if a configuration file exists in an "old" location,
        # if not, use the "new" location provided by appdirs
        if os.path.exists(os.path.expanduser("~/.pronsolerc")):
            config = os.path.expanduser("~/.pronsolerc")
        elif os.path.exists(os.path.expanduser("~/printrunconf.ini")):
            config = os.path.expanduser("~/printrunconf.ini")
        else:
            if not os.path.exists(self.config_dir):
                os.makedirs(self.config_dir)
            if platform.system() == 'Windows':
                config_name = "printrunconf.ini"
            else:
                config_name = "pronsolerc"
            config = os.path.join(self.config_dir, config_name)
        # Load the default configuration file
        try:
            self.load_rc(config)
        except FileNotFoundError:
            # Make sure the filename is initialized,
            # and create the file if it doesn't exist
            self.rc_filename = config
            open(self.rc_filename, 'a').close()
    def save_in_rc(self, key, definition):
        """
        Saves or updates macro or other definitions in .pronsolerc
        key is prefix that determines what is being defined/updated (e.g. 'macro foo')
        definition is the full definition (that is written to file). (e.g. 'macro foo move x 10')
        Set key as empty string to just add (and not overwrite)
        Set definition as empty string to remove it from .pronsolerc
        To delete line from .pronsolerc, set key as the line contents, and definition as empty string
        Only first definition with given key is overwritten.
        Updates are made in the same file position.
        Additions are made to the end of the file.
        """
        rci, rco = None, None
        if definition != "" and not definition.endswith("\n"):
            definition += "\n"
        try:
            written = False
            if os.path.exists(self.rc_filename):
                if not os.path.exists(self.cache_dir):
                    os.makedirs(self.cache_dir)
                # work on copies in the cache dir, then atomically move back
                configcache = os.path.join(self.cache_dir, os.path.basename(self.rc_filename))
                configcachebak = configcache + "~bak"
                configcachenew = configcache + "~new"
                shutil.copy(self.rc_filename, configcachebak)
                rci = codecs.open(configcachebak, "r", "utf-8")
                rco = codecs.open(configcachenew, "w", "utf-8")
            # NOTE(review): if the rc file does not exist yet, rco stays
            # None and the final write raises; the except below then logs
            # "Saving failed" — confirm that path is intended.
            if rci is not None:
                overwriting = False
                for rc_cmd in rci:
                    l = rc_cmd.rstrip()
                    ls = l.lstrip()
                    ws = l[:len(l) - len(ls)]  # just leading whitespace
                    # a non-indented line ends the block being overwritten
                    if overwriting and len(ws) == 0:
                        overwriting = False
                    # first line whose prefix matches key (followed by
                    # whitespace) is replaced by the new definition
                    if not written and key != "" and rc_cmd.startswith(key) and (rc_cmd + "\n")[len(key)].isspace():
                        overwriting = True
                        written = True
                        rco.write(definition)
                    if not overwriting:
                        rco.write(rc_cmd)
                        if not rc_cmd.endswith("\n"): rco.write("\n")
            if not written:
                rco.write(definition)
            if rci is not None:
                rci.close()
            rco.close()
            shutil.move(configcachenew, self.rc_filename)
            # if definition != "":
            #     self.log("Saved '"+key+"' to '"+self.rc_filename+"'")
            # else:
            #     self.log("Removed '"+key+"' from '"+self.rc_filename+"'")
        except Exception as e:
            self.logError("Saving failed for ", key + ":", str(e))
        finally:
            del rci, rco
# --------------------------------------------------------------
# Configuration update callbacks
# --------------------------------------------------------------
    def update_build_dimensions(self, param, value):
        """Settings callback: reparse build dimensions and update home position."""
        self.build_dimensions_list = parse_build_dimensions(value)
        self.p.analyzer.home_pos = get_home_pos(self.build_dimensions_list)
    def update_tcp_streaming_mode(self, param, value):
        """Settings callback: propagate TCP streaming mode to the printcore."""
        self.p.tcp_streaming_mode = self.settings.tcp_streaming_mode
    def update_rpc_server(self, param, value):
        """Settings callback: start or stop the RPC server to match *value*."""
        if value:
            if self.rpc_server is None:
                self.rpc_server = ProntRPC(self)
        else:
            if self.rpc_server is not None:
                self.rpc_server.shutdown()
                self.rpc_server = None
# --------------------------------------------------------------
# Command line options handling
# --------------------------------------------------------------
    def add_cmdline_arguments(self, parser):
        """Register pronsole's command line options on an argparse parser."""
        parser.add_argument('-v', '--verbose', help = _("increase verbosity"), action = "store_true")
        parser.add_argument('-c', '--conf', '--config', help = _("load this file on startup instead of .pronsolerc ; you may chain config files, if so settings auto-save will use the last specified file"), action = "append", default = [])
        parser.add_argument('-e', '--execute', help = _("executes command after configuration/.pronsolerc is loaded ; macros/settings from these commands are not autosaved"), action = "append", default = [])
        parser.add_argument('filename', nargs='?', help = _("file to load"))
def process_cmdline_arguments(self, args):
if args.verbose:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
for config in args.conf:
try:
self.load_rc(config)
except EnvironmentError as err:
print(("ERROR: Unable to load configuration file: %s" %
str(err)[10:]))
sys.exit(1)
if not self.rc_loaded:
self.load_default_rc()
self.processing_args = True
for command in args.execute:
self.onecmd(command)
self.processing_args = False
self.update_rpc_server(None, self.settings.rpc_server)
if args.filename:
filename = args.filename.decode(locale.getpreferredencoding())
self.cmdline_filename_callback(filename)
    def cmdline_filename_callback(self, filename):
        """Handle the positional filename argument (overridden by GUIs)."""
        self.do_load(filename)
    def parse_cmdline(self, args):
        """Build the argument parser, parse *args* and apply them."""
        parser = argparse.ArgumentParser(description = 'Printrun 3D printer interface')
        self.add_cmdline_arguments(parser)
        # strip the -psn_* argument macOS adds when launched from Finder
        args = [arg for arg in args if not arg.startswith("-psn")]
        args = parser.parse_args(args = args)
        self.process_cmdline_arguments(args)
        setup_logging(sys.stdout, self.settings.log_path, True)
# --------------------------------------------------------------
# Printer connection handling
# --------------------------------------------------------------
    def connect_to_printer(self, port, baud, dtr):
        """Open the serial connection and start the status thread.

        Returns True on success, False on a connection error (which is
        logged with a human-readable explanation where possible).
        """
        try:
            self.p.connect(port, baud, dtr)
        except SerialException as e:
            # Currently, there is no errno, but it should be there in the future
            if e.errno == 2:
                self.logError(_("Error: You are trying to connect to a non-existing port."))
            elif e.errno == 8:
                self.logError(_("Error: You don't have permission to open %s.") % port)
                self.logError(_("You might need to add yourself to the dialout group."))
            else:
                self.logError(traceback.format_exc())
            # Kill the scope anyway
            return False
        except OSError as e:
            if e.errno == 2:
                self.logError(_("Error: You are trying to connect to a non-existing port."))
            else:
                self.logError(traceback.format_exc())
            return False
        self.statuscheck = True
        self.status_thread = threading.Thread(target = self.statuschecker)
        self.status_thread.start()
        return True
    def do_connect(self, l):
        """'connect [port [baud]]': connect to the printer.

        Falls back to the configured port (or the first detected one) and
        the configured baudrate (default 115200); persists any explicit
        choice back to the settings/rc file.
        """
        a = l.split()
        p = self.scanserial()
        port = self.settings.port
        if (port == "" or port not in p) and len(p) > 0:
            port = p[0]
        baud = self.settings.baudrate or 115200
        if len(a) > 0:
            port = a[0]
        if len(a) > 1:
            try:
                baud = int(a[1])
            except:
                self.log("Bad baud value '" + a[1] + "' ignored")
        if len(p) == 0 and not port:
            self.log("No serial ports detected - please specify a port")
            return
        if len(a) == 0:
            self.log("No port specified - connecting to %s at %dbps" % (port, baud))
        # remember explicit choices for next time
        if port != self.settings.port:
            self.settings.port = port
            self.save_in_rc("set port", "set port %s" % port)
        if baud != self.settings.baudrate:
            self.settings.baudrate = baud
            self.save_in_rc("set baudrate", "set baudrate %d" % baud)
        self.connect_to_printer(port, baud, self.settings.dtr)
    def help_connect(self):
        """Help text for connect, including currently detected ports."""
        self.log("Connect to printer")
        self.log("connect <port> <baudrate>")
        self.log("If port and baudrate are not specified, connects to first detected port at 115200bps")
        ports = self.scanserial()
        if ports:
            self.log("Available ports: ", " ".join(ports))
        else:
            self.log("No serial ports were automatically found.")
    def complete_connect(self, text, line, begidx, endidx):
        """Tab completion for connect: ports, then common baudrates."""
        if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
            return [i for i in self.scanserial() if i.startswith(text)]
        elif len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " "):
            return [i for i in ["2400", "9600", "19200", "38400", "57600", "115200"] if i.startswith(text)]
        else:
            return []
def scanserial(self):
"""scan for available ports. return a list of device names."""
baselist = []
if os.name == "nt":
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "HARDWARE\\DEVICEMAP\\SERIALCOMM")
i = 0
while(1):
baselist += [winreg.EnumValue(key, i)[1]]
i += 1
except:
pass
for g in ['/dev/ttyUSB*', '/dev/ttyACM*', "/dev/tty.*", "/dev/cu.*", "/dev/rfcomm*"]:
baselist += glob.glob(g)
return [p for p in baselist if self._bluetoothSerialFilter(p)]
def _bluetoothSerialFilter(self, serial):
return not ("Bluetooth" in serial or "FireFly" in serial)
    def online(self):
        """printcore callback: the printer reported itself online."""
        self.log("\rPrinter is now online")
        self.write_prompt()
    def do_disconnect(self, l):
        """'disconnect': close the printer connection."""
        self.p.disconnect()
    def help_disconnect(self):
        """Help text for disconnect."""
        self.log("Disconnects from the printer")
    def do_block_until_online(self, l):
        """Busy-wait (0.1s polls) until the printer reports online."""
        while not self.p.online:
            time.sleep(0.1)
def help_block_until_online(self, l):
self.log("Blocks until printer is online")
self.log("Warning: if something goes wrong, this can block pronsole forever")
# --------------------------------------------------------------
# Printer status monitoring
# --------------------------------------------------------------
    def statuschecker_inner(self, do_monitoring = True):
        """One iteration of the status-monitor loop.

        Disconnects after repeated write failures, sends periodic M27/M105
        polls while monitoring, then sleeps roughly monitor_interval with
        safeguards against system clock jumps.
        """
        if self.p.online:
            if self.p.writefailures >= 4:
                self.logError(_("Disconnecting after 4 failed writes."))
                self.status_thread = None
                self.disconnect()
                return
            if do_monitoring:
                if self.sdprinting and not self.paused:
                    self.p.send_now("M27")
                # temperature poll only every 10th cycle
                if self.m105_waitcycles % 10 == 0:
                    self.p.send_now("M105")
                self.m105_waitcycles += 1
        cur_time = time.time()
        wait_time = 0
        while time.time() < cur_time + self.monitor_interval - 0.25:
            if not self.statuscheck:
                break
            time.sleep(0.25)
            # Safeguard: if system time changes and goes back in the past,
            # we could get stuck almost forever
            wait_time += 0.25
            if wait_time > self.monitor_interval - 0.25:
                break
        # Always sleep at least a bit, if something goes wrong with the
        # system time we'll avoid freezing the whole app this way
        time.sleep(0.25)
    def statuschecker(self):
        """Status-monitor thread body: loop until kill() clears statuscheck."""
        while self.statuscheck:
            self.statuschecker_inner()
# --------------------------------------------------------------
# File loading handling
# --------------------------------------------------------------
    def do_load(self, filename):
        """'load <file>': load a gcode file (thin wrapper, overridable)."""
        self._do_load(filename)
    def _do_load(self, filename):
        """Validate *filename*, load it and report line count and estimate."""
        if not filename:
            self.logError("No file name given.")
            return
        self.log(_("Loading file: %s") % filename)
        if not os.path.exists(filename):
            self.logError("File not found!")
            return
        self.load_gcode(filename)
        self.log(_("Loaded %s, %d lines.") % (filename, len(self.fgcode)))
        self.log(_("Estimated duration: %d layers, %s") % self.fgcode.estimate_duration())
    def load_gcode(self, filename, layer_callback = None, gcode = None):
        """Parse *filename* into self.fgcode (a LightGCode unless *gcode*
        is supplied) and remember the filename.

        NOTE(review): the file object passed to prepare() is never closed
        here — presumably prepare() consumes/owns it; confirm.  The
        estimate_duration() result is discarded, apparently to prime its
        cached value.
        """
        if gcode is None:
            self.fgcode = gcoder.LightGCode(deferred = True)
        else:
            self.fgcode = gcode
        self.fgcode.prepare(open(filename, "r", encoding="utf-8"),
                            get_home_pos(self.build_dimensions_list),
                            layer_callback = layer_callback)
        self.fgcode.estimate_duration()
        self.filename = filename
    def complete_load(self, text, line, begidx, endidx):
        """Tab completion for load: directories and *.g* files."""
        s = line.split()
        if len(s) > 2:
            return []
        if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
            if len(s) > 1:
                # trim the prefix readline already has from each candidate
                return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
            else:
                return glob.glob("*/") + glob.glob("*.g*")
    def help_load(self):
        """Help text for load."""
        self.log("Loads a gcode file (with tab-completion)")
def do_slice(self, l):
l = l.split()
if len(l) == 0:
self.logError(_("No file name given."))
return
settings = 0
if l[0] == "set":
settings = 1
else:
self.log(_("Slicing file: %s") % l[0])
if not(os.path.exists(l[0])):
self.logError(_("File not found!"))
return
try:
if settings:
command = self.settings.slicecommandpath+self.settings.sliceoptscommand
self.log(_("Entering slicer settings: %s") % command)
run_command(command, blocking = True)
else:
command = self.settings.slicecommandpath+self.settings.slicecommand
stl_name = l[0]
gcode_name = stl_name.replace(".stl", "_export.gcode").replace(".STL", "_export.gcode")
run_command(command,
{"$s": stl_name,
"$o": gcode_name},
blocking = True)
self.log(_("Loading sliced file."))
self.do_load(l[0].replace(".stl", "_export.gcode"))
except Exception as e:
self.logError(_("Slicing failed: %s") % e)
    def complete_slice(self, text, line, begidx, endidx):
        """Tab completion for slice: directories and *.stl files."""
        s = line.split()
        if len(s) > 2:
            return []
        if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
            if len(s) > 1:
                # trim the prefix readline already has from each candidate
                return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.stl")]
            else:
                return glob.glob("*/") + glob.glob("*.stl")
    def help_slice(self):
        """Help text for slice."""
        self.log(_("Creates a gcode file from an stl model using the slicer (with tab-completion)"))
        self.log(_("slice filename.stl - create gcode file"))
        self.log(_("slice filename.stl view - create gcode file and view using skeiniso (if using skeinforge)"))
        self.log(_("slice set - adjust slicer settings"))
# --------------------------------------------------------------
# Print/upload handling
# --------------------------------------------------------------
    def do_upload(self, l):
        """'upload <file> <target>': stream a gcode file to the SD card.

        Wraps the upload in M28/M29, shows progress on stdout, and on
        interruption/error pauses, closes the SD file and cancels, warning
        that a partial file may remain on the card.
        """
        names = l.split()
        if len(names) == 2:
            filename = names[0]
            targetname = names[1]
        else:
            self.logError(_("Please enter target name in 8.3 format."))
            return
        if not self.p.online:
            self.logError(_("Not connected to printer."))
            return
        self._do_load(filename)
        self.log(_("Uploading as %s") % targetname)
        self.log(_("Uploading %s") % self.filename)
        # M28 opens the target file on the card for writing
        self.p.send_now("M28 " + targetname)
        self.log(_("Press Ctrl-C to interrupt upload."))
        self.p.startprint(self.fgcode)
        try:
            sys.stdout.write(_("Progress: ") + "00.0%")
            sys.stdout.flush()
            while self.p.printing:
                time.sleep(0.5)
                sys.stdout.write("\b\b\b\b\b%04.1f%%" % (100 * float(self.p.queueindex) / len(self.p.mainqueue),))
                sys.stdout.flush()
            # M29 closes the file on the card
            self.p.send_now("M29 " + targetname)
            time.sleep(0.2)
            self.p.clear = True
            self._do_ls(False)
            self.log("\b\b\b\b\b100%.")
            self.log(_("Upload completed. %s should now be on the card.") % targetname)
            return
        except (KeyboardInterrupt, Exception) as e:
            if isinstance(e, KeyboardInterrupt):
                self.logError(_("...interrupted!"))
            else:
                self.logError(_("Something wrong happened while uploading:")
                              + "\n" + traceback.format_exc())
            self.p.pause()
            self.p.send_now("M29 " + targetname)
            time.sleep(0.2)
            self.p.cancelprint()
            self.logError(_("A partial file named %s may have been written to the sd card.") % targetname)
    def complete_upload(self, text, line, begidx, endidx):
        """Tab completion for upload: directories and *.g* files."""
        s = line.split()
        if len(s) > 2:
            return []
        if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
            if len(s) > 1:
                # trim the prefix readline already has from each candidate
                return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
            else:
                return glob.glob("*/") + glob.glob("*.g*")
    def help_upload(self):
        """Help text for upload."""
        self.log("Uploads a gcode file to the sd card")
    def help_print(self):
        """Help text for print, mentioning the currently loaded file if any."""
        if not self.fgcode:
            self.log(_("Send a loaded gcode file to the printer. Load a file with the load command first."))
        else:
            self.log(_("Send a loaded gcode file to the printer. You have %s loaded right now.") % self.filename)
    def do_print(self, l):
        """'print': start printing the loaded gcode over the serial link."""
        if not self.fgcode:
            self.logError(_("No file loaded. Please use load first."))
            return
        if not self.p.online:
            self.logError(_("Not connected to printer."))
            return
        self.log(_("Printing %s") % self.filename)
        self.log(_("You can monitor the print with the monitor command."))
        self.sdprinting = False
        self.p.startprint(self.fgcode)
    def do_pause(self, l):
        """'pause': pause SD printing via M25, or the host-side print queue."""
        if self.sdprinting:
            self.p.send_now("M25")
        else:
            if not self.p.printing:
                self.logError(_("Not printing, cannot pause."))
                return
            self.p.pause()
        self.paused = True
    def help_pause(self):
        """Help text for pause."""
        self.log(_("Pauses a running print"))
    def pause(self, event = None):
        """Programmatic/event-handler entry point for pausing."""
        return self.do_pause(None)
def do_resume(self, l):
if not self.paused:
self.logError(_("Not paused, unable to resume. Start a print first."))
return
self.paused = False
if self.sdprinting:
self.p.send_now("M24")
return
else:
self.p.resume()
    def help_resume(self):
        """Help text for resume."""
        self.log(_("Resumes a paused print."))
def listfiles(self, line):
if "Begin file list" in line:
self.sdlisting = 1
elif "End file list" in line:
self.sdlisting = 0
self.recvlisteners.remove(self.listfiles)
if self.sdlisting_echo:
self.log(_("Files on SD card:"))
self.log("\n".join(self.sdfiles))
elif self.sdlisting:
self.sdfiles.append(re.sub(" \d+$","",line.strip().lower()))
    def _do_ls(self, echo):
        """Request the SD file list (M20); listfiles collects the reply."""
        # FIXME: this was 2, but I think it should rather be 0 as in do_upload
        self.sdlisting = 0
        self.sdlisting_echo = echo
        self.sdfiles = []
        self.recvlisteners.append(self.listfiles)
        self.p.send_now("M20")
    def do_ls(self, l):
        """'ls': list files on the SD card (requires an online printer)."""
        if not self.p.online:
            self.logError(_("Printer is not online. Please connect to it first."))
            return
        self._do_ls(True)
    def help_ls(self):
        """Help text for ls."""
        self.log(_("Lists files on the SD card"))
    def waitforsdresponse(self, l):
        """recvcb listener driving SD prints started with M23.

        Reacts to open/select/done messages and parses M27 progress
        ("SD printing byte cur/total") into self.percentdone.
        """
        if "file.open failed" in l:
            self.logError(_("Opening file failed."))
            self.recvlisteners.remove(self.waitforsdresponse)
            return
        if "File opened" in l:
            self.log(l)
        if "File selected" in l:
            self.log(_("Starting print"))
            self.p.send_now("M24")
            self.sdprinting = True
            # self.recvlisteners.remove(self.waitforsdresponse)
            return
        if "Done printing file" in l:
            self.log(l)
            self.sdprinting = False
            self.recvlisteners.remove(self.waitforsdresponse)
            return
        if "SD printing byte" in l:
            # M27 handler
            try:
                resp = l.split()
                vals = resp[-1].split("/")
                self.percentdone = 100.0 * int(vals[0]) / int(vals[1])
            except:
                # malformed progress line (or total of 0) — ignore
                pass
    def do_reset(self, l):
        """'reset': hardware-reset the printer via printcore."""
        self.p.reset()
    def help_reset(self):
        """Help text for reset."""
        self.log(_("Resets the printer."))
    def do_sdprint(self, l):
        """'sdprint <file>': start printing a file already on the SD card.

        Refreshes the card listing first, verifies the (lowercased) name
        exists, then selects it with M23; waitforsdresponse issues M24.
        """
        if not self.p.online:
            self.log(_("Printer is not online. Please connect to it first."))
            return
        self._do_ls(False)
        # wait for listfiles to finish collecting the listing
        while self.listfiles in self.recvlisteners:
            time.sleep(0.1)
        if l.lower() not in self.sdfiles:
            self.log(_("File is not present on card. Please upload it first."))
            return
        self.recvlisteners.append(self.waitforsdresponse)
        self.p.send_now("M23 " + l.lower())
        self.log(_("Printing file: %s from SD card.") % l.lower())
        self.log(_("Requesting SD print..."))
        time.sleep(1)
    def help_sdprint(self):
        """Help text for sdprint."""
        self.log(_("Print a file from the SD card. Tab completes with available file names."))
        self.log(_("sdprint filename.g"))
    def complete_sdprint(self, text, line, begidx, endidx):
        """Tab completion for sdprint: refresh the card listing if needed."""
        if not self.sdfiles and self.p.online:
            self._do_ls(False)
            while self.listfiles in self.recvlisteners:
                time.sleep(0.1)
        if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
            return [i for i in self.sdfiles if i.startswith(text)]
# --------------------------------------------------------------
# Printcore callbacks
# --------------------------------------------------------------
    def startcb(self, resuming = False):
        """printcore callback: a print started (or resumed).

        Records the start time, sets up the ETA estimator (host prints
        only), runs the optional user start_command, and tries to inhibit
        system sleep for the duration of the print.
        """
        self.starttime = time.time()
        if resuming:
            self.log(_("Print resumed at: %s") % format_time(self.starttime))
        else:
            self.log(_("Print started at: %s") % format_time(self.starttime))
        if not self.sdprinting:
            self.compute_eta = RemainingTimeEstimator(self.fgcode)
        else:
            self.compute_eta = None
        if self.settings.start_command:
            output = get_command_output(self.settings.start_command,
                                        {"$s": str(self.filename),
                                         "$t": format_time(time.time())})
            if output:
                self.log("Start command output:")
                self.log(output.rstrip())
        try:
            powerset_print_start(reason = "Preventing sleep during print")
        except:
            # best-effort: sleep inhibition is optional
            self.logError(_("Failed to set power settings:")
                          + "\n" + traceback.format_exc())
    def endcb(self):
        """printcore callback: the print queue finished (or was cancelled).

        Restores power settings, and if the whole queue was consumed logs
        the duration and updates filament accounting (total + per-spool).
        NOTE(review): as written, the final_command branch only runs when
        fgcode is None — confirm that is intended and that print_duration /
        filename are valid in that path.
        """
        try:
            powerset_print_stop()
        except:
            # best-effort: sleep inhibition is optional
            self.logError(_("Failed to set power settings:")
                          + "\n" + traceback.format_exc())
        if self.p.queueindex == 0:
            print_duration = int(time.time() - self.starttime + self.extra_print_time)
            self.log(_("Print ended at: %(end_time)s and took %(duration)s") % {"end_time": format_time(time.time()),
                                                                               "duration": format_duration(print_duration)})
            # Update total filament length used
            if self.fgcode is not None:
                new_total = self.settings.total_filament_used + self.fgcode.filament_length
                self.set("total_filament_used", new_total)
                # Update the length of filament in the spools
                self.spool_manager.refresh()
                if(len(self.fgcode.filament_length_multi)>1):
                    # multi-extruder: subtract per-extruder usage
                    for i in enumerate(self.fgcode.filament_length_multi):
                        if self.spool_manager.getSpoolName(i[0]) != None:
                            self.spool_manager.editLength(
                                -i[1], extruder = i[0])
                else:
                    if self.spool_manager.getSpoolName(0) != None:
                        self.spool_manager.editLength(
                            -self.fgcode.filament_length, extruder = 0)
            else:
                if not self.settings.final_command:
                    return
                output = get_command_output(self.settings.final_command,
                                            {"$s": str(self.filename),
                                             "$t": format_duration(print_duration)})
                if output:
                    self.log("Final command output:")
                    self.log(output.rstrip())
    def recvcb_report(self, l):
        """Classify a received line as position and/or temperature report.

        Returns a REPORT_* bitmask; REPORT_MANUAL is OR'd in when the
        report answers a user-issued M114/M105 (counted by precmd), so
        recvcb knows to echo it.
        """
        isreport = REPORT_NONE
        if "ok C:" in l or "Count" in l \
           or ("X:" in l and len(gcoder.m114_exp.findall(l)) == 6):
            self.posreport = l
            isreport = REPORT_POS
            if self.userm114 > 0:
                self.userm114 -= 1
                isreport |= REPORT_MANUAL
        if "ok T:" in l or tempreading_exp.findall(l):
            self.tempreadings = l
            isreport = REPORT_TEMP
            if self.userm105 > 0:
                self.userm105 -= 1
                isreport |= REPORT_MANUAL
            else:
                # automatic poll answered: restart the M105 cycle counter
                self.m105_waitcycles = 0
        return isreport
    def recvcb_actions(self, l):
        """Handle firmware-initiated lines: '!!' fatal errors and '// action:'
        commands (pause/resume/disconnect).

        Returns True when the line was consumed here, False otherwise.
        """
        if l.startswith("!!"):
            self.do_pause(None)
            msg = l.split(" ", 1)
            if len(msg) > 1 and self.silent is False: self.logError(msg[1].ljust(15))
            sys.stdout.write(self.promptf())
            sys.stdout.flush()
            return True
        elif l.startswith("//"):
            command = l.split(" ", 1)
            if len(command) > 1:
                command = command[1]
                self.log(_("Received command %s") % command)
                command = command.split(":")
                if len(command) == 2 and command[0] == "action":
                    command = command[1]
                    if command == "pause":
                        self.do_pause(None)
                        sys.stdout.write(self.promptf())
                        sys.stdout.flush()
                        return True
                    elif command == "resume":
                        self.do_resume(None)
                        sys.stdout.write(self.promptf())
                        sys.stdout.flush()
                        return True
                    elif command == "disconnect":
                        self.do_disconnect(None)
                        sys.stdout.write(self.promptf())
                        sys.stdout.flush()
                        return True
        return False
    def recvcb(self, l):
        """printcore receive callback: dispatch each received line.

        Feeds registered listeners, then action handling, then report
        classification; echoes lines that aren't suppressed (wait lines,
        ignore-pattern matches, automatic reports while not monitoring).
        """
        l = l.rstrip()
        for listener in self.recvlisteners:
            listener(l)
        if not self.recvcb_actions(l):
            report_type = self.recvcb_report(l)
            if report_type & REPORT_TEMP:
                self.status.update_tempreading(l)
            if not self.lineignorepattern.match(l) and l[:4] != "wait" and not self.sdlisting \
               and not self.monitoring and (report_type == REPORT_NONE or report_type & REPORT_MANUAL):
                if l[:5] == "echo:":
                    l = l[5:].lstrip()
                if self.silent is False: self.log("\r" + l.ljust(15))
                sys.stdout.write(self.promptf())
                sys.stdout.flush()
    def layer_change_cb(self, newlayer):
        """gcoder callback on layer change: track Z and feed the ETA model."""
        layerz = self.fgcode.all_layers[newlayer].z
        if layerz is not None:
            self.curlayer = layerz
        if self.compute_eta:
            secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
            self.compute_eta.update_layer(newlayer, secondselapsed)
    def get_eta(self):
        """Return (seconds_remaining, seconds_estimate, progress).

        For SD prints/uploads the estimate is extrapolated from the
        completed fraction; for host prints the RemainingTimeEstimator is
        queried; otherwise placeholder values are returned.  Note that
        *progress* is a fraction in the first case but a queue index in
        the second.
        """
        if self.sdprinting or self.uploading:
            if self.uploading:
                fractioncomplete = float(self.p.queueindex) / len(self.p.mainqueue)
            else:
                fractioncomplete = float(self.percentdone / 100.0)
            secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
            # Prevent division by zero
            secondsestimate = secondselapsed / max(fractioncomplete, 0.000001)
            secondsremain = secondsestimate - secondselapsed
            progress = fractioncomplete
        elif self.compute_eta is not None:
            secondselapsed = int(time.time() - self.starttime + self.extra_print_time)
            secondsremain, secondsestimate = self.compute_eta(self.p.queueindex, secondselapsed)
            progress = self.p.queueindex
        else:
            secondsremain, secondsestimate, progress = 1, 1, 0
        return secondsremain, secondsestimate, progress
    def do_eta(self, l):
        """'eta': print the estimated remaining time of the current print."""
        if not self.p.printing:
            self.logError(_("Printer is not currently printing. No ETA available."))
        else:
            secondsremain, secondsestimate, progress = self.get_eta()
            eta = _("Est: %s of %s remaining") % (format_duration(secondsremain),
                                                  format_duration(secondsestimate))
            self.log(eta.strip())
    def help_eta(self):
        """Help text for eta."""
        self.log(_("Displays estimated remaining print time."))
# --------------------------------------------------------------
# Temperature handling
# --------------------------------------------------------------
    def set_temp_preset(self, key, value):
        """Settings callback: refresh the pla/abs temperature preset tables."""
        if not key.startswith("bed"):
            self.temps["pla"] = str(self.settings.temperature_pla)
            self.temps["abs"] = str(self.settings.temperature_abs)
            self.log("Hotend temperature presets updated, pla:%s, abs:%s" % (self.temps["pla"], self.temps["abs"]))
        else:
            self.bedtemps["pla"] = str(self.settings.bedtemp_pla)
            self.bedtemps["abs"] = str(self.settings.bedtemp_abs)
            self.log("Bed temperature presets updated, pla:%s, abs:%s" % (self.bedtemps["pla"], self.bedtemps["abs"]))
    def tempcb(self, l):
        """recvcb listener that prints a temperature line in friendly form.

        NOTE(review): the plain str.replace calls also rewrite any other
        "T"/"B" characters in the line, not just the field labels.
        """
        if "T:" in l:
            self.log(l.strip().replace("T", "Hotend").replace("B", "Bed").replace("ok ", ""))
def do_gettemp(self, l):
if "dynamic" in l:
self.dynamic_temp = True
if self.p.online:
self.p.send_now("M105")
time.sleep(0.75)
if not self.status.bed_enabled:
self.log(_("Hotend: %s%s/%s%s") % (self.status.extruder_temp, DEG, self.status.extruder_temp_target, DEG))
else:
self.log(_("Hotend: %s%s/%s%s") % (self.status.extruder_temp, DEG, self.status.extruder_temp_target, DEG))
self.log(_("Bed: %s%s/%s%s") % (self.status.bed_temp, DEG, self.status.bed_temp_target, DEG))
    def help_gettemp(self):
        """Help text for gettemp."""
        self.log(_("Read the extruder and bed temperature."))
def do_settemp(self, l):
    """Set the hotend target temperature (sends M104).

    Accepts a number in degrees Celsius or one of the preset keywords
    (e.g. "pla", "abs"), which are substituted from self.temps.
    Temperatures above 250 require interactive confirmation.
    """
    # NOTE(review): this replaces the two-character sequence ", " with "." —
    # confirm whether plain "," (decimal comma) was intended instead.
    l = l.lower().replace(", ", ".")
    for i in self.temps.keys():
        l = l.replace(i, self.temps[i])
    try:
        f = float(l)
    except ValueError:  # was a bare except; only float() can raise here
        self.logError(_("You must enter a temperature."))
        return
    if f >= 0:
        if f > 250:
            self.log(_("%s is a high temperature to set your extruder to. Are you sure you want to do that?") % f)
            if not self.confirm():
                return
        if self.p.online:
            self.p.send_now("M104 S" + l)
            self.log(_("Setting hotend temperature to %s degrees Celsius.") % f)
        else:
            self.logError(_("Printer is not online."))
    else:
        self.logError(_("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0."))
def help_settemp(self):
    """Help text for the settemp command."""
    keywords = ", ".join([i + "(" + self.temps[i] + ")" for i in self.temps.keys()])
    for msg in (_("Sets the hotend temperature to the value entered."),
                _("Enter either a temperature in celsius or one of the following keywords"),
                keywords):
        self.log(msg)
def complete_settemp(self, text, line, begidx, endidx):
    """Tab-complete preset keywords for the settemp command."""
    words = line.split()
    on_first_arg = ((len(words) == 2 and not line.endswith(" "))
                    or (len(words) == 1 and line.endswith(" ")))
    if on_first_arg:
        return [name for name in self.temps.keys() if name.startswith(text)]
def do_bedtemp(self, l):
    """Set the bed target temperature (sends M140).

    Accepts a number in degrees Celsius or one of the preset keywords,
    which are substituted from self.bedtemps.
    """
    # NOTE(review): this replaces the two-character sequence ", " with "." —
    # confirm whether plain "," (decimal comma) was intended instead.
    l = l.lower().replace(", ", ".")
    for i in self.bedtemps.keys():
        l = l.replace(i, self.bedtemps[i])
    try:
        f = float(l)
    except ValueError:  # was a bare except; only float() can raise here
        self.logError(_("You must enter a temperature."))
        # Bugfix: previously fell through and additionally logged the
        # misleading negative-temperature error on a failed parse.
        return
    if f >= 0:
        if self.p.online:
            self.p.send_now("M140 S" + l)
            self.log(_("Setting bed temperature to %s degrees Celsius.") % f)
        else:
            self.logError(_("Printer is not online."))
    else:
        self.logError(_("You cannot set negative temperatures. To turn the bed off entirely, set its temperature to 0."))
def help_bedtemp(self):
    """Help text for the bedtemp command."""
    keywords = ", ".join([i + "(" + self.bedtemps[i] + ")" for i in self.bedtemps.keys()])
    for msg in (_("Sets the bed temperature to the value entered."),
                _("Enter either a temperature in celsius or one of the following keywords"),
                keywords):
        self.log(msg)
def complete_bedtemp(self, text, line, begidx, endidx):
    """Tab-complete preset keywords for the bedtemp command."""
    words = line.split()
    on_first_arg = ((len(words) == 2 and not line.endswith(" "))
                    or (len(words) == 1 and line.endswith(" ")))
    if on_first_arg:
        return [name for name in self.bedtemps.keys() if name.startswith(text)]
def do_monitor(self, l):
    """Periodically report temperatures and print progress until ^C.

    Optional argument: polling interval in seconds (default 5).
    """
    interval = 5
    if not self.p.online:
        self.logError(_("Printer is not online. Please connect to it first."))
        return
    if not (self.p.printing or self.sdprinting):
        self.logError(_("Printer is not printing. Please print something before monitoring."))
        return
    self.log(_("Monitoring printer, use ^C to interrupt."))
    if len(l):
        try:
            interval = float(l)
        except ValueError:  # was a bare except; keep the 5 s default
            self.logError(_("Invalid period given."))
    self.log(_("Updating values every %f seconds.") % (interval,))
    self.monitoring = 1
    prev_msg_len = 0
    try:
        while True:
            self.p.send_now("M105")
            if self.sdprinting:
                self.p.send_now("M27")
            time.sleep(interval)
            # Bugfix: the print may finish while we sleep, in which case
            # neither branch below runs and preface/progress were
            # previously left undefined (NameError on the next line).
            preface = ""
            progress = 0.0
            if self.p.printing:
                preface = _("Print progress: ")
                progress = 100 * float(self.p.queueindex) / len(self.p.mainqueue)
            elif self.sdprinting:
                preface = _("SD print progress: ")
                progress = self.percentdone
            prev_msg = preface + "%.1f%%" % progress
            if self.silent is False:
                # Pad with the previous message length to fully overwrite it.
                sys.stdout.write("\r" + prev_msg.ljust(prev_msg_len))
                sys.stdout.flush()
            prev_msg_len = len(prev_msg)
    except KeyboardInterrupt:
        if self.silent is False: self.log(_("Done monitoring."))
    self.monitoring = 0
def help_monitor(self):
    """Help text for the monitor command."""
    for msg in (_("Monitor a machine's temperatures and an SD print's status."),
                _("monitor - Reports temperature and SD print status (if SD printing) every 5 seconds"),
                _("monitor 2 - Reports temperature and SD print status (if SD printing) every 2 seconds")):
        self.log(msg)
# --------------------------------------------------------------
# Manual printer controls
# --------------------------------------------------------------
def do_tool(self, l):
    """Select the active tool/extruder (emits a Tn G-code)."""
    try:
        tool = int(l.lower().strip())
    except ValueError:  # was a bare except
        self.logError(_("You must specify the tool index as an integer."))
        # Bugfix: previously fell through and additionally logged the
        # misleading negative-tool error on a failed parse.
        return
    if tool >= 0:
        if self.p.online:
            self.p.send_now("T%d" % tool)
            self.log(_("Using tool %d.") % tool)
            self.current_tool = tool
        else:
            self.logError(_("Printer is not online."))
    else:
        self.logError(_("You cannot set negative tool numbers."))
def help_tool(self):
    """Help text for the tool command."""
    message = _("Switches to the specified tool (e.g. doing tool 1 will emit a T1 G-Code).")
    self.log(message)
def do_move(self, l):
    """Move an axis relatively: "move <axis> <distance> [feedrate]".

    Sends G91 / G0 / G90 so the printer is left in absolute mode.
    """
    args = l.split()
    if len(args) < 2:
        self.logError(_("No move specified."))
        return
    if self.p.printing:
        self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
        return
    if not self.p.online:
        self.logError(_("Printer is not online. Unable to move."))
        return
    # Default feedrate depends on the axis being moved.
    axis_feeds = {"x": self.settings.xy_feedrate,
                  "y": self.settings.xy_feedrate,
                  "z": self.settings.z_feedrate,
                  "e": self.settings.e_feedrate}
    axis = args[0].lower()
    if axis not in axis_feeds:
        self.logError(_("Unknown axis."))
        return
    feed = axis_feeds[axis]
    try:
        float(args[1])  # validate that the distance parses as a number
    except ValueError:  # was a bare except
        self.logError(_("Invalid distance"))
        return
    if len(args) > 2:  # explicit length guard instead of swallowing IndexError
        try:
            feed = int(args[2])
        except ValueError:
            pass  # non-numeric feedrate: keep the axis default
    self.p.send_now("G91")
    self.p.send_now("G0 " + axis.upper() + str(args[1]) + " F" + str(feed))
    self.p.send_now("G90")
def help_move(self):
    """Help text for the move command."""
    for msg in (_("Move an axis. Specify the name of the axis and the amount. "),
                _("move X 10 will move the X axis forward by 10mm at %s mm/min (default XY speed)") % self.settings.xy_feedrate,
                _("move Y 10 5000 will move the Y axis forward by 10mm at 5000mm/min"),
                _("move Z -1 will move the Z axis down by 1mm at %s mm/min (default Z speed)") % self.settings.z_feedrate,
                _("Common amounts are in the tabcomplete list.")):
        self.log(msg)
def complete_move(self, text, line, begidx, endidx):
    """Tab-complete axis names for arg 1, common distances for arg 2."""
    words = line.split()
    ends_with_space = line.endswith(" ")
    if (len(words) == 2 and not ends_with_space) or (len(words) == 1 and ends_with_space):
        return [axis for axis in ["X ", "Y ", "Z ", "E "] if axis.lower().startswith(text)]
    if len(words) == 3 or (len(words) == 2 and ends_with_space):
        base = words[-1]
        # A leading "-" is already typed, so strip it from the suggestions.
        skip = 1 if base.startswith("-") else 0
        if ends_with_space:
            base = ""
        amounts = ["-100", "-10", "-1", "-0.1", "100", "10", "1", "0.1",
                   "-50", "-5", "-0.5", "50", "5", "0.5",
                   "-200", "-20", "-2", "-0.2", "200", "20", "2", "0.2"]
        return [amount[skip:] for amount in amounts if amount.startswith(base)]
    return []
def do_extrude(self, l, override = None, overridefeed = 300):
    """Extrude filament: "extrude [length_mm] [feedrate]".

    Negative lengths reverse. The override/overridefeed parameters let
    other commands (e.g. reverse) reuse this validation logic.
    """
    length = self.settings.default_extrusion  # default extrusion length
    feed = self.settings.e_feedrate  # default speed
    if not self.p.online:
        self.logError("Printer is not online. Unable to extrude.")
        return
    if self.p.printing:
        self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
        return
    ls = l.split()
    if len(ls):
        try:
            length = float(ls[0])
        except ValueError:  # was a bare except; best-effort: keep default
            self.logError(_("Invalid length given."))
    if len(ls) > 1:
        try:
            feed = int(ls[1])
        except ValueError:  # was a bare except; best-effort: keep default
            self.logError(_("Invalid speed given."))
    if override is not None:
        length = override
        feed = overridefeed
    self.do_extrude_final(length, feed)
def do_extrude_final(self, length, feed):
    """Send the relative extrusion move and update spool accounting.

    length: millimeters of filament (negative reverses).
    feed: feedrate in mm/min.
    """
    if length > 0:
        self.log(_("Extruding %fmm of filament.") % (length,))
    elif length < 0:
        self.log(_("Reversing %fmm of filament.") % (-length,))
    else:
        self.log(_("Length is 0, not doing anything."))
    # Relative move, then restore absolute positioning.
    self.p.send_now("G91")
    self.p.send_now("G1 E" + str(length) + " F" + str(feed))
    self.p.send_now("G90")
    # Update the length of filament in the current spool
    self.spool_manager.refresh()
    # Idiom fix: "is not None" instead of "!= None".
    if self.spool_manager.getSpoolName(self.current_tool) is not None:
        self.spool_manager.editLength(-length,
                                      extruder = self.current_tool)
def help_extrude(self):
    """Help text for the extrude command."""
    for msg in (_("Extrudes a length of filament, 5mm by default, or the number of mm given as a parameter"),
                _("extrude - extrudes 5mm of filament at 300mm/min (5mm/s)"),
                _("extrude 20 - extrudes 20mm of filament at 300mm/min (5mm/s)"),
                _("extrude -5 - REVERSES 5mm of filament at 300mm/min (5mm/s)"),
                _("extrude 10 210 - extrudes 10mm of filament at 210mm/min (3.5mm/s)")):
        self.log(msg)
def do_reverse(self, l):
    """Reverse filament: "reverse [length_mm] [feedrate]".

    Parses the same arguments as extrude, then delegates to do_extrude
    with the length negated.
    """
    length = self.settings.default_extrusion  # default extrusion length
    feed = self.settings.e_feedrate  # default speed
    if not self.p.online:
        self.logError(_("Printer is not online. Unable to reverse."))
        return
    if self.p.printing:
        self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
        return
    ls = l.split()
    if len(ls):
        try:
            length = float(ls[0])
        except ValueError:  # was a bare except; best-effort: keep default
            self.logError(_("Invalid length given."))
    if len(ls) > 1:
        try:
            feed = int(ls[1])
        except ValueError:  # was a bare except; best-effort: keep default
            self.logError(_("Invalid speed given."))
    self.do_extrude("", -length, feed)
def help_reverse(self):
    """Help text for the reverse command."""
    for msg in (_("Reverses the extruder, 5mm by default, or the number of mm given as a parameter"),
                _("reverse - reverses 5mm of filament at 300mm/min (5mm/s)"),
                _("reverse 20 - reverses 20mm of filament at 300mm/min (5mm/s)"),
                _("reverse 10 210 - extrudes 10mm of filament at 210mm/min (3.5mm/s)"),
                _("reverse -5 - EXTRUDES 5mm of filament at 300mm/min (5mm/s)")):
        self.log(msg)
def do_home(self, l):
    """Home axes: x/y/z via G28, e zeroed via G92; no argument homes all."""
    if not self.p.online:
        self.logError(_("Printer is not online. Unable to move."))
        return
    if self.p.printing:
        self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
        return
    axes = l.lower()
    if "x" in axes:
        self.p.send_now("G28 X0")
    if "y" in axes:
        self.p.send_now("G28 Y0")
    if "z" in axes:
        self.p.send_now("G28 Z0")
    if "e" in axes:
        self.p.send_now("G92 E0")
    if not l:
        self.p.send_now("G28")
        self.p.send_now("G92 E0")
def help_home(self):
    """Help text for the home command."""
    for msg in (_("Homes the printer"),
                _("home - homes all axes and zeroes the extruder(Using G28 and G92)"),
                _("home xy - homes x and y axes (Using G28)"),
                _("home z - homes z axis only (Using G28)"),
                _("home e - set extruder position to zero (Using G92)"),
                _("home xyze - homes all axes and zeroes the extruder (Using G28 and G92)")):
        self.log(msg)
def do_off(self, l):
    """Console command wrapper: turn everything on the printer off."""
    self.off()
def off(self, ignore = None):
    """Shut down motors, heaters, fan and PSU, pausing any active print."""
    if not self.p.online:
        self.logError(_("Printer is not online. Unable to turn it off."))
        return
    if self.p.printing:
        self.pause(None)
    # (log comment, G-code) pairs, issued in the original order.
    for comment, gcode in ((_("; Motors off"), "M84"),
                           (_("; Extruder off"), "M104 S0"),
                           (_("; Heatbed off"), "M140 S0"),
                           (_("; Fan off"), "M107"),
                           (_("; Power supply off"), "M81")):
        self.log(comment)
        self.onecmd(gcode)
def help_off(self):
    """Help text for the off command."""
    message = _("Turns off everything on the printer")
    self.log(message)
# --------------------------------------------------------------
# Host commands handling
# --------------------------------------------------------------
def process_host_command(self, command):
    """Override host command handling"""
    stripped = command.lstrip()
    if not stripped.startswith(";@"):
        return
    hostcmd = stripped[2:]
    self.log(_("G-Code calling host command \"%s\"") % hostcmd)
    self.onecmd(hostcmd)
def do_run_script(self, l):
    """Run an external script; $s expands to the current g-code filename."""
    proc = run_command(l, {"$s": str(self.filename)},
                       stdout = subprocess.PIPE, universal_newlines = True)
    for output_line in proc.stdout.readlines():
        self.log("<< " + output_line.strip())
def help_run_script(self):
    """Help text for the run_script command."""
    message = _("Runs a custom script. Current gcode filename can be given using $s token.")
    self.log(message)
def do_run_gcode_script(self, l):
    """Run an external script and execute each line of its output as a command."""
    proc = run_command(l, {"$s": str(self.filename)},
                       stdout = subprocess.PIPE, universal_newlines = True)
    for output_line in proc.stdout.readlines():
        self.onecmd(output_line.strip())
def help_run_gcode_script(self):
    """Help text for the run_gcode_script command."""
    message = _("Runs a custom script which output gcode which will in turn be executed. Current gcode filename can be given using $s token.")
    self.log(message)
|
hg42/Printrun
|
printrun/pronsole.py
|
Python
|
gpl-3.0
| 71,843
|
[
"Firefly"
] |
5e7d28b31856d05d93e0fd11df1d0cf34a7a122b0310929d9facacb861564009
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.