gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import pandas as pd
import astropy.coordinates as cd
from ctc_observ import *
from ctc_arrays import *
#from scipy.interpolate import interp1d
from scipy.interpolate import pchip
from statsmodels.nonparametric.smoothers_lowess import lowess
#load HSC catalog first
#hsc = pd.read_csv('/cuc36/xxl/multiwavelength/HSC/wide.csv')
def pdf_sep_gen(sep_arcsec,xposerr,opterr,pdf='Rayleigh'):
    '''
    Probability density of the angular separation between an X-ray source
    and a candidate counterpart, given the combined positional uncertainty.

    Parameters
    ----------
    sep_arcsec : separation in arcsec (scalar or array)
    xposerr : X-ray positional error in arcsec
    opterr : optical positional error in arcsec
    pdf : 'Gaussian' for a 2-d normal profile; any other value selects
          the Rayleigh-like profile.
    '''
    sigma2 = opterr**2 + xposerr**2
    if pdf == 'Gaussian':
        # 2-d normal: exp(-r^2 / (2 sigma^2)) / (2 pi sigma^2)
        two_sigma2 = 2*sigma2
        return np.exp(-sep_arcsec**2/two_sigma2)/(np.pi*two_sigma2)
    # Rayleigh-like profile; note there is deliberately no factor of 2
    # in the exponent here, matching the original implementation.
    return (sep_arcsec/sigma2)*np.exp(-sep_arcsec**2/sigma2)
def MLE(match):
    '''
    Assign a reliability value Rc to every candidate in the match table.

    An X-ray source with a single candidate gets Rc = 1; for sources with
    several candidates each candidate gets Rc = LR / sum(LR) over its group.

    Parameters
    ----------
    match : DataFrame with at least 'matchid', 'xid' and 'LR' columns.

    Returns
    -------
    DataFrame sorted by 'matchid' with the added 'Rc' column.
    '''
    # drop leftover columns created by earlier reset_index() calls
    if 'level_0' in match.columns:
        match = match.drop('level_0',axis=1)
    if 'index' in match.columns:
        match = match.drop('index',axis=1)
    tmp = match.set_index('matchid')
    grp = tmp.groupby('xid')
    #select sources with only one match
    onematch = grp.filter(lambda x: len(x) == 1).copy()
    onematch['Rc'] = 1.0
    #these are sources with multiple matches
    # NOTE(review): np.delete removes by *position* while onematch.index
    # holds matchid labels -- equivalent only when matchid is the 0..N-1
    # range produced by calc_LR; confirm for other inputs.
    multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:]
    #onematch.reset_index(inplace=True)
    # number of X-ray sources that have more than one candidate
    nmx = match.xid.nunique() - onematch.xid.nunique()
    if 'level_0' in onematch.columns:
        onematch = onematch.drop('level_0',axis=1)
    if 'index' in onematch.columns:
        onematch = onematch.drop('index',axis=1)
    if 'level_0' in multimatch.columns:
        multimatch = multimatch.drop('level_0',axis=1)
    if 'index' in multimatch.columns:
        multimatch = multimatch.drop('index',axis=1)
    if nmx == 0:
        allmatch = onematch
    elif nmx == 1:
        # exactly one multi-candidate group: normalize LR within it
        multimatch['Rc'] = multimatch.LR / multimatch.LR.sum()
        allmatch = pd.concat([onematch,multimatch],ignore_index=False)
    else:
        #regroup, and for each group only keep sources with LR larger than LRfrac*max(LR)
        grp = multimatch.groupby('xid')
        multiRc = grp.apply(lambda df: df.LR / df.LR.sum()).values
        multimatch['Rc'] = multiRc
        allmatch = pd.concat([onematch,multimatch],ignore_index=False)
    return allmatch.reset_index().sort_values(by='matchid').reset_index().drop('index',axis=1)
def getbkgcat(xcat,catopt,optdf,r_in = 7., r_out=35.,magonly=False,\
    nmagbin=15, magname = 'imag_psf', ora='ra',odec='dec',corr_glob=False,globonly=False):
    '''
    Select ``background'' optical sources -- those within r_out arcsec of an
    X-ray source but with no X-ray source within r_in -- and derive their
    surface density as a function of magnitude, n(m).

    Parameters
    ----------
    xcat, catopt : astropy coordinate objects for the X-ray / optical catalogs
    optdf : optical-catalog DataFrame; assumed row-aligned with catopt and
            carrying the default 0..N-1 integer index
    r_in, r_out : inner/outer annulus radii in arcsec
    magonly : if True, return only the magnitudes of the background sources
    corr_glob / globonly : replace (or return) the faint-end (>23 mag)
            densities with a global field estimate (Brusa et al. 2007)

    Returns
    -------
    nm, rmagbin : background surface density per magnitude bin
                  (arcsec^-2 per X-ray source) and the bin edges.

    NOTE(review): the original docstring contained a trapped (never
    executed) sanity check that len(catopt) == len(optdf); that
    consistency is still assumed here -- confirm upstream.
    '''
    idhsc,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_in*u.arcsec)
    #Excluding each optical source with an x-ray source within r_in
    itmp=np.arange(len(catopt))
    itmp[np.unique(idhsc)]=-1
    #indicies for optical sources with **NO** X-ray counterparts within r_in
    # NOTE(review): this equality test keeps positions whose index value was
    # not flagged -1 above; it relies on optdf.index being the default
    # RangeIndex -- confirm callers always pass it that way.
    idhsc_ext=np.where(np.equal(optdf.index.values, itmp))[0]
    #Now search for X-ray and optical matches within r_out
    idhsc_in,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_out*u.arcsec)
    idhsc_in = np.unique(idhsc_in)
    #Cross-correlated the ``no r_in list'', and the ``r_out list''
    #This will create a list of ``background optical sources''
    idhsc_bkgd=np.intersect1d(idhsc_ext,idhsc_in)
    hsc_bkgd=optdf.loc[idhsc_bkgd].copy()
    hsc_bkgd.reset_index(inplace=True)
    if magonly:
        return hsc_bkgd[magname].values
    else:
        out,rmagbin=pd.cut(hsc_bkgd[magname].values,bins=nmagbin,retbins=True)
        groups=hsc_bkgd.groupby(out)
        #number density = total number of sources divided by the area of annulus
        N_xmm=len(xcat) #number of unique XMM sources
        N_bkgd=len(hsc_bkgd)
        nm=groups[ora].count().values/(np.pi*(r_out**2-r_in**2)*N_xmm)
        if corr_glob | globonly:
            #According to Brusa et al. 2007, at faint magnitudes
            #nm is not correct and should use a global one.
            out,rmagbin_global=pd.cut(optdf[magname].values,bins=nmagbin,retbins=True)
            groups=optdf.groupby(out)
            rmag_global = binvalue(rmagbin_global)
            # footprint area in arcsec^2 from a simple RA/Dec bounding box
            # (no cos(dec) correction applied)
            area = \
                (optdf[ora].max() - optdf[ora].min())*(optdf[odec].max() - optdf[odec].min())*3600**2
            nm_global = groups[ora].count().values/area
            # bins fainter than 23 mag are replaced by the global density
            iglobal = np.where(rmagbin > 23.)[0][:-1]
            if corr_glob:
                nm[iglobal] = nm_global[iglobal]
            elif globonly:
                return nm_global, rmagbin
    return nm,rmagbin
#def getqm(match,rmagbin, Q, NX, nm, r0=2.5):
def getqm(match,rmagbin, Q, nm, NX, r0=3.0):
    '''
    Estimate q(m): the expected magnitude distribution of the true
    optical counterparts, normalized so that it sums to Q, then
    smoothed with a LOWESS fit on a 5x-oversampled magnitude grid.

    Returns
    -------
    (smoothed mag grid, smoothed q(m), Q, raw per-bin q(m))
    '''
    binned = match.groupby(pd.cut(match['rmag'].values,bins=rmagbin))
    counts = binned.rax.count().values# - np.pi*r0**2*NX*nm
    # guard against negative bins (only possible if the background
    # subtraction above is re-enabled); floor at 10% of the expected
    # background within r0
    neg = np.where(counts < 0.)
    counts[neg] = 0.1*nm[neg]*np.pi*NX*r0**2
    qm_raw = counts*Q/np.sum(counts)
    mag_pieces = []
    qm_pieces = []
    for k, lo in enumerate(rmagbin[:-1]):
        # sample each magnitude bin at 5 points, constant q(m) per bin
        mag_pieces.append(np.linspace(lo, rmagbin[k+1], 5))
        qm_pieces.append(np.zeros(5) + qm_raw[k])
    maggrid = np.concatenate(mag_pieces) if mag_pieces else np.array([])
    qmgrid = np.concatenate(qm_pieces) if qm_pieces else np.array([])
    smoothed = lowess(qmgrid, maggrid, frac=0.2)
    return smoothed[:,0], smoothed[:,1], Q, qm_raw#, real_m
def calc_RCMAX(match, quntarr, Q,NX,LRfrac=0.2,first=False):
    '''
    Compute reliability (R) and completeness (C) for a single LR
    threshold value.

    Parameters
    ----------
    match : candidate DataFrame with 'xid' and 'LR' columns
    quntarr : the LR threshold (scalar)
    Q : fraction of X-ray sources with a true counterpart
    NX : number of X-ray sources
    LRfrac : within a multi-candidate group, candidates with
             LR < LRfrac * max(LR) are dropped

    Returns
    -------
    allmatch, R, C, LRth : thresholded matches with their reliability
    column 'Rc', mean reliability R, completeness C = sum(Rc)/NX, and
    the threshold used.
    '''
    if type(NX) != float:
        NX = float(NX)
    LRth = quntarr
    # keep only candidates above the LR threshold
    tmp = match[match.LR > LRth].copy().reset_index().drop('index',axis=1)
    grp = tmp.groupby('xid')
    #select sources with only one match
    onematch = grp.filter(lambda x: len(x) == 1).copy()
    # reliability of a lone candidate: LR / (LR + 1 - Q)
    onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
    #these are sources with multiple matches
    # (positional np.delete is safe here: tmp was just reset to a 0..N-1 index)
    multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:].reset_index().drop('index',axis=1)
    onematch.reset_index(inplace=True)
    # number of X-ray sources with more than one surviving candidate
    nmx = tmp.xid.nunique() - onematch.xid.nunique()
    if nmx == 0:
        allmatch = onematch
    elif nmx == 1:
        multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum() + (1-Q))
        allmatch = pd.concat([onematch,multimatch],ignore_index=False)
    else:
        #regroup, and for each group only keep sources with LR larger than LRfrac*max(LR)
        grp = multimatch.groupby('xid')
        igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
        multimatch = multimatch[igood].reset_index().drop('index',axis=1)
        grp = multimatch.groupby('xid')
        multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
        multimatch['Rc'] = multiRc
        allmatch = pd.concat([onematch,multimatch],ignore_index=False)
    R = allmatch.Rc.mean()
    C = allmatch.Rc.sum()/NX
    return allmatch, R, C, LRth
def calc_RC(match, quntarr, Q,NX,LRfrac=0.2,first=False):
    '''
    R and C for an array of LRthreshold values
    if first==True,
    quantarr should be between 0 to 1
    and the LRthreshold values to be looped through would be
    match.LR.quantile(quntarr)
    If quntarr is an array with length > 1 (and values between 0 to 1)
    This subroutine finds the LRth value that maximize C and R.

    Returns (R array, C array, LRth array), one entry per threshold.
    '''
    if type(NX) != float:
        NX = float(NX)
    if first:
        #if it's the first time, loop through the LR values in quantile arrays
        #return R, C, LRth
        LRth = match.LR.quantile(quntarr).values
        print('first -- ', 'min/max LRth are ', np.min(LRth), np.max(LRth))
    else:
        LRth = quntarr
    R = np.zeros(len(quntarr))
    C = np.zeros(len(quntarr))
    for index, lrthiter in enumerate(LRth):
        # same per-threshold logic as calc_RCMAX, accumulated over LRth
        tmp = match[match.LR > lrthiter].copy().reset_index().drop('index',axis=1)
        grp = tmp.groupby('xid')
        onematch = grp.filter(lambda x: len(x) == 1).copy() #select sources with only one match
        #onematch.reset_index(inplace=True)
        onematch['Rc'] = onematch.LR.values/(onematch.LR.values + 1 - Q)
        #these are sources with multiple matches
        multimatch = tmp.loc[np.delete(tmp.index.values, onematch.index.values),:].reset_index().drop('index',axis=1)
        onematch.reset_index(inplace=True)
        nmx = tmp.xid.nunique() - onematch.xid.nunique()
        if nmx == 0:
            #no x-ray sources have multiple good counterparts
            allmatch = onematch
        elif nmx == 1:
            #only one x-ray sources have multiple good counterparts
            # NOTE(review): this branch uses ignore_index=True while the
            # branch below uses False -- inconsistent but only the Rc
            # column is read afterwards; confirm intent.
            multimatch['Rc'] = multimatch.LR/(multimatch.LR.sum() + (1-Q))
            allmatch = pd.concat([onematch,multimatch],ignore_index=True)
        else:
            grp = multimatch.groupby('xid')
            igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
            #dropping sources with LR < LRfrac*LRmax
            multimatch = multimatch[igood].reset_index().drop('index',axis=1)
            #regroup
            grp = multimatch.groupby('xid')
            multiRc = grp.apply(lambda df: df.LR/(df.LR.sum()+(1-Q))).values
            multimatch['Rc'] = multiRc
            allmatch = pd.concat([onematch,multimatch],ignore_index=False)
        R[index] = allmatch.Rc.mean()
        C[index] = allmatch.Rc.sum()/NX
    return R, C, LRth
def calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, NX,rsearch=5.0,\
    lth = None, LRfrac=0.2,lrmax=None,\
    magname = 'imag_psf',xerrname='xposerr',
    xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',
    opticalid = 'hscid',opterr = 0.1,pdf='Rayleigh',first=False):
    '''
    Compute LR = f(r)*q(m)/n(m) for every optical source within rsearch
    arcsec of each X-ray source, then find the LR threshold maximizing
    reliability + completeness.

    Parameters
    ----------
    xdf, optdf : X-ray / optical DataFrames (optdf indexed by the optical id)
    xcat, catopt : matching astropy coordinate objects
    nm, qm : background / counterpart magnitude distributions on grid rmag
    Q, NX : counterpart fraction and number of X-ray sources
    lth : array of candidate LR thresholds; guessed from NX if None
    lrmax : if given, skip the threshold search and use this value

    Returns
    -------
    match, goodmatch, R, C, lthmax, LRth
        Six values in every branch.  (Bugfix: the all-unique shortcut
        used to return only five values, which broke every caller that
        unpacks six.)
    '''
    if first:
        print('first calc_LR')
    idxmm, idhsc, d2d , d3d=catopt.search_around_sky(xcat,rsearch*u.arcsec)
    match = pd.DataFrame({'xid':idxmm,'optid':idhsc,'dist':d2d.arcsec,\
        'rmag':optdf.loc[idhsc,magname].values,'xposerr':xdf.loc[idxmm,xerrname],\
        'raopt':optdf.loc[idhsc,ora].values,'decopt':optdf.loc[idhsc,odec].values,\
        'rax':xdf.loc[idxmm,xra].values,'decx':xdf.loc[idxmm,xdec].values,\
        'optname':optdf.loc[idhsc,opticalid].values})
    #print('match len = ',len(match), 'xid nunique = ', match.xid.nunique())
    # positional term f(r), and monotone (PCHIP) interpolants of n(m), q(m)
    fr = pdf_sep_gen(match.dist.values,match.xposerr.values,opterr,pdf=pdf)
    n_m = pchip(rmag, nm)#, bounds_error=False,fill_value='extrapolate')
    q_m = pchip(rmag, qm)#, bounds_error=False,fill_value='extrapolate')
    fnm = n_m(match.rmag.values)
    fqm = q_m(match.rmag.values)
    # clip unphysical (negative) interpolated densities to a tiny floor
    fqm[np.where(fqm < 0.)] = 1e-8
    fnm[np.where(fnm < 0.)] = 1e-8
    LR = fr*fqm/fnm
    match['LR'] = pd.Series(LR, index=match.index)
    match['matchid'] = pd.Series(range(len(match)),index=match.index)
    match['raoff'] = pd.Series((match.rax - match.raopt)*3600., index=match.index)
    match['decoff'] = pd.Series((match.decx - match.decopt)*3600., index=match.index)
    #several situations :
    #1. all matches are unique, no further action is required.
    if match.xid.nunique() - len(match) == 0:
        # bugfix: return six values like every other branch so callers can
        # always unpack (match, goodmatch, R, C, lthmax, LRth)
        lrmin = match.LR.min()
        return match, match, 1.0, 1.0, lrmin, lrmin
    else:
        if lth is None:
            #If the array of lth values is not provided,
            #guess it by assuming that only NX sources would be reliable,
            #so loop through the LR values around that LR quantile
            #qcenter = match.LR.quantile(float(NX)/len(match))
            qcenter = 1. - 1.5*float(NX)/len(match)
            if qcenter < 0.:
                qcenter = 0.1
            # bugfix: np.linspace requires an integer num (a float raised
            # TypeError on NumPy >= 1.18)
            lth = np.linspace(0.5*qcenter,
                              min([2.0*qcenter, 0.95]), 30)
            #print(lth)
        if lrmax is None:
            #first pass: scan thresholds and keep the one maximizing R+C
            R, C, LRth = calc_RC(match, lth, Q, NX,LRfrac=LRfrac,first=first)
            lthmax = LRth[np.argmax((R+C))]
            if not np.isscalar(lthmax):
                if len(lthmax) >= 1:
                    lthmax = lthmax[0]
            goodmatch, R, C, LRth = calc_RCMAX(match,lthmax, Q, len(xcat),LRfrac=LRfrac)
            return match, goodmatch, R, C, lthmax, LRth
        else:
            goodmatch, R, C, LRth = calc_RCMAX(match,lrmax, Q, len(xcat),LRfrac=LRfrac)
            return match, goodmatch, R, C, lrmax, LRth
def likmatch(xdf, xcat, optdf_in, catopt, radecerr = False, r0=2.5,rsearch=5.0, \
    r_in = 7., r_out=35., lth = None,LRfrac=0.5,lrmax=None,\
    nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
    xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
    opticalid = 'hscid',opterr=0.1,pdf='Rayleigh',verbose=True):
    '''
    Likelihood ratio based source matching.
    Currently is based on HSC public data release 1
    (wide survey) in the XMM-LSS region.

    Input: source list data frames plus their astropy coordinate objects
    (xcat for xdf, catopt for optdf_in).  ``opticalid'' must be unique per
    optical source.  Default column names assume XMM SRCLIST / HSC formats.

    Parameters
    ----------
    r0 : radius (arcsec) used for defining q(m)
    r_in, r_out : annulus radii (arcsec) used to select background sources
    niter : max number of q(m) / threshold refinement iterations
    lrmax : if given, skip the iterative threshold search entirely

    Returns
    -------
    match, goodmatch, R, C, density, density_raw, lthmax, rmagbin

    NOTE(review): the original docstring contained a trapped (never
    executed) sanity check that len(catopt)==len(optdf) and
    len(xcat)==len(xdf); that consistency is still assumed here.
    '''
    optdf = optdf_in.copy(deep=True)
    optdf.set_index(numid,inplace=True)
    #making a copy for output
    dfout = xdf.copy(deep=True).reset_index()
    #Background number surface density
    nm, rmagbin = getbkgcat(xcat,catopt,optdf,r_in = r_in, r_out=r_out,
                            nmagbin=nmagbin, magname = magname,ora=ora,odec=odec)
    if verbose:print('Calculating background mag. distribution, nm')
    #nm = nm/np.sum(nm)
    #find the number of X-ray sources at least one matching withn 1' (sample completeness)
    idopt_r0,d2d,d3d=xcat.match_to_catalog_sky(catopt)#,1.0*u.arcmin)
    # NX here is the number of X-ray sources with any neighbor within 1 arcmin
    NX = sum(d2d.arcmin <= 1.)
    idopt_r0,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r0*u.arcsec)
    N1 = float(len(np.unique(idopt_r0)))
    Q = N1/NX
    print('Q = ', Q, ', N1 = ',N1, ' NX = ', NX)
    if (N1 != float(len(idopt_r0))):
        print('duplicated optical sources in qm calculation')
    # initial q(m): counts within r0 minus the expected background
    opt_qm = optdf.loc[idopt_r0,:]
    grp=opt_qm.groupby(pd.cut(opt_qm[magname].values,bins=rmagbin))
    total_m=grp[ora].count().values
    real_m0=total_m-np.pi*r0**2*NX*nm
    real_m0[np.where(real_m0 < 0.)] = 0.1*nm[np.where(real_m0 < 0.)]*np.pi*NX*r0**2
    qm0 = real_m0*(Q/np.sum(real_m0))
    # oversample each magnitude bin at 5 points, then LOWESS-smooth
    rmagarr = np.array([])
    qmarr = np.array([])
    nmarr = np.array([])
    for index, i in enumerate(rmagbin[:-1]):
        rmagarr = np.hstack((rmagarr,np.linspace(i, rmagbin[index+1], 5)))
        qmarr = np.hstack((qmarr, np.zeros(5) + qm0[index]))
        nmarr = np.hstack((nmarr, np.zeros(5) + nm[index]))
    result = lowess(qmarr,rmagarr,frac=0.2)
    rmagsmooth = result[:,0]
    qmsmooth = result[:,1]
    result = lowess(nmarr,rmagarr,frac=0.2)
    #rmagsmooth = result[:,0]
    nmsmooth = result[:,1]
    #for unrealistical qm values (<0), assuming the real counterpart distribution is the same
    #as the background
    #qm0[np.where(qm0 < 0.)] = nm[np.where(qm0 < 0.)]
    rmag = rmagsmooth#binvalue(rmagbin)
    if verbose:print('Calculating initial counterpart mag. dist., qm')
    if verbose:print('Calculating background mag. distribution, rmag')
    # density_raw: per-bin values; density: smoothed, oversampled values
    density_raw = pd.DataFrame({
        'rmag':binvalue(rmagbin),
        'qm0':qm0,
        'nm':nm
        }
    )
    density = pd.DataFrame({'rmag':rmag,'qm0':qmsmooth,'qms'+str(np.round(Q,2)):qmsmooth,'nm':nmsmooth})#,'real_ms':real_m0})
    #With qm, nm, and Q, calculate the first match
    if verbose:print('First LR matching')
    match, goodmatch, R, C, lthmax, LRth = \
        calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qmsmooth, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
                lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
                xra = xra, xdec = xdec, ora = ora, odec = odec,
                opticalid = opticalid,opterr=opterr,pdf=pdf,first=True)
    if verbose:print('Q0='+str(Q), 'R0='+str(R),'C0='+str(C), len(goodmatch), lthmax)
    #With the new ``matched sources'', recalculate qm again until C and R converges
    if lrmax is None:
        for i in range(niter):
            if len(goodmatch) == 0:
                # recovery path: reset the threshold and rebuild the scan grid
                print('No goodmatches (LRthreshold = ',lthmax,'), resetting to 0.4')
                lthmax = 0.4
                lth = np.sort(np.hstack((match.LR.quantile([0.1, 0.25, 0.5, 0.75, 0.9]).values, \
                                         np.linspace(lthmax*0.5,lthmax*1.5,5))))
            lthmax0 = lthmax * 1.
            x_smooth, qm, Q, qmraw = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)#, NX, nm)
            match, goodmatch, R, C, lthmax, LRth = \
                calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qm, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
                        lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
                        xra = xra, xdec = xdec, ora = ora, odec = odec,\
                        opticalid = opticalid,opterr=opterr,pdf=pdf, first=False)
            density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
            density_raw['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qmraw,index=density_raw.index)
            #density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
            if verbose:print('R, C, len(goodmatch), LRth:' ,R, C, len(goodmatch),lthmax)
            if verbose:print('Iter',i, 'new LRth = ', lthmax, 'old LRth =', lthmax0 )
            # converged when the threshold moved < 0.01 (after >= 5 iterations)
            if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i >= 4):
                if verbose:print('LR threshold converges, breaking now')
                density['qmfinal'] = pd.Series(qm,index=density.index)
                density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
                break
            elif i == max(range(niter)):
                density['qmfinal'] = pd.Series(qm,index=density.index)
                density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
        return match,goodmatch, R, C, density, density_raw, lthmax, rmagbin
    else:
        # fixed-threshold path: one more match at the supplied lrmax
        match, goodmatch, R, C, lthmax, LRth = \
            calc_LR(xdf, xcat, optdf,catopt,nmsmooth, qmsmooth, Q, rmag, NX, rsearch=rsearch,LRfrac=LRfrac,\
                    lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
                    xra = xra, xdec = xdec, ora = ora, odec = odec,
                    opticalid = opticalid,opterr=opterr,pdf=pdf)
        x_smooth, qm, Q, qmraw = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)
        density['qmfinal'] = pd.Series(qm,index=density.index)
        density_raw['qmfinal'] = pd.Series(qmraw,index=density_raw.index)
        return match,goodmatch, R, C, density, density_raw, lthmax, rmagbin
def likmatch_rerun(xdf, xcat, optdf_in, catopt, density, radecerr = False, r0=2.5,rsearch=5.0, \
    r_in = 7., r_out=35., lth = np.linspace(0.05,0.9,10),LRfrac=0.2,lrmax=None,\
    nmagbin=15, niter=10,numid='numid',magname = 'imag_psf',xerrname='xposerr',\
    xra = 'RA', xdec = 'DEC', ora = 'ra', odec = 'dec',\
    opticalid = 'hscid',opterr=0.1,pdf='Rayleigh',verbose=True,rc=False):
    '''
    Re-run the LR matching using the precomputed `density` table produced
    by likmatch (its nm, qmfinal and rmag columns); useful for
    shift-and-rematch simulations.

    Returns (match, goodmatch), or (match, goodmatch, R, C) when rc=True.
    '''
    catalog = optdf_in.copy(deep=True)
    catalog.set_index(numid, inplace=True)
    n_x = float(len(xcat))
    # counterpart fraction Q from the number of optical sources within r0
    hit_opt, hit_x, sep2d, sep3d = xcat.search_around_sky(catopt, r0*u.arcsec)
    q_frac = float(len(np.unique(hit_opt)))/n_x
    match, goodmatch, rel, comp, lthmax, lr_th = calc_LR(
        xdf, xcat, catalog, catopt,
        density.nm.values, density.qmfinal.values, q_frac,
        density.rmag.values, n_x,
        rsearch=rsearch, LRfrac=LRfrac, lth=lth, lrmax=lrmax,
        magname=magname, xerrname=xerrname,
        xra=xra, xdec=xdec, ora=ora, odec=odec,
        opticalid=opticalid, opterr=opterr, pdf=pdf)
    if rc:
        return match, goodmatch, rel, comp
    return match, goodmatch
def likmatch_ext(
    xdf, xcat, optdf_in, catopt, density, r0=3.0, rsearch=10.0, \
    r_in = 10., r_out=50., \
    lth = None, LRfrac=0.5, lrmax=None, \
    nmagbin=15, niter=10, numid='numid', magname = 'imag_psf',
    xerrname='xposerr', xra = 'RA', xdec = 'DEC', \
    ora = 'ra', odec = 'dec', opticalid = 'hscid',opterr=0.1, \
    pdf='Rayleigh',verbose=True):
    '''
    Likelihood ratio based source matching for extended-radius runs.

    Unlike likmatch, the magnitude distributions are taken from an input
    `density` table (nm, qmfinal, rmag columns) rather than derived here;
    a single calc_LR pass is performed with first=True.

    Returns (match, goodmatch, R, C, lthmax).
    '''
    catalog = optdf_in.copy(deep=True)
    catalog.set_index(numid, inplace=True)
    n_x = float(len(xcat))
    # counterpart fraction Q from the number of optical sources within r0
    hit_opt, hit_x, sep2d, sep3d = xcat.search_around_sky(catopt, r0*u.arcsec)
    q_frac = float(len(np.unique(hit_opt)))/n_x
    match, goodmatch, rel, comp, lthmax, lr_th = calc_LR(
        xdf, xcat, catalog, catopt,
        density.nm.values, density.qmfinal.values, q_frac,
        density.rmag.values, n_x,
        rsearch=rsearch, LRfrac=LRfrac, lth=lth, lrmax=lrmax,
        magname=magname, xerrname=xerrname,
        xra=xra, xdec=xdec, ora=ora, odec=odec,
        opticalid=opticalid, opterr=opterr, pdf=pdf, first=True)
    if verbose:print('Q0='+str(q_frac), 'R0='+str(rel),'C0='+str(comp), len(goodmatch), lthmax)
    return match, goodmatch, rel, comp, lthmax
'''
#With the new ``matched sources'', recalculate qm again until C and R converges
if lrmax is None:
for i in range(niter):
if len(goodmatch) == 0:
print('No goodmatches (LRthreshold = ',lthmax,'), resetting to 0.4')
lthmax = 0.4
lth = np.sort(np.hstack((match.LR.quantile([0.1, 0.25, 0.5, 0.75, 0.9]).values, \
np.linspace(lthmax*0.5,lthmax*1.5,5))))
lthmax0 = lthmax * 1.
#qm, Q, real_m = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)#, NX, nm)
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth, lrmax=lrmax , magname = magname,xerrname=xerrname,\
xra = xra, xdec = xdec, ora = ora, odec = odec,\
opticalid = opticalid,opterr=opterr,pdf=pdf, first=False)
#density['qm'+str(i)+'_'+str(np.round(Q,2))] = pd.Series(qm,index=density.index)
#density['real_m'+str(i)] = pd.Series(real_m,index=density.index)
if verbose:print(R, C, len(goodmatch),lthmax)
if verbose:print('Iter',i, 'new LRth = ', lthmax, 'old LRth =', lthmax0 )
if (np.abs(lthmax0 - lthmax) < 0.01) & (lthmax > 0.1) & (i >= 4):
if verbose:print('LR threshold converges, breaking now')
#density['qmfinal'] = pd.Series(qm,index=density.index)
break
elif i == max(range(niter)):
print('max niter reached, should check convergence')
#density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, lthmax
else:
match, goodmatch, R, C, lthmax, LRth = \
calc_LR(xdf, xcat, optdf,catopt,nm, qm, Q, rmag, rsearch=rsearch,LRfrac=LRfrac,\
lth = lth,lrmax=lrmax, magname = magname,xerrname=xerrname,
xra = xra, xdec = xdec, ora = ora, odec = odec,
opticalid = opticalid,opterr=opterr,pdf=pdf)
#qm, Q, real_m = getqm(goodmatch,rmagbin, C, nm, NX, r0 = r0)
#density['qmfinal'] = pd.Series(qm,index=density.index)
return match,goodmatch, R, C, lthmax
'''
'''
def finalmatch(match,goodmatch):
match.set_index(match.matchid.values,inplace=True)
mid_all = np.arange(len(match))
mid_all[goodmatch.matchid.values] = -1
badmatch = match.loc[mid_all[mid_all > 0],:]
#if an xid alread has a counterpart in goodmatch, drop it.
badmatch = badmatch[np.in1d(badmatch.xid.values, goodmatch.xid.unique(),invert=True)].copy()
badmatch.reset_index(inplace=True)
bad_ok = badmatch.drop_duplicates('xid',keep=False)
ibad = np.arange(len(badmatch))
ibad[bad_ok.index.values] = -1
bad_bad = badmatch.loc[np.where(ibad > -1)[0],:]
bad_bad.drop('index',axis=1,inplace=True)
okmatch = pd.concat([goodmatch, bad_ok])
return okmatch, bad_bad
'''
def finalmatch(match,lrth,LRfrac=0.5, r99=True):
    '''
    Split the match DataFrame into ``good'', ``ok'' and ``bad'' matches.

    Good: candidates with LR >= lrth (within multi-candidate groups, also
    LR >= LRfrac * max(LR) of the group).  Ok: good plus the remaining
    X-ray sources whose single leftover candidate lies within the r99
    radius (when r99=True).  Bad: everything else.

    Returns (allgood, allok, bad).
    '''
    # drop leftover columns created by earlier reset_index() calls
    if 'level_0' in match.columns:
        match = match.drop('level_0',axis=1)
    if 'index' in match.columns:
        match = match.drop('index',axis=1)
    tmp = match.set_index('matchid')
    # r99: 3.5 x the X-ray positional error -- presumably the ~99%
    # confidence radius for a Rayleigh error distribution; TODO confirm
    tmp['r99'] = tmp.xposerr * 3.5
    grp = tmp.groupby('xid')
    onematch = grp.filter(lambda x: len(x) == 1).reset_index()
    # NOTE(review): np.delete removes by *position* while matchid values
    # are labels -- equivalent only when matchid is the 0..N-1 range
    # produced by calc_LR; confirm for other inputs.
    multimatch = tmp.loc[np.delete(tmp.index.values,
                                   onematch.matchid.values),:].reset_index()
    #now work on good matches
    goodones = onematch[onematch.LR >= lrth]
    goodmul = multimatch[multimatch.LR >= lrth]
    grp = goodmul.groupby('xid')
    igood = grp.apply(lambda df:df.LR/df.LR.max() >= LRfrac).values
    goodmul = goodmul[igood].reset_index()
    allgood = pd.concat([goodones, goodmul], ignore_index=False)
    #now work on the ok matches
    nogood = tmp.drop(allgood.matchid.values)
    if r99:
        ng_99 = nogood[nogood.dist <= nogood.r99].drop_duplicates('xid',keep=False).reset_index()
    else:
        ng_99 = nogood.drop_duplicates('xid',keep=False).reset_index()
    #ng_ok = ng_99[ng_99.LR >= lrth].reset_index()
    bad = nogood.drop(ng_99.matchid.values)
    allok = pd.concat([allgood, ng_99], ignore_index=False)
    return allgood, allok, bad
|
|
#An automated selenium test designed to test the interface of a web calculator
# Available @ http://teaching.csse.uwa.edu.au/units/CITS5501/Assignments/calculator.html
# By Aden Huen
#Tests web interface by input space partitioning
import unittest
from selenium import webdriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
class test_webcalc(unittest.TestCase):
def test_input(self):
#Tests the amount of digits allowed in calculator register
input_size = 0
alert_popped = False
while (alert_popped == False):
try:
expected_conditions.alert_is_present()
alert = driver.switch_to_alert()
alert.accept()
alert_popped = True
except Exception:
btn1.click()
input_size += 1
self.assertEqual(input_size, 17)
btndel.click()
def test_buttons_c(self):
btndel.click()
#Test all buttons besides del and =
btn1.click()
btndot.click()
btn2.click()
btnplus.click()
btn3.click()
btnminus.click()
btn4.click()
btntimes.click()
btn5.click()
btndivide.click()
btn6.click()
btnmod.click()
btn7.click()
btn8.click()
btn9.click()
btn0.click()
self.assertEqual(result.text, '1.2+3-4*5/6%7890')
#Test Del
btndel.click()
self.assertEqual(result.text, '')
#Test =
btndot.click()
btnequal.click()
self.assertEqual(result.text, '0')
btndel.click()
#Test putting zeros after each characteristic as zero is a special case number
def test_zero_after_digit_c(self):
btndel.click()
btn4.click()
btn0.click()
self.assertEqual(result.text, '40')
btndel.click()
def test_zero_at_start_c(self):
btndel.click()
btn0.click()
btndot.click()
btn1.click()
self.assertEqual(result.text, '0.1')
btndel.click()
def test_zero_after_decimal_c(self):
btndel.click()
btn1.click()
btndot.click()
btn0.click()
btn1.click()
self.assertEqual(result.text, '1.01')
btndel.click()
def test_zero_after_operator_c(self):
btndel.click()
btn1.click()
btnplus.click()
btn0.click()
self.assertEqual(result.text, '1+0')
btndel.click()
def test_overflow_c(self):
btndel.click()
#Test Overflow
# ENTER 99 999 999 * 9 999 999 EXPECT
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btntimes.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btn9.click()
btnequal.click()
self.assertEqual(result.text, '999999890000001')
btndel.click()
def test_underflow_c(self):
btndel.click()
#Test Underflow
#ENTER '22/7' EXPECT 3.142857142857142 (17 character limit)
btn2.click()
btn2.click()
btndivide.click()
btn7.click()
btnequal.click()
self.assertEqual(result.text, '3.142857142857143')
btndel.click()
def test_bidmas_c(self):
btndel.click()
#Test BIDMAS
# ENTER 1 + 6 * 4 / 2 - 3 EXPECTING 10
btn1.click()
btnplus.click()
btn6.click()
btntimes.click()
btn4.click()
btndivide.click()
btn2.click()
btnminus.click()
btn3.click()
btnequal.click()
self.assertEqual(result.text, '10')
btndel.click()
# I will be using shorthand notation to explain each input
# [digits] = 'd'
# [decimal] = '.'
# [operator] = 'o'
# [evaluate] = 'e'
# valid evaluation = 'V'
# invalid evaluation = 'I'
# where an evaluation is valid if the output is not nothing and is correct arithmetically
###>Test single characteristic evaluation
def test_del(self):
btndel.click()
# Special case delete
btn1.click()
btndel.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_digit(self):
btndel.click()
# de = V
btn1.click()
btnequal.click()
self.assertEqual(result.text, '1')
btndel.click()
def test_operator(self):
btndel.click()
# oe = I
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_decimal(self):
btndel.click()
# .e = I
btndot.click()
btnequal.click()
self.assertEqual(result.text, '0')
btndel.click()
def test_evaluator(self):
btndel.click()
# e = I
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
###>Test double characteristic evaluation
# In order to avoid naming problems, we will be replacing decimal
# with 'x' for describing our test
def test_dx(self):
btndel.click()
# d.e = I
btn1.click()
btndot.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_do(self):
btndel.click()
# doe = I
btn1.click()
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xd(self):
btndel.click()
# .de = V
btndot.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '0.1')
btndel.click()
def test_xo(self):
btndel.click()
# .oe = I
btndot.click()
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
# Starting with operator in single characteristic test fails,
# Thus we do not need to consider operator first branches
###> Triple characteric evaluation
def test_dxd(self):
btndel.click()
# d.de = V
btn1.click()
btndot.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '1.1')
btndel.click()
def test_dxo_and_dox(self):
btndel.click()
# d.oe = I / do.e = I
btn1.click()
btndot.click()
#here and below we find inputing o after . will replace the . with the o
# and inputing . after o does not change the register
# thus, we do not need to consider (./o)-(./o) pairs
self.assertEqual(result.text, '1.')
btnplus.click()
self.assertEqual(result.text, '1+')
btndot.click()
self.assertEqual(result.text, '1+')
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_dxx(self):
btndel.click()
# d..e = I
btn1.click()
btndot.click()
self.assertEqual(result.text, '1.')
btndot.click() #the decimal does not add anything into the register
self.assertEqual(result.text, '1.') #thus, we do not need to consider decimal-decimal pairs
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_dod(self):
btndel.click()
# dode = V
btn1.click()
btnplus.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '2')
btndel.click()
def test_dox(self):
btndel.click()
# do.e = I
btn1.click()
btnplus.click()
btndot.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_doo(self):
btndel.click()
# dooe = V
btn1.click()
btnplus.click()
self.assertEqual(result.text, '1+')
btnminus.click() #the operator replaces previous operator
self.assertEqual(result.text, '1-') #thus, we do not need to consider operator-operator pairs
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xdo(self):
btndel.click()
# .doe = I
btndot.click()
btn1.click()
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xdx(self):
btndel.click()
# .d.e = I
btndot.click()
btn1.click()
btndot.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
###> Quadruple characteristic evaluation
def test_dodx(self):
btndel.click()
# dod.e = I
btn1.click()
btnplus.click()
btn1.click()
btndot.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_dodo(self):
btndel.click()
# dodoe = I
btn1.click()
btnplus.click()
btn1.click()
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xdod(self):
btndel.click()
# .dode = V
btndot.click()
btn1.click()
btnplus.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '1.1')
btndel.click()
def test_xdxd(self):
btndel.click()
# .d.de = I
btndot.click()
btn1.click()
btndot.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
###> Quintuple characteristic evaluation
def test_dodod(self):
btndel.click()
# dodode = V
btn1.click()
btnplus.click()
btn1.click()
btnplus.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '3')
btndel.click()
def test_dxdod(self):
btndel.click()
# d.dode = V
btn1.click()
btndot.click()
btn1.click()
btnplus.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '2.1')
btndel.click()
def test_dodxd(self):
btndel.click()
# dod.de = V
btn1.click()
btnplus.click()
btn1.click()
btndot.click()
btn1.click()
btnequal.click()
self.assertEqual(result.text, '2.1')
btndel.click()
def test_xdodx(self):
btndel.click()
# .dod.e = I
btndot.click()
btn1.click()
btnplus.click()
btn1.click()
btndot.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xdodo(self):
btndel.click()
# .dodoe = I
btndot.click()
btn1.click()
btnplus.click()
btn1.click()
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xdxdo(self):
btndel.click()
# .d.doe = I
btndot.click()
btn1.click()
btndot.click()
btn1.click()
btnplus.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
def test_xdxdx(self):
btndel.click()
# .d.d.e = I
btndot.click()
btn1.click()
btndot.click()
btn1.click()
btndot.click()
btnequal.click()
self.assertEqual(result.text, '')
btndel.click()
# Here we can assume that besides having a decimal as the first input,
# decimals need to be in the form d.d or they will
# reset the register, we can omit them from further branches
###> Sextuple characteristic evaluation
def test_xdodxd(self):
    """Pattern .dod.de (keys: . 1 + 1 . 1 =) is valid; display shows '1.2'."""
    btndel.click()
    # .dod.de = V
    btndot.click()
    btn1.click()
    btnplus.click()
    btn1.click()
    btndot.click()
    btn1.click()
    btnequal.click()
    self.assertEqual(result.text, '1.2')
    btndel.click()
###> Septuple characteristic evaluation
def test_dxdodxd(self):
    """Pattern d.dod.de (keys: 1 . 1 + 1 . 1 =) is valid; display shows '2.2'."""
    btndel.click()
    # d.dod.de = V
    btn1.click()
    btndot.click()
    btn1.click()
    btnplus.click()
    btn1.click()
    btndot.click()
    btn1.click()
    btnequal.click()
    self.assertEqual(result.text, '2.2')
    btndel.click()
btndel.click()
def test_dododod(self):
    """Pattern dododode (keys: 1 + 1 + 1 + 1 =) is valid; display shows '4'."""
    btndel.click()
    # dododode = V
    btn1.click()
    btnplus.click()
    btn1.click()
    btnplus.click()
    btn1.click()
    btnplus.click()
    btn1.click()
    btnequal.click()
    self.assertEqual(result.text, '4')
    btndel.click()
def test_dodxdod(self):
    """Pattern dod.dode (keys: 1 + 1 . 1 + 1 =) is valid; display shows '3.1'."""
    btndel.click()
    # dod.dode = V
    btn1.click()
    btnplus.click()
    btn1.click()
    btndot.click()
    btn1.click()
    btnplus.click()
    btn1.click()
    btnequal.click()
    self.assertEqual(result.text, '3.1')
    btndel.click()
# Here we can see the test has devolved into having permutations with every
# second characteristic is digit, we can simplify the problem by also noting
# that only one decimal can be present between operators
if __name__ == "__main__":
    # Use chrome webdriver
    driver = webdriver.Chrome()
    # Go to target website
    driver.get("http://teaching.csse.uwa.edu.au/units/CITS5501/Assignments/calculator.html")
    # Map buttons for testing. The test methods above reference these as
    # module-level globals.
    # NOTE(review): find_element_by_xpath/find_element_by_id is the
    # Selenium 3 API (removed in Selenium 4) -- confirm the pinned selenium
    # version before upgrading.
    btn1 = driver.find_element_by_xpath("//button[@value='1']")
    btn2 = driver.find_element_by_xpath("//button[@value='2']")
    btn3 = driver.find_element_by_xpath("//button[@value='3']")
    btn4 = driver.find_element_by_xpath("//button[@value='4']")
    btn5 = driver.find_element_by_xpath("//button[@value='5']")
    btn6 = driver.find_element_by_xpath("//button[@value='6']")
    btn7 = driver.find_element_by_xpath("//button[@value='7']")
    btn8 = driver.find_element_by_xpath("//button[@value='8']")
    btn9 = driver.find_element_by_xpath("//button[@value='9']")
    btn0 = driver.find_element_by_xpath("//button[@value='0']")
    btnplus = driver.find_element_by_xpath("//button[@value='+']")
    btnminus = driver.find_element_by_xpath("//button[@value='-']")
    btntimes = driver.find_element_by_xpath("//button[@value='*']")
    btndivide = driver.find_element_by_xpath("//button[@value='/']")
    btnequal = driver.find_element_by_xpath("//button[@value='=']")
    btndot = driver.find_element_by_xpath("//button[@value='.']")
    btnmod = driver.find_element_by_xpath("//button[@value='%']")
    btndel = driver.find_element_by_id("delete")
    result = driver.find_element_by_id("result")
    # exit=False so control returns here and the browser can be closed.
    unittest.main(exit=False)
    driver.close()
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import core
import multiprocessing
import framework
import executor
import warnings
import sys
import os
# Public API of this module.
__all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy']

# Aliases for the C++-backed strategy classes exposed by the core module.
ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
class ParallelExecutor(object):
    """
    ParallelExecutor can run program in parallel.

    Args:
        use_cuda (bool): Whether to use CUDA or not.
        loss_name (str): The loss name must set in training. Default None.
        main_program (Program): The program that need to run, if not provided,
            then default_main_program will be used. Default None.
        share_vars_from(ParallelExecutor): If provided, it will share variables
            from the specified ParallelExecutor. Default None.
        num_trainers(int): If greater than 1, NCCL will be initialized with
            multiple rank of nodes, each node should have same number of GPUs.
            Distributed training will be enabled then. Default 1.
        trainer_id(int): Must use together with num_trainers. trainer_id is the
            "rank" of current node starts from 0. Default 0.

    Returns:
        ParallelExecutor: The initialized ParallelExecutor object.

    Raises:
        TypeError: If share_vars_from is provided, but not ParallelExecutor object.

    Examples:
        .. code-block:: python

          train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
          test_exe = fluid.ParallelExecutor(use_cuda=True,
                                            main_program=test_program,
                                            share_vars_from=train_exe)
          train_loss, = train_exe.run([loss.name], feed=feed_dict)
          test_loss, = test_exe.run([loss.name], feed=feed_dict)
    """

    def __init__(self,
                 use_cuda,
                 loss_name=None,
                 main_program=None,
                 share_vars_from=None,
                 exec_strategy=None,
                 build_strategy=None,
                 num_trainers=1,
                 trainer_id=0,
                 **kwargs):
        # Passing strategy fields through **kwargs is deprecated: collect a
        # message per offending key telling the caller which strategy object
        # to set it on, then fail loudly.
        if len(kwargs) != 0:
            err_msg = ""
            for key in kwargs:
                if key in dir(ExecutionStrategy):
                    err_msg += \
                        "Setting {0} by constructor is deprecated. Use " \
                        "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \
                        "pe=ParallelExecutor(exec_strategy=strategy) " \
                        "instead.\n ".format(key)
                elif key in dir(BuildStrategy):
                    err_msg += \
                        "Setting {0} by constructor is deprecated. Use " \
                        "strategy=BuildStrategy(); See help(" \
                        "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format(
                            key)
                else:
                    err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format(
                        key)
            raise ValueError(err_msg)

        # Build one execution place per device: every CUDA device when
        # use_cuda, otherwise CPU_NUM (env var, defaults to core count) CPUs.
        self._places = []
        self._act_places = []
        if use_cuda:
            for i in xrange(core.get_cuda_device_count()):
                p = core.Place()
                self._act_places.append(core.CUDAPlace(i))
                p.set_place(self._act_places[-1])
                self._places.append(p)
        else:
            cpu_num = int(
                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            for i in xrange(cpu_num):
                p = core.Place()
                self._act_places.append(core.CPUPlace())
                p.set_place(self._act_places[-1])
                self._places.append(p)
        assert self._places, "no place for execution"

        if exec_strategy is None:
            exec_strategy = ExecutionStrategy()
        exec_strategy.use_cuda = use_cuda

        # num_threads == 0 means "pick a default" based on the device kind.
        if exec_strategy.num_threads == 0:
            if use_cuda:
                # Experiments on se-resnext shows that too many threads hurt
                # performance. Worth tunning for other models in the future.
                exec_strategy.num_threads = len(self._places) * 4
            else:
                cpu_num = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
                exec_strategy.num_threads = cpu_num * 2

        if build_strategy is None:
            build_strategy = BuildStrategy()

        main = main_program
        main = main if main else framework.default_main_program()
        scope = executor.global_scope()

        # FIXME(Yancey1989): it's a temporary approach to determinate the distribute
        # train program, call self.bcast_param() at the end of each mini-batch.
        self.is_dist = True if "recv" in [
            op.type for op in main.global_block().ops
        ] else False

        if share_vars_from and not isinstance(share_vars_from,
                                              ParallelExecutor):
            raise TypeError("share_vars_from must be ParallelExecutor.")

        local_scopes = share_vars_from.executor.local_scopes(
        ) if share_vars_from else []

        # Names of persistable (non-RAW) variables; broadcast to all devices
        # in distributed mode, see bcast_params().
        self.persistable_vars = [
            v.name
            for v in filter(
                lambda var: var.persistable and var.type != core.VarDesc.VarType.RAW,
                main.list_vars())
        ]

        self.executor = core.ParallelExecutor(
            self._places,
            set([
                p.name for p in main.global_block()._iter_parameters()
                if not p.stop_gradient
            ]),
            set(self.persistable_vars), main.desc, loss_name
            if loss_name else '', scope, local_scopes, exec_strategy,
            build_strategy, num_trainers, trainer_id)
        self.scope = scope

    def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
        """
        Run a parallel executor with fetch_list.

        The feed parameter can be a dict or a list. If feed is a dict, the
        feed data will be split into multiple devices. If feed is a list, we
        assume the data has been splitted into multiple devices, the each
        element in the list will be copied to each device directly.

        For example, if the feed is a dict:

        >>> exe = ParallelExecutor()
        >>> # the image will be splitted into devices. If there is two devices
        >>> # each device will process an image with shape (24, 1, 28, 28)
        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})

        For example, if the feed is a list:

        >>> exe = ParallelExecutor()
        >>> # each device will process each element in the list.
        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
        >>> #
        >>> # you can use exe.device_count to get the device number.
        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
        >>>              ])

        Args:
            fetch_list(list): The fetched variable names
            feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be splitted into each devices. If
                the feed is a list, each element of the list will be copied
                to each device. Default None.
            feed_dict: Alias for feed parameter, for backward compatibility.
                This parameter has been deprecated. Default None.
            return_numpy(bool): Whether converts the fetched tensor to numpy.
                Default: True.

        Returns:
            List: The fetched result list.

        Raises:
            ValueError: If the feed is a list, but its length is not equal the
                length of active places, or its element's is not dict.

        NOTES:
            1. If the feed's type is dict, the number of data that feeds to
               ParallelExecutor must be bigger than active places. Otherwise,
               it will throw exception from C++ side. Special attention should be
               paid to check whether the last batch of the dataset is bigger
               than active places.
            2. If active places are more than one, the fetch results for each
               variable is a list, and each element of this list is the variable
               of respective active place.

        Examples:
            .. code-block:: python

                pe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                            loss_name=avg_cost.name,
                                            main_program=fluid.default_main_program())
                loss = pe.run(feed=feeder.feed(cur_batch),
                              fetch_list=[avg_cost.name])
        """
        if feed is None and feed_dict is not None:
            feed = feed_dict
            print >> sys.stderr, "`feed_dict` is deprecated. Please use `feed=`"

        if isinstance(feed, dict):
            # Convert each entry to a LoDTensor; the C++ side splits it
            # across the local scopes of all devices.
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor need to be splitted
                    # it is fast in CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            self.executor.feed_and_split_tensor_into_local_scopes(
                feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            # Pre-split feed: one dict per device, copied directly onto the
            # corresponding place.
            if len(feed) != len(self._act_places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, self._act_places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            self.executor.feed_tensors_into_local_scopes(res)

        fetch_var_name = '@FETCHED_VAR_NAME@'
        self.executor.run(fetch_list, fetch_var_name)
        arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()

        # Keep parameters in sync across trainers after each mini-batch of a
        # distributed (has "recv" op) program.
        if self.is_dist:
            self.bcast_params()

        if return_numpy:
            return executor.as_numpy(arr)

        return [arr[i] for i in range(len(arr))]

    def bcast_params(self):
        """
        Broadcast the parameters to other devices. It is used during
        distributed training.
        """
        self.executor.bcast_params(set(self.persistable_vars))

    @property
    def device_count(self):
        # Number of active execution places (devices) backing this executor.
        return len(self._act_places)
|
|
import re
from LUIObject import LUIObject
from LUISprite import LUISprite
from LUILabel import LUILabel
from LUIInitialState import LUIInitialState
from LUILayouts import LUIHorizontalStretchedLayout
__all__ = ["LUIInputField"]
class LUIInputField(LUIObject):
    """ Simple input field, accepting text input. This input field supports
    entering text and navigating. Selecting text is (currently) not supported.

    The input field also supports various keyboard shortcuts:

        [home] / [pos1]        Move to the beginning of the text
        [end]                  Move to the end of the text
        [arrow_left]           Move one character to the left
        [arrow_right]          Move one character to the right
        [ctrl] + [arrow_left]  Move to the left, skipping over words
        [ctrl] + [arrow_right] Move to the right, skipping over words
        [escape]               Un-focus input element
    """

    # Pattern used to skip over one word (with surrounding non-word chars)
    # when navigating with ctrl/alt + arrows.
    # Fix: must be a raw string -- "\W"/"\w" are invalid string-literal
    # escapes and raise a DeprecationWarning (an error on newer Python 3).
    re_skip = re.compile(r"\W*\w+\W")

    def __init__(self, parent=None, width=200, placeholder=u"Enter some text ..", value=u"", **kwargs):
        """ Constructs a new input field. An input field always needs a width specified """
        LUIObject.__init__(self, x=0, y=0, solid=True)
        self.set_width(width)
        self._layout = LUIHorizontalStretchedLayout(parent=self, prefix="InputField", width="100%")

        # Container for the text
        self._text_content = LUIObject(self)
        self._text_content.margin = (5, 7, 5, 7)
        self._text_content.clip_bounds = (0, 0, 0, 0)
        self._text_content.set_size("100%", "100%")

        # Scroller for the text, so we can move right and left
        self._text_scroller = LUIObject(parent=self._text_content)
        self._text_scroller.center_vertical = True
        self._text = LUILabel(parent=self._text_scroller, text="")

        # Cursor for the current position
        self._cursor = LUISprite(self._text_scroller, "blank", "skin", x=0, y=0, w=2, h=15)
        self._cursor.color = (0.5, 0.5, 0.5)
        self._cursor.margin.top = 2
        self._cursor.z_offset = 20
        self._cursor_index = 0
        self._cursor.hide()
        self._value = value

        # Placeholder text, shown when out of focus and no value exists
        self._placeholder = LUILabel(parent=self._text_content, text=placeholder, shadow=False,
                                     center_vertical=True, alpha=0.2)

        # Various states (cursor blink rate and phase start time)
        self._tickrate = 1.0
        self._tickstart = 0.0

        self._render_text()

        if parent is not None:
            self.parent = parent
        LUIInitialState.init(self, kwargs)

    @property
    def value(self):
        """ Returns the value of the input field """
        return self._value

    @value.setter
    def value(self, new_value):
        """ Sets the value of the input field """
        self._value = new_value
        self._render_text()
        self.trigger_event("changed", self._value)

    def clear(self):
        """ Clears the input value """
        self.value = u""

    @property
    def cursor_pos(self):
        """ Returns the cursor position """
        return self._cursor_index

    @cursor_pos.setter
    def cursor_pos(self, pos):
        """ Set the cursor position. Negative positions count from the end
        (Python-slice style); the result is clamped to the text bounds. """
        if pos >= 0:
            self._cursor_index = max(0, min(len(self._value), pos))
        else:
            self._cursor_index = max(len(self._value) + pos + 1, 0)
        self._reset_cursor_tick()
        self._render_text()

    def on_tick(self, event):
        """ Tick handler, gets executed every frame. Blinks the cursor:
        visible for the first half of every tick interval. """
        frame_time = globalClock.get_frame_time() - self._tickstart
        show_cursor = frame_time % self._tickrate < 0.5 * self._tickrate
        if show_cursor:
            self._cursor.color = (0.5, 0.5, 0.5, 1)
        else:
            self._cursor.color = (1, 1, 1, 0)

    def on_click(self, event):
        """ Internal on click handler """
        self.request_focus()

    def on_mousedown(self, event):
        """ Internal mousedown handler, moves the cursor to the clicked char """
        local_x_offset = self._text.text_handle.get_relative_pos(event.coordinates).x
        self.cursor_pos = self._text.text_handle.get_char_index(local_x_offset)

    def _reset_cursor_tick(self):
        """ Internal method to reset the cursor tick (restarts the blink) """
        self._tickstart = globalClock.get_frame_time()

    def on_focus(self, event):
        """ Internal focus handler """
        self._cursor.show()
        self._placeholder.hide()
        self._reset_cursor_tick()
        self._layout.color = (0.9, 0.9, 0.9, 1)

    def on_keydown(self, event):
        """ Internal keydown handler. Processes the special keys, and if none are
        present, redirects the event """
        key_name = event.message
        if key_name == "backspace":
            # Remove the character left of the cursor (no-op at position 0).
            self._value = self._value[:max(0, self._cursor_index - 1)] + self._value[self._cursor_index:]
            self.cursor_pos -= 1
            self.trigger_event("changed", self._value)
        elif key_name == "delete":
            # Remove the character right of the cursor (no-op at the end).
            post_value = self._value[min(len(self._value), self._cursor_index + 1):]
            self._value = self._value[:self._cursor_index] + post_value
            self.cursor_pos = self._cursor_index
            self.trigger_event("changed", self._value)
        elif key_name == "arrow_left":
            if event.get_modifier_state("alt") or event.get_modifier_state("ctrl"):
                self.cursor_skip_left()
            else:
                self.cursor_pos -= 1
        elif key_name == "arrow_right":
            if event.get_modifier_state("alt") or event.get_modifier_state("ctrl"):
                self.cursor_skip_right()
            else:
                self.cursor_pos += 1
        elif key_name == "escape":
            self.blur()
        elif key_name == "home":
            self.cursor_pos = 0
        elif key_name == "end":
            self.cursor_pos = len(self.value)
        # Re-emit the raw key event so listeners can react to it as well.
        self.trigger_event(key_name, self._value)

    def on_keyrepeat(self, event):
        """ Internal keyrepeat handler """
        self.on_keydown(event)

    def on_textinput(self, event):
        """ Internal textinput handler, inserts the typed text at the cursor """
        self._value = self._value[:self._cursor_index] + event.message + \
            self._value[self._cursor_index:]
        self.cursor_pos = self._cursor_index + len(event.message)
        self.trigger_event("changed", self._value)

    def on_blur(self, event):
        """ Internal blur handler """
        self._cursor.hide()
        if len(self._value) < 1:
            self._placeholder.show()
        self._layout.color = (1, 1, 1, 1)

    def _render_text(self):
        """ Internal method to render the text and reposition the cursor """
        self._text.set_text(self._value)
        self._cursor.left = self._text.left + \
            self._text.text_handle.get_char_pos(self._cursor_index) + 1
        max_left = self.width - 15

        if self._value:
            self._placeholder.hide()
        else:
            if not self.focused:
                self._placeholder.show()

        # Scroll if the cursor is outside of the clip bounds
        rel_pos = self.get_relative_pos(self._cursor.get_abs_pos()).x
        if rel_pos >= max_left:
            self._text_scroller.left = min(0, max_left - self._cursor.left)
        if rel_pos <= 0:
            self._text_scroller.left = min(0, -self._cursor.left - rel_pos)

    def cursor_skip_left(self):
        """ Moves the cursor to the left, skipping the previous word """
        left_hand_str = ''.join(reversed(self.value[0:self.cursor_pos]))
        match = self.re_skip.match(left_hand_str)
        if match is not None:
            self.cursor_pos -= match.end() - 1
        else:
            self.cursor_pos = 0

    def cursor_skip_right(self):
        """ Moves the cursor to the right, skipping the next word """
        right_hand_str = self.value[self.cursor_pos:]
        match = self.re_skip.match(right_hand_str)
        if match is not None:
            self.cursor_pos += match.end() - 1
        else:
            self.cursor_pos = len(self.value)
|
|
import unittest
import random
from array import array
from coldb import compress
from coldb import colimpl
def _get_data(length, drange):
    """ Return `length` random ints drawn uniformly from [drange[0], drange[1]). """
    lo, hi = drange
    return [random.randint(lo, hi - 1) for _ in range(length)]
def _get_sorted_data(length, drange):
    """ Return a random list (see _get_data) in ascending order. """
    return sorted(_get_data(length, drange))
def _get_bytes(n_bytes):
    """ Return `n_bytes` random bytes.

    Fix: array.tostring() was deprecated and removed in Python 3.9; use
    tobytes() when available and fall back to tostring() on old runtimes,
    preserving the original behaviour (same random call sequence, same
    returned byte string).
    """
    buf = array('B', (random.randint(0, 255) for i in range(n_bytes)))
    if hasattr(buf, 'tobytes'):
        return buf.tobytes()
    return buf.tostring()
# algorithm mixins
class _TestGetMixin(object):
    """ Mixin checking element-wise get() round-trips. Requires these attributes:
        - datarange: range of auto generated data
        - lengthlist: a list of lengths of array data
        - compress_algo: the compress algorithm to be used
        - col_type: compress_algo's col_type param
        - colimpl: impl class
        - opts: extra keyword arguments forwarded to compress_algo
    """
    def testGet(self):
        for length in self.lengthlist:
            l = _get_data(length, self.datarange)
            l_data = self.compress_algo(self.col_type, l, **self.opts)
            col = self.colimpl(l_data, len(l))
            # Every element must decompress to exactly the original value.
            for i in range(length):
                self.assertEqual(l[i], col.get(i))
class _TestFindMixin(object):
    """ Mixin checking find() on sorted data. Requires these attributes:
        - datarange: range of auto generated data
        - lengthlist: a list of lengths of array data
        - compress_algo: the compress algorithm to be used
        - col_type: compress_algo's col_type param
        - colimpl: impl class
        - opts: extra keyword arguments forwarded to compress_algo
    """
    def testFind(self):
        for length in self.lengthlist:
            l = _get_sorted_data(length, self.datarange)
            l_data = self.compress_algo(self.col_type, l, **self.opts)
            col = self.colimpl(l_data, length)
            # test find first
            cur_data = l[0]
            self.assertEqual(0, col.find(cur_data))
            last_data = cur_data
            # find() must return the FIRST index of each distinct value, so
            # repeats of the previous value are skipped.
            for i, cur_data in enumerate(l[1:], 1):
                if cur_data == last_data:
                    continue
                #self.assertEqual(i, col.find(cur_data), "%d, %d, %d, %s" % (i, col.find(cur_data), cur_data, l))
                self.assertEqual(i, col.find(cur_data))
                last_data = cur_data
# Plain (uncompressed) columns: one fixture per array typecode, covering the
# full value range of each type.
class TestPlain_b(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-128, 128)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_plain_normal
        self.col_type = 'b'
        self.colimpl = colimpl.Plain_b
        self.opts = {}

class TestPlain_B(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 256)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_plain_normal
        self.col_type = 'B'
        self.colimpl = colimpl.Plain_B
        self.opts = {}

class TestPlain_h(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-32768, 32768)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_plain_normal
        self.col_type = 'h'
        self.colimpl = colimpl.Plain_h
        self.opts = {}

class TestPlain_H(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 65536)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_plain_normal
        self.col_type = 'H'
        self.colimpl = colimpl.Plain_H
        self.opts = {}

class TestPlain_i(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-2 ** 31, 2 ** 31)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_plain_normal
        self.col_type = 'i'
        self.colimpl = colimpl.Plain_i
        self.opts = {}

class TestPlain_I(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 2 ** 32)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_plain_normal
        self.col_type = 'I'
        self.colimpl = colimpl.Plain_I
        self.opts = {}
# Run-length encoded columns, variant 0. 'pt' selects the position typecode.
class TestRun0_b(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-128, 128)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run0
        self.col_type = 'b'
        self.colimpl = colimpl.Run0_bH
        self.opts = {'pt': 'H'}

class TestRun0_B(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 256)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run0
        self.col_type = 'B'
        self.colimpl = colimpl.Run0_BH
        self.opts = {'pt': 'H'}

class TestRun0_h(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-32768, 32768)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run0
        self.col_type = 'h'
        self.colimpl = colimpl.Run0_hH
        self.opts = {'pt': 'H'}

class TestRun0_H(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 65536)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run0
        self.col_type = 'H'
        self.colimpl = colimpl.Run0_HH
        self.opts = {'pt': 'H'}

class TestRun0_i(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-2 ** 31, 2 ** 31)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run0
        self.col_type = 'i'
        self.colimpl = colimpl.Run0_iH
        self.opts = {'pt': 'H'}

class TestRun0_I(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 2 ** 32)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run0
        self.col_type = 'I'
        self.colimpl = colimpl.Run0_IH
        self.opts = {'pt': 'H'}
# Run-length encoded columns, variant 1. 'pt' selects the position typecode.
class TestRun1_b(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-128, 128)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run1
        self.col_type = 'b'
        self.colimpl = colimpl.Run1_bH
        self.opts = {'pt': 'H'}

class TestRun1_B(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 256)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run1
        self.col_type = 'B'
        self.colimpl = colimpl.Run1_BH
        self.opts = {'pt': 'H'}

class TestRun1_h(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-32768, 32768)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run1
        self.col_type = 'h'
        self.colimpl = colimpl.Run1_hH
        self.opts = {'pt': 'H'}

class TestRun1_H(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 65536)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run1
        self.col_type = 'H'
        self.colimpl = colimpl.Run1_HH
        self.opts = {'pt': 'H'}

class TestRun1_i(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-2 ** 31, 2 ** 31)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run1
        self.col_type = 'i'
        self.colimpl = colimpl.Run1_iH
        self.opts = {'pt': 'H'}

class TestRun1_I(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 2 ** 32)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_run1
        self.col_type = 'I'
        self.colimpl = colimpl.Run1_IH
        self.opts = {'pt': 'H'}
# Enum (dictionary) encoded columns. Lengths are kept small because the enum
# dictionary has a limited capacity (see comment below).
class TestEnum_h(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-32768, 32768)
        self.lengthlist = [1, 10, 100] # enum can't go up to 255
        self.compress_algo = compress.c_enum
        self.col_type = 'h'
        self.colimpl = colimpl.Enum_h
        self.opts = {}

class TestEnum_H(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 65536)
        self.lengthlist = [1, 10, 100]
        self.compress_algo = compress.c_enum
        self.col_type = 'H'
        self.colimpl = colimpl.Enum_H
        self.opts = {}

class TestEnum_i(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-2 ** 31, 2 ** 31)
        self.lengthlist = [1, 10, 100]
        self.compress_algo = compress.c_enum
        self.col_type = 'i'
        self.colimpl = colimpl.Enum_i
        self.opts = {}

class TestEnum_I(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 2 ** 32)
        self.lengthlist = [1, 10, 100]
        self.compress_algo = compress.c_enum
        self.col_type = 'I'
        self.colimpl = colimpl.Enum_I
        self.opts = {}
# Frame-of-reference encoded columns. 'pt' selects the offset typecode.
class TestFrame_i(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (-2 ** 31, 2 ** 31)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_frame
        self.col_type = 'i'
        self.colimpl = colimpl.Frame_iH
        self.opts = {'pt': 'H'}

class TestFrame_I(unittest.TestCase, _TestGetMixin, _TestFindMixin):
    def setUp(self):
        self.datarange = (0, 2 ** 32)
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.compress_algo = compress.c_frame
        self.col_type = 'I'
        self.colimpl = colimpl.Frame_IH
        self.opts = {'pt': 'H'}
class _TestStruct(object):
    """requires lengthlist, structlength, col_type, colimpl

    Round-trips `length` random fixed-size byte strings through the struct
    column implementation and checks element-wise get().
    """
    def testGet(self):
        for length in self.lengthlist:
            l = list(_get_bytes(self.structlength) for i in range(length))
            l_data = compress.c_plain_struct(self.col_type, l)
            col = self.colimpl(l_data, length)
            for i, val in enumerate(l):
                self.assertEqual(val, col.get(i))
class TestStruct_7(unittest.TestCase, _TestStruct):
    """struct7 is a test case to test the struct column"""
    def setUp(self):
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.structlength = 7
        self.col_type = '7s'
        self.colimpl = colimpl.Struct_7

class TestStruct_8(unittest.TestCase, _TestStruct):
    """struct8 is a test case to test the struct column"""
    def setUp(self):
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.structlength = 8
        self.col_type = '8s'
        self.colimpl = colimpl.Struct_8
class _TestBlob(object):
    """requires lengthlist, align, col_type, colimpl, slenrange

    Round-trips `length` random variable-size byte strings (each a multiple
    of `align` bytes long) through the blob column implementation and checks
    element-wise get().
    """
    def testGet(self):
        for length in self.lengthlist:
            slens = (random.randint(*self.slenrange) for i in range(length))
            l = list(_get_bytes(self.align * slen) for slen in slens)
            l_data = compress.c_plain_blob(self.col_type, l, **self.opts)
            col = self.colimpl(l_data, length)
            for i, val in enumerate(l):
                self.assertEqual(val, col.get(i))
# Blob columns with 1-, 2- and 4-byte alignment. 'bpt' selects the blob
# position typecode.
class TestBlob_1(unittest.TestCase, _TestBlob):
    def setUp(self):
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.align = 1
        self.col_type = 'blob1'
        self.colimpl = colimpl.Blob_1H
        self.slenrange = (0, 10)
        self.opts = {'bpt': 'H'}

class TestBlob_2(unittest.TestCase, _TestBlob):
    def setUp(self):
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.align = 2
        self.col_type = 'blob2'
        self.colimpl = colimpl.Blob_2H
        self.slenrange = (0, 10)
        self.opts = {'bpt': 'H'}

class TestBlob_4(unittest.TestCase, _TestBlob):
    def setUp(self):
        self.lengthlist = [1, 10, 100, 1000, 10000]
        self.align = 4
        self.col_type = 'blob4'
        self.colimpl = colimpl.Blob_4H
        self.slenrange = (0, 10)
        self.opts = {'bpt': 'H'}
|
|
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# [MS-TSCH] ATSVC Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/CoreSecurity/impacket/tree/master/impacket/testcases/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT, NDRPOINTER, NDRUniConformantArray
from impacket.dcerpc.v5.dtypes import DWORD, LPWSTR, UCHAR, ULONG, LPDWORD, NULL
from impacket import hresult_errors
from impacket.uuid import uuidtup_to_bin
from impacket.dcerpc.v5.rpcrt import DCERPCException
# UUID/version pair identifying the ATSVC RPC interface ([MS-TSCH] section 1.9).
MSRPC_UUID_ATSVC = uuidtup_to_bin(('1FF70682-0A51-30E8-076D-740BE8CEE98B','1.0'))
class DCERPCSessionError(DCERPCException):
    """DCERPCException subclass that renders TSCH/HRESULT error codes as
    human-readable messages using the hresult_errors table."""

    def __init__(self, error_string=None, error_code=None, packet=None):
        DCERPCException.__init__(self, error_string, error_code, packet)

    def __str__(self):
        key = self.error_code
        # Fix: dict.has_key() was removed in Python 3; the `in` operator is
        # equivalent and works on both Python 2 and 3.
        if key in hresult_errors.ERROR_MESSAGES:
            error_msg_short = hresult_errors.ERROR_MESSAGES[key][0]
            error_msg_verbose = hresult_errors.ERROR_MESSAGES[key][1]
            return 'TSCH SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
        else:
            return 'TSCH SessionError: unknown error code: 0x%x' % self.error_code
################################################################################
# CONSTANTS
################################################################################
# An ATSVC handle is just a (possibly NULL) unicode server name string.
ATSVC_HANDLE = LPWSTR

# 2.3.1 Constant Values
CNLEN = 15          # max computer name length
DNLEN = CNLEN       # max domain name length
UNLEN = 256         # max user name length
MAX_BUFFER_SIZE = (DNLEN+UNLEN+1+1)

# 2.3.7 Flags
TASK_FLAG_INTERACTIVE = 0x1
TASK_FLAG_DELETE_WHEN_DONE = 0x2
TASK_FLAG_DISABLED = 0x4
TASK_FLAG_START_ONLY_IF_IDLE = 0x10
TASK_FLAG_KILL_ON_IDLE_END = 0x20
TASK_FLAG_DONT_START_IF_ON_BATTERIES = 0x40
TASK_FLAG_KILL_IF_GOING_ON_BATTERIES = 0x80
TASK_FLAG_RUN_ONLY_IF_DOCKED = 0x100
TASK_FLAG_HIDDEN = 0x200
TASK_FLAG_RUN_IF_CONNECTED_TO_INTERNET = 0x400
TASK_FLAG_RESTART_ON_IDLE_RESUME = 0x800
TASK_FLAG_SYSTEM_REQUIRED = 0x1000
TASK_FLAG_RUN_ONLY_IF_LOGGED_ON = 0x2000
################################################################################
# STRUCTURES
################################################################################
# 2.3.4 AT_INFO
class AT_INFO(NDRSTRUCT):
    # Describes one scheduled AT job: time of day, day masks, flags, command.
    structure = (
        ('JobTime',DWORD),
        ('DaysOfMonth',DWORD),
        ('DaysOfWeek',UCHAR),
        ('Flags',UCHAR),
        ('Command',LPWSTR),
    )

class LPAT_INFO(NDRPOINTER):
    # Pointer to AT_INFO.
    referent = (
        ('Data',AT_INFO),
    )

# 2.3.6 AT_ENUM
class AT_ENUM(NDRSTRUCT):
    # Same fields as AT_INFO plus the job identifier; returned by NetrJobEnum.
    structure = (
        ('JobId',DWORD),
        ('JobTime',DWORD),
        ('DaysOfMonth',DWORD),
        ('DaysOfWeek',UCHAR),
        ('Flags',UCHAR),
        ('Command',LPWSTR),
    )

class AT_ENUM_ARRAY(NDRUniConformantArray):
    # Conformant array of AT_ENUM entries.
    item = AT_ENUM

class LPAT_ENUM_ARRAY(NDRPOINTER):
    # Pointer to AT_ENUM_ARRAY.
    referent = (
        ('Data',AT_ENUM_ARRAY),
    )

# 2.3.5 AT_ENUM_CONTAINER
class AT_ENUM_CONTAINER(NDRSTRUCT):
    # Counted buffer of AT_ENUM entries.
    structure = (
        ('EntriesRead',DWORD),
        ('Buffer',LPAT_ENUM_ARRAY),
    )
################################################################################
# RPC CALLS
################################################################################
# 3.2.5.2.1 NetrJobAdd (Opnum 0)
class NetrJobAdd(NDRCALL):
opnum = 0
structure = (
('ServerName',ATSVC_HANDLE),
('pAtInfo', AT_INFO),
)
class NetrJobAddResponse(NDRCALL):
structure = (
('pJobId',DWORD),
('ErrorCode',ULONG),
)
# 3.2.5.2.2 NetrJobDel (Opnum 1)
class NetrJobDel(NDRCALL):
    """NetrJobDel request (MS-TSCH 3.2.5.2.2, opnum 1).

    Deletes every job whose id falls in [MinJobId, MaxJobId].
    """
    opnum = 1
    structure = (
        ('ServerName',ATSVC_HANDLE),
        ('MinJobId', DWORD),
        ('MaxJobId', DWORD),
    )
class NetrJobDelResponse(NDRCALL):
    """NetrJobDel response: just the NET_API_STATUS code."""
    structure = (
        ('ErrorCode',ULONG),
    )
# 3.2.5.2.3 NetrJobEnum (Opnum 2)
class NetrJobEnum(NDRCALL):
    """NetrJobEnum request (MS-TSCH 3.2.5.2.3, opnum 2): enumerate AT jobs.

    Fix: pResumeHandle is declared ``[in, out, unique] LPDWORD`` in the IDL,
    i.e. a nullable pointer to a DWORD, not a bare DWORD.  The response
    structure below already (correctly) uses LPDWORD for the same field.
    """
    opnum = 2
    structure = (
        ('ServerName',ATSVC_HANDLE),
        ('pEnumContainer', AT_ENUM_CONTAINER),
        ('PreferedMaximumLength', DWORD),
        ('pResumeHandle', LPDWORD),
    )
class NetrJobEnumResponse(NDRCALL):
    """NetrJobEnum response: the filled container, totals and status code."""
    structure = (
        ('pEnumContainer', AT_ENUM_CONTAINER),
        ('pTotalEntries', DWORD),
        ('pResumeHandle',LPDWORD),
        ('ErrorCode',ULONG),
    )
# 3.2.5.2.4 NetrJobGetInfo (Opnum 3)
class NetrJobGetInfo(NDRCALL):
    """NetrJobGetInfo request (MS-TSCH 3.2.5.2.4, opnum 3): query one job."""
    opnum = 3
    structure = (
        ('ServerName',ATSVC_HANDLE),
        ('JobId', DWORD),
    )
class NetrJobGetInfoResponse(NDRCALL):
    """NetrJobGetInfo response: pointer to the job's AT_INFO and status code."""
    structure = (
        ('ppAtInfo', LPAT_INFO),
        ('ErrorCode',ULONG),
    )
################################################################################
# OPNUMs and their corresponding structures
################################################################################
# Maps each opnum to its (request, response) NDRCALL pair; used by the
# generic DCE/RPC dispatch machinery to decode replies.
OPNUMS = {
 0 : (NetrJobAdd,NetrJobAddResponse ),
 1 : (NetrJobDel,NetrJobDelResponse ),
 2 : (NetrJobEnum,NetrJobEnumResponse ),
 3 : (NetrJobGetInfo,NetrJobGetInfoResponse ),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def hNetrJobAdd(dce, serverName = NULL, atInfo = NULL):
    """Build a NetrJobAdd request and send it over the *dce* connection."""
    request = NetrJobAdd()
    request['ServerName'] = serverName
    request['pAtInfo'] = atInfo
    return dce.request(request)
def hNetrJobDel(dce, serverName = NULL, minJobId = 0, maxJobId = 0):
    """Build a NetrJobDel request (delete jobs in [minJobId, maxJobId]) and send it."""
    request = NetrJobDel()
    request['ServerName'] = serverName
    request['MinJobId'] = minJobId
    request['MaxJobId'] = maxJobId
    return dce.request(request)
def hNetrJobEnum(dce, serverName = NULL, pEnumContainer = NULL, preferedMaximumLength = 0xffffffff):
    """Build a NetrJobEnum request and send it over the *dce* connection."""
    request = NetrJobEnum()
    request['ServerName'] = serverName
    # Only the Buffer member of the container is caller-supplied.
    request['pEnumContainer']['Buffer'] = pEnumContainer
    request['PreferedMaximumLength'] = preferedMaximumLength
    return dce.request(request)
def hNetrJobGetInfo(dce, serverName = NULL, jobId = 0):
    """Build a NetrJobGetInfo request for *jobId* and send it."""
    request = NetrJobGetInfo()
    request['ServerName'] = serverName
    request['JobId'] = jobId
    return dce.request(request)
# (removed two stray "|" lines — a file-concatenation artifact that is not valid Python)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Callable, Tuple, Iterable, Sequence
from django.utils import translation
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse, HttpRequest
from django.shortcuts import redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, \
PreregistrationUser, get_client, UserActivity, \
get_stream, UserPresence, get_recipient, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, remote_user_to_email, email_allowed_for_realm, \
get_cross_realm_users, resolve_subdomain_to_realm
from zerver.lib.actions import do_change_password, do_change_full_name, do_change_is_admin, \
do_activate_user, do_create_user, do_create_realm, set_default_streams, \
internal_send_message, update_user_presence, do_events_register, \
do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_change_tos_version, \
get_default_subs, user_email_is_unique, do_invite_users, do_refer_friend, \
compute_mit_user_fullname, do_set_muted_topics, clear_followup_emails_queue, \
do_update_pointer, realm_user_count
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, RealmCreationForm, ToSForm, \
CreateUserForm, OurAuthenticationForm
from zerver.lib.actions import is_inactive
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.decorator import require_post, authenticated_json_post_view, \
has_request_variables, \
JsonableError, get_user_profile_by_email, REQ, \
zulip_login_required
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
get_language_list_for_templates
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd, get_subdomain
from version import ZULIP_VERSION
from zproject.backends import password_auth_enabled, dev_auth_enabled, google_auth_enabled
from confirmation.models import Confirmation, RealmCreationKey, check_key_is_valid
import requests
import calendar
import datetime
import simplejson
import re
from six import text_type
from six.moves import urllib, zip_longest, zip, range
import time
import logging
import jwt
import hashlib
import hmac
import os
from zproject.jinja2 import render_to_response
def name_changes_disabled(realm):
    # type: (Optional[Realm]) -> bool
    """True if full-name changes are disabled server-wide or for *realm*."""
    globally_disabled = settings.NAME_CHANGES_DISABLED
    return globally_disabled if realm is None else (globally_disabled or realm.name_changes_disabled)
@require_post
def accounts_register(request):
    # type: (HttpRequest) -> HttpResponse
    """Complete a registration started from a confirmation-link email.

    Looks up the PreregistrationUser behind the posted confirmation key,
    determines which realm the user is joining (unique open realm,
    invitation, special open-realm URL, or email-domain based), collects
    and validates the full name/password via RegistrationForm, and finally
    creates (or activates) the UserProfile, logs the user in, and redirects.
    Re-renders the registration form when validation fails.
    """
    key = request.POST['key']
    confirmation = Confirmation.objects.get(confirmation_key=key)
    prereg_user = confirmation.content_object
    email = prereg_user.email
    realm_creation = prereg_user.realm_creation
    try:
        existing_user_profile = get_user_profile_by_email(email)
    except UserProfile.DoesNotExist:
        existing_user_profile = None
    validators.validate_email(email)
    # If OPEN_REALM_CREATION is enabled all user sign ups should go through the
    # special URL with domain name so that REALM can be identified if multiple realms exist
    unique_open_realm = get_unique_open_realm()
    if unique_open_realm is not None:
        realm = unique_open_realm
        domain = realm.domain
    elif prereg_user.referred_by:
        # If someone invited you, you are joining their realm regardless
        # of your e-mail address.
        realm = prereg_user.referred_by.realm
        domain = realm.domain
        if not email_allowed_for_realm(email, realm):
            return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
    elif prereg_user.realm:
        # You have a realm set, even though nobody referred you. This
        # happens if you sign up through a special URL for an open
        # realm.
        domain = prereg_user.realm.domain
        realm = get_realm(domain)
    else:
        domain = resolve_email_to_domain(email)
        realm = get_realm(domain)
    if realm and realm.deactivated:
        # The user is trying to register for a deactivated realm. Advise them to
        # contact support.
        return render_to_response("zerver/deactivated.html",
                                  {"deactivated_domain_name": realm.name,
                                   "zulip_administrator": settings.ZULIP_ADMINISTRATOR})
    try:
        if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
            # Mirror dummy users to be activated must be inactive
            is_inactive(email)
        else:
            # Other users should not already exist at all.
            user_email_is_unique(email)
    except ValidationError:
        return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' +
                                    urllib.parse.quote_plus(email))
    name_validated = False
    full_name = None
    if request.POST.get('from_confirmation'):
        try:
            del request.session['authenticated_full_name']
        except KeyError:
            pass
        if realm is not None and realm.is_zephyr_mirror_realm and domain == "mit.edu":
            # for MIT users, we can get an authoritative name from Hesiod
            hesiod_name = compute_mit_user_fullname(email)
            form = RegistrationForm(
                initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
            name_validated = True
        elif settings.POPULATE_PROFILE_VIA_LDAP:
            for backend in get_backends():
                if isinstance(backend, LDAPBackend):
                    ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
                    try:
                        ldap_full_name = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
                        request.session['authenticated_full_name'] = ldap_full_name
                        name_validated = True
                        # We don't use initial= here, because if the form is
                        # complete (that is, no additional fields need to be
                        # filled out by the user) we want the form to validate,
                        # so they can be directly registered without having to
                        # go through this interstitial.
                        form = RegistrationForm({'full_name': ldap_full_name})
                        # FIXME: This will result in the user getting
                        # validation errors if they have to enter a password.
                        # Not relevant for ONLY_SSO, though.
                        break
                    except TypeError:
                        # Let the user fill out a name and/or try another backend
                        form = RegistrationForm()
        elif 'full_name' in request.POST:
            form = RegistrationForm(
                initial={'full_name': request.POST.get('full_name')}
            )
        else:
            form = RegistrationForm()
    else:
        postdata = request.POST.copy()
        if name_changes_disabled(realm):
            # If we populate profile information via LDAP and we have a
            # verified name from you on file, use that. Otherwise, fall
            # back to the full name in the request.
            try:
                postdata.update({'full_name': request.session['authenticated_full_name']})
                name_validated = True
            except KeyError:
                pass
        form = RegistrationForm(postdata)
        if not password_auth_enabled(realm):
            form['password'].field.required = False
    if form.is_valid():
        if password_auth_enabled(realm):
            password = form.cleaned_data['password']
        else:
            # SSO users don't need no passwords
            password = None
        if realm_creation:
            domain = split_email_to_domain(email)
            realm_name = form.cleaned_data['realm_name']
            org_type = int(form.cleaned_data['realm_org_type'])
            if settings.REALMS_HAVE_SUBDOMAINS:
                realm = do_create_realm(domain, realm_name, org_type=org_type,
                                        subdomain=form.cleaned_data['realm_subdomain'])[0]
            else:
                realm = do_create_realm(domain, realm_name, org_type=org_type)[0]
            set_default_streams(realm, settings.DEFAULT_NEW_REALM_STREAMS)
        full_name = form.cleaned_data['full_name']
        short_name = email_to_username(email)
        first_in_realm = len(UserProfile.objects.filter(realm=realm, is_bot=False)) == 0
        # FIXME: sanitize email addresses and fullname
        if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
            try:
                user_profile = existing_user_profile
                do_activate_user(user_profile)
                do_change_password(user_profile, password)
                do_change_full_name(user_profile, full_name)
            except UserProfile.DoesNotExist:
                user_profile = do_create_user(email, password, realm, full_name, short_name,
                                              prereg_user=prereg_user,
                                              tos_version=settings.TOS_VERSION,
                                              newsletter_data={"IP": request.META['REMOTE_ADDR']})
        else:
            user_profile = do_create_user(email, password, realm, full_name, short_name,
                                          prereg_user=prereg_user,
                                          tos_version=settings.TOS_VERSION,
                                          newsletter_data={"IP": request.META['REMOTE_ADDR']})
        # This logs you in using the ZulipDummyBackend, since honestly nothing
        # more fancy than this is required.
        return_data = {} # type: Dict[str, bool]
        auth_result = authenticate(username=user_profile.email,
                                   realm_subdomain=realm.subdomain,
                                   return_data=return_data,
                                   use_dummy_backend=True)
        if return_data.get('invalid_subdomain'):
            # By construction, this should never happen.
            logging.error("Subdomain mismatch in registration %s: %s" % (
                realm.subdomain, user_profile.email,))
            return redirect('/')
        login(request, auth_result)
        if first_in_realm:
            do_change_is_admin(user_profile, True)
            invite_url = reverse('zerver.views.initial_invite_page')
            if (realm_creation and settings.REALMS_HAVE_SUBDOMAINS):
                invite_url = "%s%s.%s%s" % (
                    settings.EXTERNAL_URI_SCHEME,
                    form.cleaned_data['realm_subdomain'],
                    settings.EXTERNAL_HOST,
                    reverse('zerver.views.initial_invite_page')
                )
            return HttpResponseRedirect(invite_url)
        else:
            return HttpResponseRedirect(reverse('zerver.views.home'))
    return render_to_response('zerver/register.html',
                              {'form': form,
                               'company_name': domain,
                               'email': email,
                               'key': key,
                               'full_name': request.session.get('authenticated_full_name', None),
                               'lock_name': name_validated and name_changes_disabled(realm),
                               # password_auth_enabled is normally set via our context processor,
                               # but for the registration form, there is no logged in user yet, so
                               # we have to set it here.
                               'creating_new_team': realm_creation,
                               'realms_have_subdomains': settings.REALMS_HAVE_SUBDOMAINS,
                               'password_auth_enabled': password_auth_enabled(realm),
                              },
                             request=request)
@zulip_login_required
def accounts_accept_terms(request):
    # type: (HttpRequest) -> HttpResponse
    """Show (GET) or process (POST) the Terms of Service acceptance form."""
    if request.method == "POST":
        form = ToSForm(request.POST)
        if form.is_valid():
            do_change_tos_version(request.user, settings.TOS_VERSION)
            return redirect(home)
    else:
        form = ToSForm()
    email = request.user.email
    domain = resolve_email_to_domain(email)
    special_message_template = None
    # Show an extra first-time message when configured and the user has
    # never accepted any ToS version before.
    if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
        special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
    return render_to_response('zerver/accounts_accept_terms.html',
                              { 'form': form, 'company_name': domain, 'email': email, \
                                'special_message_template' : special_message_template },
                              request=request)
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile, invitee_emails_raw=REQ("invitee_emails")):
    # type: (HttpRequest, UserProfile, str) -> HttpResponse
    """Invite a list of email addresses to the streams named in the POST body."""
    if not invitee_emails_raw:
        return json_error(_("You must specify at least one email address."))
    invitee_emails = get_invitee_emails_set(invitee_emails_raw)
    stream_names = request.POST.getlist('stream')
    if not stream_names:
        return json_error(_("You must specify at least one stream for invitees to join."))
    # We unconditionally sub you to the notifications stream if it
    # exists and is public.
    notifications_stream = user_profile.realm.notifications_stream
    if notifications_stream and not notifications_stream.invite_only:
        stream_names.append(notifications_stream.name)
    streams = [] # type: List[Stream]
    for stream_name in stream_names:
        stream = get_stream(stream_name, user_profile.realm)
        if stream is None:
            # Any bad stream name aborts the whole invite, before sending.
            return json_error(_("Stream does not exist: %s. No invites were sent.") % (stream_name,))
        streams.append(stream)
    ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
    if ret_error is not None:
        return json_error(data=error_data, msg=ret_error)
    else:
        return json_success()
def get_invitee_emails_set(invitee_emails_raw):
    # type: (str) -> Set[str]
    """Parse a comma- or newline-separated invitee list into a set of emails.

    Entries of the form ``Name <addr>`` are reduced to the bracketed address;
    surrounding whitespace is stripped from every entry.
    """
    result = set()
    for entry in set(re.split(r'[,\n]', invitee_emails_raw)):
        bracketed = re.search(r'<(?P<email>.*)>', entry)
        if bracketed is not None:
            entry = bracketed.group('email')
        result.add(entry.strip())
    return result
def create_homepage_form(request, user_info=None):
    # type: (HttpRequest, Optional[Dict[str, Any]]) -> HomepageForm
    """Construct a HomepageForm bound to *user_info* (when truthy) or unbound.

    An empty fields dict is not treated the same way as not providing one,
    so the bound/unbound cases are kept as distinct constructor calls.
    """
    common_kwargs = dict(domain=request.session.get("domain"),
                         subdomain=get_subdomain(request))
    if user_info:
        return HomepageForm(user_info, **common_kwargs)
    return HomepageForm(**common_kwargs)
def maybe_send_to_registration(request, email, full_name=''):
    # type: (HttpRequest, text_type, text_type) -> HttpResponse
    """Send an externally-verified email into the confirmation/signup flow.

    If the HomepageForm accepts the email, create (or, under ONLY_SSO, reuse
    the latest) PreregistrationUser and redirect to its confirmation link
    with the full name pre-filled; otherwise re-render the signup page with
    the form's errors.
    """
    form = create_homepage_form(request, user_info={'email': email})
    request.verified_email = None
    if form.is_valid():
        # Construct a PreregistrationUser object and send the user over to
        # the confirmation view.
        prereg_user = None
        if settings.ONLY_SSO:
            try:
                prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
            except PreregistrationUser.DoesNotExist:
                prereg_user = create_preregistration_user(email, request)
        else:
            prereg_user = create_preregistration_user(email, request)
        return redirect("".join((
            settings.EXTERNAL_URI_SCHEME,
            request.get_host(),
            "/",
            # Split this so we only get the part after the /
            Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
            '?full_name=',
            # urllib does not handle Unicode, so coerce to encoded byte string
            # Explanation: http://stackoverflow.com/a/5605354/90777
            urllib.parse.quote_plus(full_name.encode('utf8')))))
    else:
        url = reverse('register')
        return render_to_response('zerver/accounts_home.html',
                                  {'form': form, 'current_url': lambda: url},
                                  request=request)
def login_or_register_remote_user(request, remote_username, user_profile, full_name=''):
    # type: (HttpRequest, text_type, UserProfile, text_type) -> HttpResponse
    """Log in an externally-authenticated user, or send them to registration.

    If no usable account exists (None, or only a mirror dummy), hand off to
    the PreregistrationUser signup flow; otherwise log the profile in and
    redirect to the realm's home (subdomain-qualified when applicable).
    """
    if user_profile is None or user_profile.is_mirror_dummy:
        # Since execution has reached here, the client specified a remote user
        # but no associated user account exists. Send them over to the
        # PreregistrationUser flow.
        return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
    else:
        login(request, user_profile)
        if settings.OPEN_REALM_CREATION and user_profile.realm.subdomain is not None:
            return HttpResponseRedirect("%s%s.%s" % (settings.EXTERNAL_URI_SCHEME,
                                                     user_profile.realm.subdomain,
                                                     settings.EXTERNAL_HOST))
        return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
                                              request.get_host()))
def remote_user_sso(request):
    # type: (HttpRequest) -> HttpResponse
    """Log in (or register) based on the REMOTE_USER variable set by SSO."""
    if "REMOTE_USER" not in request.META:
        raise JsonableError(_("No REMOTE_USER set."))
    remote_user = request.META["REMOTE_USER"]
    user_profile = authenticate(remote_user=remote_user, realm_subdomain=get_subdomain(request))
    return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
    # type: (HttpRequest) -> HttpResponse
    """Log in a user via a JSON web token POSTed by a trusted remote service.

    The token's "user" and "realm" claims are combined into an email; the
    signature is checked against settings.JWT_AUTH_KEYS for that realm.

    NOTE(review): jwt.load / jwt.verify_signature is the old PyJWT 0.x API;
    modern PyJWT only exposes jwt.decode — confirm the pinned version.
    """
    try:
        json_web_token = request.POST["json_web_token"]
        payload, signing_input, header, signature = jwt.load(json_web_token)
    except KeyError:
        raise JsonableError(_("No JSON web token passed in request"))
    except jwt.DecodeError:
        raise JsonableError(_("Bad JSON web token"))
    remote_user = payload.get("user", None)
    if remote_user is None:
        raise JsonableError(_("No user specified in JSON web token claims"))
    domain = payload.get('realm', None)
    if domain is None:
        raise JsonableError(_("No domain specified in JSON web token claims"))
    email = "%s@%s" % (remote_user, domain)
    try:
        jwt.verify_signature(payload, signing_input, header, signature,
                             settings.JWT_AUTH_KEYS[domain])
        # We do all the authentication we need here (otherwise we'd have to
        # duplicate work), but we need to call authenticate with some backend so
        # that the request.backend attribute gets set.
        return_data = {} # type: Dict[str, bool]
        user_profile = authenticate(username=email,
                                    realm_subdomain=get_subdomain(request),
                                    return_data=return_data,
                                    use_dummy_backend=True)
        if return_data.get('invalid_subdomain'):
            logging.warning("User attempted to JWT login to wrong subdomain %s: %s" % (get_subdomain(request), email,))
            raise JsonableError(_("Wrong subdomain"))
    except (jwt.DecodeError, jwt.ExpiredSignature):
        raise JsonableError(_("Bad JSON web token signature"))
    except KeyError:
        # Raised by the JWT_AUTH_KEYS lookup when the realm has no key configured.
        raise JsonableError(_("Realm not authorized for JWT login"))
    except UserProfile.DoesNotExist:
        user_profile = None
    return login_or_register_remote_user(request, email, user_profile, remote_user)
def google_oauth2_csrf(request, value):
    # type: (HttpRequest, str) -> str
    """Hex HMAC-SHA256 of *value*, keyed on the request's Django CSRF token."""
    key = get_token(request).encode('utf-8')
    return hmac.new(key, value.encode("utf-8"), hashlib.sha256).hexdigest()
def start_google_oauth2(request):
    # type: (HttpRequest) -> HttpResponse
    """Redirect to Google's OAuth2 consent screen with an HMAC'd CSRF state."""
    auth_endpoint = 'https://accounts.google.com/o/oauth2/auth?'
    cur_time = str(int(time.time()))
    # State is "<timestamp>:<hmac(timestamp)>", verified in finish_google_oauth2.
    csrf_state = '{}:{}'.format(cur_time, google_oauth2_csrf(request, cur_time))
    redirect_uri = ''.join((
        settings.EXTERNAL_URI_SCHEME,
        request.get_host(),
        reverse('zerver.views.finish_google_oauth2'),
    ))
    params = {
        'response_type': 'code',
        'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
        'redirect_uri': redirect_uri,
        'scope': 'profile email',
        'state': csrf_state,
    }
    return redirect(auth_endpoint + urllib.parse.urlencode(params))
def finish_google_oauth2(request):
    # type: (HttpRequest) -> HttpResponse
    """OAuth2 callback: validate state, exchange the code, and log the user in.

    Verifies the HMAC CSRF state produced by start_google_oauth2, trades the
    authorization code for an access token, fetches the Google profile for a
    name and the "account" email, then hands off to
    login_or_register_remote_user.  Returns HTTP 400 on any protocol error.
    """
    error = request.GET.get('error')
    if error == 'access_denied':
        return redirect('/')
    elif error is not None:
        logging.warning('Error from google oauth2 login: %s' % (request.GET.get("error"),))
        return HttpResponse(status=400)
    csrf_state = request.GET.get('state')
    if csrf_state is None or len(csrf_state.split(':')) != 2:
        logging.warning('Missing Google oauth2 CSRF state')
        return HttpResponse(status=400)
    value, hmac_value = csrf_state.split(':')
    if hmac_value != google_oauth2_csrf(request, value):
        logging.warning('Google oauth2 CSRF error')
        return HttpResponse(status=400)
    resp = requests.post(
        'https://www.googleapis.com/oauth2/v3/token',
        data={
            'code': request.GET.get('code'),
            'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
            'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
            'redirect_uri': ''.join((
                settings.EXTERNAL_URI_SCHEME,
                request.get_host(),
                reverse('zerver.views.finish_google_oauth2'),
            )),
            'grant_type': 'authorization_code',
        },
    )
    if resp.status_code == 400:
        logging.warning('User error converting Google oauth2 login to token: %s' % (resp.text,))
        return HttpResponse(status=400)
    elif resp.status_code != 200:
        logging.error('Could not convert google oauth2 code to access_token: %s' % (resp.text,))
        return HttpResponse(status=400)
    access_token = resp.json()['access_token']
    resp = requests.get(
        'https://www.googleapis.com/plus/v1/people/me',
        params={'access_token': access_token}
    )
    if resp.status_code == 400:
        logging.warning('Google login failed making info API call: %s' % (resp.text,))
        return HttpResponse(status=400)
    elif resp.status_code != 200:
        logging.error('Google login failed making API call: %s' % (resp.text,))
        return HttpResponse(status=400)
    body = resp.json()
    try:
        full_name = body['name']['formatted']
    except KeyError:
        # Only google+ users have a formatted name. I am ignoring i18n here.
        full_name = u'{} {}'.format(
            body['name']['givenName'], body['name']['familyName']
        )
    for email in body['emails']:
        if email['type'] == 'account':
            break
    else:
        # for/else: no "account"-type email in the profile.
        logging.error('Google oauth2 account email not found: %s' % (body,))
        return HttpResponse(status=400)
    email_address = email['value']
    return_data = {} # type: Dict[str, bool]
    user_profile = authenticate(username=email_address,
                                realm_subdomain=get_subdomain(request),
                                use_dummy_backend=True,
                                return_data=return_data)
    if return_data.get('invalid_subdomain'):
        logging.warning("User attempted to Google login to wrong subdomain %s: %s" % (get_subdomain(request), email_address,))
        return redirect('/')
    return login_or_register_remote_user(request, email_address, user_profile, full_name)
def login_page(request, **kwargs):
    # type: (HttpRequest, **Any) -> HttpResponse
    """Render Django's login page, adding dev-backend user lists when enabled
    and pre-filling the email field from the ``email`` query parameter."""
    extra_context = kwargs.pop('extra_context', {})
    if dev_auth_enabled():
        # Development environments usually have only a few users, but
        # it still makes sense to limit how many users we render to
        # support performance testing with DevAuthBackend.
        MAX_DEV_BACKEND_USERS = 100
        users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
        users = users_query.order_by('email')[0:MAX_DEV_BACKEND_USERS]
        extra_context['direct_admins'] = [u.email for u in users if u.is_realm_admin]
        extra_context['direct_users'] = [u.email for u in users if not u.is_realm_admin]
    template_response = django_login_page(
        request, authentication_form=OurAuthenticationForm,
        extra_context=extra_context, **kwargs)
    try:
        template_response.context_data['email'] = request.GET['email']
    except KeyError:
        pass
    return template_response
def dev_direct_login(request, **kwargs):
    # type: (HttpRequest, **Any) -> HttpResponse
    """Password-less login for development only (requires DevAuthBackend)."""
    # This function allows logging in without a password and should only be called in development environments.
    # It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
    if (not dev_auth_enabled()) or settings.PRODUCTION:
        # This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
        raise Exception('Direct login not supported.')
    email = request.POST['direct_email']
    user_profile = authenticate(username=email, realm_subdomain=get_subdomain(request))
    if user_profile is None:
        raise Exception("User cannot login")
    login(request, user_profile)
    if settings.OPEN_REALM_CREATION and settings.DEVELOPMENT:
        if user_profile.realm.subdomain is not None:
            return HttpResponseRedirect("%s%s.%s" % (settings.EXTERNAL_URI_SCHEME,
                                                     user_profile.realm.subdomain,
                                                     settings.EXTERNAL_HOST))
    return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
                                          request.get_host()))
@csrf_exempt
@require_post
@has_request_variables
def api_dev_fetch_api_key(request, username=REQ()):
    # type: (HttpRequest, str) -> HttpResponse
    """This function allows logging in without a password on the Zulip
    mobile apps when connecting to a Zulip development environment. It
    requires DevAuthBackend to be included in settings.AUTHENTICATION_BACKENDS.
    """
    if not dev_auth_enabled() or settings.PRODUCTION:
        return json_error(_("Dev environment not enabled."))
    return_data = {} # type: Dict[str, bool]
    user_profile = authenticate(username=username,
                                realm_subdomain=get_subdomain(request),
                                return_data=return_data)
    # NOTE(review): these flags are presumably filled in by the auth backend
    # through the return_data dict — confirm against the backend implementation.
    if return_data.get("inactive_realm") == True:
        return json_error(_("Your realm has been deactivated."),
                          data={"reason": "realm deactivated"}, status=403)
    if return_data.get("inactive_user") == True:
        return json_error(_("Your account has been disabled."),
                          data={"reason": "user disable"}, status=403)
    login(request, user_profile)
    return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_dev_get_emails(request):
    # type: (HttpRequest) -> HttpResponse
    """List dev-backend login emails (admins and non-admins), dev-only."""
    if not dev_auth_enabled() or settings.PRODUCTION:
        return json_error(_("Dev environment not enabled."))
    MAX_DEV_BACKEND_USERS = 100 # type: int
    users = (UserProfile.objects.select_related()
             .filter(is_bot=False, is_active=True)
             .order_by('email')[0:MAX_DEV_BACKEND_USERS])
    admin_emails = [u.email for u in users if u.is_realm_admin]
    other_emails = [u.email for u in users if not u.is_realm_admin]
    return json_success(dict(direct_admins=admin_emails,
                             direct_users=other_emails))
@authenticated_json_post_view
@has_request_variables
def json_bulk_invite_users(request, user_profile,
                           invitee_emails_list=REQ('invitee_emails',
                                                   validator=check_list(check_string))):
    # type: (HttpRequest, UserProfile, List[str]) -> HttpResponse
    """Invite a list of emails to the caller's default streams (bulk invite)."""
    invitee_emails = set(invitee_emails_list)
    streams = get_default_subs(user_profile)
    ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
    if ret_error is not None:
        return json_error(data=error_data, msg=ret_error)
    else:
        # Report bulk invites to internal Zulip.
        invited = PreregistrationUser.objects.filter(referred_by=user_profile)
        internal_message = "%s <`%s`> invited %d people to Zulip." % (
            user_profile.full_name, user_profile.email, invited.count())
        internal_send_message(settings.NEW_USER_BOT, "stream", "signups",
                              user_profile.realm.domain, internal_message)
        return json_success()
@zulip_login_required
def initial_invite_page(request):
    # type: (HttpRequest) -> HttpResponse
    """Show the bulk-invite page, but only to the first user in a realm."""
    user = request.user
    # Only show the bulk-invite page for the first user in a realm.
    # Use .count() so the database counts rows instead of materializing
    # every UserProfile just to take len() of a list.
    if UserProfile.objects.filter(realm=user.realm).count() > 1:
        return redirect('zerver.views.home')
    params = {'company_name': user.realm.domain}
    if user.realm.restricted_to_domain:
        params['invite_suffix'] = user.realm.domain
    else:
        params['invite_suffix'] = ''
    return render_to_response('zerver/initial_invite_page.html', params,
                              request=request)
@require_post
def logout_then_login(request, **kwargs):
    # type: (HttpRequest, **Any) -> HttpResponse
    """Log the user out, then redirect to the login page.

    Bug fix: the kwargs dict used to be passed positionally, so Django's
    logout_then_login received it as its login_url parameter — any non-empty
    kwargs would have been used as the redirect target.  Forward them as
    keyword arguments instead.
    """
    return django_logout_then_login(request, **kwargs)
def create_preregistration_user(email, request, realm_creation=False):
    # type: (text_type, HttpRequest, bool) -> PreregistrationUser
    """Create a PreregistrationUser for *email*.

    If the session carries a "domain" for a completely open realm, attach the
    new PreregistrationUser to that realm and clear the session entry.
    (Return annotation corrected: this returns the created
    PreregistrationUser, not an HttpResponse.)
    """
    domain = request.session.get("domain")
    if completely_open(domain):
        # Clear the "domain" from the session object; it's no longer needed
        request.session["domain"] = None
        # The user is trying to sign up for a completely open realm,
        # so create them a PreregistrationUser for that realm
        return PreregistrationUser.objects.create(email=email,
                                                  realm=get_realm(domain),
                                                  realm_creation=realm_creation)
    return PreregistrationUser.objects.create(email=email, realm_creation=realm_creation)
def accounts_home_with_domain(request, domain):
    # type: (HttpRequest, str) -> HttpResponse
    """Signup entry point for a completely open realm's special URL."""
    if not completely_open(domain):
        return HttpResponseRedirect(reverse('zerver.views.accounts_home'))
    # You can sign up for a completely open realm through a special
    # registration path that contains the domain in the URL. We store this
    # information in the session rather than elsewhere because we don't have
    # control over URL or form data for folks registering through OpenID.
    request.session["domain"] = domain
    return accounts_home(request)
def send_registration_completion_email(email, request, realm_creation=False):
    # type: (str, HttpRequest, bool) -> Confirmation
    """
    Send an email with a confirmation link to the provided e-mail so the user
    can complete their registration.

    Creates the backing PreregistrationUser and returns the Confirmation
    object tracking the link that was sent.
    """
    prereg_user = create_preregistration_user(email, request, realm_creation)
    context = {'support_email': settings.ZULIP_ADMINISTRATOR,
               'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS}
    return Confirmation.objects.send_confirmation(prereg_user, email,
                                                  additional_context=context,
                                                  host=request.get_host())
def redirect_to_email_login_url(email):
    # type: (str) -> HttpResponseRedirect
    """Redirect to the login page with *email* pre-filled via the query string."""
    quoted_email = urllib.parse.quote_plus(email)
    login_url = reverse('django.contrib.auth.views.login')
    return HttpResponseRedirect('{}?email={}'.format(login_url, quoted_email))
"""
When settings.OPEN_REALM_CREATION is enabled public users can create new realm. For creating the realm the user should
not be the member of any current realm. The realm is created with domain same as the that of the user's email.
When there is no unique_open_realm user registrations are made by visiting /register/domain_of_the_realm.
"""
def create_realm(request, creation_key=None):
    # type: (HttpRequest, Optional[text_type]) -> HttpResponse
    """Render (GET) or process (POST) the new-organization creation form.

    With OPEN_REALM_CREATION disabled, a valid single-use *creation_key* is
    required; on a successful POST a confirmation email is sent and the key
    (if any) is consumed.
    """
    if not settings.OPEN_REALM_CREATION:
        if creation_key is None:
            return render_to_response("zerver/realm_creation_failed.html",
                                      {'message': _('New organization creation disabled.')})
        elif not check_key_is_valid(creation_key):
            return render_to_response("zerver/realm_creation_failed.html",
                                      {'message': _('The organization creation link has been expired'
                                                    ' or is not valid.')})
    if request.method == 'POST':
        form = RealmCreationForm(request.POST, domain=request.session.get("domain"))
        if form.is_valid():
            email = form.cleaned_data['email']
            confirmation_key = send_registration_completion_email(email, request, realm_creation=True).confirmation_key
            if settings.DEVELOPMENT:
                # Expose the key in the session so dev tooling can fetch it.
                request.session['confirmation_key'] = {'confirmation_key': confirmation_key}
            if (creation_key is not None and check_key_is_valid(creation_key)):
                RealmCreationKey.objects.get(creation_key=creation_key).delete()
            return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
        try:
            email = request.POST['email']
            user_email_is_unique(email)
        except ValidationError:
            # if the user is already registered he can't create a new realm as a realm
            # with the same domain as user's email already exists
            return redirect_to_email_login_url(email)
    else:
        form = RealmCreationForm(domain=request.session.get("domain"))
    return render_to_response('zerver/create_realm.html',
                              {'form': form, 'current_url': request.get_full_path},
                              request=request)
def confirmation_key(request):
    # type: (HttpRequest) -> HttpResponse
    """Return the confirmation key stashed in the session (development helper)."""
    stored_key = request.session.get('confirmation_key')
    return json_success(stored_key)
def accounts_home(request):
    # type: (HttpRequest) -> HttpResponse
    """Render the signup page and kick off email confirmation on submit."""
    if request.method != 'POST':
        form = create_homepage_form(request)
    else:
        form = create_homepage_form(request, user_info=request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            send_registration_completion_email(email, request)
            return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
        try:
            email = request.POST['email']
            # Note: We don't check for uniqueness
            is_inactive(email)
        except ValidationError:
            # Already-active account: point the user at the login flow.
            return redirect_to_email_login_url(email)
    return render_to_response('zerver/accounts_home.html',
                              {'form': form, 'current_url': request.get_full_path},
                              request=request)
def approximate_unread_count(user_profile):
    # type: (UserProfile) -> int
    """Estimate how many unread messages the user has past their pointer."""
    hidden_recipient_ids = [
        sub.recipient.id
        for sub in Subscription.objects.filter(user_profile=user_profile,
                                               in_home_view=False)
    ]
    # TODO: We may want to exclude muted messages from this count.
    #       It was attempted in the past, but the original attempt
    #       was broken.  When we re-architect muting, we may
    #       want to to revisit this (see git issue #1019).
    unread = UserMessage.objects.filter(
        user_profile=user_profile,
        message_id__gt=user_profile.pointer,
    ).exclude(
        message__recipient__type=Recipient.STREAM,
        message__recipient__id__in=hidden_recipient_ids,
    ).exclude(flags=UserMessage.flags.read)
    return unread.count()
def sent_time_in_epoch_seconds(user_message):
    # type: (UserMessage) -> Optional[float]
    # NOTE(review): the original type comment said "-> float", but the
    # function returns None for a falsy user_message; fixed to Optional.
    """Return the UTC epoch timestamp of user_message's message, or None."""
    # user_message is a UserMessage object.
    if not user_message:
        return None
    # We have USE_TZ = True, so our datetime objects are timezone-aware.
    # Return the epoch seconds in UTC.
    return calendar.timegm(user_message.message.pub_date.utctimetuple())
@zulip_login_required
def home(request):
    # type: (HttpRequest) -> HttpResponse
    """Render the main Zulip webapp.

    Registers an event queue for the user and serializes the full initial
    client state into the page as the global ``page_params`` object.
    Supports an "embedded narrow" mode (?stream=...&topic=...) used for
    read-only public-stream views.
    """
    # We need to modify the session object every two weeks or it will expire.
    # This line makes reloading the page a sufficient action to keep the
    # session alive.
    request.session.modified = True

    user_profile = request.user
    request._email = request.user.email
    request.client = get_client("website")

    # If a user hasn't signed the current Terms of Service, send them there
    if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
            int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
        return accounts_accept_terms(request)

    # Optional embedded-narrow parameters (public streams only).
    narrow = []  # type: List[List[text_type]]
    narrow_stream = None
    narrow_topic = request.GET.get("topic")
    if request.GET.get("stream"):
        try:
            narrow_stream = get_stream(request.GET.get("stream"), user_profile.realm)
            assert(narrow_stream is not None)
            assert(narrow_stream.is_public())
            narrow = [["stream", narrow_stream.name]]
        except Exception:
            # Bad stream name / private stream: fall back to the normal app.
            logging.exception("Narrow parsing")
        if narrow_topic is not None:
            narrow.append(["topic", narrow_topic])

    register_ret = do_events_register(user_profile, request.client,
                                      apply_markdown=True, narrow=narrow)
    user_has_messages = (register_ret['max_message_id'] != -1)

    # Reset our don't-spam-users-with-email counter since the
    # user has since logged in
    if not user_profile.last_reminder is None:
        user_profile.last_reminder = None
        user_profile.save(update_fields=["last_reminder"])

    # Brand new users get the tutorial
    needs_tutorial = settings.TUTORIAL_ENABLED and \
        user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED

    first_in_realm = realm_user_count(user_profile.realm) == 1
    # If you are the only person in the realm and you didn't invite
    # anyone, we'll continue to encourage you to do so on the frontend.
    prompt_for_invites = first_in_realm and \
        not PreregistrationUser.objects.filter(referred_by=user_profile).count()

    if user_profile.pointer == -1 and user_has_messages:
        # Put the new user's pointer at the bottom
        #
        # This improves performance, because we limit backfilling of messages
        # before the pointer. It's also likely that someone joining an
        # organization is interested in recent messages more than the very
        # first messages on the system.
        register_ret['pointer'] = register_ret['max_message_id']
        user_profile.last_pointer_updater = request.session.session_key

    # Locate the message at the pointer (used for furthest_read_time).
    if user_profile.pointer == -1:
        latest_read = None
    else:
        try:
            latest_read = UserMessage.objects.get(user_profile=user_profile,
                                                  message__id=user_profile.pointer)
        except UserMessage.DoesNotExist:
            # Don't completely fail if your saved pointer ID is invalid
            logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
            latest_read = None

    desktop_notifications_enabled = user_profile.enable_desktop_notifications
    if narrow_stream is not None:
        # Embedded narrow views never trigger desktop notifications.
        desktop_notifications_enabled = False

    if user_profile.realm.notifications_stream:
        notifications_stream = user_profile.realm.notifications_stream.name
    else:
        notifications_stream = ""

    # Set default language and make it persist
    default_language = register_ret['default_language']
    url_lang = '/{}'.format(request.LANGUAGE_CODE)
    if not request.path.startswith(url_lang):
        translation.activate(default_language)

    request.session[translation.LANGUAGE_SESSION_KEY] = default_language

    # Pass parameters to the client-side JavaScript code.
    # These end up in a global JavaScript Object named 'page_params'.
    page_params = dict(
        zulip_version = ZULIP_VERSION,
        share_the_love = settings.SHARE_THE_LOVE,
        development_environment = settings.DEVELOPMENT,
        debug_mode = settings.DEBUG,
        test_suite = settings.TEST_SUITE,
        poll_timeout = settings.POLL_TIMEOUT,
        login_page = settings.HOME_NOT_LOGGED_IN,
        server_uri = settings.SERVER_URI,
        realm_uri = user_profile.realm.uri,
        maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
        server_generation = settings.SERVER_GENERATION,
        password_auth_enabled = password_auth_enabled(user_profile.realm),
        have_initial_messages = user_has_messages,
        subbed_info = register_ret['subscriptions'],
        unsubbed_info = register_ret['unsubscribed'],
        neversubbed_info = register_ret['never_subscribed'],
        email_dict = register_ret['email_dict'],
        people_list = register_ret['realm_users'],
        bot_list = register_ret['realm_bots'],
        initial_pointer = register_ret['pointer'],
        initial_presences = register_ret['presences'],
        initial_servertime = time.time(),  # Used for calculating relative presence age
        fullname = user_profile.full_name,
        email = user_profile.email,
        domain = user_profile.realm.domain,
        realm_name = register_ret['realm_name'],
        realm_invite_required = register_ret['realm_invite_required'],
        realm_invite_by_admins_only = register_ret['realm_invite_by_admins_only'],
        realm_create_stream_by_admins_only = register_ret['realm_create_stream_by_admins_only'],
        realm_allow_message_editing = register_ret['realm_allow_message_editing'],
        realm_message_content_edit_limit_seconds = register_ret['realm_message_content_edit_limit_seconds'],
        realm_restricted_to_domain = register_ret['realm_restricted_to_domain'],
        realm_default_language = register_ret['realm_default_language'],
        enter_sends = user_profile.enter_sends,
        user_id = user_profile.id,
        left_side_userlist = register_ret['left_side_userlist'],
        default_language = register_ret['default_language'],
        default_language_name = get_language_name(register_ret['default_language']),
        language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
        language_list = get_language_list(),
        referrals = register_ret['referrals'],
        realm_emoji = register_ret['realm_emoji'],
        needs_tutorial = needs_tutorial,
        first_in_realm = first_in_realm,
        prompt_for_invites = prompt_for_invites,
        notifications_stream = notifications_stream,
        cross_realm_user_emails = list(get_cross_realm_users()),

        # Stream message notification settings:
        stream_desktop_notifications_enabled = user_profile.enable_stream_desktop_notifications,
        stream_sounds_enabled = user_profile.enable_stream_sounds,

        # Private message and @-mention notification settings:
        desktop_notifications_enabled = desktop_notifications_enabled,
        sounds_enabled = user_profile.enable_sounds,
        enable_offline_email_notifications = user_profile.enable_offline_email_notifications,
        enable_offline_push_notifications = user_profile.enable_offline_push_notifications,

        twenty_four_hour_time = register_ret['twenty_four_hour_time'],

        enable_digest_emails = user_profile.enable_digest_emails,
        event_queue_id = register_ret['queue_id'],
        last_event_id = register_ret['last_event_id'],
        max_message_id = register_ret['max_message_id'],
        unread_count = approximate_unread_count(user_profile),
        furthest_read_time = sent_time_in_epoch_seconds(latest_read),
        save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,
        alert_words = register_ret['alert_words'],
        muted_topics = register_ret['muted_topics'],
        realm_filters = register_ret['realm_filters'],
        realm_default_streams = register_ret['realm_default_streams'],
        is_admin = user_profile.is_realm_admin,
        can_create_streams = user_profile.can_create_streams(),
        name_changes_disabled = name_changes_disabled(user_profile.realm),
        has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
        autoscroll_forever = user_profile.autoscroll_forever,
        default_desktop_notifications = user_profile.default_desktop_notifications,
        avatar_url = avatar_url(user_profile),
        mandatory_topics = user_profile.realm.mandatory_topics,
        show_digest_email = user_profile.realm.show_digest_email,
        presence_disabled = user_profile.realm.presence_disabled,
        is_zephyr_mirror_realm = user_profile.realm.is_zephyr_mirror_realm,
    )

    if narrow_stream is not None:
        # In narrow_stream context, initial pointer is just latest message
        recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
        try:
            initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
        except IndexError:
            initial_pointer = -1
        page_params["narrow_stream"] = narrow_stream.name
        if narrow_topic is not None:
            page_params["narrow_topic"] = narrow_topic
        page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
        page_params["max_message_id"] = initial_pointer
        page_params["initial_pointer"] = initial_pointer
        page_params["have_initial_messages"] = (initial_pointer != -1)

    statsd.incr('views.home')
    show_invites = True

    # Some realms only allow admins to invite users
    if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
        show_invites = False

    product_name = "Zulip"
    page_params['product_name'] = product_name
    request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
    response = render_to_response('zerver/index.html',
                                  {'user_profile': user_profile,
                                   'page_params' : simplejson.encoder.JSONEncoderForHTML().encode(page_params),
                                   'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
                                   'avatar_url': avatar_url(user_profile),
                                   'show_debug': settings.DEBUG and ('show_debug' in request.GET),
                                   'pipeline': settings.PIPELINE_ENABLED,
                                   'show_invites': show_invites,
                                   'is_admin': user_profile.is_realm_admin,
                                   'show_webathena': user_profile.realm.webathena_enabled,
                                   'enable_feedback': settings.ENABLE_FEEDBACK,
                                   'embedded': narrow_stream is not None,
                                   'product_name': product_name
                                   },
                                  request=request)
    # The initial state is baked into the page; never cache it.
    patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
    return response
@zulip_login_required
def desktop_home(request):
    # type: (HttpRequest) -> HttpResponse
    """Legacy desktop-app entry point; bounce straight to the main app."""
    target = reverse('zerver.views.home')
    return HttpResponseRedirect(target)
def is_buggy_ua(agent):
    # type: (str) -> bool
    """Discriminate CSS served to clients based on User Agent.

    Due to QTBUG-3467, @font-face is not supported in QtWebKit.
    This may get fixed in the future, but for right now we can
    just serve the more conservative CSS to all our desktop apps.
    """
    if "Mac" in agent:
        return False
    desktop_markers = ("Humbug Desktop/", "Zulip Desktop/", "ZulipDesktop/")
    return any(marker in agent for marker in desktop_markers)
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ(), password=REQ()):
    # type: (HttpRequest, str, str) -> HttpResponse
    """Exchange credentials (or a Google OAuth2 token) for the user's API key.

    Mobile/API clients POST username+password; the special username
    "google-oauth2-token" carries an OAuth2 token in the password field.
    Failure reasons are reported in machine-readable form via "reason".
    """
    return_data = {}  # type: Dict[str, bool]
    if username == "google-oauth2-token":
        user_profile = authenticate(google_oauth2_token=password,
                                    realm_subdomain=get_subdomain(request),
                                    return_data=return_data)
    else:
        user_profile = authenticate(username=username,
                                    password=password,
                                    realm_subdomain=get_subdomain(request),
                                    return_data=return_data)
    # Idiom fix: these flags are booleans set by the auth backends; test
    # truthiness rather than comparing with `== True`.
    if return_data.get("inactive_user"):
        return json_error(_("Your account has been disabled."),
                          data={"reason": "user disable"}, status=403)
    if return_data.get("inactive_realm"):
        return json_error(_("Your realm has been deactivated."),
                          data={"reason": "realm deactivated"}, status=403)
    if return_data.get("password_auth_disabled"):
        return json_error(_("Password auth is disabled in your team."),
                          data={"reason": "password auth disabled"}, status=403)
    if user_profile is None:
        if return_data.get("valid_attestation"):
            # We can leak that the user is unregistered iff they present a valid authentication string for the user.
            return json_error(_("This user is not registered; do so from a browser."),
                              data={"reason": "unregistered"}, status=403)
        return json_error(_("Your username or password is incorrect."),
                          data={"reason": "incorrect_creds"}, status=403)
    return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_get_auth_backends(request):
    # type: (HttpRequest) -> HttpResponse
    """Report which authentication backends this server supports.

    May return a false positive for password auth if it's been disabled
    for a specific realm.  Currently only happens for zulip.com on prod.
    """
    backends = {
        "password": password_auth_enabled(None),
        "dev": dev_auth_enabled(),
        "google": google_auth_enabled(),
    }
    return json_success(backends)
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
    # type: (HttpRequest, UserProfile, str) -> HttpResponse
    """Return the logged-in user's API key, re-verifying their password
    first when password authentication is in use for the realm."""
    if password_auth_enabled(user_profile.realm):
        verified = authenticate(username=user_profile.email, password=password,
                                realm_subdomain=get_subdomain(request))
        if not verified:
            return json_error(_("Your username or password is incorrect."))
    return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
    # type: (HttpRequest) -> HttpResponse
    """Expose the Google OAuth2 client ID for mobile/desktop clients."""
    client_id = settings.GOOGLE_CLIENT_ID
    if not client_id:
        return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
    return json_success({"google_client_id": client_id})
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
                        apply_markdown=REQ(default=False, validator=check_bool),
                        all_public_streams=REQ(default=None, validator=check_bool)):
    # type: (HttpRequest, UserProfile, bool, Optional[bool]) -> HttpResponse
    """Thin REST wrapper delegating to events_register_backend."""
    return events_register_backend(
        request, user_profile,
        apply_markdown=apply_markdown,
        all_public_streams=all_public_streams)
def _default_all_public_streams(user_profile, all_public_streams):
# type: (UserProfile, Optional[bool]) -> bool
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
# type: (UserProfile, Iterable[Sequence[text_type]]) -> Iterable[Sequence[text_type]]
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [['stream', default_stream.name]]
return narrow
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
                            all_public_streams=None,
                            event_types=REQ(validator=check_list(check_string), default=None),
                            narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
                            queue_lifespan_secs=REQ(converter=int, default=0)):
    # type: (HttpRequest, UserProfile, bool, Optional[bool], Optional[Iterable[str]], Iterable[Sequence[text_type]], int) -> HttpResponse
    """Register an event queue for the user, filling in per-user defaults
    for the all_public_streams flag and the narrow."""
    effective_all_public = _default_all_public_streams(user_profile, all_public_streams)
    effective_narrow = _default_narrow(user_profile, narrow)
    result = do_events_register(user_profile, request.client, apply_markdown,
                                event_types, queue_lifespan_secs,
                                effective_all_public, narrow=effective_narrow)
    return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ()):
    # type: (HttpRequest, UserProfile, str) -> HttpResponse
    """Send a referral invitation, consuming one of the user's invites."""
    if not email:
        return json_error(_("No email address specified"))
    invites_remaining = user_profile.invites_granted - user_profile.invites_used
    if invites_remaining <= 0:
        return json_error(_("Insufficient invites"))
    do_refer_friend(user_profile, email)
    return json_success()
@authenticated_json_post_view
@has_request_variables
def json_set_muted_topics(request, user_profile,
                          muted_topics=REQ(validator=check_list(check_list(check_string, length=2)), default=[])):
    # type: (HttpRequest, UserProfile, List[List[text_type]]) -> HttpResponse
    """Replace the user's muted-topic list with the given [stream, topic] pairs."""
    do_set_muted_topics(user_profile, muted_topics)
    return json_success()
def generate_204(request):
    # type: (HttpRequest) -> HttpResponse
    """Return an empty 204 No Content response (connectivity-check endpoint)."""
    return HttpResponse(content=None, status=204)
def process_unsubscribe(token, subscription_type, unsubscribe_function):
    # type: (str, str, Callable[[UserProfile], None]) -> HttpResponse
    # NOTE(review): the original type comment declared the first argument as
    # HttpRequest; it is actually the confirmation-key token string.
    """Resolve a one-click unsubscribe token and apply unsubscribe_function.

    Shows an error page for unknown/stale tokens, otherwise unsubscribes
    the confirmed user and renders a success page.
    """
    try:
        confirmation = Confirmation.objects.get(confirmation_key=token)
    except Confirmation.DoesNotExist:
        return render_to_response('zerver/unsubscribe_link_error.html')

    # The Confirmation's content_object is the UserProfile being unsubscribed.
    user_profile = confirmation.content_object
    unsubscribe_function(user_profile)
    return render_to_response('zerver/unsubscribe_success.html',
                              {"subscription_type": subscription_type,
                               "external_host": settings.EXTERNAL_HOST,
                               'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
                               'server_uri': settings.SERVER_URI,
                               'realm_uri': user_profile.realm.uri,
                               })
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
    # type: (UserProfile) -> None
    """Turn off missed-message (offline) email notifications for this user."""
    do_change_enable_offline_email_notifications(user_profile, False)
def do_welcome_unsubscribe(user_profile):
    # type: (UserProfile) -> None
    """Stop the queued welcome/follow-up email drip for this user."""
    clear_followup_emails_queue(user_profile.email)
def do_digest_unsubscribe(user_profile):
    # type: (UserProfile) -> None
    """Turn off digest emails for this user."""
    do_change_enable_digest_emails(user_profile, False)
# Registry of one-click email unsubscribe handlers, keyed by the URL "type".
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
    "missed_messages": ("missed messages", do_missedmessage_unsubscribe),
    "welcome": ("welcome", do_welcome_unsubscribe),
    "digest": ("digest", do_digest_unsubscribe)
}
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, type, token):
    # type: (HttpRequest, str, str) -> HttpResponse
    """Handle a one-click unsubscribe link of the given type."""
    entry = email_unsubscribers.get(type)
    if entry is not None:
        display_name, unsubscribe_function = entry
        return process_unsubscribe(token, display_name, unsubscribe_function)
    return render_to_response('zerver/unsubscribe_link_error.html', {},
                              request=request)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ramaplot.AmberForceField.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Reads and represents AMBER-format force fields
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
import re
from .ForceField import ForceField
################################### CLASSES ###################################
class AmberForceField(ForceField):
"""
Reads and represents AMBER-format force fields
"""
@staticmethod
def get_cache_key(parm=None, *args, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset
cache.
"""
from os.path import expandvars
return (AmberForceField, expandvars(parm))
@staticmethod
def get_cache_message(cache_key):
return "previously loaded from '{0}'".format(cache_key[1])
par_re = dict(
blank = "^\s*$",
mass = "^(?P<type>{t}){w}"
"(?P<mass>{f})"
"(?P<polarizability>{w}{f}|{w})"
"(?P<note>.*$)",
atomlist = "^({t}{w})*$",
bond = "^(?P<type_1>{t})-"
"(?P<type_2>{t}){w}"
"(?P<force_constant>{f}){w}"
"(?P<length>{f}){w}"
"(?P<note>.*$)",
angle = "^(?P<type_1>{t})-"
"(?P<type_2>{t})-"
"(?P<type_3>{t}){w}"
"(?P<force_constant>{f}){w}"
"(?P<angle>{f}){w}"
"(?P<note>.*$)",
dihedral = "^(?P<type_1>{t})-"
"(?P<type_2>{t})-"
"(?P<type_3>{t})-"
"(?P<type_4>{t}){w}"
"(?P<divider>{i}){w}"
"(?P<barrier>{sf}){w}"
"(?P<phase>{sf}){w}"
"(?P<periodicity>{sf}){w}"
"(?P<note>.*$)",
improper = "^(?P<type_1>{t})-"
"(?P<type_2>{t})-"
"(?P<type_3>{t})-"
"(?P<type_4>{t}){w}"
"(?P<barrier>{sf}){w}"
"(?P<phase>{sf}){w}"
"(?P<periodicity>{sf}){w}"
"(?P<note>.*$)",
hbond = "^{w}(?P<type_1>{t}){w}"
"(?P<type_2>{t}){w}"
"(?P<A>{f}){w}"
"(?P<B>{f}){w}"
"(?P<ASOLN>{f}){w}"
"(?P<note>.*$)",
vdw_format = "^.+{w}(?P<vdw_format>SK|RE|AC).*$",
vdw = "^{w}(?P<type>{t}){w}"
"(?P<radius>{f}){w}"
"(?P<well_depth>{f}){w}"
"(?P<note>.*$)",
ljedit_title = "^LJEDIT$",
ljedit = "^{w}(?P<type_1>{t}){w}"
"(?P<type_2>{t}){w}"
"(?P<radius_1>{f}){w}"
"(?P<well_depth_1>{f}){w}"
"(?P<radius_2>{f}){w}"
"(?P<well_depth_2>{f}){w}"
"(?P<note>.*$)",
end = "^END$")
lib_re = dict(
blank = "^\s*$",
atoms = "^\s*\"(?P<name>{a})\"{w}"
"\"(?P<type>{t})\"{w}"
"(?P<type_index>{i}){w}"
"(?P<residue_index>{i}){w}"
"(?P<flags>{i}){w}"
"(?P<atom_index>{i}){w}"
"(?P<element>{i}){w}"
"(?P<charge>{sf}){w}"
"(?P<note>.*$)",
atom_edits = "^\s*\"(?P<name>{a})\"{w}"
"\"(?P<type>{t})\"{w}"
"(?P<type_index>{i}){w}"
"(?P<element>{si}{w})"
"(?P<charge>{sf}{w})"
"(?P<note>.*$)",
box = "^\s*(?P<box>{sf}){w}"
"(?P<note>.*$)",
res_seq = "^\s*(?P<childsequence>{i}){w}"
"(?P<note>.*$)",
res_connect = "^\s*(?P<connect>{i}){w}"
"(?P<note>.*$)",
bonds = "^\s*(?P<atom_index_1>{i}){w}"
"(?P<atom_index_2>{t}){w}"
"(?P<flag>{i}){w}"
"(?P<note>.*$)",
hierarchy = "^\s*\"(?P<above_type>U|R|A)\"{w}"
"(?P<above_index>{i}){w}"
"\"(?P<below_type>U|R|A)\"{w}"
"(?P<below_index>{i}){w}"
"(?P<note>.*$)",
name = "^\s*\"(?P<name>{r})\""
"(?P<note>.*$)",
coordinates = "^\s*(?P<x>{sfe}){w}"
"(?P<y>{sfe}){w}"
"(?P<z>{sfe}){w}"
"(?P<note>.*$)",
res_connect2 = "^\s*(?P<atom_index_1>{i}){w}"
"(?P<atom_index_2>{i}){w}"
"(?P<atom_index_3>{i}){w}"
"(?P<atom_index_4>{i}){w}"
"(?P<atom_index_5>{i}){w}"
"(?P<atom_index_6>{i}){w}"
"(?P<note>.*$)",
residues = "^\s*\"(?P<name>{r})\"{w}"
"(?P<residue_index>{i}){w}"
"(?P<child_atom_index>{i}){w}"
"(?P<start_atom_index>{i}){w}"
"\"(?P<residue_type>p|n|w|\?)\"{w}"
"(?P<note>.*$)",
pdb_seq = "^\s*(?P<residue_index>{i}){w}"
"(?P<note>.*$)",
solventcap = "^\s*(?P<solventcap>{sf}){w}"
"(?P<note>.*$)",
velocities = "^\s*(?P<x>{sfe}){w}"
"(?P<y>{sfe}){w}"
"(?P<z>{sfe}){w}"
"(?P<note>.*$)")
    def __init__(self, parm=None, **kwargs):
        """
        Initialize, optionally reading parameters from an AMBER parm file.

        Arguments:
          parm (str, optional): Path to input parm file; when provided,
            the parsed parameters are stored as self.parameters
          kwargs (dict): Additional keyword arguments passed to read_parm
        """
        if parm is not None:
            self.parameters = self.read_parm(parm, **kwargs)
@staticmethod
def amber_regex(regex, title=False):
"""
Prepares regex for matching AMBER fields
Arguments:
regex (string): regular expression
Returns:
(string): regular expression
"""
if title:
regex = "^!entry\.(?P<residue_name>{r})\.unit\." + regex + "{w}.*$"
return re.compile(regex.format(
r = "[\w][\w][\w][\w]?", # Residue
a = "[\w][\w]?[\w]?[\w]?", # Atom name
t = "[\w][\w \*]?", # Atom type
i = "\d+", # Integer
si = "[-]?\d+", # Signed Integer
f = "\d+\.?\d*?", # Float
sf = "[-]?\d+\.?\d*?", # Signed float
sfe = "[-]?\d+\.?\d*?[E]?[-]?\d*?", # Signed float in E notation
w = "\s+")) # Whitespace
@staticmethod
def strip_dict(dictionary):
"""
Strips each string in a dict, and deletes if empty
Arguements:
dictionary (dict): dictionary to strip
Returns:
(dict): dictionary with each element stripped
"""
for key, value in dictionary.items():
value = value.strip()
if value == "":
del dictionary[key]
else:
dictionary[key] = value
return dictionary
    @staticmethod
    def read_parm(infile, verbose=1, debug=0, **kwargs):
        """
        Reads a parm file

        The parser is a forward-only state machine: ``section`` only
        increases, and each branch of the chain below accepts a line only
        if the file has not yet advanced past that section.  Lines that
        match nothing are reported (at verbose >= 1) and skipped.

        Arguments:
          infile (str): Path to input parm file
          verbose (int): Enable verbose output
          debug (int): Enable debug output
          kwargs (dict): Additional keyword arguments

        Returns:
          (dict): parameter-table name -> pandas.DataFrame
        """
        import pandas as pd

        if verbose >= 1:
            print("READING PARM: {0}".format(infile))

        strip_dict = AmberForceField.strip_dict
        amber_regex = AmberForceField.amber_regex
        par_re = AmberForceField.par_re
        # Pre-compile one matcher per record type.
        re_blank        = amber_regex(par_re["blank"])
        re_mass         = amber_regex(par_re["mass"])
        re_atomlist     = amber_regex(par_re["atomlist"])
        re_bond         = amber_regex(par_re["bond"])
        re_angle        = amber_regex(par_re["angle"])
        re_dihedral     = amber_regex(par_re["dihedral"])
        re_improper     = amber_regex(par_re["improper"])
        re_hbond        = amber_regex(par_re["hbond"])
        re_vdw_format   = amber_regex(par_re["vdw_format"])
        re_vdw          = amber_regex(par_re["vdw"])
        re_ljedit_title = amber_regex(par_re["ljedit_title"])
        re_ljedit       = amber_regex(par_re["ljedit"])
        re_end          = amber_regex(par_re["end"])

        # One accumulator DataFrame per parameter table.
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in pandas 2.0; this method requires pandas < 2.0 — confirm
        # the pinned dependency, or port the .append calls to pd.concat.
        mass_types = pd.DataFrame(columns=["type", "mass",
                                  "polarizability", "note"])
        hydrophobic_types = pd.DataFrame(columns=["type"])
        bonds = pd.DataFrame(columns=["type_1", "type_2",
                             "force_constant", "length", "note"])
        angles = pd.DataFrame(columns=["type_1", "type_2", "type_3",
                              "force_constant", "angle", "note"])
        dihedrals = pd.DataFrame(columns=["type_1", "type_2", "type_3",
                                 "type_4", "divider", "barrier", "phase",
                                 "periodicity", "note"])
        impropers = pd.DataFrame(columns=["type_1", "type_2", "type_3",
                                 "type_4", "barrier", "phase",
                                 "periodicity", "note"])
        hbonds = pd.DataFrame(columns=["type_1", "type_2", "A", "B",
                              "ASOLN"])
        vdw_eq_types = pd.DataFrame()
        vdw_types = pd.DataFrame(columns= ["type", "radius",
                                 "well_depth", "note"])
        ljedits = pd.DataFrame(columns= ["type_1", "type_2",
                               "radius_1", "well_depth_1", "radius_2",
                               "well_depth_2"])
        section = 1
        with open(infile, "r") as open_infile:
            line = open_infile.readline()
            while line:
                # BLANK
                if re.match(re_blank, line):
                    if verbose >= 1:
                        print("BLANK          |{0}".format(line.strip()))
                # 1: TITLE (anything before the first mass record)
                elif section <= 1 and not re.match(re_mass, line):
                    if verbose >= 1:
                        print("TITLE          |{0}".format(line.strip()))
                # 2: MASS
                elif section <= 2 and re.match(re_mass, line):
                    section = 2
                    if verbose >= 1:
                        print("MASS           |{0}".format(line.strip()))
                    fields = strip_dict(re.match(re_mass, line).groupdict())
                    mass_types = mass_types.append(fields, ignore_index=True)
                # 3: HYDROPHIC (list of types)
                elif section <= 3 and re.match(re_atomlist, line):
                    section = 3
                    if verbose >= 1:
                        print("HYDROPHOBIC    |{0}".format(line.rstrip()))
                    # One row per atom type listed on the line.
                    fields = [{"type": v} for v in
                              amber_regex("{t}").findall(line)]
                    hydrophobic_types = hydrophobic_types.append(fields,
                      ignore_index=True)
                # 4: BOND
                elif section <= 4 and re.match(re_bond, line):
                    section = 4
                    if verbose >= 1:
                        print("BOND           |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_bond, line).groupdict())
                    bonds = bonds.append(fields, ignore_index=True)
                # 5: ANGLE
                elif section <= 5 and re.match(re_angle, line):
                    section = 5
                    if verbose >= 1:
                        print("ANGLE          |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_angle, line).groupdict())
                    angles = angles.append(fields, ignore_index=True)
                # 6: DIHEDRAL
                elif section <= 6 and re.match(re_dihedral, line):
                    section = 6
                    if verbose >= 1:
                        print("DIHEDRAL       |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_dihedral,
                      line).groupdict())
                    dihedrals = dihedrals.append(fields, ignore_index=True)
                # 7: IMPROPER
                elif section <= 7 and re.match(re_improper, line):
                    section = 7
                    if verbose >= 1:
                        print("IMPROPER       |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_improper,
                      line).groupdict())
                    impropers = impropers.append(fields, ignore_index=True)
                # 8: HBOND
                elif section <= 8 and re.match(re_hbond, line):
                    section = 8
                    if verbose >= 1:
                        print("HBOND          |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_hbond, line).groupdict())
                    hbonds = hbonds.append(fields, ignore_index=True)
                # 9: VDW (equivalent types)
                elif section <= 9 and re.match(re_atomlist, line):
                    section = 9
                    if verbose >= 1:
                        print("VDW EQUIVALENT |{0}".format(line.rstrip()))
                    fields = [{"type_{0}".format(i): v for i, v in
                               enumerate(re.compile(amber_regex("{t}")).findall(line))}]
                    vdw_eq_types = vdw_eq_types.append(fields,
                      ignore_index=True)
                # 10: VDW (format)
                elif section <= 10.3 and re.match(re_vdw_format, line):
                    if verbose >= 1:
                        print("VDW FORMAT     |{0}".format(line.rstrip()))
                # 10.2: VDW (radius and well depth)
                elif section <= 10.2 and re.match(re_vdw, line):
                    section = 10.2
                    if verbose >= 1:
                        print("VDW            |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_vdw, line).groupdict())
                    vdw_types = vdw_types.append(fields, ignore_index=True)
                # 11: LJEDIT (title)
                elif (section <= 11 and re.match(re_ljedit_title, line)):
                    section = 11
                    if verbose >= 1:
                        print("LJEDIT         |{0}".format(line.rstrip()))
                # 11.1: LJEDIT (atom types, radii, and well depth)
                elif (section <= 11.1 and re.match(re_ljedit, line)):
                    section = 11.1
                    if verbose >= 1:
                        print("LJEDIT         |{0}".format(line.rstrip()))
                    fields = strip_dict(re.match(re_ljedit, line).groupdict())
                    ljedits = ljedits.append(fields, ignore_index=True)
                # END
                elif re.match(re_end, line):
                    if verbose >= 1:
                        print("END            |{0}".format(line.rstrip()))
                    break
                # NO MATCH
                else:
                    if verbose >= 1:
                        print("NOMATCH        |{0}".format(line.rstrip()))
                line = open_infile.readline()
        if debug >= 1:
            print(mass_types)
            print(hydrophobic_types)
            print(bonds)
            print(angles)
            print(dihedrals)
            print(impropers)
            print(hbonds)
            print(vdw_eq_types)
            print(vdw_types)
            print(ljedits)
        parameters = dict(
          mass_types        = mass_types,
          hydrophobic_types = hydrophobic_types,
          bonds             = bonds,
          angles            = angles,
          dihedrals         = dihedrals,
          impropers         = impropers,
          hbonds            = hbonds,
          vdw_eq_types      = vdw_eq_types,
          vdw_types         = vdw_types,
          ljedits           = ljedits)
        return parameters
# def read_frcmod(self, infile, verbose=1, debug=0, **kwargs):
# """
# Arguments:
# infile (str): Path to input lib file
# verbose (int): Enable verbose output
# debug (int): Enable debug output
# kwargs (dict): Additional keyword arguments
# """
# if verbose >= 1:
# print("READING FRCMOD: {0}".format(infile))
#
# are = self.amber_regex
# strip_dict = self.strip_dict
# re_blank = are("^\s*$")
#
# section = 1
# with open(infile, "r") as open_infile:
# line = open_infile.readline()
# while line:
# # BLANK
# if re.match(re_blank, line):
# if verbose >= 1:
# print("BLANK |{0}".format(line.strip()))
# # 1: TITLE
# elif section <= 1 and not re.match(re_mass, line):
# if verbose >= 1:
# print("TITLE |{0}".format(line.strip()))
# # 2: MASS
# elif section <= 2 and re.match(re_mass, line):
# section = 2
# if verbose >= 1:
# print("MASS |{0}".format(line.strip()))
# fields = strip_dict(re.match(re_mass, line).groupdict())
# mass_types = mass_types.append(fields, ignore_index=True)
# # 3: HYDROPHIC (list of types)
# elif section <= 3 and re.match(re_atomlist, line):
# section = 3
# if verbose >= 1:
# print("HYDROPHOBIC |{0}".format(line.rstrip()))
# fields = [{"type": v} for v in are("{t}").findall(line)]
# hydrophobic_types = hydrophobic_types.append(fields,
# ignore_index=True)
# # 4: BOND
# elif section <= 4 and re.match(re_bond, line):
# section = 4
# if verbose >= 1:
# print("BOND |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_bond, line).groupdict())
# bonds = bonds.append(fields, ignore_index=True)
# # 5: ANGLE
# elif section <= 5 and re.match(re_angle, line):
# section = 5
# if verbose >= 1:
# print("ANGLE |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_angle, line).groupdict())
# angles = angles.append(fields, ignore_index=True)
# # 6: DIHEDRAL
# elif section <= 6 and re.match(re_dihedral, line):
# section = 6
# if verbose >= 1:
# print("DIHEDRAL |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_dihedral,
# line).groupdict())
# dihedrals = dihedrals.append(fields, ignore_index=True)
# # 7: IMPROPER
# elif section <= 7 and re.match(re_improper, line):
# section = 7
# if verbose >= 1:
# print("IMPROPER |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_improper,
# line).groupdict())
# impropers = impropers.append(fields, ignore_index=True)
# # 8: HBOND
# elif section <= 8 and re.match(re_hbond, line):
# section = 8
# if verbose >= 1:
# print("HBOND |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_hbond, line).groupdict())
# hbonds = hbonds.append(fields, ignore_index=True)
# # 9: VDW (equivalent types)
# elif section <= 9 and re.match(re_atomlist, line):
# section = 9
# if verbose >= 1:
# print("VDW EQUIVALENT |{0}".format(line.rstrip()))
# fields = [{"type_{0}".format(i): v for i, v in
# enumerate(re.compile(are("{t}")).findall(line))}]
# vdw_eq_types = vdw_eq_types.append(fields,
# ignore_index=True)
# # 10: VDW (format)
# elif section <= 10.3 and re.match(re_vdw_format, line):
# if verbose >= 1:
# print("VDW FORMAT |{0}".format(line.rstrip()))
# # 10.2: VDW (radius and well depth)
# elif section <= 10.2 and re.match(re_vdw, line):
# section = 10.2
# if verbose >= 1:
# print("VDW |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_vdw, line).groupdict())
# vdw_types = vdw_types.append(fields, ignore_index=True)
# # 11: LJEDIT (title)
# elif (section <= 11 and re.match(re_ljedit_title, line)):
# section = 11
# if verbose >= 1:
# print("LJEDIT |{0}".format(line.rstrip()))
# # 11.1: LJEDIT (atom types, radii, and well depth)
# elif (section <= 11.1 and re.match(re_ljedit, line)):
# section = 11.1
# if verbose >= 1:
# print("LJEDIT |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_ljedit, line).groupdict())
# ljedits = ljedits.append(fields, ignore_index=True)
# # END
# elif re.match(re_end, line):
# if verbose >= 1:
# print("END |{0}".format(line.rstrip()))
# break
# # NO MATCH
# else:
# if verbose >= 1:
# print("NOMATCH |{0}".format(line.rstrip()))
# line = infile.readline()
# def read_lib(self, infile, verbose=1, debug=0, **kwargs):
# """
# Arguments:
# infile (str): Path to input lib file
# verbose (int): Enable verbose output
# debug (int): Enable debug output
# kwargs (dict): Additional keyword arguments
# """
# if verbose >= 1:
# print("READING LIB: {0}".format(infile))
#
# stripd = self.strip_dict
# re_blank = self.lib_re["blank"]
# re_atoms = self.lib_re["atoms"]
# re_atom_edits = self.lib_re["atom_edits"]
# re_box = self.lib_re["box"]
# re_res_seq = self.lib_re["res_seq"]
# re_res_connect = self.lib_re["res_connect"]
# re_bonds = self.lib_re["bonds"]
# re_hierarchy = self.lib_re["hierarchy"]
# re_name = self.lib_re["name"]
# re_coordinates = self.lib_re["coordinates"]
# re_res_connect2 = self.lib_re["res_connect2"]
# re_residues = self.lib_re["residues"]
# re_pdb_seq = self.lib_re["pdb_seq"]
# re_solventcap = self.lib_re["solventcap"]
# re_velocities = self.lib_re["velocities"]
#
# # Regular expressions for titles
# re_t_atoms = self.amber_regex("atoms", title=True)
# re_t_atom_edits = self.amber_regex("atomspertinfo", title=True)
# re_t_box = self.amber_regex("boundbox", title=True)
# re_t_res_seq = self.amber_regex("childsequence", title=True)
# re_t_res_connect = self.amber_regex("connect", title=True)
# re_t_bonds = self.amber_regex("connectivity", title=True)
# re_t_hierarchy = self.amber_regex("hierarchy", title=True)
# re_t_name = self.amber_regex("name", title=True)
# re_t_coordinates = self.amber_regex("positions", title=True)
# re_t_res_connect2 = self.amber_regex("residueconnect", title=True)
# re_t_residues = self.amber_regex("residues", title=True)
# re_t_pdb_seq = self.amber_regex("residuesPdbSequenceNumber",
# title=True)
# re_t_solventcap = self.amber_regex("solventcap", title=True)
# re_t_velocities = self.amber_regex("velocities", title=True)
#
# # Regular expressions for contents
# section = 0
# residue = None
#
# with open(infile, "r") as open_infile:
# line = open_infile.readline()
# while line:
# # BLANK
# if re.match(re_blank, line):
# if verbose >= 1:
# print("BLANK |{0}".format(line.strip()))
# # 1: ATOMS
# elif re.match(re_t_atoms, line):
# if verbose >= 1:
# print("ATOMS |{0}".format(line.strip()))
# section = 1
# fields = stripd(re.match(re_t_atoms, line).groupdict())
# residue = self.residues[fields["residue_name"]] = {}
# residue["atoms"] = pd.DataFrame(columns=
# ["name", "type", "type_index", "residue_index", "flags",
# "atom_index", "element", "charge", "note"])
# elif section == 1 and re.match(re_atoms, line):
# if verbose >= 1:
# print("ATOMS |{0}".format(line.strip()))
# fields = stripd(re.match(re_atoms, line).groupdict())
# residue["atoms"] = residue["atoms"].append(
# fields, ignore_index=True)
# # 2: ATOMSPERTINFO
# elif re.match(re_t_atom_edits, line):
# if verbose >= 1:
# print("ATOMSPERTINFO |{0}".format(line.strip()))
# section = 2
# residue["atom_edits"] = pd.DataFrame(columns=
# ["name", "type", "type_index", "element", "charge",
# "note"])
# elif section == 2 and re.match(re_atom_edits, line):
# if verbose >= 1:
# print("ATOMSPERTINFO |{0}".format(line.strip()))
# fields = stripd(re.match(re_atom_edits, line).groupdict())
# residue["atom_edits"] = residue["atom_edits"].append(
# fields, ignore_index=True)
# # 3: BOUNDBOX
# elif re.match(re_t_box, line):
# if verbose >= 1:
# print("BOUNDBOX |{0}".format(line.strip()))
# section = 3
# box_keys = ["box", "angle", "x_length", "y_length",
# "z_length"]
# box_items = []
# elif section == 3 and re.match(re_box, line):
# if verbose >= 1:
# print("BOUNDBOX |{0}".format(line.strip()))
# fields = stripd(re.match(re_box, line).groupdict())
# box_items.append(
# (box_keys.pop(0), [fields["box"]]))
# if len(box_keys) == 0:
# residue["box"] = pd.DataFrame.from_items(box_items)
# # 4: CHILDSEQUENCE
# elif re.match(re_t_res_seq, line):
# if verbose >= 1:
# print("CHILDSEQUENCE |{0}".format(line.strip()))
# section = 4
# residue["res_seq"] = pd.DataFrame(columns=
# ["childsequence", "note"])
# elif section == 4 and re.match(re_res_seq, line):
# if verbose >= 1:
# print("CHILDSEQUENCE |{0}".format(line.strip()))
# fields = stripd(re.match(re_res_seq, line).groupdict())
# residue["res_seq"] = residue["res_seq"].append(
# fields, ignore_index=True)
# # 5: CONNECT
# elif re.match(re_t_res_connect, line):
# if verbose >= 1:
# print("CONNECT |{0}".format(line.strip()))
# section = 5
# connect_keys = [
# "connect_atom_index_1", "connect_atom_index_2", "note"]
# connect_items = []
# elif section == 5 and re.match(re_res_connect, line):
# if verbose >= 1:
# print("CONNECT |{0}".format(line.strip()))
# fields = stripd(re.match(re_res_connect, line).groupdict())
# connect_items.append(
# (connect_keys.pop(0), [fields["connect"]]))
# if len(connect_keys) == 0:
# residue["res_connect"] = pd.DataFrame.from_items(
# connect_items)
# # 6: CONNECTIVITY
# elif re.match(re_t_bonds, line):
# if verbose >= 1:
# print("CONNECTIVITY |{0}".format(line.strip()))
# section = 6
# residue["bonds"] = pd.DataFrame(columns=
# ["atom_index_1", "atom_index_2", "flag", "note"])
# elif section == 6 and re.match(re_bonds, line):
# if verbose >= 1:
# print("CONNECTIVITY |{0}".format(line.strip()))
# fields = stripd(re.match(re_bonds,
# line).groupdict())
# residue["bonds"] = residue["bonds"].append(
# fields, ignore_index=True)
# # 7: HIERARCHY
# elif re.match(re_t_hierarchy, line):
# if verbose >= 1:
# print("HIERARCHY |{0}".format(line.strip()))
# section = 7
# residue["hierarchy"] = pd.DataFrame(columns=
# ["above_type", "above_index", "below_type",
# "below_index", "note"])
# elif section == 7 and re.match(re_hierarchy, line):
# if verbose >= 1:
# print("HIERARCHY |{0}".format(line.strip()))
# fields = stripd(re.match(re_hierarchy,
# line).groupdict())
# residue["hierarchy"] = residue["hierarchy"].append(
# fields, ignore_index=True)
# # 8: NAME
# elif re.match(re_t_name, line):
# if verbose >= 1:
# print("NAME |{0}".format(line.strip()))
# section = 8
# residue["name"] = pd.DataFrame(columns=
# ["childsequence", "note"])
# elif section == 8 and re.match(re_name, line):
# if verbose >= 1:
# print("NAME |{0}".format(line.strip()))
# fields = stripd(re.match(re_name, line).groupdict())
# residue["name"] = residue["name"].append(
# fields, ignore_index=True)
# # 9: POSITIONS
# elif re.match(re_t_coordinates, line):
# if verbose >= 1:
# print("POSITIONS |{0}".format(line.strip()))
# section = 9
# residue["coordinates"] = pd.DataFrame(columns=
# ["x", "y", "z", "note"])
# elif section == 9 and re.match(re_coordinates, line):
# if verbose >= 1:
# print("POSITIONS |{0}".format(line.strip()))
# fields = stripd(re.match(re_coordinates,
# line).groupdict())
# residue["coordinates"] = residue["coordinates"].append(
# fields, ignore_index=True)
# # 10: RESIDUECONNECT
# elif re.match(re_t_res_connect2, line):
# if verbose >= 1:
# print("RESIDUECONNECT |{0}".format(line.strip()))
# section = 10
# residue["res_connect2"] = pd.DataFrame(columns=
# ["atom_index_1", "atom_index_2", "atom_index_3",
# "atom_index_4", "atom_index_5", "atom_index_6", "note"])
# elif section == 10 and re.match(re_res_connect2, line):
# if verbose >= 1:
# print("RESIDUECONNECT |{0}".format(line.strip()))
# fields = stripd(re.match(re_res_connect2,
# line).groupdict())
# residue["res_connect2"] = residue["res_connect2"].append(
# fields, ignore_index=True)
# # 11: RESIDUES
# elif re.match(re_t_residues, line):
# if verbose >= 1:
# print("RESIDUES |{0}".format(line.strip()))
# section = 11
# residue["residues"] = pd.DataFrame(columns=
# ["name", "residue_index", "child_atom_index",
# "start_atom_index", "residue_type", "note"])
# elif re.match(re_residues, line):
# if verbose >= 1:
# print("RESIDUES |{0}".format(line.strip()))
# fields = stripd(re.match(re_residues,
# line).groupdict())
# residue["residues"] = residue["residues"].append(
# fields, ignore_index=True)
# # 12: RESIDUESPDBSEQUENCENUMBER
# elif re.match(re_t_pdb_seq, line):
# if verbose >= 1:
# print("PDBSEQUENCENUM |{0}".format(line.strip()))
# section = 12
# residue["pdb_seq"] = pd.DataFrame(columns=
# ["residue_index", "note"])
# elif section == 12 and re.match(re_pdb_seq, line):
# if verbose >= 1:
# print("PDBSEQUENCENUM |{0}".format(line.strip()))
# fields = stripd(re.match(re_pdb_seq, line).groupdict())
# residue["pdb_seq"] = residue["pdb_seq"].append(
# fields, ignore_index=True)
# # 13: SOLVENTCAP
# elif re.match(re_t_solventcap, line):
# if verbose >= 1:
# print("SOLVENTCAP |{0}".format(line.strip()))
# section = 13
# solventcap_keys = ["solventcap", "angle", "x_length",
# "y_length", "z_length"]
# solventcap_temp = []
# elif section == 13 and re.match(re_solventcap, line):
# if verbose >= 1:
# print("SOLVENTCAP |{0}".format(line.strip()))
# fields = stripd(re.match(re_solventcap, line).groupdict())
# solventcap_temp.append(
# (solventcap_keys.pop(0), [fields["solventcap"]]))
# if len(solventcap_keys) == 0:
# residue["solventcap"] = pd.DataFrame.from_items(
# solventcap_temp)
# # 14: VELOCITIES
# elif re.match(re_t_velocities, line):
# if verbose >= 1:
# print("VELOCITIES |{0}".format(line.strip()))
# section = 14
# residue["velocities"] = pd.DataFrame(columns=
# ["x", "y", "z", "note"])
# elif section == 14 and re.match(re_velocities, line):
# if verbose >= 1:
# print("VELOCITIES |{0}".format(line.strip()))
# fields = stripd(re.match(re_velocities,
# line).groupdict())
# residue["velocities"] = residue["velocities"].append(
# fields, ignore_index=True)
# # NO MATCH
# else:
# if verbose >= 1:
# print("NOMATCH |{0}".format(line.rstrip()))
# line = open_infile.readline()
# for name in sorted(self.residues):
# residue = self.residues[name]
# print()
# print(name)
# fields = ["atoms", "atom_edits", "box", "childsequence",
# "connect", "bonds", "hierarchy", "name",
# "coordinates", "residueconnect", "residues",
# "pdbindex", "solventcap", "velocities"]
# for field in fields:
# if field in residue:
# print(field)
# print(residue[field])
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 11:05:31 2014
@author: david
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 14:30:37 2014
@author: david
"""
import numpy as np
from osgeo import gdal
from osgeo import osr
import numpy.ma as ma
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition import PCA
from sklearn import preprocessing
import emd
import Image
import fiona
from shapely.geometry import shape
from shapely.geometry import asPoint
from scipy import stats
#LoadImage: loads an image using gdal
class LoadImage():
def __init__(self,infile):
# open the dataset
self.image_name = infile[:-4]
self.dataset = gdal.Open(infile) #GA_ReadOnly)
# if there's nothign there print error
#self.stacked = None
if self.dataset is None:
print 'BORK: Could not load file: %s' %(infile)
# otherwise do stuff
else:
#get the bit depth of the source image
try:
pillow_image = Image.open(infile)
self.bit_depth = pillow_image.bits()
pillow_image.close()
except:
print ('Cant get the bit-depth of the image with pillow')
#get the format
self.driver = self.dataset.GetDriver().ShortName
#get the x dimension
self.xsize = self.dataset.RasterXSize
#get the y dimension
self.ysize = self.dataset.RasterYSize
#get the projection
self.proj = self.dataset.GetProjection()
#get the number of bands
bands = self.dataset.RasterCount
print 'BANDS:',bands
#get the geotransform Returns a list object. This is standard GDAL ordering:
#spatial[0] = top left x
#spatial[1] = w-e pixel size
#spatial[2] = rotation (should be 0)
#spatial[3] = top left y
#spatial[4] = rotation (should be 0)
#spatial[5] = n-s pixel size
self.spatial = self.dataset.GetGeoTransform()
#print some stuff to console to show we're paying attention
print 'Found raster in %s format. Raster has %s bands' %(self.driver,bands)
print 'Projected as %s' %(self.proj)
print 'Dimensions: %s x %s' %(self.xsize,self.ysize)
#instantiate a counter
count = 1
#OK. This is the bit that catually loads the bands in in a while loop
# Loop through bands as long as count is equal to or less than total
while (count<=bands):
print 'BANDS less than COUNT'
#show that your computer's fans are whining for a reason
print 'Loading band: %s of %s' %(count,bands)
#get the band
band = self.dataset.GetRasterBand(count)
# load this as a numpy array
#mask the no data values
data_array = band.ReadAsArray()
data_array = ma.masked_where(data_array == 0, data_array)
data_array = data_array.filled(-999)
data_array = data_array.astype(np.float32, copy=False)
# close the band object
band = None
#this bit stacks the bands into a combined numpy array
#if it's the first band copy the array directly to the combined one
if count == 1:
self.stacked = data_array
#else combine these
else:
self.stacked = np.dstack((self.stacked,data_array))
# increment the counter
count = count+1
self.coords_matrix = self.coords()
print self.coords_matrix.shape
#print self.coords_matrix
#******************** MEFFODS ******************************************
def coords(self):
'''This gets the geographic coordinates of each cell in the raster and
returns a list of tuples containing the y and x array references for each pixel
and the geographic x and y coordinates for each pixel'''
print 'call to coords'
#get the shape of the array
matrix_dims = self.stacked.shape
print matrix_dims
#get the number of rows
rows = matrix_dims[0]-1
print rows
#get the number of columns
columns = matrix_dims[1]-1
print columns
x_coords = np.zeros(shape=(matrix_dims[0],matrix_dims[1]))
y_coords = np.zeros(shape=(matrix_dims[0],matrix_dims[1]))
#instantiate a counter
row = 0
#fruity loops
for row in range(matrix_dims[0]):
#increment counter
column = 0
for column in range(matrix_dims[1]):
#print row, column
xgeo = self.spatial[0] + (column*self.spatial[1])+(row*self.spatial[2])
x_coords[row,column] = xgeo
ygeo = self.spatial[3] + (column*self.spatial[4])+(row*self.spatial[5])
y_coords[row,column] = ygeo
column=column+1
print x_coords.shape, y_coords.shape
print np.min(x_coords), np.min(y_coords)
return np.dstack((x_coords,
y_coords,
np.zeros(shape=(matrix_dims[0],matrix_dims[1]))))
def coord_test(self):
print 'call to coord test'
print self.coords_list[0]
print self.coords_list[-1]
class ImageAnalysis():
def __init__(self,image_dir,image,mask_dir,mask,plot_dir, band_names):
os.chdir(image_dir)
loadimage = LoadImage(image)
print loadimage
self.multiband_image = loadimage.stacked
self.name = loadimage.image_name
self.plot_dir = plot_dir
os.chdir(mask_dir)
loadmask = LoadImage(mask)
print loadmask
self.classes = loadmask.stacked
self.arc = self.multiband_image[np.where(self.classes==1)]
self.bac = self.multiband_image[np.where(self.classes==2)]
print 'ARC',self.arc.shape
print 'BAC',self.bac.shape
self.band_names = band_names
if len(self.multiband_image.shape) == 3:
self.bands = self.multiband_image.shape[2]
elif len(self.multiband_image.shape) == 2:
self.bands = 1
def hist_compare(self):
print 'BANDS',self.bands
bins = 32
emd_list = []
out_dir = os.path.join(self.plot_dir,self.name)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
hist_dir = os.path.join(out_dir,'histograms')
if not os.path.exists(hist_dir):
os.mkdir(hist_dir)
archaeology = self.arc
background = self.bac
#print archaeology.shape
#print background.shape
minima = np.min(archaeology)
if minima > np.min(background):
minima = np.min(background)
maxima = np.min(archaeology)
if maxima < np.max(background):
maxima = np.max(background)
hist_arch = np.histogram(archaeology,
bins=bins,
range=(minima,maxima))
hist_back = np.histogram(background,
bins=bins,
range=(minima,maxima))
#print hist_arch[0]
#print hist_back[0]
print 'Totals'
print 'hist_arch', np.sum(hist_arch[0])
print archaeology.shape
#print hist_arch[0].shape
#print hist_arch[0].shape
hist_arch_norm = np.true_divide(hist_arch[0],archaeology.shape)
hist_back_norm = np.true_divide(hist_back[0],background.shape)
#hist_arch_norm = hist_arch
#hist_back_norm = hist_back
#print hist_arch_norm
#print x_vals.shape
os.chdir(hist_dir)
sum_of_difference = np.sum(np.abs(hist_arch_norm-hist_back_norm))
print sum_of_difference
contrast_emd = emd.emd(range(bins),range(bins),hist_arch_norm, hist_back_norm)
print 'EMD',contrast_emd
emd_list.append(contrast_emd)
emd_comp = np.array(emd_list)
print emd_comp.shape
os.chdir(self.plot_dir)
np.savetxt(self.name+'_emd.txt',emd_comp, delimiter=',')
def anova(self):
f = []
p = []
for band in range(self.bands):
archaeology = self.arc[:,band]
background = self.bac[:,band]
anova = stats.f_oneway(archaeology, background)
f.append(anova[0])
p.append(anova[1])
f_list = np.array(f)
p_list = np.array(p)
pos = np.arange(self.bands)
#plt.xlim([0,6])
plt.barh(pos,f_list, color='#3A01DF')
plt.margins(0.01)
plt.yticks(pos+0.5, band_names, style ='italic')
plt.xlabel("Contrast (ANOVA F)",fontweight='bold')
#plt.ylabel('RGB')
plt.title(self.name, fontweight='bold')
fig=plt.gcf()
fig.subplots_adjust(left=0.18)
plt.savefig(self.name+'_ANOVA_F')
plt.close()
os.chdir(self.plot_dir)
np.savetxt(self.name+'_anova_f.txt',f_list, delimiter=',')
pos = np.arange(self.bands)
#plt.xlim([0,6])
plt.barh(pos,p_list, color='#3A01DF')
plt.margins(0.01)
plt.yticks(pos+0.5, band_names, style ='italic')
plt.xlabel("Contrast (ANOVA P)",fontweight='bold')
#plt.ylabel('RGB')
plt.title(self.name, fontweight='bold')
fig=plt.gcf()
fig.subplots_adjust(left=0.18)
plt.savefig(self.name+'_ANOVA_P')
plt.close()
os.chdir(self.plot_dir)
np.savetxt(self.name+'_anova_p.txt',p_list, delimiter=',')
def pca(self):
print 'PCA'
data = np.vstack((self.arc[:,0:2],self.bac[:,0:2]))
#bad = data[np.where(np.isfinite(data))]
masked = np.ma.array(data,
mask = np.isfinite(data))
#print data.shape
#print bad.shape, 'BAD'
fixed = np.ma.fix_invalid(masked)
imp = preprocessing.Imputer(missing_values='NaN',
strategy='mean',
axis=0)
imp.fit_transform(fixed.data)
pca = PCA(n_components=3)
rgb_pca = pca.fit_transform(fixed.data)
print rgb_pca.shape
print rgb_pca[0],rgb_pca[1]
plt.scatter(rgb_pca[:self.arc.shape[0],0],rgb_pca[:self.arc.shape[0],1],color='red',alpha=0.5)
plt.scatter(rgb_pca[self.arc.shape[0]:,0],rgb_pca[self.arc.shape[0]:,1],color='blue',alpha=0.5)
#plt.show()
os.chdir(self.plot_dir)
plt.savefig('1111pca'+self.name)
plt.close()
def ttest(self):
t =[]
p = []
archaeology = self.arc
background = self.bac
print 'ttestloop'
print archaeology.shape
print background.shape
t_test = stats.ttest_1samp(archaeology, background)
#t_test = stats.ttest_ind(archaeology, background)
t.append(t_test[0])
p.append(t_test[1])
t_list = np.array(t)
print 'TSHAPE', t_list.shape
p_list = np.array(p)
print 'PSHAPE', p_list.shape
os.chdir(self.plot_dir)
np.savetxt(self.name+'_t.txt',np.mean(t_list, axis=1), delimiter=',')
np.savetxt(self.name+'_p.txt',np.mean(p_list, axis=1), delimiter=',')
#np.savetxt(self.name+'_t.txt',t_list, delimiter=',')
#np.savetxt(self.name+'_p.txt',p_list, delimiter=',')
if __name__ == "__main__":
    # Walk every site directory next to this script: each one is expected to
    # hold an 'image' folder (rasters) and a 'grid_output' folder (class
    # masks). Every image is compared against every mask and the results go
    # into a shared 'plots' directory.
    dir_path = os.path.dirname(os.path.abspath('...'))
    plot_dir = os.path.join(dir_path,'plots')
    if not os.path.exists(plot_dir):
        os.mkdir(plot_dir)
    # Labels for the nine raster bands, passed through to ImageAnalysis.
    band_names = ('intensity',
                  'peak_start',
                  'peak_end',
                  'peak_location',
                  'peak_width',
                  'max_intensity',
                  'peak_sum',
                  'shoulder_location',
                  'shoulder_intensity')
    dir_list = os.listdir(dir_path)
    for directory in dir_list:
        # skip the output directory itself
        if not directory == 'plots':
            current = os.path.join(dir_path,directory)
            image_dir = os.path.join(current, 'image')
            class_dir = os.path.join(current, 'grid_output')
            img_list = os.listdir(image_dir)
            class_list = os.listdir(class_dir)
            # every image is analysed against every classification mask
            for image in img_list:
                #img = os.path.join(image_dir,image)
                #print image
                img = image
                for classification in class_list:
                    clim = classification
                    #print clim
                    blah = ImageAnalysis(image_dir,
                                         img,class_dir,clim,
                                         plot_dir,
                                         band_names)
                    blah.hist_compare()
                    #blah.pca()
                    blah.ttest()
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hickle as hkl
import numpy as np
from tensorflow.python.pywrap_tensorflow import do_quantize_training_on_graphdef
np.random.seed(9 ** 10)
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras import regularizers
from keras.layers import Dropout
from keras.models import Sequential
from keras.layers.core import Activation
from keras.utils.vis_utils import plot_model
from keras.layers.wrappers import TimeDistributed
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import UpSampling3D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.callbacks import LearningRateScheduler
from keras.layers.advanced_activations import LeakyReLU
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_squared_error as mse
from plot_results import plot_err_variation
from keras.layers import Input
from keras.models import Model
from config_walle_r16 import *
from sys import stdout
import tb_callback
import lrs_callback
import argparse
import cv2
import os
def encoder_model():
    """Build the spatio-temporal encoder.

    Maps clips of shape (VIDEO_LENGTH/2, 128, 208, 3) down to feature
    volumes of shape (VIDEO_LENGTH/2, 16, 26, 64) via three strided
    Conv3D stages (spatial stride 2, time untouched).
    """
    def conv_stage(tensor, n_filters):
        # Conv3D (halves H and W) -> per-frame BN -> LeakyReLU -> Dropout
        tensor = Conv3D(filters=n_filters,
                        strides=(1, 2, 2),
                        dilation_rate=(1, 1, 1),
                        kernel_size=(3, 3, 3),
                        padding='same')(tensor)
        tensor = TimeDistributed(BatchNormalization())(tensor)
        tensor = TimeDistributed(LeakyReLU(alpha=0.2))(tensor)
        return TimeDistributed(Dropout(0.5))(tensor)

    frames_in = Input(shape=(int(VIDEO_LENGTH / 2), 128, 208, 3))
    features = conv_stage(frames_in, 32)   # 128x208 -> 64x104
    features = conv_stage(features, 64)    # 64x104  -> 32x52
    features = conv_stage(features, 64)    # 32x52   -> 16x26
    return Model(inputs=frames_in, outputs=features)
def decoder_model():
    """Build the convolutional-LSTM decoder.

    Maps feature volumes of shape (VIDEO_LENGTH/2, 16, 26, 64) back up to
    clips of shape (VIDEO_LENGTH/2, 128, 208, 3): three ConvLSTM stages,
    each followed by 2x spatial upsampling, then a final ConvLSTM that
    emits 3-channel frames through a tanh (no batch norm on the output).
    """
    def lstm_stage(tensor, n_filters, batch_norm=True):
        # ConvLSTM2D -> (optional) per-frame BN -> tanh
        tensor = ConvLSTM2D(filters=n_filters,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same',
                            return_sequences=True,
                            recurrent_dropout=0.2)(tensor)
        if batch_norm:
            tensor = TimeDistributed(BatchNormalization())(tensor)
        return TimeDistributed(Activation('tanh'))(tensor)

    z_in = Input(shape=(int(VIDEO_LENGTH / 2), 16, 26, 64))
    x = UpSampling3D(size=(1, 2, 2))(lstm_stage(z_in, 64))   # 16x26 -> 32x52
    x = UpSampling3D(size=(1, 2, 2))(lstm_stage(x, 64))      # 32x52 -> 64x104
    x = UpSampling3D(size=(1, 2, 2))(lstm_stage(x, 32))      # 64x104 -> 128x208
    frames_out = lstm_stage(x, 3, batch_norm=False)
    return Model(inputs=z_in, outputs=frames_out)
def set_trainability(model, trainable):
    """Set the trainable flag on a model and on every one of its layers."""
    for target in [model] + list(model.layers):
        target.trainable = trainable
def autoencoder_model(encoder, decoder):
    """Chain the encoder and decoder into one sequential autoencoder."""
    chained = Sequential()
    for stage in (encoder, decoder):
        chained.add(stage)
    return chained
def arrange_images(video_stack):
    """Tile a batch of videos into one big image.

    video_stack has shape (n_videos, n_frames, height, width, channels).
    Video i occupies grid row i and frame j grid column j, so the result
    has shape (n_videos*height, n_frames*width, channels) and the same
    dtype as the input.
    """
    n_videos, n_frames, img_height, img_width = video_stack.shape[:4]
    n_channels = video_stack.shape[4]
    # Move each frame's height axis next to the video axis so one reshape
    # yields the tiled layout (rows = videos, columns = frames).
    tiled = np.transpose(video_stack, (0, 2, 1, 3, 4))
    return tiled.reshape(n_videos * img_height,
                         n_frames * img_width,
                         n_channels)
def load_weights(weights_file, model):
    """Load HDF5 weights from weights_file into model, in place."""
    model.load_weights(weights_file)
def run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS):
    """Optionally summarize/serialize/plot the models and pre-load weights.

    Behaviour is driven by the config globals PRINT_MODEL_SUMMARY,
    SAVE_MODEL and PLOT_MODEL. Weights are loaded only when the
    corresponding argument is not the literal string "None".
    """
    named_models = (("encoder", encoder),
                    ("decoder", decoder),
                    ("autoencoder", autoencoder))

    if PRINT_MODEL_SUMMARY:
        for _, net in named_models:
            print(net.summary())
        # exit(0)

    # Save model architectures to JSON files
    if SAVE_MODEL:
        print("Saving models to file...")
        for label, net in named_models:
            with open(os.path.join(MODEL_DIR, label + ".json"), "w") as json_file:
                json_file.write(net.to_json())

    if PLOT_MODEL:
        for label, net in named_models:
            plot_model(net, to_file=os.path.join(MODEL_DIR, label + '.png'),
                       show_shapes=True)

    if ENC_WEIGHTS != "None":
        print("Pre-loading encoder with weights...")
        load_weights(ENC_WEIGHTS, encoder)
    if DEC_WEIGHTS != "None":
        print("Pre-loading decoder with weights...")
        load_weights(DEC_WEIGHTS, decoder)
def load_to_RAM(frames_source):
    """Load every frame image from DATA_DIR into one normalized array.

    Frames on disk are named frame_<j>.png with 1-based j, so index 0 of
    the returned (len(frames_source),) + IMG_SIZE array stays zero and
    array indices line up with the frame numbering. Pixels are rescaled
    from [0, 255] to [-1, 1].
    """
    frames = np.zeros(shape=((len(frames_source),) + IMG_SIZE))
    print("Decimating RAM!")
    j = 1
    for i in range(1, len(frames_source)):
        filename = "frame_" + str(j) + ".png"
        im_file = os.path.join(DATA_DIR, filename)
        try:
            frame = cv2.imread(im_file, cv2.IMREAD_COLOR)
            frames[i] = (frame.astype(np.float32) - 127.5) / 127.5
            # j only advances on success, so after a failed read the same
            # file is retried at the next slot (frames[i] stays zero).
            j = j + 1
        except AttributeError as e:
            # cv2.imread returns None for missing/corrupt files; calling
            # .astype on None raises AttributeError, which lands here.
            print(im_file)
            print(e)
    return frames
def load_X_RAM(videos_list, index, frames):
    """Slice one batch of already-loaded frames out of RAM.

    Row (index*BATCH_SIZE + i) of videos_list holds the frame indices of
    sample i; its first and last entries bound the (inclusive) slice taken
    from frames. Returns an array of BATCH_SIZE clips.
    """
    batch = [
        frames[videos_list[(index * BATCH_SIZE + sample), 0]:
               videos_list[(index * BATCH_SIZE + sample), -1] + 1]
        for sample in range(BATCH_SIZE)
    ]
    return np.asarray(batch)
def load_X(videos_list, index, data_dir, img_size, batch_size=BATCH_SIZE):
    """Read one batch of videos from disk into a normalized array.

    Row (index*batch_size + i) of videos_list lists the frame numbers of
    sample i; each frame_<n>.png under data_dir is read with OpenCV and
    rescaled from [0, 255] to [-1, 1]. Unreadable frames are reported and
    left as zeros.
    """
    X = np.zeros((batch_size, VIDEO_LENGTH,) + img_size)
    for sample in range(batch_size):
        frame_numbers = videos_list[(index * batch_size + sample)]
        for pos in range(VIDEO_LENGTH):
            im_file = os.path.join(
                data_dir, "frame_" + str(frame_numbers[pos]) + ".png")
            try:
                raw = cv2.imread(im_file, cv2.IMREAD_COLOR)
                X[sample, pos] = (raw.astype(np.float32) - 127.5) / 127.5
            except AttributeError as e:
                # cv2.imread returns None on failure; .astype then raises
                print(im_file)
                print(e)
    return X
def get_video_lists(frames_source, stride, video_length=None):
    """Build the list of frame-index windows that form training videos.

    Args:
        frames_source: sequence where entry k names the source clip of
            frame k (frames are 1-indexed, so entry 0 is padding).
        stride: step between the start frames of consecutive windows.
        video_length: frames per window; defaults to the config global
            VIDEO_LENGTH (parameter added for testability; the default
            keeps existing callers unchanged).

    Returns:
        int32 ndarray of shape (n_videos, video_length) of frame indices.
        Only windows whose frames all come from the same source clip are
        kept; at a clip boundary the window is re-anchored at the boundary.
    """
    if video_length is None:
        video_length = VIDEO_LENGTH
    videos_list = []
    start_frame_index = 1
    end_frame_index = video_length + 1
    while (end_frame_index <= len(frames_source)):
        frame_list = frames_source[start_frame_index:end_frame_index]
        if (len(set(frame_list)) == 1):
            # whole window comes from one clip -> keep it, slide by stride
            videos_list.append(list(range(start_frame_index, end_frame_index)))
            start_frame_index = start_frame_index + stride
            end_frame_index = end_frame_index + stride
        else:
            # clip boundary inside the window -> jump past it
            start_frame_index = end_frame_index - 1
            end_frame_index = start_frame_index + video_length
    # BUG FIX: the original converted to an int32 array and then wrapped it
    # in a second, dtype-less np.asarray; one conversion is sufficient and
    # produces the identical result.
    return np.asarray(videos_list, dtype=np.int32)
def train(BATCH_SIZE, ENC_WEIGHTS, DEC_WEIGHTS):
    """Train the encoder/decoder video autoencoder.

    BATCH_SIZE: number of clips per training batch.
    ENC_WEIGHTS / DEC_WEIGHTS: weight-file paths, or the string "None" to
    start from scratch (handled by run_utilities).
    Behaviour is further driven by config globals (DATA_DIR, VAL_DATA_DIR,
    VIDEO_LENGTH, IMG_SIZE, RAM_DECIMATE, SHUFFLE, OPTIM_A, OPTIM_B,
    NB_EPOCHS_AUTOENCODER, SAVE_GENERATED_IMAGES, directory paths, ...).
    """
    print("Loading data definitions...")
    frames_source = hkl.load(os.path.join(DATA_DIR, 'sources_train_208.hkl'))
    videos_list = get_video_lists(frames_source=frames_source, stride=4)
    n_videos = videos_list.shape[0]

    # Setup test
    val_frames_source = hkl.load(os.path.join(VAL_DATA_DIR, 'sources_val_208.hkl'))
    # validation windows step by half the clip length (no overlap between
    # an input half and the next window's input half)
    val_videos_list = get_video_lists(frames_source=val_frames_source, stride=(int(VIDEO_LENGTH / 2)))
    n_val_videos = val_videos_list.shape[0]

    if RAM_DECIMATE:
        # optionally preload every frame into RAM to avoid per-batch disk I/O
        frames = load_to_RAM(frames_source=frames_source)

    if SHUFFLE:
        # Shuffle images to aid generalization
        videos_list = np.random.permutation(videos_list)

    # Build the Spatio-temporal Autoencoder
    print("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()
    autoencoder = autoencoder_model(encoder, decoder)
    autoencoder.compile(loss="mean_squared_error", optimizer=OPTIM_A)

    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)

    NB_ITERATIONS = int(n_videos / BATCH_SIZE)
    # NB_ITERATIONS = 5
    NB_VAL_ITERATIONS = int(n_val_videos / BATCH_SIZE)

    # Setup TensorBoard Callback
    TC = tb_callback.TensorBoard(log_dir=TF_LOG_DIR, histogram_freq=0, write_graph=False, write_images=False)
    LRS = lrs_callback.LearningRateScheduler(schedule=schedule)
    LRS.set_model(autoencoder)

    print("Beginning Training...")
    # Begin Training
    for epoch in range(1, NB_EPOCHS_AUTOENCODER+1):
        if epoch == 21:
            # from epoch 21: switch the loss to MAE with a second optimizer
            # and restart from the epoch-20 checkpoint
            autoencoder.compile(loss="mean_absolute_error", optimizer=OPTIM_B)
            load_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_20.h5'), encoder)
            load_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_20.h5'), decoder)

        print("\n\nEpoch ", epoch)
        loss = []
        val_loss = []

        # Set learning rate every epoch
        LRS.on_epoch_begin(epoch=epoch)
        lr = K.get_value(autoencoder.optimizer.lr)
        print("Learning rate: " + str(lr))

        for index in range(NB_ITERATIONS):
            # Train Autoencoder
            if RAM_DECIMATE:
                X = load_X_RAM(videos_list, index, frames)
            else:
                X = load_X(videos_list, index, DATA_DIR, IMG_SIZE)
            # first half of each clip, time-reversed, is the input;
            # the second half is the prediction target
            X_train = np.flip(X[:, 0: int(VIDEO_LENGTH / 2)], axis=1)
            y_train = X[:, int(VIDEO_LENGTH / 2):]
            loss.append(autoencoder.train_on_batch(X_train, y_train))

            # text progress bar, 40 characters wide
            arrow = int(index / (NB_ITERATIONS / 40))
            stdout.write("\rIter: " + str(index) + "/" + str(NB_ITERATIONS - 1) + " " +
                         "loss: " + str(loss[len(loss) - 1]) +
                         "\t [" + "{0}>".format("=" * (arrow)))
            stdout.flush()

            if SAVE_GENERATED_IMAGES:
                # Save generated images to file
                predicted_images = autoencoder.predict(X_train, verbose=0)
                voila = np.concatenate((X_train, y_train), axis=1)
                truth_seq = arrange_images(voila)
                pred_seq = arrange_images(np.concatenate((X_train, predicted_images), axis=1))
                # de-normalize from [-1, 1] back to 8-bit pixel range
                truth_seq = truth_seq * 127.5 + 127.5
                pred_seq = pred_seq * 127.5 + 127.5
                # ground truth does not change across epochs; write it once
                if epoch == 1:
                    cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_truth.png"), truth_seq)
                cv2.imwrite(os.path.join(GEN_IMAGES_DIR, str(epoch) + "_" + str(index) + "_pred.png"), pred_seq)

        # Run over test data
        print('')
        for index in range(NB_VAL_ITERATIONS):
            X = load_X(val_videos_list, index, VAL_DATA_DIR, IMG_SIZE)
            X_val = np.flip(X[:, 0: int(VIDEO_LENGTH / 2)], axis=1)
            y_val = X[:, int(VIDEO_LENGTH / 2):]
            val_loss.append(autoencoder.test_on_batch(X_val, y_val))

            arrow = int(index / (NB_VAL_ITERATIONS / 40))
            stdout.write("\rIter: " + str(index) + "/" + str(NB_VAL_ITERATIONS - 1) + " " +
                         "val_loss: " + str(val_loss[len(val_loss) - 1]) +
                         "\t [" + "{0}>".format("=" * (arrow)))
            stdout.flush()

        # then after each epoch/iteration
        avg_loss = sum(loss) / len(loss)
        avg_val_loss = sum(val_loss) / len(val_loss)
        logs = {'loss': avg_loss, 'val_loss': avg_val_loss}
        TC.on_epoch_end(epoch, logs)

        # Log the losses
        with open(os.path.join(LOG_DIR, 'losses_gen.json'), 'a') as log_file:
            log_file.write("{\"epoch\":%d, \"train_loss\":%f, \"val_loss\":%f}\n" % (epoch, avg_loss, avg_val_loss))

        print("\nAvg train loss: " + str(avg_loss) + " Avg val loss: " + str(avg_val_loss))

        # Save model weights per epoch to file
        # checkpoints kept for epochs 16-20 (before the loss switch) and 26+
        if epoch > 15 and epoch < 21:
            encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'), True)
            decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
        if epoch > 25:
            encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'), True)
            decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)

        # encoder.save_weights(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'), True)
        # decoder.save_weights(os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'), True)
        # test(os.path.join(CHECKPOINT_DIR, 'encoder_epoch_' + str(epoch) + '.h5'),
        #      os.path.join(CHECKPOINT_DIR, 'decoder_epoch_' + str(epoch) + '.h5'))
def test(ENC_WEIGHTS, DEC_WEIGHTS):
    """Evaluate the trained autoencoder on the test split.

    Rebuilds the encoder/decoder/autoencoder, loads the given weight files,
    computes the test loss per batch, records per-frame MAE/MSE (plus a
    "DC" baseline that compares the first input frame with the first target
    frame), and writes truth/prediction image strips and error curves under
    TEST_RESULTS_DIR.

    :param ENC_WEIGHTS: path to the encoder weights file (or "None")
    :param DEC_WEIGHTS: path to the decoder weights file (or "None")
    """
    print('')
    # Setup test
    test_frames_source = hkl.load(os.path.join(TEST_DATA_DIR, 'sources_test_208.hkl'))
    test_videos_list = get_video_lists(frames_source=test_frames_source, stride=(int(VIDEO_LENGTH / 2)))
    n_test_videos = test_videos_list.shape[0]

    if not os.path.exists(TEST_RESULTS_DIR + '/truth/'):
        os.mkdir(TEST_RESULTS_DIR + '/truth/')
    if not os.path.exists(TEST_RESULTS_DIR + '/pred/'):
        os.mkdir(TEST_RESULTS_DIR + '/pred/')
    if not os.path.exists(TEST_RESULTS_DIR + '/graphs/'):
        os.mkdir(TEST_RESULTS_DIR + '/graphs/')
    # BUGFIX: 'graphs/values/' used to be created only when 'graphs/' was
    # missing, so a pre-existing 'graphs/' directory made the np.save calls
    # below fail. Check and create it independently.
    if not os.path.exists(TEST_RESULTS_DIR + '/graphs/values/'):
        os.mkdir(TEST_RESULTS_DIR + '/graphs/values/')

    print("Creating models...")
    encoder = encoder_model()
    decoder = decoder_model()
    autoencoder = autoencoder_model(encoder, decoder)
    autoencoder.compile(loss="mean_absolute_error", optimizer=OPTIM_A)
    run_utilities(encoder, decoder, autoencoder, ENC_WEIGHTS, DEC_WEIGHTS)

    NB_TEST_ITERATIONS = int(n_test_videos / TEST_BATCH_SIZE)
    test_loss = []
    # One row per video; the extra trailing column stores the DC baseline.
    mae_errors = np.zeros(shape=(n_test_videos, int(VIDEO_LENGTH/2) + 1))
    mse_errors = np.zeros(shape=(n_test_videos, int(VIDEO_LENGTH/2) + 1))
    # z_all = []
    for index in range(NB_TEST_ITERATIONS):
        X = load_X(test_videos_list, index, TEST_DATA_DIR, IMG_SIZE, batch_size=TEST_BATCH_SIZE)
        # First half of the clip (time-reversed) is the input; second half is
        # the prediction target.
        X_test = np.flip(X[:, 0: int(VIDEO_LENGTH / 2)], axis=1)
        y_test = X[:, int(VIDEO_LENGTH / 2):]
        test_loss.append(autoencoder.test_on_batch(X_test, y_test))

        # Simple textual progress bar.
        arrow = int(index / (NB_TEST_ITERATIONS / 40))
        stdout.write("\rIter: " + str(index) + "/" + str(NB_TEST_ITERATIONS - 1) + " " +
                     "test_loss: " + str(test_loss[len(test_loss) - 1]) +
                     "\t [" + "{0}>".format("=" * (arrow)))
        stdout.flush()

        if SAVE_GENERATED_IMAGES:
            # Save generated images to file
            z = encoder.predict(X_test, verbose=0)
            # z_all.append(z)
            # z_new = np.zeros(shape=(TEST_BATCH_SIZE, 1, 16, 26, 64))
            # z_new[0] = z[:, 15]
            # z_new = np.repeat(z_new, int(VIDEO_LENGTH/2), axis=1)
            predicted_images = decoder.predict(z, verbose=0)
            voila = np.concatenate((X_test, y_test), axis=1)
            truth_seq = arrange_images(voila)
            pred_seq = arrange_images(np.concatenate((X_test, predicted_images), axis=1))
            # Undo the [-1, 1] normalisation before writing PNGs.
            truth_seq = truth_seq * 127.5 + 127.5
            pred_seq = pred_seq * 127.5 + 127.5

            mae_error = []
            mse_error = []
            for i in range(int(VIDEO_LENGTH / 2)):
                mae_errors[index, i] = (mae(y_test[0, i].flatten(), predicted_images[0, i].flatten()))
                mae_error.append(mae_errors[index, i])
                mse_errors[index, i] = (mse(y_test[0, i].flatten(), predicted_images[0, i].flatten()))
                mse_error.append(mse_errors[index, i])
            # DC baseline: error of simply repeating the first input frame.
            dc_mae = mae(X_test[0, 0].flatten(), y_test[0, 0].flatten())
            mae_errors[index, -1] = dc_mae
            dc_mse = mse(X_test[0, 0].flatten(), y_test[0, 0].flatten())
            mse_errors[index, -1] = dc_mse

            cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/truth/', str(index) + "_truth.png"), truth_seq)
            cv2.imwrite(os.path.join(TEST_RESULTS_DIR + '/pred/', str(index) + "_pred.png"), pred_seq)
            plot_err_variation(mae_error, index, dc_mae, 'mae')
            plot_err_variation(mse_error, index, dc_mse, 'mse')
            np.save(os.path.join(TEST_RESULTS_DIR + '/graphs/values/', str(index) + "_mae.npy"), np.asarray(mae_errors))
            np.save(os.path.join(TEST_RESULTS_DIR + '/graphs/values/', str(index) + "_mse.npy"), np.asarray(mse_errors))
            # np.save(os.path.join(TEST_RESULTS_DIR + '/graphs/values/', "z_all.npy"), np.asarray(z_all))

    # then after each epoch/iteration
    avg_test_loss = sum(test_loss) / len(test_loss)
    # BUGFIX: plain string concatenation dropped the path separator, writing
    # '<parent>/<dirname>test_loss.npy' beside TEST_RESULTS_DIR instead of
    # inside it; os.path.join inserts the separator.
    np.save(os.path.join(TEST_RESULTS_DIR, 'test_loss.npy'), np.asarray(test_loss))
    print("\nAvg loss: " + str(avg_test_loss))
    print("\n Std: " + str(np.std(np.asarray(test_loss))))
    print("\n Variance: " + str(np.var(np.asarray(test_loss))))
    print("\n Mean: " + str(np.mean(np.asarray(test_loss))))
    print("\n Max: " + str(np.max(np.asarray(test_loss))))
    print("\n Min: " + str(np.min(np.asarray(test_loss))))
def get_args():
    """Parse the command-line options shared by the train/test entry points."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str)
    # All four weight-file options share the same shape: string, default "None".
    for weights_flag in ("--enc_weights", "--dec_weights", "--gen_weights", "--dis_weights"):
        parser.add_argument(weights_flag, type=str, default="None")
    parser.add_argument("--batch_size", type=int, default=BATCH_SIZE)
    parser.add_argument("--nice", dest="nice", action="store_true")
    parser.set_defaults(nice=False)
    return parser.parse_args()
if __name__ == "__main__":
    # Dispatch on --mode; weights default to the string "None" when not given.
    args = get_args()
    if args.mode == "train":
        train(BATCH_SIZE=args.batch_size,
              ENC_WEIGHTS=args.enc_weights,
              DEC_WEIGHTS=args.dec_weights)
    if args.mode == "test":
        test(ENC_WEIGHTS=args.enc_weights,
             DEC_WEIGHTS=args.dec_weights)
    # if args.mode == "test_ind":
    #     test_ind(ENC_WEIGHTS=args.enc_weights,
    #              DEC_WEIGHTS=args.dec_weights)
|
|
r'''
Copyright: Brainwy Software Ltda.
License: EPL.
=============
Works for Windows relying on a fork of winappdbg which works in py2/3 (at least for the part we're interested in).
See: https://github.com/fabioz/winappdbg (py3 branch).
Note that the official branch for winappdbg is: https://github.com/MarioVilas/winappdbg, which should be used when it works in Py3.
A private copy is added here to make deployment easier, but changes should always be done upstream first.
Works for Linux relying on gdb.
Limitations:
============
Linux:
------
1. It is possible that ptrace is disabled: /etc/sysctl.d/10-ptrace.conf
Note that even enabling it in /etc/sysctl.d/10-ptrace.conf (i.e.: making the
ptrace_scope=0), it's possible that we need to run the application that'll use ptrace (or
gdb in this case) as root (so, we must sudo the python which'll run this module).
2. It currently doesn't work in debug builds (i.e.: python_d)
Other implementations:
- pyrasite.com:
GPL
Windows/linux (in Linux it also uses gdb to connect -- although specifics are different as we use a dll to execute
code with other threads stopped). Its Windows approach is more limited because it doesn't seem to deal properly with
Python 3 if threading is disabled.
- https://github.com/google/pyringe:
Apache v2.
Only linux/Python 2.
- http://pytools.codeplex.com:
Apache V2
Windows Only (but supports mixed mode debugging)
Our own code relies heavily on a part of it: http://pytools.codeplex.com/SourceControl/latest#Python/Product/PyDebugAttach/PyDebugAttach.cpp
to overcome some limitations of attaching and running code in the target python executable on Python 3.
See: attach.cpp
Linux: References if we wanted to use a pure-python debugger:
https://bitbucket.org/haypo/python-ptrace/
http://stackoverflow.com/questions/7841573/how-to-get-an-error-message-for-errno-value-in-python
Jugaad:
https://www.defcon.org/images/defcon-19/dc-19-presentations/Jakhar/DEFCON-19-Jakhar-Jugaad-Linux-Thread-Injection.pdf
https://github.com/aseemjakhar/jugaad
Something else (general and not Python related):
- http://www.codeproject.com/Articles/4610/Three-Ways-to-Inject-Your-Code-into-Another-Proces
Other references:
- https://github.com/haypo/faulthandler
- http://nedbatchelder.com/text/trace-function.html
- https://github.com/python-git/python/blob/master/Python/sysmodule.c (sys_settrace)
- https://github.com/python-git/python/blob/master/Python/ceval.c (PyEval_SetTrace)
- https://github.com/python-git/python/blob/master/Python/thread.c (PyThread_get_key_value)
To build the dlls needed on windows, visual studio express 13 was used (see compile_dll.bat)
See: attach_pydevd.py to attach the pydev debugger to a running python process.
'''
# Note: to work with nasm compiling asm to code and decompiling to see asm with shellcode:
# x:\nasm\nasm-2.07-win32\nasm-2.07\nasm.exe
# nasm.asm&x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe -b arch nasm
import binascii
import ctypes
import os
import struct
import subprocess
import sys
import time
# Module-wide debug verbosity flag: 0 silences debug(); set > 0 to enable output.
SHOW_DEBUG_INFO = 0
def stderr_write(message):
    """Write *message* to stderr, terminated by a newline."""
    sys.stderr.write('%s\n' % (message,))
def debug(message):
    """Emit *message* via stderr_write, but only when debug output is enabled."""
    if SHOW_DEBUG_INFO <= 0:
        return
    stderr_write(message)
class AutoExit(object):
    """Minimal context manager that invokes a callback when the block exits."""

    def __init__(self, on_exit):
        self.on_exit = on_exit  # zero-argument callable run on __exit__

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # Runs unconditionally, even if the with-block raised.
        self.on_exit()
class GenShellCodeHelper(object):
    """Assembles tiny x86/x64 shellcode byte strings from a fixed opcode table.

    Callers compose instructions through push/pop/mov/call/ret helpers and
    then read the raw bytes back with get_code() for injection into a target
    process.
    """

    def __init__(self, is_64):
        # Note: a `from winappdbg import compat` import used to live here but
        # was never used; removed so the class only depends on the stdlib.
        self.is_64 = is_64
        self._code = []
        if not is_64:
            self._translations = {
                'push esi': b'\x56',
                'push eax': b'\x50',
                'push ebp': b'\x55',
                'push ebx': b'\x53',

                'pop esi': b'\x5E',
                'pop eax': b'\x58',
                'pop ebp': b'\x5D',
                'pop ebx': b'\x5B',

                'mov esi': b'\xBE',
                'mov eax': b'\xB8',
                'mov ebp': b'\xBD',
                'mov ebx': b'\xBB',

                'call ebp': b'\xFF\xD5',
                'call eax': b'\xFF\xD0',
                'call ebx': b'\xFF\xD3',

                'mov ebx,eax': b'\x89\xC3',
                'mov eax,ebx': b'\x89\xD8',
                'mov ebp,esp': b'\x89\xE5',
                'mov esp,ebp': b'\x89\xEC',

                'push dword': b'\x68',

                'mov ebp,eax': b'\x89\xC5',
                'mov eax,ebp': b'\x89\xE8',

                'ret': b'\xc3',
            }
        else:
            # Translate 64 bits
            self._translations = {
                'push rsi': b'\x56',
                'push rax': b'\x50',
                'push rbp': b'\x55',
                'push rbx': b'\x53',
                'push rsp': b'\x54',
                'push rdi': b'\x57',

                'pop rsi': b'\x5E',
                'pop rax': b'\x58',
                'pop rbp': b'\x5D',
                'pop rbx': b'\x5B',
                'pop rsp': b'\x5C',
                'pop rdi': b'\x5F',

                'mov rsi': b'\x48\xBE',
                'mov rax': b'\x48\xB8',
                'mov rbp': b'\x48\xBD',
                'mov rbx': b'\x48\xBB',
                'mov rdi': b'\x48\xBF',
                'mov rcx': b'\x48\xB9',
                'mov rdx': b'\x48\xBA',

                'call rbp': b'\xFF\xD5',
                'call rax': b'\xFF\xD0',
                'call rbx': b'\xFF\xD3',

                'mov rbx,rax': b'\x48\x89\xC3',
                'mov rax,rbx': b'\x48\x89\xD8',
                'mov rbp,rsp': b'\x48\x89\xE5',
                'mov rsp,rbp': b'\x48\x89\xEC',
                'mov rcx,rbp': b'\x48\x89\xE9',

                'mov rbp,rax': b'\x48\x89\xC5',
                'mov rax,rbp': b'\x48\x89\xE8',

                'mov rdi,rbp': b'\x48\x89\xEF',

                'ret': b'\xc3',
            }

    def push_addr(self, addr):
        # `addr` must already be packed via pack_address(); 32-bit only
        # ('push dword' exists only in the x86 table).
        self._code.append(self.translate('push dword'))
        self._code.append(addr)

    def push(self, register):
        # Returns a context manager so `with helper.push(reg):` emits the
        # matching pop automatically when the block exits.
        self._code.append(self.translate('push %s' % register))
        return AutoExit(lambda: self.pop(register))

    def pop(self, register):
        self._code.append(self.translate('pop %s' % register))

    def mov_to_register_addr(self, register, addr):
        # Emits `mov reg, imm` where `addr` is the pre-packed immediate.
        self._code.append(self.translate('mov %s' % register))
        self._code.append(addr)

    def mov_register_to_from(self, register_to, register_from):
        self._code.append(self.translate('mov %s,%s' % (register_to, register_from)))

    def call(self, register):
        self._code.append(self.translate('call %s' % register))

    def preserve_stack(self):
        # 32-bit only: save esp into ebp, restore on context exit.
        self.mov_register_to_from('ebp', 'esp')
        return AutoExit(lambda: self.restore_stack())

    def restore_stack(self):
        self.mov_register_to_from('esp', 'ebp')

    def ret(self):
        self._code.append(self.translate('ret'))

    def get_code(self):
        """Return the accumulated shellcode as a single bytes object."""
        return b''.join(self._code)

    def translate(self, code):
        # Raises KeyError for instructions outside the supported table.
        return self._translations[code]

    def pack_address(self, address):
        """Pack an address little-endian at the target's pointer width."""
        if self.is_64:
            return struct.pack('<q', address)
        else:
            return struct.pack('<L', address)

    def convert(self, code):
        '''
        Convert a whitespace/newline-separated hex dump into raw shellcode bytes.

        Note:
        If the shellcode starts with '66' controls, it needs to be changed to add [BITS 32] or
        [BITS 64] to the start.

        To use:

        convert("""
        55
        53
        50
        BDE97F071E
        FFD5
        BDD67B071E
        FFD5
        5D
        5B
        58
        C3
        """)
        '''
        code = code.replace(' ', '')
        lines = []
        for l in code.splitlines(False):
            lines.append(l)
        code = ''.join(lines)  # Remove new lines
        # BUGFIX: str.decode('hex') exists only on Python 2 and raised
        # AttributeError on Python 3; binascii.unhexlify works on both.
        return binascii.unhexlify(code)
def resolve_label(process, label):
    """Resolve *label* to an address in *process*, retrying while modules load.

    Tries up to 10 times, rescanning the target's module list between
    attempts and sleeping so the total wait is capped at ~4 seconds.

    :param process: winappdbg-style Process with resolve_label()/scan_modules().
    :param label: symbol name to resolve (e.g. b'PyGILState_Ensure').
    :return: the non-zero resolved address.
    :raises: the last resolution error if the label never resolves.
    """
    max_attempts = 10
    for i in range(max_attempts):
        try:
            address = process.resolve_label(label)
            if not address:
                raise AssertionError('%s not resolved.' % (label,))
            return address
        # BUGFIX: narrowed from a bare `except:` which also swallowed
        # BaseException (e.g. KeyboardInterrupt) during the retry loop.
        except Exception:
            # The target may still be loading modules; rescan and retry.
            try:
                process.scan_modules()
            except Exception:
                pass
            if i == max_attempts - 1:
                raise

            # At most 4 seconds to resolve it.
            time.sleep(4. / max_attempts)
def is_python_64bit():
    """True when this interpreter was built with 8-byte pointers (64-bit)."""
    pointer_size = struct.calcsize('P')
    return pointer_size == 8
def is_mac():
    """True when running on macOS (platform.system() reports 'Darwin')."""
    import platform
    system_name = platform.system()
    return system_name == 'Darwin'
def run_python_code_windows(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
    """Inject and run `python_code` inside process `pid` on Windows.

    Uses winappdbg to inject the bundled attach_x86/amd64.dll into the
    target, then injects a small hand-built shellcode stub that calls the
    dll's AttachAndRunPythonCode(code, &attach_info) entry point.

    :param pid: target process id (must be a Python process of the same
        bitness as this interpreter).
    :param python_code: source to execute in the target (str or bytes;
        must not contain a single-quote character).
    :param connect_debugger_tracing: sets the CONNECT_DEBUGGER bit in the
        attach_info flags written into the target.
    :param show_debug_info: when truthy, sets the "show debug info" bit.
    :return: always 0 -- the target's result is deliberately not read back
        (see the long note below about unreadable memory).
    """
    assert '\'' not in python_code, 'Having a single quote messes with our command.'
    from winappdbg.process import Process
    if not isinstance(python_code, bytes):
        python_code = python_code.encode('utf-8')

    process = Process(pid)
    bits = process.get_bits()
    is_64 = bits == 64

    # Refuse mixed-bitness injection: the dll and shellcode must match the target.
    if is_64 != is_python_64bit():
        raise RuntimeError("The architecture of the Python used to connect doesn't match the architecture of the target.\n"
                           "Target 64 bits: %s\n"
                           "Current Python 64 bits: %s" % (is_64, is_python_64bit()))

    debug('Connecting to %s bits target' % (bits,))
    # Sanity check: the target must expose the CPython C API.
    assert resolve_label(process, b'PyGILState_Ensure')

    filedir = os.path.dirname(__file__)
    if is_64:
        suffix = 'amd64'
    else:
        suffix = 'x86'

    target_dll = os.path.join(filedir, 'attach_%s.dll' % suffix)
    if not os.path.exists(target_dll):
        raise RuntimeError('Could not find dll file to inject: %s' % target_dll)

    debug('Injecting dll')
    process.inject_dll(target_dll.encode('mbcs'))
    debug('Dll injected')

    process.scan_modules()
    attach_func = resolve_label(process, b'AttachAndRunPythonCode')
    assert attach_func

    debug('Allocating code in target process')
    assert isinstance(python_code, bytes)
    code_address = process.malloc(len(python_code))
    assert code_address
    debug('Writing code in target process')
    process.write(code_address, python_code)

    debug('Allocating return value memory in target process')
    attach_info_address = process.malloc(ctypes.sizeof(ctypes.c_int))
    assert attach_info_address

    CONNECT_DEBUGGER = 2

    attach_info = 0
    if show_debug_info:
        # NOTE(review): this local shadows the module-level SHOW_DEBUG_INFO
        # constant; here it is just the bit-flag value 1.
        SHOW_DEBUG_INFO = 1
        attach_info |= SHOW_DEBUG_INFO  # Uncomment to show debug info

    if connect_debugger_tracing:
        attach_info |= CONNECT_DEBUGGER

    # Note: previously the attach_info address was treated as read/write to have the return
    # value, but it seems that sometimes when the program wrote back the memory became
    # unreadable with the stack trace below when trying to read, so, we just write and
    # no longer inspect the return value.
    # i.e.:
    # Traceback (most recent call last):
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\attach_pydevd.py", line 72, in <module>
    #     main(process_command_line(sys.argv[1:]))
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\attach_pydevd.py", line 68, in main
    #     setup['pid'], python_code, connect_debugger_tracing=True, show_debug_info=show_debug_info_on_target_process)
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\add_code_to_python_process.py", line 392, in run_python_code_windows
    #     return_code = process.read_int(return_code_address)
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 1673, in read_int
    #     return self.__read_c_type(lpBaseAddress, b'@l', ctypes.c_int)
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 1568, in __read_c_type
    #     packed = self.read(address, size)
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 1598, in read
    #     if not self.is_buffer(lpBaseAddress, nSize):
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 2843, in is_buffer
    #     mbi = self.mquery(address)
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\process.py", line 2533, in mquery
    #     return win32.VirtualQueryEx(hProcess, lpAddress)
    #   File "X:\pydev\plugins\org.python.pydev.core\pysrc\pydevd_attach_to_process\winappdbg\win32\kernel32.py", line 3742, in VirtualQueryEx
    #     raise ctypes.WinError()
    # PermissionError: [WinError 5] Access is denied.
    # Process finished with exitValue: 1
    process.write_int(attach_info_address, attach_info)

    helper = GenShellCodeHelper(is_64)
    if is_64:
        # Interesting read: http://msdn.microsoft.com/en-us/library/ms235286.aspx
        # Overview of x64 Calling Conventions (for windows: Linux is different!)
        # Register Usage: http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
        # The registers RAX, RCX, RDX, R8, R9, R10, R11 are considered volatile and must be considered destroyed on function calls (unless otherwise safety-provable by analysis such as whole program optimization).
        #
        # The registers RBX, RBP, RDI, RSI, RSP, R12, R13, R14, and R15 are considered nonvolatile and must be saved and restored by a function that uses them.
        #
        # Important: RCX: first int argument
        with helper.push('rdi'):  # This one REALLY must be pushed/poped
            with helper.push('rsp'):
                with helper.push('rbp'):
                    with helper.push('rbx'):
                        with helper.push('rdi'):  # Note: pop is automatic.
                            helper.mov_to_register_addr('rcx', helper.pack_address(code_address))
                            helper.mov_to_register_addr('rdx', helper.pack_address(attach_info_address))
                            helper.mov_to_register_addr('rbx', helper.pack_address(attach_func))
                            helper.call('rbx')
    else:
        with helper.push('eax'):  # Note: pop is automatic.
            with helper.push('ebp'):
                with helper.push('ebx'):
                    with helper.preserve_stack():
                        # Put our code as a parameter in the stack (on x86, we push parameters to
                        # the stack)
                        helper.push_addr(helper.pack_address(attach_info_address))
                        helper.push_addr(helper.pack_address(code_address))
                        helper.mov_to_register_addr('ebx', helper.pack_address(attach_func))
                        helper.call('ebx')

    helper.ret()

    code = helper.get_code()

    # Uncomment to see the disassembled version of what we just did...
    # with open('f.asm', 'wb') as stream:
    #     stream.write(code)
    #
    # exe = r'x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe'
    # if is_64:
    #     arch = '64'
    # else:
    #     arch = '32'
    #
    # subprocess.call((exe + ' -b %s f.asm' % arch).split())

    debug('Injecting code to target process')
    thread, _thread_address = process.inject_code(code, 0)

    timeout = None  # Could receive timeout in millis.
    debug('Waiting for code to complete')
    thread.wait(timeout)

    # return_code = process.read_int(attach_info_address)
    # if return_code == 0:
    #     print('Attach finished successfully.')
    # else:
    #     print('Error when injecting code in target process. Error code: %s (on windows)' % (return_code,))

    process.free(thread.pInjectedMemory)
    process.free(code_address)
    process.free(attach_info_address)
    return 0
def run_python_code_linux(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
    """Inject and run `python_code` inside process `pid` using gdb.

    Attaches gdb in batch mode, dlopen()s the bundled attach_linux_*.so in
    the target and calls its DoAttach() entry point with the code string.

    :param connect_debugger_tracing: accepted for signature symmetry with
        the Windows variant; NOTE(review): it is not forwarded to DoAttach
        here -- confirm whether that is intentional.
    :return: the (stdout, stderr) output captured from the gdb run.
    """
    assert '\'' not in python_code, 'Having a single quote messes with our command.'
    filedir = os.path.dirname(__file__)

    # Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
    #   i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
    #   i386:x86-64:nacl, i386:x64-32:nacl, auto.

    if is_python_64bit():
        suffix = 'amd64'
        arch = 'i386:x86-64'
    else:
        suffix = 'x86'
        arch = 'i386'

    debug('Attaching with arch: %s' % (arch,))

    target_dll = os.path.join(filedir, 'attach_linux_%s.so' % suffix)
    target_dll = os.path.abspath(os.path.normpath(target_dll))
    if not os.path.exists(target_dll):
        raise RuntimeError('Could not find dll file to inject: %s' % target_dll)

    # Note: we currently don't support debug builds
    is_debug = 0
    # Note that the space in the beginning of each line in the multi-line is important!
    cmd = [
        'gdb',
        '--nw',  # no gui interface
        '--nh',  # no ~/.gdbinit
        '--nx',  # no .gdbinit
        # '--quiet',  # no version number on startup
        '--pid',
        str(pid),
        '--batch',
        # '--batch-silent',
    ]

    cmd.extend(["--eval-command='set scheduler-locking off'"])  # If on we'll deadlock.

    cmd.extend(["--eval-command='set architecture %s'" % arch])

    cmd.extend([
        "--eval-command='call dlopen(\"%s\", 2)'" % target_dll,
        "--eval-command='call (int)DoAttach(%s, \"%s\", %s)'" % (
            is_debug, python_code, show_debug_info)
    ])

    # print ' '.join(cmd)

    env = os.environ.copy()
    # Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
    # have the PYTHONPATH for a different python version or some forced encoding).
    env.pop('PYTHONIOENCODING', None)
    env.pop('PYTHONPATH', None)
    debug('Running: %s' % (' '.join(cmd)))
    p = subprocess.Popen(
        ' '.join(cmd),
        shell=True,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    debug('Running gdb in target process.')
    out, err = p.communicate()
    debug('stdout: %s' % (out,))
    debug('stderr: %s' % (err,))
    return out, err
def find_helper_script(filedir, script_name):
    """Return the normalized path of a helper script under linux_and_mac/.

    :raises RuntimeError: if the script does not exist on disk.
    """
    candidate = os.path.normpath(os.path.join(filedir, 'linux_and_mac', script_name))
    if not os.path.exists(candidate):
        raise RuntimeError('Could not find helper script: %s' % candidate)
    return candidate
def run_python_code_mac(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
    """Inject and run `python_code` inside process `pid` using lldb (macOS).

    Attaches lldb, imports the lldb_prepare.py helper and calls its
    load_lib_and_attach command with the bundled attach_*.dylib.

    :param connect_debugger_tracing: accepted for signature symmetry with
        the Windows variant; NOTE(review): it is not passed to
        load_lib_and_attach here -- confirm whether that is intentional.
    :return: the (stdout, stderr) output captured from the lldb run.
    """
    assert '\'' not in python_code, 'Having a single quote messes with our command.'
    filedir = os.path.dirname(__file__)

    # Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
    #   i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
    #   i386:x86-64:nacl, i386:x64-32:nacl, auto.

    if is_python_64bit():
        suffix = 'x86_64.dylib'
        arch = 'i386:x86-64'
    else:
        suffix = 'x86.dylib'
        arch = 'i386'

    debug('Attaching with arch: %s'% (arch,))

    target_dll = os.path.join(filedir, 'attach_%s' % suffix)
    target_dll = os.path.normpath(target_dll)
    if not os.path.exists(target_dll):
        raise RuntimeError('Could not find dll file to inject: %s' % target_dll)

    lldb_prepare_file = find_helper_script(filedir, 'lldb_prepare.py')
    # Note: we currently don't support debug builds
    is_debug = 0
    # Note that the space in the beginning of each line in the multi-line is important!
    cmd = [
        'lldb',
        '--no-lldbinit',  # Do not automatically parse any '.lldbinit' files.
        # '--attach-pid',
        # str(pid),
        # '--arch',
        # arch,
        '--script-language',
        'Python'
        # '--batch-silent',
    ]

    cmd.extend([
        "-o 'process attach --pid %d'" % pid,
        "-o 'command script import \"%s\"'" % (lldb_prepare_file,),
        "-o 'load_lib_and_attach \"%s\" %s \"%s\" %s'" % (target_dll,
            is_debug, python_code, show_debug_info),
    ])

    cmd.extend([
        "-o 'process detach'",
        "-o 'script import os; os._exit(1)'",
    ])

    # print ' '.join(cmd)

    env = os.environ.copy()
    # Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
    # have the PYTHONPATH for a different python version or some forced encoding).
    env.pop('PYTHONIOENCODING', None)
    env.pop('PYTHONPATH', None)
    debug('Running: %s' % (' '.join(cmd)))
    p = subprocess.Popen(
        ' '.join(cmd),
        shell=True,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    debug('Running lldb in target process.')
    out, err = p.communicate()
    debug('stdout: %s' % (out,))
    debug('stderr: %s' % (err,))
    return out, err
# Select the platform-appropriate implementation once, at import time, so
# callers can simply use run_python_code(...) regardless of the OS.
if sys.platform == 'win32':
    run_python_code = run_python_code_windows
elif is_mac():
    run_python_code = run_python_code_mac
else:
    run_python_code = run_python_code_linux
def test():
    """Manual smoke test: spawn a looping child interpreter and inject a print.

    The child prints its pid and then loops forever printing dots; if the
    injection works, "It worked!" appears in the child's output. The child
    is killed after a short wait.
    """
    print('Running with: %s' % (sys.executable,))
    code = '''
import os, time, sys
print(os.getpid())
#from threading import Thread
#Thread(target=str).start()
if __name__ == '__main__':
    while True:
        time.sleep(.5)
        sys.stdout.write('.\\n')
        sys.stdout.flush()
'''

    p = subprocess.Popen([sys.executable, '-u', '-c', code])
    try:
        code = 'print("It worked!")\n'

        # Real code will be something as:
        # code = '''import sys;sys.path.append(r'X:\winappdbg-code\examples'); import imported;'''
        run_python_code(p.pid, python_code=code)
        # Give the target a moment to execute the injected code.
        time.sleep(3)
    finally:
        p.kill()
def main(args):
    """Attach to a process and run code in it.

    args[0] is the target pid; any remaining arguments are code fragments
    that get joined with ';' before injection.
    """
    pid = int(args[0])
    del args[0]
    # Note: on Linux the python code may not have a single quote char: '
    run_python_code(pid, ';'.join(args))
if __name__ == '__main__':
    args = sys.argv[1:]
    if not args:
        print('Expected pid and Python code to execute in target process.')
    else:
        # '--test' runs the built-in smoke test instead of attaching to a pid.
        if '--test' == args[0]:
            test()
        else:
            main(args)
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class EndUserList(ListResource):
    """List resource for End Users under Regulatory Compliance (Numbers v2)."""

    def __init__(self, version):
        """
        Initialize the EndUserList.

        :param Version version: Version that contains the resource
        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserList
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserList
        """
        super(EndUserList, self).__init__(version)

        # The list endpoint has no path placeholders.
        self._solution = {}
        self._uri = '/RegulatoryCompliance/EndUsers'

    def create(self, friendly_name, type, attributes=values.unset):
        """
        Create the EndUserInstance.

        :param unicode friendly_name: The string that you assigned to describe the resource
        :param EndUserInstance.Type type: The type of end user of the Bundle resource
        :param dict attributes: The set of parameters that compose the End User resource
        :returns: The created EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        form = values.of({
            'FriendlyName': friendly_name,
            'Type': type,
            'Attributes': serialize.object(attributes),
        })

        created = self._version.create(method='POST', uri=self._uri, data=form)

        return EndUserInstance(self._version, created)

    def stream(self, limit=None, page_size=None):
        """
        Stream EndUserInstance records lazily from the API.

        Records are fetched page by page as the generator is consumed, so
        this is memory efficient even for large result sets.

        :param int limit: Upper limit for the number of records to return;
                          None means no limit
        :param int page_size: Number of records to fetch per request; when
                              unset an efficient default is chosen
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        first_page = self.page(page_size=limits['page_size'])

        return self._version.stream(first_page, limits['limit'])

    def list(self, limit=None, page_size=None):
        """
        Eagerly read EndUserInstance records from the API into a list.

        Unlike stream(), all records (up to `limit`) are loaded into memory
        before returning.

        :param int limit: Upper limit for the number of records to return;
                          None means no limit
        :param int page_size: Number of records to fetch per request
        :returns: list of EndUserInstance results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of EndUserInstance records from the API.

        The request is executed immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50
        :returns: Page of EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserPage
        """
        params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size})

        response = self._version.page(method='GET', uri=self._uri, params=params)

        return EndUserPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of EndUserInstance records by its URL.

        The request is executed immediately.

        :param str target_url: API-generated URL for the requested results page
        :returns: Page of EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserPage
        """
        response = self._version.domain.twilio.request('GET', target_url)

        return EndUserPage(self._version, response, self._solution)

    def get(self, sid):
        """
        Constructs a EndUserContext for the given sid.

        :param sid: The unique string that identifies the resource
        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        """
        return EndUserContext(self._version, sid=sid)

    def __call__(self, sid):
        """
        Constructs a EndUserContext for the given sid (shorthand for get()).

        :param sid: The unique string that identifies the resource
        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        """
        return EndUserContext(self._version, sid=sid)

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Numbers.V2.EndUserList>'
class EndUserPage(Page):
    """One page of End User results returned by the API."""

    def __init__(self, version, response, solution):
        """
        Initialize the EndUserPage.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserPage
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserPage
        """
        super(EndUserPage, self).__init__(version, response)

        # Path solution inherited from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an EndUserInstance from one payload dict of this page.

        :param dict payload: Payload response from the API
        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        return EndUserInstance(self._version, payload)

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Numbers.V2.EndUserPage>'
class EndUserContext(InstanceContext):
    """Context for fetch/update/delete operations on a single End User."""

    def __init__(self, version, sid):
        """
        Initialize the EndUserContext.

        :param Version version: Version that contains the resource
        :param sid: The unique string that identifies the resource
        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        """
        super(EndUserContext, self).__init__(version)

        # The sid fills the single placeholder in the instance URI.
        self._solution = {'sid': sid}
        self._uri = '/RegulatoryCompliance/EndUsers/{sid}'.format(**self._solution)

    def fetch(self):
        """
        Fetch the EndUserInstance.

        :returns: The fetched EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)

        return EndUserInstance(self._version, payload, sid=self._solution['sid'])

    def update(self, friendly_name=values.unset, attributes=values.unset):
        """
        Update the EndUserInstance.

        :param unicode friendly_name: The string that you assigned to describe the resource
        :param dict attributes: The set of parameters that compose the End User resource
        :returns: The updated EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        form = values.of({
            'FriendlyName': friendly_name,
            'Attributes': serialize.object(attributes),
        })

        payload = self._version.update(method='POST', uri=self._uri, data=form)

        return EndUserInstance(self._version, payload, sid=self._solution['sid'])

    def delete(self):
        """
        Deletes the EndUserInstance.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete(method='DELETE', uri=self._uri)

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Numbers.V2.EndUserContext {}>'.format(details)
class EndUserInstance(InstanceResource):
    """Represents a Regulatory Compliance End User resource."""

    class Type(object):
        # Allowed values for the resource's `type` field.
        INDIVIDUAL = "individual"
        BUSINESS = "business"

    def __init__(self, version, payload, sid=None):
        """
        Initialize the EndUserInstance

        :returns: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        super(EndUserInstance, self).__init__(version)

        # Marshaled Properties: copy the API response payload into a plain
        # dict; date fields are parsed from ISO 8601 strings.
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'friendly_name': payload.get('friendly_name'),
            'type': payload.get('type'),
            'attributes': payload.get('attributes'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'url': payload.get('url'),
        }

        # Context: created lazily by the `_proxy` property.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: EndUserContext for this EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserContext
        """
        if self._context is None:
            self._context = EndUserContext(self._version, sid=self._solution['sid'], )
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def friendly_name(self):
        """
        :returns: The string that you assigned to describe the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def type(self):
        """
        :returns: The type of end user of the Bundle resource
        :rtype: EndUserInstance.Type
        """
        return self._properties['type']

    @property
    def attributes(self):
        """
        :returns: The set of parameters that compose the End User resource
        :rtype: dict
        """
        return self._properties['attributes']

    @property
    def date_created(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def url(self):
        """
        :returns: The absolute URL of the End User resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch the EndUserInstance

        :returns: The fetched EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        return self._proxy.fetch()

    def update(self, friendly_name=values.unset, attributes=values.unset):
        """
        Update the EndUserInstance

        :param unicode friendly_name: The string that you assigned to describe the resource
        :param dict attributes: The set of parameters that compose the End User resource
        :returns: The updated EndUserInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.end_user.EndUserInstance
        """
        return self._proxy.update(friendly_name=friendly_name, attributes=attributes, )

    def delete(self):
        """
        Deletes the EndUserInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Numbers.V2.EndUserInstance {}>'.format(context)
|
|
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra import InvalidRequest
from cassandra.cqlengine.management import sync_table, drop_table
from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine.models import Model
from uuid import uuid4
from cassandra.cqlengine import columns
import mock
from cassandra.cqlengine.connection import get_session
from tests.integration import CASSANDRA_VERSION
class TestTTLModel(Model):
    """Model used to exercise per-query/per-instance TTL behaviour."""
    # uuid4 is itself a callable, so it can be passed directly as the default
    # factory; wrapping it in a lambda was redundant.
    id = columns.UUID(primary_key=True, default=uuid4)
    count = columns.Integer()
    text = columns.Text(required=False)
class BaseTTLTest(BaseCassEngTestCase):
    """Creates/drops the TTL test table once per TestCase class."""

    @classmethod
    def setUpClass(cls):
        super(BaseTTLTest, cls).setUpClass()
        sync_table(TestTTLModel)

    @classmethod
    def tearDownClass(cls):
        super(BaseTTLTest, cls).tearDownClass()
        drop_table(TestTTLModel)
class TestDefaultTTLModel(Model):
    """Model whose table carries a default TTL of 20 seconds."""
    __options__ = {'default_time_to_live': 20}
    # uuid4 is itself a callable, so it can be passed directly as the default
    # factory; wrapping it in a lambda was redundant.
    id = columns.UUID(primary_key=True, default=uuid4)
    count = columns.Integer()
    text = columns.Text(required=False)
class BaseDefaultTTLTest(BaseCassEngTestCase):
    """Creates/drops both TTL test tables, but only on servers that support
    table-level default TTLs (Cassandra 2.0+)."""

    @classmethod
    def setUpClass(cls):
        # NOTE(review): this is a lexicographic string comparison, which breaks
        # for multi-digit majors (e.g. '10.0' < '2.0' is True) -- confirm how
        # CASSANDRA_VERSION is normalized before relying on this guard.
        if CASSANDRA_VERSION >= '2.0':
            super(BaseDefaultTTLTest, cls).setUpClass()
            sync_table(TestDefaultTTLModel)
            sync_table(TestTTLModel)

    @classmethod
    def tearDownClass(cls):
        if CASSANDRA_VERSION >= '2.0':
            super(BaseDefaultTTLTest, cls).tearDownClass()
            drop_table(TestDefaultTTLModel)
            drop_table(TestTTLModel)
class TTLQueryTests(BaseTTLTest):
    # NOTE(review): both methods are placeholders -- the docstring is the entire
    # body, so nothing is actually asserted here.
    def test_update_queryset_ttl_success_case(self):
        """ tests that ttls on querysets work as expected """

    def test_select_ttl_failure(self):
        """ tests that ttls on select queries raise an exception """
class TTLModelTests(BaseTTLTest):

    def test_ttl_included_on_create(self):
        """ tests that ttls on models work as expected """
        session = get_session()

        # Intercept the driver call so the generated CQL can be inspected.
        with mock.patch.object(session, 'execute') as execute_mock:
            TestTTLModel.ttl(60).create(text="hello blake")

        executed_query = execute_mock.call_args[0][0].query_string
        self.assertIn("USING TTL", executed_query)

    def test_queryset_is_returned_on_class(self):
        """
        ensures we get a queryset descriptor back
        """
        queryset = TestTTLModel.ttl(60)
        self.assertTrue(isinstance(queryset, TestTTLModel.__queryset__), type(queryset))
class TTLInstanceUpdateTest(BaseTTLTest):

    def test_update_includes_ttl(self):
        # Intercept the driver call so the generated CQL can be inspected.
        session = get_session()
        instance = TestTTLModel.create(text="goodbye blake")

        with mock.patch.object(session, 'execute') as execute_mock:
            instance.ttl(60).update(text="goodbye forever")

        executed_query = execute_mock.call_args[0][0].query_string
        self.assertIn("USING TTL", executed_query)

    def test_update_syntax_valid(self):
        # sanity test that ensures the TTL syntax is accepted by cassandra
        instance = TestTTLModel.create(text="goodbye blake")
        instance.ttl(60).update(text="goodbye forever")
class TTLInstanceTest(BaseTTLTest):
    def test_instance_is_returned(self):
        """
        ensures that we properly handle the instance.ttl(60).save() scenario

        :return:
        """
        o = TestTTLModel.create(text="whatever")
        o.text = "new stuff"
        # ttl() returns the object to operate on; keep the returned reference.
        o = o.ttl(60)
        self.assertEqual(60, o._ttl)

    def test_ttl_is_include_with_query_on_update(self):
        session = get_session()
        o = TestTTLModel.create(text="whatever")
        o.text = "new stuff"
        o = o.ttl(60)

        # Intercept the driver call so the generated CQL can be inspected.
        with mock.patch.object(session, 'execute') as m:
            o.save()

        query = m.call_args[0][0].query_string
        self.assertIn("USING TTL", query)
class TTLBlindUpdateTest(BaseTTLTest):

    def test_ttl_included_with_blind_update(self):
        # A "blind" update goes through the queryset by primary key, without
        # loading the row first; the TTL clause must still be emitted.
        session = get_session()
        created = TestTTLModel.create(text="whatever")
        created_id = created.id

        with mock.patch.object(session, 'execute') as execute_mock:
            TestTTLModel.objects(id=created_id).ttl(60).update(text="bacon")

        executed_query = execute_mock.call_args[0][0].query_string
        self.assertIn("USING TTL", executed_query)
@unittest.skipIf(CASSANDRA_VERSION < '2.0', "default_time_to_Live was introduce in C* 2.0, currently running {0}".format(CASSANDRA_VERSION))
class TTLDefaultTest(BaseDefaultTTLTest):
    """Tests around table-level default TTLs (Cassandra 2.0+)."""

    def get_default_ttl(self, table_name):
        # Read the table's default TTL from the schema tables; newer servers
        # use system_schema.tables, older ones system.schema_columnfamilies.
        session = get_session()
        try:
            default_ttl = session.execute("SELECT default_time_to_live FROM system_schema.tables "
                                          "WHERE keyspace_name = 'cqlengine_test' AND table_name = '{0}'".format(table_name))
        except InvalidRequest:
            default_ttl = session.execute("SELECT default_time_to_live FROM system.schema_columnfamilies "
                                          "WHERE keyspace_name = 'cqlengine_test' AND columnfamily_name = '{0}'".format(table_name))
        return default_ttl[0]['default_time_to_live']

    def test_default_ttl_not_set(self):
        session = get_session()
        o = TestTTLModel.create(text="some text")
        tid = o.id

        self.assertIsNone(o._ttl)

        default_ttl = self.get_default_ttl('test_ttlmodel')
        self.assertEqual(default_ttl, 0)

        with mock.patch.object(session, 'execute') as m:
            TestTTLModel.objects(id=tid).update(text="aligators")

        query = m.call_args[0][0].query_string
        self.assertNotIn("USING TTL", query)

    def test_default_ttl_set(self):
        session = get_session()
        o = TestDefaultTTLModel.create(text="some text on ttl")
        tid = o.id

        # Should not be set, it's handled by Cassandra
        self.assertIsNone(o._ttl)

        default_ttl = self.get_default_ttl('test_default_ttlmodel')
        self.assertEqual(default_ttl, 20)

        with mock.patch.object(session, 'execute') as m:
            # NOTE(review): tid belongs to a TestDefaultTTLModel row, but the
            # update targets TestTTLModel -- looks like this should be
            # TestDefaultTTLModel.objects(...); confirm against test intent.
            TestTTLModel.objects(id=tid).update(text="aligators expired")

        # Should not be set either
        query = m.call_args[0][0].query_string
        self.assertNotIn("USING TTL", query)

    def test_default_ttl_modify(self):
        session = get_session()
        default_ttl = self.get_default_ttl('test_default_ttlmodel')
        self.assertEqual(default_ttl, 20)

        # sync_table propagates the changed __options__ to the live schema.
        TestDefaultTTLModel.__options__ = {'default_time_to_live': 10}
        sync_table(TestDefaultTTLModel)

        default_ttl = self.get_default_ttl('test_default_ttlmodel')
        self.assertEqual(default_ttl, 10)

        # Restore default TTL
        TestDefaultTTLModel.__options__ = {'default_time_to_live': 20}
        sync_table(TestDefaultTTLModel)

    def test_override_default_ttl(self):
        session = get_session()
        o = TestDefaultTTLModel.create(text="some text on ttl")
        tid = o.id

        # NOTE(review): the return value of ttl() is discarded here, unlike
        # TTLInstanceTest which reassigns it -- confirm ttl() mutates in place.
        o.ttl(3600)
        self.assertEqual(o._ttl, 3600)

        with mock.patch.object(session, 'execute') as m:
            TestDefaultTTLModel.objects(id=tid).ttl(None).update(text="aligators expired")

        query = m.call_args[0][0].query_string
        self.assertNotIn("USING TTL", query)
|
|
"""
Main loop (early stopping).
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
class Unbuffered:
    """File-like wrapper that flushes the underlying stream after every write."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Force the bytes out immediately so log output is never buffered.
        self.stream.write(data)
        self.stream.flush()

    def __getattr__(self, name):
        # Everything not defined here is delegated to the wrapped stream.
        return getattr(self.stream, name)
import sys
import traceback
sys.stdout = Unbuffered(sys.stdout)
# Generic imports
import numpy
import cPickle
import gzip
import time
import signal
from groundhog.utils import print_mem, print_time
class MainLoop(object):
    """Training driver with early stopping, periodic validation/checkpointing,
    and jobman channel reporting. Python 2 only (print statements, cPickle)."""

    def __init__(self,
                 train_data,
                 valid_data,
                 test_data,
                 model,
                 algo,
                 state,
                 channel,
                 hooks=None,
                 reset=-1,
                 train_cost=False,
                 validate_postprocess=None,
                 l2_params=False):
        """
        :type train_data: groundhog dataset object
        :param train_data: data iterator used for training

        :type valid_data: groundhog dataset object
        :param valid_data: data iterator used for validation

        :type test_data: groundhog dataset object
        :param test_data: data iterator used for testing

        :type model: groundhog model object
        :param model: the model that is supposed to be trained

        :type algo: groundhog trainer object
        :param algo: optimization algorithm used to optimized the model

        :type state: dictionary (or jobman dictionary)
        :param state: dictionary containing various hyper-param choices,
            but also the current state of the job (the dictionary is used by
            jobman to fill in a psql table)

        :type channel: jobman handler
        :param channel: jobman handler used to communicate with a psql
            server

        :type hooks: function or list of functions
        :param hooks: list of functions that are called every `hookFreq`
            steps to carry on various diagnostics

        :type reset: int
        :param reset: if larger than 0, the train_data iterator position is
            reseted to 0 every `reset` number of updates

        :type train_cost: bool
        :param train_cost: flag saying if the training error (over the
            entire training set) should be computed every time the validation
            error is computed

        :type validate_postprocess: None or function
        :param validate_postprocess: function called on the validation cost
            every time before applying the logic of the early stopper

        :type l2_params: bool
        :param l2_params: save parameter norms at each step
        """
        ###################
        # Step 0. Set parameters
        ###################
        self.train_data = train_data
        self.valid_data = valid_data
        self.test_data = test_data
        self.state = state
        self.channel = channel
        self.model = model
        self.algo = algo
        self.valid_id = 0
        # 1e21 acts as "+infinity" so the first validation always improves.
        self.old_cost = 1e21
        self.validate_postprocess = validate_postprocess
        self.patience = state['patience']
        self.l2_params = l2_params

        self.train_cost = train_cost

        # Allow a single hook function to be passed instead of a list.
        if hooks and not isinstance(hooks, (list, tuple)):
            hooks = [hooks]

        # Negative frequencies mean "once per full pass over the data".
        if self.state['validFreq'] < 0:
            self.state['validFreq'] = self.train_data.get_length()
            print 'Validation computed every', self.state['validFreq']
        elif self.state['validFreq'] > 0:
            print 'Validation computed every', self.state['validFreq']
        if self.state['trainFreq'] < 0:
            self.state['trainFreq'] = self.train_data.get_length()
            print 'Train frequency set to ', self.state['trainFreq']

        state['bvalidcost'] = 1e21
        for (pname, _) in model.properties:
            self.state[pname] = 1e20

        # Pre-allocate history arrays for everything the trainer reports.
        n_elems = state['loopIters'] // state['trainFreq'] + 1
        self.timings = {'step' : 0, 'next_offset' : -1}
        for name in self.algo.return_names:
            self.timings[name] = numpy.zeros((n_elems,), dtype='float32')
        if self.l2_params:
            for param in model.params:
                self.timings["l2_" + param.name] = numpy.zeros(n_elems, dtype="float32")
        # Separate (coarser) history arrays for validation-time metrics.
        n_elems = state['loopIters'] // state['validFreq'] + 1
        for pname in model.valid_costs:
            self.state['valid'+pname] = 1e20
            self.state['test'+pname] = 1e20
            self.timings['fulltrain'+pname] = numpy.zeros((n_elems,),
                                                          dtype='float32')
            self.timings['valid'+pname] = numpy.zeros((n_elems,),
                                                      dtype='float32')
            self.timings['test'+pname] = numpy.zeros((n_elems,),
                                                     dtype='float32')
        if self.channel is not None:
            self.channel.save()

        self.hooks = hooks
        self.reset = reset

        self.start_time = time.time()
        self.batch_start_time = time.time()

    def validate(self):
        """Run the model on the validation set, record metrics, and trigger
        test()/checkpointing when the validation cost improves."""
        rvals = self.model.validate(self.valid_data)
        msg = '** %d validation:' % self.valid_id
        self.valid_id += 1
        self.batch_start_time = time.time()
        pos = self.step // self.state['validFreq']
        for k, v in rvals:
            msg = msg + ' ' + k + ':%f ' % float(v)
            self.timings['valid'+k][pos] = float(v)
            self.state['valid'+k] = float(v)
        msg += 'whole time %s' % print_time(time.time() - self.start_time)
        msg += ' patience %d' % self.patience
        print msg

        if self.train_cost:
            # Optionally also evaluate over the full training set.
            valid_rvals = rvals
            rvals = self.model.validate(self.train_data, True)
            msg = '** %d train:' % (self.valid_id - 1)
            for k, v in rvals:
                msg = msg + ' ' + k + ':%6.3f ' % float(v)
                # NOTE(review): test() writes into the history array at [pos];
                # here the whole timings entry is replaced by a scalar --
                # looks like it should be self.timings['fulltrain'+k][pos];
                # confirm before changing.
                self.timings['fulltrain' + k] = float(v)
                self.state['fulltrain' + k] = float(v)
            print msg
            rvals = valid_rvals

        self.state['validtime'] = float(time.time() - self.start_time)/60.
        # Just pick the first thing that the cost returns
        cost = rvals[0][1]
        if self.state['bvalidcost'] > cost:
            # New best validation cost: record it and evaluate on test data.
            self.state['bvalidcost'] = float(cost)
            for k, v in rvals:
                self.state['bvalid'+k] = float(v)
            self.state['bstep'] = int(self.step)
            self.state['btime'] = int(time.time() - self.start_time)
            self.test()
        else:
            print 'No testing', cost, '>', self.state['bvalidcost']
            for k, v in self.state.items():
                if 'test' in k:
                    print k, v
        print_mem('validate')
        if self.validate_postprocess:
            return self.validate_postprocess(cost)
        return cost

    def test(self):
        """Snapshot the current parameters as the best ones and evaluate on
        the test set (if any)."""
        self.model.best_params = [(x.name, x.get_value()) for x in
                                  self.model.params]
        numpy.savez(self.state['prefix'] + '_best_params',
                    **dict(self.model.best_params))
        self.state['best_params_pos'] = self.step
        if self.test_data is not None:
            rvals = self.model.validate(self.test_data)
        else:
            rvals = []
        msg = '>>> Test'
        pos = self.step // self.state['validFreq']
        for k, v in rvals:
            msg = msg + ' ' + k + ':%6.3f ' % v
            self.timings['test' + k][pos] = float(v)
            self.state['test' + k] = float(v)
        print msg
        self.state['testtime'] = float(time.time()-self.start_time)/60.

    def save(self):
        """Checkpoint timings, model parameters, and job state to disk."""
        start = time.time()
        print "Saving the model..."

        # ignore keyboard interrupt while saving
        s = signal.signal(signal.SIGINT, signal.SIG_IGN)
        numpy.savez(self.state['prefix']+'timing.npz',
                    **self.timings)
        if self.state['overwrite']:
            self.model.save(self.state['prefix']+'model.npz')
        else:
            self.model.save(self.state['prefix'] +
                            'model%d.npz' % self.save_iter)
        # NOTE(review): the file handle opened here is never explicitly
        # closed; CPython closes it on GC, but a with-block would be safer.
        cPickle.dump(self.state, open(self.state['prefix']+'state.pkl', 'w'))
        self.save_iter += 1
        signal.signal(signal.SIGINT, s)

        print "Model saved, took {}".format(time.time() - start)

    # FIXME
    def load(self, model_path=None, timings_path=None):
        """Restore model parameters and timing history from a checkpoint;
        corrupted files are reported but not fatal."""
        if model_path is None:
            model_path = self.state['prefix'] + 'model.npz'
        if timings_path is None:
            timings_path = self.state['prefix'] + 'timing.npz'
        try:
            self.model.load(model_path)
        except Exception:
            print 'mainLoop: Corrupted model file'
            traceback.print_exc()
        try:
            self.timings = dict(numpy.load(timings_path).iteritems())
        except Exception:
            print 'mainLoop: Corrupted timings file'
            traceback.print_exc()

    def main(self):
        """Run the training loop until an iteration/time/cost/lr bound or
        KeyboardInterrupt stops it."""
        assert self.reset == -1

        print_mem('start')
        self.state['gotNaN'] = 0
        start_time = time.time()
        self.start_time = start_time
        self.batch_start_time = time.time()

        # Resume from a previously saved step, if any.
        self.step = int(self.timings['step'])
        self.algo.step = self.step

        self.save_iter = 0
        self.save()
        if self.channel is not None:
            self.channel.save()
        self.save_time = time.time()

        last_cost = 1.
        self.state['clr'] = self.state['lr']
        self.train_data.start(self.timings['next_offset']
                              if 'next_offset' in self.timings
                              else -1)

        # Stop on: max iterations, cost below target, wall-clock budget,
        # or learning rate decayed below its floor.
        while (self.step < self.state['loopIters'] and
               last_cost > .1*self.state['minerr'] and
               (time.time() - start_time)/60. < self.state['timeStop'] and
               self.state['lr'] > self.state['minlr']):
            # Periodic checkpoint, driven by wall-clock minutes.
            if self.step > 0 and (time.time() - self.save_time)/60. >= self.state['saveFreq']:
                self.save()
                if self.channel is not None:
                    self.channel.save()
                self.save_time = time.time()
            st = time.time()
            try:
                # One optimizer step; rvals carries cost and diagnostics.
                rvals = self.algo()
                self.state['traincost'] = float(rvals['cost'])
                self.state['step'] = self.step
                last_cost = rvals['cost']
                for name in rvals.keys():
                    self.timings[name][self.step] = float(numpy.array(rvals[name]))
                if self.l2_params:
                    for param in self.model.params:
                        self.timings["l2_" + param.name][self.step] =\
                            numpy.mean(param.get_value() ** 2) ** 0.5

                if (numpy.isinf(rvals['cost']) or
                   numpy.isnan(rvals['cost'])) and\
                   self.state['on_nan'] == 'raise':
                    # NaN/inf cost: checkpoint and force the loop to exit
                    # (last_cost = 0 fails the while condition).
                    self.state['gotNaN'] = 1
                    self.save()
                    if self.channel:
                        self.channel.save()
                    print 'Got NaN while training'
                    last_cost = 0
                if self.valid_data is not None and\
                   self.step % self.state['validFreq'] == 0 and\
                   self.step > 1:
                    valcost = self.validate()
                    # Early-stopping bookkeeping: lose patience when the cost
                    # worsens beyond the threshold, restore it on improvement.
                    if valcost > self.old_cost * self.state['cost_threshold']:
                        self.patience -= 1
                        if 'lr_start' in self.state and\
                           self.state['lr_start'] == 'on_error':
                            self.state['lr_start'] = self.step
                    elif valcost < self.old_cost:
                        self.patience = self.state['patience']
                        self.old_cost = valcost

                    if self.state['divide_lr'] and \
                       self.patience < 1:
                        # Patience exhausted: divide lr by the configured
                        # factor and roll parameters back to the best seen.
                        self.algo.lr = self.algo.lr / self.state['divide_lr']
                        bparams = dict(self.model.best_params)
                        self.patience = self.state['patience']
                        for p in self.model.params:
                            p.set_value(bparams[p.name])

                if self.state['hookFreq'] > 0 and \
                   self.step % self.state['hookFreq'] == 0 and \
                   self.hooks:
                    [fn() for fn in self.hooks]
                if self.reset > 0 and self.step > 1 and \
                   self.step % self.reset == 0:
                    print 'Resetting the data iterator'
                    self.train_data.reset()

                self.step += 1
                self.timings['step'] = self.step
                self.timings['next_offset'] = self.train_data.next_offset
            except KeyboardInterrupt:
                break

        # Final validation + checkpoint and a summary report.
        self.state['wholetime'] = float(time.time() - start_time)
        if self.valid_data is not None:
            self.validate()
        self.save()
        if self.channel:
            self.channel.save()
        print 'Took', (time.time() - start_time)/60., 'min'
        # NOTE(review): assumes the trainer reports 'time_step' and
        # 'log2_p_expl' in return_names -- confirm for every algo used.
        avg_step = self.timings['time_step'][:self.step].mean()
        avg_cost2expl = self.timings['log2_p_expl'][:self.step].mean()
        print "Average step took {}".format(avg_step)
        print "That amounts to {} sentences in a day".format(1 / avg_step * 86400 * self.state['bs'])
        print "Average log2 per example is {}".format(avg_cost2expl)
|
|
""" pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import pickle
from _pydevd_bundle.pydevd_constants import dict_contains, get_frame, get_thread_id, xrange
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
from _pydev_imps._pydev_saved_modules import thread
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, quote, execfile
from _pydevd_bundle.pydevd_utils import to_string
# Unique default marker used to distinguish "no value passed" from None.
SENTINEL_VALUE = []

# -------------------------------------------------------------------------- defining true and false for earlier versions
# On pre-bool Pythons referencing False raises NameError, in which case
# integer stand-ins are injected into __builtin__.
try:
    __setFalse = False
except:
    import __builtin__
    setattr(__builtin__, 'True', 1)
    setattr(__builtin__, 'False', 0)
# ------------------------------------------------------------------------------------------------------ class for errors
# Raised when a variable/frame lookup must fail (e.g. wrong thread).
class VariableError(RuntimeError): pass


# Not raised in this module; presumably part of the public error API -- confirm.
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
    """Debug helper: write every frame of the current thread's stack to stdout.

    Must be called from the thread identified by thread_id.
    """
    sys.stdout.write('dumping frames\n')
    if thread_id != get_thread_id(threading.currentThread()):
        raise VariableError("find_frame: must execute on same thread")

    curFrame = get_frame()
    for frame in _iter_frames(curFrame):
        # NOTE(review): frame objects are not picklable in CPython, so
        # pickle.dumps(frame) raises TypeError -- confirm intended usage.
        sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
    # Module-wide registry of extra (e.g. custom/stackless) frames.
    lock = thread.allocate_lock()
    additional_frames = {}  # dict of dicts: thread_id -> {frame_id: frame}
def add_additional_frame_by_id(thread_id, frames_by_id):
    """Register (replace) the additional frames for a thread."""
    AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id


addAdditionalFrameById = add_additional_frame_by_id  # Backward compatibility
def remove_additional_frame_by_id(thread_id):
    """Drop the additional frames for a thread (KeyError if not registered)."""
    del AdditionalFramesContainer.additional_frames[thread_id]


removeAdditionalFrameById = remove_additional_frame_by_id  # Backward compatibility
def has_additional_frames_by_id(thread_id):
    """Return True if additional frames are registered for thread_id."""
    return dict_contains(AdditionalFramesContainer.additional_frames, thread_id)
def get_additional_frames_by_id(thread_id):
    """Return the {frame_id: frame} dict for thread_id, or None."""
    return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
    """ returns a frame on the thread that has a given frame_id

    Lookup order: custom/registered frames first, then the live stack of the
    current thread (frame_id is matched against id(frame)). Returns None and
    reports to stderr when no frame matches.
    """
    try:
        curr_thread_id = get_thread_id(threading.currentThread())
        if thread_id != curr_thread_id:
            try:
                return get_custom_frame(thread_id, frame_id)  # I.e.: thread_id could be a stackless frame id + thread_id.
            except:
                pass

            raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))

        lookingFor = int(frame_id)

        if AdditionalFramesContainer.additional_frames:
            if dict_contains(AdditionalFramesContainer.additional_frames, thread_id):
                frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)

                if frame is not None:
                    return frame

        curFrame = get_frame()
        if frame_id == "*":
            return curFrame  # any frame is specified with "*"

        frameFound = None

        for frame in _iter_frames(curFrame):
            if lookingFor == id(frame):
                frameFound = frame
                del frame
                break

            del frame

        # Important: python can hold a reference to the frame from the current context
        # if an exception is raised, so, if we don't explicitly add those deletes
        # we might have those variables living much more than we'd want to.

        # I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
        # need to call sys.exc_clear())
        del curFrame

        if frameFound is None:
            # Build a diagnostic listing of all live frame ids, 5 per line.
            msgFrames = ''
            i = 0

            for frame in _iter_frames(get_frame()):
                i += 1
                msgFrames += str(id(frame))
                if i % 5 == 0:
                    msgFrames += '\n'
                else:
                    msgFrames += ' - '

            errMsg = '''find_frame: frame not found.
Looking for thread_id:%s, frame_id:%s
Current thread_id:%s, available frames:
%s\n
''' % (thread_id, lookingFor, curr_thread_id, msgFrames)

            sys.stderr.write(errMsg)
            return None

        return frameFound
    except:
        import traceback
        traceback.print_exc()
        return None
def getVariable(thread_id, frame_id, scope, attrs):
    """
    returns the value of a variable

    :scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME

    BY_ID means we'll traverse the list of all objects alive to get the object.

    :attrs: after reaching the proper scope, we have to get the attributes until we find
            the proper location (i.e.: obj\tattr1\tattr2)

    :note: when BY_ID is used, the frame_id is considered the id of the object to find and
           not the frame (as we don't care about the frame in this case).
    """
    if scope == 'BY_ID':
        if thread_id != get_thread_id(threading.currentThread()):
            raise VariableError("getVariable: must execute on same thread")

        try:
            import gc
            objects = gc.get_objects()
        except:
            pass  # Not all python variants have it.
        else:
            frame_id = int(frame_id)
            for var in objects:
                if id(var) == frame_id:
                    if attrs is not None:
                        attrList = attrs.split('\t')
                        for k in attrList:
                            _type, _typeName, resolver = get_type(var)
                            var = resolver.resolve(var, k)

                    return var

        # If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
        sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
        return None

    frame = find_frame(thread_id, frame_id)
    if frame is None:
        return {}

    if attrs is not None:
        attrList = attrs.split('\t')
    else:
        attrList = []

    # BUGFIX: str.replace returns a new string (strings are immutable); the old
    # `for attr in attrList: attr.replace(...)` loop discarded the result, so
    # escaped tab markers were never decoded. Rebuild the list instead.
    attrList = [attr.replace("@_@TAB_CHAR@_@", '\t') for attr in attrList]

    if scope == 'EXPRESSION':
        for count in xrange(len(attrList)):
            if count == 0:
                # An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
                var = evaluate_expression(thread_id, frame_id, attrList[count], False)
            else:
                _type, _typeName, resolver = get_type(var)
                var = resolver.resolve(var, attrList[count])
    else:
        if scope == "GLOBAL":
            var = frame.f_globals
            del attrList[0]  # globals are special, and they get a single dummy unused attribute
        else:
            # in a frame access both locals and globals as Python does
            var = {}
            var.update(frame.f_globals)
            var.update(frame.f_locals)

        for k in attrList:
            _type, _typeName, resolver = get_type(var)
            var = resolver.resolve(var, k)

    return var
def resolve_compound_variable(thread_id, frame_id, scope, attrs):
    """ returns the value of the compound variable as a dictionary"""
    # Resolve the variable itself, then ask its type's resolver for the
    # child entries to display.
    var = getVariable(thread_id, frame_id, scope, attrs)

    try:
        _type, _typeName, resolver = get_type(var)
        return _typeName, resolver.get_dictionary(var)
    except:
        # On failure the error is written to stderr and None is returned
        # implicitly.
        sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
            thread_id, frame_id, scope, attrs,))
        traceback.print_exc()
def resolve_var(var, attrs):
    """Follow a tab-separated attribute chain from var and return the final
    value's display dictionary (None on error, with traceback printed)."""
    attr_chain = attrs.split('\t')
    for attr_name in attr_chain:
        _type, _type_name, resolver = get_type(var)
        var = resolver.resolve(var, attr_name)

    try:
        _type, _type_name, resolver = get_type(var)
        return resolver.get_dictionary(var)
    except:
        traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
    """
    We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.

    code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
    operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)

    Returns str(result) of the operation, or None (with traceback printed)
    on failure.
    """
    # Resolve the variable the operation should be applied to.
    expressionValue = getVariable(thread_id, frame_id, scope, attrs)

    try:
        namespace = {'__name__': '<custom_operation>'}
        if style == "EXECFILE":
            namespace['__file__'] = code_or_file
            execfile(code_or_file, namespace, namespace)
        else:  # style == EXEC
            namespace['__file__'] = '<customOperationCode>'
            Exec(code_or_file, namespace, namespace)

        # Look up the requested callable in the populated namespace and apply it.
        return str(namespace[operation_fn_name](expressionValue))
    except:
        traceback.print_exc()
def eval_in_context(expression, globals, locals):
    """Evaluate expression in the given namespaces; on failure return an
    ExceptionOnEvaluate wrapper (after attempting a name-mangling fallback)
    instead of raising."""
    result = None
    try:
        result = eval(expression, globals, locals)
    except Exception:
        s = StringIO()
        traceback.print_exc(file=s)
        result = s.getvalue()

        try:
            try:
                # Prefer the exception object itself over the formatted text.
                etype, value, tb = sys.exc_info()
                result = value
            finally:
                # Break reference cycles through the traceback.
                etype = value = tb = None
        except:
            pass

        result = ExceptionOnEvaluate(result)

        # Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
        try:
            if '__' in expression:
                # Try to handle '__' name mangling...
                split = expression.split('.')
                curr = locals.get(split[0])
                for entry in split[1:]:
                    if entry.startswith('__') and not hasattr(curr, entry):
                        entry = '_%s%s' % (curr.__class__.__name__, entry)
                    curr = getattr(curr, entry)

                result = curr
        except:
            pass

    return result
def evaluate_expression(thread_id, frame_id, expression, doExec):
    '''returns the result of the evaluated expression
    @param doExec: determines if we should do an exec or an eval

    In exec mode, expressions that compile as an eval are evaluated and
    printed to stdout (mirroring the interactive interpreter); everything
    else is exec'd and the frame locals are persisted.
    '''
    frame = find_frame(thread_id, frame_id)
    if frame is None:
        return

    # Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
    # (Names not resolved in generator expression in method)
    # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
    updated_globals = {}
    updated_globals.update(frame.f_globals)
    updated_globals.update(frame.f_locals)  # locals later because it has precedence over the actual globals

    try:
        # '@LINE@' is the wire-protocol escape for newlines.
        expression = str(expression.replace('@LINE@', '\n'))

        if doExec:
            try:
                # try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
                # it will have whatever the user actually did)
                compiled = compile(expression, '<string>', 'eval')
            except:
                Exec(expression, updated_globals, frame.f_locals)
                pydevd_save_locals.save_locals(frame)
            else:
                result = eval(compiled, updated_globals, frame.f_locals)
                if result is not None:  # Only print if it's not None (as python does)
                    sys.stdout.write('%s\n' % (result,))
            return
        else:
            return eval_in_context(expression, updated_globals, frame.f_locals)
    finally:
        # Should not be kept alive if an exception happens and this frame is kept in the stack.
        del updated_globals
        del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
    '''Changes some attribute in a given frame.

    The new value is `value` when explicitly provided, otherwise the result
    of evaluating `expression` in the frame. A plugin (if any) gets the first
    chance to perform the change. Returns the new value, or None on error.
    '''
    frame = find_frame(thread_id, frame_id)
    if frame is None:
        return

    try:
        expression = expression.replace('@LINE@', '\n')

        if dbg.plugin and value is SENTINEL_VALUE:
            result = dbg.plugin.change_variable(frame, attr, expression)
            if result:
                return result

        if attr[:7] == "Globals":
            # Strip the "Globals\t" prefix used by the wire protocol.
            attr = attr[8:]
            if attr in frame.f_globals:
                if value is SENTINEL_VALUE:
                    value = eval(expression, frame.f_globals, frame.f_locals)
                frame.f_globals[attr] = value
                return frame.f_globals[attr]
        else:
            if pydevd_save_locals.is_save_locals_available():
                if value is SENTINEL_VALUE:
                    value = eval(expression, frame.f_globals, frame.f_locals)
                frame.f_locals[attr] = value
                pydevd_save_locals.save_locals(frame)
                return frame.f_locals[attr]

            # default way (only works for changing it in the topmost frame)
            if value is SENTINEL_VALUE:
                value = eval(expression, frame.f_globals, frame.f_locals)
            result = value
            Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
            return result

    except Exception:
        traceback.print_exc()
# Hard cap on how many rows/columns are serialized to XML per request.
MAXIMUM_ARRAY_SIZE = 100
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
    """Serialize a (<=2-D) numpy array window to XML.

    roffset/coffset select the window origin; rows/cols the window size
    (-1/-1 means "whole array", capped at MAXIMUM_ARRAY_SIZE). `format` is a
    printf-style conversion without the leading '%'.
    """
    array, xml, r, c, f = array_to_meta_xml(array, name, format)
    format = '%' + f
    if rows == -1 and cols == -1:
        rows = r
        cols = c

    rows = min(rows, MAXIMUM_ARRAY_SIZE)
    cols = min(cols, MAXIMUM_ARRAY_SIZE)

    # there is no obvious rule for slicing (at least 5 choices)
    if len(array) == 1 and (rows > 1 or cols > 1):
        array = array[0]
    if array.size > len(array):
        # True 2-D case: apply both offsets and re-clamp to what remains.
        array = array[roffset:, coffset:]
        rows = min(rows, len(array))
        cols = min(cols, len(array[0]))
        if len(array) == 1:
            array = array[0]
    elif array.size == len(array):
        # 1-D case: only one of the offsets applies.
        if roffset == 0 and rows == 1:
            array = array[coffset:]
            cols = min(cols, len(array))
        elif coffset == 0 and cols == 1:
            array = array[roffset:]
            rows = min(rows, len(array))

    def get_value(row, col):
        # Index into the (possibly collapsed) array, treating 1-D data as
        # either a row or a column vector.
        value = array
        if rows == 1 or cols == 1:
            if rows == 1 and cols == 1:
                value = array[0]
            else:
                value = array[(col if rows == 1 else row)]
                if "ndarray" in str(type(value)):
                    value = value[0]
        else:
            value = array[row][col]
        return value
    xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)))
    return xml
def array_to_meta_xml(array, name, format):
    """Reduce `array` to at most 2-D, pick a default format when `format` is
    the bare '%', and return (array, header_xml, rows, cols, format)."""
    type = array.dtype.kind
    slice = name
    l = len(array.shape)

    # initial load, compute slice
    if format == '%':
        if l > 2:
            # Collapse leading dimensions by repeatedly taking element 0.
            slice += '[0]' * (l - 2)
            for r in range(l - 2):
                array = array[0]
        if type == 'f':
            format = '.5f'
        elif type == 'i' or type == 'u':
            format = 'd'
        else:
            format = 's'
    else:
        format = format.replace('%', '')

    l = len(array.shape)
    reslice = ""
    if l > 2:
        raise Exception("%s has more than 2 dimensions." % slice)
    elif l == 1:
        # special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
        # http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
        # explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
        # we use kind of a hack - get information about memory from C_CONTIGUOUS
        is_row = array.flags['C_CONTIGUOUS']

        if is_row:
            rows = 1
            cols = len(array)
            # NOTE(review): cols was just set to len(array), so this branch is
            # unreachable -- confirm whether a MAXIMUM_ARRAY_SIZE clamp was
            # intended here.
            if cols < len(array):
                reslice = '[0:%s]' % (cols)
            array = array[0:cols]
        else:
            cols = 1
            rows = len(array)
            # NOTE(review): same as above -- rows == len(array), never true.
            if rows < len(array):
                reslice = '[0:%s]' % (rows)
            array = array[0:rows]
    elif l == 2:
        rows = array.shape[-2]
        cols = array.shape[-1]
        # NOTE(review): rows/cols were just read from array.shape, so this
        # condition is always False -- confirm intended clamping.
        if cols < array.shape[-1] or rows < array.shape[-2]:
            reslice = '[0:%s, 0:%s]' % (rows, cols)
        array = array[0:rows, 0:cols]

    # avoid slice duplication
    if not slice.endswith(reslice):
        slice += reslice

    bounds = (0, 0)
    # Only numeric kinds (bool/int/uint/float/complex) have min/max bounds.
    if type in "biufc":
        bounds = (array.min(), array.max())
    return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def array_default_format(type):
    """Return the default printf-style format for a numpy dtype kind.

    Floats render fixed-point with 5 decimals, (un)signed integers as
    decimal integers, everything else as a string.
    """
    defaults = {'f': '.5f', 'i': 'd', 'u': 'd'}
    return defaults.get(type, 's')
def get_label(label):
    """Render an axis label as text; tuple labels (MultiIndex levels) are
    joined with '/'."""
    if isinstance(label, tuple):
        return '/'.join(str(part) for part in label)
    return str(label)
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
    """Render a window of a DataFrame/Series as XML for the IDE's data viewer.

    :type df: pandas.core.frame.DataFrame
    :type name: str
    :type coffset: int
    :type roffset: int
    :type rows: int
    :type cols: int
    :type format: str
    """
    # dim == 1 means df is actually a Series (single pseudo-column).
    dim = len(df.axes)
    num_rows = df.shape[0]
    num_cols = df.shape[1] if dim > 1 else 1
    xml = slice_to_xml(name, num_rows, num_cols, "", "", (0, 0))
    if (rows, cols) == (-1, -1):
        rows, cols = num_rows, num_cols
    rows = min(rows, MAXIMUM_ARRAY_SIZE)
    cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
    # need to precompute column bounds here before slicing!
    col_bounds = [None] * cols
    dtypes = [None] * cols
    if dim > 1:
        for col in range(cols):
            dtype = df.dtypes.iloc[coffset + col].kind
            dtypes[col] = dtype
            if dtype in "biufc":
                # Numeric column: min/max over the whole column, not the window.
                cvalues = df.iloc[:, coffset + col]
                bounds = (cvalues.min(), cvalues.max())
            else:
                bounds = (0, 0)
            col_bounds[col] = bounds
    else:
        dtype = df.dtype.kind
        dtypes[0] = dtype
        col_bounds[0] = (df.min(), df.max()) if dtype in "biufc" else (0, 0)
    # Now slice down to the requested window and recompute actual extents.
    df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
    rows = df.shape[0]
    cols = df.shape[1] if dim > 1 else 1
    format = format.replace('%', '')
    def col_to_format(c):
        # A user-supplied format only applies to float columns; otherwise
        # fall back to the per-dtype default.
        return format if dtypes[c] == 'f' and format else array_default_format(dtypes[c])
    xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
    xml += array_data_to_xml(rows, cols, lambda r: (("%" + col_to_format(c)) % (df.iat[r, c] if dim > 1 else df.iat[r])
                                                    for c in range(cols)))
    return xml
def array_data_to_xml(rows, cols, get_row):
    """Serialize table cells to an <arraydata> XML fragment.

    get_row(r) must yield the values of row r in column order; each value is
    rendered with var_to_xml.
    """
    parts = ['<arraydata rows="%s" cols="%s"/>\n' % (rows, cols)]
    for row_index in range(rows):
        parts.append('<row index="%s"/>\n' % to_string(row_index))
        parts.extend(var_to_xml(cell, '') for cell in get_row(row_index))
    return ''.join(parts)
def slice_to_xml(slice, rows, cols, format, type, bounds):
    """Build the <array> header element describing a slice's shape, display
    format, dtype kind and value range.

    bounds is a (min, max) pair; note the XML attribute order is max, min.
    """
    attrs = (slice, rows, cols, format, type, bounds[1], bounds[0])
    return '<array slice="%s" rows="%s" cols="%s" format="%s" type="%s" max="%s" min="%s"/>' % attrs
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
    """Build the <headerdata> element holding column and row header metadata.

    Column headers carry the dtype kind, the chosen format and the formatted
    min/max bounds; row headers carry the index labels.
    """
    chunks = ['<headerdata rows="%s" cols="%s">\n' % (rows, cols)]
    for col in range(cols):
        # Series (dim == 1) have no column axis; fall back to the ordinal.
        label = get_label(df.axes[1].values[col]) if dim > 1 else str(col)
        lo, hi = col_bounds[col]
        fmt = "%" + col_to_format(col)
        chunks.append(
            '<colheader index="%s" label="%s" type="%s" format="%s" max="%s" min="%s" />\n'
            % (str(col), label, dtypes[col], col_to_format(col), fmt % hi, fmt % lo))
    for row in range(rows):
        chunks.append('<rowheader index="%s" label = "%s"/>\n'
                      % (str(row), get_label(df.axes[0].values[row])))
    chunks.append("</headerdata>\n")
    return ''.join(chunks)
# Dispatch table: type name (as reported by get_type) -> XML serializer.
TYPE_TO_XML_CONVERTERS = {"ndarray": array_to_xml, "DataFrame": dataframe_to_xml, "Series": dataframe_to_xml}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
    """Serialize a table-like object (ndarray, DataFrame or Series) to a
    complete <xml> payload, raising VariableError for unsupported types."""
    _, type_name, _ = get_type(array)
    converter = TYPE_TO_XML_CONVERTERS.get(type_name)
    if converter is None:
        raise VariableError("type %s not supported" % type_name)
    return "<xml>%s</xml>" % converter(array, name, roffset, coffset, rows, cols, format)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TransformedDistribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
bs = bijectors
ds = distributions
la = linalg
class TransformedDistributionTest(test.TestCase):
  """Checks TransformedDistribution against scipy closed forms."""
  def _cls(self):
    # Indirection so subclasses can exercise a different distribution class.
    return ds.TransformedDistribution
  def testTransformedDistribution(self):
    """Exp-transformed Normal must agree with scipy's lognorm."""
    g = ops.Graph()
    with g.as_default():
      mu = 3.0
      sigma = 2.0
      # Note: the Jacobian callable only works for this example; more generally
      # you may or may not need a reduce_sum.
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))
      sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))
      # sample
      sample = log_normal.sample(100000, seed=235)
      self.assertAllEqual([], log_normal.event_shape)
      with self.test_session(graph=g):
        self.assertAllEqual([], log_normal.event_shape_tensor().eval())
        self.assertAllClose(
            sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05)
      # pdf, log_pdf, cdf, etc...
      # The mean of the lognormal is around 148.
      test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
      for func in [[log_normal.log_prob, sp_dist.logpdf],
                   [log_normal.prob, sp_dist.pdf],
                   [log_normal.log_cdf, sp_dist.logcdf],
                   [log_normal.cdf, sp_dist.cdf],
                   [log_normal.survival_function, sp_dist.sf],
                   [log_normal.log_survival_function, sp_dist.logsf]]:
        actual = func[0](test_vals)
        expected = func[1](test_vals)
        with self.test_session(graph=g):
          self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01)
  def testCachedSamplesWithoutInverse(self):
    """log_prob on a sampled tensor should reuse the cached pre-image."""
    with self.test_session() as sess:
      mu = 3.0
      sigma = 0.02
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))
      sample = log_normal.sample(1)
      sample_val, log_pdf_val = sess.run([sample, log_normal.log_prob(sample)])
      self.assertAllClose(
          stats.lognorm.logpdf(sample_val, s=sigma, scale=np.exp(mu)),
          log_pdf_val,
          atol=1e-2)
  def testShapeChangingBijector(self):
    """SoftmaxCentered changes the event shape (scalar -> 2-simplex)."""
    with self.test_session():
      softmax = bs.SoftmaxCentered()
      standard_normal = ds.Normal(loc=0., scale=1.)
      multi_logit_normal = self._cls()(
          distribution=standard_normal,
          bijector=softmax)
      x = [[-np.log(3.), 0.],
           [np.log(3), np.log(5)]]
      y = softmax.forward(x).eval()
      expected_log_pdf = (stats.norm(loc=0., scale=1.).logpdf(x) -
                          np.sum(np.log(y), axis=-1))
      self.assertAllClose(expected_log_pdf,
                          multi_logit_normal.log_prob(y).eval())
      self.assertAllClose(
          [1, 2, 3, 2],
          array_ops.shape(multi_logit_normal.sample([1, 2, 3])).eval())
      self.assertAllEqual([2], multi_logit_normal.event_shape)
      self.assertAllEqual([2], multi_logit_normal.event_shape_tensor().eval())
  def testEntropy(self):
    """Entropy of an affine-transformed MVN matches scipy's closed form."""
    with self.test_session():
      shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
      diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
          for i in range(len(diag))])
      fake_mvn = self._cls()(
          ds.MultivariateNormalDiag(
              loc=array_ops.zeros_like(shift),
              scale_diag=array_ops.ones_like(diag),
              validate_args=True),
          bs.AffineLinearOperator(
              shift,
              scale=la.LinearOperatorDiag(diag, is_non_singular=True),
              validate_args=True),
          validate_args=True)
      self.assertAllClose(actual_mvn_entropy,
                          fake_mvn.entropy().eval())
class ScalarToMultiTest(test.TestCase):
  """Checks batch_shape/event_shape overrides on TransformedDistribution."""
  def _cls(self):
    # Indirection so subclasses can exercise a different distribution class.
    return ds.TransformedDistribution
  def setUp(self):
    # Affine parameters shared by all tests: a length-3 shift and a batch of
    # two lower-triangular 3x3 scale matrices.
    self._shift = np.array([-1, 0, 1], dtype=np.float32)
    self._tril = np.array([[[1., 0, 0],
                            [2, 1, 0],
                            [3, 2, 1]],
                           [[2, 0, 0],
                            [3, 2, 0],
                            [4, 3, 2]]],
                          dtype=np.float32)
  def _testMVN(self,
               base_distribution_class,
               base_distribution_kwargs,
               batch_shape=(),
               event_shape=(),
               not_implemented_message=None):
    """Shared harness: build a fake MVN via Affine, compare to scipy."""
    with self.test_session() as sess:
      # Overriding shapes must be compatible w/bijector; most bijectors are
      # batch_shape agnostic and only care about event_ndims.
      # In the case of `Affine`, if we got it wrong then it would fire an
      # exception due to incompatible dimensions.
      batch_shape_pl = array_ops.placeholder(
          dtypes.int32, name="dynamic_batch_shape")
      event_shape_pl = array_ops.placeholder(
          dtypes.int32, name="dynamic_event_shape")
      feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32),
                   event_shape_pl: np.array(event_shape, dtype=np.int32)}
      fake_mvn_dynamic = self._cls()(
          distribution=base_distribution_class(validate_args=True,
                                               **base_distribution_kwargs),
          bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
          batch_shape=batch_shape_pl,
          event_shape=event_shape_pl,
          validate_args=True)
      fake_mvn_static = self._cls()(
          distribution=base_distribution_class(validate_args=True,
                                               **base_distribution_kwargs),
          bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
          batch_shape=batch_shape,
          event_shape=event_shape,
          validate_args=True)
      actual_mean = np.tile(self._shift, [2, 1])  # Affine elided this tile.
      actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1]))
      def actual_mvn_log_prob(x):
        # scipy reference, computed per batch member then stacked.
        return np.concatenate([
            [stats.multivariate_normal(
                actual_mean[i], actual_cov[i]).logpdf(x[:, i, :])]
            for i in range(len(actual_cov))]).T
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(
              actual_mean[i], actual_cov[i]).entropy()]
          for i in range(len(actual_cov))])
      self.assertAllEqual([3], fake_mvn_static.event_shape)
      self.assertAllEqual([2], fake_mvn_static.batch_shape)
      self.assertAllEqual(tensor_shape.TensorShape(None),
                          fake_mvn_dynamic.event_shape)
      self.assertAllEqual(tensor_shape.TensorShape(None),
                          fake_mvn_dynamic.batch_shape)
      x = fake_mvn_static.sample(5, seed=0).eval()
      for unsupported_fn in (fake_mvn_static.log_cdf,
                             fake_mvn_static.cdf,
                             fake_mvn_static.survival_function,
                             fake_mvn_static.log_survival_function):
        with self.assertRaisesRegexp(NotImplementedError,
                                     not_implemented_message):
          unsupported_fn(x)
      num_samples = 5e3
      # NOTE: the loop deliberately rebinds `fake_mvn` and `feed_dict` —
      # the static variant needs no feeds, the dynamic one needs the
      # placeholder feeds computed above.
      for fake_mvn, feed_dict in ((fake_mvn_static, {}),
                                  (fake_mvn_dynamic, feed_dict)):
        # Ensure sample works by checking first, second moments.
        y = fake_mvn.sample(int(num_samples), seed=0)
        x = y[0:5, ...]
        sample_mean = math_ops.reduce_mean(y, 0)
        centered_y = array_ops.transpose(y - sample_mean, [1, 2, 0])
        sample_cov = math_ops.matmul(
            centered_y, centered_y, transpose_b=True) / num_samples
        [
            sample_mean_,
            sample_cov_,
            x_,
            fake_event_shape_,
            fake_batch_shape_,
            fake_log_prob_,
            fake_prob_,
            fake_entropy_,
        ] = sess.run([
            sample_mean,
            sample_cov,
            x,
            fake_mvn.event_shape_tensor(),
            fake_mvn.batch_shape_tensor(),
            fake_mvn.log_prob(x),
            fake_mvn.prob(x),
            fake_mvn.entropy(),
        ], feed_dict=feed_dict)
        self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1)
        self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1)
        # Ensure all other functions work as intended.
        self.assertAllEqual([5, 2, 3], x_.shape)
        self.assertAllEqual([3], fake_event_shape_)
        self.assertAllEqual([2], fake_batch_shape_)
        self.assertAllClose(actual_mvn_log_prob(x_), fake_log_prob_,
                            atol=0., rtol=1e-6)
        self.assertAllClose(np.exp(actual_mvn_log_prob(x_)), fake_prob_,
                            atol=0., rtol=1e-5)
        self.assertAllClose(actual_mvn_entropy, fake_entropy_,
                            atol=0., rtol=1e-6)
  def testScalarBatchScalarEvent(self):
    self._testMVN(
        base_distribution_class=ds.Normal,
        base_distribution_kwargs={"loc": 0., "scale": 1.},
        batch_shape=[2],
        event_shape=[3],
        not_implemented_message="not implemented when overriding event_shape")
  def testScalarBatchNonScalarEvent(self):
    self._testMVN(
        base_distribution_class=ds.MultivariateNormalDiag,
        base_distribution_kwargs={"loc": [0., 0., 0.],
                                  "scale_diag": [1., 1, 1]},
        batch_shape=[2],
        not_implemented_message="not implemented")
    with self.test_session():
      # Can't override event_shape for scalar batch, non-scalar event.
      with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
        self._cls()(
            distribution=ds.MultivariateNormalDiag(loc=[0.], scale_diag=[1.]),
            bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=[2],
            event_shape=[3],
            validate_args=True)
  def testNonScalarBatchScalarEvent(self):
    self._testMVN(
        base_distribution_class=ds.Normal,
        base_distribution_kwargs={"loc": [0., 0], "scale": [1., 1]},
        event_shape=[3],
        not_implemented_message="not implemented when overriding event_shape")
    with self.test_session():
      # Can't override batch_shape for non-scalar batch, scalar event.
      with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
        self._cls()(
            distribution=ds.Normal(loc=[0.], scale=[1.]),
            bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=[2],
            event_shape=[3],
            validate_args=True)
  def testNonScalarBatchNonScalarEvent(self):
    with self.test_session():
      # Can't override event_shape and/or batch_shape for non_scalar batch,
      # non-scalar event.
      with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
        self._cls()(
            distribution=ds.MultivariateNormalDiag(loc=[[0.]],
                                                   scale_diag=[[1.]]),
            bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
            batch_shape=[2],
            event_shape=[3],
            validate_args=True)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
|
import functools
import inspect
import logging

from robot.utils import ConnectionCache

from pyuia import PageObject, get_page_object
from util import is_test_failed, log_screenshot, log_text, in_context as in_robot_context
__all__ = ['BaseAppLibrary']
_logger = logging.getLogger(__name__)
def _state_capturing_decorator(method):
    """Wrap a keyword method so device/app state is captured around the call.

    State is captured before the keyword runs and again after it returns or
    raises; if the keyword returns a ``PageObject``, the library's notion of
    the current page is updated to it.
    """
    # functools.wraps preserves __name__/__doc__ so Robot Framework still
    # derives the correct keyword name and documentation from the method.
    @functools.wraps(method)
    def decorator(*args, **kwargs):
        # Accessor detection by name prefix is currently disabled, so every
        # keyword re-captures state after it finishes.
        # accessor = any(method.__name__.startswith(prefix) or method.__name__ == prefix
        #                for prefix in ['is_', 'get_', 'should_be_'])
        accessor = False
        self = args[0]  # the keyword library itself
        self._capture_state()
        try:
            result = method(*args, **kwargs)
            # if the result is a page object, update the current page.
            if isinstance(result, PageObject):
                self._current_page = result
        except Exception as err:
            self._capture_state(after=True, err=err)
            raise
        if not accessor:  # efficiency
            self._capture_state(after=True)
        return result
    return decorator
class _StateCapturing(type):
def __new__(cls, clsname, bases, attrs):
white_list = [
'open_session',
'close_session',
'close_all_sessions',
'switch_device',
'open_app',
'close_app',
]
for name, obj in attrs.items():
if not (inspect.isroutine(obj) and not name.startswith('_')): continue
if name in white_list: continue
attrs[name] = _state_capturing_decorator(obj)
return type.__new__(cls, clsname, bases, attrs)
class BaseAppLibrary(object):
    """Base Robot Framework keyword library for driving an app under test.

    Concrete libraries implement ``_init_context`` to create a device
    context; keywords operate on the context of the currently selected
    session (see `Switch Device`).
    """
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    if in_robot_context:
        # Wrap public keywords with automatic state capturing (py2 metaclass).
        __metaclass__ = _StateCapturing
    def __init__(self):
        self._cache = ConnectionCache()
    def open_session(self, device_id, alias=None):
        """Open a session.
        ``device_id`` is an identifier for looking up configurations of a specific device.
        The optional ``alias`` provided here can be used to switch between sessions/devices later.
        See `Switch Device` for more details.
        """
        _logger.info('Open session; device ID = [%s], alias = [%s])', device_id, alias)
        # init context and install delegates
        context = self._init_context(device_id)
        context._log_screenshot_delegate = self._log_screenshot_delegate
        context._log_page_source_delegate = self._log_page_source_delegate
        self._cache.register(RFConnectionCache(context), alias)
    def open_app(self, reset=None):
        """Open the app.
        To reset app state prior to opening the app, pass a non-empty string to ``reset`` argument.
        Examples:
        | Open App | reset | # reset app state |
        | Open App |       | # do not reset app state |
        """
        msg = 'App logs (initial)'
        context = self._current_context
        context.open_app(bool(reset))
        context.logs_all = []  # accumulate logs of each step
        log_text('\n'.join(context.get_initial_logs()), msg, 'app_logs_initial_', '.log', level=logging.INFO)
    def _init_context(self, device_id):
        """Create and return a device context for ``device_id``.

        Must be implemented by concrete libraries.  (Fixed: the base
        signature previously omitted ``device_id`` even though
        ``open_session`` always calls it with one.)
        """
        raise NotImplementedError()
    def _log_screenshot_delegate(self, msg, *args, **kwargs):
        """Delegate installed on the context: log a screenshot at ``level``."""
        level = kwargs['level'] if 'level' in kwargs else logging.DEBUG
        if not _logger.isEnabledFor(level):
            return
        page = kwargs['page'] if 'page' in kwargs else None
        msg = msg % args
        if page: msg += ' (%s)' % page.__class__.__name__
        log_screenshot(self._current_context.take_screenshot_as_png(), msg, level=level)
    def _log_page_source_delegate(self, msg, *args, **kwargs):
        """Delegate installed on the context: log the page source at ``level``."""
        level = kwargs['level'] if 'level' in kwargs else logging.DEBUG
        if not _logger.isEnabledFor(level):
            return
        page = kwargs['page'] if 'page' in kwargs else None
        msg = msg % args
        if page: msg += ' (%s)' % page.__class__.__name__
        source, ext = self._current_context.dump_page_source()
        log_text(source, msg, prefix='page_source_', suffix='.%s' % ext, level=level)
    def close_session(self):
        """Terminate current session."""
        self._cache.current.close()
    def close_all_sessions(self):
        """Terminate all open sessions."""
        self._cache.close_all()
    def close_app(self):
        """Close the app."""
        self._cache.current.close_app()
    def switch_device(self, alias):
        """Switch between sessions/devices using alias.
        Examples:
        | Open Session  | A | # current session/device is A      |
        | Open Session  | B | # current session/device becomes B |
        | Switch Device | A | # switch back to A                 |
        """
        # (Fixed: the examples previously said `Open App`, which takes no
        # alias; sessions are opened with `Open Session`.)
        self._cache.switch(alias)
    def _capture_state(self, after=False, err=None):
        # To increase efficiency, screenshots are no longer taken automatically.
        # Developers should explicitly do that AFTER the UI has been changed.
        if not after: return
        failed = bool(err)
        try:
            context = self._current_context
            msg = 'App logs (step, keyword failed? %s)' % failed
            logs_step = context.get_new_logs()
            context.logs_all.extend(logs_step)
            log_text('\n'.join(logs_step), msg, 'app_logs_step_', '.log', level=logging.INFO)
            if not failed: return
            context.log_page_source('Page source', level=logging.INFO)
            context.log_screenshot('Screenshot', level=logging.INFO)
        except Exception:
            # Best-effort: capturing must never mask the keyword's outcome.
            # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.)
            _logger.warning('Fail to capture state. (keyword failed = %s)', failed, exc_info=True)
    @property
    def _current_context(self):
        # Context object of the currently selected session.
        return self._cache.current._context
    @property
    def _current_page(self):
        # Page object the library believes is currently displayed.
        return self._cache.current._context.current_page
    @_current_page.setter
    def _current_page(self, page):
        self._cache.current._context.current_page = page
class RFConnectionCache(object):
    """Adapter stored in Robot's ConnectionCache; wraps one device context."""
    def __init__(self, context):
        self._context = context
    def close(self):
        """Quit the underlying session/context."""
        self._context.quit()
    def close_app(self):
        # all statements suppress possible errors, or other sessions won't be closed.
        self._capture_state()
        self._context.close_app()
    def _capture_state(self):
        """Best-effort dump of accumulated logs (plus page source and
        screenshot on failure) before the app is closed."""
        failed = None
        try:
            failed = is_test_failed()
            context = self._context
            msg = 'App logs (about to quit, test failed? %s)' % failed
            context.logs_all.extend(context.get_new_logs())
            log_text('\n'.join(context.logs_all), msg, 'app_logs_all_', '.log', level=logging.INFO)
            if failed:
                context.log_page_source('Page source (test failed)', level=logging.INFO)
                context.log_screenshot('Screenshot (test failed)', level=logging.INFO)
        except Exception:
            _logger.warning('Fail to capture state. (test failed = %s)', failed, exc_info=True)
|
|
# Copyright 2015-2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Resource Filtering Logic
"""
import copy
import datetime
from datetime import timedelta
import fnmatch
import ipaddress
import logging
import operator
import re
import os
from dateutil.tz import tzutc
from dateutil.parser import parse
from distutils import version
from random import sample
import jmespath
from c7n.element import Element
from c7n.exceptions import PolicyValidationError
from c7n.executor import ThreadPoolExecutor
from c7n.registry import PluginRegistry
from c7n.resolver import ValuesFrom
from c7n.utils import set_annotation, type_schema, parse_cidr
from c7n.manager import iter_filters
class FilterValidationError(Exception):
    """Raised when a filter's configuration fails validation."""
# Matching filters annotate their key onto objects under this key, so
# downstream filters/actions can inspect which filters matched a resource.
ANNOTATION_KEY = "c7n:MatchedFilters"
def glob_match(value, pattern):
    """Shell-style wildcard match; non-string values never match."""
    return isinstance(value, str) and fnmatch.fnmatch(value, pattern)
def regex_match(value, regex):
    """Case-insensitive regex match anchored at the start of the value;
    non-string values never match."""
    if not isinstance(value, str):
        return False
    # Note python 2.5+ internally cache regex
    # would be nice to use re2
    return re.match(regex, value, flags=re.IGNORECASE) is not None
def regex_case_sensitive_match(value, regex):
    """Case-sensitive regex match anchored at the start of the value;
    non-string values never match."""
    if not isinstance(value, str):
        return False
    # Note python 2.5+ internally cache regex
    # would be nice to use re2
    return re.match(regex, value) is not None
def operator_in(x, y):
    """Membership test with OPERATORS-table argument order: x in y."""
    return operator.contains(y, x)
def operator_ni(x, y):
    """Negated membership test: True when x is absent from y."""
    return not (x in y)
def difference(x, y):
    """True when x contains at least one element not present in y."""
    return len(set(x) - set(y)) > 0
def intersect(x, y):
    """True when x and y share at least one element."""
    return len(set(x) & set(y)) > 0
# Comparison vocabulary available to value filters.  Several keys are
# aliases (eq/equal, ne/not-equal, ni/not-in, ...); the module-level helper
# functions supply membership/set/regex semantics the stdlib lacks.
OPERATORS = {
    'eq': operator.eq,
    'equal': operator.eq,
    'ne': operator.ne,
    'not-equal': operator.ne,
    'gt': operator.gt,
    'greater-than': operator.gt,
    'ge': operator.ge,
    'gte': operator.ge,
    'le': operator.le,
    'lte': operator.le,
    'lt': operator.lt,
    'less-than': operator.lt,
    'glob': glob_match,
    'regex': regex_match,
    'regex-case': regex_case_sensitive_match,
    'in': operator_in,
    'ni': operator_ni,
    'not-in': operator_ni,
    'contains': operator.contains,
    'difference': difference,
    'intersect': intersect}
# `value_type` options accepted by ValueFilter for coercing values before
# the comparison operator is applied.
VALUE_TYPES = [
    'age', 'integer', 'expiration', 'normalize', 'size',
    'cidr', 'cidr_size', 'swap', 'resource_count', 'expr',
    'unique_size', 'date', 'version']
class FilterRegistry(PluginRegistry):
    """Registry of filter plugins that also instantiates filters from
    policy configuration data."""
    def __init__(self, *args, **kw):
        super(FilterRegistry, self).__init__(*args, **kw)
        # Structural/boolean filters available to every resource type.
        self.register('value', ValueFilter)
        self.register('or', Or)
        self.register('and', And)
        self.register('not', Not)
        self.register('event', EventFilter)
        self.register('reduce', ReduceFilter)
    def parse(self, data, manager):
        """Instantiate a filter for each element of a policy filter list."""
        return [self.factory(d, manager) for d in data]
    def factory(self, data, manager=None):
        """Factory func for filters.
        data - policy config for filters
        manager - resource type manager (ec2, s3, etc)
        """
        # Make the syntax a little nicer for common cases.
        if isinstance(data, dict) and len(data) == 1 and 'type' not in data:
            op = next(iter(data))
            if op in ('or', 'and', 'not'):
                return self[op](data, self, manager)
            # A bare single-key mapping is an implicit value filter.
            return ValueFilter(data, manager)
        if isinstance(data, str):
            filter_type = data
            data = {'type': data}
        else:
            filter_type = data.get('type')
        if not filter_type:
            raise PolicyValidationError(
                "%s Invalid Filter %s" % (
                    self.plugin_type, data))
        filter_class = self.get(filter_type)
        if filter_class is None:
            raise PolicyValidationError(
                "%s Invalid filter type %s" % (
                    self.plugin_type, data))
        return filter_class(data, manager)
def trim_runtime(filters):
    """Remove runtime filters.
    Some filters can only be effectively evaluated at policy
    execution, ie. event filters.
    When evaluating conditions for dryrun or provisioning stages we
    remove them.
    """
    def prune(node):
        # Detach the filter; collapse boolean parents emptied by the removal.
        parent = node.get_block_parent()
        parent.filters.remove(node)
        if isinstance(parent, BooleanGroupFilter) and len(parent) == 0:
            prune(parent)
    for candidate in iter_filters(filters):
        if isinstance(candidate, EventFilter):
            prune(candidate)
# Really should be an abstract base class (abc) or
# zope.interface
class Filter(Element):
    """Base class for resource filters.

    Subclasses act as per-resource predicates via __call__ and/or override
    ``process`` for bulk filtering.
    """
    executor_factory = ThreadPoolExecutor
    log = logging.getLogger('custodian.filters')
    metrics = ()
    permissions = ()
    schema = {'type': 'object'}
    # schema aliases get hoisted into a jsonschema definition
    # location, and then referenced inline.
    schema_alias = None
    def __init__(self, data, manager=None):
        self.data = data
        self.manager = manager
    def get_permissions(self):
        # Cloud-provider permissions required to evaluate this filter.
        return self.permissions
    def validate(self):
        """validate filter config, return validation error or self"""
        return self
    def process(self, resources, event=None):
        """ Bulk process resources and return filtered set."""
        return list(filter(self, resources))
    def get_block_operator(self):
        """Determine the immediate parent boolean operator for a filter"""
        # Top level operator is `and`
        block = self.get_block_parent()
        if block.type in ('and', 'or', 'not'):
            return block.type
        return 'and'
    def get_block_parent(self):
        """Get the block parent for a filter"""
        # Walk the filter tree depth-first keeping a stack of open boolean
        # blocks; a None sentinel from iter_filters marks a block's end.
        # When we reach self, the stack top is our enclosing block (or the
        # manager itself at top level).
        block_stack = [self.manager]
        for f in self.manager.iter_filters(block_end=True):
            if f is None:
                block_stack.pop()
            elif f == self:
                return block_stack[-1]
            elif f.type in ('and', 'or', 'not'):
                block_stack.append(f)
    def merge_annotation(self, r, annotation_key, values):
        # NOTE(review): `annotation_key` is unused and the trailing guard
        # falls off the end of the function either way — this looks like
        # partially-ported logic; verify against upstream before relying on it.
        block_op = self.get_block_operator()
        if block_op in ('and', 'not'):
            # Inside and/not blocks only keep annotations every child matched.
            r[self.matched_annotation_key] = intersect_list(
                values,
                r.get(self.matched_annotation_key))
        if not values and block_op != 'or':
            return
class BaseValueFilter(Filter):
    """Shared plumbing for filters that resolve a value off a resource."""
    expr = None
    def __init__(self, data, manager=None):
        super(BaseValueFilter, self).__init__(data, manager)
        # Cache of compiled jmespath expressions keyed by expression text.
        self.expr = {}
    def get_resource_value(self, k, i, regex=None):
        """Resolve key ``k`` against resource ``i``; optionally post-process
        the value with a single-group ``regex``."""
        r = None
        if k.startswith('tag:'):
            tag_key = k.split(':', 1)[1]
            # AWS schema: 'Tags': [{'Key': ..., 'Value': ...}, ...]
            if 'Tags' in i:
                for tag in i.get("Tags", []):
                    if tag.get('Key') == tag_key:
                        r = tag.get('Value')
                        break
            # GCP schema: 'labels': {'key': 'value'}
            elif 'labels' in i:
                r = i.get('labels', {}).get(tag_key, None)
            # GCP has a secondary form of labels called tags
            # as labels without values.
            # Azure schema: 'tags': {'key': 'value'}
            elif 'tags' in i:
                r = i.get('tags', {}).get(tag_key, None)
        elif k in i:
            # Plain top-level attribute.
            r = i.get(k)
        else:
            # Fall back to jmespath, compiling and caching on first use.
            if k not in self.expr:
                self.expr[k] = jmespath.compile(k)
            r = self.expr[k].search(i)
        if regex:
            r = ValueRegex(regex).get_resource_value(r)
        return r
def intersect_list(a, b):
    """Ordered intersection of a and b; if either side is None, return the
    other unchanged."""
    if b is None:
        return a
    if a is None:
        return b
    # Preserve a's ordering (and duplicates), keeping items also present in b.
    return [item for item in a if item in b]
class BooleanGroupFilter(Filter):
    """Base for 'or'/'and'/'not' blocks holding a list of child filters."""
    def __init__(self, data, registry, manager):
        super(BooleanGroupFilter, self).__init__(data)
        self.registry = registry
        # The single value of the block mapping is the child filter list.
        self.filters = registry.parse(list(self.data.values())[0], manager)
        self.manager = manager
    def validate(self):
        """Validate every child filter; return self for chaining."""
        for child in self.filters:
            child.validate()
        return self
    def get_resource_type_id(self):
        """Name of the id field for the managed resource type."""
        return self.manager.get_model().id
    def __len__(self):
        return len(self.filters)
    def __bool__(self):
        # A block must stay truthy even when it holds no children,
        # since len() would otherwise make it falsy.
        return True
class Or(BooleanGroupFilter):
    """Union of the child filters' matches."""
    def process(self, resources, event=None):
        if self.manager:
            return self.process_set(resources, event)
        return super(Or, self).process(resources, event)
    def __call__(self, r):
        """Fallback for older unit tests that don't utilize a query manager"""
        return any(child(r) for child in self.filters)
    def process_set(self, resources, event):
        # Evaluate each child over the full set and union the matched ids.
        rtype_id = self.get_resource_type_id()
        by_id = {r[rtype_id]: r for r in resources}
        matched_ids = set()
        for child in self.filters:
            matched_ids.update(r[rtype_id] for r in child.process(resources, event))
        return [by_id[rid] for rid in matched_ids]
class And(BooleanGroupFilter):
    """Intersection: pipe the resource set through each child in turn."""
    def process(self, resources, events=None):
        sweeper = None
        if self.manager:
            # Track annotations so non-matching resources can be reset.
            sweeper = AnnotationSweeper(self.get_resource_type_id(), resources)
        for child in self.filters:
            resources = child.process(resources, events)
            if not resources:
                break
        if self.manager:
            sweeper.sweep(resources)
        return resources
class Not(BooleanGroupFilter):
    """Complement of the (implicitly and-ed) child filters."""
    def process(self, resources, event=None):
        if self.manager:
            return self.process_set(resources, event)
        return super(Not, self).process(resources, event)
    def __call__(self, r):
        """Fallback for older unit tests that don't utilize a query manager"""
        # There is an implicit 'and' for self.filters
        # ~(A ^ B ^ ... ^ Z) = ~A v ~B v ... v ~Z
        return not all(child(r) for child in self.filters)
    def process_set(self, resources, event):
        rtype_id = self.get_resource_type_id()
        by_id = {r[rtype_id]: r for r in resources}
        sweeper = AnnotationSweeper(rtype_id, resources)
        # Run the inner (and-ed) filters, then take the complement.
        survivors = resources
        for child in self.filters:
            survivors = child.process(survivors, event)
            if not survivors:
                break
        matched = set(by_id) - {r[rtype_id] for r in survivors}
        # Nothing "matched" the inner block from the sweeper's perspective,
        # so clear all annotations the inner filters may have set.
        sweeper.sweep([])
        return [by_id[rid] for rid in matched]
class AnnotationSweeper:
    """Support clearing annotations set within a block filter.
    See https://github.com/cloud-custodian/cloud-custodian/issues/2116
    """
    def __init__(self, id_key, resources):
        self.id_key = id_key
        snapshot = {}
        by_id = {}
        for resource in resources:
            # Snapshot the pre-existing 'c7n*' annotations per resource so
            # they can be restored after a non-matching block filter.
            snapshot[resource[id_key]] = {
                k: v for k, v in resource.items() if k.startswith('c7n')}
            by_id[resource[id_key]] = resource
        # We keep a full copy of the annotation keys to allow restore.
        self.ra_map = copy.deepcopy(snapshot)
        self.resource_map = by_id
    def sweep(self, resources):
        """Reset annotations on resources the block filter did not match."""
        matched_ids = [r[self.id_key] for r in resources]
        for rid in set(self.ra_map).difference(matched_ids):
            resource = self.resource_map[rid]
            # Clear annotations if the block filter didn't match
            for key in [k for k in resource if k.startswith('c7n')]:
                del resource[key]
            # Restore annotations that may have existed prior to the block filter.
            resource.update(self.ra_map[rid])
# The default LooseVersion will fail on comparing present strings, used
# in the value as shorthand for certain options.
# NOTE(review): distutils is deprecated (removed in Python 3.12); also,
# defining __eq__ without __hash__ makes instances unhashable — confirm
# neither matters for the call sites before changing anything here.
class ComparableVersion(version.LooseVersion):
    # Equality that degrades to False instead of raising when the operands
    # cannot be compared as versions.
    def __eq__(self, other):
        try:
            return super(ComparableVersion, self).__eq__(other)
        except TypeError:
            return False
class ValueFilter(BaseValueFilter):
    """Generic value filter using jmespath.

    Extracts a value from each resource via the jmespath expression in
    ``key``, optionally coerces it (``value_type``) and/or extracts a
    regex capture group from it (``value_regex``), then compares it to
    ``value`` with the operator named by ``op``.
    """
    # Parsed filter configuration; populated lazily on first match() call.
    op = v = vtype = None
    schema = {
        'type': 'object',
        # Doesn't mix well with inherits that extend
        'additionalProperties': False,
        'required': ['type'],
        'properties': {
            # Doesn't mix well as enum with inherits that extend
            'type': {'enum': ['value']},
            'key': {'type': 'string'},
            'value_type': {'$ref': '#/definitions/filters_common/value_types'},
            'default': {'type': 'object'},
            'value_regex': {'type': 'string'},
            'value_from': {'$ref': '#/definitions/filters_common/value_from'},
            'value': {'$ref': '#/definitions/filters_common/value'},
            'op': {'$ref': '#/definitions/filters_common/comparison_operators'}
        }
    }
    schema_alias = True
    # When True, matched resources are annotated with the matched key.
    annotate = True
    required_keys = {'value', 'key'}

    def _validate_resource_count(self):
        """ Specific validation for `resource_count` type
        The `resource_count` type works a little differently because it operates
        on the entire set of resources.  It:
        - does not require `key`
        - `value` must be a number
        - supports a subset of the OPERATORS list
        """
        for field in ('op', 'value'):
            if field not in self.data:
                raise PolicyValidationError(
                    "Missing '%s' in value filter %s" % (field, self.data))
        if not (isinstance(self.data['value'], int) or
                isinstance(self.data['value'], list)):
            raise PolicyValidationError(
                "`value` must be an integer in resource_count filter %s" % self.data)
        # I don't see how to support regex for this?
        if (self.data['op'] not in OPERATORS or self.data['op'] in {'regex', 'regex-case'} or
                'value_regex' in self.data):
            raise PolicyValidationError(
                "Invalid operator in value filter %s" % self.data)
        return self

    def validate(self):
        """Validate the filter's configuration at policy-load time.

        Returns self so validation calls can be chained; raises
        PolicyValidationError on any malformed configuration.
        """
        # A single-entry filter ({key: value} shorthand) needs no checks.
        if len(self.data) == 1:
            return self
        # `resource_count` requires a slightly different schema than the rest of
        # the value filters because it operates on the full resource list
        if self.data.get('value_type') == 'resource_count':
            return self._validate_resource_count()
        elif self.data.get('value_type') == 'date':
            if not parse_date(self.data.get('value')):
                raise PolicyValidationError(
                    "value_type: date with invalid date value:%s",
                    self.data.get('value', ''))
        if 'key' not in self.data and 'key' in self.required_keys:
            raise PolicyValidationError(
                "Missing 'key' in value filter %s" % self.data)
        if ('value' not in self.data and
                'value_from' not in self.data and
                'value' in self.required_keys):
            raise PolicyValidationError(
                "Missing 'value' in value filter %s" % self.data)
        if 'op' in self.data:
            if not self.data['op'] in OPERATORS:
                raise PolicyValidationError(
                    "Invalid operator in value filter %s" % self.data)
            if self.data['op'] in {'regex', 'regex-case'}:
                # Sanity check that we can compile
                try:
                    re.compile(self.data['value'])
                except re.error as e:
                    raise PolicyValidationError(
                        "Invalid regex: %s %s" % (e, self.data))
        if 'value_regex' in self.data:
            return self._validate_value_regex(self.data['value_regex'])
        return self

    def _validate_value_regex(self, regex):
        """Specific validation for `value_regex` type
        The `value_regex` type works a little differently.  In
        particular it doesn't support OPERATORS that perform
        operations on a list of values, specifically 'intersect',
        'contains', 'difference', 'in' and 'not-in'
        """
        # Sanity check that we can compile
        try:
            pattern = re.compile(regex)
            if pattern.groups != 1:
                raise PolicyValidationError(
                    "value_regex must have a single capturing group: %s" %
                    self.data)
        except re.error as e:
            raise PolicyValidationError(
                "Invalid value_regex: %s %s" % (e, self.data))
        return self

    def __call__(self, i):
        """Return True if resource *i* matches; annotate it when configured."""
        if self.data.get('value_type') == 'resource_count':
            return self.process(i)
        matched = self.match(i)
        if matched and self.annotate:
            set_annotation(i, ANNOTATION_KEY, self.k)
        return matched

    def process(self, resources, event=None):
        """Filter the resource list; `resource_count` is all-or-nothing."""
        # For the resource_count filter we operate on the full set of resources.
        if self.data.get('value_type') == 'resource_count':
            op = OPERATORS[self.data.get('op')]
            if op(len(resources), self.data.get('value')):
                return resources
            return []
        return super(ValueFilter, self).process(resources, event)

    def get_resource_value(self, k, i):
        """Extract key *k* from resource *i*, applying value_regex if set."""
        return super(ValueFilter, self).get_resource_value(k, i, self.data.get('value_regex'))

    def match(self, i):
        """Evaluate this filter against a single resource dict *i*."""
        # Lazily parse the filter config on first use; single-entry
        # filters are the {key: value} shorthand.
        if self.v is None and len(self.data) == 1:
            [(self.k, self.v)] = self.data.items()
        elif self.v is None and not hasattr(self, 'content_initialized'):
            self.k = self.data.get('key')
            self.op = self.data.get('op')
            if 'value_from' in self.data:
                values = ValuesFrom(self.data['value_from'], self.manager)
                self.v = values.get_values()
            else:
                self.v = self.data.get('value')
            self.content_initialized = True
            self.vtype = self.data.get('value_type')
        if i is None:
            return False
        # value extract
        r = self.get_resource_value(self.k, i)
        if self.op in ('in', 'not-in') and r is None:
            r = ()
        # value type conversion
        if self.vtype is not None:
            v, r = self.process_value_type(self.v, r, i)
        else:
            v = self.v
        # Value match
        if r is None and v == 'absent':
            return True
        elif r is not None and v == 'present':
            return True
        elif v == 'not-null' and r:
            return True
        elif v == 'empty' and not r:
            return True
        elif self.op:
            op = OPERATORS[self.op]
            try:
                return op(r, v)
            except TypeError:
                return False
        elif r == self.v:
            return True
        return False

    def process_value_type(self, sentinel, value, resource):
        """Coerce (sentinel, value) according to ``value_type``.

        Returns the pair to compare; some types ('age', 'swap') swap
        the operands so the configured operator reads naturally.
        """
        if self.vtype == 'normalize' and isinstance(value, str):
            return sentinel, value.strip().lower()
        elif self.vtype == 'expr':
            sentinel = self.get_resource_value(sentinel, resource)
            return sentinel, value
        elif self.vtype == 'integer':
            try:
                value = int(str(value).strip())
            except ValueError:
                value = 0
        elif self.vtype == 'size':
            try:
                return sentinel, len(value)
            except TypeError:
                return sentinel, 0
        elif self.vtype == 'unique_size':
            try:
                return sentinel, len(set(value))
            except TypeError:
                return sentinel, 0
        elif self.vtype == 'swap':
            return value, sentinel
        elif self.vtype == 'date':
            return parse_date(sentinel), parse_date(value)
        elif self.vtype == 'age':
            if not isinstance(sentinel, datetime.datetime):
                sentinel = datetime.datetime.now(tz=tzutc()) - timedelta(sentinel)
            value = parse_date(value)
            if value is None:
                # compatiblity
                value = 0
            # Reverse the age comparison, we want to compare the value being
            # greater than the sentinel typically. Else the syntax for age
            # comparisons is intuitively wrong.
            return value, sentinel
        elif self.vtype == 'cidr':
            s = parse_cidr(sentinel)
            v = parse_cidr(value)
            if (isinstance(s, ipaddress._BaseAddress) and isinstance(v, ipaddress._BaseNetwork)):
                return v, s
            return s, v
        elif self.vtype == 'cidr_size':
            cidr = parse_cidr(value)
            if cidr:
                return sentinel, cidr.prefixlen
            return sentinel, 0
        # Allows for expiration filtering, for events in the future as opposed
        # to events in the past which age filtering allows for.
        elif self.vtype == 'expiration':
            if not isinstance(sentinel, datetime.datetime):
                sentinel = datetime.datetime.now(tz=tzutc()) + timedelta(sentinel)
            value = parse_date(value)
            if value is None:
                value = 0
            return sentinel, value
        # Allows for comparing version numbers, for things that you expect a minimum version number.
        elif self.vtype == 'version':
            s = ComparableVersion(sentinel)
            v = ComparableVersion(value)
            return s, v
        return sentinel, value
class AgeFilter(Filter):
    """Automatically filter resources older than a given date.

    **Deprecated** use a value filter with `value_type: age` which can be
    done on any attribute.
    """
    threshold_date = None

    # The name of attribute to compare to threshold; must override in subclass
    date_attribute = None

    schema = None

    def validate(self):
        """Fail at policy-load time if a subclass forgot date_attribute."""
        if not self.date_attribute:
            raise NotImplementedError(
                "date_attribute must be overriden in subclass")
        return self

    def get_resource_date(self, i):
        """Return the resource's timestamp as an aware datetime, or None.

        Returns None when the resource lacks the date attribute (instead
        of raising KeyError), which makes the None guard in __call__
        reachable so such resources are simply not matched.
        """
        v = i.get(self.date_attribute)
        if v is None:
            return None
        if not isinstance(v, datetime.datetime):
            v = parse(v)
        if not v.tzinfo:
            # Naive timestamps are assumed to be UTC.
            v = v.replace(tzinfo=tzutc())
        return v

    def __call__(self, i):
        v = self.get_resource_date(i)
        if v is None:
            return False
        op = OPERATORS[self.data.get('op', 'greater-than')]
        # Compute the threshold once and cache it on the instance.
        if not self.threshold_date:
            days = self.data.get('days', 0)
            hours = self.data.get('hours', 0)
            minutes = self.data.get('minutes', 0)
            # Work around placebo issues with tz
            if v.tzinfo:
                n = datetime.datetime.now(tz=tzutc())
            else:
                n = datetime.datetime.now()
            self.threshold_date = n - timedelta(days=days, hours=hours, minutes=minutes)
        return op(self.threshold_date, v)
class EventFilter(ValueFilter):
    """Filter a resource based on an event."""

    schema = type_schema('event', rinherit=ValueFilter.schema)
    schema_alias = True

    def validate(self):
        # Events only exist for push-mode (lambda) policies.
        if 'mode' not in self.manager.data:
            raise PolicyValidationError(
                "Event filters can only be used with lambda policies in %s" % (
                    self.manager.data,))
        return self

    def process(self, resources, event=None):
        # With no event (pull mode) the filter is a pass-through;
        # otherwise keep all resources only when the event matches.
        if event is None or self(event):
            return resources
        return []
def parse_date(v, tz=None):
    """Best-effort conversion of *v* to a timezone-aware datetime.

    Accepts datetimes (naive ones are converted to *tz*), date strings
    (via dateutil's ``parse``), and numeric epoch values in either
    seconds or milliseconds.  Returns None when nothing worked.

    :param v: datetime, string, int or float (or None).
    :param tz: target tzinfo; defaults to UTC.
    """
    if v is None:
        return v
    tz = tz or tzutc()
    if isinstance(v, datetime.datetime):
        if v.tzinfo is None:
            # astimezone() interprets a naive datetime as local time.
            return v.astimezone(tz)
        return v
    if isinstance(v, str):
        try:
            return parse(v).astimezone(tz)
        except (AttributeError, TypeError, ValueError, OverflowError):
            pass
    # OSError on windows -- https://bugs.python.org/issue36439
    # NOTE: must be a real tuple; the original `(ValueError)` was just
    # a parenthesized class and only worked by accident in `except`.
    exceptions = (ValueError, OSError) if os.name == "nt" else (ValueError,)
    if isinstance(v, (int, float, str)):
        try:
            v = datetime.datetime.fromtimestamp(float(v)).astimezone(tz)
        except exceptions:
            pass
    if isinstance(v, (int, float, str)):
        # Still not converted: try interpreting as a milliseconds epoch.
        try:
            v = datetime.datetime.fromtimestamp(float(v) / 1000).astimezone(tz)
        except exceptions:
            pass
    return v if isinstance(v, datetime.datetime) else None
class ValueRegex:
    """Allows filtering based on the output of a regex capture.

    This is useful for parsing data that has a weird format.
    Instead of comparing the contents of the 'resource value' with the 'value',
    it will instead apply the regex to contents of the 'resource value', and compare
    the result of the capture group defined in that regex with the 'value'.
    Therefore you must have a single capture group defined in the regex.
    If the regex doesn't find a match it will return 'None'

    Example of getting a datetime object to make an 'expiration' comparison::

        type: value
        value_regex: ".*delete_after=([0-9]{4}-[0-9]{2}-[0-9]{2}).*"
        key: "tag:company_mandated_metadata"
        value_type: expiration
        op: lte
        value: 0
    """

    def __init__(self, expr):
        self.expr = expr

    def get_resource_value(self, resource):
        """Return group 1 of the regex applied to *resource*, or None."""
        if resource is None:
            return resource
        try:
            found = re.match(self.expr, resource)
        except (ValueError, TypeError):
            # Non-string resource values can't be matched.
            return None
        # No match means no captured value.
        return found.group(1) if found else None
class ReduceFilter(BaseValueFilter):
    """Generic reduce filter to group, sort, and limit your resources.

    This example will select the longest running instance from each ASG,
    then randomly choose 10% of those, maxing at 15 total instances.

    :example:

    .. code-block:: yaml

        - name: oldest-instance-by-asg
          resource: ec2
          filters:
            - "tag:aws:autoscaling:groupName": present
            - type: reduce
              group-by: "tag:aws:autoscaling:groupName"
              sort-by: "LaunchTime"
              order: asc
              limit: 1

    Or you might want to randomly select a 10 percent of your resources,
    but no more than 15.

    :example:

    .. code-block:: yaml

        - name: random-selection
          resource: ec2
          filters:
            - type: reduce
              order: randomize
              limit: 15
              limit-percent: 10
    """
    # Reduce selects rather than matches, so no per-resource annotation.
    annotate = False
    schema = {
        'type': 'object',
        # Doesn't mix well with inherits that extend
        'additionalProperties': False,
        'required': ['type'],
        'properties': {
            # Doesn't mix well as enum with inherits that extend
            'type': {'enum': ['reduce']},
            'group-by': {
                'oneOf': [
                    {'type': 'string'},
                    {
                        'type': 'object',
                        'key': {'type': 'string'},
                        'value_type': {'enum': ['string', 'number', 'date']},
                        'value_regex': 'string',
                    },
                ]
            },
            'sort-by': {
                'oneOf': [
                    {'type': 'string'},
                    {
                        'type': 'object',
                        'key': {'type': 'string'},
                        'value_type': {'enum': ['string', 'number', 'date']},
                        'value_regex': 'string',
                    },
                ]
            },
            'order': {'enum': ['asc', 'desc', 'reverse', 'randomize']},
            'null-order': {'enum': ['first', 'last']},
            'limit': {'type': 'number', 'minimum': 0},
            'limit-percent': {'type': 'number', 'minimum': 0, 'maximum': 100},
            'discard': {'type': 'number', 'minimum': 0},
            'discard-percent': {'type': 'number', 'minimum': 0, 'maximum': 100},
        },
    }
    schema_alias = True

    def __init__(self, data, manager):
        super(ReduceFilter, self).__init__(data, manager)
        self.order = self.data.get('order', 'asc')
        self.group_by = self.get_sort_config('group-by')
        self.sort_by = self.get_sort_config('sort-by')

    def validate(self):
        # make sure the regexes compile
        if 'value_regex' in self.group_by:
            self._validate_value_regex(self.group_by['value_regex'])
        if 'value_regex' in self.sort_by:
            self._validate_value_regex(self.sort_by['value_regex'])
        return self

    def process(self, resources, event=None):
        """Group, optionally sort, then limit/discard and flatten."""
        groups = self.group(resources)
        # specified either of the sorting options, so sort
        if 'sort-by' in self.data or 'order' in self.data:
            groups = self.sort_groups(groups)
        # now apply any limits to the groups and concatenate
        return list(filter(None, self.limit(groups)))

    def group(self, resources):
        """Bucket resources by the group-by key (stringified for hashing)."""
        groups = {}
        for r in resources:
            v = self._value_to_sort(self.group_by, r)
            vstr = str(v)
            if vstr not in groups:
                # Keep the typed value as 'sortkey' for group-level ordering.
                groups[vstr] = {'sortkey': v, 'resources': []}
            groups[vstr]['resources'].append(r)
        return groups

    def get_sort_config(self, key):
        # allow `foo: bar` but convert to
        # `foo: {'key': bar}`
        d = self.data.get(key, {})
        if isinstance(d, str):
            d = {'key': d}
        d['null_sort_value'] = self.null_sort_value(d)
        return d

    def sort_groups(self, groups):
        """Order the resources inside each group by the sort-by key."""
        for g in groups:
            groups[g]['resources'] = self.reorder(
                groups[g]['resources'],
                key=lambda r: self._value_to_sort(self.sort_by, r),
            )
        return groups

    def _value_to_sort(self, config, r):
        """Extract and type-coerce a sortable value from resource *r*."""
        expr = config.get('key')
        vtype = config.get('value_type', 'string')
        vregex = config.get('value_regex')
        v = None
        try:
            # extract value based on jmespath
            if expr:
                v = self.get_resource_value(expr, r, vregex)
            if v is not None:
                # now convert to expected type
                if vtype == 'number':
                    v = float(v)
                elif vtype == 'date':
                    v = parse_date(v)
                else:
                    v = str(v)
        except (AttributeError, ValueError):
            v = None
        if v is None:
            # Missing/unparseable values get the configured null placement.
            v = config.get('null_sort_value')
        return v

    def null_sort_value(self, config):
        """Pick a sentinel that places null values first or last as configured."""
        vtype = config.get('value_type', 'string')
        placement = self.data.get('null-order', 'last')
        if (placement == 'last' and self.order == 'desc') or (
            placement != 'last' and self.order != 'desc'
        ):
            # return a value that will sort first
            if vtype == 'number':
                return float('-inf')
            elif vtype == 'date':
                return datetime.datetime.min.replace(tzinfo=tzutc())
            return ''
        else:
            # return a value that will sort last
            if vtype == 'number':
                return float('inf')
            elif vtype == 'date':
                return datetime.datetime.max.replace(tzinfo=tzutc())
            return '\uffff'

    def limit(self, groups):
        """Apply discard/limit counts-or-percentages per group, then flatten."""
        results = []
        # NOTE: 'max' here shadows the builtin within this method only.
        max = self.data.get('limit', 0)
        pct = self.data.get('limit-percent', 0)
        drop = self.data.get('discard', 0)
        droppct = self.data.get('discard-percent', 0)
        ordered = list(groups)
        if 'group-by' in self.data or 'order' in self.data:
            ordered = self.reorder(ordered, key=lambda r: groups[r]['sortkey'])
        for g in ordered:
            # discard X first
            if droppct > 0:
                # The larger of the absolute and percentage discard wins.
                n = int(droppct / 100 * len(groups[g]['resources']))
                if n > drop:
                    drop = n
            if drop > 0:
                groups[g]['resources'] = groups[g]['resources'][drop:]
            # then limit the remaining
            count = len(groups[g]['resources'])
            if pct > 0:
                count = int(pct / 100 * len(groups[g]['resources']))
            if max > 0 and max < count:
                count = max
            results.extend(groups[g]['resources'][0:count])
        return results

    def reorder(self, items, key=None):
        """Order *items* per the configured 'order' mode."""
        if self.order == 'randomize':
            return sample(items, k=len(items))
        elif self.order == 'reverse':
            return items[::-1]
        else:
            return sorted(items, key=key, reverse=(self.order == 'desc'))
|
|
import click
import json
import sci_parameter_utils.fragment
import sci_parameter_utils.parsers
import sci_parameter_utils.general
import yaml
try:
import typing # noqa: F401
from typing import Any, Callable, Iterable, Type # noqa: F401
except:
pass
def get_dict_from_file(fobj):
    """
    Get dictionary from a file by extn

    Files named ``*.yaml`` or ``*.yml`` are parsed with yaml.safe_load;
    anything else is treated as JSON.
    """
    # endswith accepts a tuple, covering both standard YAML suffixes.
    if fobj.name.endswith(('.yaml', '.yml')):
        return yaml.safe_load(fobj)
    return json.load(fobj)
def get_extn_from_file(fobj):
    """Return the file-name extension of *fobj*, or "" when none is found.

    Only the last four characters are searched, so extensions longer
    than three characters — and a leading dot at position 0 — yield "".
    """
    name = fobj.name
    dot = name.rfind('.', -4)
    if dot <= 0:
        return ""
    return name[dot + 1:]
def get_values_interactively(vlist, validator):
    # type: (Iterable[str], Callable[[str, str], Any]) -> Dict[str, Any]
    """Prompt the user for each name in *vlist* and validate the replies.

    The validator is invoked as ``validator(name, raw)``; click keeps
    re-prompting while it raises, and the validated value is stored.
    """
    collected = {}
    for name in vlist:
        # Bind the current name as a default argument so every prompt
        # validates against its own key.
        collected[name] = click.prompt(
            "{}".format(name),
            value_proc=lambda raw, _key=name: validator(_key, raw))
    return collected
# Top-level click command group; subcommands attach via @cli_main.command().
@click.group()
def cli_main():
    """Set of useful parameter utilities"""
@cli_main.command()
@click.option('--params', '-p', type=click.File('r'),
              required=True,
              help="Parmeter definition file")
@click.option('--ifile', '-i', type=click.File('r'),
              help="Input values file")
@click.option('--out', '-o', default="",
              help="Name format for output file")
@click.option('--interact/--no-interact', default=False,
              help="Allow interactive value supply")
@click.option('list_fns', '--list/--no-list', '-l', default=False,
              help="List names of output files only")
@click.argument('template', type=click.File('r'))
def template(params, ifile, out, template, interact, list_fns):
    # type: (typing.TextIO, typing.TextIO, str, typing.TextIO, bool, bool) -> None # noqa
    """Generate parameter files from TEMPLATE"""
    # Build the element set describing the template's parameters.
    try:
        eset = sci_parameter_utils.fragment.TemplateElemSet(
            sci_parameter_utils.fragment.elems_from_dict(
                get_dict_from_file(params),
                sci_parameter_utils.fragment.TemplateElem
            ))
    except Exception as e:
        click.echo("Error setting up template: {}".format(e))
        raise click.Abort()
    iReq = eset.get_inputs()
    # Each entry of iList yields one templated output file; with no
    # input file a single empty value-set is used.
    if ifile:
        iList = get_dict_from_file(ifile)
        if not iList:
            iList = [{}]
    else:
        iList = [{}]
    extn = get_extn_from_file(template)
    try:
        parser = (sci_parameter_utils.parsers.PFileParser
                  .parser_by_extn(extn))
    except Exception as e:
        click.echo("Error getting parser: {}".format(e))
        raise click.Abort()
    # Fall back to a suggested name, then to a generic one.
    if not out:
        out = sci_parameter_utils.general.get_fn_suggest(template, parser)
    if not out:
        out = 'output.'+extn
    for d in iList:
        ivals = {}
        try:
            missing = iReq.difference(d.keys())
            for k in iReq:
                if k in d:
                    ivals[k] = eset.validate(k, d[k])
            if interact:
                click.echo('Getting input values')
                ivals.update(get_values_interactively(missing, eset.validate))
            missing = iReq.difference(ivals)
            if missing:
                raise ValueError("No values supplied for {}".format(missing))
        except Exception as e:
            click.echo("Error obtaining input values: {}".format(e))
            raise click.Abort()
        eset.compute_strings(ivals)
        # Build the output filename inside the try block so formatting
        # errors (bad format string, missing key) are reported rather
        # than escaping as an unhandled exception.  Previously the
        # format() call sat above an empty `try: pass` block.
        try:
            fn = out.format(**ivals)
        except Exception as e:
            click.echo("Error generating filename: {}".format(e))
            raise click.Abort()
        if list_fns:
            click.echo(fn)
            continue
        try:
            (sci_parameter_utils.general
             .do_template(template,
                          click.open_file(fn, 'w'),
                          parser,
                          ivals))
        except Exception as e:
            click.echo("Error templating file {}: {}".format(fn, e))
            raise click.Abort()
@cli_main.command('print')
@click.option('--deffile', '-d', type=click.File('r'),
              required=True,
              help="Parmeter definition file")
@click.option('olist', '--print', '-p', default="",
              help="List of sections to print")
@click.argument('prmfiles', type=click.File('r'), nargs=-1)
def print_vals(prmfiles, deffile, olist):
    # type: (List[typing.TextIO], typing.TextIO, str) -> None
    """Prints values from PRMFILES"""
    # Load the definition file describing elements ('elems'), search
    # locations ('locs') and printable sections ('print').
    try:
        idict = get_dict_from_file(deffile)
        deffile.close()
    except Exception as e:
        click.echo("Error setting loading def file: {}".format(e))
        raise click.Abort()
    try:
        dset = sci_parameter_utils.fragment.TemplateElemSet(
            sci_parameter_utils.fragment.elems_from_dict(
                idict['elems'],
                sci_parameter_utils.fragment.TemplateElem
            ))
    except Exception as e:
        click.echo("Error setting up template: {}".format(e))
        raise click.Abort()
    try:
        sset = sci_parameter_utils.fragment.elems_from_dict(
            idict['locs'],
            sci_parameter_utils.fragment.SearchElem
        )
    except Exception as e:
        click.echo("Error generating search list: {}".format(e))
        raise click.Abort()
    try:
        prlist = idict['print']
        # Restrict output to the user-requested sections when -p is given.
        if olist:
            pr_sections = set(s.strip() for s in olist.split(','))
        else:
            pr_sections = set(prlist.keys())
    except Exception as e:
        click.echo("Error collecting printing sections: {}".format(e))
        raise click.Abort()
    # NOTE(review): assumes at least one PRMFILE was supplied; an empty
    # argument list raises IndexError here -- confirm intended behavior.
    extn = get_extn_from_file(prmfiles[0])
    try:
        parser = (sci_parameter_utils.parsers.PFileParser
                  .parser_by_extn(extn))
    except Exception as e:
        click.echo("Error getting parser: {}".format(e))
        raise click.Abort()
    for f in prmfiles:
        click.echo("Input {}:".format(f.name))
        try:
            ivals = sci_parameter_utils.general.do_search(sset, f, parser)
        except Exception as e:
            click.echo("Error searching file: {}".format(e))
            raise click.Abort()
        try:
            dset.compute_strings(ivals)
        except Exception as e:
            click.echo("Error generating strings: {}".format(e))
            raise click.Abort()
        try:
            for k in prlist:
                if k in pr_sections:
                    click.echo('Section {}'.format(k))
                    for v in prlist[k]:
                        click.echo('\t{} = {}'.format(v, ivals[v]))
        except Exception as e:
            click.echo("Error printing data: {}".format(e))
            raise click.Abort()
        click.echo('-----')
|
|
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask.ext.login import UserMixin, AnonymousUserMixin
from . import db, login_manager
from datetime import datetime
from hashlib import md5
from flask import url_for
import os
class Permission:
    """Bit-flag constants for role permissions; combine with bitwise OR.

    A role's ``permissions`` integer is tested against these flags via
    ``User.can()``; 0xff (all bits) denotes the administrator role.
    """
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80
class Role(db.Model):
    """A user role carrying a Permission bitmask.

    ``default`` marks the role assigned to newly registered users.
    """
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles; idempotent, safe to re-run."""
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            # Always refresh permissions so changes here propagate to
            # existing rows.
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """A registered user with role-based permissions and signed tokens.

    Tokens for confirmation, password reset and email change are signed
    with the app SECRET_KEY via itsdangerous serializers.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Assign a role: the configured admin email gets the 0xff role,
        # everyone else the default role.
        if self.role is None:
            if self.email == current_app.config['BLOG_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        # Cache the Gravatar-style hash of the email.
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = md5(
                self.email.encode('utf-8')).hexdigest()

    @property
    def password(self):
        """Write-only attribute; reading raises."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Only the salted hash is stored, never the plaintext.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed account-confirmation token (default 1h expiry)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        db.session.commit()
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed password-reset token (default 1h expiry)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Validate a reset token and set the new password."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        db.session.commit()
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token carrying the requested new email address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        """Validate an email-change token and apply the new address."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        # Reject addresses already in use.
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        self.avatar_hash = md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        db.session.commit()
        return True

    def can(self, permissions):
        """True when the user's role contains every requested permission bit."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def ping(self):
        """Refresh last_seen; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def insert_admin():
        """Create the admin account from BLOG_ADMIN* environment variables."""
        user = User(email=os.environ.get("BLOG_ADMIN"),
                    password=os.environ.get("BLOG_ADMIN_PASSWORD"),
                    username=os.environ.get("BLOG_ADMIN_USERNAME"),
                    confirmed=True)
        db.session.add(user)
        db.session.commit()

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Flask-Login stand-in for unauthenticated visitors: no permissions."""
    def can(self, permissions):
        # Anonymous visitors never hold any permission bits.
        return False
    def is_administrator(self):
        return False
# Use our permission-aware anonymous class instead of the default.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user from the id stored in the session."""
    return User.query.get(int(user_id))
class Post(db.Model):
    """A blog post; tagged via the PostTag association table.

    NOTE(review): only ``body_html`` is stored (no raw ``body`` column,
    unlike Comment) -- confirm this is intentional.
    """
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post',lazy='dynamic')
    posttags = db.relationship('PostTag',
                               backref=db.backref('post', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
class Tag(db.Model):
    """A post tag; linked to posts through the PostTag association table."""
    __tablename__ = 'tags'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    posttags = db.relationship('PostTag',
                               backref=db.backref('tag', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')

    @staticmethod
    def insert_tags():
        """Create any missing tags from BLOG_TAG_LIST; idempotent."""
        tag_list = current_app.config['BLOG_TAG_LIST']
        for i in tag_list:
            tag = Tag.query.filter_by(name=i).first()
            if tag is None:
                tag = Tag(name=i)
                db.session.add(tag)
        db.session.commit()
class PostTag(db.Model):
    """Pure association table between posts and tags (composite PK)."""
    __tablename__ = 'posttags'
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'), primary_key=True)
    tag_id = db.Column(db.Integer, db.ForeignKey('tags.id'), primary_key=True)
class Comment(db.Model):
    """A comment on a post; ``disabled`` lets moderators hide it."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    disabled = db.Column(db.Boolean)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
|
|
import numpy as np
import sys
import pylab
import cPickle
# Path to the cPickle results file, given as the first CLI argument.
filename = sys.argv[1]
def load_cpickle(filename):
    """Load and return the object stored in a cPickle file.

    Uses a context manager so the file handle is closed even when
    cPickle.load raises (the original leaked the handle on error).
    """
    with open(filename, "rb") as f:
        return cPickle.load(f)
# Dict of energy terms and RMSD arrays; the commented lines below record
# the keys written when the pickle was produced.
cpickle_data = load_cpickle(filename)
#cpickle_data["E_native"] = E_native
#cpickle_data["E_non_native"] = E_non_native
#cpickle_data["rmsd_native"] = rmsd_native
#cpickle_data["rmsd_non_native"] = rmsd_non_native
#cpickle_data["E_cam"] = E_cam
#cpickle_data["basilisk"] = basilisk
#cpickle_data["backbone_dbn"] = backbone_dbn
#cpickle_data["eh"] = eh
#cpickle_data["mumu"] = mumu
#cpickle_data["rmsds"] = rmsds
#cpickle_data["opls"] = opls
#cpickle_data["profasi"] = profasi
#cpickle_data["custom"] = custom
#print dict((i,low_E_rmsd_index.count(i)) for i in low_E_rmsd_index)
# Scatter plots of each energy term vs RMSD (Python 2 script).  Native
# decoys are drawn in red, non-native in black; the commented-out blocks
# are plots for terms not present in the current pickle.
print "Making Plots"
pylab.plot(cpickle_data["rmsd_native"],cpickle_data["E_native"],"r.")
pylab.plot(cpickle_data["rmsd_non_native"],cpickle_data["E_non_native"],"k.")
pylab.savefig("total_energy.png")
pylab.clf()
#
# Same data, zoomed to the low-RMSD region.
pylab.plot(cpickle_data["rmsd_native"],cpickle_data["E_native"],"r.")
pylab.plot(cpickle_data["rmsd_non_native"],cpickle_data["E_non_native"],"k.")
pylab.xlim([0.2,4.1])
pylab.savefig("total_low_energy.png")
pylab.clf()
pylab.plot(cpickle_data["rmsds"],cpickle_data["custom"],"k.")
pylab.savefig("custom.png")
pylab.clf()
pylab.plot(cpickle_data["rmsds"],cpickle_data["E_cam"],"k.")
#pylab.xlim([0.0,5.0])
pylab.savefig("cam.png")
pylab.clf()
#pylab.plot(rmsds,kbp_profasi_sidechain_charge_improved,"k.")
#pylab.savefig("kbp_profasi_sidechain_charge_improved.png")
#pylab.clf()
#
#pylab.plot(rmsds,kbp_profasi_hydrophobicity_improved,"k.")
#pylab.savefig("kbp_profasi_hydrophobicity_improved.png")
#pylab.clf()
#
pylab.plot(cpickle_data["rmsds"],cpickle_data["mumu"],"k.")
pylab.savefig("mumu.png")
pylab.clf()
#pylab.plot(rmsds,E_all,"k.")
#pylab.savefig("all.png")
#pylab.clf()
#
pylab.plot(cpickle_data["rmsds"],cpickle_data["profasi"],"k.")
pylab.savefig("profasi.png")
pylab.clf()
pylab.plot(cpickle_data["rmsds"],cpickle_data["opls"],"k.")
pylab.savefig("opls.png")
pylab.clf()
#
#pylab.plot(rmsds,opls_charge,"k.")
#pylab.savefig("opls_charge.png")
#pylab.clf()
#
#pylab.plot(rmsds,opls_vdw,"k.")
#pylab.savefig("opls_vdw.png")
#pylab.clf()
#
#pylab.plot(rmsds,opls_angle_bend,"k.")
#pylab.savefig("opls_angle_bend.png")
#pylab.clf()
#
#pylab.plot(rmsds,opls_torsion,"k.")
#pylab.savefig("opls_torsion.png")
#pylab.clf()
#
#pylab.plot(rmsds,opls_improper_torsion,"k.")
#pylab.savefig("opls_improper_torsion.png")
#pylab.clf()
#
#pylab.plot(rmsds,opls_bond_stretch,"k.")
#pylab.savefig("opls_bond_stretch.png")
#pylab.clf()
#
#pylab.plot(rmsds,opls_non_bonded,"k.")
#pylab.savefig("opls_non_bonded.png")
#pylab.clf()
#
#pylab.plot(rmsds,gbsa,"k.")
#pylab.savefig("gbsa.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_local,"k.")
#pylab.savefig("profasi_local.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_local_sidechain,"k.")
#pylab.savefig("profasi_local_sidechain.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_excluded_volume,"k.")
#pylab.savefig("profasi_excluded_volume.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_excluded_volume_local,"k.")
#pylab.savefig("profasi_excluded_volume_local.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_hydrogen_bond,"k.")
#pylab.savefig("profasi_hydrogen_bond.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_hydrophobicity,"k.")
#pylab.savefig("profasi_hydrophobicity.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_sidechain_charge,"k.")
#pylab.savefig("profasi_sidechain_charge.png")
#pylab.clf()
#
#pylab.plot(rmsds,profasi_proline_phi_torsion,"k.")
#pylab.savefig("profasi_proline_phi_torsion.png")
#pylab.clf()
#
pylab.plot(cpickle_data["rmsds"],cpickle_data["basilisk"],"k.")
pylab.savefig("basilisk.png")
pylab.clf()
#
pylab.plot(cpickle_data["rmsds"],cpickle_data["backbone_dbn"],"k.")
pylab.savefig("backbone_dbn.png")
pylab.clf()
#
pylab.plot(cpickle_data["rmsds"],cpickle_data["eh"],"k.")
#pylab.ylim(-200,-700)
pylab.savefig("eh.png")
pylab.clf()
print "calculating rmsds"
# Track, for each of the 255 (= 2**8 - 1) non-empty combinations of the 8
# energy terms, the lowest combined energy seen across all frames and the
# RMSD/index of the frame that produced it.
lowest_energies = [9999999 for _ in range(255)]
low_E_rmsds = [99999 for _ in range(255)]
low_E_rmsd_index = [99999 for _ in range(255)]
for i, profasi_i in enumerate(cpickle_data["profasi"]):
    energies = []
    names = ["custom","E_cam", "opls", "basilisk", "backbone_dbn", "eh", "mumu", "profasi"]
    combinations = [cpickle_data[x][i] for x in names]
    combination_names = []
    # Depth-first enumeration of all subsets of the 8 terms.  The
    # traversal order is identical on every frame, so index j refers to
    # the same combination across iterations (and combination_names from
    # the final frame is valid for the report below).
    for ja in range(8):
        energies.append(combinations[ja])
        combination_names.append(names[ja])
        for jb in range(8):
            if jb > ja:
                energies.append(combinations[ja]+combinations[jb])
                combination_names.append(names[ja]+names[jb])
                for jc in range(8):
                    if jc > jb:
                        energies.append(combinations[ja]+combinations[jb]+combinations[jc])
                        combination_names.append(names[ja]+names[jb]+names[jc])
                        for jd in range(8):
                            if jd > jc:
                                energies.append(combinations[ja]+combinations[jb]+combinations[jc]+combinations[jd])
                                combination_names.append(names[ja]+names[jb]+names[jc]+names[jd])
                                for je in range(8):
                                    if je > jd:
                                        energies.append(combinations[ja]+combinations[jb]+combinations[jc]+combinations[jd]+combinations[je])
                                        combination_names.append(names[ja]+names[jb]+names[jc]+names[jd]+names[je])
                                        for jf in range(8):
                                            if jf > je:
                                                energies.append(combinations[ja]+combinations[jb]+combinations[jc]+combinations[jd]+combinations[je]+combinations[jf])
                                                combination_names.append(names[ja]+names[jb]+names[jc]+names[jd]+names[je]+names[jf])
                                                for jg in range(8):
                                                    if jg > jf:
                                                        energies.append(combinations[ja]+combinations[jb]+combinations[jc]+combinations[jd]+combinations[je]+combinations[jf]+combinations[jg])
                                                        combination_names.append(names[ja]+names[jb]+names[jc]+names[jd]+names[je]+names[jf]+names[jg])
                                                        for jh in range(8):
                                                            if jh > jg:
                                                                energies.append(combinations[ja]+combinations[jb]+combinations[jc]+combinations[jd]+combinations[je]+combinations[jf]+combinations[jg]+combinations[jh])
                                                                combination_names.append(names[ja]+names[jb]+names[jc]+names[jd]+names[je]+names[jf]+names[jg]+names[jh])
    # Update the per-combination minima with this frame's energies.
    for j, E in enumerate(energies):
        if E < lowest_energies[j]:
            lowest_energies[j] = E
            low_E_rmsds[j] = cpickle_data["rmsds"][i]
            low_E_rmsd_index[j] = i
# Report the best frame for each combination of terms.
for j, J in enumerate(lowest_energies):
    print j, low_E_rmsd_index[j], low_E_rmsds[j], combination_names[j]
|
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""Views for `lino.modlib.bootstrap3`.
"""
from __future__ import division
from past.utils import old_div
import logging
logger = logging.getLogger(__name__)
from django import http
from django.conf import settings
from django.views.generic import View
from django.core import exceptions
from django.utils.translation import ugettext as _
# from django.contrib import auth
from lino.core import auth
from lino.api import dd
from lino.core import constants
# from lino.core import auth
from lino.core.requests import BaseRequest
from lino.core.tablerequest import TableRequest
from lino.core.views import action_request
from lino.core.utils import navinfo
from lino.modlib.bootstrap3.views import http_response
from etgen.html import E
class List(View):
    """Render an actor's list (grid) view as a plain HTML page."""

    def get(self, request, app_label=None, actor=None):
        # Build a *list* action request (last positional flag True) from
        # the URL parameters and attach the bootstrap3 renderer.
        ar = action_request(app_label, actor, request, request.GET, True)
        ar.renderer = settings.SITE.plugins.bootstrap3.renderer
        title = ar.get_title()
        context = dict(title=title, heading=title)
        # Table requests render as an HTML grid; anything else goes
        # through its detail layout with no selected row.
        # NOTE(review): table2html/layout2html are not imported in this
        # module's visible header — verify they are in scope.
        if isinstance(ar, TableRequest):
            main = table2html(ar)
        else:
            main = layout2html(ar, None)
        context.update(main=main)
        context.update(ar=ar)
        return http_response(ar, ar.actor.list_html_template, context)
class Element(View):
    """Render the detail view of a single database row as an HTML page.

    Looks the row up by primary key, optionally builds a
    first/prev/next/last pager, and renders the actor's detail layout.
    """

    def get(self, request, app_label=None, actor=None, pk=None):
        # print(request, app_label, actor, pk)
        # Build a *detail* action request (last positional flag False).
        ar = action_request(app_label, actor, request, request.GET, False)
        ar.renderer = settings.SITE.plugins.bootstrap3.renderer
        navigator = None
        # '-99999' and '-99998' are sentinel pks (phantom rows) that do
        # not refer to a stored row, so skip the database lookup for them.
        if pk and pk != '-99999' and pk != '-99998':
            elem = ar.get_row_by_pk(pk)
            if elem is None:
                raise http.Http404("%s has no row with primary key %r" %
                                   (ar.actor, pk))
                #~ raise Exception("20120327 %s.get_row_by_pk(%r)" % (rpt,pk))
            if ar.actor.show_detail_navigator:
                # Build the row navigator (pager) from the surrounding
                # data iterator, if navigation info is available.
                ni = navinfo(ar.data_iterator, elem)
                if ni:
                    # m = elem.__class__
                    buttons = []
                    #~ buttons.append( ('*',_("Home"), '/' ))
                    buttons.append(
                        ('<<', _("First page"), ar.pk2url(ni['first'])))
                    buttons.append(
                        ('<', _("Previous page"), ar.pk2url(ni['prev'])))
                    buttons.append(
                        ('>', _("Next page"), ar.pk2url(ni['next'])))
                    buttons.append(
                        ('>>', _("Last page"), ar.pk2url(ni['last'])))
                    # NOTE(review): buttons2pager is not imported in this
                    # module's visible header — verify it is in scope.
                    navigator = buttons2pager(buttons)
                else:
                    navigator = E.p("No navinfo")
        else:
            elem = None
        # main = E.div(
        #     E.div(E.div(E.h5(ar.get_title(),
        #                      style="display: inline-block;"),
        #                 class_="panel-title"),
        #           class_="panel-heading"),
        #     E.div(layout2html(ar, elem),class_="panel-body"),  # Content
        #     class_="panel panel-default",
        #     # style="display: inline-block;"
        # )
        main = layout2html(ar, elem)
        # The `method="html"` argument isn't available in Python 2.6,
        # only 2.7.  It is useful to avoid side effects in case of
        # empty elements: the default method (xml) writes an empty
        # E.div() as "<div/>" while in HTML5 it must be "<div></div>"
        # (and the ending / is ignored).
        #~ return tostring(main, method="html")
        #~ return tostring(main)
        # return main
        context = dict(
            title=ar.get_action_title(),
            obj=elem,
            form=main,
            navigator=navigator,
        )
        #~ template = web.jinja_env.get_template('detail.html')
        context.update(ar=ar)
        return http_response(ar, ar.actor.detail_html_template, context)
class Authenticate(View):
    """Log a user in or out.

    GET with the action-name parameter set to ``logout`` ends the
    session; POST with ``username`` and ``password`` starts one.  Both
    redirect to the site root afterwards.
    """

    def get(self, request, *args, **kw):
        action_name = request.GET.get(constants.URL_PARAM_ACTION_NAME)
        if action_name == 'logout':
            # Forget the session's username, then invalidate the session.
            request.session.pop('username', None)
            auth.logout(request)
            return http.HttpResponseRedirect('/')
        # Any other GET on this endpoint is unsupported.
        raise http.Http404()

    def post(self, request, *args, **kw):
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(
            request, username=username, password=password)
        # BUGFIX: authenticate() returns None on invalid credentials;
        # the original code passed that None straight to auth.login(),
        # which raises instead of answering the request.  Only log in
        # a successfully authenticated user.
        if user is not None:
            auth.login(request, user)
        return http.HttpResponseRedirect('/')
class Index(View):
    """Render the site's main page."""

    def get(self, request, *args, **kw):
        # The bootstrap3 plugin supplies the renderer used for the page.
        plugin = settings.SITE.plugins.bootstrap3
        ar = BaseRequest(request=request, renderer=plugin.renderer)
        return index_response(ar)
def index_response(ar):
    """Build the HTTP response for the site's main page.

    Asks the site for its main HTML, runs it through the bootstrap3
    renderer and hands the result to the ``bootstrap3/index.html``
    template.
    """
    plugin = settings.SITE.plugins.bootstrap3
    body = settings.SITE.get_main_html(ar.request, extjs=plugin)
    body = plugin.renderer.html_text(body)
    context = dict(title=settings.SITE.title, main=body)
    return http_response(ar, 'bootstrap3/index.html', context)
class Metadata(View):
    """Serve the OData service metadata document."""

    def get(self, request, *args, **kw):
        # The odata plugin supplies the renderer for the CSDL document.
        plugin = settings.SITE.plugins.odata
        ar = BaseRequest(request=request, renderer=plugin.renderer)
        return metada_response(ar)
def metada_response(ar):
    """Build the OData CSDL metadata response.

    NOTE(review): the name looks like a typo for ``metadata_response``;
    it is kept unchanged because :class:`Metadata` calls it by this name.
    """
    # Removed an unused local (`ui = settings.SITE.plugins.odata`) that
    # the original assigned and never read.
    context = dict(
        title=settings.SITE.title,
        dd=dd,
    )
    return http_response(ar, 'odata/csdl.xml', context)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import uuid, filecmp, os, sys, time, tempfile
from nose.tools import assert_raises, assert_equals, assert_is_none, assert_less
import synapseclient
from synapseclient import Activity, Wiki, Project, Folder, File, Link, Column, Schema, RowSet, Row
from synapseclient.exceptions import *
import synapseutils
import re
import integration
from integration import schedule_for_cleanup, QUERY_TIMEOUT_SEC
def setup(module):
    """Nose module-level setup: share the integration fixtures.

    Copies the shared Synapse client, test project and secondary user
    credentials from the ``integration`` package onto this module.
    """
    for attr in ('syn', 'project', 'other_user'):
        setattr(module, attr, getattr(integration, attr))
###Add Test for UPDATE
###Add test for existing provenance but the orig doesn't have provenance
def test_copy():
    """Integration test for :func:`synapseutils.copy`.

    Exercises copying of files, external URLs, links, tables, folders
    and whole projects, including provenance/annotation handling and the
    error cases (duplicate names, bad arguments, copying as another
    user).  Requires a live Synapse connection via the module fixtures.

    NOTE(review): ``utils`` is not imported in this module's visible
    header — presumably ``synapseclient.utils``; verify.
    """
    # Create a Project
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    # Grant the second test user enough rights to copy from this project.
    acl = syn.setPermissions(project_entity, other_user['principalId'], accessType=['READ', 'CREATE', 'UPDATE', 'DOWNLOAD'])
    # Create three Folders in Project
    folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    second_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    third_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    schedule_for_cleanup(second_folder.id)
    schedule_for_cleanup(third_folder.id)
    # Annotations and provenance
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    annots = {'test':['hello_world']}
    prov = Activity(name = "test",used = repo_url)
    # Create, upload, and set annotations/provenance on a file in Folder
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    file_entity = syn.store(File(filename, parent=folder_entity))
    externalURL_entity = syn.store(File(repo_url,name='rand',parent=folder_entity,synapseStore=False))
    syn.setAnnotations(file_entity,annots)
    syn.setAnnotations(externalURL_entity,annots)
    syn.setProvenance(externalURL_entity.id, prov)
    schedule_for_cleanup(file_entity.id)
    schedule_for_cleanup(externalURL_entity.id)
    # ------------------------------------
    # TEST COPY FILE
    # ------------------------------------
    output = synapseutils.copy(syn,file_entity.id,destinationId=project_entity.id)
    output_URL = synapseutils.copy(syn,externalURL_entity.id,destinationId=project_entity.id,skipCopyAnnotations=True)
    # Verify that our copied files are identical
    copied_ent = syn.get(output[file_entity.id])
    copied_URL_ent = syn.get(output_URL[externalURL_entity.id],downloadFile=False)
    copied_ent_annot = syn.getAnnotations(copied_ent)
    copied_url_annot = syn.getAnnotations(copied_URL_ent)
    copied_prov = syn.getProvenance(copied_ent)
    copied_url_prov = syn.getProvenance(copied_URL_ent)
    schedule_for_cleanup(copied_ent.id)
    schedule_for_cleanup(copied_URL_ent.id)
    # TEST: the default setProvenance="traceback" records the source entity
    assert copied_prov['used'][0]['reference']['targetId'] == file_entity.id
    assert copied_url_prov['used'][0]['reference']['targetId'] == externalURL_entity.id
    # TEST: Make sure copied files are the same
    assert copied_ent_annot == annots
    assert copied_ent.dataFileHandleId == file_entity.dataFileHandleId
    # TEST: Make sure copied URLs are the same (annotations skipped above)
    assert copied_url_annot == {}
    assert copied_URL_ent.externalURL == repo_url
    assert copied_URL_ent.name == 'rand'
    assert copied_URL_ent.dataFileHandleId == externalURL_entity.dataFileHandleId
    # TEST: Throw error if file is copied to a folder/project that has a file with the same filename
    assert_raises(ValueError,synapseutils.copy,syn,project_entity.id,destinationId = project_entity.id)
    assert_raises(ValueError,synapseutils.copy,syn,file_entity.id,destinationId = project_entity.id)
    assert_raises(ValueError,synapseutils.copy,syn,file_entity.id,destinationId = third_folder.id,setProvenance = "gib")
    assert_raises(ValueError,synapseutils.copy,syn,file_entity.id,destinationId = file_entity.id)
    # Test: setProvenance = None means the copy has no provenance record
    output = synapseutils.copy(syn,file_entity.id,destinationId=second_folder.id,setProvenance = None)
    assert_raises(SynapseHTTPError,syn.getProvenance,output[file_entity.id])
    schedule_for_cleanup(output[file_entity.id])
    # Test: setProvenance = "existing" carries the source's provenance over
    output_URL = synapseutils.copy(syn,externalURL_entity.id,destinationId=second_folder.id,setProvenance = "existing")
    output_prov = syn.getProvenance(output_URL[externalURL_entity.id])
    schedule_for_cleanup(output_URL[externalURL_entity.id])
    assert output_prov['name'] == prov['name']
    assert output_prov['used'] == prov['used']
    # The cross-user checks below need a second set of credentials.
    if 'username' not in other_user or 'password' not in other_user:
        sys.stderr.write('\nWarning: no test-authentication configured. skipping testing copy function when trying to copy file made by another user.\n')
        return
    try:
        # Test: Other user copy should result in different data file handle
        syn_other = synapseclient.Synapse(skip_checks=True)
        syn_other.login(other_user['username'], other_user['password'])
        output = synapseutils.copy(syn_other,file_entity.id,destinationId=third_folder.id)
        new_copied_ent = syn.get(output[file_entity.id])
        new_copied_ent_annot = syn.getAnnotations(new_copied_ent)
        schedule_for_cleanup(new_copied_ent.id)
        copied_URL_ent.externalURL = "https://www.google.com"
        copied_URL_ent = syn.store(copied_URL_ent)
        output = synapseutils.copy(syn_other,copied_URL_ent.id,destinationId=third_folder.id,version=1)
        new_copied_URL = syn.get(output[copied_URL_ent.id],downloadFile=False)
        schedule_for_cleanup(new_copied_URL.id)
        assert new_copied_ent_annot == annots
        assert new_copied_ent.dataFileHandleId != copied_ent.dataFileHandleId
        # Test if copying different versions gets you the correct file
        assert new_copied_URL.versionNumber == 1
        assert new_copied_URL.externalURL == repo_url
        assert new_copied_URL.dataFileHandleId != copied_URL_ent.dataFileHandleId
    finally:
        syn_other.logout()
    # ------------------------------------
    # TEST COPY LINKS
    # ------------------------------------
    second_file = utils.make_bogus_data_file()
    #schedule_for_cleanup(filename)
    second_file_entity = syn.store(File(second_file, parent=project_entity))
    link_entity = Link(second_file_entity.id,parent=folder_entity.id)
    link_entity = syn.store(link_entity)
    copied_link = synapseutils.copy(syn,link_entity.id, destinationId=second_folder.id)
    old = syn.get(link_entity.id,followLink=False)
    new = syn.get(copied_link[link_entity.id],followLink=False)
    assert old.linksTo['targetId'] == new.linksTo['targetId']
    schedule_for_cleanup(second_file_entity.id)
    schedule_for_cleanup(link_entity.id)
    schedule_for_cleanup(copied_link[link_entity.id])
    time.sleep(3)
    # Copying the same link to the same destination again must fail.
    assert_raises(ValueError,synapseutils.copy,syn,link_entity.id,destinationId=second_folder.id)
    # ------------------------------------
    # TEST COPY TABLE
    # ------------------------------------
    second_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(second_project.id)
    cols = [Column(name='n', columnType='DOUBLE', maximumSize=50),
            Column(name='c', columnType='STRING', maximumSize=50),
            Column(name='i', columnType='INTEGER')]
    data = [[2.1,'foo',10],
            [2.2,'bar',20],
            [2.3,'baz',30]]
    schema = syn.store(Schema(name='Testing', columns=cols, parent=project_entity.id))
    row_reference_set = syn.store(RowSet(schema=schema, rows=[Row(r) for r in data]))
    table_map = synapseutils.copy(syn,schema.id, destinationId=second_project.id)
    copied_table = syn.tableQuery('select * from %s' %table_map[schema.id])
    rows = copied_table.asRowSet()['rows']
    # TEST: Check if all values are the same
    for i,row in enumerate(rows):
        assert row['values'] == data[i]
    assert_raises(ValueError,synapseutils.copy,syn,schema.id,destinationId=second_project.id)
    schedule_for_cleanup(schema.id)
    schedule_for_cleanup(table_map[schema.id])
    # ------------------------------------
    # TEST COPY FOLDER
    # ------------------------------------
    mapping = synapseutils.copy(syn,folder_entity.id,destinationId=second_project.id)
    for i in mapping:
        old = syn.get(i,downloadFile=False)
        new = syn.get(mapping[i],downloadFile=False)
        assert old.name == new.name
        assert old.annotations == new.annotations
        assert old.concreteType == new.concreteType
    assert_raises(ValueError,synapseutils.copy,syn,folder_entity.id,destinationId=second_project.id)
    # TEST: Throw error if excludeTypes isn't in file, link and table or isn't a list
    assert_raises(ValueError,synapseutils.copy,syn,second_folder.id,destinationId=second_project.id,excludeTypes=["foo"])
    assert_raises(ValueError,synapseutils.copy,syn,second_folder.id,destinationId=second_project.id,excludeTypes="file")
    # TEST: excluding file/table/link means only the folder itself is created
    second = synapseutils.copy(syn,second_folder.id,destinationId=second_project.id,excludeTypes=["file","table","link"])
    copied_folder = syn.get(second[second_folder.id])
    assert copied_folder.name == second_folder.name
    assert len(second) == 1
    # TEST: Make sure error is thrown if foldername already exists
    assert_raises(ValueError,synapseutils.copy,syn,second_folder.id, destinationId=second_project.id)
    # ------------------------------------
    # TEST COPY PROJECT
    # ------------------------------------
    third_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(third_project.id)
    mapping = synapseutils.copy(syn,project_entity.id,destinationId=third_project.id)
    for i in mapping:
        old = syn.get(i,downloadFile=False)
        new = syn.get(mapping[i],downloadFile=False)
        if not isinstance(old, Project):
            assert old.name == new.name
            assert old.annotations == new.annotations
            assert old.concreteType == new.concreteType
    # TEST: Can't copy project to a folder
    assert_raises(ValueError,synapseutils.copy,syn,project_entity.id,destinationId=second_folder.id)
def test_copyWiki():
    """Integration test for :func:`synapseutils.copyWiki`.

    Builds a project with a three-level wiki hierarchy (plus a wiki on a
    file entity), copies it to other projects and verifies titles,
    parent-id mapping, attachments, and the rewriting of internal wiki
    links and synapse ids.  Requires a live Synapse connection.
    """
    # Create a Project with three folders and two files (one nested).
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    second_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(second_folder.id)
    third_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(third_folder.id)
    filename = utils.make_bogus_data_file()
    attachname = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    file_entity = syn.store(File(filename, parent=folder_entity))
    nested_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=folder_entity))
    second_file = syn.store(File(filename, parent=nested_folder))
    schedule_for_cleanup(file_entity.id)
    schedule_for_cleanup(nested_folder.id)
    schedule_for_cleanup(second_file.id)
    # A wiki attached directly to a file entity.
    fileWiki = Wiki(owner=second_file, title='A Test Wiki', markdown="Test")
    fileWiki = syn.store(fileWiki)
    # Create mock wiki whose markdown embeds synapse ids to be remapped.
    md = """
    This is a test wiki
    =======================
    Blabber jabber blah blah boo.
    %s
    %s
    """ %(file_entity.id,second_file.id)
    wiki = Wiki(owner=project_entity, title='A Test Wiki', markdown=md,
                attachments=[attachname])
    wiki = syn.store(wiki)
    # Create a Wiki sub-page
    subwiki = Wiki(owner=project_entity, title='A sub-wiki',
                   markdown='%s' % file_entity.id, parentWikiId=wiki.id)
    subwiki = syn.store(subwiki)
    second_md = """
    Testing internal links
    ======================
    [test](#!Synapse:%s/wiki/%s)
    %s)
    """ % (project_entity.id,subwiki.id, second_file.id)
    sub_subwiki = Wiki(owner=project_entity, title='A sub-sub-wiki',
                       markdown=second_md, parentWikiId=subwiki.id,
                       attachments=[attachname])
    sub_subwiki = syn.store(sub_subwiki)
    # Copy wiki to second project
    second_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(second_project.id)
    fileMapping = synapseutils.copy(syn, project_entity, second_project.id, skipCopyWikiPage=True)
    # Test: skipCopyWikiPage=True leaves the destination without a wiki
    assert_raises(SynapseHTTPError,syn.getWiki,second_project.id)
    first_headers = syn.getWikiHeaders(project_entity)
    second_headers = synapseutils.copyWiki(syn, project_entity.id, second_project.id, entityMap=fileMapping)
    mapping = dict()
    # Test: Check that all wikis were copied correctly with the correct mapping
    for index,info in enumerate(second_headers):
        mapping[first_headers[index]['id']] = info['id']
        assert first_headers[index]['title'] == info['title']
        if info.get('parentId',None) is not None:
            # Check if parent Ids are mapped correctly in the copied Wikis
            assert info['parentId'] == mapping[first_headers[index]['parentId']]
    # Test: Check that all wikis have the correct attachments and have correct internal synapse link/file mapping
    for index,info in enumerate(second_headers):
        # Check that the markdown was correctly remapped: rewrite the
        # original markdown with the wiki-id and file-id mappings and
        # compare against the copy.
        orig_wikiPage= syn.getWiki(project_entity, first_headers[index]['id'])
        new_wikiPage = syn.getWiki(second_project, info['id'])
        s = orig_wikiPage.markdown
        for oldWikiId in mapping.keys():
            oldProjectAndWikiId = "%s/wiki/%s" % (project_entity.id, oldWikiId)
            newProjectAndWikiId = "%s/wiki/%s" % (second_project.id, mapping[oldWikiId])
            s=re.sub(oldProjectAndWikiId, newProjectAndWikiId, s)
        for oldFileId in fileMapping.keys():
            s = re.sub(oldFileId, fileMapping[oldFileId], s)
        assert s == new_wikiPage.markdown
        orig_attach = syn.getWikiAttachments(orig_wikiPage)
        new_attach = syn.getWikiAttachments(new_wikiPage)
        orig_file = [i['fileName'] for i in orig_attach if i['concreteType'] != "org.sagebionetworks.repo.model.file.PreviewFileHandle"]
        new_file = [i['fileName'] for i in new_attach if i['concreteType'] != "org.sagebionetworks.repo.model.file.PreviewFileHandle"]
        # check that attachment file names are the same
        assert orig_file == new_file
    # Test: copyWikiPage = True (Default) (Should copy all wikis including wikis on files)
    third_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(third_project.id)
    copiedFile = synapseutils.copy(syn, second_file, third_project.id)
    copiedWiki = syn.getWiki(copiedFile[second_file.id])
    assert copiedWiki.title == fileWiki.title
    assert copiedWiki.markdown == fileWiki.markdown
    # Test: entitySubPageId copies only the requested sub-tree
    third_header = synapseutils.copyWiki(syn, project_entity.id, third_project.id, entitySubPageId=sub_subwiki.id, destinationSubPageId=None, updateLinks=False, updateSynIds=False,entityMap=fileMapping)
    test_ent_subpage = syn.getWiki(third_project.id,third_header[0]['id'])
    # Test: No internal links updated (updateLinks/updateSynIds=False)
    assert test_ent_subpage.markdown == sub_subwiki.markdown
    assert test_ent_subpage.title == sub_subwiki.title
    # Test: destinationSubPageId attaches the copy under an existing page
    fourth_header = synapseutils.copyWiki(syn, project_entity.id, third_project.id, entitySubPageId=subwiki.id, destinationSubPageId=test_ent_subpage.id, updateLinks=False, updateSynIds=False,entityMap=fileMapping)
    temp = syn.getWiki(third_project.id, fourth_header[0]['id'])
    # There are issues where some title pages are blank. This is an issue that needs to be addressed
    assert temp.title == subwiki.title
    assert temp.markdown == subwiki.markdown
    temp = syn.getWiki(third_project.id, fourth_header[1]['id'])
    assert temp.title == sub_subwiki.title
    assert temp.markdown == sub_subwiki.markdown
def test_walk():
    """Integration test for :func:`synapseutils.walk`.

    Builds a small project tree, walks it, and checks that every
    (dirpath, dirnames, filenames) triple matches what was created.
    Also verifies that walking a non-container entity yields nothing.
    """
    walked = []
    firstfile = utils.make_bogus_data_file()
    schedule_for_cleanup(firstfile)
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    second_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(second_folder.id)
    file_entity = syn.store(File(firstfile, parent=project_entity))
    schedule_for_cleanup(file_entity.id)
    walked.append(((project_entity.name, project_entity.id),
                   [(folder_entity.name, folder_entity.id),
                    (second_folder.name, second_folder.id)],
                   [(file_entity.name, file_entity.id)]))
    nested_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=folder_entity))
    schedule_for_cleanup(nested_folder.id)
    secondfile = utils.make_bogus_data_file()
    schedule_for_cleanup(secondfile)
    second_file = syn.store(File(secondfile, parent=nested_folder))
    schedule_for_cleanup(second_file.id)
    thirdfile = utils.make_bogus_data_file()
    schedule_for_cleanup(thirdfile)
    third_file = syn.store(File(thirdfile, parent=second_folder))
    schedule_for_cleanup(third_file.id)
    walked.append(((os.path.join(project_entity.name, folder_entity.name), folder_entity.id),
                   [(nested_folder.name, nested_folder.id)],
                   []))
    walked.append(((os.path.join(project_entity.name, folder_entity.name, nested_folder.name), nested_folder.id),
                   [],
                   [(second_file.name, second_file.id)]))
    walked.append(((os.path.join(project_entity.name, second_folder.name), second_folder.id),
                   [],
                   [(third_file.name, third_file.id)]))
    visited = list(synapseutils.walk(syn, project_entity.id))
    # The ordering of sibling folders/files within each triple is
    # unspecified, so sort the inner lists in place before comparing.
    # BUGFIX: the original wrote ``x = x.sort()``, which rebinds the loop
    # variable to None and only worked because list.sort() mutates in
    # place; it also used ``type(x) == list`` instead of isinstance().
    for triple in walked + visited:
        for part in triple:
            if isinstance(part, list):
                part.sort()
    for triple in visited:
        assert triple in walked
    # Walking something that is not a container yields nothing.
    temp = synapseutils.walk(syn, second_file.id)
    assert list(temp) == []
def test_syncFromSynapse():
    """Recursive download via syncFromSynapse with path=None.

    Most of syncFromSynapse's functionality is already covered by
    tests/integration/test_command_line_client::test_command_get_recursive_and_query;
    this test only exercises the path=None case.
    """
    # One project containing a folder with two files, plus a third file
    # stored directly at the project level.
    project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    expected_paths = []
    for _ in range(2):
        path = utils.make_bogus_data_file()
        expected_paths.append(path)
        schedule_for_cleanup(path)
        syn.store(File(path, parent=folder_entity))
    path = utils.make_bogus_data_file()
    expected_paths.append(path)
    schedule_for_cleanup(path)
    syn.store(File(path, parent=project_entity))
    # A recursive get must return exactly the files we uploaded.
    fetched = synapseutils.syncFromSynapse(syn, project_entity)
    assert len(fetched) == len(expected_paths)
    for entity in fetched:
        assert entity.path in expected_paths
def test_syncFromSynapse__given_file_id():
    """Passing a single file id to syncFromSynapse yields just that file."""
    local_path = utils.make_bogus_data_file()
    schedule_for_cleanup(local_path)
    stored = syn.store(
        File(local_path, name=str(uuid.uuid4()), parent=project, synapseStore=False))
    fetched = synapseutils.syncFromSynapse(syn, stored.id)
    assert_equals(1, len(fetched))
    assert_equals(stored, fetched[0])
def test_copyFileHandleAndchangeFileMetadata():
    """Integration test for copyFileHandles and changeFileMetaData.

    Batch-copies the file handles of a file entity and a wiki
    attachment, checks the copies' metadata, verifies that another user
    gets UNAUTHORIZED results, and that changeFileMetaData can rename a
    file and change its content type without altering its md5.
    """
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    filename = utils.make_bogus_data_file()
    attachname = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    schedule_for_cleanup(attachname)
    file_entity = syn.store(File(filename, parent=project_entity))
    schedule_for_cleanup(file_entity.id)
    wiki = Wiki(owner=project_entity, title='A Test Wiki', markdown="testing",
                attachments=[attachname])
    wiki = syn.store(wiki)
    wikiattachments = syn._getFileHandle(wiki.attachmentFileHandleIds[0])
    # CHECK: Can batch copy two file handles (wiki attachments and file entity)
    copiedFileHandles = synapseutils.copyFileHandles(syn, [file_entity.dataFileHandleId, wiki.attachmentFileHandleIds[0]], [file_entity.concreteType.split(".")[-1], "WikiAttachment"], [file_entity.id, wiki.id], [file_entity.contentType, wikiattachments['contentType']], [file_entity.name, wikiattachments['fileName']])
    assert all([results.get("failureCode") is None for results in copiedFileHandles['copyResults']]), "NOT FOUND and UNAUTHORIZED failure codes."
    # Expected metadata keyed by file name, for both copied handles.
    files = {file_entity.name:{"contentType":file_entity['contentType'],
                               "md5":file_entity['md5']},
             wikiattachments['fileName']:{"contentType":wikiattachments['contentType'],
                                          "md5":wikiattachments['contentMd5']}}
    for results in copiedFileHandles['copyResults']:
        i = results['newFileHandle']
        assert files.get(i['fileName']) is not None, "Filename has to be the same"
        assert files[i['fileName']]['contentType'] == i['contentType'], "Content type has to be the same"
        assert files[i['fileName']]['md5'] == i['contentMd5'], "Md5 has to be the same"
    # NOTE(review): this re-checks the same condition as the assert above
    # the loop — presumably redundant; verify before removing.
    assert all([results.get("failureCode") is None for results in copiedFileHandles['copyResults']]), "There should not be NOT FOUND and UNAUTHORIZED failure codes."
    # The cross-user checks below need a second set of credentials.
    if 'username' not in other_user or 'password' not in other_user:
        sys.stderr.write('\nWarning: no test-authentication configured. skipping testing copy function when trying to copy file made by another user.\n')
        return
    syn_other = synapseclient.Synapse(skip_checks=True)
    syn_other.login(other_user['username'], other_user['password'])
    # CHECK: UNAUTHORIZED failure code should be returned
    output = synapseutils.copyFileHandles(syn_other,[file_entity.dataFileHandleId, wiki.attachmentFileHandleIds[0]], [file_entity.concreteType.split(".")[-1], "WikiAttachment"], [file_entity.id, wiki.id], [file_entity.contentType, wikiattachments['contentType']], [file_entity.name, wikiattachments['fileName']])
    assert all([results.get("failureCode") == "UNAUTHORIZED" for results in output['copyResults']]), "UNAUTHORIZED codes."
    # CHECK: Changing content type and downloadAs
    new_entity = synapseutils.changeFileMetaData(syn, file_entity, contentType="application/x-tar", downloadAs="newName.txt")
    schedule_for_cleanup(new_entity.id)
    assert file_entity.md5 == new_entity.md5, "Md5s must be equal after copying"
    fileResult = syn._getFileHandleDownload(new_entity.dataFileHandleId, new_entity.id)
    assert fileResult['fileHandle']['fileName'] == "newName.txt", "Set new file name to be newName.txt"
    assert new_entity.contentType == "application/x-tar", "Set new content type to be application/x-tar"
def test_copyFileHandles__copying_cached_file_handles():
    """copyFileHandles preserves the local-cache mapping of copied handles.

    Uploads several files, evicts every other one from the local cache,
    copies all file handles, and verifies that handles which were cached
    map to the same cached path while evicted ones stay uncached.
    """
    num_files = 3
    file_entities = []
    # Upload temp files to Synapse.  (Removed a stray trailing semicolon
    # from the original.)
    for i in range(num_files):
        file_path = utils.make_bogus_data_file()
        schedule_for_cleanup(file_path)
        file_entities.append(syn.store(File(file_path, name=str(uuid.uuid1()), parent=project)))
    # Arguments for the function under test, one entry per uploaded file.
    file_handles = [file_entity['_file_handle'] for file_entity in file_entities]
    file_entity_ids = [file_entity['id'] for file_entity in file_entities]
    content_types = [file_handle['contentType'] for file_handle in file_handles]
    filenames = [file_handle['fileName'] for file_handle in file_handles]
    # Remove every other FileHandle from the cache (at even indices);
    # range() with step 2 replaces the original modulo test in the loop.
    for i in range(0, num_files, 2):
        syn.cache.remove(file_handles[i]["id"])
    # Copy all file handles at once.
    copiedFileHandles = synapseutils.copyFileHandles(syn, file_handles, ["FileEntity"] * num_files,
                                                     file_entity_ids, content_types, filenames)
    new_file_handle_ids = [copy_result['newFileHandle']['id'] for copy_result in copiedFileHandles['copyResults']]
    # Verify that the cached paths are the same.
    for i in range(num_files):
        original_path = syn.cache.get(file_handles[i]['id'])
        new_path = syn.cache.get(new_file_handle_ids[i])
        if i % 2 == 0:  # even indices were evicted, so both must be None
            assert_is_none(original_path)
            assert_is_none(new_path)
        else:  # at odd indices the cached file path must carry over
            assert_equals(original_path, new_path)
|
|
# coding=utf-8
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for Native IPMI power driver module.
"""
import mock
from oslo_utils import uuidutils
from pyghmi import exceptions as pyghmi_exception
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import console_utils
from ironic.drivers.modules import ipminative
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_ipmi_info()
class IPMINativePrivateMethodTestCase(db_base.DbTestCase):
"""Test cases for ipminative private methods."""
    def setUp(self):
        """Create a node using the fake ipminative driver and cache its
        parsed driver info for the individual test methods."""
        super(IPMINativePrivateMethodTestCase, self).setUp()
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_ipminative',
                                               driver_info=INFO_DICT)
        # Parsed ipmi_* fields (address, username, password, uuid, ...).
        self.info = ipminative._parse_driver_info(self.node)
def test__parse_driver_info(self):
# make sure we get back the expected things
self.assertEqual('1.2.3.4', self.info['address'])
self.assertEqual('admin', self.info['username'])
self.assertEqual('fake', self.info['password'])
self.assertEqual('1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
self.info['uuid'])
self.assertEqual(False, self.info['force_boot_device'])
# make sure error is raised when info, eg. username, is missing
info = dict(INFO_DICT)
del info['ipmi_username']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ipminative._parse_driver_info,
node)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__power_status_on(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.get_power.return_value = {'powerstate': 'on'}
state = ipminative._power_status(self.info)
ipmicmd.get_power.assert_called_once_with()
self.assertEqual(states.POWER_ON, state)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__power_status_off(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.get_power.return_value = {'powerstate': 'off'}
state = ipminative._power_status(self.info)
ipmicmd.get_power.assert_called_once_with()
self.assertEqual(states.POWER_OFF, state)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__power_status_error(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.get_power.return_value = {'powerstate': 'Error'}
state = ipminative._power_status(self.info)
ipmicmd.get_power.assert_called_once_with()
self.assertEqual(states.ERROR, state)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__power_on(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.set_power.return_value = {'powerstate': 'on'}
self.config(retry_timeout=400, group='ipmi')
state = ipminative._power_on(self.info)
ipmicmd.set_power.assert_called_once_with('on', 400)
self.assertEqual(states.POWER_ON, state)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__power_off(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.set_power.return_value = {'powerstate': 'off'}
self.config(retry_timeout=500, group='ipmi')
state = ipminative._power_off(self.info)
ipmicmd.set_power.assert_called_once_with('off', 500)
self.assertEqual(states.POWER_OFF, state)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__reboot(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.set_power.return_value = {'powerstate': 'on'}
self.config(retry_timeout=600, group='ipmi')
state = ipminative._reboot(self.info)
ipmicmd.set_power.assert_called_once_with('boot', 600)
self.assertEqual(states.POWER_ON, state)
def _create_sensor_object(self, value, type_, name, states=None,
units='fake_units', health=0):
if states is None:
states = []
return type('Reading', (object, ), {
'value': value, 'type': type_, 'name': name,
'states': states, 'units': units, 'health': health})()
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__get_sensors_data(self, ipmi_mock):
reading_1 = self._create_sensor_object('fake_value1',
'fake_type_A',
'fake_name1')
reading_2 = self._create_sensor_object('fake_value2',
'fake_type_A',
'fake_name2')
reading_3 = self._create_sensor_object('fake_value3',
'fake_type_B',
'fake_name3')
readings = [reading_1, reading_2, reading_3]
ipmicmd = ipmi_mock.return_value
ipmicmd.get_sensor_data.return_value = readings
expected = {
'fake_type_A': {
'fake_name1': {
'Health': '0',
'Sensor ID': 'fake_name1',
'Sensor Reading': 'fake_value1 fake_units',
'States': '[]',
'Units': 'fake_units'
},
'fake_name2': {
'Health': '0',
'Sensor ID': 'fake_name2',
'Sensor Reading': 'fake_value2 fake_units',
'States': '[]',
'Units': 'fake_units'
}
},
'fake_type_B': {
'fake_name3': {
'Health': '0',
'Sensor ID': 'fake_name3',
'Sensor Reading': 'fake_value3 fake_units',
'States': '[]', 'Units': 'fake_units'
}
}
}
ret = ipminative._get_sensors_data(self.info)
self.assertEqual(expected, ret)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__get_sensors_data_missing_values(self, ipmi_mock):
reading_1 = self._create_sensor_object('fake_value1',
'fake_type_A',
'fake_name1')
reading_2 = self._create_sensor_object(None,
'fake_type_A',
'fake_name2')
reading_3 = self._create_sensor_object(None,
'fake_type_B',
'fake_name3')
readings = [reading_1, reading_2, reading_3]
ipmicmd = ipmi_mock.return_value
ipmicmd.get_sensor_data.return_value = readings
expected = {
'fake_type_A': {
'fake_name1': {
'Health': '0',
'Sensor ID': 'fake_name1',
'Sensor Reading': 'fake_value1 fake_units',
'States': '[]',
'Units': 'fake_units'
}
}
}
ret = ipminative._get_sensors_data(self.info)
self.assertEqual(expected, ret)
def test__parse_raw_bytes_ok(self):
bytes_string = '0x11 0x12 0x25 0xFF'
netfn, cmd, data = ipminative._parse_raw_bytes(bytes_string)
self.assertEqual(0x11, netfn)
self.assertEqual(0x12, cmd)
self.assertEqual([0x25, 0xFF], data)
def test__parse_raw_bytes_invalid_value(self):
bytes_string = '0x11 oops'
self.assertRaises(exception.InvalidParameterValue,
ipminative._parse_raw_bytes,
bytes_string)
def test__parse_raw_bytes_missing_byte(self):
bytes_string = '0x11'
self.assertRaises(exception.InvalidParameterValue,
ipminative._parse_raw_bytes,
bytes_string)
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__send_raw(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipminative._send_raw(self.info, '0x01 0x02 0x03 0x04')
ipmicmd.xraw_command.assert_called_once_with(1, 2, data=[3, 4])
@mock.patch('pyghmi.ipmi.command.Command', autospec=True)
def test__send_raw_fail(self, ipmi_mock):
ipmicmd = ipmi_mock.return_value
ipmicmd.xraw_command.side_effect = pyghmi_exception.IpmiException()
self.assertRaises(exception.IPMIFailure, ipminative._send_raw,
self.info, '0x01 0x02')
class IPMINativeDriverTestCase(db_base.DbTestCase):
    """Test cases for ipminative.NativeIPMIPower class functions."""
    def setUp(self):
        """Register the fake_ipminative driver and create a test node."""
        super(IPMINativeDriverTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ipminative")
        self.driver = driver_factory.get_driver("fake_ipminative")
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_ipminative',
                                               driver_info=INFO_DICT)
        self.info = ipminative._parse_driver_info(self.node)
    def test_get_properties(self):
        """All interfaces expose COMMON_PROPERTIES; console adds its own."""
        expected = ipminative.COMMON_PROPERTIES
        self.assertEqual(expected, self.driver.power.get_properties())
        self.assertEqual(expected, self.driver.management.get_properties())
        self.assertEqual(expected, self.driver.vendor.get_properties())
        expected = list(ipminative.COMMON_PROPERTIES)
        expected += list(ipminative.CONSOLE_PROPERTIES)
        self.assertEqual(sorted(expected),
                         sorted(self.driver.console.get_properties().keys()))
        self.assertEqual(sorted(expected),
                         sorted(self.driver.get_properties().keys()))
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_get_power_state(self, ipmi_mock):
        """Consecutive calls map each BMC powerstate to the right state."""
        # Getting the mocked command.
        cmd_mock = ipmi_mock.return_value
        # Getting the get power mock.
        get_power_mock = cmd_mock.get_power
        # pop() consumes from the end, so replies arrive off, on, error.
        return_values = [{'powerstate': 'error'},
                         {'powerstate': 'on'},
                         {'powerstate': 'off'}]
        get_power_mock.side_effect = lambda: return_values.pop()
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pstate = self.driver.power.get_power_state(task)
            self.assertEqual(states.POWER_OFF, pstate)
            pstate = self.driver.power.get_power_state(task)
            self.assertEqual(states.POWER_ON, pstate)
            pstate = self.driver.power.get_power_state(task)
            self.assertEqual(states.ERROR, pstate)
            self.assertEqual(3, get_power_mock.call_count,
                             "pyghmi.ipmi.command.Command.get_power was not"
                             " called 3 times.")
    @mock.patch.object(ipminative, '_power_on', autospec=True)
    def test_set_power_on_ok(self, power_on_mock):
        """set_power_state(POWER_ON) delegates to _power_on."""
        power_on_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.power.set_power_state(
                task, states.POWER_ON)
        power_on_mock.assert_called_once_with(self.info)
    @mock.patch.object(driver_utils, 'ensure_next_boot_device', autospec=True)
    @mock.patch.object(ipminative, '_power_on', autospec=True)
    def test_set_power_on_with_next_boot(self, power_on_mock, mock_next_boot):
        """Powering on also re-applies any pending next-boot device."""
        power_on_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.power.set_power_state(
                task, states.POWER_ON)
            mock_next_boot.assert_called_once_with(task, self.info)
        power_on_mock.assert_called_once_with(self.info)
    @mock.patch.object(ipminative, '_power_off', autospec=True)
    def test_set_power_off_ok(self, power_off_mock):
        """set_power_state(POWER_OFF) delegates to _power_off."""
        power_off_mock.return_value = states.POWER_OFF
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.power.set_power_state(
                task, states.POWER_OFF)
        power_off_mock.assert_called_once_with(self.info)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_set_power_on_fail(self, ipmi_mock):
        """An 'error' powerstate reply raises PowerStateFailure."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.set_power.return_value = {'powerstate': 'error'}
        self.config(retry_timeout=500, group='ipmi')
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.PowerStateFailure,
                              self.driver.power.set_power_state,
                              task,
                              states.POWER_ON)
        ipmicmd.set_power.assert_called_once_with('on', 500)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_set_boot_device_ok(self, ipmi_mock):
        """Boot device names are translated to pyghmi equivalents."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.set_bootdev.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.management.set_boot_device(task, boot_devices.PXE)
        # PXE is converted to 'network' internally by ipminative
        ipmicmd.set_bootdev.assert_called_once_with('network', persist=False)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_force_set_boot_device_ok(self, ipmi_mock):
        """ipmi_force_boot_device stores a non-persistent next-boot hint."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.set_bootdev.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            task.node.driver_info['ipmi_force_boot_device'] = True
            self.driver.management.set_boot_device(task, boot_devices.PXE)
            task.node.refresh()
            self.assertEqual(
                False,
                task.node.driver_internal_info['is_next_boot_persistent']
            )
        # PXE is converted to 'network' internally by ipminative
        ipmicmd.set_bootdev.assert_called_once_with('network', persist=False)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_set_boot_device_with_persistent(self, ipmi_mock):
        """Persistent forced boot device is recorded in internal info."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.set_bootdev.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            task.node.driver_info['ipmi_force_boot_device'] = True
            self.driver.management.set_boot_device(task,
                                                   boot_devices.PXE,
                                                   True)
            self.assertEqual(
                boot_devices.PXE,
                task.node.driver_internal_info['persistent_boot_device'])
        # PXE is converted to 'network' internally by ipminative
        ipmicmd.set_bootdev.assert_called_once_with('network', persist=False)
    def test_set_boot_device_bad_device(self):
        """Unknown boot device names raise InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.driver.management.set_boot_device,
                              task,
                              'fake-device')
    @mock.patch.object(driver_utils, 'ensure_next_boot_device', autospec=True)
    @mock.patch.object(ipminative, '_reboot', autospec=True)
    def test_reboot_ok(self, reboot_mock, mock_next_boot):
        """reboot delegates to _reboot and re-applies next boot device."""
        reboot_mock.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.power.reboot(task)
            mock_next_boot.assert_called_once_with(task, self.info)
        reboot_mock.assert_called_once_with(self.info)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_reboot_fail(self, ipmi_mock):
        """A reboot error reply raises PowerStateFailure."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.set_power.return_value = {'error': 'Some IPMI error'}
        self.config(retry_timeout=500, group='ipmi')
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.PowerStateFailure,
                              self.driver.power.reboot,
                              task)
        ipmicmd.set_power.assert_called_once_with('boot', 500)
    def test_management_interface_get_supported_boot_devices(self):
        """The driver advertises PXE, disk, CD-ROM and BIOS boot devices."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            expected = [boot_devices.PXE, boot_devices.DISK,
                        boot_devices.CDROM, boot_devices.BIOS]
            self.assertEqual(sorted(expected), sorted(task.driver.management.
                             get_supported_boot_devices(task)))
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_management_interface_get_boot_device_good(self, ipmi_mock):
        """BMC bootdev 'hd' maps to DISK with unknown persistence."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.get_bootdev.return_value = {'bootdev': 'hd'}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            bootdev = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.DISK, bootdev['boot_device'])
            self.assertIsNone(bootdev['persistent'])
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_management_interface_get_boot_device_persistent(self, ipmi_mock):
        """The BMC's persistent flag is surfaced to the caller."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.get_bootdev.return_value = {'bootdev': 'hd',
                                            'persistent': True}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            bootdev = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.DISK, bootdev['boot_device'])
            self.assertTrue(bootdev['persistent'])
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_management_interface_get_boot_device_fail(self, ipmi_mock):
        """pyghmi exceptions are translated to IPMIFailure."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.get_bootdev.side_effect = pyghmi_exception.IpmiException
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.IPMIFailure,
                              self.driver.management.get_boot_device, task)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_management_interface_get_boot_device_fail_dict(self, ipmi_mock):
        """An 'error' key in the reply also raises IPMIFailure."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.get_bootdev.return_value = {'error': 'boooom'}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.IPMIFailure,
                              self.driver.management.get_boot_device, task)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_management_interface_get_boot_device_unknown(self, ipmi_mock):
        """Unmapped bootdev values yield None for device and persistence."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.get_bootdev.return_value = {'bootdev': 'unknown'}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            expected = {'boot_device': None, 'persistent': None}
            self.assertEqual(expected,
                             self.driver.management.get_boot_device(task))
    def test_get_force_boot_device_persistent(self):
        """Forced boot device is read from internal info, not the BMC."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.driver_info['ipmi_force_boot_device'] = True
            task.node.driver_internal_info['persistent_boot_device'] = 'pxe'
            bootdev = self.driver.management.get_boot_device(task)
            self.assertEqual('pxe', bootdev['boot_device'])
            self.assertTrue(bootdev['persistent'])
    def test_management_interface_validate_good(self):
        """validate succeeds with complete driver_info."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.management.validate(task)
    def test_management_interface_validate_fail(self):
        """validate raises when required driver_info is absent."""
        # Missing IPMI driver_info information
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake_ipminative')
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.management.validate, task)
    @mock.patch('pyghmi.ipmi.command.Command', autospec=True)
    def test_get_sensors_data(self, ipmi_mock):
        """get_sensors_data queries the BMC's sensor data once."""
        ipmicmd = ipmi_mock.return_value
        ipmicmd.get_sensor_data.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.management.get_sensors_data(task)
        ipmicmd.get_sensor_data.assert_called_once_with()
    @mock.patch.object(console_utils, 'start_shellinabox_console',
                       autospec=True)
    def test_start_console(self, mock_exec):
        """start_console launches shellinabox with uuid and port."""
        mock_exec.return_value = None
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.console.start_console(task)
        mock_exec.assert_called_once_with(self.info['uuid'],
                                          self.info['port'],
                                          mock.ANY)
        self.assertTrue(mock_exec.called)
    @mock.patch.object(console_utils, 'start_shellinabox_console',
                       autospec=True)
    def test_start_console_fail(self, mock_exec):
        """Subprocess failures while starting the console propagate."""
        mock_exec.side_effect = exception.ConsoleSubprocessFailed(
            error='error')
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.ConsoleSubprocessFailed,
                              self.driver.console.start_console,
                              task)
    @mock.patch.object(console_utils, 'stop_shellinabox_console',
                       autospec=True)
    def test_stop_console(self, mock_exec):
        """stop_console stops the shellinabox session for the node uuid."""
        mock_exec.return_value = None
        with task_manager.acquire(self.context,
                                  self.node['uuid']) as task:
            self.driver.console.stop_console(task)
        mock_exec.assert_called_once_with(self.info['uuid'])
        self.assertTrue(mock_exec.called)
    @mock.patch.object(console_utils, 'stop_shellinabox_console',
                       autospec=True)
    def test_stop_console_fail(self, mock_stop):
        """Console errors while stopping propagate to the caller."""
        mock_stop.side_effect = exception.ConsoleError()
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.assertRaises(exception.ConsoleError,
                              self.driver.console.stop_console,
                              task)
        mock_stop.assert_called_once_with(self.node.uuid)
    @mock.patch.object(console_utils, 'get_shellinabox_console_url',
                       autospec=True)
    def test_get_console(self, mock_exec):
        """get_console returns a shellinabox-typed URL dict."""
        url = 'http://localhost:4201'
        mock_exec.return_value = url
        expected = {'type': 'shellinabox', 'url': url}
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            console_info = self.driver.console.get_console(task)
        self.assertEqual(expected, console_info)
        mock_exec.assert_called_once_with(self.info['port'])
        self.assertTrue(mock_exec.called)
    @mock.patch.object(ipminative, '_parse_driver_info', autospec=True)
    @mock.patch.object(ipminative, '_parse_raw_bytes', autospec=True)
    def test_vendor_passthru_validate__send_raw_bytes_good(self, mock_raw,
                                                           mock_driver):
        """Vendor validate parses both raw bytes and driver info."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.driver.vendor.validate(task,
                                        method='send_raw',
                                        http_method='POST',
                                        raw_bytes='0x00 0x01')
            mock_raw.assert_called_once_with('0x00 0x01')
            mock_driver.assert_called_once_with(task.node)
    def test_vendor_passthru_validate__send_raw_bytes_fail(self):
        """Missing raw_bytes argument raises MissingParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.MissingParameterValue,
                              self.driver.vendor.validate,
                              task, method='send_raw')
    def test_vendor_passthru_vendor_routes(self):
        """The vendor interface exposes send_raw and bmc_reset routes."""
        expected = ['send_raw', 'bmc_reset']
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            vendor_routes = task.driver.vendor.vendor_routes
            self.assertIsInstance(vendor_routes, dict)
            self.assertEqual(sorted(expected), sorted(vendor_routes))
    @mock.patch.object(ipminative, '_send_raw', autospec=True)
    def test_send_raw(self, send_raw_mock):
        """send_raw forwards the byte string to _send_raw."""
        bytes = '0x00 0x01'
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.vendor.send_raw(task, http_method='POST',
                                        raw_bytes=bytes)
        send_raw_mock.assert_called_once_with(self.info, bytes)
    @mock.patch.object(ipminative, '_send_raw', autospec=True)
    def _test_bmc_reset(self, warm, expected_bytes, send_raw_mock):
        """Helper: issue bmc_reset and check the raw bytes sent."""
        with task_manager.acquire(self.context,
                                  self.node.uuid) as task:
            self.driver.vendor.bmc_reset(task, http_method='POST', warm=warm)
        send_raw_mock.assert_called_once_with(self.info, expected_bytes)
    def test_bmc_reset_cold(self):
        """All falsy warm values produce a cold reset command."""
        for param in (False, 'false', 'off', 'n', 'no'):
            self._test_bmc_reset(param, '0x06 0x02')
    def test_bmc_reset_warm(self):
        """All truthy warm values produce a warm reset command."""
        for param in (True, 'true', 'on', 'y', 'yes'):
            self._test_bmc_reset(param, '0x06 0x03')
|
|
import datetime
import ujson
import zlib
from django.utils.translation import ugettext as _
from django.utils.timezone import now as timezone_now
from six import binary_type
from zerver.lib.avatar import avatar_url_from_dict
import zerver.lib.bugdown as bugdown
from zerver.lib.cache import cache_with_key, to_dict_cache_key
from zerver.lib.request import JsonableError
from zerver.lib.str_utils import force_bytes, dict_with_str_keys
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.topic_mutes import build_topic_mute_checker
from zerver.models import (
get_display_recipient_by_id,
get_user_profile_by_id,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserProfile,
UserMessage,
Reaction
)
from typing import Any, Dict, List, Optional, Set, Tuple, Text, Union
from mypy_extensions import TypedDict
# Maps a user id to that user's list of configured alert words.
RealmAlertWords = Dict[int, List[Text]]
# Shape of the per-user unread-messages summary returned by
# get_unread_message_ids_per_recipient.
UnreadMessagesResult = TypedDict('UnreadMessagesResult', {
    'pms': List[Dict[str, Any]],
    'streams': List[Dict[str, Any]],
    'huddles': List[Dict[str, Any]],
    'mentions': List[int],
    'count': int,
})
# Cap on the number of unread rows fetched per user, for performance.
MAX_UNREAD_MESSAGES = 5000
def extract_message_dict(message_bytes):
    # type: (binary_type) -> Dict[str, Any]
    """Decompress and parse a cached message blob back into a dict.

    Inverse of stringify_message_dict.
    """
    raw_json = zlib.decompress(message_bytes).decode("utf-8")
    return dict_with_str_keys(ujson.loads(raw_json))
def stringify_message_dict(message_dict):
    # type: (Dict[str, Any]) -> binary_type
    """Serialize a message dict to a compressed byte blob for caching."""
    serialized = force_bytes(ujson.dumps(message_dict))
    return zlib.compress(serialized)
def message_to_dict(message, apply_markdown):
    # type: (Message, bool) -> Dict[str, Any]
    """Return the wire-format dict for one message via the JSON cache."""
    return extract_message_dict(message_to_dict_json(message, apply_markdown))
# Cached for 24 hours; the expensive work (building the dict, possibly
# rendering markdown) happens in MessageDict.to_dict_uncached on a miss.
@cache_with_key(to_dict_cache_key, timeout=3600*24)
def message_to_dict_json(message, apply_markdown):
    # type: (Message, bool) -> binary_type
    return MessageDict.to_dict_uncached(message, apply_markdown)
class MessageDict(object):
    """Builders for the JSON-able wire-format dict of a Message."""
    @staticmethod
    def to_dict_uncached(message, apply_markdown):
        # type: (Message, bool) -> binary_type
        """Build the message dict and compress it for the cache."""
        dct = MessageDict.to_dict_uncached_helper(message, apply_markdown)
        return stringify_message_dict(dct)
    @staticmethod
    def to_dict_uncached_helper(message, apply_markdown):
        # type: (Message, bool) -> Dict[str, Any]
        """Unpack a full ORM Message object into build_message_dict args."""
        return MessageDict.build_message_dict(
            apply_markdown = apply_markdown,
            message = message,
            message_id = message.id,
            last_edit_time = message.last_edit_time,
            edit_history = message.edit_history,
            content = message.content,
            subject = message.subject,
            pub_date = message.pub_date,
            rendered_content = message.rendered_content,
            rendered_content_version = message.rendered_content_version,
            sender_id = message.sender.id,
            sender_email = message.sender.email,
            sender_realm_id = message.sender.realm_id,
            sender_realm_str = message.sender.realm.string_id,
            sender_full_name = message.sender.full_name,
            sender_short_name = message.sender.short_name,
            sender_avatar_source = message.sender.avatar_source,
            sender_avatar_version = message.sender.avatar_version,
            sender_is_mirror_dummy = message.sender.is_mirror_dummy,
            sending_client_name = message.sending_client.name,
            recipient_id = message.recipient.id,
            recipient_type = message.recipient.type,
            recipient_type_id = message.recipient.type_id,
            reactions = Reaction.get_raw_db_rows([message.id])
        )
    @staticmethod
    def build_dict_from_raw_db_row(row, apply_markdown):
        # type: (Dict[str, Any], bool) -> Dict[str, Any]
        '''
        row is a row from a .values() call, and it needs to have
        all the relevant fields populated
        '''
        return MessageDict.build_message_dict(
            apply_markdown = apply_markdown,
            message = None,
            message_id = row['id'],
            last_edit_time = row['last_edit_time'],
            edit_history = row['edit_history'],
            content = row['content'],
            subject = row['subject'],
            pub_date = row['pub_date'],
            rendered_content = row['rendered_content'],
            rendered_content_version = row['rendered_content_version'],
            sender_id = row['sender_id'],
            sender_email = row['sender__email'],
            sender_realm_id = row['sender__realm__id'],
            sender_realm_str = row['sender__realm__string_id'],
            sender_full_name = row['sender__full_name'],
            sender_short_name = row['sender__short_name'],
            sender_avatar_source = row['sender__avatar_source'],
            sender_avatar_version = row['sender__avatar_version'],
            sender_is_mirror_dummy = row['sender__is_mirror_dummy'],
            sending_client_name = row['sending_client__name'],
            recipient_id = row['recipient_id'],
            recipient_type = row['recipient__type'],
            recipient_type_id = row['recipient__type_id'],
            reactions=row['reactions']
        )
    @staticmethod
    def build_message_dict(
            apply_markdown,
            message,
            message_id,
            last_edit_time,
            edit_history,
            content,
            subject,
            pub_date,
            rendered_content,
            rendered_content_version,
            sender_id,
            sender_email,
            sender_realm_id,
            sender_realm_str,
            sender_full_name,
            sender_short_name,
            sender_avatar_source,
            sender_avatar_version,
            sender_is_mirror_dummy,
            sending_client_name,
            recipient_id,
            recipient_type,
            recipient_type_id,
            reactions
    ):
        # type: (bool, Optional[Message], int, Optional[datetime.datetime], Optional[Text], Text, Text, datetime.datetime, Optional[Text], Optional[int], int, Text, int, Text, Text, Text, Text, int, bool, Text, int, int, int, List[Dict[str, Any]]) -> Dict[str, Any]
        # Assemble the client-facing dict from scalar fields; `message` is
        # only needed when markdown must be (re)rendered.
        avatar_url = avatar_url_from_dict(dict(
            avatar_source=sender_avatar_source,
            avatar_version=sender_avatar_version,
            email=sender_email,
            id=sender_id,
            realm_id=sender_realm_id))
        display_recipient = get_display_recipient_by_id(
            recipient_id,
            recipient_type,
            recipient_type_id
        )
        if recipient_type == Recipient.STREAM:
            display_type = "stream"
        elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
            assert not isinstance(display_recipient, Text)
            display_type = "private"
            if len(display_recipient) == 1:
                # add the sender in if this isn't a message between
                # someone and themself, preserving ordering
                recip = {'email': sender_email,
                         'full_name': sender_full_name,
                         'short_name': sender_short_name,
                         'id': sender_id,
                         'is_mirror_dummy': sender_is_mirror_dummy}
                # Keep the recipient list sorted by email; if the sender's
                # email equals the sole recipient's, this was a self-PM and
                # the sender is intentionally not added twice.
                if recip['email'] < display_recipient[0]['email']:
                    display_recipient = [recip, display_recipient[0]]
                elif recip['email'] > display_recipient[0]['email']:
                    display_recipient = [display_recipient[0], recip]
        else:
            raise AssertionError("Invalid recipient type %s" % (recipient_type,))
        obj = dict(
            id = message_id,
            sender_email = sender_email,
            sender_full_name = sender_full_name,
            sender_short_name = sender_short_name,
            sender_realm_str = sender_realm_str,
            sender_id = sender_id,
            type = display_type,
            display_recipient = display_recipient,
            recipient_id = recipient_id,
            subject = subject,
            timestamp = datetime_to_timestamp(pub_date),
            avatar_url = avatar_url,
            client = sending_client_name)
        if obj['type'] == 'stream':
            obj['stream_id'] = recipient_type_id
            obj['subject_links'] = bugdown.subject_links(sender_realm_id, subject)
        if last_edit_time is not None:
            obj['last_edit_timestamp'] = datetime_to_timestamp(last_edit_time)
            assert edit_history is not None
            obj['edit_history'] = ujson.loads(edit_history)
        if apply_markdown:
            if Message.need_to_render_content(rendered_content, rendered_content_version, bugdown.version):
                if message is None:
                    # We really shouldn't be rendering objects in this method, but there is
                    # a scenario where we upgrade the version of bugdown and fail to run
                    # management commands to re-render historical messages, and then we
                    # need to have side effects.  This method is optimized to not need full
                    # blown ORM objects, but the bugdown renderer is unfortunately highly
                    # coupled to Message, and we also need to persist the new rendered content.
                    # If we don't have a message object passed in, we get one here.  The cost
                    # of going to the DB here should be overshadowed by the cost of rendering
                    # and updating the row.
                    # TODO: see #1379 to eliminate bugdown dependencies
                    message = Message.objects.select_related().get(id=message_id)
                assert message is not None  # Hint for mypy.
                # It's unfortunate that we need to have side effects on the message
                # in some cases.
                rendered_content = render_markdown(message, content, realm=message.get_realm())
                message.rendered_content = rendered_content
                message.rendered_content_version = bugdown.version
                message.save_rendered_content()
            if rendered_content is not None:
                obj['content'] = rendered_content
            else:
                obj['content'] = u'<p>[Zulip note: Sorry, we could not understand the formatting of your message]</p>'
            obj['content_type'] = 'text/html'
        else:
            obj['content'] = content
            obj['content_type'] = 'text/x-markdown'
        # "/me does something" status messages get a flag for the client.
        if rendered_content is not None:
            obj['is_me_message'] = Message.is_status_message(content, rendered_content)
        else:
            obj['is_me_message'] = False
        obj['reactions'] = [ReactionDict.build_dict_from_raw_db_row(reaction)
                            for reaction in reactions]
        return obj
class ReactionDict(object):
    """Builder for the API-facing dict describing one reaction."""
    @staticmethod
    def build_dict_from_raw_db_row(row):
        # type: (Dict[str, Any]) -> Dict[str, Any]
        """Reshape a flat .values() reaction row, nesting the user info."""
        reacting_user = {
            'email': row['user_profile__email'],
            'id': row['user_profile__id'],
            'full_name': row['user_profile__full_name'],
        }
        return {
            'emoji_name': row['emoji_name'],
            'emoji_code': row['emoji_code'],
            'reaction_type': row['reaction_type'],
            'user': reacting_user,
        }
def access_message(user_profile, message_id):
    # type: (UserProfile, int) -> Tuple[Message, UserMessage]
    """You can access a message by ID in our APIs that either:
    (1) You received or have previously accessed via starring
        (aka have a UserMessage row for).
    (2) Was sent to a public stream in your realm.
    We produce consistent, boring error messages to avoid leaking any
    information from a security perspective.
    """
    try:
        message = Message.objects.select_related().get(id=message_id)
    except Message.DoesNotExist:
        raise JsonableError(_("Invalid message(s)"))
    user_message = None
    try:
        user_message = UserMessage.objects.select_related().get(user_profile=user_profile,
                                                                message=message)
    except UserMessage.DoesNotExist:
        pass
    if user_message is not None:
        # Case (1): the user has a UserMessage row, so access is allowed.
        return (message, user_message)
    # Case (2): without a UserMessage row, only messages sent to public
    # streams in the user's own realm are accessible.
    if message.recipient.type != Recipient.STREAM:
        # You can't access private messages you didn't receive.
        raise JsonableError(_("Invalid message(s)"))
    stream = Stream.objects.get(id=message.recipient.type_id)
    if not stream.is_public() or stream.realm != user_profile.realm:
        # Invite-only streams you didn't receive, or streams in other
        # realms, are off limits.
        raise JsonableError(_("Invalid message(s)"))
    return (message, user_message)
def render_markdown(message, content, realm=None, realm_alert_words=None, user_ids=None):
    # type: (Message, Text, Optional[Realm], Optional[RealmAlertWords], Optional[Set[int]]) -> Text
    """Return HTML for given markdown. Bugdown may add properties to the
    message object such as `mentions_user_ids` and `mentions_wildcard`.
    These are only on this Django object and are not saved in the
    database.
    """
    # user_ids limits which users' alert words we check against.
    if user_ids is None:
        message_user_ids = set()  # type: Set[int]
    else:
        message_user_ids = user_ids
    if message is not None:
        # Reset the transient rendering attributes bugdown will populate.
        message.mentions_wildcard = False
        message.mentions_user_ids = set()
        message.alert_words = set()
        message.links_for_preview = set()
        if realm is None:
            realm = message.get_realm()
    # Union of all alert words configured by any of the relevant users.
    possible_words = set()  # type: Set[Text]
    if realm_alert_words is not None:
        for user_id, words in realm_alert_words.items():
            if user_id in message_user_ids:
                possible_words.update(set(words))
    if message is None:
        # If we don't have a message, then we are in the compose preview
        # codepath, so we know we are dealing with a human.
        sent_by_bot = False
    else:
        sent_by_bot = get_user_profile_by_id(message.sender_id).is_bot
    # DO MAIN WORK HERE -- call bugdown to convert
    rendered_content = bugdown.convert(content, message=message, message_realm=realm,
                                       possible_words=possible_words,
                                       sent_by_bot=sent_by_bot)
    if message is not None:
        # Record which users actually had one of their alert words hit,
        # based on the alert_words set bugdown populated on the message.
        message.user_ids_with_alert_words = set()
        if realm_alert_words is not None:
            for user_id, words in realm_alert_words.items():
                if user_id in message_user_ids:
                    if set(words).intersection(message.alert_words):
                        message.user_ids_with_alert_words.add(user_id)
    return rendered_content
def huddle_users(recipient_id):
    # type: (int) -> str
    """Return the huddle's member user ids as a sorted comma-separated string."""
    recipients = get_display_recipient_by_id(recipient_id,
                                             Recipient.HUDDLE,
                                             None)  # type: Union[Text, List[Dict[str, Any]]]
    # Text is for streams.
    assert not isinstance(recipients, Text)
    member_ids = sorted(member['id'] for member in recipients)  # type: List[int]
    return ','.join(map(str, member_ids))
def aggregate_dict(input_rows, lookup_fields, input_field, output_field):
    # type: (List[Dict[str, Any]], List[str], str, str) -> List[Dict[str, Any]]
    """Group input_rows by the tuple of lookup_fields values.

    Each output row carries the lookup fields plus a list (named
    output_field) collecting every input_field value seen for that
    group, in input order.  Groups are returned sorted by key.
    """
    grouped = dict()  # type: Dict[Any, Dict]
    for row in input_rows:
        key = tuple(row[field] for field in lookup_fields)
        if key not in grouped:
            bucket = {field: row[field] for field in lookup_fields}
            bucket[output_field] = []
            grouped[key] = bucket
        grouped[key][output_field].append(row[input_field])
    return [grouped[key] for key in sorted(grouped)]
def get_inactive_recipient_ids(user_profile):
    # type: (UserProfile) -> List[int]
    """Recipient ids of streams this user has unsubscribed from."""
    inactive_subs = Subscription.objects.filter(
        user_profile=user_profile,
        recipient__type=Recipient.STREAM,
        active=False,
    ).values(
        'recipient_id'
    )
    return [sub['recipient_id'] for sub in inactive_subs]
def get_muted_recipient_ids(user_profile):
    # type: (UserProfile) -> List[int]
    """Recipient ids of subscribed streams the user keeps out of home view."""
    muted_subs = Subscription.objects.filter(
        user_profile=user_profile,
        recipient__type=Recipient.STREAM,
        active=True,
        in_home_view=False,
    ).values(
        'recipient_id'
    )
    return [sub['recipient_id'] for sub in muted_subs]
def get_unread_message_ids_per_recipient(user_profile):
    # type: (UserProfile) -> UnreadMessagesResult
    """Summarize the user's unread messages grouped by conversation.

    Returns unread message ids bucketed into PMs (by sender), streams
    (by stream/topic) and huddles (by participant set), plus mention ids
    and a count that excludes muted streams/topics.
    """
    # Don't count messages on streams the user has unsubscribed from.
    excluded_recipient_ids = get_inactive_recipient_ids(user_profile)
    user_msgs = UserMessage.objects.filter(
        user_profile=user_profile
    ).exclude(
        message__recipient_id__in=excluded_recipient_ids
    ).extra(
        where=[UserMessage.where_unread()]
    ).values(
        'message_id',
        'message__sender_id',
        'message__subject',
        'message__recipient_id',
        'message__recipient__type',
        'message__recipient__type_id',
        'flags',
    ).order_by("-message_id")
    # Limit unread messages for performance reasons.
    user_msgs = list(user_msgs[:MAX_UNREAD_MESSAGES])
    # Ordered newest-first above so the cap keeps the most recent
    # messages; re-reverse so rows are in ascending message_id order.
    rows = list(reversed(user_msgs))
    muted_recipient_ids = get_muted_recipient_ids(user_profile)
    topic_mute_checker = build_topic_mute_checker(user_profile)
    def is_row_muted(row):
        # type: (Dict[str, Any]) -> bool
        # Muted either at the stream level or at the topic level.
        recipient_id = row['message__recipient_id']
        if recipient_id in muted_recipient_ids:
            return True
        topic_name = row['message__subject']
        if topic_mute_checker(recipient_id, topic_name):
            return True
        return False
    # The overall count excludes muted rows, but the per-conversation
    # buckets below intentionally include them.
    active_stream_rows = [row for row in rows if not is_row_muted(row)]
    count = len(active_stream_rows)
    pm_msgs = [
        dict(
            sender_id=row['message__sender_id'],
            message_id=row['message_id'],
        ) for row in rows
        if row['message__recipient__type'] == Recipient.PERSONAL]
    pm_objects = aggregate_dict(
        input_rows=pm_msgs,
        lookup_fields=[
            'sender_id',
        ],
        input_field='message_id',
        output_field='unread_message_ids',
    )
    stream_msgs = [
        dict(
            stream_id=row['message__recipient__type_id'],
            topic=row['message__subject'],
            message_id=row['message_id'],
        ) for row in rows
        if row['message__recipient__type'] == Recipient.STREAM]
    stream_objects = aggregate_dict(
        input_rows=stream_msgs,
        lookup_fields=[
            'stream_id',
            'topic',
        ],
        input_field='message_id',
        output_field='unread_message_ids',
    )
    huddle_msgs = [
        dict(
            recipient_id=row['message__recipient_id'],
            message_id=row['message_id'],
        ) for row in rows
        if row['message__recipient__type'] == Recipient.HUDDLE]
    huddle_objects = aggregate_dict(
        input_rows=huddle_msgs,
        lookup_fields=[
            'recipient_id',
        ],
        input_field='message_id',
        output_field='unread_message_ids',
    )
    # Clients identify huddles by their member ids, not recipient_id.
    for huddle in huddle_objects:
        huddle['user_ids_string'] = huddle_users(huddle['recipient_id'])
        del huddle['recipient_id']
    mentioned_message_ids = [
        row['message_id']
        for row in rows
        if (row['flags'] & UserMessage.flags.mentioned) != 0]
    result = dict(
        pms=pm_objects,
        streams=stream_objects,
        huddles=huddle_objects,
        mentions=mentioned_message_ids,
        count=count)  # type: UnreadMessagesResult
    return result
def apply_unread_message_event(state, message):
    # type: (Dict[str, Any], Dict[str, Any]) -> None
    """Incrementally fold one newly arrived unread message into ``state``
    (the same structure produced by get_unread_message_ids_per_recipient).
    """
    state['count'] += 1
    message_id = message['id']
    if message['type'] == 'stream':
        message_type = 'stream'
    elif message['type'] == 'private':
        # A 1:1 PM has at most one recipient besides the sender; more
        # than one means it is a group PM (huddle).
        others = [
            recip for recip in message['display_recipient']
            if recip['id'] != message['sender_id']
        ]
        message_type = 'private' if len(others) <= 1 else 'huddle'
    else:
        raise AssertionError("Invalid message type %s" % (message['type'],))
    if message_type == 'stream':
        unread_key = 'streams'
        my_key = (message['stream_id'], message['subject'])  # type: Any
        key_func = lambda obj: (obj['stream_id'], obj['topic'])
        new_obj = dict(
            stream_id=message['stream_id'],
            topic=message['subject'],
            unread_message_ids=[message_id],
        )
    elif message_type == 'private':
        unread_key = 'pms'
        my_key = message['sender_id']
        key_func = lambda obj: obj['sender_id']
        new_obj = dict(
            sender_id=message['sender_id'],
            unread_message_ids=[message_id],
        )
    else:
        unread_key = 'huddles'
        user_ids = sorted(recip['id'] for recip in message['display_recipient'])
        my_key = ','.join(str(uid) for uid in user_ids)
        key_func = lambda obj: obj['user_ids_string']
        new_obj = dict(
            user_ids_string=my_key,
            unread_message_ids=[message_id],
        )
    if message.get('is_mentioned') and message_id not in state['mentions']:
        state['mentions'].append(message_id)
    # Merge into an existing bucket if one matches; otherwise append a
    # fresh bucket and keep the bucket list sorted by key.
    for obj in state[unread_key]:
        if key_func(obj) == my_key:
            obj['unread_message_ids'].append(message_id)
            obj['unread_message_ids'].sort()
            return
    state[unread_key].append(new_obj)
    state[unread_key].sort(key=key_func)
|
|
"""Support for installing and building the "wheel" binary package format.
"""
import collections
import compileall
import contextlib
import csv
import importlib
import logging
import os.path
import re
import shutil
import sys
import warnings
from base64 import urlsafe_b64encode
from email.message import Message
from itertools import chain, filterfalse, starmap
from typing import (
IO,
TYPE_CHECKING,
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
List,
NewType,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
from zipfile import ZipFile, ZipInfo
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor.distlib.util import get_export_entry
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six import ensure_str, ensure_text, reraise
from pip._internal.exceptions import InstallationError
from pip._internal.locations import get_major_minor_version
from pip._internal.metadata import BaseDistribution, get_wheel_distribution
from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl
from pip._internal.models.scheme import SCHEME_KEYS, Scheme
from pip._internal.utils.filesystem import adjacent_tmp_file, replace
from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition
from pip._internal.utils.unpacking import (
current_umask,
is_within_directory,
set_extracted_file_to_default_mode_plus_executable,
zip_item_is_executable,
)
from pip._internal.utils.wheel import parse_wheel
if TYPE_CHECKING:
    from typing import Protocol
    # Structural interface satisfied by ZipBackedFile and ScriptFile below:
    # a file destined for the installation, carrying its source RECORD
    # path, destination path, a changed flag, and a save() method.
    class File(Protocol):
        src_record_path = None # type: RecordPath
        dest_path = None # type: str
        changed = None # type: bool
        def save(self):
            # type: () -> None
            pass
logger = logging.getLogger(__name__)
RecordPath = NewType('RecordPath', str)
InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]
def rehash(path, blocksize=1 << 20):
    # type: (str, int) -> Tuple[str, str]
    """Return (encoded_digest, length) for path using hashlib.sha256()"""
    h, length = hash_file(path, blocksize)
    encoded = urlsafe_b64encode(h.digest()).decode('latin1').rstrip('=')
    return ('sha256=' + encoded, str(length))
def csv_io_kwargs(mode):
    # type: (str) -> Dict[str, Any]
    """Return keyword arguments to properly open a CSV file
    in the given mode.
    """
    return dict(mode=mode, newline='', encoding='utf-8')
def fix_script(path):
    # type: (str) -> bool
    """Replace #!python with #!/path/to/python
    Return True if file was changed.
    """
    # XXX RECORD hashes will need to be updated
    assert os.path.isfile(path)
    with open(path, 'rb') as script:
        firstline = script.readline()
        if not firstline.startswith(b'#!python'):
            return False
        rest = script.read()
    interpreter = sys.executable.encode(sys.getfilesystemencoding())
    shebang = b'#!' + interpreter + os.linesep.encode("ascii")
    with open(path, 'wb') as script:
        script.write(shebang)
        script.write(rest)
    return True
def wheel_root_is_purelib(metadata):
    # type: (Message) -> bool
    """True if the wheel's WHEEL metadata declares Root-Is-Purelib: true
    (case-insensitively); missing header counts as false."""
    value = metadata.get("Root-Is-Purelib", "")
    return value.lower() == "true"
def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]:
    """Collect the distribution's entry points, split into console and GUI
    scripts; entry points in any other group are ignored."""
    groups = {
        "console_scripts": {},
        "gui_scripts": {},
    }  # type: Dict[str, Dict[str, str]]
    for entry_point in dist.iter_entry_points():
        bucket = groups.get(entry_point.group)
        if bucket is not None:
            bucket[entry_point.name] = entry_point.value
    return groups["console_scripts"], groups["gui_scripts"]
def message_about_scripts_not_on_PATH(scripts):
    # type: (Sequence[str]) -> Optional[str]
    """Determine if any scripts are not on PATH and format a warning.
    Returns a warning message if one or more scripts are not on PATH,
    otherwise None.
    """
    if not scripts:
        return None
    # Bucket script basenames by the directory they were installed into.
    grouped_by_dir = collections.defaultdict(set)  # type: Dict[str, Set[str]]
    for destfile in scripts:
        grouped_by_dir[os.path.dirname(destfile)].add(
            os.path.basename(destfile))
    # Directories already on PATH never warrant a warning.
    not_warn_dirs = [
        os.path.normcase(i).rstrip(os.sep) for i in
        os.environ.get("PATH", "").split(os.pathsep)
    ]
    # Nor does the directory holding sys.executable; this covers venv
    # invocations made without activating the venv.
    not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
    warn_for = {
        parent_dir: dir_scripts
        for parent_dir, dir_scripts in grouped_by_dir.items()
        if os.path.normcase(parent_dir) not in not_warn_dirs
    }  # type: Dict[str, Set[str]]
    if not warn_for:
        return None
    # One message line per offending directory.
    msg_lines = []
    for parent_dir, dir_scripts in warn_for.items():
        sorted_scripts = sorted(dir_scripts)  # type: List[str]
        if len(sorted_scripts) == 1:
            start_text = "script {} is".format(sorted_scripts[0])
        else:
            start_text = "scripts {} are".format(
                ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
            )
        msg_lines.append(
            "The {} installed in '{}' which is not on PATH."
            .format(start_text, parent_dir)
        )
    last_line_fmt = (
        "Consider adding {} to PATH or, if you prefer "
        "to suppress this warning, use --no-warn-script-location."
    )
    if len(msg_lines) == 1:
        msg_lines.append(last_line_fmt.format("this directory"))
    else:
        msg_lines.append(last_line_fmt.format("these directories"))
    # Mention unexpanded "~" entries on PATH, which trip up some apps.
    has_tilde_entry = any(
        i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
    )
    if has_tilde_entry:
        msg_lines.append(
            "NOTE: The current PATH contains path(s) starting with `~`, "
            "which may not be expanded by all applications."
        )
    return "\n".join(msg_lines)
def _normalized_outrows(outrows):
    # type: (Iterable[InstalledCSVRow]) -> List[Tuple[str, str, str]]
    """Normalize the given rows of a RECORD file.
    Items in each row are converted into str. Rows are then sorted to make
    the value more predictable for tests.
    Each row is a 3-tuple (path, hash, size) and corresponds to a record of
    a RECORD file (see PEP 376 and PEP 427 for details). For the rows
    passed to this function, the size can be an integer as an int or string,
    or the empty string.
    """
    # Coerce every element to str before sorting: a path can occur twice in
    # the wild, in which case the third element (int-or-str size) would be
    # compared and a mixed int/str comparison raises TypeError.
    # For additional background, see--
    # https://github.com/pypa/pip/issues/5868
    stringified = [
        (ensure_str(record_path, encoding='utf-8'), hash_, str(size))
        for record_path, hash_, size in outrows
    ]
    stringified.sort()
    return stringified
def _record_to_fs_path(record_path):
    # type: (RecordPath) -> str
    # RECORD paths are currently used verbatim as filesystem paths, so this
    # is an identity conversion kept for symmetry with _fs_to_record_path.
    return record_path
def _fs_to_record_path(path, relative_to=None):
# type: (str, Optional[str]) -> RecordPath
if relative_to is not None:
# On Windows, do not handle relative paths if they belong to different
# logical disks
if os.path.splitdrive(path)[0].lower() == \
os.path.splitdrive(relative_to)[0].lower():
path = os.path.relpath(path, relative_to)
path = path.replace(os.path.sep, '/')
return cast('RecordPath', path)
def _parse_record_path(record_column):
    # type: (str) -> RecordPath
    """Decode a raw RECORD path column into a RecordPath."""
    return cast('RecordPath', ensure_text(record_column, encoding='utf-8'))
def get_csv_rows_for_installed(
    old_csv_rows,  # type: List[List[str]]
    installed,  # type: Dict[RecordPath, RecordPath]
    changed,  # type: Set[RecordPath]
    generated,  # type: List[str]
    lib_dir,  # type: str
):
    # type: (...) -> List[InstalledCSVRow]
    """
    :param installed: A map from archive RECORD path to installation RECORD
        path.  Entries are popped as they are consumed; leftovers are
        appended at the end with empty hash/size.
    """
    installed_rows = []  # type: List[InstalledCSVRow]
    for row in old_csv_rows:
        if len(row) > 3:
            logger.warning('RECORD line has more than three elements: %s', row)
        old_record_path = _parse_record_path(row[0])
        new_record_path = installed.pop(old_record_path, old_record_path)
        if new_record_path in changed:
            # File was rewritten during install; its recorded hash is stale.
            digest, length = rehash(_record_to_fs_path(new_record_path))
        else:
            digest = row[1] if len(row) > 1 else ''
            length = row[2] if len(row) > 2 else ''
        installed_rows.append((new_record_path, digest, length))
    # Files created by the install itself (script wrappers, INSTALLER, ...).
    for generated_path in generated:
        record_path = _fs_to_record_path(generated_path, lib_dir)
        digest, length = rehash(generated_path)
        installed_rows.append((record_path, digest, length))
    # Anything still in `installed` was copied from the wheel but absent
    # from the old RECORD; list it without hash/size.
    installed_rows.extend(
        (leftover_path, '', '') for leftover_path in installed.values()
    )
    return installed_rows
def get_console_script_specs(console):
    # type: (Dict[str, str]) -> List[str]
    """
    Given the mapping from entrypoint name to callable, return the relevant
    console script specs.

    Returns a list of "name = module:callable" spec strings suitable for
    distlib's ScriptMaker.  Only a local copy of ``console`` is mutated.
    """
    # Don't mutate caller's version
    console = console.copy()
    scripts_to_generate = []
    # Special case pip and setuptools to generate versioned wrappers
    #
    # The issue is that some projects (specifically, pip and setuptools) use
    # code in setup.py to create "versioned" entry points - pip2.7 on Python
    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
    # the wheel metadata at build time, and so if the wheel is installed with
    # a *different* version of Python the entry points will be wrong. The
    # correct fix for this is to enhance the metadata to be able to describe
    # such versioned entry points, but that won't happen till Metadata 2.0 is
    # available.
    # In the meantime, projects using versioned entry points will either have
    # incorrect versioned entry points, or they will not be able to distribute
    # "universal" wheels (i.e., they will need a wheel per Python version).
    #
    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
    # override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
    # is available.
    #
    # To add the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which version scripts get installed.
    #
    # ENSUREPIP_OPTIONS=altinstall
    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
    # ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this option is technically if ENSUREPIP_OPTIONS is set and is
    #     not altinstall
    # DEFAULT
    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
    #     and easy_install-X.Y.
    pip_script = console.pop('pip', None)
    if pip_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            scripts_to_generate.append('pip = ' + pip_script)
        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
            scripts_to_generate.append(
                'pip{} = {}'.format(sys.version_info[0], pip_script)
            )
        # The pipX.Y wrapper is generated unconditionally.
        scripts_to_generate.append(
            f'pip{get_major_minor_version()} = {pip_script}'
        )
        # Delete any other versioned pip entry points
        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
        for k in pip_ep:
            del console[k]
    easy_install_script = console.pop('easy_install', None)
    if easy_install_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            scripts_to_generate.append(
                'easy_install = ' + easy_install_script
            )
        scripts_to_generate.append(
            'easy_install-{} = {}'.format(
                get_major_minor_version(), easy_install_script
            )
        )
        # Delete any other versioned easy_install entry points
        easy_install_ep = [
            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
        ]
        for k in easy_install_ep:
            del console[k]
    # Generate the console entry points specified in the wheel
    scripts_to_generate.extend(starmap('{} = {}'.format, console.items()))
    return scripts_to_generate
class ZipBackedFile:
    """A File whose contents come from one member of the wheel's zip archive."""
    def __init__(self, src_record_path, dest_path, zip_file):
        # type: (RecordPath, str, ZipFile) -> None
        self.src_record_path = src_record_path
        self.dest_path = dest_path
        self._zip_file = zip_file
        self.changed = False
    def _getinfo(self):
        # type: () -> ZipInfo
        # Zip entry metadata for this file (used for mode bits below).
        return self._zip_file.getinfo(self.src_record_path)
    def save(self):
        # type: () -> None
        # directory creation is lazy and after file filtering
        # to ensure we don't install empty dirs; empty dirs can't be
        # uninstalled.
        parent_dir = os.path.dirname(self.dest_path)
        ensure_dir(parent_dir)
        # When we open the output file below, any existing file is truncated
        # before we start writing the new contents. This is fine in most
        # cases, but can cause a segfault if pip has loaded a shared
        # object (e.g. from pyopenssl through its vendored urllib3)
        # Since the shared object is mmap'd an attempt to call a
        # symbol in it will then cause a segfault. Unlinking the file
        # allows writing of new contents while allowing the process to
        # continue to use the old copy.
        if os.path.exists(self.dest_path):
            os.unlink(self.dest_path)
        zipinfo = self._getinfo()
        with self._zip_file.open(zipinfo) as f:
            with open(self.dest_path, "wb") as dest:
                shutil.copyfileobj(f, dest)
        # Preserve the executable bit recorded in the zip entry.
        if zip_item_is_executable(zipinfo):
            set_extracted_file_to_default_mode_plus_executable(self.dest_path)
class ScriptFile:
    """Wrap another File and, after saving it, rewrite its #!python shebang
    (via fix_script), recording whether the file was modified."""
    def __init__(self, file):
        # type: (File) -> None
        self._file = file
        self.src_record_path = file.src_record_path
        self.dest_path = file.dest_path
        self.changed = False
    def save(self):
        # type: () -> None
        self._file.save()
        self.changed = fix_script(self.dest_path)
class MissingCallableSuffix(InstallationError):
    """Raised for a script entry point spec that lacks a callable suffix
    (e.g. "pkg.mod" instead of "pkg.mod:func")."""
    def __init__(self, entry_point):
        # type: (str) -> None
        message = (
            "Invalid script entry point: {} - A callable "
            "suffix is required. Cf https://packaging.python.org/"
            "specifications/entry-points/#use-for-scripts for more "
            "information.".format(entry_point)
        )
        super().__init__(message)
def _raise_for_invalid_entrypoint(specification):
    # type: (str) -> None
    """Reject entry point specs that parse but lack a callable suffix."""
    entry = get_export_entry(specification)
    if entry is None:
        return
    if entry.suffix is None:
        raise MissingCallableSuffix(str(entry))
class PipScriptMaker(ScriptMaker):
    # ScriptMaker subclass that validates each entry point spec before
    # delegating wrapper generation to distlib.
    def make(self, specification, options=None):
        # type: (str, Dict[str, Any]) -> List[str]
        _raise_for_invalid_entrypoint(specification)
        return super().make(specification, options)
def _install_wheel(
    name,  # type: str
    wheel_zip,  # type: ZipFile
    wheel_path,  # type: str
    scheme,  # type: Scheme
    pycompile=True,  # type: bool
    warn_script_location=True,  # type: bool
    direct_url=None,  # type: Optional[DirectUrl]
    requested=False,  # type: bool
):
    # type: (...) -> None
    """Install a wheel.
    :param name: Name of the project to install
    :param wheel_zip: open ZipFile for wheel being installed
    :param scheme: Distutils scheme dictating the install directories
    :param req_description: String used in place of the requirement, for
        logging
    :param pycompile: Whether to byte-compile installed Python files
    :param warn_script_location: Whether to check that scripts are installed
        into a directory on PATH
    :raises UnsupportedWheel:
        * when the directory holds an unpacked wheel with incompatible
          Wheel-Version
        * when the .dist-info dir does not match the wheel
    """
    info_dir, metadata = parse_wheel(wheel_zip, name)
    if wheel_root_is_purelib(metadata):
        lib_dir = scheme.purelib
    else:
        lib_dir = scheme.platlib
    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}  # type: Dict[RecordPath, RecordPath]
    changed = set()  # type: Set[RecordPath]
    generated = []  # type: List[str]
    def record_installed(srcfile, destfile, modified=False):
        # type: (RecordPath, str, bool) -> None
        """Map archive RECORD paths to installation RECORD paths."""
        newpath = _fs_to_record_path(destfile, lib_dir)
        installed[srcfile] = newpath
        if modified:
            changed.add(_fs_to_record_path(destfile))
    def all_paths():
        # type: () -> Iterable[RecordPath]
        names = wheel_zip.namelist()
        # If a flag is set, names may be unicode in Python 2. We convert to
        # text explicitly so these are valid for lookup in RECORD.
        decoded_names = map(ensure_text, names)
        for name in decoded_names:
            yield cast("RecordPath", name)
    def is_dir_path(path):
        # type: (RecordPath) -> bool
        # Zip directory entries end with "/".
        return path.endswith("/")
    def assert_no_path_traversal(dest_dir_path, target_path):
        # type: (str, str) -> None
        # Guard against malicious wheels with "../"-style member names.
        if not is_within_directory(dest_dir_path, target_path):
            message = (
                "The wheel {!r} has a file {!r} trying to install"
                " outside the target directory {!r}"
            )
            raise InstallationError(
                message.format(wheel_path, target_path, dest_dir_path)
            )
    def root_scheme_file_maker(zip_file, dest):
        # type: (ZipFile, str) -> Callable[[RecordPath], File]
        # Files at the wheel root install directly under lib_dir.
        def make_root_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            dest_path = os.path.join(dest, normed_path)
            assert_no_path_traversal(dest, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)
        return make_root_scheme_file
    def data_scheme_file_maker(zip_file, scheme):
        # type: (ZipFile, Scheme) -> Callable[[RecordPath], File]
        # Files under <pkg>.data/<scheme key>/ install into the directory
        # named by that scheme key.
        scheme_paths = {}
        for key in SCHEME_KEYS:
            encoded_key = ensure_text(key)
            scheme_paths[encoded_key] = ensure_text(
                getattr(scheme, key), encoding=sys.getfilesystemencoding()
            )
        def make_data_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            try:
                _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
            except ValueError:
                message = (
                    "Unexpected file in {}: {!r}. .data directory contents"
                    " should be named like: '<scheme key>/<path>'."
                ).format(wheel_path, record_path)
                raise InstallationError(message)
            try:
                scheme_path = scheme_paths[scheme_key]
            except KeyError:
                valid_scheme_keys = ", ".join(sorted(scheme_paths))
                message = (
                    "Unknown scheme key used in {}: {} (for file {!r}). .data"
                    " directory contents should be in subdirectories named"
                    " with a valid scheme key ({})"
                ).format(
                    wheel_path, scheme_key, record_path, valid_scheme_keys
                )
                raise InstallationError(message)
            dest_path = os.path.join(scheme_path, dest_subpath)
            assert_no_path_traversal(scheme_path, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)
        return make_data_scheme_file
    def is_data_scheme_path(path):
        # type: (RecordPath) -> bool
        return path.split("/", 1)[0].endswith(".data")
    paths = all_paths()
    file_paths = filterfalse(is_dir_path, paths)
    root_scheme_paths, data_scheme_paths = partition(
        is_data_scheme_path, file_paths
    )
    make_root_scheme_file = root_scheme_file_maker(
        wheel_zip,
        ensure_text(lib_dir, encoding=sys.getfilesystemencoding()),
    )
    files = map(make_root_scheme_file, root_scheme_paths)
    def is_script_scheme_path(path):
        # type: (RecordPath) -> bool
        parts = path.split("/", 2)
        return (
            len(parts) > 2 and
            parts[0].endswith(".data") and
            parts[1] == "scripts"
        )
    other_scheme_paths, script_scheme_paths = partition(
        is_script_scheme_path, data_scheme_paths
    )
    make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
    other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
    files = chain(files, other_scheme_files)
    # Get the defined entry points
    distribution = get_wheel_distribution(wheel_path, canonicalize_name(name))
    console, gui = get_entrypoints(distribution)
    def is_entrypoint_wrapper(file):
        # type: (File) -> bool
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        path = file.dest_path
        name = os.path.basename(path)
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)
    script_scheme_files = map(make_data_scheme_file, script_scheme_paths)
    script_scheme_files = filterfalse(
        is_entrypoint_wrapper, script_scheme_files
    )
    script_scheme_files = map(ScriptFile, script_scheme_files)
    files = chain(files, script_scheme_files)
    # Materialize every file on disk and record it for the new RECORD.
    for file in files:
        file.save()
        record_installed(file.src_record_path, file.dest_path, file.changed)
    def pyc_source_file_paths():
        # type: () -> Iterator[str]
        # We de-duplicate installation paths, since there can be overlap (e.g.
        # file in .data maps to same location as file in wheel root).
        # Sorting installation paths makes it easier to reproduce and debug
        # issues related to permissions on existing files.
        for installed_path in sorted(set(installed.values())):
            full_installed_path = os.path.join(lib_dir, installed_path)
            if not os.path.isfile(full_installed_path):
                continue
            if not full_installed_path.endswith('.py'):
                continue
            yield full_installed_path
    def pyc_output_path(path):
        # type: (str) -> str
        """Return the path the pyc file would have been written to.
        """
        return importlib.util.cache_from_source(path)
    # Compile all of the pyc files for the installed files
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                for path in pyc_source_file_paths():
                    # Python 2's `compileall.compile_file` requires a str in
                    # error cases, so we must convert to the native type.
                    path_arg = ensure_str(
                        path, encoding=sys.getfilesystemencoding()
                    )
                    success = compileall.compile_file(
                        path_arg, force=True, quiet=True
                    )
                    if success:
                        pyc_path = pyc_output_path(path)
                        assert os.path.exists(pyc_path)
                        pyc_record_path = cast(
                            "RecordPath", pyc_path.replace(os.path.sep, "/")
                        )
                        record_installed(pyc_record_path, pyc_path)
        logger.debug(stdout.getvalue())
    maker = PipScriptMaker(None, scheme.scripts)
    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True
    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = {''}
    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True
    # Generate the console and GUI entry points specified in the wheel
    scripts_to_generate = get_console_script_specs(console)
    gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items()))
    generated_console_scripts = maker.make_multiple(scripts_to_generate)
    generated.extend(generated_console_scripts)
    generated.extend(
        maker.make_multiple(gui_scripts_to_generate, {'gui': True})
    )
    if warn_script_location:
        msg = message_about_scripts_not_on_PATH(generated_console_scripts)
        if msg is not None:
            logger.warning(msg)
    generated_file_mode = 0o666 & ~current_umask()
    @contextlib.contextmanager
    def _generate_file(path, **kwargs):
        # type: (str, **Any) -> Iterator[BinaryIO]
        # Write via an adjacent temp file, then atomically replace the
        # target once the caller has finished writing.
        with adjacent_tmp_file(path, **kwargs) as f:
            yield f
            os.chmod(f.name, generated_file_mode)
        replace(f.name, path)
    dest_info_dir = os.path.join(lib_dir, info_dir)
    # Record pip as the installer
    installer_path = os.path.join(dest_info_dir, 'INSTALLER')
    with _generate_file(installer_path) as installer_file:
        installer_file.write(b'pip\n')
    generated.append(installer_path)
    # Record the PEP 610 direct URL reference
    if direct_url is not None:
        direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
        with _generate_file(direct_url_path) as direct_url_file:
            direct_url_file.write(direct_url.to_json().encode("utf-8"))
        generated.append(direct_url_path)
    # Record the REQUESTED file
    if requested:
        requested_path = os.path.join(dest_info_dir, 'REQUESTED')
        with open(requested_path, "wb"):
            pass
        generated.append(requested_path)
    record_text = distribution.read_text('RECORD')
    record_rows = list(csv.reader(record_text.splitlines()))
    rows = get_csv_rows_for_installed(
        record_rows,
        installed=installed,
        changed=changed,
        generated=generated,
        lib_dir=lib_dir)
    # Record details of all files installed
    record_path = os.path.join(dest_info_dir, 'RECORD')
    with _generate_file(record_path, **csv_io_kwargs('w')) as record_file:
        # The type mypy infers for record_file is different for Python 3
        # (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly
        # cast to typing.IO[str] as a workaround.
        writer = csv.writer(cast('IO[str]', record_file))
        writer.writerows(_normalized_outrows(rows))
@contextlib.contextmanager
def req_error_context(req_description):
    # type: (str) -> Iterator[None]
    """Re-raise any InstallationError from the body with the requirement
    description prefixed, preserving the original traceback."""
    try:
        yield
    except InstallationError as e:
        annotated = InstallationError(
            "For req: {}. {}".format(req_description, e.args[0])
        )
        reraise(InstallationError, annotated, sys.exc_info()[2])
def install_wheel(
    name,  # type: str
    wheel_path,  # type: str
    scheme,  # type: Scheme
    req_description,  # type: str
    pycompile=True,  # type: bool
    warn_script_location=True,  # type: bool
    direct_url=None,  # type: Optional[DirectUrl]
    requested=False,  # type: bool
):
    # type: (...) -> None
    """Open the wheel at *wheel_path* and install it, annotating any
    InstallationError with *req_description*."""
    with ZipFile(wheel_path, allowZip64=True) as z, \
            req_error_context(req_description):
        _install_wheel(
            name=name,
            wheel_zip=z,
            wheel_path=wheel_path,
            scheme=scheme,
            pycompile=pycompile,
            warn_script_location=warn_script_location,
            direct_url=direct_url,
            requested=requested,
        )
|
|
"""gccxmlparser - parse a gccxml created XML file into sequence type descriptions"""
try:
from xml.etree import cElementTree
except ImportError:
try:
import cElementTree
except ImportError:
cElementTree = None
if cElementTree:
base = object
else:
import xml.sax
base = xml.sax.ContentHandler
import typedesc
import sys
try:
set
except NameError:
from sets import Set as set
import re
################################################################
def MAKE_NAME(name):
    """Mangle *name* into a usable Python identifier.

    '$' and '.' are spelled out, names starting with a double underscore
    get an '_X' prefix, and names starting with a digit get a leading
    underscore.
    """
    name = name.replace("$", "DOLLAR")
    name = name.replace(".", "DOT")
    if name.startswith("__"):
        return "_X" + name
    # The original digit string here was "01234567879" (duplicated '7');
    # it happened to still contain every digit, so behavior is unchanged.
    elif name[0] in "0123456789":
        return "_" + name
    return name
WORDPAT = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
def CHECK_NAME(name):
    """Return *name* unchanged if it looks like a plain identifier,
    otherwise None."""
    return name if WORDPAT.match(name) else None
class GCCXML_Parser(base):
    # Elements whose XML nodes have children we care about; while such an
    # element is open, its result object sits on top of self.context.
    has_values = set(["Enumeration", "Function", "FunctionType",
                      "OperatorFunction", "Method", "Constructor",
                      "Destructor", "OperatorMethod"])
    def __init__(self, *args):
        base.__init__(self, *args)
        self.context = []  # stack of currently open elements (see has_values)
        self.all = {}  # node id -> result object, resolved in _fixup_ passes
        self.cpp_data = {}  # named CPP_DUMP sections (name -> list of text)
    if cElementTree:
        def parse(self, xmlfile):
            # Stream the document with cElementTree, translating its
            # start/end events into the SAX-style handler calls below.
            for event, node in cElementTree.iterparse(xmlfile, events=("start", "end")):
                if event == "start":
                    self.startElement(node.tag, dict(node.items()))
                else:
                    if node.text:
                        self.characters(node.text)
                    self.endElement(node.tag)
                    # free the element to keep memory bounded
                    node.clear()
    else:
        def parse(self, xmlfile):
            # Fall back to plain SAX parsing; in this configuration the
            # parser itself is an xml.sax.ContentHandler (see `base`).
            xml.sax.parse(xmlfile, self)
    def startElement(self, name, attrs):
        # find and call the handler for this element
        # (an unknown element name raises AttributeError here)
        mth = getattr(self, name)
        result = mth(attrs)
        if result is not None:
            location = attrs.get("location", None)
            if location is not None:
                result.location = location
            # record the result
            _id = attrs.get("id", None)
            # The '_id' attribute is used to link together all the
            # nodes, in the _fixup_ methods.
            if _id is not None:
                self.all[_id] = result
            else:
                # EnumValue, for example, has no "_id" attribute.
                # Invent our own...
                self.all[id(result)] = result
        # if this element has children, push onto the context
        if name in self.has_values:
            self.context.append(result)
    # Current CPP_DUMP text buffer (list of strings), or None outside one.
    cdata = None
    def endElement(self, name):
        # if this element has children, pop the context
        if name in self.has_values:
            self.context.pop()
        self.cdata = None
    ################################
    # do-nothing element handlers
    # NOTE: Destructor and OperatorMethod are re-bound to _Ignored near the
    # bottom of the class, so these no-op definitions end up shadowed.
    def Class(self, attrs): pass
    def Destructor(self, attrs): pass
    cvs_revision = None
    def GCC_XML(self, attrs):
        # remember the gccxml version as a tuple of ints, e.g. (1, 114)
        rev = attrs["cvs_revision"]
        self.cvs_revision = tuple(map(int, rev.split(".")))
    def Namespace(self, attrs): pass
    def Base(self, attrs): pass
    def Ellipsis(self, attrs): pass
    def OperatorMethod(self, attrs): pass
    ################################
    # real element handlers
    def CPP_DUMP(self, attrs):
        name = attrs["name"]
        # Insert a new list for each named section into self.cpp_data,
        # and point self.cdata to it. self.cdata will be set to None
        # again at the end of each section.
        self.cpp_data[name] = self.cdata = []
    def characters(self, content):
        # Accumulate character data into the current CPP_DUMP section,
        # if one is open.
        if self.cdata is not None:
            self.cdata.append(content)
    def File(self, attrs):
        name = attrs["name"]
        if sys.platform == "win32" and " " in name:
            # On windows, convert to short filename if it contains blanks
            from ctypes import windll, create_unicode_buffer, sizeof, WinError
            buf = create_unicode_buffer(512)
            if windll.kernel32.GetShortPathNameW(name, buf, sizeof(buf)):
                name = buf.value
        return typedesc.File(name)
    def _fixup_File(self, f): pass
    # simple types and modifiers
    def Variable(self, attrs):
        name = attrs["name"]
        if name.startswith("cpp_sym_"):
            # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXx fix me!
            name = name[len("cpp_sym_"):]
        init = attrs.get("init", None)
        typ = attrs["type"]
        return typedesc.Variable(name, typ, init)
    def _fixup_Variable(self, t):
        # resolve the stored type id into the actual typedesc node
        t.typ = self.all[t.typ]
    def Typedef(self, attrs):
        name = attrs["name"]
        typ = attrs["type"]
        return typedesc.Typedef(name, typ)
    def _fixup_Typedef(self, t):
        t.typ = self.all[t.typ]
def FundamentalType(self, attrs):
name = attrs["name"]
if name == "void":
size = ""
else:
size = attrs["size"]
align = attrs["align"]
return typedesc.FundamentalType(name, size, align)
def _fixup_FundamentalType(self, t): pass
    def PointerType(self, attrs):
        """Handle a <PointerType> element (pointee type id, size, align)."""
        typ = attrs["type"]
        size = attrs["size"]
        align = attrs["align"]
        return typedesc.PointerType(typ, size, align)
    def _fixup_PointerType(self, p):
        p.typ = self.all[p.typ]
    # C++ references are represented exactly like pointers here.
    ReferenceType = PointerType
    _fixup_ReferenceType = _fixup_PointerType
    def ArrayType(self, attrs):
        """Handle an <ArrayType> element with element type and index bounds."""
        # type, min?, max?
        typ = attrs["type"]
        min = attrs["min"]
        max = attrs["max"]
        if max == "ffffffffffffffff":
            # gccxml emits an all-ones max for incomplete arrays; map it to -1
            max = "-1"
        return typedesc.ArrayType(typ, min, max)
    def _fixup_ArrayType(self, a):
        a.typ = self.all[a.typ]
    def CvQualifiedType(self, attrs):
        """Handle a const/volatile qualified type."""
        # id, type, [const|volatile]
        typ = attrs["type"]
        const = attrs.get("const", None)
        volatile = attrs.get("volatile", None)
        return typedesc.CvQualifiedType(typ, const, volatile)
    def _fixup_CvQualifiedType(self, c):
        c.typ = self.all[c.typ]
    # callables
    def Function(self, attrs):
        """Handle a free <Function>; Argument children are attached later."""
        # name, returns, extern, attributes
        name = attrs["name"]
        returns = attrs["returns"]
        attributes = attrs.get("attributes", "").split()
        extern = attrs.get("extern")
        return typedesc.Function(name, returns, attributes, extern)
    def _fixup_Function(self, func):
        # resolve the return-type id and every argument-type id
        func.returns = self.all[func.returns]
        func.fixup_argtypes(self.all)
    def FunctionType(self, attrs):
        """Handle a <FunctionType> (function-pointer target type)."""
        # id, returns, attributes
        returns = attrs["returns"]
        attributes = attrs.get("attributes", "").split()
        return typedesc.FunctionType(returns, attributes)
    def _fixup_FunctionType(self, func):
        func.returns = self.all[func.returns]
        func.fixup_argtypes(self.all)
    def OperatorFunction(self, attrs):
        """Handle a free operator function (operator+, ...)."""
        # name, returns, extern, attributes
        name = attrs["name"]
        returns = attrs["returns"]
        return typedesc.OperatorFunction(name, returns)
    def _fixup_OperatorFunction(self, func):
        func.returns = self.all[func.returns]
    def _Ignored(self, attrs):
        """Produce a named placeholder node for elements we don't translate."""
        name = attrs.get("name", None)
        if not name:
            name = attrs["mangled"]
        return typedesc.Ignored(name)
    def _fixup_Ignored(self, const): pass
    # parsed only so that their ids resolve during fixup; this rebinds the
    # earlier do-nothing Destructor/OperatorMethod handlers
    Constructor = Destructor = OperatorMethod = _Ignored
    def Method(self, attrs):
        """Handle a C++ member function; only name and return type are kept."""
        # name, virtual, pure_virtual, returns
        name = attrs["name"]
        returns = attrs["returns"]
        return typedesc.Method(name, returns)
    def _fixup_Method(self, m):
        m.returns = self.all[m.returns]
        m.fixup_argtypes(self.all)
    def Argument(self, attrs):
        """Attach an <Argument> child to the callable currently on the stack."""
        parent = self.context[-1]
        if parent is not None:
            parent.add_argument(typedesc.Argument(attrs["type"], attrs.get("name")))
    # enumerations
    def Enumeration(self, attrs):
        """Handle an <Enumeration>; EnumValue children are added afterwards."""
        # id, name
        name = attrs["name"]
        # If the name isn't a valid Python identifier, create an unnamed enum
        name = CHECK_NAME(name)
        size = attrs["size"]
        align = attrs["align"]
        return typedesc.Enumeration(name, size, align)
    def _fixup_Enumeration(self, e): pass
    def EnumValue(self, attrs):
        """Add one enumerator to the Enumeration on top of the context stack."""
        name = attrs["name"]
        value = attrs["init"]
        v = typedesc.EnumValue(name, value, self.context[-1])
        self.context[-1].add_value(v)
        return v
    def _fixup_EnumValue(self, e): pass
    # structures, unions
    def Struct(self, attrs):
        """Handle a <Struct>; member/base ids are resolved in the fixup pass."""
        # id, name, members
        name = attrs.get("name")
        if name is None:
            # anonymous struct: synthesize a name from the mangled id
            name = MAKE_NAME(attrs["mangled"])
        bases = attrs.get("bases", "").split()
        members = attrs.get("members", "").split()
        align = attrs["align"]
        size = attrs.get("size")
        return typedesc.Structure(name, align, members, bases, size)
    def _fixup_Structure(self, s):
        # resolve member and base ids into typedesc nodes
        s.members = [self.all[m] for m in s.members]
        s.bases = [self.all[b] for b in s.bases]
    _fixup_Union = _fixup_Structure
    def Union(self, attrs):
        """Handle a <Union>; mirrors Struct handling."""
        name = attrs.get("name")
        if name is None:
            name = MAKE_NAME(attrs["mangled"])
        bases = attrs.get("bases", "").split()
        members = attrs.get("members", "").split()
        align = attrs["align"]
        size = attrs.get("size")
        return typedesc.Union(name, align, members, bases, size)
    def Field(self, attrs):
        """Handle a struct/union <Field>; bits is the bitfield width, if any."""
        # name, type
        name = attrs["name"]
        ## if name.startswith("__") and not name.endswith("__"):
        ## print "INVALID FIELD NAME", name
        typ = attrs["type"]
        bits = attrs.get("bits", None)
        offset = attrs.get("offset")
        return typedesc.Field(name, typ, bits, offset)
    def _fixup_Field(self, f):
        f.typ = self.all[f.typ]
    ################
    def _fixup_Macro(self, m):
        pass
    def get_macros(self, text):
        """Parse the 'functions' CPP_DUMP section into typedesc.Macro nodes.

        Each line is expected to look like ``NAME(args) body``.
        NOTE(review): a line without both a body and a '(' would raise
        ValueError here -- presumably the dump contains only function-like
        macros; confirm.
        """
        if text is None:
            return
        text = "".join(text)
        # preprocessor definitions that look like macros with one or more arguments
        for m in text.splitlines():
            name, body = m.split(None, 1)
            name, args = name.split("(", 1)
            args = "(%s" % args
            self.all[name] = typedesc.Macro(name, args, body)
    def get_aliases(self, text, namespace):
        """Parse the 'aliases' CPP_DUMP section (#define A B) into Alias nodes.

        Each alias target is resolved against *namespace* first, then
        against the other aliases; unresolvable targets keep no type.
        """
        if text is None:
            return
        # preprocessor definitions that look like aliases:
        # #define A B
        text = "".join(text)
        aliases = {}
        for a in text.splitlines():
            name, value = a.split(None, 1)
            a = typedesc.Alias(name, value)
            aliases[name] = a
            self.all[name] = a
        for name, a in aliases.items():
            value = a.alias
            # the value should be either in namespace...
            if value in namespace:
                # set the type
                a.typ = namespace[value]
            # or in aliases...
            elif value in aliases:
                a.typ = aliases[value]
            # or unknown.
            else:
                # not known
                ## print "skip %s = %s" % (name, value)
                pass
    def get_result(self):
        """Fix up all parsed nodes and return the interesting ones.

        Resolves id references between nodes, attaches source locations,
        drops nodes whose references cannot be resolved, folds in macros
        and aliases from the preprocessor dump, and returns every node
        whose type matters for code generation.
        """
        interesting = (typedesc.Typedef, typedesc.Enumeration, typedesc.EnumValue,
                       typedesc.Function, typedesc.Structure, typedesc.Union,
                       typedesc.Variable, typedesc.Macro, typedesc.Alias)
        import warnings
        if self.cvs_revision is None:
            warnings.warn("Could not determine CVS revision of GCCXML")
        elif self.cvs_revision < (1, 114):
            warnings.warn("CVS Revision of GCCXML is %d.%d" % self.cvs_revision)
        self.get_macros(self.cpp_data.get("functions"))
        remove = []
        for n, i in self.all.items():
            location = getattr(i, "location", None)
            if location:
                # location is "<file-id>:<line>"; translate the file id
                # into the real file name via the parsed File nodes
                fil, line = location.split(":")
                i.location = self.all[fil].name, line
            # link together all the nodes (the XML that gccxml generates uses this).
            mth = getattr(self, "_fixup_" + type(i).__name__)
            try:
                mth(i)
            except KeyError: # XXX better exception catching
                # an unresolved reference: drop the node entirely
                remove.append(n)
        for n in remove:
            del self.all[n]
        # Now we can build the namespace.
        namespace = {}
        for i in self.all.values():
            if not isinstance(i, interesting):
                continue # we don't want these
            name = getattr(i, "name", None)
            if name is not None:
                namespace[name] = i
        self.get_aliases(self.cpp_data.get("aliases"), namespace)
        result = []
        for i in self.all.values():
            if isinstance(i, interesting):
                result.append(i)
        return result
################################################################
def parse(xmlfile):
    """Parse *xmlfile* (gccxml output) into a sequence of type descriptions."""
    handler = GCCXML_Parser()
    handler.parse(xmlfile)
    return handler.get_result()
|
|
"""Support for Epson projector."""
from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
# Feature bitmask advertised by EpsonProjectorMediaPlayer.supported_features.
SUPPORT_EPSON = (
    SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_SELECT_SOURCE
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_VOLUME_STEP
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PREVIOUS_TRACK
)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Epson projector from a config entry."""
    entry_id = config_entry.entry_id
    unique_id = config_entry.unique_id
    # connection object stored in hass.data by the integration's __init__
    projector = hass.data[DOMAIN][entry_id]
    projector_entity = EpsonProjectorMediaPlayer(
        projector=projector,
        name=config_entry.title,
        unique_id=unique_id,
        entry=config_entry,
    )
    # True -> request an immediate state update after adding
    async_add_entities([projector_entity], True)
    platform = entity_platform.async_get_current_platform()
    # register the select_cmode entity service; the third argument is the
    # entity method name it dispatches to (same string as the service name)
    platform.async_register_entity_service(
        SERVICE_SELECT_CMODE,
        {vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
        SERVICE_SELECT_CMODE,
    )
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
    """Representation of Epson Projector Device."""
    def __init__(self, projector, name, unique_id, entry):
        """Initialize entity to control Epson projector."""
        self._projector = projector
        self._entry = entry
        self._name = name
        # becomes True once the projector answers a status request
        self._available = False
        # human-readable color mode; None until first successful update
        self._cmode = None
        self._source_list = list(DEFAULT_SOURCES.values())
        self._source = None
        self._volume = None
        self._state = None
        # may be None for entries created before serial-number support;
        # set_unique_id() migrates those on first successful update
        self._unique_id = unique_id
    async def set_unique_id(self):
        """Set unique id for projector config entry.

        Returns True when a migration happened (the config entry is then
        reloaded), otherwise a falsy value.
        """
        _LOGGER.debug("Setting unique_id for projector")
        if self._unique_id:
            return False
        uid = await self._projector.get_serial_number()
        if uid:
            self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
            registry = async_get_entity_registry(self.hass)
            # pre-migration entities were registered under the entry id
            old_entity_id = registry.async_get_entity_id(
                "media_player", DOMAIN, self._entry.entry_id
            )
            if old_entity_id is not None:
                registry.async_update_entity(old_entity_id, new_unique_id=uid)
            self.hass.async_create_task(
                self.hass.config_entries.async_reload(self._entry.entry_id)
            )
            return True
    async def async_update(self):
        """Update state of device."""
        power_state = await self._projector.get_power()
        _LOGGER.debug("Projector status: %s", power_state)
        if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
            self._available = False
            return
        self._available = True
        if power_state == EPSON_CODES[POWER]:
            self._state = STATE_ON
            # a successful migration reloads the config entry, so stop here
            if await self.set_unique_id():
                return
            self._source_list = list(DEFAULT_SOURCES.values())
            cmode = await self._projector.get_property(CMODE)
            # keep the previous value when the projector gives no answer
            self._cmode = CMODE_LIST.get(cmode, self._cmode)
            source = await self._projector.get_property(SOURCE)
            self._source = SOURCE_LIST.get(source, self._source)
            volume = await self._projector.get_property(VOLUME)
            if volume:
                self._volume = volume
        elif power_state == BUSY:
            # warming up / cooling down: still report "on"
            self._state = STATE_ON
        else:
            self._state = STATE_OFF
    @property
    def device_info(self) -> DeviceInfo | None:
        """Get attributes about the device."""
        if not self._unique_id:
            return None
        # NOTE(review): via_device references this device's own identifier,
        # which looks unintended -- confirm against the device registry docs.
        return DeviceInfo(
            identifiers={(DOMAIN, self._unique_id)},
            manufacturer="Epson",
            model="Epson",
            name="Epson projector",
            via_device=(DOMAIN, self._unique_id),
        )
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def unique_id(self):
        """Return unique ID."""
        return self._unique_id
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def available(self):
        """Return if projector is available."""
        return self._available
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_EPSON
    async def async_turn_on(self):
        """Turn on epson."""
        if self._state == STATE_OFF:
            await self._projector.send_command(TURN_ON)
    async def async_turn_off(self):
        """Turn off epson."""
        if self._state == STATE_ON:
            await self._projector.send_command(TURN_OFF)
    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list
    @property
    def source(self):
        """Get current input sources."""
        return self._source
    @property
    def volume_level(self):
        """Return the volume level of the media player (0..1)."""
        return self._volume
    async def select_cmode(self, cmode):
        """Set color mode in Epson."""
        await self._projector.send_command(CMODE_LIST_SET[cmode])
    async def async_select_source(self, source):
        """Select input source."""
        selected_source = INV_SOURCES[source]
        await self._projector.send_command(selected_source)
    async def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) sound."""
        # the projector toggles mute; the ``mute`` flag itself is not sent
        await self._projector.send_command(MUTE)
    async def async_volume_up(self):
        """Increase volume."""
        await self._projector.send_command(VOL_UP)
    async def async_volume_down(self):
        """Decrease volume."""
        await self._projector.send_command(VOL_DOWN)
    async def async_media_play(self):
        """Play media via Epson."""
        await self._projector.send_command(PLAY)
    async def async_media_pause(self):
        """Pause media via Epson."""
        await self._projector.send_command(PAUSE)
    async def async_media_next_track(self):
        """Skip to next."""
        await self._projector.send_command(FAST)
    async def async_media_previous_track(self):
        """Skip to previous."""
        await self._projector.send_command(BACK)
    @property
    def extra_state_attributes(self):
        """Return device specific state attributes."""
        if self._cmode is None:
            return {}
        return {ATTR_CMODE: self._cmode}
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import transaction
from BTrees.OOBTree import OOSet
from zeo_connector import transaction_manager
from zeo_connector.examples import DatabaseHandler
from settings import ZEO_CLIENT_PATH
from settings import TREE_PROJECT_KEY as PROJECT_KEY
# Variables ===================================================================
_TREE_HANDLER = None  # module-level singleton cache used by tree_handler()
# Functions & classes =========================================================
class TreeHandler(DatabaseHandler):
    """
    This class is used as database handler for :class:`.Tree` instances.
    Attributes:
        name_db_key (str): Key for the :attr:`.name_db`.
        name_db (dict): Database handler dict for `name`.
        aleph_id_db_key (str): Key for the :attr:`.aleph_id_db`.
        aleph_id_db (dict): Database handler dict for `aleph_id`.
        issn_db_key (str): Key for the :attr:`.issn_db`.
        issn_db (dict): Database handler dict for `issn`.
        path_db_key (str): Key for the :attr:`.path_db`.
        path_db (dict): Database handler dict for `path`.
        parent_db_key (str): Key for the :attr:`.parent_db`.
        parent_db (dict): Database handler dict for `parent`.
    """
    def __init__(self, conf_path=ZEO_CLIENT_PATH, project_key=PROJECT_KEY):
        """
        Constructor.
        Args:
            conf_path (str): Path to the ZEO configuration file. Default
                :attr:`~storage.settings.ZEO_CLIENT_PATH`.
            project_key (str): Project key, which is used for lookups into ZEO.
                Default :attr:`~storage.settings.TREE_PROJECT_KEY`.
        """
        # BUGFIX: was ``super(self.__class__, self)``, which recurses
        # forever when this class is subclassed, because self.__class__
        # is always the leaf class.  Name the class explicitly instead.
        super(TreeHandler, self).__init__(
            conf_path=conf_path,
            project_key=project_key
        )
        # tree.name -> tree
        self.name_db_key = "name_db"
        self.name_db = self._get_key_or_create(self.name_db_key)
        # tree.aleph_id -> tree
        self.aleph_id_db_key = "aleph_id_db"
        self.aleph_id_db = self._get_key_or_create(self.aleph_id_db_key)
        # tree.issn -> tree
        self.issn_db_key = "issn_db"
        self.issn_db = self._get_key_or_create(self.issn_db_key)
        # tree.path -> tree
        self.path_db_key = "path_db"
        self.path_db = self._get_key_or_create(self.path_db_key)
        # sub_tree.path -> parent
        self.parent_db_key = "parent_db"
        self.parent_db = self._get_key_or_create(self.parent_db_key)
    @transaction_manager
    def _add_to(self, db, index, item, default=OOSet):
        """
        Add `item` to `db` under `index`. If `index` is not yet in `db`, create
        it using `default`.
        Args:
            db (dict-obj): Dict-like object used to connect to database.
            index (str): Index used to look in `db`.
            item (obj): Persistent object, which may be stored in DB.
            default (func/obj): Reference to function/object, which will be
                    used to create the object under `index`.
                    Default :class:`OOSet`.
        """
        row = db.get(index, None)
        if row is None:
            row = default()
            db[index] = row
        row.add(item)
    @transaction_manager
    def add_tree(self, tree, parent=None):
        """
        Add `tree` into database.
        Args:
            tree (obj): :class:`.Tree` instance.
            parent (ref, default None): Reference to parent tree. This is used
                   for all sub-trees in recursive call.
        """
        # re-adding a tree replaces the previously stored version
        if tree.path in self.path_db:
            self.remove_tree_by_path(tree.path)
        # index all indexable attributes
        for index in tree.indexes:
            if not getattr(tree, index):
                continue
            self._add_to(
                getattr(self, index + "_db"),
                getattr(tree, index),
                tree,
            )
        if parent:
            self._add_to(self.parent_db, tree.path, parent)
        # make sure, that all sub-trees starts with path of parent tree
        for sub_tree in tree.sub_trees:
            assert sub_tree.path.startswith(tree.path)
        for sub_tree in tree.sub_trees:
            self.add_tree(sub_tree, parent=tree)
    def remove_tree_by_path(self, path):
        """
        Remove the tree from database by given `path`.
        Args:
            path (str): Path of the tree.
        """
        with transaction.manager:
            trees = self.path_db.get(path, None)
        if not trees:
            return
        # NOTE(review): only the first tree stored under `path` is removed
        # (the loop returns immediately) -- confirm this is intended.
        for tree in trees:
            return self._remove_tree(tree)
    def remove_tree(self, tree):
        """
        Remove the tree from database using `tree` object to identfy the path.
        Args:
            tree (obj): :class:`.Tree` instance.
        """
        return self.remove_tree_by_path(tree.path)
    def _remove_from(self, db, index, item):
        """
        Remove `item` from `db` at `index`.
        Note:
            This function is inverse to :meth:`._add_to`.
        Args:
            db (dict-obj): Dict-like object used to connect to database.
            index (str): Index used to look in `db`.
            item (obj): Persistent object, which may be stored in DB.
        """
        # NOTE(review): the three steps below run in three *separate*
        # transactions, so the operation is not atomic -- confirm whether
        # that is deliberate.
        with transaction.manager:
            row = db.get(index, None)
        if row is None:
            return
        with transaction.manager:
            if item in row:
                row.remove(item)
        # drop the whole index entry once its set is empty
        with transaction.manager:
            if not row:
                del db[index]
    @transaction_manager
    def _remove_tree(self, tree, parent=None):
        """
        Really remove the tree identified by `tree` instance from all indexes
        from database.
        Args:
            tree (obj): :class:`.Tree` instance.
            parent (obj, default None): Reference to parent.
        """
        # remove sub-trees
        for sub_tree in tree.sub_trees:
            self._remove_tree(sub_tree, parent=tree)
        # remove itself
        for index in tree.indexes:
            if not getattr(tree, index):
                continue
            self._remove_from(
                getattr(self, index + "_db"),
                getattr(tree, index),
                tree,
            )
        if parent:
            self._remove_from(self.parent_db, tree.path, parent)
        # reclaim the space freed in the ZODB storage
        self.zeo.pack()
    @transaction_manager
    def trees_by_issn(self, issn):
        """
        Search trees by `issn`.
        Args:
            issn (str): :attr:`.Tree.issn` property of :class:`.Tree`.
        Returns:
            set: Set of matching :class:`Tree` instances.
        """
        return set(
            self.issn_db.get(issn, OOSet()).keys()
        )
    @transaction_manager
    def trees_by_path(self, path):
        """
        Search trees by `path`.
        Args:
            path (str): :attr:`.Tree.path` property of :class:`.Tree`.
        Returns:
            set: Set of matching :class:`Tree` instances.
        """
        return set(
            self.path_db.get(path, OOSet()).keys()
        )
    @transaction_manager
    def trees_by_subpath(self, sub_path):
        """
        Search trees by `sub_path` using ``Tree.path.startswith(sub_path)``
        comparison.
        Args:
            sub_path (str): Part of the :attr:`.Tree.path` property of
                :class:`.Tree`.
        Returns:
            set: Set of matching :class:`Tree` instances.
        """
        matches = (
            self.path_db[tree_path].keys()
            for tree_path in self.path_db.iterkeys()
            if tree_path.startswith(sub_path)
        )
        return set(sum(matches, []))  # flatten the list
    @transaction_manager
    def get_parent(self, tree, alt=None):
        """
        Get parent for given `tree` or `alt` if not found.
        Args:
            tree (obj): :class:`.Tree` instance, which is already stored in DB.
            alt (obj, default None): Alternative value returned when `tree` is
                not found.
        Returns:
            obj: :class:`.Tree` parent to given `tree`.
        """
        parent = self.parent_db.get(tree.path)
        if not parent:
            return alt
        return list(parent)[0]
def tree_handler(*args, **kwargs):
    """
    Lazily create and return the module-wide :class:`TreeHandler` singleton.

    Arguments are forwarded to the :class:`TreeHandler` constructor, but only
    on the first call; later calls return the cached instance and ignore
    their arguments.

    Returns:
        obj: The shared :class:`TreeHandler` instance.
    """
    global _TREE_HANDLER
    _TREE_HANDLER = _TREE_HANDLER or TreeHandler(*args, **kwargs)
    return _TREE_HANDLER
|
|
from django.core.exceptions import ValidationError
from nose.plugins.skip import SkipTest
from cyder.base.tests import ModelTestMixin, TestCase
from cyder.cydhcp.site.models import Site
from cyder.cydhcp.network.models import Network
from cyder.cydhcp.range.models import Range
from cyder.cydhcp.validation import get_partial_overlap, get_total_overlap
from cyder.cydns.domain.models import Domain
from cyder.cydns.ip.models import ipv6_to_longs
class NetworkTests(TestCase, ModelTestMixin):
    """Tests for Network creation, deletion, resizing and overlap checks."""

    @property
    def objs(self):
        """Create objects for test_create_delete."""
        return (
            Network.objects.create(network_str='10.0.0.0/8', ip_type='4'),
            Network.objects.create(network_str='192.168.0.0/24', ip_type='4'),
            Network.objects.create(network_str='192.168.128.0/25',
                                   ip_type='4'),
            Network.objects.create(network_str='abcd::1234/126', ip_type='6'),
            Network.objects.create(network_str='f::/24', ip_type='6'),
        )

    def test2_create_ipv6(self):
        """An IPv6 network address is canonicalized on save."""
        s = Network.objects.create(
            network_str='ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/24',
            ip_type='6')
        str(s)
        s.__repr__()
        start_upper, start_lower = ipv6_to_longs(
            'ffff:ff00:0000:0000:0000:0000:0000:0000')
        # Network address was canonicalized.
        self.assertEqual(s.start_upper, start_upper)
        self.assertEqual(s.start_lower, start_lower)

    def test_bad_resize(self):
        """Shrinking a network below an attached range must fail on save."""
        s = Network.objects.create(network_str='129.0.0.0/24', ip_type='4')
        # NOTE(review): this Domain is never used by the assertions;
        # presumably fixture state required elsewhere -- confirm before
        # removing.
        d = Domain(name="asdf")
        d.save()
        r = Range.objects.create(
            start_str='129.0.0.1', end_str='129.0.0.255', network=s)
        self.assertEqual(r.network, s)
        self.assertEqual(s.range_set.count(), 1)
        s.network_str = '129.0.0.0/25'
        self.assertRaises(ValidationError, s.save)

    def test_bad_delete(self):
        """A network with attached ranges can only be deleted after them."""
        s = Network.objects.create(
            network_str='129.0.0.0/24', ip_type='4')
        d = Domain.objects.create(name="asdf")
        r = Range.objects.create(
            start_str='129.0.0.1', end_str='129.0.0.255', network=s)
        self.assertEqual(r.network, s)
        self.assertEqual(s.range_set.count(), 1)
        self.assertRaises(ValidationError, s.delete)
        self.assertTrue(Network.objects.filter(pk=s.pk).exists())
        r.delete()
        s_pk = s.pk
        s.delete()
        self.assertFalse(Network.objects.filter(pk=s_pk).exists())

    def _check_resize(self, start_str, end_str, valid):
        """Create 10.0.0.0/8 plus a range, then shrink it to 10.4.0.0/16.

        When ``valid`` is True the range fits inside the new prefix and the
        resize must succeed; otherwise full_clean() must raise
        ValidationError.
        """
        n = Network(network_str='10.0.0.0/8')
        n.full_clean()
        n.save()
        r = Range(ip_type='4', start_str=start_str, end_str=end_str,
                  network=n)
        r.full_clean()
        r.save()
        n.network_str = '10.4.0.0/16'
        if valid:
            n.full_clean()
            n.save()
        else:
            with self.assertRaises(ValidationError):
                n.full_clean()
                n.save()

    def test_check_valid_ranges_v4_valid(self):
        # range sits entirely inside the shrunken 10.4.0.0/16
        self._check_resize('10.4.0.2', '10.4.255.254', valid=True)

    def test_check_valid_ranges_v4_start_low(self):
        self._check_resize('10.3.0.2', '10.4.255.254', valid=False)

    def test_check_valid_ranges_v4_start_end_low(self):
        self._check_resize('10.3.0.2', '10.3.255.254', valid=False)

    def test_check_valid_ranges_v4_end_high(self):
        self._check_resize('10.4.0.2', '10.5.255.254', valid=False)

    def test_check_valid_ranges_v4_start_end_high(self):
        self._check_resize('10.5.0.2', '10.5.255.254', valid=False)

    def test_check_valid_ranges_v4_start_low_end_high(self):
        self._check_resize('10.3.0.2', '10.5.255.254', valid=False)

    def test_overlap_validation(self):
        """Partial/total overlap detection for IPv6 networks."""
        n1 = Network(network_str='1::/65', ip_type='6')
        n1.update_network()
        n1.save()
        # a network never overlaps itself
        self.assertFalse(n1 in get_total_overlap(n1))
        self.assertFalse(n1 in get_partial_overlap(n1))
        n2 = Network(network_str='1::/66', ip_type='6')
        n2.update_network()
        self.assertEqual(n1.start_upper, n2.start_upper)
        self.assertEqual(n1.end_upper, n2.end_upper)
        self.assertFalse(n1 in get_total_overlap(n2))
        self.assertTrue(n1 in get_partial_overlap(n2))
        n2 = Network(network_str='1::/64', ip_type='6')
        n2.update_network()
        self.assertEqual(n1.start_upper, n2.start_upper)
        self.assertEqual(n1.end_upper, n2.end_upper)
        self.assertTrue(n1 in get_total_overlap(n2))
        self.assertTrue(n1 in get_partial_overlap(n2))
        n2 = Network(network_str='1:0:0:0:8000::/65', ip_type='6')
        n2.update_network()
        self.assertEqual(n1.start_upper, n2.start_upper)
        self.assertEqual(n1.end_upper, n2.end_upper)
        self.assertFalse(n1 in get_total_overlap(n2))
        self.assertFalse(n1 in get_partial_overlap(n2))
        n1 = Network(network_str='2::/16', ip_type='6')
        n1.update_network()
        n1.save()
        self.assertFalse(n1 in get_total_overlap(n1))
        self.assertFalse(n1 in get_partial_overlap(n1))
        n2 = Network(network_str='2::/17', ip_type='6')
        n2.update_network()
        self.assertEqual(n1.start_upper, n2.start_upper)
        self.assertNotEqual(n1.end_upper, n2.end_upper)
        self.assertFalse(n1 in get_total_overlap(n2))
        self.assertTrue(n1 in get_partial_overlap(n2))
        n2 = Network(network_str='2::/15', ip_type='6')
        n2.update_network()
        self.assertEqual(n1.start_upper, n2.start_upper)
        self.assertNotEqual(n1.end_upper, n2.end_upper)
        self.assertTrue(n1 in get_total_overlap(n2))
        self.assertTrue(n1 in get_partial_overlap(n2))
        n2 = Network(network_str='3::/16', ip_type='6')
        n2.update_network()
        self.assertNotEqual(n1.start_upper, n2.start_upper)
        self.assertNotEqual(n1.end_upper, n2.end_upper)
        self.assertFalse(n1 in get_total_overlap(n2))
        self.assertFalse(n1 in get_partial_overlap(n2))
|
|
import ankle
import unittest
class SimpleTestCase(unittest.TestCase):
    """Smoke tests for ankle.find_all's basic input handling."""
    def test_works(self):
        # find_all accepts a skeleton and a document as plain strings
        document = '''
        <form id="test1" class="form"></form>
        <form id="test2" class="form"></form>
        '''
        skeleton = '<form class="form"></form>'
        ankle.find_all(skeleton, document)
    def test_disallows_skeleton_with_multiple_elements(self):
        # a skeleton must contain exactly one root element
        document = '<html></html>'
        skeleton = '<p></p><p></p>'
        with self.assertRaises(ValueError):
            ankle.find_all(skeleton, document)
class FindTestCase(unittest.TestCase):
    """Tests for ankle.find (single-result variant of find_all)."""
    def test_returns_first_found_element_when_found(self):
        document = '''
        <form id="test1" class="form"></form>
        <form id="test2" class="form"></form>
        '''
        skeleton = '<form class="form"></form>'
        element = ankle.find(skeleton, document)
        # with several matches, the first one in document order wins
        self.assertEqual(element.attrib['id'], 'test1')
    def test_returns_none_when_nothing_found(self):
        document = '<form id="test"></form>'
        skeleton = '<div class="other"></div>'
        self.assertIsNone(ankle.find(skeleton, document))
class MatchingTestCase(unittest.TestCase):
    """Tests for ankle's skeleton-matching rules (tags, attributes,
    descendants, text and ordering)."""
    def test_match_by_tag_name(self):
        document = '''
        <form id="test"></form>
        '''
        skeleton = '<form></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test')
    def test_match_by_attribute(self):
        document = '''
        <form id="test1"></form>
        <form id="test2"></form>
        '''
        skeleton = '<form id="test1"></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_by_child(self):
        document = '''
        <form id="test1"><input name="match"></form>
        <form id="test2"><input name="no-match"></form>
        '''
        skeleton = '<form><input name="match"></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_by_descendant(self):
        # skeleton children may appear at any depth in the document
        document = '''
        <form id="test1">
        <div><span><input name="match"></span></div>
        <button>Submit</button>
        </form>
        <form id="test2">
        <input name="whatever">
        <div><button>Go</button></div>
        </form>
        '''
        skeleton = '<form><input name="match"></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_by_multiple_children(self):
        # all skeleton children must be present for a match
        document = '''
        <form id="login">
        <input name="name">
        <input name="password">
        </form>
        <form id="some-other-form">
        <input name="no-match">
        <input name="different-input">
        </form>
        '''
        skeleton = '<form><input name="name"><input name="password"></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'login')
    def test_multiple_matches(self):
        # results come back in document order
        document = '''
        <form id="test1"></form>
        <form id="test2"></form>
        '''
        skeleton = '<form></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 2)
        self.assertEqual(matches[0].attrib['id'], 'test1')
        self.assertEqual(matches[1].attrib['id'], 'test2')
    def test_attribute_order_doesnt_matter(self):
        document = '<form method="POST" action="." id="test1"></form>'
        skeleton = '<form id="test1" action="." method="POST"></form>'
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_deep_descendants(self):
        document = '''
        <form id="test1">
        <div class="red">
        <input name="wonderful">
        </div>
        </form>
        <form id="test2">
        <div class="red">
        <input name="different">
        </div>
        </form>
        '''
        # NOTE(review): the closing tags below are transposed
        # (</form></div>); this relies on lenient HTML parsing — confirm
        # the fixture is intentional.
        skeleton = (
            '<form><div class="red"><input name="wonderful"></form></div>'
        )
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_text(self):
        # element text is compared with whitespace normalized
        document = '''
        <form id="test1">
        <label for="name">
        Correct label
        </label>
        <input name="name">
        </form>
        <form id="test2">
        <label for="name">
        Wrong label
        </label>
        <input name="name">
        </form>
        '''
        skeleton = '''
        <form>
        <label for="name">Correct label</label>
        <input name="name">
        </form>
        '''
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_by_order(self):
        # skeleton children must appear in the same relative order
        document = '''
        <form id="test1">
        <label for="name">Label</label>
        <input name="name">
        </form>
        <form id="test2">
        <input name="name">
        <label for="name">Label</label>
        </form>
        '''
        skeleton = '''
        <form>
        <label for="name">Label</label>
        <input name="name">
        </form>
        '''
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_matches_skeleton_with_just_text(self):
        # NOTE(review): the second fixture element opens <h2> but closes
        # </h1>; presumably a deliberate non-matching fixture — confirm.
        document = '''
        <h1 id="test1">Correct title</h1>
        <h2 id="test2">Different title</h1>
        '''
        skeleton = '''
        <h1>Correct title</h1>
        '''
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_text_between_elements(self):
        document = '''
        <form id="test1">
        <label for="name">Label</label>
        Correct text
        <input name="name">
        </form>
        <form id="test2">
        <label for="name">Label</label>
        Incorrect text
        <input name="name">
        </form>
        '''
        skeleton = '''
        <form>
        <label for="name">Label</label>
        Correct text
        <input name="name">
        </form>
        '''
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
    def test_match_text_in_the_beginning_of_element(self):
        document = '''
        <form id="test1">
        Correct text
        <label for="name">Label</label>
        <input name="name">
        </form>
        <form id="test2">
        Incorrect text
        <label for="name">Label</label>
        <input name="name">
        </form>
        '''
        skeleton = '''
        <form>
        Correct text
        <label for="name">Label</label>
        <input name="name">
        </form>
        '''
        matches = ankle.find_all(skeleton, document)
        self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0].attrib['id'], 'test1')
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SVN to GIT mapping for the public Chromium repositories."""
import re
# Base URL of the git mirrors every converted SVN URL is rewritten onto.
GIT_HOST = 'https://chromium.googlesource.com/'
# Special-cased blink/trunk URLs.
# NOTE(review): the '.' characters are unescaped and so match any
# character; harmless for these URLs, but r'\.' would be stricter.
BLINK_TRUNK_RE = re.compile(
    '^https?://src.chromium.org/blink/trunk$')
BLINK_TRUNK_PUBLIC_RE = re.compile(
    '^https?://src.chromium.org/blink/trunk/public$')
def SvnUrlToGitUrl(path, svn_url):
  """Convert a chromium SVN URL to a chromium Git URL.

  Args:
    path: checkout path of the dependency (may be rewritten for some repos).
    svn_url: the SVN URL to map.

  Returns:
    A tuple (path, git_url, GIT_HOST) — or (path, git_url, GIT_HOST, branch)
    for branch mirrors — describing the Git mirror, or None for URLs that
    are deliberately skipped.  Rules are ordered most-specific first, so do
    not reorder them.
  """
  match = re.match(
      '(https?://src.chromium.org/svn|svn://svn.chromium.org/chrome)(/.*)',
      svn_url)
  if match:
    # Normalize the two canonical chromium SVN hosts down to the path part.
    svn_url = match.group(2)
  # A few special cases.
  if re.match('^https?://sctp-refimpl.googlecode.com/svn/' +
              'trunk/KERN/usrsctp/usrsctplib$', svn_url):
    return (path, GIT_HOST + 'external/usrsctplib.git', GIT_HOST)
  if svn_url == '/trunk/deps/page_cycler/acid3':
    return (path, GIT_HOST + 'chromium/deps/acid3.git', GIT_HOST)
  if svn_url == '/trunk/deps/canvas_bench':
    return (path, GIT_HOST + 'chromium/canvas_bench.git', GIT_HOST)
  if svn_url == '/trunk/deps/gpu/software_rendering_list':
    return (path, GIT_HOST + 'chromium/deps/gpu/software_rendering_list.git',
            GIT_HOST)
  if svn_url == '/trunk/tools/third_party/python_26':
    return (path, GIT_HOST + 'chromium/deps/python_26.git', GIT_HOST)
  if svn_url == '/trunk/deps/support':
    return (path, GIT_HOST + 'chromium/support.git', GIT_HOST)
  if svn_url == '/trunk/deps/frame_rate/content':
    return (path, GIT_HOST + 'chromium/frame_rate/content.git', GIT_HOST)
  if svn_url == 'svn://svn.chromium.org/boto':
    return (path, GIT_HOST + 'external/boto.git', GIT_HOST)
  if svn_url == 'svn://svn.chromium.org/gsutil/trunk/src':
    return (path, GIT_HOST + 'external/gsutil/src.git', GIT_HOST)
  if svn_url == 'svn://svn.chromium.org/jsoncpp/trunk/jsoncpp':
    return (path, GIT_HOST + 'external/jsoncpp/jsoncpp.git', GIT_HOST)
  if svn_url == '/trunk/deps/cdm':
    return (path, GIT_HOST + 'chromium/cdm.git', GIT_HOST)
  # TODO(niklase) Remove after landing https://codereview.chromium.org/86563002
  if re.match('^https?://webrtc.googlecode.com/svn/stable/webrtc$', svn_url):
    return (path, GIT_HOST + 'external/webrtc/stable/webrtc.git', GIT_HOST)
  # TODO(niklase) Remove after landing https://codereview.chromium.org/86563002
  if re.match('^https?://webrtc.googlecode.com/svn/stable/talk$', svn_url):
    return (path, GIT_HOST + 'external/webrtc/stable/talk.git', GIT_HOST)
  # TODO(niklase) Remove after landing https://codereview.chromium.org/86563002
  if re.match('^https?://webrtc.googlecode.com/svn/stable/src$', svn_url):
    return (path, GIT_HOST + 'external/webrtc/stable/src.git', GIT_HOST)
  # webrtc 'trunk/src' mirror was created without 'trunk' in the name, unlike
  # the other ones which are matched next.
  match = re.match('^https?://webrtc.googlecode.com/svn/trunk/src', svn_url)
  if match:
    return (path, GIT_HOST + 'external/webrtc/src.git', GIT_HOST)
  # webrtc 'trunk' mappings for everything but 'trunk/src'.
  match = re.match('^https?://webrtc.googlecode.com/svn/trunk/(.*)', svn_url)
  if match:
    repo = '%s.git' % match.group(1)
    return (path, GIT_HOST + 'external/webrtc/trunk/%s' % repo, GIT_HOST)
  if re.match('^https?://webrtc.googlecode.com/svn/deps/third_party/openmax$',
              svn_url):
    return (path, GIT_HOST + 'external/webrtc/deps/third_party/openmax.git',
            GIT_HOST)
  if svn_url in ('http://selenium.googlecode.com/svn/trunk/py/test',
                 'https://selenium.googlecode.com/svn/trunk/py/test',
                 '/trunk/deps/reference_builds/chrome'):
    # Those can't be git svn cloned. Skipping for now.
    return
  # Projects on sourceforge using trunk
  match = re.match('^https?://svn.code.sf.net/p/(.*)/code/trunk(.*)',
                   svn_url)
  if match:
    repo = '%s%s.git' % (match.group(1), match.group(2))
    return (path, GIT_HOST + 'external/%s' % repo, GIT_HOST)
  # Fallback for old sourceforge URL.
  match = re.match('^https?://(.*).svn.sourceforge.net/svnroot/(.*)/trunk(.*)',
                   svn_url)
  if match:
    repo = '%s%s.git' % (match.group(2), match.group(3))
    return (path, GIT_HOST + 'external/%s' % repo, GIT_HOST)
  # Subdirectories of libaddressinput
  if re.match('^https?://libaddressinput.googlecode.com/svn/trunk', svn_url):
    if 'libaddressinput' in path:
      # All subdirectory checkouts map onto the single src mirror.
      path = path[:path.index('libaddressinput')] + 'libaddressinput/src'
    return (path, GIT_HOST + 'external/libaddressinput.git', GIT_HOST)
  # Projects on googlecode.com using trunk.
  match = re.match('^https?://(.*).googlecode.com/svn/trunk(.*)', svn_url)
  if match:
    repo = '%s%s.git' % (match.group(1), match.group(2))
    return (path, GIT_HOST + 'external/%s' % repo, GIT_HOST)
  # Projects on googlecode.com using branches.
  # Branches should be automatically included in the projects corresponding
  # 'trunk' mirror as 'branch-heads' refspecs.
  # This makes some broad assumptions about a "standard" branch layout , i.e.:
  #   svn/branches/<branch_name>/<optional_sub_path>
  # This layout can't really be enforced, though it appears to apply to most
  # repos. Outliers will have to be special-cased.
  match = re.match('^https?://(.*).googlecode.com/svn/branches/([^/]+)(.*)',
                   svn_url)
  if match:
    repo = '%s%s.git' % (match.group(1), match.group(3))
    branch_name = match.group(2)
    return (path, GIT_HOST + 'external/%s' % repo, GIT_HOST, branch_name)
  # Projects that are subdirectories of the native_client repository.
  match = re.match('^https?://src.chromium.org/native_client/trunk/(.*)',
                   svn_url)
  if match:
    repo = '%s.git' % match.group(1)
    return (path, GIT_HOST + 'native_client/%s' % repo, GIT_HOST)
  # Projects that are subdirectories of the chromium/{src,tools} repository.
  match = re.match('/trunk/((src|tools)/.*)', svn_url)
  if match:
    repo = '%s.git' % match.group(1)
    return (path, GIT_HOST + 'chromium/%s' % repo, GIT_HOST)
  # Public-header-only blink directory for iOS.
  if BLINK_TRUNK_PUBLIC_RE.match(svn_url):
    return (path, GIT_HOST + 'chromium/blink-public.git', GIT_HOST)
  # Main blink directory.
  if BLINK_TRUNK_RE.match(svn_url):
    return (path, GIT_HOST + 'chromium/blink.git', GIT_HOST)
  # llvm project (and possible subdirectory) repos.
  match = re.match('^https?://src.chromium.org/llvm-project/([^/]*)/trunk(.*)',
                   svn_url)
  if match:
    repo = '%s.git' % ''.join(match.groups())
    return (path, GIT_HOST + 'chromium/llvm-project/%s' % repo, GIT_HOST)
  # Minimal header-only webkit directories for iOS. At some point after the
  # transition to the blink repo, these were replaced by the
  # BLINK_TRUNK_PUBLIC_RE entries above.
  if svn_url in ['http://svn.webkit.org/repository/webkit/trunk/Source/'
                 'WebKit/chromium/public',
                 'http://src.chromium.org/blink/trunk/Source/'
                 'WebKit/chromium/public'
                ]:
    return (path,
            GIT_HOST + 'external/WebKit/Source/WebKit/chromium/public.git',
            GIT_HOST)
  if svn_url in ['http://svn.webkit.org/repository/webkit/trunk/Source/'
                 'Platform/chromium/public',
                 'http://src.chromium.org/blink/trunk/Source/'
                 'Platform/chromium/public'
                ]:
    return (path,
            GIT_HOST + 'external/WebKit/Source/Platform/chromium/public.git',
            GIT_HOST)
  # Ignore all webkit directories (other than the above), since we fetch the
  # whole thing directly for all but iOS.
  if svn_url == '/trunk/deps/third_party/WebKit':
    return
  # blink
  # Subdirectories of the chromium deps/third_party directory.
  match = re.match('/trunk/deps/third_party/(.*)', svn_url)
  if match:
    repo = '%s.git' % match.group(1)
    return (path, GIT_HOST + 'chromium/deps/%s' % repo, GIT_HOST)
  # Subdirectories of the chromium deps/reference_builds directory.
  match = re.match('/trunk/deps/reference_builds/(.*)', svn_url)
  if match:
    repo = '%s.git' % match.group(1)
    return (path, GIT_HOST + 'chromium/reference_builds/%s' % repo, GIT_HOST)
  # Nothing yet? Oops.
  # print() (function-call form) is valid in both Python 2 and 3; the old
  # 'print x' statement form is a SyntaxError under Python 3.
  print('No match for %s' % svn_url)
|
|
"""
Copyright (c) 2017-2022, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
# Factory functions returning the UiKit implementation class for each widget.
# Each factory defers its import so a widget's module is only loaded the
# first time that widget type is actually instantiated.
def activity_indicator_factory():
    from .uikit_activity_indicator import UiKitActivityIndicator
    return UiKitActivityIndicator
def auto_complete_text_view_factory():
    from .uikit_auto_complete_text_view import UiKitAutoCompleteTextView
    return UiKitAutoCompleteTextView
def button_factory():
    from .uikit_button import UiKitButton
    return UiKitButton
def calendar_view_factory():
    from .uikit_calendar_view import UiKitCalendarView
    return UiKitCalendarView
def card_view_factory():
    from .uikit_card_view import UiKitCardView
    return UiKitCardView
def checkbox_factory():
    # iOS doesn't have a checkbox; fall back to the switch widget.
    from .uikit_switch import UiKitSwitch
    return UiKitSwitch
def chronometer_factory():
    from .uikit_chronometer import UiKitChronometer
    return UiKitChronometer
def compound_button_factory():
    from .uikit_compound_button import UiKitCompoundButton
    return UiKitCompoundButton
def date_picker_factory():
    from .uikit_date_picker import UiKitDatePicker
    return UiKitDatePicker
def drawer_layout_factory():
    from .uikit_drawer_layout import UiKitDrawerLayout
    return UiKitDrawerLayout
def edit_text_factory():
    from .uikit_edit_text import UiKitEditText
    return UiKitEditText
def flexbox_factory():
    from .uikit_flexbox import UiKitFlexbox
    return UiKitFlexbox
def fragment_factory():
    from .uikit_fragment import UiKitFragment
    return UiKitFragment
def frame_layout_factory():
    from .uikit_frame_layout import UiKitFrameLayout
    return UiKitFrameLayout
def grid_layout_factory():
    from .uikit_grid_layout import UiKitGridLayout
    return UiKitGridLayout
def icon_factory():
    from .uikit_iconify import UiKitIcon
    return UiKitIcon
def icon_button_factory():
    from .uikit_iconify import UiKitIconButton
    return UiKitIconButton
def icon_toggle_button_factory():
    from .uikit_iconify import UiKitIconToggleButton
    return UiKitIconToggleButton
def image_view_factory():
    from .uikit_image_view import UiKitImageView
    return UiKitImageView
def linear_layout_factory():
    from .uikit_linear_layout import UiKitLinearLayout
    return UiKitLinearLayout
def list_item_factory():
    from .uikit_list_view import UiKitListItem
    return UiKitListItem
def list_view_factory():
    from .uikit_list_view import UiKitListView
    return UiKitListView
def number_picker_factory():
    from .uikit_number_picker import UiKitNumberPicker
    return UiKitNumberPicker
def pager_title_strip_factory():
    from .uikit_view_pager import UiKitPagerTitleStrip
    return UiKitPagerTitleStrip
def pager_tab_strip_factory():
    from .uikit_view_pager import UiKitPagerTabStrip
    return UiKitPagerTabStrip
def pager_fragment_factory():
    from .uikit_fragment import UiKitPagerFragment
    return UiKitPagerFragment
def progress_bar_factory():
    from .uikit_progress_view import UiKitProgressView
    return UiKitProgressView
def radio_button_factory():
    from .uikit_radio_button import UiKitRadioButton
    return UiKitRadioButton
def radio_group_factory():
    from .uikit_radio_group import UiKitRadioGroup
    return UiKitRadioGroup
def rating_bar_factory():
    from .uikit_rating_bar import UiKitRatingBar
    return UiKitRatingBar
def relative_layout_factory():
    from .uikit_relative_layout import UiKitRelativeLayout
    return UiKitRelativeLayout
def scroll_view_factory():
    from .uikit_scroll_view import UiKitScrollView
    return UiKitScrollView
def seek_bar_factory():
    # The iOS analog of a seek bar is a slider.
    from .uikit_slider import UiKitSlider
    return UiKitSlider
def spacer_factory():
    from .uikit_spacer import UiKitSpacer
    return UiKitSpacer
def spinner_factory():
    from .uikit_spinner import UiKitSpinner
    return UiKitSpinner
def switch_factory():
    from .uikit_switch import UiKitSwitch
    return UiKitSwitch
def text_clock_factory():
    from .uikit_text_clock import UiKitTextClock
    return UiKitTextClock
def text_view_factory():
    from .uikit_text_view import UiKitTextView
    return UiKitTextView
def time_picker_factory():
    from .uikit_time_picker import UiKitTimePicker
    return UiKitTimePicker
def tab_layout_factory():
    from .uikit_tab_layout import UiKitTabLayout
    return UiKitTabLayout
def tab_fragment_factory():
    from .uikit_tab_layout import UiKitTabFragment
    return UiKitTabFragment
def toggle_button_factory():
    from .uikit_toggle_button import UiKitToggleButton
    return UiKitToggleButton
def toolbar_factory():
    from .uikit_toolbar import UiKitToolbar
    return UiKitToolbar
def view_factory():
    from .uikit_view import UiKitView
    return UiKitView
def view_pager_factory():
    from .uikit_view_pager import UiKitViewPager
    return UiKitViewPager
def web_view_factory():
    from .uikit_web_view import UiKitWebView
    return UiKitWebView
# Maps enaml-native widget type names to the factory providing the iOS
# (UiKit) implementation.  Values are callables, keeping every widget
# import deferred until first use.
IOS_FACTORIES = {
    "ActivityIndicator": activity_indicator_factory,
    "AutoCompleteTextView": auto_complete_text_view_factory,
    "Button": button_factory,
    "CalendarView": calendar_view_factory,
    "CardView": card_view_factory,
    "CheckBox": checkbox_factory,
    "Chronometer": chronometer_factory,
    "CompoundButton": compound_button_factory,
    "DatePicker": date_picker_factory,
    "DrawerLayout": drawer_layout_factory,
    "EditText": edit_text_factory,
    "Flexbox": flexbox_factory,
    "Fragment": fragment_factory,
    # 'FrameLayout': frame_layout_factory,  # intentionally not registered
    "GridLayout": grid_layout_factory,
    "Icon": icon_factory,
    "IconButton": icon_button_factory,
    "IconToggleButton": icon_toggle_button_factory,
    "ImageView": image_view_factory,
    "Label": text_view_factory,  # Label is backed by the text view widget
    "LinearLayout": linear_layout_factory,
    "ListItem": list_item_factory,
    "ListView": list_view_factory,
    "NumberPicker": number_picker_factory,
    "PagerTitleStrip": pager_title_strip_factory,
    "PagerTabStrip": pager_tab_strip_factory,
    "PagerFragment": pager_fragment_factory,
    "ProgressBar": progress_bar_factory,
    "RadioButton": radio_button_factory,
    "RadioGroup": radio_group_factory,
    "RatingBar": rating_bar_factory,
    "RelativeLayout": relative_layout_factory,
    "ScrollView": scroll_view_factory,
    "SeekBar": seek_bar_factory,
    "Slider": seek_bar_factory,  # Alias
    "Spacer": spacer_factory,
    "Spinner": spinner_factory,
    "Switch": switch_factory,
    "TabFragment": tab_fragment_factory,
    "TabLayout": tab_layout_factory,
    "TextClock": text_clock_factory,
    "TextView": text_view_factory,
    "TimePicker": time_picker_factory,
    "ToggleButton": toggle_button_factory,
    "Toolbar": toolbar_factory,
    "View": view_factory,
    "ViewPager": view_pager_factory,
    "WebView": web_view_factory,
}
|
|
from filebeat import BaseTest
import os
import platform
import time
import shutil
import json
from nose.plugins.skip import Skip, SkipTest
# Additional tests: to be implemented
# * Check if registrar file can be configured, set config param
# * Check "updating" of registrar file
# * Check what happens when registrar file is deleted
class Test(BaseTest):
def test_registrar_file_content(self):
    """
    Check if registrar file is created correctly and content is as expected.

    Writes 5 identical lines to a log file, runs filebeat, then verifies
    that the registry contains exactly one entry with the correct source
    path, offset, and platform-specific FileStateOS fields.
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/*"
    )
    os.mkdir(self.working_dir + "/log/")
    # Use \n as line terminator on all platforms per docs.
    line = "hello world\n"
    # Text mode translates "\n" to os.linesep on write, so the on-disk
    # length per line is the payload plus the platform line separator.
    line_len = len(line) - 1 + len(os.linesep)
    iterations = 5
    testfile = self.working_dir + "/log/test.log"
    file = open(testfile, 'w')
    file.write(iterations * line)
    file.close()
    filebeat = self.start_beat()
    c = self.log_contains_count("states written")
    self.wait_until(
        lambda: self.output_has(lines=5),
        max_timeout=15)
    # Make sure states written appears one more time.
    # Fixed: the original compared log_contains() (a truth value) against
    # the count c; log_contains_count() is the counting counterpart used
    # to obtain c above.
    self.wait_until(
        lambda: self.log_contains_count("states written") > c,
        max_timeout=10)
    # wait until the registry file exist. Needed to avoid a race between
    # the logging and actual writing the file. Seems to happen on Windows.
    self.wait_until(
        lambda: os.path.isfile(os.path.join(self.working_dir,
                                            "registry")),
        max_timeout=1)
    filebeat.check_kill_and_wait()
    # Check that a single file exists in the registry.
    data = self.get_registry()
    assert len(data) == 1
    logFileAbsPath = os.path.abspath(testfile)
    record = self.get_registry_entry_by_path(logFileAbsPath)
    self.assertDictContainsSubset({
        "source": logFileAbsPath,
        "offset": iterations * line_len,
    }, record)
    self.assertTrue("FileStateOS" in record)
    file_state_os = record["FileStateOS"]
    if os.name == "nt":
        # Windows checks
        # TODO: Check for IdxHi, IdxLo, Vol in FileStateOS on Windows.
        self.assertEqual(len(file_state_os), 3)
    elif platform.system() == "SunOS":
        stat = os.stat(logFileAbsPath)
        self.assertEqual(file_state_os["inode"], stat.st_ino)
        # Python does not return the same st_dev value as Golang or the
        # command line stat tool so just check that it's present.
        self.assertTrue("device" in file_state_os)
    else:
        stat = os.stat(logFileAbsPath)
        self.assertDictContainsSubset({
            "inode": stat.st_ino,
            "device": stat.st_dev,
        }, file_state_os)
def test_registrar_files(self):
    """
    Check that multiple files are put into registrar file
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/*"
    )
    os.mkdir(self.working_dir + "/log/")
    testfile1 = self.working_dir + "/log/test1.log"
    testfile2 = self.working_dir + "/log/test2.log"
    file1 = open(testfile1, 'w')
    file2 = open(testfile2, 'w')
    iterations = 5
    for n in range(0, iterations):
        file1.write("hello world")  # 11 chars
        file1.write("\n")  # 1 char
        file2.write("goodbye world")  # 13 chars
        file2.write("\n")  # 1 char
    file1.close()
    file2.close()
    filebeat = self.start_beat()
    # 5 lines from each of the two files.
    self.wait_until(
        lambda: self.output_has(lines=10),
        max_timeout=15)
    # wait until the registry file exist. Needed to avoid a race between
    # the logging and actual writing the file. Seems to happen on Windows.
    self.wait_until(
        lambda: os.path.isfile(os.path.join(self.working_dir,
                                            "registry")),
        max_timeout=1)
    filebeat.check_kill_and_wait()
    # Check that file exist
    data = self.get_registry()
    # Check that 2 files are part of the registrar file
    assert len(data) == 2
def test_custom_registry_file_location(self):
    """
    Check that when a custom registry file is used, the path
    is created automatically.
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/*",
        registryFile="a/b/c/registry",
    )
    os.mkdir(self.working_dir + "/log/")
    testfile = self.working_dir + "/log/test.log"
    with open(testfile, 'w') as f:
        f.write("hello world\n")
    filebeat = self.start_beat()
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=15)
    # wait until the registry file exist. Needed to avoid a race between
    # the logging and actual writing the file. Seems to happen on Windows.
    self.wait_until(
        lambda: os.path.isfile(os.path.join(self.working_dir,
                                            "a/b/c/registry")),
        max_timeout=1)
    filebeat.check_kill_and_wait()
    # The intermediate directories a/b/c must have been created by filebeat.
    assert os.path.isfile(os.path.join(self.working_dir, "a/b/c/registry"))
def test_rotating_file(self):
    """
    Checks that the registry is properly updated after a file is rotated
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/*"
    )
    os.mkdir(self.working_dir + "/log/")
    testfile = self.working_dir + "/log/test.log"
    filebeat = self.start_beat()
    with open(testfile, 'w') as f:
        f.write("offset 9\n")
    self.wait_until(lambda: self.output_has(lines=1),
                    max_timeout=10)
    # Simulate rotation: rename the file and write a fresh one in its place.
    testfilerenamed = self.working_dir + "/log/test.1.log"
    os.rename(testfile, testfilerenamed)
    with open(testfile, 'w') as f:
        f.write("offset 10\n")
    self.wait_until(lambda: self.output_has(lines=2),
                    max_timeout=10)
    filebeat.check_kill_and_wait()
    # Check that file exist
    data = self.get_registry()
    # Make sure the offsets are correctly set
    # (on Windows each offset is +1 — presumably "\n" becomes "\r\n" in
    # text mode; TODO confirm)
    if os.name == "nt":
        assert self.get_registry_entry_by_path(os.path.abspath(testfile))["offset"] == 11
        assert self.get_registry_entry_by_path(os.path.abspath(testfilerenamed))["offset"] == 10
    else:
        assert self.get_registry_entry_by_path(os.path.abspath(testfile))["offset"] == 10
        assert self.get_registry_entry_by_path(os.path.abspath(testfilerenamed))["offset"] == 9
    # Check that 2 files are part of the registrar file
    assert len(data) == 2
def test_data_path(self):
    """
    Checks that the registry file is written in a custom data path.
    """
    self.render_config_template(
        path=self.working_dir + "/test.log",
        path_data=self.working_dir + "/datapath",
        skip_registry_config=True,
    )
    with open(self.working_dir + "/test.log", "w") as f:
        f.write("test message\n")
    filebeat = self.start_beat()
    self.wait_until(lambda: self.output_has(lines=1))
    filebeat.check_kill_and_wait()
    # The registry must land under the configured data path, not the CWD.
    assert os.path.isfile(self.working_dir + "/datapath/registry")
def test_rotating_file_inode(self):
    """
    Check that inodes are properly written during file rotation
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/input*",
        scan_frequency="1s"
    )
    # Inode-based checks don't apply on Windows.
    if os.name == "nt":
        raise SkipTest
    os.mkdir(self.working_dir + "/log/")
    testfile = self.working_dir + "/log/input"
    filebeat = self.start_beat()
    with open(testfile, 'w') as f:
        f.write("entry1\n")
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=10)
    data = self.get_registry()
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    # First rotation: rename and write a fresh file in its place.
    testfilerenamed1 = self.working_dir + "/log/input.1"
    os.rename(testfile, testfilerenamed1)
    with open(testfile, 'w') as f:
        f.write("entry2\n")
    self.wait_until(
        lambda: self.output_has(lines=2),
        max_timeout=10)
    # Add one second sleep as it can sometimes take a moment until state is written
    time.sleep(1)
    data = self.get_registry()
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
    # Rotate log file, create a new empty one and remove it afterwards
    testfilerenamed2 = self.working_dir + "/log/input.2"
    os.rename(testfilerenamed1, testfilerenamed2)
    os.rename(testfile, testfilerenamed1)
    with open(testfile, 'w') as f:
        f.write("")
    os.remove(testfilerenamed2)
    with open(testfile, 'w') as f:
        f.write("entry3\n")
    self.wait_until(
        lambda: self.output_has(lines=3),
        max_timeout=10)
    filebeat.check_kill_and_wait()
    data = self.get_registry()
    # Compare file inodes and the one in the registry
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
    # Check that 3 files are part of the registrar file. The deleted file should never have been detected, but the rotated one should be in
    assert len(data) == 3
def test_restart_continue(self):
    """
    Check that file reading continues after restart
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/input*",
        scan_frequency="1s"
    )
    # Inode-based checks don't apply on Windows.
    if os.name == "nt":
        raise SkipTest
    os.mkdir(self.working_dir + "/log/")
    testfile = self.working_dir + "/log/input"
    filebeat = self.start_beat()
    with open(testfile, 'w') as f:
        f.write("entry1\n")
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=10)
    # Wait a moment to make sure registry is completely written
    time.sleep(1)
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    filebeat.check_kill_and_wait()
    # Store first registry file
    shutil.copyfile(self.working_dir + "/registry", self.working_dir + "/registry.first")
    # Append file
    with open(testfile, 'a') as f:
        f.write("entry2\n")
    filebeat = self.start_beat(output="filebeat2.log")
    # Output file was rotated
    self.wait_until(
        lambda: self.output_has(lines=1, output_file="output/filebeat.1"),
        max_timeout=10)
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=10)
    filebeat.check_kill_and_wait()
    data = self.get_registry()
    # Compare file inodes and the one in the registry
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    # Check that 1 files are part of the registrar file. The deleted file should never have been detected
    assert len(data) == 1
    output = self.read_output()
    # Only the appended line may be shipped after the restart — entry1
    # was already acknowledged in the first run.
    assert 1 == len(output)
    assert output[0]["message"] == "entry2"
def test_rotating_file_with_restart(self):
    """
    Check that inodes are properly written during file rotation and restart
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/input*",
        scan_frequency="1s"
    )
    # Inode-based checks don't apply on Windows.
    if os.name == "nt":
        raise SkipTest
    os.mkdir(self.working_dir + "/log/")
    testfile = self.working_dir + "/log/input"
    filebeat = self.start_beat()
    with open(testfile, 'w') as f:
        f.write("entry1\n")
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=10)
    # Wait a moment to make sure registry is completely written
    time.sleep(1)
    data = self.get_registry()
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    # First rotation while filebeat is running.
    testfilerenamed1 = self.working_dir + "/log/input.1"
    os.rename(testfile, testfilerenamed1)
    with open(testfile, 'w') as f:
        f.write("entry2\n")
    self.wait_until(
        lambda: self.output_has(lines=2),
        max_timeout=10)
    # Wait a moment to make sure registry is completely written
    time.sleep(1)
    data = self.get_registry()
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
    filebeat.check_kill_and_wait()
    # Store first registry file
    shutil.copyfile(self.working_dir + "/registry", self.working_dir + "/registry.first")
    # Second rotation happens while filebeat is stopped.
    # Rotate log file, create a new empty one and remove it afterwards
    testfilerenamed2 = self.working_dir + "/log/input.2"
    os.rename(testfilerenamed1, testfilerenamed2)
    os.rename(testfile, testfilerenamed1)
    with open(testfile, 'w') as f:
        f.write("")
    os.remove(testfilerenamed2)
    with open(testfile, 'w') as f:
        f.write("entry3\n")
    filebeat = self.start_beat(output="filebeat2.log")
    # Output file was rotated
    self.wait_until(
        lambda: self.output_has(lines=2, output_file="output/filebeat.1"),
        max_timeout=10)
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=10)
    filebeat.check_kill_and_wait()
    data = self.get_registry()
    # Compare file inodes and the one in the registry
    assert os.stat(testfile).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfile))["FileStateOS"]["inode"]
    assert os.stat(testfilerenamed1).st_ino == self.get_registry_entry_by_path(os.path.abspath(testfilerenamed1))["FileStateOS"]["inode"]
    # Check that 3 files are part of the registrar file. The deleted file should never have been detected, but the rotated one should be in
    assert len(data) == 3
def test_state_after_rotation(self):
    """
    Checks that the state is written correctly after rotation
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/input*",
        ignore_older="2m",
        scan_frequency="1s"
    )
    os.mkdir(self.working_dir + "/log/")
    testfile1 = self.working_dir + "/log/input"
    testfile2 = self.working_dir + "/log/input.1"
    testfile3 = self.working_dir + "/log/input.2"
    with open(testfile1, 'w') as f:
        f.write("entry10\n")
    with open(testfile2, 'w') as f:
        f.write("entry0\n")
    filebeat = self.start_beat()
    self.wait_until(
        lambda: self.output_has(lines=2),
        max_timeout=10)
    # Wait a moment to make sure file exists
    time.sleep(1)
    data = self.get_registry()
    # Check that offsets are correct
    if os.name == "nt":
        # Under windows offset is +1 because of additional newline char
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9
        assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 8
    else:
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 8
        assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 7
    # Rotate files and remove old one
    os.rename(testfile2, testfile3)
    os.rename(testfile1, testfile2)
    with open(testfile1, 'w') as f:
        f.write("entry200\n")
    # Remove file afterwards to make sure not inode reuse happens
    os.remove(testfile3)
    # Now wait until rotation is detected
    self.wait_until(
        lambda: self.log_contains(
            "File rename was detected"),
        max_timeout=10)
    self.wait_until(
        lambda: self.log_contains_count(
            "Registry file updated. 2 states written.") >= 1,
        max_timeout=15)
    time.sleep(5)
    filebeat.kill_and_wait()
    # Check that offsets are correct
    if os.name == "nt":
        # Under windows offset is +1 because of additional newline char
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 10
        assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 9
    else:
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9
        assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 8
def test_state_after_rotation_ignore_older(self):
    """
    Checks that the state is written correctly after rotation and ignore older
    """
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/input*",
        ignore_older="2m",
        scan_frequency="1s"
    )
    os.mkdir(self.working_dir + "/log/")
    testfile1 = self.working_dir + "/log/input"
    testfile2 = self.working_dir + "/log/input.1"
    testfile3 = self.working_dir + "/log/input.2"
    with open(testfile1, 'w') as f:
        f.write("entry10\n")
    with open(testfile2, 'w') as f:
        f.write("entry0\n")
    # Change modification time so file extends ignore_older
    yesterday = time.time() - 3600*24
    os.utime(testfile2, (yesterday, yesterday))
    filebeat = self.start_beat()
    # Only testfile1 is harvested; testfile2 is excluded by ignore_older.
    self.wait_until(
        lambda: self.output_has(lines=1),
        max_timeout=10)
    # Wait a moment to make sure file exists
    time.sleep(1)
    data = self.get_registry()
    # Check that offsets are correct
    if os.name == "nt":
        # Under windows offset is +1 because of additional newline char
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9
    else:
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 8
    # Rotate files and remove old one
    os.rename(testfile2, testfile3)
    os.rename(testfile1, testfile2)
    with open(testfile1, 'w') as f:
        f.write("entry200\n")
    # Remove file afterwards to make sure not inode reuse happens
    os.remove(testfile3)
    # Now wait until rotation is detected
    self.wait_until(
        lambda: self.log_contains(
            "File rename was detected"),
        max_timeout=10)
    self.wait_until(
        lambda: self.log_contains_count(
            "Registry file updated. 2 states written.") >= 1,
        max_timeout=15)
    time.sleep(5)
    filebeat.kill_and_wait()
    # Check that offsets are correct
    if os.name == "nt":
        # Under windows offset is +1 because of additional newline char
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 10
        assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 9
    else:
        assert self.get_registry_entry_by_path(os.path.abspath(testfile1))["offset"] == 9
        assert self.get_registry_entry_by_path(os.path.abspath(testfile2))["offset"] == 8
def test_migration_non_windows(self):
    """
    Tests if migration from old filebeat registry to new format works
    """
    # Uses inode/device FileStateOS entries, so not applicable on Windows.
    if os.name == "nt":
        raise SkipTest
    registry_file = self.working_dir + '/registry'
    # Write old registry file
    with open(registry_file, 'w') as f:
        f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}}')
    self.render_config_template(
        path=os.path.abspath(self.working_dir) + "/log/input*",
    )
    filebeat = self.start_beat()
    self.wait_until(
        lambda: self.log_contains("Old registry states found: 2"),
        max_timeout=15)
    self.wait_until(
        lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
        max_timeout=15)
    filebeat.check_kill_and_wait()
    # Check if content is same as above
    assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
    assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
    # Compare first entry
    oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"inode":30178938,"device":16777220}}')
    newJson = self.get_registry_entry_by_path("logs/hello.log")
    # timestamp/ttl are added by the new format; drop them for comparison.
    del newJson["timestamp"]
    del newJson["ttl"]
    assert newJson == oldJson
    # Compare second entry
    oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"inode":30178958,"device":16777220}}')
    newJson = self.get_registry_entry_by_path("logs/log2.log")
    del newJson["timestamp"]
    del newJson["ttl"]
    assert newJson == oldJson
    # Make sure the right number of entries is in
    data = self.get_registry()
    assert len(data) == 2
    def test_migration_windows(self):
        """
        Tests that migration from the old filebeat registry format to the new
        format works on Windows, where file identity is tracked via
        idxhi/idxlo/vol instead of inode/device.
        """
        # Windows-only counterpart of test_migration_non_windows.
        if os.name != "nt":
            raise SkipTest
        registry_file = self.working_dir + '/registry'
        # Write old registry file (pre-migration JSON layout keyed by source path)
        with open(registry_file, 'w') as f:
            f.write('{"logs/hello.log":{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}},"logs/log2.log":{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}}')
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/input*",
        )
        filebeat = self.start_beat()
        # Wait for the beat to report discovery and conversion of both states.
        self.wait_until(
            lambda: self.log_contains("Old registry states found: 2"),
            max_timeout=15)
        self.wait_until(
            lambda: self.log_contains("Old states converted to new states and written to registrar: 2"),
            max_timeout=15)
        filebeat.check_kill_and_wait()
        # Check if content is same as above
        assert self.get_registry_entry_by_path("logs/hello.log")["offset"] == 4
        assert self.get_registry_entry_by_path("logs/log2.log")["offset"] == 6
        # Compare first entry; migration adds timestamp and ttl, which are
        # stripped before comparing with the old-format JSON.
        oldJson = json.loads('{"source":"logs/hello.log","offset":4,"FileStateOS":{"idxhi":1,"idxlo":12,"vol":34}}')
        newJson = self.get_registry_entry_by_path("logs/hello.log")
        del newJson["timestamp"]
        del newJson["ttl"]
        assert newJson == oldJson
        # Compare second entry
        oldJson = json.loads('{"source":"logs/log2.log","offset":6,"FileStateOS":{"idxhi":67,"idxlo":44,"vol":12}}')
        newJson = self.get_registry_entry_by_path("logs/log2.log")
        del newJson["timestamp"]
        del newJson["ttl"]
        assert newJson == oldJson
        # Make sure the right number of entries is in (no extras created)
        data = self.get_registry()
        assert len(data) == 2
    def test_clean_inactive(self):
        """
        Checks that states are properly removed from the registry after
        clean_inactive expires: two files go inactive and are cleaned, so
        only the freshly-written third file remains registered.
        """
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/input*",
            clean_inactive="4s",
            ignore_older="2s",
            close_inactive="0.2s",
            scan_frequency="0.1s"
        )
        os.mkdir(self.working_dir + "/log/")
        testfile1 = self.working_dir + "/log/input1"
        testfile2 = self.working_dir + "/log/input2"
        testfile3 = self.working_dir + "/log/input3"
        with open(testfile1, 'w') as f:
            f.write("first file\n")
        with open(testfile2, 'w') as f:
            f.write("second file\n")
        filebeat = self.start_beat()
        self.wait_until(
            lambda: self.output_has(lines=2),
            max_timeout=10)
        # Both files must be registered before they can be cleaned.
        data = self.get_registry()
        assert len(data) == 2
        # Wait until states are removed from prospectors
        self.wait_until(
            lambda: self.log_contains_count(
                "State removed for") == 2,
            max_timeout=15)
        # Write new file to make sure registrar is flushed again
        with open(testfile3, 'w') as f:
            f.write("2\n")
        self.wait_until(
            lambda: self.output_has(lines=3),
            max_timeout=30)
        # Wait until states are removed from prospectors
        self.wait_until(
            lambda: self.log_contains_count(
                "State removed for") == 4,
            max_timeout=15)
        filebeat.check_kill_and_wait()
        # Check that the first two files were removed from the registry
        data = self.get_registry()
        assert len(data) == 1
        # Make sure the last file in the registry is the correct one and has the correct offset
        if os.name == "nt":
            # Windows offset is +1 because of the additional newline char
            assert data[0]["offset"] == 3
        else:
            assert data[0]["offset"] == 2
    def test_clean_removed(self):
        """
        Checks that when a file is removed from disk, its state is also
        removed from the registry (clean_removed/close_removed).
        """
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/input*",
            scan_frequency="0.1s",
            clean_removed=True,
            close_removed=True
        )
        os.mkdir(self.working_dir + "/log/")
        testfile1 = self.working_dir + "/log/input1"
        testfile2 = self.working_dir + "/log/input2"
        with open(testfile1, 'w') as f:
            f.write("file to be removed\n")
        with open(testfile2, 'w') as f:
            f.write("2\n")
        filebeat = self.start_beat()
        self.wait_until(
            lambda: self.output_has(lines=2),
            max_timeout=10)
        # Both files must be registered before the removal is exercised.
        data = self.get_registry()
        assert len(data) == 2
        os.remove(testfile1)
        # Wait until states are removed from prospectors
        self.wait_until(
            lambda: self.log_contains(
                "Remove state for file as file removed"),
            max_timeout=15)
        # Add one more line to make sure registry is written
        with open(testfile2, 'a') as f:
            f.write("make sure registry is written\n")
        self.wait_until(
            lambda: self.output_has(lines=3),
            max_timeout=10)
        filebeat.check_kill_and_wait()
        # Check that the removed file's state is gone from the registry
        data = self.get_registry()
        assert len(data) == 1
        # Make sure the last file in the registry is the correct one and has the correct offset
        if os.name == "nt":
            # Windows offset is larger because of the additional newline chars
            assert data[0]["offset"] == len("make sure registry is written\n" + "2\n") + 2
        else:
            assert data[0]["offset"] == len("make sure registry is written\n" + "2\n")
|
|
#!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple server for testing the Objective-C GData Framework
This http server is for use by GDataServiceTest.m in testing
both authentication and object retrieval.
Requests to the path /accounts/ClientLogin are assumed to be
for login; other requests are for object retrieval
"""
import string
import cgi
import time
import os
import sys
import re
import mimetypes
import socket
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from optparse import OptionParser
class ServerTimeoutException(Exception):
  """Raised when the test server has received no connections for too long."""
class HTTPTimeoutServer(HTTPServer):
  """HTTP server for testing network requests.

  This server will throw an exception if it receives no connections for
  several minutes. We use this to ensure that the server will be cleaned
  up if something goes wrong during the unit testing.
  """

  def get_request(self):
    # A 120s accept() timeout bounds how long the server may sit idle.
    self.socket.settimeout(120.0)
    result = None
    while result is None:
      try:
        result = self.socket.accept()
      except socket.timeout:
        # No client connected within the window; abort the whole server.
        raise ServerTimeoutException
    # result is (connection, address); clear the timeout on the accepted
    # connection so individual requests are not artificially limited.
    result[0].settimeout(None)
    return result
class SimpleServer(BaseHTTPRequestHandler):
  """HTTP request handler for testing GData network requests.

  This is an implementation of a request handler for BaseHTTPServer,
  specifically designed for GData service code usage.

  Normal requests for GET/POST/PUT simply retrieve the file from the
  supplied path, starting in the current directory. A cookie called
  TestCookie is set by the response header, with the value of the filename
  requested.

  DELETE requests always succeed.

  Appending ?status=n results in a failure with status value n.

  Paths ending in .auth have the .auth extension stripped, and must have
  an authorization header of "GoogleLogin auth=GoodAuthToken" to succeed.

  Paths ending in .authsub have the .authsub extension stripped, and must have
  an authorization header of "AuthSub token=GoodAuthSubToken" to succeed.

  Successful results have a Last-Modified header set; if that header's value
  ("thursday") is supplied in a request's "If-Modified-Since" header, the
  result is 304 (Not Modified).

  Requests to /accounts/ClientLogin will fail if supplied with a body
  containing Passwd=bad. If they contain logintoken and logincaptcha values,
  those must be logintoken=CapToken&logincaptcha=good to succeed.
  """

  def do_GET(self):
    self.doAllRequests()

  def do_POST(self):
    self.doAllRequests()

  def do_PUT(self):
    self.doAllRequests()

  def do_DELETE(self):
    self.doAllRequests()

  def doAllRequests(self):
    # This method handles all expected incoming requests
    #
    # Requests to path /accounts/ClientLogin are assumed to be for signing in
    #
    # Other paths are for retrieving a local xml file. An .auth appended
    # to an xml file path will require authentication (meaning the Authorization
    # header must be present with the value "GoogleLogin auth=GoodAuthToken".)
    # Delete commands succeed but return no data.
    #
    # GData override headers are supported.
    #
    # Any auth password is valid except "bad", which will fail, and "captcha",
    # which will fail unless the authentication request's post string includes
    # "logintoken=CapToken&logincaptcha=good"
    # We will use a readable default result string since it should never show up
    # in output
    resultString = "default GDataTestServer result\n";
    resultStatus = 0
    headerType = "text/plain"
    postString = ""
    modifiedDate = "thursday" # clients should treat dates as opaque, generally
    # auth queries and some GData queries include post data
    postLength = int(self.headers.getheader("Content-Length", "0"));
    if postLength > 0:
      postString = self.rfile.read(postLength)
    ifModifiedSince = self.headers.getheader("If-Modified-Since", "");
    # retrieve the auth header; require it if the file path ends
    # with the string ".auth" or ".authsub"
    authorization = self.headers.getheader("Authorization", "")
    if self.path.endswith(".auth"):
      if authorization != "GoogleLogin auth=GoodAuthToken":
        self.send_error(401,"Unauthorized: %s" % self.path)
        return
      self.path = self.path[:-5] # remove the .auth at the end
    if self.path.endswith(".authsub"):
      if authorization != "AuthSub token=GoodAuthSubToken":
        self.send_error(401,"Unauthorized: %s" % self.path)
        return
      self.path = self.path[:-8] # remove the .authsub at the end
    # X-HTTP-Method-Override lets POST requests stand in for other verbs.
    overrideHeader = self.headers.getheader("X-HTTP-Method-Override", "")
    httpCommand = self.command
    if httpCommand == "POST" and len(overrideHeader) > 0:
      httpCommand = overrideHeader
    try:
      if self.path.endswith("/accounts/ClientLogin"):
        #
        # it's a sign-in attempt; it's good unless the password is "bad" or
        # "captcha"
        #
        # use regular expression to find the password
        password = ""
        searchResult = re.search("(Passwd=)([^&\n]*)", postString)
        if searchResult:
          password = searchResult.group(2)
        if password == "bad":
          resultString = "Error=BadAuthentication\n"
          resultStatus = 403
        elif password == "captcha":
          logintoken = ""
          logincaptcha = ""
          # use regular expressions to find the captcha token and answer
          searchResult = re.search("(logintoken=)([^&\n]*)", postString);
          if searchResult:
            logintoken = searchResult.group(2)
          searchResult = re.search("(logincaptcha=)([^&\n]*)", postString);
          if searchResult:
            logincaptcha = searchResult.group(2)
          # if the captcha token is "CapToken" and the answer is "good"
          # then it's a valid sign in
          if (logintoken == "CapToken") and (logincaptcha == "good"):
            resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
            resultStatus = 200
          else:
            # incorrect captcha token or answer provided
            resultString = ("Error=CaptchaRequired\nCaptchaToken=CapToken\n"
                            "CaptchaUrl=CapUrl\n")
            resultStatus = 403
        else:
          # valid username/password
          resultString = "SID=GoodSID\nLSID=GoodLSID\nAuth=GoodAuthToken\n"
          resultStatus = 200
      elif httpCommand == "DELETE":
        #
        # it's an object delete; read and return empty data
        #
        resultString = ""
        resultStatus = 200
        headerType = "text/plain"
      else:
        # queries that have something like "?status=456" should fail with the
        # status code
        searchResult = re.search("(status=)([0-9]+)", self.path)
        if searchResult:
          status = searchResult.group(2)
          self.send_error(int(status),
                          "Test HTTP server status parameter: %s" % self.path)
          return
        # if the client gave us back our modified date, then say there's no
        # change in the response
        if ifModifiedSince == modifiedDate:
          self.send_response(304) # Not Modified
          return
        else:
          #
          # it's an object fetch; read and return the XML file
          #
          # NOTE(review): self.path comes straight from the request, so
          # "." + self.path allows path traversal; acceptable only because
          # this is a local-only test server.
          f = open("." + self.path)
          resultString = f.read()
          f.close()
          resultStatus = 200
          fileTypeInfo = mimetypes.guess_type("." + self.path)
          headerType = fileTypeInfo[0] # first part of the tuple is mime type
      self.send_response(resultStatus)
      self.send_header("Content-type", headerType)
      self.send_header("Last-Modified", modifiedDate)
      # set TestCookie to equal the file name requested
      cookieValue = os.path.basename("." + self.path)
      self.send_header('Set-Cookie', 'TestCookie=%s' % cookieValue)
      self.end_headers()
      self.wfile.write(resultString)
    except IOError:
      # open() above failed: the requested local file does not exist
      self.send_error(404,"File Not Found: %s" % self.path)
def main():
  # Entry point: parse --port/--root, chdir to the served root, and run the
  # timeout-guarded test server until interrupted or idle too long.
  # (Python 2 file: print statements, BaseHTTPServer imports.)
  try:
    parser = OptionParser()
    # NOTE(review): default="80" is a string; optparse does not type-convert
    # defaults, so running without -p may pass a str port - confirm. Port 80
    # also requires elevated privileges on most systems.
    parser.add_option("-p", "--port", dest="port", help="Port to run server on",
                      type="int", default="80")
    parser.add_option("-r", "--root", dest="root", help="Where to root server",
                      default=".")
    (options, args) = parser.parse_args()
    # Serve files relative to the requested root directory.
    os.chdir(options.root)
    server = HTTPTimeoutServer(("127.0.0.1", options.port), SimpleServer)
    sys.stdout.write("started GDataTestServer.py...");
    sys.stdout.flush();
    server.serve_forever()
  except KeyboardInterrupt:
    print "^C received, shutting down server"
    server.socket.close()
  except ServerTimeoutException:
    # Raised by HTTPTimeoutServer.get_request after 120s without a client.
    print "Too long since the last request, shutting down server"
    server.socket.close()

if __name__ == "__main__":
  main()
|
|
'''
QuickCoords/main.py
QuickCoords is a simple tool for quickly and easily capturing a series of pixel
coordinates from a large number of images.
Copyright (c) 2014, Brendan Gray and Sylvermyst Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This module provides the ToolScreen class.
'''
import os
from PyQt4 import QtGui
from PyQt4.QtCore import Qt, QTimer
from QuickCoords.constants import supportedExtensions, imageScaleFactor, folderSaveFileName,\
imageColumnMinWidth, outputColumnMinWidth, outputColumnMaxWidth,\
outputColumnMinHeight, targetFPS, forwardKeys,\
backwardKeys
from QuickCoords.image import ClickableImageBox
from QuickCoords.points import CoordinateList
from QuickCoords.table import TableBox
class ToolScreen(QtGui.QWidget):
    '''
    Extends QWidget to provide the required functionality for the program.
    Provides the following functions:
    ToolScreen.prepare() initialises some variables.
    ToolScreen.updateDisplay() fills the table and redraws the points.
    ToolScreen.keyPressEvent(event) handles keyboard shortcuts.
    ToolScreen.initUI() initialises the user interface.
    ToolScreen.selectFolder() brings up a folder selection dialogue.
    ToolScreen.setFoldertoPath(newPath) changes the current folder.
    ToolScreen.copyTable() copies the list of points to the clipboard.
    ToolScreen.exportTable() exports the list of points to a CSV or plain text file.
    ToolScreen.clearTable() deletes all points.
    ToolScreen.fillListBox() fills the list box with the images from the current folder.
    ToolScreen.changeImageFromList() changes the image to the currently selected image in the list box.
    ToolScreen.shiftSelected(direction) shifts the selected points in the specified direction.
    ToolScreen.setImage() loads the current image from disk and sets it for display.
    ToolScreen.updatePoints() updates the table to reflect the current state of the coordinate list.
    ToolScreen.drawImagePoints() redraws the points on the display.
    ToolScreen.nextImage() switches to the next image.
    ToolScreen.prevImage() switches to the previous image.
    ToolScreen.saveCurrentFolder() writes the current path to a file.
    ToolScreen.loadLastFolder() loads the folder last used.
    '''

    def __init__(self):
        super(ToolScreen, self).__init__() # Call the constructor of this class's parent
        self.prepare()
        self.initUI()
        self.setFoldertoPath(self.imagePath)
        # Redraws are driven by a periodic timer rather than per-event updates.
        self.fpsTimer = QTimer()
        self.fpsTimer.timeout.connect(self.updateDisplay)
        self.fpsTimer.start(1000/targetFPS)

    def prepare(self):
        '''
        Initialises variables that need to be set before the GUI is initialised.
        '''
        self.loadLastFolder()
        self.currentImageNum = 0
        self.imageList = []
        self.coordList = CoordinateList([])
        self.scaleFactor = imageScaleFactor
        # Dirty flag checked by updateDisplay(); set whenever points change.
        self.tableViewChanged = False
        self.ignoreDeletes = False

    def updateDisplay(self):
        '''
        Fills the table and redraws the points, if things have changed since the last update.
        '''
        if self.tableViewChanged:
            self.updatePoints()
            self.drawImagePoints()
            self.tableViewChanged = False

    def keyPressEvent(self, event):
        '''
        Handles key presses anywhere in the program.
        Forward/backward keys page through images; Backspace removes the last
        point; Delete removes selected rows; WASD nudges selected points.
        '''
        if event.key() in forwardKeys:
            self.nextImage()
        if event.key() in backwardKeys:
            self.prevImage()
        if event.key() == Qt.Key_Backspace:
            self.coordList.removeLastPoint()
        if event.key() == Qt.Key_Delete:
            # ignoreDeletes suppresses a single Delete press (one-shot flag).
            if not self.ignoreDeletes:
                self.table.deleteSelectedRows()
            self.ignoreDeletes = False
        if event.key() == Qt.Key_W:
            self.shiftSelected('up')
        if event.key() == Qt.Key_A:
            self.shiftSelected('left')
        if event.key() == Qt.Key_S:
            self.shiftSelected('down')
        if event.key() == Qt.Key_D:
            self.shiftSelected('right')
        # Any key press may have changed points; schedule a redraw.
        self.tableViewChanged = True

    def initUI(self):
        '''
        Initialises the UI layout and widgets.
        Layout: a horizontal splitter with the image view on the left and a
        vertical splitter (points table over image list) on the right.
        '''
        mainBox = QtGui.QHBoxLayout()
        imageBox = QtGui.QVBoxLayout()
        outputBox = QtGui.QVBoxLayout()
        titleBox = QtGui.QHBoxLayout()
        self.imagePathLabel = QtGui.QLabel("", self)
        self.imageLabel = QtGui.QLabel("No image loaded.", self)
        self.image = QtGui.QPixmap()
        # ClickableImageBox is a QGraphicsScene subclass that reports clicks.
        self.imageBlockScene = ClickableImageBox(parent = self)
        self.imageBlockScene.addPixmap(self.image)
        self.imageBlock = QtGui.QGraphicsView()
        self.imageBlock.setScene(self.imageBlockScene)
        self.imageBlock.setMinimumWidth(imageColumnMinWidth)
        folderButton = QtGui.QPushButton("Image folder:")
        folderButton.setMaximumWidth(180)
        folderButton.clicked.connect(self.selectFolder)
        tableCopyButton = QtGui.QPushButton("Copy")
        tableCopyButton.setMinimumWidth(40)
        tableCopyButton.clicked.connect(self.copyTable)
        tableExportButton = QtGui.QPushButton("Export")
        tableExportButton.setMinimumWidth(40)
        tableExportButton.clicked.connect(self.exportTable)
        tableClearButton = QtGui.QPushButton("Clear")
        tableClearButton.setMinimumWidth(40)
        tableClearButton.clicked.connect(self.clearTable)
        # Two-column (x, y) table of captured points; rows select as a unit.
        self.table = TableBox(0,2)
        self.table.toolScreen = self
        self.table.setHorizontalHeaderLabels(['x','y'])
        self.table.setSelectionBehavior(QtGui.QTableWidget.SelectRows)
        self.table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        self.table.setMinimumWidth(outputColumnMinWidth)
        self.table.setMaximumWidth(outputColumnMaxWidth)
        self.table.setMinimumHeight(outputColumnMinHeight)
        self.listBlock = QtGui.QListWidget()
        self.listBlock.setMinimumWidth(outputColumnMinWidth)
        self.listBlock.setMaximumWidth(outputColumnMaxWidth)
        self.listBlock.setMinimumHeight(outputColumnMinHeight)
        self.listBlock.currentRowChanged.connect(self.changeImageFromList)
        titleBox.addWidget(folderButton)
        titleBox.addWidget(self.imagePathLabel)
        titleBox.addWidget(self.imageLabel)
        imageBox.addWidget(self.imageBlock)
        imageBox.addLayout(titleBox)
        outputBox.addWidget(self.table)
        tableButtonsLayout = QtGui.QHBoxLayout()
        tableButtonsLayout.addWidget(tableCopyButton)
        tableButtonsLayout.addWidget(tableExportButton)
        tableButtonsLayout.addWidget(tableClearButton)
        tableLayout = QtGui.QVBoxLayout()
        tableLayout.addLayout(tableButtonsLayout)
        tableLayout.addWidget(self.table)
        tableWidget = QtGui.QWidget()
        tableWidget.setLayout(tableLayout)
        outputBoxSplitter = QtGui.QSplitter(Qt.Vertical)
        outputBoxSplitter.addWidget(tableWidget)
        outputBoxSplitter.addWidget(self.listBlock)
        outputBoxSplitter.setChildrenCollapsible(False)
        outputBoxSplitter.setStretchFactor(0, 3)
        outputBoxSplitter.setStretchFactor(1, 1)
        imageBoxWidget = QtGui.QWidget()
        imageBoxWidget.setLayout(imageBox)
        mainBoxSplitter = QtGui.QSplitter(Qt.Horizontal)
        mainBoxSplitter.setChildrenCollapsible(False)
        mainBoxSplitter.addWidget(imageBoxWidget)
        mainBoxSplitter.addWidget(outputBoxSplitter)
        mainBoxSplitter.setStretchFactor(0, 4)
        mainBoxSplitter.setStretchFactor(1, 1)
        mainBox.addWidget(mainBoxSplitter)
        self.setLayout(mainBox)
        self.setWindowTitle('Quick Coords')
        self.setWindowState(Qt.WindowMaximized)
        self.show()

    def selectFolder(self):
        '''
        Brings up a folder selection dialog and sets the current path to the selected folder.
        '''
        fileDialog = QtGui.QFileDialog()
        newPath = fileDialog.getExistingDirectory(self, "Select a folder with images", self.imagePath)
        self.setFoldertoPath(newPath)

    def setFoldertoPath(self, newPath):
        '''
        Handles a change in path. Checks that the path exists, fills the list box, loads the first image, and saves the current folder.
        '''
        if len(newPath) > 0 and os.access(newPath, 0):
            self.imagePath = newPath.replace('\\','/').rstrip('/')+'/' # Replace Windows' stupid file separator with one that works on all platforms.
            self.imagePathLabel.setText(self.imagePath)
            fileList = os.listdir(self.imagePath)
            fileList.sort()
            self.imageList = []
            # Keep only files whose extension is in supportedExtensions.
            for i in fileList:
                extension = i.split('.')[-1]
                if extension in supportedExtensions:
                    self.imageList.append(self.imagePath + i)
            self.currentImageNum = 0
            self.setImage()
            self.fillListBox()
            self.saveCurrentFolder()

    def copyTable(self):
        '''
        Copies a tab separated list to the clipboard, suitable for pasting into most spreadsheet programs
        '''
        # NOTE(review): self.clipboard is never assigned in this class
        # (neither prepare() nor initUI() sets it), so this looks like it
        # raises AttributeError unless a collaborator injects it - confirm.
        self.clipboard.setText(self.coordList.copyAsText())

    def exportTable(self):
        '''
        Saves a CSV or plain text file containing a comma separated list of points.
        '''
        fileDialog = QtGui.QFileDialog()
        filters = 'CSV files (*.csv);;Text files (*.txt);;All files (*.*)'
        exportLocation = fileDialog.getSaveFileName(self, "Choose file to export to", self.imagePath, filter=filters)
        # NOTE(review): a cancelled dialog presumably returns an empty path,
        # which would make open() raise - verify and guard if so.
        exportFile = open(exportLocation, 'w')
        exportFile.write(self.coordList.copyAsCSV())
        exportFile.close()

    def clearTable(self):
        '''
        Deletes all points.
        '''
        self.coordList.clear()
        self.tableViewChanged = True

    def fillListBox(self):
        '''
        Fills the list box with the names of the images in the folder.
        Note: Does not check if images have been added since the folder was loaded. To do this, reset the folder.
        '''
        self.listBlock.clear()
        if len(self.imageList) > 0:
            for f in self.imageList:
                # Display only the file name, not the full path.
                thisImage = f.split('/')[-1]
                self.listBlock.addItem(thisImage)
            self.listBlock.setCurrentRow(self.currentImageNum)
        else:
            print("No images in current folder")
        self.tableViewChanged = True

    def changeImageFromList(self):
        '''
        Changes the image to the currently selected image in the list.
        '''
        self.currentImageNum = self.listBlock.currentRow()
        self.setImage()

    def shiftSelected(self, direction):
        '''
        Moves the selected points 1 pixel (on the screen, not on the image) in the specified direction.
        Direction can be 'up', 'down', 'left' or 'right'.
        '''
        selectedPoints = self.table.getSelectedPoints()
        for i in selectedPoints:
            # Uses the module constant imageScaleFactor rather than
            # self.scaleFactor (equal at startup per prepare()).
            self.coordList.points[i].shift(direction, 1.0/imageScaleFactor)

    def setImage(self):
        '''
        Loads the current image from disk and sets it for display.
        '''
        if len(self.imageList) > 0:
            currentImage = self.imageList[self.currentImageNum]
            print("Attempting to load Current image",currentImage)
            self.image = QtGui.QPixmap(currentImage)
            # Remember the on-disk size before scaling for display.
            self.image.originalWidth = self.image.width()
            self.image.originalHeight = self.image.height()
            width = self.image.originalWidth * self.scaleFactor
            height = self.image.originalHeight * self.scaleFactor
            self.image = self.image.scaled(width, height, Qt.KeepAspectRatio)
            # Keep a pristine copy so drawImagePoints() can redraw from scratch.
            self.originalImage = self.image.copy()
            self.imageBlockScene.clear()
            self.imageBlockScene.setSceneRect(0, 0, width, height)
            self.imageBlockScene.addPixmap(self.image)
            self.imageLabel.setText(currentImage.split('/')[-1])
            self.listBlock.setCurrentRow(self.currentImageNum)
        else:
            print("No images in current folder")

    def updatePoints(self):
        '''
        Updates the table to reflect the current state of the coordinate list.
        Selection is captured before the rebuild and restored afterwards.
        '''
        selectedPoints = self.table.getSelectedPoints()
        points = self.coordList.points
        nPoints = len(points)
        self.table.clearContents()
        self.table.setRowCount(nPoints)
        for i in range(nPoints):
            xItem = QtGui.QTableWidgetItem('{:.1f}'.format(points[i].x))
            # Read-only cells: selectable but not editable.
            xItem.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
            self.table.setItem(i, 0, xItem)
            yItem = QtGui.QTableWidgetItem('{:.1f}'.format(points[i].y))
            yItem.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
            self.table.setItem(i, 1, yItem)
        self.table.setSelectedRows(selectedPoints)

    def drawImagePoints(self):
        '''
        Redraws the points on the display.
        '''
        # We don't want to actually draw the points onto the image, because then we'd have to reload the
        # image from disk to undo the changes. Instead, we work on a copy, and only display the copy.
        # The original image remains unchanged in memory.
        newImage = self.originalImage.toImage()
        for p in self.coordList.points:
            # Each point becomes a small filled square with a 1px black border.
            top = int((p.y-0.4) * self.scaleFactor)
            bottom = int((p.y+0.4) * self.scaleFactor)
            left = int((p.x-0.4) * self.scaleFactor)
            right = int((p.x+0.4) * self.scaleFactor)
            if p.colour == 0:
                colour = QtGui.qRgb(0, 255, 0)
            else:
                colour = QtGui.qRgb(255, 0, 0)
            borderColour = QtGui.qRgb(0, 0, 0)
            # Fill the square, clipped to the image bounds.
            for x in range(left, right+1):
                for y in range(top, bottom+1):
                    if x>0 and x<newImage.width() and y>0 and y<newImage.height():
                        newImage.setPixel(x, y, colour)
            # Horizontal border rows above and below the square.
            for x in range(left-1, right+2):
                for y in [top-1, bottom+1]:
                    if x>0 and x<newImage.width() and y>0 and y<newImage.height():
                        newImage.setPixel(x, y, borderColour)
            # Vertical border columns left and right of the square.
            for x in [left-1, right+1]:
                for y in range(top-1, bottom+2):
                    if x>0 and x<newImage.width() and y>0 and y<newImage.height():
                        newImage.setPixel(x, y, borderColour)
        self.image.convertFromImage(newImage)
        self.imageBlockScene.update()

    def nextImage(self):
        '''
        Changes to the next image, wrapping around to the first.
        '''
        self.currentImageNum += 1
        if self.currentImageNum >= len(self.imageList):
            self.currentImageNum = 0
        self.setImage()

    def prevImage(self):
        '''
        Changes to the previous image, wrapping around to the last.
        '''
        self.currentImageNum -= 1
        if self.currentImageNum < 0:
            self.currentImageNum = len(self.imageList)-1
        self.setImage()

    def saveCurrentFolder(self):
        '''
        Saves the current folder to a file, allowing the folder selection to be persistent if the program is exited.
        '''
        try:
            folderFile = open(folderSaveFileName, 'w')
            folderFile.write(self.imagePath)
            folderFile.close()
        except IOError:
            print("Could not save last folder")

    def loadLastFolder(self):
        '''
        Loads the last current folder from disk.
        '''
        try:
            folderFile = open(folderSaveFileName, 'r')
            path = folderFile.readline().strip()
            if len(path) > 0 and os.access(path, 0):
                self.imagePath = path
            # NOTE(review): if the file opens but the stored path is empty or
            # inaccessible, self.imagePath is left unset here, and __init__'s
            # setFoldertoPath(self.imagePath) would then raise - confirm.
            folderFile.close()
        except IOError:
            print("Could not load last folder")
            self.imagePath = ""
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
from cloudify_rest_client.executions import Execution
from integration_tests import AgentlessTestCase
from integration_tests.framework import postgresql
from integration_tests.tests.utils import (
verify_deployment_environment_creation_complete,
do_retries,
get_resource as resource)
class ExecutionsTest(AgentlessTestCase):
def test_cancel_execution(self):
execution, deployment_id = self._execute_and_cancel_execution(
'sleep_with_cancel_support')
self._assert_execution_cancelled(execution, deployment_id)
def test_force_cancel_execution(self):
execution, deployment_id = self._execute_and_cancel_execution(
'sleep', True)
self._assert_execution_cancelled(execution, deployment_id)
def test_cancel_execution_with_graph_workflow(self):
execution, deployment_id = self._execute_and_cancel_execution(
'sleep_with_graph_usage')
self._assert_execution_cancelled(execution, deployment_id)
def test_cancel_execution_and_then_force_cancel(self):
execution, deployment_id = self._execute_and_cancel_execution(
'sleep', False, False)
# cancel didn't work (unsupported) - use force-cancel instead
execution = self.client.executions.cancel(execution.id, True)
self.assertEquals(Execution.FORCE_CANCELLING, execution.status)
self.wait_for_execution_to_end(execution)
execution = self.client.executions.get(execution.id)
self._assert_execution_cancelled(execution, deployment_id)
def test_legacy_cancel_execution(self):
# this tests cancellation of an execution where the workflow
# announces the cancel occurred by returning a value rather than by
# raising an error
execution, deployment_id = self._execute_and_cancel_execution(
'sleep_with_cancel_support',
workflow_params={'use_legacy_cancel': True})
self._assert_execution_cancelled(execution, deployment_id)
def test_cancel_execution_before_it_started(self):
execution, deployment_id = self._execute_and_cancel_execution(
'sleep_with_cancel_support', False, True, False)
self.assertEquals(Execution.CANCELLED, execution.status)
data = self.get_plugin_data(
plugin_name='testmockoperations',
deployment_id=deployment_id
)
self.assertEqual(data, {})
def test_sort_executions(self):
dsl_path = resource("dsl/basic.yaml")
deployment, execution_id = self.deploy_application(dsl_path)
self.wait_for_execution_to_end(
self.client.executions.get(execution_id))
deployment, execution_id = self.deploy_application(dsl_path)
self.wait_for_execution_to_end(
self.client.executions.get(execution_id))
deployments_executions = self.client.executions.list(sort='created_at')
for i in range(len(deployments_executions)-1):
self.assertTrue(deployments_executions[i]['created_at'] <
deployments_executions[i+1]['created_at'],
'execution list not sorted correctly')
deployments_executions = self.client.executions.list(
sort='created_at',
is_descending=True)
for i in range(len(deployments_executions)-1):
self.assertTrue(deployments_executions[i]['created_at'] >
deployments_executions[i+1]['created_at'],
'execution list not sorted correctly')
def test_get_deployments_executions_with_status(self):
dsl_path = resource("dsl/basic.yaml")
deployment, execution_id = self.deploy_application(dsl_path)
def assertions():
deployments_executions = self.client.executions.list(
deployment_id=deployment.id)
# expecting 2 executions (1 for deployment environment
# creation and 1 execution of 'install'). Checking the install
# execution's status
self.assertEquals(2, len(deployments_executions))
self.assertIn(execution_id, [deployments_executions[0].id,
deployments_executions[1].id])
install_execution = \
deployments_executions[0] if execution_id == \
deployments_executions[0].id else deployments_executions[1]
self.assertEquals(Execution.TERMINATED, install_execution.status)
self.assertEquals('', install_execution.error)
self.do_assertions(assertions, timeout=10)
    def test_execution_parameters(self):
        """
        Checks that custom execution parameters are passed through to the
        operation and that the stored execution parameters are the workflow
        defaults merged with (and overridden by) the supplied ones.
        """
        dsl_path = resource('dsl/workflow_parameters.yaml')
        # Unique ids so repeated runs do not collide on the manager.
        _id = uuid.uuid1()
        blueprint_id = 'blueprint_{0}'.format(_id)
        deployment_id = 'deployment_{0}'.format(_id)
        self.client.blueprints.upload(dsl_path, blueprint_id)
        self.client.deployments.create(blueprint_id, deployment_id,
                                       skip_plugins_validation=True)
        do_retries(verify_deployment_environment_creation_complete, 60,
                   deployment_id=deployment_id)
        execution_parameters = {
            'operation': 'test_interface.operation',
            'properties': {
                'key': 'different-key',
                'value': 'different-value'
            },
            'custom-parameter': "doesn't matter"
        }
        # allow_custom_parameters permits 'custom-parameter', which is not
        # declared by the workflow.
        execution = self.client.executions.start(
            deployment_id, 'another_execute_operation',
            parameters=execution_parameters,
            allow_custom_parameters=True)
        self.wait_for_execution_to_end(execution)
        invocations = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment_id
        )['mock_operation_invocation']
        self.assertEqual(1, len(invocations))
        self.assertDictEqual(invocations[0],
                             {'different-key': 'different-value'})
        # checking for execution parameters - expecting there to be a merge
        # with overrides with workflow parameters ('node_id' comes from the
        # workflow's declared defaults).
        expected_params = {
            'node_id': 'test_node',
            'operation': 'test_interface.operation',
            'properties': {
                'key': 'different-key',
                'value': 'different-value'
            },
            'custom-parameter': "doesn't matter"
        }
        self.assertEqual(expected_params, execution.parameters)
    def test_update_execution_status(self):
        """Exercise executions.update(): status + error update, and that
        updating only the status resets the error field to ''."""
        dsl_path = resource("dsl/basic.yaml")
        _, execution_id = self.deploy_application(dsl_path,
                                                  wait_for_execution=True)
        execution = self.client.executions.get(execution_id)
        self.assertEquals(Execution.TERMINATED, execution.status)
        # Manually updating the status, because the client checks for
        # correct transitions
        postgresql.run_query(
            "UPDATE executions SET status='started' "
            "WHERE id='{0}'".format(execution_id)
        )
        execution = self.client.executions.get(execution_id)
        self.assertEquals('started', execution.status)
        execution = self.client.executions.update(execution_id,
                                                  'pending',
                                                  'some-error')
        self.assertEquals('pending', execution.status)
        self.assertEquals('some-error', execution.error)
        # verifying that updating only the status field also resets the
        # error field to an empty string
        execution = self.client.executions.update(execution_id, 'terminated')
        self.assertEquals('terminated', execution.status)
        self.assertEquals('', execution.error)
    def _execute_and_cancel_execution(self, workflow_id, force=False,
                                      wait_for_termination=True,
                                      is_wait_for_asleep_node=True,
                                      workflow_params=None):
        """Deploy the sleep blueprint, start `workflow_id`, then cancel it.

        :param workflow_id: workflow to start on the new deployment
        :param force: issue a force-cancel instead of a regular cancel
        :param wait_for_termination: block until the execution ends and
            return its final state; otherwise return it mid-cancellation
        :param is_wait_for_asleep_node: wait for the node instance to reach
            the 'asleep' state before cancelling
        :param workflow_params: optional workflow parameters
        :return: tuple of (execution, deployment_id)
        """
        dsl_path = resource('dsl/sleep_workflows.yaml')
        _id = uuid.uuid1()
        blueprint_id = 'blueprint_{0}'.format(_id)
        deployment_id = 'deployment_{0}'.format(_id)
        self.client.blueprints.upload(dsl_path, blueprint_id)
        self.client.deployments.create(blueprint_id, deployment_id,
                                       skip_plugins_validation=True)
        do_retries(verify_deployment_environment_creation_complete, 30,
                   deployment_id=deployment_id)
        execution = self.client.executions.start(
            deployment_id, workflow_id, parameters=workflow_params)
        node_inst_id = self.client.node_instances.list(deployment_id)[0].id
        if is_wait_for_asleep_node:
            # Poll up to ~30s for the node to enter its sleep operation;
            # the for/else raises if it never gets there.
            for retry in range(30):
                if self.client.node_instances.get(
                        node_inst_id).state == 'asleep':
                    break
                time.sleep(1)
            else:
                raise RuntimeError("Execution was expected to go"
                                   " into 'sleeping' status")
        execution = self.client.executions.cancel(execution.id, force)
        expected_status = Execution.FORCE_CANCELLING if force else \
            Execution.CANCELLING
        self.assertEquals(expected_status, execution.status)
        if wait_for_termination:
            self.wait_for_execution_to_end(execution)
            execution = self.client.executions.get(execution.id)
        return execution, deployment_id
    def _assert_execution_cancelled(self, execution, deployment_id):
        """Assert the execution ended CANCELLED and that the mock operation
        ran exactly once (i.e. was not resumed after the cancel)."""
        self.assertEquals(Execution.CANCELLED, execution.status)
        invocations = self.get_plugin_data(
            plugin_name='testmockoperations',
            deployment_id=deployment_id
        )['mock_operation_invocation']
        self.assertEqual(1, len(invocations))
        self.assertDictEqual(invocations[0], {'before-sleep': None})
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os

import webob.dec
import webob.exc

import traffic.api.openstack
from traffic.api.openstack import wsgi
from traffic.api.openstack import xmlutil
from traffic import exception
from traffic import flags
from traffic.openstack.common.gettextutils import _
from traffic.openstack.common import importutils
from traffic.openstack.common import log as logging
import traffic.policy
# Module-level logger and the global flags/configuration object.
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class ExtensionDescriptor(object):
    """Base class that defines the contract for extensions.

    Note that you don't have to derive from this class to have a valid
    extension; it is purely a convenience.  Instances register themselves
    with the ExtensionManager passed to __init__.
    """
    # The name of the extension, e.g., 'Fox In Socks'
    name = None
    # The alias for the extension, e.g., 'FOXNSOX'
    alias = None
    # Description comes from the docstring for the class
    # The XML namespace for the extension, e.g.,
    # 'http://www.fox.in.socks/api/ext/pie/v1.0'
    namespace = None
    # The timestamp when the extension was last updated, e.g.,
    # '2011-01-22T13:25:27-06:00'
    updated = None
    def __init__(self, ext_mgr):
        """Register extension with the extension manager."""
        ext_mgr.register(self)
        self.ext_mgr = ext_mgr
    def get_resources(self):
        """List of extensions.ResourceExtension extension objects.
        Resources define new nouns, and are accessible through URLs.
        """
        # Default: no additional resources.
        resources = []
        return resources
    def get_controller_extensions(self):
        """List of extensions.ControllerExtension extension objects.
        Controller extensions are used to extend existing controllers.
        """
        # Default: no controller extensions.
        controller_exts = []
        return controller_exts
    @classmethod
    def nsmap(cls):
        """Synthesize a namespace map from extension."""
        # Start with a base nsmap (ext_nsmap is the module-level default map)
        nsmap = ext_nsmap.copy()
        # Add the namespace for the extension
        nsmap[cls.alias] = cls.namespace
        return nsmap
    @classmethod
    def xmlname(cls, name):
        """Synthesize element and attribute names."""
        return '{%s}%s' % (cls.namespace, name)
def make_ext(elem):
    """Populate an extension template element with the standard attributes
    (name/namespace/alias/updated), a description child and atom links.

    NOTE(review): the one-argument elem.set(...) calls presumably bind each
    attribute to the same-named field of the selected extension object at
    serialization time -- confirm against traffic.api.openstack.xmlutil.
    """
    elem.set('name')
    elem.set('namespace')
    elem.set('alias')
    elem.set('updated')
    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'
    xmlutil.make_links(elem, 'links')
# Default XML namespace map shared by the extension templates below.
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
    """XML template builder for serializing a single extension."""
    def construct(self):
        root = xmlutil.TemplateElement('extension', selector='extension')
        make_ext(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
    """XML template builder for serializing a list of extensions."""
    def construct(self):
        root = xmlutil.TemplateElement('extensions')
        # One <extension> element per item selected by 'extensions'.
        elem = xmlutil.SubTemplateElement(root, 'extension',
                                          selector='extensions')
        make_ext(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsResource(wsgi.Resource):
    """WSGI resource listing/showing the loaded API extensions.

    Only read operations are supported; delete/create answer 404.
    """

    def __init__(self, extension_manager):
        self.extension_manager = extension_manager
        super(ExtensionsResource, self).__init__(None)

    def _translate(self, ext):
        """Return the serializable dict view of a single extension."""
        return {'name': ext.name,
                'alias': ext.alias,
                'description': ext.__doc__,
                'namespace': ext.namespace,
                'updated': ext.updated,
                # TODO(dprince): implement extension links
                'links': []}

    @wsgi.serializers(xml=ExtensionsTemplate)
    def index(self, req):
        """List every loaded extension, sorted by alias."""
        manager = self.extension_manager
        return dict(extensions=[self._translate(ext)
                                for ext in manager.sorted_extensions()])

    @wsgi.serializers(xml=ExtensionTemplate)
    def show(self, req, id):
        """Show one extension; 404 if the alias is unknown."""
        try:
            # NOTE(dprince): the extensions alias is used as the 'id' for show
            ext = self.extension_manager.extensions[id]
        except KeyError:
            raise webob.exc.HTTPNotFound()
        return dict(extension=self._translate(ext))

    def delete(self, req, id):
        raise webob.exc.HTTPNotFound()

    def create(self, req):
        raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
    """Load extensions from the configured extension path.

    See traffic/tests/api/openstack/volume/extensions/foxinsocks.py for an
    example extension implementation.

    NOTE(review): this class never initializes self.extensions,
    self.sorted_ext_list or self.cls_list -- presumably an __init__ (or a
    subclass) is expected to set them before use; confirm against callers.
    """
    def is_loaded(self, alias):
        # True if an extension with this alias has been registered.
        return alias in self.extensions
    def sorted_extensions(self):
        # Generator yielding extensions sorted by alias; the sorted list is
        # cached and invalidated by register().
        if self.sorted_ext_list is None:
            self.sorted_ext_list = sorted(self.extensions.iteritems())
        for _alias, ext in self.sorted_ext_list:
            yield ext
    def register(self, ext):
        """Register an extension, rejecting malformed or duplicate aliases."""
        # Do nothing if the extension doesn't check out
        if not self._check_extension(ext):
            return
        alias = ext.alias
        LOG.audit(_('Loaded extension: %s'), alias)
        if alias in self.extensions:
            raise exception.TrafficException("Found duplicate extension: %s"
                                             % alias)
        self.extensions[alias] = ext
        # Invalidate the cached sorted list.
        self.sorted_ext_list = None
    def get_resources(self):
        """Returns a list of ResourceExtension objects."""
        resources = []
        # The extensions listing itself is always exposed as a resource.
        resources.append(ResourceExtension('extensions',
                                           ExtensionsResource(self)))
        for ext in self.sorted_extensions():
            try:
                resources.extend(ext.get_resources())
            except AttributeError:
                # NOTE(dprince): Extension aren't required to have resource
                # extensions
                pass
        return resources
    def get_controller_extensions(self):
        """Returns a list of ControllerExtension objects."""
        controller_exts = []
        for ext in self.sorted_extensions():
            try:
                get_ext_method = ext.get_controller_extensions
            except AttributeError:
                # NOTE(Vek): Extensions aren't required to have
                # controller extensions
                continue
            controller_exts.extend(get_ext_method())
        return controller_exts
    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        # Probing the attributes via the debug log doubles as validation:
        # a missing attribute raises AttributeError and rejects the ext.
        try:
            LOG.debug(_('Ext name: %s'), extension.name)
            LOG.debug(_('Ext alias: %s'), extension.alias)
            LOG.debug(_('Ext description: %s'),
                      ' '.join(extension.__doc__.strip().split()))
            LOG.debug(_('Ext namespace: %s'), extension.namespace)
            LOG.debug(_('Ext updated: %s'), extension.updated)
        except AttributeError as ex:
            LOG.exception(_("Exception loading extension: %s"), unicode(ex))
            return False
        return True
    def load_extension(self, ext_factory):
        """Execute an extension factory.
        Loads an extension. The 'ext_factory' is the name of a
        callable that will be imported and called with one
        argument--the extension manager. The factory callable is
        expected to call the register() method at least once.
        """
        LOG.debug(_("Loading extension %s"), ext_factory)
        if isinstance(ext_factory, basestring):
            # Load the factory
            factory = importutils.import_class(ext_factory)
        else:
            factory = ext_factory
        # Call it
        LOG.debug(_("Calling extension factory %s"), ext_factory)
        factory(self)
    def _load_extensions(self):
        """Load extensions specified on the command line."""
        extensions = list(self.cls_list)
        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                # Best-effort: a broken extension must not abort loading.
                LOG.warn(_('Failed to load extension %(ext_factory)s: '
                           '%(exc)s') % locals())
class ControllerExtension(object):
    """Extend core controllers of traffic OpenStack API.

    Plain value object tying an extension to the collection it extends
    and the controller providing the extension behavior.
    """
    def __init__(self, extension, collection, controller):
        (self.extension,
         self.collection,
         self.controller) = extension, collection, controller
class ResourceExtension(object):
    """Add top level resources to the OpenStack API in traffic."""

    def __init__(self, collection, controller=None, parent=None,
                 collection_actions=None, member_actions=None,
                 custom_routes_fn=None, inherits=None):
        # Falsy action maps (None or {}) normalize to fresh empty dicts --
        # same truthiness-based defaulting as before, avoids shared mutable
        # default arguments.
        self.collection = collection
        self.controller = controller
        self.parent = parent
        self.collection_actions = collection_actions or {}
        self.member_actions = member_actions or {}
        self.custom_routes_fn = custom_routes_fn
        self.inherits = inherits
def wrap_errors(fn):
    """Ensure errors are not passed along.

    HTTPExceptions raised by `fn` propagate untouched; any other exception
    is converted to HTTPInternalServerError so internal details are not
    leaked to API clients.
    """
    # Bug fix: without functools.wraps the decorated callable reported
    # __name__ == 'wrapped', breaking introspection/logging of handlers.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except webob.exc.HTTPException:
            raise
        except Exception:
            raise webob.exc.HTTPInternalServerError()
    return wrapped
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions.

    Walks path[0] recursively.  Each .py module produces a factory path
    "<package><relpkg>.<module>.<Classname>" (module name capitalized).  A
    package subdirectory exposing an `extension` callable is delegated to
    wholesale; otherwise it is recursed into.

    :param ext_mgr: ExtensionManager to register extensions into
    :param logger: logger for skip/failure messages
    :param path: list whose first element is the directory to walk
    :param package: dotted-path prefix for generated factory names
    :param ext_list: optional whitelist of class names to load
    """
    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))
        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)
            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue
            # Try loading it
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" %
                         (package, relpkg, root, classname))
            if ext_list is not None and classname not in ext_list:
                logger.debug("Skipping extension: %s" % classpath)
                continue
            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                # Best-effort: one broken extension must not abort the walk.
                logger.warn(_('Failed to load extension %(classpath)s: '
                              '%(exc)s') % locals())
        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue
            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" %
                        (package, relpkg, dname))
            try:
                ext = importutils.import_class(ext_name)
            except ImportError:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    ext(ext_mgr)
                except Exception as exc:
                    logger.warn(_('Failed to load extension %(ext_name)s: '
                                  '%(exc)s') % locals())
        # Update the list of directories we'll explore...
        # (in-place slice assignment so os.walk only descends into subdirs)
        dirnames[:] = subdirs
def extension_authorizer(api_name, extension_name):
    """Build an authorizer enforcing the '<api>_extension:<name>' policy
    rule via traffic.policy; denial raises from enforce()."""
    # The policy action never changes between calls, so compute it once.
    action = '%s_extension:%s' % (api_name, extension_name)

    def authorize(context, target=None):
        if target is None:
            # Default target: the caller's own project/user.
            target = {'project_id': context.project_id,
                      'user_id': context.user_id}
        traffic.policy.enforce(context, action, target)
    return authorize
def soft_extension_authorizer(api_name, extension_name):
    """Like extension_authorizer(), but the returned callable reports
    True/False instead of raising when the caller is not authorized."""
    hard_authorize = extension_authorizer(api_name, extension_name)
    def authorize(context):
        try:
            hard_authorize(context)
            return True
        except exception.NotAuthorized:
            return False
    return authorize
|
|
# coding: utf-8
from sqlalchemy.testing import AssertsExecutionResults, eq_, \
assert_raises_message, AssertsCompiledSQL
from sqlalchemy import Table, Column, MetaData, Integer, String, bindparam, \
Sequence, ForeignKey, text, select, func, extract, literal_column, \
tuple_, DateTime, Time, literal, and_, Date, or_
from sqlalchemy.testing import engines, fixtures
from sqlalchemy.testing.assertsql import DialectSQL, CursorSQL
from sqlalchemy import testing
from sqlalchemy import exc
from sqlalchemy.dialects import postgresql
import datetime
# Module-level table handles, populated by MatchTest.setup_class().
matchtable = cattable = None
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
    """INSERT behavior on PostgreSQL: explicit ids, SERIAL, sequences."""
    __only_on__ = 'postgresql'
    __backend__ = True
    @classmethod
    def setup_class(cls):
        # Fresh MetaData bound to the test database for the class run.
        cls.metadata = MetaData(testing.db)
    def teardown(self):
        # Drop everything each test created and reset the MetaData.
        self.metadata.drop_all()
        self.metadata.clear()
    def test_compiled_insert(self):
        # A pre-compiled INSERT with a bindparam can be executed repeatedly
        # with different parameter sets; SERIAL assigns ids 1, 2.
        table = Table(
            'testtable', self.metadata, Column(
                'id', Integer, primary_key=True),
            Column(
                'data', String(30)))
        self.metadata.create_all()
        ins = table.insert(
            inline=True,
            values={'data': bindparam('x')}).compile()
        ins.execute({'x': 'five'}, {'x': 'seven'})
        eq_(
            table.select().execute().fetchall(),
            [(1, 'five'), (2, 'seven')]
        )
    def test_foreignkey_missing_insert(self):
        # An FK-derived primary key has no default: inserting without an id
        # must fail at compile time, for both implicit_returning settings.
        Table(
            't1', self.metadata,
            Column('id', Integer, primary_key=True))
        t2 = Table(
            't2',
            self.metadata,
            Column(
                'id',
                Integer,
                ForeignKey('t1.id'),
                primary_key=True))
        self.metadata.create_all()
        # want to ensure that "null value in column "id" violates not-
        # null constraint" is raised (IntegrityError on psycoopg2, but
        # ProgrammingError on pg8000), and not "ProgrammingError:
        # (ProgrammingError) relationship "t2_id_seq" does not exist".
        # the latter corresponds to autoincrement behavior, which is not
        # the case here due to the foreign key.
        for eng in [
            engines.testing_engine(options={'implicit_returning': False}),
            engines.testing_engine(options={'implicit_returning': True})
        ]:
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                eng.execute, t2.insert()
            )
    def test_sequence_insert(self):
        # Explicit Sequence as primary-key default, no RETURNING.
        table = Table(
            'testtable',
            self.metadata,
            Column(
                'id',
                Integer,
                Sequence('my_seq'),
                primary_key=True),
            Column(
                'data',
                String(30)))
        self.metadata.create_all()
        self._assert_data_with_sequence(table, 'my_seq')
    @testing.requires.returning
    def test_sequence_returning_insert(self):
        # Explicit Sequence as primary-key default, with RETURNING.
        table = Table(
            'testtable',
            self.metadata,
            Column(
                'id',
                Integer,
                Sequence('my_seq'),
                primary_key=True),
            Column(
                'data',
                String(30)))
        self.metadata.create_all()
        self._assert_data_with_sequence_returning(table, 'my_seq')
    def test_opt_sequence_insert(self):
        # optional=True Sequence is ignored on PG (SERIAL used instead).
        table = Table(
            'testtable', self.metadata,
            Column(
                'id', Integer, Sequence(
                    'my_seq', optional=True), primary_key=True),
            Column(
                'data', String(30)))
        self.metadata.create_all()
        self._assert_data_autoincrement(table)
    @testing.requires.returning
    def test_opt_sequence_returning_insert(self):
        # optional=True Sequence is ignored on PG; RETURNING variant.
        table = Table(
            'testtable', self.metadata,
            Column(
                'id', Integer, Sequence(
                    'my_seq', optional=True), primary_key=True),
            Column(
                'data', String(30)))
        self.metadata.create_all()
        self._assert_data_autoincrement_returning(table)
    def test_autoincrement_insert(self):
        # Plain integer primary key -> SERIAL autoincrement, no RETURNING.
        table = Table(
            'testtable', self.metadata,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'data', String(30)))
        self.metadata.create_all()
        self._assert_data_autoincrement(table)
    @testing.requires.returning
    def test_autoincrement_returning_insert(self):
        # Plain integer primary key -> SERIAL autoincrement, with RETURNING.
        table = Table(
            'testtable', self.metadata,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'data', String(30)))
        self.metadata.create_all()
        self._assert_data_autoincrement_returning(table)
    def test_noautoincrement_insert(self):
        # autoincrement=False: ids must always be supplied explicitly.
        table = Table(
            'testtable',
            self.metadata,
            Column(
                'id',
                Integer,
                primary_key=True,
                autoincrement=False),
            Column(
                'data',
                String(30)))
        self.metadata.create_all()
        self._assert_data_noautoincrement(table)
    def _assert_data_autoincrement(self, table):
        """Run a battery of INSERTs (explicit ids, prefetched ids, inline,
        executemany) with implicit_returning disabled, asserting both the
        emitted SQL and the resulting rows; then repeat against a reflected
        copy of the table."""
        engine = \
            engines.testing_engine(options={'implicit_returning': False})
        with self.sql_execution_asserter(engine) as asserter:
            with engine.connect() as conn:
                # execute with explicit id
                r = conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
                eq_(r.inserted_primary_key, [30])
                # execute with prefetch id
                r = conn.execute(table.insert(), {'data': 'd2'})
                eq_(r.inserted_primary_key, [1])
                # executemany with explicit ids
                conn.execute(
                    table.insert(),
                    {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
                # executemany, uses SERIAL
                conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
                # single execute, explicit id, inline
                conn.execute(
                    table.insert(inline=True),
                    {'id': 33, 'data': 'd7'})
                # single execute, inline, uses SERIAL
                conn.execute(table.insert(inline=True), {'data': 'd8'})
        # Assertions are order-sensitive: they mirror the execution order.
        asserter.assert_(
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                {'id': 30, 'data': 'd1'}),
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                {'id': 1, 'data': 'd2'}),
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
            DialectSQL(
                'INSERT INTO testtable (data) VALUES (:data)',
                [{'data': 'd5'}, {'data': 'd6'}]),
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                [{'id': 33, 'data': 'd7'}]),
            DialectSQL(
                'INSERT INTO testtable (data) VALUES (:data)',
                [{'data': 'd8'}]),
        )
        with engine.connect() as conn:
            eq_(
                conn.execute(table.select()).fetchall(),
                [
                    (30, 'd1'),
                    (1, 'd2'),
                    (31, 'd3'),
                    (32, 'd4'),
                    (2, 'd5'),
                    (3, 'd6'),
                    (33, 'd7'),
                    (4, 'd8'),
                ]
            )
            conn.execute(table.delete())
        # test the same series of events using a reflected version of
        # the table
        m2 = MetaData(engine)
        table = Table(table.name, m2, autoload=True)
        with self.sql_execution_asserter(engine) as asserter:
            with engine.connect() as conn:
                conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
                r = conn.execute(table.insert(), {'data': 'd2'})
                # SERIAL continued from the first round: next id is 5.
                eq_(r.inserted_primary_key, [5])
                conn.execute(
                    table.insert(),
                    {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
                conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
                conn.execute(
                    table.insert(inline=True), {'id': 33, 'data': 'd7'})
                conn.execute(table.insert(inline=True), {'data': 'd8'})
        asserter.assert_(
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                {'id': 30, 'data': 'd1'}),
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                {'id': 5, 'data': 'd2'}),
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
            DialectSQL(
                'INSERT INTO testtable (data) VALUES (:data)',
                [{'data': 'd5'}, {'data': 'd6'}]),
            DialectSQL(
                'INSERT INTO testtable (id, data) VALUES (:id, :data)',
                [{'id': 33, 'data': 'd7'}]),
            DialectSQL(
                'INSERT INTO testtable (data) VALUES (:data)',
                [{'data': 'd8'}]),
        )
        with engine.connect() as conn:
            eq_(
                conn.execute(table.select()).fetchall(),
                [
                    (30, 'd1'),
                    (5, 'd2'),
                    (31, 'd3'),
                    (32, 'd4'),
                    (6, 'd5'),
                    (7, 'd6'),
                    (33, 'd7'),
                    (8, 'd8'),
                ]
            )
            conn.execute(table.delete())
    def _assert_data_autoincrement_returning(self, table):
        """Same battery as _assert_data_autoincrement, but with
        implicit_returning enabled: single prefetch inserts are expected to
        use RETURNING testtable.id instead of a separate id fetch."""
        engine = \
            engines.testing_engine(options={'implicit_returning': True})
        with self.sql_execution_asserter(engine) as asserter:
            with engine.connect() as conn:
                # execute with explicit id
                r = conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
                eq_(r.inserted_primary_key, [30])
                # execute with prefetch id
                r = conn.execute(table.insert(), {'data': 'd2'})
                eq_(r.inserted_primary_key, [1])
                # executemany with explicit ids
                conn.execute(
                    table.insert(),
                    {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
                # executemany, uses SERIAL
                conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
                # single execute, explicit id, inline
                conn.execute(
                    table.insert(inline=True), {'id': 33, 'data': 'd7'})
                # single execute, inline, uses SERIAL
                conn.execute(table.insert(inline=True), {'data': 'd8'})
        asserter.assert_(
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       {'id': 30, 'data': 'd1'}),
            DialectSQL('INSERT INTO testtable (data) VALUES (:data) RETURNING '
                       'testtable.id', {'data': 'd2'}),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
            DialectSQL('INSERT INTO testtable (data) VALUES (:data)',
                       [{'data': 'd5'}, {'data': 'd6'}]),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 33, 'data': 'd7'}]),
            DialectSQL('INSERT INTO testtable (data) VALUES (:data)',
                       [{'data': 'd8'}]),
        )
        with engine.connect() as conn:
            eq_(
                conn.execute(table.select()).fetchall(),
                [
                    (30, 'd1'),
                    (1, 'd2'),
                    (31, 'd3'),
                    (32, 'd4'),
                    (2, 'd5'),
                    (3, 'd6'),
                    (33, 'd7'),
                    (4, 'd8'),
                ]
            )
            conn.execute(table.delete())
        # test the same series of events using a reflected version of
        # the table
        m2 = MetaData(engine)
        table = Table(table.name, m2, autoload=True)
        with self.sql_execution_asserter(engine) as asserter:
            with engine.connect() as conn:
                conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
                r = conn.execute(table.insert(), {'data': 'd2'})
                # SERIAL continued from the first round: next id is 5.
                eq_(r.inserted_primary_key, [5])
                conn.execute(
                    table.insert(),
                    {'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'})
                conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
                conn.execute(
                    table.insert(inline=True), {'id': 33, 'data': 'd7'})
                conn.execute(table.insert(inline=True), {'data': 'd8'})
        asserter.assert_(
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       {'id': 30, 'data': 'd1'}),
            DialectSQL('INSERT INTO testtable (data) VALUES (:data) RETURNING '
                       'testtable.id', {'data': 'd2'}),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
            DialectSQL('INSERT INTO testtable (data) VALUES (:data)',
                       [{'data': 'd5'}, {'data': 'd6'}]),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 33, 'data': 'd7'}]),
            DialectSQL(
                'INSERT INTO testtable (data) VALUES (:data)',
                [{'data': 'd8'}]),
        )
        with engine.connect() as conn:
            eq_(
                conn.execute(table.select()).fetchall(),
                [
                    (30, 'd1'),
                    (5, 'd2'),
                    (31, 'd3'),
                    (32, 'd4'),
                    (6, 'd5'),
                    (7, 'd6'),
                    (33, 'd7'),
                    (8, 'd8'),
                ]
            )
            conn.execute(table.delete())
    def _assert_data_with_sequence(self, table, seqname):
        """INSERT battery for a table with an explicit Sequence default and
        implicit_returning disabled: prefetch runs `select nextval(...)`,
        inline inserts embed nextval('<seqname>') in the VALUES clause."""
        engine = \
            engines.testing_engine(options={'implicit_returning': False})
        with self.sql_execution_asserter(engine) as asserter:
            with engine.connect() as conn:
                conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
                conn.execute(table.insert(), {'data': 'd2'})
                conn.execute(table.insert(),
                             {'id': 31, 'data': 'd3'},
                             {'id': 32, 'data': 'd4'})
                conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
                conn.execute(table.insert(inline=True),
                             {'id': 33, 'data': 'd7'})
                conn.execute(table.insert(inline=True), {'data': 'd8'})
        asserter.assert_(
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       {'id': 30, 'data': 'd1'}),
            # id prefetch goes through an explicit nextval() round trip
            CursorSQL("select nextval('my_seq')"),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       {'id': 1, 'data': 'd2'}),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
            DialectSQL(
                "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
                ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 33, 'data': 'd7'}]),
            DialectSQL(
                "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
                ":data)" % seqname, [{'data': 'd8'}]),
        )
        with engine.connect() as conn:
            eq_(
                conn.execute(table.select()).fetchall(),
                [
                    (30, 'd1'),
                    (1, 'd2'),
                    (31, 'd3'),
                    (32, 'd4'),
                    (2, 'd5'),
                    (3, 'd6'),
                    (33, 'd7'),
                    (4, 'd8'),
                ]
            )
        # cant test reflection here since the Sequence must be
        # explicitly specified
    def _assert_data_with_sequence_returning(self, table, seqname):
        """INSERT battery for a table with an explicit Sequence default and
        implicit_returning enabled: prefetch inserts embed nextval() and use
        RETURNING testtable.id instead of a separate nextval round trip."""
        engine = \
            engines.testing_engine(options={'implicit_returning': True})
        with self.sql_execution_asserter(engine) as asserter:
            with engine.connect() as conn:
                conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
                conn.execute(table.insert(), {'data': 'd2'})
                conn.execute(table.insert(),
                             {'id': 31, 'data': 'd3'},
                             {'id': 32, 'data': 'd4'})
                conn.execute(table.insert(), {'data': 'd5'}, {'data': 'd6'})
                conn.execute(
                    table.insert(inline=True), {'id': 33, 'data': 'd7'})
                conn.execute(table.insert(inline=True), {'data': 'd8'})
        asserter.assert_(
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       {'id': 30, 'data': 'd1'}),
            DialectSQL("INSERT INTO testtable (id, data) VALUES "
                       "(nextval('my_seq'), :data) RETURNING testtable.id",
                       {'data': 'd2'}),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
            DialectSQL(
                "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
                ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
            DialectSQL('INSERT INTO testtable (id, data) VALUES (:id, :data)',
                       [{'id': 33, 'data': 'd7'}]),
            DialectSQL(
                "INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
                ":data)" % seqname, [{'data': 'd8'}]),
        )
        with engine.connect() as conn:
            eq_(
                conn.execute(table.select()).fetchall(),
                [
                    (30, 'd1'),
                    (1, 'd2'),
                    (31, 'd3'),
                    (32, 'd4'),
                    (2, 'd5'),
                    (3, 'd6'),
                    (33, 'd7'),
                    (4, 'd8'),
                ]
            )
        # cant test reflection here since the Sequence must be
        # explicitly specified
    def _assert_data_noautoincrement(self, table):
        """With autoincrement=False there is no id default at all: inserts
        lacking an explicit id must raise CompileError, while explicit-id
        inserts succeed; repeated against a reflected copy of the table.

        NOTE(review): the single/executemany assert pairs appear twice in
        the first connection block -- looks intentional (re-check after a
        failed compile), but confirm against upstream history.
        """
        engine = \
            engines.testing_engine(options={'implicit_returning': False})
        with engine.connect() as conn:
            conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                conn.execute, table.insert(), {'data': 'd2'})
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                conn.execute, table.insert(), {'data': 'd2'},
                {'data': 'd3'})
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                conn.execute, table.insert(), {'data': 'd2'})
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                conn.execute, table.insert(), {'data': 'd2'},
                {'data': 'd3'})
            conn.execute(
                table.insert(),
                {'id': 31, 'data': 'd2'}, {'id': 32, 'data': 'd3'})
            conn.execute(table.insert(inline=True), {'id': 33, 'data': 'd4'})
            eq_(conn.execute(table.select()).fetchall(), [
                (30, 'd1'),
                (31, 'd2'),
                (32, 'd3'),
                (33, 'd4')])
            conn.execute(table.delete())
        # test the same series of events using a reflected version of
        # the table
        m2 = MetaData(engine)
        table = Table(table.name, m2, autoload=True)
        with engine.connect() as conn:
            conn.execute(table.insert(), {'id': 30, 'data': 'd1'})
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                conn.execute, table.insert(), {'data': 'd2'})
            assert_raises_message(
                exc.CompileError,
                ".*has no Python-side or server-side default.*",
                conn.execute, table.insert(), {'data': 'd2'},
                {'data': 'd3'})
            conn.execute(
                table.insert(),
                {'id': 31, 'data': 'd2'}, {'id': 32, 'data': 'd3'})
            conn.execute(table.insert(inline=True), {'id': 33, 'data': 'd4'})
            eq_(conn.execute(table.select()).fetchall(), [
                (30, 'd1'),
                (31, 'd2'),
                (32, 'd3'),
                (33, 'd4')])
class ServerSideCursorsTest(fixtures.TestBase, AssertsExecutionResults):
    """Verify when psycopg2 named (server-side) cursors are used, as a
    function of engine-, connection- and statement-level options.  A named
    cursor is detected via a truthy result.cursor.name."""
    __requires__ = 'psycopg2_compatibility',
    def _fixture(self, server_side_cursors):
        # Engine with the global server_side_cursors flag on or off.
        self.engine = engines.testing_engine(
            options={'server_side_cursors': server_side_cursors}
        )
        return self.engine
    def tearDown(self):
        engines.testing_reaper.close_all()
        self.engine.dispose()
    def test_global_string(self):
        # Raw string statements use a named cursor when globally enabled.
        engine = self._fixture(True)
        result = engine.execute('select 1')
        assert result.cursor.name
    def test_global_text(self):
        # text() constructs behave like raw strings.
        engine = self._fixture(True)
        result = engine.execute(text('select 1'))
        assert result.cursor.name
    def test_global_expr(self):
        # Core expression constructs also use a named cursor when enabled.
        engine = self._fixture(True)
        result = engine.execute(select([1]))
        assert result.cursor.name
    def test_global_off_explicit(self):
        engine = self._fixture(False)
        result = engine.execute(text('select 1'))
        # It should be off globally ...
        assert not result.cursor.name
    def test_stmt_option(self):
        engine = self._fixture(False)
        s = select([1]).execution_options(stream_results=True)
        result = engine.execute(s)
        # ... but enabled for this one.
        assert result.cursor.name
    def test_conn_option(self):
        engine = self._fixture(False)
        # and this one
        result = \
            engine.connect().execution_options(stream_results=True).\
            execute('select 1'
                    )
        assert result.cursor.name
    def test_stmt_enabled_conn_option_disabled(self):
        # Statement-level True is overridden by connection-level False.
        engine = self._fixture(False)
        s = select([1]).execution_options(stream_results=True)
        # not this one
        result = \
            engine.connect().execution_options(stream_results=False).\
            execute(s)
        assert not result.cursor.name
    def test_stmt_option_disabled(self):
        # Statement-level False overrides the global True.
        engine = self._fixture(True)
        s = select([1]).execution_options(stream_results=False)
        result = engine.execute(s)
        assert not result.cursor.name
    def test_aliases_and_ss(self):
        engine = self._fixture(False)
        s1 = select([1]).execution_options(stream_results=True).alias()
        result = engine.execute(s1)
        assert result.cursor.name
        # s1's options shouldn't affect s2 when s2 is used as a
        # from_obj.
        s2 = select([1], from_obj=s1)
        result = engine.execute(s2)
        assert not result.cursor.name
    def test_for_update_expr(self):
        engine = self._fixture(True)
        s1 = select([1], for_update=True)
        result = engine.execute(s1)
        assert result.cursor.name
    def test_for_update_string(self):
        engine = self._fixture(True)
        result = engine.execute('SELECT 1 FOR UPDATE')
        assert result.cursor.name
    def test_text_no_ss(self):
        engine = self._fixture(False)
        s = text('select 42')
        result = engine.execute(s)
        assert not result.cursor.name
    def test_text_ss_option(self):
        engine = self._fixture(False)
        s = text('select 42').execution_options(stream_results=True)
        result = engine.execute(s)
        assert result.cursor.name
    def test_roundtrip(self):
        # Full CRUD round trip through a server-side-cursor engine.
        engine = self._fixture(True)
        test_table = Table('test_table', MetaData(engine),
                           Column('id', Integer, primary_key=True),
                           Column('data', String(50)))
        test_table.create(checkfirst=True)
        try:
            test_table.insert().execute(data='data1')
            nextid = engine.execute(Sequence('test_table_id_seq'))
            test_table.insert().execute(id=nextid, data='data2')
            eq_(test_table.select().execute().fetchall(), [(1, 'data1'
                ), (2, 'data2')])
            test_table.update().where(
                test_table.c.id == 2).values(
                data=test_table.c.data +
                ' updated').execute()
            eq_(test_table.select().execute().fetchall(),
                [(1, 'data1'), (2, 'data2 updated')])
            test_table.delete().execute()
            eq_(test_table.count().scalar(), 0)
        finally:
            test_table.drop(checkfirst=True)
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
    """Exercise PostgreSQL full-text search via ``Column.match()``, which
    renders as ``<col> @@ to_tsquery(...)``.

    ``setup_class`` populates two small tables (categories and titles) once
    per class; individual tests query them read-only.
    """

    __only_on__ = 'postgresql >= 8.3'
    __backend__ = True

    @classmethod
    def setup_class(cls):
        # Tables are module-level globals so the test methods below can
        # reference them without threading them through fixtures.
        global metadata, cattable, matchtable
        metadata = MetaData(testing.db)
        cattable = Table(
            'cattable', metadata,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'description', String(50)))
        matchtable = Table(
            'matchtable', metadata,
            Column(
                'id', Integer, primary_key=True),
            Column(
                'title', String(200)),
            Column(
                'category_id', Integer, ForeignKey('cattable.id')))
        metadata.create_all()
        cattable.insert().execute([{'id': 1, 'description': 'Python'},
                                   {'id': 2, 'description': 'Ruby'}])
        matchtable.insert().execute(
            [{'id': 1, 'title': 'Agile Web Development with Rails',
              'category_id': 2},
             {'id': 2, 'title': 'Dive Into Python', 'category_id': 1},
             {'id': 3, 'title': "Programming Matz's Ruby", 'category_id': 2},
             {'id': 4, 'title': 'The Definitive Guide to Django',
              'category_id': 1},
             {'id': 5, 'title': 'Python in a Nutshell', 'category_id': 1}])

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    # The two compile tests below only check generated SQL text; they are
    # skipped on drivers whose paramstyle differs from the expected one.
    @testing.fails_on('postgresql+pg8000', 'uses positional')
    @testing.fails_on('postgresql+zxjdbc', 'uses qmark')
    def test_expression_pyformat(self):
        self.assert_compile(matchtable.c.title.match('somstr'),
                            'matchtable.title @@ to_tsquery(%(title_1)s'
                            ')')

    @testing.fails_on('postgresql+psycopg2', 'uses pyformat')
    @testing.fails_on('postgresql+pypostgresql', 'uses pyformat')
    @testing.fails_on('postgresql+pygresql', 'uses pyformat')
    @testing.fails_on('postgresql+zxjdbc', 'uses qmark')
    @testing.fails_on('postgresql+psycopg2cffi', 'uses pyformat')
    def test_expression_positional(self):
        self.assert_compile(matchtable.c.title.match('somstr'),
                            'matchtable.title @@ to_tsquery(%s)')

    def test_simple_match(self):
        results = matchtable.select().where(
            matchtable.c.title.match('python')).order_by(
            matchtable.c.id).execute().fetchall()
        eq_([2, 5], [r.id for r in results])

    def test_not_match(self):
        results = matchtable.select().where(
            ~matchtable.c.title.match('python')).order_by(
            matchtable.c.id).execute().fetchall()
        eq_([1, 3, 4], [r.id for r in results])

    def test_simple_match_with_apostrophe(self):
        results = matchtable.select().where(
            matchtable.c.title.match("Matz's")).execute().fetchall()
        eq_([3], [r.id for r in results])

    def test_simple_derivative_match(self):
        # Stemming: 'nutshells' matches the stored title 'Nutshell'.
        results = matchtable.select().where(
            matchtable.c.title.match('nutshells')).execute().fetchall()
        eq_([5], [r.id for r in results])

    def test_or_match(self):
        # or_() of two match() calls and a single 'a | b' tsquery string
        # must return the same rows.
        results1 = matchtable.select().where(
            or_(
                matchtable.c.title.match('nutshells'),
                matchtable.c.title.match('rubies'))).order_by(
            matchtable.c.id).execute().fetchall()
        eq_([3, 5], [r.id for r in results1])
        results2 = matchtable.select().where(
            matchtable.c.title.match('nutshells | rubies')).order_by(
            matchtable.c.id).execute().fetchall()
        eq_([3, 5], [r.id for r in results2])

    def test_and_match(self):
        # Same equivalence check for and_() versus the 'a & b' form.
        results1 = matchtable.select().where(
            and_(
                matchtable.c.title.match('python'),
                matchtable.c.title.match('nutshells'))).execute().fetchall()
        eq_([5], [r.id for r in results1])
        results2 = \
            matchtable.select().where(
                matchtable.c.title.match('python & nutshells'
                                         )).execute().fetchall()
        eq_([5], [r.id for r in results2])

    def test_match_across_joins(self):
        results = matchtable.select().where(
            and_(
                cattable.c.id == matchtable.c.category_id, or_(
                    cattable.c.description.match('Ruby'),
                    matchtable.c.title.match('nutshells')))).order_by(
            matchtable.c.id).execute().fetchall()
        eq_([1, 3, 5], [r.id for r in results])
class TupleTest(fixtures.TestBase):
    """SQL tuple containment: evaluate ``('a', 'b') IN (<candidates>)``
    server-side and compare against the expected boolean."""

    __only_on__ = 'postgresql'
    __backend__ = True

    def test_tuple_containment(self):
        cases = [
            ([('a', 'b')], True),
            ([('a', 'c')], False),
            ([('f', 'q'), ('a', 'b')], True),
            ([('f', 'q'), ('a', 'c')], False),
        ]
        for candidates, expected in cases:
            needle = tuple_(literal_column("'a'"), literal_column("'b'"))
            haystack = [
                tuple_(*[literal_column("'%s'" % letter)
                         for letter in pair])
                for pair in candidates
            ]
            actual = testing.db.execute(
                select([needle.in_(haystack)])).scalar()
            eq_(actual, expected)
class ExtractTest(fixtures.TablesTest):

    """The rationale behind this test is that for many years we've had a
    system of embedding type casts into the expressions rendered by
    visit_extract() on the postgresql platform. The reason for this cast is
    not clear. So here we try to produce a wide range of cases to ensure
    that these casts are not needed; see [ticket:2740].

    """

    __only_on__ = 'postgresql'
    __backend__ = True
    # One fixed row is inserted once; tests are read-only, so nothing is
    # deleted between tests.
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def setup_bind(cls):
        from sqlalchemy import event
        eng = engines.testing_engine()

        # Pin every connection to UTC so the expected field values below
        # are deterministic regardless of the server's local time zone.
        @event.listens_for(eng, "connect")
        def connect(dbapi_conn, rec):
            cursor = dbapi_conn.cursor()
            cursor.execute("SET SESSION TIME ZONE 0")
            cursor.close()
        return eng

    @classmethod
    def define_tables(cls, metadata):
        # One column per temporal type we want EXTRACT to work against.
        Table('t', metadata,
              Column('id', Integer, primary_key=True),
              Column('dtme', DateTime),
              Column('dt', Date),
              Column('tm', Time),
              Column('intv', postgresql.INTERVAL),
              Column('dttz', DateTime(timezone=True))
              )

    @classmethod
    def insert_data(cls):
        # TODO: why does setting hours to anything
        # not affect the TZ in the DB col ?
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(hours=4)

        cls.bind.execute(
            cls.tables.t.insert(),
            {
                'dtme': datetime.datetime(2012, 5, 10, 12, 15, 25),
                'dt': datetime.date(2012, 5, 10),
                'tm': datetime.time(12, 15, 25),
                'intv': datetime.timedelta(seconds=570),
                'dttz': datetime.datetime(2012, 5, 10, 12, 15, 25,
                                          tzinfo=TZ())
            },
        )

    def _test(self, expr, field="all", overrides=None):
        """Run EXTRACT(<field>, expr) for every field in the named preset
        ("all", "time", "date", "all+tz", or an explicit dict) and compare
        each scalar result against the expected value.

        ``overrides`` patches individual expected entries of the preset.
        """
        t = self.tables.t
        if field == "all":
            fields = {"year": 2012, "month": 5, "day": 10,
                      "epoch": 1336652125.0,
                      "hour": 12, "minute": 15}
        elif field == "time":
            fields = {"hour": 12, "minute": 15, "second": 25}
        elif field == 'date':
            fields = {"year": 2012, "month": 5, "day": 10}
        elif field == 'all+tz':
            fields = {"year": 2012, "month": 5, "day": 10,
                      "epoch": 1336637725.0,
                      "hour": 8,
                      "timezone": 0
                      }
        else:
            fields = field

        if overrides:
            fields.update(overrides)

        for field in fields:
            result = self.bind.scalar(
                select([extract(field, expr)]).select_from(t))
            eq_(result, fields[field])

    def test_one(self):
        t = self.tables.t
        self._test(t.c.dtme, "all")

    def test_two(self):
        # datetime + interval arithmetic inside EXTRACT.
        t = self.tables.t
        self._test(t.c.dtme + t.c.intv,
                   overrides={"epoch": 1336652695.0, "minute": 24})

    def test_three(self):
        self.tables.t

        actual_ts = self.bind.scalar(func.current_timestamp()) - \
            datetime.timedelta(days=5)
        self._test(func.current_timestamp() - datetime.timedelta(days=5),
                   {"hour": actual_ts.hour, "year": actual_ts.year,
                    "month": actual_ts.month}
                   )

    def test_four(self):
        t = self.tables.t
        self._test(datetime.timedelta(days=5) + t.c.dt,
                   overrides={"day": 15, "epoch": 1337040000.0, "hour": 0,
                              "minute": 0}
                   )

    def test_five(self):
        t = self.tables.t
        self._test(func.coalesce(t.c.dtme, func.current_timestamp()),
                   overrides={"epoch": 1336652125.0})

    def test_six(self):
        t = self.tables.t
        self._test(t.c.tm + datetime.timedelta(seconds=30), "time",
                   overrides={"second": 55})

    def test_seven(self):
        # interval - interval yields a zero interval; every field is 0.
        self._test(literal(datetime.timedelta(seconds=10))
                   - literal(datetime.timedelta(seconds=10)), "all",
                   overrides={"hour": 0, "minute": 0, "month": 0,
                              "year": 0, "day": 0, "epoch": 0})

    def test_eight(self):
        t = self.tables.t
        self._test(t.c.tm + datetime.timedelta(seconds=30),
                   {"hour": 12, "minute": 15, "second": 55})

    def test_nine(self):
        # Raw SQL text expression: EXTRACT must not need a cast here either.
        self._test(text("t.dt + t.tm"))

    def test_ten(self):
        t = self.tables.t
        self._test(t.c.dt + t.c.tm)

    def test_eleven(self):
        self._test(func.current_timestamp() - func.current_timestamp(),
                   {"year": 0, "month": 0, "day": 0, "hour": 0}
                   )

    def test_twelve(self):
        t = self.tables.t
        actual_ts = self.bind.scalar(
            func.current_timestamp()).replace(tzinfo=None) - \
            datetime.datetime(2012, 5, 10, 12, 15, 25)

        self._test(
            func.current_timestamp() - func.coalesce(
                t.c.dtme,
                func.current_timestamp()
            ),
            {"day": actual_ts.days})

    def test_thirteen(self):
        t = self.tables.t
        self._test(t.c.dttz, "all+tz")

    def test_fourteen(self):
        t = self.tables.t
        self._test(t.c.tm, "time")

    def test_fifteen(self):
        t = self.tables.t
        self._test(datetime.timedelta(days=5) + t.c.dtme,
                   overrides={"day": 15, "epoch": 1337084125.0}
                   )
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plots relating to trans-in."""
from makani.analysis.plot.python import mplot
from makani.control import control_types
from makani.lib.python import c_helpers
from matplotlib.pyplot import plot
from matplotlib.pyplot import xlim
from matplotlib.pyplot import ylim
import numpy as np
# Shorthand for the figure-declaration decorator used on every plot method.
MFig = mplot.PlotGroup.MFig  # pylint: disable=invalid-name
# Name<->value helper for the TransInGate enum; used by the gate bitmask plot.
_TRANS_IN_GATE_HELPER = c_helpers.EnumHelper('TransInGate', control_types)
class Plots(mplot.PlotGroup):
  """Trans-in plots.

  Each Plot* method is wrapped by MFig and receives three telemetry records:
  ti (presumably trans-in controller telemetry), c (controller telemetry;
  has 'time', 'flight_mode', 'state_est', ...), and s (simulator telemetry;
  may be None, e.g. for flight data -- every use is guarded).
  """

  def SetXlimTransIn(self):
    """Restrict the x-axis to the span of the kFlightModeTransIn samples."""
    c = self.data[1]
    xlim(c['time'][c['flight_mode']
                   == control_types.kFlightModeTransIn][[0, -1]])

  @MFig(title='Gates', ylabel='Gate Label', xlabel='Time [s]')
  def PlotGates(self, ti, c, s):
    """Plot the trans-in gate bitmask over time, labeled by gate name."""
    mplot.PlotBitMask(
        c['time'],
        c['flight_mode_gates'][:, control_types.kFlightModeTransIn],
        _TRANS_IN_GATE_HELPER)

  @MFig(title='Radial Velocity', ylabel='Radial Velocity [m/s]',
        xlabel='Time [s]')
  def PlotRadialVelocity(self, ti, c, s):
    """Commanded vs. estimated radial velocity in the trans-in frame."""
    plot(c['time'], c['trans_in']['radial_vel_ti_cmd'], 'b:', label='Cmd')
    plot(c['time'], c['trans_in']['radial_vel_ti'], 'b', label='Est')

  @MFig(title='Airspeed', ylabel='Airspeed [m/s]', xlabel='Time [s]')
  def PlotAirspeed(self, ti, c, s):
    """Airspeed from several sources: pitot, estimator, command, sim."""
    plot(c['time'], c['estimator']['apparent_wind_pitot']['v'],
         label='Pitot Estimate')
    plot(c['time'], c['state_est']['apparent_wind']['sph_f']['v'],
         label='Estimate')
    # Older logs may lack these trans-in fields; plot only if present.
    if 'airspeed_cmd' in ti.dtype.names:
      plot(c['time'], ti['airspeed_cmd'], label='Cmd')
    if 'airspeed' in ti.dtype.names:
      plot(c['time'], ti['airspeed'], label='Ti')
    # Inertial (ground) speed magnitude for comparison with airspeed.
    plot(c['time'], (c['state_est']['Vg_f']['x']**2.0
                     + c['state_est']['Vg_f']['y']**2.0
                     + c['state_est']['Vg_f']['z']**2.0)**0.5,
         'r:', label='Kite Speed')
    if s is not None:
      plot(s['time'], s['wing']['apparent_wind_b']['v'], 'b:', label='Sim')
      plot(s['time'], s['wing']['wind_g']['z'], label='Updraft')

  @MFig(title='Aerodynamic Angles', ylabel='Angle [deg]', xlabel='Time [s]')
  def PlotAeroAngles(self, ti, c, s):
    """Angle of attack / sideslip: commands, estimates, and sim truth."""
    plot(c['time'], np.rad2deg(ti['angle_of_attack_cmd']), 'b:',
         label='AOA Cmd')
    # pitch - climb angle is a kinematic proxy for angle of attack.
    plot(c['time'],
         np.rad2deg(ti['eulers_ti2b']['y'] - ti['aero_climb_angle']),
         'r-.', label='Pitch-Gamma')
    plot(c['time'],
         np.rad2deg(c['state_est']['apparent_wind']['sph_f']['alpha']), 'b',
         label='Est AOA')
    if 'int_angle_of_attack' in ti.dtype.names:
      plot(c['time'],
           np.rad2deg(ti['int_angle_of_attack']), 'b-.', label='Int AOA')
    plot(c['time'],
         np.rad2deg(c['state_est']['apparent_wind']['sph_f']['beta']),
         'g', label='Est AOS')
    if s is not None:
      plot(s['time'], np.rad2deg(s['wing']['apparent_wind_b']['alpha']),
           'b--', label='Sim AOA')
      plot(s['time'], np.rad2deg(s['wing']['apparent_wind_b']['beta']),
           'g--', label='Sim AOS')
    ylim([-10.0, 10.0])

  @MFig(title='Tension', ylabel='Tension [N]', xlabel='Time [s]')
  def PlotTension(self, ti, c, s):
    """Tether tension command (if logged) vs. filtered measurement."""
    if 'tension_cmd' in ti.dtype.names:
      plot(c['time'], ti['tension_cmd'], label='Tension Cmd')
    plot(c['time'], c['state_est']['tether_force_b']['vector_f']['z'],
         label='Tension f')

  @MFig(title='Apparent Wind', ylabel='Speed [m/s]', xlabel='Time [s]')
  def PlotApparentWind(self, ti, c, s):
    """Unfiltered apparent wind speed and angles vs. sim truth.

    NOTE(review): speed [m/s] and angles [deg] share one axis here.
    """
    plot(c['time'], c['state_est']['apparent_wind']['sph']['v'], 'b',
         label='airspeed')
    plot(c['time'],
         np.rad2deg(c['state_est']['apparent_wind']['sph']['alpha']),
         'g', label='AOA')
    plot(c['time'],
         np.rad2deg(c['state_est']['apparent_wind']['sph']['beta']),
         'r', label='AOS')
    if s is not None:
      plot(s['time'], s['wing']['apparent_wind_b']['v'], 'b:',
           label='sim airspeed')
      plot(s['time'], np.rad2deg(s['wing']['apparent_wind_b']['alpha']),
           'g:', label='sim AOA')
      plot(s['time'], np.rad2deg(s['wing']['apparent_wind_b']['beta']),
           'r:', label='sim AOS')

  @MFig(title='Aero Climb Angle', ylabel='Angle [deg]', xlabel='Time [s]')
  def PlotAeroClimbAngle(self, ti, c, s):
    """Climb angle estimates/commands plus the sphere-tangent direction."""
    # Inertial climb angle from the ground-frame velocity estimate.
    climb_angle = np.arctan2(-c['state_est']['Vg']['z'],
                             np.hypot(c['state_est']['Vg']['x'],
                                      c['state_est']['Vg']['y']))
    elevation_angle_g = np.arctan2(-c['state_est']['Xg']['z'],
                                   np.hypot(c['state_est']['Xg']['x'],
                                            c['state_est']['Xg']['y']))
    plot(c['time'], np.rad2deg(ti['aero_climb_angle']), 'b-',
         label='Aero Climb Est')
    if 'aero_climb_angle_cmd' in ti.dtype.names:
      plot(c['time'], np.rad2deg(ti['aero_climb_angle_cmd']), 'b:',
           label='Aero Climb Cmd')
    plot(c['time'], np.rad2deg(climb_angle), 'k', label='Climb Angle Est')
    # 90 deg minus elevation = direction tangent to the tether sphere.
    plot(c['time'], 90.0 - np.rad2deg(elevation_angle_g), 'g-.',
         label='Tangent')
    if s is not None:
      # Sim aero climb angle from air-relative velocity (Vg - wind).
      plot(s['time'], np.rad2deg(np.arcsin(
          -(s['wing']['Vg']['z'] - s['wing']['wind_g']['z']) /
          np.hypot(s['wing']['Vg']['z'] - s['wing']['wind_g']['z'],
                   np.hypot(s['wing']['Vg']['x'] - s['wing']['wind_g']['x'],
                            s['wing']['Vg']['y'] - s['wing']['wind_g']['y'])))),
           'r', label='Sim Aero Climb Angle')

  @MFig(title='Radial Position', ylabel='Radial Position [m]',
        xlabel='Time [s]')
  def PlotRadialPosition(self, ti, c, s):
    """Distance from the origin in the ground and trans-in frames."""
    radial_pos_g = np.hypot(c['state_est']['Xg']['x'],
                            np.hypot(c['state_est']['Xg']['y'],
                                     c['state_est']['Xg']['z']))
    radial_pos_ti = np.hypot(ti['wing_pos_ti']['x'],
                             ti['wing_pos_ti']['z'])
    # NOTE(review): 434.0 + 5.0 is a hard-coded reference radius, and this
    # 'Cmd' line shares color 'g' with the 'G' line below -- confirm intent.
    plot(c['time'], 434.0 + 5.0 * np.ones(c['time'].shape), 'g', label='Cmd')
    plot(c['time'], radial_pos_g, 'g', label='G')
    plot(c['time'], radial_pos_ti, 'r', label='Ti')

  @MFig(title='Lateral Position', ylabel='Position [m]', xlabel='Time [s]')
  def PlotLateralPosition(self, ti, c, s):
    """Lateral (y) position in the trans-in frame; command defaults to 0."""
    if 'wing_pos_ti_y_cmd' in ti.dtype.names:
      plot(c['time'], ti['wing_pos_ti_y_cmd'], 'b:', label='Cmd')
    else:
      plot(c['time'], np.zeros(ti['wing_pos_ti']['y'].shape), 'b:', label='Cmd')
    plot(c['time'], ti['wing_pos_ti']['y'], 'b', label='Est')

  @MFig(title='Lateral Velocity', ylabel='Velocity [m/s]', xlabel='Time [s]')
  def PlotLateralVelocity(self, ti, c, s):
    """Lateral (y) velocity in the trans-in frame; command defaults to 0."""
    if 'wing_vel_ti_y_cmd' in ti.dtype.names:
      plot(c['time'], ti['wing_vel_ti_y_cmd'], 'b:', label='Cmd')
    else:
      plot(c['time'], np.zeros(ti['wing_vel_ti']['y'].shape), 'b:', label='Cmd')
    plot(c['time'], ti['wing_vel_ti']['y'], 'b', label='Est')

  @MFig(title='Euler Angle Errors', ylabel='Angle Error [deg]',
        xlabel='Time [s]')
  def PlotEulerAngleError(self, ti, c, s):
    """Euler angles of body and command frames relative to trans-in frame."""
    mplot.PlotVec3(c['time'], ti['eulers_ti2b'], scale=180.0 / np.pi,
                   labels=['Roll', 'Pitch', 'Yaw'])
    mplot.PlotVec3(c['time'], ti['eulers_ti2cmd'], scale=180.0 / np.pi,
                   labels=['Roll Cmd', 'Pitch Cmd', 'Yaw Cmd'], linestyle=':')

  @MFig(title='Attitude Error', ylabel='Error [deg]', xlabel='Time [s]')
  def PlotAttitudeError(self, ti, c, s):
    """Axis-angle attitude error from body to commanded attitude."""
    labels = ['Roll Error', 'Pitch Error', 'Yaw Error']
    mplot.PlotVec3(c['time'], ti['axis_b2cmd'], scale=180.0 / np.pi,
                   labels=labels)

  @MFig(title='Body Rates', ylabel='Rotation Rate [rad/s]', xlabel='Time [s]')
  def PlotBodyRates(self, ti, c, s):
    """Filtered body rates vs. commanded rates."""
    labels = ['Roll Rate', 'Pitch Rate', 'Yaw Rate']
    mplot.PlotVec3(c['time'], c['state_est']['pqr_f'], labels=labels)
    mplot.PlotVec3(c['time'], ti['pqr_cmd'], labels=labels, linestyle=':')
    plot(c['time'], ti['pitch_rate_b_cmd'], 'k:', label='Pitch Rate Cmd')

  @MFig(title='Aileron Deflections', ylabel='Angle [deg]',
        xlabel='Time [s]')
  def PlotAilerons(self, ti, c, s):
    """Commanded vs. measured aileron deflections (flaps A1, A2, A7, A8)."""
    indices = [(0, 'A1'), (1, 'A2'), (4, 'A7'), (5, 'A8')]
    mplot.PlotComponents(c['time'], c['control_output']['flaps'],
                         [(i, l + ' Cmd') for i, l in indices],
                         linestyle=':', scale=180.0 / np.pi)
    mplot.PlotComponents(c['time'], c['control_input']['flaps'],
                         [(i, l + ' Response') for i, l in indices],
                         scale=180.0 / np.pi)

  @MFig(title='Flap Deflections', ylabel='Angle [deg]',
        xlabel='Time [s]')
  def PlotFlaps(self, ti, c, s):
    """Commanded vs. measured center flap deflections (A3, A4)."""
    indices = [(2, 'A3'), (3, 'A4')]
    mplot.PlotComponents(c['time'], c['control_output']['flaps'],
                         [(i, l + ' Cmd') for i, l in indices],
                         linestyle=':', scale=180.0 / np.pi)
    mplot.PlotComponents(c['time'], c['control_input']['flaps'],
                         [(i, l + ' Response') for i, l in indices],
                         scale=180.0 / np.pi)

  @MFig(title='Elevator Deflection', ylabel='Angle [deg]', xlabel='Time [s]')
  def PlotElevator(self, ti, c, s):
    """Elevator (flap index 6) command/response, with +/-10 deg AOA-relative
    stall reference lines when sim data is available."""
    plot(c['time'], np.rad2deg(c['control_output']['flaps'][:, 6]), ':',
         label='Command')
    plot(c['time'], np.rad2deg(c['control_input']['flaps'][:, 6]),
         label='Response')
    if s is not None:
      plot(s['time'], 10.0-np.rad2deg(s['wing']['apparent_wind_b']['alpha']),
           'k:', label='Stall')
      plot(s['time'], -10.0-np.rad2deg(s['wing']['apparent_wind_b']['alpha']),
           'k:', label='Stall')

  @MFig(title='Rudder Deflection', ylabel='Rudder Angle [deg]',
        xlabel='Time [s]')
  def PlotRudder(self, ti, c, s):
    """Rudder (flap index 7) command vs. response."""
    plot(c['time'], np.rad2deg(c['control_output']['flaps'][:, 7]), ':',
         label='Rudder Command')
    plot(c['time'], np.rad2deg(c['control_input']['flaps'][:, 7]),
         label='Rudder Response')

  @MFig(title='Motor Moments', ylabel='Moment [N-m]', xlabel='Time [s]')
  def PlotMotorMoments(self, ti, c, s):
    """Commanded vs. available rotor moments about the body axes."""
    mplot.PlotVec3(c['time'], c['thrust_moment_avail']['moment'],
                   labels=['Roll Avail', 'Pitch Avail', 'Yaw Avail'])
    mplot.PlotVec3(c['time'], c['thrust_moment']['moment'], linestyle=':',
                   labels=['Roll Cmd', 'Pitch Cmd', 'Yaw Cmd'])

  @MFig(title='CL', ylabel='CL [#]', xlabel='Time [s]')
  def PlotCL(self, ti, c, s):
    """Commanded lift coefficient vs. the simulator's actual CL."""
    plot(c['time'], ti['CL_cmd'], label='Command')
    if s is not None:
      plot(s['time'], s['wing']['CL'], ':', label='Sim')

  @MFig(title='Thrust', ylabel='Thrust [N]', xlabel='Time [s]')
  def PlotThrust(self, ti, c, s):
    """Thrust command, available thrust, integrator state, and sim force."""
    plot(c['time'], c['thrust_moment']['thrust'], 'b:',
         label='Thrust Cmd')
    plot(c['time'], c['thrust_moment_avail']['thrust'], 'b',
         label='Thrust Avail')
    plot(c['time'], ti['int_airspeed_thrust_cmd'], 'g',
         label='Thrust Int.')
    if s is not None:
      plot(s['time'], s['wing']['fm_rotors']['force']['x'],
           'r', label='Sim')
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from distutils.version import LooseVersion
from .pycompat import OrderedDict, zip, dask_array_type
from .common import full_like
from .combine import concat
from .ops import (inject_bottleneck_rolling_methods,
inject_datasetrolling_methods, has_bottleneck, bn)
from .dask_array_ops import dask_rolling_wrapper
class Rolling(object):
    """Base object implementing the moving (rolling) window pattern.

    Validates and stores the shared window configuration; subclasses
    (DataArrayRolling, DatasetRolling) implement the reductions.

    See Also
    --------
    Dataset.groupby
    DataArray.groupby
    Dataset.rolling
    DataArray.rolling
    """

    # Attributes reported by __repr__, in display order.
    _attributes = ['window', 'min_periods', 'center', 'dim']

    def __init__(self, obj, min_periods=None, center=False, **windows):
        """
        Moving window object.

        Parameters
        ----------
        obj : Dataset or DataArray
            Object to window.
        min_periods : int, default None
            Minimum number of observations in window required to have a value
            (otherwise result is NA). The default, None, is equivalent to
            setting min_periods equal to the size of the window.
        center : boolean, default False
            Set the labels at the center of the window.
        **windows : dim=window
            dim : str
                Name of the dimension to create the rolling iterator
                along (e.g., `time`).
            window : int
                Size of the moving window.

        Returns
        -------
        rolling : type of input argument

        Raises
        ------
        ValueError
            If more or fewer than one dim/window pair is given, or if
            ``window`` or ``min_periods`` is not positive.
        """
        if (has_bottleneck and
                (LooseVersion(bn.__version__) < LooseVersion('1.0'))):
            # Old bottleneck lacks the moving-window features we need, so
            # the slower numpy fallback is used. (Bug fix: the original
            # adjacent string literals concatenated to "insteadof".)
            warnings.warn('xarray requires bottleneck version of 1.0 or '
                          'greater for rolling operations. Rolling '
                          'aggregation methods will use numpy instead '
                          'of bottleneck.')

        if len(windows) != 1:
            raise ValueError('exactly one dim/window should be provided')

        dim, window = next(iter(windows.items()))

        if window <= 0:
            raise ValueError('window must be > 0')

        self.obj = obj

        # attributes
        self.window = window
        # Keep the raw value for repr; _min_periods holds the effective
        # value, where None means "require a full window".
        self.min_periods = min_periods
        if min_periods is None:
            self._min_periods = window
        else:
            if min_periods <= 0:
                raise ValueError(
                    'min_periods must be greater than zero or None')
            self._min_periods = min_periods
        self.center = center
        self.dim = dim

    def __repr__(self):
        """provide a nice str repr of our rolling object"""
        # None-valued attributes (e.g. min_periods left at default) are
        # omitted from the repr.
        attrs = ["{k}->{v}".format(k=k, v=getattr(self, k))
                 for k in self._attributes
                 if getattr(self, k, None) is not None]
        return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
                                          attrs=','.join(attrs))

    def __len__(self):
        # Length along the rolling dimension of the wrapped object.
        return self.obj.sizes[self.dim]
class DataArrayRolling(Rolling):
    """Rolling window object for a DataArray.

    This class adds the following class methods:
        + _reduce_method(cls, func)
        + _bottleneck_reduce(cls, func)

    These class methods will be used to inject numpy or bottleneck functions
    by doing

    >>> func = cls._reduce_method(f)
    >>> func.__name__ = name
    >>> setattr(cls, name, func)

    in ops.inject_bottleneck_rolling_methods.

    After the injection, the Rolling object will have `name` (such as `mean`
    or `median`) methods, e.g. it enables the following call,

    >>> data.rolling().mean()

    If bottleneck is installed, some bottleneck methods will be used instead
    of the numpy method.

    See also
    --------
    + rolling.DataArrayRolling
    + ops.inject_bottleneck_rolling_methods
    """

    def __init__(self, obj, min_periods=None, center=False, **windows):
        # See Rolling.__init__ for parameter semantics.
        super(DataArrayRolling, self).__init__(obj, min_periods=min_periods,
                                               center=center, **windows)
        # Lazily built label -> index-slice mapping (see `windows` property).
        self._windows = None
        # Boolean DataArray marking windows with >= _min_periods elements.
        self._valid_windows = None
        self.window_indices = None
        self.window_labels = None

        self._setup_windows()

    @property
    def windows(self):
        # OrderedDict mapping each window label to its index slice,
        # built on first access.
        if self._windows is None:
            self._windows = OrderedDict(zip(self.window_labels,
                                            self.window_indices))
        return self._windows

    def __iter__(self):
        """Yield (label, window) pairs, one per position along the dim."""
        for (label, indices, valid) in zip(self.window_labels,
                                           self.window_indices,
                                           self._valid_windows):
            window = self.obj.isel(**{self.dim: indices})
            if not valid:
                # NOTE(review): invalid (shorter than min_periods) windows
                # are replaced by an all-True boolean array of the same
                # shape -- this looks like a mask rather than data; confirm
                # the intended fill value.
                window = full_like(window, fill_value=True, dtype=bool)
            yield (label, window)

    def _setup_windows(self):
        """
        Find the indices and labels for each window
        """
        from .dataarray import DataArray
        self.window_labels = self.obj[self.dim]
        window = int(self.window)
        dim_size = self.obj[self.dim].size
        # Window i is the trailing slice [stops[i] - window, stops[i]),
        # clipped at 0, so windows near the start are shorter.
        stops = np.arange(dim_size) + 1
        starts = np.maximum(stops - window, 0)
        if self._min_periods > 1:
            valid_windows = (stops - starts) >= self._min_periods
        else:
            # No invalid windows
            valid_windows = np.ones(dim_size, dtype=bool)
        self._valid_windows = DataArray(valid_windows, dims=(self.dim, ),
                                        coords=self.obj[self.dim].coords)
        self.window_indices = [slice(start, stop)
                               for start, stop in zip(starts, stops)]

    def _center_result(self, result):
        """center result"""
        # Shift labels back by half a window so they sit at window centers.
        shift = (-self.window // 2) + 1
        return result.shift(**{self.dim: shift})

    def reduce(self, func, **kwargs):
        """Reduce the items in this group by applying `func` along some
        dimension(s).

        Parameters
        ----------
        func : function
            Function which can be called in the form
            `func(x, **kwargs)` to return the result of collapsing an
            np.ndarray over an the rolling dimension.
        **kwargs : dict
            Additional keyword arguments passed on to `func`.

        Returns
        -------
        reduced : DataArray
            Array with summarized data.
        """
        windows = [window.reduce(func, dim=self.dim, **kwargs)
                   for _, window in self]

        # Find valid windows based on count
        if self.dim in self.obj.coords:
            concat_dim = self.window_labels
        else:
            concat_dim = self.dim
        counts = concat([window.count(dim=self.dim) for _, window in self],
                        dim=concat_dim)
        result = concat(windows, dim=concat_dim)
        # restore dim order
        result = result.transpose(*self.obj.dims)

        # Mask out positions whose window had fewer than min_periods values.
        result = result.where(counts >= self._min_periods)

        if self.center:
            result = self._center_result(result)

        return result

    @classmethod
    def _reduce_method(cls, func):
        """
        Methods to return a wrapped function for any function `func` for
        numpy methods.
        """

        def wrapped_func(self, **kwargs):
            return self.reduce(func, **kwargs)
        return wrapped_func

    @classmethod
    def _bottleneck_reduce(cls, func):
        """
        Methods to return a wrapped function for any function `func` for
        bottleneck method, except for `median`.
        """

        def wrapped_func(self, **kwargs):
            from .dataarray import DataArray

            # bottleneck doesn't allow min_count to be 0, although it should
            # work the same as if min_count = 1
            if self.min_periods is not None and self.min_periods == 0:
                min_count = 1
            else:
                min_count = self.min_periods

            axis = self.obj.get_axis_num(self.dim)

            if isinstance(self.obj.data, dask_array_type):
                # Route through the dask-aware wrapper to keep laziness.
                values = dask_rolling_wrapper(func, self.obj.data,
                                              window=self.window,
                                              min_count=min_count,
                                              axis=axis)
            else:
                values = func(self.obj.data, window=self.window,
                              min_count=min_count, axis=axis)
            result = DataArray(values, self.obj.coords)

            if self.center:
                result = self._center_result(result)

            return result
        return wrapped_func
class DatasetRolling(Rolling):
    """An object that implements the moving window pattern for Dataset.

    This class has an OrderedDict named self.rollings, that is a collection
    of DataArrayRollings for all the DataArrays in the Dataset, except for
    those not depending on rolling dimension.

    reduce() method returns a new Dataset generated from a set of
    self.rollings[key].reduce().

    See Also
    --------
    Dataset.groupby
    DataArray.groupby
    Dataset.rolling
    DataArray.rolling
    """

    def __init__(self, obj, min_periods=None, center=False, **windows):
        """
        Moving window object for Dataset.

        Parameters
        ----------
        obj : Dataset
            Object to window.
        min_periods : int, default None
            Minimum number of observations in window required to have a value
            (otherwise result is NA). The default, None, is equivalent to
            setting min_periods equal to the size of the window.
        center : boolean, default False
            Set the labels at the center of the window.
        **windows : dim=window
            dim : str
                Name of the dimension to create the rolling iterator
                along (e.g., `time`).
            window : int
                Size of the moving window.

        Returns
        -------
        rolling : type of input argument

        Raises
        ------
        KeyError
            If the rolling dimension is not a dimension of ``obj``.
        """
        super(DatasetRolling, self).__init__(obj,
                                             min_periods, center, **windows)
        if self.dim not in self.obj.dims:
            raise KeyError(self.dim)
        # Keep each Rolling object as an OrderedDict
        self.rollings = OrderedDict()
        for key, da in self.obj.data_vars.items():
            # keeps rollings only for the dataset variables depending on
            # self.dim; other variables are passed through unchanged.
            if self.dim in da.dims:
                self.rollings[key] = DataArrayRolling(da, min_periods,
                                                      center, **windows)

    def reduce(self, func, **kwargs):
        """Reduce the items in this group by applying `func` along some
        dimension(s).

        Parameters
        ----------
        func : function
            Function which can be called in the form
            `func(x, **kwargs)` to return the result of collapsing an
            np.ndarray over an the rolling dimension.
        **kwargs : dict
            Additional keyword arguments passed on to `func`.

        Returns
        -------
        reduced : Dataset
            Dataset with variables depending on the rolling dimension
            reduced; other variables copied through unchanged.
        """
        from .dataset import Dataset
        reduced = OrderedDict()
        for key, da in self.obj.data_vars.items():
            if self.dim in da.dims:
                reduced[key] = self.rollings[key].reduce(func, **kwargs)
            else:
                reduced[key] = self.obj[key]
        return Dataset(reduced, coords=self.obj.coords)

    @classmethod
    def _reduce_method(cls, func):
        """
        Return a wrapped function for injecting numpy and bottleneck methods.
        see ops.inject_datasetrolling_methods
        """

        def wrapped_func(self, **kwargs):
            from .dataset import Dataset
            reduced = OrderedDict()
            for key, da in self.obj.data_vars.items():
                if self.dim in da.dims:
                    # Delegate to the per-variable rolling's injected method
                    # of the same name (e.g. mean, sum).
                    reduced[key] = getattr(self.rollings[key],
                                           func.__name__)(**kwargs)
                else:
                    reduced[key] = self.obj[key]
            return Dataset(reduced, coords=self.obj.coords)
        return wrapped_func
# At import time, attach the generated reduction methods (e.g. mean, median;
# see the class docstrings above) onto the rolling classes.
inject_bottleneck_rolling_methods(DataArrayRolling)
inject_datasetrolling_methods(DatasetRolling)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
from io import BytesIO
import json
from struct import pack, unpack
import http.client
import urllib.parse
from test_framework.test_framework import GuldenTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
hex_str_to_bytes,
)
from test_framework.messages import BLOCK_HEADER_SIZE
class ReqType(Enum):
    """Response format requested from the REST server (URI suffix added
    by test_rest_request)."""
    JSON = 1  # '.json' suffix
    BIN = 2   # '.bin' suffix
    HEX = 3   # '.hex' suffix
class RetType(Enum):
    """How test_rest_request returns the HTTP reply."""
    OBJ = 1    # raw http.client response object
    BYTES = 2  # raw response body bytes
    JSON = 3   # body parsed as JSON, floats as Decimal
def filter_output_indices_by_value(vouts, value):
    """Yield the output index (``n`` field) of each entry in *vouts* whose
    ``value`` field equals *value*, in input order."""
    for entry in vouts:
        if entry['value'] != value:
            continue
        yield entry['n']
class RESTTest (GuldenTestFramework):
    def set_test_params(self):
        """Configure the test: two fresh nodes starting from an empty chain."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Enable the REST server on node 0 only; node 1 is a plain peer.
        self.extra_args = [["-rest"], []]
    def skip_test_if_missing_module(self):
        """Skip the test entirely if the node was built without wallet support."""
        self.skip_if_no_wallet()
def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON, body='', status=200, ret_type=RetType.JSON):
rest_uri = '/rest' + uri
if req_type == ReqType.JSON:
rest_uri += '.json'
elif req_type == ReqType.BIN:
rest_uri += '.bin'
elif req_type == ReqType.HEX:
rest_uri += '.hex'
conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
self.log.debug('%s %s %s', http_method, rest_uri, body)
if http_method == 'GET':
conn.request('GET', rest_uri)
elif http_method == 'POST':
conn.request('POST', rest_uri, body)
resp = conn.getresponse()
assert_equal(resp.status, status)
if ret_type == RetType.OBJ:
return resp
elif ret_type == RetType.BYTES:
return resp.read()
elif ret_type == RetType.JSON:
return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
def run_test(self):
self.url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mine blocks and send Gulden to node 1")
# Random address so node1's balance doesn't increase
not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generatetoaddress(100, not_related_address)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.log.info("Test the /tx URI")
json_obj = self.test_rest_request("/tx/{}".format(txid))
assert_equal(json_obj['txid'], txid)
# Check hex format response
hex_response = self.test_rest_request("/tx/{}".format(txid), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than_or_equal(int(hex_response.getheader('content-length')),
json_obj['size']*2)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout']) # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
self.log.info("Query an unspent TXO using the /getutxos URI")
self.nodes[1].generatetoaddress(1, not_related_address)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
# Check chainTip response
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
self.log.info("Query a spent TXO using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
# Check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
# Make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
# Check bitmap
assert_equal(json_obj['bitmap'], "0")
self.log.info("Query two TXOs using the /getutxos URI")
json_obj = self.test_rest_request("/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
self.log.info("Query the TXOs using the /getutxos URI with a binary response")
bin_request = b'\x01\x02'
for txid, n in [spending, spent]:
bin_request += hex_str_to_bytes(txid)
bin_request += pack("i", n)
bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
output = BytesIO(bin_response)
chain_height, = unpack("i", output.read(4))
response_hash = output.read(32)[::-1].hex()
assert_equal(bb_hash, response_hash) # check if getutxo's chaintip during calculation was fine
assert_equal(chain_height, 102) # chain height must be 102
self.log.info("Test the /getutxos URI with and without /checkmempool")
# Create a transaction, check that it's found with /checkmempool, but
# not found without. Then confirm the transaction and check that it's
# found with or without /checkmempool.
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_obj = self.test_rest_request("/tx/{}".format(txid))
# get the spent output to later check for utxo (should be spent by then)
spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
# get n of 0.1 outpoint
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 0)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spent))
assert_equal(len(json_obj['utxos']), 0)
self.nodes[0].generate(1)
self.sync_all()
json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
json_obj = self.test_rest_request("/getutxos/checkmempool/{}-{}".format(*spending))
assert_equal(len(json_obj['utxos']), 1)
# Do some invalid requests
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body='{"checkmempool', status=400, ret_type=RetType.OBJ)
self.test_rest_request("/getutxos/checkmempool", http_method='POST', req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
# Test limits
long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=400, ret_type=RetType.OBJ)
long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
self.nodes[0].generate(1) # generate block to not affect upcoming tests
self.sync_all()
self.log.info("Test the /block, /blockhashbyheight and /headers URIs")
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
assert_equal(self.test_rest_request('/headers/1/{}'.format(bb_hash)), [])
self.test_rest_request('/block/{}'.format(bb_hash))
self.nodes[0].reconsiderblock(bb_hash)
# Check binary format
response = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_greater_than(int(response.getheader('content-length')), BLOCK_HEADER_SIZE)
response_bytes = response.read()
# Compare with block header
response_header = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
assert_equal(int(response_header.getheader('content-length')), BLOCK_HEADER_SIZE)
response_header_bytes = response_header.read()
assert_equal(response_bytes[:BLOCK_HEADER_SIZE], response_header_bytes)
# Check block hex format
response_hex = self.test_rest_request("/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_hex_bytes = response_hex.read().strip(b'\n')
assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
# Compare with hex block header
response_header_hex = self.test_rest_request("/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_greater_than(int(response_header_hex.getheader('content-length')), BLOCK_HEADER_SIZE*2)
response_header_hex_bytes = response_header_hex.read(BLOCK_HEADER_SIZE*2)
assert_equal(binascii.hexlify(response_bytes[:BLOCK_HEADER_SIZE]), response_header_hex_bytes)
# Check json format
block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
assert_equal(block_json_obj['hash'], bb_hash)
assert_equal(self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']))['blockhash'], bb_hash)
# Check hex/bin format
resp_hex = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.HEX, ret_type=RetType.OBJ)
assert_equal(resp_hex.read().decode('utf-8').rstrip(), bb_hash)
resp_bytes = self.test_rest_request("/blockhashbyheight/{}".format(block_json_obj['height']), req_type=ReqType.BIN, ret_type=RetType.BYTES)
blockhash = resp_bytes[::-1].hex()
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: -1")
self.test_rest_request("/blockhashbyheight/", ret_type=RetType.OBJ, status=400)
# Compare with json block header
json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
assert_equal(len(json_obj), 1) # ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) # request/response hash should be the same
# Compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot', 'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
assert_equal(json_obj[0][key], rpc_block_json[key])
# See if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
assert_equal(len(json_obj), 5) # now we should have 5 header objects
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
self.sync_all()
# Check that there are exactly 3 transactions in the TX memory pool before generating the block
json_obj = self.test_rest_request("/mempool/info")
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# Check that there are our submitted transactions in the TX memory pool
json_obj = self.test_rest_request("/mempool/contents")
for i, tx in enumerate(txs):
assert tx in json_obj
assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
# Now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
# Check if the 3 tx show up in the new block
json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
non_coinbase_txs = {tx['txid'] for tx in json_obj['tx']
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
# Check the same but without tx details
json_obj = self.test_rest_request("/block/notxdetails/{}".format(newblockhash[0]))
for tx in txs:
assert tx in json_obj['tx']
self.log.info("Test the /chaininfo URI")
bb_hash = self.nodes[0].getbestblockhash()
json_obj = self.test_rest_request("/chaininfo")
assert_equal(json_obj['bestblockhash'], bb_hash)
# Script entry point: run the REST functional test.
if __name__ == '__main__':
    RESTTest().main()
|
|
# Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Python Standard Logging integration with CloudWatch Logs
Double Buffered with background thread delivery.
We do an initial buffering on the log handler directly, to avoid
some of the overhead of pushing to the queue (albeit dubious as
std logging does default lock acquisition around handler emit).
also uses a single thread for all outbound. Background thread
uses a separate session.
"""
from c7n.exceptions import ClientError
import itertools
import logging
from operator import itemgetter
import threading
import time
try:
import Queue
except ImportError: # pragma: no cover
import queue as Queue
from c7n.utils import get_retry
# Sentinel objects placed on the delivery queue: FLUSH_MARKER asks the
# background thread to send its buffered events now; SHUTDOWN_MARKER tells
# its loop to exit.  Identity comparison is what makes them unambiguous.
FLUSH_MARKER = object()
SHUTDOWN_MARKER = object()
# Queue-empty exception, aliased so it works under both the Py2 `Queue`
# and Py3 `queue` module imported above.
EMPTY = Queue.Empty
class Error:
    """Symbolic names for the CloudWatch Logs API error codes this module handles."""

    AlreadyAccepted = "DataAlreadyAcceptedException"
    InvalidToken = "InvalidSequenceTokenException"
    ResourceExists = "ResourceAlreadyExistsException"

    @staticmethod
    def code(e):
        """Return the service error code embedded in a botocore ClientError."""
        details = e.response.get('Error', {})
        return details.get('Code')
class CloudWatchLogHandler(logging.Handler):
    """Python Log Handler to Send to Cloud Watch Logs

    https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html

    Records are buffered locally, handed to a queue in batches, and
    delivered by a background Transport thread using its own session.
    """

    # max records handed to the queue per batch
    batch_size = 20
    # seconds between forced flushes based on record timestamps
    batch_interval = 40
    # don't bother queueing until at least this many records are buffered
    batch_min_buffer = 10

    def __init__(self, log_group=__name__, log_stream=None,
                 session_factory=None):
        """Create the handler and ensure the log group exists.

        :param log_group: CloudWatch log group name (created if missing)
        :param log_stream: stream name; defaults to each record's logger name
        :param session_factory: zero-arg callable returning a boto session
        """
        super(CloudWatchLogHandler, self).__init__()
        self.log_group = log_group
        self.log_stream = log_stream
        self.session_factory = session_factory
        self.transport = None
        self.queue = Queue.Queue()
        self.threads = []
        # do some basic buffering before sending to transport to minimize
        # queue/threading overhead
        self.buf = []
        self.last_seen = time.time()
        # Logging module internally is tracking all handlers, for final
        # cleanup atexit, custodian is a bit more explicitly scoping shutdown to
        # each policy, so use a sentinel value to avoid deadlocks.
        self.shutdown = True
        # Create the log group up front, retrying on throttling; a
        # concurrent creator is tolerated via the ResourceExists check.
        retry = get_retry(('ThrottlingException',))
        try:
            client = self.session_factory().client('logs')
            logs = retry(
                client.describe_log_groups,
                logGroupNamePrefix=self.log_group)['logGroups']
            if not [l for l in logs if l['logGroupName'] == self.log_group]:
                retry(client.create_log_group,
                      logGroupName=self.log_group)
        except ClientError as e:
            if Error.code(e) != Error.ResourceExists:
                raise

    # Begin logging.Handler API
    def emit(self, message):
        """Send logs"""
        # We're sending messages asynchronously, bubble to caller when
        # we've detected an error on the message. This isn't great,
        # but options once we've gone async without a deferred/promise
        # aren't great.
        if self.transport and self.transport.error:
            raise self.transport.error
        # Sanity safety, people do like to recurse by attaching to
        # root log :-(
        if message.name.startswith('boto'):
            return
        msg = self.format_message(message)
        if not self.transport:
            # first record: mark live and spin up the delivery thread
            self.shutdown = False
            self.start_transports()
        self.buf.append(msg)
        # force a flush when batch_interval seconds elapsed between records
        self.flush_buffers(
            (message.created - self.last_seen >= self.batch_interval))
        self.last_seen = message.created

    def flush(self):
        """Ensure all logging output has been flushed."""
        if self.shutdown:
            return
        self.flush_buffers(force=True)
        self.queue.put(FLUSH_MARKER)
        self.queue.join()

    def close(self):
        """Flush-stop the background thread and drop thread references."""
        if self.shutdown:
            return
        self.shutdown = True
        self.queue.put(SHUTDOWN_MARKER)
        self.queue.join()
        for t in self.threads:
            t.join()
        self.threads = []
    # End logging.Handler API

    def format_message(self, msg):
        """format message."""
        return {'timestamp': int(msg.created * 1000),
                'message': self.format(msg),
                'stream': self.log_stream or msg.name,
                'group': self.log_group}

    def start_transports(self):
        """start thread transports."""
        self.transport = Transport(
            self.queue, self.batch_size, self.batch_interval,
            self.session_factory)
        thread = threading.Thread(target=self.transport.loop)
        self.threads.append(thread)
        thread.daemon = True
        thread.start()

    def flush_buffers(self, force=False):
        # hand the local buffer to the queue once it's big enough (or forced)
        if not force and len(self.buf) < self.batch_min_buffer:
            return
        self.queue.put(self.buf)
        self.buf = []
class Transport:
    """Background delivery loop: drains the handler's queue and pushes
    batched events to CloudWatch Logs, tracking per-stream sequence tokens.
    """

    def __init__(self, queue, batch_size, batch_interval, session_factory):
        self.queue = queue
        self.batch_size = batch_size
        self.batch_interval = batch_interval
        # separate session/client: this object runs on its own thread
        self.client = session_factory().client('logs')
        # stream name -> last sequence token returned by put_log_events
        self.sequences = {}
        # "group=stream" key -> list of pending event dicts
        self.buffers = {}
        # first delivery error, surfaced to the caller by the handler's emit()
        self.error = None

    def create_stream(self, group, stream):
        """Create the log stream; returns False (and records error) on failure."""
        try:
            self.client.create_log_stream(
                logGroupName=group, logStreamName=stream)
        except ClientError as e:
            # an already-existing stream is fine; anything else is fatal
            if Error.code(e) != Error.ResourceExists:
                self.error = e
                return False
        return True

    def send(self):
        """Flush every buffered group/stream and reset the buffers."""
        for k, messages in self.buffers.items():
            self.send_group(k, messages)
        self.buffers = {}

    def send_group(self, k, messages):
        """Deliver one stream's messages, creating the stream on first use."""
        group, stream = k.split('=', 1)
        if stream not in self.sequences:
            if not self.create_stream(group, stream):
                return
            self.sequences[stream] = None
        params = dict(
            logGroupName=group, logStreamName=stream,
            # the API requires events in ascending timestamp order
            logEvents=sorted(
                messages, key=itemgetter('timestamp'), reverse=False))
        if self.sequences[stream]:
            params['sequenceToken'] = self.sequences[stream]
        try:
            response = self.client.put_log_events(**params)
        except ClientError as e:
            if Error.code(e) in (Error.AlreadyAccepted, Error.InvalidToken):
                # the expected token is the last word of the error message;
                # adopt it and retry once with the corrected sequence
                self.sequences[stream] = e.response['Error']['Message'].rsplit(
                    " ", 1)[-1]
                return self.send_group(k, messages)
            self.error = e
            return
        self.sequences[stream] = response['nextSequenceToken']

    def loop(self):
        """Thread body: consume queue items until SHUTDOWN_MARKER arrives."""
        def keyed(datum):
            return "%s=%s" % (
                datum.pop('group'), datum.pop('stream'))

        while True:
            try:
                datum = self.queue.get(block=True, timeout=self.batch_interval)
            except EMPTY:
                # module globals may be cleared during interpreter shutdown
                if Queue is None:
                    return
                datum = None
            if datum is None:
                # Timeout reached, flush
                self.send()
                continue
            elif datum == FLUSH_MARKER:
                self.send()
            elif datum == SHUTDOWN_MARKER:
                self.queue.task_done()
                return
            else:
                # a list of formatted records: bucket them by group/stream
                for k, group in itertools.groupby(datum, keyed):
                    self.buffers.setdefault(k, []).extend(group)
            self.queue.task_done()
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime
import getpass
import logging
import socket
import subprocess
import multiprocessing
import math
from time import sleep
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.orm.session import make_transient
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
from airflow.settings import Stats
# Convenience aliases into airflow.models/settings so the rest of this
# module can refer to them without the package prefix.
DagRun = models.DagRun
Base = models.Base
ID_LEN = models.ID_LEN
# NOTE(review): Stats is already imported from airflow.settings above; this
# rebind looks redundant — presumably kept for backwards compatibility, confirm.
Stats = settings.Stats
class BaseJob(Base, LoggingMixin):
    """
    Abstract class to be derived for jobs. Jobs are processing items with state
    and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have it's own state, start
    and end time.
    """

    __tablename__ = "job"

    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN),)
    state = Column(String(20))
    job_type = Column(String(30))
    start_date = Column(DateTime())
    end_date = Column(DateTime())
    latest_heartbeat = Column(DateTime())
    executor_class = Column(String(500))
    hostname = Column(String(500))
    unixname = Column(String(1000))

    __mapper_args__ = {
        'polymorphic_on': job_type,
        'polymorphic_identity': 'BaseJob'
    }

    __table_args__ = (
        Index('job_type_heart', job_type, latest_heartbeat),
    )

    def __init__(
            self,
            executor=executors.DEFAULT_EXECUTOR,
            heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
            *args, **kwargs):
        """Record the host/user/executor context and stamp the start time."""
        self.hostname = socket.getfqdn()
        self.executor = executor
        self.executor_class = executor.__class__.__name__
        self.start_date = datetime.now()
        self.latest_heartbeat = datetime.now()
        self.heartrate = heartrate
        self.unixname = getpass.getuser()
        super(BaseJob, self).__init__(*args, **kwargs)

    def is_alive(self):
        """Return True if the last heartbeat is within 2.1 heartbeat periods.

        BUG FIX: use ``total_seconds()`` instead of the ``seconds`` attribute.
        ``timedelta.seconds`` only holds the sub-day component, so a job that
        had been silent for, say, one day and five seconds would wrongly
        report as alive.
        """
        return (
            (datetime.now() - self.latest_heartbeat).total_seconds() <
            (conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
        )

    def kill(self):
        """Mark this job as ended in the DB, run on_kill(), and raise."""
        session = settings.Session()
        job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
        job.end_date = datetime.now()
        try:
            self.on_kill()
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; the kill must still proceed
            # when the subclass hook fails, but only for ordinary errors.
            self.logger.error('on_kill() method failed')
        session.merge(job)
        session.commit()
        session.close()
        raise AirflowException("Job shut down externally.")

    def on_kill(self):
        '''
        Will be called when an external kill command is received
        '''
        pass

    def heartbeat_callback(self):
        """Hook invoked after every successful heartbeat; subclasses may override."""
        pass

    def heartbeat(self):
        '''
        Heartbeats update the job's entry in the database with a timestamp
        for the latest_heartbeat and allows for the job to be killed
        externally. This allows at the system level to monitor what is
        actually active.
        For instance, an old heartbeat for SchedulerJob would mean something
        is wrong.
        This also allows for any job to be killed externally, regardless
        of who is running it or on which machine it is running.
        Note that if your heartbeat is set to 60 seconds and you call this
        method after 10 seconds of processing since the last heartbeat, it
        will sleep 50 seconds to complete the 60 seconds and keep a steady
        heart rate. If you go over 60 seconds before calling it, it won't
        sleep at all.
        '''
        session = settings.Session()
        job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
        # honor an externally-requested shutdown before doing anything else
        if job.state == State.SHUTDOWN:
            self.kill()
        # pace the loop so heartbeats land roughly self.heartrate apart
        if job.latest_heartbeat:
            sleep_for = self.heartrate - (
                datetime.now() - job.latest_heartbeat).total_seconds()
            if sleep_for > 0:
                sleep(sleep_for)
        job.latest_heartbeat = datetime.now()
        session.merge(job)
        session.commit()
        session.close()
        self.heartbeat_callback()
        self.logger.debug('[heart] Boom.')

    def run(self):
        """Persist the job row, run _execute(), then record success."""
        Stats.incr(self.__class__.__name__.lower()+'_start', 1, 1)
        # Adding an entry in the DB
        session = settings.Session()
        self.state = State.RUNNING
        session.add(self)
        session.commit()
        id_ = self.id
        # detach from the session so _execute() can manage its own sessions,
        # but keep the primary key for the final merge below
        make_transient(self)
        self.id = id_

        # Run
        self._execute()

        # Marking the success in the DB
        self.end_date = datetime.now()
        self.state = State.SUCCESS
        session.merge(self)
        session.commit()
        session.close()
        Stats.incr(self.__class__.__name__.lower()+'_end', 1, 1)

    def _execute(self):
        raise NotImplementedError("This method needs to be overridden")
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs indefinitely and constantly schedules the jobs
that are ready to run. It figures out the latest runs for each
task and see if the dependencies for the next schedules are met.
If so it triggers the task instance. It does this for each task
in each DAG and repeats.
:param dag_id: to run the scheduler for a single specific DAG
:type dag_id: string
:param subdir: to search for DAG under a certain folder only
:type subdir: string
:param test_mode: used for unit testing this class only, runs a single
schedule run
:type test_mode: bool
:param refresh_dags_every: force refresh the DAG definition every N
runs, as specified here
:type refresh_dags_every: int
:param do_pickle: to pickle the DAG object and send over to workers
for non-local executors
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=None,
test_mode=False,
refresh_dags_every=10,
num_runs=None,
do_pickle=False,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
if test_mode:
self.num_runs = 1
else:
self.num_runs = num_runs
self.refresh_dags_every = refresh_dags_every
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = min(conf.getint('scheduler', 'max_threads'), multiprocessing.cpu_count())
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.logger.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
Where assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < datetime.now():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.email_sent == False or SlaMiss.notification_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
blocking_tis = ([ti for ti in blocking_tis
if ti.are_dependencies_met(session=session)])
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.logger.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
email_content = """\
Here's a list of tasks thas missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
session.close()
def import_errors(self, dagbag):
session = settings.Session()
session.query(models.ImportError).delete()
for filename, stacktrace in list(dagbag.import_errors.items()):
session.add(models.ImportError(
filename=filename, stacktrace=stacktrace))
session.commit()
    def schedule_dag(self, dag):
        """
        This method checks whether a new DagRun needs to be created
        for a DAG based on scheduling interval
        Returns DagRun if one is scheduled. Otherwise returns None.
        """
        if dag.schedule_interval:
            DagRun = models.DagRun
            session = settings.Session()
            active_runs = DagRun.find(
                dag_id=dag.dag_id,
                state=State.RUNNING,
                external_trigger=False,
                session=session
            )
            # respect the DAG's concurrency ceiling on active runs
            if len(active_runs) >= dag.max_active_runs:
                return
            # fail any active run that has exceeded its dagrun_timeout
            for dr in active_runs:
                if (
                        dr.start_date and dag.dagrun_timeout and
                        dr.start_date < datetime.now() - dag.dagrun_timeout):
                    dr.state = State.FAILED
                    dr.end_date = datetime.now()
            session.commit()

            # this query should be replaced by find dagrun
            qry = (
                session.query(func.max(DagRun.execution_date))
                .filter_by(dag_id=dag.dag_id)
                .filter(or_(
                    DagRun.external_trigger == False,
                    # add % as a wildcard for the like query
                    DagRun.run_id.like(DagRun.ID_PREFIX+'%')
                ))
            )
            last_scheduled_run = qry.scalar()

            # don't schedule @once again
            if dag.schedule_interval == '@once' and last_scheduled_run:
                return None

            next_run_date = None
            if not last_scheduled_run:
                # First run
                task_start_dates = [t.start_date for t in dag.tasks]
                if task_start_dates:
                    next_run_date = dag.normalize_schedule(min(task_start_dates))
                    self.logger.debug("Next run date based on tasks {}"
                                      .format(next_run_date))
            else:
                next_run_date = dag.following_schedule(last_scheduled_run)

            # don't ever schedule prior to the dag's start_date
            if dag.start_date:
                next_run_date = (dag.start_date if not next_run_date
                                 else max(next_run_date, dag.start_date))
                if next_run_date == dag.start_date:
                    next_run_date = dag.normalize_schedule(dag.start_date)
                self.logger.debug("Dag start date: {}. Next run date: {}"
                                  .format(dag.start_date, next_run_date))

            # this structure is necessary to avoid a TypeError from concatenating
            # NoneType
            if dag.schedule_interval == '@once':
                period_end = next_run_date
            elif next_run_date:
                period_end = dag.following_schedule(next_run_date)

            # Don't schedule a dag beyond its end_date (as specified by the dag param)
            if next_run_date and dag.end_date and next_run_date > dag.end_date:
                return

            # Don't schedule a dag beyond its end_date (as specified by the task params)
            # Get the min task end date, which may come from the dag.default_args
            min_task_end_date = []
            task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
            if task_end_dates:
                min_task_end_date = min(task_end_dates)
            if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
                return

            # only create the run once its full schedule period has elapsed;
            # `period_end` is guarded by `next_run_date and ...` so the branch
            # where it was never assigned cannot be evaluated here
            if next_run_date and period_end and period_end <= datetime.now():
                next_run = dag.create_dagrun(
                    run_id='scheduled__' + next_run_date.isoformat(),
                    execution_date=next_run_date,
                    start_date=datetime.now(),
                    state=State.RUNNING,
                    external_trigger=False
                )
                return next_run
def process_dag(self, dag, queue):
"""
This method schedules a single DAG by looking at the latest
run for each task and attempting to schedule the following run.
As multiple schedulers may be running for redundancy, this
function takes a lock on the DAG and timestamps the last run
in ``last_scheduler_run``.
"""
DagModel = models.DagModel
session = settings.Session()
# picklin'
pickle_id = None
if self.do_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle_id = dag.pickle(session).id
# obtain db lock
db_dag = session.query(DagModel).filter_by(
dag_id=dag.dag_id
).with_for_update().one()
last_scheduler_run = db_dag.last_scheduler_run or datetime(2000, 1, 1)
secs_since_last = (datetime.now() - last_scheduler_run).total_seconds()
if secs_since_last < self.heartrate:
# release db lock
session.commit()
session.close()
return None
# Release the db lock
# the assumption here is that process_dag will take less
# time than self.heartrate otherwise we might unlock too
# quickly and this should moved below, but that would increase
# the time the record is locked and is blocking for other calls.
db_dag.last_scheduler_run = datetime.now()
session.commit()
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
# do not consider runs that are executed in the future
if run.execution_date > datetime.now():
continue
# todo: run.task is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
active_dag_runs.append(run)
for run in active_dag_runs:
# this needs a fresh session sometimes tis get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.is_runnable(flag_upstream_failed=True):
self.logger.debug('Queuing task: {}'.format(ti))
ti.refresh_from_db(session=session, lock_for_update=True)
# another scheduler could have picked this task
# todo: UP_FOR_RETRY still could create a race condition
if ti.state is State.SCHEDULED:
session.commit()
self.logger.debug("Task {} was picked up by another scheduler"
.format(ti))
continue
elif ti.state is State.NONE:
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
queue.put((ti.key, pickle_id))
session.close()
@provide_session
def prioritize_queued(self, session, executor, dagbag):
    """
    Re-examine QUEUED task instances and hand the runnable ones to the
    executor, honouring pool slots and per-DAG concurrency limits.

    Stale entries (DAG or task no longer known to the dagbag, or whose
    task lookup fails) are deleted from the database. Within each pool,
    tasks are dispatched highest ``priority_weight`` first, ties broken
    by earliest ``start_date``.

    :param session: SQLAlchemy session (injected by @provide_session)
    :param executor: executor that receives runnable task instances
    :param dagbag: the current DagBag used to resolve DAGs/tasks
    """
    # Prioritizing queued task instances
    pools = {p.pool: p for p in session.query(models.Pool).all()}
    TI = models.TaskInstance
    queued_tis = (
        session.query(TI)
        .filter(TI.state == State.QUEUED)
        .all()
    )
    self.logger.info(
        "Prioritizing {} queued jobs".format(len(queued_tis)))
    session.expunge_all()

    # Bucket surviving task instances by pool name; delete orphans.
    d = defaultdict(list)
    for ti in queued_tis:
        if ti.dag_id not in dagbag.dags:
            self.logger.info(
                "DAG no longer in dagbag, deleting {}".format(ti))
            session.delete(ti)
            session.commit()
        elif not dagbag.dags[ti.dag_id].has_task(ti.task_id):
            self.logger.info(
                "Task no longer exists, deleting {}".format(ti))
            session.delete(ti)
            session.commit()
        else:
            d[ti.pool].append(ti)

    dag_blacklist = set(dagbag.paused_dags())
    for pool, tis in list(d.items()):
        if not pool:
            # Arbitrary:
            # If queued outside of a pool, trigger no more than
            # non_pooled_task_slot_count per run
            open_slots = conf.getint('core', 'non_pooled_task_slot_count')
        else:
            open_slots = pools[pool].open_slots(session=session)

        queue_size = len(tis)
        self.logger.info("Pool {pool} has {open_slots} slots, {queue_size} "
                         "task instances in queue".format(**locals()))
        if open_slots <= 0:
            continue

        tis = sorted(
            tis, key=lambda ti: (-ti.priority_weight, ti.start_date))
        for ti in tis:
            if open_slots <= 0:
                continue
            task = None
            try:
                task = dagbag.dags[ti.dag_id].get_task(ti.task_id)
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Only unexpected lookup
                # failures should mark the queued task as gone.
                self.logger.error("Queued task {} seems gone".format(ti))
                session.delete(ti)
                session.commit()
                continue

            if not task:
                continue

            ti.task = task

            # picklin' — only remote executors need a pickled DAG
            dag = dagbag.dags[ti.dag_id]
            pickle_id = None
            if self.do_pickle and self.executor.__class__ not in (
                    executors.LocalExecutor,
                    executors.SequentialExecutor):
                self.logger.info("Pickling DAG {}".format(dag))
                pickle_id = dag.pickle(session).id

            if dag.dag_id in dag_blacklist:
                continue
            if dag.concurrency_reached:
                dag_blacklist.add(dag.dag_id)
                continue
            if ti.are_dependencies_met():
                executor.queue_task_instance(ti, pickle_id=pickle_id)
                open_slots -= 1
            else:
                session.delete(ti)
                session.commit()
                continue
            # NOTE(review): redundant — ti.task was already assigned above;
            # kept for parity with the original control flow.
            ti.task = task

            session.commit()
def _split(self, items, size):
    """
    Split *items* into consecutive chunks of at most ``size`` elements.

    A non-positive ``size`` is clamped to 1, so every element still ends
    up in some chunk. E.g. ``_split([1,2,3,4,5,6], 3)`` gives
    ``[[1,2,3],[4,5,6]]``.
    """
    chunk = max(1, size)
    return [items[start:start + chunk]
            for start in range(0, len(items), chunk)]
def _do_dags(self, dagbag, dags, tis_out):
    """
    Schedule and process every DAG in *dags*, pushing queueable task
    instance keys onto *tis_out*.

    Each DAG is re-fetched from *dagbag* before use; a failure while
    handling one DAG is logged and does not abort the remaining DAGs.
    """
    for candidate in dags:
        self.logger.debug("Scheduling {}".format(candidate.dag_id))
        dag = dagbag.get_dag(candidate.dag_id)
        if not dag:
            continue
        try:
            self.schedule_dag(dag)
            self.process_dag(dag, tis_out)
            self.manage_slas(dag)
        except Exception as e:
            self.logger.exception(e)
def _execute(self):
    """Main scheduler loop: repeatedly refresh the DagBag, fan scheduling
    work out over worker processes, and drain queued task instances into
    the executor, until ``num_runs`` iterations have completed (or forever
    when ``num_runs`` is falsy)."""
    TI = models.TaskInstance

    pessimistic_connection_handling()

    logging.basicConfig(level=logging.DEBUG)
    self.logger.info("Starting the scheduler")

    dagbag = models.DagBag(self.subdir, sync_to_db=True)
    executor = self.executor = dagbag.executor
    executor.start()
    self.runs = 0
    while not self.num_runs or self.num_runs > self.runs:
        try:
            loop_start_dttm = datetime.now()
            try:
                self.prioritize_queued(executor=executor, dagbag=dagbag)
            except Exception as e:
                self.logger.exception(e)

            self.runs += 1
            try:
                # Full DagBag rebuild every refresh_dags_every runs;
                # otherwise only re-collect files that changed on disk.
                if self.runs % self.refresh_dags_every == 0:
                    dagbag = models.DagBag(self.subdir, sync_to_db=True)
                else:
                    dagbag.collect_dags(only_if_updated=True)
            except Exception as e:
                self.logger.error("Failed at reloading the dagbag. {}".format(e))
                Stats.incr('dag_refresh_error', 1, 1)
                sleep(5)

            # Restrict to explicitly requested DAG ids, or to top-level
            # (non-subdag) DAGs when none were requested.
            if len(self.dag_ids) > 0:
                dags = [dag for dag in dagbag.dags.values() if dag.dag_id in self.dag_ids]
            else:
                dags = [
                    dag for dag in dagbag.dags.values()
                    if not dag.parent_dag]

            paused_dag_ids = dagbag.paused_dags()
            dags = [x for x in dags if x.dag_id not in paused_dag_ids]
            # dags = filter(lambda x: x.dag_id not in paused_dag_ids, dags)

            self.logger.debug("Total Cores: {} Max Threads: {} DAGs:{}".
                              format(multiprocessing.cpu_count(),
                                     self.max_threads,
                                     len(dags)))
            # Partition the DAG list into at most max_threads chunks, one
            # worker process per chunk; workers report queueable task keys
            # through tis_q.
            dags = self._split(dags, math.ceil(len(dags) / self.max_threads))
            tis_q = multiprocessing.Queue()
            jobs = [multiprocessing.Process(target=self._do_dags,
                                            args=(dagbag, dags[i], tis_q))
                    for i in range(len(dags))]

            self.logger.info("Starting {} scheduler jobs".format(len(jobs)))
            for j in jobs:
                j.start()

            # Drain the queue while workers are alive so they never block
            # on a full queue; (dag_id, task_id, execution_date) keys are
            # rehydrated into TaskInstances here in the parent process.
            while any(j.is_alive() for j in jobs):
                while not tis_q.empty():
                    ti_key, pickle_id = tis_q.get()
                    dag = dagbag.dags[ti_key[0]]
                    task = dag.get_task(ti_key[1])
                    ti = TI(task, ti_key[2])
                    self.executor.queue_task_instance(ti, pickle_id=pickle_id)

            for j in jobs:
                j.join()

            self.logger.info("Done queuing tasks, calling the executor's "
                             "heartbeat")
            duration_sec = (datetime.now() - loop_start_dttm).total_seconds()
            self.logger.info("Loop took: {} seconds".format(duration_sec))
            Stats.timing("scheduler_loop", duration_sec * 1000)
            try:
                self.import_errors(dagbag)
            except Exception as e:
                self.logger.exception(e)
            try:
                dagbag.kill_zombies()
            except Exception as e:
                self.logger.exception(e)
            try:
                # We really just want the scheduler to never ever stop.
                executor.heartbeat()
                self.heartbeat()
            except Exception as e:
                self.logger.exception(e)
                self.logger.error("Tachycardia!")
        except Exception as deep_e:
            self.logger.exception(deep_e)
            raise
        finally:
            # Scoped-session cleanup runs every iteration, even on raise.
            settings.Session.remove()
    executor.end()
def heartbeat_callback(self):
    """Emit a statsd gauge marking this scheduler as alive."""
    Stats.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
    """
    A backfill job consists of a dag or subdag for a specific time range. It
    triggers a set of task instance runs, in the right order and lasts for
    as long as it takes for the set of task instance to be completed.
    """

    __mapper_args__ = {
        'polymorphic_identity': 'BackfillJob'
    }

    def __init__(
            self,
            dag, start_date=None, end_date=None, mark_success=False,
            include_adhoc=False,
            donot_pickle=False,
            ignore_dependencies=False,
            ignore_first_depends_on_past=False,
            pool=None,
            *args, **kwargs):
        # Backfill window; either bound may be None and then falls back to
        # per-task dates inside _execute.
        self.dag = dag
        self.dag_id = dag.dag_id
        self.bf_start_date = start_date
        self.bf_end_date = end_date
        self.mark_success = mark_success
        self.include_adhoc = include_adhoc
        self.donot_pickle = donot_pickle
        self.ignore_dependencies = ignore_dependencies
        self.ignore_first_depends_on_past = ignore_first_depends_on_past
        self.pool = pool
        super(BackfillJob, self).__init__(*args, **kwargs)

    def _execute(self):
        """
        Runs a dag for a specified date range.

        Raises AirflowException at the end when any task instance failed or
        the backfill deadlocked.
        """
        session = settings.Session()

        start_date = self.bf_start_date
        end_date = self.bf_end_date

        # picklin' — remote executors get the DAG via a DB-stored pickle
        pickle_id = None
        if not self.donot_pickle and self.executor.__class__ not in (
                executors.LocalExecutor, executors.SequentialExecutor):
            pickle = models.DagPickle(self.dag)
            session.add(pickle)
            session.commit()
            pickle_id = pickle.id

        executor = self.executor
        executor.start()
        executor_fails = Counter()

        # Build a list of all instances to run
        tasks_to_run = {}
        failed = set()
        succeeded = set()
        started = set()
        skipped = set()
        not_ready = set()
        deadlocked = set()

        for task in self.dag.tasks:
            if (not self.include_adhoc) and task.adhoc:
                continue

            start_date = start_date or task.start_date
            end_date = end_date or task.end_date or datetime.now()
            for dttm in self.dag.date_range(start_date, end_date=end_date):
                ti = models.TaskInstance(task, dttm)
                tasks_to_run[ti.key] = ti
                session.merge(ti)
        session.commit()

        # Triggering what is ready to get triggered
        while tasks_to_run and not deadlocked:
            not_ready.clear()
            for key, ti in list(tasks_to_run.items()):
                ti.refresh_from_db(session=session, lock_for_update=True)
                # depends_on_past may only be ignored for the first
                # execution date of the window.
                ignore_depends_on_past = (
                    self.ignore_first_depends_on_past and
                    ti.execution_date == (start_date or ti.start_date))

                # The task was already marked successful or skipped by a
                # different Job. Don't rerun it.
                if key not in started:
                    if ti.state == State.SUCCESS:
                        succeeded.add(key)
                        tasks_to_run.pop(key)
                        session.commit()
                        continue
                    elif ti.state == State.SKIPPED:
                        skipped.add(key)
                        tasks_to_run.pop(key)
                        session.commit()
                        continue

                # Is the task runnable? -- then run it
                if ti.is_queueable(
                        include_queued=True,
                        ignore_depends_on_past=ignore_depends_on_past,
                        flag_upstream_failed=True):
                    self.logger.debug('Sending {} to executor'.format(ti))
                    if ti.state == State.NONE:
                        ti.state = State.SCHEDULED
                        session.merge(ti)
                        session.commit()
                    executor.queue_task_instance(
                        ti,
                        mark_success=self.mark_success,
                        pickle_id=pickle_id,
                        ignore_dependencies=self.ignore_dependencies,
                        ignore_depends_on_past=ignore_depends_on_past,
                        pool=self.pool)
                    started.add(key)

                # Mark the task as not ready to run
                elif ti.state in (State.NONE, State.UPSTREAM_FAILED):
                    not_ready.add(key)
                session.commit()

            self.heartbeat()
            executor.heartbeat()

            # If the set of tasks that aren't ready ever equals the set of
            # tasks to run, then the backfill is deadlocked
            if not_ready and not_ready == set(tasks_to_run):
                deadlocked.update(tasks_to_run.values())
                tasks_to_run.clear()

            # Reacting to events: reconcile executor-reported states with
            # what each task instance recorded about itself in the DB.
            for key, state in list(executor.get_event_buffer().items()):
                dag_id, task_id, execution_date = key
                if key not in tasks_to_run:
                    continue
                ti = tasks_to_run[key]
                ti.refresh_from_db()

                # executor reports failure
                if state == State.FAILED:

                    # task reports running
                    if ti.state == State.RUNNING:
                        msg = (
                            'Executor reports that task instance {} failed '
                            'although the task says it is running.'.format(key))
                        self.logger.error(msg)
                        ti.handle_failure(msg)
                        tasks_to_run.pop(key)

                    # task reports skipped
                    elif ti.state == State.SKIPPED:
                        self.logger.error("Skipping {} ".format(key))
                        skipped.add(key)
                        tasks_to_run.pop(key)

                    # anything else is a failure
                    else:
                        self.logger.error("Task instance {} failed".format(key))
                        failed.add(key)
                        tasks_to_run.pop(key)

                # executor reports success
                elif state == State.SUCCESS:

                    # task reports success
                    if ti.state == State.SUCCESS:
                        self.logger.info(
                            'Task instance {} succeeded'.format(key))
                        succeeded.add(key)
                        tasks_to_run.pop(key)

                    # task reports failure
                    elif ti.state == State.FAILED:
                        self.logger.error("Task instance {} failed".format(key))
                        failed.add(key)
                        tasks_to_run.pop(key)

                    # task reports skipped
                    elif ti.state == State.SKIPPED:
                        self.logger.info("Task instance {} skipped".format(key))
                        skipped.add(key)
                        tasks_to_run.pop(key)

                    # this probably won't ever be triggered
                    elif ti in not_ready:
                        self.logger.info(
                            "{} wasn't expected to run, but it did".format(ti))

                    # executor reports success but task does not - this is weird
                    elif ti.state not in (
                            State.SCHEDULED,
                            State.QUEUED,
                            State.UP_FOR_RETRY):
                        self.logger.error(
                            "The airflow run command failed "
                            "at reporting an error. This should not occur "
                            "in normal circumstances. Task state is '{}',"
                            "reported state is '{}'. TI is {}"
                            "".format(ti.state, state, ti))

                        # if the executor fails 3 or more times, stop trying to
                        # run the task
                        executor_fails[key] += 1
                        if executor_fails[key] >= 3:
                            msg = (
                                'The airflow run command failed to report an '
                                'error for task {} three or more times. The '
                                'task is being marked as failed. This is very '
                                'unusual and probably means that an error is '
                                'taking place before the task even '
                                'starts.'.format(key))
                            self.logger.error(msg)
                            ti.handle_failure(msg)
                            tasks_to_run.pop(key)

            # Progress report once per outer loop iteration.
            msg = ' | '.join([
                "[backfill progress]",
                "waiting: {0}",
                "succeeded: {1}",
                "kicked_off: {2}",
                "failed: {3}",
                "skipped: {4}",
                "deadlocked: {5}"
            ]).format(
                len(tasks_to_run),
                len(succeeded),
                len(started),
                len(failed),
                len(skipped),
                len(deadlocked))
            self.logger.info(msg)

        executor.end()
        session.close()

        # Summarize failures/deadlocks and raise so callers see a non-clean
        # backfill as an exception.
        err = ''
        if failed:
            err += (
                "---------------------------------------------------\n"
                "Some task instances failed:\n{}\n".format(failed))
        if deadlocked:
            err += (
                '---------------------------------------------------\n'
                'BackfillJob is deadlocked.')
            deadlocked_depends_on_past = any(
                t.are_dependencies_met() != t.are_dependencies_met(
                    ignore_depends_on_past=True)
                for t in deadlocked)
            if deadlocked_depends_on_past:
                err += (
                    'Some of the deadlocked tasks were unable to run because '
                    'of "depends_on_past" relationships. Try running the '
                    'backfill with the option '
                    '"ignore_first_depends_on_past=True" or passing "-I" at '
                    'the command line.')
            err += ' These tasks were unable to run:\n{}\n'.format(deadlocked)
        if err:
            raise AirflowException(err)

        self.logger.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
    """Run a single task instance locally by shelling out to the
    ``airflow run`` command and heartbeating until it exits."""

    __mapper_args__ = {
        'polymorphic_identity': 'LocalTaskJob'
    }

    def __init__(
            self,
            task_instance,
            ignore_dependencies=False,
            ignore_depends_on_past=False,
            force=False,
            mark_success=False,
            pickle_id=None,
            pool=None,
            *args, **kwargs):
        self.task_instance = task_instance
        self.ignore_dependencies = ignore_dependencies
        self.ignore_depends_on_past = ignore_depends_on_past
        self.force = force
        self.pool = pool
        self.pickle_id = pickle_id
        self.mark_success = mark_success
        super(LocalTaskJob, self).__init__(*args, **kwargs)

    def _execute(self):
        # Build the raw `airflow run` shell command for this task instance
        # and poll it to completion, heartbeating between polls.
        command = self.task_instance.command(
            raw=True,
            ignore_dependencies=self.ignore_dependencies,
            ignore_depends_on_past=self.ignore_depends_on_past,
            force=self.force,
            pickle_id=self.pickle_id,
            mark_success=self.mark_success,
            job_id=self.id,
            pool=self.pool,
        )
        self.process = subprocess.Popen(['bash', '-c', command])
        return_code = None
        while return_code is None:
            self.heartbeat()
            return_code = self.process.poll()

    def on_kill(self):
        # Terminate the child `airflow run` process when this job is killed.
        self.process.terminate()

    # Disabled "suicide pill" callback kept for reference (class-level
    # string literal, has no runtime effect).
    """
    def heartbeat_callback(self):
        if datetime.now() - self.start_date < timedelta(seconds=300):
            return
        # Suicide pill
        TI = models.TaskInstance
        ti = self.task_instance
        session = settings.Session()
        state = session.query(TI.state).filter(
            TI.dag_id==ti.dag_id, TI.task_id==ti.task_id,
            TI.execution_date==ti.execution_date).scalar()
        session.commit()
        session.close()
        if state != State.RUNNING:
            logging.warning(
                "State of this instance has been externally set to "
                "{self.task_instance.state}. "
                "Taking the poison pill. So long.".format(**locals()))
            self.process.terminate()
    """
|
|
import os
import datetime
import logging
import json
import base64
import zlib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse # silence pyflakes
import jwt
import requests
from flask_oauthlib.client import OAuth, OAuthException
from .models import get_permission, get_user, create_or_get_user, save_user
def readfile_or_default(filename, default):
    """Return the stripped contents of *filename*, or *default* if unreadable.

    Uses a context manager so the file handle is closed promptly instead of
    being leaked until garbage collection (the original called ``open()``
    without ever closing).
    """
    try:
        with open(filename) as f:
            return f.read().strip()
    except IOError:
        return default
# The deployment packs its secrets as a zlib-compressed, base64-encoded JSON
# blob split across four environment variables (OS_CONDUCTOR_SECRETS_0..3).
# Any missing piece or decode failure falls through to an empty dict, so the
# credentials.get(...) defaults below take over.
try:
    credentials = ''.join(os.environ.get('OS_CONDUCTOR_SECRETS_%d' % i)
                          for i in range(4)).encode('ascii')
    credentials = base64.decodebytes(credentials)
    credentials = zlib.decompress(credentials).decode('ascii')
    credentials = json.loads(credentials)
except Exception:
    credentials = {}
PUBLIC_KEY = credentials.get('public.pem', '''-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzSrV/SxRNKufc6f0GQIu
YMASgBCOiJW5fvCnGtVMIrWvBQoCFAp9QwRHrbQrQJiPg6YqqnTvGhWssL5LMMvR
8jXXOpFUKzYaSgYaQt1LNMCwtqMB0FGSDjBrbmEmnDSo6g0Naxhi+SJX3BMcce1W
TgKRybv3N3F+gJ9d8wPkyx9xhd3H4200lHk4T5XK5+LyAPSnP7FNUYTdJRRxKFWg
ZFuII+Ex6mtUKU9LZsg9xeAC6033dmSYe5yWfdrFehmQvPBUVH4HLtL1fXTNyXuz
ZwtO1v61Qc1u/j7gMsrHXW+4csjS3lDwiiPIg6q1hTA7QJdB1M+rja2MG+owL0U9
owIDAQAB
-----END PUBLIC KEY-----''')
PRIVATE_KEY = credentials.get('private.pem', '''-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAzSrV/SxRNKufc6f0GQIuYMASgBCOiJW5fvCnGtVMIrWvBQoC
FAp9QwRHrbQrQJiPg6YqqnTvGhWssL5LMMvR8jXXOpFUKzYaSgYaQt1LNMCwtqMB
0FGSDjBrbmEmnDSo6g0Naxhi+SJX3BMcce1WTgKRybv3N3F+gJ9d8wPkyx9xhd3H
4200lHk4T5XK5+LyAPSnP7FNUYTdJRRxKFWgZFuII+Ex6mtUKU9LZsg9xeAC6033
dmSYe5yWfdrFehmQvPBUVH4HLtL1fXTNyXuzZwtO1v61Qc1u/j7gMsrHXW+4csjS
3lDwiiPIg6q1hTA7QJdB1M+rja2MG+owL0U9owIDAQABAoIBAHgA7ytniZQSMnDW
szsRgIkMr4WCqawQT3CFWGikjCTdOiLraK3KONxDG53pfUcKNR9eySPsw5HxTZIP
rDE9dm6CuYJDUQT5X0Ue7qtffsa7UmFxVPVBUPnFroDgiFHjp01HFysmF3X7dYJ/
Fys4FDwK2rUxoXcnhkO7c5taErAPhpmv+QncVBkouQ3bB78av6cHdQfo+7PcvYRP
x6iDPAjMpz1wF1Fkd9mSHadjuqlC3FubbwEK5nTuSl4nPULK7KaCv9NjxyzTUi23
DWk9QCv+peIK/1h75cbB9eVvZayHlFlVNtD7Mrx5rediWABSqvNLRv/aZ0/o5+FM
1cxiYPECgYEA9AEr60CPlW9vBOacCImnWHWEH/UEwi4aNTBxpZEWRuN0HnmB+4Rt
1b+7LoX6olVBN1y8YIwzkDOCVblFaT+THBNiE7ABwB87c0jYd2ULQszqrebjXPoz
8q7MqghD+4iDfvP2QmivpadfeGGzYFI49b7W5c/Iv4w0oWgutib+hDsCgYEA10Dk
hMwg61q6YVAeTIqnV7zujfzTIif9AkePAfNLolLdn0Bx5LS6oPxeRUxyy4mImwrf
p6yZGOX/7ocy7rQ3X/F6fuxwuGa74PNZPwlLuD7UUPr//OPuQihoDKvL+52XWA5U
Q09sXK+KlvuH4DJ5UsHC9kgATyuGNUOeXYBHHbkCgYEA78Zq8x2ZOz6quQUolZc3
dEzezkyHJY4KQPRe6VUesAB5riy3F4M2L5LejMQp2/WtRYsCrll3nh+P109dryRD
GpbNjQ0rWzEVyZ7u4LzRiQ43GzbFfCt+et9czUWcEIRAu7Ne7jlTSZSk03Ymv+Ns
h8jGAkTiP6C2Y1oudN7ywtsCgYBAWIa3Z+oDUQjcJD4adWxW3wSU71oSINASSV/n
nloiuRDFFVe2nYwYqbhokNTUIVXzuwlmr0LI3aBnJoVENB1FkgMjQ/ziMtvBAB3S
qS24cxe26YFykJRdtIR+HTEKE271hLsNsAVdo6ATSDey/oOkCIYGZzmocQNaks8Z
dkpMCQKBgQCfZ75r1l/Hzphb78Ygf9tOz1YUFqw/xY9jfufW4C/5SgV2q2t/AZok
LixyPP8SzJcH20iKdc9kS7weiQA0ldT2SYv6VT7IqgQ3i/qYdOmaggjBGaIuIB/B
QZOJBnaSMVJFf/ZO1/1ilGVGfZZ3TMOA1TJlcTZisk56tRTbkivL9Q==
-----END RSA PRIVATE KEY-----''')
# Google OAuth consumer credentials; obvious placeholders are used when the
# deployment secrets are absent.
GOOGLE_KEY = credentials.get('google.key', 'google consumer key')
GOOGLE_SECRET = credentials.get('google.secret.key',
                                'google consumer secret')

# Client-side JS bundle served by lib(); an alert() stub when the file is
# missing.
LIBJS = readfile_or_default(os.path.join(os.path.dirname(__file__),
                                         'lib',
                                         'lib.js'),
                            'alert("error");')

# Module-level OAuth registry; the Google remote app is registered lazily in
# _google_remote_app().
oauth = OAuth()
def _google_remote_app():
    """Return the Google OAuth2 remote app, registering it on the
    module-level ``oauth`` object on first use and reusing it afterwards."""
    if 'google' not in oauth.remote_apps:
        oauth.remote_app(
            'google',
            base_url='https://www.googleapis.com/oauth2/v1/',
            authorize_url='https://accounts.google.com/o/oauth2/auth',
            request_token_url=None,
            request_token_params={
                'scope': 'email profile',
            },
            access_token_url='https://accounts.google.com/o/oauth2/token',
            access_token_method='POST',
            consumer_key=GOOGLE_KEY,
            consumer_secret=GOOGLE_SECRET)
    return oauth.google
def _get_user_profile(access_token):
    """Fetch the Google userinfo profile for *access_token*.

    Returns the decoded JSON profile dict, or ``None`` when there is no
    token or the token is rejected (HTTP 401).
    """
    if access_token is None:
        return None
    response = requests.get(
        'https://www.googleapis.com/oauth2/v1/userinfo',
        headers={'Authorization': 'OAuth {}'.format(access_token)})
    return None if response.status_code == 401 else response.json()
def authenticate(token, next, callback_url):
    """Check if user is authenticated.

    If *token* decodes to a known user, return
    ``{'authenticated': True, 'profile': user}``. Otherwise return an
    unauthenticated response carrying a Google login URL; *next* is folded
    into a short-lived (10 minute) signed ``state`` blob so the callback can
    redirect back.
    """
    if token is not None:
        try:
            # NOTE(review): encode and decode both use the PRIVATE_KEY PEM
            # text as the JWT secret (library default algorithm) — i.e. it
            # is used symmetrically here; confirm this is intentional.
            token = jwt.decode(token, PRIVATE_KEY)
        except jwt.InvalidTokenError:
            token = None

    if token is not None:
        userid = token['userid']
        user = get_user(userid)

        if user is not None:
            ret = {
                'authenticated': True,
                'profile': user
            }
            return ret

    # Otherwise - not authenticated
    provider = 'google'
    state = {
        'next': next,
        'provider': provider,
        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=10),
        'nbf': datetime.datetime.utcnow()
    }
    state = jwt.encode(state, PRIVATE_KEY)
    google_login_url = _google_remote_app() \
        .authorize(callback=callback_url, state=state).headers['Location']
    ret = {
        'authenticated': False,
        'providers': {
            'google': {
                'url': google_login_url
            }
        }
    }
    return ret
def _update_next_url(next_url, client_token):
    """Return *next_url* with a ``jwt=<client_token>`` query parameter added.

    The URL is returned unchanged when there is no token to attach; an
    existing ``jwt`` parameter would be overwritten.
    """
    if client_token is None:
        return next_url
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(next_url)
    pairs = dict(urlparse.parse_qsl(query))
    pairs['jwt'] = client_token
    return urlparse.urlunparse(
        (scheme, netloc, path, params, urlparse.urlencode(pairs), fragment))
def _get_token_from_profile(provider, profile):
    """Create (or fetch) the user behind an OAuth *profile* and mint a
    14-day client JWT for them.

    Returns ``None`` when no profile was obtained.
    """
    if profile is None:
        return None
    userid = '%s:%s' % (provider, profile['id'])
    user = create_or_get_user(userid,
                              profile['name'],
                              profile['email'],
                              profile['picture'])
    claims = {
        'userid': user['idhash'],
        'exp': (datetime.datetime.utcnow() +
                datetime.timedelta(days=14))
    }
    return jwt.encode(claims, PRIVATE_KEY)
def oauth_callback(state, callback_url,
                   set_session=lambda k, v: None):
    """Callback from google.

    Exchanges the provider response for an access token, resolves the user
    profile, mints a client JWT and returns the redirect URL (the ``next``
    URL from *state*, with the JWT appended). Falls back to ``'/'`` without
    a token on any failure.
    """
    try:
        app = _google_remote_app()
        # flask-oauthlib looks up '<name>_oauthredir' in the session when
        # completing the handshake.
        set_session('%s_oauthredir' % app.name, callback_url)
        resp = app.authorized_response()
    except OAuthException as e:
        resp = e
    if isinstance(resp, OAuthException):
        logging.error("OAuthException: %r", resp.data, exc_info=resp)
        resp = None

    # state was signed by authenticate(); an invalid/expired blob simply
    # degrades to the defaults below.
    try:
        state = jwt.decode(state, PRIVATE_KEY)
    except jwt.InvalidTokenError:
        state = {}

    next_url = '/'
    provider = state.get('provider')
    next_url = state.get('next', next_url)
    if resp is not None and provider is not None:
        access_token = resp.get('access_token')
        profile = _get_user_profile(access_token)
        client_token = _get_token_from_profile(provider, profile)
        # Add client token to redirect url
        next_url = _update_next_url(next_url, client_token)

    return next_url
def update(token, username):
    """Update a user.

    Sets the user's username once (it cannot be changed after being set).
    Returns ``{'success': bool}`` plus an ``'error'`` message when
    anything went wrong (missing/invalid token, unknown user, or username
    already set).
    """
    err = None
    if token is not None:
        try:
            token = jwt.decode(token, PRIVATE_KEY)
        except jwt.InvalidTokenError:
            token = None
            err = 'Not authenticated'
    else:
        err = 'No token'
    if token is not None:
        userid = token['userid']
        user = get_user(userid)
        if user is not None:
            dirty = False
            if username is not None:
                # Username is write-once.
                if user.get('username') is None:
                    user['username'] = username
                    dirty = True
                else:
                    err = 'Cannot modify username, already set'
            if dirty:
                save_user(user)
        else:
            err = 'Unknown User'
    ret = {'success': err is None}
    if err is not None:
        ret['error'] = err
    return ret
def authorize(token, service):
    """Return user authorization for a service.

    Merges wildcard ('*') permissions with per-user permissions for
    *service* and returns them together with an RS256-signed service token
    (verifiable with PUBLIC_KEY). Returns empty permissions when the token
    is missing or invalid.
    """
    if token is not None and service is not None:
        try:
            token = jwt.decode(token, PRIVATE_KEY)
        except jwt.InvalidTokenError:
            token = None

        if token is not None:
            userid = token['userid']
            # User-specific permissions override the service-wide defaults.
            service_permissions = get_permission('*', service)
            user_permissions = get_permission(userid, service)
            permissions = {}
            if service_permissions is not None:
                permissions.update(service_permissions)
            if user_permissions is not None:
                permissions.update(user_permissions)
            ret = {
                'userid': userid,
                'permissions': permissions,
                'service': service
            }
            # Unlike the session tokens above, this one is asymmetric
            # (RS256) so third-party services can verify it.
            token = jwt.encode(ret, PRIVATE_KEY, algorithm='RS256')\
                .decode('ascii')
            ret['token'] = token
            return ret
    ret = {
        'permissions': {}
    }
    return ret
def public_key():
    """Return the PEM public key (counterpart of PRIVATE_KEY) so services
    can verify RS256 tokens issued by authorize()."""
    return PUBLIC_KEY
def lib():
    """Return the bundled client-side JavaScript (see LIBJS above)."""
    return LIBJS
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mongoengine, inspect
from flask import abort, current_app
from mongoengine.base.fields import BaseField
from mongoengine.queryset import (MultipleObjectsReturned,
DoesNotExist, QuerySet)
from mongoengine.base import ValidationError
from pymongo import uri_parser
from .sessions import *
from .pagination import *
from .metadata import *
from .json import override_json_encoder
from .wtf import WtfBaseField
from .connection import *
import flask_mongoengine
def redirect_connection_calls(cls):
    """
    Redirect ``mongoengine.connection`` calls via
    ``flask_mongoengine.connection``.

    Any module (other than mongoengine.connection itself) that exposes
    ``get_db``, ``get_connection`` or ``DEFAULT_CONNECTION_NAME`` gets
    those attributes rebound to the flask_mongoengine equivalents, keeping
    connection handling centralised.
    """
    replacements = {
        'get_db': get_db,
        'DEFAULT_CONNECTION_NAME': DEFAULT_CONNECTION_NAME,
        'get_connection': get_connection,
    }
    module = inspect.getmodule(cls)
    if module == mongoengine.connection:
        return
    for member_name, _member in inspect.getmembers(module):
        replacement = replacements.get(member_name, None)
        if replacement:
            setattr(module, member_name, replacement)
def _patch_base_field(obj, name):
    """
    If the object submitted has a class whose base class is
    mongoengine.base.fields.BaseField, then monkey patch to
    replace it with flask_mongoengine.wtf.WtfBaseField.
    @note: WtfBaseField is an instance of BaseField - but
    gives us the flexibility to extend field parameters
    and settings required of WTForm via model form generator.
    @see: flask_mongoengine.wtf.base.WtfBaseField.
    @see: model_form in flask_mongoengine.wtf.orm
    @param obj: The object whose footprint to locate the class.
    @param name: Name of the class to locate.
    """
    # locate class; non-class attributes are left untouched
    cls = getattr(obj, name)
    if not inspect.isclass(cls):
        return

    # fetch class base classes
    cls_bases = list(cls.__bases__)

    # replace BaseField with WtfBaseField (at most one occurrence; the
    # __bases__ rebind takes effect immediately)
    for index, base in enumerate(cls_bases):
        if base == BaseField:
            cls_bases[index] = WtfBaseField
            cls.__bases__ = tuple(cls_bases)
            break

    # re-assign class back to
    # object footprint (delete first so setattr installs the patched class)
    delattr(obj, name)
    setattr(obj, name, cls)
    redirect_connection_calls(cls)
def _include_mongoengine(obj):
    """Copy mongoengine's public names onto *obj*, WTF-patching each newly
    copied field class via _patch_base_field."""
    for module in (mongoengine, mongoengine.fields):
        for name in module.__all__:
            if hasattr(obj, name):
                continue
            setattr(obj, name, getattr(module, name))
            # patch BaseField if available
            _patch_base_field(obj, name)
def current_mongoengine_instance():
    """Return the MongoEngine instance registered on the current Flask app.

    The ``'mongoengine'`` extension registry is a dict keyed by MongoEngine
    instances (see ``MongoEngine.init_app``); the first instance found is
    returned, or ``None`` when there is no app context or no instance.

    Fixes two issues with the previous version: ``current_app.extensions``
    was dereferenced *before* the ``current_app`` truthiness guard (raising
    outside an application context), and ``sys`` was referenced for the
    Python 2 ``iteritems`` branch without a visible import.
    """
    if not current_app:
        return None
    registry = current_app.extensions.get('mongoengine', None)
    if not registry:
        return None
    # Registry keys are the MongoEngine instances themselves; plain dict
    # iteration yields keys on both Python 2 and 3, so no version check
    # (and no `sys`) is needed.
    for instance in registry:
        if isinstance(instance, MongoEngine):
            return instance
    return None
class MongoEngine(object):
    """Flask extension wrapping mongoengine: exposes mongoengine's public
    API (WTF-patched) on the instance and manages per-app connections."""

    def __init__(self, app=None, config=None):
        # Mirror mongoengine's public names (fields, queryset classes, ...)
        # onto this instance, then substitute the helper Document classes.
        _include_mongoengine(self)

        self.Document = Document
        self.DynamicDocument = DynamicDocument

        if app is not None:
            self.init_app(app, config)

    def init_app(self, app, config=None):
        """Bind this extension to *app*, creating the DB connection from
        *config* (falling back to ``app.config``). Raises if this instance
        was already initialized for the app."""
        app.extensions = getattr(app, 'extensions', {})

        # Make documents JSON serializable
        override_json_encoder(app)

        if not 'mongoengine' in app.extensions:
            app.extensions['mongoengine'] = {}

        if self in app.extensions['mongoengine']:
            # Raise an exception if extension already initialized as
            # potentially new configuration would not be loaded.
            raise Exception('Extension already initialized')

        if not config:
            # If not passed a config then we
            # read the connection settings from
            # the app config.
            config = app.config

        # Obtain db connection
        connection = create_connection(config)

        # Store objects in application instance
        # so that multiple apps do not end up
        # accessing the same objects.
        s = {'app': app, 'conn': connection}
        app.extensions['mongoengine'][self] = s

    def disconnect(self):
        """Tear down the connection(s) described by the current app config.

        ``disconnect(...)`` here is the module-level function pulled in via
        ``from .connection import *`` — not recursion into this method.
        """
        conn_settings = fetch_connection_settings(current_app.config)
        if isinstance(conn_settings, list):
            for setting in conn_settings:
                alias = setting.get('alias', DEFAULT_CONNECTION_NAME)
                disconnect(alias, setting.get('preserve_temp_db', False))
        else:
            alias = conn_settings.get('alias', DEFAULT_CONNECTION_NAME)
            disconnect(alias, conn_settings.get('preserve_temp_db', False))
        return True

    @property
    def connection(self):
        # The connection stored for this instance by init_app.
        return current_app.extensions['mongoengine'][self]['conn']
class BaseQuerySet(QuerySet):
    """
    A base queryset with handy extras
    """

    def get_or_404(self, *args, **kwargs):
        # Return the single matching document, aborting with 404 on no
        # match, multiple matches, or an invalid lookup value.
        try:
            return self.get(*args, **kwargs)
        except (MultipleObjectsReturned, DoesNotExist, ValidationError):
            abort(404)

    def first_or_404(self):
        # Return the first document, aborting with 404 when the queryset
        # is empty.
        obj = self.first()

        if obj is None:
            abort(404)

        return obj

    def paginate(self, page, per_page, error_out=True):
        # NOTE(review): `error_out` is accepted for API compatibility but is
        # currently ignored — Pagination is constructed without it.
        return Pagination(self, page, per_page)

    def paginate_field(self, field_name, doc_id, page, per_page,
                       total=None):
        # Paginate a list field of the document with id *doc_id*. `total`
        # falls back to an optional "<field>_count" attribute, then to
        # len() of the field itself.
        item = self.get(id=doc_id)
        count = getattr(item, field_name + "_count", '')
        total = total or count or len(getattr(item, field_name))
        return ListFieldPagination(self, doc_id, field_name, page, per_page,
                                   total=total)
class Document(mongoengine.Document):
    """Abstract document with extra helpers in the queryset class"""

    meta = {'abstract': True,
            'queryset_class': BaseQuerySet}

    def paginate_field(self, field_name, page, per_page, total=None):
        # Paginate one of this document's list fields. `total` falls back
        # to an optional "<field>_count" attribute, then to len() of the
        # field.
        count = getattr(self, field_name + "_count", '')
        total = total or count or len(getattr(self, field_name))
        return ListFieldPagination(self.__class__.objects, self.pk, field_name,
                                   page, per_page, total=total)
class DynamicDocument(mongoengine.DynamicDocument):
    """Abstract Dynamic document with extra helpers in the queryset class"""

    # Abstract base: concrete models subclass this; queries go through
    # BaseQuerySet for the *_or_404 / pagination helpers.
    meta = {'abstract': True,
            'queryset_class': BaseQuerySet}
|
|
import io
import os
import base64
import urllib
import aiohttp
import asyncio
import hashlib
import tarfile
import json
import datetime as dt
import ssl
from aiohttp import websocket
from .channel import Channel
from .utils import identical
from .multiplexed import MultiplexedResult
from .jsonstream import JsonStreamResult
class Docker:
    """Asyncio client for the Docker Engine HTTP API.

    Accepts http(s) endpoints or unix sockets (DOCKER_HOST or a plain
    socket path); for unix sockets the base URL is rewritten to
    "http://docker" and requests are routed through a UnixConnector.
    """

    def __init__(self,
                 url=os.environ.get('DOCKER_HOST', "/run/docker.sock"),
                 connector=None,
                 session=None,
                 ssl_context=None):
        self.url = url
        self.events = DockerEvents(self)
        self.containers = DockerContainers(self)

        if connector is None:
            if url.startswith('http://'):
                connector = aiohttp.TCPConnector()
            elif url.startswith('https://'):
                connector = aiohttp.TCPConnector(ssl_context=ssl_context)
            elif url.startswith('unix://'):
                # NOTE(review): 'unix://' is 7 characters, so url[8:] also
                # strips the first character after the scheme — this assumes
                # URLs of the form 'unix:///path'; confirm against callers.
                connector = aiohttp.connector.UnixConnector(url[8:])
                self.url = "http://docker"  # aiohttp treats this as a proxy
            elif url.startswith('/'):
                connector = aiohttp.connector.UnixConnector(url)
                self.url = "http://docker"  # aiohttp treats this as a proxy
            else:
                connector = aiohttp.connector.UnixConnector(url)
        self.connector = connector
        if session is None:
            session = aiohttp.ClientSession(connector=self.connector)
        self.session = session

    @asyncio.coroutine
    def pull(self, image, stream=False):
        """Pull *image*; return the progress stream when stream=True,
        otherwise the accumulated list of progress JSON lines."""
        response = yield from self._query(
            "images/create", "POST",
            params={"fromImage": image},
            headers={"content-type": "application/json",},
        )
        json_stream = self._json_stream_result(response)
        if stream:
            return json_stream

        # Manual async-iteration (pre-`async for` syntax).
        data = []
        i = yield from json_stream.__aiter__()
        while True:
            try:
                line = yield from i.__anext__()
            except StopAsyncIteration:
                break
            else:
                data.append(line)
        return data

    def _endpoint(self, path):
        # Join the API path onto the configured base URL.
        return "/".join([self.url, path])

    @asyncio.coroutine
    def _query(self, path, method='GET', params=None, timeout=None,
               data=None, headers=None, **kwargs):
        """Perform one API request; raise ValueError (with the server's
        message) on any 4xx/5xx response, otherwise return the response."""
        url = self._endpoint(path)
        future = asyncio.ensure_future(self.session.request(
            method, url,
            params=params, headers=headers, data=data, **kwargs))

        if timeout:
            response = yield from asyncio.wait_for(future, timeout)
        else:
            response = yield from future

        if (response.status // 100) in [4, 5]:
            what = yield from response.read()
            response.close()
            raise ValueError("Got a failure from the server: '%s'" % (
                what.decode('utf-8').strip()
            ))
        return response

    @asyncio.coroutine
    def _result(self, response, response_type=None):
        """Decode *response* as json/text/tar (sniffed from Content-Type
        when *response_type* is not given) and release the connection."""
        if not response_type:
            ct = response.headers.get("Content-Type", "")
            if 'json' in ct:
                response_type = 'json'
            elif 'x-tar' in ct:
                response_type = 'tar'
            elif 'text/plain' in ct:
                response_type = 'text'
            else:
                raise TypeError("Unrecognized response type: {}".format(ct))
        if 'tar' == response_type:
            what = yield from response.read()
            yield from response.release()
            return tarfile.open(mode='r', fileobj=io.BytesIO(what))

        if 'json' == response_type:
            data = yield from response.json(encoding='utf-8')
        elif 'text' == response_type:
            data = yield from response.text(encoding='utf-8')
        else:
            data = yield from response.read()
        yield from response.release()
        return data

    def _json_stream_result(self, response, transform=None):
        # Wrap a streaming JSON-lines response.
        return JsonStreamResult(response, transform)

    def _multiplexed_result(self, response):
        # Wrap a stdout/stderr-multiplexed stream (attach/logs endpoints).
        return MultiplexedResult(response)

    @asyncio.coroutine
    def _websocket(self, url, **params):
        """Open a websocket to *url*; defaults to streaming stdout+stderr."""
        if not params:
            params = {
                'stdout': 1,
                'stderr': 1,
                'stream': 1
            }
        url = self._endpoint(url) + "?" + urllib.parse.urlencode(params)
        ws = yield from aiohttp.ws_connect(url, connector=self.connector)
        return ws

    @asyncio.coroutine
    def _query_json(self, *args, **kwargs):
        # Convenience: _query + JSON decode.
        response = yield from self._query(*args, **kwargs)
        data = yield from self._result(response, 'json')
        return data
class DockerContainers(object):
    """Collection-level container operations (list/create/get), returning
    DockerContainer handles."""

    def __init__(self, docker):
        self.docker = docker

    @asyncio.coroutine
    def list(self, **kwargs):
        """List containers; *kwargs* are passed through as API query params."""
        data = yield from self.docker._query_json(
            "containers/json",
            method='GET',
            params=kwargs
        )
        return [DockerContainer(self.docker, **x) for x in data]

    @asyncio.coroutine
    def create_or_replace(self, name, config):
        """Ensure a container *name* exists with *config*: reuse an identical
        one, otherwise stop/delete the old one and create afresh."""
        container = None

        try:
            container = yield from self.get(name)
            if not identical(config, container._container):
                running = container._container.get(
                    "State", {}).get("Running", False)
                if running:
                    yield from container.stop()
                yield from container.delete()
                container = None
        except ValueError:
            # get() raised (e.g. 404 from _query) — treat as "not found".
            pass

        if container is None:
            container = yield from self.create(config, name=name)

        return container

    @asyncio.coroutine
    def create(self, config, name=None):
        """Create a container from *config* (JSON-encoded), optionally named."""
        url = "containers/create"

        config = json.dumps(config, sort_keys=True, indent=4).encode('utf-8')
        kwargs = {}
        if name:
            kwargs['name'] = name
        data = yield from self.docker._query_json(
            url,
            method='POST',
            headers={"content-type": "application/json",},
            data=config,
            params=kwargs
        )
        return DockerContainer(self.docker, id=data['Id'])

    @asyncio.coroutine
    def get(self, container, **kwargs):
        """Inspect *container* (name or id) and return a populated handle."""
        data = yield from self.docker._query_json(
            "containers/{}/json".format(container),
            method='GET',
            params=kwargs
        )
        return DockerContainer(self.docker, **data)

    def container(self, container_id, **kwargs):
        # Build a handle locally without hitting the API.
        data = {
            'id': container_id
        }
        data.update(kwargs)
        return DockerContainer(self.docker, **data)
class DockerContainer:
def __init__(self, docker, **kwargs):
    # Handle on a single container; *kwargs* is whatever JSON blob the
    # API returned for it.
    self.docker = docker
    self._container = kwargs
    # Different API endpoints spell the id key differently ("id"/"ID"/"Id").
    self._id = self._container.get("id", self._container.get(
        "ID", self._container.get("Id")))
    self.logs = DockerLog(docker, self)
@asyncio.coroutine
def log(self, stdout=False, stderr=False, follow=False, **kwargs):
if stdout is False and stderr is False:
raise TypeError("Need one of stdout or stderr")
params = {
"stdout": stdout,
"stderr": stderr,
"follow": follow,
}
params.update(kwargs)
response = yield from self.docker._query(
"containers/{}/logs".format(self._id),
method='GET',
params=params,
)
log_stream = self.docker._multiplexed_result(response)
if follow:
return log_stream
log_lines = []
#TODO 3.5 cleans up this syntax
i = yield from log_stream.__aiter__()
while True:
try:
line = yield from i.__anext__()
except StopAsyncIteration:
break
else:
log_lines.append(line.decode('utf-8'))
return ''.join(log_lines)
@asyncio.coroutine
def copy(self, resource, **kwargs):
#TODO this is deprecated, use get_archive instead
request = json.dumps({
"Resource": resource,
}, sort_keys=True, indent=4).encode('utf-8')
data = yield from self.docker._query(
"containers/{}/copy".format(self._id),
method='POST',
data=request,
headers={"content-type": "application/json",},
params=kwargs
)
return data
@asyncio.coroutine
def put_archive(self, path, data):
response = yield from self.docker._query(
"containers/{}/archive".format(self._id),
method='PUT',
data=data,
headers={"content-type": "application/json",},
params={'path': path}
)
data = yield from self.docker._result(response)
return data
@asyncio.coroutine
def show(self, **kwargs):
data = yield from self.docker._query_json(
"containers/{}/json".format(self._id),
method='GET',
params=kwargs
)
self._container = data
return data
@asyncio.coroutine
def stop(self, **kwargs):
response = yield from self.docker._query(
"containers/{}/stop".format(self._id),
method='POST',
params=kwargs
)
yield from response.release()
return
@asyncio.coroutine
def start(self, _config=None, **config):
config = _config or config
config = json.dumps(config, sort_keys=True, indent=4).encode('utf-8')
response = yield from self.docker._query(
"containers/{}/start".format(self._id),
method='POST',
headers={"content-type": "application/json",},
data=config
)
yield from response.release()
return
@asyncio.coroutine
def kill(self, **kwargs):
data = yield from self.docker._query_json(
"containers/{}/kill".format(self._id),
method='POST',
params=kwargs
)
return data
@asyncio.coroutine
def wait(self, timeout=None, **kwargs):
data = yield from self.docker._query_json(
"containers/{}/wait".format(self._id),
method='POST',
params=kwargs,
timeout=timeout,
)
return data
@asyncio.coroutine
def delete(self, **kwargs):
response = yield from self.docker._query(
"containers/{}".format(self._id),
method='DELETE',
params=kwargs
)
yield from response.release()
return
@asyncio.coroutine
def websocket(self, **params):
url = "containers/{}/attach/ws".format(self._id)
ws = yield from self.docker._websocket(url, **params)
return ws
@asyncio.coroutine
def port(self, private_port):
if 'NetworkSettings' not in self._container:
yield from self.show()
private_port = str(private_port)
h_ports = None
# Port settings is None when the container is running with
# network_mode=host.
port_settings = self._container.get('NetworkSettings', {}).get('Ports')
if port_settings is None:
return None
if '/' in private_port:
return port_settings.get(private_port)
h_ports = port_settings.get(private_port + '/tcp')
if h_ports is None:
h_ports = port_settings.get(private_port + '/udp')
return h_ports
def __getitem__(self, key):
return self._container[key]
def __hasitem__(self, key):
return key in self._container
class DockerEvents:
    """Subscribes to the daemon's events stream and fans each event out to
    listeners through a Channel."""
    def __init__(self, docker):
        self.running = False
        self.docker = docker
        self.channel = Channel()

    def listen(self):
        """Return a new listener subscribed to the event channel."""
        return self.channel.listen()

    def saferun(self):
        """Start the background run() task, at most once."""
        if self.running:
            return
        self.running = True
        asyncio.async(self.run())

    @asyncio.coroutine
    def query(self, **params):
        """Open the events endpoint and return it as a JSON stream."""
        response = yield from self.docker._query(
            "events",
            method="GET",
            params=params,
        )
        json_stream = self.docker._json_stream_result(response, self._transform_event)
        return json_stream

    def _transform_event(self, data):
        # Convert the daemon's numeric timestamp into a datetime, in place.
        if 'time' in data:
            data['time'] = dt.datetime.fromtimestamp(data['time'])
        return data

    @asyncio.coroutine
    def run(self):
        """Pump the event stream into the channel until it ends.

        For start/create events the matching DockerContainer is fetched
        and attached to the event under the 'container' key.
        """
        self.running = True
        containers = self.docker.containers
        json_stream = yield from self.query()
        # Manual async-iteration protocol (predating ``async for``).
        i = yield from json_stream.__aiter__()
        while True:
            try:
                data = yield from i.__anext__()
            except StopAsyncIteration:
                break
            else:
                if 'id' in data and data['status'] in [
                    "start", "create",
                ]:
                    data['container'] = yield from containers.get(data['id'])
                asyncio.async(self.channel.put(data))
        self.running = False
class DockerLog:
def __init__(self, docker, container):
self.docker = docker
self.channel = Channel()
self.container = container
self.running = False
def listen(self):
return self.channel.listen()
def saferun(self):
if self.running:
return
self.running = True
asyncio.async(self.run())
@asyncio.coroutine
def run(self):
self.running = True
containers = self.docker.containers
response = yield from self.docker._query(
'containers/{id}/logs'.format(id=self.container._id),
params=dict(
follow=True,
stdout=True,
stderr=True,
)
)
for msg in response:
msg = yield from msg
asyncio.async(self.channel.put(msg))
yield from response.release()
self.running = False
|
|
# AUV resizable thread pool - forked from the eventlet thread pool,
# but with modifications that allow it to dynamically grow as more
# threads are added. For now, it never shrinks - that shouldn't be a
# large issue, as the threads are doing legitimate waits instead of busy
# waits.
# Copyright (c) 2007-2009, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import imp
import os
import sys
import traceback
import eventlet
from eventlet import event, greenio, greenthread, patcher, timeout
from eventlet.support import six
# Public API of this module.
__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']
# Exceptions caught in worker threads and shipped back (as exc_info) to the
# calling greenthread for re-raising; SYS_EXCS propagate in the worker.
EXC_CLASSES = (Exception, timeout.Timeout)
SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)
# When False, execute() prints tracebacks of exceptions it re-raises.
QUIET = True
# Use the *original*, un-monkeypatched modules: the pool runs native threads.
socket = patcher.original('socket')
threading = patcher.original('threading')
if six.PY2:
    Queue_module = patcher.original('Queue')
if six.PY3:
    Queue_module = patcher.original('queue')
Empty = Queue_module.Empty
Queue = Queue_module.Queue
# Single byte written by workers to wake the green-side trampoline.
_bytetosend = b' '
# Module state, initialised lazily by setup():
_coro = None  # greenthread running tpool_trampoline
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 1000))  # pool size
_reqq = _rspq = None  # request / response queues
_rsock = _wsock = None  # socket pair linking native workers to the hub
_setup_already = False
_threads = []  # the native worker threads
def tpool_trampoline():
    """Green-side pump: sleep on the notification socket, then drain the
    response queue, delivering each result to its waiting Event."""
    global _rspq
    while True:
        try:
            wakeup = _rsock.recv(1)
            assert wakeup
            # FIXME: this is probably redundant since using sockets instead of pipe now
        except ValueError:
            break  # raised when the socket pair is closed
        while not _rspq.empty():
            try:
                ev, result = _rspq.get(block=False)
                ev.send(result)
                ev = result = None
            except Empty:
                pass
# count_lock guards free_threads: the number of workers not currently
# running a job.  Starts equal to the configured pool size.
count_lock = threading.Lock()
free_threads = _nthreads
def tworker():
    """Body of a native worker thread.

    Loops pulling (event, method, args, kwargs) tuples off the request
    queue, runs the call, puts (event, result) on the response queue and
    wakes the green-side trampoline with a one-byte write.  A None message
    (or a torn-down queue) shuts the worker down.
    """
    global _rspq, free_threads
    while True:
        try:
            msg = _reqq.get()
        except AttributeError:
            return  # can't get anything off of a dud queue
        if msg is None:
            return
        with count_lock:
            free_threads = free_threads - 1
        (e, meth, args, kwargs) = msg
        rv = None
        try:
            rv = meth(*args, **kwargs)
        except SYS_EXCS:
            raise
        except EXC_CLASSES:
            # Ship exc_info back so execute() can re-raise in the caller.
            rv = sys.exc_info()
            # test_leakage_from_tracebacks verifies that the use of
            # exc_info does not lead to memory leaks
        _rspq.put((e, rv))
        # Drop references promptly so results and tracebacks can be collected.
        msg = meth = args = kwargs = e = rv = None
        _wsock.sendall(_bytetosend)
        with count_lock:
            free_threads = free_threads + 1
def execute(meth, *args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.
    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding. With tpool, you can force such objects to
    cooperate with green threads by sticking them in native threads, at the cost
    of some overhead.
    """
    global _nthreads, free_threads
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    my_thread = threading.currentThread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)
    e = event.Event()
    _reqq.put((e, meth, args, kwargs))
    # Grow the pool when no worker is idle.
    # NOTE(review): 'spawn' is looked up in *meth*'s own kwargs, and
    # free_threads is read outside count_lock -- presumably deliberate in
    # this fork; verify before relying on either.
    if free_threads == 0 and kwargs.get('spawn', False):
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % _nthreads)
        t.daemon = True
        _nthreads = _nthreads + 1
        with count_lock:
            free_threads = free_threads + 1
        t.start()
        _threads.append(t)
    # Block this greenthread until the worker delivers the result.
    rv = e.wait()
    if isinstance(rv, tuple) \
            and len(rv) == 3 \
            and isinstance(rv[1], EXC_CLASSES):
        # The worker shipped back exc_info: re-raise in the caller's context.
        (c, e, tb) = rv
        if not QUIET:
            traceback.print_exception(c, e, tb)
            traceback.print_stack()
        six.reraise(c, e, tb)
    return rv
def proxy_call(autowrap, f, *args, **kwargs):
    """
    Call *f* and return its value, wrapping the result in a :class:`Proxy`
    when its type is in the *autowrap* collection.

    Normally *f* runs in the threadpool via :func:`execute`; passing the
    keyword argument ``nonblocking=True`` calls it directly instead, which
    is useful for methods that need no separate thread but whose return
    values should still be Proxy wrapped.
    """
    direct = kwargs.pop('nonblocking', False)
    if direct:
        result = f(*args, **kwargs)
    else:
        result = execute(f, *args, **kwargs)
    return Proxy(result, autowrap) if isinstance(result, autowrap) else result
class Proxy(object):
    """
    a simple proxy-wrapper of any object that comes with a
    methods-only interface, in order to forward every method
    invocation onto a thread in the native-thread pool. A key
    restriction is that the object's methods should not switch
    greenlets or use Eventlet primitives, since they are in a
    different thread from the main hub, and therefore might behave
    unexpectedly. This is for running native-threaded code
    only.
    It's common to want to have some of the attributes or return
    values also wrapped in Proxy objects (for example, database
    connection objects produce cursor objects which also should be
    wrapped in Proxy objects to remain nonblocking). *autowrap*, if
    supplied, is a collection of types; if an attribute or return
    value matches one of those types (via isinstance), it will be
    wrapped in a Proxy. *autowrap_names* is a collection
    of strings, which represent the names of attributes that should be
    wrapped in Proxy objects when accessed.
    """
    def __init__(self, obj, autowrap=(), autowrap_names=()):
        self._obj = obj
        self._autowrap = autowrap
        self._autowrap_names = autowrap_names

    def __getattr__(self, attr_name):
        # Non-callable attributes are returned (possibly Proxy-wrapped)
        # directly; callables are wrapped so the call runs in the pool.
        f = getattr(self._obj, attr_name)
        if not hasattr(f, '__call__'):
            if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
                return Proxy(f, self._autowrap)
            return f

        def doit(*args, **kwargs):
            result = proxy_call(self._autowrap, f, *args, **kwargs)
            if attr_name in self._autowrap_names and not isinstance(result, Proxy):
                return Proxy(result)
            return result
        return doit

    # the following are a buncha methods that the python interpreter
    # doesn't use getattr to retrieve and therefore have to be defined
    # explicitly
    def __getitem__(self, key):
        return proxy_call(self._autowrap, self._obj.__getitem__, key)

    def __setitem__(self, key, value):
        return proxy_call(self._autowrap, self._obj.__setitem__, key, value)

    def __deepcopy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)

    def __copy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__copy__, memo)

    def __call__(self, *a, **kw):
        if '__call__' in self._autowrap_names:
            return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
        else:
            return proxy_call(self._autowrap, self._obj, *a, **kw)

    def __enter__(self):
        return proxy_call(self._autowrap, self._obj.__enter__)

    def __exit__(self, *exc):
        return proxy_call(self._autowrap, self._obj.__exit__, *exc)

    # these don't go through a proxy call, because they're likely to
    # be called often, and are unlikely to be implemented on the
    # wrapped object in such a way that they would block
    def __eq__(self, rhs):
        return self._obj == rhs

    def __hash__(self):
        return self._obj.__hash__()

    def __repr__(self):
        return self._obj.__repr__()

    def __str__(self):
        return self._obj.__str__()

    def __len__(self):
        return len(self._obj)

    def __nonzero__(self):
        return bool(self._obj)
    # Python3
    __bool__ = __nonzero__

    def __iter__(self):
        # If the object is its own iterator, iterate via this proxy's next();
        # otherwise the separate iterator gets its own Proxy.
        it = iter(self._obj)
        if it == self._obj:
            return self
        else:
            return Proxy(it)

    def next(self):
        return proxy_call(self._autowrap, next, self._obj)
    # Python3
    __next__ = next
def setup():
    """Initialise the pool on first use: the request/response queues, the
    native worker threads, the notification socket pair and the green-side
    trampoline.  Idempotent via _setup_already."""
    global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will\
execute in main thread. Check the value of the environment \
variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)
    # connected socket pair
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    sock.listen(1)
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    csock.connect(sock.getsockname())
    csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    _wsock, _addr = sock.accept()
    _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    sock.close()
    # The receiving end lives in the hub as a green socket.
    _rsock = greenio.GreenSocket(csock)
    for i in six.moves.range(_nthreads):
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % i)
        t.daemon = True
        t.start()
        _threads.append(t)
    _coro = greenthread.spawn_n(tpool_trampoline)
    # This yield fixes subtle error with GreenSocket.__del__
    eventlet.sleep(0)
# Avoid ResourceWarning unclosed socket on Python3.2+
@atexit.register
def killall():
    """Shut the pool down: stop every worker, flush pending results, kill
    the trampoline greenthread and close the notification sockets.  Resets
    _setup_already so setup() can run again."""
    global _setup_already, _rspq, _rsock, _wsock
    if not _setup_already:
        return
    # This yield fixes freeze in some scenarios
    eventlet.sleep(0)
    # One None per worker: each worker exits after consuming one.
    for thr in _threads:
        _reqq.put(None)
    for thr in _threads:
        thr.join()
    del _threads[:]
    # return any remaining results
    while (_rspq is not None) and not _rspq.empty():
        try:
            (e, rv) = _rspq.get(block=False)
            e.send(rv)
            e = rv = None
        except Empty:
            pass
    if _coro is not None:
        greenthread.kill(_coro)
    if _rsock is not None:
        _rsock.close()
        _rsock = None
    if _wsock is not None:
        _wsock.close()
        _wsock = None
    _rspq = None
    _setup_already = False
def set_num_threads(nthreads):
    """Set the pool's target size and reset the idle-thread counter."""
    global _nthreads
    global free_threads
    _nthreads = nthreads
    free_threads = nthreads
|
|
"""
ROC Analysis Widget
-------------------
"""
import operator
from functools import reduce, wraps
from collections import namedtuple, deque, OrderedDict
import numpy
import sklearn.metrics as skl_metrics
from PyQt4 import QtGui
from PyQt4.QtGui import QColor, QPen, QBrush
from PyQt4.QtCore import Qt
import pyqtgraph as pg
import Orange
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import colorpalette, colorbrewer
from Orange.widgets.io import FileFormat
from Orange.canvas import report
#: Points on a ROC curve
ROCPoints = namedtuple(
    "ROCPoints",
    ["fpr",         # (N,) array of false positive rate coordinates (ascending)
     "tpr",         # (N,) array of true positive rate coordinates
     "thresholds"   # (N,) array of thresholds (in descending order)
     ]
)
# A ROCPoints is valid when it contains at least one point.
ROCPoints.is_valid = property(lambda self: self.fpr.size > 0)

#: ROC Curve and its convex hull
ROCCurve = namedtuple(
    "ROCCurve",
    ["points",  # ROCPoints
     "hull"     # ROCPoints of the convex hull
     ]
)
ROCCurve.is_valid = property(lambda self: self.points.is_valid)

#: A ROC Curve averaged vertically
ROCAveragedVert = namedtuple(
    "ROCAveragedVert",
    ["points",   # ROCPoints sampled by fpr
     "hull",     # ROCPoints of the convex hull
     "tpr_std",  # array standard deviation of tpr at each fpr point
     ]
)
ROCAveragedVert.is_valid = property(lambda self: self.points.is_valid)

#: A ROC Curve averaged by thresholds
ROCAveragedThresh = namedtuple(
    "ROCAveragedThresh",
    ["points",   # ROCPoints sampled by threshold
     "hull",     # ROCPoints of the convex hull
     "tpr_std",  # array standard deviations of tpr at each threshold
     "fpr_std"   # array standard deviations of fpr at each threshold
     ]
)
ROCAveragedThresh.is_valid = property(lambda self: self.points.is_valid)

#: Combined data for a ROC curve of a single algorithm
ROCData = namedtuple(
    "ROCData",
    ["merged",         # ROCCurve merged over all folds
     "folds",          # ROCCurve list, one for each fold
     "avg_vertical",   # ROCAveragedVert
     "avg_threshold",  # ROCAveragedThresh
     ]
)
def ROCData_from_results(results, clf_index, target):
    """
    Compute ROC Curve(s) from evaluation results.

    :param Orange.evaluation.Results results:
        Evaluation results.
    :param int clf_index:
        Learner index in the `results`.
    :param int target:
        Target class index (i.e. positive class).
    :rtype: ROCData
        An instance holding the merged, per-fold, vertically averaged and
        threshold-averaged curves.
    """
    # NOTE(review): slice(0, -1) omits the final row of `results.actual`;
    # confirm this is the intended convention for "merged over all folds".
    merged = roc_curve_for_fold(results, slice(0, -1), clf_index, target)
    merged_curve = ROCCurve(ROCPoints(*merged),
                            ROCPoints(*roc_curve_convex_hull(merged)))
    folds = results.folds if results.folds is not None else [slice(0, -1)]
    fold_curves = []
    for fold in folds:
        # TODO: Check for no FP or no TP
        points = roc_curve_for_fold(results, fold, clf_index, target)
        hull = roc_curve_convex_hull(points)
        c = ROCCurve(ROCPoints(*points), ROCPoints(*hull))
        fold_curves.append(c)
    curves = [fold.points for fold in fold_curves
              if fold.is_valid]
    # Vertical average: sample TPR at fixed FPR positions.  Thresholds are
    # meaningless for an averaged curve, so fill them with NaN.
    fpr, tpr, std = roc_curve_vertical_average(curves)
    thresh = numpy.full_like(fpr, numpy.nan)
    hull = roc_curve_convex_hull((fpr, tpr, thresh))
    v_avg = ROCAveragedVert(
        ROCPoints(fpr, tpr, thresh),
        ROCPoints(*hull),
        std
    )
    # Threshold average: pool thresholds from all folds, clamp to [0, 1]
    # (with a small epsilon) and thin the unique values to ~10 samples.
    all_thresh = numpy.hstack([t for _, _, t in curves])
    all_thresh = numpy.clip(all_thresh, 0.0 - 1e-10, 1.0 + 1e-10)
    all_thresh = numpy.unique(all_thresh)[::-1]
    thresh = all_thresh[::max(all_thresh.size // 10, 1)]
    (fpr, fpr_std), (tpr, tpr_std) = \
        roc_curve_threshold_average(curves, thresh)
    hull = roc_curve_convex_hull((fpr, tpr, thresh))
    t_avg = ROCAveragedThresh(
        ROCPoints(fpr, tpr, thresh),
        ROCPoints(*hull),
        tpr_std,
        fpr_std
    )
    return ROCData(merged_curve, fold_curves, v_avg, t_avg)
# Expose as an alternate constructor on the ROCData tuple type.
ROCData.from_results = staticmethod(ROCData_from_results)
#: A curve item to be displayed in a plot
#: (constructed via PlotCurve.from_roc_curve / plot_curve)
PlotCurve = namedtuple(
    "PlotCurve",
    ["curve",       # ROCCurve source curve
     "curve_item",  # pg.PlotDataItem main curve
     "hull_item"    # pg.PlotDataItem curve's convex hull
     ]
)
def plot_curve(curve, pen=None, shadow_pen=None, symbol="+",
               symbol_size=3, name=None):
    """
    Construct a `PlotCurve` for the given `ROCCurve`.
    :param ROCCurve curve:
        Source curve.
    The other parameters are passed to pg.PlotDataItem
    :rtype: PlotCurve
    """
    def extend_to_origin(points):
        "Extend ROCPoints to include coordinate origin if not already present"
        if points.tpr.size and (points.tpr[0] > 0 or points.fpr[0] > 0):
            # Prepend (0, 0), with a threshold above the current maximum.
            points = ROCPoints(
                numpy.r_[0, points.fpr], numpy.r_[0, points.tpr],
                numpy.r_[points.thresholds[0] + 1, points.thresholds]
            )
        return points

    points = extend_to_origin(curve.points)
    item = pg.PlotCurveItem(
        points.fpr, points.tpr, pen=pen, shadowPen=shadow_pen,
        name=name, antialias=True
    )
    # Scatter markers use the *original* (unextended) points.
    sp = pg.ScatterPlotItem(
        curve.points.fpr, curve.points.tpr, symbol=symbol,
        size=symbol_size, pen=shadow_pen,
        name=name
    )
    sp.setParentItem(item)
    hull = extend_to_origin(curve.hull)
    hull_item = pg.PlotDataItem(
        hull.fpr, hull.tpr, pen=pen, antialias=True
    )
    return PlotCurve(curve, item, hull_item)
# Expose as an alternate constructor on PlotCurve.
PlotCurve.from_roc_curve = staticmethod(plot_curve)
#: A curve displayed in a plot with error bars
#: (constructed via PlotAvgCurve.from_roc_curve / plot_avg_curve)
PlotAvgCurve = namedtuple(
    "PlotAvgCurve",
    ["curve",         # ROCCurve
     "curve_item",    # pg.PlotDataItem
     "hull_item",     # pg.PlotDataItem
     "confint_item",  # pg.ErrorBarItem
     ]
)
def plot_avg_curve(curve, pen=None, shadow_pen=None, symbol="+",
                   symbol_size=4, name=None):
    """
    Construct a `PlotAvgCurve` for the given `curve`.
    :param curve: Source curve.
    :type curve: ROCAveragedVert or ROCAveragedThresh
    The other parameters are passed to pg.PlotDataItem
    :rtype: PlotAvgCurve
    """
    pc = plot_curve(curve, pen=pen, shadow_pen=shadow_pen, symbol=symbol,
                    symbol_size=symbol_size, name=name)
    points = curve.points
    if isinstance(curve, ROCAveragedVert):
        # Vertical averaging: error bars in tpr only (end points excluded).
        tpr_std = curve.tpr_std
        error_item = pg.ErrorBarItem(
            x=points.fpr[1:-1], y=points.tpr[1:-1],
            height=2 * tpr_std[1:-1],
            pen=pen, beam=0.025,
            antialias=True,
        )
    elif isinstance(curve, ROCAveragedThresh):
        # Threshold averaging: error bars in both tpr and fpr.
        tpr_std, fpr_std = curve.tpr_std, curve.fpr_std
        error_item = pg.ErrorBarItem(
            x=points.fpr[1:-1], y=points.tpr[1:-1],
            height=2 * tpr_std[1:-1], width=2 * fpr_std[1:-1],
            pen=pen, beam=0.025,
            antialias=True,
        )
    # NOTE(review): any other curve type leaves error_item unbound (NameError).
    return PlotAvgCurve(curve, pc.curve_item, pc.hull_item, error_item)
# Expose as an alternate constructor on PlotAvgCurve.
PlotAvgCurve.from_roc_curve = staticmethod(plot_avg_curve)
# Box type distinguishing "cached None" from "not yet computed" in `once`.
Some = namedtuple("Some", ["val"])

def once(f):
    """
    Return a nullary function that calls *f* only on the first invocation
    and returns the cached result thereafter.

    The result is boxed in `Some` so that a legitimate None return value
    is cached too (a bare None would read as "not computed yet").
    """
    cached = None

    @wraps(f)
    def wrapper():  # fixed misspelled internal name ("wraped")
        nonlocal cached
        if cached is None:
            cached = Some(f())
        return cached.val
    return wrapper
# Lazy accessors for one classifier's plot items; each field is a nullary
# callable (memoized with `once`) producing the item(s) on first use.
# NOTE(review): this name is shadowed by the OWROCAnalysis.plot_curves method.
plot_curves = namedtuple(
    "plot_curves",
    ["merge",          # :: () -> PlotCurve
     "folds",          # :: () -> [PlotCurve]
     "avg_vertical",   # :: () -> PlotAvgCurve
     "avg_threshold",  # :: () -> PlotAvgCurve
     ]
)
class InfiniteLine(pg.InfiniteLine):
    """pyqtgraph.InfiniteLine extended to support antialiasing.
    """
    def __init__(self, pos=None, angle=90, pen=None, movable=False,
                 bounds=None, antialias=False):
        super().__init__(pos, angle, pen, movable, bounds)
        self.antialias = antialias

    def paint(self, painter, *args):
        # Enable antialiased rendering before delegating to the base paint.
        if self.antialias:
            painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
        super().paint(painter, *args)
class OWROCAnalysis(widget.OWWidget):
    """Orange widget plotting ROC curves for evaluated classifiers."""

    name = "ROC Analysis"
    description = "Display Receiver Operating Characteristics curve " \
                  "based on evaluation of classifiers."
    icon = "icons/ROCAnalysis.svg"
    priority = 1010
    inputs = [("Evaluation Results", Orange.evaluation.Results, "set_results")]

    #: Index of the positive (target) class.
    target_index = settings.Setting(0)
    # NOTE(review): mutable class attribute, and the only list-box state not
    # declared as a Setting -- verify this is intentional.
    selected_classifiers = []

    display_perf_line = settings.Setting(True)
    display_def_threshold = settings.Setting(True)
    # Costs/prior for the iso-performance line.
    fp_cost = settings.Setting(500)
    fn_cost = settings.Setting(500)
    target_prior = settings.Setting(50.0)

    #: ROC Averaging Types
    Merge, Vertical, Threshold, NoAveraging = 0, 1, 2, 3
    roc_averaging = settings.Setting(Merge)

    display_convex_hull = settings.Setting(False)
    display_convex_curve = settings.Setting(False)

    graph_name = "plot"

    def __init__(self):
        super().__init__()
        self.results = None    # Orange.evaluation.Results or None
        self.classifier_names = []
        # NOTE(review): perf_line appears to stay None forever; the live
        # item is self._perf_line (see _setup_plot) -- confirm.
        self.perf_line = None
        self.colors = []
        self._curve_data = {}    # (target, clf_idx) -> ROCData cache
        self._plot_curves = {}   # (target, clf_idx) -> plot_curves cache
        self._rocch = None       # combined convex hull (ROCPoints)
        self._perf_line = None   # iso-performance InfiniteLine

        # --- control area -------------------------------------------------
        box = gui.widgetBox(self.controlArea, "Plot")
        tbox = gui.widgetBox(box, "Target Class")
        tbox.setFlat(True)
        self.target_cb = gui.comboBox(
            tbox, self, "target_index", callback=self._on_target_changed,
            contentsLength=8)
        cbox = gui.widgetBox(box, "Classifiers")
        cbox.setFlat(True)
        self.classifiers_list_box = gui.listBox(
            cbox, self, "selected_classifiers", "classifier_names",
            selectionMode=QtGui.QListView.MultiSelection,
            callback=self._on_classifiers_changed)
        abox = gui.widgetBox(box, "Combine ROC Curves From Folds")
        abox.setFlat(True)
        # Items are ordered to match the Merge..NoAveraging constants.
        gui.comboBox(abox, self, "roc_averaging",
                     items=["Merge predictions from folds", "Mean TP rate",
                            "Mean TP and FP at threshold", "Show individual curves"],
                     callback=self._replot)
        hbox = gui.widgetBox(box, "ROC Convex Hull")
        hbox.setFlat(True)
        gui.checkBox(hbox, self, "display_convex_curve",
                     "Show convex ROC curves", callback=self._replot)
        gui.checkBox(hbox, self, "display_convex_hull",
                     "Show ROC convex hull", callback=self._replot)
        box = gui.widgetBox(self.controlArea, "Analysis")
        gui.checkBox(box, self, "display_def_threshold",
                     "Default threshold (0.5) point",
                     callback=self._on_display_def_threshold_changed)
        gui.checkBox(box, self, "display_perf_line", "Show performance line",
                     callback=self._on_display_perf_line_changed)
        grid = QtGui.QGridLayout()
        ibox = gui.indentedBox(box, orientation=grid)
        sp = gui.spin(box, self, "fp_cost", 1, 1000, 10,
                      callback=self._on_display_perf_line_changed)
        grid.addWidget(QtGui.QLabel("FP Cost"), 0, 0)
        grid.addWidget(sp, 0, 1)
        sp = gui.spin(box, self, "fn_cost", 1, 1000, 10,
                      callback=self._on_display_perf_line_changed)
        # NOTE(review): no (row, column) given here, unlike the FP Cost label.
        grid.addWidget(QtGui.QLabel("FN Cost"))
        grid.addWidget(sp, 1, 1)
        sp = gui.spin(box, self, "target_prior", 1, 99,
                      callback=self._on_display_perf_line_changed)
        sp.setSuffix("%")
        sp.addAction(QtGui.QAction("Auto", sp))
        grid.addWidget(QtGui.QLabel("Prior target class probability"))
        grid.addWidget(sp, 2, 1)

        # --- main area: the pyqtgraph plot --------------------------------
        self.plotview = pg.GraphicsView(background="w")
        self.plotview.setFrameStyle(QtGui.QFrame.StyledPanel)
        self.plot = pg.PlotItem()
        self.plot.getViewBox().setMenuEnabled(False)
        self.plot.getViewBox().setMouseEnabled(False, False)
        pen = QPen(self.palette().color(QtGui.QPalette.Text))
        tickfont = QtGui.QFont(self.font())
        tickfont.setPixelSize(max(int(tickfont.pixelSize() * 2 // 3), 11))
        axis = self.plot.getAxis("bottom")
        axis.setTickFont(tickfont)
        axis.setPen(pen)
        axis.setLabel("FP Rate (1-Specificity)")
        axis = self.plot.getAxis("left")
        axis.setTickFont(tickfont)
        axis.setPen(pen)
        axis.setLabel("TP Rate (Sensitivity)")
        self.plot.showGrid(True, True, alpha=0.1)
        self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0))
        self.plotview.setCentralItem(self.plot)
        self.mainArea.layout().addWidget(self.plotview)
        self.inline_graph_report()

    def set_results(self, results):
        """Set the input evaluation results."""
        self.clear()
        self.error(0)
        if results is not None:
            # Validate: need test data with a discrete class variable.
            if results.data is None:
                self.error(0, "Evaluation results require"
                              " information on test data")
                results = None
            elif not results.data.domain.has_discrete_class:
                self.error(0, "Need discrete class variable")
                results = None
        self.results = results
        if results is not None:
            self._initialize(results)
            self._setup_plot()

    def clear(self):
        """Clear the widget state."""
        self.results = None
        self.plot.clear()
        self.classifier_names = []
        self.selected_classifiers = []
        self.target_cb.clear()
        self.target_index = 0
        self.colors = []
        self._curve_data = {}
        self._plot_curves = {}
        self._rocch = None
        self._perf_line = None

    def _initialize(self, results):
        """Populate classifier list, colors and the target combo box."""
        names = getattr(results, "learner_names", None)
        if names is None:
            # Fall back to positional labels when learner names are absent.
            names = ["#{}".format(i + 1)
                     for i in range(len(results.predicted))]
        self.colors = colorpalette.ColorPaletteGenerator(
            len(names), colorbrewer.colorSchemes["qualitative"]["Dark2"])
        self.classifier_names = names
        self.selected_classifiers = list(range(len(names)))
        for i in range(len(names)):
            listitem = self.classifiers_list_box.item(i)
            listitem.setIcon(colorpalette.ColorPixmap(self.colors[i]))
        class_var = results.data.domain.class_var
        self.target_cb.addItems(class_var.values)

    def curve_data(self, target, clf_idx):
        """Return `ROCData' for the given target and classifier."""
        if (target, clf_idx) not in self._curve_data:
            data = ROCData.from_results(self.results, clf_idx, target)
            self._curve_data[target, clf_idx] = data
        return self._curve_data[target, clf_idx]

    def plot_curves(self, target, clf_idx):
        """Return a set of functions `plot_curves` generating plot curves."""
        def generate_pens(basecolor):
            # Main pen plus a lighter, wider shadow pen for the same curve.
            pen = QPen(basecolor, 1)
            pen.setCosmetic(True)
            shadow_pen = QPen(pen.color().lighter(160), 2.5)
            shadow_pen.setCosmetic(True)
            return pen, shadow_pen
        data = self.curve_data(target, clf_idx)
        if (target, clf_idx) not in self._plot_curves:
            pen, shadow_pen = generate_pens(self.colors[clf_idx])
            name = self.classifier_names[clf_idx]

            # Each accessor is memoized so items are built at most once.
            @once
            def merged():
                return plot_curve(
                    data.merged, pen=pen, shadow_pen=shadow_pen, name=name)

            @once
            def folds():
                return [plot_curve(fold, pen=pen, shadow_pen=shadow_pen)
                        for fold in data.folds]

            @once
            def avg_vert():
                return plot_avg_curve(data.avg_vertical, pen=pen,
                                      shadow_pen=shadow_pen, name=name)

            @once
            def avg_thres():
                return plot_avg_curve(data.avg_threshold, pen=pen,
                                      shadow_pen=shadow_pen, name=name)
            self._plot_curves[target, clf_idx] = plot_curves(
                merge=merged, folds=folds,
                avg_vertical=avg_vert, avg_threshold=avg_thres
            )
        return self._plot_curves[target, clf_idx]

    def _setup_plot(self):
        """(Re)build all plot items for the current selection and averaging mode."""
        target = self.target_index
        selected = self.selected_classifiers
        curves = [self.plot_curves(target, i) for i in selected]
        selected = [self.curve_data(target, i) for i in selected]
        if self.roc_averaging == OWROCAnalysis.Merge:
            for curve in curves:
                graphics = curve.merge()
                curve = graphics.curve
                self.plot.addItem(graphics.curve_item)
                if self.display_convex_curve:
                    self.plot.addItem(graphics.hull_item)
                if self.display_def_threshold:
                    # Label the point closest to the default 0.5 threshold.
                    points = curve.points
                    ind = numpy.argmin(numpy.abs(points.thresholds - 0.5))
                    item = pg.TextItem(
                        text="{:.3f}".format(points.thresholds[ind]),
                    )
                    item.setPos(points.fpr[ind], points.tpr[ind])
                    self.plot.addItem(item)
            hull_curves = [curve.merged.hull for curve in selected]
            if hull_curves:
                self._rocch = convex_hull(hull_curves)
                iso_pen = QPen(QColor(Qt.black), 1)
                iso_pen.setCosmetic(True)
                self._perf_line = InfiniteLine(pen=iso_pen, antialias=True)
                self.plot.addItem(self._perf_line)
        elif self.roc_averaging == OWROCAnalysis.Vertical:
            for curve in curves:
                graphics = curve.avg_vertical()
                self.plot.addItem(graphics.curve_item)
                self.plot.addItem(graphics.confint_item)
            hull_curves = [curve.avg_vertical.hull for curve in selected]
        elif self.roc_averaging == OWROCAnalysis.Threshold:
            for curve in curves:
                graphics = curve.avg_threshold()
                self.plot.addItem(graphics.curve_item)
                self.plot.addItem(graphics.confint_item)
            hull_curves = [curve.avg_threshold.hull for curve in selected]
        elif self.roc_averaging == OWROCAnalysis.NoAveraging:
            for curve in curves:
                graphics = curve.folds()
                for fold in graphics:
                    self.plot.addItem(fold.curve_item)
                    if self.display_convex_curve:
                        self.plot.addItem(fold.hull_item)
            hull_curves = [fold.hull for curve in selected for fold in curve.folds]
        if self.display_convex_hull and hull_curves:
            # Shaded combined convex hull, drawn behind everything else.
            hull = convex_hull(hull_curves)
            hull_pen = QPen(QColor(200, 200, 200, 100), 2)
            hull_pen.setCosmetic(True)
            item = self.plot.plot(
                hull.fpr, hull.tpr,
                pen=hull_pen,
                brush=QBrush(QColor(200, 200, 200, 50)),
                fillLevel=0)
            item.setZValue(-10000)
        # Diagonal chance line.
        pen = QPen(QColor(100, 100, 100, 100), 1, Qt.DashLine)
        pen.setCosmetic(True)
        self.plot.plot([0, 1], [0, 1], pen=pen, antialias=True)
        if self.roc_averaging == OWROCAnalysis.Merge:
            self._update_perf_line()

    def _on_target_changed(self):
        """Target class changed: rebuild the plot."""
        self.plot.clear()
        self._setup_plot()

    def _on_classifiers_changed(self):
        """Classifier selection changed: rebuild the plot if results exist."""
        self.plot.clear()
        if self.results is not None:
            self._setup_plot()

    def _on_display_perf_line_changed(self):
        """Performance-line option or its costs changed: update the line."""
        if self.roc_averaging == OWROCAnalysis.Merge:
            self._update_perf_line()
        # NOTE(review): self.perf_line is only ever assigned None in
        # __init__; the live item is self._perf_line -- confirm this is dead.
        if self.perf_line is not None:
            self.perf_line.setVisible(self.display_perf_line)

    def _on_display_def_threshold_changed(self):
        self._replot()

    def _replot(self):
        """Clear and rebuild the plot from cached curve data."""
        self.plot.clear()
        if self.results is not None:
            self._setup_plot()

    def _update_perf_line(self):
        """Position and orient the iso-performance line from the costs and
        target prior, anchored on the combined convex hull."""
        if self._perf_line is None:
            return
        self._perf_line.setVisible(self.display_perf_line)
        if self.display_perf_line:
            m = roc_iso_performance_slope(
                self.fp_cost, self.fn_cost, self.target_prior / 100.0)
            hull = self._rocch
            ind = roc_iso_performance_line(m, hull)
            angle = numpy.arctan2(m, 1)  # in radians
            self._perf_line.setAngle(angle * 180 / numpy.pi)
            self._perf_line.setPos((hull.fpr[ind[0]], hull.tpr[ind[0]]))

    def onDeleteWidget(self):
        self.clear()

    def send_report(self):
        """Compose the report: settings summary, plot image, legend caption."""
        if self.results is None:
            return
        items = OrderedDict()
        items["Target class"] = self.target_cb.currentText()
        if self.display_perf_line:
            items["Costs"] = \
                "FP = {}, FN = {}".format(self.fp_cost, self.fn_cost)
            items["Target probability"] = "{} %".format(self.target_prior)
        caption = report.list_legend(self.classifiers_list_box,
                                     self.selected_classifiers)
        self.report_items(items)
        self.report_plot()
        self.report_caption(caption)
def interp(x, xp, fp, left=None, right=None):
    """Piecewise-linear interpolation of `x` over sample points (xp, fp).

    Like :func:`numpy.interp` except for the handling of running sequences
    of identical values in ``xp``: the right-most sample of a run is used
    (``searchsorted(..., side="right")``).  Points that fall exactly on the
    right boundary always take ``fp[-1]`` when ``right`` is given.
    """
    x = numpy.asanyarray(x)
    xp = numpy.asanyarray(xp)
    fp = numpy.asanyarray(fp)
    if xp.shape != fp.shape:
        raise ValueError("xp and fp must have the same shape")

    # Index of the first sample strictly to the right of each query point.
    idx = numpy.searchsorted(xp, x, side="right")
    below = idx == 0
    above = idx == len(xp)
    inside = ~(below | above)

    fx = numpy.zeros(len(x))
    fx[below] = fp[0] if left is None else left
    fx[above] = fp[-1] if right is None else right
    if right is not None:
        # Points sitting exactly on the right boundary take fp[-1],
        # overriding the explicit `right` fill value.
        fx[x == xp[-1]] = fp[-1]

    idx = idx[inside]
    grad = (fp[idx] - fp[idx - 1]) / (xp[idx] - xp[idx - 1])
    # Anchor each linear segment at its right-hand sample point.
    fx[inside] = fp[idx] + grad * (x[inside] - xp[idx])
    return fx
def roc_curve_for_fold(res, fold, clf_idx, target):
    """ROC curve (fpr, tpr, thresholds) for one classifier on one CV fold.

    Returns three empty arrays when the fold contains no positive or no
    negative examples, since TPR/FPR are undefined in that case.
    """
    fold_actual = res.actual[fold]
    n_pos = numpy.sum(fold_actual == target)
    n_neg = fold_actual.size - n_pos
    if n_pos == 0 or n_neg == 0:
        # Undefined TP and FP rate
        return numpy.array([]), numpy.array([]), numpy.array([])
    fold_probs = res.probabilities[clf_idx][fold][:, target]
    return skl_metrics.roc_curve(fold_actual, fold_probs, pos_label=target)
def roc_curve_vertical_average(curves, samples=10):
    """Vertically average ROC curves at `samples` evenly spaced FPR values.

    Returns (fpr_sample, mean_tpr, std_tpr); each curve is resampled with
    :func:`interp`, clamped to 0 on the left and 1 on the right.
    """
    fpr_sample = numpy.linspace(0.0, 1.0, samples)
    resampled = [interp(fpr_sample, fpr, tpr, left=0, right=1)
                 for fpr, tpr, _ in curves]
    tpr_samples = numpy.array(resampled)
    return fpr_sample, tpr_samples.mean(axis=0), tpr_samples.std(axis=0)
def roc_curve_threshold_average(curves, thresh_samples):
    """Average ROC curves at common classifier-threshold sample points.

    Args:
        curves: iterable of ``(fpr, tpr, thresh)`` triples (as produced by
            ``roc_curve``); ``thresh`` is sorted in decreasing order.
        thresh_samples: 1-D array of threshold values at which to sample.

    Returns:
        ``((fpr_mean, fpr_std), (tpr_mean, tpr_std))`` arrays aligned with
        ``thresh_samples``.
    """
    fpr_samples, tpr_samples = [], []
    for fpr, tpr, thresh in curves:
        # `thresh` is decreasing; search in the reversed (increasing) copy
        # and map each hit back onto the original ordering.  The previous
        # code used `ind[::-1]`, which reverses the *sample* axis instead of
        # converting reversed-array indices, misaligning the samples.
        ind = numpy.searchsorted(thresh[::-1], thresh_samples, side="left")
        ind = len(thresh) - ind - 1
        ind = numpy.clip(ind, 0, len(thresh) - 1)
        fpr_samples.append(fpr[ind])
        tpr_samples.append(tpr[ind])

    fpr_samples = numpy.array(fpr_samples)
    tpr_samples = numpy.array(tpr_samples)

    # The tpr spread must come from the tpr samples (was fpr_samples.std).
    return ((fpr_samples.mean(axis=0), fpr_samples.std(axis=0)),
            (tpr_samples.mean(axis=0), tpr_samples.std(axis=0)))
def roc_curve_threshold_average_interp(curves, thresh_samples):
    """Average ROC curves at `thresh_samples` using linear interpolation.

    Each curve's (thresh, fpr) and (thresh, tpr) sequences are reversed
    into increasing-threshold order and resampled with :func:`interp`,
    filling 1.0 below and 0.0 above the observed threshold range.

    Returns:
        ``((fpr_mean, fpr_std), (tpr_mean, tpr_std))`` arrays aligned with
        ``thresh_samples``.
    """
    fpr_samples, tpr_samples = [], []
    for fpr, tpr, thresh in curves:
        thresh = thresh[::-1]  # increasing order for interpolation
        fpr = interp(thresh_samples, thresh, fpr[::-1], left=1.0, right=0.0)
        tpr = interp(thresh_samples, thresh, tpr[::-1], left=1.0, right=0.0)
        fpr_samples.append(fpr)
        tpr_samples.append(tpr)

    fpr_samples = numpy.array(fpr_samples)
    tpr_samples = numpy.array(tpr_samples)

    # The tpr spread must come from the tpr samples (was fpr_samples.std).
    return ((fpr_samples.mean(axis=0), fpr_samples.std(axis=0)),
            (tpr_samples.mean(axis=0), tpr_samples.std(axis=0)))
# A single ROC-curve vertex: its FPR/TPR coordinates and the classifier
# threshold that produces that operating point.
roc_point = namedtuple("roc_point", ["fpr", "tpr", "threshold"])
def roc_curve_convex_hull(curve):
    """Return the upper convex hull of a single ROC `curve`.

    `curve` is a (fpr, tpr, thresholds) triple with fpr non-decreasing.
    Uses a monotone-chain style sweep: a candidate point is appended only
    while the hull stays concave (slopes strictly decreasing); otherwise
    the last hull point is popped and the test repeats.
    """
    def slope(p1, p2):
        # Slope between two roc_points; vertical segments count as +inf.
        x1, y1, _ = p1
        x2, y2, _ = p2
        if x1 != x2:
            return (y2 - y1) / (x2 - x1)
        else:
            return numpy.inf
    fpr, _, _ = curve
    # With at most two points the curve is already its own hull.
    if len(fpr) <= 2:
        return curve
    points = map(roc_point._make, zip(*curve))
    hull = deque([next(points)])
    for point in points:
        while True:
            if len(hull) < 2:
                hull.append(point)
                break
            else:
                last = hull[-1]
                # Keep `point` only if it continues a decreasing-slope chain
                # (and is not vertically above the last hull point).
                if point.fpr != last.fpr and \
                        slope(hull[-2], last) > slope(last, point):
                    hull.append(point)
                    break
                else:
                    hull.pop()
    fpr = numpy.array([p.fpr for p in hull])
    tpr = numpy.array([p.tpr for p in hull])
    thres = numpy.array([p.threshold for p in hull])
    return (fpr, tpr, thres)
def convex_hull(curves):
    """Return the joint upper convex hull of several ROC `curves`.

    All curve vertices are merged and sorted, then swept with the same
    decreasing-slope test as :func:`roc_curve_convex_hull`.  The result is
    a ``ROCPoints`` triple of arrays.
    """
    def slope(p1, p2):
        # Slope between two points; vertical segments count as +inf.
        x1, y1, *_ = p1
        x2, y2, *_ = p2
        if x1 != x2:
            return (y2 - y1) / (x2 - x1)
        else:
            return numpy.inf
    curves = [list(map(roc_point._make, zip(*curve))) for curve in curves]
    # Flatten all vertices into one list and sort lexicographically
    # (by fpr, then tpr, then threshold).
    merged_points = reduce(operator.iadd, curves, [])
    merged_points = sorted(merged_points)
    if len(merged_points) == 0:
        return ROCPoints(numpy.array([]), numpy.array([]), numpy.array([]))
    # Two or fewer points are trivially their own hull.
    if len(merged_points) <= 2:
        return ROCPoints._make(map(numpy.array, zip(*merged_points)))
    points = iter(merged_points)
    hull = deque([next(points)])
    for point in points:
        while True:
            if len(hull) < 2:
                hull.append(point)
                break
            else:
                last = hull[-1]
                # Append only while the hull slopes keep strictly decreasing;
                # otherwise drop the last hull point and retry.
                if point[0] != last[0] and \
                        slope(hull[-2], last) > slope(last, point):
                    hull.append(point)
                    break
                else:
                    hull.pop()
    return ROCPoints._make(map(numpy.array, zip(*hull)))
def roc_iso_performance_line(slope, hull, tol=1e-5):
    """
    Return the indices where a line with `slope` touches the ROC convex hull.
    """
    fpr, tpr, *_ = hull
    # Measure each hull vertex against a reference iso line through (0, 1):
    #   y = slope * x + 1   <=>   slope * x - 1 * y + 1 = 0
    # The vertex (or vertices) closest to that line are where a parallel
    # line of the same slope touches the hull.
    dist = distance_to_line(slope, -1, 1, fpr, tpr)
    return numpy.flatnonzero(dist - dist.min() <= tol)
def distance_to_line(a, b, c, x0, y0):
    """
    Perpendicular distance from point(s) (x0, y0) to the line ax + by + c = 0.
    """
    assert a != 0 or b != 0
    norm = numpy.sqrt(a ** 2 + b ** 2)
    return numpy.abs(a * x0 + b * y0 + c) / norm
def roc_iso_performance_slope(fp_cost, fn_cost, p):
    """Slope of the ROC iso-performance line for given costs and prior `p`."""
    assert 0 <= p <= 1
    denom = fn_cost * p
    if denom == 0:
        # No weight on false negatives: the line is vertical.
        return numpy.inf
    return (fp_cost * (1. - p)) / denom
def main():
    """Run the ROC analysis widget standalone (manual smoke test)."""
    import gc
    import sip
    from PyQt4.QtGui import QApplication
    from Orange.classification import (LogisticRegressionLearner, SVMLearner,
                                       NuSVMLearner)
    app = QApplication([])
    w = OWROCAnalysis()
    w.show()
    w.raise_()
    # data = Orange.data.Table("iris")
    data = Orange.data.Table("ionosphere")
    results = Orange.evaluation.CrossValidation(
        data,
        [LogisticRegressionLearner(),
         LogisticRegressionLearner(penalty="l1"),
         SVMLearner(probability=True),
         NuSVMLearner(probability=True)],
        k=5,
        store_data=True,
    )
    results.learner_names = ["Logistic", "Logistic (L1 reg.)", "SVM", "NuSVM"]
    w.set_results(results)
    rval = app.exec_()
    # Explicit teardown so sip-owned Qt objects are released deterministically.
    w.deleteLater()
    sip.delete(w)
    del w
    app.processEvents()
    sip.delete(app)
    del app
    gc.collect()
    return rval
if __name__ == "__main__":
    import sys
    # Run the widget as a standalone application for manual testing.
    sys.exit(main())
|
|
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Firestore API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~google.cloud.firestore_v1.client.Client` owns a
:class:`~google.cloud.firestore_v1.collection.CollectionReference`
* a :class:`~google.cloud.firestore_v1.client.Client` owns a
:class:`~google.cloud.firestore_v1.document.DocumentReference`
"""
import os
import google.api_core.client_options
from google.api_core.gapic_v1 import client_info
from google.cloud.client import ClientWithProject
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1 import __version__
from google.cloud.firestore_v1 import query
from google.cloud.firestore_v1 import types
from google.cloud.firestore_v1.batch import WriteBatch
from google.cloud.firestore_v1.collection import CollectionReference
from google.cloud.firestore_v1.document import DocumentReference
from google.cloud.firestore_v1.document import DocumentSnapshot
from google.cloud.firestore_v1.field_path import render_field_path
from google.cloud.firestore_v1.gapic import firestore_client
from google.cloud.firestore_v1.gapic.transports import firestore_grpc_transport
from google.cloud.firestore_v1.transaction import Transaction
DEFAULT_DATABASE = "(default)"
"""str: The default database used in a :class:`~google.cloud.firestore_v1.client.Client`."""
# Error message templates used by ``Client.write_option`` / batch-get parsing.
_BAD_OPTION_ERR = (
    "Exactly one of ``last_update_time`` or ``exists`` " "must be provided."
)
_BAD_DOC_TEMPLATE = (
    "Document {!r} appeared in response but was not present among references"
)
_ACTIVE_TXN = "There is already an active transaction."
_INACTIVE_TXN = "There is no active transaction."
# Default client info: advertises this library's version in the user agent.
_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__)
# Environment variable naming the Firestore emulator host, if one is in use.
_FIRESTORE_EMULATOR_HOST = "FIRESTORE_EMULATOR_HOST"
class Client(ClientWithProject):
    """Client for interacting with Google Cloud Firestore API.

    .. note::

        Since the Cloud Firestore API requires the gRPC transport, no
        ``_http`` argument is accepted by this class.

    Args:
        project (Optional[str]): The project which the client acts on behalf
            of. If not passed, falls back to the default inferred
            from the environment.
        credentials (Optional[~google.auth.credentials.Credentials]): The
            OAuth2 Credentials to use for this client. If not passed, falls
            back to the default inferred from the environment.
        database (Optional[str]): The database name that the client targets.
            For now, :attr:`DEFAULT_DATABASE` (the default value) is the
            only valid database.
        client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]):
            The client info used to send a user-agent string along with API
            requests. If ``None``, then default info will be used. Generally,
            you only need to set this if you're developing your own library
            or partner tool.
        client_options (Union[dict, google.api_core.client_options.ClientOptions]):
            Client options used to set user options on the client. API Endpoint
            should be set through client_options.
    """

    SCOPE = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/datastore",
    )
    """The scopes required for authenticating with the Firestore service."""

    # Backing fields for the lazily-created API client, database string and
    # RPC metadata (see the matching properties below).
    _firestore_api_internal = None
    _database_string_internal = None
    _rpc_metadata_internal = None

    def __init__(
        self,
        project=None,
        credentials=None,
        database=DEFAULT_DATABASE,
        client_info=_CLIENT_INFO,
        client_options=None,
    ):
        # NOTE: This API has no use for the _http argument, but sending it
        #       will have no impact since the _http() @property only lazily
        #       creates a working HTTP object.
        super(Client, self).__init__(
            project=project, credentials=credentials, _http=None
        )
        self._client_info = client_info
        if client_options:
            # Accept a plain mapping as a convenience (was `type(...) == dict`,
            # which rejected dict subclasses; isinstance is the correct check).
            if isinstance(client_options, dict):
                client_options = google.api_core.client_options.from_dict(
                    client_options
                )
        self._client_options = client_options
        self._database = database
        self._emulator_host = os.getenv(_FIRESTORE_EMULATOR_HOST)

    @property
    def _firestore_api(self):
        """Lazy-loading getter GAPIC Firestore API.

        Returns:
            :class:`~google.cloud.gapic.firestore.v1`.firestore_client.FirestoreClient:
            The GAPIC client with the credentials of the current client.
        """
        if self._firestore_api_internal is None:
            # Use a custom channel.
            # We need this in order to set appropriate keepalive options.
            if self._emulator_host is not None:
                # Emulator connections are local and unauthenticated.
                channel = firestore_grpc_transport.firestore_pb2_grpc.grpc.insecure_channel(
                    self._emulator_host
                )
            else:
                channel = firestore_grpc_transport.FirestoreGrpcTransport.create_channel(
                    self._target,
                    credentials=self._credentials,
                    options={"grpc.keepalive_time_ms": 30000}.items(),
                )
            self._transport = firestore_grpc_transport.FirestoreGrpcTransport(
                address=self._target, channel=channel
            )
            self._firestore_api_internal = firestore_client.FirestoreClient(
                transport=self._transport, client_info=self._client_info
            )
        return self._firestore_api_internal

    @property
    def _target(self):
        """Return the target (where the API is).

        Returns:
            str: The location of the API.
        """
        if self._emulator_host is not None:
            return self._emulator_host
        elif self._client_options and self._client_options.api_endpoint:
            return self._client_options.api_endpoint
        else:
            return firestore_client.FirestoreClient.SERVICE_ADDRESS

    @property
    def _database_string(self):
        """The database string corresponding to this client's project.

        This value is lazy-loaded and cached.

        Will be of the form

            ``projects/{project_id}/databases/{database_id}``

        but ``database_id == '(default)'`` for the time being.

        Returns:
            str: The fully-qualified database string for the current
            project. (The default database is also in this string.)
        """
        if self._database_string_internal is None:
            # NOTE: database_root_path() is a classmethod, so we don't use
            #       self._firestore_api (it isn't necessary).
            db_str = firestore_client.FirestoreClient.database_root_path(
                self.project, self._database
            )
            self._database_string_internal = db_str
        return self._database_string_internal

    @property
    def _rpc_metadata(self):
        """The RPC metadata for this client's associated database.

        Returns:
            Sequence[Tuple(str, str)]: RPC metadata with resource prefix
            for the database associated with this client.
        """
        if self._rpc_metadata_internal is None:
            self._rpc_metadata_internal = _helpers.metadata_with_prefix(
                self._database_string
            )
            if self._emulator_host is not None:
                # The emulator requires additional metadata to be set.
                self._rpc_metadata_internal.append(("authorization", "Bearer owner"))
        return self._rpc_metadata_internal

    def collection(self, *collection_path):
        """Get a reference to a collection.

        For a top-level collection:

        .. code-block:: python

            >>> client.collection('top')

        For a sub-collection:

        .. code-block:: python

            >>> client.collection('mydocs/doc/subcol')
            >>> # is the same as
            >>> client.collection('mydocs', 'doc', 'subcol')

        Sub-collections can be nested deeper in a similar fashion.

        Args:
            collection_path (Tuple[str, ...]): Can either be

                * A single ``/``-delimited path to a collection
                * A tuple of collection path segments

        Returns:
            :class:`~google.cloud.firestore_v1.collection.CollectionReference`:
            A reference to a collection in the Firestore database.
        """
        if len(collection_path) == 1:
            path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
        else:
            path = collection_path
        return CollectionReference(*path, client=self)

    def collection_group(self, collection_id):
        """
        Creates and returns a new Query that includes all documents in the
        database that are contained in a collection or subcollection with the
        given collection_id.

        .. code-block:: python

            >>> query = client.collection_group('mygroup')

        @param {string} collectionId Identifies the collections to query over.
        Every collection or subcollection with this ID as the last segment of its
        path will be included. Cannot contain a slash.
        @returns {Query} The created Query.
        """
        if "/" in collection_id:
            raise ValueError(
                "Invalid collection_id "
                + collection_id
                + ". Collection IDs must not contain '/'."
            )
        collection = self.collection(collection_id)
        return query.Query(collection, all_descendants=True)

    def document(self, *document_path):
        """Get a reference to a document in a collection.

        For a top-level document:

        .. code-block:: python

            >>> client.document('collek/shun')
            >>> # is the same as
            >>> client.document('collek', 'shun')

        For a document in a sub-collection:

        .. code-block:: python

            >>> client.document('mydocs/doc/subcol/child')
            >>> # is the same as
            >>> client.document('mydocs', 'doc', 'subcol', 'child')

        Documents in sub-collections can be nested deeper in a similar fashion.

        Args:
            document_path (Tuple[str, ...]): Can either be

                * A single ``/``-delimited path to a document
                * A tuple of document path segments

        Returns:
            :class:`~google.cloud.firestore_v1.document.DocumentReference`:
            A reference to a document in a collection.
        """
        if len(document_path) == 1:
            path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
        else:
            path = document_path

        # DocumentReference takes a relative path. Strip the database string if present.
        base_path = self._database_string + "/documents/"
        joined_path = _helpers.DOCUMENT_PATH_DELIMITER.join(path)
        if joined_path.startswith(base_path):
            joined_path = joined_path[len(base_path) :]
        path = joined_path.split(_helpers.DOCUMENT_PATH_DELIMITER)

        return DocumentReference(*path, client=self)

    @staticmethod
    def field_path(*field_names):
        """Create a **field path** from a list of nested field names.

        A **field path** is a ``.``-delimited concatenation of the field
        names. It is used to represent a nested field. For example,
        in the data

        .. code-block:: python

            data = {
                'aa': {
                    'bb': {
                        'cc': 10,
                    },
                },
            }

        the field path ``'aa.bb.cc'`` represents the data stored in
        ``data['aa']['bb']['cc']``.

        Args:
            field_names (Tuple[str, ...]): The list of field names.

        Returns:
            str: The ``.``-delimited field path.
        """
        return render_field_path(field_names)

    @staticmethod
    def write_option(**kwargs):
        """Create a write option for write operations.

        Write operations include :meth:`~google.cloud.DocumentReference.set`,
        :meth:`~google.cloud.DocumentReference.update` and
        :meth:`~google.cloud.DocumentReference.delete`.

        One of the following keyword arguments must be provided:

        * ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\
              Timestamp`): A timestamp. When set, the target document must
              exist and have been last updated at that time. Protobuf
              ``update_time`` timestamps are typically returned from methods
              that perform write operations as part of a "write result"
              protobuf or directly.
        * ``exists`` (:class:`bool`): Indicates if the document being modified
              should already exist.

        Providing no argument would make the option have no effect (so
        it is not allowed). Providing multiple would be an apparent
        contradiction, since ``last_update_time`` assumes that the
        document **was** updated (it can't have been updated if it
        doesn't exist) and ``exists`` indicate that it is unknown if the
        document exists or not.

        Args:
            kwargs (Dict[str, Any]): The keyword arguments described above.

        Raises:
            TypeError: If anything other than exactly one argument is
                provided by the caller.

        Returns:
            :class:`~google.cloud.firestore_v1.client.WriteOption`:
            The option to be used to configure a write message.
        """
        if len(kwargs) != 1:
            raise TypeError(_BAD_OPTION_ERR)

        name, value = kwargs.popitem()
        if name == "last_update_time":
            return _helpers.LastUpdateOption(value)
        elif name == "exists":
            return _helpers.ExistsOption(value)
        else:
            extra = "{!r} was provided".format(name)
            raise TypeError(_BAD_OPTION_ERR, extra)

    def get_all(self, references, field_paths=None, transaction=None):
        """Retrieve a batch of documents.

        .. note::

            Documents returned by this method are not guaranteed to be
            returned in the same order that they are given in ``references``.

        .. note::

            If multiple ``references`` refer to the same document, the server
            will only return one result.

        See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for
        more information on **field paths**.

        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).

        Args:
            references (List[.DocumentReference, ...]): Iterable of document
                references to be retrieved.
            field_paths (Optional[Iterable[str, ...]]): An iterable of field
                paths (``.``-delimited list of field names) to use as a
                projection of document fields in the returned results. If
                no value is provided, all fields will be returned.
            transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]):
                An existing transaction that these ``references`` will be
                retrieved in.

        Yields:
            .DocumentSnapshot: The next document snapshot that fulfills the
            query, or :data:`None` if the document does not exist.
        """
        document_paths, reference_map = _reference_info(references)
        mask = _get_doc_mask(field_paths)
        response_iterator = self._firestore_api.batch_get_documents(
            self._database_string,
            document_paths,
            mask,
            transaction=_helpers.get_transaction_id(transaction),
            metadata=self._rpc_metadata,
        )

        for get_doc_response in response_iterator:
            yield _parse_batch_get(get_doc_response, reference_map, self)

    def collections(self):
        """List top-level collections of the client's database.

        Returns:
            Sequence[:class:`~google.cloud.firestore_v1.collection.CollectionReference`]:
                iterator of subcollections of the current document.
        """
        iterator = self._firestore_api.list_collection_ids(
            "{}/documents".format(self._database_string), metadata=self._rpc_metadata
        )
        iterator.client = self
        iterator.item_to_value = _item_to_collection_ref
        return iterator

    def batch(self):
        """Get a batch instance from this client.

        Returns:
            :class:`~google.cloud.firestore_v1.batch.WriteBatch`:
            A "write" batch to be used for accumulating document changes and
            sending the changes all at once.
        """
        return WriteBatch(self)

    def transaction(self, **kwargs):
        """Get a transaction that uses this client.

        See :class:`~google.cloud.firestore_v1.transaction.Transaction` for
        more information on transactions and the constructor arguments.

        Args:
            kwargs (Dict[str, Any]): The keyword arguments (other than
                ``client``) to pass along to the
                :class:`~google.cloud.firestore_v1.transaction.Transaction`
                constructor.

        Returns:
            :class:`~google.cloud.firestore_v1.transaction.Transaction`:
            A transaction attached to this client.
        """
        return Transaction(self, **kwargs)
def _reference_info(references):
"""Get information about document references.
Helper for :meth:`~google.cloud.firestore_v1.client.Client.get_all`.
Args:
references (List[.DocumentReference, ...]): Iterable of document
references.
Returns:
Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of
* fully-qualified documents paths for each reference in ``references``
* a mapping from the paths to the original reference. (If multiple
``references`` contains multiple references to the same document,
that key will be overwritten in the result.)
"""
document_paths = []
reference_map = {}
for reference in references:
doc_path = reference._document_path
document_paths.append(doc_path)
reference_map[doc_path] = reference
return document_paths, reference_map
def _get_reference(document_path, reference_map):
"""Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~google.cloud.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered.
"""
try:
return reference_map[document_path]
except KeyError:
msg = _BAD_DOC_TEMPLATE.format(document_path)
raise ValueError(msg)
def _parse_batch_get(get_doc_response, reference_map, client):
    """Turn one `BatchGetDocumentsResponse` into a `DocumentSnapshot`.

    Args:
        get_doc_response (~google.cloud.proto.firestore.v1.\
                firestore_pb2.BatchGetDocumentsResponse): A single response
            (from a stream) containing the "get" response for a document.
        reference_map (Dict[str, .DocumentReference]): A mapping (produced
            by :func:`_reference_info`) of fully-qualified document paths to
            document references.
        client (:class:`~google.cloud.firestore_v1.client.Client`):
            A client that has a document factory.

    Returns:
        [.DocumentSnapshot]: The retrieved snapshot.

    Raises:
        ValueError: If the response has a ``result`` field (a oneof) other
            than ``found`` or ``missing``.
    """
    result_type = get_doc_response.WhichOneof("result")
    if result_type == "found":
        found = get_doc_response.found
        reference = _get_reference(found.name, reference_map)
        data = _helpers.decode_dict(found.fields, client)
        return DocumentSnapshot(
            reference,
            data,
            exists=True,
            read_time=get_doc_response.read_time,
            create_time=found.create_time,
            update_time=found.update_time,
        )
    if result_type == "missing":
        # Missing documents yield a snapshot with no data or timestamps.
        reference = _get_reference(get_doc_response.missing, reference_map)
        return DocumentSnapshot(
            reference,
            None,
            exists=False,
            read_time=get_doc_response.read_time,
            create_time=None,
            update_time=None,
        )
    raise ValueError(
        "`BatchGetDocumentsResponse.result` (a oneof) had a field other "
        "than `found` or `missing` set, or was unset"
    )
def _get_doc_mask(field_paths):
"""Get a document mask if field paths are provided.
Args:
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results.
Returns:
Optional[google.cloud.firestore_v1.types.DocumentMask]: A mask
to project documents to a restricted set of field paths.
"""
if field_paths is None:
return None
else:
return types.DocumentMask(field_paths=field_paths)
def _item_to_collection_ref(iterator, item):
"""Convert collection ID to collection ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (str): ID of the collection
"""
return iterator.client.collection(item)
|
|
# -*- coding: utf-8 -*-
"""
This file is part of Urban Mediator software.
Copyright (c) 2008 University of Art and Design Helsinki
See the file LICENSE.txt for copying permission.
Widget controller.
To add new widget several places should be edited
(in links.py, code.py, widgetize.py and here in widget.py,
plus templates/widget/YOURWIDGET.html and
templates/pc/topic_tools.html)
See `topic_links` as example.
"""
import re
import web
from web.utils import Storage
import model
import media
import util
import i18n
import config
import turing
import url_handler
import webutil
import mobile_map
import view
from view import widget_render as render, get_widget, pc_macro as macros
from profiles import *
from links import widget_links as links, pc_links, feed_links
from webutil import request_uri, dispatch_url
import webform as form
#from session_user import *
_ = i18n._  # gettext-style translation shortcut

# web.py routing table: (path regex, handler class name) pairs.
urls = (
    '/topic/(.+)/topic_points', 'TopicPoints',
    '/topic/(.+)/topic_links', 'TopicLinks',
    '/topic/(.+)/submit', 'SubmitWidget',
    '/topic/(.+)/note', 'NoteWidget',
    '/topic/(.+)/map', 'MapWidget',
    '/topic/.+/container', 'ContainerWidget',  # no capture group: GET(self) only
)
def url_to_class(url):
    """Resolve a widget URL to the handler class that renders it.

    Returns ``None`` when no known widget path segment matches.
    """
    #!!! ugly now. Use regexps for urls above
    markers = (
        ("/submit", SubmitWidget),
        ("/note", NoteWidget),
        ("/container", ContainerWidget),
        ("/topic_points", TopicPoints),
        ("/topic_links", TopicLinks),
        ("/map", MapWidget),
    )
    for marker, handler in markers:
        if marker in url:
            return handler
    return None
def render_widget(text):
    """Render a widget embed snippet inline, or keep it as an iframe.

    `text` must contain a ``src="..."`` attribute pointing at a widget URL.
    Widgets whose handler class sets ``ALWAYS_IFRAME`` are returned
    untouched; everything else is replaced by the inline widget markup.
    Returns an error placeholder when no source URL can be extracted.
    """
    try:
        m = re.search(r'src="(.+?)"', text).groups()[0]
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    # AttributeError: re.search returned None (no src="..." present);
    # TypeError: `text` is not a string at all.
    except (AttributeError, TypeError):
        return "[ERROR with this widget]"
    if getattr(url_to_class(m), "ALWAYS_IFRAME", False):
        return text
    return get_widget_code(m)
def get_widget_url(text):
    """Extract the ``src="..."`` URL from a widget embed snippet.

    Returns the empty string when `text` has no source attribute or is not
    a string.
    """
    try:
        return re.search(r'src="(.+?)"', text).groups()[0]
    # Was a bare `except:`; narrowed to the actual failure modes
    # (no match -> AttributeError, non-string input -> TypeError).
    except (AttributeError, TypeError):
        return ""
def dictify(fs):
    """Return a plain dict copy of a form/Storage-like mapping."""
    return {key: fs[key] for key in fs.keys()}
def get_widget_code(path, mapping=urls, prefix="/widget"):
    """
    Based on handle in web.py's request.py
    Simplified to work only for the current module.

    Dispatches `path` against `mapping` using this module's globals and
    returns the handler's rendered output.
    """
    return webutil.dispatch_url(path, globals(), mapping, prefix)
# Expose the widget helpers to every template.
web.template.Template.globals.update(dict(
    widget_links=links,
    render_widget=render_widget,
    get_widget_code=get_widget_code,
    get_widget_url=get_widget_url,
))
class SubmitWidget:
    """Embeddable "add point" form widget for a topic."""

    def GET(self, topic_id, onlycode=False, webinput=None):
        # Presets come from the query string, or directly from a caller.
        presets = webinput or web.input()
        presets.referer = web.ctx.environ.get("HTTP_REFERER", 0)
        ### compat only !!! remove
        if "disabled_text" not in presets:
            presets.disabled_text = _("Point addition disabled.")
        ###
        context = Storage(
            presets=presets,
            offset=14,
            framed=0,
            onlycode=onlycode,
            title=presets.get("title", _("Add point")),
            desturl=pc_links("topic_newpoint", int(topic_id), **presets),
            submitform=pc_links("topic_newpoint", int(topic_id)),
        )
        topics, topic = model.t_helper(topic_id)
        if not onlycode:
            get_widget('submit', context, topic, presets)
            return
        presets.referer = ''
        return get_widget('submit', context, topic, presets)
class NoteWidget:
    """Embeddable free-form note widget for a topic."""

    def GET(self, topic_id, onlycode=False, webinput=None):
        presets = webinput or web.input()
        presets.referer = web.ctx.environ.get("HTTP_REFERER", 0)
        context = Storage(
            presets=presets,
            offset=14,
            framed=0,
            onlycode=onlycode,
            title=presets.get("title", _("Note")),
        )
        topics, topic = model.t_helper(topic_id)
        if not onlycode:
            get_widget('note', context, topic, presets)
            return
        presets.referer = ''
        return get_widget('note', context, topic, presets)
class MapWidget:
    """Embeddable map widget showing a topic's points.

    Always rendered in an iframe (ALWAYS_IFRAME), even when other widgets
    are inlined.
    """
    ALWAYS_IFRAME = 1
    def GET(self, topic_id, onlycode=False, webinput=None):
        # Presets come from the query string, or directly from a caller.
        presets = webinput or web.input()
        try:
            presets.referer = web.ctx.environ["HTTP_REFERER"]
        except KeyError:
            presets.referer = 0
        context = Storage(
            presets=presets,
            offset = 10,
            framed = 1,
            onlycode=onlycode,
            title=presets.get("title", _("Points")),
            desturl=pc_links("topic", int(topic_id), **presets),
            # submitform=pc_links("topic_newpoint", int(topic_id)),
        )
        topics, topic = model.t_helper(topic_id)
        topics.annotate_by_datasources()
        topics.annotate_by_points()
        number_of_points = int(presets.get("number_of_points",
                                           config.points_per_page))
        points = model.Points(project=topic, external=None)
        points.limit_by_page(1, length=number_of_points)
        points.annotate_by_comments()
        points.annotate_by_tags()
        points.annotate_by_profiles(default=DEFAULT_POINT_PROFILE)
        feed_link = feed_links("topic_points",
                               topic.id, presets, page=None)
        # Use the topic's own fixed center/zoom when configured; otherwise
        # derive them from the points (falling back to the global defaults).
        if "zoom" in topic.profile and topic.profile.zoom != "auto":
            c_lat, c_lon, c_zoom = \
                topic.lat, topic.lon, topic.profile.zoom
            c_zoom = int(c_zoom)
        else:
            c_lat, c_lon, c_zoom = points.central_point(
                config.center_lat, config.center_lon, 14)
        # Everything the map JS template needs to initialize itself.
        map_context = Storage(
            getmap_url=pc_links.getmap,
            getmap_layers=config.getmap_layers,
            getmap_layers1=config.getmap_layers1,
            getmap_layers2=config.getmap_layers2,
            map_params=config.getmap_params,
            getmap_custom=config.getmap_custom,
            getmap_custom_init=config.getmap_custom_init,
            lat=c_lat,
            lon=c_lon,
            zoom=c_zoom + config.getmap_zoomshift,
            initial_feed=feed_link,
            has_new_point="false",
            getmap_zoom1=config.getmap_zoom1 + config.getmap_zoomshift,
            getmap_zoom2=config.getmap_zoom2 + config.getmap_zoomshift,
        )
        model.encode_coordinates(map_context)
        context.map_context = map_context
        context.page_specific_js = macros.map_js(map_context)
        if onlycode:
            presets.referer = ''
            # !!! map widget can't be part of combo
            self_link = links('map', topic_id, **presets)
            return """<iframe src="%s" style="height:%spx;width:%spx;border:none;"></iframe>""" % \
                   (self_link, presets.height, presets.width)
        get_widget('map', context, topic, presets)
class TopicPoints:
    """Embeddable list of a topic's points."""
    def GET(self, topic_id, onlycode=False, webinput=None):
        # Presets come from the query string, or directly from a caller.
        presets = webinput or web.input()
        try:
            presets.referer = web.ctx.environ["HTTP_REFERER"]
        except KeyError:
            presets.referer = 0
        context = Storage(
            presets=presets,
            offset = 10,
            framed = 1,
            onlycode=onlycode,
            title=presets.get("title", _("Points")),
            desturl=pc_links("topic", int(topic_id), **presets),
            # submitform=pc_links("topic_newpoint", int(topic_id)),
        )
        topics, topic = model.t_helper(topic_id)
        # if not topics:
        #    web.seeother(links("index", message=_("No such topic. Update your bookmarks.")))
        #    return
        topics.annotate_by_datasources()
        topics.annotate_by_points()
        number_of_points = int(presets.get("number_of_points",
                                           config.points_per_page))
        points = model.Points(project=topic, external=None)
        # Sort order is caller-selectable, defaulting to the site default.
        model.order_helper(points, presets.get('order',
                                               config.main_page_point_orders[0]))
        points.limit_by_page(1, length=number_of_points)
        points.annotate_by_comments()
        points.annotate_by_tags()
        points.annotate_by_profiles(default=DEFAULT_POINT_PROFILE)
        if onlycode:
            presets.referer = ''
            return get_widget('topic_points', context, presets, points, topic)
        get_widget('topic_points', context, presets, points, topic)
class TopicLinks:
    """Embeddable list of a topic's links."""

    def GET(self, topic_id, onlycode=False, webinput=None):
        presets = webinput or web.input()
        presets.referer = web.ctx.environ.get("HTTP_REFERER", 0)
        context = Storage(
            presets=presets,
            offset=10,
            framed=1,
            onlycode=onlycode,
            title=presets.get("title", _("Links")),
            desturl=pc_links("topic", int(topic_id), **presets),
        )
        topics, topic = model.t_helper(topic_id)
        topics.annotate_by_datasources()
        topics.annotate_by_points()
        number_of_points = int(
            presets.get("number_of_links", config.links_per_page))
        if not onlycode:
            get_widget('topic_links', context, presets, topic)
            return
        presets.referer = ''
        return get_widget('topic_links', context, presets, topic)
# Upper bound on the u0..uN widget URL parameters read by ContainerWidget.
MAX_NUMBER_OF_ITEMS_IN_CONTAINER = 10
class ContainerWidget:
    """Widget that stacks several other widgets, given as u0..uN URLs."""

    def GET(self, onlycode=False, webinput=None):
        presets = webinput or web.input()
        presets.referer = web.ctx.environ.get("HTTP_REFERER", 0)
        context = Storage(
            presets=presets,
            offset=14,
            framed=0,
            onlycode=onlycode,
            title=presets.get("title", _("Add point")),
        )
        # Collect the contained widget URLs u0, u1, ... in order.
        widget_urls = [
            presets["u%i" % n]
            for n in range(MAX_NUMBER_OF_ITEMS_IN_CONTAINER)
            if "u%i" % n in presets
        ]
        if not onlycode:
            get_widget('container', context, presets, widget_urls)
            return
        presets.referer = ''
        return get_widget('container', context, presets, widget_urls)
|
|
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import traceback
import spark
class Token(object):
    """Base lexer token.

    spark dispatches in the parser on a token's ``type`` attribute, so
    every token carries its type string plus the source line number.
    """

    def __init__(self, type, lineno):
        self.type = type
        self.lineno = lineno

    def __repr__(self):
        return str(self)

    def __str__(self):
        return self.type
class Id(Token):
    """An identifier token; ``value`` holds the identifier text."""

    def __init__(self, value, lineno):
        self.type = 'Id'
        self.lineno = lineno
        self.value = value

    def __str__(self):
        return self.value
class String(Token):
    """A quoted-string token; ``value`` holds the raw quoted text."""

    def __init__(self, value, lineno):
        self.type = 'String'
        self.lineno = lineno
        self.value = value
class ASDLSyntaxError(Exception):
    """Scan/parse failure; remembers the line, offending token and message."""

    def __init__(self, lineno, token=None, msg=None):
        self.lineno = lineno
        self.token = token
        self.msg = msg

    def __str__(self):
        if self.msg is not None:
            return "%s, line %d" % (self.msg, self.lineno)
        return "Error at '%s', line %d" % (self.token, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
    # NOTE: each t_* method's docstring is the regular expression spark uses
    # to match that token class -- do not edit those strings.

    def tokenize(self, input):
        # Collect Token objects into self.rv, tracking line numbers as we go.
        self.rv = []
        self.lineno = 1
        super(ASDLScanner, self).tokenize(input)
        return self.rv

    def t_id(self, s):
        r"[\w\.]+"
        # XXX doesn't distinguish upper vs. lower, which is
        # significant for ASDL.
        self.rv.append(Id(s, self.lineno))

    def t_string(self, s):
        r'"[^"]*"'
        self.rv.append(String(s, self.lineno))

    def t_xxx(self, s): # not sure what this production means
        r"<="
        self.rv.append(Token(s, self.lineno))

    def t_punctuation(self, s):
        r"[\{\}\*\=\|\(\)\,\?\:]"
        self.rv.append(Token(s, self.lineno))

    def t_comment(self, s):
        r"\-\-[^\n]*"
        pass  # "--" comments run to end of line and are discarded

    def t_newline(self, s):
        r"\n"
        self.lineno += 1

    def t_whitespace(self, s):
        r"[ \t]+"
        pass

    def t_default(self, s):
        r" . +"
        # Python 2 raise syntax; any input no other rule matched is fatal.
        raise ValueError, "unmatched input: %s" % `s`
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, (module, name, version, _0, _1)):
" module ::= Id Id version { } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, (module, name, version, _0, definitions, _1)):
" module ::= Id Id version { definitions } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_version(self, (version, V)):
"version ::= Id String"
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, (definition,)):
" definitions ::= definition "
return definition
def p_definition_1(self, (definitions, definition)):
" definitions ::= definition definitions "
return definitions + definition
def p_definition(self, (id, _, type)):
" definition ::= Id = type "
return [Type(id, type)]
def p_type_0(self, (product,)):
" type ::= product "
return product
def p_type_1(self, (sum,)):
" type ::= sum "
return Sum(sum)
def p_type_2(self, (sum, id, _0, attributes, _1)):
" type ::= sum Id ( fields ) "
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, (_0, fields, _1)):
" product ::= ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Product(fields)
def p_sum_0(self, (constructor,)):
" sum ::= constructor "
return [constructor]
def p_sum_1(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_sum_2(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_constructor_0(self, (id,)):
" constructor ::= Id "
return Constructor(id)
def p_constructor_1(self, (id, _0, fields, _1)):
" constructor ::= Id ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, (field,)):
" fields ::= field "
return [field]
def p_fields_1(self, (field, _, fields)):
" fields ::= field , fields "
return fields + [field]
def p_field_0(self, (type,)):
" field ::= Id "
return Field(type)
def p_field_1(self, (type, name)):
" field ::= Id Id "
return Field(type, name)
def p_field_2(self, (type, _, name)):
" field ::= Id * Id "
return Field(type, name, seq=True)
def p_field_3(self, (type, _, name)):
" field ::= Id ? Id "
return Field(type, name, opt=True)
def p_field_4(self, (type, _)):
" field ::= Id * "
return Field(type, seq=True)
def p_field_5(self, (type, _)):
" field ::= Id ? "
return Field(type, opt=True)
# Primitive ASDL types that need no definition in a module (see check()).
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
    """Marker base class for every node of the meta-AST."""
class Module(AST):
    """Top-level ASDL module: a name, a version, and its type definitions."""

    def __init__(self, name, dfns, version):
        self.name = name
        # Bug fix: p_module_0 passes dfns=None for an empty module, which
        # made the loop below (and callers iterating mod.dfns, e.g. check())
        # raise TypeError.  Normalize to a list; non-None callers see no change.
        self.dfns = dfns or []
        self.version = version
        self.types = {} # maps type name to value (from dfns)
        for type in self.dfns:
            self.types[type.name.value] = type.value

    def __repr__(self):
        return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
    """A named type definition: ``name = value``."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        return "Type({0}, {1})".format(self.name, self.value)
class Constructor(AST):
    """One alternative of a sum type: a name plus its fields."""

    def __init__(self, name, fields=None):
        self.name = name
        self.fields = [] if fields is None else fields

    def __repr__(self):
        return "Constructor({0}, {1})".format(self.name, self.fields)
class Field(AST):
    """One field of a constructor or product.

    ``seq``/``opt`` mirror ASDL's ``*`` and ``?`` qualifiers; ``name`` may
    be absent for anonymous fields.
    """

    def __init__(self, type, name=None, seq=False, opt=False):
        self.type = type
        self.name = name
        self.seq = seq
        self.opt = opt

    def __repr__(self):
        # seq takes precedence over opt, matching the original formatting.
        qualifier = ", seq=True" if self.seq else (", opt=True" if self.opt else "")
        if self.name is None:
            return "Field({0}{1})".format(self.type, qualifier)
        return "Field({0}, {1}{2})".format(self.type, self.name, qualifier)
class Sum(AST):
    """A sum type: a list of constructors plus optional shared attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        self.attributes = attributes or []

    def __repr__(self):
        # Bug fix: __init__ guarantees attributes is a list, so the old
        # ``self.attributes is None`` test could never succeed and the short
        # form was unreachable.  Test for emptiness instead.
        if not self.attributes:
            return "Sum(%s)" % self.types
        else:
            return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
    """A product type: just an ordered collection of fields."""

    def __init__(self, fields):
        self.fields = fields

    def __repr__(self):
        return "Product({0})".format(self.fields)
class VisitorBase(object):
    """Generic AST visitor: dispatches to ``visit<ClassName>`` methods.

    With skip=True, nodes without a matching method are silently ignored;
    otherwise a missing method raises AttributeError during dispatch.
    """

    def __init__(self, skip=False):
        self.cache = {}  # node class -> bound visit method (memoized dispatch)
        self.skip = skip

    def visit(self, object, *args):
        meth = self._dispatch(object)
        if meth is None:
            return
        try:
            meth(object, *args)
        except Exception, err:
            print "Error visiting", repr(object)
            print err
            traceback.print_exc()
            # XXX hack
            if hasattr(self, 'file'):
                self.file.flush()
            # NOTE(review): hard-exits the whole process on any visitor error.
            os._exit(1)

    def _dispatch(self, object):
        assert isinstance(object, AST), repr(object)
        klass = object.__class__
        meth = self.cache.get(klass)
        if meth is None:
            methname = "visit" + klass.__name__
            if self.skip:
                meth = getattr(self, methname, None)
            else:
                meth = getattr(self, methname)
            self.cache[klass] = meth
        return meth
class Check(VisitorBase):
    """Validate a parsed module.

    After visiting: ``cons`` maps constructor name -> defining type,
    ``types`` maps field type name -> list of places it is used, and
    ``errors`` counts constructor redefinitions found.
    """

    def __init__(self):
        super(Check, self).__init__(skip=True)
        self.cons = {}
        self.errors = 0
        self.types = {}

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type):
        self.visit(type.value, str(type.name))

    def visitSum(self, sum, name):
        for t in sum.types:
            self.visit(t, name)

    def visitConstructor(self, cons, name):
        # Constructor names must be globally unique across the module.
        key = str(cons.name)
        conflict = self.cons.get(key)
        if conflict is None:
            self.cons[key] = name
        else:
            print "Redefinition of constructor %s" % key
            print "Defined in %s and %s" % (conflict, name)
            self.errors += 1
        for f in cons.fields:
            self.visit(f, key)

    def visitField(self, field, name):
        # Record every use site of a type so check() can report undefined ones.
        key = str(field.type)
        l = self.types.setdefault(key, [])
        l.append(name)

    def visitProduct(self, prod, name):
        for f in prod.fields:
            self.visit(f, name)
def check(mod):
    """Run the Check visitor over *mod* and report undefined types.

    Returns True when no errors (redefinitions or undefined, non-builtin
    types) were found.
    """
    v = Check()
    v.visit(mod)
    for t in v.types:
        if t not in mod.types and not t in builtin_types:
            v.errors += 1
            uses = ", ".join(v.types[t])
            print "Undefined type %s, used in %s" % (t, uses)
    return not v.errors
def parse(file):
    """Scan and parse the ASDL source in *file*.

    Returns a Module on success; on a syntax error, prints the error and the
    offending source line and returns None.
    """
    scanner = ASDLScanner()
    parser = ASDLParser()
    # NOTE(review): file handle is never closed explicitly.
    buf = open(file).read()
    tokens = scanner.tokenize(buf)
    try:
        return parser.parse(tokens)
    except ASDLSyntaxError, err:
        print err
        lines = buf.split("\n")
        print lines[err.lineno - 1] # lines starts at 0, files at 1
if __name__ == "__main__":
    # Smoke test: parse every .asdl file named on the command line
    # (default: tests/*.asdl), run check(), and dump the definitions.
    import glob
    import sys
    if len(sys.argv) > 1:
        files = sys.argv[1:]
    else:
        testdir = "tests"
        files = glob.glob(testdir + "/*.asdl")
    for file in files:
        print file
        mod = parse(file)
        print "module", mod.name
        print len(mod.dfns), "definitions"
        if not check(mod):
            print "Check failed"
        else:
            for dfn in mod.dfns:
                print dfn.type
|
|
# -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from datetime import datetime, timedelta
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.utils import simplejson
from django.utils.timezone import utc
from moocng.api.tests.outputs import (NO_OBJECTS, NORMAL_USER,
BASIC_ALLCOURSES, BASIC_COURSES)
from moocng.api.tests.utils import ApiTestCase
class UserTestCase(ApiTestCase):
    """Permission tests for the user API resource and its course listings.

    Each group (user detail, allcourses, passedcourses) is exercised for
    every role: anonymous, plain user, student, teacher, owner, admin,
    API-key user, and certificator (API key + explicit permission).

    NOTE(review): URLs hard-code user pks 2 and 3; the tests assume the
    "test" user created in each test receives pk 2 (see test_get_user_admin,
    where /user/2/ 404s until it is created) -- this depends on user
    creation order and should be confirmed against the fixtures.
    """

    # Get user
    def test_get_user_annonymous(self):
        self.create_test_user_user()
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_user_user(self):
        user = self.create_test_user_user()
        self.client = self.django_login_user(self.client, user)
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_user_alum(self):
        alum1 = self.create_test_user_alum1()
        self.client = self.django_login_user(self.client, alum1)
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_user_teacher(self):
        teacher1 = self.create_test_user_teacher1()
        self.client = self.django_login_user(self.client, teacher1)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_user_owner(self):
        owner = self.create_test_user_owner()
        self.client = self.django_login_user(self.client, owner)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_user_admin(self):
        # Admin sees 404 before the target user exists, 200 afterwards.
        admin = self.create_test_user_admin()
        self.client = self.django_login_user(self.client, admin)
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 404)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NORMAL_USER)

    def test_get_user_userkey(self):
        user = self.create_test_user_user()
        key = str(uuid.uuid4())
        self.generate_apikeyuser(user, key)
        response = self.client.get('/api/%s/user/2/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 404)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NORMAL_USER)

    def test_get_user_certificator(self):
        certuser = self.create_test_user_user()
        key = str(uuid.uuid4())
        self.generate_apikeyuser(certuser, key)
        response = self.client.get('/api/%s/user/2/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 404)
        self.create_test_user_test()
        response = self.client.get('/api/%s/user/2/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NORMAL_USER)

    # Get courses of the user
    def test_get_allcourses_annonymous(self):
        owner = self.create_test_user_owner()
        self.create_test_user_user()
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_allcourses_user(self):
        owner = self.create_test_user_owner()
        user = self.create_test_user_user()
        self.client = self.django_login_user(self.client, user)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_allcourses_alum(self):
        owner = self.create_test_user_owner()
        alum1 = self.create_test_user_alum1()
        self.client = self.django_login_user(self.client, alum1)
        test_user = self.create_test_user_test()
        course1 = self.create_test_basic_course(owner=owner,
                                                student=test_user,
                                                name='course1')
        course2 = self.create_test_basic_course(owner=owner,
                                                student=test_user,
                                                name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        # Even enrolled students may not list another user's courses.
        course1.students.add(alum1)
        course1.save()
        course2.students.add(alum1)
        course2.save()
        response = self.client.get('/api/%s/user/2/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_allcourses_teacher(self):
        owner = self.create_test_user_owner()
        teacher1 = self.create_test_user_teacher1()
        self.client = self.django_login_user(self.client, teacher1)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_allcourses_owner(self):
        teacher1 = self.create_test_user_teacher1()
        owner = self.create_test_user_owner()
        self.client = self.django_login_user(self.client, owner)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_allcourses_admin(self):
        owner = self.create_test_user_owner()
        admin = self.create_test_user_admin()
        self.client = self.django_login_user(self.client, admin)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, BASIC_ALLCOURSES)
        response = self.client.get('/api/%s/user/2/allcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)

    def test_get_allcourses_userkey(self):
        # An API key alone (without the permission) is not enough.
        owner = self.create_test_user_owner()
        user = self.create_test_user_user()
        key = str(uuid.uuid4())
        self.generate_apikeyuser(user, key)
        test_user = self.create_test_user_test()
        response = self.client.get('/api/%s/user/3/allcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 401)
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/allcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 401)

    def test_get_allcourses_certificator(self):
        owner = self.create_test_user_owner()
        certificator = self.create_test_user_user()
        # Grant the explicit listing permission that makes the key-based
        # requests below succeed.
        ct = ContentType.objects.get(model='course', app_label='courses')
        perm = Permission.objects.get(content_type=ct, codename='can_list_allcourses')
        certificator.user_permissions.add(perm)
        key = str(uuid.uuid4())
        self.generate_apikeyuser(certificator, key)
        test_user = self.create_test_user_test()
        response = self.client.get('/api/%s/user/3/allcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/allcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, BASIC_ALLCOURSES)
        response = self.client.get('/api/%s/user/2/allcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)

    # Get passed courses of the user
    def test_get_passedcourses_annonymous(self):
        owner = self.create_test_user_owner()
        self.create_test_user_user()
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_passedcourses_user(self):
        owner = self.create_test_user_owner()
        user = self.create_test_user_user()
        self.client = self.django_login_user(self.client, user)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_passedcourses_alum(self):
        owner = self.create_test_user_owner()
        alum1 = self.create_test_user_alum1()
        self.client = self.django_login_user(self.client, alum1)
        test_user = self.create_test_user_test()
        course1 = self.create_test_basic_course(owner=owner,
                                                student=test_user,
                                                name='course1')
        course2 = self.create_test_basic_course(owner=owner,
                                                student=test_user,
                                                name='course2')
        response = self.client.get('/api/%s/user/3/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        course1.students.add(alum1)
        course1.save()
        course2.students.add(alum1)
        course2.save()
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_passedcourses_teacher(self):
        owner = self.create_test_user_owner()
        teacher1 = self.create_test_user_teacher1()
        self.client = self.django_login_user(self.client, teacher1)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_passedcourses_owner(self):
        teacher1 = self.create_test_user_teacher1()
        owner = self.create_test_user_owner()
        self.client = self.django_login_user(self.client, owner)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      teacher=teacher1,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 401)

    def test_get_passedcourses_admin(self):
        owner = self.create_test_user_owner()
        admin = self.create_test_user_admin()
        self.client = self.django_login_user(self.client, admin)
        test_user = self.create_test_user_test()
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/2/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)
        response = self.client.get('/api/%s/user/3/passedcourses/%s' % (self.api_name, self.format_append))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)

    def test_get_passedcourses_userkey(self):
        owner = self.create_test_user_owner()
        user = self.create_test_user_user()
        key = str(uuid.uuid4())
        self.generate_apikeyuser(user, key)
        test_user = self.create_test_user_test()
        response = self.client.get('/api/%s/user/3/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 401)
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/3/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 401)
        response = self.client.get('/api/%s/user/2/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 401)

    def test_get_passedcourses_certificator(self):
        owner = self.create_test_user_owner()
        certificator = self.create_test_user_user()
        ct = ContentType.objects.get(model='course', app_label='courses')
        perm = Permission.objects.get(content_type=ct, codename='can_list_passedcourses')
        certificator.user_permissions.add(perm)
        key = str(uuid.uuid4())
        self.generate_apikeyuser(certificator, key)
        test_user = self.create_test_user_test()
        response = self.client.get('/api/%s/user/3/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)
        course1 = self.create_test_basic_course(owner=owner,
                                                student=test_user,
                                                name='course1')
        self.create_test_basic_course(owner=owner,
                                      student=test_user,
                                      name='course2')
        response = self.client.get('/api/%s/user/2/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)
        response = self.client.get('/api/%s/user/3/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, NO_OBJECTS)
        # Create unit in the course, create a simple kq with a video.
        course1.threshold = 1
        course1.save()
        now = datetime.utcnow().replace(tzinfo=utc)
        start = now + timedelta(days=1)
        deadline = now + timedelta(days=2)
        test_unit = self.create_test_basic_unit(course1, 'e', unicode(start.isoformat()), unicode(deadline.isoformat()), 100)
        test_kq = self.create_test_basic_kq(unit=test_unit, weight=100)
        self.create_activity(user=test_user, kq=test_kq)
        response = self.client.get('/api/%s/user/3/passedcourses/%s&key=%s' % (self.api_name, self.format_append, key))
        self.assertEqual(response.status_code, 200)
        # Compare against the expected fixture, adjusted for course1's fields.
        aux_basic_courses = simplejson.loads(BASIC_COURSES)
        aux_basic_courses['objects'][0]['name'] = u'course1_course'
        aux_basic_courses['objects'][0]['description'] = u'course1_description'
        aux_basic_courses['objects'][0]['slug'] = u'course1_course'
        aux_basic_courses['objects'][0]['threshold'] = u'1'
        self.assertEqual(simplejson.loads(response.content), aux_basic_courses)
|
|
# XML DOM Tk Text Browser Editor
# May 18, 2004
# Alex Caldwell M.D.
# alcald2000@yahoo.com
# derived from xmlbrowser.tcl
# by Richard Suchenwirth http://wiki.tcl.tk/3884
# Comparison:
# Modified to allow real time editing of the XML DOM in memory via events in the Tk text
# widget.
# It seems like the text widget is faster than the BWidget Tree. I personally find
# the table-like layout in the Tk text widget with the color coding is easier to read
# and find your data than the Tree widget.
# Purpose:
# to get some XML data, parse it into an XML DOM tree in memory, and then
# map the DOM structure into an editable display in a Tk text widget. A sort of "Map" is
# generated in the text widget using marks and tags that corresponds to elements and
# attributes in the DOM structure. Event bindings are used so that any changes to the data
# in the text widget are updated in the DOM memory structure in parallel in real time.
# The modified DOM can then be dumped as XML to a file, sent to another part of the
# application, transmitted over the network, etc. It currently dumps the modified DOM
# as XML to the console when any change is made to the data. So start it from a console.
# This is similar to domtext widget in Swish and Waxml by Steve Ball http://waxml.sourceforge.net
# I was using a similar technique with SAX and mapping the XML elements and attributes with
# Tk text widget marks and tags. The problem with the SAX method is that it's sort of one-way:
# SAX works well for mapping the XML data to the text widget display, but when you want to
# reverse that, and go back to XML from the Tk text widget display, you have to write a unique
# Tcl script to collect your data and convert it back to XML. So the SAX method is not very
# generalizable to different XML schema. Each change in the XML schema means you have to
# re-write your script.
# With the DOM model, it is more automatic in both directions and more generalizable to different
# XML schema.
# Limitations and Bugs:
# You can only edit the text node data and the value of the attributes, you can't change the
# XML schema itself.
# Don't allow the value of an attribute to become NULL. You can change the value but leave at least
# one character between the quote marks.
# To do:
# wrap it up in a namespace, turn it into a Tcl package and make it behave as a proper Tk widget with
# options etc like the domtext widget.
# Add a "Save" button and a better menubar across the top.
# maybe add some whitespace so variable length text elements will always line up vertically in the
# display which will make it faster to find stuff.
# maybe remove or hide the XML element tags in the display and only show the text node data
# and the attribute values with the element names only across a header at the top.
#BWidget not required since we are going to use a Tk text widget
#package require BWidget
# we do need the tDOM package
package require tdom
proc recurseInsert {w node parent} {
    # Render $node (and, recursively, its children) into text widget $w,
    # creating per-element and per-attribute marks/tags so that edits made
    # in the widget can be written back to the DOM node they came from.
    # NOTE(review): $parent is accepted but never referenced in the body.
    global colorlist color name tag_list
    # Per-element display options (newline on/off) live in global variables
    # named after each tag; import them all so [set $name] works below.
    foreach one $tag_list {
        global $one
    }
    set name [$node nodeName]
    set done 0
    if {$name=="#text" || $name=="#cdata"} {
        # Text/CDATA node: flatten newlines so the value stays on one line.
        set text [string map {\n " "} [$node nodeValue]]
    } else {
        # Element node: emit "<name", its attributes, then ">".
        set text <$name
        # lineno makes mark/tag names unique per display line.
        # NOTE(review): lineno is set only on this branch, yet the mark/tag
        # statements after this if/else also run for #text nodes and then
        # reuse the value left over from an earlier call - confirm intended.
        set lineno [lindex [split [$w index current] "."] 0]
        $w mark set begin$name$lineno [$w index current]
        $w mark gravity begin$name$lineno left
        $w insert end $text
        set text ""
        foreach att [getAttributes $node] {
            $w mark set begin$att$lineno [$w index current]
            $w mark gravity begin$att$lineno left
            # catch guards against attributes whose value cannot be read.
            catch {set text " $att=\"[$node getAttribute $att]\""
            $w insert end $text
            }
            $w mark set end$att$lineno [$w index current]
            $w mark gravity end$att$lineno left
            # The tag starts one char in so the leading space stays unstyled.
            $w tag add $att$lineno "begin$att$lineno + 1 char" end$att$lineno
        }
        # this tests if the option to insert a newline to format the display in the text
        # widget is turned on for this element
        if {[set $name] == "1"} {
            set text ">\n"
        } else {
            set text >
        }
        set children [$node childNodes]
        # I think this is a test to see if the child is a text node
        if {[llength $children]==1 && [$children nodeName]=="#text"} {
            # this tests if the option is turned on for that element to break the data display with
            # a newline on that element.
            # You can format the display in various ways depending on which elements you choose
            # to use newlines on in the Tk text widget.
            if {[set $name] == "1"} {
                append text "[$children nodeValue]\n</$name>\n"
            } else {
                append text "[$children nodeValue]</$name>"
            }
            # The single text child was rendered inline together with the
            # closing tag, so the recursion below must be skipped.
            set done 1
        }
    }
    $w insert end "$text"
    $w mark set end$name$lineno [$w index current]
    $w mark gravity end$name$lineno left
    $w tag add $name$lineno begin$name$lineno end$name$lineno
    # update the DOM Tree and dump it as XML to the standard output. In application, you would save it
    # or use it somewhere else. Here it's just to monitor the changes to the DOM tree.
    $w tag bind $name$lineno <KeyRelease> "
    set new_text \[$w get begin$name$lineno end$name$lineno\]
    regsub \"<${name}(.*?)>\" \$new_text \{\} new_text
    regsub \"</$name>\" \$new_text \{\} new_text
    \[$node firstChild\] nodeValue \$new_text
    puts \"\[\$root asXML\]\"
    "
    $w tag configure $name$lineno -background $color($name) -relief raised -borderwidth 1
    foreach att [getAttributes $node] {
        $w tag configure $att$lineno -relief sunken -background $color($att) -borderwidth 1
        $w tag raise $att$lineno
        # Update DOM tree in memory and dump as XML to the standard output. In application, you would save it
        # or use it somewhere else. Here it's just to monitor the changes to the DOM tree.
        $w tag bind $att$lineno <KeyRelease> "
        set new_attribute \[$w get \"begin$att$lineno + 1 char\" end$att$lineno\]
        regsub -all \{\"\} \$new_attribute \{\} new_attribute
        set new_attribute \[split \$new_attribute \"=\"\]
        $node setAttribute \[lindex \$new_attribute 0\] \"\[lindex \$new_attribute 1\]\"
        puts \"\[\$root asXML\]\"
        "
    }
    if !$done {
        # Element has non-text (or multiple) children: recurse into each,
        # then emit and tag the closing tag ourselves.
        foreach child [$node childNodes] {
            recurseInsert $w $child $node
        }
        $w mark set startend[lindex [$node nodeName] 0]$lineno current
        $w mark gravity startend[lindex [$node nodeName] 0]$lineno left
        if {[set [lindex [$node nodeName] 0]] == "1"} {
            $w insert end "\n</[lindex [$node nodeName] 0]>\n"
        } else {
            $w insert end "</[lindex [$node nodeName] 0]>"
        }
        $w mark set end[lindex [$node nodeName] 0]$lineno [$w index current]
        $w mark gravity end[lindex [$node nodeName] 0]$lineno left
        $w tag add [lindex [$node nodeName] 0]$lineno startend[lindex [$node nodeName] 0]$lineno \
            end[lindex [$node nodeName] 0]$lineno
        $w tag configure [lindex [$node nodeName] 0]$lineno -background $color([lindex [$node nodeName] 0]) \
            -relief raised -borderwidth 1
    }
}
proc getAttributes node {
    # Return the node's attribute-name list, or "" for node types that
    # have no attributes (tdom raises an error for those, which we trap).
    set attrs ""
    catch {set attrs [$node attributes]}
    return $attrs
}
# this is for generating a list of the unique element names and attribute names
# in your XML data. This will be used for mapping a unique color to the text
# corresponding to that data in the Tk text widget. It makes an array called color
# indexed by the element and attribute names, with a value of a color name for the display
proc recurse_names {node} {
    # Walk the DOM below $node, appending each previously unseen element and
    # attribute name to the global tag_list and assigning it the next color
    # from colorlist.
    global tag_list color colorlist
    foreach child [$node childNodes] {
        # NOTE(review): regexp is used as a membership test here, so a name
        # that is a substring of an existing entry (or contains regexp
        # metacharacters) can be skipped or mismatched; lsearch -exact would
        # be the safe equivalent - confirm before changing.
        if {![regexp [$child nodeName] $tag_list] && [$child nodeName] != "#text" } {
            lappend tag_list [$child nodeName]
            set color([$child nodeName]) [lindex $colorlist [llength $tag_list]]
            if {[getAttributes $child] != ""} {
                set match ""
                if {![regexp [getAttributes $child] $tag_list match]} {
                    lappend tag_list [getAttributes $child]
                    set color([getAttributes $child]) [lindex $colorlist [llength $tag_list]]
                }
                # NOTE(review): this branch appends the attribute list again
                # when the regexp above matched it exactly, which produces a
                # duplicate tag_list entry - confirm whether that is intended.
                if {$match != "" && ![string compare [getAttributes $child] $match]} {
                    lappend tag_list [getAttributes $child]
                    set color([getAttributes $child]) [lindex $colorlist [llength $tag_list]]
                }
            }
            recurse_names $child
        }
    }
}
# Check for an XML file from the command line. If none, present user with a
# tk_getOpenFile dialog.
if {[lindex $argv 0] == ""} {
    set fp [open [tk_getOpenFile]]
} else {
    set fp [open [file join [lindex $argv 0]]]
}
fconfigure $fp -encoding utf-8
set xml [read $fp]
close $fp
# Parse the document; the root element handle is kept in the global "root",
# which the per-tag KeyRelease bindings also reference when dumping XML.
dom parse $xml doc
$doc documentElement root
# BWidget Tree not needed as we are using the Tk text widget
#Tree .t -yscrollcommand ".y set" -xscrollcommand ".x set" -padx 0
# Options menu: a checkbutton per element name is appended further below.
menubutton .m -text "Options... Add newline to choice of XML tags to format display" -menu .m.menu -indicatoron true
grid .m -sticky news
menu .m.menu
# Main display: a non-wrapping text widget with both scrollbars.
text .t -yscrollcommand ".y set" -xscrollcommand ".x set" -wrap none
scrollbar .x -ori hori -command ".t xview"
scrollbar .y -ori vert -command ".t yview"
grid .t .y -sticky news
grid .x -sticky news
grid rowconfig . 0 -weight 1
grid columnconfig . 0 -weight 1
# this is a map of the colors to use for the various attributes and elements
# you want to display - needs to be at least as long as the no. of unique elements and attributes
# in your XML data
set colorlist [list white bisque red green lightblue yellow pink #E4D0EC orange #FF3F3F wheat\
    peachpuff lightgrey olivedrab2 white ivory bisque pink yellow skyblue3]
# set up a map of colors for each unique element or attribute
set tag_list ""
set color([$root nodeName]) [lindex $colorlist 0]
lappend tag_list "[$root nodeName]"
recurse_names $root
# add menuitems representing the element names to the options menu
foreach one $tag_list {
    .m.menu add checkbutton -label $one -variable $one -onvalue 1 -offvalue 0
}
.m.menu add separator
.m.menu add command -label "Save Options" -command {
    set f [open xmlbrowser.options w]
    foreach one $tag_list {
        puts $f "set $one [set $one]"
    }
    close $f
}
# Load previously saved display options; if there are none, tell the user
# how to create them.
if {[catch {source ./xmlbrowser.options} res]} {
    toplevel .res
    label .res.label -text "No options file Available...\nSet and save options\nto format display."
    grid .res.label
    button .res.ok -text "OK" -command {
        destroy .res
    }
    grid .res.ok
}
# Defer the initial render briefly so the widgets exist before inserting.
after 5 recurseInsert .t $root root
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import urlparse
from django.conf import settings # noqa
from django.contrib.auth import logout # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from keystoneclient.exceptions import ClientException # noqa
from openstack_auth import backend
from horizon import exceptions
from horizon import messages
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
DEFAULT_ROLE = None
# Set up our data structure for managing Identity API versions, and
# add a couple utility methods to it.
class IdentityAPIVersionManager(base.APIVersionManager):
    """APIVersionManager with identity-specific convenience helpers."""

    def upgrade_v2_user(self, user):
        """Backfill ``project_id`` from the v2 ``tenantId`` attribute."""
        project_id = getattr(user, "project_id", None)
        if project_id is None:
            user.project_id = getattr(user, "tenantId", None)
        return user

    def get_project_manager(self, *args, **kwargs):
        """Return the tenants (v2) or projects (v3) manager."""
        client = keystoneclient(*args, **kwargs)
        if VERSIONS.active < 3:
            return client.tenants
        return client.projects
# Module-level version manager; the versions actually available depend on
# which keystoneclient modules import successfully below.
VERSIONS = IdentityAPIVersionManager("identity", preferred_version=3)

# Import from oldest to newest so that "preferred" takes correct precedence.
try:
    from keystoneclient.v2_0 import client as keystone_client_v2
    VERSIONS.load_supported_version(2.0, {"client": keystone_client_v2})
except ImportError:
    # v2 client not installed; simply don't register that version.
    pass

try:
    from keystoneclient.v3 import client as keystone_client_v3
    VERSIONS.load_supported_version(3, {"client": keystone_client_v3})
except ImportError:
    pass
class Service(base.APIDictWrapper):
    """ Wrapper for a dict based on the service data from keystone. """
    _attrs = ['id', 'type', 'name']

    def __init__(self, service, region, *args, **kwargs):
        super(Service, self).__init__(service, *args, **kwargs)
        # Resolve both endpoint flavors for this service in the region.
        self.public_url = base.get_url_for_service(service, region,
                                                   'publicURL')
        self.url = base.get_url_for_service(service, region, 'internalURL')
        self.host = urlparse.urlparse(self.url).hostname if self.url else None
        self.disabled = None
        self.region = region

    def __unicode__(self):
        if self.type != "identity":
            return self.type
        # Identity entries also display which keystone backend is in use.
        return _("%(type)s (%(backend)s backend)") \
            % {"type": self.type, "backend": keystone_backend_name()}

    def __repr__(self):
        return "<Service: %s>" % unicode(self)
def _get_endpoint_url(request, endpoint_type, catalog=None):
    """Return the identity endpoint URL pinned to the active API version."""
    if getattr(request.user, "service_catalog", None):
        url = base.url_for(request,
                           service_type='identity',
                           endpoint_type=endpoint_type)
    else:
        fallback = getattr(settings, 'OPENSTACK_KEYSTONE_URL')
        url = request.session.get('region_endpoint', fallback)

    # TODO(gabriel): When the Service Catalog no longer contains API versions
    # in the endpoints this can be removed.
    parsed = urlparse.urlparse(url)
    root = "://".join((parsed.scheme, parsed.netloc))
    return "%s/v%s" % (root, VERSIONS.active)
def keystoneclient(request, admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

        * Username + password -> Unscoped authentication
        * Username + password + tenant id -> Scoped authentication
        * Unscoped token -> Unscoped authentication
        * Unscoped token + tenant id -> Scoped authentication
        * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed in
    as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    user = request.user
    if admin:
        # Admin calls require a superuser token and the admin endpoint.
        if not user.is_superuser:
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'internalURL')
    api_version = VERSIONS.get_active_version()
    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystoneclient_admin" if admin \
        else backend.KEYSTONE_CLIENT_ATTR
    # Reuse the cached client only while its auth token still matches the
    # user's current token (or the user has no token to compare).
    if hasattr(request, cache_attr) and (not user.token.id
            or getattr(request, cache_attr).auth_token == user.token.id):
        LOG.debug("Using cached client for token: %s" % user.token.id)
        conn = getattr(request, cache_attr)
    else:
        endpoint = _get_endpoint_url(request, endpoint_type)
        insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
        LOG.debug("Creating a new keystoneclient connection to %s." % endpoint)
        remote_addr = request.environ.get('REMOTE_ADDR', '')
        conn = api_version['client'].Client(token=user.token.id,
                                            endpoint=endpoint,
                                            original_ip=remote_addr,
                                            insecure=insecure,
                                            auth_url=endpoint,
                                            debug=settings.DEBUG)
        setattr(request, cache_attr, conn)
    return conn
def domain_create(request, name, description=None, enabled=None):
    """Create a keystone domain (admin endpoint)."""
    manager = keystoneclient(request, admin=True).domains
    return manager.create(name,
                          description=description,
                          enabled=enabled)


def domain_get(request, domain_id):
    """Fetch a single domain by id."""
    manager = keystoneclient(request, admin=True).domains
    return manager.get(domain_id)


def domain_delete(request, domain_id):
    """Delete the domain with the given id."""
    manager = keystoneclient(request, admin=True).domains
    return manager.delete(domain_id)


def domain_list(request):
    """List all keystone domains."""
    manager = keystoneclient(request, admin=True).domains
    return manager.list()


def domain_update(request, domain_id, name=None, description=None,
                  enabled=None):
    """Update a domain's name, description and/or enabled flag."""
    manager = keystoneclient(request, admin=True).domains
    return manager.update(domain_id, name, description, enabled)


def tenant_create(request, name, description=None, enabled=None, domain=None):
    """Create a project (tenant); ``domain`` is used with Identity v3 only."""
    manager = VERSIONS.get_project_manager(request, admin=True)
    if VERSIONS.active < 3:
        return manager.create(name, description, enabled)
    else:
        return manager.create(name, domain,
                              description=description,
                              enabled=enabled)
def get_default_domain(request):
    """
    Gets the default domain object to use when creating Identity object.
    Returns the domain context if is set, otherwise return the domain
    of the logon user.
    """
    domain_id = request.session.get("domain_context", None)
    domain_name = request.session.get("domain_context_name", None)
    # if running in Keystone V3 or later
    if VERSIONS.active >= 3 and not domain_id:
        # if no domain context set, default to users' domain
        domain_id = request.user.user_domain_id
        try:
            domain = domain_get(request, domain_id)
            domain_name = domain.name
        except Exception:
            # Best effort: keep whatever name (possibly None) we already have.
            LOG.warning("Unable to retrieve Domain: %s" % domain_id)
    domain = base.APIDictWrapper({"id": domain_id,
                                  "name": domain_name})
    return domain


# TODO(gabriel): Is there ever a valid case for admin to be false here?
# A quick search through the codebase reveals that it's always called with
# admin=true so I suspect we could eliminate it entirely as with the other
# tenant commands.
def tenant_get(request, project, admin=True):
    """Fetch a single project by id."""
    manager = VERSIONS.get_project_manager(request, admin=admin)
    return manager.get(project)


def tenant_delete(request, project):
    """Delete the project with the given id."""
    manager = VERSIONS.get_project_manager(request, admin=True)
    return manager.delete(project)
def tenant_list(request, paginate=False, marker=None, domain=None, user=None):
    """Return ``(tenants, has_more_data)`` for the project list view."""
    manager = VERSIONS.get_project_manager(request, admin=True)
    page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
    limit = page_size + 1 if paginate else None

    has_more_data = False
    if VERSIONS.active < 3:
        tenants = manager.list(limit, marker)
        # Requesting page_size + 1 entries reveals whether another page
        # exists; drop the sentinel entry before returning.
        if paginate and len(tenants) > page_size:
            tenants.pop(-1)
            has_more_data = True
    else:
        # The v3 manager filters by domain/user and is not paginated here.
        tenants = manager.list(domain=domain, user=user)
    return (tenants, has_more_data)
def tenant_update(request, project, name=None, description=None,
                  enabled=None, domain=None):
    """Update a project; ``domain`` applies to Identity v3 only."""
    manager = VERSIONS.get_project_manager(request, admin=True)
    if VERSIONS.active < 3:
        return manager.update(project, name, description, enabled)
    else:
        return manager.update(project, name=name, description=description,
                              enabled=enabled, domain=domain)


def user_list(request, project=None, domain=None, group=None):
    """List users; domain/group filters are honored by v3 only."""
    if VERSIONS.active < 3:
        kwargs = {"tenant_id": project}
    else:
        kwargs = {
            "project": project,
            "domain": domain,
            "group": group
        }
    users = keystoneclient(request, admin=True).users.list(**kwargs)
    # Normalize v2 user objects so callers can rely on `project_id`.
    return [VERSIONS.upgrade_v2_user(user) for user in users]


def user_create(request, name=None, email=None, password=None, project=None,
                enabled=None, domain=None):
    """Create a user; ``domain`` applies to Identity v3 only."""
    manager = keystoneclient(request, admin=True).users
    if VERSIONS.active < 3:
        user = manager.create(name, password, email, project, enabled)
        return VERSIONS.upgrade_v2_user(user)
    else:
        return manager.create(name, password=password, email=email,
                              project=project, enabled=enabled, domain=domain)


def user_delete(request, user_id):
    """Delete the user with the given id."""
    return keystoneclient(request, admin=True).users.delete(user_id)


def user_get(request, user_id, admin=True):
    """Fetch a single user, normalized for v2/v3 differences."""
    user = keystoneclient(request, admin=admin).users.get(user_id)
    return VERSIONS.upgrade_v2_user(user)
def user_update(request, user, **data):
    """Update a user's details, default project and/or password.

    With Identity v2, the user model, default tenant and password must be
    updated through separate API calls; failures are collected and the
    last one is re-raised after all steps have been attempted. With v3 a
    single update call handles everything.

    Raises ClientException(405) when the configured keystone backend is
    marked read-only via OPENSTACK_KEYSTONE_BACKEND.
    """
    manager = keystoneclient(request, admin=True).users
    error = None

    if not keystone_can_edit_user():
        raise ClientException(405, _("Identity service does not allow "
                                     "editing user data."))

    # The v2 API updates user model, password and default project separately
    if VERSIONS.active < 3:
        # Use defaulted pops so callers omitting these keys don't KeyError.
        password = data.pop('password', None)
        project = data.pop('project', None)

        # Update user details
        try:
            user = manager.update(user, **data)
        except Exception:
            error = exceptions.handle(request, ignore=True)

        # Update default tenant
        try:
            user_update_tenant(request, user, project)
            user.tenantId = project
        except Exception:
            error = exceptions.handle(request, ignore=True)

        # Check for existing roles
        # Show a warning if no role exists for the project
        user_roles = roles_for_user(request, user, project)
        if not user_roles:
            messages.warning(request,
                             _('User %s has no role defined for '
                               'that project.')
                             % data.get('name', None))

        # If present, update password
        # FIXME(gabriel): password change should be its own form + view
        if password:
            try:
                user_update_password(request, user, password)
                # Changing your own password invalidates the session token.
                if user == request.user.id:
                    logout(request)
            except Exception:
                error = exceptions.handle(request, ignore=True)

        if error is not None:
            raise error

    # v3 API is so much simpler...
    else:
        # Drop an empty or absent password so it isn't overwritten with "".
        if not data.get('password'):
            data.pop('password', None)
        user = manager.update(user, **data)
        return VERSIONS.upgrade_v2_user(user)
def user_update_enabled(request, user, enabled):
    """Enable or disable a user account."""
    manager = keystoneclient(request, admin=True).users
    if VERSIONS.active < 3:
        return manager.update_enabled(user, enabled)
    else:
        return manager.update(user, enabled=enabled)


def user_update_password(request, user, password, admin=True):
    """Set a user's password (admin endpoint by default)."""
    manager = keystoneclient(request, admin=admin).users
    if VERSIONS.active < 3:
        return manager.update_password(user, password)
    else:
        return manager.update(user, password=password)


def user_update_own_password(request, origpassword, password):
    """Change the logged-in user's own password."""
    client = keystoneclient(request, admin=False)
    if VERSIONS.active < 3:
        # v2 requires the user id on the client for the self-service call.
        client.user_id = request.user.id
        return client.users.update_own_password(origpassword, password)
    else:
        return client.users.update(request.user.id, password=password)


def user_update_tenant(request, user, project, admin=True):
    """Set a user's default project (tenant)."""
    manager = keystoneclient(request, admin=admin).users
    if VERSIONS.active < 3:
        return manager.update_tenant(user, project)
    else:
        return manager.update(user, project=project)


def group_create(request, domain_id, name, description=None):
    """Create a group in the given domain (Identity v3)."""
    manager = keystoneclient(request, admin=True).groups
    return manager.create(domain=domain_id,
                          name=name,
                          description=description)


def group_get(request, group_id, admin=True):
    """Fetch a single group by id."""
    manager = keystoneclient(request, admin=admin).groups
    return manager.get(group_id)


def group_delete(request, group_id):
    """Delete the group with the given id."""
    manager = keystoneclient(request, admin=True).groups
    return manager.delete(group_id)
def group_list(request, domain=None, project=None, user=None):
    """List groups, optionally filtered by domain, project and/or user."""
    manager = keystoneclient(request, admin=True).groups
    groups = manager.list(user=user)

    # TODO(dklyle): once keystoneclient supports filtering by
    # domain change this to use that cleaner implementation
    if domain:
        groups = [group for group in groups if group.domain_id == domain]

    if project:
        # Keep only groups holding at least one role on the project.
        groups = [group for group in groups
                  if roles_for_group(request, group=group.id, project=project)]

    return groups
def group_update(request, group_id, name=None, description=None):
    """Update a group's name and/or description."""
    manager = keystoneclient(request, admin=True).groups
    return manager.update(group=group_id,
                          name=name,
                          description=description)


def add_group_user(request, group_id, user_id):
    """Add a user to a group."""
    manager = keystoneclient(request, admin=True).users
    return manager.add_to_group(group=group_id, user=user_id)


def remove_group_user(request, group_id, user_id):
    """Remove a user from a group."""
    manager = keystoneclient(request, admin=True).users
    return manager.remove_from_group(group=group_id, user=user_id)


def role_create(request, name):
    """Create a new role."""
    manager = keystoneclient(request, admin=True).roles
    return manager.create(name)


def role_get(request, role_id):
    """Fetch a single role by id."""
    manager = keystoneclient(request, admin=True).roles
    return manager.get(role_id)


def role_update(request, role_id, name=None):
    """Rename a role."""
    manager = keystoneclient(request, admin=True).roles
    return manager.update(role_id, name)


def role_delete(request, role_id):
    """Delete the role with the given id."""
    manager = keystoneclient(request, admin=True).roles
    return manager.delete(role_id)


def role_list(request):
    """ Returns a global list of available roles. """
    return keystoneclient(request, admin=True).roles.list()
def roles_for_user(request, user, project):
    """List the roles a user has on a project."""
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active < 3:
        return manager.roles_for_user(user, project)
    else:
        return manager.list(user=user, project=project)


def add_tenant_user_role(request, project=None, user=None, role=None,
                         group=None, domain=None):
    """ Adds a role for a user on a tenant. """
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active < 3:
        return manager.add_user_role(user, role, project)
    else:
        # v3 grants may also target a group or a domain.
        return manager.grant(role, user=user, project=project,
                             group=group, domain=domain)


def remove_tenant_user_role(request, project=None, user=None, role=None,
                            group=None, domain=None):
    """ Removes a given single role for a user from a tenant. """
    manager = keystoneclient(request, admin=True).roles
    if VERSIONS.active < 3:
        return manager.remove_user_role(user, role, project)
    else:
        return manager.revoke(role, user=user, project=project,
                              group=group, domain=domain)


def remove_tenant_user(request, project=None, user=None, domain=None):
    """ Removes all roles from a user on a tenant, removing them from it. """
    client = keystoneclient(request, admin=True)
    roles = client.roles.roles_for_user(user, project)
    for role in roles:
        remove_tenant_user_role(request, user=user, role=role.id,
                                project=project, domain=domain)


def roles_for_group(request, group, domain=None, project=None):
    """List the roles a group has on a domain or project."""
    manager = keystoneclient(request, admin=True).roles
    return manager.list(group=group, domain=domain, project=project)


def add_group_role(request, role, group, domain=None, project=None):
    """ Adds a role for a group on a domain or project ."""
    manager = keystoneclient(request, admin=True).roles
    return manager.grant(role=role, group=group, domain=domain,
                         project=project)


def remove_group_role(request, role, group, domain=None, project=None):
    """ Removes a given single role for a group from a domain or project. """
    manager = keystoneclient(request, admin=True).roles
    return manager.revoke(role=role, group=group, project=project,
                          domain=domain)


def remove_group_roles(request, group, domain=None, project=None):
    """ Removes all roles from a group on a domain or project,
    removing them from it.
    """
    client = keystoneclient(request, admin=True)
    roles = client.roles.list(group=group, domain=domain, project=project)
    for role in roles:
        remove_group_role(request, role=role.id, group=group,
                          domain=domain, project=project)
def get_default_role(request):
    """
    Gets the default role object from Keystone and saves it as a global
    since this is configured in settings and should not change from request
    to request. Supports lookup by name or id.
    """
    global DEFAULT_ROLE
    default = getattr(settings, "OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
    # Only hit the API the first time; afterwards serve the cached role.
    if default and DEFAULT_ROLE is None:
        try:
            roles = keystoneclient(request, admin=True).roles.list()
        except Exception:
            # Report the failure but fall through with an empty role list,
            # leaving DEFAULT_ROLE unset for a retry on the next request.
            roles = []
            exceptions.handle(request)
        for role in roles:
            if role.id == default or role.name == default:
                DEFAULT_ROLE = role
                break
    return DEFAULT_ROLE
def list_ec2_credentials(request, user_id):
    """List a user's EC2 credential pairs."""
    return keystoneclient(request).ec2.list(user_id)


def create_ec2_credentials(request, user_id, tenant_id):
    """Create an EC2 credential pair for a user on a tenant."""
    return keystoneclient(request).ec2.create(user_id, tenant_id)


def get_user_ec2_credentials(request, user_id, access_token):
    """Fetch a single EC2 credential pair by its access token."""
    return keystoneclient(request).ec2.get(user_id, access_token)
def _keystone_backend_setting(key, default):
    """Look up ``key`` in the OPENSTACK_KEYSTONE_BACKEND settings dict."""
    backend_settings = getattr(settings, "OPENSTACK_KEYSTONE_BACKEND", {})
    return backend_settings.get(key, default)


def keystone_can_edit_domain():
    """Whether domains are editable (requires multi-domain support too)."""
    multi_domain_support = getattr(settings,
                                   'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                                   False)
    return (_keystone_backend_setting('can_edit_domain', True)
            and multi_domain_support)


def keystone_can_edit_user():
    """Whether the configured keystone backend allows editing users."""
    return _keystone_backend_setting('can_edit_user', True)


def keystone_can_edit_project():
    """Whether the configured keystone backend allows editing projects."""
    return _keystone_backend_setting('can_edit_project', True)


def keystone_can_edit_group():
    """Whether the configured keystone backend allows editing groups."""
    return _keystone_backend_setting('can_edit_group', True)


def keystone_can_edit_role():
    """Whether the configured keystone backend allows editing roles."""
    return _keystone_backend_setting('can_edit_role', True)


def keystone_backend_name():
    """Return the configured backend's display name, or 'unknown'.

    A backend dict without a 'name' key now also falls back to 'unknown'
    instead of raising KeyError.
    """
    return _keystone_backend_setting('name', 'unknown')
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
import tensorflow as tf
distributions_py = tf.contrib.distributions
def _swap_first_last_axes(array):
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
  """Use monkey-patching to capture the output of an MVNDiag sample_n.

  Yields a list that accumulates every tensor returned by
  MultivariateNormalDiag.sample_n while the context is active.
  """
  data_container = []
  true_mvndiag_sample = distributions_py.MultivariateNormalDiag.sample_n

  def _capturing_mvndiag_sample(self, n, seed=None, name="sample_n"):
    samples = true_mvndiag_sample(self, n=n, seed=seed, name=name)
    data_container.append(samples)
    return samples

  distributions_py.MultivariateNormalDiag.sample_n = _capturing_mvndiag_sample
  try:
    yield data_container
  finally:
    # Restore the original method even if the caller's block raises;
    # otherwise the patch would leak into subsequent tests.
    distributions_py.MultivariateNormalDiag.sample_n = true_mvndiag_sample
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
  """Use monkey-patching to capture the output of a Normal sample_n.

  Yields a list that accumulates every tensor returned by
  Normal.sample_n while the context is active.
  """
  data_container = []
  true_normal_sample = distributions_py.Normal.sample_n

  def _capturing_normal_sample(self, n, seed=None, name="sample_n"):
    samples = true_normal_sample(self, n=n, seed=seed, name=name)
    data_container.append(samples)
    return samples

  distributions_py.Normal.sample_n = _capturing_normal_sample
  try:
    yield data_container
  finally:
    # Restore the original method even if the caller's block raises;
    # otherwise the patch would leak into subsequent tests.
    distributions_py.Normal.sample_n = true_normal_sample
def make_univariate_mixture(batch_shape, num_components):
  """Build a Mixture of `num_components` scalar Normals with random params.

  Logits are drawn uniform in [-1, 1) and shifted by -50.
  NOTE(review): subtracting a constant from every logit leaves the softmax
  probabilities unchanged; presumably the shift exercises low-logit
  numerics - confirm the intent.
  """
  logits = tf.random_uniform(
      list(batch_shape) + [num_components], -1, 1, dtype=tf.float32) - 50.
  components = [
      distributions_py.Normal(
          mu=np.float32(np.random.randn(*list(batch_shape))),
          sigma=np.float32(10 * np.random.rand(*list(batch_shape))))
      for _ in range(num_components)
  ]
  cat = distributions_py.Categorical(logits, dtype=tf.int32)
  return distributions_py.Mixture(cat, components)
def make_multivariate_mixture(batch_shape, num_components, event_shape):
  """Build a Mixture of `num_components` MVNDiag components.

  Each component has random mean and diagonal stddev over
  `batch_shape + event_shape`.
  """
  logits = tf.random_uniform(
      list(batch_shape) + [num_components], -1, 1, dtype=tf.float32) - 50.
  components = [
      distributions_py.MultivariateNormalDiag(
          mu=np.float32(np.random.randn(*list(batch_shape + event_shape))),
          diag_stdev=np.float32(10 * np.random.rand(
              *list(batch_shape + event_shape))))
      for _ in range(num_components)
  ]
  cat = distributions_py.Categorical(logits, dtype=tf.int32)
  return distributions_py.Mixture(cat, components)
class MixtureTest(tf.test.TestCase):
  # Tests for distributions.Mixture: shape reporting, constructor
  # validation, and statistics against hand-computed values.

  def testShapes(self):
    # Mixture must report the batch/event shapes of its components, both
    # statically and via the runtime shape ops.
    with self.test_session():
      for batch_shape in ([], [1], [2, 3, 4]):
        dist = make_univariate_mixture(batch_shape, num_components=10)
        self.assertAllEqual(batch_shape, dist.get_batch_shape())
        self.assertAllEqual(batch_shape, dist.batch_shape().eval())
        self.assertAllEqual([], dist.get_event_shape())
        self.assertAllEqual([], dist.event_shape().eval())

        for event_shape in ([1], [2]):
          dist = make_multivariate_mixture(
              batch_shape, num_components=10, event_shape=event_shape)
          self.assertAllEqual(batch_shape, dist.get_batch_shape())
          self.assertAllEqual(batch_shape, dist.batch_shape().eval())
          self.assertAllEqual(event_shape, dist.get_event_shape())
          self.assertAllEqual(event_shape, dist.event_shape().eval())

  def testBrokenShapesStatic(self):
    # Statically-known shape mismatches must fail at construction time.
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             r"cat.num_classes != len"):
      distributions_py.Mixture(
          distributions_py.Categorical([0.1, 0.5]),  # 2 classes
          [distributions_py.Normal(mu=1.0, sigma=2.0)])
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"\(\) and \(2,\) are not compatible"):
      # The value error is raised because the batch shapes of the
      # Normals are not equal. One is a scalar, the other is a
      # vector of size (2,).
      distributions_py.Mixture(
          distributions_py.Categorical([-0.5, 0.5]),  # scalar batch
          [distributions_py.Normal(mu=1.0, sigma=2.0),  # scalar dist
           distributions_py.Normal(mu=[1.0, 1.0], sigma=[2.0, 2.0])])
    with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
      cat_logits = tf.placeholder(shape=[1, None], dtype=tf.float32)
      distributions_py.Mixture(
          distributions_py.Categorical(cat_logits),
          [distributions_py.Normal(mu=[1.0], sigma=[2.0])])

  def testBrokenShapesDynamic(self):
    # With validate_args=True, mismatched component batch shapes that are
    # only known at run time must raise from the graph.
    with self.test_session():
      d0_param = tf.placeholder(dtype=tf.float32)
      d1_param = tf.placeholder(dtype=tf.float32)
      d = distributions_py.Mixture(
          distributions_py.Categorical([0.1, 0.2]),
          [distributions_py.Normal(mu=d0_param, sigma=d0_param),
           distributions_py.Normal(mu=d1_param, sigma=d1_param)],
          validate_args=True)
      with self.assertRaisesOpError(r"batch shape must match"):
        d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
      with self.assertRaisesOpError(r"batch shape must match"):
        d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})

  def testBrokenTypes(self):
    # Constructor argument validation: cat type, component list contents,
    # dtype agreement, and continuity agreement.
    with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
      distributions_py.Mixture(None, [])
    cat = distributions_py.Categorical([0.3, 0.2])
    # components must be a list of distributions
    with self.assertRaisesWithPredicateMatch(
        TypeError, "all .* must be Distribution instances"):
      distributions_py.Mixture(cat, [None])
    with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
      distributions_py.Mixture(
          cat,
          [distributions_py.Normal(mu=[1.0], sigma=[2.0]),
           distributions_py.Normal(mu=[np.float16(1.0)],
                                   sigma=[np.float16(2.0)])])
    with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
      distributions_py.Mixture(distributions_py.Categorical([0.3, 0.2]), None)
    with self.assertRaisesWithPredicateMatch(TypeError,
                                             "either be continuous or not"):
      distributions_py.Mixture(
          cat,
          [distributions_py.Normal(mu=[1.0], sigma=[2.0]),
           distributions_py.Bernoulli(dtype=tf.float32, logits=[1.0])])
def testMeanUnivariate(self):
with self.test_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=2)
mean = dist.mean()
self.assertEqual(batch_shape, mean.get_shape())
cat_probs = tf.nn.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape, mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testMeanMultivariate(self):
    """Multivariate mixture mean matches the weighted component means."""
    with self.test_session() as sess:
        for batch_shape in ((), (2,), (2, 3)):
            dist = make_multivariate_mixture(
                batch_shape=batch_shape, num_components=2, event_shape=(4,))
            mean = dist.mean()
            # Mean carries the event dimension (4,) on top of the batch shape.
            self.assertEqual(batch_shape + (4,), mean.get_shape())
            cat_probs = tf.nn.softmax(dist.cat.logits)
            dist_means = [d.mean() for d in dist.components]
            mean_value, cat_probs_value, dist_means_value = sess.run(
                [mean, cat_probs, dist_means])
            self.assertEqual(batch_shape + (4,), mean_value.shape)
            # Put the component axis first so we can zip over components.
            cat_probs_value = _swap_first_last_axes(cat_probs_value)
            # Add a new innermost dimension for broadcasting to mvn vector shape
            cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
            true_mean = sum(
                [c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
            self.assertAllClose(true_mean, mean_value)
def testProbScalarUnivariate(self):
    """prob() on a scalar-batch univariate mixture matches the weighted sum."""
    with self.test_session() as sess:
        dist = make_univariate_mixture(batch_shape=[], num_components=2)
        # Exercise vector, scalar, and rank-2 sample inputs.
        for x in [np.array(
                [1.0, 2.0], dtype=np.float32), np.array(
                1.0, dtype=np.float32), np.random.randn(3, 4).astype(np.float32)]:
            p_x = dist.prob(x)
            self.assertEqual(x.shape, p_x.get_shape())
            # Scalar batch: wrap logits to give softmax a batch dim, then unwrap.
            cat_probs = tf.nn.softmax([dist.cat.logits])[0]
            dist_probs = [d.prob(x) for d in dist.components]
            p_x_value, cat_probs_value, dist_probs_value = sess.run(
                [p_x, cat_probs, dist_probs])
            self.assertEqual(x.shape, p_x_value.shape)
            # Mixture pdf = sum_i pi_i * p_i(x).
            total_prob = sum(
                c_p_value * d_p_value
                for (c_p_value, d_p_value)
                in zip(cat_probs_value, dist_probs_value))
            self.assertAllClose(total_prob, p_x_value)
def testProbScalarMultivariate(self):
    """prob() on a scalar-batch multivariate mixture matches the weighted sum."""
    with self.test_session() as sess:
        dist = make_multivariate_mixture(
            batch_shape=[], num_components=2, event_shape=[3])
        # Exercise batched events, a single event, and a rank-3 input.
        for x in [np.array(
                [[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
                [-1.0, 0.0, 1.0], dtype=np.float32),
                np.random.randn(2, 2, 3).astype(np.float32)]:
            p_x = dist.prob(x)
            # The event dimension is reduced away by prob().
            self.assertEqual(x.shape[:-1], p_x.get_shape())
            # Scalar batch: wrap logits to give softmax a batch dim, then unwrap.
            cat_probs = tf.nn.softmax([dist.cat.logits])[0]
            dist_probs = [d.prob(x) for d in dist.components]
            p_x_value, cat_probs_value, dist_probs_value = sess.run(
                [p_x, cat_probs, dist_probs])
            self.assertEqual(x.shape[:-1], p_x_value.shape)
            total_prob = sum(c_p_value * d_p_value
                             for (c_p_value, d_p_value
                                  ) in zip(cat_probs_value, dist_probs_value))
            self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
    """Batched pdf equals the categorical-weighted sum of component pdfs."""
    with self.test_session() as sess:
        mixture = make_univariate_mixture(batch_shape=[2, 3], num_components=2)
        # Inputs matching the batch shape exactly, and with a sample dim.
        for x in [np.random.randn(2, 3).astype(np.float32),
                  np.random.randn(4, 2, 3).astype(np.float32)]:
            prob_op = mixture.prob(x)
            self.assertEqual(x.shape, prob_op.get_shape())
            weights_op = tf.nn.softmax(mixture.cat.logits)
            component_probs = [c.prob(x) for c in mixture.components]
            actual, weights, probs = sess.run(
                [prob_op, weights_op, component_probs])
            self.assertEqual(x.shape, actual.shape)
            # Put the component axis first so we can zip over components.
            weights = _swap_first_last_axes(weights)
            expected = sum(w * p for w, p in zip(weights, probs))
            self.assertAllClose(expected, actual)
def testProbBatchMultivariate(self):
    """Batched multivariate pdf matches the manual weighted sum."""
    with self.test_session() as sess:
        dist = make_multivariate_mixture(
            batch_shape=[2, 3], num_components=2, event_shape=[4])
        # Inputs with and without an extra leading sample dimension.
        for x in [np.random.randn(2, 3, 4).astype(np.float32),
                  np.random.randn(4, 2, 3, 4).astype(np.float32)]:
            p_x = dist.prob(x)
            # The event dimension is reduced away by prob().
            self.assertEqual(x.shape[:-1], p_x.get_shape())
            cat_probs = tf.nn.softmax(dist.cat.logits)
            dist_probs = [d.prob(x) for d in dist.components]
            p_x_value, cat_probs_value, dist_probs_value = sess.run(
                [p_x, cat_probs, dist_probs])
            self.assertEqual(x.shape[:-1], p_x_value.shape)
            # Put the component axis first so we can zip over components.
            cat_probs_value = _swap_first_last_axes(cat_probs_value)
            total_prob = sum(
                c_p_value * d_p_value for (c_p_value, d_p_value)
                in zip(cat_probs_value, dist_probs_value))
            self.assertAllClose(total_prob, p_x_value)
def testSampleScalarBatchUnivariate(self):
    """Scalar-batch samples are routed from the selected components."""
    with self.test_session() as sess:
        num_components = 3
        dist = make_univariate_mixture(
            batch_shape=[], num_components=num_components)
        n = 4
        # The capture helper records each component's sample_n() output so we
        # can compare mixture draws against the raw component draws.
        with _test_capture_normal_sample_outputs() as component_samples:
            samples = dist.sample_n(n, seed=123)
        self.assertEqual(samples.dtype, tf.float32)
        self.assertEqual((4,), samples.get_shape())
        # Same seed as the mixture's internal categorical draw, so this should
        # reproduce the component choices made inside sample_n.
        cat_samples = dist.cat.sample_n(n, seed=123)
        sample_values, cat_sample_values, dist_sample_values = sess.run(
            [samples, cat_samples, component_samples])
        self.assertEqual((4,), sample_values.shape)
        for c in range(num_components):
            which_c = np.where(cat_sample_values == c)[0]
            size_c = which_c.size
            # Scalar Batch univariate case: batch_size == 1, rank 1
            # NOTE(review): assumes the mixture consumes component draws in
            # order, so the first size_c captured draws of component c are the
            # ones used -- confirm against the capture helper's contract.
            which_dist_samples = dist_sample_values[c][:size_c]
            self.assertAllClose(which_dist_samples, sample_values[which_c])
def testSampleScalarBatchMultivariate(self):
    """Scalar-batch multivariate samples are routed from the right components."""
    with self.test_session() as sess:
        num_components = 3
        dist = make_multivariate_mixture(
            batch_shape=[], num_components=num_components, event_shape=[2])
        n = 4
        # Capture each component's raw sample_n() output for comparison.
        with _test_capture_mvndiag_sample_outputs() as component_samples:
            samples = dist.sample_n(n, seed=123)
        self.assertEqual(samples.dtype, tf.float32)
        self.assertEqual((4, 2), samples.get_shape())
        # Same seed reproduces the internal categorical component choices.
        cat_samples = dist.cat.sample_n(n, seed=123)
        sample_values, cat_sample_values, dist_sample_values = sess.run(
            [samples, cat_samples, component_samples])
        self.assertEqual((4, 2), sample_values.shape)
        for c in range(num_components):
            which_c = np.where(cat_sample_values == c)[0]
            size_c = which_c.size
            # Scalar Batch multivariate case: batch_size == 1, rank 2
            # NOTE(review): assumes component draws are consumed in order.
            which_dist_samples = dist_sample_values[c][:size_c, :]
            self.assertAllClose(which_dist_samples, sample_values[which_c, :])
def testSampleBatchUnivariate(self):
    """Batched univariate samples are routed per batch element."""
    with self.test_session() as sess:
        num_components = 3
        dist = make_univariate_mixture(
            batch_shape=[2, 3], num_components=num_components)
        n = 4
        # Capture each component's raw sample_n() output for comparison.
        with _test_capture_normal_sample_outputs() as component_samples:
            samples = dist.sample_n(n, seed=123)
        self.assertEqual(samples.dtype, tf.float32)
        self.assertEqual((4, 2, 3), samples.get_shape())
        # Same seed reproduces the internal categorical component choices.
        cat_samples = dist.cat.sample_n(n, seed=123)
        sample_values, cat_sample_values, dist_sample_values = sess.run(
            [samples, cat_samples, component_samples])
        self.assertEqual((4, 2, 3), sample_values.shape)
        for c in range(num_components):
            # np.where over the (sample, batch0, batch1) axes of the draws.
            which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
            size_c = which_c_s.size
            # Batch univariate case: batch_size == [2, 3], rank 3
            # NOTE(review): assumes component draws are consumed in order per
            # batch cell -- confirm against the capture helper's contract.
            which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
                                                       which_c_b1]
            self.assertAllClose(which_dist_samples,
                                sample_values[which_c_s, which_c_b0, which_c_b1])
def testSampleBatchMultivariate(self):
    """Batched multivariate samples are routed per batch element."""
    with self.test_session() as sess:
        num_components = 3
        dist = make_multivariate_mixture(
            batch_shape=[2, 3], num_components=num_components, event_shape=[4])
        n = 5
        # Capture each component's raw sample_n() output for comparison.
        with _test_capture_mvndiag_sample_outputs() as component_samples:
            samples = dist.sample_n(n, seed=123)
        self.assertEqual(samples.dtype, tf.float32)
        self.assertEqual((5, 2, 3, 4), samples.get_shape())
        # Same seed reproduces the internal categorical component choices.
        cat_samples = dist.cat.sample_n(n, seed=123)
        sample_values, cat_sample_values, dist_sample_values = sess.run(
            [samples, cat_samples, component_samples])
        self.assertEqual((5, 2, 3, 4), sample_values.shape)
        for c in range(num_components):
            # np.where over the (sample, batch0, batch1) axes of the draws.
            which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
            size_c = which_c_s.size
            # Batch univariate case: batch_size == [2, 3], rank 4 (multivariate)
            # NOTE(review): assumes component draws are consumed in order per
            # batch cell -- confirm against the capture helper's contract.
            which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
                                                       which_c_b1, :]
            self.assertAllClose(which_dist_samples,
                                sample_values[which_c_s, which_c_b0, which_c_b1, :])
def testEntropyLowerBoundMultivariate(self):
    """entropy_lower_bound() equals the weighted sum of component entropies."""
    with self.test_session() as sess:
        for batch_shape in ((), (2,), (2, 3)):
            dist = make_multivariate_mixture(
                batch_shape=batch_shape, num_components=2, event_shape=(4,))
            entropy_lower_bound = dist.entropy_lower_bound()
            self.assertEqual(batch_shape, entropy_lower_bound.get_shape())
            cat_probs = tf.nn.softmax(dist.cat.logits)
            dist_entropy = [d.entropy() for d in dist.components]
            entropy_lower_bound_value, cat_probs_value, dist_entropy_value = (
                sess.run([entropy_lower_bound, cat_probs, dist_entropy]))
            self.assertEqual(batch_shape, entropy_lower_bound_value.shape)
            # Put the component axis first so we can zip over components.
            cat_probs_value = _swap_first_last_axes(cat_probs_value)
            # entropy_lower_bound = sum_i pi_i entropy_i
            # for i in num_components, batchwise.
            true_entropy_lower_bound = sum(
                [c_p * m for (c_p, m) in zip(cat_probs_value, dist_entropy_value)])
            self.assertAllClose(true_entropy_lower_bound, entropy_lower_bound_value)
class MixtureBenchmark(tf.test.Benchmark):
    """Benchmarks sampling from Mixture distributions on CPU and GPU."""

    def _runSamplingBenchmark(self, name,
                              create_distribution, use_gpu, num_components,
                              batch_size, num_features, sample_size):
        """Time one sample op for a mixture built by `create_distribution`.

        Prints a tab-separated line: use_gpu, components, batch, features,
        sample size, and the measured wall time.
        """
        config = tf.ConfigProto()
        # Let ops without a GPU kernel fall back to CPU instead of failing.
        config.allow_soft_placement = True
        # Fixed seeds so benchmark graphs are reproducible across runs.
        np.random.seed(127)
        with tf.Session(config=config, graph=tf.Graph()) as sess:
            tf.set_random_seed(0)
            with tf.device("/gpu:0" if use_gpu else "/cpu:0"):
                mixture = create_distribution(
                    num_components=num_components,
                    batch_size=batch_size,
                    num_features=num_features)
                sample_op = mixture.sample(sample_size).op
                sess.run(tf.initialize_all_variables())
                reported = self.run_op_benchmark(
                    sess, sample_op,
                    min_iters=10,
                    name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d"
                          % (name, use_gpu, num_components,
                             batch_size, num_features, sample_size)))
                print("\t".join(["%s", "%d", "%d", "%d", "%d", "%g"])
                      % (use_gpu, num_components, batch_size,
                         num_features, sample_size, reported["wall_time"]))

    def benchmarkSamplingMVNDiag(self):
        """Benchmark mixtures of diagonal-covariance multivariate normals."""
        print("mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")

        def create_distribution(batch_size, num_components, num_features):
            # Random logits/means/stddevs; Variables so init cost is excluded
            # from the timed sample op.
            cat = distributions_py.Categorical(
                logits=np.random.randn(batch_size, num_components))
            mus = [
                tf.Variable(np.random.randn(batch_size, num_features))
                for _ in range(num_components)]
            sigmas = [
                tf.Variable(np.random.rand(batch_size, num_features))
                for _ in range(num_components)]
            components = list(
                distributions_py.MultivariateNormalDiag(mu=mu, diag_stdev=sigma)
                for (mu, sigma) in zip(mus, sigmas))
            return distributions_py.Mixture(cat, components)

        # Sweep the full grid of benchmark configurations.
        for use_gpu in False, True:
            if use_gpu and not tf.test.is_gpu_available():
                continue
            for num_components in 1, 8, 16:
                for batch_size in 1, 32:
                    for num_features in 1, 64, 512:
                        for sample_size in 1, 32, 128:
                            self._runSamplingBenchmark(
                                "mvn_diag", create_distribution=create_distribution,
                                use_gpu=use_gpu,
                                num_components=num_components,
                                batch_size=batch_size,
                                num_features=num_features,
                                sample_size=sample_size)

    def benchmarkSamplingMVNFull(self):
        """Benchmark mixtures of full-covariance multivariate normals."""
        print("mvn_full\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")

        def psd(x):
            """Construct batch-wise PSD matrices."""
            return np.stack([np.dot(np.transpose(z), z) for z in x])

        def create_distribution(batch_size, num_components, num_features):
            cat = distributions_py.Categorical(
                logits=np.random.randn(batch_size, num_components))
            mus = [
                tf.Variable(np.random.randn(batch_size, num_features))
                for _ in range(num_components)]
            sigmas = [
                tf.Variable(
                    psd(np.random.rand(batch_size, num_features, num_features)))
                for _ in range(num_components)]
            components = list(
                distributions_py.MultivariateNormalFull(mu=mu, sigma=sigma)
                for (mu, sigma) in zip(mus, sigmas))
            return distributions_py.Mixture(cat, components)

        # Sweep the full grid of benchmark configurations.
        for use_gpu in False, True:
            if use_gpu and not tf.test.is_gpu_available():
                continue
            for num_components in 1, 8, 16:
                for batch_size in 1, 32:
                    for num_features in 1, 64, 512:
                        for sample_size in 1, 32, 128:
                            self._runSamplingBenchmark(
                                "mvn_full", create_distribution=create_distribution,
                                use_gpu=use_gpu,
                                num_components=num_components,
                                batch_size=batch_size,
                                num_features=num_features,
                                sample_size=sample_size)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
    tf.test.main()
|
|
"""Parameter manipulation utilities."""
from collections.abc import Iterable, Mapping
from urllib.parse import quote
import datetime
import json
# Removed top-level import to correct circular imports
# (we're in backport territory, these things happen)
# from mws.mws import MWSError
def enumerate_param(param, values):
    """Build a dict enumerating ``param`` over ``values``.

    A bare value (not a list, tuple, or set) is treated as a one-element
    list before enumeration.

    Example:
        enumerate_param('MarketplaceIdList.Id', (123, 345, 4343))
        => {'MarketplaceIdList.Id.1': 123,
            'MarketplaceIdList.Id.2': 345,
            'MarketplaceIdList.Id.3': 4343}
    """
    if not isinstance(values, (list, tuple, set)):
        # Normalize a single value to a one-element list.
        values = [values]
    if not any(values):
        # Nothing truthy to enumerate (covers the empty collection, too).
        # NOTE(review): this also drops all-falsy inputs such as [0];
        # confirm that is intended behavior.
        return {}
    key_base = dot_appended_param(param)
    numbered = enumerate(values, start=1)
    return {"{}{}".format(key_base, idx): val for idx, val in numbered}
def enumerate_params(params=None):
    """Run ``enumerate_param`` for every (param, values) pair in ``params``
    and merge the results into one flat dict.

    Non-dict (including None) input yields an empty dict.
    """
    if not isinstance(params, dict):
        # Also covers the default None.
        return {}
    output = {}
    for key, vals in params.items():
        output.update(enumerate_param(key, vals))
    return output
def enumerate_keyed_param(param, values):
    """Flatten a list of per-item dicts into keyed, enumerated params.

    Each dict in ``values`` describes one item; its keys are appended
    after the 1-based item index.

    Example:
        param = "InboundShipmentPlanRequestItems.member"
        values = [
            {'SellerSKU': 'Football2415', 'Quantity': 3},
            {'SellerSKU': 'TeeballBall3251', 'Quantity': 5},
        ]
        =>
        {
            'InboundShipmentPlanRequestItems.member.1.SellerSKU': 'Football2415',
            'InboundShipmentPlanRequestItems.member.1.Quantity': 3,
            'InboundShipmentPlanRequestItems.member.2.SellerSKU': 'TeeballBall3251',
            'InboundShipmentPlanRequestItems.member.2.Quantity': 5,
        }

    Raises:
        ValueError: if any entry in ``values`` is not a dict.
    """
    if not isinstance(values, (list, tuple, set)):
        # Normalize a single value to a one-element list.
        values = [values]
    if not any(values):
        # Nothing truthy to enumerate.
        return {}
    key_base = dot_appended_param(param)
    # Validate every entry up front: only dicts can be keyed.
    for entry in values:
        if not isinstance(entry, dict):
            raise ValueError(
                (
                    "Non-dict value detected. "
                    "`values` must be a list, tuple, or set; containing only dicts."
                )
            )
    return {
        "{param}{idx}.{key}".format(param=key_base, idx=idx, key=key): val
        for idx, entry in enumerate(values, start=1)
        for key, val in entry.items()
    }
def dict_keyed_param(param, dict_from):
    """Flatten a single dict under ``param`` without enumeration.

    Example:
        param = "ShipmentRequestDetails.PackageDimensions"
        dict_from = {'Length': 5, 'Width': 5, 'Height': 5, 'Unit': 'inches'}
        =>
        {
            'ShipmentRequestDetails.PackageDimensions.Length': 5,
            'ShipmentRequestDetails.PackageDimensions.Width': 5,
            'ShipmentRequestDetails.PackageDimensions.Height': 5,
            'ShipmentRequestDetails.PackageDimensions.Unit': 'inches',
        }
    """
    key_base = dot_appended_param(param)
    return {
        "{param}{key}".format(param=key_base, key=key): val
        for key, val in dict_from.items()
    }
def flat_param_dict(value, prefix=""):
    """Collapse arbitrarily nested dicts and non-string iterables into a
    flat params dict.

    - Keys of a child dict are concatenated to their parent key.
    - Elements of a non-string iterable are numbered from 1 and the number
      is concatenated to the parent key.
    - Keys and sub-keys are joined by ``.``.

    With ``prefix`` set, every key in the output starts with ``prefix + '.'``.

    Raises:
        ValueError: for a scalar value with no prefix to live under.
    """
    # Normalize prefix: None / "" both become the empty string.
    prefix = str(prefix) if prefix else ""
    expandable = (isinstance(value, (Mapping, Iterable))
                  and not isinstance(value, str))
    if not expandable:
        # Base case: a scalar needs a key to be mapped under.
        if not prefix:
            raise ValueError(
                (
                    "Non-dict, non-iterable value requires a prefix "
                    "(would return a mapping of `prefix: value`)"
                )
            )
        return {dot_appended_param(prefix, reverse=True): value}
    # Recursive case: expand the container, one recursive call per child.
    if prefix:
        prefix = dot_appended_param(prefix)
    if isinstance(value, Mapping):
        children = value.items()
    else:
        # Non-string iterable: number elements starting at 1.
        children = enumerate(value, start=1)
    flattened = {}
    for key, child in children:
        flattened.update(flat_param_dict(child, prefix="{}{}".format(prefix, key)))
    return flattened
def dot_appended_param(param_key, reverse=False):
    """Normalize ``param_key`` to end with a single trailing ``'.'``.

    With ``reverse=True`` the behavior flips: the result is guaranteed
    NOT to end with ``'.'`` (one trailing dot is stripped if present).
    """
    trimmed = param_key[:-1] if param_key.endswith(".") else param_key
    return trimmed if reverse else trimmed + "."
# String values (compared case-insensitively) that map to boolean False.
BOOL_FALSE_STRINGS = ("no", "n", "none", "off", "false", "0")


def coerce_to_bool(val):
    """Coerce ``val`` to a boolean for use in MWS requests.

    Strings matching one of ``BOOL_FALSE_STRINGS`` (case-insensitively)
    become ``False``; every other value goes through built-in ``bool()``.
    """
    is_false_string = isinstance(val, str) and val.lower() in BOOL_FALSE_STRINGS
    return False if is_false_string else bool(val)
def remove_empty_param_keys(params):
    """Return a copy of ``params`` without keys whose value is ``None``
    or the empty string (other falsy values such as 0 are kept).
    """
    cleaned = {}
    for key, val in params.items():
        if val is None or val == "":
            continue
        cleaned[key] = val
    return cleaned
def clean_params_dict(params):
    """Clean multiple param values in a dict, returning a new dict
    containing the original keys and cleaned values.

    Raises:
        MWSError: if any value is rejected by ``clean_value``; the
            original ValueError is chained as the cause.
    """
    cleaned_params = dict()
    for key, val in params.items():
        try:
            cleaned_params[key] = clean_value(val)
        except ValueError as exc:
            # Imported here (not at module top) to avoid a circular import
            # with mws.mws.
            from mws.mws import MWSError
            raise MWSError(str(exc)) from exc
    return cleaned_params
def clean_value(val):
    """Clean a single value so it can be sent in a request.

    Containers are rejected (they must be flattened first); dates and
    bools get dedicated handling; everything else is stringified and
    URL-quoted.

    Raises:
        ValueError: for dict/list/set/tuple values.
    """
    if isinstance(val, (dict, list, set, tuple)):
        raise ValueError("Cannot clean parameter value of type %s" % str(type(val)))
    # Type-specific handlers, checked in order.
    handlers = (
        ((datetime.datetime, datetime.date), clean_date),
        (bool, clean_bool),
    )
    for types_, handler in handlers:
        if isinstance(val, types_):
            return handler(val)
    # Fall back to string cleaning for everything else.
    return clean_string(str(val))
def clean_string(val):
    """URL-quote ``val``, leaving only the characters ``-_.~`` unescaped."""
    return quote(val, safe="-_.~")
def clean_bool(val):
    """Convert a boolean to its JSON string form ("true"/"false").

    Raises:
        ValueError: for anything that is not exactly True or False.
    """
    # bool cannot be subclassed, so an isinstance check is equivalent to
    # identity tests against True and False.
    if not isinstance(val, bool):
        raise ValueError("Expected a boolean, got %s" % val)
    return json.dumps(val)
def clean_date(val):
    """Format a datetime.datetime or datetime.date as URL-quoted ISO 8601."""
    return clean_string(val.isoformat())
|
|
"""Test requirements module."""
import os
from pathlib import Path
from unittest.mock import patch, call
from pytest import raises
from homeassistant import setup
from homeassistant.requirements import (
CONSTRAINT_FILE,
async_get_integration_with_requirements,
async_process_requirements,
PROGRESS_FILE,
_install,
RequirementsNotFound,
)
from tests.common import get_test_home_assistant, MockModule, mock_integration
def env_without_wheel_links():
    """Return a copy of os.environ with the wheels-links variable removed.

    Bug fix: the variable consulted elsewhere in this module (see
    test_install_with_wheels_index) is WHEELS_LINKS, not WHEEL_LINKS, so
    popping "WHEEL_LINKS" made this helper a no-op.
    """
    env = dict(os.environ)
    env.pop("WHEELS_LINKS", None)
    return env
class TestRequirements:
    """Test the requirements module."""

    # Assigned per-test in setup_method.
    hass = None
    backup_cache = None

    # pylint: disable=invalid-name, no-self-use
    def setup_method(self, method):
        """Set up the test."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Clean up."""
        self.hass.stop()

    @patch("os.path.dirname")
    @patch("homeassistant.util.package.is_virtual_env", return_value=True)
    @patch("homeassistant.util.package.is_docker_env", return_value=False)
    @patch("homeassistant.util.package.install_package", return_value=True)
    @patch.dict(os.environ, env_without_wheel_links(), clear=True)
    def test_requirement_installed_in_venv(
        self, mock_install, mock_denv, mock_venv, mock_dirname
    ):
        """Test requirement installed in virtual environment."""
        mock_dirname.return_value = "ha_package_path"
        self.hass.config.skip_pip = False
        mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
        assert setup.setup_component(self.hass, "comp", {})
        assert "comp" in self.hass.config.components
        # Inside a venv the package is installed in place: no target dir.
        assert mock_install.call_args == call(
            "package==0.0.1",
            constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
            no_cache_dir=False,
        )

    @patch("os.path.dirname")
    @patch("homeassistant.util.package.is_virtual_env", return_value=False)
    @patch("homeassistant.util.package.is_docker_env", return_value=False)
    @patch("homeassistant.util.package.install_package", return_value=True)
    @patch.dict(os.environ, env_without_wheel_links(), clear=True)
    def test_requirement_installed_in_deps(
        self, mock_install, mock_denv, mock_venv, mock_dirname
    ):
        """Test requirement installed in deps directory."""
        mock_dirname.return_value = "ha_package_path"
        self.hass.config.skip_pip = False
        mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"]))
        assert setup.setup_component(self.hass, "comp", {})
        assert "comp" in self.hass.config.components
        # Outside a venv (and not in docker) packages land in config/deps.
        assert mock_install.call_args == call(
            "package==0.0.1",
            target=self.hass.config.path("deps"),
            constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
            no_cache_dir=False,
        )
async def test_install_existing_package(hass):
    """Test an install attempt on an existing package."""
    # Package presumably not installed yet, so install_package is called once.
    with patch(
        "homeassistant.util.package.install_package", return_value=True
    ) as mock_inst:
        await async_process_requirements(hass, "test_component", ["hello==1.0.0"])

    assert len(mock_inst.mock_calls) == 1

    # Already installed: no install attempt must be made.
    with patch("homeassistant.util.package.is_installed", return_value=True), patch(
        "homeassistant.util.package.install_package"
    ) as mock_inst:
        await async_process_requirements(hass, "test_component", ["hello==1.0.0"])

    assert len(mock_inst.mock_calls) == 0
async def test_install_missing_package(hass):
    """Test that a failed package install raises RequirementsNotFound."""
    # install_package reporting failure must surface as RequirementsNotFound.
    with patch(
        "homeassistant.util.package.install_package", return_value=False
    ) as mock_inst:
        with raises(RequirementsNotFound):
            await async_process_requirements(hass, "test_component", ["hello==1.0.0"])

    assert len(mock_inst.mock_calls) == 1
async def test_get_integration_with_requirements(hass):
    """Check getting an integration with loaded requirements."""
    hass.config.skip_pip = False
    # One dependency, one after-dependency, and the component itself, each
    # declaring a single requirement of its own.
    mock_integration(
        hass, MockModule("test_component_dep", requirements=["test-comp-dep==1.0.0"])
    )
    mock_integration(
        hass,
        MockModule(
            "test_component_after_dep", requirements=["test-comp-after-dep==1.0.0"]
        ),
    )
    mock_integration(
        hass,
        MockModule(
            "test_component",
            requirements=["test-comp==1.0.0"],
            dependencies=["test_component_dep"],
            partial_manifest={"after_dependencies": ["test_component_after_dep"]},
        ),
    )

    with patch(
        "homeassistant.util.package.is_installed", return_value=False
    ) as mock_is_installed, patch(
        "homeassistant.util.package.install_package", return_value=True
    ) as mock_inst:
        integration = await async_get_integration_with_requirements(
            hass, "test_component"
        )
        assert integration
        assert integration.domain == "test_component"

    # Requirements of the component AND of both kinds of dependencies are
    # checked and installed: 3 requirements in total.
    assert len(mock_is_installed.mock_calls) == 3
    assert sorted(mock_call[1][0] for mock_call in mock_is_installed.mock_calls) == [
        "test-comp-after-dep==1.0.0",
        "test-comp-dep==1.0.0",
        "test-comp==1.0.0",
    ]

    assert len(mock_inst.mock_calls) == 3
    assert sorted(mock_call[1][0] for mock_call in mock_inst.mock_calls) == [
        "test-comp-after-dep==1.0.0",
        "test-comp-dep==1.0.0",
        "test-comp==1.0.0",
    ]
async def test_install_with_wheels_index(hass):
    """Test an install attempt with wheels index URL."""
    hass.config.skip_pip = False
    mock_integration(hass, MockModule("comp", requirements=["hello==1.0.0"]))

    # In a docker env with WHEELS_LINKS set, the URL must be forwarded to
    # install_package as find_links, and pip's cache must be disabled.
    with patch("homeassistant.util.package.is_installed", return_value=False), patch(
        "homeassistant.util.package.is_docker_env", return_value=True
    ), patch("homeassistant.util.package.install_package") as mock_inst, patch.dict(
        os.environ, {"WHEELS_LINKS": "https://wheels.hass.io/test"}
    ), patch(
        "os.path.dirname"
    ) as mock_dir:
        mock_dir.return_value = "ha_package_path"
        assert await setup.async_setup_component(hass, "comp", {})
        assert "comp" in hass.config.components

        assert mock_inst.call_args == call(
            "hello==1.0.0",
            find_links="https://wheels.hass.io/test",
            constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
            no_cache_dir=True,
        )
async def test_install_on_docker(hass):
    """Test an install attempt on an docker system env."""
    hass.config.skip_pip = False
    mock_integration(hass, MockModule("comp", requirements=["hello==1.0.0"]))

    # Docker without a wheels-links variable: no find_links is passed, but
    # pip's cache is still disabled.
    with patch("homeassistant.util.package.is_installed", return_value=False), patch(
        "homeassistant.util.package.is_docker_env", return_value=True
    ), patch("homeassistant.util.package.install_package") as mock_inst, patch(
        "os.path.dirname"
    ) as mock_dir, patch.dict(
        os.environ, env_without_wheel_links(), clear=True
    ):
        mock_dir.return_value = "ha_package_path"
        assert await setup.async_setup_component(hass, "comp", {})
        assert "comp" in hass.config.components

        assert mock_inst.call_args == call(
            "hello==1.0.0",
            constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
            no_cache_dir=True,
        )
async def test_progress_lock(hass):
    """Test the progress marker file exists during install and is removed after."""
    progress_path = Path(hass.config.path(PROGRESS_FILE))
    kwargs = {"hello": "world"}

    def assert_env(req, **passed_kwargs):
        """Assert the env."""
        # While install_package runs, the progress marker must exist and
        # _install must pass the requirement and kwargs through unchanged.
        assert progress_path.exists()
        assert req == "hello"
        assert passed_kwargs == kwargs
        return True

    with patch("homeassistant.util.package.install_package", side_effect=assert_env):
        _install(hass, "hello", kwargs)

    # Marker removed once the install completes.
    assert not progress_path.exists()
|
|
import os
import sqlite3
import pandas as pd
import datetime
import numpy as np
import wget
def updateModisDB(filenames, cacheDir):
    """Register downloaded MODIS granules in <cacheDir>/modis_db.db.

    Filenames are parsed per the MODIS naming convention
    (PRODUCT.AYYYYDDD.TILE...) into (TILE, YEAR, DOY, filename) rows and
    stored in a table named after the product, de-duplicated against any
    rows already present.

    Args:
        filenames: paths of downloaded granules, all of one product.
        cacheDir: directory that holds the sqlite cache database.
    """
    if len(filenames) == 0:
        return
    db_fn = os.path.join(cacheDir, "modis_db.db")
    product = os.path.basename(filenames[0]).split('.')[0]
    tiles, years, doys, fns = [], [], [], []
    for filename in filenames:
        parts = os.path.basename(filename).split('.')
        fns.append(filename)
        years.append(parts[1][1:5])  # 'AYYYYDDD' -> 'YYYY'
        doys.append(parts[1][5:9])   # 'AYYYYDDD' -> 'DDD'
        tiles.append(parts[2])
    new_df = pd.DataFrame.from_dict(
        {"TILE": tiles, "YEAR": years, "DOY": doys, "filename": fns})
    conn = sqlite3.connect(db_fn)
    try:
        # Bug fix: merge only when THIS product's table exists. The old code
        # assumed the DB file existing implied the product table existed,
        # which broke the first time a second product was cached.
        cur = conn.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
            (product,))
        if cur.fetchone() is not None:
            orig_df = pd.read_sql_query("SELECT * from %s" % product, conn)
            # DataFrame.append was removed in pandas 2.0; use concat instead.
            new_df = pd.concat([orig_df, new_df], ignore_index=True)
            new_df = new_df.drop_duplicates(keep='last')
        new_df.to_sql("%s" % product, conn, if_exists="replace", index=False)
    finally:
        # Always release the connection, even if the merge fails.
        conn.close()
def searchModisDB(tiles, start_date, end_date, product, cacheDir):
    """Find MODIS 4-day composite dates in a range that are missing from the cache.

    Walks every composite start day (DOY 1, 5, 9, ... 365) between
    start_date and end_date for each tile and returns the (DOY, TILE, YEAR)
    combinations with no entry in <cacheDir>/modis_db.db, i.e. those that
    still need downloading.

    Args:
        tiles: a single tile name ("h10v04") or an iterable of tile names.
        start_date: 'YYYY-MM-DD' inclusive range start.
        end_date: 'YYYY-MM-DD' inclusive range end.
        product: MODIS product name (also the DB table name).
        cacheDir: directory holding modis_db.db.

    Returns:
        DataFrame with columns DOY, TILE, YEAR of missing composites.
    """
    db_fn = os.path.join(cacheDir, "modis_db.db")
    conn = sqlite3.connect(db_fn)
    startdd = datetime.datetime.strptime(start_date, '%Y-%m-%d')
    enddd = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    numDays = (enddd - startdd).days
    # Composite start days: 1, 5, ..., 365.
    laidates = np.array(range(1, 366, 4))
    # Bug fix: `basestring` is Python 2 only and raised NameError on py3.
    if isinstance(tiles, str):
        tiles = [tiles]
    found = []     # rows present in the DB
    expected = []  # all composite rows in the requested range
    for tile in tiles:
        for i in range(numDays + 1):
            dd = startdd + datetime.timedelta(days=i)
            year = dd.year
            doy = (dd - datetime.datetime(year, 1, 1, 0, 0)).days + 1
            candidates = laidates[laidates >= doy]
            if candidates.size == 0:
                # DOY 366 (leap year) is past the last composite start day;
                # the old code raised IndexError here.
                continue
            rday = candidates[0]
            if doy != rday:
                # Not a composite start day: nothing to check.
                continue
            df = pd.read_sql_query("SELECT * from %s WHERE (TILE = '%s')"
                                   "AND (YEAR = '%d') AND (DOY = '%03d' )" %
                                   (product, tile, year, rday), conn)
            found.append(df[["DOY", "TILE", "YEAR"]])
            expected.append({"TILE": "%s" % tile, "YEAR": "%d" % year,
                             "DOY": "%03d" % rday})
    conn.close()
    # DataFrame.append was removed in pandas 2.0; collect rows and concat.
    if found:
        df1 = pd.concat(found, ignore_index=True)
    else:
        df1 = pd.DataFrame({"DOY": [], "TILE": [], "YEAR": []})
    df2 = pd.DataFrame(expected, columns=["TILE", "YEAR", "DOY"])
    # Outer merge with indicator: rows only in `expected` are missing from DB.
    merged = df2.merge(df1, indicator=True, how='outer')
    df3 = merged[merged['_merge'] != 'both']
    out_df = df3[["DOY", "TILE", "YEAR"]]
    return out_df
def _csv_to_db(csv_fn, db_name):
    """Mirror a Landsat bulk-metadata CSV into sqlite with bookkeeping columns.

    Adds the local 'sr', 'bt' (both 'N') and empty 'local_file_path'
    columns used to track download state.
    """
    df = pd.read_csv(csv_fn)
    df['sr'] = pd.Series(np.tile('N', len(df)))
    df['bt'] = pd.Series(np.tile('N', len(df)))
    df['local_file_path'] = ''
    conn = sqlite3.connect(db_name)
    df.to_sql("raw_data", conn, if_exists="replace", index=False)
    conn.close()


def search(lat, lon, start_date, end_date, cloud, available, cacheDir, sat):
    """ Search the USGS Landsat database """
    # Bug fix: the module is imported as `import datetime`, so the class must
    # be qualified (`datetime.strptime` raised AttributeError).
    end = datetime.datetime.strptime(end_date, '%Y-%m-%d')
    # this is a landsat-util work around when it fails
    if sat == 7:
        metadataUrl = 'https://landsat.usgs.gov/landsat/metadata_service/bulk_metadata_files/LANDSAT_ETM_C1.csv'
    else:
        metadataUrl = 'https://landsat.usgs.gov/landsat/metadata_service/bulk_metadata_files/LANDSAT_8_C1.csv'
    # Bug fix: URLs are always '/'-separated; splitting on os.sep broke on
    # Windows.
    fn = os.path.join(cacheDir, metadataUrl.split('/')[-1])
    db_name = os.path.join(cacheDir, os.path.basename(fn)[:-4] + '.db')
    # looking to see if metadata CSV is available and if its up to the date needed
    if os.path.exists(fn):
        d = datetime.datetime.fromtimestamp(os.path.getmtime(fn))
        if not os.path.exists(db_name):
            _csv_to_db(fn, db_name)
        # Bug fix: the old year>/month>/day> AND-chain missed most stale
        # cases (e.g. a later month in the same year); compare full dates.
        if end.date() > d.date():
            # Cached CSV predates the requested range: refresh and merge.
            wget.download(metadataUrl, out=fn)
            metadata = pd.read_csv(fn)
            metadata['sr'] = pd.Series(np.tile('N', len(metadata)))
            metadata['bt'] = pd.Series(np.tile('N', len(metadata)))
            metadata['local_file_path'] = ''
            # Bug fix: the old code reused a closed (or never-opened) sqlite
            # connection here; open a fresh one for the merge.
            conn = sqlite3.connect(db_name)
            orig_df = pd.read_sql_query("SELECT * from raw_data", conn)
            # DataFrame.append was removed in pandas 2.0; use concat.
            orig_df = pd.concat([orig_df, metadata], ignore_index=True)
            # Keep existing rows so the local sr/bt download flags survive.
            orig_df = orig_df.drop_duplicates(subset='sceneID', keep='first')
            orig_df.to_sql("raw_data", conn, if_exists="replace", index=False)
            conn.close()
    else:
        wget.download(metadataUrl, out=fn)
        _csv_to_db(fn, db_name)
    conn = sqlite3.connect(db_name)
    query = ("SELECT * from raw_data WHERE (acquisitionDate >= '%s')"
             "AND (acquisitionDate < '%s') AND (upperLeftCornerLatitude > %f )"
             "AND (upperLeftCornerLongitude < %f ) AND "
             "(lowerRightCornerLatitude < %f) AND "
             "(lowerRightCornerLongitude > %f) AND "
             "(cloudCoverFull <= %d) AND (sr = '%s')" %
             (start_date, end_date, lat, lon, lat, lon, cloud, available))
    if sat == 8:
        # Landsat 8 metadata also lists OLI-only scenes; keep OLI_TIRS only.
        query += " AND (sensor = 'OLI_TIRS')"
    output = pd.read_sql_query(query, conn)
    conn.close()
    return output
def searchProduct(productID, db_path, sat):
    """Look up Landsat scenes by LANDSAT_PRODUCT_ID in the local metadata DB.

    Builds the sqlite mirror of the USGS bulk-metadata CSV on first use
    (downloading the CSV if it is not cached), then returns matching rows.

    Args:
        productID: full LANDSAT_PRODUCT_ID string.
        db_path: directory holding/receiving the CSV and sqlite DB.
        sat: 7 (ETM+) or any other value for Landsat 8 (OLI/TIRS).

    Returns:
        DataFrame of raw_data rows whose LANDSAT_PRODUCT_ID matches.
    """
    if sat == 7:
        metadataUrl = 'https://landsat.usgs.gov/landsat/metadata_service/bulk_metadata_files/LANDSAT_ETM_C1.csv'
        db_name = os.path.join(db_path, 'LANDSAT_ETM_C1.db')
    else:
        metadataUrl = 'https://landsat.usgs.gov/landsat/metadata_service/bulk_metadata_files/LANDSAT_8_C1.csv'
        db_name = os.path.join(db_path, 'LANDSAT_8_C1.db')
    # Bug fix: URLs are '/'-separated regardless of platform (was os.sep).
    fn = os.path.join(db_path, metadataUrl.split('/')[-1])
    if not os.path.exists(db_name):
        if not os.path.exists(fn):
            wget.download(metadataUrl, out=fn)
        orig_df = pd.read_csv(fn)
        # Local bookkeeping columns for download state.
        orig_df['sr'] = pd.Series(np.tile('N', len(orig_df)))
        orig_df['bt'] = pd.Series(np.tile('N', len(orig_df)))
        orig_df['local_file_path'] = ''
        conn = sqlite3.connect(db_name)
        orig_df.to_sql("raw_data", conn, if_exists="replace", index=False)
        conn.close()
    conn = sqlite3.connect(db_name)
    # Parameterized query: productID comes from callers and may contain
    # quoting characters; never interpolate it into the SQL string.
    output = pd.read_sql_query(
        "SELECT * from raw_data WHERE (LANDSAT_PRODUCT_ID = ?)",
        conn, params=(productID,))
    conn.close()
    return output
def updateLandsatProductsDB(landsatDB, filenames, cacheDir, product):
    """Record processed Landsat product files in the local products database.

    Appends one row per scene (metadata plus the local file name) to the
    ``product`` table of ``landsat_products.db`` in ``cacheDir``, creating
    the database/table on first use and de-duplicating existing rows.

    Parameters
    ----------
    landsatDB : pandas.DataFrame
        Scene metadata with acquisitionDate, corner coordinates and
        LANDSAT_PRODUCT_ID columns.
    filenames : sequence of str
        Local file name for each scene row.
    cacheDir : str
        Directory containing landsat_products.db.
    product : str
        Product name, used as the table name.
    """
    db_fn = os.path.join(cacheDir, "landsat_products.db")

    landsat_dict = {"acquisitionDate": landsatDB.acquisitionDate,
                    "upperLeftCornerLatitude": landsatDB.upperLeftCornerLatitude,
                    "upperLeftCornerLongitude": landsatDB.upperLeftCornerLongitude,
                    "lowerRightCornerLatitude": landsatDB.lowerRightCornerLatitude,
                    "lowerRightCornerLongitude": landsatDB.lowerRightCornerLongitude,
                    "LANDSAT_PRODUCT_ID": landsatDB.LANDSAT_PRODUCT_ID,
                    "filename": filenames}
    landsat_df = pd.DataFrame.from_dict(landsat_dict)

    # sqlite3.connect creates the database file if it does not exist, so the
    # fresh-database and append paths share one code path.
    conn = sqlite3.connect(db_fn)
    try:
        res = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
        # Bug fix: fetchall() returns a list of 1-tuples; the old code took
        # only the first row, so existing tables beyond the first were never
        # detected (and an empty database raised IndexError).
        tables = [row[0] for row in res.fetchall()]
        if product in tables:
            existing = pd.read_sql_query("SELECT * from %s" % product, conn)
            # pd.concat replaces the removed DataFrame.append API.
            combined = pd.concat([existing, landsat_df], ignore_index=True)
        else:
            combined = landsat_df
        combined = combined.drop_duplicates(keep='last')
        combined.to_sql("%s" % product, conn, if_exists="replace", index=False)
    finally:
        conn.close()
def searchLandsatProductsDB(lat, lon, start_date, end_date, product, cacheDir):
    """Query the local products database for scenes covering a point.

    Returns rows from the ``product`` table of landsat_products.db whose
    footprint contains (lat, lon) and whose acquisitionDate lies in
    [start_date, end_date).

    Parameters
    ----------
    lat, lon : float
        Point of interest (degrees).
    start_date, end_date : str
        ISO date strings; the range is inclusive of start, exclusive of end.
    product : str
        Product name (table name).
    cacheDir : str
        Directory containing landsat_products.db.

    Returns
    -------
    pandas.DataFrame with the matching rows.
    """
    db_fn = os.path.join(cacheDir, "landsat_products.db")
    conn = sqlite3.connect(db_fn)
    try:
        # The table name cannot be a bound parameter, but all values are now
        # passed as parameters instead of %-interpolated into the SQL.
        query = ("SELECT * from %s WHERE (acquisitionDate >= ?) "
                 "AND (acquisitionDate < ?) AND (upperLeftCornerLatitude > ?) "
                 "AND (upperLeftCornerLongitude < ?) AND "
                 "(lowerRightCornerLatitude < ?) AND "
                 "(lowerRightCornerLongitude > ?)" % product)
        out_df = pd.read_sql_query(query, conn,
                                   params=(start_date, end_date, lat, lon, lat, lon))
    finally:
        conn.close()
    return out_df
|
|
"""
Created on 26 Sep 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
example JSON:
4-Way AFE:
{"serial_number":"250011","type":"810-00000","pt1000_v20":2.0,"calibrated_on":null,"dispatched_on":null,
"sn1":{"serial_number":"123456789","sensor_type":"IRMA1","ae_total_zero_mv":"287.0","we_total_zero_mv":"284.0", ...},
"sn2":{"serial_number":"123456789","sensor_type":"IRMA1","ae_total_zero_mv":"287.0","we_total_zero_mv":"284.0", ...},
"sn3":{"serial_number":"123456789","sensor_type":"IRMA1","ae_total_zero_mv":"280.0","we_total_zero_mv":"284.0", ...},
"sn4":{"serial_number":"123456789","sensor_type":"IRMA1","ae_total_zero_mv":"313.0","we_total_zero_mv":"305.0", ...}}
"""
import json
from collections import OrderedDict
from scs_core.data.datetime import LocalizedDatetime
from scs_core.data.datum import Datum
from scs_core.data.json import PersistentJSONable
from scs_core.data.str import Str
from scs_core.data.timedelta import Timedelta
from scs_core.client.http_client import HTTPClient
from scs_core.gas.afe.pt1000_calib import Pt1000Calib
from scs_core.gas.sensor import Sensor
from scs_core.gas.sensor_calib import SensorCalib
# --------------------------------------------------------------------------------------------------------------------
class CalibCurrency(object):
    """
    Utility for computing the age of a sensor calibration relative to a
    reference datetime.
    """

    # Calibrations carry only a date; treat them as having happened at midday.
    __TIME_OFFSET = Timedelta(hours=12)

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def age(cls, calibrated_on, rec):
        """Return the whole seconds elapsed from noon of *calibrated_on* to *rec*."""
        calibration_noon = LocalizedDatetime.construct_from_date(calibrated_on) + cls.__TIME_OFFSET
        elapsed = rec - calibration_noon

        return int(elapsed.total_seconds())
# --------------------------------------------------------------------------------------------------------------------
class AFECalib(PersistentJSONable):
    """
    Calibration sheet for an Alphasense analogue front-end (AFE) board:
    board identity, calibration / dispatch dates, an optional Pt1000
    temperature-sensor calibration, and one SensorCalib per sensor socket
    (JSON keys sn1..snN) -- see the module docstring for an example document.
    """

    ALPHASENSE_HOST = "www.alphasense-technology.co.uk"     # calibration API host
    ALPHASENSE_PATH = "/api/v1/boards/"                     # API path; board serial number is appended
    ALPHASENSE_HEADER = {"Accept": "application/json"}

    # Synthetic four-socket calibration document (all fields neutral values).
    TEST_LOAD = '''
        {"serial_number": "1", "type": "test-load", "calibrated_on": "2020-01-01", "dispatched_on": null,
        "pt1000_v20": 1.0,
        "sn1": {"serial_number": "01", "sensor_type": "SN1", "we_electronic_zero_mv": 1,
        "we_sensor_zero_mv": 1, "we_total_zero_mv": 1, "ae_electronic_zero_mv": 1, "ae_sensor_zero_mv": 1,
        "ae_total_zero_mv": 1, "we_sensitivity_na_ppb": 1.0, "we_cross_sensitivity_no2_na_ppb": "n/a",
        "pcb_gain": 1.0, "we_sensitivity_mv_ppb": 1.0, "we_cross_sensitivity_no2_mv_ppb": "n/a"},
        "sn2": {"serial_number": "02", "sensor_type": "SN2", "we_electronic_zero_mv": 1,
        "we_sensor_zero_mv": 1, "we_total_zero_mv": 1, "ae_electronic_zero_mv": 1, "ae_sensor_zero_mv": 1,
        "ae_total_zero_mv": 1, "we_sensitivity_na_ppb": 1.0, "we_cross_sensitivity_no2_na_ppb": "n/a",
        "pcb_gain": 1.0, "we_sensitivity_mv_ppb": 1.0, "we_cross_sensitivity_no2_mv_ppb": "n/a"},
        "sn3": {"serial_number": "03", "sensor_type": "SN3", "we_electronic_zero_mv": 1,
        "we_sensor_zero_mv": 1, "we_total_zero_mv": 1, "ae_electronic_zero_mv": 1, "ae_sensor_zero_mv": 1,
        "ae_total_zero_mv": 1, "we_sensitivity_na_ppb": 1.0, "we_cross_sensitivity_no2_na_ppb": "n/a",
        "pcb_gain": 1.0, "we_sensitivity_mv_ppb": 1.0, "we_cross_sensitivity_no2_mv_ppb": "n/a"},
        "sn4": {"serial_number": "04", "sensor_type": "SN4", "we_electronic_zero_mv": 1,
        "we_sensor_zero_mv": 1, "we_total_zero_mv": 1, "ae_electronic_zero_mv": 1, "ae_sensor_zero_mv": 1,
        "ae_total_zero_mv": 1, "we_sensitivity_na_ppb": 1.0, "we_cross_sensitivity_no2_na_ppb": "n/a",
        "pcb_gain": 1.0, "we_sensitivity_mv_ppb": 1.0, "we_cross_sensitivity_no2_mv_ppb": "n/a"}}
        '''

    # ----------------------------------------------------------------------------------------------------------------

    __FILENAME = "afe_calib.json"           # file name used by PersistentJSONable storage

    @classmethod
    def persistence_location(cls):
        """Return the (directory, filename) pair used for JSON persistence."""
        return cls.conf_dir(), cls.__FILENAME

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def download(cls, serial_number):
        """Fetch the calibration document for *serial_number* from the
        Alphasense web API and return the corresponding AFECalib
        (None when the response is empty). The HTTP connection is always
        closed, even on error."""
        http_client = HTTPClient()
        http_client.connect(AFECalib.ALPHASENSE_HOST)

        try:
            path = AFECalib.ALPHASENSE_PATH + serial_number
            jdict = json.loads(http_client.get(path, None, AFECalib.ALPHASENSE_HEADER))

            return cls.construct_from_jdict(jdict)

        finally:
            http_client.close()

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, jdict, skeleton=False):
        """Build an AFECalib from a JSON-derived dict (see module example).

        Sensor entries are the keys whose names start with "sn", taken in
        sorted key order; a null entry produces a None placeholder so socket
        positions are preserved. ``skeleton`` is accepted for interface
        compatibility with other construct_from_jdict methods but is unused
        here.
        """
        if not jdict:
            return None

        serial_number = jdict.get('serial_number')
        afe_type = jdict.get('type')

        calibrated_on = Datum.date(jdict.get('calibrated_on'))
        dispatched_on = Datum.date(jdict.get('dispatched_on'))

        pt1000_v20 = jdict.get('pt1000_v20')
        # boards without a pt1000_v20 field carry no Pt1000 calibration
        pt1000_calib = None if pt1000_v20 is None else Pt1000Calib(calibrated_on, pt1000_v20)

        sensor_calibs = []

        for key in sorted(jdict.keys()):
            if key[:2] == "sn":
                if jdict[key] is None:
                    sensor_calibs.append(None)
                    continue

                sensor_calibs.append(SensorCalib.construct_from_jdict(jdict[key]))

        return cls(serial_number, afe_type, calibrated_on, dispatched_on, pt1000_calib, sensor_calibs)

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, serial_number, afe_type, calibrated_on, dispatched_on, pt1000_calib, sensor_calibs):
        """
        Constructor
        """
        super().__init__()

        self.__serial_number = serial_number            # string
        self.__afe_type = afe_type                      # string

        self.__calibrated_on = calibrated_on            # date
        self.__dispatched_on = dispatched_on            # date

        self.__pt1000_calib = pt1000_calib              # Pt1000Calib

        self.__sensor_calibs = sensor_calibs            # array of SensorCalib

    def __eq__(self, other):
        """Field-by-field equality, including each sensor calibration;
        False for objects of incompatible type."""
        try:
            if len(self) != len(other):
                return False

            for i in range(len(self)):
                if self.sensor_calib(i) != other.sensor_calib(i):
                    return False

            return self.serial_number == other.serial_number and self.afe_type == other.afe_type and \
                self.calibrated_on == other.calibrated_on and self.dispatched_on == other.dispatched_on and \
                self.pt1000_calib == other.pt1000_calib

        except (TypeError, AttributeError):
            return False

    def __len__(self):
        # number of sensor sockets (including empty / None sockets)
        return len(self.__sensor_calibs)

    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        """Return an OrderedDict mirroring the Alphasense JSON document layout."""
        jdict = OrderedDict()

        jdict['serial_number'] = self.serial_number
        jdict['type'] = self.afe_type

        jdict['calibrated_on'] = self.calibrated_on.isoformat() if self.calibrated_on else None
        jdict['dispatched_on'] = self.dispatched_on.isoformat() if self.dispatched_on else None

        jdict['pt1000_v20'] = self.pt1000_calib.v20 if self.pt1000_calib else None

        for i in range(len(self.__sensor_calibs)):
            jdict['sn' + str(i + 1)] = self.__sensor_calibs[i]

        return jdict

    # ----------------------------------------------------------------------------------------------------------------

    def sensors(self, afe_baseline):
        """Return one sensor per socket (None for empty sockets), each built
        from its calibration and the matching baseline in *afe_baseline*."""
        sensors = []

        for i in range(len(self)):
            calib = self.sensor_calib(i)
            sensor = None if calib is None else calib.sensor(afe_baseline.sensor_baseline(i))

            sensors.append(sensor)

        return sensors

    def sensor_calibs(self):            # returns dict of gas_name: SensorCalib
        """Map gas name -> SensorCalib; raises ValueError on duplicate gas names."""
        calibs = {}

        for sensor_calib in self.__sensor_calibs:
            if not sensor_calib:
                continue

            sensor = Sensor.find(sensor_calib.serial_number)
            name = sensor.gas_name

            if name in calibs:
                raise ValueError("duplicate gas name: %s" % name)

            calibs[sensor.gas_name] = sensor_calib

        return calibs

    def gas_names(self):
        """Return the gas names of the fitted sensors, in socket order
        (duplicates preserved)."""
        names = []

        for sensor_calib in self.__sensor_calibs:
            if sensor_calib is None:
                continue

            sensor = Sensor.find(sensor_calib.serial_number)
            name = sensor.gas_name

            names.append(name)

        return names

    def has_unique_gas_names(self):
        """Return True when no two fitted sensors report the same gas."""
        names = set()

        for sensor_calib in self.__sensor_calibs:
            if sensor_calib is None:
                continue

            sensor = Sensor.find(sensor_calib.serial_number)
            name = sensor.gas_name

            if name in names:
                return False

            names.add(name)

        return True

    def sensor_index(self, gas_name):
        """Return the socket index of the first sensor reporting *gas_name*,
        or None when no fitted sensor matches."""
        for i in range(len(self.__sensor_calibs)):
            sensor_calib = self.__sensor_calibs[i]

            if sensor_calib is None:
                continue

            sensor = Sensor.find(sensor_calib.serial_number)

            if sensor.gas_name == gas_name:
                return i

        return None

    # ----------------------------------------------------------------------------------------------------------------

    def age(self):
        """Return the calibration age in seconds relative to now."""
        return self.age_at(LocalizedDatetime.now())

    def age_at(self, rec):
        """Return the calibration age in seconds relative to *rec*."""
        return CalibCurrency.age(self.calibrated_on, rec)

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def serial_number(self):
        return self.__serial_number

    @property
    def afe_type(self):
        return self.__afe_type

    @property
    def calibrated_on(self):
        return self.__calibrated_on

    @calibrated_on.setter
    def calibrated_on(self, calibrated_on):
        self.__calibrated_on = calibrated_on

    @property
    def dispatched_on(self):
        return self.__dispatched_on

    @property
    def pt1000_calib(self):
        return self.__pt1000_calib

    def sensor_calib(self, i):
        """Return the SensorCalib for socket *i* (may be None)."""
        return self.__sensor_calibs[i]

    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        cls = self.__class__.__name__
        return cls + ":{serial_number:%s, afe_type:%s, calibrated_on:%s, " \
               "dispatched_on:%s, pt1000_calib:%s, sensor_calibs:%s}" % \
               (self.serial_number, self.afe_type, self.calibrated_on,
                self.dispatched_on, self.pt1000_calib, Str.collection(self.__sensor_calibs))
|
|
# python imports
import warnings
import logging
# django imports
from django.db import IntegrityError
from django.db import connection
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
# permissions imports
from permissions.exceptions import Unauthorized
from permissions.models import ObjectPermission, Actor, ActorGroup
from permissions.models import ObjectPermissionInheritanceBlock
from permissions.models import Permission
from permissions.models import PrincipalRoleRelation
from permissions.models import Role
# Roles ######################################################################
def add_role(principal, role):
    """Assign a *global* role to a principal (actor or group).

    Returns True when a new relation was created, False when the principal
    already held the role.
    """
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    try:
        PrincipalRoleRelation.objects.get(role=role, content_id=None, content_type=None, **lookup)
    except PrincipalRoleRelation.DoesNotExist:
        PrincipalRoleRelation.objects.create(role=role, **lookup)
        return True

    return False
def add_local_role(obj, principal, role):
    """Assign a role to a principal (actor or group) for the single object *obj*.

    Returns True when a new relation was created, False when it already existed.
    """
    ctype = ContentType.objects.get_for_model(obj)
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    try:
        PrincipalRoleRelation.objects.get(role=role, content_id=obj.id, content_type=ctype, **lookup)
    except PrincipalRoleRelation.DoesNotExist:
        PrincipalRoleRelation.objects.create(role=role, content=obj, **lookup)
        return True

    return False
def remove_role(principal, role):
    """Remove a *global* role from a principal (actor or group).

    Returns True when a relation was deleted, False when none existed.
    """
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    try:
        relation = PrincipalRoleRelation.objects.get(
            role=role, content_id=None, content_type=None, **lookup)
    except PrincipalRoleRelation.DoesNotExist:
        return False

    relation.delete()
    return True
def remove_local_role(obj, principal, role):
    """Remove a local role from a principal (actor or group) on object *obj*.

    Returns True when a relation was deleted, False when none existed.
    """
    ctype = ContentType.objects.get_for_model(obj)
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    try:
        relation = PrincipalRoleRelation.objects.get(
            role=role, content_id=obj.id, content_type=ctype, **lookup)
    except PrincipalRoleRelation.DoesNotExist:
        return False

    relation.delete()
    return True
def remove_roles(principal):
    """Delete every *global* role relation of the passed principal.

    Returns True when at least one relation existed, otherwise False.
    """
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    relations = PrincipalRoleRelation.objects.filter(
        content_id=None, content_type=None, **lookup)

    if not relations:
        return False

    relations.delete()
    return True
def remove_local_roles(obj, principal):
    """Delete every local role of *principal* (actor or group) on object *obj*.

    Returns True when at least one relation existed, otherwise False.
    """
    ctype = ContentType.objects.get_for_model(obj)
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    relations = PrincipalRoleRelation.objects.filter(
        content_id=obj.id, content_type=ctype, **lookup)

    if not relations:
        return False

    relations.delete()
    return True
def get_roles(principal, obj=None):
    """Returns *all* roles of the passed principal.

    This takes direct roles and roles via the actor's groups into account.
    If an object is passed, local roles are also added, walking up the
    chain of ``get_parent_for_permissions()`` ancestors.

    **Parameters:**

    principal
        The actor or group for which the roles are returned.

    obj
        The object for which local roles will be returned.
    """
    role_ids = []
    # NOTE(review): this runs before the type dispatch below and assumes the
    # principal has a ``groups`` manager; for an Actor it is recomputed and
    # for an ActorGroup it is overwritten -- looks like dead code, confirm
    # before removing.
    groups = principal.groups.all()
    if isinstance(principal, Actor):
        groups = principal.groups.all()
    elif isinstance(principal, ActorGroup):
        groups = [principal]
    groups_ids_str = ", ".join(["'%s'" % (str(g.id)) for g in groups])
    # Fall back to an empty quoted string so the SQL "IN (...)" stays valid.
    groups_ids_str = groups_ids_str or "''"
    # Global roles for actor and the actor's groups.
    # NOTE(review): ids are interpolated directly into the SQL. They come
    # from ORM primary keys here, but parameterized queries would be safer.
    cursor = connection.cursor()
    # NOTE(review): after the ``or "''"`` fallback above, groups_ids_str is
    # always truthy, so the else branches below are unreachable.
    if groups_ids_str:
        cursor.execute("""SELECT role_id
                          FROM permissions_principalrolerelation
                          WHERE (actor_id='%s' OR group_id IN (%s))
                          AND content_id is Null""" % (principal.id, groups_ids_str))
    else:
        cursor.execute("""SELECT role_id
                          FROM permissions_principalrolerelation
                          WHERE actor_id=%s
                          AND content_id is Null""" % principal.id)
    for row in cursor.fetchall():
        role_ids.append(row[0])
    # Local roles for actor and the actor's groups and all ancestors of the
    # passed object.
    while obj:
        ctype = ContentType.objects.get_for_model(obj)
        if groups_ids_str:
            cursor.execute("""SELECT role_id
                              FROM permissions_principalrolerelation
                              WHERE (actor_id='%s' OR group_id IN (%s))
                              AND content_id='%s'
                              AND content_type_id='%s'""" % (principal.id, groups_ids_str, obj.id, ctype.id))
        else:
            cursor.execute("""SELECT role_id
                              FROM permissions_principalrolerelation
                              WHERE actor_id='%s'
                              AND content_id='%s'
                              AND content_type_id='%s'""" % (principal.id, obj.id, ctype.id))
        for row in cursor.fetchall():
            role_ids.append(row[0])
        # Walk up the permission-inheritance chain; objects without a parent
        # hook terminate the loop.
        try:
            obj = obj.get_parent_for_permissions()
        except AttributeError:
            obj = None
    return Role.objects.filter(pk__in=role_ids)
def get_global_roles(principal):
    """Return the *direct* global roles of a principal.

    ``principal`` may be an Actor, an ActorGroup, or an iterable of groups.
    """
    if isinstance(principal, Actor):
        relations = PrincipalRoleRelation.objects.filter(
            actor=principal, content_id=None, content_type=None)
    else:
        groups = (principal,) if isinstance(principal, ActorGroup) else principal
        relations = PrincipalRoleRelation.objects.filter(
            group__in=groups, content_id=None, content_type=None)

    return [relation.role for relation in relations.order_by('role')]
def get_local_roles(obj, principal):
    """Return the *direct* local roles of *principal* on content object *obj*."""
    ctype = ContentType.objects.get_for_model(obj)
    lookup = {"actor": principal} if isinstance(principal, Actor) else {"group": principal}

    relations = PrincipalRoleRelation.objects.filter(
        content_id=obj.id, content_type=ctype, **lookup).order_by('role')

    return [relation.role for relation in relations]
# Permissions ################################################################
def check_permission(obj, actor, codename, roles=None):
    """Ensure *actor* has permission *codename* for *obj*; raise otherwise.

    **Parameters:**

    obj
        The object for which the permission should be checked.

    codename
        The permission's codename which should be checked.

    actor
        The actor for which the permission should be checked.

    roles
        If given these roles will be assigned to the actor temporarily before
        the permissions are checked.

    Raises Unauthorized when the permission is missing.
    """
    # Bug fix: ``roles`` was accepted but never forwarded, so temporary roles
    # were silently ignored during the check.
    if not has_permission(obj, actor, codename, roles):
        raise Unauthorized("Actor '%s' doesn't have permission '%s' for object '%s' (%s)"
                           % (actor, codename, obj.slug, obj.__class__.__name__))
def grant_permission(obj, role, permission):
    """Grant *permission* to *role* on content object *obj*.

    ``permission`` may be a Permission instance or a codename string.
    The grant is idempotent: an existing ObjectPermission is left alone.
    Returns True on success, False when the codename is unknown.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False

    ctype = ContentType.objects.get_for_model(obj)
    already_granted = ObjectPermission.objects.filter(
        role=role, content_type=ctype, content_id=obj.id, permission=permission).exists()

    if not already_granted:
        ObjectPermission.objects.create(role=role, content=obj, permission=permission)

    return True
def remove_permission(obj, role, permission):
    """Revoke *permission* from *role* on content object *obj*.

    ``permission`` may be a Permission instance or a codename string.
    Returns True when an ObjectPermission was deleted, otherwise False.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False

    ctype = ContentType.objects.get_for_model(obj)
    try:
        grant = ObjectPermission.objects.get(
            role=role, content_type=ctype, content_id=obj.id, permission=permission)
    except ObjectPermission.DoesNotExist:
        return False

    grant.delete()
    return True
def has_permission(obj, actor, codename, roles=None):
    """Return True when *actor* has permission *codename* for *obj*.

    Walks up the permission-inheritance chain (``get_parent_for_permissions``)
    until the permission is found, an inheritance block stops the walk, or
    the chain ends.

    **Parameters:**

    obj
        The object for which the permission should be checked.

    codename
        The permission's codename which should be checked.

    actor
        The actor for which the permission should be checked.

    roles
        If given these roles will be assigned to the actor temporarily before
        the permissions are checked.
    """
    ctype = ContentType.objects.get_for_model(obj)
    cache_key = "%s-%s-%s" % (ctype.id, obj.id, codename)
    result = None  # _get_cached_permission(user, cache_key)  -- cache read currently disabled
    if result is not None:
        return result

    # Bug fix: copy the caller's list so the extend() below cannot mutate it
    # (the old code appended the actor's roles into the caller's own list).
    roles = [] if roles is None else list(roles)

    #
    # if actor.is_superuser:
    #     return True
    #

    roles.extend(get_roles(actor, obj))

    result = False
    while obj is not None:
        p = ObjectPermission.objects.filter(
            content_type=ctype, content_id=obj.id, role__in=roles,
            permission__codename=codename).values("id")

        if len(p) > 0:
            result = True
            break

        if is_inherited(obj, codename) == False:
            result = False
            break

        try:
            obj = obj.get_parent_for_permissions()
            ctype = ContentType.objects.get_for_model(obj)
        except AttributeError:
            result = False
            break

    _cache_permission(actor, cache_key, result)
    return result
# Inheritance ################################################################
def add_inheritance_block(obj, permission):
    """Block inheritance of *permission* on content object *obj*.

    ``permission`` may be a Permission instance or a codename string.
    Returns True when the block exists afterwards, False when the codename
    is unknown or the row could not be created.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False

    ctype = ContentType.objects.get_for_model(obj)
    blocked = ObjectPermissionInheritanceBlock.objects.filter(
        content_type=ctype, content_id=obj.id, permission=permission).exists()

    if not blocked:
        try:
            ObjectPermissionInheritanceBlock.objects.create(content=obj, permission=permission)
        except IntegrityError:
            return False

    return True
def remove_inheritance_block(obj, permission):
    """Remove the inheritance block for *permission* from content object *obj*.

    ``permission`` may be a Permission instance or a codename string.
    Returns True when a block was deleted, otherwise False.
    """
    if not isinstance(permission, Permission):
        try:
            permission = Permission.objects.get(codename=permission)
        except Permission.DoesNotExist:
            return False

    ctype = ContentType.objects.get_for_model(obj)
    try:
        block = ObjectPermissionInheritanceBlock.objects.get(
            content_type=ctype, content_id=obj.id, permission=permission)
    except ObjectPermissionInheritanceBlock.DoesNotExist:
        return False

    block.delete()
    return True
def is_inherited(obj, codename):
    """Return True when permission *codename* is inherited for object *obj*.

    A permission is inherited unless an explicit inheritance block exists.
    """
    ctype = ContentType.objects.get_for_model(obj)
    blocked = ObjectPermissionInheritanceBlock.objects.filter(
        content_type=ctype, content_id=obj.id, permission__codename=codename).exists()

    return not blocked
def get_group_by_id(id):
    """Return the ActorGroup with primary key *id*, or None if it does not exist."""
    result = None
    try:
        result = ActorGroup.objects.get(pk=id)
    except ActorGroup.DoesNotExist:
        pass
    return result
def get_group(name):
    """Return the ActorGroup called *name*, or None if it does not exist."""
    try:
        group = ActorGroup.objects.get(name=name)
    except ActorGroup.DoesNotExist:
        group = None
    return group
def get_role_by_id(id):
    """Return the Role with primary key *id*, or None if it does not exist."""
    try:
        role = Role.objects.get(pk=id)
    except Role.DoesNotExist:
        role = None
    return role
def get_role(name):
    """Returns the role with passed name or None.

    For backwards compatibility an integer primary key is also accepted,
    but this usage is deprecated.
    """
    # Bug fix: the old check used ``isinstance(name, (int, long))``, which
    # raises NameError on Python 3 where ``long`` no longer exists.
    if isinstance(name, int):
        warnings.warn(
            "The use of get_role with an id is deprecated, please use the group name instead.",
            PendingDeprecationWarning
        )
        try:
            return Role.objects.get(pk=name)
        except Role.DoesNotExist:
            return None
    try:
        return Role.objects.get(name=name)
    except Role.DoesNotExist:
        return None
def get_actors(user):
    """Return all Actor records belonging to the passed Django *user*."""
    return Actor.objects.filter(user=user)
def get_actor_by_id(id):
    """Return the Actor with primary key *id*, or None if it does not exist."""
    try:
        actor = Actor.objects.get(id=id)
    except Actor.DoesNotExist:
        actor = None
    return actor
def get_actor(name):
    """Return the Actor called *name*, or None if it does not exist."""
    result = None
    try:
        result = Actor.objects.get(name=name)
    except Actor.DoesNotExist:
        pass
    return result
def get_user(username):
    """Returns the user with passed username or None.

    For backwards compatibility an integer primary key is also accepted,
    but this usage is deprecated.
    """
    # Bug fix: the old check used ``isinstance(username, (int, long))``,
    # which raises NameError on Python 3 where ``long`` no longer exists.
    if isinstance(username, int):
        warnings.warn(
            "The use of get_user with an id is deprecated, please use the username instead.",
            PendingDeprecationWarning
        )
        try:
            return User.objects.get(pk=username)
        except User.DoesNotExist:
            return None
    try:
        return User.objects.get(username=username)
    except User.DoesNotExist:
        return None
def has_group(actor, group):
    """Return True when *actor* belongs to *group*.

    ``group`` may be an ActorGroup instance or a group name.
    """
    wanted = ActorGroup.objects.get(name=group) if isinstance(group, str) else group
    return wanted in actor.groups.all()
def has_actor_group(actor, group):
    """Return True when *actor* belongs to *group* (ActorGroup instance or name)."""
    if isinstance(group, str):
        group = ActorGroup.objects.get(name=group)
    return any(candidate == group for candidate in actor.groups.all())
def reset(obj):
    """Delete every permission and inheritance block attached to *obj*."""
    ctype = ContentType.objects.get_for_model(obj)
    for model in (ObjectPermissionInheritanceBlock, ObjectPermission):
        model.objects.filter(content_id=obj.id, content_type=ctype).delete()
# Registering ################################################################
def register_permission(name, codename, ctypes=None):
    """Register a permission; return it, or False when name/codename is taken.

    **Parameters:**

    name
        The unique human-readable permission name.

    codename
        The unique internal identifier of the permission.

    ctypes
        Optional model classes the permission applies to (used to display
        only reasonable permissions for an object).
    """
    if ctypes is None:
        ctypes = []

    # Refuse duplicates on either the display name or the codename.
    if Permission.objects.filter(Q(name=name) | Q(codename=codename)).exists():
        return False

    permission = Permission.objects.create(name=name, codename=codename)

    content_types = [ContentType.objects.get_for_model(ctype) for ctype in ctypes]
    if content_types:
        permission.content_types = content_types
        permission.save()

    return permission
def unregister_permission(codename):
    """Delete the permission with the passed codename.

    Returns True when it was deleted, False when no such permission exists.
    """
    try:
        target = Permission.objects.get(codename=codename)
    except Permission.DoesNotExist:
        return False
    target.delete()
    return True
def register_role(name):
    """Create the Role named *name*; return it, or False when it already exists."""
    role, created = Role.objects.get_or_create(name=name)
    return role if created else False
def unregister_role(name):
    """Delete the Role named *name*; return True on success, False when unknown."""
    try:
        target = Role.objects.get(name=name)
    except Role.DoesNotExist:
        return False
    target.delete()
    return True
def register_group(name):
    """Create the ActorGroup named *name*; return it, or False when it exists."""
    group, created = ActorGroup.objects.get_or_create(name=name)
    return group if created else False
def unregister_group(name):
    """Delete the ActorGroup named *name*.

    Returns True when it was deleted, False when no such group exists.
    """
    try:
        ActorGroup.objects.get(name=name).delete()
    except ActorGroup.DoesNotExist:
        return False
    return True
def _cache_permission(actor, cache_key, data):
"""Stores the passed data on the passed actor object.
**Parameters:**
actor
The actor on which the data is stored.
cache_key
The key under which the data is stored.
data
The data which is stored.
"""
if not getattr(actor, "permissions", None):
actor.permissions = {}
actor.permissions[cache_key] = data
def _get_cached_permission(actor, cache_key):
"""Returns the stored data from passed actor object for passed cache_key.
**Parameters:**
actor
The actor from which the data is retrieved.
cache_key
The key under which the data is stored.
"""
permissions = getattr(actor, "permissions", None)
if permissions:
logging.error("get_cached_permissions: got permissions %s" % (permissions))
return actor.permissions.get(cache_key, None)
else:
logging.error("don't got no permissions")
|
|
# Copyright 2022 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, render_template, request, jsonify
from google.cloud.exceptions import NotFound
import Utils as u
import PolicyManager as pm
app = Flask(__name__)
@app.route("/")
def homepage():
    """Serve the landing page."""
    return render_template('index.html')
#
# Init methods
#
@app.route("/bq_settings<int:saved>", methods=['GET'])
def bq_settings(saved):
    """Render the BigQuery settings form, pre-filled when settings exist.

    ``saved`` flags whether a save just happened (shown by the template).
    """
    settings = u.Utils.get_bq_settings()
    if not settings:
        return render_template('bq_settings.html')

    return render_template(
        'bq_settings.html',
        project=settings['project'],
        native_dataset=settings['native_dataset'],
        tombstone_dataset=settings['tombstone_dataset'],
        temp_dataset=settings['temp_dataset'],
        external_dataset=settings['external_dataset'],
        saved=saved)
@app.route("/process_bq_settings", methods=['POST'])
def process_bq_settings():
    """Handle the BigQuery settings form (submit or cancel).

    Returns the settings page with saved=1 on success, saved=0 when required
    fields are blank, and the homepage on cancel or an unknown action.
    """
    action = request.form['action']

    if action == "Submit Changes":
        project = request.form['project'].rstrip()
        native_dataset = request.form['native_dataset'].rstrip()
        tombstone_dataset = request.form['tombstone_dataset'].rstrip()
        temp_dataset = request.form['temp_dataset'].rstrip()
        external_dataset = request.form['external_dataset'].rstrip()

        # Bug fix: rstrip() always returns a string, so the old ``!= None``
        # checks could never fail; require non-empty values instead.
        if project and native_dataset:
            u.Utils.init_bq(project, native_dataset, tombstone_dataset, temp_dataset, external_dataset)
            return bq_settings(1)
        return bq_settings(0)

    # Bug fix: an unrecognised action previously fell through and returned
    # None, which Flask turns into a 500 error.
    return homepage()
@app.route("/gcs_settings<int:saved>", methods=['GET'])
def gcs_settings(saved):
settings = u.Utils.get_gcs_settings()
if settings:
return render_template('gcs_settings.html',
project=settings['project'],
archive_bucket=settings['archive_bucket'],
lake_bucket=settings['lake_bucket'],
file_format=settings['file_format'],
compression=settings['compression'],
saved=saved)
else:
return render_template('gcs_settings.html')
@app.route("/process_gcs_settings", methods=['POST'])
def process_gcs_settings():
action = request.form['action']
if action == "Submit Changes":
project = request.form['project'].rstrip()
archive_bucket = request.form['archive_bucket'].rstrip()
lake_bucket = request.form['lake_bucket'].rstrip()
file_format = request.form['file_format'].rstrip()
compression = request.form['compression'].rstrip()
if project != None and archive_bucket != None and lake_bucket != None:
u.Utils.init_gcs(project, archive_bucket, lake_bucket, file_format, compression)
return gcs_settings(1)
else:
return gcs_settings(0)
if action == "Cancel Changes":
return homepage()
@app.route("/dataproc_settings<int:saved>", methods=['GET'])
def dataproc_settings(saved):
settings = u.Utils.get_dataproc_settings()
if settings:
return render_template('dataproc_settings.html',
project=settings['project'],
region=settings['region'],
cluster=settings['cluster'],
delete_script=settings['delete_script'],
saved=saved)
else:
return render_template('dataproc_settings.html')
@app.route("/process_dataproc_settings", methods=['POST'])
def process_dataproc_settings():
action = request.form['action']
if action == "Submit Changes":
project = request.form['project'].rstrip()
region = request.form['region'].rstrip()
cluster = request.form['cluster'].rstrip()
delete_script = request.form['delete_script'].rstrip()
if project != None and region != None and cluster != None and delete_script != None:
u.Utils.init_dataproc(project, region, cluster, delete_script)
return dataproc_settings(1)
else:
return dataproc_settings(0)
if action == "Cancel Changes":
return homepage()
#
# Config methods
#
@app.route("/create_associations<int:saved>", methods=['GET'])
def create_associations(saved):
foreign_key_stmts = u.Utils.get_foreign_key_stmts()
return render_template(
'create_associations.html',
foreign_key_stmts=foreign_key_stmts,
saved=saved)
@app.route("/process_associations", methods=['POST'])
def process_associations():
action = request.form['action']
if action == "Submit Changes":
foreign_key_stmts = request.form['foreign_key_stmts'].rstrip()
u.Utils.write_foreign_keys(foreign_key_stmts)
return create_associations(1)
if action == "Cancel Changes":
return homepage()
@app.route("/create_groupings<int:saved>", methods=['GET'])
def create_groupings(saved):
bq_settings = u.Utils.get_bq_settings()
project = bq_settings['project']
dataset = bq_settings['native_dataset']
table_group_list = u.Utils.get_table_group_pairs()
print('table_group_list: ' + str(table_group_list))
return render_template(
'create_groupings.html',
table_group_list=table_group_list,
project=project,
dataset=dataset,
saved=saved)
@app.route("/process_groupings", methods=['POST'])
def process_groupings():
action = request.form['action']
if action == "Submit Changes":
table_list = u.Utils.get_tables()
bq_settings = u.Utils.get_bq_settings()
for entity_name in table_list:
entity_group = request.form[entity_name].rstrip()
if entity_group:
entity_path = bq_settings['native_dataset'] + '.' + entity_name
u.Utils.write_group(entity_name, entity_path, entity_group)
return create_groupings(1)
if action == "Cancel Changes":
return homepage()
@app.route("/create_scheduled_policy", methods=['GET'])
def create_scheduled_policy():
return render_template(
'create_scheduled_policy.html')
@app.route("/create_ondemand_policy", methods=['GET'])
def create_ondemand_policy():
return render_template(
'create_ondemand_policy.html')
@app.route("/process_scheduled_policy", methods=['POST'])
def process_scheduled_policy():
print('enter process_scheduled_policy')
action = request.form['action']
if action == "Submit Policy":
if 'policy_id' in request.form:
policy_id = request.form['policy_id']
print('policy_id: ' + policy_id)
else:
policy_id = None
policy_action = request.form['policy_action']
entity_name = request.form['entity_name'].rstrip()
entity_path = request.form['entity_path'].rstrip()
storage_system = request.form['storage_system']
grouping = request.form['grouping']
if grouping:
entity_groups = request.form['entity_groups'].rstrip()
else:
entity_groups = None
ts_column = request.form['ts_column'].rstrip()
sql_filter_exp = request.form['sql_filter_exp'].rstrip()
retention_period = request.form['retention_period'].rstrip()
retention_unit = request.form['retention_unit']
pm.PolicyManager.write_update_scheduled_policy(policy_id, storage_system, entity_name, entity_path, grouping, entity_groups,\
ts_column, sql_filter_exp, retention_period, retention_unit, policy_action)
return view_scheduled_policies()
if action == "Cancel Changes":
return homepage()
@app.route("/process_ondemand_policy", methods=['POST'])
def process_ondemand_policy():
action = request.form['action']
if action == "Submit Policy":
if 'policy_id' in request.form:
policy_id = request.form['policy_id']
else:
policy_id = None
policy_action = request.form['policy_action']
entity_name = request.form['entity_name'].rstrip()
entity_path = request.form['entity_path'].rstrip()
storage_system = request.form['storage_system']
grouping = request.form['grouping']
if grouping:
entity_groups = request.form['entity_groups'].rstrip()
else:
entity_groups = None
sql_filter_exp = request.form['sql_filter_exp'].rstrip()
softdelete_period = request.form['softdelete_period'].rstrip()
softdelete_unit = request.form['softdelete_unit']
pm.PolicyManager.write_update_ondemand_policy(policy_id, storage_system, entity_name, entity_path, grouping, entity_groups,\
sql_filter_exp, softdelete_period, softdelete_unit, policy_action)
return view_ondemand_policies()
if action == "Cancel Changes":
return homepage()
@app.route("/view_scheduled_policies", methods=['GET'])
def view_scheduled_policies():
policies = pm.PolicyManager.get_scheduled_policies()
if policies:
return render_template('view_scheduled_policies.html',
policies=policies)
else:
return create_scheduled_policy()
@app.route("/view_ondemand_policies", methods=['GET'])
def view_ondemand_policies():
policies = pm.PolicyManager.get_ondemand_policies()
if policies:
return render_template('view_ondemand_policies.html',
policies=policies)
else:
return create_ondemand_policy()
@app.route("/update_scheduled_policy", methods=['POST'])
def update_scheduled_policy():
policy_id = request.form['policy_id']
policy_action = request.form['policy_action']
storage_system = request.form['storage_system']
entity_name = request.form['entity_name']
entity_path = request.form['entity_path']
grouping = request.form['grouping']
entity_groups = request.form['entity_groups']
ts_column = request.form['ts_column']
sql_filter_exp = request.form['sql_filter_exp']
retention_period = request.form['retention_period']
retention_unit = request.form['retention_unit']
print('sql_filter_exp: ' + sql_filter_exp)
return render_template(
'update_scheduled_policy.html',
policy_id=policy_id,
policy_action=policy_action,
storage_system=storage_system,
entity_name=entity_name,
entity_path=entity_path,
grouping=grouping,
entity_groups=entity_groups,
ts_column=ts_column,
sql_filter_exp=sql_filter_exp,
retention_period=retention_period,
retention_unit=retention_unit)
@app.route("/update_ondemand_policy", methods=['POST'])
def update_ondemand_policy():
policy_id = request.form['policy_id']
policy_action = request.form['policy_action']
storage_system = request.form['storage_system']
entity_name = request.form['entity_name']
entity_path = request.form['entity_path']
grouping = request.form['grouping']
entity_groups = request.form['entity_groups']
sql_filter_exp = request.form['sql_filter_exp']
softdelete_period = request.form['softdelete_period']
softdelete_unit = request.form['softdelete_unit']
return render_template(
'update_ondemand_policy.html',
policy_id=policy_id,
policy_action=policy_action,
storage_system=storage_system,
entity_name=entity_name,
entity_path=entity_path,
grouping=grouping,
entity_groups=entity_groups,
sql_filter_exp=sql_filter_exp,
softdelete_period=softdelete_period,
softdelete_unit=softdelete_unit)
#
# TO DO: implement reporting methods
#
@app.route("/visualize_associations", methods=['GET'])
def visualize_associations():
return render_template(
'visualize_associations.html')
@app.route("/policy_execution_history", methods=['GET'])
def policy_execution_history():
return render_template(
'policy_execution_history.html')
@app.route("/policy_compliance", methods=['GET'])
def policy_compliance():
return render_template(
'policy_compliance.html')
@app.errorhandler(500)
def server_error(e):
    """Return a plain-text 500 response describing the error."""
    # NOTE(review): echoing str(e) to the client can leak internals; consider
    # logging the stacktrace and returning a generic message instead.
    message = 'An internal error occurred: ' + str(e)
    return message, 500
# [END app]
if __name__ == "__main__":
    # Local development entry point; production serving imports `app` directly
    # (e.g. via a WSGI server).
    app.run()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Proximal stochastic dual coordinate ascent optimizer for linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.linear_optimizer.ops import gen_sdca_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework.load_library import load_op_library
from tensorflow.python.framework.ops import convert_to_tensor
from tensorflow.python.framework.ops import name_scope
from tensorflow.python.framework.ops import op_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as var_ops
from tensorflow.python.ops.nn import sigmoid_cross_entropy_with_logits
from tensorflow.python.platform import resource_loader
__all__ = ['SdcaModel']
# Load the native kernels backing the SDCA ops; the shared library ships next
# to this module's data files.
_sdca_ops = load_op_library(resource_loader.get_path_to_datafile(
    '_sdca_ops.so'))
assert _sdca_ops, 'Could not load _sdca_ops.so'
# TODO(sibyl-Aix6ihai): add op_scope to appropriate methods.
class SdcaModel(object):
  """Stochastic dual coordinate ascent solver for linear models.
  This class currently only supports a single machine (multi-threaded)
  implementation. We expect the weights and duals to fit in a single machine.
  Loss functions supported:
  * Binary logistic loss
  * Squared loss
  * Hinge loss
  This class defines an optimizer API to train a linear model.
  ### Usage
  ```python
  # Create a solver with the desired parameters.
  lr = tf.contrib.linear_optimizer.SdcaModel(
      container, examples, variables, options)
  opt_op = lr.minimize()
  predictions = lr.predictions(examples)
  # Primal loss + L1 loss + L2 loss.
  regularized_loss = lr.regularized_loss(examples)
  # Primal loss only
  unregularized_loss = lr.unregularized_loss(examples)
  container: Name of the container (eg a hex-encoded UUID) where internal
    state of the optimizer can be stored. The container can be safely shared
    across many models.
  examples: {
    sparse_features: list of SparseTensors of value type float32.
    dense_features: list of dense tensors of type float32.
    example_labels: a tensor of type float32 and shape [Num examples]
    example_weights: a tensor of type float32 and shape [Num examples]
    example_ids: a tensor of type string and shape [Num examples]
  }
  variables: {
    sparse_features_weights: list of tensors of shape [vocab size]
    dense_features_weights: list of tensors of shape [1]
  }
  options: {
    symmetric_l1_regularization: 0.0
    symmetric_l2_regularization: 1.0
    loss_type: "logistic_loss"
  }
  ```
  In the training program you will just have to run the returned Op from
  minimize(). You should also eventually cleanup the temporary state used by
  the model, by resetting its (possibly shared) container.
  ```python
  # Execute opt_op and train for num_steps.
  for _ in xrange(num_steps):
    opt_op.run()
  # You can also check for convergence by calling
  # lr.approximate_duality_gap()
  ```
  """
  def __init__(self, container, examples, variables, options):
    """Create a new sdca optimizer."""
    if not container or not examples or not variables or not options:
      raise ValueError('All arguments must be specified.')
    supported_losses = ('logistic_loss', 'squared_loss', 'hinge_loss')
    if options['loss_type'] not in supported_losses:
      raise ValueError('Unsupported loss_type: ', options['loss_type'])
    self._assertSpecified(
        ['example_labels', 'example_weights', 'example_ids', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    self._assertSpecified(
        ['sparse_features_weights', 'dense_features_weights'], variables)
    self._assertList(
        ['sparse_features_weights', 'dense_features_weights'], variables)
    self._assertSpecified(
        ['loss_type', 'symmetric_l2_regularization',
         'symmetric_l1_regularization'], options)
    for name in ['symmetric_l1_regularization', 'symmetric_l2_regularization']:
      value = options[name]
      if value < 0.0:
        raise ValueError('%s should be non-negative. Found (%f)' %
                         (name, value))
    self._container = container
    self._examples = examples
    self._variables = variables
    self._options = options
    # Each solver instance gets its own shared state inside the container.
    self._solver_uuid = uuid.uuid4().hex
    self._create_slots()
  def _symmetric_l2_regularization(self):
    """Returns the effective L2, floored at 1.0 (algorithmic requirement)."""
    # Algorithmic requirement (for now) is to have minimal l2 of 1.0
    return max(self._options['symmetric_l2_regularization'], 1.0)
  # TODO(sibyl-Aix6ihai): Use optimizer interface to make use of slot creation logic.
  def _create_slots(self):
    """Creates zero-initialized shadow variables holding pre-shrink updates."""
    # Make internal variables which have the updates before applying L1
    # regularization.
    self._slots = {
        'unshrinked_sparse_features_weights': [],
        'unshrinked_dense_features_weights': [],
    }
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        self._slots['unshrinked_' + name].append(var_ops.Variable(
            array_ops.zeros_like(var.initialized_value(), dtypes.float32)))
  def _assertSpecified(self, items, check_in):
    """Raises ValueError naming any key in `items` whose value is None."""
    for x in items:
      if check_in[x] is None:
        # Bug fix: the original concatenated the None *value* into the
        # message (None + str raises TypeError); report the key name instead.
        raise ValueError(x + ' must be specified.')
  def _assertList(self, items, check_in):
    """Raises ValueError naming any key in `items` whose value is not a list."""
    for x in items:
      if not isinstance(check_in[x], list):
        raise ValueError(x + ' must be a list.')
  def _l1_loss(self):
    """Computes the l1 loss of the model."""
    with name_scope('l1_loss'):
      # `accum` (not `sum`) so the builtin is not shadowed.
      accum = 0.0
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          accum += math_ops.reduce_sum(math_ops.abs(weights))
      # SDCA L1 regularization cost is: l1 * sum(|weights|)
      return self._options['symmetric_l1_regularization'] * accum
  def _l2_loss(self, l2):
    """Computes the l2 loss of the model."""
    with name_scope('l2_loss'):
      # `accum` (not `sum`) so the builtin is not shadowed.
      accum = 0.0
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          accum += math_ops.reduce_sum(math_ops.square(weights))
      # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
      return l2 * accum / 2.0
  def _convert_n_to_tensor(self, input_list, as_ref=False):
    """Converts input list to a set of tensors."""
    return [convert_to_tensor(x, as_ref=as_ref) for x in input_list]
  def _linear_predictions(self, examples):
    """Returns predictions of the form w*x."""
    with name_scope('sdca/prediction'):
      sparse_variables = self._convert_n_to_tensor(self._variables[
          'sparse_features_weights'])
      result = 0.0
      for st_i, sv in zip(examples['sparse_features'], sparse_variables):
        # Split SparseTensor indices into example ids and feature ids.
        ei, fi = array_ops.split(1, 2, st_i.indices)
        ei = array_ops.reshape(ei, [-1])
        fi = array_ops.reshape(fi, [-1])
        fv = array_ops.reshape(st_i.values, [-1])
        # TODO(sibyl-Aix6ihai): This does not work if examples have empty features.
        result += math_ops.segment_sum(
            math_ops.mul(array_ops.gather(sv, fi), fv), ei)
      dense_features = self._convert_n_to_tensor(examples['dense_features'])
      dense_variables = self._convert_n_to_tensor(self._variables[
          'dense_features_weights'])
      for i in range(len(dense_variables)):
        result += dense_features[i] * dense_variables[i]
      # Reshaping to allow shape inference at graph construction time.
      return array_ops.reshape(result, [-1])
  def predictions(self, examples):
    """Add operations to compute predictions by the model.
    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.
    Args:
      examples: Examples to compute predictions on.
    Returns:
      An Operation that computes the predictions for examples.
    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    result = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        result = math_ops.sigmoid(result)
    return result
  def minimize(self, global_step=None, name=None):
    """Add operations to train a linear model by minimizing the loss function.
    Args:
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.
    Returns:
      An Operation that updates the variables passed in the constructor.
    """
    # Technically, the op depends on a lot more than the variables,
    # but we'll keep the list short.
    with op_scope([], name, 'sdca/minimize'):
      sparse_features_indices = []
      sparse_features_values = []
      for sf in self._examples['sparse_features']:
        sparse_features_indices.append(convert_to_tensor(sf.indices))
        sparse_features_values.append(convert_to_tensor(sf.values))
      # The native solver updates the *unshrinked* slot variables in place.
      step_op = _sdca_ops.sdca_solver(
          sparse_features_indices,
          sparse_features_values,
          self._convert_n_to_tensor(self._examples['dense_features']),
          convert_to_tensor(self._examples['example_weights']),
          convert_to_tensor(self._examples['example_labels']),
          convert_to_tensor(self._examples['example_ids']),
          self._convert_n_to_tensor(
              self._slots['unshrinked_sparse_features_weights'],
              as_ref=True),
          self._convert_n_to_tensor(
              self._slots['unshrinked_dense_features_weights'],
              as_ref=True),
          l1=self._options['symmetric_l1_regularization'],
          l2=self._symmetric_l2_regularization(),
          # TODO(sibyl-Aix6ihai): Provide empirical evidence for this. It is better
          # to run more than one iteration on single mini-batch as we want to
          # spend more time in compute. SDCA works better with larger
          # mini-batches and there is also recent work that shows its better to
          # reuse old samples than train on new samples.
          # See: http://arxiv.org/abs/1602.02136.
          num_inner_iterations=2,
          loss_type=self._options['loss_type'],
          container=self._container,
          solver_uuid=self._solver_uuid)
      with ops.control_dependencies([step_op]):
        assign_ops = []
        # Copy the updated slots back into the user-visible variables.
        for name in ['sparse_features_weights', 'dense_features_weights']:
          for var, slot_var in zip(self._variables[name],
                                   self._slots['unshrinked_' + name]):
            assign_ops.append(var.assign(slot_var))
        assign_group = control_flow_ops.group(*assign_ops)
        with ops.control_dependencies([assign_group]):
          # Apply the L1 proximal (shrinkage) step after the copy.
          shrink_l1 = _sdca_ops.sdca_shrink_l1(
              self._convert_n_to_tensor(
                  self._variables['sparse_features_weights'],
                  as_ref=True),
              self._convert_n_to_tensor(
                  self._variables['dense_features_weights'],
                  as_ref=True),
              l1=self._options['symmetric_l1_regularization'],
              l2=self._symmetric_l2_regularization())
      if not global_step:
        return shrink_l1
      with ops.control_dependencies([shrink_l1]):
        return state_ops.assign_add(global_step, 1, name=name).op
  def approximate_duality_gap(self):
    """Add operations to compute the approximate duality gap.
    Returns:
      An Operation that computes the approximate duality gap over all
      examples.
    """
    (primal_loss, dual_loss, example_weights) = _sdca_ops.sdca_training_stats(
        container=self._container,
        solver_uuid=self._solver_uuid)
    # Note that example_weights is guaranteed to be positive by
    # sdca_training_stats so dividing by it is safe.
    return (primal_loss + dual_loss + math_ops.to_double(self._l1_loss()) +
            (2.0 * math_ops.to_double(self._l2_loss(
                self._symmetric_l2_regularization())))) / example_weights
  def unregularized_loss(self, examples):
    """Add operations to compute the loss (without the regularization loss).
    Args:
      examples: Examples to compute unregularized loss on.
    Returns:
      An Operation that computes mean (unregularized) loss for given set of
      examples.
    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/unregularized_loss'):
      predictions = self._linear_predictions(examples)
      labels = convert_to_tensor(examples['example_labels'])
      weights = convert_to_tensor(examples['example_weights'])
      if self._options['loss_type'] == 'logistic_loss':
        return math_ops.reduce_sum(math_ops.mul(
            sigmoid_cross_entropy_with_logits(
                predictions, labels), weights)) / math_ops.reduce_sum(weights)
      if self._options['loss_type'] == 'hinge_loss':
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
        adjusted_labels = math_ops.sub(2 * labels, all_ones)
        all_zeros = array_ops.zeros_like(predictions)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
        error = math_ops.maximum(all_zeros, math_ops.sub(
            all_ones, math_ops.mul(adjusted_labels, predictions)))
        weighted_error = math_ops.mul(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)
      # squared loss
      err = math_ops.sub(labels, predictions)
      weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
  def regularized_loss(self, examples):
    """Add operations to compute the loss with regularization loss included.
    Args:
      examples: Examples to compute loss on.
    Returns:
      An Operation that computes mean (regularized) loss for given set of
      examples.
    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_labels', 'example_weights', 'sparse_features',
         'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)
    with name_scope('sdca/regularized_loss'):
      weights = convert_to_tensor(examples['example_weights'])
      return (((
          self._l1_loss() +
          # Note that here we are using the raw regularization
          # (as specified by the user) and *not*
          # self._symmetric_l2_regularization().
          self._l2_loss(self._options['symmetric_l2_regularization'])) /
              math_ops.reduce_sum(weights)) +
              self.unregularized_loss(examples))
|
|
import copy
import os
import shutil
from typing import Callable

from requests import Response

import demistomock as demisto  # noqa: F401
from CommonServerPython import *  # noqa: F401
# ISO-8601-style timestamp format with microseconds and a literal 'Z' suffix.
GENERAL_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
# RFC 1123 date format as used in HTTP headers.
DATE_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
# Populated at runtime from integration parameters (outside this excerpt).
account_sas_token = ''
storage_account_name = ''
class Client:
"""
API Client
"""
def __init__(self, server_url, verify, proxy, account_sas_token, storage_account_name, api_version):
self.ms_client = MicrosoftStorageClient(server_url, verify, proxy, account_sas_token, storage_account_name,
api_version)
def create_share_request(self, share_name: str) -> Response:
"""
Create a new Azure file share under the specified account.
Args:
share_name (str): Share name.
Returns:
Response: API response from Azure.
"""
params = assign_params(restype="share")
response = self.ms_client.http_request(method='PUT', url_suffix=f'{share_name}',
params=params, return_empty_response=True)
return response
def delete_share_request(self, share_name: str) -> Response:
"""
Delete file share under the specified account.
Args:
share_name (str): Share name.
Returns:
Response: API response from Azure.
"""
params = assign_params(restype="share")
response = self.ms_client.http_request(method='DELETE', url_suffix=f'{share_name}',
params=params, return_empty_response=True)
return response
def list_shares_request(self, limit: str = None, prefix: str = None, marker: str = None) -> str:
"""
list Azure file shares under the specified account.
Args:
limit (str): Number of shares to retrieve.
prefix (str): Filters the results to return only shares whose name begins with the specified prefix.
marker (str): Identifies the portion of the list to be returned.
Returns:
str: API response from Azure.
"""
params = assign_params(comp="list", maxresults=limit, prefix=prefix, marker=marker)
response = self.ms_client.http_request(method='GET', url_suffix='',
params=params, resp_type="text")
return response
def list_directories_and_files_request(self, share_name: str, directory_path: str = None, prefix: str = None,
limit: str = None, marker: str = None) -> str:
"""
List files and directories under the specified share or directory.
Args:
share_name (str): Share name.
directory_path (str): The path to the directory.
prefix (str): Filters the results to return only files and directories whose name begins with the specified prefix.
limit (str): Number of directories and files to retrieve.
marker (str): Identifies the portion of the list to be returned.
Returns:
str: API response from Azure.
"""
params = assign_params(restype="directory", comp="list", include="Timestamps",
prefix=prefix, maxresults=limit, marker=marker)
url_suffix = f'{share_name}/{directory_path}' if directory_path else f'{share_name}'
response = self.ms_client.http_request(method='GET', url_suffix=url_suffix,
params=params, resp_type="text")
return response
def create_directory_request(self, share_name: str, directory_name: str, directory_path: str = None) -> Response:
"""
Create a new directory under the specified share or parent directory.
Args:
share_name (str): Share name.
directory_name (str): New directory name.
directory_path (str): The path to the directory.
Returns:
Response: API response from Azure.
"""
params = assign_params(restype="directory")
headers = {'x-ms-file-permission': 'inherit ',
'x-ms-file-attributes': 'None',
'x-ms-file-creation-time': 'now',
'x-ms-file-last-write-time': 'now'}
url_suffix = f'{share_name}/{directory_path}/{directory_name}' if directory_path else f'{share_name}/{directory_name}'
response = self.ms_client.http_request(method='PUT', url_suffix=url_suffix,
params=params, headers=headers, return_empty_response=True)
return response
def delete_directory_request(self, share_name: str, directory_name: str, directory_path: str) -> Response:
"""
Delete the specified empty directory.
Args:
share_name (str): Share name.
directory_name (str): Directory name.
directory_path (str): The path to the directory.
Returns:
Response: API response from Azure.
"""
params = assign_params(restype="directory")
url_suffix = f'{share_name}/{directory_path}/{directory_name}' if directory_path else f'{share_name}/{directory_name}'
response = self.ms_client.http_request(method='DELETE', url_suffix=url_suffix,
params=params, return_empty_response=True)
return response
def create_file_request(self, share_name: str, file_entry_id: str, file_name: str,
directory_path: str = None) -> Response:
"""
Create a New empty file in Share from War room file Entry ID.
Note that this operation only initializes the file. To add content to a file, we have to call the Put Range operation.
Args:
share_name (str): Share name.
file_entry_id (str): File War room Entry ID.
file_name (str): File name. Default is XSOAR file name.
directory_path (str): The path to the directory where the file should be created.
Returns:
Response: API response from Azure.
"""
xsoar_file_data = demisto.getFilePath(
file_entry_id) # Retrieve XSOAR system file path and name, given file entry ID.
xsoar_system_file_path = xsoar_file_data['path']
new_file_name = file_name if file_name else xsoar_file_data['name']
create_file_headers = {'x-ms-type': 'file',
'x-ms-file-permission': 'Inherit',
'x-ms-file-attributes': 'None',
'x-ms-file-creation-time': 'now',
'x-ms-file-last-write-time': 'now'
}
create_file_url = f'{share_name}/{directory_path}/{new_file_name}' if directory_path else f'{share_name}/{new_file_name}'
try:
shutil.copy(xsoar_system_file_path, new_file_name)
except FileNotFoundError:
raise Exception(
'Failed to prepare file for upload. '
'The process of importing and copying the file data from XSOAR failed.')
try:
with open(new_file_name, 'rb') as file:
file.seek(0, 2)
content_length = file.tell()
create_file_headers['x-ms-content-length'] = str(content_length)
create_file_response = self.ms_client.http_request(method='PUT', url_suffix=create_file_url,
headers=create_file_headers,
return_empty_response=True)
finally:
shutil.rmtree(new_file_name, ignore_errors=True)
return create_file_response
def add_file_content_request(self, share_name: str, file_entry_id: str, file_name: str,
directory_path: str = None) -> Response:
"""
Write a range of bytes to a file.
Note that this operation not initializes the file, but add content to a file.
Args:
share_name (str): Share name.
file_entry_id (str): File War room Entry ID.
file_name (str): File name. Default is XSOAR file name.
directory_path (str): The path to the directory where the file should be created.
Returns:
Response: API response from Azure.
"""
xsoar_file_data = demisto.getFilePath(
file_entry_id) # Retrieve XSOAR system file path and name, given file entry ID.
xsoar_system_file_path = xsoar_file_data['path']
new_file_name = file_name if file_name else xsoar_file_data['name']
try:
shutil.copy(xsoar_system_file_path, new_file_name)
except FileNotFoundError:
raise Exception('Failed to prepare file for upload. '
'The process of importing and copying the file data from XSOAR failed.')
try:
with open(new_file_name, 'rb') as file:
file.seek(0, 2)
content_length = file.tell()
file.seek(0)
max_range = int(content_length) - 1
bytes_range = f'bytes=0-{max_range}'
put_rang_headers = {
'x-ms-write': 'update',
'x-ms-range': bytes_range,
'Content-Length': str(content_length),
'x-ms-type': 'file',
}
params = {'comp': 'range'}
put_range_url = f'{share_name}/{directory_path}/{new_file_name}' if directory_path else \
f'{share_name}/{new_file_name}'
put_range_response = self.ms_client.http_request(method='PUT', url_suffix=put_range_url,
headers=put_rang_headers, params=params,
return_empty_response=True, data=file)
finally:
shutil.rmtree(new_file_name, ignore_errors=True)
return put_range_response
def get_file_request(self, share_name: str, file_name: str, directory_path: str = None) -> Response:
    """
    Retrieve a file from a Share.

    Args:
        share_name (str): Share name.
        file_name (str): File name.
        directory_path (str): The path to the file directory.

    Returns:
        Response: API response from Azure.
    """
    if directory_path:
        url_suffix = f'{share_name}/{directory_path}/{file_name}'
    else:
        url_suffix = f'{share_name}/{file_name}'
    return self.ms_client.http_request(method='GET', url_suffix=url_suffix, resp_type="response")
def delete_file_request(self, share_name: str, file_name: str, directory_path: str = None) -> Response:
    """
    Delete a file from a Share.

    Args:
        share_name (str): Share name.
        file_name (str): File name.
        directory_path (str): The path to the file directory.

    Returns:
        Response: API response from Azure.
    """
    # CONSISTENCY FIX: added type hints and the directory_path default to match
    # the sibling request methods (get_file_request, add_file_content_request);
    # existing callers are unaffected.
    if directory_path:
        url_suffix = f'{share_name}/{directory_path}/{file_name}'
    else:
        url_suffix = f'{share_name}/{file_name}'
    return self.ms_client.http_request(method='DELETE', url_suffix=url_suffix, return_empty_response=True)
def create_share_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Create a new Azure file share under the specified account.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: when the share name violates Azure naming rules.
    """
    share_name = args['share_name']
    # Rules for naming shares can be found here:
    # https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-shares--directories--files--and-metadata
    share_name_regex = "^[a-z0-9](?!.*--)[a-z0-9-]{1,61}[a-z0-9]$"
    if not re.search(share_name_regex, share_name):
        raise Exception('The specified share name is invalid.')
    client.create_share_request(share_name)
    return CommandResults(
        readable_output=f'Share {share_name} successfully created.',
    )
def delete_share_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete a file share under the specified account.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    share_name = args['share_name']
    client.delete_share_request(share_name)
    return CommandResults(
        readable_output=f'Share {share_name} successfully deleted.',
    )
def get_pagination_next_marker_element(limit: str, page: int, client_request: Callable, params: dict) -> str:
    """
    Retrieve the NextMarker token used for request pagination.

    'marker' is a string value that identifies the portion of the list to be
    returned with the next list operation. The API includes a NextMarker element
    in the response body whenever the returned list is incomplete; that value is
    then passed as a query parameter on the subsequent call.

    Args:
        limit (str): Number of elements to retrieve per page.
        page (int): Page number.
        client_request (Callable): Client request function.
        params (dict): Request params.

    Returns:
        str: Next marker (None if the listing is already complete).
    """
    # Replay the request for all items preceding the desired page, then read
    # the continuation token from the XML response.
    skipped_items = int(limit) * (page - 1)
    raw_xml = client_request(limit=str(skipped_items), **params)
    root = ET.ElementTree(ET.fromstring(raw_xml)).getroot()
    return root.findtext('NextMarker')  # type: ignore
def list_shares_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    list Azure file shares under the specified account.
    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    limit = args.get('limit') or '50'
    prefix = args.get('prefix')
    page = arg_to_number(args.get('page') or '1')
    marker = ''
    readable_message = f'Shares List:\n Current page size: {limit}\n Showing page {page} out others that may exist'
    # Pages beyond the first need a continuation marker, obtained by replaying
    # the listing for the limit * (page - 1) preceding items.
    if page > 1:  # type: ignore
        marker = get_pagination_next_marker_element(limit=limit, page=page,  # type: ignore
                                                    client_request=client.list_shares_request,
                                                    params={"prefix": prefix})
        # No marker means the requested page is past the end of the listing.
        if not marker:
            return CommandResults(
                readable_output=readable_message,
                outputs_prefix='AzureStorageFileShare.Share',
                outputs=[],
                raw_response=[]
            )
    response = client.list_shares_request(limit, prefix, marker=marker)
    # The API answers with an XML document; collect every <Share> entry.
    tree = ET.ElementTree(ET.fromstring(response))
    root = tree.getroot()
    raw_response = []
    outputs = []
    for element in root.iter('Share'):
        data = handle_content_properties_information(element)
        raw_response.append(data)
        # Context outputs carry only the share name.
        outputs.append({'Name': element.findtext('Name')})
    readable_output = tableToMarkdown(
        readable_message,
        outputs,
        headers=['Name'],
        headerTransform=pascalToSpace
    )
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureStorageFileShare.Share',
        outputs_key_field='Name',
        outputs=outputs,
        raw_response=raw_response
    )
    return command_results
def handle_content_properties_information(element: object) -> dict:
    """
    Flatten an XML element's <Properties> children into a dictionary.

    Args:
        element (object): An XML element hierarchy data.

    Returns:
        dict: {'Name': ..., 'Properties': {tag: text, ...}}.
    """
    properties = {
        attribute.tag: attribute.text
        for properties_node in element.findall('Properties')  # type: ignore
        for attribute in properties_node
    }
    return {'Name': element.findtext('Name'), 'Properties': properties}  # type: ignore
def handle_directory_content_response(response: str) -> dict:
    """
    Convert an XML directory-listing response to a dictionary data structure.

    Args:
        response (str): XML schema string response.

    Returns:
        dict: Raw response with 'Directory', 'File' entry lists and 'DirectoryId'.
    """
    root = ET.ElementTree(ET.fromstring(response)).getroot()
    raw_response: dict = {'Directory': [], 'File': [], 'DirectoryId': root.findtext('DirectoryId')}
    # Both directories and files carry the same Name/Properties/FileId shape.
    for entry_type in ('Directory', 'File'):
        for element in root.iter(entry_type):
            entry = handle_content_properties_information(element)
            entry['FileId'] = element.findtext('FileId')
            raw_response[entry_type].append(entry)
    return raw_response
def create_directory_content_output(share_name: str, raw_response: dict, directory_path: str = "") -> dict:
    """
    Create XSOAR context output for list directory command.
    Args:
        share_name (str): Share name.
        raw_response (dict): Request raw response. Mutated in place.
        directory_path (str): Source directory path.
    Returns:
        dict: XSOAR command context output.
    """
    xml_path = ['Directory', 'File']
    outputs = {"Name": share_name, "Content": {"Path": directory_path, "DirectoryId": raw_response['DirectoryId']}}
    time_headers = ['CreationTime', 'LastAccessTime', 'LastWriteTime', 'ChangeTime']
    for path in xml_path:
        for element in raw_response.get(path):  # type: ignore
            # Normalize Azure timestamps to ISO-8601 for the context output.
            for header in time_headers:
                str_time = element['Properties'].get(header)  # type: ignore
                # NOTE(review): drops the last two characters and appends 'Z' -
                # presumably trimming fractional-second digits that
                # GENERAL_DATE_FORMAT cannot parse; confirm against the API's
                # actual timestamp precision.
                str_time = str_time[:-2] + 'Z'
                element['Properties'][header] = FormatIso8601(  # type: ignore
                    datetime.strptime(str_time, GENERAL_DATE_FORMAT))  # type: ignore
            # Last-Modified uses the HTTP date format, not the Azure one.
            element['Properties']['Last-Modified'] = FormatIso8601(  # type: ignore
                datetime.strptime(element['Properties']['Last-Modified'], DATE_FORMAT))  # type: ignore
            # Context standard uses the singular 'Property' key.
            element['Property'] = element.pop('Properties')  # type: ignore
    outputs["Content"].update(raw_response)  # type: ignore
    return outputs
def create_content_readable_output(outputs: dict, prefix: str = '') -> str:
    """
    Create a readable output for the list directory content command.

    Args:
        outputs (dict): Command outputs.
        prefix (str): Readable output prefix.

    Returns:
        str: Command readable output (prefix, directories table, files table).
    """
    tables = [
        tableToMarkdown(
            f'{title}:',
            outputs["Content"][key],
            headers=['Name', 'FileId'],
            headerTransform=pascalToSpace,
        )
        for title, key in (('Directories', 'Directory'), ('Files', 'File'))
    ]
    return "\n".join([prefix] + tables)
def list_directories_and_files_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    List files and directories under the specified share or directory.
    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    prefix = args.get('prefix')
    limit = args.get('limit') or '50'
    share_name = args['share_name']
    directory_path = args.get('directory_path', '')
    page = arg_to_number(args.get('page') or '1')
    marker = ''
    readable_message = f'Directories and Files List:\n Current page size: {limit}\n Showing page {page} out others that may exist'
    # Pages beyond the first need a continuation marker, obtained by replaying
    # the listing for the limit * (page - 1) preceding items.
    if page > 1:  # type: ignore
        marker = get_pagination_next_marker_element(limit=limit, page=page,  # type: ignore
                                                    client_request=client.list_directories_and_files_request,
                                                    params={"prefix": prefix, "share_name": share_name,
                                                            "directory_path": directory_path})
        # No marker means the requested page is past the end of the listing.
        if not marker:
            return CommandResults(
                readable_output=readable_message,
                outputs_prefix='AzureStorageFileShare.Share',
                outputs=[],
                raw_response=[]
            )
    response = client.list_directories_and_files_request(share_name, directory_path, prefix, limit, marker)
    raw_response = handle_directory_content_response(response)
    # Deep copy: create_directory_content_output mutates its argument in place,
    # and raw_response must stay untouched for the raw_response field below.
    response_copy = copy.deepcopy(raw_response)
    outputs = create_directory_content_output(share_name, response_copy, directory_path)
    readable_output = create_content_readable_output(outputs, readable_message)
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_key_field='Name',
        outputs_prefix='AzureStorageFileShare.Share',
        outputs=outputs,
        raw_response=raw_response
    )
    return command_results
def validate_characters(string: str, invalid_characters: str) -> bool:
    """
    Check that a string contains none of the given invalid characters.

    Args:
        string (str): String to validate.
        invalid_characters (str): Characters that must not appear.

    Returns:
        bool: True if the string is valid, otherwise False.
    """
    return not any(character in string for character in invalid_characters)
def create_directory_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Create a new directory under the specified share or parent directory.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: when the directory name contains a forbidden character.
    """
    share_name = args['share_name']
    directory_name = args['directory_name']
    directory_path = args.get('directory_path')
    # BUG FIX: the original literal "\"\/:|<>*?" contained the invalid escape
    # sequence '\/' (SyntaxWarning/DeprecationWarning on modern Python).
    # The forbidden character set is unchanged: " \ / : | < > * ?
    if not validate_characters(directory_name, '"\\/:|<>*?'):
        raise Exception('The specified directory name is invalid.')
    client.create_directory_request(share_name, directory_name, directory_path)
    return CommandResults(
        readable_output=f'{directory_name} Directory successfully created in {share_name}.',
    )
def delete_directory_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete the specified empty directory.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    share_name = args['share_name']
    directory_name = args['directory_name']
    client.delete_directory_request(share_name, directory_name, args.get('directory_path'))  # type: ignore
    return CommandResults(
        readable_output=f'{directory_name} Directory successfully deleted from {share_name}.'
    )
def create_file_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Create a file in a Share from a War room file Entry ID.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    share_name = args['share_name']
    file_entry_id = args['file_entry_id']
    directory_path = args.get('directory_path')
    file_name = args.get('file_name')
    # The file is allocated first; a second request uploads its content.
    client.create_file_request(share_name, file_entry_id, file_name, directory_path)  # type: ignore
    client.add_file_content_request(share_name, file_entry_id, file_name, directory_path)  # type: ignore
    return CommandResults(
        readable_output=f'File successfully created in {share_name}.'
    )
def get_file_command(client: Client, args: Dict[str, Any]) -> fileResult:  # type: ignore
    """
    Get a file from a Share and return it as an XSOAR file entry.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        fileResult: XSOAR File Result.
    """
    share_name = args['share_name']
    file_name = args['file_name']
    response = client.get_file_request(share_name, file_name, args.get('directory_path'))
    return fileResult(filename=file_name, data=response.content)
def delete_file_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete a file from a Share.

    Args:
        client (Client): Azure FileShares Storage API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    share_name = args['share_name']
    file_name = args['file_name']
    client.delete_file_request(share_name, file_name, args.get('directory_path'))
    return CommandResults(
        readable_output=f'File {file_name} successfully deleted from {share_name}.',
    )
def test_module(client: Client) -> None:
    """
    Test API connectivity and authentication.

    Reports 'ok' via return_results on success; on failure reports a
    human-readable hint instead of raising, except for unexpected errors.

    Args:
        client (Client): Azure FileShares Storage API client.
    """
    try:
        client.list_shares_request()
    except Exception as exception:
        error_text = str(exception)
        if 'Error in API call' in error_text:
            return return_results('Authorization Error: make sure API Credentials are correctly set')
        if 'Error Type' in error_text:
            return return_results(
                'Verify that the storage account name is correct and that you have access to the server from your host.')
        raise exception
    return_results('ok')
def main() -> None:
    """
    Main function: read integration params, build the client and dispatch
    the invoked command to its handler.
    """
    params: Dict[str, Any] = demisto.params()
    args: Dict[str, Any] = demisto.args()
    verify_certificate: bool = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    # Exposed at module scope; other code in this integration reads them.
    global account_sas_token
    global storage_account_name
    account_sas_token = params['credentials']['password']
    storage_account_name = params['credentials']['identifier']
    api_version = "2020-10-02"
    base_url = f'https://{storage_account_name}.file.core.windows.net/'
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    try:
        requests.packages.urllib3.disable_warnings()
        client: Client = Client(base_url, verify_certificate, proxy, account_sas_token, storage_account_name,
                                api_version)
        # Map XSOAR command names to their handler functions.
        commands = {
            'azure-storage-fileshare-create': create_share_command,
            'azure-storage-fileshare-delete': delete_share_command,
            'azure-storage-fileshare-list': list_shares_command,
            'azure-storage-fileshare-content-list': list_directories_and_files_command,
            'azure-storage-fileshare-directory-create': create_directory_command,
            'azure-storage-fileshare-directory-delete': delete_directory_command,
            'azure-storage-fileshare-file-create': create_file_command,
            'azure-storage-fileshare-file-get': get_file_command,
            'azure-storage-fileshare-file-delete': delete_file_command,
        }
        if command == 'test-module':
            test_module(client)
        elif command in commands:
            return_results(commands[command](client, args))
        else:
            raise NotImplementedError(f'{command} command is not implemented.')
    except Exception as e:
        # Surface any failure to the war room instead of crashing the container.
        return_error(str(e))
# NOTE(review): this wildcard import sits below the definitions it supports;
# presumably required by the XSOAR API-module injection mechanism - confirm
# before moving it to the top of the file.
from MicrosoftAzureStorageApiModule import *  # noqa: E402

if __name__ in ['__main__', 'builtin', 'builtins']:
    main()
|
|
import logging
import os
import time
import requests
from nflmisc.browser import BrowserScraper
from nba.dates import convert_format, strtodate, subtract_datestr, today
class BasketballScraper(object):
    """Base class for common scraping tasks over a requests session."""

    def __init__(self, headers=None, cookies=None, cache_name=None, delay=1, expire_hours=12, as_string=False):
        '''
        Base class for common scraping tasks

        Args:
            headers: dict of headers
            cookies: cookiejar object
            delay: int (be polite!!!)
            cache_name: should be full path
            expire_hours: int - default 12
            as_string: get string rather than parsed json
        '''
        logging.getLogger(__name__).addHandler(logging.NullHandler())
        if not cookies:
            # Python 2 cookielib first, then the Python 3 module name.
            try:
                import cookielib
                cookies = cookielib.MozillaCookieJar()
            except (NameError, ImportError):
                try:
                    import http.cookiejar
                    cookies = http.cookiejar.MozillaCookieJar()
                except Exception:
                    # cookie support is best-effort only
                    pass

        _s = requests.Session()
        _s.cookies = cookies

        if headers:
            _s.headers.update(headers)
        else:
            _s.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'})

        if cache_name:
            if '/' not in cache_name:
                cache_name = os.path.join('/tmp', cache_name)
            try:
                from cachecontrol import CacheControlAdapter
                from cachecontrol.heuristics import ExpiresAfter
                from cachecontrol.caches import FileCache
                _s.mount('http://', CacheControlAdapter(cache=FileCache(cache_name), cache_etags=False,
                                                        heuristic=ExpiresAfter(hours=expire_hours)))
            except ImportError:
                try:
                    import requests_cache
                    requests_cache.install_cache(cache_name)
                except Exception:
                    # caching is best-effort; fall back to an uncached session.
                    # BUG FIX: was a bare 'except:', which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    pass

        self.s = _s
        self.urls = []
        self.as_string = as_string
        # A delay of 0 (or negative) disables the politeness sleep.
        self.delay = delay if delay > 0 else None

    def get(self, url, payload=None, encoding='utf-8'):
        '''
        Fetch a URL and return the decoded response body.

        Args:
            url: resource to fetch
            payload: optional dict of query-string parameters (sent sorted by key)
            encoding: text encoding used to decode the body

        Returns:
            str: decoded response content
        '''
        if payload:
            r = self.s.get(url, params={k: payload[k] for k in sorted(payload)})
        else:
            r = self.s.get(url)
        self.urls.append(r.url)
        r.raise_for_status()
        if self.delay:
            time.sleep(self.delay)
        return r.content.decode(encoding)

    def get_json(self, url, payload=None):
        '''
        Gets JSON resource and (default) parses into python data structure

        Args:
            url: resource to fetch
            payload: query string parameters

        Returns:
            parsed JSON, or the raw bytes when as_string is set
        '''
        if payload:
            r = self.s.get(url, params={k: payload[k] for k in sorted(payload)})
        else:
            r = self.s.get(url, params=None)
        self.urls.append(r.url)
        r.raise_for_status()
        if self.delay:
            time.sleep(self.delay)
        if self.as_string:
            return r.content
        else:
            return r.json()

    def post(self, url, payload):
        '''
        Send an HTTP POST and return the raw response body.

        Args:
            url: resource to post to
            payload: form data dict (may be falsy for an empty post)

        Returns:
            bytes: raw response content
        '''
        # BUG FIX: the original issued a GET with the payload as query-string
        # parameters, so this method never actually performed a POST.
        if payload:
            r = self.s.post(url, data=payload)
        else:
            r = self.s.post(url)
        self.urls.append(r.url)
        r.raise_for_status()
        if self.delay:
            time.sleep(self.delay)
        return r.content
class BBrowserScraper(object):
    """Browser-backed variant of the scraper: delegates requests to BrowserScraper."""

    def __init__(self, headers=None, cookies=None, cache_name=None, delay=1, expire_hours=12, as_string=False):
        '''
        Base class for common scraping tasks

        Args:
            headers: dict of headers
            cookies: cookiejar object
            cache_name: should be full path
            delay: int (be polite!!!)
            expire_hours: int - default 12
            as_string: get string rather than parsed json
        '''
        logging.getLogger(__name__).addHandler(logging.NullHandler())
        self.s = BrowserScraper()
        self.urls = []
        self.as_string = as_string
        # A delay of 0 (or negative) disables the politeness sleep.
        self.delay = delay if delay > 0 else None

    def get_json(self, url, payload=None):
        '''
        Gets JSON resource and (default) parses into python data structure

        Args:
            url: resource to fetch
            payload: query string parameters

        Returns:
            parsed JSON, or the raw bytes when as_string is set
        '''
        sorted_params = {key: payload[key] for key in sorted(payload)} if payload else None
        r = self.s.get(url, params=sorted_params)
        self.urls.append(r.url)
        r.raise_for_status()
        if self.delay:
            time.sleep(self.delay)
        return r.content if self.as_string else r.json()
class WaybackScraper(BasketballScraper):
    """Scraper for the archive.org wayback machine availability API."""

    def __init__(self, headers=None, cookies=None, cache_name=None, expire_hours=12, as_string=False):
        '''
        Scraper for waybackmachine API

        Args:
            headers: dictionary of HTTP headers
            cookies: cookie object, such as browsercookie.firefox()
            cache_name: str 'nbacomscraper'
            expire_hours: how long to cache requests
            as_string: return as raw string rather than json parsed into python data structure
        '''
        # BUG FIX: the query-string separator had been corrupted to the
        # HTML entity for the multiplication sign; restored '&timestamp='.
        self.wburl = 'http://archive.org/wayback/available?url={}&timestamp={}'
        logging.getLogger(__name__).addHandler(logging.NullHandler())
        BasketballScraper.__init__(self, headers=headers, cookies=cookies, cache_name=cache_name,
                                   expire_hours=expire_hours, as_string=as_string)

    def get_wayback(self, url, d=None, max_delta=None):
        '''
        Gets page from the wayback machine

        Args:
            url: of the site you want, not the wayback machine
            d: datestring, if None then get most recent one
            max_delta: int, how many days off can the last page be from the requested date

        Returns:
            tuple: (content HTML string or None, snapshot timestamp string or None)
        '''
        content = None
        # BUG FIX: ts was previously unbound when no snapshot existed,
        # raising UnboundLocalError at the final return.
        ts = None
        if not d:
            d = today('db')
        else:
            d = convert_format(d, 'db')
        resp = self.get_json(self.wburl.format(url, d))
        # ROBUSTNESS: the API returns {"archived_snapshots": {}} when nothing is
        # archived; the original indexed ['closest'] directly and raised KeyError.
        closest = resp.get('archived_snapshots', {}).get('closest') if resp else None
        if closest:
            ts = closest['timestamp'][:8]
            if max_delta:
                if subtract_datestr(d, ts) <= max_delta:
                    content = self.get(closest['url'])
                else:
                    logging.error('page is too old: {}'.format(ts))
            else:
                # BUG FIX: this branch previously returned a bare string while
                # every other path returned a (content, ts) tuple.
                content = self.get(closest['url'])
        else:
            logging.error('url unavailable on wayback machine')
        return content, ts
if __name__ == "__main__":
    # Library-only module: no command-line behavior.
    pass
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
import mock
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import floating_ips as fips_v2
from nova.api.openstack.compute.plugins.v3 import floating_ips as fips_v21
from nova.api.openstack import extensions
from nova import compute
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import network
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
def network_api_get_floating_ip(self, context, id):
    """Stub for network API get_floating_ip: a fixed, unassociated floating ip."""
    return {
        'id': 1,
        'address': '10.10.10.10',
        'pool': 'nova',
        'fixed_ip_id': None,
    }
def network_api_get_floating_ip_by_address(self, context, address):
    """Stub for network API get_floating_ip_by_address: a fixed, associated ip."""
    return {
        'id': 1,
        'address': '10.10.10.10',
        'pool': 'nova',
        'fixed_ip_id': 10,
    }
def network_api_get_floating_ips_by_project(self, context):
    """Stub returning one associated and one unassociated floating ip."""
    associated = {
        'id': 1,
        'address': '10.10.10.10',
        'pool': 'nova',
        'fixed_ip': {
            'address': '10.0.0.1',
            'instance_uuid': FAKE_UUID,
            'instance': {'uuid': FAKE_UUID},
        },
    }
    unassociated = {
        'id': 2,
        'pool': 'nova',
        'interface': 'eth0',
        'address': '10.10.10.11',
        'fixed_ip': None,
    }
    return [associated, unassociated]
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Stub for compute API get: a minimal instance record for the given id."""
    return {
        'uuid': FAKE_UUID,
        'id': instance_id,
        'instance_type_id': 1,
        'host': 'bob',
    }
def network_api_allocate(self, context):
    """Stub allocation: always hands out the same address."""
    return '10.10.10.10'
def network_api_release(self, context, address):
    """No-op stub for network API release_floating_ip."""
    pass
def compute_api_associate(self, context, instance_id, address):
    """No-op stub for compute API floating-ip association."""
    pass
def network_api_associate(self, context, floating_address, fixed_address):
    """No-op stub for network API associate_floating_ip."""
    pass
def network_api_disassociate(self, context, instance, floating_address):
    """No-op stub for network API disassociate_floating_ip."""
    pass
def fake_instance_get(context, instance_id):
    """Stub db.instance_get: a minimal instance record with a fresh random uuid."""
    return {
        "id": 1,
        "uuid": uuid.uuid4(),
        "name": 'fake',
        "user_id": 'fakeuser',
        "project_id": '123',
    }
def stub_nw_info(stubs):
    """Return a get_nw_info_for_instance replacement bound to the given stubs."""
    def get_nw_info_for_instance(instance):
        # Delegates to the fake network module; 'instance' is ignored.
        return fake_network.fake_get_instance_nw_info(stubs)
    return get_nw_info_for_instance
def get_instance_by_floating_ip_addr(self, context, address):
    """Stub: no instance is associated with any floating ip address."""
    return None
class FloatingIpTestNeutronV21(test.NoDBTestCase):
    """Floating-ip API tests that run against the neutron network driver."""
    floating_ips = fips_v21

    def setUp(self):
        super(FloatingIpTestNeutronV21, self).setUp()
        # Switch the network API to the neutron implementation.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.controller = self.floating_ips.FloatingIPController()

    def test_floatingip_delete(self):
        # With neutron, delete must use the single combined
        # disassociate-and-release call, not two separate calls.
        req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
        fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
        # NOTE(review): contextlib.nested exists only on Python 2.
        with contextlib.nested(
            mock.patch.object(self.controller.network_api,
                              'disassociate_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'disassociate_and_release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'get_instance_id_by_floating_address',
                              return_value=None),
            mock.patch.object(self.controller.network_api,
                              'get_floating_ip',
                              return_value=fip_val)) as (
                disoc_fip, dis_and_del, rel_fip, _, _):
            self.controller.delete(req, 1)
            self.assertFalse(disoc_fip.called)
            self.assertFalse(rel_fip.called)
            # Only disassociate_and_release_floating_ip is
            # called if using neutron
            self.assertTrue(dis_and_del.called)

    def _test_floatingip_delete_not_found(self, ex,
                                          expect_ex=webob.exc.HTTPNotFound):
        # Common helper: make get_floating_ip raise 'ex', expect the HTTP error.
        req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
        with contextlib.nested(
            mock.patch.object(self.controller.network_api,
                              'get_floating_ip',
                              side_effect=ex)
        ):
            self.assertRaises(expect_ex,
                              self.controller.delete, req, 1)

    def test_floatingip_delete_not_found_ip(self):
        ex = exception.FloatingIpNotFound(id=1)
        self._test_floatingip_delete_not_found(ex)

    def test_floatingip_delete_not_found(self):
        # NOTE(review): passes the exception class itself, not an instance;
        # mock accepts either as a side_effect.
        ex = exception.NotFound
        self._test_floatingip_delete_not_found(ex)

    def test_floatingip_delete_invalid_id(self):
        ex = exception.InvalidID(id=1)
        self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
class FloatingIpTestNeutronV2(FloatingIpTestNeutronV21):
    """Same tests against the v2 extension, where InvalidID maps to 404, not 400."""
    floating_ips = fips_v2

    def test_floatingip_delete_invalid_id(self):
        ex = exception.InvalidID(id=1)
        self._test_floatingip_delete_not_found(ex, webob.exc.HTTPNotFound)
class FloatingIpTestV21(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
floating_ips = fips_v21
url = '/v2/fake/servers/test_inst/action'
def _create_floating_ips(self, floating_ips=None):
    """Create floating ip db records (defaults to self.floating_ip)."""
    if floating_ips is None:
        floating_ips = [self.floating_ip]
    elif not isinstance(floating_ips, (list, tuple)):
        floating_ips = [floating_ips]
    # NOTE(review): removed the dead local helper make_ip_dict, whose body was
    # a bare 'return' and which was never called.
    dict_ = {'pool': 'nova', 'host': 'fake_host'}
    return db.floating_ip_bulk_create(
        self.context, [dict(address=ip, **dict_) for ip in floating_ips],
    )
def _delete_floating_ip(self):
    """Destroy the default floating ip record created in setUp."""
    db.floating_ip_destroy(self.context, self.floating_ip)
def _get_fake_fip_request(self, act=''):
    """Build a fake request for the os-floating-ips resource (optional id/action)."""
    return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/%s' % act)
def _get_fake_server_request(self):
    """Build a fake request for the server action endpoint."""
    return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
def _get_fake_app(self):
    """Build a WSGI app with only the extensions these tests need."""
    return fakes.wsgi_app_v21(init_only=('servers', 'os-floating-ips'))
def setUp(self):
    """Stub out compute/network/db APIs and create the default floating ip."""
    super(FloatingIpTestV21, self).setUp()
    self.stubs.Set(compute.api.API, "get",
                   compute_api_get)
    self.stubs.Set(network.api.API, "get_floating_ip",
                   network_api_get_floating_ip)
    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   network_api_get_floating_ip_by_address)
    self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                   network_api_get_floating_ips_by_project)
    self.stubs.Set(network.api.API, "release_floating_ip",
                   network_api_release)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                   stub_nw_info(self.stubs))
    fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
    self.stubs.Set(db, 'instance_get',
                   fake_instance_get)
    self.context = context.get_admin_context()
    # Every test starts with self.floating_ip present in the db.
    self._create_floating_ips()
    self.ext_mgr = extensions.ExtensionManager()
    self.ext_mgr.extensions = {}
    self.controller = self.floating_ips.FloatingIPController()
    self.manager = self.floating_ips.\
        FloatingIPActionController(self.ext_mgr)
    self.app = self._get_fake_app()
def tearDown(self):
    """Drop the floating ip created in setUp, then run generic teardown."""
    self._delete_floating_ip()
    super(FloatingIpTestV21, self).tearDown()
def test_floatingip_delete(self):
    """With nova-network, delete must both disassociate and release the ip."""
    req = self._get_fake_fip_request('1')
    fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
    # NOTE(review): contextlib.nested exists only on Python 2.
    with contextlib.nested(
        mock.patch.object(self.controller.network_api,
                          'disassociate_floating_ip'),
        mock.patch.object(self.controller.network_api,
                          'release_floating_ip'),
        mock.patch.object(self.controller.network_api,
                          'get_instance_id_by_floating_address',
                          return_value=None),
        mock.patch.object(self.controller.network_api,
                          'get_floating_ip',
                          return_value=fip_val)) as (
            disoc_fip, rel_fip, _, _):
        self.controller.delete(req, 1)
        self.assertTrue(disoc_fip.called)
        self.assertTrue(rel_fip.called)
def _test_floatingip_delete_not_found(self, ex,
                                      expect_ex=webob.exc.HTTPNotFound):
    """Helper: make get_floating_ip raise 'ex', expect the given HTTP error."""
    req = self._get_fake_fip_request('1')
    with contextlib.nested(
        mock.patch.object(self.controller.network_api,
                          'get_floating_ip',
                          side_effect=ex)
    ):
        self.assertRaises(expect_ex,
                          self.controller.delete, req, 1)
def test_floatingip_delete_not_found_ip(self):
    """FloatingIpNotFound maps to HTTP 404."""
    ex = exception.FloatingIpNotFound(id=1)
    self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_not_found(self):
    """Generic NotFound maps to HTTP 404."""
    # NOTE(review): passes the exception class itself, not an instance;
    # mock accepts either as a side_effect.
    ex = exception.NotFound
    self._test_floatingip_delete_not_found(ex)
def test_floatingip_delete_invalid_id(self):
    """InvalidID maps to HTTP 400 in the v2.1 API."""
    ex = exception.InvalidID(id=1)
    self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest)
def test_translate_floating_ip_view(self):
    """An unassociated db record translates to a view with null fixed_ip/instance."""
    floating_ip_address = self.floating_ip
    floating_ip = db.floating_ip_get_by_address(self.context,
                                                floating_ip_address)
    # NOTE(vish): network_get uses the id not the address
    floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
    view = self.floating_ips._translate_floating_ip_view(floating_ip)
    self.assertIn('floating_ip', view)
    self.assertTrue(view['floating_ip']['id'])
    self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
    self.assertIsNone(view['floating_ip']['fixed_ip'])
    self.assertIsNone(view['floating_ip']['instance_id'])
def test_translate_floating_ip_view_dict(self):
    """Translation also accepts a plain dict instead of a db record."""
    floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
                   'fixed_ip': None}
    view = self.floating_ips._translate_floating_ip_view(floating_ip)
    self.assertIn('floating_ip', view)
def test_floating_ips_list(self):
    """Index returns both stubbed ips: one associated, one unassociated."""
    req = self._get_fake_fip_request()
    res_dict = self.controller.index(req)
    # Mirrors network_api_get_floating_ips_by_project's stub data.
    response = {'floating_ips': [{'instance_id': FAKE_UUID,
                                  'ip': '10.10.10.10',
                                  'pool': 'nova',
                                  'fixed_ip': '10.0.0.1',
                                  'id': 1},
                                 {'instance_id': None,
                                  'ip': '10.10.10.11',
                                  'pool': 'nova',
                                  'fixed_ip': None,
                                  'id': 2}]}
    self.assertEqual(res_dict, response)
def test_floating_ip_release_nonexisting(self):
    """DELETE on an unknown id returns 404 with an itemNotFound body."""
    def fake_get_floating_ip(*args, **kwargs):
        raise exception.FloatingIpNotFound(id=id)
    self.stubs.Set(network.api.API, "get_floating_ip",
                   fake_get_floating_ip)
    req = self._get_fake_fip_request('9876')
    req.method = 'DELETE'
    res = req.get_response(self.app)
    self.assertEqual(res.status_int, 404)
    expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
                    'for id 9876", "code": 404}}')
    self.assertEqual(res.body, expected_msg)
def test_floating_ip_release_race_cond(self):
    """Release still succeeds (202) if the ip was disassociated concurrently."""
    def fake_get_floating_ip(*args, **kwargs):
        return {'fixed_ip_id': 1, 'address': self.floating_ip}

    def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
        return 'test-inst'

    def fake_disassociate_floating_ip(*args, **kwargs):
        # Simulates another request having disassociated the ip already.
        raise exception.FloatingIpNotAssociated(args[3])
    self.stubs.Set(network.api.API, "get_floating_ip",
                   fake_get_floating_ip)
    self.stubs.Set(self.floating_ips, "get_instance_by_floating_ip_addr",
                   fake_get_instance_by_floating_ip_addr)
    self.stubs.Set(self.floating_ips, "disassociate_floating_ip",
                   fake_disassociate_floating_ip)
    req = self._get_fake_fip_request('1')
    req.method = 'DELETE'
    res = req.get_response(self.app)
    self.assertEqual(res.status_int, 202)
def test_floating_ip_show(self):
    """Show returns the stubbed unassociated ip."""
    req = self._get_fake_fip_request('1')
    res_dict = self.controller.show(req, 1)
    self.assertEqual(res_dict['floating_ip']['id'], 1)
    self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
    self.assertIsNone(res_dict['floating_ip']['instance_id'])
def test_floating_ip_show_not_found(self):
    """Show on an unknown id returns 404 with an itemNotFound body."""
    def fake_get_floating_ip(*args, **kwargs):
        raise exception.FloatingIpNotFound(id='fake')
    self.stubs.Set(network.api.API, "get_floating_ip",
                   fake_get_floating_ip)
    req = self._get_fake_fip_request('9876')
    res = req.get_response(self.app)
    self.assertEqual(res.status_int, 404)
    expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
                    'for id 9876", "code": 404}}')
    self.assertEqual(res.body, expected_msg)
def test_show_associated_floating_ip(self):
    """show() exposes the fixed ip and instance uuid of an associated ip."""
    def fake_get_floating_ip(self, context, id):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip': {'address': '10.0.0.1',
                             'instance_uuid': FAKE_UUID,
                             'instance': {'uuid': FAKE_UUID}}}
    self.stubs.Set(network.api.API, "get_floating_ip", fake_get_floating_ip)
    request = self._get_fake_fip_request('1')
    fip = self.controller.show(request, 1)['floating_ip']
    self.assertEqual(fip['id'], 1)
    self.assertEqual(fip['ip'], '10.10.10.10')
    self.assertEqual(fip['fixed_ip'], '10.0.0.1')
    self.assertEqual(fip['instance_id'], FAKE_UUID)
def test_recreation_of_floating_ip(self):
    """A deleted floating ip address can be created again without error."""
    self._delete_floating_ip()
    self._create_floating_ips()
def test_floating_ip_in_bulk_creation(self):
    """Bulk creation registers every requested address in the database."""
    self._delete_floating_ip()
    self._create_floating_ips([self.floating_ip, self.floating_ip_2])
    addresses = [entry['address']
                 for entry in db.floating_ip_get_all(self.context)]
    self.assertIn(self.floating_ip, addresses)
    self.assertIn(self.floating_ip_2, addresses)
def test_fail_floating_ip_in_bulk_creation(self):
    """Bulk creation stops at a duplicate; later addresses are not added."""
    self.assertRaises(exception.FloatingIpExists,
                      self._create_floating_ips,
                      [self.floating_ip, self.floating_ip_2])
    addresses = [entry['address']
                 for entry in db.floating_ip_get_all(self.context)]
    self.assertIn(self.floating_ip, addresses)
    self.assertNotIn(self.floating_ip_2, addresses)
def test_floating_ip_allocate_no_free_ips(self):
    """create() maps NoMoreFloatingIps onto HTTP 404."""
    def fake_allocate(*args, **kwargs):
        raise exception.NoMoreFloatingIps()
    self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
    request = self._get_fake_fip_request()
    err = self.assertRaises(webob.exc.HTTPNotFound,
                            self.controller.create, request)
    self.assertIn('No more floating ips', err.explanation)
def test_floating_ip_allocate_no_free_ips_pool(self):
    """The 404 explanation names the exhausted pool when one was requested."""
    def fake_allocate(*args, **kwargs):
        raise exception.NoMoreFloatingIps()
    self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
    request = self._get_fake_fip_request()
    err = self.assertRaises(webob.exc.HTTPNotFound,
                            self.controller.create, request,
                            {'pool': 'non_existent_pool'})
    self.assertIn('No more floating ips in pool non_existent_pool',
                  err.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
            side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_over_quota(self, allocate_mock):
    """FloatingIpLimitExceeded becomes HTTP 403 with a quota message."""
    req = self._get_fake_fip_request()
    ex = self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, req)
    self.assertIn('IP allocation over quota', ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
            side_effect=exception.FloatingIpLimitExceeded())
def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
    """The 403 quota message names the pool when one was requested."""
    req = self._get_fake_fip_request()
    ex = self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, req,
                          {'pool': 'non_existent_pool'})
    self.assertIn('IP allocation over quota in pool non_existent_pool.',
                  ex.explanation)
@mock.patch('nova.network.api.API.allocate_floating_ip',
            side_effect=exception.FloatingIpPoolNotFound())
def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
    """Allocating from a nonexistent pool maps to HTTP 404."""
    req = self._get_fake_fip_request()
    ex = self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.create, req,
                          {'pool': 'non_existent_pool'})
    self.assertIn('Floating ip pool not found.', ex.explanation)
def test_floating_ip_allocate(self):
    """create() returns the allocated ip in view form (no association)."""
    def fake_allocate(*args, **kwargs):
        pass
    def fake_get_by_address(*args, **kwargs):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
    self.stubs.Set(network.api.API, "allocate_floating_ip",
                   fake_allocate)
    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_by_address)
    request = self._get_fake_fip_request()
    actual = self.controller.create(request)['floating_ip']
    self.assertEqual(actual, {"id": 1,
                              "instance_id": None,
                              "ip": "10.10.10.10",
                              "fixed_ip": None,
                              "pool": 'nova'})
def test_floating_ip_release(self):
    """delete() succeeds for an existing floating ip (stubbed release)."""
    req = self._get_fake_fip_request('1')
    self.controller.delete(req, 1)
def test_floating_ip_associate(self):
    """addFloatingIp forwards the instance's fixed address to the network API."""
    fixed_address = '192.168.1.100'
    def fake_associate_floating_ip(*args, **kwargs):
        self.assertEqual(fixed_address, kwargs['fixed_address'])
    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_associate_floating_ip)
    payload = {'addFloatingIp': {'address': self.floating_ip}}
    request = self._get_fake_server_request()
    response = self.manager._add_floating_ip(request, 'test_inst', payload)
    self.assertEqual(202, response.status_int)
def test_floating_ip_associate_invalid_instance(self):
    """Associating to a missing instance maps to HTTPNotFound."""
    def fake_get(self, context, id, expected_attrs=None,
                 want_objects=False):
        raise exception.InstanceNotFound(instance_id=id)
    self.stubs.Set(compute.api.API, "get", fake_get)
    body = dict(addFloatingIp=dict(address=self.floating_ip))
    req = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._add_floating_ip, req, 'test_inst',
                      body)
def test_associate_not_allocated_floating_ip_to_instance(self):
    """Associating an unallocated address yields 404 'floating ip not found'."""
    def fake_associate_floating_ip(self, context, instance,
                                   floating_address, fixed_address,
                                   affect_auto_assigned=False):
        raise exception.FloatingIpNotFoundForAddress(
            address=floating_address)
    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_associate_floating_ip)
    unallocated = '10.10.10.11'
    payload = {'addFloatingIp': {'address': unallocated}}
    request = webob.Request.blank(self.url)
    request.method = "POST"
    request.body = jsonutils.dumps(payload)
    request.headers["content-type"] = "application/json"
    response = request.get_response(self.app)
    decoded = jsonutils.loads(response.body)
    self.assertEqual(response.status_int, 404)
    self.assertEqual(decoded['itemNotFound']['message'],
                     "floating ip not found")
@mock.patch.object(network.api.API, 'associate_floating_ip',
                   side_effect=exception.Forbidden)
def test_associate_floating_ip_forbidden(self, associate_mock):
    """A Forbidden error from the network API surfaces as HTTP 403."""
    body = dict(addFloatingIp=dict(address='10.10.10.11'))
    req = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._add_floating_ip, req, 'test_inst',
                      body)
def test_associate_floating_ip_bad_address_key(self):
    """A body whose inner dict lacks 'address' is rejected with 400."""
    payload = {'addFloatingIp': {'bad_address': '10.10.10.11'}}
    request = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._add_floating_ip, request, 'test_inst',
                      payload)
def test_associate_floating_ip_bad_addfloatingip_key(self):
    """A body without the 'addFloatingIp' key is rejected with 400."""
    payload = {'bad_addFloatingIp': {'address': '10.10.10.11'}}
    request = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._add_floating_ip, request, 'test_inst',
                      payload)
def test_floating_ip_disassociate(self):
    """removeFloatingIp succeeds (202) for the instance owning the address."""
    def fake_get_instance(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   fake_get_instance)
    payload = {'removeFloatingIp': {'address': '10.10.10.10'}}
    request = self._get_fake_server_request()
    response = self.manager._remove_floating_ip(request, 'test_inst',
                                                payload)
    self.assertEqual(202, response.status_int)
def test_floating_ip_disassociate_missing(self):
    """Removing an address that is not associated raises HTTPConflict."""
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPConflict,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_associate_non_existent_ip(self):
    """Associating an address unknown to the network API maps to 404."""
    def fake_network_api_associate(self, context, instance,
                                   floating_address=None,
                                   fixed_address=None):
        known = ("10.10.10.10", "10.10.10.11")
        if floating_address not in known:
            raise exception.FloatingIpNotFoundForAddress(
                address=floating_address)
    self.stubs.Set(network.api.API, "associate_floating_ip",
                   fake_network_api_associate)
    payload = {'addFloatingIp': {'address': '1.1.1.1'}}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._add_floating_ip,
                      request, 'test_inst', payload)
def test_floating_ip_disassociate_non_existent_ip(self):
    """Disassociating an address unknown to the network API maps to 404."""
    def network_api_get_floating_ip_by_address(self, context,
                                               floating_address):
        known = ("10.10.10.10", "10.10.10.11")
        if floating_address not in known:
            raise exception.FloatingIpNotFoundForAddress(
                address=floating_address)
    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   network_api_get_floating_ip_by_address)
    payload = {'removeFloatingIp': {'address': '1.1.1.1'}}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPNotFound,
                      self.manager._remove_floating_ip,
                      request, 'test_inst', payload)
def test_floating_ip_disassociate_wrong_instance_uuid(self):
    """Removing an address owned by another instance raises 409."""
    def fake_get_instance(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   fake_get_instance)
    other_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
    payload = {'removeFloatingIp': {'address': '10.10.10.10'}}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPConflict,
                      self.manager._remove_floating_ip,
                      request, other_uuid, payload)
def test_floating_ip_disassociate_wrong_instance_id(self):
    """Removing an address mapped to a different instance id raises 409."""
    def fake_get_instance(self, context, address):
        if address == '10.10.10.10':
            return 'wrong_inst'
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   fake_get_instance)
    payload = {'removeFloatingIp': {'address': '10.10.10.10'}}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPConflict,
                      self.manager._remove_floating_ip,
                      request, 'test_inst', payload)
def test_floating_ip_disassociate_auto_assigned(self):
    """Auto-assigned ips may not be disassociated; mapped to HTTP 403."""
    def fake_get_floating_ip_addr_auto_assigned(self, context, address):
        # The ip record carries auto_assigned=1.
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip_id': 10, 'auto_assigned': 1}
    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'
    def network_api_disassociate(self, context, instance,
                                 floating_address):
        raise exception.CannotDisassociateAutoAssignedFloatingIP()
    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_floating_ip_addr_auto_assigned)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
def test_floating_ip_disassociate_map_authorization_exc(self):
    """A Forbidden error during disassociation is mapped to HTTP 403."""
    def fake_get_floating_ip_addr_auto_assigned(self, context, address):
        return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                'fixed_ip_id': 10, 'auto_assigned': 1}
    def get_instance_by_floating_ip_addr(self, context, address):
        if address == '10.10.10.10':
            return 'test_inst'
    def network_api_disassociate(self, context, instance, address):
        # The network layer rejects the caller outright.
        raise exception.Forbidden()
    self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                   fake_get_floating_ip_addr_auto_assigned)
    self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                   get_instance_by_floating_ip_addr)
    self.stubs.Set(network.api.API, "disassociate_floating_ip",
                   network_api_disassociate)
    body = dict(removeFloatingIp=dict(address='10.10.10.10'))
    req = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPForbidden,
                      self.manager._remove_floating_ip,
                      req, 'test_inst', body)
# The following tests exercise malformed request bodies (missing or bad parameters).
def test_bad_address_param_in_remove_floating_ip(self):
    """removeFloatingIp without an 'address' key is rejected with 400."""
    payload = {'removeFloatingIp': {'badparam': '11.0.0.1'}}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._remove_floating_ip, request,
                      'test_inst', payload)
def test_missing_dict_param_in_remove_floating_ip(self):
    """removeFloatingIp whose value is not a dict is rejected with 400."""
    payload = {'removeFloatingIp': '11.0.0.1'}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._remove_floating_ip, request,
                      'test_inst', payload)
def test_missing_dict_param_in_add_floating_ip(self):
    """addFloatingIp whose value is not a dict is rejected with 400."""
    payload = {'addFloatingIp': '11.0.0.1'}
    request = self._get_fake_server_request()
    self.assertRaises(webob.exc.HTTPBadRequest,
                      self.manager._add_floating_ip, request,
                      'test_inst', payload)
class FloatingIpTestV2(FloatingIpTestV21):
    """Runs the v2.1 floating-ip tests against the legacy v2 extension."""
    floating_ips = fips_v2

    def _get_fake_app(self):
        """Build a v2 WSGI app limited to the extensions under test."""
        return fakes.wsgi_app(init_only=('servers', 'os-floating-ips'))

    def setUp(self):
        super(FloatingIpTestV2, self).setUp()
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Floating_ips'])

    def test_not_extended_floating_ip_associate_fixed(self):
        """fixed_address is ignored when os-extended-floating-ips is absent.

        The stubbed network API must see the instance's allocated fixed
        address, not the one supplied in the request body.
        """
        requested_fixed = '192.168.1.101'
        allocated_fixed = '192.168.1.100'
        def fake_associate_floating_ip(*args, **kwargs):
            self.assertEqual(allocated_fixed,
                             kwargs['fixed_address'])
        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        payload = {'addFloatingIp': {'address': self.floating_ip,
                                     'fixed_address': requested_fixed}}
        request = self._get_fake_server_request()
        response = self.manager._add_floating_ip(request, 'test_inst',
                                                 payload)
        self.assertEqual(202, response.status_int)

    def test_floatingip_delete_invalid_id(self):
        """An InvalidID from the network layer maps to HTTP 404."""
        err = exception.InvalidID(id=1)
        self._test_floatingip_delete_not_found(err, webob.exc.HTTPNotFound)
class ExtendedFloatingIpTestV21(test.TestCase):
    """Tests for os-extended-floating-ips (fixed_address in addFloatingIp)."""
    floating_ip = "10.10.10.10"
    floating_ip_2 = "10.10.10.11"
    floating_ips = fips_v21

    def _create_floating_ips(self, floating_ips=None):
        """Create floating ip DB records for each requested address.

        Accepts a single address or a list/tuple of addresses; defaults to
        ``self.floating_ip``.
        """
        if floating_ips is None:
            floating_ips = [self.floating_ip]
        elif not isinstance(floating_ips, (list, tuple)):
            floating_ips = [floating_ips]
        # BUG FIX: a dead nested helper (``make_ip_dict`` with a bare
        # ``return`` and no callers) was removed here; it contributed
        # nothing and its bare ``return`` was a copy/paste artifact.
        dict_ = {'pool': 'nova', 'host': 'fake_host'}
        return db.floating_ip_bulk_create(
            self.context, [dict(address=ip, **dict_) for ip in floating_ips],
        )

    def _delete_floating_ip(self):
        """Remove the default floating ip record from the DB."""
        db.floating_ip_destroy(self.context, self.floating_ip)

    def _get_fake_request(self):
        """Fake server-action request for the add/remove floating ip calls."""
        return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')

    def _get_fake_app(self):
        """WSGI v2.1 app limited to the extensions under test."""
        return fakes.wsgi_app_v21(init_only=('servers', 'os-floating-ips'))

    def setUp(self):
        super(ExtendedFloatingIpTestV21, self).setUp()
        # Stub out every compute/network call the controllers make so the
        # tests exercise only the API layer.
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self.stubs))
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.stubs.Set(db, 'instance_get',
                       fake_instance_get)
        self.context = context.get_admin_context()
        self._create_floating_ips()
        # Enable both the base and the extended floating-ip extensions.
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.ext_mgr.extensions['os-floating-ips'] = True
        self.ext_mgr.extensions['os-extended-floating-ips'] = True
        self.controller = self.floating_ips.FloatingIPController()
        self.manager = self.floating_ips.FloatingIPActionController(
            self.ext_mgr)
        self.app = self._get_fake_app()

    def tearDown(self):
        self._delete_floating_ip()
        super(ExtendedFloatingIpTestV21, self).tearDown()

    def test_extended_floating_ip_associate_fixed(self):
        """With the extension loaded, the requested fixed_address is honored."""
        fixed_address = '192.168.1.101'

        def fake_associate_floating_ip(*args, **kwargs):
            self.assertEqual(fixed_address, kwargs['fixed_address'])
        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address=fixed_address))
        req = self._get_fake_request()
        rsp = self.manager._add_floating_ip(req, 'test_inst', body)
        self.assertEqual(202, rsp.status_int)

    def test_extended_floating_ip_associate_fixed_not_allocated(self):
        """A fixed_address not assigned to the instance is rejected with 400."""
        def fake_associate_floating_ip(*args, **kwargs):
            pass
        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip,
                                       fixed_address='11.11.11.11'))
        req = self._get_fake_request()
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        resp = req.get_response(self.app)
        res_dict = jsonutils.loads(resp.body)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         "Specified fixed address not assigned to instance")
class ExtendedFloatingIpTestV2(ExtendedFloatingIpTestV21):
    """Runs the extended floating-ip tests against the legacy v2 stack."""
    floating_ips = fips_v2

    def _get_fake_app(self):
        """WSGI v2 app limited to the extensions under test."""
        return fakes.wsgi_app(init_only=('servers', 'os-floating-ips'))

    def setUp(self):
        super(ExtendedFloatingIpTestV2, self).setUp()
        # Load both the base and the extended floating-ip v2 extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Floating_ips', 'Extended_floating_ips'])
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
import numpy as np
from numpy.testing import assert_allclose
def test_rnn():
    """Unrolled RNNCell exposes the expected params, output names and shapes."""
    cell = gluon.rnn.RNNCell(100, prefix='rnn_')
    seq = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    outs, _ = cell.unroll(3, seq)
    grouped = mx.sym.Group(outs)
    assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    assert grouped.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
    _, shapes, _ = grouped.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
    assert shapes == [(10, 100), (10, 100), (10, 100)]
def test_lstm():
    """Unrolled LSTMCell exposes the expected params, output names and shapes."""
    cell = gluon.rnn.LSTMCell(100, prefix='rnn_')
    seq = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    outs, _ = cell.unroll(3, seq)
    grouped = mx.sym.Group(outs)
    assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    assert grouped.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
    _, shapes, _ = grouped.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
    assert shapes == [(10, 100), (10, 100), (10, 100)]
def test_lstm_forget_bias():
    """LSTMBias initializer sets the forget-gate slice of i2h_bias, zeros elsewhere."""
    forget_bias = 2.0
    stack = gluon.rnn.SequentialRNNCell()
    stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))
    stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))
    dshape = (32, 1, 200)
    data = mx.sym.Variable('data')
    sym, _ = stack.unroll(1, data, merge_outputs=True)
    mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))
    mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
    mod.init_params()
    # next() stops at the first match, so only the first layer's bias is checked.
    bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))
    # Expected layout: 100 zeros, 100 entries at forget_bias, then 200 zeros.
    expected_bias = np.hstack([np.zeros((100,)),
                               forget_bias * np.ones(100, ), np.zeros((2 * 100,))])
    assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)
def test_gru():
    """Unrolled GRUCell exposes the expected params, output names and shapes."""
    cell = gluon.rnn.GRUCell(100, prefix='rnn_')
    seq = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    outs, _ = cell.unroll(3, seq)
    grouped = mx.sym.Group(outs)
    assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    assert grouped.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
    _, shapes, _ = grouped.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
    assert shapes == [(10, 100), (10, 100), (10, 100)]
def test_residual():
    """ResidualCell adds its input to the wrapped GRUCell's output."""
    cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50, prefix='rnn_'))
    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
    outputs, _ = cell.unroll(2, inputs)
    outputs = mx.sym.Group(outputs)
    assert sorted(cell.collect_params().keys()) == \
           ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']
    # assert outputs.list_outputs() == \
    #        ['rnn_t0_out_plus_residual_output', 'rnn_t1_out_plus_residual_output']
    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
    assert outs == [(10, 50), (10, 50)]
    # With all-zero parameters the expected output equals the all-ones input,
    # i.e. only the residual (skip) path contributes.
    outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50)),
                           rnn_t1_data=mx.nd.ones((10, 50)),
                           rnn_i2h_weight=mx.nd.zeros((150, 50)),
                           rnn_i2h_bias=mx.nd.zeros((150,)),
                           rnn_h2h_weight=mx.nd.zeros((150, 50)),
                           rnn_h2h_bias=mx.nd.zeros((150,)))
    expected_outputs = np.ones((10, 50))
    assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
    assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_residual_bidirectional():
    """ResidualCell around a BidirectionalCell of two 25-unit GRUs (50 total)."""
    cell = gluon.rnn.ResidualCell(
            gluon.rnn.BidirectionalCell(
                gluon.rnn.GRUCell(25, prefix='rnn_l_'),
                gluon.rnn.GRUCell(25, prefix='rnn_r_')))
    inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
    outputs, _ = cell.unroll(2, inputs, merge_outputs=False)
    outputs = mx.sym.Group(outputs)
    assert sorted(cell.collect_params().keys()) == \
           ['rnn_l_h2h_bias', 'rnn_l_h2h_weight', 'rnn_l_i2h_bias', 'rnn_l_i2h_weight',
            'rnn_r_h2h_bias', 'rnn_r_h2h_weight', 'rnn_r_i2h_bias', 'rnn_r_i2h_weight']
    # assert outputs.list_outputs() == \
    #        ['bi_t0_plus_residual_output', 'bi_t1_plus_residual_output']
    args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
    assert outs == [(10, 50), (10, 50)]
    # With all-zero parameters the expected output equals the input (ones + 5):
    # only the residual path contributes.
    outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50))+5,
                           rnn_t1_data=mx.nd.ones((10, 50))+5,
                           rnn_l_i2h_weight=mx.nd.zeros((75, 50)),
                           rnn_l_i2h_bias=mx.nd.zeros((75,)),
                           rnn_l_h2h_weight=mx.nd.zeros((75, 25)),
                           rnn_l_h2h_bias=mx.nd.zeros((75,)),
                           rnn_r_i2h_weight=mx.nd.zeros((75, 50)),
                           rnn_r_i2h_bias=mx.nd.zeros((75,)),
                           rnn_r_h2h_weight=mx.nd.zeros((75, 25)),
                           rnn_r_h2h_bias=mx.nd.zeros((75,)))
    expected_outputs = np.ones((10, 50))+5
    assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
    assert np.array_equal(outputs[1].asnumpy(), expected_outputs)
def test_stack():
    """SequentialRNNCell of five LSTM layers; layer 1 is wrapped residually."""
    stack = gluon.rnn.SequentialRNNCell()
    for layer in range(5):
        lstm = gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_' % layer)
        stack.add(gluon.rnn.ResidualCell(lstm) if layer == 1 else lstm)
    seq = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    outs, _ = stack.unroll(3, seq)
    grouped = mx.sym.Group(outs)
    keys = sorted(stack.collect_params().keys())
    for layer in range(5):
        for suffix in ('h2h_weight', 'h2h_bias', 'i2h_weight', 'i2h_bias'):
            assert 'rnn_stack%d_%s' % (layer, suffix) in keys
    assert grouped.list_outputs() == ['rnn_stack4_t0_out_output', 'rnn_stack4_t1_out_output', 'rnn_stack4_t2_out_output']
    _, shapes, _ = grouped.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
    assert shapes == [(10, 100), (10, 100), (10, 100)]
def test_bidirectional():
    """BidirectionalCell concatenates both directions (feature dim doubles)."""
    cell = gluon.rnn.BidirectionalCell(
            gluon.rnn.LSTMCell(100, prefix='rnn_l0_'),
            gluon.rnn.LSTMCell(100, prefix='rnn_r0_'),
            output_prefix='rnn_bi_')
    seq = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    outs, _ = cell.unroll(3, seq)
    grouped = mx.sym.Group(outs)
    assert grouped.list_outputs() == ['rnn_bi_t0_output', 'rnn_bi_t1_output', 'rnn_bi_t2_output']
    _, shapes, _ = grouped.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
    assert shapes == [(10, 200), (10, 200), (10, 200)]
def test_zoneout():
    """ZoneoutCell wrapping an RNNCell keeps the wrapped cell's output shapes."""
    cell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100, prefix='rnn_'),
                                 zoneout_outputs=0.5,
                                 zoneout_states=0.5)
    seq = [mx.sym.Variable('rnn_t%d_data' % t) for t in range(3)]
    outs, _ = cell.unroll(3, seq)
    grouped = mx.sym.Group(outs)
    _, shapes, _ = grouped.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
    assert shapes == [(10, 100), (10, 100), (10, 100)]
def check_rnn_forward(layer, inputs):
    """Run forward/backward through ``layer`` with both merged and split outputs."""
    inputs.attach_grad()
    layer.collect_params().initialize()
    with mx.autograd.record():
        # Exercise both unroll modes; each triggers its own backward pass.
        layer.unroll(3, inputs, merge_outputs=True)[0].backward()
        mx.autograd.backward(layer.unroll(3, inputs, merge_outputs=False)[0])
    # Block until all async kernels have finished so failures surface here.
    mx.nd.waitall()
def test_rnn_cells():
    """Smoke-test forward/backward for every cell type and common wrappers."""
    check_rnn_forward(gluon.rnn.LSTMCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
    check_rnn_forward(gluon.rnn.RNNCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
    check_rnn_forward(gluon.rnn.GRUCell(100, input_size=200), mx.nd.ones((8, 3, 200)))
    bilayer = gluon.rnn.BidirectionalCell(gluon.rnn.LSTMCell(100, input_size=200),
                                          gluon.rnn.LSTMCell(100, input_size=200))
    check_rnn_forward(bilayer, mx.nd.ones((8, 3, 200)))
    check_rnn_forward(gluon.rnn.DropoutCell(0.5), mx.nd.ones((8, 3, 200)))
    check_rnn_forward(gluon.rnn.ZoneoutCell(gluon.rnn.LSTMCell(100, input_size=200),
                                            0.5, 0.2),
                      mx.nd.ones((8, 3, 200)))
    # A stacked cell: each layer's input_size matches the previous layer's units.
    net = gluon.rnn.SequentialRNNCell()
    net.add(gluon.rnn.LSTMCell(100, input_size=200))
    net.add(gluon.rnn.RNNCell(100, input_size=100))
    net.add(gluon.rnn.GRUCell(100, input_size=100))
    check_rnn_forward(net, mx.nd.ones((8, 3, 200)))
def check_rnn_layer_forward(layer, inputs, states=None):
    """Run one forward/backward pass through ``layer`` and check the output type."""
    layer.collect_params().initialize()
    with mx.autograd.record():
        out = layer(inputs, states)
        if states is None:
            assert isinstance(out, mx.nd.NDArray)
        else:
            assert isinstance(out, tuple) and len(out) == 2
            out = out[0]
        out.backward()
    mx.nd.waitall()
def test_rnn_layers():
    """Smoke-test the fused RNN/LSTM/GRU layers, with and without init states."""
    check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)))
    check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
    check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)))
    # LSTM takes a [hidden, cell] state pair.
    check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)), [mx.nd.ones((2, 3, 10)), mx.nd.ones((2, 3, 10))])
    check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)))
    check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))
    # A bidirectional LSTM composed with dense layers inside a Sequential net.
    net = gluon.nn.Sequential()
    net.add(gluon.rnn.LSTM(10, 2, bidirectional=True))
    net.add(gluon.nn.BatchNorm(axis=2))
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(3, activation='relu'))
    net.collect_params().initialize()
    with mx.autograd.record():
        net(mx.nd.ones((2, 3, 10))).backward()
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    import nose
    nose.runmodule()
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Low-level objects providing an abstraction for the objects involved in the calculation.
"""
from __future__ import unicode_literals, division, print_function
import collections
import abc
import six
import numpy as np
import pymatgen.core.units as units
from pprint import pformat
from monty.design_patterns import singleton
from monty.collections import AttrDict
from enum import Enum
from monty.json import MSONable
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.serialization import pmg_serialize
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from monty.json import MontyEncoder, MontyDecoder
def lattice_from_abivars(cls=None, *args, **kwargs):
    """
    Returns a `Lattice` object from a dictionary
    with the Abinit variables `acell` and either `rprim` in Bohr or `angdeg`
    If acell is not given, the Abinit default is used i.e. [1,1,1] Bohr

    Args:
        cls: Lattice class to be instantiated. pymatgen.core.lattice.Lattice if `cls` is None

    Example:
        lattice_from_abivars(acell=3*[10], rprim=np.eye(3))
    """
    cls = Lattice if cls is None else cls
    kwargs.update(dict(*args))
    d = kwargs
    rprim = d.get("rprim", None)
    angdeg = d.get("angdeg", None)
    # BUG FIX: the docstring promises the Abinit default acell = [1,1,1] Bohr,
    # but the code previously did d["acell"] and raised KeyError when missing.
    acell = d.get("acell", 3 * [1.0])

    if rprim is not None:
        if angdeg is not None:
            raise ValueError("angdeg and rprimd are mutually exclusive")
        rprim = np.reshape(rprim, (3,3))
        rprimd = [float(acell[i]) * rprim[i] for i in range(3)]
        # Call pymatgen constructors (note that pymatgen uses Angstrom instead of Bohr).
        return cls(ArrayWithUnit(rprimd, "bohr").to("ang"))

    elif angdeg is not None:
        angdeg = np.reshape(angdeg, 3)
        if np.any(angdeg <= 0.):
            raise ValueError("Angles must be > 0 but got %s" % str(angdeg))
        if angdeg.sum() >= 360.:
            raise ValueError("The sum of angdeg must be lower that 360, angdeg %s" % str(angdeg))

        # This code follows the implementation in ingeo.F90
        # See also http://www.abinit.org/doc/helpfiles/for-v7.8/input_variables/varbas.html#angdeg
        tol12 = 1e-12
        pi, sin, cos, sqrt = np.pi, np.sin, np.cos, np.sqrt
        rprim = np.zeros((3,3))
        if (abs(angdeg[0] -angdeg[1]) < tol12 and abs(angdeg[1] - angdeg[2]) < tol12 and
            abs(angdeg[0]-90.) + abs(angdeg[1]-90.) + abs(angdeg[2] -90) > tol12):
            # Treat the case of equal angles (except all right angles):
            # generates trigonal symmetry wrt third axis
            cosang = cos(pi * angdeg[0]/180.0)
            a2 = 2.0/3.0*(1.0 - cosang)
            aa = sqrt(a2)
            cc = sqrt(1.0-a2)
            rprim[0,0] = aa ; rprim[0,1] = 0.0 ; rprim[0,2] = cc
            rprim[1,0] = -0.5*aa; rprim[1,1] = sqrt(3.0)*0.5*aa ; rprim[1,2] = cc
            rprim[2,0] = -0.5*aa; rprim[2,1] = -sqrt(3.0)*0.5*aa; rprim[2,2] = cc
        else:
            # Treat all the other cases
            rprim[0,0] = 1.0
            rprim[1,0] = cos(pi*angdeg[2]/180.)
            rprim[1,1] = sin(pi*angdeg[2]/180.)
            rprim[2,0] = cos(pi*angdeg[1]/180.)
            rprim[2,1] = (cos(pi*angdeg[0]/180.0)-rprim[1,0]*rprim[2,0])/rprim[1,1]
            rprim[2,2] = sqrt(1.0-rprim[2,0]**2-rprim[2,1]**2)

        # Call pymatgen constructors (note that pymatgen uses Angstrom instead of Bohr).
        rprimd = [float(acell[i]) * rprim[i] for i in range(3)]
        return cls(ArrayWithUnit(rprimd, "bohr").to("ang"))

    raise ValueError("Don't know how to construct a Lattice from dict:\n%s" % pformat(d))
def structure_from_abivars(cls=None, *args, **kwargs):
    """
    Build a :class:`Structure` object from a dictionary with ABINIT variables.

    Args:
        cls: Structure class to be instantiated. pymatgen.core.structure.Structure if cls is None

    example:

        al_structure = structure_from_abivars(
            acell=3*[7.5],
            rprim=[0.0, 0.5, 0.5,
                   0.5, 0.0, 0.5,
                   0.5, 0.5, 0.0],
            typat=1,
            xred=[0.0, 0.0, 0.0],
            ntypat=1,
            znucl=13,
        )

    `xred` can be replaced with `xcart` or `xangst`.
    """
    # FIX: collections.Iterable was removed in Python 3.10; import from
    # collections.abc when available (Python 2 keeps it in collections).
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2 fallback
        from collections import Iterable

    kwargs.update(dict(*args))
    d = kwargs
    cls = Structure if cls is None else cls

    #lattice = Lattice.from_dict(d, fmt="abivars")
    lattice = lattice_from_abivars(**d)
    coords, coords_are_cartesian = d.get("xred", None), False

    if coords is None:
        coords = d.get("xcart", None)
        if coords is not None:
            if "xangst" in d:
                raise ValueError("xangst and xcart are mutually exclusive")
            coords = ArrayWithUnit(coords, "bohr").to("ang")
        else:
            coords = d.get("xangst", None)
        coords_are_cartesian = True

    if coords is None:
        raise ValueError("Cannot extract coordinates from:\n %s" % str(d))

    coords = np.reshape(coords, (-1,3))

    znucl_type, typat = d["znucl"], d["typat"]

    if not isinstance(znucl_type, Iterable):
        znucl_type = [znucl_type]

    if not isinstance(typat, Iterable):
        typat = [typat]

    if len(typat) != len(coords):
        raise ValueError("len(typat) != len(coords):\ntypat: %s\ncoords: %s" % (typat, coords))

    # Note conversion to int and Fortran --> C indexing
    # FIX: np.int (an alias of the builtin int, removed in NumPy >= 1.24)
    # is replaced by the builtin it aliased; dtype semantics are unchanged.
    typat = np.array(typat, dtype=int)
    species = [znucl_type[typ-1] for typ in typat]

    return cls(lattice, species, coords, validate_proximity=False,
               to_unit_cell=False, coords_are_cartesian=coords_are_cartesian)
def structure_to_abivars(structure, **kwargs):
    """
    Receives a structure and returns a dictionary with the ABINIT variables.

    Args:
        structure: Ordered :class:`Structure` object.
        geomode: Optional kwarg selecting how the lattice is specified:
            "rprim" (default), "angdeg", or "automatic" (uses angdeg for
            hexagonal lattices, rprim otherwise).

    Raises:
        ValueError: if the structure is disordered or geomode is invalid.
    """
    if not structure.is_ordered:
        raise ValueError("""\
Received disordered structure with partial occupancies that cannot be converted into an Abinit input
Please use OrderDisorderedStructureTransformation or EnumerateStructureTransformation
to build an appropriate supercell from partial occupancies or alternatively use the Virtual Crystal Approximation.""")

    types_of_specie = structure.types_of_specie
    natom = structure.num_sites

    znucl_type = [specie.number for specie in types_of_specie]

    # Fortran 1-based index of the atom type of each site.
    # BUG FIX: np.int was removed in numpy 1.24; the builtin int is equivalent.
    typat = np.zeros(natom, dtype=int)
    for atm_idx, site in enumerate(structure):
        typat[atm_idx] = types_of_specie.index(site.specie) + 1

    rprim = ArrayWithUnit(structure.lattice.matrix, "ang").to("bohr")
    angdeg = structure.lattice.angles
    xred = np.reshape([site.frac_coords for site in structure], (-1, 3))

    # Set small values to zero. This usually happens when the CIF file
    # does not give structure parameters with enough digits.
    rprim = np.where(np.abs(rprim) > 1e-8, rprim, 0.0)
    xred = np.where(np.abs(xred) > 1e-8, xred, 0.0)

    # Info on atoms.
    d = dict(
        natom=natom,
        ntypat=len(types_of_specie),
        typat=typat,
        znucl=znucl_type,
        xred=xred,
    )

    # Add info on the lattice.
    # Should we use (rprim, acell) or (angdeg, acell) to specify the lattice?
    geomode = kwargs.pop("geomode", "rprim")
    if geomode == "automatic":
        geomode = "rprim"
        if structure.lattice.is_hexagonal: # or structure.lattice.is_rhombohedral
            geomode = "angdeg"
            angdeg = structure.lattice.angles
            # Here one could polish a bit the numerical values if they are not exact.
            # Note that in pmg the angles are 12, 20, 01 while in Abinit 12, 02, 01
            # One should make sure that the orientation is preserved (see Curtarolo's settings)

    if geomode == "rprim":
        d.update(
            acell=3 * [1.0],
            rprim=rprim,
        )
    elif geomode == "angdeg":
        d.update(
            acell=ArrayWithUnit(structure.lattice.abc, "ang").to("bohr"),
            angdeg=angdeg,
        )
    else:
        raise ValueError("Wrong value for geomode: %s" % geomode)

    return d
def contract(s):
    """
    Compress a whitespace-separated token string into run-length form.

    >>> assert contract("1 1 1 2 2 3") == "3*1 2*2 1*3"
    >>> assert contract("1 1 3 2 3") == "2*1 1*3 1*2 1*3"
    """
    if not s: return s

    tokens = s.split()
    prev = tokens[0]
    runs = [[1, prev]]

    for tok in tokens[1:]:
        if tok == prev:
            # Same token as the previous one: extend the current run.
            runs[-1][0] += 1
        else:
            prev = tok
            runs.append([1, tok])

    return " ".join("%d*%s" % (n, tok) for n, tok in runs)
class AbivarAble(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract base class for objects that can express themselves as a
    dictionary of abinit input variables via the `to_abivars` method.
    """
    @abc.abstractmethod
    def to_abivars(self):
        """Returns a dictionary with the abinit variables."""

    #@abc.abstractmethod
    #def from_abivars(cls, vars):
    #    """Build the object from a dictionary with Abinit variables."""

    def __str__(self):
        # Pretty-print the abinit variables.
        abivars = self.to_abivars()
        return pformat(abivars, indent=1, width=80, depth=None)

    def __contains__(self, key):
        # Membership test delegates to the generated variable dictionary.
        abivars = self.to_abivars()
        return key in abivars
@singleton
class MandatoryVariable(object):
    """
    Singleton used to tag mandatory variables, just because I can use
    the cool syntax: variable is MANDATORY!
    """
    # Instances are compared with the "is" operator (see RelaxationMethod).
@singleton
class DefaultVariable(object):
    """Singleton used to tag variables that will have the default value"""
# Sentinel instances: a default value that "is MANDATORY" must be supplied by
# the caller, while one that "is DEFAULT" falls back to the abinit default.
MANDATORY = MandatoryVariable()
DEFAULT = DefaultVariable()
class SpinMode(collections.namedtuple('SpinMode', "mode nsppol nspinor nspden"), AbivarAble, MSONable):
    """
    Different configurations of the electron density as implemented in abinit:
    One can use as_spinmode to construct the object via SpinMode.as_spinmode
    (string) where string can assume the values:

        - polarized
        - unpolarized
        - afm (anti-ferromagnetic)
        - spinor (non-collinear magnetism)
        - spinor_nomag (non-collinear, no magnetism)
    """
    @classmethod
    def as_spinmode(cls, obj):
        """Converts obj into a `SpinMode` instance"""
        if isinstance(obj, cls):
            return obj

        # Otherwise obj is interpreted as a mode string and looked up in the
        # module-level registry.
        try:
            return _mode2spinvars[obj]
        except KeyError:
            raise KeyError("Wrong value for spin_mode: %s" % str(obj))

    def to_abivars(self):
        """Return the abinit variables defining the spin configuration."""
        return dict(
            nsppol=self.nsppol,
            nspinor=self.nspinor,
            nspden=self.nspden,
        )

    @pmg_serialize
    def as_dict(self):
        """json friendly dict representation."""
        d = {}
        for field in self._fields:
            d[field] = getattr(self, field)
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object, ignoring serialization metadata keys."""
        return cls(**{k: v for k, v in d.items() if k in cls._fields})
# A handy Multiton: maps the mode string accepted by SpinMode.as_spinmode to a
# pre-built SpinMode(mode, nsppol, nspinor, nspden) instance.
_mode2spinvars = {
    "unpolarized": SpinMode("unpolarized", 1, 1, 1),
    "polarized": SpinMode("polarized", 2, 1, 2),
    "afm": SpinMode("afm", 1, 1, 2),
    "spinor": SpinMode("spinor", 1, 2, 4),
    "spinor_nomag": SpinMode("spinor_nomag", 1, 2, 1),
}
class Smearing(AbivarAble, MSONable):
    """
    Variables defining the smearing technique. The preferred way to instantiate
    a `Smearing` object is via the class method Smearing.as_smearing(string)
    """
    #: Mapping string_mode --> occopt
    _mode2occopt = {
        'nosmearing': 1,
        'fermi_dirac': 3,
        'marzari4': 4,
        'marzari5': 5,
        'methfessel': 6,
        'gaussian': 7}

    def __init__(self, occopt, tsmear):
        """
        Args:
            occopt: Abinit occupation option selecting the smearing technique.
            tsmear: Smearing temperature (Hartree units).
        """
        self.occopt = occopt
        self.tsmear = tsmear

    def __str__(self):
        s = "occopt %d # %s Smearing\n" % (self.occopt, self.mode)
        if self.tsmear:
            s += 'tsmear %s' % self.tsmear
        return s

    def __eq__(self, other):
        # Guard against None and unrelated types (mirrors PPModel.__eq__);
        # the old code raised AttributeError in that case.
        if other is None or not isinstance(other, Smearing):
            return False
        return (self.occopt == other.occopt and
                np.allclose(self.tsmear, other.tsmear))

    def __ne__(self, other):
        return not self == other

    def __bool__(self):
        return self.mode != "nosmearing"

    # py2 old version
    __nonzero__ = __bool__

    @classmethod
    def as_smearing(cls, obj):
        """
        Constructs an instance of `Smearing` from obj. Accepts obj in the form:

            * Smearing instance
            * "name:tsmear" e.g. "gaussian:0.004"  (Hartree units)
            * "name:tsmear units" e.g. "gaussian:0.1 eV"
            * None --> no smearing
        """
        if obj is None:
            return Smearing.nosmearing()

        if isinstance(obj, cls):
            return obj

        # obj is a string
        if obj == "nosmearing":
            return cls.nosmearing()
        else:
            obj, tsmear = obj.split(":")
            # BUG FIX: str.strip returns a new string; the old code discarded
            # the result so modes with surrounding whitespace raised KeyError.
            obj = obj.strip()
            occopt = cls._mode2occopt[obj]
            try:
                tsmear = float(tsmear)
            except ValueError:
                tsmear, unit = tsmear.split()
                tsmear = units.Energy(float(tsmear), unit).to("Ha")

            return cls(occopt, tsmear)

    @property
    def mode(self):
        """String with the name of the smearing technique (inverse lookup of occopt)."""
        for (mode_str, occopt) in self._mode2occopt.items():
            if occopt == self.occopt:
                return mode_str
        raise AttributeError("Unknown occopt %s" % self.occopt)

    @staticmethod
    def nosmearing():
        """Build a Smearing object with smearing disabled (occopt 1)."""
        return Smearing(1, 0.0)

    def to_abivars(self):
        """Returns a dictionary with the abinit variables."""
        if self.mode == "nosmearing":
            return {"occopt": 1, "tsmear": 0.0}
        else:
            return {"occopt": self.occopt, "tsmear": self.tsmear,}

    @pmg_serialize
    def as_dict(self):
        """json friendly dict representation of Smearing"""
        return {"occopt": self.occopt, "tsmear": self.tsmear}

    @staticmethod
    def from_dict(d):
        """Build a Smearing object from its dict representation."""
        return Smearing(d["occopt"], d["tsmear"])
class ElectronsAlgorithm(dict, AbivarAble, MSONable):
    """Variables controlling the SCF/NSCF algorithm."""

    # None indicates that we use abinit defaults.
    _DEFAULT = dict(
        iprcell=None, iscf=None, diemac=None, diemix=None, diemixmag=None,
        dielam=None, diegap=None, dielng=None, diecut=None, nstep=50)

    def __init__(self, *args, **kwargs):
        super(ElectronsAlgorithm, self).__init__(*args, **kwargs)

        # Reject any key without a registered default (guard-clause style).
        for key in self:
            if key in self._DEFAULT:
                continue
            raise ValueError("%s: No default value has been provided for "
                             "key %s" % (self.__class__.__name__, key))

    def to_abivars(self):
        """Return a plain-dict copy of the abinit variables."""
        return dict(self)

    @pmg_serialize
    def as_dict(self):
        """json friendly dict representation."""
        return dict(self)

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object, dropping serialization metadata keys."""
        kwargs = {k: v for k, v in d.items() if k not in ("@module", "@class")}
        return cls(**kwargs)
class Electrons(AbivarAble, MSONable):
    """The electronic degrees of freedom"""
    def __init__(self, spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
                 algorithm=None, nband=None, fband=None, charge=0.0, comment=None):  # occupancies=None,
        """
        Constructor for Electrons object.

        Args:
            comment: String comment for Electrons
            charge: Total charge of the system. Default is 0.
        """
        super(Electrons, self).__init__()

        # Normalize string specifications into the corresponding objects.
        self.spin_mode = SpinMode.as_spinmode(spin_mode)
        self.smearing = Smearing.as_smearing(smearing)

        self.nband = nband
        self.fband = fband
        self.charge = charge
        self.algorithm = algorithm
        self.comment = comment

    @property
    def nsppol(self):
        """Number of independent spin polarizations (from spin_mode)."""
        return self.spin_mode.nsppol

    @property
    def nspinor(self):
        """Number of spinor components (from spin_mode)."""
        return self.spin_mode.nspinor

    @property
    def nspden(self):
        """Number of spin-density components (from spin_mode)."""
        return self.spin_mode.nspden

    def as_dict(self):
        "json friendly dict representation"
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "spin_mode": self.spin_mode.as_dict(),
            "smearing": self.smearing.as_dict(),
            "algorithm": self.algorithm.as_dict() if self.algorithm else None,
            "nband": self.nband,
            "fband": self.fband,
            "charge": self.charge,
            "comment": self.comment,
        }

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        d = d.copy()
        d.pop("@module", None)
        d.pop("@class", None)
        dec = MontyDecoder()
        for key in ("spin_mode", "smearing"):
            d[key] = dec.process_decoded(d[key])
        d["algorithm"] = dec.process_decoded(d["algorithm"]) if d["algorithm"] else None
        return cls(**d)

    def to_abivars(self):
        """Return the abinit variables for the electronic degrees of freedom."""
        abivars = self.spin_mode.to_abivars()

        abivars["nband"] = self.nband
        abivars["fband"] = self.fband
        abivars["charge"] = self.charge

        if self.smearing:
            abivars.update(self.smearing.to_abivars())

        if self.algorithm:
            abivars.update(self.algorithm)

        #abivars["#comment"] = self.comment
        return abivars
class KSamplingModes(Enum):
    """Enumeration of the k-point sampling schemes supported by KSampling."""
    monkhorst = 1
    path = 2
    automatic = 3
class KSampling(AbivarAble, MSONable):
    """
    Input variables defining the K-point sampling.
    """
    def __init__(self, mode=KSamplingModes.monkhorst, num_kpts=0,
                 kpts=((1, 1, 1),),
                 kpt_shifts=(0.5, 0.5, 0.5),
                 kpts_weights=None, use_symmetries=True, use_time_reversal=True, chksymbreak=None,
                 comment=None):
        """
        Highly flexible constructor for KSampling objects. The flexibility comes
        at the cost of usability and in general, it is recommended that you use
        the default constructor only if you know exactly what you are doing and
        requires the flexibility. For most usage cases, the object be constructed
        far more easily using the convenience static constructors:

            #. gamma_only
            #. gamma_centered
            #. monkhorst
            #. monkhorst_automatic
            #. path

        and it is recommended that you use those.

        Args:
            mode: Mode for generating k-poits. Use one of the KSamplingModes enum types.
            num_kpts: Number of kpoints if mode is "automatic"
                Number of division for the sampling of the smallest segment if mode is "path".
                Not used for the other modes
            kpts: Number of divisions. Even when only a single specification is
                required, e.g. in the automatic scheme, the kpts should still
                be specified as a 2D array. e.g., [[20]] or [[2,2,2]].
            kpt_shifts: Shifts for Kpoints.
            use_symmetries: False if spatial symmetries should not be used
                to reduce the number of independent k-points.
            use_time_reversal: False if time-reversal symmetry should not be used
                to reduce the number of independent k-points.
            kpts_weights: Optional weights for kpoints. For explicit kpoints.
            chksymbreak: Abinit input variable: check whether the BZ sampling preserves the symmetry of the crystal.
            comment: String comment for Kpoints

        .. note::
            The default behavior of the constructor is monkhorst.
        """
        if isinstance(mode, six.string_types):
            mode = KSamplingModes[mode]

        super(KSampling, self).__init__()

        self.mode = mode
        self.comment = comment

        self.num_kpts = num_kpts
        self.kpts = kpts
        self.kpt_shifts = kpt_shifts
        self.kpts_weights = kpts_weights
        self.use_symmetries = use_symmetries
        self.use_time_reversal = use_time_reversal
        self.chksymbreak = chksymbreak

        abivars = {}

        if mode == KSamplingModes.monkhorst:
            assert num_kpts == 0
            ngkpt = np.reshape(kpts, 3)
            shiftk = np.reshape(kpt_shifts, (-1, 3))

            # Map the two symmetry flags onto the abinit kptopt convention.
            if use_symmetries and use_time_reversal: kptopt = 1
            if not use_symmetries and use_time_reversal: kptopt = 2
            if not use_symmetries and not use_time_reversal: kptopt = 3
            if use_symmetries and not use_time_reversal: kptopt = 4

            abivars.update({
                "ngkpt": ngkpt,
                "shiftk": shiftk,
                "nshiftk": len(shiftk),
                "kptopt": kptopt,
                "chksymbreak": chksymbreak,
            })

        elif mode == KSamplingModes.path:
            if num_kpts <= 0:
                raise ValueError("For Path mode, num_kpts must be specified and >0")

            kptbounds = np.reshape(kpts, (-1, 3))
            #print("in path with kptbound: %s " % kptbounds)

            abivars.update({
                "ndivsm": num_kpts,
                "kptbounds": kptbounds,
                # Negative kptopt means band-structure path with |kptopt| segments.
                "kptopt": -len(kptbounds)+1,
            })

        elif mode == KSamplingModes.automatic:
            kpts = np.reshape(kpts, (-1, 3))
            if len(kpts) != num_kpts:
                raise ValueError("For Automatic mode, num_kpts must be specified.")

            abivars.update({
                "kptopt": 0,
                "kpt": kpts,
                "nkpt": num_kpts,
                "kptnrm": np.ones(num_kpts),
                "wtk": kpts_weights,  # for iscf/=-2, wtk.
                "chksymbreak": chksymbreak,
            })

        else:
            raise ValueError("Unknown mode %s" % mode)

        self.abivars = abivars
        #self.abivars["#comment"] = comment

    @property
    def is_homogeneous(self):
        """True if we have a homogeneous sampling of the BZ (i.e. not a band-structure path)."""
        # BUG FIX: self.mode is a KSamplingModes member, so the old test
        # `self.mode not in ["path"]` compared an enum with a string and was
        # always True. Compare against the enum member instead.
        return self.mode != KSamplingModes.path

    @classmethod
    def gamma_only(cls):
        """Gamma-only sampling"""
        return cls(kpt_shifts=(0.0,0.0,0.0), comment="Gamma-only sampling")

    @classmethod
    def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):
        """
        Convenient static constructor for an automatic Gamma centered Kpoint grid.

        Args:
            kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
            use_symmetries: False if spatial symmetries should not be used
                to reduce the number of independent k-points.
            use_time_reversal: False if time-reversal symmetry should not be used
                to reduce the number of independent k-points.

        Returns:
            :class:`KSampling` object.
        """
        return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0),
                   use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
                   comment="gamma-centered mode")

    @classmethod
    def monkhorst(cls, ngkpt, shiftk=(0.5, 0.5, 0.5), chksymbreak=None, use_symmetries=True,
                  use_time_reversal=True, comment=None):
        """
        Convenient static constructor for a Monkhorst-Pack mesh.

        Args:
            ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
            shiftk: Shift to be applied to the kpoints.
            use_symmetries: Use spatial symmetries to reduce the number of k-points.
            use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.

        Returns:
            :class:`KSampling` object.
        """
        return cls(
            kpts=[ngkpt], kpt_shifts=shiftk,
            use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,
            comment=comment if comment else "Monkhorst-Pack scheme with user-specified shiftk")

    @classmethod
    def monkhorst_automatic(cls, structure, ngkpt,
                            use_symmetries=True, use_time_reversal=True, chksymbreak=None, comment=None):
        """
        Convenient static constructor for an automatic Monkhorst-Pack mesh.

        Args:
            structure: :class:`Structure` object.
            ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
            use_symmetries: Use spatial symmetries to reduce the number of k-points.
            use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.

        Returns:
            :class:`KSampling` object.
        """
        sg = SpacegroupAnalyzer(structure)
        #sg.get_crystal_system()
        #sg.get_point_group_symbol()
        # TODO: lattice-dependent choice of the shift (see commented branches below).
        nshiftk = 1
        shiftk = 3*(0.5,)  # this is the default
        #if lattice.ishexagonal:
        #elif lattice.isbcc
        #elif lattice.isfcc

        return cls.monkhorst(
            ngkpt, shiftk=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
            chksymbreak=chksymbreak, comment=comment if comment else "Automatic Monkhorst-Pack scheme")

    @classmethod
    def _path(cls, ndivsm, structure=None, kpath_bounds=None, comment=None):
        """
        Static constructor for path in k-space.

        Args:
            structure: :class:`Structure` object.
            kpath_bounds: List with the reduced coordinates of the k-points defining the path.
            ndivsm: Number of division for the smallest segment.
            comment: Comment string.

        Returns:
            :class:`KSampling` object.
        """
        if kpath_bounds is None:
            # Compute the boundaries from the input structure.
            from pymatgen.symmetry.bandstructure import HighSymmKpath
            sp = HighSymmKpath(structure)

            # Flat the array since "path" is a a list of lists!
            kpath_labels = []
            for labels in sp.kpath["path"]:
                kpath_labels.extend(labels)

            kpath_bounds = []
            for label in kpath_labels:
                red_coord = sp.kpath["kpoints"][label]
                #print("label %s, red_coord %s" % (label, red_coord))
                kpath_bounds.append(red_coord)

        return cls(mode=KSamplingModes.path, num_kpts=ndivsm, kpts=kpath_bounds,
                   comment=comment if comment else "K-Path scheme")

    @classmethod
    def path_from_structure(cls, ndivsm, structure):
        """See _path for the meaning of the variables"""
        return cls._path(ndivsm, structure=structure, comment="K-path generated automatically from structure")

    @classmethod
    def explicit_path(cls, ndivsm, kpath_bounds):
        """See _path for the meaning of the variables"""
        return cls._path(ndivsm, kpath_bounds=kpath_bounds, comment="Explicit K-path")

    @classmethod
    def automatic_density(cls, structure, kppa, chksymbreak=None, use_symmetries=True, use_time_reversal=True,
                          shifts=(0.5, 0.5, 0.5)):
        """
        Returns an automatic Kpoint object based on a structure and a kpoint
        density. Uses Gamma centered meshes for hexagonal cells and Monkhorst-Pack grids otherwise.

        Algorithm:
            Uses a simple approach scaling the number of divisions along each
            reciprocal lattice vector proportional to its length.

        Args:
            structure: Input structure
            kppa: Grid density
        """
        lattice = structure.lattice
        lengths = lattice.abc
        shifts = np.reshape(shifts, (-1, 3))

        # Scale divisions so that num_div[0]*num_div[1]*num_div[2] ~ kppa / natom.
        ngrid = kppa / structure.num_sites / len(shifts)
        mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3.)
        num_div = [int(round(1.0 / lengths[i] * mult)) for i in range(3)]

        # ensure that num_div[i] > 0
        num_div = [i if i > 0 else 1 for i in num_div]

        angles = lattice.angles
        hex_angle_tol = 5      # in degrees
        hex_length_tol = 0.01  # in angstroms

        right_angles = [i for i in range(3) if abs(angles[i] - 90) < hex_angle_tol]
        hex_angles = [i for i in range(3)
                      if abs(angles[i] - 60) < hex_angle_tol or
                      abs(angles[i] - 120) < hex_angle_tol]

        is_hexagonal = (len(right_angles) == 2 and len(hex_angles) == 1
                        and abs(lengths[right_angles[0]] -
                                lengths[right_angles[1]]) < hex_length_tol)

        #style = KSamplingModes.gamma
        #if not is_hexagonal:
        #    num_div = [i + i % 2 for i in num_div]
        #    style = KSamplingModes.monkhorst

        # BUG FIX: typo "pymatge" in the generated comment string.
        comment = "pymatgen.io.abinit generated KPOINTS with grid density = " + "{} / atom".format(kppa)

        return cls(
            mode="monkhorst", num_kpts=0, kpts=[num_div], kpt_shifts=shifts,
            use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, chksymbreak=chksymbreak,
            comment=comment)

    def to_abivars(self):
        """Returns a dictionary with the abinit variables."""
        return self.abivars

    def as_dict(self):
        """json friendly dict representation (kpts are encoded via MontyEncoder)."""
        enc = MontyEncoder()
        return {'mode': self.mode.name, 'comment': self.comment,
                'num_kpts': self.num_kpts,
                'kpts': enc.default(np.array(self.kpts)), 'kpt_shifts': self.kpt_shifts,
                'kpts_weights': self.kpts_weights, 'use_symmetries': self.use_symmetries,
                'use_time_reversal': self.use_time_reversal, 'chksymbreak': self.chksymbreak,
                '@module': self.__class__.__module__, '@class': self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        d = d.copy()
        d.pop('@module', None)
        d.pop('@class', None)
        dec = MontyDecoder()
        d['kpts'] = dec.process_decoded(d['kpts'])
        return cls(**d)
class Constraints(AbivarAble):
    """This object defines the constraints for structural relaxation"""
    def to_abivars(self):
        # Placeholder: the variables enforcing the constraints are not
        # implemented yet (see RelaxationMethod.to_abivars, which raises
        # before calling this method).
        raise NotImplementedError("")
class RelaxationMethod(AbivarAble, MSONable):
    """
    This object stores the variables for the (constrained) structural optimization
    ionmov and optcell specify the type of relaxation.
    The other variables are optional and their use depend on ionmov and optcell.
    A None value indicates that we use abinit default. Default values can
    be modified by passing them to the constructor.
    The set of variables are constructed in to_abivars depending on ionmov and optcell.
    """
    _default_vars = {
        "ionmov" : MANDATORY,
        "optcell" : MANDATORY,
        "ntime" : 80,
        "dilatmx" : 1.05,
        "ecutsm" : 0.5,
        "strfact" : None,
        "tolmxf" : None,
        "strtarget" : None,
        "atoms_constraints": {}, # Constraints are stored in a dictionary. {} means if no constraint is enforced.
    }

    IONMOV_DEFAULT = 3
    OPTCELL_DEFAULT = 2

    def __init__(self, *args, **kwargs):
        # Initialize abivars with a *copy* of the default values.
        # BUG FIX: the old code aliased the class-level _default_vars dict and
        # then updated it in place, mutating the defaults shared by all instances.
        self.abivars = dict(self._default_vars)

        # Overwrite the keys with the args and kwargs passed to constructor.
        self.abivars.update(*args, **kwargs)

        self.abivars = AttrDict(self.abivars)

        for k in self.abivars:
            if k not in self._default_vars:
                raise ValueError("%s: No default value has been provided for "
                                 "key %s" % (self.__class__.__name__, k))

        # BUG FIX: the old check tested the *key* against MANDATORY so it could
        # never fire; test the stored value instead.
        for k in self.abivars:
            if self.abivars[k] is MANDATORY:
                raise ValueError("%s: No default value has been provided for the mandatory key %s" %
                                 (self.__class__.__name__, k))

    @classmethod
    def atoms_only(cls, atoms_constraints=None):
        """Relax atomic positions, keeping the unit cell fixed (optcell 0)."""
        if atoms_constraints is None:
            return cls(ionmov=cls.IONMOV_DEFAULT, optcell=0)
        else:
            return cls(ionmov=cls.IONMOV_DEFAULT, optcell=0, atoms_constraints=atoms_constraints)

    @classmethod
    def atoms_and_cell(cls, atoms_constraints=None):
        """Relax both atomic positions and unit cell."""
        if atoms_constraints is None:
            return cls(ionmov=cls.IONMOV_DEFAULT, optcell=cls.OPTCELL_DEFAULT)
        else:
            return cls(ionmov=cls.IONMOV_DEFAULT, optcell=cls.OPTCELL_DEFAULT, atoms_constraints=atoms_constraints)

    @property
    def move_atoms(self):
        """True if atoms must be moved."""
        return self.abivars.ionmov != 0

    @property
    def move_cell(self):
        """True if lattice parameters must be optimized."""
        return self.abivars.optcell != 0

    def to_abivars(self):
        """Returns a dictionary with the abinit variables"""
        # These variables are always present.
        out_vars = {
            "ionmov" : self.abivars.ionmov,
            "optcell": self.abivars.optcell,
            "ntime" : self.abivars.ntime,
        }

        # Atom relaxation.
        if self.move_atoms:
            out_vars.update({
                "tolmxf": self.abivars.tolmxf,
            })

        if self.abivars.atoms_constraints:
            # Add input variables for constrained relaxation.
            raise NotImplementedError("")
            out_vars.update(self.abivars.atoms_constraints.to_abivars())

        # Cell relaxation.
        if self.move_cell:
            out_vars.update({
                "dilatmx" : self.abivars.dilatmx,
                "ecutsm" : self.abivars.ecutsm,
                "strfact" : self.abivars.strfact,
                "strtarget": self.abivars.strtarget,
            })

        return out_vars

    def as_dict(self):
        """json friendly dict representation."""
        # BUG FIX: the old code serialized the class-level defaults (including
        # the MANDATORY sentinels) instead of the instance state, so
        # from_dict(as_dict()) could not round-trip the object.
        d = dict(self.abivars)
        d['@module'] = self.__class__.__module__
        d['@class'] = self.__class__.__name__
        return d

    @classmethod
    def from_dict(cls, d):
        """Reconstruct the object from its dict representation."""
        d = d.copy()
        d.pop('@module', None)
        d.pop('@class', None)
        return cls(**d)
class PPModelModes(Enum):
    """Plasmon-pole models; the value is the abinit ppmodel input variable."""
    noppmodel = 0
    godby = 1
    hybersten = 2
    linden = 3
    farid = 4
class PPModel(AbivarAble, MSONable):
    """
    Parameters defining the plasmon-pole technique.
    The common way to instantiate a PPModel object is via the class method PPModel.as_ppmodel(string)
    """
    @classmethod
    def as_ppmodel(cls, obj):
        """
        Constructs an instance of PPModel from obj.

        Accepts obj in the form:
            * PPmodel instance
            * string. e.g "godby:12.3 eV", "linden".
        """
        if isinstance(obj, cls):
            return obj

        # obj is a string; an optional plasmon frequency follows the colon.
        if ":" in obj:
            mode, plasmon_freq = obj.split(":")
            try:
                plasmon_freq = float(plasmon_freq)
            except ValueError:
                # The frequency carries an explicit unit, e.g. "12.3 eV".
                plasmon_freq, unit = plasmon_freq.split()
                plasmon_freq = units.Energy(float(plasmon_freq), unit).to("Ha")
        else:
            mode, plasmon_freq = obj, None

        return cls(mode=mode, plasmon_freq=plasmon_freq)

    def __init__(self, mode="godby", plasmon_freq=None):
        # Accept either a PPModelModes member or its string name.
        if isinstance(mode, six.string_types):
            mode = PPModelModes[mode]
        self.mode = mode
        self.plasmon_freq = plasmon_freq

    def __eq__(self, other):
        # Guard-clause style: bail out as soon as a difference is found.
        if other is None:
            return False
        if self.mode != other.mode:
            return False
        if self.plasmon_freq is None:
            return other.plasmon_freq is None
        return np.allclose(self.plasmon_freq, other.plasmon_freq)

    def __ne__(self, other):
        return not self == other

    def __bool__(self):
        return self.mode != PPModelModes.noppmodel

    # py2 old version
    __nonzero__ = __bool__

    def __repr__(self):
        return "<%s at %s, mode = %s>" % (self.__class__.__name__, id(self),
                                          str(self.mode))

    def to_abivars(self):
        """Return the abinit variables, or an empty dict if no model is used."""
        if not self:
            return {}
        return {"ppmodel": self.mode.value,
                "ppmfrq": self.plasmon_freq}

    @classmethod
    def get_noppmodel(cls):
        """Build a PPModel instance that disables the plasmon-pole technique."""
        return cls(mode="noppmodel", plasmon_freq=None)

    def as_dict(self):
        """json friendly dict representation."""
        return {"mode": self.mode.name, "plasmon_freq": self.plasmon_freq,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @staticmethod
    def from_dict(d):
        """Reconstruct the object from its dict representation."""
        return PPModel(mode=d["mode"], plasmon_freq=d["plasmon_freq"])
class HilbertTransform(AbivarAble):
    """
    Parameters for the Hilbert-transform method (Screening code)
    i.e. the parameters defining the frequency mesh used for the spectral function
    and the frequency mesh used for the polarizability
    """
    def __init__(self, nomegasf, domegasf=None, spmeth=1, nfreqre=None, freqremax=None, nfreqim=None, freqremin=None):
        """
        Args:
            nomegasf: Number of points for sampling the spectral function along the real axis.
            domegasf: Step in Ha for the linear mesh used for the spectral function.
            spmeth: Algorith for the representation of the delta function.
            nfreqre: Number of points along the real axis (linear mesh).
            freqremax: Maximum frequency for W along the real axis (in hartree).
            nfreqim: Number of point along the imaginary axis (Gauss-Legendre mesh).
            freqremin: Minimum frequency for W along the real axis (in hartree).
        """
        # Spectral-function mesh.
        self.nomegasf, self.domegasf, self.spmeth = nomegasf, domegasf, spmeth

        # Mesh for the contour-deformation method used for the integration of the self-energy.
        self.nfreqre, self.freqremax = nfreqre, freqremax
        self.freqremin, self.nfreqim = freqremin, nfreqim

    def to_abivars(self):
        """Returns a dictionary with the abinit variables"""
        return dict(
            # Spectral function
            nomegasf=self.nomegasf,
            domegasf=self.domegasf,
            spmeth=self.spmeth,
            # Frequency mesh for the polarizability
            nfreqre=self.nfreqre,
            freqremax=self.freqremax,
            nfreqim=self.nfreqim,
            freqremin=self.freqremin,
        )
class ModelDielectricFunction(AbivarAble):
    """Model dielectric function used for BSE calculation"""

    def __init__(self, mdf_epsinf):
        """
        Args:
            mdf_epsinf: value of the abinit ``mdf_epsinf`` input variable.
        """
        self.mdf_epsinf = mdf_epsinf

    def to_abivars(self):
        """Returns a dictionary with the abinit variables."""
        return dict(mdf_epsinf=self.mdf_epsinf)
##########################################################################################
################################# WORK IN PROGRESS ######################################
##########################################################################################
class Screening(AbivarAble):
    """
    This object defines the parameters used for the
    computation of the screening function.
    """
    # Approximations used for W
    _WTYPES = {
        "RPA": 0,
    }

    # Self-consistency modes
    _SC_MODES = {
        "one_shot" : 0,
        "energy_only" : 1,
        "wavefunctions": 2,
    }

    def __init__(self, ecuteps, nband, w_type="RPA", sc_mode="one_shot",
                 hilbert=None, ecutwfn=None, inclvkb=2):
        """
        Args:
            ecuteps: Cutoff energy for the screening (Ha units).
            nband Number of bands for the Green's function
            w_type: Screening type
            sc_mode: Self-consistency mode.
            hilbert: Instance of :class:`HilbertTransform` defining the parameters for the Hilber transform method.
            ecutwfn: Cutoff energy for the wavefunctions (Default: ecutwfn == ecut).
            inclvkb: Option for the treatment of the dipole matrix elements (NC pseudos).
        """
        if w_type not in self._WTYPES:
            raise ValueError("W_TYPE: %s is not supported" % w_type)

        if sc_mode not in self._SC_MODES:
            raise ValueError("Self-consistecy mode %s is not supported" % sc_mode)

        self.ecuteps = ecuteps
        self.nband = nband
        self.w_type = w_type
        self.sc_mode = sc_mode
        self.ecutwfn = ecutwfn
        self.inclvkb = inclvkb

        if hilbert is not None:
            raise NotImplementedError("Hilber transform not coded yet")
        self.hilbert = hilbert

        # Default values (equivalent to those used in Abinit8)
        self.gwpara = 2
        self.awtr = 1
        self.symchi = 1
        self.optdriver = 3

    @property
    def use_hilbert(self):
        """True if the Hilbert-transform method has been configured."""
        # BUG FIX: __init__ always assigns self.hilbert (possibly None), so the
        # old implementation `hasattr(self, "hilbert")` was always True and
        # to_abivars crashed calling None.to_abivars(). Test the value instead.
        return self.hilbert is not None

    #@property
    #def gwcalctyp(self):
    #    "Return the value of the gwcalctyp input variable"
    #    dig0 = str(self._SIGMA_TYPES[self.type])
    #    dig1 = str(self._SC_MODES[self.sc_mode]
    #    return dig1.strip() + dig0.strip()

    def to_abivars(self):
        """Returns a dictionary with the abinit variables"""
        abivars = {
            "ecuteps" : self.ecuteps,
            "ecutwfn" : self.ecutwfn,
            "inclvkb" : self.inclvkb,
            "gwpara" : self.gwpara,
            "awtr" : self.awtr,
            "symchi" : self.symchi,
            "nband" : self.nband,
            #"gwcalctyp": self.gwcalctyp,
            #"fftgw" : self.fftgw,
            "optdriver" : self.optdriver,
        }

        # Variables for the Hilber transform.
        if self.use_hilbert:
            abivars.update(self.hilbert.to_abivars())

        return abivars
class SelfEnergy(AbivarAble):
    """
    This object defines the parameters used for the computation of the self-energy.
    """
    # Second digit of gwcalctyp: approximation used for the self-energy.
    _SIGMA_TYPES = {
        "gw" : 0,
        "hartree_fock": 5,
        "sex" : 6,
        "cohsex" : 7,
        "model_gw_ppm": 8,
        "model_gw_cd" : 9,
    }

    # First digit of gwcalctyp: self-consistency mode.
    _SC_MODES = {
        "one_shot" : 0,
        "energy_only" : 1,
        "wavefunctions": 2,
    }

    def __init__(self, se_type, sc_mode, nband, ecutsigx, screening,
                 gw_qprange=1, ppmodel=None, ecuteps=None, ecutwfn=None, gwpara=2):
        """
        Args:
            se_type: Type of self-energy (str)
            sc_mode: Self-consistency mode.
            nband: Number of bands for the Green's function
            ecutsigx: Cutoff energy for the exchange part of the self-energy (Ha units).
            screening: :class:`Screening` instance.
            gw_qprange: Option for the automatic selection of k-points and bands for GW corrections.
                See Abinit docs for more detail. The default value makes the code compute the
                QP energies for all the point in the IBZ and one band above and one band below the Fermi level.
            ppmodel: :class:`PPModel` instance with the parameters used for the plasmon-pole technique.
            ecuteps: Cutoff energy for the screening (Ha units). Defaults to screening.ecuteps.
            ecutwfn: Cutoff energy for the wavefunctions (Default: ecutwfn == ecut).
            gwpara: Abinit parallelization option for the GW run.
        """
        if se_type not in self._SIGMA_TYPES:
            raise ValueError("SIGMA_TYPE: %s is not supported" % se_type)

        if sc_mode not in self._SC_MODES:
            raise ValueError("Self-consistecy mode %s is not supported" % sc_mode)

        self.type = se_type
        self.sc_mode = sc_mode
        self.nband = nband
        self.ecutsigx = ecutsigx
        self.screening = screening
        self.gw_qprange = gw_qprange
        self.gwpara = gwpara

        if ppmodel is not None:
            # A plasmon-pole model cannot be combined with the Hilbert-transform
            # screening. Note that self.ppmodel is created only in this branch:
            # use_ppmodel relies on hasattr to detect its presence.
            assert not screening.use_hilbert
            self.ppmodel = PPModel.as_ppmodel(ppmodel)

        # Fall back to the cutoff of the screening if ecuteps is not given.
        self.ecuteps = ecuteps if ecuteps is not None else screening.ecuteps
        self.ecutwfn = ecutwfn
        self.optdriver = 4

        #band_mode in ["gap", "full"]

        #if isinstance(kptgw, str) and kptgw == "all":
        #    self.kptgw = None
        #    self.nkptgw = None
        #else:
        #    self.kptgw = np.reshape(kptgw, (-1,3))
        #    self.nkptgw = len(self.kptgw)

        #if bdgw is None:
        #    raise ValueError("bdgw must be specified")

        #if isinstance(bdgw, str):
        #    # TODO add new variable in Abinit so that we can specify
        #    # an energy interval around the KS gap.
        #    homo = float(nele) / 2.0
        #    #self.bdgw =

        #else:
        #    self.bdgw = np.reshape(bdgw, (-1,2))

        #self.freq_int = freq_int

    @property
    def use_ppmodel(self):
        """True if we are using the plasmon-pole approximation."""
        # The attribute exists only when a ppmodel was passed to __init__.
        return hasattr(self, "ppmodel")

    @property
    def gwcalctyp(self):
        """Returns the value of the gwcalctyp input variable."""
        # Two-digit code: first digit from _SC_MODES, second from _SIGMA_TYPES.
        dig0 = str(self._SIGMA_TYPES[self.type])
        dig1 = str(self._SC_MODES[self.sc_mode])
        return dig1.strip() + dig0.strip()

    @property
    def symsigma(self):
        """1 if symmetries can be used to reduce the number of q-points."""
        return 1 if self.sc_mode == "one_shot" else 0

    def to_abivars(self):
        """Returns a dictionary with the abinit variables."""
        abivars = dict(
            gwcalctyp=self.gwcalctyp,
            ecuteps=self.ecuteps,
            ecutsigx=self.ecutsigx,
            symsigma=self.symsigma,
            gw_qprange=self.gw_qprange,
            gwpara=self.gwpara,
            optdriver=self.optdriver,
            nband=self.nband
            #"ecutwfn" : self.ecutwfn,
            #"kptgw" : self.kptgw,
            #"nkptgw" : self.nkptgw,
            #"bdgw" : self.bdgw,
        )

        # FIXME: problem with the spin
        #assert len(self.bdgw) == self.nkptgw

        # ppmodel variables
        if self.use_ppmodel:
            abivars.update(self.ppmodel.to_abivars())

        return abivars
class ExcHamiltonian(AbivarAble):
    """This object contains the parameters for the solution of the Bethe-Salpeter equation."""

    # Types of excitonic Hamiltonian.
    _EXC_TYPES = {
        "TDA": 0,       # Tamm-Dancoff approximation.
        "coupling": 1,  # Calculation with coupling.
    }

    # Algorithms used to compute the macroscopic dielectric function
    # and/or the exciton wavefunctions.
    _ALGO2VAR = {
        "direct_diago": 1,
        "haydock": 2,
        "cg": 3,
    }

    # Options specifying the treatment of the Coulomb term.
    _COULOMB_MODES = [
        "diago",
        "full",
        "model_df",
    ]

    def __init__(self, bs_loband, nband, mbpt_sciss, coulomb_mode, ecuteps, spin_mode="polarized", mdf_epsinf=None,
                 exc_type="TDA", algo="haydock", with_lf=True, bs_freq_mesh=None, zcut=None, **kwargs):
        """
        Args:
            bs_loband: Lowest band index (Fortran convention) used in the e-h basis set.
                Can be scalar or array of shape (nsppol,). Must be >= 1 and < nband.
            nband: Max band index used in the e-h basis set.
            mbpt_sciss: Scissors energy in Hartree.
            coulomb_mode: Treatment of the Coulomb term. One of `_COULOMB_MODES`.
            ecuteps: Cutoff energy for W in Hartree.
            mdf_epsinf: Macroscopic dielectric function :math:`\\epsilon_\\inf` used in
                the model dielectric function.
            exc_type: Approximation used for the BSE Hamiltonian. One of `_EXC_TYPES`.
            algo: Algorithm for the solution. One of `_ALGO2VAR`.
            with_lf: True if local field effects are included <==> exchange term is included
            bs_freq_mesh: Frequency mesh for the macroscopic dielectric function (start, stop, step) in Ha.
            zcut: Broadening parameter in Ha.
            **kwargs:
                Extra keywords passed verbatim to the Abinit input.
        """
        spin_mode = SpinMode.as_spinmode(spin_mode)

        # We want an array bs_loband(nsppol): reshape the given sequence or
        # replicate the scalar for each spin channel.
        try:
            bs_loband = np.reshape(bs_loband, spin_mode.nsppol)
        except ValueError:
            bs_loband = np.array(spin_mode.nsppol * [int(bs_loband)])
        self.bs_loband = bs_loband

        self.nband = nband
        self.mbpt_sciss = mbpt_sciss
        self.coulomb_mode = coulomb_mode
        assert coulomb_mode in self._COULOMB_MODES
        self.ecuteps = ecuteps
        self.mdf_epsinf = mdf_epsinf
        self.exc_type = exc_type
        assert exc_type in self._EXC_TYPES
        self.algo = algo
        assert algo in self._ALGO2VAR
        self.with_lf = with_lf

        # if bs_freq_mesh is not given, abinit will select its own mesh.
        self.bs_freq_mesh = np.array(bs_freq_mesh) if bs_freq_mesh is not None else bs_freq_mesh
        self.zcut = zcut
        self.optdriver = 99

        # Extra options.
        self.kwargs = kwargs
        #if "chksymbreak" not in self.kwargs:
        #    self.kwargs["chksymbreak"] = 0

        # Consistency check.
        # BUGFIX: the original condition was `bs_loband < 0`, which accepted the
        # invalid value 0 even though band indices are 1-based (Fortran
        # convention) and the error message already said "<= 0".
        if any(bs_loband <= 0):
            raise ValueError("bs_loband <= 0 while it is %s" % bs_loband)
        if any(bs_loband >= nband):
            raise ValueError("bs_loband (%s) >= nband (%s)" % (bs_loband, nband))

    @property
    def inclvkb(self):
        """Treatment of the dipole matrix element (NC pseudos, default is 2)"""
        return self.kwargs.get("inclvkb", 2)

    @property
    def use_haydock(self):
        """True if we are using the Haydock iterative technique."""
        return self.algo == "haydock"

    @property
    def use_cg(self):
        """True if we are using the conjugate gradient method."""
        return self.algo == "cg"

    @property
    def use_direct_diago(self):
        """True if we are performing the direct diagonalization of the BSE Hamiltonian."""
        return self.algo == "direct_diago"

    def to_abivars(self):
        """Returns a dictionary with the abinit variables."""
        abivars = dict(
            bs_calctype=1,
            bs_loband=self.bs_loband,
            #nband=self.nband,
            mbpt_sciss=self.mbpt_sciss,
            ecuteps=self.ecuteps,
            bs_algorithm=self._ALGO2VAR[self.algo],
            bs_coulomb_term=21,
            mdf_epsinf=self.mdf_epsinf,
            bs_exchange_term=1 if self.with_lf else 0,
            inclvkb=self.inclvkb,
            zcut=self.zcut,
            bs_freq_mesh=self.bs_freq_mesh,
            bs_coupling=self._EXC_TYPES[self.exc_type],
            optdriver=self.optdriver,
        )

        if self.use_haydock:
            # FIXME
            abivars.update(
                bs_haydock_niter=100,      # No. of iterations for Haydock.
                bs_hayd_term=0,            # No terminator.
                bs_haydock_tol=[0.05, 0],  # Stopping criteria.
            )
        elif self.use_direct_diago:
            raise NotImplementedError("")
        elif self.use_cg:
            raise NotImplementedError("")
        else:
            raise ValueError("Unknown algorithm for EXC: %s" % self.algo)

        # Add extra kwargs supplied by the caller.
        abivars.update(self.kwargs)
        return abivars
|
|
"""Entity for Zigbee Home Automation."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable
import functools
import logging
from typing import Any
from homeassistant.const import ATTR_NAME
from homeassistant.core import CALLBACK_TYPE, Event, callback
from homeassistant.helpers import entity
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
from .core.const import (
ATTR_MANUFACTURER,
ATTR_MODEL,
DATA_ZHA,
DATA_ZHA_BRIDGE_ID,
DOMAIN,
SIGNAL_GROUP_ENTITY_REMOVED,
SIGNAL_GROUP_MEMBERSHIP_CHANGE,
SIGNAL_REMOVE,
)
from .core.helpers import LogMixin
from .core.typing import CALLABLE_T, ChannelType, ZhaDeviceType
_LOGGER = logging.getLogger(__name__)
ENTITY_SUFFIX = "entity_suffix"
UPDATE_GROUP_FROM_CHILD_DELAY = 0.5
class BaseZhaEntity(LogMixin, entity.Entity):
    """Shared foundation for every ZHA entity (device-backed and group)."""

    unique_id_suffix: str | None = None

    def __init__(self, unique_id: str, zha_device: ZhaDeviceType, **kwargs) -> None:
        """Set up the state common to all ZHA entities."""
        self._name: str = ""
        self._force_update: bool = False
        self._should_poll: bool = False
        self._unique_id: str = unique_id
        # Disambiguate entities that share a channel/cluster id.
        if self.unique_id_suffix:
            self._unique_id += f"-{self.unique_id_suffix}"
        self._state: Any = None
        self._extra_state_attributes: dict[str, Any] = {}
        self._zha_device: ZhaDeviceType = zha_device
        self._unsubs: list[CALLABLE_T] = []
        self.remove_future: Awaitable[None] = None

    @property
    def name(self) -> str:
        """Default name of this entity."""
        return self._name

    @property
    def unique_id(self) -> str:
        """Unique ID of this entity."""
        return self._unique_id

    @property
    def zha_device(self) -> ZhaDeviceType:
        """ZHA device this entity is attached to."""
        return self._zha_device

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Device specific state attributes."""
        return self._extra_state_attributes

    @property
    def force_update(self) -> bool:
        """Whether every update should be written, even if the state is unchanged."""
        return self._force_update

    @property
    def should_poll(self) -> bool:
        """Whether Home Assistant should poll this entity for state."""
        return self._should_poll

    @property
    def device_info(self) -> entity.DeviceInfo:
        """Device registry description for the backing device."""
        info = self._zha_device.device_info
        ieee = info["ieee"]
        bridge_id = self.hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]
        return entity.DeviceInfo(
            connections={(CONNECTION_ZIGBEE, ieee)},
            identifiers={(DOMAIN, ieee)},
            manufacturer=info[ATTR_MANUFACTURER],
            model=info[ATTR_MODEL],
            name=info[ATTR_NAME],
            via_device=(DOMAIN, bridge_id),
        )

    @callback
    def async_state_changed(self) -> None:
        """Publish the current entity state to Home Assistant."""
        self.async_write_ha_state()

    @callback
    def async_update_state_attribute(self, key: str, value: Any) -> None:
        """Update a single device state attribute and publish the state."""
        self._extra_state_attributes.update({key: value})
        self.async_write_ha_state()

    @callback
    def async_set_state(self, attr_id: int, attr_name: str, value: Any) -> None:
        """Set the entity state (no-op in this base class)."""

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect entity object when removed."""
        # Call and drop every stored unsubscribe callback, in order.
        while self._unsubs:
            self._unsubs.pop(0)()

    @callback
    def async_accept_signal(
        self, channel: ChannelType, signal: str, func: CALLABLE_T, signal_override=False
    ):
        """Subscribe to a dispatcher signal, scoped to the channel unless overridden."""
        if signal_override:
            source = signal
        else:
            source = f"{channel.unique_id}_{signal}"
        self._unsubs.append(async_dispatcher_connect(self.hass, source, func))

    def log(self, level: int, msg: str, *args):
        """Log a message, prefixing it with this entity's id."""
        _LOGGER.log(level, f"%s: {msg}", self.entity_id, *args)
class ZhaEntity(BaseZhaEntity, RestoreEntity):
    """A base class for non group ZHA entities."""

    def __init_subclass__(cls, id_suffix: str | None = None, **kwargs) -> None:
        """Initialize subclass.

        :param id_suffix: suffix to add to the unique_id of the entity. Used for multi
        entities using the same channel/cluster id for the entity.
        """
        super().__init_subclass__(**kwargs)
        if id_suffix:
            cls.unique_id_suffix = id_suffix

    def __init__(
        self,
        unique_id: str,
        zha_device: ZhaDeviceType,
        channels: list[ChannelType],
        **kwargs,
    ) -> None:
        """Init ZHA entity."""
        super().__init__(unique_id, zha_device, **kwargs)
        # First four bytes of the device IEEE address, hex-encoded, keep names
        # distinct across devices with identical model names.
        ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
        ch_names = ", ".join(sorted(ch.name for ch in channels))
        self._name: str = f"{zha_device.name} {ieeetail} {ch_names}"
        if self.unique_id_suffix:
            self._name += f" {self.unique_id_suffix}"
        # Channels indexed by name for quick lookup during updates.
        self.cluster_channels: dict[str, ChannelType] = {}
        for channel in channels:
            self.cluster_channels[channel.name] = channel

    @classmethod
    def create_entity(
        cls,
        unique_id: str,
        zha_device: ZhaDeviceType,
        channels: list[ChannelType],
        **kwargs,
    ) -> ZhaEntity | None:
        """Entity Factory.

        Return entity if it is a supported configuration, otherwise return None
        """
        return cls(unique_id, zha_device, channels, **kwargs)

    @property
    def available(self) -> bool:
        """Return entity availability."""
        return self._zha_device.available

    async def async_added_to_hass(self) -> None:
        """Run when about to be added to hass."""
        self.remove_future = asyncio.Future()
        # Remove this entity when the backing ZHA device is removed.
        self.async_accept_signal(
            None,
            f"{SIGNAL_REMOVE}_{self.zha_device.ieee}",
            functools.partial(self.async_remove, force_remove=True),
            signal_override=True,
        )

        if not self.zha_device.is_mains_powered:
            # mains powered devices will get real time state
            if last_state := await self.async_get_last_state():
                self.async_restore_last_state(last_state)

        # Re-publish state whenever the device availability changes.
        self.async_accept_signal(
            None,
            f"{self.zha_device.available_signal}_entity",
            self.async_state_changed,
            signal_override=True,
        )
        self._zha_device.gateway.register_entity_reference(
            self._zha_device.ieee,
            self.entity_id,
            self._zha_device,
            self.cluster_channels,
            self.device_info,
            self.remove_future,
        )

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect entity object when removed."""
        await super().async_will_remove_from_hass()
        self.zha_device.gateway.remove_entity_reference(self)
        # Resolve the future created in async_added_to_hass so waiters unblock.
        self.remove_future.set_result(True)

    @callback
    def async_restore_last_state(self, last_state) -> None:
        """Restore previous state."""

    async def async_update(self) -> None:
        """Retrieve latest state."""
        # Only channels that actually implement async_update are polled.
        tasks = [
            channel.async_update()
            for channel in self.cluster_channels.values()
            if hasattr(channel, "async_update")
        ]
        if tasks:
            await asyncio.gather(*tasks)
class ZhaGroupEntity(BaseZhaEntity):
    """A base class for ZHA group entities."""

    def __init__(
        self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
    ) -> None:
        """Initialize a light group."""
        super().__init__(unique_id, zha_device, **kwargs)
        self._available = False
        self._group = zha_device.gateway.groups.get(group_id)
        self._name = f"{self._group.name}_zha_group_0x{group_id:04x}"
        self._group_id: int = group_id
        # Entity ids of the member entities this group mirrors.
        self._entity_ids: list[str] = entity_ids
        self._async_unsub_state_changed: CALLBACK_TYPE | None = None
        self._handled_group_membership = False
        self._change_listener_debouncer: Debouncer | None = None

    @property
    def available(self) -> bool:
        """Return entity availability."""
        return self._available

    @classmethod
    def create_entity(
        cls, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
    ) -> ZhaGroupEntity | None:
        """Group Entity Factory.

        Return entity if it is a supported configuration, otherwise return None
        """
        return cls(entity_ids, unique_id, group_id, zha_device, **kwargs)

    async def _handle_group_membership_changed(self):
        """Handle group membership changed."""
        # Make sure we don't call remove twice as members are removed
        if self._handled_group_membership:
            return
        self._handled_group_membership = True
        await self.async_remove(force_remove=True)

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
        await super().async_added_to_hass()
        # Remove the group entity when the underlying group membership changes.
        self.async_accept_signal(
            None,
            f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{self._group_id:04x}",
            self._handle_group_membership_changed,
            signal_override=True,
        )

        # Debounce member updates so a burst of child changes yields one refresh.
        # NOTE(review): the entity itself is passed as the Debouncer's logger —
        # presumably LogMixin satisfies that interface; confirm.
        if self._change_listener_debouncer is None:
            self._change_listener_debouncer = Debouncer(
                self.hass,
                self,
                cooldown=UPDATE_GROUP_FROM_CHILD_DELAY,
                immediate=False,
                function=functools.partial(self.async_update_ha_state, True),
            )
        self._async_unsub_state_changed = async_track_state_change_event(
            self.hass, self._entity_ids, self.async_state_changed_listener
        )

        def send_removed_signal():
            # Notify listeners that this group entity is gone.
            async_dispatcher_send(
                self.hass, SIGNAL_GROUP_ENTITY_REMOVED, self._group_id
            )

        self.async_on_remove(send_removed_signal)

    @callback
    def async_state_changed_listener(self, event: Event):
        """Handle child updates."""
        # Delay to ensure that we get updates from all members before updating the group
        self.hass.create_task(self._change_listener_debouncer.async_call())

    async def async_will_remove_from_hass(self) -> None:
        """Handle removal from Home Assistant."""
        await super().async_will_remove_from_hass()
        if self._async_unsub_state_changed is not None:
            self._async_unsub_state_changed()
            self._async_unsub_state_changed = None

    async def async_update(self) -> None:
        """Update the state of the group entity."""
|
|
from datetime import datetime
from typing import Union, Dict, Optional, List # noqa: F401
import requests
from pykechain.defaults import API_EXTRA_PARAMS
from pykechain.enums import (
Multiplicity,
ScopeStatus,
SubprocessDisplayMode,
KEChainPages,
ScopeRoles,
ScopeMemberActions,
ScopeCategory,
)
from pykechain.exceptions import APIError, NotFoundError, IllegalArgumentError
from pykechain.models.activity import Activity
from pykechain.models.base import Base
from pykechain.models.context import Context
from pykechain.models.input_checks import (
check_text,
check_datetime,
check_enum,
check_list_of_text,
check_base,
check_type,
)
from pykechain.models.part import Part
from pykechain.models.property import Property
from pykechain.models.representations import BaseRepresentation
from pykechain.models.representations.component import RepresentationsComponent
from pykechain.models.service import Service, ServiceExecution
from pykechain.models.sidebar.sidebar_manager import SideBarManager
from pykechain.models.tags import TagsMixin
from pykechain.models.team import Team
from pykechain.utils import parse_datetime, find, Empty, clean_empty_values, empty
class Scope(Base, TagsMixin):
"""A virtual object representing a KE-chain scope.
    :ivar id: id of the scope
    :type id: uuid
    :ivar name: name of the scope
    :type name: basestring
    :ivar created_at: created datetime of the scope
    :type created_at: datetime
    :ivar updated_at: updated datetime of the scope
    :type updated_at: datetime
    :ivar description: description of the scope
    :type description: basestring
:ivar workflow_root: uuid of the workflow root object
:type workflow_root: uuid
:ivar status: status of the scope. One of :class:`pykechain.enums.ScopeStatus`
:type status: basestring
:ivar type: Type of the Scope. One of :class:`pykechain.enums.ScopeType` for WIM version 2
:type type: basestring
"""
def __init__(self, json: Dict, **kwargs) -> None:
"""Construct a scope from provided json data."""
super().__init__(json, **kwargs)
# for 'kechain2.core.wim <2.0.0'
self.process = json.get("process")
# for 'kechain2.core.wim >=2.0.0'
self.workflow_root = json.get("workflow_root_id")
self._workflow_root_process = None
self._catalog_root_process = None
self._app_root_process = None
self._product_root_model = None
self._product_root_instance = None
self._catalog_root_model = None
self._catalog_root_instance = None
self.ref = json.get("ref")
self.description = json.get("text")
self.status = json.get("status")
self.category = json.get("category")
self._tags = json.get("tags")
self.start_date = parse_datetime(json.get("start_date"))
self.due_date = parse_datetime(json.get("due_date"))
self._representations_container = RepresentationsComponent(
self,
self.options.get("representations", {}),
self._save_representations,
)
@property
def team(self) -> Optional[Team]:
"""Team to which the scope is assigned."""
team_dict = self._json_data.get("team_id_name")
if team_dict and team_dict.get("id"):
return self._client.team(pk=team_dict.get("id"))
else:
return None
    @property
    def options(self) -> Dict:
        """Options of the Scope.

        .. versionadded: 3.0
        """
        return self._json_data.get("scope_options")

    @options.setter
    def options(self, option_value):
        # Persisting new options goes through the generic scope edit endpoint.
        self.edit(options=option_value)
    def refresh(self, json=None, url=None, extra_params=None):
        """Refresh the object in place.

        NOTE(review): the ``url`` and ``extra_params`` arguments are accepted to
        keep the signature compatible with the base ``refresh``, but they are
        ignored here: the scope URL and query parameters are always rebuilt.
        Confirm this is intentional before relying on them.
        """
        super().refresh(
            json=json,
            url=self._client._build_url("scope", scope_id=self.id),
            extra_params=API_EXTRA_PARAMS["scope"],
        )
    @property
    def representations(self) -> List["BaseRepresentation"]:
        """Get and set the scope representations."""
        return self._representations_container.get_representations()

    @representations.setter
    def representations(self, value):
        # Validation and storage are delegated to the representations component.
        self._representations_container.set_representations(value)

    def _save_representations(self, representation_options):
        """Persist the representation options inside the scope options."""
        options = self.options
        options.update({"representations": representation_options})
        self.options = options
    # The following properties resolve the scope's root objects lazily and
    # cache the result on the instance (caches are reset in __init__).
    @property
    def workflow_root_process(self) -> "Activity":
        """Retrieve the Activity root object with classification WORKFLOW."""
        if self._workflow_root_process is None:
            self._workflow_root_process = self.activity(id=self._json_data["workflow_root_id"])
        return self._workflow_root_process

    @property
    def app_root_process(self) -> "Activity":
        """Retrieve the Activity root object with classification APP."""
        if self._app_root_process is None:
            self._app_root_process = self.activity(id=self._json_data["app_root_id"])
        return self._app_root_process

    @property
    def catalog_root_process(self) -> "Activity":
        """Retrieve the Activity root object with classification CATALOG."""
        if self._catalog_root_process is None:
            self._catalog_root_process = self.activity(id=self._json_data["catalog_root_id"])
        return self._catalog_root_process

    @property
    def product_root_model(self) -> "Part":
        """Retrieve the Part root object with classification PRODUCT and category MODEL."""
        if self._product_root_model is None:
            self._product_root_model = self.model(id=self._json_data["product_model_id"])
        return self._product_root_model

    @property
    def product_root_instance(self) -> "Part":
        """Retrieve the Part root object with classification PRODUCT and category INSTANCE."""
        if self._product_root_instance is None:
            self._product_root_instance = self.part(id=self._json_data["product_instance_id"])
        return self._product_root_instance

    @property
    def catalog_root_model(self) -> "Part":
        """Retrieve the Part root object with classification CATALOG and category MODEL."""
        if self._catalog_root_model is None:
            self._catalog_root_model = self.model(id=self._json_data["catalog_model_id"])
        return self._catalog_root_model

    @property
    def catalog_root_instance(self) -> "Part":
        """Retrieve the Part root object with classification CATALOG and category INSTANCE."""
        if self._catalog_root_instance is None:
            self._catalog_root_instance = self.part(id=self._json_data["catalog_instance_id"])
        return self._catalog_root_instance
#
# CRUD methods
#
def _update_scope_project_team(self, action, role, user):
"""
Update the Project Team of the Scope. Updates include addition or removing of managers or members.
:param action: type of action to be applied
:type action: ScopeMemberActions
:param role: type of role to be applied to the user
:type role: ScopeRoles
:param user: the username of the user to which the action applies to
:type user: basestring
:raises APIError: When unable to update the scope project team.
"""
action = check_enum(action, ScopeMemberActions, "action")
role = check_enum(role, ScopeRoles, "role")
user = check_text(user, "user")
users: List[Dict] = self._client._retrieve_users()["results"]
user_object: Dict = find(users, lambda u: u["username"] == user)
if user_object is None:
raise NotFoundError(f'User "{user}" does not exist')
url = self._client._build_url(f"scope_{action}_{role}", scope_id=self.id)
response = self._client._request(
"PUT",
url,
params=API_EXTRA_PARAMS[self.__class__.__name__.lower()],
data={"user_id": user_object["pk"]},
)
if response.status_code != requests.codes.ok: # pragma: no cover
raise APIError(f"Could not {action} {role} in Scope", response=response)
self.refresh(json=response.json().get("results")[0])
    def edit(
        self,
        name: Optional[Union[str, Empty]] = empty,
        description: Optional[Union[str, Empty]] = empty,
        start_date: Optional[Union[datetime, Empty]] = empty,
        due_date: Optional[Union[datetime, Empty]] = empty,
        status: Optional[Union[str, ScopeStatus, Empty]] = empty,
        category: Optional[Union[str, ScopeCategory, Empty]] = empty,
        tags: Optional[Union[List[str], Empty]] = empty,
        team: Optional[Union[Team, str, Empty]] = empty,
        options: Optional[Union[Dict, Empty]] = empty,
        **kwargs,
    ) -> None:
        """
        Edit the details of a scope.

        Setting an input to None will clear out the value (exception being name and status).

        :param name: (optionally) edit the name of the scope. Name cannot be cleared.
        :type name: basestring or None or Empty
        :param description: (optionally) edit the description of the scope or clear it
        :type description: basestring or None or Empty
        :param start_date: (optionally) edit the start date of the scope as a datetime object (UTC time/timezone
                        aware preferred) or clear it
        :type start_date: datetime or None or Empty
        :param due_date: (optionally) edit the due_date of the scope as a datetime object (UTC time/timezone
                        aware preferred) or clear it
        :type due_date: datetime or None or Empty
        :param status: (optionally) edit the status of the scope as a string based. Status cannot be cleared.
        :type status: ScopeStatus or Empty
        :param category: (optionally) edit the category of the scope
        :type category: ScopeCategory or Empty
        :param tags: (optionally) replace the tags on a scope, which is a list of strings ["one","two","three"] or
                        clear them
        :type tags: list of basestring or None or Empty
        :param team: (optionally) add the scope to a team. Team cannot be cleared.
        :type team: UUIDstring or None or Empty
        :param options: (optionally) custom options dictionary stored on the scope object
        :type options: dict or None or Empty
        :raises IllegalArgumentError: if the type of the inputs is not correct
        :raises APIError: if another Error occurs
        :warns: UserWarning - When a naive datetime is provided. Defaults to UTC.

        Examples
        --------
        >>> from datetime import datetime
        >>> project.edit(name='New project name',description='Changing the description just because I can',
        ... start_date=datetime.now(),status=ScopeStatus.CLOSED)

        If we want to provide timezone aware datetime objects we can use the 3rd party convenience library :mod:`pytz`.
        Mind that we need to fetch the timezone first and use `<timezone>.localize(<your datetime>)` to make it
        work correctly.

        Using `datetime(2017,6,1,23,59,0 tzinfo=<tz>)` does NOT work for most timezones with a
        daylight saving time. Check the `pytz <http://pythonhosted.org/pytz/#localized-times-and-date-arithmetic>`_
        documentation.

        To make it work using :mod:`pytz` and timezone aware :mod:`datetime` see the following example::

        >>> import pytz
        >>> start_date_tzaware = datetime.now(pytz.utc)
        >>> mytimezone = pytz.timezone('Europe/Amsterdam')
        >>> due_date_tzaware = mytimezone.localize(datetime(2019, 10, 27, 23, 59, 0))
        >>> project.edit(start_date=start_date_tzaware,due_date=due_date_tzaware)

        To assign a scope to a team see the following example::

        >>> my_team = client.team(name='My own team')
        >>> project.edit(team=my_team)

        Not mentioning an input parameter in the function will leave it unchanged. Setting a parameter as None will
        clear its value (where that is possible). The example below will clear the due_date, but leave everything else
        unchanged.

        >>> project.edit(due_date=None)
        """
        # Build the payload: `empty` sentinels are removed by clean_empty_values
        # below, so only explicitly provided fields reach the API.
        update_dict = {
            "id": self.id,
            "name": check_text(name, "name") or self.name,
            "text": check_text(description, "description") or "",
            "start_date": check_datetime(start_date, "start_date"),
            "due_date": check_datetime(due_date, "due_date"),
            "status": check_enum(status, ScopeStatus, "status") or self.status,
            "category": check_enum(category, ScopeCategory, "category"),
            "tags": check_list_of_text(tags, "tags", True) or list(),
            "team_id": check_base(team, Team, "team") or "",
            "scope_options": check_type(options, dict, "options") or dict(),
        }
        if kwargs:  # pragma: no cover
            update_dict.update(kwargs)
        update_dict = clean_empty_values(update_dict=update_dict)
        url = self._client._build_url("scope", scope_id=self.id)
        response = self._client._request(
            "PUT",
            url,
            params=API_EXTRA_PARAMS[self.__class__.__name__.lower()],
            json=update_dict,
        )
        if response.status_code != requests.codes.ok:  # pragma: no cover
            raise APIError(f"Could not update Scope {self}", response=response)
        self.refresh(json=response.json().get("results")[0])
        # TODO tags that are set are not in response
        if tags is not None and not isinstance(tags, Empty):
            self._tags = tags
    def clone(self, **kwargs) -> "Scope":
        """Clone a scope.

        See :method:`pykechain.Client.clone_scope()` for available parameters.
        """
        return self._client.clone_scope(source_scope=self, **kwargs)

    def delete(self, asynchronous=True):
        """Delete the scope.

        Only works with enough permissions.

        .. versionadded: 3.0

        See :method:`pykechain.Client.delete_scope()` for available parameters.

        :param asynchronous: when True (default) the backend deletes the scope asynchronously
        :raises ForbiddenError: if you do not have the permissions to delete a scope
        """
        return self._client.delete_scope(scope=self, asynchronous=asynchronous)
#
# Part methods
#
    def parts(self, *args, **kwargs) -> List["Part"]:
        """Retrieve parts belonging to this scope.

        See :class:`pykechain.Client.parts` for available parameters.
        """
        return self._client.parts(*args, scope_id=self.id, **kwargs)

    def part(self, *args, **kwargs) -> "Part":
        """Retrieve a single part belonging to this scope.

        See :class:`pykechain.Client.part` for available parameters.
        """
        return self._client.part(*args, scope_id=self.id, **kwargs)

    def properties(self, *args, **kwargs) -> List["Property"]:
        """Retrieve properties belonging to this scope.

        .. versionadded: 3.0

        See :class:`pykechain.Client.properties` for available parameters.
        """
        return self._client.properties(*args, scope_id=self.id, **kwargs)

    def property(self, *args, **kwargs) -> "Property":
        """Retrieve a single property belonging to this scope.

        .. versionadded: 3.0

        See :class:`pykechain.Client.property` for available parameters.
        """
        return self._client.property(*args, scope_id=self.id, **kwargs)

    def model(self, *args, **kwargs) -> "Part":
        """Retrieve a single model belonging to this scope.

        See :class:`pykechain.Client.model` for available parameters.
        """
        return self._client.model(*args, scope_id=self.id, **kwargs)

    def create_model(self, parent, name, multiplicity=Multiplicity.ZERO_MANY) -> "Part":
        """Create a single part model in this scope.

        See :class:`pykechain.Client.create_model` for available parameters.
        """
        # NOTE(review): unlike the retrieval methods, no scope_id is passed;
        # the scope is presumably derived from ``parent`` — confirm.
        return self._client.create_model(parent, name, multiplicity=multiplicity)

    def create_model_with_properties(
        self, parent, name, multiplicity=Multiplicity.ZERO_MANY, properties_fvalues=None, **kwargs
    ) -> "Part":
        """Create a model with its properties in a single API request.

        See :func:`pykechain.Client.create_model_with_properties()` for available parameters.
        """
        return self._client.create_model_with_properties(
            parent,
            name,
            multiplicity=multiplicity,
            properties_fvalues=properties_fvalues,
            **kwargs,
        )
#
# Activity methods
#
    def activities(self, *args, **kwargs) -> List["Activity"]:
        """Retrieve activities belonging to this scope.

        See :class:`pykechain.Client.activities` for available parameters.
        """
        return self._client.activities(*args, scope=self.id, **kwargs)

    def activity(self, *args, **kwargs) -> "Activity":
        """Retrieve a single activity belonging to this scope.

        See :class:`pykechain.Client.activity` for available parameters.
        """
        return self._client.activity(*args, scope=self.id, **kwargs)

    def create_activity(self, *args, **kwargs) -> "Activity":
        """Create a new activity belonging to this scope.

        New activities are created under the scope's workflow root.

        See :class:`pykechain.Client.create_activity` for available parameters.
        """
        return self._client.create_activity(self.workflow_root, *args, **kwargs)

    def side_bar(self, *args, **kwargs) -> Optional[SideBarManager]:
        """Retrieve the side-bar manager."""
        # NOTE(review): positional *args are unpacked after the keyword 'scope';
        # a positional argument mapping to 'scope' would raise TypeError —
        # confirm callers only pass keywords.
        return SideBarManager(scope=self, *args, **kwargs)
def set_landing_page(
self,
activity: Union["Activity", KEChainPages],
task_display_mode: Optional[SubprocessDisplayMode] = SubprocessDisplayMode.ACTIVITIES,
) -> None:
"""
Update the landing page of the scope.
:param activity: Activity object or KEChainPages option
:type activity: (Activity, KEChainPages)
:param task_display_mode: display mode of the activity in KE-chain
:type task_display_mode: SubprocessDisplayMode
:return: None
:rtype None
"""
from pykechain.models import Activity
if not (isinstance(activity, Activity) or activity in KEChainPages.values()):
raise IllegalArgumentError(
'activity must be of class Activity or a KEChainPages option, "{}" is not.'.format(
activity
)
)
check_enum(task_display_mode, SubprocessDisplayMode, "task_display_mode")
if isinstance(activity, Activity):
url = f"#/scopes/{self.id}/{task_display_mode}/{activity.id}"
else:
url = f"#/scopes/{self.id}/{activity}"
options = dict(self.options)
options.update({"landingPage": url})
self.options = options
def get_landing_page_url(self) -> Optional[str]:
"""
Retrieve the landing page URL, if it is set in the options.
:return: Landing page url
"""
return self.options.get("landingPage")
#
# Service Methods
#
    def services(self, *args, **kwargs) -> List["Service"]:
        """Retrieve services belonging to this scope.

        See :class:`pykechain.Client.services` for available parameters.

        .. versionadded:: 1.13
        """
        return self._client.services(*args, scope=self.id, **kwargs)

    def create_service(self, *args, **kwargs) -> "Service":
        """Create a service to current scope.

        See :class:`pykechain.Client.create_service` for available parameters.

        .. versionadded:: 1.13
        """
        return self._client.create_service(*args, scope=self.id, **kwargs)

    def service(self, *args, **kwargs) -> "Service":
        """Retrieve a single service belonging to this scope.

        See :class:`pykechain.Client.service` for available parameters.

        .. versionadded:: 1.13
        """
        return self._client.service(*args, scope=self.id, **kwargs)

    def service_executions(self, *args, **kwargs) -> List["ServiceExecution"]:
        """Retrieve service executions belonging to this scope.

        See :class:`pykechain.Client.service_executions` for available parameters.

        .. versionadded:: 1.13
        """
        return self._client.service_executions(*args, scope=self.id, **kwargs)

    def service_execution(self, *args, **kwargs) -> "ServiceExecution":
        """Retrieve a single service execution belonging to this scope.

        See :class:`pykechain.Client.service_execution` for available parameters.

        .. versionadded:: 1.13
        """
        return self._client.service_execution(*args, scope=self.id, **kwargs)
#
# User and Members of the Scope
#
def members(
self,
is_manager: Optional[bool] = None,
is_supervisor: Optional[bool] = None,
is_leadmember: Optional[bool] = None,
) -> List[Dict]:
"""
Retrieve members of the scope.
.. versionchanged:: 3.7
we added the supervisor members for backend that support this.
:param is_manager: (otional) set to True/False to filter members that are/aren't managers, resp.
:type is_manager: bool
:param is_supervisor: (optional) set to True/False to filter members that are/aren't supervisors, resp.
:type is_supervisor: bool
:param is_leadmember: (optional) set to True/False to filter members that are/aren't leadmembers, resp.
:type is_leadmember: bool
:return: List of members, each defined as a dict
Examples
--------
>>> members = project.members()
>>> managers = project.members(is_manager=True)
>>> supervisors = project.members(is_supervisor=True)
>>> leadmembers = project.members(is_leadmember=True)
"""
members = [member for member in self._json_data["members"] if member["is_active"]]
if is_manager is not None:
members = [member for member in members if member.get("is_manager") == is_manager]
if is_supervisor is not None:
members = [
member for member in members if member.get("is_supervisor") == is_supervisor
]
if is_leadmember is not None:
members = [
member for member in members if member.get("is_leadmember") == is_leadmember
]
return members
    def add_member(self, member: str) -> None:
        """
        Add a single member to the scope.

        You may only edit the list of members if the pykechain credentials allow this.

        :param member: single username to be added to the scope list of members
        :type member: basestring
        :raises APIError: when unable to update the scope member
        """
        self._update_scope_project_team(
            action=ScopeMemberActions.ADD, role=ScopeRoles.MEMBER, user=member
        )

    def remove_member(self, member: str) -> None:
        """
        Remove a single member from the scope.

        :param member: single username to be removed from the scope list of members
        :type member: basestring
        :raises APIError: when unable to update the scope member
        """
        self._update_scope_project_team(
            action=ScopeMemberActions.REMOVE, role=ScopeRoles.MEMBER, user=member
        )

    def add_manager(self, manager: str) -> None:
        """
        Add a single manager to the scope.

        :param manager: single username to be added to the scope list of managers
        :type manager: basestring
        :raises APIError: when unable to update the scope manager
        """
        self._update_scope_project_team(
            action=ScopeMemberActions.ADD, role=ScopeRoles.MANAGER, user=manager
        )

    def remove_manager(self, manager: str) -> None:
        """
        Remove a single manager from the scope.

        :param manager: single username to be removed from the scope list of managers
        :type manager: basestring
        :raises APIError: when unable to update the scope manager
        """
        self._update_scope_project_team(
            action=ScopeMemberActions.REMOVE, role=ScopeRoles.MANAGER, user=manager
        )

    def add_leadmember(self, leadmember: str) -> None:
        """
        Add a single leadmember to the scope.

        :param leadmember: single username to be added to the scope list of leadmembers
        :type leadmember: basestring
        :raises APIError: when unable to update the scope leadmember
        """
        self._update_scope_project_team(
            action=ScopeMemberActions.ADD, role=ScopeRoles.LEADMEMBER, user=leadmember
        )

    def remove_leadmember(self, leadmember: str) -> None:
        """
        Remove a single leadmember from the scope.

        :param leadmember: single username to be removed from the scope list of leadmembers
        :type leadmember: basestring
        :raises APIError: when unable to update the scope leadmember
        """
        self._update_scope_project_team(
            action=ScopeMemberActions.REMOVE,
            role=ScopeRoles.LEADMEMBER,
            user=leadmember,
        )
def add_supervisor(self, supervisor: str) -> None:
"""
Add a single supervisor to the scope.
.. versionadded:: 3.7
requires backend version 3.7 as well.
:param supervisor: single username to be added to the scope list of supervisors
:type supervisor: basestring
:raises APIError: when unable to update the scope supervisor
"""
if self._client.match_app_version(label="scope", version="<3.6.0"):
raise NotImplementedError(
"Adding and removal of supervisor members to a scope not "
"possible with this backend version"
)
self._update_scope_project_team(
action=ScopeMemberActions.ADD, role=ScopeRoles.SUPERVISOR, user=supervisor
)
def remove_supervisor(self, supervisor: str) -> None:
"""
Remove a single supervisor to the scope.
.. versionadded:: 3.7
requires backend version 3.7 as well.
:param supervisor: single username to be added to the scope list of supervisors
:type supervisor: basestring
:raises APIError: when unable to update the scope supervisor
"""
if self._client.match_app_version(label="scope", version="<3.6.0"):
raise NotImplementedError(
"Adding and removal of supervisor members to a scope not "
"possible with this backend version"
)
self._update_scope_project_team(
action=ScopeMemberActions.REMOVE,
role=ScopeRoles.SUPERVISOR,
user=supervisor,
)
#
# Context Methods
#
def context(self, *args, **kwargs) -> Context:
"""
Retrieve a context object in this scope.
See :class:`pykechain.Client.context` for available parameters.
.. versionadded:: 3.11
:return: a Context object
"""
return self._client.context(*args, scope=self, **kwargs)
def contexts(self, *args, **kwargs) -> List[Context]:
"""
Retrieve one or more contexts object in this scope.
See :class:`pykechain.Client.contexts` for available parameters.
.. versionadded:: 3.11
:return: a list of Context objects
"""
return self._client.contexts(scope=self, **kwargs)
def create_context(self, *args, **kwargs) -> Context:
"""
Create a new Context object of a ContextType in a scope.
See :class:`pykechain.Client.create_context` for available parameters.
.. versionadded:: 3.11
:return: a Context object
"""
return self._client.create_context(scope=self, **kwargs)
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import glanceclient.exc
import novaclient.exceptions as nova_exc
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import expose
from magnum.api import validation
from magnum.common import clients
from magnum.common import exception
from magnum.common import policy
from magnum import objects
class BayModelPatchType(types.JsonPatchType):
    """JSON-patch wrapper type used to validate PATCH documents for baymodels."""
    pass
class BayModel(base.APIBase):
    """API representation of a baymodel.
    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a baymodel.
    """
    # Backing storage for the ``coe`` wsproperty below.
    _coe = None
    def _get_coe(self):
        return self._coe
    def _set_coe(self, value):
        # Only truthy values that differ from the current setting are stored;
        # wtypes.Unset explicitly resets the attribute to "not provided".
        # NOTE(review): a falsy non-Unset value is silently ignored — confirm
        # that is intentional.
        if value and self._coe != value:
            self._coe = value
        elif value == wtypes.Unset:
            self._coe = wtypes.Unset
    uuid = types.uuid
    """Unique UUID for this baymodel"""
    name = wtypes.StringType(min_length=1, max_length=255)
    """The name of the bay model"""
    coe = wsme.wsproperty(wtypes.text, _get_coe, _set_coe, mandatory=True)
    """The Container Orchestration Engine for this bay model"""
    image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
                           mandatory=True)
    """The image name or UUID to use as a base image for this baymodel"""
    flavor_id = wtypes.StringType(min_length=1, max_length=255)
    """The flavor of this bay model"""
    master_flavor_id = wtypes.StringType(min_length=1, max_length=255)
    """The flavor of the master node for this bay model"""
    dns_nameserver = wtypes.IPv4AddressType()
    """The DNS nameserver address"""
    keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
                             mandatory=True)
    """The name or id of the nova ssh keypair"""
    external_network_id = wtypes.StringType(min_length=1, max_length=255)
    """The external network to attach the Bay"""
    fixed_network = wtypes.StringType(min_length=1, max_length=255)
    """The fixed network name to attach the Bay"""
    network_driver = wtypes.StringType(min_length=1, max_length=255)
    """The name of the driver used for instantiating container networks"""
    apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535)
    """The API server port for k8s"""
    docker_volume_size = wtypes.IntegerType(minimum=1)
    """The size in GB of the docker volume"""
    ssh_authorized_key = wtypes.StringType(min_length=1)
    """The SSH Authorized Key"""
    cluster_distro = wtypes.StringType(min_length=1, max_length=255)
    """The Cluster distro for the bay, ex - coreos, fedora-atomic."""
    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated baymodel links"""
    http_proxy = wtypes.StringType(min_length=1, max_length=255)
    """http_proxy for the bay """
    https_proxy = wtypes.StringType(min_length=1, max_length=255)
    """https_proxy for the bay """
    no_proxy = wtypes.StringType(min_length=1, max_length=255)
    """Its comma separated list of ip for which proxies should not
    used in the bay"""
    registry_enabled = wsme.wsattr(types.boolean, default=False)
    """Indicates whether the docker registry is enabled"""
    labels = wtypes.DictType(str, str)
    """One or more key/value pairs"""
    insecure = wsme.wsattr(types.boolean, default=False)
    """Indicates whether the TLS should be disabled"""
    def __init__(self, **kwargs):
        # Mirror only the fields the API exposes; anything not passed in
        # kwargs is marked as Unset rather than defaulted.
        self.fields = []
        for field in objects.BayModel.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))
    @staticmethod
    def _convert_with_links(baymodel, url, expand=True):
        # When not expanded, only a summary subset of fields is returned.
        if not expand:
            baymodel.unset_fields_except(['uuid', 'name', 'image_id',
                                          'apiserver_port', 'coe'])
        baymodel.links = [link.Link.make_link('self', url,
                                              'baymodels', baymodel.uuid),
                          link.Link.make_link('bookmark', url,
                                              'baymodels', baymodel.uuid,
                                              bookmark=True)]
        return baymodel
    @classmethod
    def convert_with_links(cls, rpc_baymodel, expand=True):
        """Build an API BayModel (with links) from an RPC baymodel object."""
        baymodel = BayModel(**rpc_baymodel.as_dict())
        return cls._convert_with_links(baymodel, pecan.request.host_url,
                                       expand)
    @classmethod
    def sample(cls, expand=True):
        """Return a sample baymodel payload for API documentation."""
        sample = cls(
            uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
            name='example',
            image_id='Fedora-k8s',
            flavor_id='m1.small',
            master_flavor_id='m1.small',
            dns_nameserver='8.8.1.1',
            keypair_id='keypair1',
            external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba',
            fixed_network='private',
            network_driver='libnetwork',
            apiserver_port=8080,
            docker_volume_size=25,
            cluster_distro='fedora-atomic',
            ssh_authorized_key='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB',
            coe='kubernetes',
            http_proxy='http://proxy.com:123',
            https_proxy='https://proxy.com:123',
            no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
            labels={'key1': 'val1', 'key2': 'val2'},
            created_at=datetime.datetime.utcnow(),
            updated_at=datetime.datetime.utcnow())
        return cls._convert_with_links(sample, 'http://localhost:9511', expand)
class BayModelCollection(collection.Collection):
    """API representation of a collection of baymodels."""
    baymodels = [BayModel]
    """A list containing baymodels objects"""
    def __init__(self, **kwargs):
        # '_type' is the key under which the items appear in the serialized
        # collection document.
        self._type = 'baymodels'
    @staticmethod
    def convert_with_links(rpc_baymodels, limit, url=None, expand=False,
                           **kwargs):
        """Build an API collection (with pagination link) from RPC objects."""
        collection = BayModelCollection()
        collection.baymodels = [BayModel.convert_with_links(p, expand)
                                for p in rpc_baymodels]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection
    @classmethod
    def sample(cls):
        """Return a sample collection payload for API documentation."""
        sample = cls()
        sample.baymodels = [BayModel.sample(expand=False)]
        return sample
class BayModelsController(rest.RestController):
    """REST controller for BayModels."""
    _custom_actions = {
        'detail': ['GET'],
    }
    def _get_baymodels_collection(self, marker, limit,
                                  sort_key, sort_dir, expand=False,
                                  resource_url=None):
        # Shared implementation behind get_all() and detail(): validates the
        # paging/sorting inputs, resolves the pagination marker and lists the
        # baymodels for the current request context.
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.BayModel.get_by_uuid(pecan.request.context,
                                                      marker)
        baymodels = objects.BayModel.list(pecan.request.context, limit,
                                          marker_obj, sort_key=sort_key,
                                          sort_dir=sort_dir)
        return BayModelCollection.convert_with_links(baymodels, limit,
                                                     url=resource_url,
                                                     expand=expand,
                                                     sort_key=sort_key,
                                                     sort_dir=sort_dir)
    def _get_image_data(self, context, image_ident):
        """Retrieves os_distro and other metadata from the Glance image.
        :param image_ident: image id or name of baymodel.
        :raises ImageNotFound: when the image does not exist in Glance.
        :raises ImageNotAuthorized: when access to the image is forbidden.
        """
        try:
            cli = clients.OpenStackClients(context)
            return api_utils.get_openstack_resource(cli.glance().images,
                                                    image_ident, 'images')
        except glanceclient.exc.NotFound:
            raise exception.ImageNotFound(image_id=image_ident)
        except glanceclient.exc.HTTPForbidden:
            raise exception.ImageNotAuthorized(image_id=image_ident)
    @policy.enforce_wsgi("baymodel")
    @expose.expose(BayModelCollection, types.uuid,
                   types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, baymodel_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of baymodels.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        return self._get_baymodels_collection(marker, limit, sort_key,
                                              sort_dir)
    @policy.enforce_wsgi("baymodel")
    @expose.expose(BayModelCollection, types.uuid,
                   types.uuid, int, wtypes.text, wtypes.text)
    def detail(self, baymodel_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of baymodels with detail.
        :param baymodel_uuid: UUID of a baymodel, to get only baymodels for
                              that baymodel.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "baymodels":
            raise exception.HTTPNotFound
        expand = True
        resource_url = '/'.join(['baymodels', 'detail'])
        return self._get_baymodels_collection(marker, limit,
                                              sort_key, sort_dir, expand,
                                              resource_url)
    @policy.enforce_wsgi("baymodel", "get")
    @expose.expose(BayModel, types.uuid_or_name)
    def get_one(self, baymodel_ident):
        """Retrieve information about the given baymodel.
        :param baymodel_ident: UUID or logical name of a baymodel.
        """
        rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
        return BayModel.convert_with_links(rpc_baymodel)
    def check_keypair_exists(self, context, keypair):
        """Checks the existence of the keypair.
        :raises KeyPairNotFound: when nova does not know the keypair.
        """
        cli = clients.OpenStackClients(context)
        try:
            cli.nova().keypairs.get(keypair)
        except nova_exc.NotFound:
            raise exception.KeyPairNotFound(keypair=keypair)
    @policy.enforce_wsgi("baymodel", "create")
    @expose.expose(BayModel, body=BayModel, status_code=201)
    @validation.enforce_network_driver_types('flannel')
    def post(self, baymodel):
        """Create a new baymodel.
        :param baymodel: a baymodel within the request body.
        """
        baymodel_dict = baymodel.as_dict()
        context = pecan.request.context
        # Validate the referenced keypair before persisting anything.
        self.check_keypair_exists(context, baymodel_dict['keypair_id'])
        baymodel_dict['project_id'] = context.project_id
        baymodel_dict['user_id'] = context.user_id
        # The cluster distro is derived from the Glance image's os_distro
        # property; an image without it cannot be used.
        image_data = self._get_image_data(context, baymodel_dict['image_id'])
        if image_data.get('os_distro'):
            baymodel_dict['cluster_distro'] = image_data['os_distro']
        else:
            raise exception.OSDistroFieldNotFound(
                image_id=baymodel_dict['image_id'])
        new_baymodel = objects.BayModel(context, **baymodel_dict)
        new_baymodel.create()
        # Set the HTTP Location Header
        pecan.response.location = link.build_url('baymodels',
                                                 new_baymodel.uuid)
        return BayModel.convert_with_links(new_baymodel)
    @policy.enforce_wsgi("baymodel", "update")
    @wsme.validate(types.uuid, [BayModelPatchType])
    @expose.expose(BayModel, types.uuid, body=[BayModelPatchType])
    @validation.enforce_network_driver_types('flannel')
    def patch(self, baymodel_uuid, patch):
        """Update an existing baymodel.
        :param baymodel_uuid: UUID of a baymodel.
        :param patch: a json PATCH document to apply to this baymodel.
        """
        rpc_baymodel = objects.BayModel.get_by_uuid(pecan.request.context,
                                                    baymodel_uuid)
        try:
            baymodel_dict = rpc_baymodel.as_dict()
            baymodel = BayModel(**api_utils.apply_jsonpatch(
                baymodel_dict,
                patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)
        # Update only the fields that have changed
        for field in objects.BayModel.fields:
            try:
                patch_val = getattr(baymodel, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_baymodel[field] != patch_val:
                rpc_baymodel[field] = patch_val
        rpc_baymodel.save()
        return BayModel.convert_with_links(rpc_baymodel)
    @policy.enforce_wsgi("baymodel")
    @expose.expose(None, types.uuid_or_name, status_code=204)
    def delete(self, baymodel_ident):
        """Delete a baymodel.
        :param baymodel_ident: UUID or logical name of a baymodel.
        """
        rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
        rpc_baymodel.destroy()
|
|
from inspect import isclass
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
    """Base class for activity-log action types.

    Subclasses define a unique ``id`` and a ``format`` string; optional
    class attributes (``keep``, ``review_queue``, ``admin_event``, ...)
    are detected via ``hasattr`` when the LOG_* index lists are built.
    """
    action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Escalated')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {review} for {addon} deleted.')
editor_format = _(u'{user} deleted {review} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class PAYPAL_FAILED(_LOG):
id = 51
format = _(u'{addon} failed checks with PayPal.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} disabled.')
short = _(u'App disabled')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation..
class VIDEO_ERROR(_LOG):
id = 74
format = _(u'Video removed from {addon} because of a problem with '
u'the video. ')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
format = _(u'{addon} re-review because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
# Collect every _LOG subclass defined in this module.
# NOTE(review): vars() inside a module-level list comprehension returns the
# module namespace only under Python 2 scoping rules; under Python 3 the
# comprehension has its own scope — confirm this module still runs on py2.
LOGS = [x for x in vars().values()
        if isclass(x) and issubclass(x, _LOG) and x != _LOG]
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
                      if (getattr(l, 'hide_developer', False)
                          or l.id in LOG_ADMINS)]
def log(action, *args, **kw):
    """
    e.g. amo.log(amo.LOG.CREATE_ADDON, []),
    amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
    """
    # Imports are function-local, presumably to avoid circular imports at
    # module load time — confirm before hoisting them.
    from access.models import Group
    from addons.models import Addon
    from amo import get_user, logger_log
    from devhub.models import (ActivityLog, AddonLog, CommentLog, GroupLog,
                               UserLog, VersionLog)
    from users.models import UserProfile
    from versions.models import Version
    # Fall back to the currently authenticated user when none is supplied.
    user = kw.get('user', get_user())
    if not user:
        logger_log.warning('Activity log called with no user: %s' % action.id)
        return
    al = ActivityLog(user=user, action=action.id)
    al.arguments = args
    if 'details' in kw:
        al.details = kw['details']
    al.save()
    if 'details' in kw and 'comments' in al.details:
        CommentLog(comments=al.details['comments'], activity_log=al).save()
    # TODO(davedash): post-remora this may not be necessary.
    if 'created' in kw:
        al.created = kw['created']
        # Double save necessary since django resets the created date on save.
        al.save()
    # Index the log entry by every model mentioned in the arguments so it can
    # later be queried per addon / version / user / group.
    for arg in args:
        if isinstance(arg, tuple):
            # (Model, pk) tuples reference an object by id without fetching it.
            if arg[0] == Addon:
                AddonLog(addon_id=arg[1], activity_log=al).save()
            elif arg[0] == Version:
                VersionLog(version_id=arg[1], activity_log=al).save()
            elif arg[0] == UserProfile:
                UserLog(user_id=arg[1], activity_log=al).save()
            elif arg[0] == Group:
                GroupLog(group_id=arg[1], activity_log=al).save()
        elif isinstance(arg, Addon):
            AddonLog(addon=arg, activity_log=al).save()
        elif isinstance(arg, Version):
            VersionLog(version=arg, activity_log=al).save()
        elif isinstance(arg, UserProfile):
            # Index by any user who is mentioned as an argument.
            UserLog(activity_log=al, user=arg).save()
        elif isinstance(arg, Group):
            GroupLog(group=arg, activity_log=al).save()
    # Index by every user
    UserLog(activity_log=al, user=user).save()
    return al
|
|
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from goose.extractors import BaseExtractor
KNOWN_ARTICLE_CONTENT_TAGS = [
{'attr': 'itemprop', 'value': 'articleBody'},
{'attr': 'class', 'value': 'post-content'},
{'tag': 'article'},
]
class ContentExtractor(BaseExtractor):
def get_language(self):
"""\
Returns the language is by the article or
the configuration language
"""
# we don't want to force the target language
# so we use the article.meta_lang
if self.config.use_meta_language:
if self.article.meta_lang:
return self.article.meta_lang[:2]
return self.config.target_language
def get_known_article_tags(self):
for item in KNOWN_ARTICLE_CONTENT_TAGS:
nodes = self.parser.getElementsByTag(
self.article.doc,
**item)
if len(nodes):
return nodes[0]
return None
def is_articlebody(self, node):
for item in KNOWN_ARTICLE_CONTENT_TAGS:
# attribute
if "attr" in item and "value" in item:
if self.parser.getAttribute(node, item['attr']) == item['value']:
return True
# tag
if "tag" in item:
if node.tag == item['tag']:
return True
return False
    def calculate_best_node(self):
        """Score candidate nodes by stopword density and return the parent
        node most likely to hold the main article content (or None)."""
        doc = self.article.doc
        top_node = None
        nodes_to_check = self.nodes_to_check(doc)
        starting_boost = float(1.0)
        cnt = 0
        i = 0
        parent_nodes = []
        nodes_with_text = []
        # First pass: keep only nodes with real text content (more than two
        # stopwords) that are not dominated by links.
        for node in nodes_to_check:
            text_node = self.parser.getText(node)
            word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
            high_link_density = self.is_highlink_density(node)
            if word_stats.get_stopword_count() > 2 and not high_link_density:
                nodes_with_text.append(node)
        nodes_number = len(nodes_with_text)
        negative_scoring = 0
        # The bottom 25% of candidates get a negative score (boilerplate such
        # as footers tends to live at the end of the document).
        bottom_negativescore_nodes = float(nodes_number) * 0.25
        for node in nodes_with_text:
            boost_score = float(0)
            # boost: early siblings of strong paragraphs get a decaying bonus
            if(self.is_boostable(node)):
                if cnt >= 0:
                    boost_score = float((1.0 / starting_boost) * 50)
                    starting_boost += 1
            # nodes_number: penalize the trailing candidates of long documents
            if nodes_number > 15:
                if (nodes_number - i) <= bottom_negativescore_nodes:
                    booster = float(bottom_negativescore_nodes - (nodes_number - i))
                    boost_score = float(-pow(booster, float(2)))
                    negscore = abs(boost_score) + negative_scoring
                    if negscore > 40:
                        boost_score = float(5)
            text_node = self.parser.getText(node)
            word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
            upscore = int(word_stats.get_stopword_count() + boost_score)
            # parent node accumulates the full score
            parent_node = self.parser.getParent(node)
            self.update_score(parent_node, upscore)
            self.update_node_count(parent_node, 1)
            if parent_node not in parent_nodes:
                parent_nodes.append(parent_node)
            # parentparent node: half the score bubbles one more level up
            parent_parent_node = self.parser.getParent(parent_node)
            if parent_parent_node is not None:
                self.update_node_count(parent_parent_node, 1)
                self.update_score(parent_parent_node, upscore / 2)
                if parent_parent_node not in parent_nodes:
                    parent_nodes.append(parent_parent_node)
            cnt += 1
            i += 1
        top_node_score = 0
        # Final pass: pick the highest-scoring parent node.
        for e in parent_nodes:
            score = self.get_score(e)
            if score > top_node_score:
                top_node = e
                top_node_score = score
            if top_node is None:
                top_node = e
        return top_node
    def is_boostable(self, node):
        """Return True when *node* deserves a score boost.

        A lot of times the first paragraph is merely the caption under an
        image, so before boosting a parent node we make sure the node is
        connected to other paragraphs: a preceding sibling paragraph
        within a few steps must carry substantial text weight.
        """
        para = "p"
        steps_away = 0
        minimum_stopword_count = 5
        max_stepsaway_from_node = 3
        nodes = self.walk_siblings(node)
        for current_node in nodes:
            # only preceding <p> siblings are considered
            current_node_tag = self.parser.getTag(current_node)
            if current_node_tag == para:
                # give up once we are too many paragraphs away
                if steps_away >= max_stepsaway_from_node:
                    return False
                paraText = self.parser.getText(current_node)
                word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(paraText)
                # a nearby paragraph with enough stopwords => boostable
                if word_stats.get_stopword_count() > minimum_stopword_count:
                    return True
                steps_away += 1
        return False
def walk_siblings(self, node):
current_sibling = self.parser.previousSibling(node)
b = []
while current_sibling is not None:
b.append(current_sibling)
previousSibling = self.parser.previousSibling(current_sibling)
current_sibling = None if previousSibling is None else previousSibling
return b
def add_siblings(self, top_node):
# in case the extraction used known attributes
# we don't want to add sibilings
if self.is_articlebody(top_node):
return top_node
baselinescore_siblings_para = self.get_siblings_score(top_node)
results = self.walk_siblings(top_node)
for current_node in results:
ps = self.get_siblings_content(current_node, baselinescore_siblings_para)
for p in ps:
top_node.insert(0, p)
return top_node
    def get_siblings_content(self, current_sibling, baselinescore_siblings_para):
        """\
        Return the paragraphs from *current_sibling* worth adding to the
        top node: the sibling itself when it is a non-empty <p>, otherwise
        new <p> elements built from its paragraph descendants that score
        above the sibling baseline. May return None when the parser
        reports no descendants.
        """
        if current_sibling.tag == 'p' and len(self.parser.getText(current_sibling)) > 0:
            e0 = current_sibling
            # copy before stripping the tail so the original tree is untouched
            if e0.tail:
                e0 = deepcopy(e0)
                e0.tail = ''
            return [e0]
        else:
            # not a paragraph itself: look at paragraph descendants instead
            potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p')
            if potential_paragraphs is None:
                return None
            else:
                ps = []
                for first_paragraph in potential_paragraphs:
                    text = self.parser.getText(first_paragraph)
                    if len(text) > 0:
                        word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text)
                        paragraph_score = word_stats.get_stopword_count()
                        # keep paragraphs scoring above 30% of the baseline
                        sibling_baseline_score = float(.30)
                        high_link_density = self.is_highlink_density(first_paragraph)
                        score = float(baselinescore_siblings_para * sibling_baseline_score)
                        if score < paragraph_score and not high_link_density:
                            p = self.parser.createElement(tag='p', text=text, tail=None)
                            ps.append(p)
                return ps
def get_siblings_score(self, top_node):
"""\
we could have long articles that have tons of paragraphs
so if we tried to calculate the base score against
the total text score of those paragraphs it would be unfair.
So we need to normalize the score based on the average scoring
of the paragraphs within the top node.
For example if our total score of 10 paragraphs was 1000
but each had an average value of 100 then 100 should be our base.
"""
base = 100000
paragraphs_number = 0
paragraphs_score = 0
nodes_to_check = self.parser.getElementsByTag(top_node, tag='p')
for node in nodes_to_check:
text_node = self.parser.getText(node)
word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)
high_link_density = self.is_highlink_density(node)
if word_stats.get_stopword_count() > 2 and not high_link_density:
paragraphs_number += 1
paragraphs_score += word_stats.get_stopword_count()
if paragraphs_number > 0:
base = paragraphs_score / paragraphs_number
return base
def update_score(self, node, addToScore):
"""\
adds a score to the gravityScore Attribute we put on divs
we'll get the current score then add the score
we're passing in to the current
"""
current_score = 0
score_string = self.parser.getAttribute(node, 'gravityScore')
if score_string:
current_score = int(score_string)
new_score = current_score + addToScore
self.parser.setAttribute(node, "gravityScore", str(new_score))
def update_node_count(self, node, add_to_count):
"""\
stores how many decent nodes are under a parent node
"""
current_score = 0
count_string = self.parser.getAttribute(node, 'gravityNodes')
if count_string:
current_score = int(count_string)
new_score = current_score + add_to_count
self.parser.setAttribute(node, "gravityNodes", str(new_score))
def is_highlink_density(self, e):
"""\
checks the density of links within a node,
is there not much text and most of it contains linky shit?
if so it's no good
"""
links = self.parser.getElementsByTag(e, tag='a')
if links is None or len(links) == 0:
return False
text = self.parser.getText(e)
words = text.split(' ')
words_number = float(len(words))
sb = []
for link in links:
sb.append(self.parser.getText(link))
linkText = ''.join(sb)
linkWords = linkText.split(' ')
numberOfLinkWords = float(len(linkWords))
numberOfLinks = float(len(links))
linkDivisor = float(numberOfLinkWords / words_number)
score = float(linkDivisor * numberOfLinks)
if score >= 1.0:
return True
return False
# return True if score > 1.0 else False
def get_score(self, node):
"""\
returns the gravityScore as an integer from this node
"""
return self.get_node_gravity_score(node) or 0
def get_node_gravity_score(self, node):
grvScoreString = self.parser.getAttribute(node, 'gravityScore')
if not grvScoreString:
return None
return int(grvScoreString)
def nodes_to_check(self, doc):
"""\
returns a list of nodes we want to search
on like paragraphs and tables
"""
nodes_to_check = []
for tag in ['p', 'pre', 'td']:
items = self.parser.getElementsByTag(doc, tag=tag)
nodes_to_check += items
return nodes_to_check
    def is_table_and_no_para_exist(self, e):
        """Return True when *e* has no substantial paragraphs left and is
        not a table cell.

        NOTE: this check has a side effect — paragraphs shorter than 25
        characters are removed from the tree before re-counting.
        """
        subParagraphs = self.parser.getElementsByTag(e, tag='p')
        for p in subParagraphs:
            txt = self.parser.getText(p)
            # prune trivially short paragraphs (mutates the tree)
            if len(txt) < 25:
                self.parser.remove(p)
        # re-query after pruning
        subParagraphs2 = self.parser.getElementsByTag(e, tag='p')
        if len(subParagraphs2) == 0 and e.tag != "td":
            return True
        return False
def is_nodescore_threshold_met(self, node, e):
top_node_score = self.get_score(node)
current_nodeScore = self.get_score(e)
thresholdScore = float(top_node_score * .08)
if (current_nodeScore < thresholdScore) and e.tag != 'td':
return False
return True
    def post_cleanup(self):
        """\
        Remove any divs that look like non-content, clusters of links, or
        paragraphs with no substance. Returns the cleaned top node (with
        qualifying siblings already merged in).
        """
        targetNode = self.article.top_node
        # pull in qualifying sibling paragraphs before cleaning
        node = self.add_siblings(targetNode)
        for e in self.parser.getChildren(node):
            e_tag = self.parser.getTag(e)
            # paragraphs are always kept; other children must pass every check
            if e_tag != 'p':
                if self.is_highlink_density(e) \
                    or self.is_table_and_no_para_exist(e) \
                    or not self.is_nodescore_threshold_met(node, e):
                    self.parser.remove(e)
        return node
class StandardContentExtractor(ContentExtractor):
    """Default extractor; inherits all behavior from ContentExtractor."""
    pass
|
|
"""Test Z-Wave config panel."""
from http import HTTPStatus
import json
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.components.zwave import DATA_NETWORK, const
from tests.mock.zwave import MockEntityValues, MockNode, MockValue
VIEW_NAME = "api:config:zwave:device_config"
@pytest.fixture
def client(loop, hass, hass_client):
    """Client to communicate with Z-Wave config views."""
    # Restrict the config integration to the zwave section only, then set
    # it up synchronously on the test event loop before handing a client back.
    with patch.object(config, "SECTIONS", ["zwave"]):
        loop.run_until_complete(async_setup_component(hass, "config", {}))
    return loop.run_until_complete(hass_client())
async def test_get_device_config(client):
    """Test getting device config."""

    def mock_read(path):
        """Mock reading data."""
        return {"hello.beer": {"free": "beer"}, "other.entity": {"do": "something"}}

    with patch("homeassistant.components.config._read", mock_read):
        response = await client.get("/api/config/zwave/device_config/hello.beer")
        assert response.status == HTTPStatus.OK
        # only the requested entity's entry comes back
        body = await response.json()
        assert body == {"free": "beer"}
async def test_update_device_config(client):
    """Test updating device config."""
    # Existing on-disk config the view will read, merge into, and rewrite.
    orig_data = {
        "hello.beer": {"ignored": True},
        "other.entity": {"polling_intensity": 2},
    }

    def mock_read(path):
        """Mock reading data."""
        return orig_data

    written = []

    def mock_write(path, data):
        """Mock writing data."""
        written.append(data)

    with patch("homeassistant.components.config._read", mock_read), patch(
        "homeassistant.components.config._write", mock_write
    ):
        resp = await client.post(
            "/api/config/zwave/device_config/hello.beer",
            data=json.dumps({"polling_intensity": 2}),
        )
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    assert result == {"result": "ok"}
    # The view merges the posted keys into the entity's existing entry.
    orig_data["hello.beer"]["polling_intensity"] = 2
    assert written[0] == orig_data
async def test_update_device_config_invalid_key(client):
    """Test updating device config."""
    payload = json.dumps({"polling_intensity": 2})
    # "invalid_entity" is not a valid entity_id, so the view rejects it
    response = await client.post(
        "/api/config/zwave/device_config/invalid_entity", data=payload
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_update_device_config_invalid_data(client):
    """Test updating device config."""
    # a key outside the device-config schema is rejected
    payload = json.dumps({"invalid_option": 2})
    response = await client.post(
        "/api/config/zwave/device_config/hello.beer", data=payload
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_update_device_config_invalid_json(client):
    """Test updating device config."""
    # a body that is not JSON at all is rejected up front
    response = await client.post(
        "/api/config/zwave/device_config/hello.beer", data="not json"
    )
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_get_values(hass, client):
    """Test getting values on node."""
    # Node 1 carries a fully described value...
    node = MockNode(node_id=1)
    value = MockValue(
        value_id=123456,
        node=node,
        label="Test Label",
        instance=1,
        index=2,
        poll_intensity=4,
    )
    values = MockEntityValues(primary=value)
    # ...node 2 exists only to prove the view filters by node id.
    node2 = MockNode(node_id=2)
    value2 = MockValue(value_id=234567, node=node2, label="Test Label 2")
    values2 = MockEntityValues(primary=value2)
    hass.data[const.DATA_ENTITY_VALUES] = [values, values2]
    resp = await client.get("/api/zwave/values/1")
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    # only node 1's value is returned, keyed by its value_id
    assert result == {
        "123456": {
            "label": "Test Label",
            "instance": 1,
            "index": 2,
            "poll_intensity": 4,
        }
    }
async def test_get_groups(hass, client):
    """Test getting groupdata on node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=2)
    # configure the auto-created groups mock, then key it by group index
    node.groups.associations = "assoc"
    node.groups.associations_instances = "inst"
    node.groups.label = "the label"
    node.groups.max_associations = "max"
    node.groups = {1: node.groups}
    network.nodes = {2: node}
    resp = await client.get("/api/zwave/groups/2")
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    assert result == {
        "1": {
            "association_instances": "inst",
            "associations": "assoc",
            "label": "the label",
            "max_associations": "max",
        }
    }
async def test_get_groups_nogroups(hass, client):
    """Test getting groupdata on node with no groups."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {2: MockNode(node_id=2)}
    response = await client.get("/api/zwave/groups/2")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}
async def test_get_groups_nonode(hass, client):
    """Test getting groupdata on nonexisting node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {1: 1, 5: 5}
    response = await client.get("/api/zwave/groups/2")
    assert response.status == HTTPStatus.NOT_FOUND
    body = await response.json()
    assert body == {"message": "Node not found"}
async def test_get_config(hass, client):
    """Test getting config on node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=2)
    # a CONFIGURATION-class value is what the view exposes
    value = MockValue(index=12, command_class=const.COMMAND_CLASS_CONFIGURATION)
    value.label = "label"
    value.help = "help"
    value.type = "type"
    value.data = "data"
    value.data_items = ["item1", "item2"]
    value.max = "max"
    value.min = "min"
    node.values = {12: value}
    network.nodes = {2: node}
    node.get_values.return_value = node.values
    resp = await client.get("/api/zwave/config/2")
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    # result is keyed by the value's index
    assert result == {
        "12": {
            "data": "data",
            "data_items": ["item1", "item2"],
            "help": "help",
            "label": "label",
            "max": "max",
            "min": "min",
            "type": "type",
        }
    }
async def test_get_config_noconfig_node(hass, client):
    """Test getting config on node without config."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=2)
    node.get_values.return_value = node.values
    network.nodes = {2: node}
    response = await client.get("/api/zwave/config/2")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}
async def test_get_config_nonode(hass, client):
    """Test getting config on nonexisting node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {1: 1, 5: 5}
    response = await client.get("/api/zwave/config/2")
    assert response.status == HTTPStatus.NOT_FOUND
    body = await response.json()
    assert body == {"message": "Node not found"}
async def test_get_usercodes_nonode(hass, client):
    """Test getting usercodes on nonexisting node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    network.nodes = {1: 1, 5: 5}
    response = await client.get("/api/zwave/usercodes/2")
    assert response.status == HTTPStatus.NOT_FOUND
    body = await response.json()
    assert body == {"message": "Node not found"}
async def test_get_usercodes(hass, client):
    """Test getting usercodes on node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE])
    # a USER genre user-code value is what the view reports
    value = MockValue(index=0, command_class=const.COMMAND_CLASS_USER_CODE)
    value.genre = const.GENRE_USER
    value.label = "label"
    value.data = "1234"
    node.values = {0: value}
    network.nodes = {18: node}
    node.get_values.return_value = node.values
    resp = await client.get("/api/zwave/usercodes/18")
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    # "length" is derived from the code string ("1234" -> 4)
    assert result == {"0": {"code": "1234", "label": "label", "length": 4}}
async def test_get_usercode_nousercode_node(hass, client):
    """Test getting usercodes on node without usercodes."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18)
    node.get_values.return_value = node.values
    network.nodes = {18: node}
    response = await client.get("/api/zwave/usercodes/18")
    assert response.status == HTTPStatus.OK
    assert await response.json() == {}
async def test_get_usercodes_no_genreuser(hass, client):
    """Test getting usercodes on node missing genre user."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE])
    value = MockValue(index=0, command_class=const.COMMAND_CLASS_USER_CODE)
    # GENRE_SYSTEM values must be filtered out by the view
    value.genre = const.GENRE_SYSTEM
    value.label = "label"
    value.data = "1234"
    node.values = {0: value}
    network.nodes = {18: node}
    node.get_values.return_value = node.values
    resp = await client.get("/api/zwave/usercodes/18")
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    # nothing reported since the only value is not GENRE_USER
    assert result == {}
async def test_save_config_no_network(hass, client):
    """Test saving configuration without network data."""
    response = await client.post("/api/zwave/saveconfig")
    assert response.status == HTTPStatus.NOT_FOUND
    body = await response.json()
    assert body == {"message": "No Z-Wave network data found"}
async def test_save_config(hass, client):
    """Test saving configuration."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    response = await client.post("/api/zwave/saveconfig")
    assert response.status == HTTPStatus.OK
    body = await response.json()
    # the view must delegate to the network's write_config
    assert network.write_config.called
    assert body == {"message": "Z-Wave configuration saved to file"}
async def test_get_protection_values(hass, client):
    """Test getting protection values on node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
    value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    value.label = "Protection Test"
    value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    value.data = "Unprotected"
    network.nodes = {18: node}
    node.value = value
    # stub the three protection lookups the view makes
    node.get_protection_item.return_value = "Unprotected"
    node.get_protection_items.return_value = value.data_items
    node.get_protections.return_value = {value.value_id: "Object"}
    resp = await client.get("/api/zwave/protection/18")
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    assert node.get_protections.called
    assert node.get_protection_item.called
    assert node.get_protection_items.called
    # value_id is serialized as a string in the response
    assert result == {
        "value_id": "123456",
        "selected": "Unprotected",
        "options": ["Unprotected", "Protection by Sequence", "No Operation Possible"],
    }
async def test_get_protection_values_nonexisting_node(hass, client):
    """Test getting protection values on node with wrong nodeid."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
    value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    value.label = "Protection Test"
    value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    value.data = "Unprotected"
    # registered under 17, but the request targets 18
    network.nodes = {17: node}
    node.value = value
    resp = await client.get("/api/zwave/protection/18")
    assert resp.status == HTTPStatus.NOT_FOUND
    result = await resp.json()
    # none of the protection lookups may run for an unknown node
    assert not node.get_protections.called
    assert not node.get_protection_item.called
    assert not node.get_protection_items.called
    assert result == {"message": "Node not found"}
async def test_get_protection_values_without_protectionclass(hass, client):
    """Test getting protection values on node without protectionclass."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18)
    node.value = MockValue(value_id=123456, index=0, instance=1)
    network.nodes = {18: node}
    response = await client.get("/api/zwave/protection/18")
    assert response.status == HTTPStatus.OK
    body = await response.json()
    # without the protection command class, no lookups run and the
    # response is empty
    assert not node.get_protections.called
    assert not node.get_protection_item.called
    assert not node.get_protection_items.called
    assert body == {}
async def test_set_protection_value(hass, client):
    """Test setting protection value on node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
    value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    value.label = "Protection Test"
    value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    value.data = "Unprotected"
    network.nodes = {18: node}
    node.value = value
    # MagicMock's default (truthy) return means set_protection succeeds
    resp = await client.post(
        "/api/zwave/protection/18",
        data=json.dumps({"value_id": "123456", "selection": "Protection by Sequence"}),
    )
    assert resp.status == HTTPStatus.OK
    result = await resp.json()
    assert node.set_protection.called
    assert result == {"message": "Protection setting successfully set"}
async def test_set_protection_value_failed(hass, client):
    """Test setting protection value failed on node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
    value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    value.label = "Protection Test"
    value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    value.data = "Unprotected"
    network.nodes = {18: node}
    node.value = value
    # force the device-level call to report failure
    node.set_protection.return_value = False
    resp = await client.post(
        "/api/zwave/protection/18",
        data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
    )
    # failure is reported as 202 Accepted, not an error status
    assert resp.status == HTTPStatus.ACCEPTED
    result = await resp.json()
    assert node.set_protection.called
    assert result == {"message": "Protection setting did not complete"}
async def test_set_protection_value_nonexisting_node(hass, client):
    """Test setting protection value on nonexisting node."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    node = MockNode(node_id=17, command_classes=[const.COMMAND_CLASS_PROTECTION])
    value = MockValue(
        value_id=123456,
        index=0,
        instance=1,
        command_class=const.COMMAND_CLASS_PROTECTION,
    )
    value.label = "Protection Test"
    value.data_items = [
        "Unprotected",
        "Protection by Sequence",
        "No Operation Possible",
    ]
    value.data = "Unprotected"
    # registered under 17, but the request targets 18
    network.nodes = {17: node}
    node.value = value
    node.set_protection.return_value = False
    resp = await client.post(
        "/api/zwave/protection/18",
        data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
    )
    assert resp.status == HTTPStatus.NOT_FOUND
    result = await resp.json()
    # no device call may be attempted for an unknown node
    assert not node.set_protection.called
    assert result == {"message": "Node not found"}
async def test_set_protection_value_missing_class(hass, client):
    """Test setting protection value on node without protectionclass."""
    network = hass.data[DATA_NETWORK] = MagicMock()
    # node deliberately lacks COMMAND_CLASS_PROTECTION
    node = MockNode(node_id=17)
    value = MockValue(value_id=123456, index=0, instance=1)
    network.nodes = {17: node}
    node.value = value
    node.set_protection.return_value = False
    resp = await client.post(
        "/api/zwave/protection/17",
        data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
    )
    assert resp.status == HTTPStatus.NOT_FOUND
    result = await resp.json()
    assert not node.set_protection.called
    assert result == {"message": "No protection commandclass on this node"}
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the bindings for command line integration and dynamic loading of tasks
If you don't want to run luigi from the command line. You may use the methods
defined in this module to programatically run luigi.
"""
import logging
import logging.config
import os
import sys
import tempfile
import signal
import warnings
from luigi import configuration
from luigi import lock
from luigi import parameter
from luigi import rpc
from luigi import scheduler
from luigi import task
from luigi import worker
from luigi import execution_summary
from luigi.cmdline_parser import CmdlineParser
def setup_interface_logging(conf_file='', level_name='DEBUG'):
    """Configure logging for the ``luigi-interface`` logger exactly once.

    With no config file, a stream handler at *level_name* is attached;
    otherwise *conf_file* is handed to ``logging.config.fileConfig``.
    """
    # A flag stashed on the function object makes this idempotent.
    if getattr(setup_interface_logging, "has_run", False):
        return

    if conf_file != '':
        logging.config.fileConfig(conf_file, disable_existing_loggers=False)
    else:
        # no log config given, set up a sane default stream handler
        level = getattr(logging, level_name, logging.DEBUG)
        interface_logger = logging.getLogger('luigi-interface')
        interface_logger.setLevel(level)
        handler = logging.StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
        interface_logger.addHandler(handler)

    setup_interface_logging.has_run = True
class core(task.Config):
    ''' Keeps track of a bunch of environment params.
    Uses the internal luigi parameter mechanism.
    The nice thing is that we can instantiate this class
    and get an object with all the environment variables set.
    This is arguably a bit of a hack.
    '''
    # NOTE(review): presumably keeps reading options from the legacy
    # section names given in config_path below rather than a "[core]"
    # cmdline section — confirm against task.Config.
    use_cmdline_section = False

    # --- scheduler selection ---
    local_scheduler = parameter.BoolParameter(
        default=False,
        description='Use an in-memory central scheduler. Useful for testing.',
        always_in_help=True)
    scheduler_host = parameter.Parameter(
        default='localhost',
        description='Hostname of machine running remote scheduler',
        config_path=dict(section='core', name='default-scheduler-host'))
    scheduler_port = parameter.IntParameter(
        default=8082,
        description='Port of remote scheduler api process',
        config_path=dict(section='core', name='default-scheduler-port'))
    # scheduler_url, when set, takes precedence over host/port
    # (see _schedule_and_run).
    scheduler_url = parameter.Parameter(
        default='',
        description='Full path to remote scheduler',
        config_path=dict(section='core', name='default-scheduler-url'),
    )

    # --- pid-lock behaviour ---
    lock_size = parameter.IntParameter(
        default=1,
        description="Maximum number of workers running the same command")
    no_lock = parameter.BoolParameter(
        default=False,
        description='Ignore if similar process is already running')
    lock_pid_dir = parameter.Parameter(
        default=os.path.join(tempfile.gettempdir(), 'luigi'),
        description='Directory to store the pid file')
    take_lock = parameter.BoolParameter(
        default=False,
        description='Signal other processes to stop getting work if already running')

    # --- worker / logging ---
    workers = parameter.IntParameter(
        default=1,
        description='Maximum number of parallel tasks to run')
    logging_conf_file = parameter.Parameter(
        default='',
        description='Configuration file for logging')
    log_level = parameter.ChoiceParameter(
        default='DEBUG',
        choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        description="Default log level to use when logging_conf_file is not set")

    # --- misc ---
    module = parameter.Parameter(
        default='',
        description='Used for dynamic loading of modules',
        always_in_help=True)
    parallel_scheduling = parameter.BoolParameter(
        default=False,
        description='Use multiprocessing to do scheduling in parallel.')
    assistant = parameter.BoolParameter(
        default=False,
        description='Run any task from the scheduler.')
    help = parameter.BoolParameter(
        default=False,
        description='Show most common flags and all task-specific flags',
        always_in_help=True)
    help_all = parameter.BoolParameter(
        default=False,
        description='Show all command line flags',
        always_in_help=True)
class _WorkerSchedulerFactory(object):
    """Default factory for the scheduler and worker objects used by
    ``_schedule_and_run``; swap in a replacement for testing."""

    def create_local_scheduler(self):
        # in-process scheduler: prune finished work, keep no task history
        return scheduler.Scheduler(prune_on_get_work=True, record_task_history=False)

    def create_remote_scheduler(self, url):
        return rpc.RemoteScheduler(url)

    # NOTE: the 'scheduler' parameter shadows the luigi.scheduler module
    # within this method.
    def create_worker(self, scheduler, worker_processes, assistant=False):
        return worker.Worker(
            scheduler=scheduler, worker_processes=worker_processes, assistant=assistant)
def _schedule_and_run(tasks, worker_scheduler_factory=None, override_defaults=None):
    """
    :param tasks: task instances to schedule and run
    :param worker_scheduler_factory: factory for scheduler/worker objects
        (defaults to :class:`_WorkerSchedulerFactory`)
    :param override_defaults: dict of ``core`` parameter overrides
    :return: dict with ``success`` — True if all tasks and their dependencies
        were successfully run (or already completed), False if any error
        occurred — and ``worker``, the worker instance used.
    """
    if worker_scheduler_factory is None:
        worker_scheduler_factory = _WorkerSchedulerFactory()
    if override_defaults is None:
        override_defaults = {}
    env_params = core(**override_defaults)
    # search for logging configuration path first on the command line, then
    # in the application config file
    logging_conf = env_params.logging_conf_file
    if logging_conf != '' and not os.path.exists(logging_conf):
        raise Exception(
            "Error: Unable to locate specified logging configuration file!"
        )
    if not configuration.get_config().getboolean(
            'core', 'no_configure_logging', False):
        setup_interface_logging(logging_conf, env_params.log_level)
    # optionally signal the lock holder to stop taking new work
    kill_signal = signal.SIGUSR1 if env_params.take_lock else None
    if (not env_params.no_lock and
            not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size, kill_signal))):
        raise PidLockAlreadyTakenExit()
    if env_params.local_scheduler:
        sch = worker_scheduler_factory.create_local_scheduler()
    else:
        # an explicit scheduler_url wins over host/port
        if env_params.scheduler_url != '':
            url = env_params.scheduler_url
        else:
            url = 'http://{host}:{port:d}/'.format(
                host=env_params.scheduler_host,
                port=env_params.scheduler_port,
            )
        sch = worker_scheduler_factory.create_remote_scheduler(url=url)
    # FIX: previously named `worker`, which shadowed the imported
    # luigi.worker module inside this function.
    worker_instance = worker_scheduler_factory.create_worker(
        scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant)
    success = True
    logger = logging.getLogger('luigi-interface')
    with worker_instance:
        for t in tasks:
            success &= worker_instance.add(t, env_params.parallel_scheduling)
        logger.info('Done scheduling tasks')
        success &= worker_instance.run()
    logger.info(execution_summary.summary(worker_instance))
    return dict(success=success, worker=worker_instance)
class PidLockAlreadyTakenExit(SystemExit):
    """
    The exception thrown by :py:func:`luigi.run`, when the lock file is inaccessible
    (raised when the pid lock cannot be acquired and ``no_lock`` is not set).
    Subclasses SystemExit so an unhandled instance terminates the process.
    """
    pass
def run(*args, **kwargs):
    """Run tasks from the command line; return True on overall success."""
    outcome = _run(*args, **kwargs)
    return outcome['success']
def _run(cmdline_args=None, main_task_cls=None,
         worker_scheduler_factory=None, use_dynamic_argparse=None, local_scheduler=False):
    """
    Please dont use. Instead use `luigi` binary.
    Run from cmdline using argparse.
    :param cmdline_args: command line arguments (defaults to ``sys.argv[1:]``)
    :param main_task_cls: task class whose family name is prepended to the args
    :param worker_scheduler_factory: passed through to ``_schedule_and_run``
    :param use_dynamic_argparse: Deprecated and ignored
    :param local_scheduler: prepend ``--local-scheduler`` to the arguments
    """
    if use_dynamic_argparse is not None:
        warnings.warn("use_dynamic_argparse is deprecated, don't set it.",
                      DeprecationWarning, stacklevel=2)
    if cmdline_args is None:
        cmdline_args = sys.argv[1:]
    else:
        # FIX: work on a copy so the insert() calls below do not mutate
        # the caller's list.
        cmdline_args = list(cmdline_args)
    if main_task_cls:
        cmdline_args.insert(0, main_task_cls.task_family)
    if local_scheduler:
        cmdline_args.insert(0, '--local-scheduler')
    with CmdlineParser.global_instance(cmdline_args) as cp:
        return _schedule_and_run([cp.get_task_obj()], worker_scheduler_factory)
def build(tasks, worker_scheduler_factory=None, **env_params):
    """
    Run internally, bypassing the cmdline parsing.
    Useful if you have some luigi code that you want to run internally.
    Example:
    .. code-block:: python
        luigi.build([MyTask1(), MyTask2()], local_scheduler=True)
    One notable difference is that `build` defaults to not using
    the identical process lock. Otherwise, `build` would only be
    callable once from each process.
    :param tasks:
    :param worker_scheduler_factory:
    :param env_params:
    :return: True if there were no scheduling errors, even if tasks may fail.
    """
    # unlike `run`, skip the pid lock unless the caller asks for it
    env_params.setdefault("no_lock", True)
    outcome = _schedule_and_run(tasks, worker_scheduler_factory,
                                override_defaults=env_params)
    return outcome['success']
|
|
"""
Tests - Deuce Client - Client - Deuce - Block
"""
import json
import random
import urllib.parse
import uuid
import httpretty
import deuceclient.client.deuce
import deuceclient.api as api
from deuceclient.tests import *
from deuceclient.common import errors as errors
@httpretty.activate
class ClientDeuceBlockTests(ClientTestBase):
    def setUp(self):
        """Create an SSL-enabled DeuceClient against the test API host."""
        super(ClientDeuceBlockTests, self).setUp()
        self.client = deuceclient.client.deuce.DeuceClient(self.authenticator,
                                                           self.apihost,
                                                           sslenabled=True)
def tearDown(self):
super(ClientDeuceBlockTests, self).tearDown()
def test_patch_vault_block_status_non_existent_vault(self):
httpretty.register_uri(httpretty.PATCH,
get_blocks_url(
self.apihost,
self.vault.vault_id),
content_type='text/plain',
body="mock failure",
status=404)
with self.assertRaises(RuntimeError) as patch_error:
self.client.VaultBlockStatusReset(self.vault)
def test_patch_vault_block_status(self):
httpretty.register_uri(httpretty.PATCH,
get_blocks_url(
self.apihost,
self.vault.vault_id),
content_type='text/plain',
status=204)
self.assertTrue(
self.client.VaultBlockStatusReset(self.vault))
def test_block_list(self):
data = [block[0] for block in create_blocks(block_count=1)]
expected_data = json.dumps(data)
httpretty.register_uri(httpretty.GET,
get_blocks_url(self.apihost,
self.vault.vault_id),
content_type='application/json',
body=expected_data,
status=200)
self.assertTrue(self.client.GetBlockList(self.vault))
self.assertIsNone(self.vault.blocks.marker)
for block_id in data:
self.assertIn(block_id, self.vault.blocks)
    def test_block_list_with_next_batch(self):
        """Listing with an x-next-batch header records the marker."""
        data = [block[0] for block in create_blocks(block_count=1)]
        expected_data = json.dumps(data)
        # build an x-next-batch URL carrying the first block id as marker
        url = get_blocks_url(self.apihost, self.vault.vault_id)
        url_params = urllib.parse.urlencode({'marker': data[0]})
        next_batch = '{0}?{1}'.format(url, url_params)
        httpretty.register_uri(httpretty.GET,
                               url,
                               content_type='application/json',
                               adding_headers={
                                   'x-next-batch': next_batch
                               },
                               body=expected_data,
                               status=200)
        self.assertTrue(self.client.GetBlockList(self.vault))
        # the marker must be parsed back out of the x-next-batch URL
        self.assertIsNotNone(self.vault.blocks.marker)
        self.assertEqual(self.vault.blocks.marker, data[0])
        for block_id in data:
            self.assertIn(block_id, self.vault.blocks)
    def test_block_list_with_marker(self):
        """Listing with a request marker still stores the served blocks."""
        # block_id is only used as the request marker; the response body
        # is an independent fixture list.
        block_id, block_data, block_size = create_block()
        data = [block[0] for block in create_blocks(block_count=1)]
        expected_data = json.dumps(data)
        httpretty.register_uri(httpretty.GET,
                               get_blocks_url(self.apihost,
                                              self.vault.vault_id),
                               content_type='application/json',
                               body=expected_data,
                               status=200)
        self.assertTrue(self.client.GetBlockList(self.vault, marker=block_id))
        self.assertIsNone(self.vault.blocks.marker)
        # NOTE: the loop variable shadows the marker block_id above
        for block_id in data:
            self.assertIn(block_id, self.vault.blocks)
def test_block_list_with_marker_and_limit(self):
block_id, block_data, block_size = create_block()
data = [block[0] for block in create_blocks(block_count=5)]
expected_data = json.dumps(data)
httpretty.register_uri(httpretty.GET,
get_blocks_url(self.apihost,
self.vault.vault_id),
content_type='application/json',
body=expected_data,
status=200)
self.assertTrue(self.client.GetBlockList(self.vault,
marker=block_id,
limit=5))
self.assertEqual(len(data), len(self.vault.blocks))
self.assertIsNone(self.vault.blocks.marker)
for block_id in data:
self.assertIn(block_id, self.vault.blocks)
def test_block_list_with_limit(self):
data = [block[0] for block in create_blocks(block_count=5)]
expected_data = json.dumps(data)
httpretty.register_uri(httpretty.GET,
get_blocks_url(self.apihost,
self.vault.vault_id),
content_type='application/json',
body=expected_data,
status=200)
self.assertTrue(self.client.GetBlockList(self.vault, limit=5))
self.assertIsNone(self.vault.blocks.marker)
self.assertEqual(5, len(self.vault.blocks))
self.assertEqual(len(data), len(self.vault.blocks))
for block_id in data:
self.assertIn(block_id, self.vault.blocks)
def test_block_list_bad_vault(self):
    # Passing a vault id (str) instead of a Vault object must be rejected
    # with TypeError before any HTTP request is attempted.
    # (Removed unused `data`/`expected_data` locals: the original built a
    # mock block list that this test never used.)
    with self.assertRaises(TypeError):
        self.client.GetBlockList(self.vault.vault_id)
def test_block_list_failure(self):
    # A 404 from the blocks URL should surface as RuntimeError.
    httpretty.register_uri(httpretty.GET,
                           get_blocks_url(self.apihost,
                                          self.vault.vault_id),
                           content_type='text/plain',
                           body="mock failure",
                           status=404)
    with self.assertRaises(RuntimeError) as stats_error:
        self.client.GetBlockList(self.vault)
def test_blocks_upload_without_response(self):
    # Multi-block upload with request_mapping=False: a bare 201 with no
    # body is enough for success.
    blocks = []
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(5)]:
        blocks.append(block_id)
        a_block = api.Block(project_id=self.vault.project_id,
                            vault_id=self.vault.vault_id,
                            block_id=block_id,
                            data=blockdata)
        self.vault.blocks[block_id] = a_block
    httpretty.register_uri(httpretty.POST,
                           get_blocks_url(self.apihost,
                                          self.vault.vault_id),
                           status=201)
    self.assertTrue(self.client.UploadBlocks(self.vault,
                                             blocks,
                                             request_mapping=False))

def test_blocks_upload_with_response(self):
    # Default multi-block upload: the server returns a JSON mapping of
    # block id -> storage block, one entry per uploaded block.
    blocks = []
    response_data = {}
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(5)]:
        blocks.append(block_id)
        a_block = api.Block(project_id=self.vault.project_id,
                            vault_id=self.vault.vault_id,
                            block_id=block_id,
                            data=blockdata)
        self.vault.blocks[block_id] = a_block
        response_data[block_id] = create_storage_block(block_id)
    httpretty.register_uri(httpretty.POST,
                           get_blocks_url(self.apihost,
                                          self.vault.vault_id),
                           status=200,
                           body=json.dumps(response_data))
    self.assertTrue(self.client.UploadBlocks(self.vault, blocks))

def test_blocks_upload_no_blocks_in_vault(self):
    # Asking to upload ids that were never added to the local vault
    # should fail with KeyError before touching the network.
    blocks = []
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(5)]:
        blocks.append(block_id)
    with self.assertRaises(KeyError):
        self.client.UploadBlocks(self.vault, blocks)

def test_blocks_upload_failed(self):
    # A 404 from the POST should surface as RuntimeError.
    blocks = []
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(5)]:
        blocks.append(block_id)
        a_block = api.Block(project_id=self.vault.project_id,
                            vault_id=self.vault.vault_id,
                            block_id=block_id,
                            data=blockdata)
        self.vault.blocks[block_id] = a_block
    httpretty.register_uri(httpretty.POST,
                           get_blocks_url(self.apihost,
                                          self.vault.vault_id),
                           status=404)
    with self.assertRaises(RuntimeError):
        self.client.UploadBlocks(self.vault, blocks)
def test_block_upload(self):
    # Single-block PUT returning 201 reports success.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    httpretty.register_uri(httpretty.PUT,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block_id),
                           status=201)
    self.assertTrue(self.client.UploadBlock(self.vault, block))

def test_block_upload_bad_vault(self):
    # A vault id (str) in place of a Vault object is a TypeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    with self.assertRaises(TypeError):
        self.client.UploadBlock(self.vault.vault_id, block)

def test_block_upload_bad_block(self):
    # A block id (str) in place of a Block object is a TypeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    with self.assertRaises(TypeError):
        self.client.UploadBlock(self.vault, block.block_id)

def test_block_upload_failed(self):
    # A 404 from the PUT should surface as RuntimeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    httpretty.register_uri(httpretty.PUT,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block_id),
                           status=404)
    with self.assertRaises(RuntimeError) as upload_error:
        self.client.UploadBlock(self.vault, block)
def test_block_list_deletion(self):
    # Delete every block in the vault; each per-block DELETE answers 204
    # so every (block_id, result) pair reports success.
    count = 5
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(count)]:
        self.vault.blocks.add(api.Block(project_id=self.vault.project_id,
                                        vault_id=self.vault.vault_id,
                                        block_id=block_id,
                                        data=blockdata))
    self.assertEqual(len(self.vault.blocks), count)
    self.assertEqual(len(self.vault.blocks.keys()), count)
    [httpretty.register_uri(httpretty.DELETE,
                            get_block_url(self.apihost,
                                          self.vault.vault_id,
                                          block_id),
                            status=204) for block_id in
        self.vault.blocks.keys()]
    results = self.client.DeleteBlocks(self.vault,
                                       self.vault.blocks.keys())
    self.assertEqual(len(results), count)
    for block_id, r in results:
        self.assertTrue(r)

def test_block_list_deletion_failed(self):
    # Every per-block DELETE answers 404, so every result is False
    # (DeleteBlocks reports per-block failure rather than raising).
    count = 5
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(count)]:
        self.vault.blocks.add(api.Block(project_id=self.vault.project_id,
                                        vault_id=self.vault.vault_id,
                                        block_id=block_id,
                                        data=blockdata))
    self.assertEqual(len(self.vault.blocks), count)
    self.assertEqual(len(self.vault.blocks.keys()), count)
    [httpretty.register_uri(httpretty.DELETE,
                            get_block_url(self.apihost,
                                          self.vault.vault_id,
                                          block_id),
                            status=404) for block_id in
        self.vault.blocks.keys()]
    results = self.client.DeleteBlocks(self.vault,
                                       self.vault.blocks.keys())
    self.assertEqual(len(results), count)
    for block_id, r in results:
        self.assertFalse(r)

def test_block_list_deletion_mixed(self):
    # Alternate 204/404 responses per block and verify each result
    # matches the expectation recorded while registering the mocks.
    count = 5
    for block_id, blockdata, block_size in [create_block()
                                            for _ in range(count)]:
        self.vault.blocks.add(api.Block(project_id=self.vault.project_id,
                                        vault_id=self.vault.vault_id,
                                        block_id=block_id,
                                        data=blockdata))
    self.assertEqual(len(self.vault.blocks), count)
    self.assertEqual(len(self.vault.blocks.keys()), count)
    expected_results = {}
    # `count` is reused as the loop index here; it ends back at 5, which
    # is why the assertEqual below still checks the full result length.
    count = 0
    for block_id in self.vault.blocks.keys():
        if count % 2 == 0:
            httpretty.register_uri(httpretty.DELETE,
                                   get_block_url(self.apihost,
                                                 self.vault.vault_id,
                                                 block_id),
                                   status=204)
            expected_results[block_id] = True
        else:
            httpretty.register_uri(httpretty.DELETE,
                                   get_block_url(self.apihost,
                                                 self.vault.vault_id,
                                                 block_id),
                                   status=404)
            expected_results[block_id] = False
        count = count + 1
    results = self.client.DeleteBlocks(self.vault,
                                       self.vault.blocks.keys())
    self.assertEqual(len(results), count)
    for block_id, r in results:
        self.assertEqual(r, expected_results[block_id])
def test_block_deletion(self):
    # Single-block DELETE returning 204 reports success.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    httpretty.register_uri(httpretty.DELETE,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block_id),
                           status=204)
    self.assertTrue(self.client.DeleteBlock(self.vault, block))

def test_block_deletion_bad_vault(self):
    # A vault id (str) in place of a Vault object is a TypeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    with self.assertRaises(TypeError):
        self.client.DeleteBlock(self.vault.vault_id, block)

def test_block_deletion_bad_block(self):
    # A block id (str) in place of a Block object is a TypeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    with self.assertRaises(TypeError):
        self.client.DeleteBlock(self.vault, block.block_id)

def test_block_deletion_failed(self):
    # A 404 from the DELETE should surface as RuntimeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id,
                      data=blockdata)
    httpretty.register_uri(httpretty.DELETE,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block_id),
                           content_type='text/plain',
                           body="mock failure",
                           status=404)
    with self.assertRaises(RuntimeError) as deletion_error:
        self.client.DeleteBlock(self.vault, block)
def test_block_download(self):
    # GET on the block URL fills block.data with the response body.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    httpretty.register_uri(httpretty.GET,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block_id),
                           content_type='text/plain',
                           body=blockdata,
                           status=200)
    self.assertTrue(self.client.DownloadBlock(self.vault, block))
    self.assertEqual(block.data, blockdata)

def test_block_download_bad_vault(self):
    # A vault id (str) in place of a Vault object is a TypeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    with self.assertRaises(TypeError):
        self.client.DownloadBlock(self.vault.vault_id, block)

def test_block_download_bad_block(self):
    # A block id (str) in place of a Block object is a TypeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    with self.assertRaises(TypeError):
        self.client.DownloadBlock(self.vault, block.block_id)

def test_block_download_failed(self):
    # A 404 from the GET should surface as RuntimeError.
    block_id, blockdata, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    httpretty.register_uri(httpretty.GET,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block_id),
                           content_type='text/plain',
                           body="mock failure",
                           status=404)
    with self.assertRaises(RuntimeError) as deletion_error:
        self.client.DownloadBlock(self.vault, block)

def test_block_download_missing(self):
    # 410 Gone is mapped to the dedicated MissingBlockError,
    # distinguishing a vanished block from a generic failure.
    block_id, block_data, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    httpretty.register_uri(httpretty.GET,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block.block_id),
                           content_type='text/plain',
                           body='mocking error',
                           status=410)
    with self.assertRaises(errors.MissingBlockError):
        self.client.DownloadBlock(self.vault, block)
def test_block_head_non_existent(self):
    # HEAD returning 404 surfaces as a generic RuntimeError.
    block_id, block_data, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    httpretty.register_uri(httpretty.HEAD,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block.block_id),
                           content_type='text/plain',
                           body='mocking error',
                           status=404)
    with self.assertRaises(RuntimeError):
        self.client.HeadBlock(self.vault, block)

def test_block_head_missing(self):
    # HEAD returning 410 Gone maps to MissingBlockError.
    block_id, block_data, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    httpretty.register_uri(httpretty.HEAD,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block.block_id),
                           content_type='text/plain',
                           body='mocking error',
                           status=410)
    with self.assertRaises(errors.MissingBlockError):
        self.client.HeadBlock(self.vault, block)

def test_block_head(self):
    # A successful HEAD populates the block's metadata from the
    # x-* response headers (ref count, ref-modified, storage id, size).
    block_id, block_data, block_size = create_block()
    block = api.Block(project_id=self.vault.project_id,
                      vault_id=self.vault.vault_id,
                      block_id=block_id)
    check_data = {
        'storage-id': '{0}_{1}'.format(block_id, uuid.uuid4()),
        'ref-count': random.randint(0, 100),
        'ref-modified': int(datetime.datetime.max.timestamp()),
    }
    httpretty.register_uri(httpretty.HEAD,
                           get_block_url(self.apihost,
                                         self.vault.vault_id,
                                         block.block_id),
                           adding_headers={
                               'x-block-reference-count':
                               str(check_data['ref-count']),
                               'x-ref-modified':
                               str(check_data['ref-modified']),
                               'x-storage-id': check_data['storage-id'],
                               'x-block-id': block_id,
                               'x-block-size': str(block_size),
                           },
                           status=204)
    block = self.client.HeadBlock(self.vault, block)
    self.assertEqual(block.ref_count, check_data['ref-count'])
    self.assertEqual(block.ref_modified, check_data['ref-modified'])
    self.assertEqual(block.storage_id, check_data['storage-id'])
    self.assertEqual(block.block_id, block_id)
    self.assertEqual(len(block), block_size)
    self.assertFalse(block.block_orphaned)
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011,2012 Akira YOSHIYAMA <akirayoshiyama@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This source code is based ./auth_token.py and ./ec2_token.py.
# See them for their copyright.
"""
S3 TOKEN MIDDLEWARE
This WSGI component:
* Gets a request from the swift3 middleware with an S3 Authorization
  access key.
* Validates the S3 token in Keystone.
* Transforms the account name to AUTH_%(tenant_name).
"""
import logging
import requests
import six
from six.moves import urllib
import webob
from keystoneclient.openstack.common import jsonutils
PROTOCOL_NAME = 'S3 Token Authentication'
# TODO(kun): remove this once oslo merges it.
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
    """Validate and split the given HTTP request path.

    **Examples**::

        ['a'] = split_path('/a')
        ['a', None] = split_path('/a', 1, 2)
        ['a', 'c'] = split_path('/a/c', 1, 2)
        ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)

    :param path: HTTP Request path to be split
    :param minsegs: Minimum number of segments to be extracted
    :param maxsegs: Maximum number of segments to be extracted
    :param rest_with_last: If True, trailing data will be returned as part
                           of last segment.  If False, and there is
                           trailing data, raises ValueError.
    :returns: list of segments with a length of maxsegs (non-existent
              segments will return as None)
    :raises: ValueError if given an invalid path
    """
    if not maxsegs:
        maxsegs = minsegs
    if minsegs > maxsegs:
        raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))

    # Pre-build the rejection error once; both validation branches use it.
    bad_path = ValueError('Invalid path: %s' % urllib.parse.quote(path))

    if rest_with_last:
        # Stop splitting after maxsegs pieces so trailing data stays
        # attached to the final segment.
        pieces = path.split('/', maxsegs)
        minsegs += 1
        maxsegs += 1
        n = len(pieces)
        invalid = (pieces[0] or n < minsegs or n > maxsegs or
                   '' in pieces[1:minsegs])
    else:
        minsegs += 1
        maxsegs += 1
        # Split one extra time: any leftover piece signals trailing data.
        pieces = path.split('/', maxsegs)
        n = len(pieces)
        invalid = (pieces[0] or n < minsegs or n > maxsegs + 1 or
                   '' in pieces[1:minsegs] or
                   (n == maxsegs + 1 and pieces[maxsegs]))
    if invalid:
        raise bad_path

    # Drop the empty leading piece, then pad with None up to maxsegs - 1.
    result = pieces[1:maxsegs]
    result.extend([None] * (maxsegs - 1 - len(result)))
    return result
class ServiceError(Exception):
    # Raised by _json_request when Keystone validation fails; args[0]
    # carries the prepared webob error Response to return to the client.
    pass
class S3Token(object):
    """Auth Middleware that handles S3 authenticating client calls.

    Sits between swift3 and swift_auth: it takes the S3 access key and
    signature from the Authorization header, asks Keystone to validate
    them, then rewrites the request path's account segment to
    ``<reseller_prefix><tenant_id>``.
    """
    def __init__(self, app, conf):
        """Common initialization code.

        :param app: downstream WSGI application.
        :param conf: dict of paste.deploy options (auth_host, auth_port,
            auth_protocol, reseller_prefix, insecure, certfile, keyfile,
            log_name).
        """
        self.app = app
        self.logger = logging.getLogger(conf.get('log_name', __name__))
        self.logger.debug('Starting the %s component', PROTOCOL_NAME)
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        # where to find the auth service (we use this to validate tokens)
        auth_host = conf.get('auth_host')
        auth_port = int(conf.get('auth_port', 35357))
        auth_protocol = conf.get('auth_protocol', 'https')
        self.request_uri = '%s://%s:%s' % (auth_protocol, auth_host, auth_port)
        # SSL
        # self.verify is handed straight to requests.post(): False skips
        # certificate verification, None uses the requests default.
        # NOTE(review): the (cert_file, key_file) tuple is passed via
        # `verify`, while requests documents client certs under `cert` —
        # confirm this works as intended with the requests version in use.
        insecure = conf.get('insecure', False)
        cert_file = conf.get('certfile')
        key_file = conf.get('keyfile')
        if insecure:
            self.verify = False
        elif cert_file and key_file:
            self.verify = (cert_file, key_file)
        elif cert_file:
            self.verify = cert_file
        else:
            self.verify = None
    def deny_request(self, code):
        """Build a webob.Response with an S3-style XML error body.

        :param code: 'AccessDenied' (401) or 'InvalidURI' (400).
        :returns: a webob.Response ready to be returned as a WSGI app.
        """
        error_table = {
            'AccessDenied': (401, 'Access denied'),
            'InvalidURI': (400, 'Could not parse the specified URI'),
        }
        resp = webob.Response(content_type='text/xml')
        resp.status = error_table[code][0]
        error_msg = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
                     '<Error>\r\n <Code>%s</Code>\r\n '
                     '<Message>%s</Message>\r\n</Error>\r\n' %
                     (code, error_table[code][1]))
        # webob bodies must be bytes on Python 3.
        if six.PY3:
            error_msg = error_msg.encode()
        resp.body = error_msg
        return resp
    def _json_request(self, creds_json):
        """POST the S3 credentials to Keystone's /v2.0/s3tokens endpoint.

        :param creds_json: JSON-encoded credentials payload.
        :returns: the requests Response on a 2xx status.
        :raises ServiceError: wrapping a deny_request() response on
            connection failure (InvalidURI) or non-2xx (AccessDenied).
        """
        headers = {'Content-Type': 'application/json'}
        try:
            response = requests.post('%s/v2.0/s3tokens' % self.request_uri,
                                     headers=headers, data=creds_json,
                                     verify=self.verify)
        except requests.exceptions.RequestException as e:
            self.logger.info('HTTP connection exception: %s', e)
            resp = self.deny_request('InvalidURI')
            raise ServiceError(resp)
        if response.status_code < 200 or response.status_code >= 300:
            self.logger.debug('Keystone reply error: status=%s reason=%s',
                              response.status_code, response.reason)
            resp = self.deny_request('AccessDenied')
            raise ServiceError(resp)
        return response
    def __call__(self, environ, start_response):
        """Handle incoming request. authenticate and send downstream."""
        req = webob.Request(environ)
        self.logger.debug('Calling S3Token middleware.')
        # Requests that don't look like S3 calls are passed through
        # untouched at each early-return below.
        try:
            parts = split_path(req.path, 1, 4, True)
            version, account, container, obj = parts
        except ValueError:
            msg = 'Not a path query, skipping.'
            self.logger.debug(msg)
            return self.app(environ, start_response)
        # Read request signature and access id.
        if 'Authorization' not in req.headers:
            msg = 'No Authorization header. skipping.'
            self.logger.debug(msg)
            return self.app(environ, start_response)
        token = req.headers.get('X-Auth-Token',
                                req.headers.get('X-Storage-Token'))
        if not token:
            msg = 'You did not specify an auth or a storage token. skipping.'
            self.logger.debug(msg)
            return self.app(environ, start_response)
        auth_header = req.headers['Authorization']
        try:
            # Header form: "AWS <access>:<signature>"; rsplit keeps any
            # colons inside the access key intact.
            access, signature = auth_header.split(' ')[-1].rsplit(':', 1)
        except ValueError:
            msg = 'You have an invalid Authorization header: %s'
            self.logger.debug(msg, auth_header)
            return self.deny_request('InvalidURI')(environ, start_response)
        # NOTE(chmou): This is to handle the special case with nova
        # when we have the option s3_affix_tenant. We will force it to
        # connect to another account than the one
        # authenticated. Before people start getting worried about
        # security, I should point that we are connecting with
        # username/token specified by the user but instead of
        # connecting to its own account we will force it to go to an
        # another account. In a normal scenario if that user don't
        # have the reseller right it will just fail but since the
        # reseller account can connect to every account it is allowed
        # by the swift_auth middleware.
        force_tenant = None
        if ':' in access:
            access, force_tenant = access.split(':')
        # Authenticate request.
        creds = {'credentials': {'access': access,
                                 'token': token,
                                 'signature': signature}}
        creds_json = jsonutils.dumps(creds)
        self.logger.debug('Connecting to Keystone sending this JSON: %s',
                          creds_json)
        # NOTE(vish): We could save a call to keystone by having
        #             keystone return token, tenant, user, and roles
        #             from this call.
        #
        # NOTE(chmou): We still have the same problem we would need to
        #              change token_auth to detect if we already
        #              identified and not doing a second query and just
        #              pass it through to swiftauth in this case.
        try:
            resp = self._json_request(creds_json)
        except ServiceError as e:
            # e.args[0] is the prepared webob error Response.
            resp = e.args[0]
            msg = 'Received error, exiting middleware with error: %s'
            self.logger.debug(msg, resp.status_code)
            return resp(environ, start_response)
        self.logger.debug('Keystone Reply: Status: %d, Output: %s',
                          resp.status_code, resp.content)
        try:
            identity_info = resp.json()
            token_id = str(identity_info['access']['token']['id'])
            tenant = identity_info['access']['token']['tenant']
        except (ValueError, KeyError):
            error = 'Error on keystone reply: %d %s'
            self.logger.debug(error, resp.status_code, resp.content)
            return self.deny_request('InvalidURI')(environ, start_response)
        req.headers['X-Auth-Token'] = token_id
        tenant_to_connect = force_tenant or tenant['id']
        self.logger.debug('Connecting with tenant: %s', tenant_to_connect)
        # Rewrite the account segment of the path to the reseller-prefixed
        # tenant so downstream auth operates on the right Swift account.
        new_tenant_name = '%s%s' % (self.reseller_prefix, tenant_to_connect)
        environ['PATH_INFO'] = environ['PATH_INFO'].replace(account,
                                                            new_tenant_name)
        return self.app(environ, start_response)
def filter_factory(global_conf, **local_conf):
    """Return a WSGI filter app for use with paste.deploy."""
    # Local options take precedence over the global paste configuration.
    settings = dict(global_conf)
    settings.update(local_conf)

    def auth_filter(app):
        # Wrap the downstream app in the S3 token middleware.
        return S3Token(app, settings)

    return auth_filter
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from testtools import matchers
# NOTE(morganfainberg): import endpoint filter to populate the SQL model
from keystone.contrib import endpoint_filter # noqa
from keystone.tests.unit import test_v3
class TestExtensionCase(test_v3.RestfulTestCase):
    """Base case for OS-EP-FILTER tests.

    Switches the catalog driver to the endpoint-filter backend and
    precomputes the default project/endpoint association URL.
    """
    EXTENSION_NAME = 'endpoint_filter'
    EXTENSION_TO_ADD = 'endpoint_filter_extension'
    def config_overrides(self):
        # Use the endpoint-filter-aware catalog so filtered catalogs are
        # actually produced by the fixtures below.
        super(TestExtensionCase, self).config_overrides()
        self.config_fixture.config(
            group='catalog',
            driver='keystone.contrib.endpoint_filter.backends.catalog_sql.'
                   'EndpointFilterCatalog')
    def setUp(self):
        super(TestExtensionCase, self).setUp()
        # Canonical PUT/HEAD/DELETE target for the default project and
        # endpoint created by the base fixture.
        self.default_request_url = (
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s' % {
                'project_id': self.default_domain_project_id,
                'endpoint_id': self.endpoint_id})
class EndpointFilterCRUDTestCase(TestExtensionCase):
    """CRUD coverage for project/endpoint association resources."""
    def test_create_endpoint_project_association(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Valid endpoint and project id test case.
        """
        self.put(self.default_request_url,
                 body='',
                 expected_status=204)
    def test_create_endpoint_project_association_with_invalid_project(self):
        """PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid project id test case.
        """
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': uuid.uuid4().hex,
                     'endpoint_id': self.endpoint_id},
                 body='',
                 expected_status=404)
    def test_create_endpoint_project_association_with_invalid_endpoint(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid endpoint id test case.
        """
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.default_domain_project_id,
                     'endpoint_id': uuid.uuid4().hex},
                 body='',
                 expected_status=404)
    def test_create_endpoint_project_association_with_unexpected_body(self):
        """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Unexpected body in request. The body should be ignored.
        """
        self.put(self.default_request_url,
                 body={'project_id': self.default_domain_project_id},
                 expected_status=204)
    def test_check_endpoint_project_association(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Valid project and endpoint id test case.
        """
        self.put(self.default_request_url,
                 body='',
                 expected_status=204)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': self.default_domain_project_id,
                      'endpoint_id': self.endpoint_id},
                  expected_status=204)
    def test_check_endpoint_project_association_with_invalid_project(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': uuid.uuid4().hex,
                      'endpoint_id': self.endpoint_id},
                  body='',
                  expected_status=404)
    def test_check_endpoint_project_association_with_invalid_endpoint(self):
        """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid endpoint id test case.
        """
        self.put(self.default_request_url)
        self.head('/OS-EP-FILTER/projects/%(project_id)s'
                  '/endpoints/%(endpoint_id)s' % {
                      'project_id': self.default_domain_project_id,
                      'endpoint_id': uuid.uuid4().hex},
                  body='',
                  expected_status=404)
    def test_list_endpoints_associated_with_valid_project(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints

        Valid project and endpoint id test case.
        """
        self.put(self.default_request_url)
        resource_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(resource_url)
        self.assertValidEndpointListResponse(r, self.endpoint,
                                             resource_url=resource_url)
    def test_list_endpoints_associated_with_invalid_project(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints

        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.get('/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': uuid.uuid4().hex},
            body='',
            expected_status=404)
    def test_list_projects_associated_with_endpoint(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects

        Valid endpoint-project association test case.
        """
        self.put(self.default_request_url)
        resource_url = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {
            'endpoint_id': self.endpoint_id}
        r = self.get(resource_url)
        self.assertValidProjectListResponse(r, self.default_domain_project,
                                            resource_url=resource_url)
    def test_list_projects_with_no_endpoint_project_association(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects

        Valid endpoint id but no endpoint-project associations test case.
        """
        r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                     {'endpoint_id': self.endpoint_id},
                     expected_status=200)
        self.assertValidProjectListResponse(r, expected_length=0)
    def test_list_projects_associated_with_invalid_endpoint(self):
        """GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects

        Invalid endpoint id test case.
        """
        self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                 {'endpoint_id': uuid.uuid4().hex},
                 expected_status=404)
    def test_remove_endpoint_project_association(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Valid project id and endpoint id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': self.endpoint_id},
                    expected_status=204)
    def test_remove_endpoint_project_association_with_invalid_project(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid project id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': uuid.uuid4().hex,
                        'endpoint_id': self.endpoint_id},
                    body='',
                    expected_status=404)
    def test_remove_endpoint_project_association_with_invalid_endpoint(self):
        """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}

        Invalid endpoint id test case.
        """
        self.put(self.default_request_url)
        self.delete('/OS-EP-FILTER/projects/%(project_id)s'
                    '/endpoints/%(endpoint_id)s' % {
                        'project_id': self.default_domain_project_id,
                        'endpoint_id': uuid.uuid4().hex},
                    body='',
                    expected_status=404)
    def test_endpoint_project_association_cleanup_when_project_deleted(self):
        # Deleting the project must cascade-delete its associations.
        self.put(self.default_request_url)
        association_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
                           {'endpoint_id': self.endpoint_id})
        r = self.get(association_url, expected_status=200)
        self.assertValidProjectListResponse(r, expected_length=1)
        self.delete('/projects/%(project_id)s' % {
            'project_id': self.default_domain_project_id})
        r = self.get(association_url, expected_status=200)
        self.assertValidProjectListResponse(r, expected_length=0)
    def test_endpoint_project_association_cleanup_when_endpoint_deleted(self):
        # Deleting the endpoint must cascade-delete its associations.
        self.put(self.default_request_url)
        association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(association_url, expected_status=200)
        self.assertValidEndpointListResponse(r, expected_length=1)
        self.delete('/endpoints/%(endpoint_id)s' % {
            'endpoint_id': self.endpoint_id})
        r = self.get(association_url, expected_status=200)
        self.assertValidEndpointListResponse(r, expected_length=0)
class EndpointFilterTokenRequestTestCase(TestExtensionCase):
def test_project_scoped_token_using_endpoint_filter(self):
    """Verify endpoints from project scoped token filtered."""
    # create a project to work with
    ref = self.new_project_ref(domain_id=self.domain_id)
    r = self.post('/projects', body={'project': ref})
    project = self.assertValidProjectResponse(r, ref)
    # grant the user a role on the project
    self.put(
        '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
            'user_id': self.user['id'],
            'project_id': project['id'],
            'role_id': self.role['id']})
    # set the user's preferred project
    body = {'user': {'default_project_id': project['id']}}
    r = self.patch('/users/%(user_id)s' % {
        'user_id': self.user['id']},
        body=body)
    self.assertValidUserResponse(r)
    # add one endpoint to the project
    self.put('/OS-EP-FILTER/projects/%(project_id)s'
             '/endpoints/%(endpoint_id)s' % {
                 'project_id': project['id'],
                 'endpoint_id': self.endpoint_id},
             body='',
             expected_status=204)
    # attempt to authenticate without requesting a project; the token is
    # scoped to the default project and the catalog is filtered to the
    # single associated endpoint.
    auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'])
    r = self.post('/auth/tokens', body=auth_data)
    self.assertValidProjectScopedTokenResponse(
        r,
        require_catalog=True,
        endpoint_filter=True,
        ep_filter_assoc=1)
    self.assertEqual(r.result['token']['project']['id'], project['id'])

def test_default_scoped_token_using_endpoint_filter(self):
    """Verify endpoints from default scoped token filtered."""
    # add one endpoint to default project
    self.put('/OS-EP-FILTER/projects/%(project_id)s'
             '/endpoints/%(endpoint_id)s' % {
                 'project_id': self.project['id'],
                 'endpoint_id': self.endpoint_id},
             body='',
             expected_status=204)
    auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=self.project['id'])
    r = self.post('/auth/tokens', body=auth_data)
    self.assertValidProjectScopedTokenResponse(
        r,
        require_catalog=True,
        endpoint_filter=True,
        ep_filter_assoc=1)
    self.assertEqual(r.result['token']['project']['id'],
                     self.project['id'])

def test_project_scoped_token_with_no_catalog_using_endpoint_filter(self):
    """Verify endpoint filter when project scoped token returns no catalog.

    Test that the project scoped token response is valid for a given
    endpoint-project association when no service catalog is returned.
    """
    # create a project to work with
    ref = self.new_project_ref(domain_id=self.domain_id)
    r = self.post('/projects', body={'project': ref})
    project = self.assertValidProjectResponse(r, ref)
    # grant the user a role on the project
    self.put(
        '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
            'user_id': self.user['id'],
            'project_id': project['id'],
            'role_id': self.role['id']})
    # set the user's preferred project
    body = {'user': {'default_project_id': project['id']}}
    r = self.patch('/users/%(user_id)s' % {
        'user_id': self.user['id']},
        body=body)
    self.assertValidUserResponse(r)
    # add one endpoint to the project
    self.put('/OS-EP-FILTER/projects/%(project_id)s'
             '/endpoints/%(endpoint_id)s' % {
                 'project_id': project['id'],
                 'endpoint_id': self.endpoint_id},
             body='',
             expected_status=204)
    # attempt to authenticate without requesting a project; ?nocatalog
    # suppresses the service catalog in the token response.
    auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'])
    r = self.post('/auth/tokens?nocatalog', body=auth_data)
    self.assertValidProjectScopedTokenResponse(
        r,
        require_catalog=False,
        endpoint_filter=True,
        ep_filter_assoc=1)
    self.assertEqual(r.result['token']['project']['id'], project['id'])

def test_default_scoped_token_with_no_catalog_using_endpoint_filter(self):
    """Verify endpoint filter when default scoped token returns no catalog.

    Test that the default project scoped token response is valid for a
    given endpoint-project association when no service catalog is returned.
    """
    # add one endpoint to default project
    self.put('/OS-EP-FILTER/projects/%(project_id)s'
             '/endpoints/%(endpoint_id)s' % {
                 'project_id': self.project['id'],
                 'endpoint_id': self.endpoint_id},
             body='',
             expected_status=204)
    auth_data = self.build_authentication_request(
        user_id=self.user['id'],
        password=self.user['password'],
        project_id=self.project['id'])
    r = self.post('/auth/tokens?nocatalog', body=auth_data)
    self.assertValidProjectScopedTokenResponse(
        r,
        require_catalog=False,
        endpoint_filter=True,
        ep_filter_assoc=1)
    self.assertEqual(r.result['token']['project']['id'],
                     self.project['id'])
def test_project_scoped_token_with_no_endpoint_project_association(self):
"""Verify endpoint filter when no endpoint-project association.
Test that the project scoped token response is valid when there are
no endpoint-project associations defined.
"""
# create a project to work with
ref = self.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# grant the user a role on the project
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'user_id': self.user['id'],
'project_id': project['id'],
'role_id': self.role['id']})
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body=body)
self.assertValidUserResponse(r)
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True)
self.assertEqual(r.result['token']['project']['id'], project['id'])
def test_default_scoped_token_with_no_endpoint_project_association(self):
"""Verify endpoint filter when no endpoint-project association.
Test that the default project scoped token response is valid when
there are no endpoint-project associations defined.
"""
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens?nocatalog', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False,
endpoint_filter=True,)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_invalid_endpoint_project_association(self):
"""Verify an invalid endpoint-project association is handled."""
# add first endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
# create a second temporary endpoint
self.endpoint_id2 = uuid.uuid4().hex
self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
self.endpoint2['id'] = self.endpoint_id2
self.catalog_api.create_endpoint(
self.endpoint_id2,
self.endpoint2.copy())
# add second endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id2},
body='',
expected_status=204)
# remove the temporary reference
# this will create inconsistency in the endpoint filter table
# which is fixed during the catalog creation for token request
self.catalog_api.delete_endpoint(self.endpoint_id2)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
self.assertEqual(r.result['token']['project']['id'],
self.project['id'])
def test_disabled_endpoint(self):
"""Test that a disabled endpoint is handled."""
# Add an enabled endpoint to the default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': self.endpoint_id},
expected_status=204)
# Add a disabled endpoint to the default project.
# Create a disabled endpoint that's like the enabled one.
disabled_endpoint_ref = copy.copy(self.endpoint)
disabled_endpoint_id = uuid.uuid4().hex
disabled_endpoint_ref.update({
'id': disabled_endpoint_id,
'enabled': False,
'interface': 'internal'
})
self.catalog_api.create_endpoint(disabled_endpoint_id,
disabled_endpoint_ref)
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': disabled_endpoint_id},
expected_status=204)
# Authenticate to get token with catalog
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
endpoints = r.result['token']['catalog'][0]['endpoints']
endpoint_ids = [ep['id'] for ep in endpoints]
self.assertEqual([self.endpoint_id], endpoint_ids)
def test_multiple_endpoint_project_associations(self):
def _create_an_endpoint():
endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
r = self.post('/endpoints', body={'endpoint': endpoint_ref})
return r.result['endpoint']['id']
# create three endpoints
endpoint_id1 = _create_an_endpoint()
endpoint_id2 = _create_an_endpoint()
_create_an_endpoint()
# only associate two endpoints with project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint_id1},
expected_status=204)
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
'endpoint_id': endpoint_id2},
expected_status=204)
# there should be only two endpoints in token catalog
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
r = self.post('/auth/tokens', body=auth_data)
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=2)
class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
    """Check the JSON Home document advertises the OS-EP-FILTER resources."""

    # Expected relation-URL -> href(-template) entries; the mixin compares
    # these against the server's JSON Home response.
    JSON_HOME_DATA = {
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_projects': {
            'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects',
            'href-vars': {
                'endpoint_id':
                'http://docs.openstack.org/api/openstack-identity/3/param/'
                'endpoint_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_groups': {
            'href': '/OS-EP-FILTER/endpoint_groups',
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}',
            'href-vars': {
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_group_to_project_association': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}/projects/{project_id}',
            'href-vars': {
                'project_id':
                'http://docs.openstack.org/api/openstack-identity/3/param/'
                'project_id',
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/projects_associated_with_endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}/projects',
            'href-vars': {
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoints_in_endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
                             '{endpoint_group_id}/endpoints',
            'href-vars': {
                'endpoint_group_id':
                'http://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
    }
class EndpointGroupCRUDTestCase(TestExtensionCase):
    """CRUD and association tests for the OS-EP-FILTER endpoint-group API."""

    # Canonical request body reused by most tests; deep-copied before any
    # per-test mutation so tests do not contaminate each other.
    DEFAULT_ENDPOINT_GROUP_BODY = {
        'endpoint_group': {
            'description': 'endpoint group description',
            'filters': {
                'interface': 'admin'
            },
            'name': 'endpoint_group_name'
        }
    }

    DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups'

    def test_create_endpoint_group(self):
        """POST /OS-EP-FILTER/endpoint_groups
        Valid endpoint group test case.
        """
        r = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
                      body=self.DEFAULT_ENDPOINT_GROUP_BODY)
        expected_filters = (self.DEFAULT_ENDPOINT_GROUP_BODY
                            ['endpoint_group']['filters'])
        expected_name = (self.DEFAULT_ENDPOINT_GROUP_BODY
                         ['endpoint_group']['name'])
        self.assertEqual(expected_filters,
                         r.result['endpoint_group']['filters'])
        self.assertEqual(expected_name, r.result['endpoint_group']['name'])
        # The self link must point back at the newly created group.
        self.assertThat(
            r.result['endpoint_group']['links']['self'],
            matchers.EndsWith(
                '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
                    'endpoint_group_id': r.result['endpoint_group']['id']}))

    def test_create_invalid_endpoint_group(self):
        """POST /OS-EP-FILTER/endpoint_groups
        Invalid endpoint group creation test case.
        """
        # 'foobar' is not a recognized filter key, so creation must 400.
        invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'}
        self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
                  body=invalid_body,
                  expected_status=400)

    def test_get_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Valid endpoint group test case.
        """
        # create an endpoint group to work with
        response = self.post(self.DEFAULT_ENDPOINT_GROUP_URL,
                             body=self.DEFAULT_ENDPOINT_GROUP_BODY)
        endpoint_group_id = response.result['endpoint_group']['id']
        endpoint_group_filters = response.result['endpoint_group']['filters']
        endpoint_group_name = response.result['endpoint_group']['name']
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.get(url)
        # NOTE(review): the assertions below re-check the POST response
        # ('response'), not the GET result -- the GET body is discarded.
        # Presumably the GET response was intended; confirm upstream.
        self.assertEqual(endpoint_group_id,
                         response.result['endpoint_group']['id'])
        self.assertEqual(endpoint_group_filters,
                         response.result['endpoint_group']['filters'])
        self.assertEqual(endpoint_group_name,
                         response.result['endpoint_group']['name'])
        self.assertThat(response.result['endpoint_group']['links']['self'],
                        matchers.EndsWith(url))

    def test_get_invalid_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Invalid endpoint group test case.
        """
        endpoint_group_id = 'foobar'
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.get(url, expected_status=404)

    def test_check_endpoint_group(self):
        """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
        Valid endpoint_group_id test case.
        """
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.head(url, expected_status=200)

    def test_check_invalid_endpoint_group(self):
        """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
        Invalid endpoint_group_id test case.
        """
        endpoint_group_id = 'foobar'
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.head(url, expected_status=404)

    def test_patch_endpoint_group(self):
        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Valid endpoint group patch test case.
        """
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'region_id': 'UK'}
        body['endpoint_group']['name'] = 'patch_test'
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        r = self.patch(url, body=body)
        self.assertEqual(endpoint_group_id,
                         r.result['endpoint_group']['id'])
        self.assertEqual(body['endpoint_group']['filters'],
                         r.result['endpoint_group']['filters'])
        self.assertThat(r.result['endpoint_group']['links']['self'],
                        matchers.EndsWith(url))

    def test_patch_nonexistent_endpoint_group(self):
        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Invalid endpoint group patch test case.
        """
        body = {
            'endpoint_group': {
                'name': 'patch_test'
            }
        }
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': 'ABC'}
        self.patch(url, body=body, expected_status=404)

    def test_patch_invalid_endpoint_group(self):
        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Valid endpoint group patch test case.
        """
        # 'region' is not a valid filter key ('region_id' is), so 400.
        body = {
            'endpoint_group': {
                'description': 'endpoint group description',
                'filters': {
                    'region': 'UK'
                },
                'name': 'patch_test'
            }
        }
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.patch(url, body=body, expected_status=400)
        # Perform a GET call to ensure that the content remains
        # the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update
        # with an invalid filter
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        r = self.get(url)
        # Strip server-generated fields before comparing to the request body.
        del r.result['endpoint_group']['id']
        del r.result['endpoint_group']['links']
        self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result)

    def test_delete_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Valid endpoint group test case.
        """
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.delete(url)
        self.get(url, expected_status=404)

    def test_delete_invalid_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}
        Invalid endpoint group test case.
        """
        endpoint_group_id = 'foobar'
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.delete(url, expected_status=404)

    def test_add_endpoint_group_to_project(self):
        """Create a valid endpoint group and project association."""
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        self._create_endpoint_group_project_association(endpoint_group_id,
                                                        self.project_id)

    def test_add_endpoint_group_to_project_with_invalid_project_id(self):
        """Create an invalid endpoint group and project association."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # associate endpoint group with project
        project_id = uuid.uuid4().hex
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, project_id)
        self.put(url, expected_status=404)

    def test_get_endpoint_group_in_project(self):
        """Test retrieving project endpoint group association."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # associate endpoint group with project
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id)
        self.put(url)
        response = self.get(url)
        self.assertEqual(
            endpoint_group_id,
            response.result['project_endpoint_group']['endpoint_group_id'])
        self.assertEqual(
            self.project_id,
            response.result['project_endpoint_group']['project_id'])

    def test_get_invalid_endpoint_group_in_project(self):
        """Test retrieving project endpoint group association."""
        endpoint_group_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, project_id)
        self.get(url, expected_status=404)

    def test_check_endpoint_group_to_project(self):
        """Test HEAD with a valid endpoint group and project association."""
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        self._create_endpoint_group_project_association(endpoint_group_id,
                                                        self.project_id)
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id)
        self.head(url, expected_status=200)

    def test_check_endpoint_group_to_project_with_invalid_project_id(self):
        """Test HEAD with an invalid endpoint group and project association."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create an endpoint group to project association
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id)
        self.put(url)
        # send a head request with an invalid project id
        project_id = uuid.uuid4().hex
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, project_id)
        self.head(url, expected_status=404)

    def test_list_endpoint_groups(self):
        """GET /OS-EP-FILTER/endpoint_groups."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # recover all endpoint groups
        url = '/OS-EP-FILTER/endpoint_groups'
        r = self.get(url)
        self.assertNotEmpty(r.result['endpoint_groups'])
        self.assertEqual(endpoint_group_id,
                         r.result['endpoint_groups'][0].get('id'))

    def test_list_projects_associated_with_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects
        Valid endpoint group test case.
        """
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # associate endpoint group with project
        self._create_endpoint_group_project_association(endpoint_group_id,
                                                        self.project_id)
        # recover list of projects associated with endpoint group
        url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
               '/projects' %
               {'endpoint_group_id': endpoint_group_id})
        self.get(url)

    def test_list_endpoints_associated_with_endpoint_group(self):
        """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints
        Valid endpoint group test case.
        """
        # create a service
        service_ref = self.new_service_ref()
        response = self.post(
            '/services',
            body={'service': service_ref})
        service_id = response.result['service']['id']
        # create an endpoint
        endpoint_ref = self.new_endpoint_ref(service_id=service_id)
        response = self.post(
            '/endpoints',
            body={'endpoint': endpoint_ref})
        endpoint_id = response.result['endpoint']['id']
        # create an endpoint group whose filter matches the new service
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'service_id': service_id}
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, body)
        # create association
        self._create_endpoint_group_project_association(endpoint_group_id,
                                                        self.project_id)
        # recover list of endpoints associated with endpoint group
        url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
               '/endpoints' % {'endpoint_group_id': endpoint_group_id})
        r = self.get(url)
        self.assertNotEmpty(r.result['endpoints'])
        self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id'))

    def test_list_endpoints_associated_with_project_endpoint_group(self):
        """GET /OS-EP-FILTER/projects/{project_id}/endpoints
        Valid project, endpoint id, and endpoint group test case.
        """
        # create a temporary service
        service_ref = self.new_service_ref()
        response = self.post('/services', body={'service': service_ref})
        service_id2 = response.result['service']['id']
        # create additional endpoints
        self._create_endpoint_and_associations(
            self.default_domain_project_id, service_id2)
        self._create_endpoint_and_associations(
            self.default_domain_project_id)
        # create project and endpoint association with default endpoint:
        self.put(self.default_request_url)
        # create an endpoint group that contains a different endpoint
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'service_id': service_id2}
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, body)
        # associate endpoint group with project
        self._create_endpoint_group_project_association(
            endpoint_group_id, self.default_domain_project_id)
        # Now get a list of the filtered endpoints
        endpoints_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
            'project_id': self.default_domain_project_id}
        r = self.get(endpoints_url)
        endpoints = self.assertValidEndpointListResponse(r)
        # one endpoint from the direct association + one via the group
        self.assertEqual(len(endpoints), 2)
        # Now remove project endpoint group association
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.default_domain_project_id)
        self.delete(url)
        # Now remove endpoint group
        url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
            'endpoint_group_id': endpoint_group_id}
        self.delete(url)
        # only the directly associated endpoint should remain
        r = self.get(endpoints_url)
        endpoints = self.assertValidEndpointListResponse(r)
        self.assertEqual(len(endpoints), 1)

    def test_endpoint_group_project_cleanup_with_project(self):
        # create endpoint group
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create new project and associate with endpoint_group
        project_ref = self.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': project_ref})
        project = self.assertValidProjectResponse(r, project_ref)
        url = self._get_project_endpoint_group_url(endpoint_group_id,
                                                   project['id'])
        self.put(url)
        # check that we can recover the project endpoint group association
        self.get(url)
        # Now delete the project and then try and retrieve the project
        # endpoint group association again
        self.delete('/projects/%(project_id)s' % {
            'project_id': project['id']})
        self.get(url, expected_status=404)

    def test_endpoint_group_project_cleanup_with_endpoint_group(self):
        # create endpoint group
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create new project and associate with endpoint_group
        project_ref = self.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': project_ref})
        project = self.assertValidProjectResponse(r, project_ref)
        url = self._get_project_endpoint_group_url(endpoint_group_id,
                                                   project['id'])
        self.put(url)
        # check that we can recover the project endpoint group association
        self.get(url)
        # now remove the project endpoint group association
        self.delete(url)
        self.get(url, expected_status=404)

    def test_removing_an_endpoint_group_project(self):
        # create an endpoint group
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
        # create an endpoint_group project
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.default_domain_project_id)
        self.put(url)
        # remove the endpoint group project
        self.delete(url)
        self.get(url, expected_status=404)

    def _create_valid_endpoint_group(self, url, body):
        # Helper: POST an endpoint group and return its server-assigned id.
        r = self.post(url, body=body)
        return r.result['endpoint_group']['id']

    def _create_endpoint_group_project_association(self,
                                                   endpoint_group_id,
                                                   project_id):
        # Helper: PUT the group<->project association resource.
        url = self._get_project_endpoint_group_url(endpoint_group_id,
                                                   project_id)
        self.put(url)

    def _get_project_endpoint_group_url(self,
                                        endpoint_group_id,
                                        project_id):
        # Helper: build the association URL for a group/project pair.
        return ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
                '/projects/%(project_id)s' %
                {'endpoint_group_id': endpoint_group_id,
                 'project_id': project_id})

    def _create_endpoint_and_associations(self, project_id, service_id=None):
        """Creates an endpoint associated with service and project."""
        # NOTE(review): the association below uses self.project['id'], not
        # the project_id parameter -- presumably intentional for these
        # tests, but confirm; the parameter is otherwise unused.
        if not service_id:
            # create a new service
            service_ref = self.new_service_ref()
            response = self.post(
                '/services', body={'service': service_ref})
            service_id = response.result['service']['id']
        # create endpoint
        endpoint_ref = self.new_endpoint_ref(service_id=service_id)
        response = self.post('/endpoints', body={'endpoint': endpoint_ref})
        endpoint = response.result['endpoint']
        # now add endpoint to project
        self.put('/OS-EP-FILTER/projects/%(project_id)s'
                 '/endpoints/%(endpoint_id)s' % {
                     'project_id': self.project['id'],
                     'endpoint_id': endpoint['id']})
        return endpoint
|
|
#!/usr/bin/env python
"""Json related utilities."""
import copy
import datetime
import logging
from mapreduce.lib import simplejson
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.ext import db
# pylint: disable=invalid-name
class JsonEncoder(simplejson.JSONEncoder):
    """MR customized json encoder.

    Delegates encoding of registered types to their registered encoder
    function and tags the resulting dict with TYPE_ID so JsonDecoder can
    round-trip the value back to the original type.
    """

    # Marker key recording which registered type produced a json dict.
    TYPE_ID = "__mr_json_type"

    def default(self, o):
        """Inherit docs."""
        obj_type = type(o)
        if obj_type in _TYPE_TO_ENCODER:
            as_dict = _TYPE_TO_ENCODER[obj_type](o)
            as_dict[self.TYPE_ID] = obj_type.__name__
            return as_dict
        return super(JsonEncoder, self).default(o)
class JsonDecoder(simplejson.JSONDecoder):
    """MR customized json decoder.

    Restores objects produced by JsonEncoder by dispatching on the
    JsonEncoder.TYPE_ID marker embedded in the json dict.
    """

    def __init__(self, **kwargs):
        # Install our object hook unless the caller supplied one.
        if "object_hook" not in kwargs:
            kwargs["object_hook"] = self._dict_to_obj
        super(JsonDecoder, self).__init__(**kwargs)

    def _dict_to_obj(self, d):
        """Converts a dictionary of json object to a Python object.

        Args:
          d: a dict freshly decoded from json.

        Returns:
          d unchanged when it carries no TYPE_ID marker; otherwise the
          object rebuilt by the registered decoder for that type name.

        Raises:
          TypeError: if the marker names a type with no registered decoder.
        """
        if JsonEncoder.TYPE_ID not in d:
            return d
        type_name = d.pop(JsonEncoder.TYPE_ID)
        if type_name in _TYPE_NAME_TO_DECODER:
            decoder = _TYPE_NAME_TO_DECODER[type_name]
            return decoder(d)
        else:
            # Bug fix: the message must be %-formatted explicitly; the old
            # logging-style two-argument form raised
            # TypeError("Invalid type %s.", "Foo") with no interpolation.
            raise TypeError("Invalid type %s." % type_name)
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def _json_encode_datetime(o):
"""Json encode a datetime object.
Args:
o: a datetime object.
Returns:
A dict of json primitives.
"""
return {"isostr": o.strftime(_DATETIME_FORMAT)}
def _json_decode_datetime(d):
"""Converts a dict of json primitives to a datetime object."""
return datetime.datetime.strptime(d["isostr"], _DATETIME_FORMAT)
def _register_json_primitive(object_type, encoder, decoder):
"""Extend what MR can json serialize.
Args:
object_type: type of the object.
encoder: a function that takes in an object and returns a dict of
json primitives.
decoder: inverse function of encoder.
"""
global _TYPE_TO_ENCODER
global _TYPE_NAME_TO_DECODER
if object_type not in _TYPE_TO_ENCODER:
_TYPE_TO_ENCODER[object_type] = encoder
_TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
_TYPE_TO_ENCODER = {}
_TYPE_NAME_TO_DECODER = {}
# datetime is the only type registered out of the box.
_register_json_primitive(datetime.datetime,
                         _json_encode_datetime,
                         _json_decode_datetime)
class JsonMixin(object):
    """Simple, stateless json utilities mixin.

    Requires class to implement two methods:
      to_json(self): convert data to json-compatible datastructure (dict,
        list, strings, numbers)
      @classmethod from_json(cls, json): load data from json-compatible
        structure.
    """

    def to_json_str(self):
        """Convert data to json string representation.

        Returns:
          json representation as string.
        """
        json = self.to_json()
        try:
            return simplejson.dumps(json, sort_keys=True, cls=JsonEncoder)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate without being logged as serialization failures; real
        # errors are still logged and re-raised as before.
        except Exception:
            logging.exception("Could not serialize JSON: %r", json)
            raise

    @classmethod
    def from_json_str(cls, json_str):
        """Convert json string representation into class instance.

        Args:
          json_str: json representation as string.

        Returns:
          New instance of the class with data loaded from json string.
        """
        return cls.from_json(simplejson.loads(json_str, cls=JsonDecoder))
class JsonProperty(db.UnindexedProperty):
    """Property type for storing json representation of data.

    Requires data types to implement two methods:
      to_json(self): convert data to json-compatible datastructure (dict,
        list, strings, numbers)
      @classmethod from_json(cls, json): load data from json-compatible
        structure.
    """

    def __init__(self, data_type, default=None, **kwargs):
        """Constructor.

        Args:
          data_type: underlying data type as class.
          default: default value for the property. The value is deep copied
            for each model instance.
          **kwargs: remaining arguments.
        """
        kwargs["default"] = default
        super(JsonProperty, self).__init__(**kwargs)
        self.data_type = data_type

    def get_value_for_datastore(self, model_instance):
        """Gets value for datastore.

        Args:
          model_instance: instance of the model class.

        Returns:
          datastore-compatible value.
        """
        value = super(JsonProperty, self).get_value_for_datastore(
            model_instance)
        # NOTE: falsy values (None, {}, [], 0, "") are all stored as None;
        # they round-trip back as None, not as their original empty value.
        if not value:
            return None
        json_value = value
        # dicts are stored verbatim; any other type must supply to_json().
        if not isinstance(value, dict):
            json_value = value.to_json()
        if not json_value:
            return None
        return datastore_types.Text(simplejson.dumps(
            json_value, sort_keys=True, cls=JsonEncoder))

    def make_value_from_datastore(self, value):
        """Convert value from datastore representation.

        Args:
          value: datastore value.

        Returns:
          value to store in the model.
        """
        if value is None:
            return None
        json = simplejson.loads(value, cls=JsonDecoder)
        if self.data_type == dict:
            return json
        return self.data_type.from_json(json)

    def validate(self, value):
        """Validate value.

        Args:
          value: model value.

        Returns:
          Whether the specified value is valid data type value.

        Raises:
          BadValueError: when value is not of self.data_type type.
        """
        if value is not None and not isinstance(value, self.data_type):
            raise datastore_errors.BadValueError(
                "Property %s must be convertible to a %s instance (%s)" %
                (self.name, self.data_type, value))
        return super(JsonProperty, self).validate(value)

    def empty(self, value):
        """Checks if value is empty.

        Args:
          value: model value.

        Returns:
          True passed value is empty.
        """
        return not value

    def default_value(self):
        """Create default model value.

        If default option was specified, then it will be deeply copied.
        None otherwise.

        Returns:
          default model value.
        """
        # Deep copy prevents model instances from sharing one mutable
        # default object.
        if self.default:
            return copy.deepcopy(self.default)
        else:
            return None
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""ShakesCorpus is an iterable over the lines of The Complete Works of William Shakespeare
This module provides the ShakesCorpus class and functions for segmenting
Gutenberg Project books with formatting similar to the text file used for this Corpus.
source text: 'gensim/test/test_data/shakespeare-complete-works.txt.gz'
source meta-data: 'gensim/test/test_data/shakespeare-complete-works-meta.json'
"""
from __future__ import with_statement
import gzip
import re
import json
import logging
# from six import string_types
from gensim import utils
from gensim.corpora.textcorpus import TextCorpus
from gensim.corpora.dictionary import Dictionary
# Module-level logger for this corpus module.
logger = logging.getLogger('gensim.corpora.shakescorpus')

# FIXME:
# module_path = os.path.dirname(__file__)

# Path to the gzipped plain-text source within gensim's test data.
PATH_SHAKESPEARE = utils.datapath('shakespeare-complete-works.txt.gz')
# Roman-numeral -> int lookup for 1..10, extended below up to 'XXXXX' (50).
DICT_ROMAN2INT = {'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5,
                  'VI': 6, 'VII': 7, 'VIII': 8, 'IX': 9, 'X': 10}
# Bug fix: the original iterated DICT_ROMAN2INT.items() while inserting into
# the dict (a RuntimeError on Python 3) and always added only 10 to the base
# value, so e.g. 'XXI' mapped to 11 instead of 21.  Iterate a snapshot of the
# base numerals and add 10 per prepended 'X'.
_BASE_NUMERALS = list(DICT_ROMAN2INT.items())
for num_X in range(1, 5):
    for s, num in _BASE_NUMERALS:
        DICT_ROMAN2INT['X' * num_X + s] = 10 * num_X + num
# Same gzipped source text as PATH_SHAKESPEARE, under the short alias used by
# ShakesCorpus.
PATH_SHAKES = utils.datapath('shakespeare-complete-works.txt.gz')
# Load the pre-computed book metadata.  A context manager closes the file
# handle promptly (the original json.load(open(...)) leaked it), and plain
# 'r' replaces 'rU': universal newlines are the default in Python 3 and the
# 'U' flag was removed in Python 3.11.
with open(utils.datapath('shakespeare-complete-works-meta.json'), 'r') as _meta_file:
    META_SHAKES = json.load(_meta_file)
# 3-8 consecutive ALL-CAPS words (allowing - ; , ' inside) -- a work's title.
RE_TITLE = re.compile(r'(([-;,\'A-Z]+[ ]?){3,8})')
# A line consisting solely of such a title.
RE_TITLE_LINE = re.compile(r'^' + RE_TITLE.pattern + r'$')
# The Project Gutenberg "*** START OF THIS PROJECT GUTENBERG EBOOK ... ***"
# marker preceding the body of the book.
RE_GUTEN_LINE = re.compile(r'^\*\*\*\ START\ OF\ THIS\ PROJECT\ GUTENBERG\ EBOOK\ ' +
                           RE_TITLE.pattern + r'\ \*\*\*$')
# "ACT IV. SCENE 2."-style section headings within a play.
RE_ACT_SCENE_LINE = re.compile(r'^((ACT\ [IV]+)[.]?\ (SCENE\ [0-9]{1,2})[.]?)$')
# A bare year 1500-1699, printed before each work.
RE_YEAR_LINE = re.compile(r'^1[56][0-9]{2}$')
# Closing marker of a single work.
RE_THE_END = re.compile(r'^THE[ ]END$')
# Author attribution line following a title.
RE_BY_LINE = re.compile(r'^((by\ )?(William\ Shakespeare))$', re.IGNORECASE)
def generate_lines(input_file,
                   start=0,
                   stop=float('inf')):
    """Yield right-stripped lines of a gzipped text file, one at a time.

    Args:
        input_file: path to a *.txt.gz file.
        start: index of the first line to yield (earlier lines are skipped).
        stop: index at which to stop yielding (exclusive).

    Yields:
        str: each line with trailing whitespace removed.
    """
    # Bug fix: GzipFile rejects the original 'rU' mode on Python 3
    # (ValueError: Invalid mode).  gzip.open in text mode ('rt') yields
    # str lines, which is what the regex matching downstream expects.
    with gzip.open(input_file, 'rt') as f:
        for i, line in enumerate(f):
            if i < start:
                continue
            if i >= stop:
                break
            yield line.rstrip()
def segment_shakespeare_works(input_file=PATH_SHAKESPEARE, verbose=False):
    """Find start and end of each volume within _Complete Works of William Shakespeare_

    Scans the book line by line and returns a dict
    ``{'title': ..., 'body_start': ..., 'volumes': [...]}`` where each volume
    dict accumulates ``year``, ``start``, ``title``, ``title_lineno``, ``by``,
    ``by_lineno``, optional ``sections`` (ACT/SCENE headings) and ``stop``.

    The per-volume parser is a small state machine keyed on how many fields
    of ``works[j]`` have been filled in so far:
    0 fields -> expect a year line; 2 -> expect a title line; 4 -> expect a
    "by William Shakespeare" line; more -> collect ACT/SCENE headings until
    a "THE END" line closes the volume.
    """
    works = [{}]  # works[j] accumulates metadata for the j-th volume
    meta = {}
    j = 0
    for i, line in enumerate(generate_lines(input_file=input_file)):
        # First locate the Gutenberg "*** START OF THIS PROJECT ... ***"
        # banner, which carries the book title and marks the body start.
        if 'title' not in meta:
            match = RE_GUTEN_LINE.match(line)
            if match:
                meta['title'] = match.groups()[0]
                meta['body_start'] = i
            continue
        if j >= len(works):
            works += [{}]  # begin collecting the next volume
        if not len(works[j]):
            # State 0: a bare year line (e.g. "1609") opens a new volume.
            match = RE_YEAR_LINE.match(line)
            if match:
                if verbose:
                    print(" year {:02d}, {}: {}".format(j, i, match.group()))
                works[j]['year'] = int(match.group())
                works[j]['start'] = i
        elif len(works[j]) == 2:
            # State 2 (year + start known): expect the all-caps title line.
            match = RE_TITLE_LINE.match(line)
            if match:
                if verbose:
                    print("title {:02d}, {}: {}".format(j, i, match.groups()[0]))
                works[j]['title'] = match.groups()[0]
                works[j]['title_lineno'] = i
        elif len(works[j]) == 4:
            # State 4 (plus title): expect the "by William Shakespeare" line.
            match = RE_BY_LINE.match(line)
            if match:
                if verbose:
                    print("   by {:02d}, {}: {}".format(j, i, match.group()))
                works[j]['by'] = match.groups()[2]
                works[j]['by_lineno'] = i
        elif len(works[j]) > 4:
            # Fully headed volume: accumulate ACT/SCENE headings, or close
            # the volume on "THE END".
            match = RE_ACT_SCENE_LINE.match(line)
            if match:
                section_meta = {
                    'start': i,
                    'title': match.groups()[0],
                    'act_roman': match.groups()[1].split()[-1],
                    'act': int(DICT_ROMAN2INT[match.groups()[1].split()[-1]]),
                    'scene': int(match.groups()[2].split()[-1]),
                }
                works[j]['sections'] = works[j].get('sections', []) + [section_meta]
            else:
                match = RE_THE_END.match(line)
                if match and 'GUTENBERG' not in match.group().upper():
                    if verbose:
                        print(" stop {:02d}, {}: {}".format(j, i, match.group()))
                    works[j]['stop'] = i
                    j += 1
    # Drop a trailing empty accumulator if the file ended mid-volume.
    if not len(works[-1]):
        works = works[:-1]
    meta['volumes'] = works
    return meta
class ShakesCorpus(TextCorpus):
    """Iterable, memory-efficient sequence of BOWs (bag of words vectors) for each line in Shakespeare's works."""
    def __init__(self, input_file=PATH_SHAKES, lemmatize=False, lowercase=False, dictionary=None, filter_namespaces=('0',), metadata=False):
        """Initialize a Corpus of the lines in Shakespeare's Collected Works
        Unless a dictionary is provided, this scans the corpus once, to determine its vocabulary.
        This Corpus should not be used with any other input_file than that provided in gensim/test/test_data.
        >>> shakes = ShakesCorpus()
        >>> for i, tokens in enumerate(shakes.get_texts()):
        ...     print(i, tokens)
        ...     if i >= 4:
        ...         break
        (0, [])
        (1, [])
        (2, [u'THE', u'SONNETS'])
        (3, [])
        (4, [u'by', u'William', u'Shakespeare'])
        >>> for i, vec in enumerate(shakes):
        ...     print(i, vec)
        ...     if i >= 4:
        ...         break
        (0, [])
        (1, [])
        (2, [(0, 1), (1, 1)])
        (3, [])
        (4, [(2, 1), (3, 1), (4, 1)])
        """
        if input_file is None:
            raise ValueError('ShakesCorpus requires an input document which it preprocesses to compute ' +
                             'the Dictionary and `book_meta` information (title, sections, etc).')
        super(ShakesCorpus, self).__init__(input=None, metadata=metadata)
        self.lowercase = lowercase
        self.lemmatize = lemmatize
        # `input_file` is guaranteed non-None past the guard above, so the
        # metadata is always recomputed by scanning the book; the previous
        # `if input_file is None:` branch that read META_SHAKES was dead code.
        # (logger.warn is a deprecated alias of logger.warning.)
        logger.warning('This ShakesCorpus is only intended for use with the gzipped text file from the ' +
                       'Gutenberg project which comes with gensim.')
        self.book_meta = segment_shakespeare_works(input_file)
        self.input_file_path = input_file or PATH_SHAKES
        self.dictionary = Dictionary(self.get_texts(metadata=False))
    def get_texts(self, metadata=None):
        """Iterate over the lines of "The Complete Works of William Shakespeare".
        This yields lists of strings (**texts**) rather than vectors (vectorized bags-of-words).
        And the **texts** yielded are lines rather than entire plays or sonnets.
        If you want vectors, use the corpus interface instead of this method.
        >>> shakes = ShakesCorpus(lowercase=True)
        >>> for i, tokens in enumerate(shakes.get_texts()):
        ...     print(i, tokens)
        ...     if i >= 4:
        ...         break
        (0, [])
        (1, [])
        (2, [u'the', u'sonnets'])
        (3, [])
        (4, [u'by', u'william', u'shakespeare'])
        """
        if metadata is None:
            metadata = self.metadata
        self.input_file = gzip.GzipFile(self.input_file_path)
        volume_num = 0
        with self.input_file as lines:
            for lineno, line in enumerate(lines):
                if volume_num >= len(self.book_meta['volumes']):
                    # PEP 479: `raise StopIteration()` inside a generator is a
                    # RuntimeError on Python 3.7+; a plain return ends iteration.
                    return
                if lineno < self.book_meta['volumes'][volume_num]['start']:
                    continue
                if lineno < self.book_meta['volumes'][volume_num]['stop']:
                    # act_num, scene_num = 0, 0  # FIXME: use self.book_meta['volumes'][volume_num]['sections']
                    # FIXME: use self.lemmatize
                    toks = self.tokenize(line, lowercase=self.lowercase)
                    if metadata:
                        yield (toks, (lineno,))
                    else:
                        yield toks
                else:
                    volume_num += 1  # don't yield the "THE END" line?
    def tokenize(self, line, **kwargs):
        """Split one raw line into a list of token strings."""
        return list(utils.tokenize(line, **kwargs))
    def __len__(self):
        """Number of texts yielded by get_texts() (computed once, then cached)."""
        if not hasattr(self, 'length'):
            # cache the corpus length
            self.length = sum(1 for _ in self.get_texts())
        return self.length
# endclass TextCorpus
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import operator
import time
import unittest2
from gcloud import _helpers
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import UTC
from gcloud.bigtable.client import Client
from gcloud.bigtable.column_family import MaxVersionsGCRule
from gcloud.bigtable.row_filters import ApplyLabelFilter
from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter
from gcloud.bigtable.row_filters import RowFilterChain
from gcloud.bigtable.row_filters import RowFilterUnion
from gcloud.bigtable.row_data import Cell
from gcloud.bigtable.row_data import PartialRowData
from gcloud.environment_vars import TESTS_PROJECT
from system_test_utils import unique_resource_id
# Location/instance/table identifiers shared by every test in this module.
LOCATION_ID = 'us-central1-c'
INSTANCE_ID = 'gcloud' + unique_resource_id('-')
TABLE_ID = 'gcloud-python-test-table'
# Column families are unicode; column qualifiers and cell values are bytes,
# matching what the Bigtable API expects.
COLUMN_FAMILY_ID1 = u'col-fam-id1'
COLUMN_FAMILY_ID2 = u'col-fam-id2'
COL_NAME1 = b'col-name1'
COL_NAME2 = b'col-name2'
COL_NAME3 = b'col-name3-but-other-fam'
CELL_VAL1 = b'cell-val'
CELL_VAL2 = b'cell-val-newer'
CELL_VAL3 = b'altcol-cell-val'
CELL_VAL4 = b'foo'
ROW_KEY = b'row-key'
ROW_KEY_ALT = b'row-key-alt'
# Instances that already existed before setUpModule() created ours;
# populated in setUpModule() so tests can tell the two apart.
EXISTING_INSTANCES = []
class Config(object):
    """Run-time configuration to be modified at set-up.
    This is a mutable stand-in to allow test set-up to modify
    global state.
    """
    # Shared admin Client, created in setUpModule().
    CLIENT = None
    # Shared Instance under test, created in setUpModule().
    INSTANCE = None
def _operation_wait(operation, max_attempts=5):
"""Wait until an operation has completed.
:type operation: :class:`gcloud.bigtable.instance.Operation`
:param operation: Operation that has not finished.
:type max_attempts: int
:param max_attempts: (Optional) The maximum number of times to check if
the operation has finished. Defaults to 5.
:rtype: bool
:returns: Boolean indicating if the operation finished.
"""
total_sleep = 0
while not operation.finished():
if total_sleep > max_attempts:
return False
time.sleep(1)
total_sleep += 1
return True
def _retry_backoff(meth, *args, **kw):
    """Call ``meth(*args, **kw)``, retrying with exponential backoff.

    Only gRPC ``AbortionError``s whose status is UNAVAILABLE are retried
    (sleeping 1, 2, 4 then 8 seconds); anything else, or exhaustion of the
    backoff schedule, re-raises the error.
    """
    from grpc.beta.interfaces import StatusCode
    from grpc.framework.interfaces.face.face import AbortionError
    delays = [1, 2, 4, 8]
    while True:
        try:
            return meth(*args, **kw)
        except AbortionError as error:
            # Non-transient failures propagate immediately.
            if error.code != StatusCode.UNAVAILABLE:
                raise
            if not delays:
                raise
            time.sleep(delays.pop(0))
def setUpModule():
    """Create the shared client and test instance used by the whole module."""
    _helpers.PROJECT = TESTS_PROJECT
    Config.CLIENT = Client(admin=True)
    Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID)
    Config.CLIENT.start()
    # Snapshot the pre-existing instances so tests can tell ours apart.
    instances, failed_locations = _retry_backoff(
        Config.CLIENT.list_instances)
    if len(failed_locations) != 0:
        raise ValueError('List instances failed in module set up.')
    EXISTING_INSTANCES[:] = instances
    # After listing, create the test instance.
    created_op = Config.INSTANCE.create()
    if not _operation_wait(created_op):
        raise RuntimeError('Instance creation exceed 5 seconds.')
def tearDownModule():
    """Delete the shared test instance and shut down the client."""
    Config.INSTANCE.delete()
    Config.CLIENT.stop()
class TestInstanceAdminAPI(unittest2.TestCase):
    """Live-service tests for Bigtable instance administration."""
    def setUp(self):
        # Instances created during a test are queued here for teardown.
        self.instances_to_delete = []
    def tearDown(self):
        for instance in self.instances_to_delete:
            instance.delete()
    def test_list_instances(self):
        """Listing returns the pre-existing instances plus the shared one."""
        instances, failed_locations = Config.CLIENT.list_instances()
        self.assertEqual(failed_locations, [])
        # We have added one new instance in `setUpModule`.
        self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
        for instance in instances:
            instance_existence = (instance in EXISTING_INSTANCES or
                                  instance == Config.INSTANCE)
            self.assertTrue(instance_existence)
    def test_reload(self):
        """reload() repopulates metadata from the service."""
        # Use same arguments as Config.INSTANCE (created in `setUpModule`)
        # so we can use reload() on a fresh instance.
        instance = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID)
        # Make sure metadata unset before reloading.
        instance.display_name = None
        instance.reload()
        self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
    def test_create_instance(self):
        """A newly created instance is retrievable and equal after reload()."""
        ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
        instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
        operation = instance.create()
        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)
        # We want to make sure the operation completes.
        self.assertTrue(_operation_wait(operation))
        # Create a new instance instance and make sure it is the same.
        instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
        instance_alt.reload()
        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)
    def test_update(self):
        """Updating display_name propagates; the old name is restored after."""
        OLD_DISPLAY_NAME = Config.INSTANCE.display_name
        NEW_DISPLAY_NAME = 'Foo Bar Baz'
        Config.INSTANCE.display_name = NEW_DISPLAY_NAME
        Config.INSTANCE.update()
        # Create a new instance instance and reload it.
        instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
        self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
        instance_alt.reload()
        self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
        # Make sure to put the instance back the way it was for the
        # other test cases.
        Config.INSTANCE.display_name = OLD_DISPLAY_NAME
        Config.INSTANCE.update()
class TestTableAdminAPI(unittest2.TestCase):
    """Live-service tests for Bigtable table and column-family administration."""
    @classmethod
    def setUpClass(cls):
        # One table shared by all tests in this class.
        cls._table = Config.INSTANCE.table(TABLE_ID)
        cls._table.create()
    @classmethod
    def tearDownClass(cls):
        cls._table.delete()
    def setUp(self):
        # Tables created during a test are queued here for teardown.
        self.tables_to_delete = []
    def tearDown(self):
        for table in self.tables_to_delete:
            table.delete()
    def test_list_tables(self):
        """The shared table is the only one in the fresh instance."""
        # Since `Config.INSTANCE` is newly created in `setUpModule`, the table
        # created in `setUpClass` here will be the only one.
        tables = Config.INSTANCE.list_tables()
        self.assertEqual(tables, [self._table])
    def test_create_table(self):
        """Creating a table makes it show up in list_tables()."""
        temp_table_id = 'foo-bar-baz-table'
        temp_table = Config.INSTANCE.table(temp_table_id)
        temp_table.create()
        self.tables_to_delete.append(temp_table)
        # First, create a sorted version of our expected result.
        name_attr = operator.attrgetter('name')
        expected_tables = sorted([temp_table, self._table], key=name_attr)
        # Then query for the tables in the instance and sort them by
        # name as well.
        tables = Config.INSTANCE.list_tables()
        sorted_tables = sorted(tables, key=name_attr)
        self.assertEqual(sorted_tables, expected_tables)
    def test_create_column_family(self):
        """A created column family round-trips its ID and GC rule."""
        temp_table_id = 'foo-bar-baz-table'
        temp_table = Config.INSTANCE.table(temp_table_id)
        temp_table.create()
        self.tables_to_delete.append(temp_table)
        self.assertEqual(temp_table.list_column_families(), {})
        gc_rule = MaxVersionsGCRule(1)
        column_family = temp_table.column_family(COLUMN_FAMILY_ID1,
                                                 gc_rule=gc_rule)
        column_family.create()
        col_fams = temp_table.list_column_families()
        self.assertEqual(len(col_fams), 1)
        retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1]
        self.assertTrue(retrieved_col_fam._table is column_family._table)
        self.assertEqual(retrieved_col_fam.column_family_id,
                         column_family.column_family_id)
        self.assertEqual(retrieved_col_fam.gc_rule, gc_rule)
    def test_update_column_family(self):
        """Clearing a column family's GC rule propagates via update()."""
        temp_table_id = 'foo-bar-baz-table'
        temp_table = Config.INSTANCE.table(temp_table_id)
        temp_table.create()
        self.tables_to_delete.append(temp_table)
        gc_rule = MaxVersionsGCRule(1)
        column_family = temp_table.column_family(COLUMN_FAMILY_ID1,
                                                 gc_rule=gc_rule)
        column_family.create()
        # Check that our created table is as expected.
        col_fams = temp_table.list_column_families()
        self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family})
        # Update the column family's GC rule and then try to update.
        column_family.gc_rule = None
        column_family.update()
        # Check that the update has propagated.
        col_fams = temp_table.list_column_families()
        self.assertEqual(col_fams[COLUMN_FAMILY_ID1].gc_rule, None)
    def test_delete_column_family(self):
        """A deleted column family disappears from list_column_families()."""
        temp_table_id = 'foo-bar-baz-table'
        temp_table = Config.INSTANCE.table(temp_table_id)
        temp_table.create()
        self.tables_to_delete.append(temp_table)
        self.assertEqual(temp_table.list_column_families(), {})
        column_family = temp_table.column_family(COLUMN_FAMILY_ID1)
        column_family.create()
        # Make sure the family is there before deleting it.
        col_fams = temp_table.list_column_families()
        self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1])
        column_family.delete()
        # Make sure we have successfully deleted it.
        self.assertEqual(temp_table.list_column_families(), {})
class TestDataAPI(unittest2.TestCase):
    """Live-service tests for reading and writing Bigtable row data."""
    @classmethod
    def setUpClass(cls):
        # One table with two column families shared by all tests here.
        cls._table = table = Config.INSTANCE.table(TABLE_ID)
        table.create()
        table.column_family(COLUMN_FAMILY_ID1).create()
        table.column_family(COLUMN_FAMILY_ID2).create()
    @classmethod
    def tearDownClass(cls):
        # Will also delete any data contained in the table.
        cls._table.delete()
    def setUp(self):
        # Rows written during a test are queued here for teardown.
        self.rows_to_delete = []
    def tearDown(self):
        for row in self.rows_to_delete:
            row.clear()
            row.delete()
            row.commit()
    def _write_to_row(self, row1=None, row2=None, row3=None, row4=None):
        """Stage one cell mutation on each non-None row (not committed).

        Returns the four Cell values (with millisecond-truncated, strictly
        increasing timestamps) that the reads are expected to see.
        """
        timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC)
        timestamp1_micros = _microseconds_from_datetime(timestamp1)
        # Truncate to millisecond granularity.
        timestamp1_micros -= (timestamp1_micros % 1000)
        timestamp1 = _datetime_from_microseconds(timestamp1_micros)
        # 1000 microseconds is a millisecond
        timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000)
        timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000)
        timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000)
        if row1 is not None:
            row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1,
                          timestamp=timestamp1)
        if row2 is not None:
            row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2,
                          timestamp=timestamp2)
        if row3 is not None:
            row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3,
                          timestamp=timestamp3)
        if row4 is not None:
            row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4,
                          timestamp=timestamp4)
        # Create the cells we will check.
        cell1 = Cell(CELL_VAL1, timestamp1)
        cell2 = Cell(CELL_VAL2, timestamp2)
        cell3 = Cell(CELL_VAL3, timestamp3)
        cell4 = Cell(CELL_VAL4, timestamp4)
        return cell1, cell2, cell3, cell4
    def test_read_row(self):
        """read_row() returns all cells, newest first within a column."""
        row = self._table.row(ROW_KEY)
        self.rows_to_delete.append(row)
        cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row)
        row.commit()
        # Read back the contents of the row.
        partial_row_data = self._table.read_row(ROW_KEY)
        self.assertEqual(partial_row_data.row_key, ROW_KEY)
        # Check the cells match.
        ts_attr = operator.attrgetter('timestamp')
        expected_row_contents = {
            COLUMN_FAMILY_ID1: {
                COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True),
                COL_NAME2: [cell3],
            },
            COLUMN_FAMILY_ID2: {
                COL_NAME3: [cell4],
            },
        }
        self.assertEqual(partial_row_data.cells, expected_row_contents)
    def test_read_rows(self):
        """read_rows() + consume_all() returns all committed rows."""
        row = self._table.row(ROW_KEY)
        row_alt = self._table.row(ROW_KEY_ALT)
        self.rows_to_delete.extend([row, row_alt])
        cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt,
                                                        row, row_alt)
        row.commit()
        row_alt.commit()
        rows_data = self._table.read_rows()
        self.assertEqual(rows_data.rows, {})
        rows_data.consume_all()
        # NOTE: We should refrain from editing protected data on instances.
        #       Instead we should make the values public or provide factories
        #       for constructing objects with them.
        row_data = PartialRowData(ROW_KEY)
        row_data._chunks_encountered = True
        row_data._committed = True
        row_data._cells = {
            COLUMN_FAMILY_ID1: {
                COL_NAME1: [cell1],
                COL_NAME2: [cell3],
            },
        }
        row_alt_data = PartialRowData(ROW_KEY_ALT)
        row_alt_data._chunks_encountered = True
        row_alt_data._committed = True
        row_alt_data._cells = {
            COLUMN_FAMILY_ID1: {
                COL_NAME1: [cell2],
            },
            COLUMN_FAMILY_ID2: {
                COL_NAME3: [cell4],
            },
        }
        expected_rows = {
            ROW_KEY: row_data,
            ROW_KEY_ALT: row_alt_data,
        }
        self.assertEqual(rows_data.rows, expected_rows)
    def test_read_with_label_applied(self):
        """ApplyLabelFilter attaches labels to the matching cells."""
        row = self._table.row(ROW_KEY)
        self.rows_to_delete.append(row)
        cell1, _, cell3, _ = self._write_to_row(row, None, row)
        row.commit()
        # Combine a label with column 1.
        label1 = u'label-red'
        label1_filter = ApplyLabelFilter(label1)
        col1_filter = ColumnQualifierRegexFilter(COL_NAME1)
        chain1 = RowFilterChain(filters=[col1_filter, label1_filter])
        # Combine a label with column 2.
        label2 = u'label-blue'
        label2_filter = ApplyLabelFilter(label2)
        col2_filter = ColumnQualifierRegexFilter(COL_NAME2)
        chain2 = RowFilterChain(filters=[col2_filter, label2_filter])
        # Bring our two labeled columns together.
        row_filter = RowFilterUnion(filters=[chain1, chain2])
        partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter)
        self.assertEqual(partial_row_data.row_key, ROW_KEY)
        cells_returned = partial_row_data.cells
        col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1)
        # Make sure COLUMN_FAMILY_ID1 was the only key.
        self.assertEqual(len(cells_returned), 0)
        cell1_new, = col_fam1.pop(COL_NAME1)
        cell3_new, = col_fam1.pop(COL_NAME2)
        # Make sure COL_NAME1 and COL_NAME2 were the only keys.
        self.assertEqual(len(col_fam1), 0)
        # Check that cell1 has matching values and gained a label.
        self.assertEqual(cell1_new.value, cell1.value)
        self.assertEqual(cell1_new.timestamp, cell1.timestamp)
        self.assertEqual(cell1.labels, [])
        self.assertEqual(cell1_new.labels, [label1])
        # Check that cell3 has matching values and gained a label.
        self.assertEqual(cell3_new.value, cell3.value)
        self.assertEqual(cell3_new.timestamp, cell3.timestamp)
        self.assertEqual(cell3.labels, [])
        self.assertEqual(cell3_new.labels, [label2])
|
|
# Code originally from Demo code distributed with Python 2.6
# Sun RPC version 2 -- RFC1057.
# XXX There should be separate exceptions for the various reasons why
# XXX an RPC can fail, rather than using RuntimeError for everything
# XXX Need to use class based exceptions rather than string exceptions
# XXX The UDP version of the protocol resends requests when it does
# XXX not receive a timely reply -- use only for idempotent calls!
# XXX There is no provision for call timeout on TCP connections
import xdrlib
import socket
import os
# Protocol constants from RFC 1057 (Sun RPC version 2).
RPCVERSION = 2
# Message types (msg_type).
CALL = 0
REPLY = 1
# Authentication flavors (auth_flavor).
AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# Reply status (reply_stat).
MSG_ACCEPTED = 0
MSG_DENIED = 1
# Accepted-reply status (accept_stat).
SUCCESS = 0 # RPC executed successfully
PROG_UNAVAIL = 1 # remote hasn't exported program
PROG_MISMATCH = 2 # remote can't support version #
PROC_UNAVAIL = 3 # program can't support procedure
GARBAGE_ARGS = 4 # procedure can't decode params
# Rejected-reply status (reject_stat).
RPC_MISMATCH = 0 # RPC version number != 2
AUTH_ERROR = 1 # remote can't authenticate caller
# Authentication failure reasons (auth_stat).
AUTH_BADCRED = 1 # bad credentials (seal broken)
AUTH_REJECTEDCRED = 2 # client must begin new session
AUTH_BADVERF = 3 # bad verifier (seal broken)
AUTH_REJECTEDVERF = 4 # verifier expired or replayed
AUTH_TOOWEAK = 5 # rejected for security reasons
class Packer(xdrlib.Packer):
    """XDR packer extended with RPC auth and call/reply header support."""
    def pack_auth(self, auth):
        """Pack an opaque_auth structure: (flavor, opaque body bytes)."""
        flavor, stuff = auth
        self.pack_enum(flavor)
        self.pack_opaque(stuff)
    def pack_auth_unix(self, stamp, machinename, uid, gid, gids):
        """Pack the body of an AUTH_UNIX credential."""
        self.pack_uint(stamp)
        self.pack_string(machinename)
        self.pack_uint(uid)
        self.pack_uint(gid)
        # gids is length-prefixed, not packed as an XDR array helper.
        self.pack_uint(len(gids))
        for i in gids:
            self.pack_uint(i)
    def pack_callheader(self, xid, prog, vers, proc, cred, verf):
        """Pack an RPC CALL header (RFC 1057 call_body)."""
        self.pack_uint(xid)
        self.pack_enum(CALL)
        self.pack_uint(RPCVERSION)
        self.pack_uint(prog)
        self.pack_uint(vers)
        self.pack_uint(proc)
        self.pack_auth(cred)
        self.pack_auth(verf)
        # Caller must add procedure-specific part of call
    def pack_replyheader(self, xid, verf):
        """Pack a successful (MSG_ACCEPTED/SUCCESS) RPC REPLY header."""
        self.pack_uint(xid)
        self.pack_enum(REPLY)
        self.pack_uint(MSG_ACCEPTED)
        self.pack_auth(verf)
        self.pack_enum(SUCCESS)
        # Caller must add procedure-specific part of reply
# Exceptions
class BadRPCFormat(Exception):
    """A message that should be an RPC CALL is not well formed."""
class BadRPCVersion(Exception):
    """The RPC version in a call header is not RPCVERSION (2)."""
class GarbageArgs(Exception):
    """The server could not decode the procedure arguments."""
class Unpacker(xdrlib.Unpacker):
    """XDR unpacker extended with RPC call/reply header support (Python 2 syntax)."""
    def unpack_auth(self):
        """Unpack an opaque_auth structure; returns (flavor, opaque bytes)."""
        flavor = self.unpack_enum()
        stuff = self.unpack_opaque()
        return (flavor, stuff)
    def unpack_callheader(self):
        """Unpack a CALL header; returns (xid, prog, vers, proc, cred, verf)."""
        xid = self.unpack_uint()
        temp = self.unpack_enum()
        if temp != CALL:
            raise BadRPCFormat, 'no CALL but %r' % (temp,)
        temp = self.unpack_uint()
        if temp != RPCVERSION:
            raise BadRPCVersion, 'bad RPC version %r' % (temp,)
        prog = self.unpack_uint()
        vers = self.unpack_uint()
        proc = self.unpack_uint()
        cred = self.unpack_auth()
        verf = self.unpack_auth()
        return xid, prog, vers, proc, cred, verf
        # Caller must add procedure-specific part of call
    def unpack_replyheader(self):
        """Unpack a REPLY header; returns (xid, verf) or raises RuntimeError.

        Any non-success status (MSG_DENIED, RPC/auth mismatch, PROG/PROC
        unavailable, garbage args) is turned into a RuntimeError whose
        message names the failure.
        """
        xid = self.unpack_uint()
        mtype = self.unpack_enum()
        if mtype != REPLY:
            raise RuntimeError, 'no REPLY but %r' % (mtype,)
        stat = self.unpack_enum()
        if stat == MSG_DENIED:
            # Denied replies carry a second status distinguishing version
            # mismatch from authentication failure.
            stat = self.unpack_enum()
            if stat == RPC_MISMATCH:
                low = self.unpack_uint()
                high = self.unpack_uint()
                raise RuntimeError, \
                        'MSG_DENIED: RPC_MISMATCH: %r' % ((low, high),)
            if stat == AUTH_ERROR:
                stat = self.unpack_uint()
                raise RuntimeError, \
                        'MSG_DENIED: AUTH_ERROR: %r' % (stat,)
            raise RuntimeError, 'MSG_DENIED: %r' % (stat,)
        if stat != MSG_ACCEPTED:
            raise RuntimeError, \
                    'Neither MSG_DENIED nor MSG_ACCEPTED: %r' % (stat,)
        verf = self.unpack_auth()
        stat = self.unpack_enum()
        if stat == PROG_UNAVAIL:
            raise RuntimeError, 'call failed: PROG_UNAVAIL'
        if stat == PROG_MISMATCH:
            low = self.unpack_uint()
            high = self.unpack_uint()
            raise RuntimeError, \
                    'call failed: PROG_MISMATCH: %r' % ((low, high),)
        if stat == PROC_UNAVAIL:
            raise RuntimeError, 'call failed: PROC_UNAVAIL'
        if stat == GARBAGE_ARGS:
            raise RuntimeError, 'call failed: GARBAGE_ARGS'
        if stat != SUCCESS:
            raise RuntimeError, 'call failed: %r' % (stat,)
        return xid, verf
        # Caller must get procedure-specific part of reply
# Subroutines to create opaque authentication objects
def make_auth_null():
    """Return the opaque body of an AUTH_NULL credential: the empty string."""
    return ''
def make_auth_unix(seed, host, uid, gid, groups):
    """Return the packed opaque body of an AUTH_UNIX credential."""
    p = Packer()
    p.pack_auth_unix(seed, host, uid, gid, groups)
    return p.get_buf()
def make_auth_unix_default():
    """Build an AUTH_UNIX credential from the current process identity.

    Falls back to uid = gid = 0 on platforms (e.g. Windows) where
    ``os.getuid``/``os.getgid`` cannot be imported; the timestamp is the
    current time relative to the Unix epoch, and no supplementary groups
    are included.
    """
    try:
        from os import getuid, getgid
        uid = getuid()
        gid = getgid()
    except ImportError:
        uid = gid = 0
    import time
    return make_auth_unix(int(time.time()-unix_epoch()), \
              socket.gethostname(), uid, gid, [])
# Kept for backward compatibility with code that may inspect it; the
# cached-offset computation that once updated it was dead code (see below).
_unix_epoch = -1
def unix_epoch():
    """Return the value of time.time() at Jan 1st, 1970, 00:00:00 GMT.

    time.time() is defined relative to the Unix epoch on every platform
    supported by modern Python, so this is simply 0.0.  The classic
    Mac OS fallback that computed a real offset sat *after* an
    unconditional ``return 0.0`` and could never run; that dead code
    (including its Python-2-only ``print`` statement) has been removed.
    """
    return 0.0
# Common base class for clients
class Client:
    """Common base class for RPC clients.

    Subclasses must override makesocket() and do_call(); they may override
    bindsocket()/connsocket()/addpackers() and the credential factories.
    (Python 2 syntax: uses `raise Exc, msg`.)
    """
    def __init__(self, host, prog, vers, port):
        self.host = host
        self.prog = prog
        self.vers = vers
        self.port = port
        self.makesocket() # Assigns to self.sock
        self.bindsocket()
        self.connsocket()
        self.lastxid = 0 # XXX should be more random?
        self.addpackers()
        # Credential and verifier are created lazily by mkcred()/mkverf().
        self.cred = None
        self.verf = None
    def close(self):
        self.sock.close()
    def makesocket(self):
        # This MUST be overridden
        raise RuntimeError, 'makesocket not defined'
    def connsocket(self):
        # Override this if you don't want/need a connection
        self.sock.connect((self.host, self.port))
    def bindsocket(self):
        # Override this to bind to a different port (e.g. reserved)
        self.sock.bind(('', 0))
    def addpackers(self):
        # Override this to use derived classes from Packer/Unpacker
        self.packer = Packer()
        self.unpacker = Unpacker('')
    def make_call(self, proc, args, pack_func, unpack_func):
        """Pack and send one call, then unpack and return its reply value."""
        # Don't normally override this (but see Broadcast)
        if pack_func is None and args is not None:
            raise TypeError, 'non-null args with null pack_func'
        self.start_call(proc)
        if pack_func:
            pack_func(args)
        self.do_call()
        if unpack_func:
            result = unpack_func()
        else:
            result = None
        self.unpacker.done()
        return result
    def start_call(self, proc):
        """Begin a call: bump the xid and pack the call header."""
        # Don't override this
        self.lastxid = xid = self.lastxid + 1
        cred = self.mkcred()
        verf = self.mkverf()
        p = self.packer
        p.reset()
        p.pack_callheader(xid, self.prog, self.vers, proc, cred, verf)
    def do_call(self):
        # This MUST be overridden
        raise RuntimeError, 'do_call not defined'
    def mkcred(self):
        # Override this to use more powerful credentials
        if self.cred is None:
            self.cred = (AUTH_NULL, make_auth_null())
        return self.cred
    def mkverf(self):
        # Override this to use a more powerful verifier
        if self.verf is None:
            self.verf = (AUTH_NULL, make_auth_null())
        return self.verf
    def call_0(self): # Procedure 0 is always like this
        return self.make_call(0, None, None, None)
# Record-Marking standard support
def sendfrag(sock, last, frag):
    """Send one record-marking fragment; ``last`` flags the final fragment."""
    import struct
    x = len(frag)
    if last:
        # The high bit of the 4-byte big-endian header marks the last fragment
        # of a record (RFC 1057 record marking standard).
        x |= 0x80000000
    # struct.pack replaces the old manual chr()-based header assembly (which
    # relied on the Python-2-only 0x80000000L literal) and produces identical
    # bytes for both str (Python 2) and bytes (Python 3) fragments.
    sock.send(struct.pack('>I', x) + frag)
def sendrecord(sock, record):
    """Send ``record`` as a single fragment marked as the last one."""
    sendfrag(sock, 1, record)
def recvfrag(sock):
    """Receive one record-marking fragment; return ``(last, data)``.

    Raises EOFError if the 4-byte header or the fragment body is truncated.
    """
    import struct
    header = sock.recv(4)
    if len(header) < 4:
        raise EOFError
    # 4-byte big-endian length whose high bit flags the final fragment.
    # struct.unpack replaces the old ord()/long arithmetic, which breaks on
    # Python 3 where indexing bytes yields ints and `long` does not exist.
    (x,) = struct.unpack('>I', header)
    last = ((x & 0x80000000) != 0)
    n = int(x & 0x7fffffff)
    # Collect pieces in a list and join once, instead of quadratic `frag + buf`.
    pieces = []
    while n > 0:
        buf = sock.recv(n)
        if not buf:
            raise EOFError
        n -= len(buf)
        pieces.append(buf)
    return last, b''.join(pieces)
def recvrecord(sock):
    """Receive a complete record: concatenate fragments up to the last one."""
    # Accumulate fragments in a list and join once; the old `record = '' ;
    # record = record + frag` was quadratic and broke on Python 3, where
    # recvfrag returns bytes that cannot be added to a str.
    pieces = []
    last = 0
    while not last:
        last, frag = recvfrag(sock)
        pieces.append(frag)
    return b''.join(pieces)
# Try to bind to a reserved port (must be root)
last_resv_port_tried = None
def bindresvport(sock, host):
    """Bind ``sock`` to a reserved port (600-1023) on ``host``; needs root.

    Cycles through the port range starting just after the port this process
    tried last (seeded from the pid), so repeated calls don't hammer the
    same port.  Returns the bound port number, or raises RuntimeError when
    every port in the range is unavailable.
    """
    global last_resv_port_tried
    FIRST, LAST = 600, 1024     # Range of ports to try
    if last_resv_port_tried is None:
        import os
        last_resv_port_tried = FIRST + os.getpid() % (LAST-FIRST)
    # list(...) keeps this working on Python 3, where range objects cannot
    # be concatenated with `+` as the old py2 `range(...) + range(...)` did.
    for i in list(range(last_resv_port_tried, LAST)) + \
            list(range(FIRST, last_resv_port_tried)):
        last_resv_port_tried = i
        try:
            sock.bind((host, i))
            return last_resv_port_tried
        except socket.error as e:
            # errno 98 (EADDRINUSE) / 114 (EALREADY): just try the next port.
            if e.errno != 114 and e.errno != 98:
                raise e
    raise RuntimeError('can\'t assign reserved port')
# Client using TCP to a specific port
class RawTCPClient(Client):
    """RPC client over TCP to an explicitly given port (Python 2 syntax)."""
    def makesocket(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def do_call(self):
        """Send the packed call as one record and unpack the reply header."""
        call = self.packer.get_buf()
        sendrecord(self.sock, call)
        reply = recvrecord(self.sock)
        u = self.unpacker
        u.reset(reply)
        xid, verf = u.unpack_replyheader()
        if xid != self.lastxid:
            # Can't really happen since this is TCP...
            raise RuntimeError, 'wrong xid in reply %r instead of %r' % (
                    xid, self.lastxid)
# Client using UDP to a specific port
class RawUDPClient(Client):
    """RPC client over UDP to an explicitly given port (Python 2 syntax).

    Resends the request with doubling timeouts (up to 5 retries) when no
    reply arrives -- therefore only safe for idempotent calls.
    """
    def __init__(self, *args, **kwargs):
        Client.__init__(self, *args, **kwargs)
        # Maximum datagram size accepted for a reply.
        self.BUFSIZE = 8192
    def makesocket(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def do_call(self):
        """Send the call datagram; wait, resend with backoff, match the xid."""
        call = self.packer.get_buf()
        self.sock.send(call)
        try:
            from select import select
        except ImportError:
            print 'WARNING: select not found, RPC may hang'
            select = None
        timeout = 1
        count = 5
        while 1:
            r, w, x = [self.sock], [], []
            if select:
                r, w, x = select(r, w, x, timeout)
            if self.sock not in r:
                # Timed out: resend with a doubled timeout, up to 5 attempts.
                count = count - 1
                if count < 0: raise RuntimeError, 'timeout'
                if timeout < 25: timeout = timeout *2
                ## print 'RESEND', timeout, count
                self.sock.send(call)
                continue
            reply = self.sock.recv(self.BUFSIZE)
            u = self.unpacker
            u.reset(reply)
            xid, verf = u.unpack_replyheader()
            # Ignore stale replies to earlier (resent) requests.
            if xid != self.lastxid:
                ## print 'BAD xid'
                continue
            break
# Client using UDP broadcast to a specific port
class RawBroadcastUDPClient(RawUDPClient):
    """RPC client that broadcasts over UDP and collects every reply."""
    def __init__(self, bcastaddr, prog, vers, port):
        RawUDPClient.__init__(self, bcastaddr, prog, vers, port)
        # Optional callback invoked once per (reply, fromaddr) received.
        self.reply_handler = None
        # Seconds to wait for replies; None means wait forever.
        self.timeout = 30
    def connsocket(self):
        # Don't connect -- use sendto
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    def set_reply_handler(self, reply_handler):
        self.reply_handler = reply_handler
    def set_timeout(self, timeout):
        self.timeout = timeout # Use None for infinite timeout
    def make_call(self, proc, args, pack_func, unpack_func):
        """Broadcast one call; return a list of (reply, fromaddr) pairs."""
        if pack_func is None and args is not None:
            raise TypeError, 'non-null args with null pack_func'
        self.start_call(proc)
        if pack_func:
            pack_func(args)
        call = self.packer.get_buf()
        self.sock.sendto(call, (self.host, self.port))
        try:
            from select import select
        except ImportError:
            print 'WARNING: select not found, broadcast will hang'
            select = None
        replies = []
        if unpack_func is None:
            def dummy(): pass
            unpack_func = dummy
        # Gather replies until the timeout window passes with none pending.
        while 1:
            r, w, x = [self.sock], [], []
            if select:
                if self.timeout is None:
                    r, w, x = select(r, w, x)
                else:
                    r, w, x = select(r, w, x, self.timeout)
            if self.sock not in r:
                break
            reply, fromaddr = self.sock.recvfrom(self.BUFSIZE)
            u = self.unpacker
            u.reset(reply)
            xid, verf = u.unpack_replyheader()
            # Ignore datagrams that don't answer our latest request.
            if xid != self.lastxid:
                ## print 'BAD xid'
                continue
            reply = unpack_func()
            self.unpacker.done()
            replies.append((reply, fromaddr))
            if self.reply_handler:
                self.reply_handler(reply, fromaddr)
        return replies
# Port mapper interface
# Program number, version and (fixed!) port number
PMAP_PROG = 100000
PMAP_VERS = 2
PMAP_PORT = 111
# Procedure numbers
PMAPPROC_NULL = 0 # (void) -> void
PMAPPROC_SET = 1 # (mapping) -> bool
PMAPPROC_UNSET = 2 # (mapping) -> bool
PMAPPROC_GETPORT = 3 # (mapping) -> unsigned int
PMAPPROC_DUMP = 4 # (void) -> pmaplist
PMAPPROC_CALLIT = 5 # (call_args) -> call_result
# A mapping is (prog, vers, prot, port) and prot is one of:
IPPROTO_TCP = 6
IPPROTO_UDP = 17
# A pmaplist is a variable-length list of mappings, as follows:
# either (1, mapping, pmaplist) or (0).
# A call_args is (prog, vers, proc, args) where args is opaque;
# a call_result is (port, res) where res is opaque.
class PortMapperPacker(Packer):
    """Packer extended with the port mapper program's data types."""
    def pack_mapping(self, mapping):
        """Pack a (prog, vers, prot, port) mapping."""
        prog, vers, prot, port = mapping
        self.pack_uint(prog)
        self.pack_uint(vers)
        self.pack_uint(prot)
        self.pack_uint(port)
    def pack_pmaplist(self, list):
        """Pack a variable-length list of mappings."""
        self.pack_list(list, self.pack_mapping)
    def pack_call_args(self, ca):
        """Pack PMAPPROC_CALLIT arguments; ``args`` is opaque bytes."""
        prog, vers, proc, args = ca
        self.pack_uint(prog)
        self.pack_uint(vers)
        self.pack_uint(proc)
        self.pack_opaque(args)
class PortMapperUnpacker(Unpacker):
    """Unpacker extended with the port mapper program's data types."""
    def unpack_mapping(self):
        """Unpack one (prog, vers, prot, port) mapping."""
        prog = self.unpack_uint()
        vers = self.unpack_uint()
        prot = self.unpack_uint()
        port = self.unpack_uint()
        return prog, vers, prot, port
    def unpack_pmaplist(self):
        """Unpack a variable-length list of mappings."""
        return self.unpack_list(self.unpack_mapping)
    def unpack_call_result(self):
        """Unpack a PMAPPROC_CALLIT result: (port, opaque result bytes)."""
        port = self.unpack_uint()
        res = self.unpack_opaque()
        return port, res
class PartialPortMapperClient:
    """Mixin adding the port mapper procedures to a transport Client.

    Must be combined with a Client subclass providing make_call()
    (see the TCP/UDP/Broadcast port mapper client classes below).
    """
    def addpackers(self):
        self.packer = PortMapperPacker()
        self.unpacker = PortMapperUnpacker('')
    def Set(self, mapping):
        """Register a mapping; returns a bool-like uint."""
        return self.make_call(PMAPPROC_SET, mapping, \
                self.packer.pack_mapping, \
                self.unpacker.unpack_uint)
    def Unset(self, mapping):
        """Unregister a mapping; returns a bool-like uint."""
        return self.make_call(PMAPPROC_UNSET, mapping, \
                self.packer.pack_mapping, \
                self.unpacker.unpack_uint)
    def Getport(self, mapping):
        """Look up the port for a mapping; 0 means not registered."""
        return self.make_call(PMAPPROC_GETPORT, mapping, \
                self.packer.pack_mapping, \
                self.unpacker.unpack_uint)
    def Dump(self):
        """Return the server's full list of registered mappings."""
        return self.make_call(PMAPPROC_DUMP, None, \
                None, \
                self.unpacker.unpack_pmaplist)
    def Callit(self, ca):
        """Indirectly call a remote procedure through the port mapper."""
        return self.make_call(PMAPPROC_CALLIT, ca, \
                self.packer.pack_call_args, \
                self.unpacker.unpack_call_result)
class TCPPortMapperClient(PartialPortMapperClient, RawTCPClient):
    """Port mapper client speaking over TCP."""

    def __init__(self, host):
        RawTCPClient.__init__(self, host,
                              PMAP_PROG, PMAP_VERS, PMAP_PORT)
class UDPPortMapperClient(PartialPortMapperClient, RawUDPClient):
    """Port mapper client speaking over UDP."""

    def __init__(self, host):
        RawUDPClient.__init__(self, host,
                              PMAP_PROG, PMAP_VERS, PMAP_PORT)
class BroadcastUDPPortMapperClient(PartialPortMapperClient,
                                   RawBroadcastUDPClient):
    """Port mapper client speaking over broadcast UDP."""

    def __init__(self, bcastaddr):
        RawBroadcastUDPClient.__init__(self, bcastaddr,
                                       PMAP_PROG, PMAP_VERS, PMAP_PORT)
# Generic clients that find their server through the Port mapper
class TCPClient(RawTCPClient):
    """Generic TCP RPC client: asks the remote host's port mapper for the
    port registered for (prog, vers), then connects directly to it."""
    def __init__(self, host, prog, vers):
        pmap = TCPPortMapperClient(host)
        port = pmap.Getport((prog, vers, IPPROTO_TCP, 0))
        pmap.close()
        # Getport returns 0 when (prog, vers) is not registered.
        if port == 0:
            raise RuntimeError, 'program not registered'
        RawTCPClient.__init__(self, host, prog, vers, port)
class UDPClient(RawUDPClient):
    """Generic UDP RPC client: asks the remote host's port mapper for the
    port registered for (prog, vers), then talks directly to it."""
    def __init__(self, host, prog, vers):
        pmap = UDPPortMapperClient(host)
        port = pmap.Getport((prog, vers, IPPROTO_UDP, 0))
        pmap.close()
        # Getport returns 0 when (prog, vers) is not registered.
        if port == 0:
            raise RuntimeError, 'program not registered'
        RawUDPClient.__init__(self, host, prog, vers, port)
class BroadcastUDPClient(Client):
    """Client that broadcasts calls through the port mapper's CALLIT
    procedure and collects every reply received before the timeout.

    Unlike the other clients, make_call() returns a *list* of
    (result, fromaddr) pairs, one per responder.
    """
    def __init__(self, bcastaddr, prog, vers):
        self.pmap = BroadcastUDPPortMapperClient(bcastaddr)
        self.pmap.set_reply_handler(self.my_reply_handler)
        self.prog = prog
        self.vers = vers
        self.user_reply_handler = None
        self.addpackers()
    def close(self):
        self.pmap.close()
    def set_reply_handler(self, reply_handler):
        # Optional extra callback invoked once per decoded reply.
        self.user_reply_handler = reply_handler
    def set_timeout(self, timeout):
        self.pmap.set_timeout(timeout)
    def my_reply_handler(self, reply, fromaddr):
        # Invoked by the underlying broadcast client for each CALLIT
        # response; decodes the opaque result and records it.
        port, res = reply
        self.unpacker.reset(res)
        result = self.unpack_func()
        self.unpacker.done()
        self.replies.append((result, fromaddr))
        if self.user_reply_handler is not None:
            self.user_reply_handler(result, fromaddr)
    def make_call(self, proc, args, pack_func, unpack_func):
        # Broadcast the call via CALLIT; replies are accumulated by
        # my_reply_handler() until the timeout expires.
        self.packer.reset()
        if pack_func:
            pack_func(args)
        if unpack_func is None:
            def dummy(): pass
            self.unpack_func = dummy
        else:
            self.unpack_func = unpack_func
        self.replies = []
        packed_args = self.packer.get_buf()
        dummy_replies = self.pmap.Callit( \
            (self.prog, self.vers, proc, packed_args))
        return self.replies
# Server classes
# These are not symmetric to the Client classes
# XXX No attempt is made to provide authorization hooks yet
class Server:
def __init__(self, host, prog, vers, port):
self.host = host # Should normally be '' for default interface
self.prog = prog
self.vers = vers
self.port = port # Should normally be 0 for random port
self.makesocket() # Assigns to self.sock and self.prot
self.bindsocket()
self.host, self.port = self.sock.getsockname()
self.addpackers()
def register(self):
mapping = self.prog, self.vers, self.prot, self.port
p = TCPPortMapperClient(self.host)
if not p.Set(mapping):
raise RuntimeError, 'register failed'
def unregister(self):
mapping = self.prog, self.vers, self.prot, self.port
p = TCPPortMapperClient(self.host)
if not p.Unset(mapping):
raise RuntimeError, 'unregister failed'
def handle(self, call):
# Don't use unpack_header but parse the header piecewise
# XXX I have no idea if I am using the right error responses!
self.unpacker.reset(call)
self.packer.reset()
xid = self.unpacker.unpack_uint()
self.packer.pack_uint(xid)
temp = self.unpacker.unpack_enum()
if temp != CALL:
return None # Not worthy of a reply
self.packer.pack_uint(REPLY)
temp = self.unpacker.unpack_uint()
if temp != RPCVERSION:
self.packer.pack_uint(MSG_DENIED)
self.packer.pack_uint(RPC_MISMATCH)
self.packer.pack_uint(RPCVERSION)
self.packer.pack_uint(RPCVERSION)
return self.packer.get_buf()
self.packer.pack_uint(MSG_ACCEPTED)
self.packer.pack_auth((AUTH_NULL, make_auth_null()))
prog = self.unpacker.unpack_uint()
if prog != self.prog:
self.packer.pack_uint(PROG_UNAVAIL)
return self.packer.get_buf()
vers = self.unpacker.unpack_uint()
if vers != self.vers:
self.packer.pack_uint(PROG_MISMATCH)
self.packer.pack_uint(self.vers)
self.packer.pack_uint(self.vers)
return self.packer.get_buf()
proc = self.unpacker.unpack_uint()
methname = 'handle_' + repr(proc)
try:
meth = getattr(self, methname)
except AttributeError:
self.packer.pack_uint(PROC_UNAVAIL)
return self.packer.get_buf()
cred = self.unpacker.unpack_auth()
verf = self.unpacker.unpack_auth()
try:
meth() # Unpack args, call turn_around(), pack reply
except (EOFError, GarbageArgs):
# Too few or too many arguments
self.packer.reset()
self.packer.pack_uint(xid)
self.packer.pack_uint(REPLY)
self.packer.pack_uint(MSG_ACCEPTED)
self.packer.pack_auth((AUTH_NULL, make_auth_null()))
self.packer.pack_uint(GARBAGE_ARGS)
return self.packer.get_buf()
def turn_around(self):
try:
self.unpacker.done()
except RuntimeError:
raise GarbageArgs
self.packer.pack_uint(SUCCESS)
def handle_0(self): # Handle NULL message
self.turn_around()
def makesocket(self):
# This MUST be overridden
raise RuntimeError, 'makesocket not defined'
def bindsocket(self):
# Override this to bind to a different port (e.g. reserved)
self.sock.bind((self.host, self.port))
def addpackers(self):
# Override this to use derived classes from Packer/Unpacker
self.packer = Packer()
self.unpacker = Unpacker('')
class TCPServer(Server):
    """RPC server over TCP.  loop() serves one connection at a time;
    forkingloop() forks a child per connection."""
    def makesocket(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.prot = IPPROTO_TCP
    def loop(self):
        self.sock.listen(0)
        while 1:
            self.session(self.sock.accept())
    def session(self, connection):
        # Serve one client connection until EOF or a socket error.
        sock, (host, port) = connection
        while 1:
            try:
                call = recvrecord(sock)
            except EOFError:
                break
            except socket.error as e:
                print 'socket error:', e.message
                break
            reply = self.handle(call)
            if reply is not None:
                sendrecord(sock, reply)
    def forkingloop(self):
        # Like loop but uses forksession()
        self.sock.listen(0)
        while 1:
            self.forksession(self.sock.accept())
    def forksession(self, connection):
        # Like session but forks off a subprocess
        import os
        # Reap any already-deceased children before forking a new one.
        try:
            while 1:
                pid, sts = os.waitpid(0, 1)
        except os.error:
            pass
        pid = None
        try:
            pid = os.fork()
            if pid: # Parent
                connection[0].close()
                return
            # Child
            self.session(connection)
        finally:
            # Make sure we don't fall through in the parent
            if pid == 0:
                os._exit(0)
class UDPServer(Server):
    """RPC server over UDP: one request datagram in, one reply out."""

    def makesocket(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.prot = IPPROTO_UDP

    def loop(self):
        while True:
            self.session()

    def session(self):
        # Receive a single call and, when it warrants one, send the reply
        # back to whoever sent the datagram.
        call, host_port = self.sock.recvfrom(8192)
        reply = self.handle(call)
        if reply is not None:
            self.sock.sendto(reply, host_port)
# Simple test program -- dump local portmapper status
def test():
    """Dump the local portmapper's registration table, sorted."""
    pmap = UDPPortMapperClient('')
    list = pmap.Dump()
    list.sort()
    for prog, vers, prot, port in list:
        print prog, vers,
        if prot == IPPROTO_TCP: print 'tcp',
        elif prot == IPPROTO_UDP: print 'udp',
        else: print prot,
        print port
# Test program for broadcast operation -- dump everybody's portmapper status
def testbcast():
    """Broadcast a Getport query and print each responder's answer.
    Takes the broadcast address from argv[1] if given."""
    import sys
    if sys.argv[1:]:
        bcastaddr = sys.argv[1]
    else:
        bcastaddr = '<broadcast>'
    def rh(reply, fromaddr):
        # Per-reply callback: print "host<TAB>reply".
        host, port = fromaddr
        print host + '\t' + repr(reply)
    pmap = BroadcastUDPPortMapperClient(bcastaddr)
    pmap.set_reply_handler(rh)
    pmap.set_timeout(5)
    replies = pmap.Getport((100002, 1, IPPROTO_UDP, 0))
# Test program for server, with corresponding client
# On machine A: python -c 'import rpc; rpc.testsvr()'
# On machine B: python -c 'import rpc; rpc.testclt()' A
# (A may be == B)
def testsvr():
    """Run a demo UDP server whose procedure 1 echoes its string
    argument doubled; registers with the local port mapper."""
    # Simple test class -- proc 1 doubles its string argument as reply
    class S(UDPServer):
        def handle_1(self):
            arg = self.unpacker.unpack_string()
            self.turn_around()
            print 'RPC function 1 called, arg', repr(arg)
            self.packer.pack_string(arg + arg)
    #
    s = S('', 0x20000000, 1, 0)
    # Clear any stale registration from a previous run before registering.
    try:
        s.unregister()
    except RuntimeError as e:
        print 'RuntimeError:', e.message, '(ignored)'
    s.register()
    print 'Service started...'
    try:
        s.loop()
    finally:
        s.unregister()
        print 'Service interrupted.'
def testclt():
    """Client for testsvr(): calls procedure 1 on the host named in
    argv[1] (default: local host) and prints the echoed reply."""
    import sys
    if sys.argv[1:]: host = sys.argv[1]
    else: host = ''
    # Client for above server
    class C(UDPClient):
        def call_1(self, arg):
            return self.make_call(1, arg, \
                self.packer.pack_string, \
                self.unpacker.unpack_string)
    c = C(host, 0x20000000, 1)
    print 'making call...'
    reply = c.call_1('hello, world, ')
    print 'call returned', repr(reply)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineImagesOperations(object):
    """VirtualMachineImagesOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-04-30-preview".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version sent with every request from this class.
        self.api_version = "2016-04-30-preview"
        self.config = config

    def get(
            self, location, publisher_name, offer, skus, version, custom_headers=None, raw=False, **operation_config):
        """Gets a virtual machine image.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param version: A valid image SKU version.
        :type version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualMachineImage or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineImage or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'version': self._serialize.url("version", version, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Non-200 responses are surfaced as CloudError with the request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineImage', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def list(
            self, location, publisher_name, offer, skus, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
        """Gets a list of all virtual machine image versions for the specified
        location, publisher, offer, and SKU.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :param top: The maximum number of results to return.
        :type top: int
        :param orderby: The ordering expression applied to the results.
        :type orderby: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if filter is not None:
            query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query("top", top, 'int')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def list_offers(
            self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image offers for the specified location
        and publisher.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def list_publishers(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image publishers for the specified Azure
        location.

        :param location: The name of a supported Azure region.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    def list_skus(
            self, location, publisher_name, offer, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image SKUs for the specified location,
        publisher, and offer.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
|
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and methods for working with physical quantities,
particularly the :class:`Quantity` class for representing physical quantities.
"""
import numpy
import quantities as pq
import rmgpy.constants as constants
################################################################################
# Explicitly set the default units to SI
pq.set_default_units('si')
# These units are not defined by the quantities package, but occur frequently
# in data handled by RMG, so we define them manually
pq.UnitQuantity('kilocalories', pq.cal*1e3, symbol='kcal')
pq.UnitQuantity('kilojoules', pq.J*1e3, symbol='kJ')
pq.UnitQuantity('kilomoles', pq.mol*1e3, symbol='kmol')
# 'molecule(s)' converts to moles via Avogadro's number
pq.UnitQuantity('molecule', pq.mol/6.02214179e23, symbol='molecule')
pq.UnitQuantity('molecules', pq.mol/6.02214179e23, symbol='molecules')
pq.UnitQuantity('debye', 1.0/(constants.c*1e21)*pq.C*pq.m, symbol='De')
################################################################################
# Units that should not be used in RMG-Py:
# NOTE(review): these appear to be temperature scales whose conversions need
# a zero-point offset, which the purely multiplicative conversion-factor
# machinery below cannot represent -- confirm before extending this list.
NOT_IMPLEMENTED_UNITS = [
    'degC',
    'C',
    'degF',
    'F',
    'degR',
    'R'
]
class QuantityError(Exception):
    """
    Raised when an operation on a physical quantity fails in RMG.  The
    message passed in should describe the circumstances of the failure.
    """
    pass
################################################################################
class Units(object):
    """
    Representation of the units of a physical quantity.

    =================== ========================================================
    Attribute           Description
    =================== ========================================================
    `units`             A string representation of the units
    =================== ========================================================

    Provides the conversion factors to and from the equivalent SI units.
    """

    # Cache of conversion factors (to SI), keyed by unit string.  Seeding
    # 'cm^-1' with 1.0 also pins it so it is never converted to m^-1 (or
    # Hz, J, K, etc.).
    conversionFactors = {'cm^-1': 1.0}

    def __init__(self, units=''):
        if units in NOT_IMPLEMENTED_UNITS:
            message = 'The units {} are not yet supported. Please choose SI units.'.format(units)
            raise NotImplementedError(message)
        self.units = units

    def getConversionFactorToSI(self):
        """
        Return the factor that converts a value expressed in `units` into
        the equivalent SI units.
        """
        cache = Units.conversionFactors
        if self.units not in cache:
            # Slow path: let the quantities package work out the factor,
            # then memoize it so it is only ever computed once.
            cache[self.units] = float(pq.Quantity(1.0, self.units).simplified)
        return cache[self.units]

    def getConversionFactorFromSI(self):
        """
        Return the factor that converts a value in SI units back into
        `units`.
        """
        return 1.0 / self.getConversionFactorToSI()
################################################################################
class ScalarQuantity(Units):
"""
The :class:`ScalarQuantity` class provides a representation of a scalar
physical quantity, with optional units and uncertainty information. The
attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`value` The numeric value of the quantity in the given units
`units` The units the value was specified in
`uncertainty` The numeric uncertainty in the value
`uncertaintyType` The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
`value_si` The numeric value of the quantity in the corresponding SI units
=================== ========================================================
It is often more convenient to perform computations using SI units instead
of the given units of the quantity. For this reason, the SI equivalent of
the `value` attribute can be directly accessed using the `value_si`
attribute. This value is cached on the :class:`ScalarQuantity` object for
speed.
"""
def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
Units.__init__(self, units)
self.value = value
self.uncertaintyType = uncertaintyType
self.uncertainty = float(uncertainty) if uncertainty is not None else 0.0
def __reduce__(self):
"""
Return a tuple of information used to pickle the scalar quantity.
"""
return (ScalarQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))
def __str__(self):
"""
Return a string representation of the scalar quantity.
"""
result = '{0:g}'.format(self.value)
if self.uncertainty != 0.0:
result += ' {0} {1:g}'.format(self.uncertaintyType, self.uncertainty)
if self.units != '':
result += ' {0}'.format(self.units)
return result
def __repr__(self):
"""
Return a string representation that can be used to reconstruct the
scalar quantity.
"""
if self.units == '' and self.uncertainty == 0.0:
return '{0:g}'.format(self.value)
else:
result = '({0:g},{1!r}'.format(self.value, self.units)
if self.uncertainty != 0.0:
result += ',{0!r},{1:g}'.format(self.uncertaintyType, self.uncertainty)
result += ')'
return result
def copy(self):
"""
Return a copy of the quantity.
"""
return ScalarQuantity(self.value, self.units, self.uncertainty, self.uncertaintyType)
def getValue(self):
"""
The numeric value of the quantity, in the given units
"""
return self.value_si * self.getConversionFactorFromSI()
def setValue(self, v):
self.value_si = float(v) * self.getConversionFactorToSI()
value = property(getValue, setValue)
def getUncertainty(self):
"""
The numeric value of the uncertainty, in the given units if additive, or no units if multiplicative.
"""
if self.isUncertaintyAdditive():
return self.uncertainty_si * self.getConversionFactorFromSI()
else:
return self.uncertainty_si
def setUncertainty(self, v):
if self.isUncertaintyAdditive():
self.uncertainty_si = float(v) * self.getConversionFactorToSI()
else:
self.uncertainty_si = float(v)
uncertainty = property(getUncertainty, setUncertainty)
def getUncertaintyType(self):
"""
The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
"""
return self._uncertaintyType
def setUncertaintyType(self, v):
"""
Check the uncertainty type is valid, then set it, and set the uncertainty to -1.
If you set the uncertainty then change the type, we have no idea what to do with
the units. This ensures you set the type first.
"""
if v not in ['+|-','*|/']:
raise QuantityError("Invalid uncertainty type")
self._uncertaintyType = v
self.uncertainty_si = -1
uncertaintyType = property(getUncertaintyType, setUncertaintyType)
def equals(self, quantity):
"""
Return ``True`` if the everything in a quantity object matches
the parameters in this object. If there are lists of values or uncertainties,
each item in the list must be matching and in the same order.
Otherwise, return ``False``
(Originally intended to return warning if units capitalization was
different, however, Quantity object only parses units matching in case, so
this will not be a problem.)
"""
def approx_equal(x, y, atol = .01):
"""
Returns true if two float/double values are approximately equal
within a relative error of 1% or under a user specific absolute tolerance.
"""
return abs(x-y) <= 1e-2*abs(x) or abs(x-y) <= 1e-2*abs(y) or abs(x-y) <= atol
if isinstance(quantity, ScalarQuantity):
if (self.uncertaintyType == quantity.uncertaintyType and
approx_equal(self.uncertainty * self.getConversionFactorToSI(), quantity.uncertainty * quantity.getConversionFactorToSI()) and
self.units == quantity.units):
if self.units == "kcal/mol":
# set absolute tolerance to .01 kcal/mol = 42 J/mol
atol = 42
else:
# for other units, set it to .01
atol = .01
if not approx_equal(self.value_si, quantity.value_si, atol):
return False
return True
return False
def isUncertaintyAdditive(self):
    """Return ``True`` when the uncertainty is expressed in the additive
    (``'+|-'``) form, ``False`` otherwise."""
    return '+|-' == self._uncertaintyType
def isUncertaintyMultiplicative(self):
    """Return ``True`` when the uncertainty is expressed in the multiplicative
    (``'*|/'``) form, ``False`` otherwise."""
    return '*|/' == self._uncertaintyType
################################################################################
class ArrayQuantity(Units):
    """
    The :class:`ArrayQuantity` class provides a representation of an array of
    physical quantity values, with optional units and uncertainty information.
    The attributes are:

    =================== ========================================================
    Attribute           Description
    =================== ========================================================
    `value`             The numeric value of the quantity in the given units
    `units`             The units the value was specified in
    `uncertainty`       The numeric uncertainty in the value
    `uncertaintyType`   The type of uncertainty: ``'+|-'`` for additive, ``'*|/'`` for multiplicative
    `value_si`          The numeric value of the quantity in the corresponding SI units
    =================== ========================================================

    It is often more convenient to perform computations using SI units instead
    of the given units of the quantity. For this reason, the SI equivalent of
    the `value` attribute can be directly accessed using the `value_si`
    attribute. This value is cached on the :class:`ArrayQuantity` object for
    speed.
    """

    def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
        Units.__init__(self, units)
        self.value = value
        self.uncertaintyType = uncertaintyType
        if uncertainty is None:
            # No uncertainty given: assume exactly zero everywhere.
            self.uncertainty = numpy.zeros_like(self.value)
        elif isinstance(uncertainty, (int,float)):
            # A scalar uncertainty applies uniformly to every element.
            self.uncertainty = numpy.ones_like(self.value) * uncertainty
        else:
            # An array uncertainty must match the value's shape exactly.
            uncertainty = numpy.array(uncertainty)
            if uncertainty.ndim != self.value.ndim:
                raise QuantityError('The given uncertainty has {0:d} dimensions, while the given value has {1:d} dimensions.'.format(uncertainty.ndim, self.value.ndim))
            for i in range(self.value.ndim):
                if self.value.shape[i] != uncertainty.shape[i]:
                    raise QuantityError('Dimension {0:d} has {1:d} elements for the given value, but {2:d} elements for the given uncertainty.'.format(i, self.value.shape[i], uncertainty.shape[i]))
            self.uncertainty = uncertainty

    def __reduce__(self):
        """
        Return a tuple of information used to pickle the array quantity.
        """
        return (ArrayQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))

    @staticmethod
    def _format_array(arr):
        """
        Render *arr* as a nested ``[...]`` string of ``%g``-formatted numbers.

        Works for any number of dimensions by recursing over the rows. (The
        previous inline implementations only handled ndim 1 and 2 and raised
        UnboundLocalError for anything else.)
        """
        if arr.ndim == 1:
            return '[{0}]'.format(','.join('{0:g}'.format(float(v)) for v in arr))
        return '[{0}]'.format(','.join(ArrayQuantity._format_array(row) for row in arr))

    def __str__(self):
        """
        Return a string representation of the array quantity.
        """
        result = self._format_array(self.value)
        # numpy.any (rather than the builtin any) is required here: the
        # builtin raises "truth value is ambiguous" for 2-D boolean arrays.
        if numpy.any(self.uncertainty != 0.0):
            result += ' {0} {1}'.format(self.uncertaintyType, self._format_array(self.uncertainty))
        if self.units != '':
            result += ' {0}'.format(self.units)
        return result

    def __repr__(self):
        """
        Return a string representation that can be used to reconstruct the
        array quantity.
        """
        value = self._format_array(self.value)
        if self.units == '' and not numpy.any(self.uncertainty != 0.0):
            # Dimensionless, exact arrays render as a bare list literal.
            return '{0}'.format(value)
        result = '({0},{1!r}'.format(value, self.units)
        if numpy.any(self.uncertainty != 0.0):
            result += ',{0!r},{1}'.format(self.uncertaintyType, self._format_array(self.uncertainty))
        result += ')'
        return result

    def copy(self):
        """
        Return a copy of the quantity.
        """
        return ArrayQuantity(self.value.copy(), self.units, self.uncertainty.copy(), self.uncertaintyType)

    def getValue(self):
        # Convert the cached SI values back to the units they were given in.
        return self.value_si * self.getConversionFactorFromSI()

    def setValue(self, v):
        # Values are stored internally in SI units (cached as value_si).
        self.value_si = numpy.array(v) * self.getConversionFactorToSI()

    value = property(getValue, setValue)

    def equals(self, quantity):
        """
        Return ``True`` if the everything in a quantity object matches
        the parameters in this object. If there are lists of values or uncertainties,
        each item in the list must be matching and in the same order.
        Otherwise, return ``False``
        (Originally intended to return warning if units capitalization was
        different, however, Quantity object only parses units matching in case, so
        this will not be a problem.)
        """
        def approx_equal(x, y, atol = .01):
            """
            Returns true if two float/double values are approximately equal
            within a relative error of 1% or under a user specific absolute tolerance.
            """
            return abs(x-y) <= 1e-2*abs(x) or abs(x-y) <= 1e-2*abs(y) or abs(x-y) <= atol
        if isinstance(quantity, ArrayQuantity):
            if (self.uncertaintyType == quantity.uncertaintyType and self.units == quantity.units):
                if self.units == "kcal/mol":
                    # set absolute tolerance to .01 kcal/mol = 42 J/mol
                    atol = 42
                else:
                    # for other units, set it to .01
                    atol = .01
                # Shapes must agree exactly before comparing element-wise.
                if self.value.ndim != quantity.value.ndim:
                    return False
                for i in range(self.value.ndim):
                    if self.value.shape[i] != quantity.value.shape[i]:
                        return False
                for v1, v2 in zip(self.value.flat, quantity.value.flat):
                    if not approx_equal(v1, v2, atol):
                        return False
                # The uncertainties must match element-wise as well.
                if self.uncertainty.ndim != quantity.uncertainty.ndim:
                    return False
                for i in range(self.uncertainty.ndim):
                    if self.uncertainty.shape[i] != quantity.uncertainty.shape[i]:
                        return False
                for v1, v2 in zip(self.uncertainty.flat, quantity.uncertainty.flat):
                    if not approx_equal(v1, v2, atol):
                        return False
                return True
        return False

    def isUncertaintyAdditive(self):
        """
        Return ``True`` if the uncertainty is specified in additive format
        and ``False`` otherwise.
        """
        return self.uncertaintyType == '+|-'

    def isUncertaintyMultiplicative(self):
        """
        Return ``True`` if the uncertainty is specified in multiplicative
        format and ``False`` otherwise.
        """
        return self.uncertaintyType == '*|/'
################################################################################
def Quantity(*args, **kwargs):
    """
    Create a :class:`ScalarQuantity` or :class:`ArrayQuantity` object for a
    given physical quantity. The physical quantity can be specified in several
    ways:

    * A scalar-like or array-like value (for a dimensionless quantity)

    * An array of arguments (including keyword arguments) giving some or all of
      the `value`, `units`, `uncertainty`, and/or `uncertaintyType`.

    * A tuple of the form ``(value,)``, ``(value,units)``,
      ``(value,units,uncertainty)``, or
      ``(value,units,uncertaintyType,uncertainty)``

    * An existing :class:`ScalarQuantity` or :class:`ArrayQuantity` object, for
      which a copy is made

    """
    # Initialize attributes to their defaults
    value = None
    units = ''
    uncertaintyType = '+|-'
    uncertainty = None
    # Quantity(None) deliberately maps to None rather than a zero quantity
    if len(args) == 1 and len(kwargs) == 0 and args[0] is None:
        return None
    # Unpack args if necessary: Quantity((v, u, ...)) behaves like
    # Quantity(v, u, ...)
    if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]
    # Process args
    Nargs = len(args)
    if Nargs == 1 and isinstance(args[0], (ScalarQuantity,ArrayQuantity)):
        # We were given another quantity object, so make a (shallow) copy of it
        other = args[0]
        value = other.value
        units = other.units
        uncertaintyType = other.uncertaintyType
        uncertainty = other.uncertainty
    elif Nargs == 1:
        # If one parameter is given, it should be a single value
        value, = args
    elif Nargs == 2:
        # If two parameters are given, it should be a value and units
        value, units = args
    elif Nargs == 3:
        # If three parameters are given, it should be a value, units and uncertainty
        value, units, uncertainty = args
    elif Nargs == 4:
        # If four parameters are given, it should be a value, units, uncertainty type, and uncertainty
        value, units, uncertaintyType, uncertainty = args
    elif Nargs != 0:
        raise QuantityError('Invalid parameters {0!r} passed to ArrayQuantity.__init__() method.'.format(args))
    # Process kwargs
    # NOTE: len(args) below reflects the (possibly unpacked) positional tuple,
    # so a keyword that duplicates a positional argument is rejected.
    for k, v in kwargs.items():
        if k == 'value':
            if len(args) >= 1:
                raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
            else:
                value = v
        elif k == 'units':
            if len(args) >= 2:
                raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
            else:
                units = v
        elif k == 'uncertainty':
            if len(args) >= 3:
                raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
            else:
                uncertainty = v
        elif k == 'uncertaintyType':
            if len(args) >= 4:
                raise QuantityError('Multiple values for argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
            else:
                uncertaintyType = v
        else:
            raise QuantityError('Invalid keyword argument {0} passed to ArrayQuantity.__init__() method.'.format(k))
    # Process units and uncertainty type parameters
    if uncertaintyType not in ['+|-', '*|/']:
        raise QuantityError('Unexpected uncertainty type "{0}"; valid values are "+|-" and "*|/".'.format(uncertaintyType))
    # Sequences always become ArrayQuantity; anything float()-able becomes a
    # ScalarQuantity; non-convertible values (TypeError) fall back to
    # ArrayQuantity.
    if isinstance(value, (list,tuple,numpy.ndarray)):
        return ArrayQuantity(value, units, uncertainty, uncertaintyType)
    try:
        value = float(value)
    except TypeError:
        return ArrayQuantity(value, units, uncertainty, uncertaintyType)
    uncertainty = 0.0 if uncertainty is None else float(uncertainty)
    return ScalarQuantity(value, units, uncertainty, uncertaintyType)
################################################################################
class UnitType:
    """
    The :class:`UnitType` class represents a factory for producing
    :class:`ScalarQuantity` or :class:`ArrayQuantity` objects of a given unit
    type, e.g. time, volume, etc.
    """

    def __init__(self, units, commonUnits=None, extraDimensionality=None):
        self.units = units
        self.dimensionality = pq.Quantity(1.0, units).simplified.dimensionality
        self.commonUnits = commonUnits or []
        # Map each extra-dimensionality unit onto its simplified
        # dimensionality so lookups in __call__ are direct.
        if extraDimensionality:
            self.extraDimensionality = dict(
                (pq.Quantity(1.0, unit).simplified.dimensionality, factor)
                for unit, factor in extraDimensionality.items()
            )
        else:
            self.extraDimensionality = {}

    def __call__(self, *args, **kwargs):
        # Make a ScalarQuantity or ArrayQuantity object out of the given parameter
        quantity = Quantity(*args, **kwargs)
        if quantity is None:
            return quantity
        units = quantity.units
        # Fast path: units already known to be valid for this unit type, so
        # skip the (slow) quantities-package dimensionality analysis.
        if units == self.units or units in self.commonUnits:
            return quantity
        # Slow path: check dimensional consistency via the quantities package.
        dimensionality = pq.Quantity(1.0, units).simplified.dimensionality
        if dimensionality == self.dimensionality:
            return quantity
        if dimensionality in self.extraDimensionality:
            # A recognised alternate dimensionality: rescale into our units.
            quantity.value_si *= self.extraDimensionality[dimensionality]
            quantity.units = self.units
            return quantity
        raise QuantityError('Invalid units {0!r}.'.format(quantity.units))
# Factories for the unit types used throughout the quantity module. Each one
# fixes the canonical (SI-like) units and, where needed, fast-path common
# units and extra accepted dimensionalities with their conversion factors.
Acceleration = UnitType('m/s^2')
Area = UnitType('m^2')
Concentration = UnitType('mol/m^3')
Dimensionless = UnitType('')
DipoleMoment = UnitType('C*m', extraDimensionality={
    'De': 1.0 / (1.0e21 * constants.c),
})
"We have to allow 'energies' to be created in units of Kelvins, because Chemkin does so"
Energy = Enthalpy = FreeEnergy = UnitType('J/mol',
    commonUnits=['kJ/mol', 'cal/mol', 'kcal/mol'],
    extraDimensionality={'K': constants.R },
)
Entropy = HeatCapacity = UnitType('J/(mol*K)', commonUnits=['kJ/(mol*K)', 'cal/(mol*K)', 'kcal/(mol*K)'])
Flux = UnitType('mol/(m^2*s)')
# Frequencies are stored in wavenumbers (cm^-1); the extra dimensionalities
# accept s^-1/Hz, energy (J) and temperature (K) inputs.
Frequency = UnitType('cm^-1', extraDimensionality={
    's^-1': 1.0 / (constants.c * 100.),
    'Hz': 1.0 / (constants.c * 100.),
    'J': 1.0 / (constants.h * constants.c * 100.),
    'K': constants.kB / (constants.h * constants.c * 100.),
})
Force = UnitType('N')
Inertia = UnitType('kg*m^2')
Length = UnitType('m')
Mass = UnitType('amu', extraDimensionality={'kg/mol': 1000.*constants.amu})
# NOTE(review): 'kg*m/s^2' is dimensionally a force; momentum would be
# 'kg*m/s' — confirm whether this is intentional before changing it.
Momentum = UnitType('kg*m/s^2')
Power = UnitType('W')
Pressure = UnitType('Pa', commonUnits=['bar', 'atm', 'torr', 'psi', 'mbar'])
Temperature = UnitType('K', commonUnits=[])
Time = UnitType('s')
Velocity = UnitType('m/s')
Volume = UnitType('m^3')
# Polarizability = UnitType('C*m^2*V^-1')
"""
What's called Polarizability in the transport properties is in fact a polarizability volume,
which is related by $4*\pi*\epsilon_0$ where $\epsilon_0$ is the permittivity of free space.
Rather than mess around with conversions, I suggest we just use "Volume" as the units for
what we call 'polarizability'. Chemkin expects it in Angstrom^3. We'll store it in m^3.
"""
# RateCoefficient is handled as a special case since it can take various
# units depending on the reaction order
# Keys are the simplified dimensionalities of the accepted rate-coefficient
# units (reaction orders 1-4, per-volume or per-mole); all values are 1.0
# because these units are already SI.
RATECOEFFICIENT_CONVERSION_FACTORS = {
    (1.0/pq.s).dimensionality: 1.0,
    (pq.m**3/pq.s).dimensionality: 1.0,
    (pq.m**6/pq.s).dimensionality: 1.0,
    (pq.m**9/pq.s).dimensionality: 1.0,
    (pq.m**3/(pq.mol*pq.s)).dimensionality: 1.0,
    (pq.m**6/(pq.mol**2*pq.s)).dimensionality: 1.0,
    (pq.m**9/(pq.mol**3*pq.s)).dimensionality: 1.0,
}
# Fast-path units that skip the dimensionality lookup in RateCoefficient().
RATECOEFFICIENT_COMMON_UNITS = ['s^-1', 'm^3/(mol*s)', 'cm^3/(mol*s)', 'm^3/(molecule*s)', 'cm^3/(molecule*s)']
def RateCoefficient(*args, **kwargs):
    """
    Create a :class:`ScalarQuantity` or :class:`ArrayQuantity` representing a
    rate coefficient. Handled as a special case (rather than via
    :class:`UnitType`) because the valid units depend on the reaction order.

    Raises :class:`QuantityError` if the units do not correspond to any
    supported reaction order.
    """
    # Make a ScalarQuantity or ArrayQuantity object out of the given parameter
    quantity = Quantity(*args, **kwargs)
    if quantity is None:
        return quantity
    # If the units are in the common units, then we can do the conversion
    # very quickly and avoid the slow calls to the quantities package
    if quantity.units in RATECOEFFICIENT_COMMON_UNITS:
        return quantity
    dimensionality = pq.Quantity(1.0, quantity.units).simplified.dimensionality
    # Keep only the dictionary lookup inside the try block so an unexpected
    # KeyError from anything else cannot be misreported as "invalid units".
    try:
        factor = RATECOEFFICIENT_CONVERSION_FACTORS[dimensionality]
    except KeyError:
        raise QuantityError('Invalid units {0!r}.'.format(quantity.units))
    quantity.value_si *= factor
    # Return the Quantity or ArrayQuantity object
    return quantity
|
|
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import logging
import select
import time
from pika.adapters.base_connection import BaseConnection
# Module-level logger for the adapter.
LOGGER = logging.getLogger(__name__)
# One of select, epoll, kqueue or poll; None means autodetect the best
# available mechanism in IOLoop.start_poller().
SELECT_TYPE = None
# Use epoll's constants to keep life easy
READ = 0x0001   # data available to read
WRITE = 0x0004  # socket is writable
ERROR = 0x0008  # error condition on the socket
class SelectConnection(BaseConnection):
    """An asynchronous connection adapter that attempts to use the fastest
    event loop adapter for the given platform.
    """

    def __init__(self, parameters=None, on_open_callback=None,
                 on_open_error_callback=None, on_close_callback=None,
                 stop_ioloop_on_close=True):
        """Create a new instance of the Connection object.

        :param pika.connection.Parameters parameters: Connection parameters
        :param method on_open_callback: Method to call on connection open
        :param on_open_error_callback: Method to call if the connection cant
                                       be opened
        :type on_open_error_callback: method
        :param method on_close_callback: Method to call on connection close
        :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
        :raises: RuntimeError

        """
        event_loop = IOLoop(self._manage_event_state)
        super(SelectConnection, self).__init__(parameters,
                                               on_open_callback,
                                               on_open_error_callback,
                                               on_close_callback,
                                               event_loop,
                                               stop_ioloop_on_close)

    def _adapter_connect(self):
        """Connect to the RabbitMQ broker, returning True on success, False
        on failure.

        :rtype: bool

        """
        error = super(SelectConnection, self)._adapter_connect()
        if error:
            return error
        # Connected: hand the socket over to the event loop's poller.
        self.ioloop.start_poller(self._handle_events,
                                 self.event_state,
                                 self.socket.fileno())
        return error

    def _flush_outbound(self):
        """Call the state manager who will figure out that we need to write
        then call the poller's poll function to force it to process events.

        """
        self.ioloop.poller._manage_event_state()
        # Force our poller to come up for air, but in write only mode: write
        # only mode prevents messages from coming in and kicking off events
        # through the consumer.
        self.ioloop.poller.poll(write_only=True)
class IOLoop(object):
    """Singleton wrapper that decides which type of poller to use, creates an
    instance of it in start_poller and keeps the invoking application in a
    blocking state by calling the poller's start method. Poller should keep
    looping until IOLoop.instance().stop() is called or there is a socket
    error.

    Also provides a convenient pass-through for add_timeout and set_events

    """

    def __init__(self, state_manager):
        """Create an instance of the IOLoop object.

        :param method state_manager: The method to manage state

        """
        self.poller = None
        self._manage_event_state = state_manager

    def add_timeout(self, deadline, callback_method):
        """Add the callback_method to the IOLoop timer to fire after deadline
        seconds. Returns a handle to the timeout. Do not confuse with
        Tornado's timeout where you pass in the time you want to have your
        callback called. Only pass in the seconds until it's to be called.

        :param int deadline: The number of seconds to wait to call callback
        :param method callback_method: The callback method
        :rtype: str

        """
        if not self.poller:
            # No poller yet: fall back to a blocking sleep, then fire the
            # callback synchronously and return its result.
            time.sleep(deadline)
            return callback_method()
        return self.poller.add_timeout(deadline, callback_method)

    @property
    def poller_type(self):
        """Return the type of poller.

        :rtype: str

        """
        return self.poller.__class__.__name__

    def remove_timeout(self, timeout_id):
        """Remove a timeout if it's still in the timeout stack of the poller

        :param str timeout_id: The timeout id to remove

        """
        self.poller.remove_timeout(timeout_id)

    def start(self):
        """Start the IOLoop, waiting for a Poller to take over."""
        LOGGER.debug('Starting IOLoop')
        # Wait for start_poller() to install a poller BEFORE touching it.
        # (The previous implementation set self.poller.open = True first,
        # which raised AttributeError whenever the poller did not exist yet —
        # exactly the case this wait loop is meant to handle.)
        while not self.poller:
            time.sleep(SelectPoller.TIMEOUT)
        self.poller.open = True
        self.poller.start()
        self.poller.flush_pending_timeouts()

    def start_poller(self, handler, events, fileno):
        """Start the Poller, once started will take over for IOLoop.start()

        :param method handler: The method to call to handle events
        :param int events: The events to handle
        :param int fileno: The file descriptor to poll for

        """
        LOGGER.debug('Starting the Poller')
        self.poller = None
        # Prefer epoll, then kqueue, then poll, then select, honoring an
        # explicit SELECT_TYPE override if one is set.
        if hasattr(select, 'epoll'):
            if not SELECT_TYPE or SELECT_TYPE == 'epoll':
                LOGGER.debug('Using EPollPoller')
                self.poller = EPollPoller(fileno, handler, events,
                                          self._manage_event_state)
        if not self.poller and hasattr(select, 'kqueue'):
            if not SELECT_TYPE or SELECT_TYPE == 'kqueue':
                LOGGER.debug('Using KQueuePoller')
                self.poller = KQueuePoller(fileno, handler, events,
                                           self._manage_event_state)
        if not self.poller and hasattr(select, 'poll') and hasattr(select.poll(), 'modify'):
            if not SELECT_TYPE or SELECT_TYPE == 'poll':
                LOGGER.debug('Using PollPoller')
                self.poller = PollPoller(fileno, handler, events,
                                         self._manage_event_state)
        if not self.poller:
            LOGGER.debug('Using SelectPoller')
            self.poller = SelectPoller(fileno, handler, events,
                                       self._manage_event_state)

    def stop(self):
        """Stop the poller's event loop"""
        LOGGER.debug('Stopping the poller event loop')
        self.poller.open = False

    def update_handler(self, fileno, events):
        """Pass in the events to process for the given file descriptor.

        :param int fileno: The file descriptor to poll for
        :param int events: The events to handle

        """
        self.poller.update_handler(fileno, events)
class SelectPoller(object):
    """Default behavior is to use Select since it's the widest supported and has
    all of the methods we need for child classes as well. One should only need
    to override the update_handler and start methods for additional types.

    """
    # Seconds each poll() call blocks for; also paces timeout processing.
    TIMEOUT = 1

    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the SelectPoller

        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state

        """
        self.fileno = fileno
        self.events = events
        self.open = True
        self._handler = handler
        self._timeouts = dict()
        self._manage_event_state = state_manager

    def add_timeout(self, deadline, callback_method):
        """Add the callback_method to the IOLoop timer to fire after deadline
        seconds. Returns a handle to the timeout. Do not confuse with
        Tornado's timeout where you pass in the time you want to have your
        callback called. Only pass in the seconds until it's to be called.

        :param int deadline: The number of seconds to wait to call callback
        :param method callback_method: The callback method
        :rtype: str

        """
        value = {'deadline': time.time() + deadline,
                 'callback': callback_method}
        # The (deadline, callback) pair itself serves as the timeout handle.
        timeout_id = hash(frozenset(value.items()))
        self._timeouts[timeout_id] = value
        return timeout_id

    def flush_pending_timeouts(self):
        """Give any pending timeouts one TIMEOUT period to fire."""
        if len(self._timeouts) > 0:
            time.sleep(SelectPoller.TIMEOUT)
            self.process_timeouts()

    def poll(self, write_only=False):
        """Check to see if the events that are cared about have fired.

        :param bool write_only: Don't look at self.events, just look to see if
            the adapter can write.

        """
        # Build our values to pass into select
        input_fileno, output_fileno, error_fileno = [], [], []
        if self.events & READ:
            input_fileno = [self.fileno]
        if self.events & WRITE:
            output_fileno = [self.fileno]
        if self.events & ERROR:
            error_fileno = [self.fileno]
        # Wait on select to let us know what's up
        try:
            read, write, error = select.select(input_fileno,
                                               output_fileno,
                                               error_fileno,
                                               SelectPoller.TIMEOUT)
        except select.error as error:
            return self._handler(self.fileno, ERROR, error)
        # Build our events bit mask
        events = 0
        if read:
            events |= READ
        if write:
            events |= WRITE
        if error:
            events |= ERROR
        if events:
            self._handler(self.fileno, events, write_only=write_only)

    def process_timeouts(self):
        """Process the self._timeouts event stack"""
        start_time = time.time()
        # Iterate over a snapshot of the ids: callbacks may add or remove
        # timeouts, and deleting from the dict while iterating its live
        # keys() view raises RuntimeError on Python 3.
        for timeout_id in list(self._timeouts.keys()):
            # A previous callback may have removed this timeout already.
            if timeout_id not in self._timeouts:
                continue
            if self._timeouts[timeout_id]['deadline'] <= start_time:
                callback = self._timeouts[timeout_id]['callback']
                del self._timeouts[timeout_id]
                callback()

    def remove_timeout(self, timeout_id):
        """Remove a timeout if it's still in the timeout stack

        :param str timeout_id: The timeout id to remove

        """
        if timeout_id in self._timeouts:
            del self._timeouts[timeout_id]

    def start(self):
        """Start the main poller loop. It will loop here until self.closed"""
        while self.open:
            self.poll()
            self.process_timeouts()
            self._manage_event_state()

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask

        """
        self.events = events
class KQueuePoller(SelectPoller):
    """KQueuePoller works on BSD based systems and is faster than select"""

    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the KQueuePoller

        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state

        """
        super(KQueuePoller, self).__init__(fileno, handler, events,
                                           state_manager)
        # Start from "nothing registered" so that update_handler() below sees
        # a difference against the requested mask and registers the kevents.
        self.events = 0
        self._kqueue = select.kqueue()
        self.update_handler(fileno, events)
        # NOTE(review): redundant — SelectPoller.__init__ already stored
        # state_manager; harmless but could be removed.
        self._manage_event_state = state_manager

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask

        """
        # No need to update if our events are the same
        if self.events == events:
            return
        kevents = list()
        # Diff the requested mask against the registered one and queue an
        # ADD or DELETE kevent for each of the READ and WRITE filters.
        if not events & READ:
            if self.events & READ:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_READ,
                                             flags=select.KQ_EV_DELETE))
        else:
            if not self.events & READ:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_READ,
                                             flags=select.KQ_EV_ADD))
        if not events & WRITE:
            if self.events & WRITE:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_WRITE,
                                             flags=select.KQ_EV_DELETE))
        else:
            if not self.events & WRITE:
                kevents.append(select.kevent(fileno,
                                             filter=select.KQ_FILTER_WRITE,
                                             flags=select.KQ_EV_ADD))
        for event in kevents:
            self._kqueue.control([event], 0)
        self.events = events

    def start(self):
        """Start the main poller loop. It will loop here until self.closed"""
        while self.open:
            self.poll()
            self.process_timeouts()
            self._manage_event_state()

    def poll(self, write_only=False):
        """Check to see if the events that are cared about have fired.

        :param bool write_only: Don't look at self.events, just look to see if
            the adapter can write.

        """
        events = 0
        try:
            # Block for up to TIMEOUT seconds, collecting at most 1000 kevents.
            kevents = self._kqueue.control(None, 1000, SelectPoller.TIMEOUT)
        except OSError as error:
            return self._handler(self.fileno, ERROR, error)
        # Translate the kqueue filters back into our READ/WRITE/ERROR bitmask,
        # reporting only events we are currently interested in.
        for event in kevents:
            if event.filter == select.KQ_FILTER_READ and READ & self.events:
                events |= READ
            if event.filter == select.KQ_FILTER_WRITE and WRITE & self.events:
                events |= WRITE
            if event.flags & select.KQ_EV_ERROR and ERROR & self.events:
                events |= ERROR
        if events:
            LOGGER.debug("Calling %s(%i)", self._handler, events)
            self._handler(self.fileno, events, write_only=write_only)
class PollPoller(SelectPoller):
    """Poll works on Linux and can have better performance than EPoll in
    certain scenarios. Both are faster than select.

    """

    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the PollPoller

        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state

        """
        super(PollPoller, self).__init__(fileno, handler, events, state_manager)
        self._poll = select.poll()
        self._poll.register(fileno, self.events)

    def update_handler(self, fileno, events):
        """Set the events to the current events

        :param int fileno: The file descriptor
        :param int events: The event mask

        """
        self.events = events
        self._poll.modify(fileno, self.events)

    def start(self):
        """Start the main poller loop. It will loop here until self.closed"""
        # Remember whether we ever actually ran: the cleanup below should only
        # happen if the loop was entered (open was True at the start).
        was_open = self.open
        while self.open:
            self.poll()
            self.process_timeouts()
            self._manage_event_state()
        if not was_open:
            return
        # Loop exited normally: stop watching the descriptor and unregister it.
        try:
            LOGGER.info("Unregistering poller on fd %d" % self.fileno)
            self.update_handler(self.fileno, 0)
            self._poll.unregister(self.fileno)
        except IOError as err:
            LOGGER.debug("Got IOError while shutting down poller: %s", err)

    def poll(self, write_only=False):
        """Poll until TIMEOUT waiting for an event

        :param bool write_only: Only process write events

        """
        try:
            # poll() takes milliseconds, TIMEOUT is in seconds.
            events = self._poll.poll(int(SelectPoller.TIMEOUT * 1000))
        except select.error as error:
            return self._handler(self.fileno, ERROR, error)
        if events:
            LOGGER.debug("Calling %s with %d events",
                         self._handler, len(events))
            for fileno, event in events:
                self._handler(fileno, event, write_only=write_only)
class EPollPoller(PollPoller):
    """EPoll works on Linux and can have better performance than Poll in
    certain scenarios. Both are faster than select.

    """

    def __init__(self, fileno, handler, events, state_manager):
        """Create an instance of the EPollPoller

        :param int fileno: The file descriptor to check events for
        :param method handler: What is called when an event happens
        :param int events: The events to look for
        :param method state_manager: The method to manage state

        """
        super(EPollPoller, self).__init__(fileno, handler, events,
                                          state_manager)
        # Swap the parent's poll object for an epoll one and register again.
        self._poll = select.epoll()
        self._poll.register(fileno, self.events)

    def poll(self, write_only=False):
        """Poll until TIMEOUT waiting for an event

        :param bool write_only: Only process write events

        """
        try:
            # Unlike poll(), epoll takes its timeout in seconds.
            events = self._poll.poll(SelectPoller.TIMEOUT)
        except IOError as error:
            return self._handler(self.fileno, ERROR, error)
        if not events:
            return
        LOGGER.debug("Calling %s", self._handler)
        for descriptor, event_mask in events:
            self._handler(descriptor, event_mask, write_only=write_only)
|
|
import os
from ecs_deplojo import task_definitions
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_generate_task_definition(tmpdir):
    # A single-container template; ${image} is substituted from template_vars
    # and both family and hostname are derived from ``name``.
    definition_json = """
{
    "family": "default",
    "volumes": [],
    "containerDefinitions": [
        {
            "name": "default",
            "image": "${image}",
            "essential": true,
            "command": ["hello", "world"],
            "memory": 256,
            "cpu": 0,
            "portMappings": [
                {
                    "containerPort": 8080,
                    "hostPort": 0
                }
            ]
        }
    ]
}
""".strip()
    template_file = tmpdir.join("task_definition.json")
    template_file.write(definition_json)

    expected = task_definitions.TaskDefinition(
        {
            "family": "my-task-def",
            "volumes": [],
            "containerDefinitions": [
                {
                    "name": "default",
                    "image": "my-docker-image:1.0",
                    "essential": True,
                    "command": ["hello", "world"],
                    "hostname": "my-task-def",
                    "memory": 256,
                    "cpu": 0,
                    "portMappings": [{"containerPort": 8080, "hostPort": 0}],
                    "environment": {},
                }
            ],
            "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
        }
    )

    generated = task_definitions.generate_task_definition(
        template_file.strpath,
        environment={},
        template_vars={"image": "my-docker-image:1.0"},
        overrides={},
        name="my-task-def",
    )
    assert generated == expected
def test_generate_task_definition_overrides(tmpdir):
    # Verify that per-container overrides are applied when rendering: scalar
    # fields (memory) replace the template value, new fields
    # (memoryReservation) are added, and portMappings entries are appended to
    # the template's list (the expected result contains both mappings).
    task_data = """
{
    "family": "default",
    "volumes": [],
    "containerDefinitions": [
        {
            "name": "default",
            "image": "${image}",
            "essential": true,
            "command": ["hello", "world"],
            "memory": 256,
            "cpu": 0,
            "portMappings": [
                {
                    "containerPort": 8080,
                    "hostPort": 0
                }
            ]
        }
    ]
}
""".strip()
    filename = tmpdir.join("task_definition.json")
    filename.write(task_data)
    task_definition = task_definitions.generate_task_definition(
        filename.strpath,
        environment={},
        template_vars={"image": "my-docker-image:1.0"},
        overrides={
            "default": {
                "memory": 512,
                "memoryReservation": 128,
                "portMappings": [{"hostPort": 80, "containerPort": 9000}],
            }
        },
        name="my-task-def",
    )
    expected = task_definitions.TaskDefinition(
        {
            "family": "my-task-def",
            "volumes": [],
            "containerDefinitions": [
                {
                    "name": "default",
                    "image": "my-docker-image:1.0",
                    "essential": True,
                    "command": ["hello", "world"],
                    "hostname": "my-task-def",
                    "memory": 512,
                    "memoryReservation": 128,
                    "cpu": 0,
                    "portMappings": [
                        {"containerPort": 8080, "hostPort": 0},
                        {"containerPort": 9000, "hostPort": 80},
                    ],
                    "environment": {},
                }
            ],
            "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
        }
    )
    assert task_definition == expected
def test_generate_multiple_task_definitions(tmpdir):
    # Two task definitions are generated from the same two-container template.
    # Each one picks up the shared environment plus its own environment_group,
    # applies its overrides (web-1 memory bumped to 512, web-2 untouched), and
    # gets hostnames of the form "<task-def-name>-<container-name>".
    task_data = """
{
    "family": "default",
    "volumes": [],
    "containerDefinitions": [
        {
            "name": "web-1",
            "image": "${image}",
            "essential": true,
            "command": ["hello", "world"],
            "memory": 256,
            "cpu": 0,
            "portMappings": [
                {
                    "containerPort": 8080,
                    "hostPort": 0
                }
            ]
        },
        {
            "name": "web-2",
            "image": "${image}",
            "essential": true,
            "command": ["hello", "world"],
            "memory": 256,
            "cpu": 0,
            "portMappings": [
                {
                    "containerPort": 8080,
                    "hostPort": 0
                }
            ]
        }
    ]
}
""".strip()
    filename = tmpdir.join("task_definition.json")
    filename.write(task_data)
    config = {
        "environment": {"DATABASE_URL": "postgresql://"},
        "environment_groups": {
            "group-1": {"ENV_CODE": "group-1"},
            "group-2": {"ENV_CODE": "group-2"},
        },
        "task_definitions": {
            "task-def-1": {
                "template": filename.strpath,
                "environment_group": "group-1",
                "overrides": {"web-1": {"memory": 512}},
            },
            "task-def-2": {
                "template": filename.strpath,
                "environment_group": "group-2",
                "overrides": {"web-1": {"memory": 512}},
            },
        },
    }
    result = task_definitions.generate_task_definitions(
        config, template_vars={"image": "my-docker-image:1.0"}, base_path=None
    )
    expected = {
        "task-def-1": task_definitions.TaskDefinition(
            {
                "family": "task-def-1",
                "volumes": [],
                "containerDefinitions": [
                    {
                        "name": "web-1",
                        "image": "my-docker-image:1.0",
                        "essential": True,
                        "command": ["hello", "world"],
                        "hostname": "task-def-1-web-1",
                        "memory": 512,
                        "cpu": 0,
                        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
                        "environment": {
                            "DATABASE_URL": "postgresql://",
                            "ENV_CODE": "group-1",
                        },
                    },
                    {
                        "name": "web-2",
                        "image": "my-docker-image:1.0",
                        "essential": True,
                        "command": ["hello", "world"],
                        "hostname": "task-def-1-web-2",
                        "memory": 256,
                        "cpu": 0,
                        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
                        "environment": {
                            "DATABASE_URL": "postgresql://",
                            "ENV_CODE": "group-1",
                        },
                    },
                ],
                "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
            }
        ),
        "task-def-2": task_definitions.TaskDefinition(
            {
                "family": "task-def-2",
                "volumes": [],
                "containerDefinitions": [
                    {
                        "name": "web-1",
                        "image": "my-docker-image:1.0",
                        "essential": True,
                        "hostname": "task-def-2-web-1",
                        "command": ["hello", "world"],
                        "memory": 512,
                        "cpu": 0,
                        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
                        "environment": {
                            "DATABASE_URL": "postgresql://",
                            "ENV_CODE": "group-2",
                        },
                    },
                    {
                        "name": "web-2",
                        "image": "my-docker-image:1.0",
                        "hostname": "task-def-2-web-2",
                        "essential": True,
                        "command": ["hello", "world"],
                        "memory": 256,
                        "cpu": 0,
                        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
                        "environment": {
                            "DATABASE_URL": "postgresql://",
                            "ENV_CODE": "group-2",
                        },
                    },
                ],
                "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
            }
        ),
    }
    assert result == expected
def test_generate_task_definitions_write_output(tmpdir):
    """Each generated task definition is serialized to <output_path>/<name>.json."""
    template_source = """
{
  "family": "default",
  "volumes": [],
  "containerDefinitions": [
    {
      "name": "web-1",
      "image": "${image}",
      "essential": true,
      "command": ["hello", "world"],
      "memory": 256,
      "logConfiguration": {},
      "cpu": 0,
      "portMappings": [
        {
          "containerPort": 8080,
          "hostPort": 0
        }
      ]
    }
  ]
}
""".strip()
    input_dir = tmpdir.join("input").mkdir()
    input_dir.join("task_definition.json").write(template_source)
    output_dir = tmpdir.join("output").mkdir()

    log_config = {
        "logDriver": "awslogs",
        "options": {
            "awslogs-group": "default",
            "awslogs-region": "eu-west-1",
        },
    }
    config = {
        "environment": {"DATABASE_URL": "postgresql://"},
        "task_definitions": {
            "task-def-1": {
                "template": "task_definition.json",
                "overrides": {
                    "web-1": {
                        "memory": 512,
                        "logConfiguration": log_config,
                    }
                },
            }
        },
    }

    task_definitions.generate_task_definitions(
        config,
        template_vars={"image": "my-docker-image:1.0"},
        base_path=input_dir.strpath,
        output_path=output_dir.strpath,
    )
    assert output_dir.join("task-def-1.json").exists()
def test_generate_task_definition_with_task_role_arn(tmpdir):
    """taskRoleArn is propagated into the generated task definition."""
    template_source = """
{
  "family": "default",
  "volumes": [],
  "containerDefinitions": [
    {
      "name": "default",
      "image": "${image}",
      "essential": true,
      "command": ["hello", "world"],
      "memory": 256,
      "cpu": 0,
      "portMappings": [
        {
          "containerPort": 8080,
          "hostPort": 0
        }
      ]
    }
  ]
}
""".strip()
    template_path = tmpdir.join("task_definition.json")
    template_path.write(template_source)

    result = task_definitions.generate_task_definition(
        template_path.strpath,
        environment={},
        task_role_arn="arn:my-task-role",
        template_vars={"image": "my-docker-image:1.0"},
        overrides={},
        name="my-task-def",
    )

    # The single container, fully interpolated.
    container = {
        "name": "default",
        "image": "my-docker-image:1.0",
        "essential": True,
        "hostname": "my-task-def",
        "command": ["hello", "world"],
        "memory": 256,
        "cpu": 0,
        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
        "environment": {},
    }
    expected = task_definitions.TaskDefinition(
        {
            "family": "my-task-def",
            "taskRoleArn": "arn:my-task-role",
            "volumes": [],
            "containerDefinitions": [container],
            "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
        }
    )
    assert result == expected
def test_generate_task_definition_with_execution_role_arn(tmpdir):
    """executionRoleArn is propagated into the generated task definition."""
    template_source = """
{
  "family": "default",
  "volumes": [],
  "containerDefinitions": [
    {
      "name": "default",
      "image": "${image}",
      "essential": true,
      "command": ["hello", "world"],
      "memory": 256,
      "cpu": 0,
      "portMappings": [
        {
          "containerPort": 8080,
          "hostPort": 0
        }
      ]
    }
  ]
}
""".strip()
    template_path = tmpdir.join("task_definition.json")
    template_path.write(template_source)

    result = task_definitions.generate_task_definition(
        template_path.strpath,
        environment={},
        execution_role_arn="arn:my-task-execution-role",
        template_vars={"image": "my-docker-image:1.0"},
        overrides={},
        name="my-task-def",
    )

    # The single container, fully interpolated.
    container = {
        "name": "default",
        "image": "my-docker-image:1.0",
        "essential": True,
        "hostname": "my-task-def",
        "command": ["hello", "world"],
        "memory": 256,
        "cpu": 0,
        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
        "environment": {},
    }
    expected = task_definitions.TaskDefinition(
        {
            "family": "my-task-def",
            "executionRoleArn": "arn:my-task-execution-role",
            "volumes": [],
            "containerDefinitions": [container],
            "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
        }
    )
    assert result == expected
def test_generate_task_definition_with_secrets(tmpdir):
    """Secrets mappings end up on the container definition."""
    template_source = """
{
  "family": "default",
  "volumes": [],
  "containerDefinitions": [
    {
      "name": "default",
      "image": "${image}",
      "essential": true,
      "command": ["hello", "world"],
      "memory": 256,
      "cpu": 0,
      "portMappings": [
        {
          "containerPort": 8080,
          "hostPort": 0
        }
      ]
    }
  ]
}
""".strip()
    template_path = tmpdir.join("task_definition.json")
    template_path.write(template_source)

    secrets = {
        "SUPER_SECRET_ENV_VAR": "/path/in/param/store",
        "SUPER_SECRET_ENV_VAR2": "/other/path/in/param/store",
    }
    result = task_definitions.generate_task_definition(
        template_path.strpath,
        environment={},
        task_role_arn="arn:my-task-role",
        template_vars={"image": "my-docker-image:1.0"},
        overrides={},
        name="my-task-def",
        secrets=secrets,
    )

    # The single container, fully interpolated, with the secrets attached.
    container = {
        "name": "default",
        "image": "my-docker-image:1.0",
        "essential": True,
        "hostname": "my-task-def",
        "command": ["hello", "world"],
        "memory": 256,
        "cpu": 0,
        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
        "environment": {},
        "secrets": {
            "SUPER_SECRET_ENV_VAR": "/path/in/param/store",
            "SUPER_SECRET_ENV_VAR2": "/other/path/in/param/store",
        },
    }
    expected = task_definitions.TaskDefinition(
        {
            "family": "my-task-def",
            "taskRoleArn": "arn:my-task-role",
            "volumes": [],
            "containerDefinitions": [container],
            "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
        }
    )
    assert result == expected
def test_generate_task_definition_awsvpc(tmpdir):
    """networkMode=awsvpc is preserved; no hostname is injected in that mode."""
    template_source = """
{
  "family": "default",
  "networkMode": "awsvpc",
  "volumes": [],
  "containerDefinitions": [
    {
      "name": "default",
      "image": "${image}",
      "essential": true,
      "command": ["hello", "world"],
      "memory": 256,
      "cpu": 0,
      "portMappings": [
        {
          "containerPort": 8080,
          "hostPort": 0
        }
      ]
    }
  ]
}
""".strip()
    template_path = tmpdir.join("task_definition.json")
    template_path.write(template_source)

    task_definition = task_definitions.generate_task_definition(
        template_path.strpath,
        environment={},
        template_vars={"image": "my-docker-image:1.0"},
        overrides={},
        name="my-task-def",
    )

    # Note: no "hostname" key here, unlike the bridge-network tests.
    container = {
        "name": "default",
        "image": "my-docker-image:1.0",
        "essential": True,
        "command": ["hello", "world"],
        "memory": 256,
        "cpu": 0,
        "portMappings": [{"containerPort": 8080, "hostPort": 0}],
        "environment": {},
    }
    expected = task_definitions.TaskDefinition(
        {
            "family": "my-task-def",
            "volumes": [],
            "networkMode": "awsvpc",
            "containerDefinitions": [container],
            "tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
        }
    )
    assert task_definition == expected
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inplace_ops."""
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.platform import test as test_lib
class InplaceOpsTest(test_util.TensorFlowTestCase):
  """Tests for inplace_update/inplace_add/inplace_sub, alias ops and empty().

  Each test mirrors the TF op result against an equivalent numpy mutation.
  """

  def testBasicUpdate(self):
    """inplace_update via list index, negative index and scalar index."""
    for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
      with test_util.use_gpu():
        x = array_ops.ones([7, 3], dtype)
        y = np.ones([7, 3], dtype.as_numpy_dtype)
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3], dtype))
        y[3, :] = 1
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_update(x, [-1],
                                       array_ops.ones([1, 3], dtype) * 2)
        y[-1, :] = 2
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_update(x, 5, array_ops.ones([3], dtype) * 7)
        y[5, :] = 7
        self.assertAllClose(x, y)

  def testBasicUpdateBool(self):
    """inplace_update on a bool tensor."""
    with test_util.use_gpu():
      x = array_ops.ones([7, 3], dtypes.bool)
      y = np.ones([7, 3], dtypes.bool.as_numpy_dtype)
      self.assertAllClose(x, y)
      x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3],
                                                           dtypes.bool))
      y[3, :] = True
      self.assertAllClose(x, y)
      x = inplace_ops.inplace_update(x, [-1],
                                     array_ops.zeros([1, 3], dtypes.bool))
      y[-1, :] = False
      self.assertAllClose(x, y)
      x = inplace_ops.inplace_update(x, 5, array_ops.zeros([3], dtypes.bool))
      y[5, :] = False
      self.assertAllClose(x, y)

  def testBasicAdd(self):
    """inplace_add via list/negative/scalar index and whole-tensor (None)."""
    for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
      with test_util.use_gpu():
        x = array_ops.ones([7, 3], dtype)
        y = np.ones([7, 3], dtype.as_numpy_dtype)
        self.assertAllClose(x, y)
        # Fixed for consistency: was array_ops.inplace_add, but the module
        # under test is inplace_ops (as used by every other call here).
        x = inplace_ops.inplace_add(x, [3], array_ops.ones([1, 3], dtype))
        y[3, :] += 1
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_add(x, [-1], array_ops.ones([1, 3], dtype) * 2)
        y[-1, :] += 2
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_add(x, 5, array_ops.ones([3], dtype) * 7)
        y[5, :] += 7
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_add(x, None, array_ops.ones([7, 3], dtype) * 99)
        y[:, :] += 99
        self.assertAllClose(x, y)

  def testBasicSub(self):
    """inplace_sub via list/negative/scalar index and whole-tensor (None)."""
    for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
      with test_util.use_gpu():
        x = array_ops.ones([7, 3], dtype)
        y = np.ones([7, 3], dtype.as_numpy_dtype)
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, [3], array_ops.ones([1, 3], dtype))
        y[3, :] -= 1
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, [-1], array_ops.ones([1, 3], dtype) * 2)
        y[-1, :] -= 2
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, 5, array_ops.ones([3], dtype) * 7)
        y[5, :] -= 7
        self.assertAllClose(x, y)
        x = inplace_ops.inplace_sub(x, None, array_ops.ones([7, 3], dtype) * 99)
        y[:, :] -= 99
        self.assertAllClose(x, y)

  def testRandom(self):
    """Random mixed update/add/sub on a 3-D tensor vs numpy reference."""
    with test_util.use_gpu():
      d0, d1, d2 = 100, 3, 5
      x = array_ops.zeros([d0, d1, d2])
      y = np.zeros([d0, d1, d2])
      for _ in range(20):
        idx = np.random.choice(d0, d0 // 10, replace=False)
        val = np.random.randint(10, size=(d0 // 10, d1, d2))
        op = np.random.randint(3)
        if op == 0:
          x = inplace_ops.inplace_update(x, idx, val)
          y[idx, :] = val
        elif op == 1:
          x = inplace_ops.inplace_add(x, idx, val)
          y[idx, :] += val
        elif op == 2:
          x = inplace_ops.inplace_sub(x, idx, val)
          y[idx, :] -= val
        self.assertAllClose(x, y)

  def testRandom1D(self):
    """Random mixed update/add/sub on a 1-D tensor vs numpy reference."""
    with test_util.use_gpu():
      d0 = 100
      x = array_ops.zeros([d0])
      y = np.zeros([d0])
      for _ in range(20):
        idx = np.random.choice(d0, d0 // 10, replace=False)
        val = np.random.randint(10, size=(d0 // 10))
        op = np.random.randint(3)
        if op == 0:
          x = inplace_ops.inplace_update(x, idx, val)
          y[idx] = val
        elif op == 1:
          x = inplace_ops.inplace_add(x, idx, val)
          y[idx] += val
        elif op == 2:
          x = inplace_ops.inplace_sub(x, idx, val)
          y[idx] -= val
        self.assertAllClose(x, y)

  def testAlias(self):
    """alias_inplace_add mutates the aliased input tensor."""
    with test_util.use_gpu():
      x = array_ops.ones([2, 3])
      y = inplace_ops.alias_inplace_add(x, [0], [[1, 2, 3]])
      with ops.control_dependencies([y]):
        z = array_ops.identity(x)
      _, vy, vz = self.evaluate([x, y, z])
      self.assertAllClose(vy, vz)

  def testError(self):
    """Shape-mismatch and rank errors raise InvalidArgumentError."""
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "must be a vector"):
      _ = self.evaluate(inplace_ops.inplace_update([[1.]], [[0]], [[10]]))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "x and v shape doesn't match"):
      _ = self.evaluate(inplace_ops.inplace_update([[1.]], [0], [10]))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "i and x shape doesn't match"):
      _ = self.evaluate(inplace_ops.inplace_update([[1.]], [0, 1], [[10]]))

  def testEmpty(self):
    """empty/empty_like honor shape, dtype and the init flag."""
    for dtype in [
        dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64, dtypes.bool,
        dtypes.uint8
    ]:
      with test_util.use_gpu():
        test_shapes = [(), (1,), (2, 3), (0, 2), (2, 3, 5), (2, 0, 5)]
        for shape in test_shapes:
          val = self.evaluate(inplace_ops.empty(shape, dtype))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          val = self.evaluate(inplace_ops.empty(shape, dtype, init=True))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
          val = self.evaluate(
              inplace_ops.empty_like(array_ops.zeros(shape, dtype)))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          val = self.evaluate(inplace_ops.empty_like(
              array_ops.zeros(shape, dtype), init=True))
          self.assertEqual(val.shape, shape)
          self.assertEqual(val.dtype, dtype.as_numpy_dtype)
          self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
    # String tensors are always initialized to empty strings.
    with test_util.use_gpu():
      val = self.evaluate(inplace_ops.empty((1, 2), dtypes.string, init=True))
      self.assertEqual(val.tolist(), [[b"", b""]])
      val = self.evaluate(inplace_ops.empty((1, 2), dtypes.string, init=False))
      self.assertEqual(val.tolist(), [[b"", b""]])

  def testInplaceOpOnEmptyTensors(self):
    """Inplace ops are no-ops on zero-sized tensors."""
    op_fns = [
        inplace_ops.inplace_add,
        inplace_ops.inplace_sub,
        inplace_ops.inplace_update,
    ]
    for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
      for op_fn in op_fns:
        with test_util.use_gpu():
          x = array_ops.zeros([7, 0], dtype)
          y = np.zeros([7, 0], dtype.as_numpy_dtype)
          self.assertAllClose(x, y)
          x = op_fn(x, [3], array_ops.ones([1, 0], dtype))
          self.assertAllClose(x, y)
          x = op_fn(x, None, array_ops.ones([1, 0], dtype))
          self.assertAllClose(x, y)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test_lib.main()
|
|
# -*- coding: utf-8 -*-
# Reject requests for this controller if the 'fire' module is disabled
# in the deployment settings.
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
    """
    Module Homepage: one-click incident-report buttons plus the
    fire station of the logged-in person (if any)
    """

    module_name = settings.modules[c].get("name_nice")
    response.title = module_name

    # Which Fire Station (if any) is the logged-in person assigned to?
    htable = s3db.hrm_human_resource
    stable = s3db.fire_station
    person_id = auth.s3_logged_in_person()
    query = (htable.person_id == person_id) & \
            (htable.site_id == stable.site_id)
    station = db(query).select(stable.id,
                               stable.name,
                               limitby = (0, 1),
                               ).first()
    station_id = station.id if station else None
    station_name = station.name if station else None

    # One large button per incident type
    # (NB 'Rescue' needs adding to the event_incident_type table)
    buttons = (("Fire", "red", "Fire"),
               ("Rescue", "green", "Rescue"),
               ("Hazmat", "yellow", "Hazardous Material"),
               )
    incidents = DIV(*[A(DIV(T(label),
                            _style = "background-color:%s;" % colour,
                            _class = "question-container fleft",
                            ),
                        _href = URL(c="event", f="incident_report",
                                    args = ["create"],
                                    vars = {"incident_type": incident_type},
                                    ),
                        )
                      for label, colour, incident_type in buttons])

    return {"incidents": incidents,
            "station_id": station_id,
            "station_name": station_name,
            "module_name": module_name,
            }
# -----------------------------------------------------------------------------
def zone():
    """ RESTful CRUD controller for Fire Zones """

    # Delegate all request handling to the default S3 REST controller
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def zone_type():
    """ RESTful CRUD controller for Fire Zone Types """

    # Delegate all request handling to the default S3 REST controller
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def station():
    """
    Fire Station - RESTful CRUD controller

    Customizes the asset/vehicle component tabs: assets inherit the
    station's organisation, vehicles get a tailored CRUD form.
    """

    # Pre-processor
    def prep(r):
        # Function to call for all Site Instance Types
        from s3db.org import org_site_prep
        org_site_prep(r)

        if r.interactive:
            if r.component:
                component_name = r.component_name
                if component_name in ("asset", "vehicle"):
                    atable = s3db.asset_asset
                    # Stay within Site tab
                    s3db.configure("asset_asset",
                                   create_next = None,
                                   )
                    if component_name == "asset":
                        # Default/Hide the Organisation field
                        org_field = atable.organisation_id
                        org_field.default = r.record.organisation_id
                        org_field.readable = org_field.writable = False
                        # Filter out Vehicles (asset type 1)
                        r.resource.add_component_filter(component_name, (FS("type") != 1))
                    else:
                        atable.organisation_id.required = False # Otherwise needs to be in crud_form & isn't defaulted
                        # Default new to Vehicle
                        atable.type.default = 1
                        # Only select from vehicles
                        ctable = s3db.supply_item_category
                        vehicle_categories = db(ctable.is_vehicle == True).select(ctable.id)
                        atable.item_id.requires.set_filter(filterby = "item_category_id",
                                                           filter_opts = [row.id for row in vehicle_categories],
                                                           )
                        # Include Vehicle Details in the form
                        from s3 import S3SQLCustomForm, S3SQLInlineComponent
                        def vehicle_postprocess(form):
                            # Set the organisation_id to the station's one
                            # (field is hidden from the form)
                            db(atable.id == form.vars.id).update(organisation_id = r.record.organisation_id)
                        crud_form = S3SQLCustomForm("number",
                                                    (T("Vehicle Type"), "item_id"),
                                                    (T("License Plate"), "sn"),
                                                    "purchase_date",
                                                    "purchase_price",
                                                    "purchase_currency",
                                                    "cond",
                                                    S3SQLInlineComponent("vehicle",
                                                                         label = "",
                                                                         multiple = False,
                                                                         fields = [#"vehicle_type_id",
                                                                                   "mileage",
                                                                                   "service_mileage",
                                                                                   "service_date",
                                                                                   "insurance_date",
                                                                                   ],
                                                                         ),
                                                    postprocess = vehicle_postprocess,
                                                    )
                        s3db.configure("asset_asset",
                                       crud_form = crud_form,
                                       )
                        s3.crud_strings["asset_asset"] = Storage(label_create = T("Add Vehicle Details"),
                                                                 title_display = T("Vehicle Details"),
                                                                 title_list = T("Vehicles"),
                                                                 title_update = T("Edit Vehicle Details"),
                                                                 label_list_button = T("List Vehicle Details"),
                                                                 label_delete_button = T("Delete Vehicle Details"),
                                                                 msg_record_created = T("Vehicle Details added"),
                                                                 msg_record_modified = T("Vehicle Details updated"),
                                                                 msg_record_deleted = T("Vehicle Details deleted"),
                                                                 msg_list_empty = T("No Vehicle Details currently defined"),
                                                                 )
        return True
    s3.prep = prep

    return s3_rest_controller(rheader = fire_rheader,
                              # CSV column headers, so no T()
                              csv_extra_fields = [{"label": "Country",
                                                   "field": s3db.gis_country_id(),
                                                   },
                                                  {"label": "Organisation",
                                                   "field": s3db.org_organisation_id(),
                                                   },
                                                  ],
                              )
# -----------------------------------------------------------------------------
def person():
    """ Person Controller for Ajax Requests """

    # Expose pr_person through this controller for Ajax lookups
    return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def fire_rheader(r, tabs=None):
    """
    Resource header for component views

    Args:
        r: the S3Request
        tabs: tab list override (replaced for station records)

    Returns:
        the rheader DIV, or None for non-HTML / non-station requests

    Note: the default was a mutable ``tabs=[]``; changed to ``None``
    (it was never mutated and is unconditionally replaced below).
    """

    rheader = None
    if r.representation == "html":
        if r.name == "station":
            station = r.record
            if station:
                tabs = [(T("Station Details"), None),
                        (T("Staff"), "human_resource"),
                        (T("Shifts"), "shift"),
                        # @ToDo:
                        #(T("Roster"), "shift_staff"),
                        (T("Vehicles"), "vehicle"),
                        (T("Vehicle Deployments"), "vehicle_report"),
                        (T("Assets"), "asset"),
                        ]
                rheader_tabs = s3_rheader_tabs(r, tabs)
                rheader = DIV(rheader_tabs)
    return rheader
# END =========================================================================
|
|
"""
kombu.transport.redis
=====================
Redis transport.
"""
from __future__ import absolute_import
import socket
from bisect import bisect
from contextlib import contextmanager
from time import time
from anyjson import loads, dumps
from kombu.exceptions import InconsistencyError, VersionMismatch
from kombu.five import Empty, values, string_t
from kombu.log import get_logger
from kombu.utils import cached_property, uuid
from kombu.utils.eventio import poll, READ, ERR
from kombu.utils.encoding import bytes_to_str
from kombu.utils.url import _parse_url
# Message used for the InconsistencyError raised when an exchange's
# binding-table key has disappeared from Redis (see Channel.get_table).
NO_ROUTE_ERROR = """
Cannot route message for exchange {0!r}: Table empty or key no longer exists.
Probably the key ({1!r}) has been removed from the Redis database.
"""
try:
from billiard.util import register_after_fork
except ImportError: # pragma: no cover
try:
from multiprocessing.util import register_after_fork # noqa
except ImportError:
def register_after_fork(*args, **kwargs): # noqa
pass
try:
import redis
except ImportError: # pragma: no cover
redis = None # noqa
from . import virtual
logger = get_logger('kombu.transport.redis')
# Shorthand logging helpers used throughout this module.
crit, warn = logger.critical, logger.warn

# Standard Redis server defaults.
DEFAULT_PORT = 6379
DEFAULT_DB = 0

# Discrete priority buckets; message priorities 0-9 are collapsed onto
# these steps (see Channel.priority / Channel._put).
PRIORITY_STEPS = [0, 3, 6, 9]
# This implementation may seem overly complex, but I assure you there is
# a good reason for doing it this way.
#
# Consuming from several connections enables us to emulate channels,
# which means we can have different service guarantees for individual
# channels.
#
# So we need to consume messages from multiple connections simultaneously,
# and using epoll means we don't have to do so using multiple threads.
#
# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout
# exchanges (broadcast), as an alternative to pushing messages to fanout-bound
# queues manually.
class MutexHeld(Exception):
    """Raised by :func:`Mutex` when the named lock is held elsewhere."""
    pass
@contextmanager
def Mutex(client, name, expire):
    """Best-effort distributed mutex built on Redis SETNX.

    Acquires ``name`` for ``expire`` seconds; raises :exc:`MutexHeld`
    if another client owns it.  On exit the key is deleted only if we
    still own it (checked under WATCH to avoid deleting a lock that
    expired and was re-acquired by someone else).
    """
    lock_id = uuid()
    i_won = client.setnx(name, lock_id)
    try:
        if i_won:
            client.expire(name, expire)
            yield
        else:
            # Repair a lock left without a TTL (e.g. owner died between
            # SETNX and EXPIRE), then report contention.
            if not client.ttl(name):
                client.expire(name, expire)
            raise MutexHeld()
    finally:
        if i_won:
            pipe = client.pipeline(True)
            try:
                pipe.watch(name)
                if pipe.get(name) == lock_id:
                    pipe.multi()
                    pipe.delete(name)
                    pipe.execute()
                pipe.unwatch()
            except redis.WatchError:
                # Lost the race: key changed hands, leave it alone.
                pass
class QoS(virtual.QoS):
    """Redis-backed QoS emulation.

    Unacked messages are mirrored into a Redis hash (``unacked_key``,
    tag -> [payload, exchange, routing_key]) plus a sorted set
    (``unacked_index_key``, tag scored by delivery time) so they can be
    restored if a consumer dies before acking.
    """
    restore_at_shutdown = True

    def __init__(self, *args, **kwargs):
        super(QoS, self).__init__(*args, **kwargs)
        # Counts restore_visible calls so the expensive scan only runs
        # every ``interval`` invocations.
        self._vrestore_count = 0

    def append(self, message, delivery_tag):
        """Record a delivered-but-unacked message in Redis."""
        delivery = message.delivery_info
        EX, RK = delivery['exchange'], delivery['routing_key']
        with self.pipe_or_acquire() as pipe:
            pipe.zadd(self.unacked_index_key, delivery_tag, time()) \
                .hset(self.unacked_key, delivery_tag,
                      dumps([message._raw, EX, RK])) \
                .execute()
            super(QoS, self).append(message, delivery_tag)

    def restore_unacked(self):
        """Requeue every message still marked as delivered."""
        for tag in self._delivered:
            self.restore_by_tag(tag)
        self._delivered.clear()

    def ack(self, delivery_tag):
        # Drop the Redis bookkeeping first, then ack in-memory.
        self._remove_from_indices(delivery_tag).execute()
        super(QoS, self).ack(delivery_tag)

    def reject(self, delivery_tag, requeue=False):
        if requeue:
            # Requeue at the head so the message is redelivered first.
            self.restore_by_tag(delivery_tag, leftmost=True)
        self.ack(delivery_tag)

    @contextmanager
    def pipe_or_acquire(self, pipe=None):
        """Yield ``pipe`` if given, else a fresh pipeline on a pooled conn."""
        if pipe:
            yield pipe
        else:
            with self.channel.conn_or_acquire() as client:
                yield client.pipeline()

    def _remove_from_indices(self, delivery_tag, pipe=None):
        """Queue removal of ``delivery_tag`` from both unacked structures.

        Returns the pipeline (caller must ``execute()``).
        """
        with self.pipe_or_acquire(pipe) as pipe:
            return pipe.zrem(self.unacked_index_key, delivery_tag) \
                       .hdel(self.unacked_key, delivery_tag)

    def restore_visible(self, start=0, num=10, interval=10):
        """Requeue unacked messages whose visibility timeout has expired.

        Only actually scans every ``interval``-th call, and only while
        holding the cluster-wide mutex (so just one client restores).
        """
        self._vrestore_count += 1
        if (self._vrestore_count - 1) % interval:
            return
        with self.channel.conn_or_acquire() as client:
            ceil = time() - self.visibility_timeout
            try:
                with Mutex(client, self.unacked_mutex_key,
                           self.unacked_mutex_expire):
                    visible = client.zrevrangebyscore(
                        self.unacked_index_key, ceil, 0,
                        start=num and start, num=num, withscores=True)
                    for tag, score in visible or []:
                        self.restore_by_tag(tag, client)
            except MutexHeld:
                # Another client is already restoring.
                pass

    def restore_by_tag(self, tag, client=None, leftmost=False):
        """Atomically pop one unacked message by tag and push it back."""
        with self.channel.conn_or_acquire(client) as client:
            p, _, _ = self._remove_from_indices(
                tag, client.pipeline().hget(self.unacked_key, tag)).execute()
            if p:
                M, EX, RK = loads(bytes_to_str(p))  # json is unicode
                self.channel._do_restore_message(M, EX, RK, client, leftmost)

    # The keys/timeouts below are configured on the channel.
    @cached_property
    def unacked_key(self):
        return self.channel.unacked_key

    @cached_property
    def unacked_index_key(self):
        return self.channel.unacked_index_key

    @cached_property
    def unacked_mutex_key(self):
        return self.channel.unacked_mutex_key

    @cached_property
    def unacked_mutex_expire(self):
        return self.channel.unacked_mutex_expire

    @cached_property
    def visibility_timeout(self):
        return self.channel.visibility_timeout
class MultiChannelPoller(object):
    """Polls the sockets of many channels with one poller.

    Each channel contributes up to two sockets: one in BRPOP mode
    (consuming from queues) and one in LISTEN mode (fanout via
    SUBSCRIBE).  Readable sockets are dispatched back to the owning
    channel's handler.
    """
    eventflags = READ | ERR

    def __init__(self):
        # active channels
        self._channels = set()
        # file descriptor -> channel map.
        self._fd_to_chan = {}
        # channel -> socket map
        self._chan_to_sock = {}
        # poll implementation (epoll/kqueue/select)
        self.poller = poll()

    def close(self):
        """Unregister all sockets and drop all channel bookkeeping."""
        for fd in values(self._chan_to_sock):
            try:
                self.poller.unregister(fd)
            except (KeyError, ValueError):
                pass
        self._channels.clear()
        self._fd_to_chan.clear()
        self._chan_to_sock.clear()
        self.poller = None

    def add(self, channel):
        self._channels.add(channel)

    def discard(self, channel):
        self._channels.discard(channel)

    def _register(self, channel, client, type):
        """(Re-)register a channel/client socket with the poller."""
        if (channel, client, type) in self._chan_to_sock:
            self._unregister(channel, client, type)
        if client.connection._sock is None:   # not connected yet.
            client.connection.connect()
        sock = client.connection._sock
        self._fd_to_chan[sock.fileno()] = (channel, type)
        self._chan_to_sock[(channel, client, type)] = sock
        self.poller.register(sock, self.eventflags)

    def _unregister(self, channel, client, type):
        self.poller.unregister(self._chan_to_sock[(channel, client, type)])

    def _register_BRPOP(self, channel):
        """enable BRPOP mode for channel."""
        ident = channel, channel.client, 'BRPOP'
        if channel.client.connection._sock is None or \
                ident not in self._chan_to_sock:
            # New/reset connection: a fresh BRPOP must be sent.
            channel._in_poll = False
            self._register(*ident)

        if not channel._in_poll:  # send BRPOP
            channel._brpop_start()

    def _register_LISTEN(self, channel):
        """enable LISTEN mode for channel."""
        if channel.subclient.connection._sock is None:
            # New/reset connection: a fresh SUBSCRIBE must be sent.
            channel._in_listen = False
            self._register(channel, channel.subclient, 'LISTEN')
        if not channel._in_listen:
            channel._subscribe()  # send SUBSCRIBE

    def on_poll_start(self):
        """(Re-)arm BRPOP/LISTEN for every active channel before polling."""
        for channel in self._channels:
            if channel.active_queues:           # BRPOP mode?
                if channel.qos.can_consume():
                    self._register_BRPOP(channel)
            if channel.active_fanout_queues:    # LISTEN mode?
                self._register_LISTEN(channel)

    def on_poll_init(self, poller):
        self.poller = poller
        # Restore timed-out unacked messages; returns after the first
        # channel — same once-only pattern as maybe_restore_messages,
        # since the unacked registry is shared (not per-channel).
        for channel in self._channels:
            return channel.qos.restore_visible(
                num=channel.unacked_restore_limit,
            )

    def maybe_restore_messages(self):
        for channel in self._channels:
            if channel.active_queues:
                # only need to do this once, as they are not local to channel.
                return channel.qos.restore_visible(
                    num=channel.unacked_restore_limit,
                )

    def on_readable(self, fileno):
        """Dispatch a readable fd to its channel's BRPOP/LISTEN handler."""
        chan, type = self._fd_to_chan[fileno]
        if chan.qos.can_consume():
            return chan.handlers[type]()

    def handle_event(self, fileno, event):
        if event & READ:
            return self.on_readable(fileno), self
        elif event & ERR:
            chan, type = self._fd_to_chan[fileno]
            chan._poll_error(type)

    def get(self, timeout=None):
        """Blocking poll across all channels; raises Empty on timeout."""
        for channel in self._channels:
            if channel.active_queues:           # BRPOP mode?
                if channel.qos.can_consume():
                    self._register_BRPOP(channel)
            if channel.active_fanout_queues:    # LISTEN mode?
                self._register_LISTEN(channel)

        events = self.poller.poll(timeout)
        for fileno, event in events or []:
            ret = self.handle_event(fileno, event)
            if ret:
                return ret

        # - no new data, so try to restore messages.
        # - reset active redis commands.
        self.maybe_restore_messages()

        raise Empty()

    @property
    def fds(self):
        return self._fd_to_chan
class Channel(virtual.Channel):
    """Redis-backed virtual AMQP channel."""
    QoS = QoS

    _client = None
    _subclient = None
    supports_fanout = True
    # Redis set holding the binding table for an exchange.
    keyprefix_queue = '_kombu.binding.%s'
    keyprefix_fanout = '/{db}.'
    # Separator between queue name and priority in Redis keys.
    sep = '\x06\x16'
    _in_poll = False
    _in_listen = False
    _fanout_queues = {}
    # When True, unacked messages are tracked in Redis (see QoS above).
    ack_emulation = True
    unacked_key = 'unacked'
    unacked_index_key = 'unacked_index'
    unacked_mutex_key = 'unacked_mutex'
    unacked_mutex_expire = 300          # 5 minutes
    unacked_restore_limit = None
    visibility_timeout = 3600           # 1 hour
    priority_steps = PRIORITY_STEPS
    socket_timeout = None
    max_connections = 10
    #: Transport option to enable disable fanout keyprefix.
    #: Should be enabled by default, but that is not
    #: backwards compatible.  Can also be string, in which
    #: case it changes the default prefix ('/{db}.') into to something
    #: else.  The prefix must include a leading slash and a trailing dot.
    fanout_prefix = False

    _pool = None

    # Attributes that may be overridden via connection transport_options.
    from_transport_options = (
        virtual.Channel.from_transport_options +
        ('ack_emulation',
         'unacked_key',
         'unacked_index_key',
         'unacked_mutex_key',
         'unacked_mutex_expire',
         'visibility_timeout',
         'unacked_restore_limit',
         'fanout_prefix',
         'socket_timeout',
         'max_connections',
         'priority_steps')  # <-- do not add comma here!
    )
def __init__(self, *args, **kwargs):
    """Set up per-channel state and eagerly verify the Redis connection."""
    super_ = super(Channel, self)
    super_.__init__(*args, **kwargs)

    if not self.ack_emulation:  # disable visibility timeout
        self.QoS = virtual.QoS

    self._queue_cycle = []
    self.Client = self._get_client()
    self.ResponseError = self._get_response_error()
    self.active_fanout_queues = set()
    self.auto_delete_queues = set()
    self._fanout_to_queue = {}
    # Maps poller mode -> read handler (see MultiChannelPoller).
    self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive}

    if self.fanout_prefix:
        if isinstance(self.fanout_prefix, string_t):
            self.keyprefix_fanout = self.fanout_prefix
    else:
        # previous versions did not set a fanout, so cannot enable
        # by default.
        self.keyprefix_fanout = ''

    # Evaluate connection.
    try:
        self.client.info()
    except Exception:
        # Connection failed: release any pooled sockets before bailing.
        if self._pool:
            self._pool.disconnect()
        raise

    self.connection.cycle.add(self)  # add to channel poller.
    # copy errors, in case channel closed but threads still
    # are still waiting for data.
    self.connection_errors = self.connection.connection_errors
    register_after_fork(self, self._after_fork)
def _after_fork(self):
    """Drop pooled connections inherited by a forked child process."""
    pool = self._pool
    if pool is not None:
        pool.disconnect()
def _do_restore_message(self, payload, exchange, routing_key,
                        client=None, leftmost=False):
    """Push ``payload`` back onto every queue bound to the exchange.

    ``leftmost=True`` requeues at the head so the message is consumed
    first.  Failures are logged, never raised (best-effort restore).
    """
    with self.conn_or_acquire(client) as client:
        try:
            try:
                # Mark as redelivered if the payload carries headers.
                payload['headers']['redelivered'] = True
            except KeyError:
                pass
            for queue in self._lookup(exchange, routing_key):
                (client.lpush if leftmost else client.rpush)(
                    queue, dumps(payload),
                )
        except Exception:
            crit('Could not restore message: %r', payload, exc_info=True)
def _restore(self, message, leftmost=False):
    """Pop an unacked message by delivery tag and requeue it.

    hget+hdel run in one pipeline so only one restorer wins the payload.
    """
    tag = message.delivery_tag
    with self.conn_or_acquire() as client:
        P, _ = client.pipeline() \
            .hget(self.unacked_key, tag) \
            .hdel(self.unacked_key, tag) \
            .execute()
        if P:
            M, EX, RK = loads(bytes_to_str(P))  # json is unicode
            self._do_restore_message(M, EX, RK, client, leftmost)
def _restore_at_beginning(self, message):
    """Requeue ``message`` at the head of its queue."""
    return self._restore(message, leftmost=True)
def _next_delivery_tag(self):
    # UUIDs (not counters) so tags stay unique across restarts,
    # since unacked state persists in Redis.
    return uuid()
def basic_consume(self, queue, *args, **kwargs):
    """Start consuming from ``queue``; fanout queues switch to LISTEN mode."""
    if queue in self._fanout_queues:
        exchange = self._fanout_queues[queue]
        self.active_fanout_queues.add(queue)
        self._fanout_to_queue[exchange] = queue
    ret = super(Channel, self).basic_consume(queue, *args, **kwargs)
    # Rebuild the BRPOP rotation to include the new queue.
    self._update_cycle()
    return ret
def basic_cancel(self, consumer_tag):
    """Stop consuming for ``consumer_tag``; unknown tags are ignored."""
    try:
        queue = self._tag_to_queue[consumer_tag]
    except KeyError:
        return
    try:
        # Also drop fanout bookkeeping if this was a fanout queue.
        self.active_fanout_queues.discard(queue)
        self._fanout_to_queue.pop(self._fanout_queues[queue])
    except KeyError:
        pass
    ret = super(Channel, self).basic_cancel(consumer_tag)
    # Rebuild the BRPOP rotation without the cancelled queue.
    self._update_cycle()
    return ret
def _subscribe(self):
    """Issue SUBSCRIBE for every active fanout queue on the sub-client."""
    prefix = self.keyprefix_fanout
    keys = []
    for queue in self.active_fanout_queues:
        keys.append(''.join([prefix, self._fanout_queues[queue]]))
    if not keys:
        return
    client = self.subclient
    if client.connection._sock is None:
        client.connection.connect()
    self._in_listen = True
    self.subclient.subscribe(keys)
def _handle_message(self, client, r):
    """Normalize a raw [P]SUBSCRIBE reply ``r`` into a message dict."""
    kind = r[0]
    if kind == 'unsubscribe' and r[2] == 0:
        # Subscription count dropped to zero: nothing left to listen to.
        client.subscribed = False
        return None
    if kind == 'pmessage':
        rtype, pattern, channel, data = r[0], r[1], r[2], r[3]
    else:
        rtype, pattern, channel, data = r[0], None, r[1], r[2]
    return {'type': rtype, 'pattern': pattern,
            'channel': channel, 'data': data}
def _receive(self):
    """Read one fanout message from the SUBSCRIBE connection.

    Returns ``(message, queue)`` or raises Empty when there is nothing
    deliverable (connection error, non-message event, or undecodable
    payload).
    """
    c = self.subclient
    response = None
    try:
        response = c.parse_response()
    except self.connection_errors:
        self._in_listen = False
        raise Empty()
    if response is not None:
        payload = self._handle_message(c, response)
        if bytes_to_str(payload['type']) == 'message':
            channel = bytes_to_str(payload['channel'])
            if payload['data']:
                if channel[0] == '/':
                    # Strip the '/{db}.' fanout key prefix.
                    _, _, channel = channel.partition('.')
                try:
                    message = loads(bytes_to_str(payload['data']))
                except (TypeError, ValueError):
                    warn('Cannot process event on channel %r: %r',
                         channel, payload, exc_info=1)
                    # Bug fix: previously fell through and returned the
                    # unbound name ``message`` (NameError); treat an
                    # undecodable payload as "nothing received" instead.
                    raise Empty()
                return message, self._fanout_to_queue[channel]
    raise Empty()
def _brpop_start(self, timeout=1):
    """Send a BRPOP covering every priority key of the queues in rotation.

    The reply is read later by _brpop_read when the socket polls readable.
    """
    queues = self._consume_cycle()
    if not queues:
        return
    # One key per (priority, queue); trailing arg is the BRPOP timeout.
    keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS
            for queue in queues] + [timeout or 0]
    self._in_poll = True
    self.client.connection.send_command('BRPOP', *keys)
def _brpop_read(self, **options):
    """Read the pending BRPOP reply; returns (message, queue) or raises Empty."""
    try:
        try:
            dest__item = self.client.parse_response(self.client.connection,
                                                    'BRPOP',
                                                    **options)
        except self.connection_errors:
            # if there's a ConnectionError, disconnect so the next
            # iteration will reconnect automatically.
            self.client.connection.disconnect()
            raise Empty()
        if dest__item:
            dest, item = dest__item
            # Strip the priority suffix to recover the queue name.
            dest = bytes_to_str(dest).rsplit(self.sep, 1)[0]
            self._rotate_cycle(dest)
            return loads(bytes_to_str(item)), dest
        else:
            raise Empty()
    finally:
        # A new BRPOP must be sent either way.
        self._in_poll = False
def _poll_error(self, type, **options):
    """Drain the response after a poll error; connection errors are ignored."""
    try:
        self.client.parse_response(type)
    except self.connection_errors:
        pass
def _get(self, queue):
    """Non-blocking pop of one message, highest priority first."""
    with self.conn_or_acquire() as client:
        for pri in PRIORITY_STEPS:
            item = client.rpop(self._q_for_pri(queue, pri))
            if item:
                return loads(bytes_to_str(item))
        raise Empty()
def _size(self, queue):
    """Total message count across all priority sub-queues of ``queue``."""
    with self.conn_or_acquire() as client:
        pipe = client.pipeline()
        for pri in PRIORITY_STEPS:
            pipe = pipe.llen(self._q_for_pri(queue, pri))
        counts = pipe.execute()
        # Non-int replies (e.g. errors for missing keys) are skipped.
        return sum(count for count in counts if isinstance(count, int))
def _q_for_pri(self, queue, pri):
    """Redis key for ``queue`` at (normalized) priority ``pri``."""
    pri = self.priority(pri)
    if pri:
        return '%s%s%s' % (queue, self.sep, pri)
    # Priority 0 uses the bare queue name (no separator/suffix).
    return '%s%s%s' % (queue, '', '')
def priority(self, n):
    """Collapse priority ``n`` onto one of the configured steps.

    NOTE(review): for n < steps[0], bisect yields index -1 and this
    wraps to the last step — presumably callers clamp n to 0-9 first
    (see _put); verify before relying on out-of-range inputs.
    """
    steps = self.priority_steps
    index = bisect(steps, n) - 1
    return steps[index]
    def _put(self, queue, message, **kwargs):
        """Deliver message.

        The message priority (0-9, clamped) selects which priority
        sub-list of the queue receives the payload; malformed or missing
        priorities fall back to 0.
        """
        try:
            pri = max(min(int(
                message['properties']['delivery_info']['priority']), 9), 0)
        except (TypeError, ValueError, KeyError):
            pri = 0
        with self.conn_or_acquire() as client:
            client.lpush(self._q_for_pri(queue, pri), dumps(message))
    def _put_fanout(self, exchange, message, **kwargs):
        """Deliver fanout message via Redis PUB/SUB on the prefixed
        exchange channel."""
        with self.conn_or_acquire() as client:
            client.publish(
                ''.join([self.keyprefix_fanout, exchange]), dumps(message),
            )
    def _new_queue(self, queue, auto_delete=False, **kwargs):
        """Declare a queue.  Redis queues are created lazily on first push,
        so only auto-delete bookkeeping happens here."""
        if auto_delete:
            self.auto_delete_queues.add(queue)
    def _queue_bind(self, exchange, routing_key, pattern, queue):
        """Bind *queue* to *exchange*, recording the binding as a
        sep-joined triple in the exchange's Redis set."""
        if self.typeof(exchange).type == 'fanout':
            # Mark exchange as fanout.
            self._fanout_queues[queue] = exchange
        with self.conn_or_acquire() as client:
            client.sadd(self.keyprefix_queue % (exchange, ),
                        self.sep.join([routing_key or '',
                                       pattern or '',
                                       queue or '']))
    def _delete(self, queue, exchange, routing_key, pattern, *args):
        """Delete *queue*: remove its binding from the exchange set and
        drop every priority sub-list in one pipeline."""
        self.auto_delete_queues.discard(queue)
        with self.conn_or_acquire() as client:
            client.srem(self.keyprefix_queue % (exchange, ),
                        self.sep.join([routing_key or '',
                                       pattern or '',
                                       queue or '']))
            cmds = client.pipeline()
            for pri in PRIORITY_STEPS:
                cmds = cmds.delete(self._q_for_pri(queue, pri))
            cmds.execute()
    def _has_queue(self, queue, **kwargs):
        """Return True if any priority sub-list of *queue* exists in Redis."""
        with self.conn_or_acquire() as client:
            cmds = client.pipeline()
            for pri in PRIORITY_STEPS:
                cmds = cmds.exists(self._q_for_pri(queue, pri))
            return any(cmds.execute())
    def get_table(self, exchange):
        """Return the routing table for *exchange* as a list of
        ``(routing_key, pattern, queue)`` tuples.

        Raises ``InconsistencyError`` when the exchange has no bindings.
        """
        key = self.keyprefix_queue % exchange
        with self.conn_or_acquire() as client:
            values = client.smembers(key)
            if not values:
                raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key))
            return [tuple(bytes_to_str(val).split(self.sep)) for val in values]
    def _purge(self, queue):
        """Empty *queue* and return the number of messages removed.

        Each priority sub-list contributes an LLEN followed by a DELETE to
        the pipeline, so the purged count is the sum of every other reply.
        """
        with self.conn_or_acquire() as client:
            cmds = client.pipeline()
            for pri in PRIORITY_STEPS:
                priq = self._q_for_pri(queue, pri)
                cmds = cmds.llen(priq).delete(priq)
            sizes = cmds.execute()
            # Even indices are the LLEN replies; odd indices are DELETEs.
            return sum(sizes[::2])
    def close(self):
        """Close the channel: release the pool, deregister from the shared
        poller, delete auto-delete fanout queues and drop both clients."""
        if self._pool:
            self._pool.disconnect()
        if not self.closed:
            # remove from channel poller.
            self.connection.cycle.discard(self)
            # delete fanout bindings
            for queue in self._fanout_queues:
                if queue in self.auto_delete_queues:
                    self.queue_delete(queue)
            # Close connections
            for attr in 'client', 'subclient':
                try:
                    # cached_property: only present in __dict__ if it was
                    # ever created, hence the KeyError guard.
                    self.__dict__[attr].connection.disconnect()
                except (KeyError, AttributeError, self.ResponseError):
                    pass
        super(Channel, self).close()
def _prepare_virtual_host(self, vhost):
if not isinstance(vhost, int):
if not vhost or vhost == '/':
vhost = DEFAULT_DB
elif vhost.startswith('/'):
vhost = vhost[1:]
try:
vhost = int(vhost)
except ValueError:
raise ValueError(
'Database is int between 0 and limit - 1, not {0}'.format(
vhost,
))
return vhost
    def _connparams(self):
        """Build the keyword arguments for the redis connection pool from
        the transport's connection info (host, port, db, auth, timeouts).

        A ``socket://`` host URL switches to a Unix domain socket
        connection class.
        """
        conninfo = self.connection.client
        connparams = {'host': conninfo.hostname or '127.0.0.1',
                      'port': conninfo.port or DEFAULT_PORT,
                      'virtual_host': conninfo.virtual_host,
                      'password': conninfo.password,
                      'max_connections': self.max_connections,
                      'socket_timeout': self.socket_timeout}
        host = connparams['host']
        if '://' in host:
            scheme, _, _, _, _, path, query = _parse_url(host)
            if scheme == 'socket':
                connparams.update({
                    'connection_class': redis.UnixDomainSocketConnection,
                    'path': '/' + path}, **query)
                # host/port are meaningless for a Unix socket.
                connparams.pop('host', None)
                connparams.pop('port', None)
        connparams['db'] = self._prepare_virtual_host(
            connparams.pop('virtual_host', None))
        return connparams
    def _create_client(self):
        """Return a new redis client bound to the shared connection pool."""
        return self.Client(connection_pool=self.pool)
    def _get_pool(self):
        """Create the redis connection pool, fixing the fanout key prefix
        to the selected database number first."""
        params = self._connparams()
        self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db'])
        return redis.ConnectionPool(**params)
    def _get_client(self):
        """Return the client class used for this channel, enforcing the
        minimum supported redis-py version."""
        if redis.VERSION < (2, 4, 4):
            raise VersionMismatch(
                'Redis transport requires redis-py versions 2.4.4 or later. '
                'You have {0.__version__}'.format(redis))
        # KombuRedis maintains a connection attribute on it's instance and
        # uses that when executing commands
        # This was added after redis-py was changed.
        class KombuRedis(redis.Redis):  # pragma: no cover
            def __init__(self, *args, **kwargs):
                super(KombuRedis, self).__init__(*args, **kwargs)
                self.connection = self.connection_pool.get_connection('_')
        return KombuRedis
    @contextmanager
    def conn_or_acquire(self, client=None):
        """Context manager yielding a usable redis client.

        Yields *client* unchanged if given; otherwise yields the channel's
        shared client, or — while a poll is in flight and the shared client
        is busy — a temporary client whose connection is released back to
        the pool afterwards.
        """
        if client:
            yield client
        else:
            if self._in_poll:
                client = self._create_client()
                try:
                    yield client
                finally:
                    self.pool.release(client.connection)
            else:
                yield self.client
    @property
    def pool(self):
        """Lazily created, cached redis connection pool."""
        if self._pool is None:
            self._pool = self._get_pool()
        return self._pool
    @cached_property
    def client(self):
        """Client used to publish messages, BRPOP etc."""
        return self._create_client()
    @cached_property
    def subclient(self):
        """Pub/Sub connection used to consume fanout queues."""
        client = self._create_client()
        pubsub = client.pubsub()
        pool = pubsub.connection_pool
        # Pin a dedicated connection to the pubsub object so SUBSCRIBE
        # traffic never shares a connection with regular commands.
        pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint)
        return pubsub
    def _update_cycle(self):
        """Update fair cycle between queues.
        We cycle between queues fairly to make sure that
        each queue is equally likely to be consumed from,
        so that a very busy queue will not block others.
        This works by using Redis's `BRPOP` command and
        by rotating the most recently used queue to the
        end of the list. See Kombu github issue #166 for
        more discussion of this method.
        """
        self._queue_cycle = list(self.active_queues)
def _consume_cycle(self):
"""Get a fresh list of queues from the queue cycle."""
active = len(self.active_queues)
return self._queue_cycle[0:active]
def _rotate_cycle(self, used):
"""Move most recently used queue to end of list."""
cycle = self._queue_cycle
try:
cycle.append(cycle.pop(cycle.index(used)))
except ValueError:
pass
    def _get_response_error(self):
        """Return redis-py's ResponseError class (imported lazily)."""
        from redis import exceptions
        return exceptions.ResponseError
@property
def active_queues(self):
"""Set of queues being consumed from (excluding fanout queues)."""
return set(queue for queue in self._active_queues
if queue not in self.active_fanout_queues)
class Transport(virtual.Transport):
    """Redis transport: wires the Redis ``Channel`` into kombu's virtual
    transport machinery and the shared async poller."""
    Channel = Channel
    polling_interval = None  # disable sleep between unsuccessful polls.
    default_port = DEFAULT_PORT
    supports_ev = True
    driver_type = 'redis'
    driver_name = 'redis'
    def __init__(self, *args, **kwargs):
        super(Transport, self).__init__(*args, **kwargs)
        # Get redis-py exceptions.
        self.connection_errors, self.channel_errors = self._get_errors()
        # All channels share the same poller.
        self.cycle = MultiChannelPoller()
    def driver_version(self):
        """Return the installed redis-py version string."""
        return redis.__version__
    def register_with_event_loop(self, connection, loop):
        """Hook the shared poller into the event loop: register readers on
        every poll tick and periodically restore unacked messages."""
        cycle = self.cycle
        cycle.on_poll_init(loop.poller)
        cycle_poll_start = cycle.on_poll_start
        add_reader = loop.add_reader
        on_readable = self.on_readable
        def on_poll_start():
            cycle_poll_start()
            [add_reader(fd, on_readable, fd) for fd in cycle.fds]
        loop.on_tick.add(on_poll_start)
        loop.call_repeatedly(10, cycle.maybe_restore_messages)
    def on_readable(self, fileno):
        """Handle AIO event for one of our file descriptors."""
        item = self.cycle.on_readable(fileno)
        if item:
            message, queue = item
            if not queue or queue not in self._callbacks:
                raise KeyError(
                    'Message for queue {0!r} without consumers: {1}'.format(
                        queue, message))
            self._callbacks[queue](message)
    def _get_errors(self):
        """Utility to import redis-py's exceptions at runtime."""
        from redis import exceptions
        # This exception suddenly changed name between redis-py versions
        if hasattr(exceptions, 'InvalidData'):
            DataError = exceptions.InvalidData
        else:
            DataError = exceptions.DataError
        return (
            (virtual.Transport.connection_errors + (
                InconsistencyError,
                socket.error,
                IOError,
                OSError,
                exceptions.ConnectionError,
                exceptions.AuthenticationError)),
            (virtual.Transport.channel_errors + (
                DataError,
                exceptions.InvalidResponse,
                exceptions.ResponseError)),
        )
|
|
#! /usr/bin/env python
#
# Copyright (C) 2007-2009 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
"""
## skchem.descriptors.fingerprints
Fingerprinting classes and associated functions are defined.
"""
import pandas as pd
from rdkit.Chem import GetDistanceMatrix
from rdkit.DataStructs import ConvertToNumpyArray
from rdkit.Chem.rdMolDescriptors import (
GetMorganFingerprint,
GetHashedMorganFingerprint,
GetMorganFingerprintAsBitVect,
GetAtomPairFingerprint,
GetHashedAtomPairFingerprint,
GetHashedAtomPairFingerprintAsBitVect,
GetTopologicalTorsionFingerprint,
GetHashedTopologicalTorsionFingerprint,
GetHashedTopologicalTorsionFingerprintAsBitVect,
GetMACCSKeysFingerprint,
GetFeatureInvariants,
GetConnectivityInvariants)
from rdkit.Chem.rdReducedGraphs import GetErGFingerprint
from rdkit.Chem.rdmolops import RDKFingerprint
import numpy as np
from ..base import Transformer, Featurizer
class MorganFeaturizer(Transformer, Featurizer):
    """ Morgan fingerprints, implemented by RDKit.
    Notes:
        Currently, folded bits are by far the fastest implementation.
        Due to the speed of calculation, it is unlikely to see a speedup using
        the current parallel code, as more time is spent moving data across
        processes than for calculating in a single process.
    Examples:
        >>> import skchem
        >>> import pandas as pd
        >>> pd.options.display.max_rows = pd.options.display.max_columns = 5
        >>> mf = skchem.features.MorganFeaturizer()
        >>> m = skchem.Mol.from_smiles('CCC')
        Can transform an individual molecule to yield a Series:
        >>> mf.transform(m)
        morgan_fp_idx
        0       0
        1       0
               ..
        2046    0
        2047    0
        Name: MorganFeaturizer, dtype: uint8
        Can transform a list of molecules to yield a DataFrame:
        >>> mf.transform([m])
        morgan_fp_idx  0     1     ...   2046  2047
        0                 0     0  ...      0     0
        <BLANKLINE>
        [1 rows x 2048 columns]
        Change the number of features the fingerprint is folded down to using
        `n_feats`.
        >>> mf.n_feats = 1024
        >>> mf.transform(m)
        morgan_fp_idx
        0       0
        1       0
               ..
        1022    0
        1023    0
        Name: MorganFeaturizer, dtype: uint8
        Count fingerprints with `as_bits` = False
        >>> mf.as_bits = False
        >>> res = mf.transform(m); res[res > 0]
        morgan_fp_idx
        33     2
        80     1
        294    2
        320    1
        Name: MorganFeaturizer, dtype: int64
        Pseudo-gradient with `grad` shows which atoms contributed to which
        feature.
        >>> mf.grad(m)[res > 0]
        atom_idx  0  1  2
        features
        33        1  0  1
        80        0  1  0
        294       1  2  1
        320       1  1  1
    """
    def __init__(self, radius=2, n_feats=2048, as_bits=True,
                 use_features=False, use_bond_types=True, use_chirality=False,
                 n_jobs=1, verbose=True):
        """ Initialize the fingerprinter object.
        Args:
            radius (int):
                The maximum radius for atom environments.
                Default is `2`.
            n_feats (int):
                The number of features to which to fold the fingerprint down.
                For unfolded, use `-1`.
                Default is `2048`.
            as_bits (bool):
                Whether to return bits (`True`) or counts (`False`).
                Default is `True`.
            use_features (bool):
                Whether to use map atom types to generic features (FCFP).
                Default is `False`.
            use_bond_types (bool):
                Whether to use bond types to differentiate environments.
                Default is `False`.
            use_chirality (bool):
                Whether to use chirality to differentiate environments.
                Default is `False`.
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        super(MorganFeaturizer, self).__init__(n_jobs=n_jobs, verbose=verbose)
        self.radius = radius
        self.n_feats = n_feats
        # Negative n_feats means the unfolded (sparse, dict-valued) variant.
        self.sparse = self.n_feats < 0
        self.as_bits = as_bits
        self.use_features = use_features
        self.use_bond_types = use_bond_types
        self.use_chirality = use_chirality
    def _transform_mol(self, mol):
        """Private method to transform a skchem molecule.
        Use `transform` for the public method, which genericizes the argument
        to iterables of mols.
        Args:
            mol (skchem.Mol): Molecule to calculate fingerprint for.
        Returns:
            np.array or dict:
                Fingerprint as an array (or a dict if sparse).
        """
        if self.as_bits and self.n_feats > 0:
            # Folded bit vector: fastest path (see class Notes).
            fp = GetMorganFingerprintAsBitVect(
                mol, self.radius, nBits=self.n_feats,
                useFeatures=self.use_features,
                useBondTypes=self.use_bond_types,
                useChirality=self.use_chirality)
            res = np.array(0)
            ConvertToNumpyArray(fp, res)
            res = res.astype(np.uint8)
        else:
            if self.n_feats <= 0:
                # Unfolded: sparse mapping of feature id -> count.
                res = GetMorganFingerprint(
                    mol, self.radius,
                    useFeatures=self.use_features,
                    useBondTypes=self.use_bond_types,
                    useChirality=self.use_chirality)
                res = res.GetNonzeroElements()
                if self.as_bits:
                    res = {k: int(v > 0) for k, v in res.items()}
            else:
                # Folded count vector.
                res = GetHashedMorganFingerprint(
                    mol, self.radius, nBits=self.n_feats,
                    useFeatures=self.use_features,
                    useBondTypes=self.use_bond_types,
                    useChirality=self.use_chirality)
                res = np.array(list(res))
        return res
    @property
    def name(self):
        return 'morg'
    @property
    def columns(self):
        return pd.RangeIndex(self.n_feats, name='morgan_fp_idx')
    def grad(self, mol):
        """ Calculate the pseudo gradient with respect to the atoms.
        The pseudo gradient is the number of times the atom set that particular
        bit.
        Args:
            mol (skchem.Mol):
                The molecule for which to calculate the pseudo gradient.
        Returns:
            pandas.DataFrame:
                Dataframe of pseudogradients, with columns corresponding to
                atoms, and rows corresponding to features of the fingerprint.
        """
        cols = pd.Index(list(range(len(mol.atoms))), name='atom_idx')
        dist = GetDistanceMatrix(mol)
        info = {}
        if self.n_feats < 0:
            # Unfolded variant: rows are the actually-occurring feature ids.
            res = GetMorganFingerprint(mol, self.radius,
                                       useFeatures=self.use_features,
                                       useBondTypes=self.use_bond_types,
                                       useChirality=self.use_chirality,
                                       bitInfo=info).GetNonzeroElements()
            idx_list = list(res.keys())
            idx = pd.Index(idx_list, name='features')
            grad = np.zeros((len(idx), len(cols)))
            for bit in info:
                for atom_idx, radius in info[bit]:
                    # All atoms within `radius` bonds of the centre atom
                    # contributed to this bit.
                    grad[idx_list.index(bit)] += (dist <= radius)[atom_idx]
        else:
            GetHashedMorganFingerprint(mol, self.radius, nBits=self.n_feats,
                                       useFeatures=self.use_features,
                                       useBondTypes=self.use_bond_types,
                                       useChirality=self.use_chirality,
                                       bitInfo=info)
            idx = pd.Index(range(self.n_feats), name='features')
            grad = np.zeros((len(idx), len(cols)))
            for bit in info:
                for atom_idx, radius in info[bit]:
                    grad[bit] += (dist <= radius)[atom_idx]
        grad = pd.DataFrame(grad, index=idx, columns=cols)
        if self.as_bits:
            grad = (grad > 0)
        return grad.astype(int)
class AtomPairFeaturizer(Transformer, Featurizer):
    """ Atom Pair Fingerprints, implemented by RDKit. """
    def __init__(self, min_length=1, max_length=30, n_feats=2048,
                 as_bits=False, use_chirality=False, n_jobs=1, verbose=True):
        """ Instantiate an atom pair fingerprinter.
        Args:
            min_length (int):
                The minimum length of paths between pairs.
                Default is `1`, i.e. pairs can be bonded together.
            max_length (int):
                The maximum length of paths between pairs.
                Default is `30`.
            n_feats (int):
                The number of features to which to fold the fingerprint down.
                For unfolded, use `-1`.
                Default is `2048`.
            as_bits (bool):
                Whether to return bits (`True`) or counts (`False`).
                Default is `False`.
            use_chirality (bool):
                Whether to use chirality to differentiate environments.
                Default is `False`.
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        super(AtomPairFeaturizer, self).__init__(n_jobs=n_jobs,
                                                 verbose=verbose)
        self.min_length = min_length
        self.max_length = max_length
        self.n_feats = n_feats
        # Negative n_feats means the unfolded (sparse, dict-valued) variant.
        self.sparse = self.n_feats < 0
        self.as_bits = as_bits
        self.use_chirality = use_chirality
    def _transform_mol(self, mol):
        """Private method to transform a skchem molecule.
        Use `transform` for the public method, which genericizes the argument
        to iterables of mols.
        Args:
            mol (skchem.Mol): Molecule to calculate fingerprint for.
        Returns:
            np.array or dict:
                Fingerprint as an array (or a dict if sparse).
        """
        if self.as_bits and self.n_feats > 0:
            # Folded bit vector.
            fp = GetHashedAtomPairFingerprintAsBitVect(
                mol, nBits=self.n_feats, minLength=self.min_length,
                maxLength=self.max_length, includeChirality=self.use_chirality)
            res = np.array(0)
            ConvertToNumpyArray(fp, res)
            res = res.astype(np.uint8)
        else:
            if self.n_feats <= 0:
                # Unfolded (sparse) fingerprint.  Bug fix: the unhashed
                # GetAtomPairFingerprint takes no `nBits` argument, which
                # was previously passed and would raise at call time.
                res = GetAtomPairFingerprint(
                    mol, minLength=self.min_length,
                    maxLength=self.max_length,
                    includeChirality=self.use_chirality)
                res = res.GetNonzeroElements()
                if self.as_bits:
                    res = {k: int(v > 0) for k, v in res.items()}
            else:
                # Folded count vector.
                res = GetHashedAtomPairFingerprint(
                    mol, nBits=self.n_feats, minLength=self.min_length,
                    maxLength=self.max_length,
                    includeChirality=self.use_chirality)
                res = np.array(list(res))
        return res
    @property
    def name(self):
        return 'atom_pair'
    @property
    def columns(self):
        return pd.RangeIndex(self.n_feats, name='ap_fp_idx')
class TopologicalTorsionFeaturizer(Transformer, Featurizer):
    """ Topological Torsion fingerprints, implemented by RDKit. """
    def __init__(self, target_size=4, n_feats=2048, as_bits=False,
                 use_chirality=False, n_jobs=1, verbose=True):
        """ Initialize a TopologicalTorsionFeaturizer object.
        Args:
            target_size (int):
                The number of atoms in each torsion path.
                Default is `4`.
            n_feats (int):
                The number of features to which to fold the fingerprint down.
                For unfolded, use `-1`.
                Default is `2048`.
            as_bits (bool):
                Whether to return bits (`True`) or counts (`False`).
                Default is `False`.
            use_chirality (bool):
                Whether to use chirality to differentiate environments.
                Default is `False`.
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        self.target_size = target_size
        self.n_feats = n_feats
        # Negative n_feats means the unfolded (sparse, dict-valued) variant.
        self.sparse = self.n_feats < 0
        self.as_bits = as_bits
        self.use_chirality = use_chirality
        super(TopologicalTorsionFeaturizer, self).__init__(n_jobs=n_jobs,
                                                           verbose=verbose)
    def _transform_mol(self, mol):
        """ Private method to transform a skchem molecule.
        Args:
            mol (skchem.Mol): Molecule to calculate fingerprint for.
        Returns:
            np.array or dict:
                Fingerprint as an array (or a dict if sparse).
        """
        if self.as_bits and self.n_feats > 0:
            # Folded bit vector.
            fp = GetHashedTopologicalTorsionFingerprintAsBitVect(
                mol, nBits=self.n_feats, targetSize=self.target_size,
                includeChirality=self.use_chirality)
            res = np.array(0)
            ConvertToNumpyArray(fp, res)
            res = res.astype(np.uint8)
        else:
            if self.n_feats <= 0:
                # Unfolded (sparse) fingerprint.  Bug fix: the unhashed
                # GetTopologicalTorsionFingerprint takes no `nBits`
                # argument, which was previously passed and would raise.
                res = GetTopologicalTorsionFingerprint(
                    mol, targetSize=self.target_size,
                    includeChirality=self.use_chirality)
                res = res.GetNonzeroElements()
                if self.as_bits:
                    res = {k: int(v > 0) for k, v in res.items()}
            else:
                # Folded count vector.
                res = GetHashedTopologicalTorsionFingerprint(
                    mol, nBits=self.n_feats, targetSize=self.target_size,
                    includeChirality=self.use_chirality)
                res = np.array(list(res))
        return res
    @property
    def name(self):
        # Consistent with the other featurizers, which expose `name`.
        return 'top_tort'
    @property
    def names(self):
        # Kept for backward compatibility with existing callers.
        return 'top_tort'
    @property
    def columns(self):
        return pd.RangeIndex(self.n_feats, name='tt_fp_idx')
class MACCSFeaturizer(Transformer, Featurizer):
    """ MACCS Keys Fingerprints."""
    def __init__(self, n_jobs=1, verbose=True):
        """ Initialize a MACCS Featurizer.
        Args:
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        super(MACCSFeaturizer, self).__init__(n_jobs=n_jobs, verbose=verbose)
        # Fixed-length key set; matches the length of `columns` below.
        self.n_feats = 166
    def _transform_mol(self, mol):
        # NOTE(review): the leading element is dropped ([1:]) — presumably
        # RDKit's bit 0 placeholder, leaving the 166 named keys; confirm
        # against the RDKit MACCS implementation.
        return np.array(list(GetMACCSKeysFingerprint(mol)))[1:]
    @property
    def name(self):
        return 'maccs'
    @property
    def columns(self):
        # Human-readable names for each of the 166 MACCS keys.
        return pd.Index(
            ['ISOTOPE', '103 < ATOMIC NO. < 256',
             'GROUP IVA,VA,VIA PERIODS 4-6 (Ge...)', 'ACTINIDE',
             'GROUP IIIB,IVB (Sc...)', 'LANTHANIDE',
             'GROUP VB,VIB,VIIB (V...)', 'QAAA@1', 'GROUP VIII (Fe...)',
             'GROUP IIA (ALKALINE EARTH)', '4M RING', 'GROUP IB,IIB (Cu...)',
             'ON(C)C', 'S-S', 'OC(O)O', 'QAA@1', 'CTC',
             'GROUP IIIA (B...)', '7M RING', 'SI', 'C=C(Q)Q', '3M RING',
             'NC(O)O', 'N-O', 'NC(N)N', 'C$=C($A)$A', 'I',
             'QCH2Q', 'P', 'CQ(C)(C)A', 'QX', 'CSN', 'NS', 'CH2=A',
             'GROUP IA (ALKALI METAL)', 'S HETEROCYCLE',
             'NC(O)N', 'NC(C)N', 'OS(O)O', 'S-O', 'CTN', 'F', 'QHAQH', 'OTHER',
             'C=CN', 'BR', 'SAN', 'OQ(O)O', 'CHARGE',
             'C=C(C)C', 'CSO', 'NN', 'QHAAAQH', 'QHAAQH', 'OSO', 'ON(O)C',
             'O HETEROCYCLE', 'QSQ', 'Snot%A%A', 'S=O',
             'AS(A)A', 'A$A!A$A', 'N=O', 'A$A!S', 'C%N', 'CC(C)(C)A', 'QS',
             'QHQH (&...)', 'QQH', 'QNQ', 'NO', 'OAAO',
             'S=A', 'CH3ACH3', 'A!N$A', 'C=C(A)A', 'NAN', 'C=N', 'NAAN',
             'NAAAN', 'SA(A)A', 'ACH2QH', 'QAAAA@1', 'NH2',
             'CN(C)C', 'CH2QCH2', 'X!A$A', 'S', 'OAAAO', 'QHAACH2A',
             'QHAAACH2A', 'OC(N)C', 'QCH3', 'QN', 'NAAO',
             '5M RING', 'NAAAO', 'QAAAAA@1', 'C=C', 'ACH2N', '8M RING', 'QO',
             'CL', 'QHACH2A', 'A$A($A)$A', 'QA(Q)Q',
             'XA(A)A', 'CH3AAACH2A', 'ACH2O', 'NCO', 'NACH2A', 'AA(A)(A)A',
             'Onot%A%A', 'CH3CH2A', 'CH3ACH2A',
             'CH3AACH2A', 'NAO', 'ACH2CH2A > 1', 'N=A',
             'HETEROCYCLIC ATOM > 1 (&...)', 'N HETEROCYCLE', 'AN(A)A',
             'OCO', 'QQ', 'AROMATIC RING > 1', 'A!O!A', 'A$A!O > 1 (&...)',
             'ACH2AAACH2A', 'ACH2AACH2A',
             'QQ > 1 (&...)', 'QH > 1', 'OACH2A', 'A$A!N', 'X (HALOGEN)',
             'Nnot%A%A', 'O=A > 1', 'HETEROCYCLE',
             'QCH2A > 1 (&...)', 'OH', 'O > 3 (&...)', 'CH3 > 2 (&...)',
             'N > 1', 'A$A!O', 'Anot%A%Anot%A',
             '6M RING > 1', 'O > 2', 'ACH2CH2A', 'AQ(A)A', 'CH3 > 1',
             'A!A$A!A', 'NH', 'OC(C)C', 'QCH2A', 'C=O',
             'A!CH2!A', 'NA(A)A', 'C-O', 'C-N', 'O > 1', 'CH3', 'N',
             'AROMATIC', '6M RING', 'O', 'RING', 'FRAGMENTS'],
            name='maccs_idx')
class ErGFeaturizer(Transformer, Featurizer):
    """ Extended Reduced Graph Fingerprints.
    Implemented in RDKit."""
    def __init__(self, atom_types=0, fuzz_increment=0.3, min_path=1,
                 max_path=15, n_jobs=1, verbose=True):
        """ Initialize an ErGFeaturizer object.
        # TODO complete docstring
        Args:
            atom_types (AtomPairsParameters):
                The atom types to use.
            fuzz_increment (float):
                The fuzz increment.
            min_path (int):
                The minimum path.
            max_path (int):
                The maximum path.
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        super(ErGFeaturizer, self).__init__(n_jobs=n_jobs, verbose=verbose)
        self.atom_types = atom_types
        self.fuzz_increment = fuzz_increment
        self.min_path = min_path
        self.max_path = max_path
        # Fixed output length of RDKit's ErG fingerprint.
        self.n_feats = 315
    def _transform_mol(self, mol):
        """Return the ErG fingerprint of *mol* as a numpy array."""
        return np.array(GetErGFingerprint(mol))
    @property
    def name(self):
        return 'erg'
    @property
    def columns(self):
        return pd.RangeIndex(self.n_feats, name='erg_fp_idx')
class FeatureInvariantsFeaturizer(Transformer, Featurizer):
    """ Feature invariants fingerprints. """
    def __init__(self, n_jobs=1, verbose=True):
        """ Initialize a FeatureInvariantsFeaturizer.
        Args:
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        super(FeatureInvariantsFeaturizer, self).__init__(n_jobs=n_jobs,
                                                          verbose=verbose)
        # Deliberately unusable: the featurizer is not implemented yet.
        raise NotImplementedError
    def _transform_mol(self, mol):
        return np.array(GetFeatureInvariants(mol))
    @property
    def name(self):
        return 'feat_inv'
    @property
    def columns(self):
        # Variable-length output, so no fixed column index is available.
        return None
class ConnectivityInvariantsFeaturizer(Transformer, Featurizer):
    """ Connectivity invariants fingerprints """
    def __init__(self, include_ring_membership=True, n_jobs=1,
                 verbose=True):
        """ Initialize a ConnectivityInvariantsFeaturizer.
        Args:
            include_ring_membership (bool):
                Whether ring membership is considered when generating the
                invariants.
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        # Bug fix: `self` was previously passed as an explicit positional
        # argument to the parent initializer (in addition to the implicit
        # one), which would be forwarded as a spurious parameter.
        super(ConnectivityInvariantsFeaturizer, self).__init__(
            n_jobs=n_jobs, verbose=verbose)
        self.include_ring_membership = include_ring_membership
        raise NotImplementedError  # this is a sparse descriptor
    def _transform_mol(self, mol):
        return np.array(GetConnectivityInvariants(mol))
    @property
    def name(self):
        return 'conn_inv'
    @property
    def columns(self):
        # Sparse, variable-length output: no fixed column index.
        return None
class RDKFeaturizer(Transformer, Featurizer):
    """ RDKit fingerprint """
    def __init__(self, min_path=1, max_path=7, n_feats=2048, n_bits_per_hash=2,
                 use_hs=True, target_density=0.0, min_size=128,
                 branched_paths=True, use_bond_types=True, n_jobs=1,
                 verbose=True):
        """ RDK fingerprints
        Args:
            min_path (int):
                minimum number of bonds to include in the subgraphs.
            max_path (int):
                maximum number of bonds to include in the subgraphs.
            n_feats (int):
                The number of features to which to fold the fingerprint down.
                For unfolded, use `-1`.
            n_bits_per_hash (int)
                number of bits to set per path.
            use_hs (bool):
                include paths involving Hs in the fingerprint if the molecule
                has explicit Hs.
            target_density (float):
                fold the fingerprint until this minimum density has been
                reached.
            min_size (int):
                the minimum size the fingerprint will be folded to when trying
                to reach tgtDensity.
            branched_paths (bool):
                if set both branched and unbranched paths will be used in the
                fingerprint.
            use_bond_types (bool):
                if set both bond orders will be used in the path hashes.
            n_jobs (int):
                The number of processes to run the featurizer in.
            verbose (bool):
                Whether to output a progress bar.
        """
        super(RDKFeaturizer, self).__init__(n_jobs=n_jobs, verbose=verbose)
        self.min_path = min_path
        self.max_path = max_path
        self.n_feats = n_feats
        self.n_bits_per_hash = n_bits_per_hash
        self.use_hs = use_hs
        self.target_density = target_density
        self.min_size = min_size
        self.branched_paths = branched_paths
        self.use_bond_types = use_bond_types
    def _transform_mol(self, mol):
        """Return the RDKit path-based fingerprint of *mol* as an array."""
        return np.array(list(RDKFingerprint(mol, minPath=self.min_path,
                                            maxPath=self.max_path,
                                            fpSize=self.n_feats,
                                            nBitsPerHash=self.n_bits_per_hash,
                                            useHs=self.use_hs,
                                            tgtDensity=self.target_density,
                                            minSize=self.min_size,
                                            branchedPaths=self.branched_paths,
                                            useBondOrder=self.use_bond_types)))
    @property
    def name(self):
        return 'rdkfp'
    @property
    def columns(self):
        return pd.RangeIndex(self.n_feats, name='rdk_fp_idx')
|
|
"""Module containing class `MetadataImporter`."""
from collections import defaultdict
import datetime
import logging
from django.db import transaction
from vesper.command.command import CommandSyntaxError
from vesper.django.app.models import (
AnnotationConstraint, AnnotationInfo, Device, DeviceConnection,
DeviceInput, DeviceModel, DeviceModelInput, DeviceModelOutput,
DeviceOutput, Job, Processor, Station, StationDevice, TagInfo)
import vesper.command.command_utils as command_utils
import vesper.util.time_utils as time_utils
import vesper.util.yaml_utils as yaml_utils
# TODO: Allow omission of device name, in which case it is automatically
# generated by concatenating model name and serial number.
# TODO: Allow omission of connections when there is exactly one recorder,
# only single-output microphones, and no more microphones than recorder
# inputs, in which case microphones are connected in the order they
# appear in the device list to recorder inputs. The period of the
# connections is the period of association of the devices with the
# station.
# TODO: Allow specification of station devices with stations, e.g.:
#
# stations:
#
# - name: Ithaca
# description: >
# Imaginary recording station in Ithaca, NY, USA.
# The location given for the station is within Cayuga Lake
# to emphasize that the station is not real!
# time_zone: US/Eastern
# latitude: 42.473168
# longitude: -76.516825
# elevation: 120
# devices:
# - name: Swift
# start_time: 2018-01-01
# end_time: 2019-01-01
#
# If we support infinite connection intervals, perhaps you could even
# say:
#
# devices: [Swift, 21c]
# TODO: Allow compact specification of multiple devices in metadata YAML,
# e.g.:
#
# - name_prefix: Swift
# model: Swift
# serial_numbers: [0, 1, 2, 3]
#
# to specify devices "Swift 0", "Swift 1", "Swift 2", and "Swift 3".
#
# Associated possible syntactic sugar for sets of numeric serial numbers
# might include items like:
#
# name_format: "Swift {n:02d}"
#
# where `n` is a numeric serial number (perhaps we should check the
# format specification, constraining it to ensure safety against
# injection attacks), and:
#
# serial_number_range: [0, 3]
#
# to specify a range of numeric serial numbers.
# TODO: Allow specification of input or output number for devices
# that have just one input or output, e.g. "Swift Input 0", instead
# of requiring that input or output number be omitted, e.g.
# "Swift Input".
# TODO: Allow omission of device connection start and/or end times
# to indicate infinite time intervals.
# TODO: If it isn't specified, infer time zone from location using
# `timezonefinder` (or some such) Python module. See
# https://stackoverflow.com/questions/16086962/
# how-to-get-a-time-zone-from-a-location-using-latitude-and-longitude-coordinates
class MetadataImporter:
"""
Importer for metadata including stations, devices, etc.
The data to be archived are in the `metadata` command argument.
The value of the argument is a mapping from string keys like `'stations'`
and `'devices'` to collections of mappings, with each mapping in the
collection describing the fields of one archive object.
"""
extension_name = 'Metadata Importer'
    def __init__(self, args):
        # `metadata` maps plural keys ('stations', 'devices', ...) to
        # collections of field mappings; see the class docstring.
        self.metadata = command_utils.get_required_arg('metadata', args)
    def execute(self, job_info):
        """Import all metadata in one atomic database transaction.

        Any exception rolls back every object created by this import
        before being re-raised.
        """
        self._logger = logging.getLogger()
        try:
            # The ordering matters: devices reference models, station
            # devices reference stations and devices, etc.
            with transaction.atomic():
                self._add_stations()
                self._add_device_models()
                self._add_devices()
                self._add_station_devices()
                self._add_detectors()
                self._add_classifiers()
                self._add_annotation_constraints(job_info)
                self._add_annotations(job_info)
                self._add_tags(job_info)
        except Exception:
            self._logger.error(
                'Metadata import failed with an exception. Database '
                'has been restored to its state before the import. See '
                'below for exception traceback.')
            raise
        return True
    def _add_stations(self):
        """Create a `Station` for each mapping under the 'stations' key.

        Name, latitude, longitude, elevation and time zone are required;
        description is optional.
        """
        stations_data = self.metadata.get('stations')
        if stations_data is not None:
            for data in stations_data:
                name = _get_required(data, 'name', 'station')
                self._logger.info('Adding station "{}"...'.format(name))
                description = data.get('description', '')
                latitude = _get_required(data, 'latitude', 'station')
                longitude = _get_required(data, 'longitude', 'station')
                elevation = _get_required(data, 'elevation', 'station')
                time_zone = _get_required(data, 'time_zone', 'station')
                Station.objects.create(
                    name=name,
                    description=description,
                    latitude=latitude,
                    longitude=longitude,
                    elevation=elevation,
                    time_zone=time_zone)
    def _add_device_models(self):
        """Create a `DeviceModel` (with its input and output ports) for
        each mapping under the 'device_models' key."""
        device_models_data = self.metadata.get('device_models')
        if device_models_data is not None:
            for data in device_models_data:
                model = self._add_device_model(data)
                self._add_ports(model, data, 'input', DeviceModelInput)
                self._add_ports(model, data, 'output', DeviceModelOutput)
def _add_device_model(self, data):
name = _get_required(data, 'name', 'device model')
self._logger.info('Adding device model "{}"...'.format(name))
type_ = _get_required(data, 'type', 'device model')
manufacturer = _get_required(data, 'manufacturer', 'device model')
model = _get_required(data, 'model', 'device model')
description = data.get('description', '')
model = DeviceModel.objects.create(
name=name,
type=type_,
manufacturer=manufacturer,
model=model,
description=description
)
return model
    def _add_ports(self, model, data, port_type, port_class):
        """Create ports ('input' or 'output') of class *port_class* for
        device model *model* from the metadata mapping *data*."""
        port_data = self._get_port_data(data, port_type)
        for local_name, channel_num in port_data:
            self._logger.info(
                'Adding device model "{}" {} "{}"...'.format(
                    model.name, port_type, local_name))
            port_class.objects.create(
                model=model,
                local_name=local_name,
                channel_num=channel_num)
def _get_port_data(self, data, port_type):
names = data.get(port_type + 's')
if names is None:
key = 'num_{}s'.format(port_type)
num_ports = data.get(key, 0)
if num_ports == 0:
names = []
elif num_ports == 1:
names = [port_type.capitalize()]
else:
names = ['{} {}'.format(port_type.capitalize(), i)
for i in range(num_ports)]
return [(name, i) for i, name in enumerate(names)]
    def _add_devices(self):
        """Create a `Device` (with its inputs and outputs, derived from its
        model) for each mapping under the 'devices' key."""
        devices_data = self.metadata.get('devices')
        if devices_data is not None:
            # Name -> DeviceModel lookup for resolving 'model' references.
            models = _create_objects_dict(DeviceModel)
            for data in devices_data:
                device = self._add_device(data, models)
                self._add_device_inputs(device)
                self._add_device_outputs(device)
    def _add_device(self, data, models):
        """Create and return a `Device` from one metadata mapping,
        resolving its model by name via *models*."""
        name = _get_required(data, 'name', 'device')
        self._logger.info('Adding device "{}"...'.format(name))
        model = self._get_device_model(data, models)
        serial_number = _get_required(data, 'serial_number', 'device')
        description = data.get('description', '')
        return Device.objects.create(
            name=name,
            model=model,
            serial_number=serial_number,
            description=description)
    def _get_device_model(self, data, models):
        """Look up the `DeviceModel` named by the 'model' field of *data*;
        raise `CommandSyntaxError` for an unknown name."""
        name = _get_required(data, 'model', 'device')
        try:
            return models[name]
        except KeyError:
            raise CommandSyntaxError(
                'Unrecognized device model name "{}".'.format(name))
    def _add_device_inputs(self, device):
        """Create a `DeviceInput` for each input of the device's model."""
        for model_input in device.model.inputs.all():
            self._logger.info(
                'Adding device "{}" input "{}"...'.format(
                    device.name, model_input.local_name))
            DeviceInput.objects.create(
                device=device,
                model_input=model_input)
def _add_device_outputs(self, device):
for model_output in device.model.outputs.all():
self._logger.info(
'Adding device "{}" output "{}"...'.format(
device.name, model_output.local_name))
DeviceOutput.objects.create(
device=device,
model_output=model_output)
    def _add_station_devices(self):
        """Attach devices to stations and record their port connections.

        Each metadata item names a station, a local-time interval, a
        list of device names, and a list of output -> input connections.
        Port names in connections may be full names or model-based
        shorthands (the latter valid only when a model has exactly one
        device in the item's device list).
        """
        station_devices_data = self.metadata.get('station_devices')
        if station_devices_data is not None:
            devices = _create_objects_dict(Device)
            inputs = _create_objects_dict(DeviceInput)
            outputs = _create_objects_dict(DeviceOutput)
            for data in station_devices_data:
                station = self._get_station(data)
                data_name = 'station devices array'
                # Times are specified in station-local time; convert to UTC.
                start_time = self._get_time(
                    data, 'start_time', station, data_name)
                end_time = self._get_time(
                    data, 'end_time', station, data_name)
                device_names = _get_required(data, 'devices', data_name)
                station_devices = []
                for name in device_names:
                    device = self._get_device(name, devices)
                    self._add_station_device(
                        station, device, start_time, end_time)
                    station_devices.append(device)
                # Shorthand names are computed only over this item's devices.
                shorthand_inputs, shorthand_outputs = \
                    _get_shorthand_ports(station_devices)
                connections = _get_required(data, 'connections', data_name)
                for connection in connections:
                    output = self._get_port(
                        connection, 'output', shorthand_outputs, outputs)
                    input_ = self._get_port(
                        connection, 'input', shorthand_inputs, inputs)
                    self._add_connection(
                        station, output, input_, start_time, end_time)
def _get_station(self, data):
name = _get_required(data, 'station', 'station devices item')
try:
return Station.objects.get(name=name)
except Station.DoesNotExist:
raise CommandSyntaxError('Unrecognized station "{}".'.format(name))
def _get_time(self, data, key, station, data_name):
dt = _get_required(data, key, data_name)
if isinstance(dt, datetime.date):
dt = datetime.datetime(dt.year, dt.month, dt.day)
return station.local_to_utc(dt)
def _get_device(self, name, devices):
try:
return devices[name]
except KeyError:
raise CommandSyntaxError('Unrecognized device "{}".'.format(name))
def _add_station_device(self, station, device, start_time, end_time):
self._logger.info(
'Adding station "{}" device "{}" from {} to {}"...'.format(
station.name, device.name, str(start_time), str(end_time)))
StationDevice.objects.create(
station=station,
device=device,
start_time=start_time,
end_time=end_time)
def _get_port(self, connection, port_type, shorthand_ports, ports):
name = _get_required(connection, port_type, 'device connection')
port = shorthand_ports.get(name)
if port is None:
port = ports.get(name)
if port is None:
raise CommandSyntaxError(
'Unrecognized device {} "{}".'.format(port_type, name))
else:
return port
def _add_connection(self, station, output, input_, start_time, end_time):
self._logger.info((
'Adding station "{}" device connection "{} -> {} '
'from {} to {}"...').format(
station.name, output.name, input_.name,
str(start_time), str(end_time)))
DeviceConnection.objects.create(
output=output,
input=input_,
start_time=start_time,
end_time=end_time)
def _add_detectors(self):
self._add_processors('detectors', 'detector', 'Detector')
def _add_processors(self, data_key, log_type_name, db_type_name):
processors_data = self.metadata.get(data_key)
if processors_data is not None:
for data in processors_data:
name = _get_required(data, 'name', log_type_name)
self._logger.info(
'Adding {} "{}"...'.format(log_type_name, name))
description = data.get('description', '')
Processor.objects.create(
name=name,
type=db_type_name,
description=description)
def _add_classifiers(self):
self._add_processors('classifiers', 'classifier', 'Classifier')
def _add_annotation_constraints(self, job_info):
constraints_data = self.metadata.get('annotation_constraints')
if constraints_data is not None:
for data in constraints_data:
name = _get_required(data, 'name', 'annotation constraint')
self._logger.info(
'Adding annotation constraint "{}"...'.format(name))
description = data.get('description', '')
text = yaml_utils.dump(data)
creation_time = time_utils.get_utc_now()
creating_user = None
creating_job = Job.objects.get(id=job_info.job_id)
AnnotationConstraint.objects.create(
name=name,
description=description,
text=text,
creation_time=creation_time,
creating_user=creating_user,
creating_job=creating_job)
def _add_annotations(self, job_info):
annotations_data = self.metadata.get('annotations')
if annotations_data is not None:
for data in annotations_data:
name = _get_required(data, 'name', 'annotation')
self._logger.info('Adding annotation "{}"...'.format(name))
description = data.get('description', '')
type_ = data.get('type', 'String')
constraint = self._get_annotation_constraint(data)
creation_time = time_utils.get_utc_now()
creating_user = None
creating_job = Job.objects.get(id=job_info.job_id)
AnnotationInfo.objects.create(
name=name,
description=description,
type=type_,
constraint=constraint,
creation_time=creation_time,
creating_user=creating_user,
creating_job=creating_job)
def _get_annotation_constraint(self, data):
try:
name = data['constraint']
except KeyError:
return None
else:
return AnnotationConstraint.objects.get(name=name)
def _add_tags(self, job_info):
tags_data = self.metadata.get('tags')
if tags_data is not None:
for data in tags_data:
name = _get_required(data, 'name', 'tag')
self._logger.info('Adding tag "{}"...'.format(name))
description = data.get('description', '')
creation_time = time_utils.get_utc_now()
creating_user = None
creating_job = Job.objects.get(id=job_info.job_id)
TagInfo.objects.create(
name=name,
description=description,
creation_time=creation_time,
creating_user=creating_user,
creating_job=creating_job)
def _get_required(data, key, data_name):
try:
return data[key]
except KeyError:
raise CommandSyntaxError(
'{} missing required item "{}".'.format(
data_name.capitalize(), key))
def _create_objects_dict(cls):
objects = {}
for obj in cls.objects.all():
objects[obj.name] = obj
objects[obj.long_name] = obj
return objects
def _get_shorthand_ports(devices):
    """Build shorthand-name -> port mappings for the given devices.

    A shorthand port name substitutes a device's model name for the
    device name, and is only defined for ports of devices whose model
    occurs exactly once in `devices` (otherwise it would be ambiguous).
    Returns an ``(inputs, outputs)`` pair of dictionaries.
    """
    # Group the devices by model name.
    model_devices = defaultdict(set)
    for device in devices:
        model_devices[device.model.name].add(device)
    shorthand_inputs = {}
    shorthand_outputs = {}
    for model_name, model_device_set in model_devices.items():
        # Only a model with a single device yields unambiguous shorthands.
        if len(model_device_set) == 1:
            for device in model_device_set:
                _add_shorthand_ports(
                    shorthand_inputs, device.inputs.all(), model_name)
                _add_shorthand_ports(
                    shorthand_outputs, device.outputs.all(), model_name)
    return shorthand_inputs, shorthand_outputs
def _add_shorthand_ports(shorthand_ports, ports, model_name):
for port in ports:
name = '{} {}'.format(model_name, port.local_name)
shorthand_ports[name] = port
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import numpy as np
import warnings
from affine import Affine
from shapely.geometry import shape
from .io import read_features, Raster
from .utils import (rasterize_geom, get_percentile, check_stats,
remap_categories, key_assoc_val, boxify_points)
def raster_stats(*args, **kwargs):
    """Deprecated alias for :func:`zonal_stats`; emits a DeprecationWarning."""
    warnings.warn("'raster_stats' is an alias to 'zonal_stats'"
                  " and will disappear in 1.0", DeprecationWarning)
    return zonal_stats(*args, **kwargs)
def zonal_stats(*args, **kwargs):
    """Compute zonal statistics and return them as a list.

    Thin wrapper around ``gen_zonal_stats`` (see its docstring for all
    parameters); the only difference is that the generator is fully
    consumed and a list is returned.
    """
    return list(gen_zonal_stats(*args, **kwargs))
def gen_zonal_stats(
        vectors, raster,
        layer=0,
        band=1,
        nodata=None,
        affine=None,
        stats=None,
        all_touched=False,
        categorical=False,
        category_map=None,
        add_stats=None,
        zone_func=None,
        raster_out=False,
        prefix=None,
        geojson_out=False,
        preserve_properties=False,
        preserve_ids=False, **kwargs):
    """Zonal statistics of raster values aggregated to vector geometries.

    Parameters
    ----------
    vectors: path to an vector source or geo-like python objects
    raster: ndarray or path to a GDAL raster source
        If ndarray is passed, the ``affine`` kwarg is required.
    layer: int or string, optional
        If `vectors` is a path to an fiona source,
        specify the vector layer to use either by name or number.
        defaults to 0
    band: int, optional
        If `raster` is a GDAL source, the band number to use (counting from 1).
        defaults to 1.
    nodata: float, optional
        If `raster` is a GDAL source, this value overrides any NODATA value
        specified in the file's metadata.
        If `None`, the file's metadata's NODATA value (if any) will be used.
        defaults to `None`.
    affine: Affine instance
        required only for ndarrays, otherwise it is read from src
    stats: list of str, or space-delimited str, optional
        Which statistics to calculate for each zone.
        All possible choices are listed in ``utils.VALID_STATS``.
        defaults to ``DEFAULT_STATS``, a subset of these.
    all_touched: bool, optional
        Whether to include every raster cell touched by a geometry, or only
        those having a center point within the polygon.
        defaults to `False`
    categorical: bool, optional
    category_map: dict
        A dictionary mapping raster values to human-readable categorical names.
        Only applies when categorical is True
    add_stats: dict
        with names and functions of additional stats to compute, optional
    zone_func: callable
        function to apply to zone ndarray prior to computing stats
    raster_out: boolean
        Include the masked numpy array for each feature?, optional
        Each feature dictionary will have the following additional keys:
        mini_raster_array: The clipped and masked numpy array
        mini_raster_affine: Affine transformation
        mini_raster_nodata: NoData Value
    prefix: string
        add a prefix to the keys (default: None)
    geojson_out: boolean
        Return list of GeoJSON-like features (default: False)
        Original feature geometry and properties will be retained
        with zonal stats appended as additional properties.
        Use with `prefix` to ensure unique and meaningful property names.
    preserve_properties: boolean (default: False)
        preserve the properties of each feature in the returned stats data
    preserve_ids: boolean (default: False)
        Preserve the IDs of each feature in the returned stats data

    Returns
    -------
    generator of dicts (if geojson_out is False)
        Each item corresponds to a single vector feature and
        contains keys for each of the specified stats.
    generator of geojson features (if geojson_out is True)
        GeoJSON-like Feature as python dict
    """
    stats, run_count = check_stats(stats, categorical)

    # Handle 1.0 deprecations
    transform = kwargs.get('transform')
    if transform:
        warnings.warn("GDAL-style transforms will disappear in 1.0. "
                      "Use affine=Affine.from_gdal(*transform) instead",
                      DeprecationWarning)
        if not affine:
            affine = Affine.from_gdal(*transform)

    cp = kwargs.get('copy_properties')
    if cp:
        warnings.warn("Use `geojson_out` to preserve feature properties",
                      DeprecationWarning)

    band_num = kwargs.get('band_num')
    if band_num:
        warnings.warn("Use `band` to specify band number", DeprecationWarning)
        band = band_num

    with Raster(raster, affine, nodata, band) as rast:
        features_iter = read_features(vectors, layer)
        for feat in features_iter:
            geom = shape(feat['geometry'])

            # Buffer points into tiny boxes so they intersect raster cells.
            # (`geom_type` replaces the `type` attribute deprecated by
            # Shapely 2.0.)
            if 'Point' in geom.geom_type:
                geom = boxify_points(geom, rast)

            geom_bounds = tuple(geom.bounds)
            fsrc = rast.read(bounds=geom_bounds)

            # rasterized geometry
            rv_array = rasterize_geom(geom, like=fsrc, all_touched=all_touched)

            # nodata mask
            isnodata = (fsrc.array == fsrc.nodata)

            # add nan mask (if necessary); `np.floating` matches all float
            # dtypes (passing the builtin `float` is deprecated in NumPy)
            has_nan = (np.issubdtype(fsrc.array.dtype, np.floating)
                       and np.isnan(fsrc.array.min()))
            if has_nan:
                isnodata = (isnodata | np.isnan(fsrc.array))

            # Mask the source data array
            # mask everything that is not a valid value or not within our geom
            masked = np.ma.MaskedArray(
                fsrc.array,
                mask=(isnodata | ~rv_array))

            # execute zone_func on masked zone ndarray
            if zone_func is not None:
                if not callable(zone_func):
                    raise TypeError(('zone_func must be a callable '
                                     'which accepts function a '
                                     'single `zone_array` arg.'))
                zone_func(masked)

            if masked.compressed().size == 0:
                # nothing here, fill with None and move on
                feature_stats = dict([(stat, None) for stat in stats])
                if 'count' in stats:  # special case, zero makes sense here
                    feature_stats['count'] = 0
            else:
                if run_count:
                    keys, counts = np.unique(masked.compressed(),
                                             return_counts=True)
                    # `ndarray.item()` replaces `np.asscalar`, which was
                    # deprecated in NumPy 1.16 and removed in 1.23.
                    pixel_count = dict(zip([k.item() for k in keys],
                                           [c.item() for c in counts]))

                if categorical:
                    feature_stats = dict(pixel_count)
                    if category_map:
                        feature_stats = remap_categories(category_map,
                                                         feature_stats)
                else:
                    feature_stats = {}

                if preserve_properties and 'properties' in feat:
                    feature_stats['properties'] = feat['properties']
                if preserve_ids:
                    if 'id' in feat['properties']:
                        feature_stats['id'] = feat['properties']['id']

                if 'min' in stats:
                    feature_stats['min'] = float(masked.min())
                if 'max' in stats:
                    feature_stats['max'] = float(masked.max())
                if 'mean' in stats:
                    feature_stats['mean'] = float(masked.mean())
                if 'count' in stats:
                    feature_stats['count'] = int(masked.count())
                # optional
                if 'sum' in stats:
                    feature_stats['sum'] = float(masked.sum())
                if 'std' in stats:
                    feature_stats['std'] = float(masked.std())
                if 'median' in stats:
                    feature_stats['median'] = float(np.median(masked.compressed()))
                if 'majority' in stats:
                    feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
                if 'minority' in stats:
                    feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
                if 'unique' in stats:
                    feature_stats['unique'] = len(list(pixel_count.keys()))
                if 'range' in stats:
                    # Reuse min/max if already computed above.
                    try:
                        rmin = feature_stats['min']
                    except KeyError:
                        rmin = float(masked.min())
                    try:
                        rmax = feature_stats['max']
                    except KeyError:
                        rmax = float(masked.max())
                    feature_stats['range'] = rmax - rmin

                for pctile in [s for s in stats if s.startswith('percentile_')]:
                    q = get_percentile(pctile)
                    pctarr = masked.compressed()
                    feature_stats[pctile] = np.percentile(pctarr, q)

            if 'nodata' in stats or 'nan' in stats:
                # Count cells inside the geometry regardless of validity.
                featmasked = np.ma.MaskedArray(fsrc.array, mask=(~rv_array))

                if 'nodata' in stats:
                    feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
                if 'nan' in stats:
                    feature_stats['nan'] = float(np.isnan(featmasked).sum()) if has_nan else 0

            if add_stats is not None:
                for stat_name, stat_func in add_stats.items():
                    feature_stats[stat_name] = stat_func(masked)

            if raster_out:
                feature_stats['mini_raster_array'] = masked
                feature_stats['mini_raster_affine'] = fsrc.affine
                feature_stats['mini_raster_nodata'] = fsrc.nodata

            if prefix is not None:
                prefixed_feature_stats = {}
                for key, val in feature_stats.items():
                    newkey = "{}{}".format(prefix, key)
                    prefixed_feature_stats[newkey] = val
                feature_stats = prefixed_feature_stats

            if geojson_out:
                for key, val in feature_stats.items():
                    if 'properties' not in feat:
                        feat['properties'] = {}
                    feat['properties'][key] = val
                yield feat
            else:
                yield feature_stats
|
|
from functools import reduce
from operator import iand, ior
from string import punctuation
import django
from django.apps import apps
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.core.exceptions import ImproperlyConfigured
from django.db.models import CharField, Manager, Q, TextField
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
if django.VERSION >= (1, 10):

    class ManagerDescriptor(ManagerDescriptor):
        """
        This class exists purely to skip the abstract model check
        in the __get__ method of Django's ManagerDescriptor.
        """

        def __get__(self, instance, cls=None):
            # Managers are class-level attributes; forbid instance access,
            # mirroring Django's own behavior.
            if instance is not None:
                raise AttributeError(
                    "Manager isn't accessible via %s instances" % cls.__name__
                )
            # In ManagerDescriptor.__get__, an exception is raised here
            # if cls is abstract
            if cls._meta.swapped:
                raise AttributeError(
                    "Manager isn't available; "
                    "'%s.%s' has been swapped for '%s'"
                    % (
                        cls._meta.app_label,
                        cls._meta.object_name,
                        cls._meta.swapped,
                    )
                )
            # Resolve through the managers map so inherited/overridden
            # managers behave as in stock Django.
            return cls._meta.managers_map[self.manager.name]
class PublishedManager(Manager):
    """
    Provides filter for restricting items returned by status and
    publish date when the given user is not a staff member.
    """

    def published(self, for_user=None):
        """
        For non-staff users, return items with a published status and
        whose publish and expiry dates fall before and after the
        current date when specified.
        """
        from mezzanine.core.models import CONTENT_STATUS_PUBLISHED

        if for_user is not None and for_user.is_staff:
            # Staff see everything, including unpublished items.
            return self.all()
        live_from = Q(publish_date__lte=now()) | Q(publish_date__isnull=True)
        live_until = Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True)
        is_published = Q(status=CONTENT_STATUS_PUBLISHED)
        return self.filter(live_from, live_until, is_published)

    def get_by_natural_key(self, slug):
        """Support natural-key (de)serialization via the unique slug."""
        return self.get(slug=slug)
def search_fields_to_dict(fields):
    """
    In ``SearchableQuerySet`` and ``SearchableManager``, search fields
    can either be a sequence, or a dict of fields mapped to weights.
    This function converts sequences to a dict mapped to even weights,
    so that we're consistently dealing with a dict of fields mapped to
    weights, eg: ("title", "content") -> {"title": 1, "content": 1}
    """
    if not fields:
        return {}
    try:
        # Already a mapping of field name -> numeric weight?
        int(list(dict(fields).values())[0])
    except (TypeError, ValueError):
        # Plain sequence of field names: weight every field evenly.
        fields = {field: 1 for field in fields}
    return fields
class SearchableQuerySet(QuerySet):
    """
    QuerySet providing main search functionality for
    ``SearchableManager``.
    """

    def __init__(self, *args, **kwargs):
        # Search state carried across clones (see `_clone`): whether an
        # explicit ordering followed a search, the positive terms used
        # for scoring, and the field -> weight mapping.
        self._search_ordered = False
        self._search_terms = set()
        self._search_fields = kwargs.pop("search_fields", {})
        super().__init__(*args, **kwargs)

    def search(self, query, search_fields=None):
        """
        Build a queryset matching words in the given search query,
        treating quoted terms as exact phrases and taking into
        account + and - symbols as modifiers controlling which terms
        to require and exclude.
        """

        # ### DETERMINE FIELDS TO SEARCH ###

        # Use search_fields arg if given, otherwise use search_fields
        # initially configured by the manager class.
        if search_fields:
            self._search_fields = search_fields_to_dict(search_fields)
        if not self._search_fields:
            return self.none()

        # ### BUILD LIST OF TERMS TO SEARCH FOR ###

        # Remove extra spaces, put modifiers inside quoted terms.
        terms = (
            " ".join(query.split())
            .replace("+ ", "+")
            .replace('+"', '"+')
            .replace("- ", "-")
            .replace('-"', '"-')
            .split('"')
        )
        # Strip punctuation other than modifiers from terms and create
        # terms list, first from quoted terms and then remaining words.
        # (After splitting on '"', odd indexes hold the quoted phrases.)
        terms = [
            ("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
            for t in terms[1::2] + "".join(terms[::2]).split()
        ]
        # Remove stop words from terms that aren't quoted or use
        # modifiers, since words with these are an explicit part of
        # the search query. If doing so ends up with an empty term
        # list, then keep the stop words.
        terms_no_stopwords = [t for t in terms if t.lower() not in settings.STOP_WORDS]
        get_positive_terms = lambda terms: [
            t.lower().strip(punctuation) for t in terms if t[0:1] != "-"
        ]
        positive_terms = get_positive_terms(terms_no_stopwords)
        if positive_terms:
            terms = terms_no_stopwords
        else:
            positive_terms = get_positive_terms(terms)
        # Append positive terms (those without the negative modifier)
        # to the internal list for sorting when results are iterated.
        if not positive_terms:
            return self.none()
        else:
            self._search_terms.update(positive_terms)

        # ### BUILD QUERYSET FILTER ###

        # Create the queryset combining each set of terms.
        # "-term": every field must NOT contain the term.
        excluded = [
            reduce(
                iand,
                [
                    ~Q(**{"%s__icontains" % f: t[1:]})
                    for f in self._search_fields.keys()
                ],
            )
            for t in terms
            if t[0:1] == "-"
        ]
        # "+term": at least one field must contain the term.
        required = [
            reduce(
                ior,
                [Q(**{"%s__icontains" % f: t[1:]}) for f in self._search_fields.keys()],
            )
            for t in terms
            if t[0:1] == "+"
        ]
        # Unmodified terms: at least one field must contain the term.
        optional = [
            reduce(
                ior, [Q(**{"%s__icontains" % f: t}) for f in self._search_fields.keys()]
            )
            for t in terms
            if t[0:1] not in "+-"
        ]
        queryset = self
        if excluded:
            queryset = queryset.filter(reduce(iand, excluded))
        if required:
            queryset = queryset.filter(reduce(iand, required))
        # Optional terms aren't relevant to the filter if there are
        # terms that are explicitly required.
        elif optional:
            queryset = queryset.filter(reduce(ior, optional))
        return queryset.distinct()

    def _clone(self, *args, **kwargs):
        """
        Ensure attributes are copied to subsequent queries.
        """
        clone = super()._clone(*args, **kwargs)
        clone._search_terms = self._search_terms
        clone._search_fields = self._search_fields
        clone._search_ordered = self._search_ordered
        return clone

    def order_by(self, *field_names):
        """
        Mark the filter as being ordered if search has occurred.
        """
        if not self._search_ordered:
            self._search_ordered = len(self._search_terms) > 0
        return super().order_by(*field_names)

    def annotate_scores(self):
        """
        If search has occurred and no ordering has occurred, decorate
        each result with the number of search terms so that it can be
        sorted by the number of occurrence of terms.

        In the case of search fields that span model relationships, we
        cannot accurately match occurrences without some very
        complicated traversal code, which we won't attempt. So in this
        case, namely when there are no matches for a result (count=0),
        and search fields contain relationships (double underscores),
        we assume one match for one of the fields, and use the average
        weight of all search fields with relationships.
        """
        results = super().iterator()
        if self._search_terms and not self._search_ordered:
            results = list(results)
            for i, result in enumerate(results):
                count = 0
                related_weights = []
                for (field, weight) in self._search_fields.items():
                    if "__" in field:
                        related_weights.append(weight)
                    for term in self._search_terms:
                        field_value = getattr(result, field, None)
                        if field_value:
                            count += field_value.lower().count(term) * weight
                if not count and related_weights:
                    count = int(sum(related_weights) / len(related_weights))
                if result.publish_date:
                    # Scale older results down by their age in seconds.
                    age = (now() - result.publish_date).total_seconds()
                    if age > 0:
                        count = count / age ** settings.SEARCH_AGE_SCALE_FACTOR
                results[i].result_count = count
            return iter(results)
        return results
class SearchableManager(Manager):
    """
    Manager providing a chainable queryset.
    Adapted from http://www.djangosnippets.org/snippets/562/
    search method supports spanning across models that subclass the
    model being used to search.
    """

    def __init__(self, *args, **kwargs):
        # Optional explicit field -> weight mapping; see
        # `get_search_fields` for the fallback resolution order.
        self._search_fields = kwargs.pop("search_fields", {})
        super().__init__(*args, **kwargs)

    def get_search_fields(self):
        """
        Returns the search field names mapped to weights as a dict.
        Used in ``get_queryset`` below to tell ``SearchableQuerySet``
        which search fields to use. Also used by ``DisplayableAdmin``
        to populate Django admin's ``search_fields`` attribute.

        Search fields can be populated via
        ``SearchableManager.__init__``, which then get stored in
        ``SearchableManager._search_fields``, which serves as an
        approach for defining an explicit set of fields to be used.

        Alternatively and more commonly, ``search_fields`` can be
        defined on models themselves. In this case, we look at the
        model and all its base classes, and build up the search
        fields from all of those, so the search fields are implicitly
        built up from the inheritence chain.

        Finally if no search fields have been defined at all, we
        fall back to any fields that are ``CharField`` or ``TextField``
        instances.
        """
        search_fields = self._search_fields.copy()
        if not search_fields:
            # Walk the MRO from base to subclass so subclass definitions
            # override base-class ones.
            for cls in reversed(self.model.__mro__):
                super_fields = getattr(cls, "search_fields", {})
                search_fields.update(search_fields_to_dict(super_fields))
        if not search_fields:
            search_fields = []
            for f in self.model._meta.get_fields():
                if isinstance(f, (CharField, TextField)):
                    search_fields.append(f.name)
            search_fields = search_fields_to_dict(search_fields)
        return search_fields

    def get_queryset(self):
        # Hand the resolved fields to the queryset that does the work.
        search_fields = self.get_search_fields()
        return SearchableQuerySet(self.model, search_fields=search_fields)

    def contribute_to_class(self, model, name):
        """
        Newer versions of Django explicitly prevent managers being
        accessed from abstract classes, which is behaviour the search
        API has always relied on. Here we reinstate it.
        """
        super().contribute_to_class(model, name)
        setattr(model, name, ManagerDescriptor(self))

    def search(self, *args, **kwargs):
        """
        Proxy to queryset's search method for the manager's model and
        any models that subclass from this manager's model if the
        model is abstract.
        """
        if not settings.SEARCH_MODEL_CHOICES:
            # No choices defined - build a list of leaf models (those
            # without subclasses) that inherit from Displayable.
            models = [m for m in apps.get_models() if issubclass(m, self.model)]
            parents = reduce(ior, [set(m._meta.get_parent_list()) for m in models])
            models = [m for m in models if m not in parents]
        elif getattr(self.model._meta, "abstract", False):
            # When we're combining model subclasses for an abstract
            # model (eg Displayable), we only want to use models that
            # are represented by the ``SEARCH_MODEL_CHOICES`` setting.
            # Now this setting won't contain an exact list of models
            # we should use, since it can define superclass models such
            # as ``Page``, so we check the parent class list of each
            # model when determining whether a model falls within the
            # ``SEARCH_MODEL_CHOICES`` setting.
            search_choices = set()
            models = set()
            parents = set()
            errors = []
            for name in settings.SEARCH_MODEL_CHOICES:
                try:
                    model = apps.get_model(*name.split(".", 1))
                except LookupError:
                    errors.append(name)
                else:
                    search_choices.add(model)
            if errors:
                raise ImproperlyConfigured(
                    "Could not load the model(s) "
                    "%s defined in the 'SEARCH_MODEL_CHOICES' setting."
                    % ", ".join(errors)
                )

            for model in apps.get_models():
                # Model is actually a subclasses of what we're
                # searching (eg Displayabale)
                is_subclass = issubclass(model, self.model)
                # Model satisfies the search choices list - either
                # there are no search choices, model is directly in
                # search choices, or its parent is.
                this_parents = set(model._meta.get_parent_list())
                in_choices = not search_choices or model in search_choices
                in_choices = in_choices or this_parents & search_choices
                if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll seach. Also maintain a parent
                    # set, used below for further refinement of models
                    # list to search.
                    models.add(model)
                    parents.update(this_parents)
            # Strip out any models that are superclasses of models,
            # specifically the Page model which will generally be the
            # superclass for all custom content types, since if we
            # query the Page model as well, we will get duplicate
            # results.
            models -= parents
        else:
            models = [self.model]
        all_results = []
        user = kwargs.pop("for_user", None)
        for model in models:
            try:
                queryset = model.objects.published(for_user=user)
            except AttributeError:
                # Model has no `published` manager method; search all rows.
                queryset = model.objects.get_queryset()
            all_results.extend(queryset.search(*args, **kwargs).annotate_scores())
        return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
    """
    Extends Django's site manager to first look up site by ID stored in
    the request, the session, then domain for the current request
    (accessible via threadlocals in ``mezzanine.core.request``), the
    environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg, finally falling back
    to ``settings.SITE_ID`` if none of those match a site.
    """

    # The site lookup is dynamic per request, so queries from this
    # manager must not be captured by migrations.
    use_in_migrations = False

    def __init__(self, field_name=None, *args, **kwargs):
        # Call the grandparent's __init__ (note `super(DjangoCSM, ...)`)
        # to skip DjangoCSM's own field-name handling; the name is
        # stored and validated lazily instead.
        super(DjangoCSM, self).__init__(*args, **kwargs)
        self.__field_name = field_name
        self.__is_validated = False

    def get_queryset(self):
        if not self.__is_validated:
            # Resolve/validate the site field name on first use.
            self._get_field_name()
        lookup = {self.__field_name + "__id__exact": current_site_id()}
        # Again bypass DjangoCSM.get_queryset, which applies its own
        # SITE_ID-based filter, and filter on the dynamic site ID.
        return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager, SearchableManager):
    """
    Manually combines ``CurrentSiteManager``, ``PublishedManager``
    and ``SearchableManager`` for the ``Displayable`` model.
    """

    def url_map(self, for_user=None, **kwargs):
        """
        Returns a dictionary of urls mapped to Displayable subclass
        instances, including a fake homepage instance if none exists.
        Used in ``mezzanine.core.sitemaps``.
        """

        class Home:
            title = _("Home")

        home = Home()
        setattr(home, "get_absolute_url", home_slug)
        items = {home.get_absolute_url(): home}
        for model in apps.get_models():
            if issubclass(model, self.model):
                if hasattr(model.objects, "published"):
                    # Slugs that are external redirects have no local
                    # URL, so exclude them from the map.
                    for item in (
                        model.objects.published(for_user=for_user)
                        .filter(**kwargs)
                        .exclude(slug__startswith="http://")
                        .exclude(slug__startswith="https://")
                    ):
                        items[item.get_absolute_url()] = item
        return items
|
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import os
from os import path
import pkgutil
import shutil
import sys
import tempfile
import threading
import unittest
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
from grpc_tools import protoc
from six import moves
from tests.unit.framework.common import test_constants
# Directory names used inside the temporary test tree.
_RELATIVE_PROTO_PATH = 'relative_proto_path'
_RELATIVE_PYTHON_OUT = 'relative_python_out'

# Path components (relative to the proto root) of the .proto files that
# the test writes out and compiles.
_PROTO_FILES_PATH_COMPONENTS = (
    (
        'beta_grpc_plugin_test',
        'payload',
        'test_payload.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'requests',
        'r',
        'test_requests.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'responses',
        'test_responses.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'service',
        'test_service.proto',
    ),
)

# Fully qualified module names of the generated _pb2 modules.
_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'

# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
STUB_IDENTIFIER = 'BetaTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
@contextlib.contextmanager
def _system_path(path_insertion):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
yield
sys.path = old_system_path
def _create_directory_tree(root, path_components_sequence):
created = set()
for path_components in path_components_sequence:
thus_far = ''
for path_component in path_components:
relative_path = path.join(thus_far, path_component)
if relative_path not in created:
os.makedirs(path.join(root, relative_path))
created.add(relative_path)
thus_far = path.join(thus_far, path_component)
def _massage_proto_content(raw_proto_content):
imports_substituted = raw_proto_content.replace(
b'import "tests/protoc_plugin/protos/',
b'import "beta_grpc_plugin_test/')
package_statement_substituted = imports_substituted.replace(
b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;')
return package_statement_substituted
def _packagify(directory):
for subdirectory, _, _ in os.walk(directory):
init_file_name = path.join(subdirectory, '__init__.py')
with open(init_file_name, 'wb') as init_file:
init_file.write(b'')
class _ServicerMethods(object):
    def __init__(self, payload_pb2, responses_pb2):
        # Condition variable guarding the pause/fail flags across the
        # servicer threads and the test thread.
        self._condition = threading.Condition()
        self._paused = False
        self._fail = False
        # Generated protobuf modules used to build responses.
        self._payload_pb2 = payload_pb2
        self._responses_pb2 = responses_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = self._responses_pb2.SimpleResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = self._responses_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
@contextlib.contextmanager
def _CreateService(payload_pb2, responses_pb2, service_pb2):
    """Provides a servicer backend and a stub.

    The servicer is just the implementation of the actual servicer passed to the
    face player of the python RPC implementation; the two are detached.

    Args:
      payload_pb2: The payload_pb2 module generated by this test.
      responses_pb2: The responses_pb2 module generated by this test.
      service_pb2: The service_pb2 module generated by this test.

    Yields:
      A (servicer_methods, stub) pair where servicer_methods is the back-end of
      the service bound to the stub and stub is the stub on which to invoke
      RPCs.
    """
    servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)

    # Thin adapter: forwards each RPC to the controllable back-end above.
    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):

        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)

        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)

        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)

        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)

        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)

    servicer = Servicer()
    server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = implementations.insecure_channel('localhost', port)
    stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
    try:
        yield servicer_methods, stub
    finally:
        # The original stopped the server only on the success path; a failing
        # test body leaked a running server.  Always stop it.
        server.stop(0)
@contextlib.contextmanager
def _CreateIncompleteService(service_pb2):
    """Provides a servicer backend that fails to implement methods and its stub.

    The servicer is just the implementation of the actual servicer passed to the
    face player of the python RPC implementation; the two are detached.

    Args:
      service_pb2: The service_pb2 module generated by this test.

    Yields:
      A (servicer_methods, stub) pair where servicer_methods is the back-end of
      the service bound to the stub and stub is the stub on which to invoke
      RPCs.  servicer_methods is always None here since nothing is implemented.
    """

    # Deliberately implements no methods, so every RPC is UNIMPLEMENTED.
    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        pass

    servicer = Servicer()
    server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = implementations.insecure_channel('localhost', port)
    stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
    try:
        yield None, stub
    finally:
        # The original stopped the server only on the success path; a failing
        # test body leaked a running server.  Always stop it.
        server.stop(0)
def _streaming_input_request_iterator(payload_pb2, requests_pb2):
for _ in range(3):
request = requests_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request(requests_pb2):
request = requests_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator(requests_pb2):
request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
    """Test case for the gRPC Python protoc-plugin.
    While reading these tests, remember that the futures API
    (`stub.method.future()`) only gives futures for the *response-unary*
    methods and does not exist for response-streaming methods.
    """
    def setUp(self):
        # Lay out a scratch tree of .proto files (import paths and package
        # rewritten by _massage_proto_content) for protoc to compile.
        self._directory = tempfile.mkdtemp(dir='.')
        self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
        self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
        os.makedirs(self._proto_path)
        os.makedirs(self._python_out)
        directories_path_components = {
            proto_file_path_components[:-1]
            for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS
        }
        _create_directory_tree(self._proto_path, directories_path_components)
        self._proto_file_names = set()
        for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS:
            raw_proto_content = pkgutil.get_data(
                'tests.protoc_plugin.protos',
                path.join(*proto_file_path_components[1:]))
            massaged_proto_content = _massage_proto_content(raw_proto_content)
            proto_file_name = path.join(self._proto_path,
                                        *proto_file_path_components)
            with open(proto_file_name, 'wb') as proto_file:
                proto_file.write(massaged_proto_content)
            self._proto_file_names.add(proto_file_name)
    def tearDown(self):
        # Remove the scratch directory with the protos and generated modules.
        shutil.rmtree(self._directory)
    def _protoc(self):
        # Run protoc with the gRPC plugin over the scratch protos, make the
        # output importable, and import the generated modules onto self.
        args = [
            '',
            '--proto_path={}'.format(self._proto_path),
            '--python_out={}'.format(self._python_out),
            '--grpc_python_out=grpc_1_0:{}'.format(self._python_out),
        ] + list(self._proto_file_names)
        protoc_exit_code = protoc.main(args)
        self.assertEqual(0, protoc_exit_code)
        _packagify(self._python_out)
        with _system_path([self._python_out]):
            self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2)
            self._requests_pb2 = importlib.import_module(_REQUESTS_PB2)
            self._responses_pb2 = importlib.import_module(_RESPONSES_PB2)
            self._service_pb2 = importlib.import_module(_SERVICE_PB2)
    def testImportAttributes(self):
        # check that we can access the generated module and its members.
        self._protoc()
        self.assertIsNotNone(
            getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
        self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
    def testUpDown(self):
        # Server and stub come up and tear down cleanly with no RPC issued.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2):
            self._requests_pb2.SimpleRequest(response_size=13)
    def testIncompleteServicer(self):
        # A servicer with no methods should surface UNIMPLEMENTED to callers.
        self._protoc()
        with _CreateIncompleteService(self._service_pb2) as (_, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            try:
                stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
            except face.AbortionError as error:
                self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED,
                                 error.code)
    def testUnaryCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
            expected_response = methods.UnaryCall(request, 'not a real context!')
            self.assertEqual(expected_response, response)
    def testUnaryCallFuture(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            # Check that the call does not block waiting for the server to respond.
            with methods.pause():
                response_future = stub.UnaryCall.future(
                    request, test_constants.LONG_TIMEOUT)
            response = response_future.result()
            expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testUnaryCallFutureExpired(self):
        # methods.pause() keeps the server from answering within SHORT_TIMEOUT.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.pause():
                response_future = stub.UnaryCall.future(
                    request, test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    response_future.result()
    def testUnaryCallFutureCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.pause():
                response_future = stub.UnaryCall.future(request, 1)
                response_future.cancel()
                self.assertTrue(response_future.cancelled())
    def testUnaryCallFutureFailed(self):
        # methods.fail() makes the servicer raise; the future carries it.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.fail():
                response_future = stub.UnaryCall.future(
                    request, test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(response_future.exception())
    def testStreamingOutputCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            responses = stub.StreamingOutputCall(request,
                                                 test_constants.LONG_TIMEOUT)
            expected_responses = methods.StreamingOutputCall(
                request, 'not a real RpcContext!')
            for expected_response, response in moves.zip_longest(
                    expected_responses, responses):
                self.assertEqual(expected_response, response)
    def testStreamingOutputCallExpired(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            with methods.pause():
                responses = stub.StreamingOutputCall(
                    request, test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    list(responses)
    def testStreamingOutputCallCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            responses = stub.StreamingOutputCall(request,
                                                 test_constants.LONG_TIMEOUT)
            next(responses)
            responses.cancel()
            with self.assertRaises(face.CancellationError):
                next(responses)
    def testStreamingOutputCallFailed(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            with methods.fail():
                responses = stub.StreamingOutputCall(request, 1)
                self.assertIsNotNone(responses)
                with self.assertRaises(face.RemoteError):
                    next(responses)
    def testStreamingInputCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            response = stub.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                test_constants.LONG_TIMEOUT)
            expected_response = methods.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testStreamingInputCallFuture(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
            response = response_future.result()
            expected_response = methods.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testStreamingInputCallFutureExpired(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    response_future.result()
                self.assertIsInstance(response_future.exception(),
                                      face.ExpirationError)
    def testStreamingInputCallFutureCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
                response_future.cancel()
                self.assertTrue(response_future.cancelled())
            with self.assertRaises(future.CancelledError):
                response_future.result()
    def testStreamingInputCallFutureFailed(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.fail():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(response_future.exception())
    def testFullDuplexCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            responses = stub.FullDuplexCall(
                _full_duplex_request_iterator(self._requests_pb2),
                test_constants.LONG_TIMEOUT)
            expected_responses = methods.FullDuplexCall(
                _full_duplex_request_iterator(self._requests_pb2),
                'not a real RpcContext!')
            for expected_response, response in moves.zip_longest(
                    expected_responses, responses):
                self.assertEqual(expected_response, response)
    def testFullDuplexCallExpired(self):
        self._protoc()
        request_iterator = _full_duplex_request_iterator(self._requests_pb2)
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                responses = stub.FullDuplexCall(request_iterator,
                                                test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    list(responses)
    def testFullDuplexCallCancelled(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request_iterator = _full_duplex_request_iterator(self._requests_pb2)
            responses = stub.FullDuplexCall(request_iterator,
                                            test_constants.LONG_TIMEOUT)
            next(responses)
            responses.cancel()
            with self.assertRaises(face.CancellationError):
                next(responses)
    def testFullDuplexCallFailed(self):
        self._protoc()
        request_iterator = _full_duplex_request_iterator(self._requests_pb2)
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.fail():
                responses = stub.FullDuplexCall(request_iterator,
                                                test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(responses)
                with self.assertRaises(face.RemoteError):
                    next(responses)
    def testHalfDuplexCall(self):
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            def half_duplex_request_iterator():
                request = self._requests_pb2.StreamingOutputCallRequest()
                request.response_parameters.add(size=1, interval_us=0)
                yield request
                request = self._requests_pb2.StreamingOutputCallRequest()
                request.response_parameters.add(size=2, interval_us=0)
                request.response_parameters.add(size=3, interval_us=0)
                yield request
            responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
                                            test_constants.LONG_TIMEOUT)
            expected_responses = methods.HalfDuplexCall(
                half_duplex_request_iterator(), 'not a real RpcContext!')
            for check in moves.zip_longest(expected_responses, responses):
                expected_response, response = check
                self.assertEqual(expected_response, response)
    def testHalfDuplexCallWedged(self):
        # The request iterator below never finishes while wait() is active, so
        # the half-duplex call (which needs the full request stream) expires.
        self._protoc()
        condition = threading.Condition()
        wait_cell = [False]
        @contextlib.contextmanager
        def wait():  # pylint: disable=invalid-name
            # Where's Python 3's 'nonlocal' statement when you need it?
            with condition:
                wait_cell[0] = True
            yield
            with condition:
                wait_cell[0] = False
                condition.notify_all()
        def half_duplex_request_iterator():
            request = self._requests_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            with condition:
                while wait_cell[0]:
                    condition.wait()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with wait():
                responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
                                                test_constants.SHORT_TIMEOUT)
                # half-duplex waits for the client to send all info
                with self.assertRaises(face.ExpirationError):
                    next(responses)
if __name__ == '__main__':
    # Verbose output so each plugin test is listed individually when run.
    unittest.main(verbosity=2)
|
|
"""Tests for useful utilities for higher level polynomial classes. """
from sympy import S, Integer, sin, cos, sqrt, symbols, pi, Eq, Integral, exp
from sympy.utilities.pytest import raises
from sympy.polys.polyutils import (
_nsort,
_sort_gens,
_unify_gens,
_analyze_gens,
_sort_factors,
parallel_dict_from_expr,
dict_from_expr,
)
from sympy.polys.polyerrors import (
GeneratorsNeeded,
PolynomialError,
)
from sympy.polys.domains import ZZ
# Generators shared by all tests below; A and B are noncommutative so tests
# can exercise the PolynomialError paths for noncommutative expressions.
x, y, z, p, q, r, s, t, u, v, w = symbols('x,y,z,p,q,r,s,t,u,v,w')
A, B = symbols('A,B', commutative=False)
def test__nsort():
    """_nsort orders numerically even when values resist direct evaluation."""
    # issue 6137
    # Four quartic roots whose real/imaginary parts cannot be decided cheaply.
    r = S('''[3/2 + sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) - 4/sqrt(-7/3 +
    61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3)) -
    61/(18*(-415/216 + 13*I/12)**(1/3)))/2 - sqrt(-7/3 + 61/(18*(-415/216
    + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3))/2, 3/2 - sqrt(-7/3
    + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3))/2 - sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) -
    4/sqrt(-7/3 + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3)) - 61/(18*(-415/216 + 13*I/12)**(1/3)))/2, 3/2 +
    sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) + 4/sqrt(-7/3 +
    61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3)) -
    61/(18*(-415/216 + 13*I/12)**(1/3)))/2 + sqrt(-7/3 + 61/(18*(-415/216
    + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3))/2, 3/2 + sqrt(-7/3
    + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3))/2 - sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) +
    4/sqrt(-7/3 + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3)) - 61/(18*(-415/216 + 13*I/12)**(1/3)))/2]''')
    ans = [r[1], r[0], r[-1], r[-2]]
    assert _nsort(r) == ans
    # separated=True returns (reals, others); none of these sort as real.
    assert len(_nsort(r, separated=True)[0]) == 0
    # Values too small to distinguish by float evaluation still sort correctly.
    b, c, a = exp(-1000), exp(-999), exp(-1001)
    assert _nsort((b, c, a)) == [a, b, c]
def test__sort_gens():
    """_sort_gens: default ordering plus the wrt= and sort= overrides."""
    assert _sort_gens([]) == ()
    assert _sort_gens([x]) == (x,)
    assert _sort_gens([p]) == (p,)
    assert _sort_gens([q]) == (q,)
    assert _sort_gens([x, p]) == (x, p)
    assert _sort_gens([p, x]) == (x, p)
    assert _sort_gens([q, p]) == (p, q)
    assert _sort_gens([q, p, x]) == (x, p, q)
    # wrt= forces the named generator(s) to the front; accepts a Symbol,
    # a name string, a comma-separated string, or a list of either.
    assert _sort_gens([x, p, q], wrt=x) == (x, p, q)
    assert _sort_gens([x, p, q], wrt=p) == (p, x, q)
    assert _sort_gens([x, p, q], wrt=q) == (q, x, p)
    assert _sort_gens([x, p, q], wrt='x') == (x, p, q)
    assert _sort_gens([x, p, q], wrt='p') == (p, x, q)
    assert _sort_gens([x, p, q], wrt='q') == (q, x, p)
    assert _sort_gens([x, p, q], wrt='x,q') == (x, q, p)
    assert _sort_gens([x, p, q], wrt='q,x') == (q, x, p)
    assert _sort_gens([x, p, q], wrt='p,q') == (p, q, x)
    assert _sort_gens([x, p, q], wrt='q,p') == (q, p, x)
    assert _sort_gens([x, p, q], wrt='x, q') == (x, q, p)
    assert _sort_gens([x, p, q], wrt='q, x') == (q, x, p)
    assert _sort_gens([x, p, q], wrt='p, q') == (p, q, x)
    assert _sort_gens([x, p, q], wrt='q, p') == (q, p, x)
    assert _sort_gens([x, p, q], wrt=[x, 'q']) == (x, q, p)
    assert _sort_gens([x, p, q], wrt=[q, 'x']) == (q, x, p)
    assert _sort_gens([x, p, q], wrt=[p, 'q']) == (p, q, x)
    assert _sort_gens([x, p, q], wrt=[q, 'p']) == (q, p, x)
    assert _sort_gens([x, p, q], wrt=['x', 'q']) == (x, q, p)
    assert _sort_gens([x, p, q], wrt=['q', 'x']) == (q, x, p)
    assert _sort_gens([x, p, q], wrt=['p', 'q']) == (p, q, x)
    assert _sort_gens([x, p, q], wrt=['q', 'p']) == (q, p, x)
    # sort= supplies an explicit 'a > b > c' ordering.
    assert _sort_gens([x, p, q], sort='x > p > q') == (x, p, q)
    assert _sort_gens([x, p, q], sort='p > x > q') == (p, x, q)
    assert _sort_gens([x, p, q], sort='p > q > x') == (p, q, x)
    # wrt= and sort= combine: wrt picks the head, sort orders the rest.
    assert _sort_gens([x, p, q], wrt='x', sort='q > p') == (x, q, p)
    assert _sort_gens([x, p, q], wrt='p', sort='q > x') == (p, q, x)
    assert _sort_gens([x, p, q], wrt='q', sort='p > x') == (q, p, x)
    # Numbered symbols keep their natural (numeric-suffix) order.
    X = symbols('x0,x1,x2,x10,x11,x12,x20,x21,x22')
    assert _sort_gens(X) == X
def test__unify_gens():
    """_unify_gens merges two generator tuples, preserving relative order."""
    assert _unify_gens([], []) == ()
    assert _unify_gens([x], [x]) == (x,)
    assert _unify_gens([y], [y]) == (y,)
    assert _unify_gens([x, y], [x]) == (x, y)
    assert _unify_gens([x], [x, y]) == (x, y)
    assert _unify_gens([x, y], [x, y]) == (x, y)
    assert _unify_gens([y, x], [y, x]) == (y, x)
    assert _unify_gens([x], [y]) == (x, y)
    assert _unify_gens([y], [x]) == (y, x)
    assert _unify_gens([x], [y, x]) == (y, x)
    assert _unify_gens([y, x], [x]) == (y, x)
    assert _unify_gens([x, y, z], [x, y, z]) == (x, y, z)
    assert _unify_gens([z, y, x], [x, y, z]) == (z, y, x)
    assert _unify_gens([x, y, z], [z, y, x]) == (x, y, z)
    assert _unify_gens([z, y, x], [z, y, x]) == (z, y, x)
    # Generators unique to either side are interleaved around the common ones.
    assert _unify_gens([x, y, z], [t, x, p, q, z]) == (t, x, y, p, q, z)
def test__analyze_gens():
    """_analyze_gens unwraps a single tuple/list argument into a flat tuple."""
    assert _analyze_gens((x, y, z)) == (x, y, z)
    assert _analyze_gens([x, y, z]) == (x, y, z)
    assert _analyze_gens(([x, y, z],)) == (x, y, z)
    assert _analyze_gens(((x, y, z),)) == (x, y, z)
def test__sort_factors():
    """_sort_factors orders factor lists by length, then content.

    multiple=False sorts bare factor lists; multiple=True sorts
    (factor, multiplicity) pairs, with multiplicity breaking ties.
    """
    assert _sort_factors([], multiple=True) == []
    assert _sort_factors([], multiple=False) == []
    F = [[1, 2, 3], [1, 2], [1]]
    G = [[1], [1, 2], [1, 2, 3]]
    assert _sort_factors(F, multiple=False) == G
    F = [[1, 2], [1, 2, 3], [1, 2], [1]]
    G = [[1], [1, 2], [1, 2], [1, 2, 3]]
    assert _sort_factors(F, multiple=False) == G
    F = [[2, 2], [1, 2, 3], [1, 2], [1]]
    G = [[1], [1, 2], [2, 2], [1, 2, 3]]
    assert _sort_factors(F, multiple=False) == G
    F = [([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
    G = [([1], 1), ([1, 2], 1), ([1, 2, 3], 1)]
    assert _sort_factors(F, multiple=True) == G
    F = [([1, 2], 1), ([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
    G = [([1], 1), ([1, 2], 1), ([1, 2], 1), ([1, 2, 3], 1)]
    assert _sort_factors(F, multiple=True) == G
    F = [([2, 2], 1), ([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
    G = [([1], 1), ([1, 2], 1), ([2, 2], 1), ([1, 2, 3], 1)]
    assert _sort_factors(F, multiple=True) == G
    # Higher multiplicity sorts after lower at equal length.
    F = [([2, 2], 1), ([1, 2, 3], 1), ([1, 2], 2), ([1], 1)]
    G = [([1], 1), ([2, 2], 1), ([1, 2], 2), ([1, 2, 3], 1)]
    assert _sort_factors(F, multiple=True) == G
def test__dict_from_expr_if_gens():
    """dict_from_expr with explicit gens=: monomial-exponent keys per gen."""
    assert dict_from_expr(
        Integer(17), gens=(x,)) == ({(0,): Integer(17)}, (x,))
    assert dict_from_expr(
        Integer(17), gens=(x, y)) == ({(0, 0): Integer(17)}, (x, y))
    assert dict_from_expr(
        Integer(17), gens=(x, y, z)) == ({(0, 0, 0): Integer(17)}, (x, y, z))
    assert dict_from_expr(
        Integer(-17), gens=(x,)) == ({(0,): Integer(-17)}, (x,))
    assert dict_from_expr(
        Integer(-17), gens=(x, y)) == ({(0, 0): Integer(-17)}, (x, y))
    assert dict_from_expr(Integer(
        -17), gens=(x, y, z)) == ({(0, 0, 0): Integer(-17)}, (x, y, z))
    assert dict_from_expr(
        Integer(17)*x, gens=(x,)) == ({(1,): Integer(17)}, (x,))
    assert dict_from_expr(
        Integer(17)*x, gens=(x, y)) == ({(1, 0): Integer(17)}, (x, y))
    assert dict_from_expr(Integer(
        17)*x, gens=(x, y, z)) == ({(1, 0, 0): Integer(17)}, (x, y, z))
    assert dict_from_expr(
        Integer(17)*x**7, gens=(x,)) == ({(7,): Integer(17)}, (x,))
    assert dict_from_expr(
        Integer(17)*x**7*y, gens=(x, y)) == ({(7, 1): Integer(17)}, (x, y))
    assert dict_from_expr(Integer(17)*x**7*y*z**12, gens=(
        x, y, z)) == ({(7, 1, 12): Integer(17)}, (x, y, z))
    # Symbols not listed in gens= are absorbed into the coefficients.
    assert dict_from_expr(x + 2*y + 3*z, gens=(x,)) == \
        ({(1,): Integer(1), (0,): 2*y + 3*z}, (x,))
    assert dict_from_expr(x + 2*y + 3*z, gens=(x, y)) == \
        ({(1, 0): Integer(1), (0, 1): Integer(2), (0, 0): 3*z}, (x, y))
    assert dict_from_expr(x + 2*y + 3*z, gens=(x, y, z)) == \
        ({(1, 0, 0): Integer(
            1), (0, 1, 0): Integer(2), (0, 0, 1): Integer(3)}, (x, y, z))
    assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x,)) == \
        ({(1,): y + 2*z, (0,): 3*y*z}, (x,))
    assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x, y)) == \
        ({(1, 1): Integer(1), (1, 0): 2*z, (0, 1): 3*z}, (x, y))
    assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x, y, z)) == \
        ({(1, 1, 0): Integer(
            1), (1, 0, 1): Integer(2), (0, 1, 1): Integer(3)}, (x, y, z))
    assert dict_from_expr(2**y*x, gens=(x,)) == ({(1,): 2**y}, (x,))
    # Non-symbol generators (here an Integral) are handled like symbols.
    assert dict_from_expr(Integral(x, (x, 1, 2)) + x) == (
        {(0, 1): 1, (1, 0): 1}, (x, Integral(x, (x, 1, 2))))
    # 2**y is not polynomial in y, so listing y as a generator must fail.
    raises(PolynomialError, lambda: dict_from_expr(2**y*x, gens=(x, y)))
def test__dict_from_expr_no_gens():
    """dict_from_expr with generators inferred from the expression."""
    raises(GeneratorsNeeded, lambda: dict_from_expr(Integer(17)))
    assert dict_from_expr(x) == ({(1,): Integer(1)}, (x,))
    assert dict_from_expr(y) == ({(1,): Integer(1)}, (y,))
    assert dict_from_expr(x*y) == ({(1, 1): Integer(1)}, (x, y))
    assert dict_from_expr(
        x + y) == ({(1, 0): Integer(1), (0, 1): Integer(1)}, (x, y))
    # With the (default) greedy option sqrt(2) itself becomes a generator.
    assert dict_from_expr(sqrt(2)) == ({(1,): Integer(1)}, (sqrt(2),))
    raises(GeneratorsNeeded, lambda: dict_from_expr(sqrt(2), greedy=False))
    # A coefficient domain removes its symbol from the generators.
    assert dict_from_expr(x*y, domain=ZZ[x]) == ({(1,): x}, (y,))
    assert dict_from_expr(x*y, domain=ZZ[y]) == ({(1,): y}, (x,))
    # extension=None keeps sqrt(2) as a generator; True folds it into the
    # coefficient.
    assert dict_from_expr(3*sqrt(
        2)*pi*x*y, extension=None) == ({(1, 1, 1, 1): 3}, (x, y, pi, sqrt(2)))
    assert dict_from_expr(3*sqrt(
        2)*pi*x*y, extension=True) == ({(1, 1, 1): 3*sqrt(2)}, (x, y, pi))
    # NOTE(review): duplicate of the previous assertion, kept as-is.
    assert dict_from_expr(3*sqrt(
        2)*pi*x*y, extension=True) == ({(1, 1, 1): 3*sqrt(2)}, (x, y, pi))
    f = cos(x)*sin(x) + cos(x)*sin(y) + cos(y)*sin(x) + cos(y)*sin(y)
    assert dict_from_expr(f) == ({(0, 1, 0, 1): 1, (0, 1, 1, 0): 1,
        (1, 0, 0, 1): 1, (1, 0, 1, 0): 1}, (cos(x), cos(y), sin(x), sin(y)))
def test__parallel_dict_from_expr_if_gens():
    """parallel_dict_from_expr with explicit gens= shared by all expressions."""
    assert parallel_dict_from_expr([x + 2*y + 3*z, Integer(7)], gens=(x,)) == \
        ([{(1,): Integer(1), (0,): 2*y + 3*z}, {(0,): Integer(7)}], (x,))
def test__parallel_dict_from_expr_no_gens():
    """parallel_dict_from_expr infers one generator tuple covering all inputs."""
    assert parallel_dict_from_expr([x*y, Integer(3)]) == \
        ([{(1, 1): Integer(1)}, {(0, 0): Integer(3)}], (x, y))
    assert parallel_dict_from_expr([x*y, 2*z, Integer(3)]) == \
        ([{(1, 1, 0): Integer(
            1)}, {(0, 0, 1): Integer(2)}, {(0, 0, 0): Integer(3)}], (x, y, z))
def test_parallel_dict_from_expr():
    """Equations are converted as lhs - rhs; noncommutative input must fail."""
    assert parallel_dict_from_expr([Eq(x, 1), Eq(
        x**2, 2)]) == ([{(0,): -Integer(1), (1,): Integer(1)},
                        {(0,): -Integer(2), (2,): Integer(1)}], (x,))
    raises(PolynomialError, lambda: parallel_dict_from_expr([A*B - B*A]))
def test_dict_from_expr():
    """Single-expression variant: Eq handling and the noncommutative error."""
    assert dict_from_expr(Eq(x, 1)) == \
        ({(0,): -Integer(1), (1,): Integer(1)}, (x,))
    raises(PolynomialError, lambda: dict_from_expr(A*B - B*A))
|
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
If a file named "pyvenv.cfg" exists one directory above sys.executable,
sys.prefix and sys.exec_prefix are set to that directory and
it is also checked for site-packages (sys.base_prefix and
sys.base_exec_prefix will always be the "real" prefixes of the Python
installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
the key "include-system-site-packages" set to anything other than "false"
(case-insensitive), the system-level prefixes will still also be
searched for site-packages; otherwise they won't.
All of the resulting site-specific directories, if they exist, are
appended to sys.path, and also inspected for path configuration
files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
The readline module is also automatically configured to enable
completion for systems that support it. This can be overridden in
sitecustomize, usercustomize or PYTHONSTARTUP. Starting Python in
isolated mode (-I) disables automatic readline configuration.
After these operations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import builtins
import _sitebuiltins
# True when running under PyPy (its builtin module list includes __pypy__).
is_pypy = '__pypy__' in sys.builtin_module_names
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
    """Join *paths* and return (absolute_dir, normcased_dir).

    The absolutization is best-effort: if os.path.abspath fails (OSError),
    the joined path is returned as-is.
    """
    joined = os.path.join(*paths)
    try:
        joined = os.path.abspath(joined)
    except OSError:
        pass
    return joined, os.path.normcase(joined)
def abs_paths():
"""Set all module __file__ and __cached__ attributes to an absolute path"""
for m in set(sys.modules.values()):
if (getattr(getattr(m, '__loader__', None), '__module__', None) not in
('_frozen_importlib', '_frozen_importlib_external')):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except (AttributeError, OSError):
pass
try:
m.__cached__ = os.path.abspath(m.__cached__)
except (AttributeError, OSError):
pass
def removeduppaths():
    """ Remove duplicate entries from sys.path along with making them
    absolute"""
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    deduped = []
    seen = set()
    for entry in sys.path:
        # Relative entries become absolute; entries that differ only by case
        # (per os.path.normcase) count as duplicates and are dropped.
        entry, entrycase = makepath(entry)
        if entrycase not in seen:
            deduped.append(entry)
            seen.add(entrycase)
    sys.path[:] = deduped
    return seen
def _init_pathinfo():
    """Return a set containing all existing file system items from sys.path."""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.exists(entry):
                # Store the case-normalized form for duplicate detection.
                existing.add(makepath(entry)[1])
        except TypeError:
            # Non-string sys.path entries (import hooks) are skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process a .pth file within the site-packages directory:
    For each line in the file, either combine it with sitedir to a path
    and add that to known_paths, or execute it if it starts with 'import '.

    Returns the (possibly updated) known_paths set, or None if it was
    created locally for this call.
    """
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = True
    else:
        reset = False
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "r")
    except OSError:
        # Unreadable .pth files are silently ignored.
        return
    with f:
        for n, line in enumerate(f):
            if line.startswith("#"):
                continue
            try:
                # Executable lines let packages hook into interpreter startup;
                # every other non-comment line is treated as a path entry.
                if line.startswith(("import ", "import\t")):
                    exec(line)
                    continue
                line = line.rstrip()
                dir, dircase = makepath(sitedir, line)
                if not dircase in known_paths and os.path.exists(dir):
                    sys.path.append(dir)
                    known_paths.add(dircase)
            except Exception:
                # Report the offending line, then abandon the rest of the
                # file — one broken line stops processing of this .pth file.
                print("Error processing line {:d} of {}:\n".format(n+1, fullname),
                      file=sys.stderr)
                import traceback
                for record in traceback.format_exception(*sys.exc_info()):
                    for line in record.splitlines():
                        print(' '+line, file=sys.stderr)
                print("\nRemainder of file ignored", file=sys.stderr)
                break
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    reset = known_paths is None
    if reset:
        known_paths = _init_pathinfo()
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        # The directory itself goes on sys.path before its .pth files are read.
        sys.path.append(sitedir)
        known_paths.add(sitedircase)
    try:
        entries = os.listdir(sitedir)
    except OSError:
        return
    # Process .pth files in deterministic (sorted) order.
    for name in sorted(entry for entry in entries if entry.endswith(".pth")):
        addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    if sys.flags.no_user_site:
        return False
    # For set-uid/set-gid processes, importing user-controlled code from the
    # user site directory would be a privilege escalation — refuse it.
    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
        if os.geteuid() != os.getuid():
            return None
    if hasattr(os, "getgid") and hasattr(os, "getegid"):
        if os.getegid() != os.getgid():
            return None
    return True
def getuserbase():
    """Returns the `user base` directory path.

    The `user base` directory can be used to store data. If the global
    variable ``USER_BASE`` is not initialized yet, this function will also set
    it.
    """
    global USER_BASE
    # Cached: the first call computes the value, later calls return it as-is.
    if USER_BASE is not None:
        return USER_BASE
    # Deferred import: sysconfig is only needed on the uncached path.
    from sysconfig import get_config_var
    USER_BASE = get_config_var('userbase')
    return USER_BASE
def getusersitepackages():
    """Returns the user-specific site-packages directory path.

    If the global variable ``USER_SITE`` is not initialized yet, this
    function will also set it.
    """
    global USER_SITE
    # user_base itself is unused below; the call is made for its side effect
    # of initializing the USER_BASE global.
    user_base = getuserbase() # this will also set USER_BASE
    if USER_SITE is not None:
        return USER_SITE
    from sysconfig import get_path
    if sys.platform == 'darwin':
        from sysconfig import get_config_var
        # Framework builds keep the user site inside the framework tree.
        if get_config_var('PYTHONFRAMEWORK'):
            USER_SITE = get_path('purelib', 'osx_framework_user')
            return USER_SITE
    USER_SITE = get_path('purelib', '%s_user' % os.name)
    return USER_SITE
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.

    Returns the known_paths set, updated in place when the directory exists.
    """
    # get the per user site-package path
    # this call will also make sure USER_BASE and USER_SITE are set
    user_site = getusersitepackages()
    # Honour the ENABLE_USER_SITE switch and skip non-existent directories.
    if ENABLE_USER_SITE and os.path.isdir(user_site):
        addsitedir(user_site, known_paths)
    return known_paths
def getsitepackages(prefixes=None):
    """Returns a list containing all global site-packages directories.

    For each directory present in ``prefixes`` (or the global ``PREFIXES``),
    this function will find its `site-packages` subdirectory depending on the
    system environment, and will return a list of full paths.
    """
    if prefixes is None:
        prefixes = PREFIXES
    sitepackages = []
    seen = set()
    for prefix in prefixes:
        # Skip empty and already-handled prefixes.
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)
        if is_pypy:
            # PyPy keeps site-packages directly under the prefix.
            sitepackages.append(os.path.join(prefix, "site-packages"))
        elif os.sep == '/':
            # POSIX layout: <prefix>/lib/pythonX.Y/site-packages
            sitepackages.append(
                os.path.join(prefix, "lib",
                             "python%d.%d" % sys.version_info[:2],
                             "site-packages"))
        else:
            # Windows layout: the prefix itself plus lib\site-packages.
            sitepackages.append(prefix)
            sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
        if sys.platform == "darwin":
            # for framework builds *only* we add the standard Apple
            # locations.
            from sysconfig import get_config_var
            framework = get_config_var("PYTHONFRAMEWORK")
            if framework:
                sitepackages.append(
                    os.path.join("/Library", framework,
                                 '%d.%d' % sys.version_info[:2],
                                 "site-packages"))
    return sitepackages
def addsitepackages(known_paths, prefixes=None):
    """Add every existing global site-packages directory to sys.path."""
    candidates = getsitepackages(prefixes)
    for sitedir in candidates:
        # Prefixes without a populated site-packages directory are skipped.
        if not os.path.isdir(sitedir):
            continue
        addsitedir(sitedir, known_paths)
    return known_paths
def setquit():
    """Define new builtins 'quit' and 'exit'.

    These are objects which make the interpreter exit when called.
    The repr of each object contains a hint at how it works.
    """
    # The hint mentions the platform's EOF shortcut (os.sep distinguishes
    # Windows from POSIX-like systems).
    eof = 'Ctrl-Z plus Return' if os.sep == '\\' else 'Ctrl-D (i.e. EOF)'
    builtins.quit = _sitebuiltins.Quitter('quit', eof)
    builtins.exit = _sitebuiltins.Quitter('exit', eof)
def setcopyright():
    """Set 'copyright' and 'credits' in builtins"""
    builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright)
    # licenseargs stays None unless an implementation-specific license
    # message is chosen below; the final block then builds the default one.
    licenseargs = None
    if is_pypy:
        credits = "PyPy is maintained by the PyPy developers: http://pypy.org/"
        license = "See https://bitbucket.org/pypy/pypy/src/default/LICENSE"
        licenseargs = (license,)
    elif sys.platform[:4] == 'java':
        credits = ("Jython is maintained by the Jython developers "
                   "(www.jython.org).")
    else:
        credits = """\
    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
    for supporting Python development. See www.python.org for more information."""
    if licenseargs is None:
        files, dirs = [], []
        # Not all modules are required to have a __file__ attribute. See
        # PEP 420 for more details.
        if hasattr(os, '__file__'):
            here = os.path.dirname(os.__file__)
            files.extend(["LICENSE.txt", "LICENSE"])
            dirs.extend([os.path.join(here, os.pardir), here, os.curdir])
        # NOTE: `license` deliberately shadows nothing at this point — the
        # builtin of the same name is what gets (re)assigned below.
        license = "See https://www.python.org/psf/license/"
        licenseargs = (license, files, dirs)
    builtins.credits = _sitebuiltins._Printer("credits", credits)
    builtins.license = _sitebuiltins._Printer("license", *licenseargs)
def sethelper():
    """Install the interactive 'help' builtin."""
    builtins.help = _sitebuiltins._Helper()
def enablerlcompleter():
    """Enable default readline configuration on interactive prompts, by
    registering a sys.__interactivehook__.

    If the readline module can be imported, the hook will set the Tab key
    as completion key and register ~/.python_history as history file.
    This can be overridden in the sitecustomize or usercustomize module,
    or in a PYTHONSTARTUP file.
    """
    def register_readline():
        import atexit
        try:
            import readline
            # rlcompleter is imported for its side effects on completion.
            import rlcompleter
        except ImportError:
            return
        # Reading the initialization (config) file may not be enough to set a
        # completion key, so we set one first and then read the file.
        readline_doc = getattr(readline, '__doc__', '')
        if readline_doc is not None and 'libedit' in readline_doc:
            # macOS libedit emulation uses a different binding syntax.
            readline.parse_and_bind('bind ^I rl_complete')
        else:
            readline.parse_and_bind('tab: complete')
        try:
            # Unimplemented on PyPy — the upstream read_init_file() call is
            # disabled here; the try/except is kept for structural parity.
            #readline.read_init_file()
            pass
        except OSError:
            # An OSError here could have many causes, but the most likely one
            # is that there's no .inputrc file (or .editrc file in the case of
            # Mac OS X + libedit) in the expected location. In that case, we
            # want to ignore the exception.
            pass
        if readline.get_current_history_length() == 0:
            # If no history was loaded, default to .python_history.
            # The guard is necessary to avoid doubling history size at
            # each interpreter exit when readline was already configured
            # through a PYTHONSTARTUP hook, see:
            # http://bugs.python.org/issue5845#msg198636
            history = os.path.join(os.path.expanduser('~'),
                                   '.python_history')
            try:
                readline.read_history_file(history)
            except IOError:
                pass
            atexit.register(readline.write_history_file, history)
    # The hook runs lazily, only when an interactive prompt starts.
    sys.__interactivehook__ = register_readline
def venv(known_paths):
    """Activate a virtual environment when a pyvenv.cfg file is found next to
    (or one level above) the running executable.

    Adjusts sys.prefix/sys.exec_prefix, PREFIXES and possibly
    ENABLE_USER_SITE, and adds the venv's site-packages to sys.path.
    Returns the known_paths set.
    """
    global PREFIXES, ENABLE_USER_SITE
    env = os.environ
    if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
        # The macOS framework launcher passes the real executable here.
        executable = os.environ['__PYVENV_LAUNCHER__']
    else:
        executable = sys.executable
    exe_dir, _ = os.path.split(os.path.abspath(executable))
    site_prefix = os.path.dirname(exe_dir)
    sys._home = None
    conf_basename = 'pyvenv.cfg'
    # pyvenv.cfg may live next to the executable or one directory up.
    candidate_confs = [
        conffile for conffile in (
            os.path.join(exe_dir, conf_basename),
            os.path.join(site_prefix, conf_basename)
        )
        if os.path.isfile(conffile)
    ]
    if candidate_confs:
        virtual_conf = candidate_confs[0]
        # Default: system site-packages are included unless the config
        # explicitly disables them.
        system_site = "true"
        # Issue 25185: Use UTF-8, as that's what the venv module uses when
        # writing the file.
        with open(virtual_conf, encoding='utf-8') as f:
            for line in f:
                if '=' in line:
                    key, _, value = line.partition('=')
                    key = key.strip().lower()
                    value = value.strip()
                    if key == 'include-system-site-packages':
                        system_site = value.lower()
                    elif key == 'home':
                        sys._home = value
        sys.prefix = sys.exec_prefix = site_prefix
        # Doing this here ensures venv takes precedence over user-site
        addsitepackages(known_paths, [sys.prefix])
        # addsitepackages will process site_prefix again if its in PREFIXES,
        # but that's ok; known_paths will prevent anything being added twice
        if system_site == "true":
            PREFIXES.insert(0, sys.prefix)
        else:
            # An isolated venv replaces all prefixes and disables user site.
            PREFIXES = [sys.prefix]
            ENABLE_USER_SITE = False
    return known_paths
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        try:
            import sitecustomize
        except ImportError as exc:
            # A missing sitecustomize module is fine; a failed import *inside*
            # sitecustomize must be reported like any other error.
            if exc.name != 'sitecustomize':
                raise
    except Exception as err:
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            sys.stderr.write(
                "Error in sitecustomize; set PYTHONVERBOSE for traceback:\n"
                "%s: %s\n" %
                (err.__class__.__name__, err))
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        try:
            import usercustomize
        except ImportError as exc:
            # A missing usercustomize module is fine; a failed import *inside*
            # usercustomize must be reported like any other error.
            if exc.name != 'usercustomize':
                raise
    except Exception as err:
        if sys.flags.verbose:
            sys.excepthook(*sys.exc_info())
        else:
            sys.stderr.write(
                "Error in usercustomize; set PYTHONVERBOSE for traceback:\n"
                "%s: %s\n" %
                (err.__class__.__name__, err))
def main():
    """Add standard site-specific directories to the module search path.

    This function is called automatically when this module is imported,
    unless the python interpreter was started with the -S flag.
    """
    global ENABLE_USER_SITE
    # Normalize existing sys.path entries before anything new is appended.
    abs_paths()
    known_paths = removeduppaths()
    # venv() may retarget sys.prefix and force ENABLE_USER_SITE to False.
    known_paths = venv(known_paths)
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    known_paths = addusersitepackages(known_paths)
    known_paths = addsitepackages(known_paths)
    # Interactive niceties: quit/exit, copyright/credits/license, help().
    setquit()
    setcopyright()
    sethelper()
    if not sys.flags.isolated:
        enablerlcompleter()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
# Prevent extending of sys.path when python was started with -S and
# site is imported later (sys.flags.no_site is true in that case).
if not sys.flags.no_site:
    main()
def _script():
    """Entry point for ``python -m site``.

    Without arguments, print sys.path and the user site configuration.
    With --user-base and/or --user-site, print the requested directories and
    encode the user-site status in the exit code (0 enabled, 1 disabled by
    user, 2 disabled for security reasons, >2 unknown).
    """
    # Fixed: the "2" exit-code line previously read "uses site directory",
    # a typo for "user site directory".
    help = """\
    %s [--user-base] [--user-site]
    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.
    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        # No arguments: dump the effective configuration.
        user_base = getuserbase()
        user_site = getusersitepackages()
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")
        print("USER_BASE: %r (%s)" % (user_base,
            "exists" if os.path.isdir(user_base) else "doesn't exist"))
        print("USER_SITE: %r (%s)" % (user_site,
            "exists" if os.path.isdir(user_site) else "doesn't exist"))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)
    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)
    if buffer:
        print(os.pathsep.join(buffer))
        # Map the tri-state ENABLE_USER_SITE onto documented exit codes.
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)
# Support `python -m site` for inspecting the path configuration.
if __name__ == '__main__':
    _script()
|
|
#!/usr/bin/env python
__authors__ = "Vincent Dumoulin"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Guillaume Desjargins", "Vincent Dumoulin"]
__license__ = "3-clause BSD"
__maintainer__ = "Vincent Dumoulin"
"""
This script computes both an estimate of the partition function of the provided
DBM model and an estimate of the log-likelihood on the given training and test
sets.
This is guaranteed to work only for DBMs with a BinaryVector visible layer and
BinaryVectorMaxPool hidden layers with pool sizes of 1.
It uses annealed importance sampling (AIS) to estimate Z, the partition
function.
TODO: add more details, cite paper
usage: dbm_metrics.py [-h] {ais} {mnist} model_path
positional arguments:
  {ais}       the desired metric
  {mnist}     the dataset used for computing the metric
  model_path  path to the pickled DBM model
optional arguments:
  -h, --help  show the help message and exit
"""
import argparse
import warnings
import numpy
import logging
from theano.compat.six.moves import xrange
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import scan
import pylearn2
from pylearn2.compat import OrderedDict
from pylearn2.datasets.mnist import MNIST
from pylearn2.utils import serial
from pylearn2 import utils
# Use Theano's configured float precision for all array allocations below.
floatX = theano.config.floatX
logging.basicConfig(level=logging.INFO)
# Module-level RNGs with a fixed seed so AIS runs are reproducible.
rng = numpy.random.RandomState(9873242)
theano_rng = RandomStreams(rng.randint(2**30))
def _sample_even_odd(W_list, b_list, samples, beta, odd=True):
    """
    Resample, in place, every layer of one parity given the current states.

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is an unused placeholder
        since the visible layer has no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    samples : array-like object of theano shared variables
        Current layer states; entries of the chosen parity are overwritten
    beta : theano.tensor.scalar
        Inverse temperature parameter
    odd : boolean
        If True resample the odd layers, otherwise the even ones
    """
    # Layers of equal parity are conditionally independent given the others,
    # so a single sweep suffices.
    first = 1 if odd else 0
    for layer in xrange(first, len(samples), 2):
        samples[layer] = sample_hi_given(samples, layer, W_list, b_list, beta)
def _activation_even_odd(W_list, b_list, samples, beta, odd=True):
    """
    Overwrite, in place, every layer of one parity with its pre-sigmoid
    activation given the current states.

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is an unused placeholder
        since the visible layer has no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    samples : array-like object of theano shared variables
        Current layer states; entries of the chosen parity are overwritten
    beta : theano.tensor.scalar
        Inverse temperature parameter
    odd : boolean
        If True update the odd layers, otherwise the even ones
    """
    # apply_sigmoid=False keeps the raw net input, which callers may combine
    # with other terms before squashing.
    first = 1 if odd else 0
    for layer in xrange(first, len(samples), 2):
        samples[layer] = hi_given(samples, layer, W_list, b_list, beta,
                                  apply_sigmoid=False)
def neg_sampling(W_list, b_list, nsamples, beta=1.0, pa_bias=None,
                 marginalize_odd=True, theano_rng=None):
    """
    Generate a sample from the intermediate distribution defined at inverse
    temperature 'beta', starting from state 'nsamples'. See file docstring for
    equation of p_k(h1).

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM. Its first element is ignored, since in the
        Pylearn2 framework a visible layer does not have a weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    nsamples : array-like object of theano shared variables
        Negative samples corresponding to the previous states
    beta : theano.tensor.scalar
        Inverse temperature parameter
    pa_bias : array-like object
        Biases of the baserate (A) model, mixed in with weight (1 - beta)
    marginalize_odd : boolean
        Whether to marginalize odd layers
    theano_rng : theano RandomStreams
        Random number generator

    Returns
    -------
    new_nsamples : array-like object of symbolic matrices
        new_nsamples[i] contains new samples for i-th layer.
    """
    # There's as much layers in the DBM as there are bias vectors
    depth = len(b_list)
    new_nsamples = [nsamples[i] for i in xrange(depth)]
    # Contribution from model B, at temperature beta_k: resample the
    # marginalized parity, then compute raw activations for the other parity.
    _sample_even_odd(W_list, b_list, new_nsamples, beta, odd=marginalize_odd)
    _activation_even_odd(W_list, b_list, new_nsamples, beta,
                         odd=not marginalize_odd)
    # Contribution from model A, at temperature (1 - beta_k); note the index
    # `not marginalize_odd` selects the anchor layer (0 or 1).
    new_nsamples[not marginalize_odd] += pa_bias * (1. - beta)
    # Loop over all layers (not being marginalized): squash and sample.
    for i in xrange(not marginalize_odd, depth, 2):
        new_nsamples[i] = T.nnet.sigmoid(new_nsamples[i])
        new_nsamples[i] = theano_rng.binomial(
            size=nsamples[i].get_value().shape, n=1, p=new_nsamples[i],
            dtype=floatX
        )
    return new_nsamples
def free_energy_at_beta(W_list, b_list, samples, beta, pa_bias=None,
                        marginalize_odd=True):
    """
    Compute the free-energy of the sample 'h1_sample', for model p_k(h1).

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM. Its first element is ignored, since in the
        Pylearn2 framework a visible layer does not have a weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    samples : array-like object of theano shared variable
        Samples from which we extract the samples of layer h1
    beta : theano.tensor.scalar
        Inverse temperature beta_k of model p_k(h1) at which to measure the
        free-energy.
    pa_bias : array-like object of theano shared variables
        Biases for the A model
    marginalize_odd : boolean
        Whether to marginalize odd layers

    Returns
    -------
    fe : symbolic variable
        Free-energy of sample 'h1_sample', at inverse temperature beta
    """
    # There's as much layers in the DBM as there are bias vectors
    depth = len(b_list)
    fe = 0.
    # Contribution of biases of the layers that are kept (not marginalized)
    keep_idx = numpy.arange(not marginalize_odd, depth, 2)
    for i in keep_idx:
        fe -= T.dot(samples[i], b_list[i]) * beta
    # Contribution of the marginalized layers, summed out analytically via
    # the softplus of their total input
    marg_idx = numpy.arange(marginalize_odd, depth, 2)
    for i in marg_idx:
        from_im1 = T.dot(samples[i-1], W_list[i]) if i >= 1 else 0.
        from_ip1 = T.dot(samples[i+1], W_list[i+1].T) if i < depth-1 else 0
        net_input = (from_im1 + from_ip1 + b_list[i]) * beta
        fe -= T.sum(T.nnet.softplus(net_input), axis=1)
    # Contribution of the baserate (model A) bias, annealed by (1 - beta)
    fe -= T.dot(samples[not marginalize_odd], pa_bias) * (1. - beta)
    return fe
def compute_log_ais_weights(batch_size, free_energy_fn, sample_fn, betas):
    """
    Accumulate the log AIS importance weights over the temperature schedule.

    Parameters
    ----------
    batch_size : scalar
        Number of parallel AIS chains (one weight per chain)
    free_energy_fn : theano.function
        Given inverse temperature beta_k, returns the free energy vector of
        the current chain states
    sample_fn : theano.function
        Given inverse temperature beta_k, advances the chain states by one
        transition of p_k
    betas : array-like object of scalars
        Inverse temperature schedule

    Returns
    -------
    log_ais_w : vector
        Per-chain log AIS weights
    """
    log_ais_w = numpy.zeros(batch_size, dtype=floatX)
    # Each step adds F_k(x) - F_{k+1}(x) at the current state, then moves the
    # chain to a sample from the next intermediate distribution.
    for step in range(len(betas) - 1):
        beta_lo, beta_hi = betas[step], betas[step + 1]
        log_ais_w += free_energy_fn(beta_lo) - free_energy_fn(beta_hi)
        sample_fn(beta_hi)
        # Progress report every 1000 steps.
        if step % 1e3 == 0:
            logging.info('Temperature %f ' % beta_hi)
    return log_ais_w
def estimate_from_weights(log_ais_w):
    """
    Safely compute the log-average of the ais-weights

    Parameters
    ----------
    log_ais_w : numpy.ndarray
        Vector containing the concrete values log_ais_w^{(m)}.

    Returns
    -------
    dlogz : scalar
        log(Z_B) - log(Z_A)
    var_dlogz : scalar
        Variance of our estimator
    """
    # Utility function for safely computing log-mean of the ais weights
    # (log-sum-exp trick: subtract the max before exponentiating).
    ais_w = T.vector()
    max_ais_w = T.max(ais_w)
    dlogz = T.log(T.mean(T.exp(ais_w - max_ais_w))) + max_ais_w
    log_mean = theano.function([ais_w], dlogz, allow_input_downcast=False)
    # Estimate the log-mean of the AIS weights
    dlogz = log_mean(log_ais_w)
    # Estimate log-variance of the AIS weights
    # VAR(log(X)) \approx VAR(X) / E(X)^2 = E(X^2)/E(X)^2 - 1
    m = numpy.max(log_ais_w)
    var_dlogz = (log_ais_w.shape[0] *
                 numpy.sum(numpy.exp(2 * (log_ais_w - m))) /
                 numpy.sum(numpy.exp(log_ais_w - m)) ** 2 - 1.)
    return dlogz, var_dlogz
def compute_log_za(b_list, pa_bias, marginalize_odd=True):
    """
    Compute the exact partition function of the baserate model p_A(h1).

    Parameters
    ----------
    b_list : array-like object of theano shared variables
        Biases of the DBM (only their sizes are used here)
    pa_bias : array-like object
        Biases of the A model, applied to the anchor layer
    marginalize_odd : boolean
        Whether odd layers are marginalized (selects the anchor layer)

    Returns
    -------
    log_za : scalar
        Log-partition function of model A
    """
    log_za = 0.
    # The single layer carrying the baserate biases is layer 0 when odd
    # layers are marginalized, layer 1 otherwise.
    anchor = int(not marginalize_odd)
    for i, b in enumerate(b_list):
        if i == anchor:
            # Binary units with bias c contribute prod_j (1 + exp(c_j)).
            log_za += numpy.sum(numpy.log(1 + numpy.exp(pa_bias)))
        else:
            # Unbiased binary units each contribute a factor of 2.
            log_za += numpy.log(2) * b.get_value().shape[0]
    return log_za
def compute_likelihood_given_logz(nsamples, psamples, batch_size, energy_fn,
                                  inference_fn, log_z, test_x):
    """
    Compute test set likelihood as below, where q is the variational
    approximation to the posterior p(h1,h2|v).

    ln p(v) \approx \sum_h q(h) E(v,h1,h2) + H(q) - ln Z

    See section 3.2 of DBM paper for details.

    Parameters
    ----------
    nsamples : array-like object of theano shared variables
        Negative samples
    psamples : array-like object of theano shared variables
        Positive samples
    batch_size : scalar
        Size of a batch of samples
    energy_fn : theano.function
        Function which computes the (temperature 1) energy of the samples. This
        function should return a symbolic vector.
    inference_fn : theano.function
        Inference function for DBM. Function takes a T.matrix as input (data)
        and returns a list of length 'length(b_list)', where the i-th element
        is an ndarray containing approximate samples of layer i.
    log_z : scalar
        Estimate partition function of 'model'.
    test_x : numpy.ndarray
        Test set data, in dense design matrix format.

    Returns
    -------
    likelihood : scalar
        Negative log-likelihood of test data under the model
    """
    # `i` doubles as the batch start offset inside the loop below.
    i = 0.
    likelihood = 0
    for i in xrange(0, len(test_x), batch_size):
        # Recast data as floatX and apply preprocessing if required
        x = numpy.array(test_x[i:numpy.minimum(test_x.shape[0], i + batch_size), :], dtype=floatX)
        batch_size0 = len(x)
        if len(x) < batch_size:
            # concatenate x to have some dummy entries; the padded rows are
            # excluded from the likelihood sum via batch_size0 below
            x = numpy.concatenate((x, numpy.zeros((batch_size-len(x),x.shape[1]), dtype=floatX)), axis=0)
        # Perform inference
        inference_fn(x)
        # Entropy of h(q) adds contribution to variational lower-bound;
        # the 1e-5 terms guard against log(0)
        hq = 0
        for psample in psamples[1:]:
            temp = \
                - psample.get_value() * numpy.log(1e-5 + psample.get_value()) \
                - (1.-psample.get_value()) \
                * numpy.log(1. - psample.get_value() + 1e-5)
            hq += numpy.sum(temp, axis=1)
        # Copy into negative phase buffers to measure energy
        nsamples[0].set_value(x)
        for ii, psample in enumerate(psamples):
            if ii > 0:
                nsamples[ii].set_value(psample.get_value())
        # Compute sum of likelihood for current buffer
        x_likelihood = numpy.sum((-energy_fn(1.0) + hq - log_z)[:batch_size0])
        # Perform moving average of negative likelihood
        # Divide by len(x) and not bufsize, since last buffer might be smaller
        # NOTE(review): `i` here is the batch start offset, so this forms a
        # running per-example average; verify the weighting is intended for
        # the final partial batch.
        likelihood = (i * likelihood + x_likelihood) / (i + batch_size0)
    return likelihood
def hi_given(samples, i, W_list, b_list, beta=1.0, apply_sigmoid=True):
    """
    Compute the state of hidden layer i given all other layers.

    Parameters
    ----------
    samples : array-like object of theano shared variables
        samples[0] is the input (positive phase) or the visible chain state
        (negative phase); samples[j] is the current state of layer j.
    i : integer
        Index of the layer whose activation is computed
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is an unused placeholder
        since the visible layer has no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    beta : scalar
        Inverse temperature parameter used when performing AIS
    apply_sigmoid : boolean
        When False, return the raw (pre-sigmoid) net input. Useful for AIS
        estimates.

    Returns
    -------
    hi_mean : symbolic variable
        Activation (or net input) of the i-th layer
    """
    depth = len(samples)
    pre_activation = 0.
    if i < depth - 1:
        # Top-down contribution from layer i+1 (transposed weights).
        pre_activation += T.dot(samples[i + 1], W_list[i + 1].T) * beta
    if i > 0:
        # Bottom-up contribution from layer i-1.
        pre_activation += T.dot(samples[i - 1], W_list[i]) * beta
    pre_activation += b_list[i] * beta
    return T.nnet.sigmoid(pre_activation) if apply_sigmoid else pre_activation
def sample_hi_given(samples, i, W_list, b_list, beta=1.0):
    """
    Sample the values taken by the i-th layer given the current DBM state.

    Parameters
    ----------
    samples : array-like object of theano shared variables
        samples[0] is the input (positive phase) or the visible chain state
        (negative phase); samples[j] is the current state of layer j.
    i : integer
        Index of the layer to sample
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is an unused placeholder
        since the visible layer has no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    beta : scalar
        Inverse temperature parameter used when performing AIS

    Returns
    -------
    hi_sample : symbolic variable
        Binary sample of the i-th layer
    """
    mean_activation = hi_given(samples, i, W_list, b_list, beta)
    # Bernoulli draw with success probability given by the sigmoid mean.
    return theano_rng.binomial(
        size=samples[i].get_value().shape,
        n=1, p=mean_activation,
        dtype=floatX
    )
def _e_step(psamples, W_list, b_list, n_steps=100, eps=1e-5):
    """
    Performs 'n_steps' of mean-field inference (used to compute positive phase
    statistics)

    Parameters
    ----------
    psamples : array-like object of theano shared variables
        State of each layer of the DBM (during the inference process).
        psamples[0] points to the input
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is an unused placeholder.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    n_steps : integer
        Number of iterations of mean-field to perform
    eps : float
        Convergence threshold: iteration stops early once the largest mean
        absolute change of any layer falls below this value
    """
    depth = len(psamples)
    # now alternate mean-field inference for even/odd layers
    def mf_iteration(*psamples):
        new_psamples = [p for p in psamples]
        # NOTE(review): both sweeps read the pre-iteration `psamples`, not
        # the freshly computed `new_psamples` — confirm this is the intended
        # update scheme rather than a sequential even/odd sweep.
        for i in xrange(1, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)
        for i in xrange(2, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)
        # Convergence score: the largest mean absolute change of any layer.
        score = 0.
        for i in xrange(1, depth):
            score = T.maximum(T.mean(abs(new_psamples[i] - psamples[i])),
                              score)
        return new_psamples, theano.scan_module.until(score < eps)
    new_psamples, updates = scan(
        mf_iteration,
        outputs_info=psamples,
        n_steps=n_steps
    )
    # scan returns the full history of each layer; keep only the final state.
    return [x[-1] for x in new_psamples]
def estimate_likelihood(W_list, b_list, trainset, testset, free_energy_fn=None,
                        batch_size=100, large_ais=False, log_z=None,
                        pos_mf_steps=50, pos_sample_steps=0):
    """
    Compute estimate of log-partition function and likelihood of trainset and
    testset

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM (visible layer excluded)
    b_list : array-like object of theano shared variables
        Biases of the DBM
    trainset : pylearn2.datasets.dataset.Dataset
        Training set
    testset : pylearn2.datasets.dataset.Dataset
        Test set
    free_energy_fn : theano.function
        Function which, given temperature beta_k, computes the free energy
        of the samples stored in model.samples. This function should return
        a symbolic vector.
    batch_size : integer
        Size of a batch of examples
    large_ais : boolean
        If True, will use 3e5 intermediate distributions, instead of 3e4
    log_z : scalar, optional
        Log-partition function (if precomputed)
    pos_mf_steps : integer
        The number of fixed-point iterations for approximate inference
    pos_sample_steps : integer
        Same thing as pos_mf_steps; when both pos_mf_steps > 0 and
        pos_sample_steps > 0, pos_mf_steps has a priority

    Returns
    -------
    train_ll : scalar
        Log-likelihood of the training data under the model
    test_ll : scalar
        Log-likelihood of the test data under the model
    log_z : scalar
        Estimate of the log-partition function of the model
    """
    warnings.warn("This is guaranteed to work only for DBMs with a " +
                  "BinaryVector visible layer and BinaryVectorMaxPool " +
                  "hidden layers with pool sizes of 1.")

    # Add a dummy placeholder for visible layer's weights in W_list, so that
    # W_list[i] maps layer i-1 to layer i.
    W_list = [None] + W_list

    # Depth of the DBM
    depth = len(b_list)

    # Initialize positive-phase (psamples) and negative-phase (nsamples)
    # buffers with uniform noise of the right shape.
    psamples = []
    nsamples = []
    for i, b in enumerate(b_list):
        psamples += [utils.sharedX(rng.rand(batch_size,
                                            b.get_value().shape[0]),
                     name='psamples%i' % i)]
        nsamples += [utils.sharedX(rng.rand(batch_size,
                                            b.get_value().shape[0]),
                     name='nsamples%i' % i)]
    # The visible positive-phase state is fed in as data, not stored.
    psamples[0] = T.matrix('psamples0')

    ##########################
    ## BUILD THEANO FUNCTIONS
    ##########################
    beta = T.scalar()

    # For an even number of layers, we marginalize the odd layers
    # (and vice-versa)
    marginalize_odd = (depth % 2) == 0

    # Build function to retrieve energy.
    E = -T.dot(nsamples[0], b_list[0]) * beta
    for i in xrange(1, depth):
        E -= T.sum(T.dot(nsamples[i-1], W_list[i] * beta) * nsamples[i],
                   axis=1)
        E -= T.dot(nsamples[i], b_list[i] * beta)
    energy_fn = theano.function([beta], E)

    # Build inference function.
    assert (pos_mf_steps or pos_sample_steps)
    pos_steps = pos_mf_steps if pos_mf_steps else pos_sample_steps
    new_psamples = _e_step(psamples, W_list, b_list, n_steps=pos_steps)
    ups = OrderedDict()
    for psample, new_psample in zip(psamples[1:], new_psamples[1:]):
        ups[psample] = new_psample
    temp = numpy.asarray(trainset.X, dtype=floatX)
    mean_train = numpy.mean(temp, axis=0)
    inference_fn = theano.function(inputs=[psamples[0]], outputs=[],
                                   updates=ups)

    # Configure baserate bias for (h0 if `marginalize_odd` else h1):
    # run inference on the mean training vector and invert the sigmoid of
    # the (clipped) resulting activation.
    inference_fn(numpy.tile(mean_train, (batch_size, 1)))
    numpy_psamples = [mean_train[None, :]] + \
                     [psample.get_value() for psample in psamples[1:]]
    mean_pos = numpy.minimum(numpy_psamples[not marginalize_odd], 1-1e-5)
    mean_pos = numpy.maximum(mean_pos, 1e-5)
    pa_bias = -numpy.log(1./mean_pos[0] - 1.)

    # Build Theano function to sample from interpolating distributions.
    updates = OrderedDict()
    new_nsamples = neg_sampling(W_list, b_list, nsamples, beta=beta,
                                pa_bias=pa_bias,
                                marginalize_odd=marginalize_odd,
                                theano_rng=theano_rng)
    for (nsample, new_nsample) in zip(nsamples, new_nsamples):
        updates[nsample] = new_nsample
    sample_fn = theano.function([beta], [], updates=updates,
                                name='sample_func')

    # Build function to compute free-energy of p_k(h1).
    fe_bp_h1 = free_energy_at_beta(W_list, b_list, nsamples, beta,
                                   pa_bias, marginalize_odd=marginalize_odd)
    free_energy_fn = theano.function([beta], fe_bp_h1)

    ###########
    ## RUN AIS
    ###########

    # Generate exact sample for the base model.
    # NOTE(review): the baserate bias is applied to layer 1 here, while
    # compute_log_za anchors it at layer (not marginalize_odd); confirm the
    # two agree for odd-depth DBMs.
    for i, nsample_i in enumerate(nsamples):
        bias = pa_bias if i == 1 else b_list[i].get_value()
        hi_mean_vec = 1. / (1. + numpy.exp(-bias))
        hi_mean = numpy.tile(hi_mean_vec, (batch_size, 1))
        r = rng.random_sample(hi_mean.shape)
        hi_sample = numpy.array(hi_mean > r, dtype=floatX)
        nsample_i.set_value(hi_sample)

    # Default configuration for interpolating distributions: a denser
    # schedule near beta = 1 where the integrand changes fastest.
    # Fixed: numpy.linspace requires an integer sample count; the original
    # float expressions (e.g. 1e4+1) raise TypeError on modern NumPy.
    if large_ais:
        betas = numpy.cast[floatX](
            numpy.hstack((numpy.linspace(0, 0.5, int(1e5) + 1)[:-1],
                          numpy.linspace(0.5, 0.9, int(1e5) + 1)[:-1],
                          numpy.linspace(0.9, 1.0, int(1e5)))))
    else:
        betas = numpy.cast[floatX](
            numpy.hstack((numpy.linspace(0, 0.5, int(1e4) + 1)[:-1],
                          numpy.linspace(0.5, 0.9, int(1e4) + 1)[:-1],
                          numpy.linspace(0.9, 1.0, int(1e4)))))

    if log_z is None:
        log_ais_w = compute_log_ais_weights(batch_size, free_energy_fn,
                                            sample_fn, betas)
        dlogz, var_dlogz = estimate_from_weights(log_ais_w)
        log_za = compute_log_za(b_list, pa_bias, marginalize_odd)
        log_z = log_za + dlogz
        logging.info('log_z = %f' % log_z)
        logging.info('log_za = %f' % log_za)
        logging.info('dlogz = %f' % dlogz)
        logging.info('var_dlogz = %f' % var_dlogz)

    train_ll = compute_likelihood_given_logz(nsamples, psamples, batch_size,
                                             energy_fn, inference_fn, log_z,
                                             trainset.X)
    logging.info('Training likelihood = %f' % train_ll)
    test_ll = compute_likelihood_given_logz(nsamples, psamples, batch_size,
                                            energy_fn, inference_fn, log_z,
                                            testset.X)
    logging.info('Test likelihood = %f' % test_ll)

    return (train_ll, test_ll, log_z)
if __name__ == '__main__':
    # Possible metrics
    metrics = {'ais': estimate_likelihood}
    datasets = {'mnist': MNIST}
    # Argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument("metric", help="the desired metric",
                        choices=metrics.keys())
    parser.add_argument("dataset", help="the dataset used for computing the " +
                        "metric", choices=datasets.keys())
    parser.add_argument("model_path", help="path to the pickled DBM model")
    args = parser.parse_args()
    metric = metrics[args.metric]
    dataset = datasets[args.dataset]
    # Load the pickled model and expose its parameters as theano shared
    # variables; b_list includes the visible layer, W_list does not.
    model = serial.load(args.model_path)
    layers = [model.visible_layer] + model.hidden_layers
    W_list = [theano.shared(hidden_layer.get_weights())
              for hidden_layer in model.hidden_layers]
    b_list = [theano.shared(layer.get_biases()) for layer in layers]
    trainset = dataset(which_set='train')
    testset = dataset(which_set='test')
    metric(W_list, b_list, trainset, testset, pos_mf_steps=5)
|
|
# coding: utf-8
"""
HTCondor job manager. See https://research.cs.wisc.edu/htcondor.
"""
__all__ = ["HTCondorJobManager", "HTCondorJobFileFactory"]
import os
import stat
import time
import re
import subprocess
from law.config import Config
from law.job.base import BaseJobManager, BaseJobFileFactory
from law.util import interruptable_popen, make_list, make_unique, quote_cmd
from law.logger import get_logger
from law.contrib.htcondor.util import get_htcondor_version
logger = get_logger(__name__)
_cfg = Config.instance()
class HTCondorJobManager(BaseJobManager):
    """
    Job manager that submits, cancels and queries jobs through the HTCondor
    command line tools: condor_submit, condor_rm, condor_q and condor_history.
    """

    # chunking settings
    chunk_size_submit = 0
    chunk_size_cancel = _cfg.get_expanded_int("job", "htcondor_chunk_size_cancel")
    chunk_size_query = _cfg.get_expanded_int("job", "htcondor_chunk_size_query")

    # matches e.g. "2 job(s) submitted to cluster 123." -> groups ("2", "123")
    submission_job_id_cre = re.compile(r"^(\d+) job\(s\) submitted to cluster (\d+)\.$")

    # matches one 'key = value' line of "-long" output; the value may be
    # double-quoted, and the non-greedy group keeps a trailing quote out of the
    # captured value (the previous greedy '(.*)' swallowed it)
    long_block_cre = re.compile(r"(\w+) \= \"?(.*?)\"?\n")

    def __init__(self, pool=None, scheduler=None, user=None, threads=1):
        """
        Stores default *pool*, *scheduler* and *user* values for later calls
        and probes the local HTCondor version once to enable
        version-dependent command line flags.
        """
        super(HTCondorJobManager, self).__init__()

        self.pool = pool
        self.scheduler = scheduler
        self.user = user
        self.threads = threads

        # determine the htcondor version once
        self.htcondor_version = get_htcondor_version()

        # flags for versions with some important changes
        self.htcondor_v833 = self.htcondor_version and self.htcondor_version >= (8, 3, 3)
        self.htcondor_v856 = self.htcondor_version and self.htcondor_version >= (8, 5, 6)

    def cleanup(self, *args, **kwargs):
        """Not supported by HTCondor, always raises."""
        raise NotImplementedError("HTCondorJobManager.cleanup is not implemented")

    def cleanup_batch(self, *args, **kwargs):
        """Not supported by HTCondor, always raises."""
        raise NotImplementedError("HTCondorJobManager.cleanup_batch is not implemented")

    def submit(self, job_file, pool=None, scheduler=None, retries=0, retry_delay=3, silent=False):
        """
        Submits *job_file* via condor_submit and returns the list of created
        job ids in "<cluster>.<proc>" format. Retries up to *retries* times,
        waiting *retry_delay* seconds in between. When submission ultimately
        fails, returns *None* if *silent* is *True* and raises otherwise.
        """
        # default arguments
        if pool is None:
            pool = self.pool
        if scheduler is None:
            scheduler = self.scheduler

        # get the job file location as the submission command is run in the same directory
        job_file_dir, job_file_name = os.path.split(os.path.abspath(job_file))

        # build the command
        cmd = ["condor_submit"]
        if pool:
            cmd += ["-pool", pool]
        if scheduler:
            cmd += ["-name", scheduler]
        cmd += [job_file_name]
        cmd = quote_cmd(cmd)

        # define the actual submission in a loop to simplify retries
        while True:
            # run the command
            logger.debug("submit htcondor job with command '{}'".format(cmd))
            code, out, err = interruptable_popen(cmd, shell=True, executable="/bin/bash",
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=job_file_dir)

            # get the job id(s)
            if code == 0:
                last_line = out.strip().split("\n")[-1].strip()
                m = self.submission_job_id_cre.match(last_line)
                if m:
                    # expand "N job(s) submitted to cluster M" to M.0 ... M.(N-1)
                    job_ids = ["{}.{}".format(m.group(2), i) for i in range(int(m.group(1)))]
                else:
                    code = 1
                    err = "cannot parse htcondor job id(s) from output:\n{}".format(out)

            # retry or done?
            if code == 0:
                return job_ids
            else:
                logger.debug("submission of htcondor job '{}' failed with code {}:\n{}".format(
                    job_file, code, err))
                if retries > 0:
                    retries -= 1
                    time.sleep(retry_delay)
                    continue
                elif silent:
                    return None
                else:
                    raise Exception("submission of htcondor job '{}' failed:\n{}".format(
                        job_file, err))

    def cancel(self, job_id, pool=None, scheduler=None, silent=False):
        """
        Cancels one or multiple jobs given by *job_id* via condor_rm. Raises
        on failure unless *silent* is *True*.
        """
        # default arguments
        if pool is None:
            pool = self.pool
        if scheduler is None:
            scheduler = self.scheduler

        # build the command
        cmd = ["condor_rm"]
        if pool:
            cmd += ["-pool", pool]
        if scheduler:
            cmd += ["-name", scheduler]
        cmd += make_list(job_id)
        cmd = quote_cmd(cmd)

        # run it
        logger.debug("cancel htcondor job(s) with command '{}'".format(cmd))
        code, out, err = interruptable_popen(cmd, shell=True, executable="/bin/bash",
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # check success
        if code != 0 and not silent:
            raise Exception("cancellation of htcondor job(s) '{}' failed with code {}:\n{}".format(
                job_id, code, err))

    def query(self, job_id, pool=None, scheduler=None, user=None, silent=False):
        """
        Queries the status of one or multiple jobs given by *job_id* with
        condor_q, falling back to condor_history for jobs that already left
        the queue. Returns a single status dict, or a mapping of job id to
        status dict when *job_id* is a list or tuple.
        """
        # default arguments
        if pool is None:
            pool = self.pool
        if scheduler is None:
            scheduler = self.scheduler
        if user is None:
            user = self.user

        chunking = isinstance(job_id, (list, tuple))
        job_ids = make_list(job_id)

        # default ClassAds to fetch
        ads = "ClusterId,ProcId,JobStatus,ExitCode,ExitStatus,HoldReason,RemoveReason"

        # build the condor_q command
        cmd = ["condor_q"] + job_ids
        if pool:
            cmd += ["-pool", pool]
        if scheduler:
            cmd += ["-name", scheduler]
        cmd += ["-long"]
        # since v8.3.3 one can limit the number of jobs to query
        if self.htcondor_v833:
            cmd += ["-limit", str(len(job_ids))]
        # since v8.5.6 one can define the attributes to fetch
        if self.htcondor_v856:
            cmd += ["-attributes", ads]
        cmd = quote_cmd(cmd)

        logger.debug("query htcondor job(s) with command '{}'".format(cmd))
        code, out, err = interruptable_popen(cmd, shell=True, executable="/bin/bash",
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # handle errors
        if code != 0:
            if silent:
                return None
            else:
                raise Exception("queue query of htcondor job(s) '{}' failed with code {}:"
                    "\n{}".format(job_id, code, err))

        # parse the output and extract the status per job
        query_data = self.parse_long_output(out)

        # some jobs might already be in the condor history, so query for missing job ids
        missing_ids = [_job_id for _job_id in job_ids if _job_id not in query_data]
        if missing_ids:
            # build the condor_history command, which is fairly similar to the condor_q command
            cmd = ["condor_history"] + missing_ids
            if pool:
                cmd += ["-pool", pool]
            if scheduler:
                cmd += ["-name", scheduler]
            cmd += ["-long"]
            # since v8.3.3 one can limit the number of jobs to query
            if self.htcondor_v833:
                cmd += ["-limit", str(len(missing_ids))]
            # since v8.5.6 one can define the attributes to fetch
            if self.htcondor_v856:
                cmd += ["-attributes", ads]
            cmd = quote_cmd(cmd)

            logger.debug("query htcondor job history with command '{}'".format(cmd))
            code, out, err = interruptable_popen(cmd, shell=True, executable="/bin/bash",
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            # handle errors
            if code != 0:
                if silent:
                    return None
                else:
                    raise Exception("history query of htcondor job(s) '{}' failed with code {}:"
                        "\n{}".format(job_id, code, err))

            # parse the output and update query data
            query_data.update(self.parse_long_output(out))

        # compare to the requested job ids and perform some checks
        for _job_id in job_ids:
            if _job_id not in query_data:
                if not chunking:
                    if silent:
                        return None
                    else:
                        raise Exception("htcondor job(s) '{}' not found in query response".format(
                            job_id))
                else:
                    query_data[_job_id] = self.job_status_dict(job_id=_job_id, status=self.FAILED,
                        error="job not found in query response")

        return query_data if chunking else query_data[job_id]

    @classmethod
    def parse_long_output(cls, out):
        """
        Parses the "-long" output of condor_q / condor_history and returns a
        dict that maps job ids ("<cluster>.<proc>") to job status dicts.
        """
        # retrieve information per block mapped to the job id
        query_data = {}
        for block in out.strip().split("\n\n"):
            data = dict(cls.long_block_cre.findall(block + "\n"))
            if not data:
                continue

            # build the job id, which requires both the cluster and the proc id
            # (fixed: the previous "and" only skipped blocks missing *both* ads,
            # so the format() call below raised a KeyError when exactly one of
            # them was absent)
            if "ClusterId" not in data or "ProcId" not in data:
                continue
            job_id = "{ClusterId}.{ProcId}".format(**data)

            # get the job status code
            status = cls.map_status(data.get("JobStatus"))

            # get the exit code
            code = int(data.get("ExitCode") or data.get("ExitStatus") or "0")

            # get the error message (if any)
            error = data.get("HoldReason") or data.get("RemoveReason")

            # handle inconsistencies between status, code and the presence of an error message
            if code != 0:
                if status != cls.FAILED:
                    status = cls.FAILED
                if not error:
                    error = "job status set to '{}' due to non-zero exit code {}".format(
                        cls.FAILED, code)

            # store it
            query_data[job_id] = cls.job_status_dict(job_id=job_id, status=status, code=code,
                error=error)

        return query_data

    @classmethod
    def map_status(cls, status_flag):
        """
        Maps an HTCondor job status flag to one of the generic manager states.
        See http://pages.cs.wisc.edu/~adesmet/status.html.
        """
        if status_flag in ("0", "1", "U", "I"):
            return cls.PENDING
        elif status_flag in ("2", "R"):
            return cls.RUNNING
        elif status_flag in ("4", "C"):
            return cls.FINISHED
        elif status_flag in ("5", "6", "H", "E"):
            return cls.FAILED
        else:
            # unknown flags are conservatively treated as failures
            return cls.FAILED
class HTCondorJobFileFactory(BaseJobFileFactory):
    """
    Factory that renders HTCondor job description (JDL) files and provides
    the configured input files alongside them.
    """

    config_attrs = BaseJobFileFactory.config_attrs + [
        "file_name", "universe", "executable", "arguments", "input_files", "output_files",
        "postfix_output_files", "log", "stdout", "stderr", "notification", "custom_content",
        "absolute_paths",
    ]

    def __init__(self, file_name="job.jdl", universe="vanilla", executable=None, arguments=None,
            input_files=None, output_files=None, postfix_output_files=True, log="log.txt",
            stdout="stdout.txt", stderr="stderr.txt", notification="Never", custom_content=None,
            absolute_paths=False, **kwargs):
        """
        Stores all JDL settings as instance attributes and forwards the
        directory-related *kwargs* ("dir", "mkdtemp", "cleanup"), defaulted
        from the law config when unset, to the base class.
        """
        # get some default kwargs from the config
        cfg = Config.instance()
        if kwargs.get("dir") is None:
            kwargs["dir"] = cfg.get_expanded("job", cfg.find_option("job",
                "htcondor_job_file_dir", "job_file_dir"))
        if kwargs.get("mkdtemp") is None:
            kwargs["mkdtemp"] = cfg.get_expanded_boolean("job", cfg.find_option("job",
                "htcondor_job_file_dir_mkdtemp", "job_file_dir_mkdtemp"))
        if kwargs.get("cleanup") is None:
            kwargs["cleanup"] = cfg.get_expanded_boolean("job", cfg.find_option("job",
                "htcondor_job_file_dir_cleanup", "job_file_dir_cleanup"))

        super(HTCondorJobFileFactory, self).__init__(**kwargs)

        self.file_name = file_name
        self.universe = universe
        self.executable = executable
        self.arguments = arguments
        self.input_files = input_files or []
        self.output_files = output_files or []
        self.postfix_output_files = postfix_output_files
        self.log = log
        self.stdout = stdout
        self.stderr = stderr
        self.notification = notification
        self.custom_content = custom_content
        self.absolute_paths = absolute_paths

    def create(self, postfix=None, render_variables=None, **kwargs):
        """
        Creates the job file in the configured directory, applying *postfix*
        and *render_variables* to file names and provided inputs, and returns
        the path of the written job file plus the final config object.
        Raises *ValueError* when a mandatory setting is missing.
        """
        # merge kwargs and instance attributes
        c = self.get_config(kwargs)

        # some sanity checks
        if not c.file_name:
            raise ValueError("file_name must not be empty")
        elif not c.universe:
            raise ValueError("universe must not be empty")
        elif not c.executable:
            raise ValueError("executable must not be empty")

        # default render variables
        if not render_variables:
            render_variables = {}

        # add postfix to render variables
        if postfix and "file_postfix" not in render_variables:
            render_variables["file_postfix"] = postfix

        # linearize render variables
        render_variables = self.linearize_render_variables(render_variables)

        # prepare the job file and the executable
        job_file = self.postfix_file(os.path.join(c.dir, c.file_name), postfix)
        executable_is_file = c.executable in map(os.path.basename, c.input_files)
        if executable_is_file:
            c.executable = self.postfix_file(os.path.basename(c.executable), postfix)

        # prepare input files
        def prepare_input(path):
            # provide the file in the job dir (with rendering), then refer to
            # it either absolutely or by basename, depending on the config
            path = self.provide_input(os.path.abspath(path), postfix, c.dir, render_variables)
            path = path if c.absolute_paths else os.path.basename(path)
            return path
        c.input_files = list(map(prepare_input, c.input_files))

        # make the executable file executable for the user
        if executable_is_file:
            for input_file in c.input_files:
                if os.path.basename(input_file) == c.executable:
                    if not c.absolute_paths:
                        input_file = os.path.join(c.dir, input_file)
                    if not os.path.exists(input_file):
                        raise IOError("could not find input file '{}'".format(input_file))
                    os.chmod(input_file, os.stat(input_file).st_mode | stat.S_IXUSR)
                    break

        # output files
        if c.postfix_output_files:
            c.output_files = [self.postfix_file(path, postfix) for path in c.output_files]
            c.log = c.log and self.postfix_file(c.log, postfix)
            c.stdout = c.stdout and self.postfix_file(c.stdout, postfix)
            # fixed: the guard previously read "c.stdout and ...", so stderr was
            # cleared (or left un-postfixed) whenever stdout logging was disabled
            c.stderr = c.stderr and self.postfix_file(c.stderr, postfix)

        # custom log file
        if c.custom_log_file:
            c.custom_log_file = self.postfix_file(c.custom_log_file, postfix)
            c.output_files.append(c.custom_log_file)

        # job file content
        content = []
        content.append(("universe", c.universe))
        content.append(("executable", c.executable))
        if c.log:
            content.append(("log", c.log))
        if c.stdout:
            content.append(("output", c.stdout))
        if c.stderr:
            content.append(("error", c.stderr))
        if c.input_files or c.output_files:
            content.append(("should_transfer_files", "YES"))
        if c.input_files:
            content.append(("transfer_input_files", make_unique(c.input_files)))
        if c.output_files:
            content.append(("transfer_output_files", make_unique(c.output_files)))
            content.append(("when_to_transfer_output", "ON_EXIT"))
        if c.notification:
            content.append(("notification", c.notification))

        # add custom content
        if c.custom_content:
            content += c.custom_content

        # finally arguments and queuing statements
        if c.arguments:
            for _arguments in make_list(c.arguments):
                content.append(("arguments", _arguments))
                content.append("queue")
        else:
            content.append("queue")

        # write the job file
        with open(job_file, "w") as f:
            for obj in content:
                line = self.create_line(*make_list(obj))
                f.write(line + "\n")

        logger.debug("created htcondor job file at '{}'".format(job_file))

        return job_file, c

    @classmethod
    def create_line(cls, key, value=None):
        """
        Formats a single JDL line: plain entries (e.g. "queue") are returned
        unchanged, (key, value) pairs become "key = value", and list or tuple
        values are joined by commas.
        """
        if isinstance(value, (list, tuple)):
            value = ",".join(str(v) for v in value)
        if value is None:
            return str(key)
        else:
            return "{} = {}".format(key, value)
|
|
# Copyright (c) 2013 Thomas Nicholson <tnnich@googlemail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The names of the author(s) may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from honssh.protocols import baseProtocol, sftp, term, execTerm
from twisted.python import log
from kippo.core.config import config
import struct, uuid, random, os, ConfigParser, re
class SSH(baseProtocol.BaseProtocol):
    """
    Man-in-the-middle parser for the SSH authentication and connection layer
    (Python 2 codebase).

    Sits between the attacker-facing server transport and the backend client
    transport, inspects every packet, optionally rewrites or drops it, and
    forwards it to the opposite side.
    """

    channels = []
    username = ''
    password = ''
    cfg = config()

    # message number -> message name (payload layouts noted per entry)
    packetLayout = {
        1 : 'SSH_MSG_DISCONNECT', #['uint32', 'reason_code'], ['string', 'reason'], ['string', 'language_tag']
        2 : 'SSH_MSG_IGNORE', #['string', 'data']
        3 : 'SSH_MSG_UNIMPLEMENTED', #['uint32', 'seq_no']
        4 : 'SSH_MSG_DEBUG', #['boolean', 'always_display']
        5 : 'SSH_MSG_SERVICE_REQUEST', #['string', 'service_name']
        6 : 'SSH_MSG_SERVICE_ACCEPT', #['string', 'service_name']
        20 : 'SSH_MSG_KEXINIT', #['string', 'service_name']
        21 : 'SSH_MSG_NEWKEYS', #
        50 : 'SSH_MSG_USERAUTH_REQUEST', #['string', 'username'], ['string', 'service_name'], ['string', 'method_name']
        51 : 'SSH_MSG_USERAUTH_FAILURE', #['name-list', 'authentications'], ['boolean', 'partial_success']
        52 : 'SSH_MSG_USERAUTH_SUCCESS', #
        53 : 'SSH_MSG_USERAUTH_BANNER', #['string', 'message'], ['string', 'language_tag']
        80 : 'SSH_MSG_GLOBAL_REQUEST', #['string', 'request_name'], ['boolean', 'want_reply'] #tcpip-forward
        81 : 'SSH_MSG_REQUEST_SUCCESS', #
        82 : 'SSH_MSG_REQUEST_FAILURE', #
        90 : 'SSH_MSG_CHANNEL_OPEN', #['string', 'channel_type'], ['uint32', 'sender_channel'], ['uint32', 'initial_window_size'], ['uint32', 'maximum_packet_size'],
        91 : 'SSH_MSG_CHANNEL_OPEN_CONFIRMATION', #['uint32', 'recipient_channel'], ['uint32', 'sender_channel'], ['uint32', 'initial_window_size'], ['uint32', 'maximum_packet_size'],
        92 : 'SSH_MSG_CHANNEL_OPEN_FAILURE', #['uint32', 'recipient_channel'], ['uint32', 'reason_code'], ['string', 'reason'], ['string', 'language_tag']
        93 : 'SSH_MSG_CHANNEL_WINDOW_ADJUST', #['uint32', 'recipient_channel'], ['uint32', 'additional_bytes']
        94 : 'SSH_MSG_CHANNEL_DATA', #['uint32', 'recipient_channel'], ['string', 'data']
        95 : 'SSH_MSG_CHANNEL_EXTENDED_DATA', #['uint32', 'recipient_channel'], ['uint32', 'data_type_code'], ['string', 'data']
        96 : 'SSH_MSG_CHANNEL_EOF', #['uint32', 'recipient_channel']
        97 : 'SSH_MSG_CHANNEL_CLOSE', #['uint32', 'recipient_channel']
        98 : 'SSH_MSG_CHANNEL_REQUEST', #['uint32', 'recipient_channel'], ['string', 'request_type'], ['boolean', 'want_reply']
        99 : 'SSH_MSG_CHANNEL_SUCCESS', #
        100 : 'SSH_MSG_CHANNEL_FAILURE' #
    }

    def __init__(self, server, out):
        # out: logging/output sink; server: attacker-facing transport.
        # channels is re-bound per instance to avoid sharing the class-level list.
        self.out = out
        self.server = server
        self.channels = []

    def setClient(self, client):
        """Attaches the backend client transport once it is connected."""
        self.client = client

    def parsePacket(self, parent, messageNum, payload):
        """
        Inspects one SSH packet coming from *parent* ('[SERVER]' or
        '[CLIENT]'), applies honeypot policy (spoofing, restrictions, channel
        tracking) and forwards it to the opposite side unless self.sendOn was
        cleared by a handler.
        """
        self.data = payload
        self.packetSize = len(payload)
        self.sendOn = True
        packet = self.packetLayout[messageNum]

        if self.cfg.get('packets', 'enabled') == 'true':
            if parent == '[SERVER]':
                direction = 'CLIENT -> SERVER'
            else:
                direction = 'SERVER -> CLIENT'
            self.out.advancedLog(direction + ' - ' + packet.ljust(33) + ' - ' + repr(payload))

        # - UserAuth
        if packet == 'SSH_MSG_USERAUTH_REQUEST':
            self.username = self.extractString()
            service = self.extractString()
            authType = self.extractString()

            if authType == 'password':
                self.extractBool()

                # remember the remaining packet size so the password portion of
                # the payload can be replaced below when spoofing
                psize = self.packetSize
                self.password = self.extractString()

                if self.password != "":
                    if self.cfg.get('spoof', 'enabled') == 'true':
                        user = self.getUsers(self.username)
                        rand = 0
                        if user is not None:
                            if user[2] == 'fixed':
                                # accept any of the configured fake passwords
                                passwords = re.sub(r'\s', '', user[3]).split(',')
                                if self.password in passwords:
                                    rand = 1
                            elif user[2] == 'random':
                                # accept with the configured percentage chance
                                randomFactor = (100 / int(user[3])) + 1
                                rand = random.randrange(1, randomFactor)

                            # always accept credentials that were accepted before
                            logfile = self.cfg.get('folders', 'log_path') + "/spoof.log"
                            if os.path.isfile(logfile):
                                with open(logfile, 'r') as f:
                                    creds = f.read().splitlines()
                                for cred in creds:
                                    cred = cred.strip().split(' - ')
                                    if cred[0] == self.username and cred[1] == self.password:
                                        rand = 1
                                        self.out.writePossibleLink(cred[2:])
                                        break

                            if rand == 1:
                                # swap the attacker's password for the real one
                                payload = payload[:0-psize] + self.stringToHex(user[1])
                                self.out.addConnectionString("[SSH ] Spoofing Login - Changing %s to %s" % (self.password, user[1]))
                                self.out.writeSpoofPass(self.username, self.password)

            elif authType == 'publickey':
                if self.cfg.get('hp-restrict', 'disable_publicKey') == 'true':
                    # pretend only password auth is available
                    self.sendOn = False
                    self.server.sendPacket(51, self.stringToHex('password') + chr(0))

        elif packet == 'SSH_MSG_USERAUTH_FAILURE':
            authList = self.extractString()
            if 'publickey' in authList:
                if self.cfg.get('hp-restrict', 'disable_publicKey') == 'true':
                    log.msg("[SSH] - Detected Public Key Auth - Disabling!")
                    payload = self.stringToHex('password') + chr(0)
            if self.username != '' and self.password != '':
                self.out.loginFailed(self.username, self.password)

        elif packet == 'SSH_MSG_USERAUTH_SUCCESS':
            if self.username != '' and self.password != '':
                self.out.loginSuccessful(self.username, self.password)
        # - End UserAuth

        # - Channels
        elif packet == 'SSH_MSG_CHANNEL_OPEN':
            type = self.extractString()
            id = self.extractInt(4)
            if type == 'session':
                self.createChannel(parent, id, type)
            elif type == 'x11':
                if self.cfg.get('hp-restrict', 'disable_x11') == 'true':
                    log.msg("[SSH] - Detected X11 Channel - Disabling!")
                    self.sendOn = False
                    self.sendBack(parent, 92, self.intToHex(id))
                else:
                    ##LOG X11 Channel opened - not logging
                    self.createChannel(parent, id, type, session=baseProtocol.BaseProtocol())
            elif type == 'direct-tcpip':
                if self.cfg.get('hp-restrict', 'disable_port_forwarding') == 'true':
                    log.msg("[SSH] - Detected Port Forwarding Channel - Disabling!")
                    self.sendOn = False
                    self.sendBack(parent, 92, self.intToHex(id) + self.intToHex(1) + self.stringToHex('open failed') + self.intToHex(0))
                else:
                    ##LOG PORT FORWARDING Channel opened - not logging
                    self.createChannel(parent, id, type, session=baseProtocol.BaseProtocol())
            else:
                ##UNKNOWN CHANNEL TYPE
                if type not in ['exit-status']:
                    log.msg("[SSH] - Unknown Channel Type Detected - " + type)

        elif packet == 'SSH_MSG_CHANNEL_OPEN_CONFIRMATION':
            channel = self.getChannel(self.extractInt(4), parent)
            senderID = self.extractInt(4) #SENDER
            # record the id the confirming side assigned to this channel
            if parent == '[SERVER]':
                channel['serverID'] = senderID
            elif parent == '[CLIENT]':
                channel['clientID'] = senderID
            ##CHANNEL OPENED

        elif packet == 'SSH_MSG_CHANNEL_OPEN_FAILURE':
            channel = self.getChannel(self.extractInt(4), parent)
            self.channels.remove(channel)
            ##CHANNEL FAILED TO OPEN

        elif packet == 'SSH_MSG_CHANNEL_REQUEST':
            channel = self.getChannel(self.extractInt(4), parent)
            type = self.extractString()
            theUUID = uuid.uuid4().hex
            if type == 'pty-req':
                channel['name'] = '[TERM' + str(channel['serverID']) + ']'
                self.out.channelOpened(theUUID, channel['name'])
                channel['session'] = term.Term(self.out, theUUID, channel['name'])
            elif type == 'exec':
                if self.cfg.get('hp-restrict','disable_exec') == 'true':
                    log.msg("[SSH] - Detected EXEC Channel Request - Disabling!")
                    self.sendOn = False
                    self.sendBack(parent, 100, self.intToHex(channel['serverID']))
                else:
                    channel['name'] = '[EXEC' + str(channel['serverID']) + ']'
                    self.extractBool()
                    command = self.extractString()
                    self.out.channelOpened(theUUID, channel['name'])
                    channel['session'] = execTerm.ExecTerm(self.out, theUUID, channel['name'], command)
            elif type == 'subsystem':
                self.extractBool()
                subsystem = self.extractString()
                if subsystem == 'sftp':
                    if self.cfg.get('hp-restrict','disable_sftp') == 'true':
                        log.msg("[SSH] - Detected SFTP Channel Request - Disabling!")
                        self.sendOn = False
                        self.sendBack(parent, 100, self.intToHex(channel['serverID']))
                    else:
                        channel['name'] = '[SFTP' + str(channel['serverID']) + ']'
                        self.out.channelOpened(theUUID, channel['name'])
                        channel['session'] = sftp.SFTP(self.out, theUUID, channel['name'])
                else:
                    ##UNKNOWN SUBSYSTEM
                    log.msg("[SSH] - Unknown Subsystem Type Detected - " + subsystem)
            elif type == 'x11-req':
                if self.cfg.get('hp-restrict', 'disable_x11') == 'true':
                    self.sendOn = False
                    # NOTE(review): replies with SSH_MSG_REQUEST_FAILURE (82);
                    # RFC 4254 specifies SSH_MSG_CHANNEL_FAILURE (100) for
                    # channel requests - confirm against tested clients
                    self.sendBack(parent, 82, '')
            else:
                ##UNKNOWN CHANNEL REQUEST TYPE
                if type not in ['window-change', 'env', 'shell', 'exit-status']:
                    log.msg("[SSH] - Unknown Channel Request Type Detected - " + type)

        elif packet == 'SSH_MSG_CHANNEL_FAILURE':
            pass

        elif packet == 'SSH_MSG_CHANNEL_CLOSE':
            channel = self.getChannel(self.extractInt(4), parent)
            # flag which side closed; drop the channel once both sides have
            channel[parent] = True
            if '[SERVER]' in channel and '[CLIENT]' in channel:
                ##CHANNEL CLOSED
                if channel['session'] is not None:
                    channel['session'].channelClosed()
                    self.out.channelClosed(channel['session'])
                self.channels.remove(channel)
        # - END Channels

        # - ChannelData
        elif packet == 'SSH_MSG_CHANNEL_DATA':
            channel = self.getChannel(self.extractInt(4), parent)
            channel['session'].parsePacket(parent, self.extractString())

        elif packet == 'SSH_MSG_CHANNEL_EXTENDED_DATA':
            channel = self.getChannel(self.extractInt(4), parent)
            self.extractInt(4)
            channel['session'].parsePacket(parent, self.extractString())
        # - END ChannelData

        elif packet == 'SSH_MSG_GLOBAL_REQUEST':
            type = self.extractString()
            if type == 'tcpip-forward':
                if self.cfg.get('hp-restrict', 'disable_port_forwarding') == 'true':
                    self.sendOn = False
                    self.sendBack(parent, 82, '')

        # forward the (possibly rewritten) packet to the opposite side
        if self.sendOn:
            if parent == '[SERVER]':
                self.client.sendPacket(messageNum, payload)
            else:
                self.server.sendPacket(messageNum, payload)

    def sendBack(self, parent, messageNum, payload):
        """
        Sends a honeypot-crafted reply back towards *parent* (i.e. to the
        side the inspected packet came from) instead of forwarding it.
        """
        if self.cfg.get('packets', 'enabled') == 'true':
            packet = self.packetLayout[messageNum]
            if parent == '[SERVER]':
                direction = 'HONSSH -> CLIENT'
            else:
                direction = 'HONSSH -> SERVER'
            self.out.advancedLog(direction + ' - ' + packet.ljust(33) + ' - ' + repr(payload))

        if parent == '[SERVER]':
            self.server.sendPacket(messageNum, payload)
        elif parent == '[CLIENT]':
            self.client.sendPacket(messageNum, payload)

    def createChannel(self, parent, id, type, session=None):
        """Starts tracking a channel under the id assigned by *parent*."""
        if parent == '[SERVER]':
            self.channels.append({'serverID':id, 'type': type, 'session':session})
        elif parent == '[CLIENT]':
            self.channels.append({'clientID':id, 'type': type, 'session':session})

    def getChannel(self, channelNum, parent):
        """
        Returns the tracked channel whose peer-assigned id is *channelNum*,
        or None when no such channel exists. A packet from the client
        references the id the server assigned (and vice versa), hence the
        cross lookup.

        Fixed: on a failed lookup the last inspected channel used to be
        returned (or a NameError raised on an empty channel list) instead of
        None; missing id keys also raised KeyError for half-opened channels.
        """
        if parent == '[CLIENT]':
            search = 'serverID'
        else:
            search = 'clientID'
        for channel in self.channels:
            if channel.get(search) == channelNum:
                return channel
        return None

    def getUsers(self, username):
        """
        Looks up *username* in the spoofing users config and returns
        [name, real_password, 'fixed'|'random', setting], or None when the
        user is unknown or the config file is missing.
        """
        usersCfg = ConfigParser.ConfigParser()
        if os.path.exists(self.cfg.get('spoof','users_conf')):
            usersCfg.read(self.cfg.get('spoof','users_conf'))
            users = usersCfg.sections()
            for user in users:
                if user == username:
                    if usersCfg.has_option(user, 'fake_passwords'):
                        return [user, usersCfg.get(user, 'real_password'), 'fixed', usersCfg.get(user, 'fake_passwords')]
                    if usersCfg.has_option(user, 'random_chance'):
                        return [user, usersCfg.get(user, 'real_password'), 'random', usersCfg.get(user, 'random_chance')]
        else:
            log.msg("ERROR: users_conf does not exist")
        return None

    def stringToHex(self, message):
        """Encodes *message* as an SSH wire 'string' (uint32 length + bytes)."""
        b = message.encode('utf-8')
        size = struct.pack('>L',len(b))
        return size + b

    def intToHex(self, int):
        """Packs a value as a big-endian uint32 (parameter shadows the builtin)."""
        return struct.pack('>L', int)
|
|
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo.config import cfg
from oslo import messaging
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc
CONF = cfg.CONF

# Register the [upgrade_levels]/conductor option, which lets operators cap
# the version of messages sent to conductor services during rolling upgrades.
rpcapi_cap_opt = cfg.StrOpt('conductor',
        help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(object):
"""Client side of the conductor RPC API
API version history:
1.0 - Initial version.
1.1 - Added migration_update
1.2 - Added instance_get_by_uuid and instance_get_all_by_host
1.3 - Added aggregate_host_add and aggregate_host_delete
1.4 - Added migration_get
1.5 - Added bw_usage_update
1.6 - Added get_backdoor_port()
1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
1.9 - Added provider_fw_rule_get_all
1.10 - Added agent_build_get_by_triple
1.11 - Added aggregate_get
1.12 - Added block_device_mapping_update_or_create
1.13 - Added block_device_mapping_get_all_by_instance
1.14 - Added block_device_mapping_destroy
1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
1.16 - Added instance_destroy
1.17 - Added instance_info_cache_delete
1.18 - Added instance_type_get
1.19 - Added vol_get_usage_by_time and vol_usage_update
1.20 - Added migration_get_unconfirmed_by_dest_compute
1.21 - Added service_get_all_by
1.22 - Added ping
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
1.26 - Added instance_info_cache_update
1.27 - Added service_create
1.28 - Added binary arg to service_get_all_by
1.29 - Added service_destroy
1.30 - Added migration_create
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
1.34 - Added service_update
1.35 - Added instance_get_active_by_window_joined
1.36 - Added instance_fault_create
1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
1.38 - Added service name to instance_update
1.39 - Added notify_usage_exists
1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
1.43 - Added compute_stop
1.44 - Added compute_node_delete
1.45 - Added project_id to quota_commit and quota_rollback
1.46 - Added compute_confirm_resize
1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
1.48 - Added compute_unrescue
... Grizzly supports message version 1.48. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.48.
1.49 - Added columns_to_join to instance_get_by_uuid
1.50 - Added object_action() and object_class_action()
1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
1.52 - Pass instance objects for compute_confirm_resize
1.53 - Added compute_reboot
1.54 - Added 'update_cells' argument to bw_usage_update
1.55 - Pass instance objects for compute_stop
1.56 - Remove compute_confirm_resize and
migration_get_unconfirmed_by_dest_compute
1.57 - Remove migration_create()
1.58 - Remove migration_get()
... Havana supports message version 1.58. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.58.
1.59 - Remove instance_info_cache_update()
1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
... - Remove security_group_get_by_instance() and
security_group_rule_get_by_security_group()
1.61 - Return deleted instance from instance_destroy()
1.62 - Added object_backport()
1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
1.64 - Added use_slave to instance_get_all_filters()
... - Remove instance_type_get()
"""
VERSION_ALIASES = {
'grizzly': '1.48',
'havana': '1.58',
}
def __init__(self):
super(ConductorAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic, version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def instance_update(self, context, instance_uuid, updates,
service=None):
updates_p = jsonutils.to_primitive(updates)
cctxt = self.client.prepare(version='1.38')
return cctxt.call(context, 'instance_update',
instance_uuid=instance_uuid,
updates=updates_p,
service=service)
def instance_get(self, context, instance_id):
cctxt = self.client.prepare(version='1.24')
return cctxt.call(context, 'instance_get', instance_id=instance_id)
def instance_get_by_uuid(self, context, instance_uuid,
columns_to_join=None):
if self.client.can_send_version('1.49'):
version = '1.49'
kwargs = {'instance_uuid': instance_uuid,
'columns_to_join': columns_to_join}
else:
version = '1.2'
kwargs = {'instance_uuid': instance_uuid}
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'instance_get_by_uuid', **kwargs)
def migration_get_in_progress_by_host_and_node(self, context,
host, node):
cctxt = self.client.prepare(version='1.31')
return cctxt.call(context,
'migration_get_in_progress_by_host_and_node',
host=host, node=node)
def migration_update(self, context, migration, status):
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare(version='1.1')
return cctxt.call(context, 'migration_update',
migration=migration_p,
status=status)
def aggregate_host_add(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare(version='1.3')
return cctxt.call(context, 'aggregate_host_add',
aggregate=aggregate_p,
host=host)
def aggregate_host_delete(self, context, aggregate, host):
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare(version='1.3')
return cctxt.call(context, 'aggregate_host_delete',
aggregate=aggregate_p,
host=host)
def aggregate_get(self, context, aggregate_id):
cctxt = self.client.prepare(version='1.11')
return cctxt.call(context, 'aggregate_get', aggregate_id=aggregate_id)
def aggregate_get_by_host(self, context, host, key=None):
cctxt = self.client.prepare(version='1.7')
return cctxt.call(context, 'aggregate_get_by_host', host=host, key=key)
def aggregate_metadata_get_by_host(self, context, host, key):
cctxt = self.client.prepare(version='1.42')
return cctxt.call(context, 'aggregate_metadata_get_by_host',
host=host,
key=key)
    def bw_usage_update(self, context, uuid, mac, start_period,
                        bw_in=None, bw_out=None,
                        last_ctr_in=None, last_ctr_out=None,
                        last_refreshed=None, update_cells=True):
        """Record bandwidth usage counters for one (instance uuid, mac) pair.

        RPC 1.54 added the ``update_cells`` flag; when the server is
        capped below 1.54 the flag is dropped and the 1.5 form is used.
        """
        msg_kwargs = dict(uuid=uuid, mac=mac, start_period=start_period,
                          bw_in=bw_in, bw_out=bw_out, last_ctr_in=last_ctr_in,
                          last_ctr_out=last_ctr_out,
                          last_refreshed=last_refreshed)
        if self.client.can_send_version('1.54'):
            version = '1.54'
            msg_kwargs['update_cells'] = update_cells
        else:
            # Pre-1.54 servers reject the update_cells argument.
            version = '1.5'
        cctxt = self.client.prepare(version=version)
        return cctxt.call(context, 'bw_usage_update', **msg_kwargs)
def provider_fw_rule_get_all(self, context):
cctxt = self.client.prepare(version='1.9')
return cctxt.call(context, 'provider_fw_rule_get_all')
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
cctxt = self.client.prepare(version='1.10')
return cctxt.call(context, 'agent_build_get_by_triple',
hypervisor=hypervisor, os=os,
architecture=architecture)
def block_device_mapping_update_or_create(self, context, values,
create=None):
cctxt = self.client.prepare(version='1.12')
return cctxt.call(context, 'block_device_mapping_update_or_create',
values=values, create=create)
def block_device_mapping_get_all_by_instance(self, context, instance,
legacy=True):
instance_p = jsonutils.to_primitive(instance)
if self.client.can_send_version('1.51'):
version = '1.51'
kwargs = {'legacy': legacy}
elif legacy:
# If the remote side is >= 1.51, it defaults to legacy=True.
# If it's older, it only understands the legacy format.
version = '1.13'
kwargs = {}
else:
# If we require new style data, but can't ask for it, then we must
# fail here.
raise messaging.RPCVersionCapError(
vesion='1.51', version_cap=self.client.version_cap)
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'block_device_mapping_get_all_by_instance',
instance=instance_p, **kwargs)
def block_device_mapping_destroy(self, context, bdms=None,
instance=None, volume_id=None,
device_name=None):
bdms_p = jsonutils.to_primitive(bdms)
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version='1.14')
return cctxt.call(context, 'block_device_mapping_destroy',
bdms=bdms_p, instance=instance_p,
volume_id=volume_id, device_name=device_name)
    def instance_get_all_by_filters(self, context, filters, sort_key,
                                    sort_dir, columns_to_join=None,
                                    use_slave=False):
        """Return all instances matching *filters*, sorted as requested.

        RPC 1.64 added ``use_slave``; when the server is capped below
        1.64 the flag is dropped and the 1.47 form is used.
        """
        msg_kwargs = dict(filters=filters, sort_key=sort_key,
                          sort_dir=sort_dir, columns_to_join=columns_to_join)
        if self.client.can_send_version('1.64'):
            version = '1.64'
            msg_kwargs['use_slave'] = use_slave
        else:
            # Pre-1.64 servers reject the use_slave argument.
            version = '1.47'
        cctxt = self.client.prepare(version=version)
        return cctxt.call(context, 'instance_get_all_by_filters', **msg_kwargs)
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
cctxt = self.client.prepare(version='1.35')
return cctxt.call(context, 'instance_get_active_by_window_joined',
begin=begin, end=end, project_id=project_id,
host=host)
def instance_destroy(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version='1.61')
return cctxt.call(context, 'instance_destroy', instance=instance_p)
def instance_info_cache_delete(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version='1.17')
cctxt.call(context, 'instance_info_cache_delete', instance=instance_p)
def vol_get_usage_by_time(self, context, start_time):
start_time_p = jsonutils.to_primitive(start_time)
cctxt = self.client.prepare(version='1.19')
return cctxt.call(context, 'vol_get_usage_by_time',
start_time=start_time_p)
def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req,
wr_bytes, instance, last_refreshed=None,
update_totals=False):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version='1.19')
return cctxt.call(context, 'vol_usage_update',
vol_id=vol_id, rd_req=rd_req,
rd_bytes=rd_bytes, wr_req=wr_req,
wr_bytes=wr_bytes,
instance=instance_p, last_refreshed=last_refreshed,
update_totals=update_totals)
def service_get_all_by(self, context, topic=None, host=None, binary=None):
cctxt = self.client.prepare(version='1.28')
return cctxt.call(context, 'service_get_all_by',
topic=topic, host=host, binary=binary)
def instance_get_all_by_host(self, context, host, node=None,
columns_to_join=None):
cctxt = self.client.prepare(version='1.47')
return cctxt.call(context, 'instance_get_all_by_host',
host=host, node=node,
columns_to_join=columns_to_join)
def instance_fault_create(self, context, values):
cctxt = self.client.prepare(version='1.36')
return cctxt.call(context, 'instance_fault_create', values=values)
def action_event_start(self, context, values):
values_p = jsonutils.to_primitive(values)
cctxt = self.client.prepare(version='1.25')
return cctxt.call(context, 'action_event_start', values=values_p)
def action_event_finish(self, context, values):
values_p = jsonutils.to_primitive(values)
cctxt = self.client.prepare(version='1.25')
return cctxt.call(context, 'action_event_finish', values=values_p)
def service_create(self, context, values):
cctxt = self.client.prepare(version='1.27')
return cctxt.call(context, 'service_create', values=values)
def service_destroy(self, context, service_id):
cctxt = self.client.prepare(version='1.29')
return cctxt.call(context, 'service_destroy', service_id=service_id)
def compute_node_create(self, context, values):
cctxt = self.client.prepare(version='1.33')
return cctxt.call(context, 'compute_node_create', values=values)
    def compute_node_update(self, context, node, values, prune_stats=False):
        """Update the given compute node with *values*.

        For servers capped below 1.63, a JSON-encoded 'stats' entry is
        decoded back into a dict before sending.

        NOTE(review): this mutates the caller's *values* dict in place on
        the downgrade path — presumably intentional; confirm callers do
        not reuse the dict.
        """
        node_p = jsonutils.to_primitive(node)
        if self.client.can_send_version('1.63'):
            version = '1.63'
        else:
            version = '1.33'
            if 'stats' in values:
                values['stats'] = jsonutils.loads(values['stats'])
        cctxt = self.client.prepare(version=version)
        return cctxt.call(context, 'compute_node_update',
                          node=node_p, values=values,
                          prune_stats=prune_stats)
def compute_node_delete(self, context, node):
node_p = jsonutils.to_primitive(node)
cctxt = self.client.prepare(version='1.44')
return cctxt.call(context, 'compute_node_delete', node=node_p)
def service_update(self, context, service, values):
service_p = jsonutils.to_primitive(service)
cctxt = self.client.prepare(version='1.34')
return cctxt.call(context, 'service_update',
service=service_p, values=values)
def task_log_get(self, context, task_name, begin, end, host, state=None):
cctxt = self.client.prepare(version='1.37')
return cctxt.call(context, 'task_log_get',
task_name=task_name, begin=begin, end=end,
host=host, state=state)
def task_log_begin_task(self, context, task_name, begin, end, host,
task_items=None, message=None):
cctxt = self.client.prepare(version='1.37')
return cctxt.call(context, 'task_log_begin_task',
task_name=task_name,
begin=begin, end=end, host=host,
task_items=task_items, message=message)
def task_log_end_task(self, context, task_name, begin, end, host, errors,
message=None):
cctxt = self.client.prepare(version='1.37')
return cctxt.call(context, 'task_log_end_task',
task_name=task_name, begin=begin, end=end,
host=host, errors=errors, message=message)
def notify_usage_exists(self, context, instance, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
instance_p = jsonutils.to_primitive(instance)
system_metadata_p = jsonutils.to_primitive(system_metadata)
extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
cctxt = self.client.prepare(version='1.39')
return cctxt.call(
context, 'notify_usage_exists',
instance=instance_p,
current_period=current_period,
ignore_missing_network_data=ignore_missing_network_data,
system_metadata=system_metadata_p,
extra_usage_info=extra_usage_info_p)
def security_groups_trigger_handler(self, context, event, args):
args_p = jsonutils.to_primitive(args)
cctxt = self.client.prepare(version='1.40')
return cctxt.call(context, 'security_groups_trigger_handler',
event=event, args=args_p)
def security_groups_trigger_members_refresh(self, context, group_ids):
cctxt = self.client.prepare(version='1.40')
return cctxt.call(context, 'security_groups_trigger_members_refresh',
group_ids=group_ids)
def network_migrate_instance_start(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare(version='1.41')
return cctxt.call(context, 'network_migrate_instance_start',
instance=instance_p, migration=migration_p)
def network_migrate_instance_finish(self, context, instance, migration):
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
cctxt = self.client.prepare(version='1.41')
return cctxt.call(context, 'network_migrate_instance_finish',
instance=instance_p, migration=migration_p)
def quota_commit(self, context, reservations, project_id=None,
user_id=None):
reservations_p = jsonutils.to_primitive(reservations)
cctxt = self.client.prepare(version='1.45')
return cctxt.call(context, 'quota_commit',
reservations=reservations_p,
project_id=project_id, user_id=user_id)
def quota_rollback(self, context, reservations, project_id=None,
user_id=None):
reservations_p = jsonutils.to_primitive(reservations)
cctxt = self.client.prepare(version='1.45')
return cctxt.call(context, 'quota_rollback',
reservations=reservations_p,
project_id=project_id, user_id=user_id)
def get_ec2_ids(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version='1.42')
return cctxt.call(context, 'get_ec2_ids',
instance=instance_p)
def compute_unrescue(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(version='1.48')
return cctxt.call(context, 'compute_unrescue', instance=instance_p)
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
cctxt = self.client.prepare(version='1.50')
return cctxt.call(context, 'object_class_action',
objname=objname, objmethod=objmethod,
objver=objver, args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
cctxt = self.client.prepare(version='1.50')
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport(self, context, objinst, target_version):
cctxt = self.client.prepare(version='1.62')
return cctxt.call(context, 'object_backport', objinst=objinst,
target_version=target_version)
class ComputeTaskAPI(object):
    """Client side of the conductor 'compute' namespaced RPC API

    API version history:

    1.0 - Initial version (empty).
    1.1 - Added unified migrate_server call.
    1.2 - Added build_instances
    1.3 - Added unshelve_instance
    1.4 - Added reservations to migrate_server.
    1.5 - Added the legacy_bdm parameter to build_instances
    1.6 - Made migrate_server use instance objects
    """

    def __init__(self):
        super(ComputeTaskAPI, self).__init__()
        target = messaging.Target(topic=CONF.conductor.topic,
                                  namespace='compute_task',
                                  version='1.0')
        serializer = objects_base.NovaObjectSerializer()
        self.client = rpc.get_client(target, serializer=serializer)

    def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                       flavor, block_migration, disk_over_commit,
                       reservations=None):
        """Ask the conductor to migrate *instance* (synchronous call).

        Servers older than RPC 1.6 cannot deserialize Instance objects,
        so the instance is downgraded to a primitive dict for them.
        """
        if self.client.can_send_version('1.6'):
            version = '1.6'
        else:
            # Pre-1.6 servers expect a primitive, not an Instance object.
            instance = jsonutils.to_primitive(
                objects_base.obj_to_primitive(instance))
            version = '1.4'
        flavor_p = jsonutils.to_primitive(flavor)
        cctxt = self.client.prepare(version=version)
        return cctxt.call(context, 'migrate_server',
                          instance=instance, scheduler_hint=scheduler_hint,
                          live=live, rebuild=rebuild, flavor=flavor_p,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit,
                          reservations=reservations)

    def build_instances(self, context, instances, image, filter_properties,
                        admin_password, injected_files, requested_networks,
                        security_groups, block_device_mapping, legacy_bdm=True):
        """Request scheduling and building of new instances.

        Uses cast, not call: the build is asynchronous and returns nothing.
        """
        image_p = jsonutils.to_primitive(image)
        cctxt = self.client.prepare(version='1.5')
        cctxt.cast(context, 'build_instances',
                   instances=instances, image=image_p,
                   filter_properties=filter_properties,
                   admin_password=admin_password,
                   injected_files=injected_files,
                   requested_networks=requested_networks,
                   security_groups=security_groups,
                   block_device_mapping=block_device_mapping,
                   legacy_bdm=legacy_bdm)

    def unshelve_instance(self, context, instance):
        """Asynchronously unshelve a shelved instance."""
        cctxt = self.client.prepare(version='1.3')
        cctxt.cast(context, 'unshelve_instance', instance=instance)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.