| text (string, 12 – 1.05M chars) | repo_name (string, 5 – 86 chars) | path (string, 4 – 191 chars) | language (1 class) | license (15 classes) | size (int32, 12 – 1.05M) | keyword (list, 1 – 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import pynacolada as pcd
from Scientific.IO import NetCDF
import os
import numpy as np
import pylab as pl
fnins = ['/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf.nc' , '/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf2.nc']
#print fnin
# fobjin = open(fnin,'rb')
# fin = NetCDF.NetCDFFile(fnin,'r')
fnout='/home/hendrik/data/belgium_aq/rcm/aq09/stage1/int2lm/laf2009010100_urb_ahf3.nc'
fout = NetCDF.NetCDFFile(fnout,'w')
#os.system('rm '+fnout)
#print fnout
# fobjout = open(fnout,'wb+''rlat')
datin = [{'file':fnins,'varname':'T','predim':'time'},\
{'file':fnins,'varname':'rlat','predim':'time'}]
datout = [{'file':fout,'varname':'T'},]
# function to apply along the selected dimension(s): it takes one array per datin
# entry (here T and rlat) and returns a tuple with one array per datout entry
func = lambda x, y: (np.array([np.mean(x,axis=0)],dtype=np.float32) ,) # *(1.+np.zeros(x.shape))
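# a quick check of what func computes (illustrative, plain numpy):
#   func(np.ones((4, 3), dtype=np.float32), None) -> (array([[1., 1., 1.]], dtype=float32),)
#   i.e. the mean over the leading ('time') axis, keeping a length-1 leading dimension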
dnamsel = ['time',]
pcd.pcd(func,dnamsel,datin,datout,appenddim=True)
fout.close()
print('output file written to:',fnout)
# fig = pl.figure()
# fout = NetCDF.NetCDFFile(fnout,'r')
# pl.imshow(fout.variables['T'][:].squeeze())
# fig.show()
# fout.close()
| *(code listing above)* | hendrikwout/pynacolada | old/examples/pynatestlist.py | Python | gpl-3.0 | 1,089 | ["NetCDF"] | a9f0ecc06069d57274391675191690e5e1707afa90b943a88c47ff94c1109d9f |
global XRFALLOWED
try:
from xrf_analysis import *
XRFALLOWED=True
except ImportError: #xrf_analysis is optional; without it the XRF features are disabled
XRFALLOWED=False
import numpy, pylab, matplotlib
from xrd_math_fcns import *
from xrd_diffraction_conversion_fcns import *
from XRDdefaults import *
from xrf_depprof import *
import os
import time
import h5py
import operator
#import Elemental
#class atabclass(tables.IsDescription):
# qqpkind = tables.UInt16Col() #indeces of qqpktab
# qqaqqind = tables.UInt16Col() # the qqpk is at a position a,b in the qq map. these give a and b in units of indeces of qq
# qqbqqind = tables.UInt16Col()
# qqaiind = tables.Float32Col() #same as above but in units of ifnnn (coordinate in qq)
# qqbiind = tables.Float32Col()
# iinda = tables.Float32Col() #indeces of ifnnn (coordinate in ifnnn)
# iindb = tables.Float32Col()
# kinda = tables.UInt16Col() #indeces of knnn (coordinate in ifnnn but represented by the peak index)
# kindb = tables.UInt16Col()
# delsig = tables.Float32Col()
# deliind = tables.Float32Col()
# qqpkvol = tables.Float32Col()
# qqpknorm = tables.Float32Col()
#
#class qqpktabclass(tables.IsDescription):
# qqindhigh = tables.UInt16Col()
# qqindlow = tables.UInt16Col()
# qqindlowmin = tables.UInt16Col()
# qqindlowmax = tables.UInt16Col()
# qqindhighmin = tables.UInt16Col()
# qqindhighmax = tables.UInt16Col()
# lowminbool = tables.BoolCol()
# lowmaxbool = tables.BoolCol()
# highminbool = tables.BoolCol()
# highmaxbool = tables.BoolCol()
# qqpkintensity = tables.Float32Col()
# qqpkvolume = tables.Float32Col()
# qqpknorm = tables.Float32Col()
##
def readh5pyarray(arrpoint):
    return arrpoint[...] #read the whole dataset into memory regardless of rank (equivalent to the old eval-built slice)
def readblin(h5mar, bin=0):
bs=['blin0', 'blin1']
if bin:
bs=[b+'bin%d' %bin for b in bs]
    return tuple([readh5pyarray(h5mar[b]) for b in bs]), numpy.array([h5mar[b].attrs['weights'][:] for b in bs]).T #-> ((blin0, blin1), per-image weights with shape (npoints, 2))
def getimapqgrid(chessh5dsetstr, imap=True, qgrid=True, bin=0):
h5chess=CHESSRUNFILE()
imappoint=h5chess[chessh5dsetstr]
temp=tuple()
if imap:
if bin==0:
temp+=(readh5pyarray(imappoint),)
else:
temp+=(readh5pyarray(h5chess[chessh5dsetstr+('bin%d' %bin)]),)
if qgrid:
temp+=(imappoint.attrs['qgrid'], )
h5chess.close()
if len(temp)==1:
return temp[0]
return temp
def getchimapchigrid(chessh5dsetstr, chimap=True, chigrid=True, bin=0):
h5chess=CHESSRUNFILE()
chimappoint=h5chess[chessh5dsetstr]
temp=tuple()
if chimap:
if bin==0:
temp+=(readh5pyarray(chimappoint),)
else:
temp+=(readh5pyarray(h5chess[chessh5dsetstr+('bin%d' %bin)]),)
if chigrid:
temp+=(chimappoint.attrs['chigrid'], )
h5chess.close()
if len(temp)==1:
return temp[0]
return temp
def getkillmap(chessh5dsetstr, bin=0):
h5chess=CHESSRUNFILE()
if 'killmap' in chessh5dsetstr.rpartition('/')[2]:
if bin==0:
temp=numpy.bool_(readh5pyarray(h5chess[chessh5dsetstr]))
else:
temp=numpy.bool_(readh5pyarray(h5chess[chessh5dsetstr+('bin%d' %bin)]))
else:
        print 'KILLMAP NOT FOUND. USING DEFAULT'
        xrdname='mar345'
        p=chessh5dsetstr.rpartition('/')[0]
        while p in h5chess: #walk up the group tree to find the right detector, keeping backwards compatibility from when 'xrdname' didn't exist
if 'xrdname' in h5chess[p].attrs:
xrdname=h5chess[p].attrs['xrdname']
break
p=p.rpartition('/')[0]
if bin==0:
temp=numpy.bool_(readh5pyarray(h5chess[xrdname+'killmap']))
else:
temp=numpy.bool_(readh5pyarray(h5chess[xrdname+'killmapbin%d' %bin]))
h5chess.close()
return temp
def getdqchiimage(chessh5dsetstr, bin=0):
h5chess=CHESSRUNFILE()
if bin==0:
temp=readh5pyarray(h5chess[chessh5dsetstr])
else:
temp=readh5pyarray(h5chess[chessh5dsetstr+('bin%d' %bin)])
h5chess.close()
return temp
def labelnumberformat(x):
if x>=1000 or x<.001:
a="%.*e" % (3, x)
a=a.replace('+','')
b,c,d=a.partition('e')
        d=str(int(d)) #strip leading zeros (and keep the sign) of the exponent without dropping the zero in e.g. 'e10'
return ''.join((b,c,d))
else:
return "%.*g" % (3, x)
def qqpktuplist_h5qqpktab(qqpktab,qqnormcritval=0.0):
return [([arow['qqindhigh'], arow['qqindlow'], arow['qqindlowmin'], arow['qqindlowmax'], arow['qqindhighmin'], arow['qqindhighmax'], arow['lowminbool'], arow['lowmaxbool'], arow['highminbool'], arow['highmaxbool']], [arow['qqpkintensity'], arow['qqpkvolume'], arow['qqpknorm']]) for arow in qqpktab.where('qqpknorm>=qqnormcritval')]
def updatelog(h5group, string):
h5group.attrs['modifiedlog']='\n'.join((string, h5group.attrs['modifiedlog']))
def writeattr(h5path, h5groupstr, attrdict):
"""h5path must exist, write attr dict as individiual attrs in h5groupstr"""
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
#node=h5file[h5groupstr]
for key, val in attrdict.iteritems():
h5analysis.attrs[key]=val
if not ('modifiedlog' in h5analysis.attrs):
h5analysis.attrs['modifiedlog']=''.join(('modifiedlog created ', time.ctime()))
updatelog(h5analysis, ''.join(('DAQ attribute dictionary updated. background is ',attrdict['bcknd'],'. ', time.ctime())))
h5file.close()
def getbin(h5an):
h5mar=h5an[getxrdname(h5an)]
bin=1
for name in h5mar.listnames():
if name.startswith('countsbin'):
            bin=int(name.partition('countsbin')[2])
break
return bin
getxrdname=lambda h5an: ('xrdname' in h5an.attrs.keys() and h5an.attrs['xrdname']) or 'mar345'
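#getxrdname (above) falls back to 'mar345' when no 'xrdname' attribute is present;
#the and/or chain is the pre-Python-2.5 spelling of a conditional expression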
def getattr(h5path, h5groupstr):
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
attrdict={}
keys=['pointlist', 'command', 'xgrid', 'zgrid', 'wavelength', 'cal', 'alpha', 'counter', 'elements', 'bcknd', 'chessrunstr', 'imapstr', 'chimapstr', 'killmapstr', 'qimagestr', 'chiimagestr', 'dqchiimagestr', 'x', 'z', 'acquisition_time', 'acquisition_shape', 'xrdname', 'psize', 'bin']
for key in keys:
if key in h5analysis.attrs:
attrdict[key]=h5analysis.attrs[key]
if (not 'psize' in attrdict.keys()) and ('chessrunstr' in attrdict.keys()):
h5chess=CHESSRUNFILE()
h5grp=h5chess[attrdict['chessrunstr']]
attrdict['psize']=h5grp.attrs['psize']
h5chess.close()
h5file.close()
return attrdict
def getdefaultscan(h5path):
h5file=h5py.File(h5path, mode='r')
if 'defaultscan' in h5file.attrs:
temp=h5file.attrs['defaultscan']
else:
temp=None
h5file.close()
return temp
def ReadGunPropDict(h5analysis):#h5analysis must be the analysis group of an open h5 file
if not ('depprof' in h5analysis):
return None
h5depprof=h5analysis['depprof']
d={}
for key in h5depprof.attrs.keys():
d[key]=h5depprof.attrs[key]
return d
def numpts_attrdict(attrdict):
if 'acquisition_shape' in attrdict:
return numpy.prod(numpy.uint16(attrdict['acquisition_shape']))
#below should not be necessary
if 'mesh' in attrdict['command']:
return int(round(attrdict['xgrid'][2]*attrdict['zgrid'][2]))
else:
return int(round(max(attrdict['xgrid'][2], attrdict['zgrid'][2])))
def constructbckndarr_linbyposn(arrtup, ind):
x=[]
for a in arrtup:
if a.ndim==3:
x+=[a[ind]]
else:
x+=[a]
return numpy.array(x)
def calcbcknd(h5path, h5groupstr, bcknd, bin=3, critfrac=0.05, weightprecision=0.01, normrank=0.5):
"""groupstr is to the main scan group, e.g. XRD.PrimDataset. bcknd starts with 'min' or 'ave'"""
print 'calculating ', bcknd, ' background on ', h5path, h5groupstr
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=h5file['/'.join((h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
pointlist=h5analysis.attrs['pointlist']
shape=(h5marcounts.shape[1], h5marcounts.shape[2])
if bin!=0:
binshape=(shape[0]//bin, shape[1]//bin)
if 'min' in bcknd:
bcknddata=h5marcounts[pointlist[0], :, :]
if 'bmin' in h5mar:
del h5mar['bmin']
if ('bminbin%d' %bin) in h5mar:
del h5mar[('bminbin%d' %bin)]
percind=max(1, int(round(len(pointlist)*critfrac)))
if percind+1>=len(pointlist):
            print 'something bad might be about to happen because you have asked for too high of a percentile'
if percind!=1:
bcknddata=numpy.empty((percind+1, shape[0], shape[1]), dtype=h5marcounts.dtype)
elif bcknd=='ave':
bcknddata=numpy.zeros(shape,dtype='float32')
if 'bave' in h5mar:
del h5mar['bave']
if ('bavebin%d' %bin) in h5mar:
            del h5mar[('bavebin%d' %bin)] #was 'bminbin', which deleted the wrong dataset
if 'lin' in bcknd:
data=h5marcounts[pointlist, :, :]
if data.ndim==2:#this happens if pointlist has only one element... I think
data=numpy.array([data])
killmap=getkillmap(h5analysis.attrs['killmapstr'])
b0=readh5pyarray(h5mar['blin0'])
f0vals=h5mar['blin0'].attrs['trialimageweights'][:]
b1=readh5pyarray(h5mar['blin1'])
f1vals=h5mar['blin1'].attrs['trialimageweights'][:]
ans=FindLinearSumBcknd(data, killmap, b0, b1, f0vals, f1vals, fraczeroed=critfrac, rankfornorm=normrank, fprecision=weightprecision)
for nam, wt, bn in zip(['blin0', 'blin1'], ans, [b0, b1]):
h5ar=h5mar[nam]
weights=numpy.zeros(h5marcounts.shape[0], dtype='float32')
weights[pointlist]=wt
h5ar.attrs['zerofrac']=critfrac
h5ar.attrs['weightprecision']=weightprecision
h5ar.attrs['normrank']=normrank
h5ar.attrs['weights']=weights
if bin!=0:
binnam='%sbin%d' %(nam, bin)
if binnam in h5mar:
del h5mar[binnam]
if bn.ndim==3:
binbn=numpy.array([binimage(bnv, bin) for bnv in bn])
else:
binbn=binimage(bn, bin)
h5arbin=h5mar.create_dataset(binnam, data=binbn)
for key, val in h5ar.attrs.iteritems():
h5arbin.attrs[key]=val
else:
for count, pointind in enumerate(pointlist):
print pointind
data=h5marcounts[pointind, :, :]
if 'min' in bcknd:
if percind==1:
indeces=data<bcknddata
bcknddata[indeces]=data[indeces]
else:
if count<=percind:
bcknddata[count, :, :]=data
if count==percind:
bcknddata=numpy.sort(bcknddata, axis=0)
else:
bcknddata[percind, :, :]=data
bcknddata=numpy.sort(bcknddata, axis=0)
if pointind==pointlist[-1]:
bcknddata=bcknddata[-2, :, :]
elif bcknd=='ave':
bcknddata+=data
if len(pointlist)==0:
print 'background calculation error: NO IMAGES FOUND'
h5file.close()
else:
        todel=[dset.name.rpartition('/')[2] for dset in h5mar.iterobjects() if isinstance(dset, h5py.Dataset) and (('b'+bcknd) in dset.name.rpartition('/')[2])]
        for nam in todel: #deletes the arrays about to be created and their derivatives ('del dset' only unbound the Python name)
            del h5mar[nam]
if 'min' in bcknd:
bminpoint=h5mar.create_dataset('bmin', data=bcknddata)
bminpoint.attrs['percentile']=critfrac
if bin!=0:
h5mar.create_dataset('bminbin%d' %bin, data=binimage(bcknddata, bin))
elif bcknd=='ave':
            bcknddata=numpy.array(numpy.round(bcknddata/len(pointlist)), dtype=h5marcounts.dtype) #numpy.round: the builtin round() fails on arrays
h5mar.create_dataset('bave', data=bcknddata)
if bin!=0:
h5mar.create_dataset('bavebin%d' %bin, data=binimage(bcknddata, bin))
print bcknd[:3] ,' background calculation complete'
if bin==0:
t1=''
else:
t1=' and binned'
updatelog(h5analysis, ''.join(('dataset background calculated', t1, ': ', bcknd, '. finished ', time.ctime())))
h5file.close()
if bcknd=='minanom':
calcbanom(h5path, h5groupstr, bqgrid=None, bin=bin)
def integrate(h5path, h5groupstr, singleimage=None, bckndbool=True, normbydqchiimage=False, data=None):#singleimage is a string that is an index of marcounts or a string for other dataset or 'banom#'. only marcounts can get backnd subtraction. if data is passed, it will be integrated and saved as 'i"+singleimage
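    #singleimage modes: None -> integrate every image in pointlist into 'icounts';
    #a digit string -> re-create 'icounts' and integrate just that image; 'banom<n>'
    #or 'raw<n>' -> integrate that stored image; any other string -> integrate the
    #named h5mar dataset; a passed-in data array is integrated and saved as 'i'+singleimage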
performed=True
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
imap, qgrid=getimapqgrid(h5analysis.attrs['imapstr'])
if normbydqchiimage:
dqchiimage=getdqchiimage(h5analysis.attrs['dqchiimagestr'])
else:
dqchiimage=None
slots=numpy.uint16(qgrid[2])
killmap=getkillmap(h5analysis.attrs['killmapstr'])
normalizer=integrationnormalization(killmap, imap, dqchiimage, slots)#if this normalizer ever changes there will need to be changes elsewhere, e.g. where bcknd is integrated for 1d subtraction
imap*=killmap
bcknd='no bcknd'
if bckndbool:
attrdict=getattr(h5path, h5groupstr)
bcknd=attrdict['bcknd']
if 'lin' in bcknd:
bckndarr, blinwts=readblin(h5mar)
else:
bstr=''.join(('b', bcknd[:3]))
if bstr in h5mar:
bckndarr=readh5pyarray(h5mar[bstr])
if bcknd=='minanom':
bminanomf=readh5pyarray(h5mar['bminanomf'])
else:
print 'Aborting: INTEGRATION ABORTED: CANNOT FIND ', bstr
return 'Aborting: INTEGRATION ABORTED: CANNOT FIND ', bstr
h5file.close()
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=h5file['/'.join((h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
    if data is None: #load or select the image(s) to integrate unless an array was passed in
        if singleimage is not None:
            if singleimage.isdigit():
                pointlist=[int(singleimage)]
elif singleimage.startswith('banom'):
data=h5mar['banom'][eval(singleimage[5:]), :, :]
elif singleimage.startswith('raw'):
data=h5marcounts[eval(singleimage[3:]), :, :]
else:
data=readh5pyarray(h5mar[singleimage])
else:
pointlist=h5analysis.attrs['pointlist']
if not (data is None):
if data.shape[0]<imap.shape[0]:
if (imap.shape[0]%data.shape[0])!=0:
h5file.close()
                print 'INTEGRATION ABORTED: ', singleimage, ' is bigger than or incommensurate with imap'
                return 'INTEGRATION ABORTED: ', singleimage, ' is bigger than or incommensurate with imap'
data=unbinimage(data, imap.shape[0]/data.shape[0])
savearr=normalizer*intbyarray(data, imap, dqchiimage, slots)
savename='i%s' %singleimage
if savename in h5mar:
del h5mar[savename]
h5mar.create_dataset('i%s' %singleimage, data=savearr)
pointlist=[]
else:
if 'icounts' in h5mar:
del h5mar['icounts']
icounts=h5mar.create_dataset('icounts', data=numpy.zeros((h5marcounts.shape[0], qgrid[2]), dtype='float32'))
icounts.attrs['qgrid']=qgrid
for pointind in pointlist:
print pointind
data=h5marcounts[pointind, :, :]
if data.shape[0]<imap.shape[0]:
if (imap.shape[0]%data.shape[0])!=0:
h5file.close()
                    print 'INTEGRATION ABORTED: image ', pointind, ' is bigger than or incommensurate with imap'
                    return 'INTEGRATION ABORTED: image ', pointind, ' is bigger than or incommensurate with imap'
data=unbinimage(data, imap.shape[0]/data.shape[0])
if bckndbool:
if bcknd=='minanom':
if bminanomf[pointind, 0]<0:
h5file.close()
print 'no calculation of bminanom background on the fly for integration'
return 'no calculation of bminanom background on the fly for integration'
else:
banom=h5mar['banom'][pointind, :, :]
data=bckndsubtract(data, bckndarr, killmap, btype=bcknd, banom_f_f=(banom, bminanomf[pointind, 0], bminanomf[pointind, 1]))[0]
elif 'lin' in bcknd:
data=bckndsubtract(data, constructbckndarr_linbyposn(bckndarr, pointind), killmap, btype=bcknd, linweights=blinwts[pointind])[0]
else:
data=bckndsubtract(data, bckndarr, killmap, btype=bcknd)[0]
icounts[pointind, :]=normalizer*intbyarray(data, imap, dqchiimage, slots)[:]
if singleimage is not None:
t2=singleimage
else:
t2='entire pointlist'
updatelog(h5analysis, ''.join(('image integration with ', bcknd, ': ', t2, '. finished ', time.ctime())))
h5file.close()
def qqcalc(h5path, h5groupstr, qgrid, image): #assume q interval is integral number of 1d int interval (imap qgrid)
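    #qgrid convention used throughout this module (inferred from minmaxint_qgrid and
    #the dq=qgrid[1] usage elsewhere): [q_start, q_step, n_points]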
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
numstrlist=None
singleimage=False
if 'icounts' in image:
counts=readh5pyarray(h5mar['icounts'])
elif 'ifcounts' in image:
counts=readh5pyarray(h5mar['ifcounts'])
else:
h5file.close()
print "qqcalc aborted. don't understand image parameter"
return "qqcalc aborted. don't understand image parameter"
pointlist=h5analysis.attrs['pointlist']
iqgrid=h5mar['icounts'].attrs['qgrid']
h5file.close()
imin, imax, iint=minmaxint_qgrid(iqgrid)
qqmin, qqmax, qqint=minmaxint_qgrid(qgrid)
if imin>qqmin:
qgrid[2]-=(imin-qqmin)//qqint
qgrid[0]=imin
qqmin, qqmax, qqint=minmaxint_qgrid(qgrid)
if imax<qqmax:
qgrid[2]-=numpy.ceil((qqmax-imax)/qqint)
qqmin, qqmax, qqint=minmaxint_qgrid(qgrid)
indlow=numpy.uint16((qqmin-imin)/iint)
indhigh=numpy.uint16(iqgrid[2]-(imax-qqmax)/iint)
indratio=numpy.uint16(qqint/iint)
indarray=numpy.array(range(indlow, indhigh, indratio))
print 'qgrid ', qgrid, ' length ', indarray.size
qqshape=(qgrid[2], qgrid[2])
    h5file=h5py.File(h5path, mode='r+')
    h5analysis=h5file['/'.join((h5groupstr, 'analysis'))] #rebind the groups to the reopened file
    h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
    if 'qqcounts' in h5mar:
        del h5mar['qqcounts']
    qqcounts=h5mar.create_dataset('qqcounts', (counts.shape[0], qqshape[0], qqshape[1]), dtype='float32')
    qqmap=numpy.zeros(qqshape, dtype='float32')
    for pointind in range(counts.shape[0]):
        qqcounts[pointind, :, :]=qqmap[:, :]
qqcounts.attrs['qgrid']=qgrid
for pointind in pointlist:
print pointind
qqtemp=qq_gen(counts[pointind, indarray])
qqcounts[pointind, :, :]=qqtemp[:, :]
qqmap+=qqtemp
if 'qq' in h5mar:
del h5mar['qq']
qq=h5mar.create_dataset('qq', data=numpy.float32(qqmap/(1.0*len(pointlist))))
qq.attrs['qgrid']=qgrid
updatelog(h5analysis, ''.join(('qq calculation: ', image.partition(' ')[0], '. finished ', time.ctime())))
h5file.close()
def buildintmap(chessh5grpstr, qgrid, bin=3):
h5chess=CHESSRUNFILE()
h5grp=h5chess[chessh5grpstr]
qimage=readh5pyarray(h5grp['qimage'])
h5chess.close()
imap=imap_gen(qimage, qgrid)
imapname=','.join(tuple([labelnumberformat(num) for num in qgrid]))
h5chess=CHESSRUNFILE('r+')
h5grp=h5chess[chessh5grpstr+'/imap']
if imapname in h5grp:
del h5grp[imapname]
dset=h5grp.create_dataset(imapname, data=imap)
dset.attrs['qgrid']=qgrid
if imapname+('bin%d' %bin) in h5grp:
del h5grp[imapname+('bin%d' %bin)]
h5grp.create_dataset(imapname+('bin%d' %bin), data=binimage(imap, bin=bin, zerokill=True))
h5chess.close()
def buildchimap(chessh5grpstr, chigrid, bin=3):
h5chess=CHESSRUNFILE()
h5grp=h5chess[chessh5grpstr]
print '&'
qimage=readh5pyarray(h5grp['qimage'])
chiimage=readh5pyarray(h5grp['chiimage'])
h5chess.close()
chimap=chimap_gen(qimage, chiimage, chigrid)
chimapname=','.join(tuple([labelnumberformat(num) for num in chigrid]))
h5chess=CHESSRUNFILE('r+')
h5grp=h5chess[chessh5grpstr+'/chimap']
if chimapname in h5grp:
del h5grp[chimapname]
if chimapname+('bin%d' %bin) in h5grp:
del h5grp[chimapname+('bin%d' %bin)]
dset=h5grp.create_dataset(chimapname, data=chimap)
dset.attrs['chigrid']=chigrid
h5grp.create_dataset(chimapname+('bin%d' %bin), data=binimage(chimap, bin=bin, zerokill=True))
h5chess.close()
def calcqchiimages(chessh5grpstr, alsocalcbin=3, equate_chi_azim=False, custom2011strecth=False):
bin=1
if bin>1:
alsocalcbin=None
onlybinsavestr='bin%d' %bin
else:
onlybinsavestr=''
h5chess=CHESSRUNFILE()
h5grp=h5chess[chessh5grpstr]
imageshape=h5grp.attrs['detectorshape'][::-1]
cal=h5grp.attrs['cal']
fit2dcenter=cal[:2]
alpharad=h5grp.attrs['alpha']*numpy.pi/180.
L=cal[2]
wl=h5grp.attrs['wavelength']
psize=h5grp.attrs['psize']
tiltdir=h5grp.attrs['tiltdirection']
if 'xrdname' in h5grp.attrs:
xrdname=h5grp.attrs['xrdname']
else:
xrdname='mar345'
h5chess.close()
center=centerindeces_fit2dcenter(fit2dcenter, detsize=imageshape[0])
if not equate_chi_azim:
center, imageshape = tiltdirectionoperation(center, imageshape, tiltdir)
center=numpy.uint16(numpy.round(numpy.float32(bincenterind_centerind(center, bin))))
sizex=imageshape[0]//bin
sizey=imageshape[1]//bin
c=int(round(center[1]))
if c>=sizey-1-c:
leftisbig=True
else:
leftisbig=False
    c=sizey-1-c #this effectively reverses the direction of the bincenter if it is closer to the LHS than the RHS
sizey=c+1 #this is the size of the qchidimage. the size of the expanded image will be 2*(c)+1
if custom2011strecth:
xctr_pixrad=lambda p:-.009544*p+537.506
x=numpy.float32(range(sizex))
stretchcctr=xctr_pixrad(numpy.abs(x-center[0]))
print center[0], numpy.min(stretchcctr), numpy.max(stretchcctr)
xvals=(x-stretchcctr)*psize #does not work with non unity bin
else:
xvals=(numpy.float32(range(sizex))-center[0])*bin*psize
yvals=numpy.float32(range(sizey))*bin*psize #these have units of mm (rho in x^ and y^ directions)
rsq=(bin*psize*sizey)**2
qimage=numpy.float32([[[q_rhosq(x**2+y**2, L, wl)*((x**2+y**2)<=rsq), azimuth_coords(x,y)] for y in yvals] for x in xvals])
azimimage=qimage[:, :, 1]
qimage=qimage[:, :, 0]
inds=numpy.where(qimage>0)
if equate_chi_azim:
chiimage=numpy.zeros(qimage.shape, dtype='float32')
chiimage[inds]=azimimage[inds]
dqchiimage=numpy.ones(qimage.shape, dtype='float32')
else:
chiimage=numpy.zeros(qimage.shape, dtype='float32')
chiimage[inds]=chi_q_azim(qimage[inds], azimimage[inds], alpharad, L, wl)
print 'alpharad, L, wl', alpharad, L, wl
chipos=chiimage[chiimage>0]
print 'chiminmax', numpy.min(chipos), numpy.max(chipos)
dqchiimage=numpy.zeros(qimage.shape, dtype='float32')
dqchiimage[inds]=numpy.abs(dqchiperpixel(qimage[inds], chiimage[inds], azimimage[inds], alpharad, L, wl, binpsize=psize*bin))
#used to not save azim. now save it as well as 2 others.
twothetaimage=numpy.zeros(qimage.shape, dtype='float32')
twothetaimage[inds]=twotheta_q(qimage[inds], wl, units='rad')
polfactimage=numpy.zeros(qimage.shape, dtype='float32')
polfactimage[inds]=polarizfactor_q_twotheta_azim(qimage[inds], twothetaimage[inds], azimimage[inds], wl)
SiASFimage=numpy.zeros(qimage.shape, dtype='float32')
SiASFimage[inds]=Si_atomsensfact(twothetaimage[inds], wl)
fullsizeimage=numpy.zeros(imageshape, dtype='float32')
smallwidth=imageshape[1]-sizey
h5chess=CHESSRUNFILE('r+')
circkillmap=readh5pyarray(h5chess[xrdname+'killmap'])
h5grp=h5chess[chessh5grpstr]
imls=[(qimage,'qimage'), (chiimage,'chiimage'), (dqchiimage,'dqchiimage'), (twothetaimage, 'twothetaimage'), (azimimage, 'azimimage'), (polfactimage, 'polfactimage'), (SiASFimage, 'SiASFimage')]
for im, name in imls:
if name=='chiimage':# and not leftisbig:
negatesmall=-1
else:#else includes all other arrays
negatesmall=1
fullsizeimage[:, :smallwidth]=im[:, smallwidth-1::-1]*negatesmall # smallwidth-1::-1 gives smallwidth indeces
# if name=='chiimage':
# negatesmall*=-1
fullsizeimage[:, smallwidth:]=im[:, :]
fullsizeimage*=circkillmap
if leftisbig:
            fullsizeimage=fullsizeimage[:,::-1] #CANNOT assign into fullsizeimage[:,:] because this creates a mirror
if not equate_chi_azim:
fullsizeimage=tiltdirectioninverseoperation(fullsizeimage, tiltdir)
if name+onlybinsavestr in h5grp:
del h5grp[name+onlybinsavestr]
dset=h5grp.create_dataset(name+onlybinsavestr, data=fullsizeimage)
if not (alsocalcbin is None):
name+='bin%d' %(alsocalcbin)
binnedimage=binimage(fullsizeimage, bin=alsocalcbin, zerokill=True)
if name in h5grp:
del h5grp[name]
h5grp.create_dataset(name, data=binnedimage)
h5chess.close()
def writenumtotxtfile(runpath, xvals, yvals, savename, header=None):
#xvals and yvals must be arrays
if header is not None:
writestr=''.join((header, '\n'))
else:
writestr=''
for i in range(xvals.size):
writestr=''.join((writestr,'%f' %xvals[i], '\t', '%f' %yvals[i], '\n'))
    filename=os.path.join(runpath,''.join((savename, '.txt'))).replace('\\','/')
fout = open(filename, "w")
fout.write(writestr)
fout.close()
def writeplotso(runpath, xvals, yvals, attrdict, xtype, savename): #xvals and yvals must be same length numpy arrays, savename is filename without extension
yvals[yvals<0]=0.
writestr=plotsoheader(attrdict, xtype)
arg=numpy.argsort(xvals)
xvals=xvals[arg]
yvals=yvals[arg]
for i in range(xvals.size):
sigdig=7
yv=yvals[i]
if yv==0:
yv+=10**(-1*sigdig)
        prec=int(round(numpy.abs(sigdig-numpy.ceil(numpy.log10(yv))))) #pad each y to ~7 significant digits, the precision of float32
        writestr=''.join((writestr,'%.6f' %xvals[i], ' ', '%.*f' %(prec, yv), '\n'))
filename=os.path.join(runpath,''.join((savename, '.plt'))).replace('\\','/')
fout = open(filename, "w")
fout.write(writestr)
fout.close()
def writeall2dimages(runpath, h5path, h5groupstr, type, typestr, colorrange=None, datsave=False, extrabin=1):
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
savename1='_'.join((os.path.split(h5path)[1][0:-3], h5groupstr, typestr, ''))
pointlist=h5analysis.attrs['pointlist']
bcknd=h5analysis.attrs['bcknd']
usebanom=(type>0 and bcknd=='minanom') or (type==3)
if usebanom:
bminanomf=h5mar['bminanomf']
if type>0:
killmapbin=getkillmap(h5analysis.attrs['killmapstr'], bin=getbin(h5analysis))
if extrabin>1:
killmapbin=binboolimage(killmapbin, bin=extrabin)
if type==1 or type==3 or type==4:
if 'min' in bcknd:
bckndarr=readh5pyarray(h5mar['bminbin%d' %getbin(h5analysis)])
elif 'lin' in bcknd:
bckndarr, blinwts=readblin(h5mar)
else:
bckndarr=readh5pyarray(h5mar['bavebin%d' %getbin(h5analysis)])
if extrabin>1:
if 'lin' in bcknd:
linbyposnhandler=lambda b: (len(b.shape)==3 and (numpy.array([binimage(arr, bin=extrabin) for arr in b]),) or (binimage(b, bin=extrabin),))[0]
bckndarr=tuple([linbyposnhandler(b) for b in bckndarr])
else:
bckndarr=binimage(bckndarr, bin=extrabin)
cb=None
btuple=None
if not colorrange is None:
norm = matplotlib.colors.Normalize(vmin=colorrange[0], vmax=colorrange[1])
if type==4:
if 'lin' in bcknd:
linbyposnhandler=lambda b: (len(b.shape)==3 and (numpy.array([arr*killmapbin for arr in b]),) or (b*killmapbin,))[0]
bckndarr=linbyposnhandler(bckndarr)
else:
bckndarr*=killmapbin
savename1='_'.join((os.path.split(h5path)[1][0:-3], h5groupstr, bcknd[0:3]))
if datsave:
if 'lin' in bcknd:
b0, b1=bckndarr
b0.tofile(str(''.join((runpath, '/',savename1, '0.dat'))))
b1.tofile(str(''.join((runpath, '/',savename1, '1.dat'))))
else:
bckndarr.tofile(str(''.join((runpath, '/',savename1, '.dat'))))
else:
if 'lin' in bcknd:
for counter, b in enumerate(bckndarr):
if len(b.shape)>2:
print 'blin dataset is "by position" or some mode that is not supported for this plotting'
continue
if not colorrange is None:
pyim=pylab.imshow(b, norm=norm)
else:
pyim=pylab.imshow(b)
pylab.savefig(str(''.join((runpath, '/',savename1, `counter`,'.png'))))
pylab.cla()
else:
if not colorrange is None:
pyim=pylab.imshow(bckndarr, norm=norm)
else:
pyim=pylab.imshow(bckndarr)
pylab.savefig(str(''.join((runpath, '/',savename1, '.png'))))
pylab.cla()
else:
for countsname in ['countsbin3', 'countsbin2', 'counts']:
if countsname in h5mar:
break
for pointind in pointlist:
imname=`pointind`
pnnn=h5mar[countsname][pointind, :, :]
if extrabin>1:
pnnn=binimage(pnnn, bin=extrabin)
if usebanom:
banom=h5mar['banom'][pointind, :, :]
btuple=(banom, bminanomf[pointind, 0], bminanomf[pointind, 1])
#if extrabin>1, banom will get further binned in bckndsubtract()
if type==0:
saveim=pnnn
elif type==1:
if usebanom:
saveim=bckndsubtract(pnnn, bckndarr, killmapbin, btype=bcknd, banom_f_f=btuple)[0]
elif 'lin' in bcknd:
saveim=bckndsubtract(pnnn, constructbckndarr_linbyposn(bckndarr, pointind), killmapbin, btype=bcknd, linweights=blinwts[pointind])[0]
else:
saveim=bckndsubtract(pnnn, bckndarr, killmapbin, btype=bcknd)[0]
elif type==2:
saveim=banom
else:
saveim=bckndsubtract(pnnn, bckndarr, killmapbin, btype=bcknd, banom_f_f=btuple)[1]
if datsave:
saveim.tofile(str(''.join((runpath, '/',savename1, imname, '.dat'))))
else:
if not colorrange is None:
pyim=pylab.imshow(saveim, norm=norm)
else:
pyim=pylab.imshow(saveim)
if cb is None:
cb = pylab.colorbar()
else:
cb.update_bruteforce(pyim)
pylab.savefig(str(''.join((runpath, '/',savename1, imname, '.png'))))
pylab.cla()
h5file.close()
def plotsoheader(attrdict, xtype):
sampleline=''.join(('Title/SampleName: ', ''.join(tuple(attrdict['elements']))))
waveline=''.join(('Wavelength: ', '%.6f' %attrdict['wavelength'] , 'nm'))
Lline=''.join(('Detector distance: ', '%.2f' %attrdict['cal'][2] , 'mm'))
temp= '\n!@!!'.join(('!@!!XRD integrated', sampleline, 'Site: Cornell University', waveline, Lline))
if xtype=='2th':
temp=''.join((temp, '\n', '!@!XDegrees', '\n!@!YCounts\n'))
else:
if xtype=='d':
temp2='d-spacing (nm)'
elif xtype=='pix':
temp2='Detector pixels'
else:
temp2='scattering vector (1/nm)'
temp=''.join((temp, '\n', '!@!!X: ', temp2,'\n!@!YCounts\n'))
return temp
def readplotso(filename, headerlines=0, splitstr=None):#can use ! in the file or headerlines to skip the header
fin = open(filename, "r")
lines=fin.readlines()
fin.close()
xtype=''
xvals=[]
yvals=[]
for line in lines[headerlines:]:
if line.startswith('!'):
if 'XDegrees' in line:
xtype='2th'
elif 'X:' in line:
if 'd-spacing' in line:
xtype='d (nm)'
elif 'pixels' in line:
                    xtype='pix'
elif len(line)>2:
if splitstr is None:
if '\t' in line:
splitstr='\t'
else:
splitstr=' '
a, b, c=line.partition(splitstr)
a=a.strip()
c=c.strip()
try:
a=eval(a)
c=eval(c)
except:
continue
xvals+=[a]
yvals+=[c]
if xtype=='':
xtype='q (1/nm)'
return numpy.float32(xvals), numpy.float32(yvals), xtype
def calcbanom(h5path, h5groupstr, bqgrid=None, bin=3):
#this function for entire pointlist
#if bqgrid is None, use the default bqgrid to calculate bimap or use an already saved bimap. if bqgrid passed then if it is the same as that of saved bimap, use saved bimap, else calc new bimap.
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
attrdict=getattr(h5path, h5groupstr)
pointlist=h5analysis.attrs['pointlist']
killmap=getkillmap(h5analysis.attrs['killmapstr'], bin=bin)
imap, qgrid=getimapqgrid(h5analysis.attrs['imapstr'], bin=bin)
bmin=readh5pyarray(h5mar['bminbin%d' %bin])
    #ALL OF THESE ARE BINNED VERSIONS BUT NAMED AS USUAL
bminanomf=numpy.ones(h5mar['bminanomf'].shape, dtype='float32')*(-1.0)
h5file.close()
killmap*=(imap!=0)
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=h5file['/'.join((h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
if 'bimap' in h5mar:
del h5mar['bimap']
if 'banom' in h5mar:
del h5mar['banom']
if 'bminanomf' in h5mar:
del h5mar['bminanomf']
banompoint=h5mar.create_dataset('banom', (h5marcounts.shape[0], bmin.shape[0], bmin.shape[1]), dtype='float32')
z=numpy.zeros((bmin.shape[0], bmin.shape[1]), dtype='float32')
for pointind in range(banompoint.shape[0]):
banompoint[pointind, :, :]=z[:, :]
    if 'bimap' in h5mar: #already deleted above; kept as a guard (the old pytables-style eval removal was broken)
        del h5mar['bimap']
bimap=None
for pointind in pointlist:
print pointind
data=h5mar['countsbin%d' %bin][pointind, :, :]
if bimap is None:
h5chess=CHESSRUNFILE()
qimage=readh5pyarray(h5chess[h5analysis.attrs['qimagestr']+'bin%d' %bin])
h5chess.close()
cbbf=calc_bmin_banom_factors(data, bmin, killmap, imap, qgrid, attrdict, qimage=qimage)
bimap=cbbf.bimap
bqgrid=cbbf.bqgrid
else:
cbbf=calc_bmin_banom_factors(data, bmin, killmap, imap, qgrid, attrdict, bimap=bimap, bqgrid=bqgrid)
bminanomf[pointind, 0]=cbbf.fmin
bminanomf[pointind, 1]=cbbf.fanom
banom=cbbf.banom
totbcknd=(cbbf.fmin*bmin+cbbf.fanom*banom)*killmap
data*=killmap
a=data<totbcknd
bminanomf[pointind, 2]=a.sum()/(1.0*killmap.sum()) #frac pixels zeroed in binned data
print 'bminanomf:', bminanomf[pointind, :]
banompoint[pointind, :, :]=banom[:, :]
print 'banom calculation complete'
h5mar.create_dataset('bminanomf', data=bminanomf)
bimappoint=h5mar.create_dataset('bimap', data=bimap)
bimappoint.attrs['bqgrid']=bqgrid
updatelog(h5analysis, ''.join(('banom background calculation for entire pointlist finished ', time.ctime())))
h5file.close()
def process1dint(h5path, h5groupstr, maxcurv=16.2, type='h5mar:icounts'):
    #makes a new solid angle array, deletes any ifnnn and makes a new one for every innn
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'h5mar' in type:
h5arrname=type.partition(':')[2]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'ifcounts' in h5mar:
del h5mar['ifcounts']
icounts=readh5pyarray(h5mar['icounts'])
qgrid=h5mar['icounts'].attrs['qgrid']
pointlist=h5analysis.attrs['pointlist']
ifcountspoint=h5mar.create_dataset('ifcounts', data=numpy.zeros(icounts.shape, dtype='float32'))
ifcountspoint.attrs['qgrid']=qgrid
if 'h5tex' in type:
h5grpname=type.partition(':')[2]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5tex=h5mar['texture']
h5texgrp=h5tex[h5grpname]
if 'ifcounts' in h5texgrp:
del h5texgrp['ifcounts']
icounts=readh5pyarray(h5texgrp['icounts'])
qgrid=h5texgrp.attrs['chigrid']
pointlist=h5texgrp.attrs['pointlist']
ifcountspoint=h5texgrp.create_dataset('ifcounts', data=numpy.zeros(icounts.shape, dtype='float32'))
ifcountspoint.attrs['chigrid']=qgrid
ifcountspoint.attrs['maxcurv']=maxcurv
solidangles=None
for pointind in pointlist:
print pointind
ifcountspoint[pointind, :]=bcknd1dprogram(qgrid, icounts[pointind, :], returnall=False, maxcurv=maxcurv)
#normalization by solidangles removed April 2009. if reinstated, then send attrdictORangle=None for 'h5tex'
updatelog(h5analysis, ''.join(('All innn 1D intensity processed. Finished ', time.ctime())))
h5file.close()
def wavepeaksearch1d(h5path, h5groupstr, minridgelength=3, minchildlength=0, wavenoisecutoff=2.5, maxqscale_localmax=1.5, minridgewtsum=100., minchildwtsum=0., pointlist=None, verbose=False, type='h5mar'):
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'h5mar' in type:
wtgrpstr='/'.join((h5groupstr, 'analysis', getxrdname(h5analysis), 'wavetrans1d'))
if pointlist is None:
pointlist=h5analysis.attrs['pointlist']
elif 'h5tex' in type:
h5grpname=type.partition(':')[2]
h5tex=h5mar['texture']
h5texgrp=h5tex[h5grpname]
if pointlist is None:
pointlist=h5texgrp.attrs['pointlist']
wtgrpstr='/'.join((h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname, 'wavetrans1d'))
h5file.close()
errormsg=ridges_wavetrans1d(h5path, wtgrpstr, noiselevel=wavenoisecutoff, pointlist=pointlist)
if not errormsg is None:
return errormsg
errormsg=peaks_ridges1d(h5path, wtgrpstr, minridgelength=minridgelength, minchildlength=minchildlength, maxqscale_localmax=maxqscale_localmax, minridgewtsum=minridgewtsum, minchildwtsum=minchildwtsum, pointlist=pointlist, verbose=verbose)
if not errormsg is None:
return errormsg
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
updatelog(h5analysis, ''.join((type, 'wavelet 1d peak search finished ', time.ctime())))
h5file.close()
def ridges_wavetrans1d(h5path, h5wtgrpstr, noiselevel=None, numscalesskippedinaridge=1.5, pointlist=None):
h5file=h5py.File(h5path, mode='r+')
wtgrp=h5file[h5wtgrpstr]
qscalegrid=wtgrp.attrs['qscalegrid']
qscalevals=scale_scalegrid_ind(qscalegrid)
qposngrid=wtgrp.attrs['qposngrid']
qposnint=qposngrid[1]
qsindlist=[2*max(int(numpy.ceil(1.*qs/qposnint)), 1) for qs in qscalevals[::-1]]
wtpoint=wtgrp['wavetrans']
nonoisecut=(noiselevel is None)
ridges_pointlist=[]
for pointind in pointlist:
temp=wtpoint[pointind, :, :]#reverse first index so that it goes from widest to smallest scale
wtrev=temp[::-1, :]
if nonoisecut:
noiselevel=wtrev.min()
ridges_pointlist+=[perform_ridges_wavetrans1d(wtrev, qsindlist, noiselevel, numscalesskippedinaridge=numscalesskippedinaridge)]
numr_pointlist=[len(r) for r in ridges_pointlist]
maxnr=max(numr_pointlist)
filler=[[32767]*wtrev.shape[0]]*maxnr
for r in ridges_pointlist:
r+=filler[:len(filler)-len(r)]
ridgessav=32767*numpy.ones((wtpoint.shape[0], maxnr, wtrev.shape[0]), dtype='int16')
ridgessav[numpy.array(pointlist), :, :]=numpy.int16(ridges_pointlist)
if 'ridges' in wtgrp:
del wtgrp['ridges']
wtgrp.create_dataset('ridges', data=ridgessav)
wtgrp.attrs['noiselevel']=noiselevel
wtgrp.attrs['numscalesskippedinaridge']=numscalesskippedinaridge
h5file.close()
def peaks_ridges1d(h5path, h5wtgrpstr, minridgelength=3, minchildlength=0., maxqscale_localmax=1.5, minridgewtsum=100., minchildwtsum=0., pointlist=[], verbose=False): #the qwidthrange is in /nm and the ridge must have a local maximum in that range
minridgelength=max(1, minridgelength)
h5file=h5py.File(h5path, mode='r+')
wtgrp=h5file[h5wtgrpstr]
qscalegrid=wtgrp.attrs['qscalegrid']
ridgeqscalevals=scale_scalegrid_ind(qscalegrid)[::-1] #ordered big->small
ridgescalecritind=numpy.where(ridgeqscalevals<=maxqscale_localmax)[0]
if len(ridgescalecritind)<2:
h5file.close()
print 'aborted: the set of qscales does not include more than 1 point in the specified qwidthrange'
return 'aborted: the set of qscales does not include more than 1 point in the specified qwidthrange'
    ridgescalecritind=ridgescalecritind[0] #the first index in this big-to-small ordering, i.e. the largest qscale satisfying the criterion
wtpoint=wtgrp['wavetrans']
ridgespoint=wtgrp['ridges']
peaks_pointlist=[]
for pointind in pointlist:
wt=wtpoint[pointind, :, :]
ridges=ridgespoint[pointind, :, :]
peaks_pointlist+=[perform_peaks_ridges1d(wt, ridges, ridgescalecritind=ridgescalecritind, minridgelength=minridgelength, minchildlength=minchildlength, minridgewtsum=minridgewtsum, minchildwtsum=minchildwtsum, verbose=verbose)]
#print 'peaks:', peaks_pointlist
#h5file.close()
#return
numpks_pointlist=[len(p) for p in peaks_pointlist]
maxnp=max(numpks_pointlist)
filler=[[32767]*2]*maxnp
for p in peaks_pointlist:
p+=filler[:len(filler)-len(p)]
peakssav=numpy.ones((wtpoint.shape[0], 2, maxnp), dtype='uint16')*32767
def pksort(arr):
sortind=arr[1].argsort()
return numpy.uint16([arr[0, sortind], arr[1, sortind]])
peakssav[numpy.array(pointlist), :, :]=numpy.uint16([pksort(numpy.uint16(p).T) for p in peaks_pointlist])
if 'peaks' in wtgrp:
del wtgrp['peaks']
wtgrp.create_dataset('peaks', data=peakssav)
wtgrp.attrs['minridgelength']=minridgelength
wtgrp.attrs['maxqscale_localmax']=maxqscale_localmax
wtgrp.attrs['minridgewtsum']=minridgewtsum
wtgrp.attrs['minchildlength']=minchildlength
wtgrp.attrs['minchildwtsum']=minchildwtsum
h5file.close()
def getchiminmax(chessh5grpstr):
h5chess=CHESSRUNFILE()
h5grp=h5chess[chessh5grpstr]
cal=h5grp.attrs['cal']
alpharad=h5grp.attrs['alpha']*numpy.pi/180
L=cal[2]
wl=h5grp.attrs['wavelength']
qvals=set([])
for dset in h5grp['imap'].iterobjects():
if isinstance(dset, h5py.Dataset) and ('qgrid' in dset.attrs):
qgrid=dset.attrs['qgrid']
qvals|=set(q_qgrid_ind(qgrid))
h5chess.close()
if len(qvals)==0:
print 'no imaps found to help with chimap range'
return (0, 1)
chivals=numpy.array([[chi_q_azim(q, azim, alpharad, L, wl) for q in qvals] for azim in [0, numpy.pi/2.0, numpy.pi, 1.5*numpy.pi]])*180.0/numpy.pi
    return (numpy.min(chivals), numpy.max(chivals)) #try obvious combinations of q and azim to find the max and min chivals so that this algorithm is robust to simple changes in experiment geometry
def readsampleinfotxt(filename):
fin = open(filename, "r")
lines=fin.readlines()
fin.close()
headings=[]
vals=[]
for line in lines:
temp=line
if line[0].isalpha():
while len(temp.partition('\t')[2])>0:
temp2, garbage, temp=temp.partition('\t')
headings+=[temp2]
headings+=[temp.partition('\n')[0]]
else:
rowvals=[]
while len(temp.partition('\t')[2])>0:
temp2, garbage, temp=temp.partition('\t')
rowvals+=[temp2]
rowvals+=[temp.partition('\n')[0]]
vals+=[rowvals]
vals=numpy.float32(vals).T
    headings.pop(0) #drop the heading of the spec-index column
    return headings, list(numpy.uint16(numpy.round(vals[0]))), vals[1:] #headings, spec image numbers, remaining data columns
def importsampleinfotoh5(h5path, h5groupstr, importfilepath):#zeroth column of arr MUST be spec imagenumber
head, pointinds, vals=readsampleinfotxt(importfilepath)
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'otherdata' in h5analysis:
h5otherdata=h5analysis['otherdata']
else:
h5otherdata=h5analysis.create_group('otherdata')
attrdict=getattr(h5path, h5groupstr)
numpts=numpts_attrdict(attrdict)
info=numpy.empty(numpts, dtype='float32')
for nam, arr in zip(head, vals):
info=numpy.ones(numpts, dtype='float32')*numpy.nan
for ind, v in zip(pointinds, arr):
info[ind]=v
if nam in h5otherdata:
del h5otherdata[nam]
h5otherdata.create_dataset(nam, data=info)
print "'Other Data' arrays created for ", ', '.join(head)
h5file.close()
def getpointinfo(h5path, h5groupstr, types=[]):#returns several types of info for each spec point. In addition to x,z substrate coordinates there are deposition profile (DP), XRF and OTHER types of data - all arrays indexed by spec index that become dictionary entries. Some 'types' yield several arrays, e.g. mol fractions. The boolean 'success' lets you know whether all the requested types were found.
attrdict=getattr(h5path, h5groupstr)
numpts=numpts_attrdict(attrdict)
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'depprof' in h5analysis:
h5depprof=h5analysis['depprof']
gunpropdict=ReadGunPropDict(h5analysis)
if 'xrf' in h5analysis:
h5xrf=h5analysis['xrf']
if 'otherdata' in h5analysis:
h5otherdata=h5analysis['otherdata']
alltypes=['x(mm)', 'z(mm)', 'DPnmolscm2ALL', 'DPmolfracALL', 'DPmassfracALL', 'DPgcm3', 'DPnmolcm2', 'DPnm', 'XRFmolfracALL', 'XRFnm', 'XRFareaALL', 'XRFsigareaALL', 'OTHER']
if types==[]:
types=alltypes
else:
types=[ty for ty in types if ty in alltypes]
d={}
success=True
for ty in types:
try:
if ty.startswith('DP'):
if 'ALL' in ty:
temp=ty.partition('DP')[2].partition('ALL')[0]
for i, el in zip(gunpropdict['guninds'], gunpropdict['symbol']):
d['DP%s_%s' %(temp, el)]=readh5pyarray(h5depprof['%sgun%d' %(temp, i)])
else:
temp=ty.partition('DP')[2]
d[ty]=readh5pyarray(h5depprof[temp])
elif ty.startswith('XRF'):
if 'ALL' in ty:
if 'area' in ty:
for dset in h5xrf['areas'].iterobjects():
if isinstance(dset, h5py.Dataset):
temp=ty.partition('ALL')[0]
d['%s_%s' %(temp, dset.name.rpartition('/')[2])]=readh5pyarray(dset)[:, 'sig' in ty]
else:
temp=ty.partition('XRF')[2].partition('ALL')[0]
for el, arr in zip(h5xrf.attrs['elements'], readh5pyarray(h5xrf[temp]).T):
d['XRF%s_%s' %(temp, el)]=arr
else:
temp=ty.partition('XRF')[2]
d[ty]=readh5pyarray(h5xrf[temp])
elif ty.startswith('OTHER'):
for dset in h5otherdata.iterobjects():
if isinstance(dset, h5py.Dataset) and len(dset.shape)==1 and dset.shape[0]==numpts:
d['OTHER_%s' %dset.name.rpartition('/')[2]]=readh5pyarray(dset)
elif ty=='x(mm)' or ty=='z(mm)':
d['x(mm)']=attrdict['x']
d['z(mm)']=attrdict['z']
else:
print 'WARNING: pointinfo type ', ty, ' not understood'
success=False
except: #if the data doesn't exist then skip and go on
print 'WARNING: ', ty, 'not found'
success=False
continue
h5file.close()
return d, success
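#sorts getpointinfo keys for display: z(mm) and x(mm) first, then DP, then XRF
#(mol fractions before other XRF quantities), then OTHER, with raw XRF areas and
#their sigmas last - the metric below scores each key and the list sorts descending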
def pointinfodictkeysort(d):
def metric(k):
v=1
if k=='x(mm)':
return 10000
if k=='z(mm)':
return 10001
v*=(1+999*('DP' in k))
v*=(1+99*(('XRF' in k) and not ('area' in k)))
v*=(1+9*('OTH' in k))
v+=(('XRF' in k) and ('area' in k) and not ('sig' in k))
v+='molfrac' in k
return v
kv=[[k, metric(k)] for k in d.keys()]
kv.sort(key=operator.itemgetter(1), reverse=True)
return [k[0] for k in kv]
def binmapsinh5chess(chessh5grpstr, bin=3):
h5chess=CHESSRUNFILE('r+')
h5grp=h5chess[chessh5grpstr]
print chessh5grpstr, h5grp.listitems()
grps=[h5grp['imap'], h5grp['chimap'], h5grp['killmap']]
cmdstr=['binimage(arr, bin=bin, zerokill=True)', 'binimage(arr, bin=bin, zerokill=True)', 'binboolimage(arr, bin=bin)']
for grp, cs in zip(grps, cmdstr):
for dset in grp.iterobjects():
if isinstance(dset, h5py.Dataset) and not ('bin' in dset.name):
binname=dset.name+('bin%d' %bin)
if not (binname in grp):
arr=readh5pyarray(dset)
h5grp.create_dataset(binname, data=eval(cs))
h5chess.close()
def buildwaveset1d(qscalegrid, qposngrid, qgrid, maxfixenfrac=0.12, enfractol=0.0, maxoverenergy=None):
    ENERGY=0.57457 #this is constant for all scales and translations
maxfixenfrac+=1 #this notes a discrepancy between fixenfrac in GUI and in code and saved attribute
if maxoverenergy is None:
maxoverenergy=maxfixenfrac
waveattrdict={'qgrid':qgrid, 'qscalegrid':qscalegrid, 'qposngrid':qposngrid,'ENERGY':ENERGY, 'maxfixenfrac':maxfixenfrac, 'maxoverenergy':maxoverenergy, 'enfractol':enfractol}
dq=qgrid[1]
waveset=waveletset1d(qgrid, qscalegrid, qposngrid)
a, b, c=waveset.shape
fixenarr=numpy.empty((a, b), dtype='float32')
for i in range(a):
for j in range(b):
en=((waveset[i, j, :]**2)*dq).sum()
fixenfrac=ENERGY/en
if fixenfrac<maxfixenfrac and 1/fixenfrac<maxoverenergy:
if en<((1.0-enfractol)*ENERGY) or en>((1.0+enfractol)*ENERGY):
waveset[i, j, :]=wave1dkillfix(waveset[i, j, :], ENERGY, dq=dq)*dq
else:
fixenfrac=0.0
waveset[i, j, :]*=dq
fixenarr[i, j]=fixenfrac
else:
waveset[i, j, :]*=numpy.nan
fixenarr[i, j]=numpy.nan
if numpy.isnan(fixenarr).sum()==fixenarr.size:
print 'every wavelet calculation resulted in error. check energy. nothing saved'
return
h5wave=WAVESET1dFILE('r+')
grpname='_'.join([','.join([labelnumberformat(num) for num in qscalegrid]), ','.join([labelnumberformat(num) for num in qposngrid]), ','.join([labelnumberformat(num) for num in qgrid])])
if grpname in h5wave:
del h5wave[grpname]
wavegrp=h5wave.create_group(grpname)
for key, val in waveattrdict.iteritems(): #because h5py doesn't have bools - this can be removed when new version of h5py arrives
if isinstance(val, bool):
wavegrp.attrs[key]=int(val)
else:
wavegrp.attrs[key]=val
wavegrp.create_dataset('waveset', data=waveset)
wavegrp.create_dataset('fixenfrac', data=fixenarr)
h5wave.close()
def wavetrans1d(h5path, h5groupstr, wavesetname, type='h5mar:icounts'):#wavetrans qgrid can be subset of icounts qgrid but not vice versa
# print "h5path='", h5path, "'"
# print "h5groupstr='", h5groupstr, "'"
# print "wavesetname='", wavesetname, "'"
h5wave=WAVESET1dFILE()
wavegrp=h5wave[wavesetname]
waveset=wavegrp['waveset'][:, :, :]
waveqgrid=wavegrp.attrs['qgrid']
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'h5mar' in type:
h5arrname=type.partition(':')[2]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'wavetrans1d' in h5mar:
del h5mar['wavetrans1d']
wtgrp=h5mar.create_group('wavetrans1d')
icountspoint=h5mar[h5arrname]
qgrid=icountspoint.attrs['qgrid']
if 'h5tex' in type:
h5grpname=type.partition(':')[2]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5tex=h5mar['texture']
h5texgrp=h5tex[h5grpname]
if 'wavetrans1d' in h5texgrp:
del h5texgrp['wavetrans1d']
wtgrp=h5texgrp.create_group('wavetrans1d')
icountspoint=h5texgrp['icounts']
qgrid=h5texgrp.attrs['chigrid']
wtgrp.attrs['wavesetname']=wavesetname
qscalegrid=wavegrp.attrs['qscalegrid']
for key, val in wavegrp.attrs.iteritems():
wtgrp.attrs[key]=val
wtgrp.create_dataset('fixenfrac', data=wavegrp['fixenfrac'][:, :])
h5wave.close()
pointlist=h5analysis.attrs['pointlist']
a, b, c =waveset.shape # num scales, num posn, length of data
dfltarr=numpy.empty((a, b), dtype='float32')*numpy.nan
icind=numpy.array([qval in q_qgrid_ind(waveqgrid) for qval in q_qgrid_ind(qgrid)])
wt=wtgrp.create_dataset('wavetrans', (icountspoint.shape[0], a, b))
#for ind in set(range(wt.shape[0]))-set(pointlist):
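    #the pointlist loop below computes wt[p, i, j] = sum_k(waveset[i, j, k]*data[k])/scale_i;
    #the dq integration weight is already folded into waveset by buildwaveset1d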
for ind in range(wt.shape[0]):
wt[ind, :, :]=dfltarr[:, :]
for pointind in pointlist:
data=icountspoint[pointind][icind]
        datainds=numpy.where(numpy.logical_not(numpy.isnan(data))) # this violates the philosophy that the wavelets should be corrected beforehand - active wavelet stretching could be added here
#wt[pointind, :, :]=numpy.float32([[(vec*data).sum() for vec in arr] for arr in waveset])
print '*', pointind, data.shape, waveset.shape, scale_scalegrid_ind(qscalegrid).shape
wt[pointind, :, :]=numpy.float32([[(vec*data)[datainds].sum()/scale for vec in arr] for arr, scale in zip(waveset, scale_scalegrid_ind(qscalegrid))])
h5file.close()
def peakfit1d(h5path, h5groupstr, windowextend_hwhm=3, peakshape='Gaussian', critresidual=.2, use_added_peaks=False, type='h5mar'):
try:
peakfcn=eval(peakshape)
except:
print 'ABORTED: did not understand peak shape "',peakshape,'" - this must be an already defined function.'
return 'ABORTED: did not understand peak shape "',peakshape,'" - this must be an already defined function.'
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'h5mar' in type:
wtgrpstr='/'.join((h5groupstr, 'analysis', getxrdname(h5analysis), 'wavetrans1d'))
pointlist=h5analysis.attrs['pointlist']
ifcountspoint=h5mar['ifcounts']
numpts=ifcountspoint.shape[0]
qgrid=h5mar['ifcounts'].attrs['qgrid']
h5grpstr='/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))
elif 'h5tex' in type:
h5grpname=type.partition(':')[2]
h5tex=h5mar['texture']
h5texgrp=h5tex[h5grpname]
pointlist=h5texgrp.attrs['pointlist']
wtgrpstr='/'.join((h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname, 'wavetrans1d'))
h5grpstr='/'.join((h5groupstr, 'analysis', getxrdname(h5analysis), 'texture', h5grpname))
ifcountspoint=h5texgrp['ifcounts']
numpts=ifcountspoint.shape[0]
qgrid=h5texgrp['ifcounts'].attrs['chigrid']
wtgrp=h5file[wtgrpstr]
qvals=q_qgrid_ind(qgrid)
qscalegrid=wtgrp.attrs['qscalegrid']
qposngrid=wtgrp.attrs['qposngrid']
wtpeakspoint=wtgrp['peaks']
if 'additionalpeaks' in h5file[h5grpstr] and use_added_peaks:
addpeaks=readh5pyarray(h5file[h5grpstr]['additionalpeaks'])
else:
addpeaks=None
#pointlist=[41, 49]#***
    qshsss=[] #per point: q, scale, height, sigma q, sigma scale, sigma height
for peakind in pointlist:
#print 'point', peakind
counts=ifcountspoint[peakind]
notnaninds=numpy.where(numpy.logical_not(numpy.isnan(counts)))[0]
wtpeakdata=wtpeakspoint[peakind, :, :]
qscales=wtpeakdata[0, :]
qposns=wtpeakdata[1, :]
qscales=qscales[qscales!=32767]
qposns=qposns[qposns!=32767]
qscales=scale_scalegrid_ind(qscalegrid, qscales)
#print qscales
qscales*=0.36 #for wavelet->Gaussian HWHM
qposns=q_qgrid_ind(qposngrid, qposns)
if not (addpeaks is None):
addpeakinds=numpy.where(numpy.uint16(numpy.round(addpeaks[:, 0]))==peakind)
if len(addpeakinds[0])>0:
#print addpeaks[addpeakinds, 1], '**',addpeaks[addpeakinds, 2]
qscales=numpy.append(qscales, addpeaks[addpeakinds, 1])
qposns=numpy.append(qposns, addpeaks[addpeakinds, 2])
sortinds=qposns.argsort()
qposns=qposns[sortinds]
qscales=qscales[sortinds]
#print qposns
#print qscales
if len(qscales)==0:
qshsss+=[numpy.float32([[]])]
continue
qscales=numpy.float32([max(qs, .25) for qs in qscales])#this is intended for overlapping peaks where wt will give very low qscale.
indrangeandpeakinds=windows_peakpositions(qgrid, qscales, qposns, windowextend_qscales=windowextend_hwhm)
#print 'windows', indrangeandpeakinds
pars=None
sigs=None
for indrange, peakinds in indrangeandpeakinds:
startpars=[[qposns[i], qscales[i], counts[notnaninds[numpy.argmin((notnaninds-ind_qgrid_q(qgrid, qposns[i]))**2)]]] for i in peakinds]
#print 'startpars', startpars
inds=list(set(notnaninds)&set(range(indrange[0], indrange[1])))
if len(inds)==0:
                print 'THIS WILL CRASH BECAUSE THERE ARE NO VALID DATA POINTS IN THIS WINDOW - THE DATA INDEX ENDPOINTS BEFORE NANs WERE REMOVED WERE ', indrange[0], indrange[1]
p, s, r=fitpeakset(qvals[inds], counts[inds], startpars, peakfcn)
if pars is None:
pars=p[:, :]
sigs=s[:, :]
else:
pars=numpy.concatenate((pars,p),axis=0)
sigs=numpy.concatenate((sigs,s),axis=0)
qshsss+=[numpy.concatenate((pars.T,sigs.T),axis=0)]
h5file.close()
#print qshsss
#return
maxnumpeaks=max([arr.shape[1] for arr in qshsss])
savearr=numpy.ones((numpts, 6, maxnumpeaks), dtype='float32')*numpy.nan
for pointind, arr in zip(pointlist, qshsss):
savearr[pointind, :, :arr.shape[1]]=arr[:, :]
h5file=h5py.File(h5path, mode='r+')
h5grp=h5file[h5grpstr]
if 'pkcounts' in h5grp:
del h5grp['pkcounts']
pkcounts=h5grp.create_dataset('pkcounts', data=savearr)
pkcounts.attrs['windowextend_hwhm']=windowextend_hwhm
pkcounts.attrs['peakshape']=peakshape
pkcounts.attrs['critresidual']=critresidual
if not (addpeaks is None):
h5grp['additionalpeaks'].attrs['usedinfitting']=1
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
updatelog(h5analysis, ''.join((type, ' peak fitting. ', time.ctime())))
h5file.close()
def getpeaksinrange(h5path, h5groupstr, indlist=None, qmin=0, qmax=1000, returnonlyq=True, performprint=False, returnonlytallest=True):
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
pkcounts=readh5pyarray(h5mar['pkcounts'])
pointlist=h5analysis.attrs['pointlist']
returnpointinds=[]
returnpeakinfo=[]
if indlist is None:
indlist=pointlist
for i in indlist:
a, b, c, d, e, f=peakinfo_pksavearr(pkcounts[i,:,:], fiterr=True)
goodinds=numpy.where((a>=qmin)&(a<=qmax))
if len(goodinds[0])>0:
if returnonlytallest:
printindlist=[goodinds[0][myargmax(c[goodinds])]]
else:
printindlist=goodinds[0]
for printind in printindlist:
returnpointinds+=[i]
if returnonlyq:
returnpeakinfo+=[a[printind]]
if performprint:
print i, '\t', a[printind]
else:
returnpeakinfo+=[[a[printind], b[printind], c[printind], d[printind], e[printind], f[printind]]]
if performprint:
print '\t'.join((`i`, `a[printind]`, `b[printind]`, `c[printind]`, `d[printind]`, `e[printind]`, `f[printind]`))
continue
if performprint:
print '\t'*6*(1-returnonlyq)
h5file.close()
    return returnpointinds, numpy.float32(returnpeakinfo) #if an index in indlist had no peaks then it is absent from returnpointinds
def writedepprof(h5path, h5groupstr, gunpropdict, mappedquantdict):
h5file=h5py.File(h5path, mode='r+')
#node=h5file[h5groupstr]
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'depprof' in h5analysis:
del h5analysis['depprof']
h5depprof=h5analysis.create_group('depprof')
for key, val in gunpropdict.iteritems():
if isinstance(val, list) and len(val)==0:
continue
h5depprof.attrs[key]=val
for key, val in mappedquantdict.iteritems():
print key, type(val)
if isinstance(val, numpy.ndarray):
if key in h5depprof:
del h5depprof[key]
h5depprof.create_dataset(key, data=val)
h5file.close()
def get_elMd_el(ellist): #ellist should be a list of element symbols. If el is not recognized it will not be in the return list
#elsymbols=[Elemental.table[i].symbol for i in range(len(Elemental.table))] #could alternatively use PyMEl.ElementsInfo which is alist of the elements. for each element there is a list where symbol, M, d*1000 are at indeces 0, 5, 6
smd=[[l[0],l[5],l[6]/1000.] for l in PyMEl.ElementsInfo]
elsymbols=map(operator.itemgetter(0),smd)
elmass=map(operator.itemgetter(1),smd)
eldens=map(operator.itemgetter(2),smd)
    temp=[[el, elsymbols.index(el)] for el in ellist if el in elsymbols] #if element info was not provided it will be looked up in the lines below, but if the element symbol is not found it will be excluded from analysis
if len(temp)==0:
print 'ABORTING: could not find info on any of the elements'
return None
if len(temp)<len(ellist):
print 'SOME ELEMENTS NOT RECOGNIZED - THEY WERE SKIPPED'
#return [[el, Elemental.table[elind].atomic_mass.value, Elemental.table[elind].density_solid.value] for el, elind in temp]
return [[el, elmass[elind], eldens[elind]] for el, elind in temp]
def getinfoforxrf(h5path, h5groupstr):
h5file=h5py.File(h5path, mode='r')
#node=h5file[h5groupstr]
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
gunpropdict=ReadGunPropDict(h5analysis)
if gunpropdict is None:
gunpropdict={}
gunpropdict['symbol']=getattr(h5path, h5groupstr)['elements']
if not (set(['CenterMolRates', 'M', 'd'])<=set(gunpropdict.keys())):
temp=get_elMd_el(gunpropdict['symbol'])
if temp is None:
elsym, elM, eld=([], [], [])
else:
elsym, elM, eld = zip(*temp)
gunpropdict['symbol']=list(elsym)
gunpropdict['M']=list(elM)
gunpropdict['d']=list(eld)
gunpropdict['CenterMolRates']=[2.]*len(elsym)#default to 2nmol/s/cm2 for every element
return gunpropdict, None, None
h5depprof=h5analysis['depprof']
comp=[]
for i in range(4):
s='molfracgun%d' %i
if s in h5depprof:
comp+=[readh5pyarray(h5depprof[s])]
comp=numpy.float32(comp).T
nm=readh5pyarray(h5depprof['nm'])
h5file.close()
return gunpropdict, comp, nm
#def XRFanalysis(h5path, h5groupstr, elements, BckndCounts, FluxCal, DepProfEst, Underlayer, Sicm, SecondaryAction='Notify', ICcts='IC3', cfgpath=None):
def XRFanalysis(h5path, h5groupstr, elements, quantElTr, eld, elM, approxstoich, BckndCounts, RepEn, cfgpath, otherElTr, FluxCal, DepProfEst, Underlayer, Sicm, time, dlambda='', mflambda='', SecondaryAction='Notify'):
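    #dlambda and mflambda are optional source strings for density and mass-fraction
    #lambdas, eval'd below; note the per-point branch triggers on the substring 'i'
    #anywhere in the string, in which case it must be a lambda of the point index
    #returning a composition function, otherwise a composition function directly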
attrdict=getattr(h5path, h5groupstr)
pointlist=attrdict['pointlist']
infoforxrf=getinfoforxrf(h5path, h5groupstr)
h5file=h5py.File(h5path, mode='r')
h5mcacountspoint=h5file['/'.join((h5groupstr, 'measurement/MCA/counts'))]
counts=readh5pyarray(h5mcacountspoint)
timearr=readh5pyarray(h5file['/'.join((h5groupstr, 'measurement/scalar_data', time))])
h5file.close()
be=eV_nm(attrdict['wavelength'])/1000.0
#pointlist=numpy.array([112])
est_film_comp=approxstoich
est_film_nm = 100.0
if isinstance(FluxCal, float):
flux=FluxCal
pointind_fluxcal=None
elif isinstance(FluxCal, str) and FluxCal.startswith("CalUsing"):
        if infoforxrf[2] is None or set(elements)!=set(infoforxrf[0]['symbol']):#compare against the deposition-profile element symbols
print 'ABORTING: '+FluxCal+' requested but the DepProf data is not available.'
return 'ABORTING: '+FluxCal+' requested but the DepProf data is not available.'
flux=None
pointind_fluxcal=eval(FluxCal.partition("CalUsing")[2])
est_film_nm = infoforxrf[2][pointind_fluxcal]
else:
pointind_fluxcal=None
flux=None
if DepProfEst:
if infoforxrf[1] is None:
print 'ABORTING: DepProf estimates for film comp and thickness were requested but the DepProf data is not available.'
return 'ABORTING: DepProf estimates for film comp and thickness were requested but the DepProf data is not available.'
est_film_comp = infoforxrf[1][pointlist]
est_film_nm = infoforxrf[2][pointlist]
global d#even though eld and elM are passed to XRFanalyzer, any global variables used in the lambda functions must be defined here
global M #these get deleted at the end
d=numpy.float32(eld)
M=numpy.float32(elM)
lambdafcns=[None, None]
lambdastrlist=[dlambda, mflambda]
testcomp=numpy.ones(len(quantElTr), dtype='float32')/len(quantElTr)
for count, lstr in enumerate(lambdastrlist):
if lstr!='':
try:
if 'i' in lstr:
f=eval(lstr)
lambdafcns[count]=[f(i) for i in pointlist]
justfortest=[f(testcomp) for f in lambdafcns[count]]
else:
lambdafcns[count]=eval(lstr)
justfortest=lambdafcns[count](testcomp)
except:
del d
del M
                errstr='ABORTING XRF CALCULATION: problem with '+((count==0 and 'density') or 'massfrac')+' lambda function'
                print errstr
                return errstr
xrfan=XRFanalyzer(counts, elements, quantElTr, eld, elM, BckndCounts=BckndCounts, RepEn=RepEn, cfgpath=cfgpath, otherElTr=otherElTr, pointlist=list(pointlist), beamenergy=be, est_film_comp=est_film_comp, est_film_nm = est_film_nm, SecondaryAction=SecondaryAction, Sicm=Sicm, Underlayer_El_d_nm=Underlayer, pointind_fluxcal=pointind_fluxcal, flux=flux, daqtime=timearr, densfcn=lambdafcns[0], mffcn=lambdafcns[1])
h5file=h5py.File(h5path, mode='r+')
h5node=h5file['/'.join((h5groupstr, 'analysis'))]
if 'xrf' in h5node:
del h5node['xrf']
h5xrf=h5node.create_group('xrf')
if 'areas' in h5xrf:
del h5xrf['areas']
h5xrfareas=h5xrf.create_group('areas')
h5xrf.attrs['elements']=elements
h5xrf.attrs['quantElTr']=quantElTr
h5xrf.attrs['d']=d
h5xrf.attrs['M']=M
h5xrf.attrs['BckndCounts']=BckndCounts
h5xrf.attrs['RepEn']=RepEn
h5xrf.attrs['dlambda']=dlambda
h5xrf.attrs['mflambda']=mflambda
h5xrf.create_dataset('molfrac', data=xrfan.comp_res)
h5xrf.create_dataset('nm', data=xrfan.thick_res)
h5xrf.create_dataset('cfg', data=numpy.array(xrfan.cfgstr))
pks=xrfan.resultdict[pointlist[0]].keys()
pks=[(p, p.replace(' ',''), numpy.zeros((counts.shape[0], 2), dtype='float32')) for p in pks]
for k, nam, arr in pks:
for ind in pointlist:
arr[ind, 0]=xrfan.resultdict[ind][k]['fitarea']
arr[ind, 1]=xrfan.resultdict[ind][k]['sigmaarea']
h5xrfareas.create_dataset(nam, data=arr)
h5file.close()
if True: #to make this a different code block
del d
del M
def elements_elstr(elstr, min_num_els=3):
elstrlist=[]
inds=[]
for count, ch in enumerate(elstr):
if ch.isupper():
inds+=[count]
inds+=[count+1]
for i, j in zip(inds[:-1], inds[1:]):
elstrlist+=[elstr[i:j]]
if len(elstrlist)<min_num_els:
elstrlist+=['X']*(min_num_els-len(elstrlist))
el_gun=[]
for count, el in enumerate(elstrlist):
if el!='X':
el_gun+=[[el, count]]
return elstrlist, el_gun
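#Illustrative sketch: elements_elstr('RuPt') returns (['Ru', 'Pt', 'X'], [['Ru', 0], ['Pt', 1]])
#because min_num_els=3 pads the symbol list with 'X' placeholders while el_gun lists
#only the real elements with their gun indices.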
#def elements_elstr(elstr, min_num_els=3): #from Nov 2010 but reverted to above June2010 which may have worked better??
# if isinstance(elstr, str):
# elstrlist=[]
# inds=[]
# for count, ch in enumerate(elstr):
# if ch.isupper():
# inds+=[count]
# inds+=[count+1]
# for i, j in zip(inds[:-1], inds[1:]):
# elstrlist+=[elstr[i:j]]
# if len(elstrlist)<min_num_els:
# elstrlist+=['X']*(min_num_els-len(elstrlist))
# el_gun=[]
# for count, el in enumerate(elstrlist):
# if el!='X':
# el_gun+=[[el, count]]
# else:
# elstr=numpy.array(elstr)
# ginds=numpy.where(numpy.logical_and(elstr!='X', elstr!=''))
# if len(ginds[0])<min_num_els:
# elstr[elstr=='']='X'
# elstrlist=list(elstr)+['X']*(min_num_els-len(elstr))
# else:
# elstrlist=list(elstr[ginds])
# el_gun=[[e, g] for e, g in zip(elstr[ginds], ginds[0])]
# return elstrlist, el_gun
def getcomps(h5path, h5groupstr, elstrlist=None, infotype='DPmolfracALL', normalize=True, num_els=None): #elstrlist is a list of element symbols or 'X' or whatever and infotype should be a 'type' of getpointinfo ending in 'ALL'; if an element is not found in the data then its composition will be zero. If elstrlist is passed, num_els is ignored, otherwise the array is guaranteed to have num_els compositions that are normalized where possible
if elstrlist is None:
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'depprof' in h5analysis: #even if getting XRF data, use the element list from dep profiles if possible
elstrlist=list(h5analysis['depprof'].attrs['symbol'])
if not num_els is None:
if len(elstrlist)>=num_els:
elstrlist=elstrlist[:num_els]
else:# not enough elements so make a list of elements, filling in the ones we have in the appropriate gunind spots
guninds=h5analysis['depprof'].attrs['guninds'][:]
temp=copy.copy(elstrlist)
elstrlist=[]
for i in range(num_els):
if i in guninds:
                            elstrlist+=[temp[numpy.where(guninds==i)[0][0]]]
else:
elstrlist+=['X']
elif 'xrf' in h5analysis:
elstrlist=list(h5analysis['xrf'].attrs['elements'])
if not num_els is None:
if len(elstrlist)>=num_els:
elstrlist=elstrlist[:num_els]
else:
elstrlist+=['X']*(num_els-len(elstrlist))
h5file.close()
infodict, success=getpointinfo(h5path, h5groupstr, types=[infotype])
if not success:
return None, None
keyroot=infotype.partition('ALL')[0]
foundkeys=[(count, keyroot+'_'+el) for count, el in enumerate(elstrlist) if keyroot+'_'+el in infodict.keys()]
if len(foundkeys)==0:
return None, None
comps=numpy.zeros((len(infodict[foundkeys[0][1]]), len(elstrlist)), dtype='float32')
for (ind, k) in foundkeys:
comps[:, ind]=infodict[k][:]
if normalize:
tot=comps.sum(axis=1)
tot[tot==0.]=1.
comps=numpy.float32([c/t for c, t in zip(comps, tot)])
return elstrlist, comps
def getternarycomps(h5path, h5groupstr, elstr=None, infotype='DPmolfracALL'):
if elstr is None:
compsarr=None
else:
elstrlist, el_gun=elements_elstr(elstr)
if len(el_gun)>3:#if there are more than 3 "real" elements use the 1st 3, if there are less than 3, elstrlist will already be filled to 3 with 'X'
            elstrlist=[el_gun[i][0] for i in range(3)]
elstrlist, compsarr=getcomps(h5path, h5groupstr, elstrlist=elstrlist, infotype=infotype)
if compsarr is None:
elstrlist, compsarr=getcomps(h5path, h5groupstr, infotype=infotype, num_els=3)
return elstrlist, compsarr
def synthpeakshape(q, pk):
return pk[1]*numpy.exp(-2.0*(q-pk[0])**2/pk[2]**2)
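#pk is (position, height, width): a Gaussian pk[1]*exp(-2*(q-pk[0])**2/pk[2]**2) whose
#value falls to exp(-2) (~13.5%) of the peak height at one width from center.
#Illustrative sketch: synthpeakshape(numpy.linspace(20., 40., 500), (30., 100., 0.2))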
#assumed format is 1 text line and then a tab-delimited line for each peak
#pointind phaseregion phaseconcs A B C neighs Q H W
def readsyntheticpeaks(path):
f = open(path, "r")
lines=f.readlines()
f.close()
pointind=[]
comp=[]
peaks=[]
peaks_ind=[]
for l in lines[1:]:
p, garbage, a=l.partition('\t')
garbage, garbage, a=a.partition('\t')
garbage, garbage, a=a.partition('\t')
a, garbage, b=a.partition('\t')
b, garbage, c=b.partition('\t')
c, garbage, q=c.partition('\t')
garbage, garbage, q=q.partition('\t')
q, garbage, h=q.partition('\t')
h, garbage, w=h.partition('\t')
        w=w.strip()
#print p, a, b, c, q, h, w
p=eval(p)
if p in pointind:
peaks_ind+=[[eval(q), eval(h), eval(w)]]
else:
pointind+=[p]
comp+=[[eval(a), eval(b), eval(c)]]
peaks+=[peaks_ind]
peaks_ind=[[eval(q), eval(h), eval(w)]]
peaks+=[peaks_ind]
peaks=peaks[1:]
return pointind, comp, peaks
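#A hypothetical data line for the parser above (tab-delimited, matching the column
#comment): "0\t1\t0.5,0.3,0.2\t0.5\t0.3\t0.2\t3\t30.1\t120.\t0.15" - columns 1
#(phaseregion), 2 (phaseconcs) and 6 (neighs) are skipped over; pointind, A, B, C
#and Q, H, W are kept.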
def createsynthetich5_peaktxt(h5path, peaktxtpath, elstr='ABC'):
pointind, comp, pklist=readsyntheticpeaks(peaktxtpath)
comp=numpy.float32(comp)
if pointind!=range(len(pointind)):
        print 'ABORTED: the list of point indices is required to be 0, 1, 2, ...'
        return 'ABORTED: the list of point indices is required to be 0, 1, 2, ...'
grid=[-30., 60./len(pointind), len(pointind)]
cmd='a2scan'
wl=.02
cal=[0, 0, 500., 0, 0, 0]
al=46.
c=0.
b='min'
attrdict={'pointlist':pointind, 'command':cmd, 'xgrid':grid, 'zgrid':grid, 'wavelength':wl, 'cal':cal, 'alpha':al, 'counter':c, 'elements':elstr, 'bcknd':b, 'chessrunstr':'', 'imapstr':'/2008NovDec/imap/16,0.05,1.560e3', 'chimapstr':'', 'killmapstr':'', 'qimagestr':'', 'chiimagestr':'', 'dqchiimagestr':''}
h5file=h5py.File(h5path, mode='w')
h5groupstr='1'
h5file.attrs['defaultscan']=h5groupstr
h5grp=h5file.create_group(h5groupstr)
h5grp=h5grp.create_group('analysis')
h5mar=h5grp.create_group('mar345')
h5depprof=h5grp.create_group('depprof')
h5file.close()
writeattr(h5path, h5groupstr, attrdict)
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5depprof=h5file['/'.join((h5groupstr, 'analysis/depprof'))]
if 'icounts' in h5mar:
qgrid=h5mar['icounts'].attrs['qgrid']
else:
qgrid=getimapqgrid(h5analysis.attrs['imapstr'], imap=False)
qvals=numpy.float32(q_qgrid_ind(qgrid))
icounts=[]
maxnumpks=0
for pks in pklist:
maxnumpks=max(maxnumpks, len(pks))
pattern=numpy.zeros(qvals.shape, dtype='float32')
for pk in pks:
pattern+=synthpeakshape(qvals, pk)
icounts+=[pattern]
icounts=numpy.float32(icounts)
pkssavearr=[]
for pks in pklist:
pks=[[pk[0], pk[2], pk[1]] for pk in pks]#synthpeaks file has posn,height, width but pkcounts is posn,width,height
pks+=[[numpy.nan, numpy.nan, numpy.nan] for i in range(maxnumpks-len(pks))]
pkssavearr+=[pks]
pkssavearr=numpy.float32(pkssavearr)
h5mar.create_dataset('synthpks', data=pkssavearr)
for count, arr in enumerate(comp.T):
h5depprof.create_dataset('molfracgun%d' %count, data=arr)
elstrslist, garbage=elements_elstr(elstr)
h5depprof.attrs['symbol']=elstrslist
h5depprof.attrs['guninds']=range(len(elstrslist))
h5ic=h5mar.create_dataset('icounts', data=icounts)
h5ic.attrs['qgrid']=qgrid
h5ifc=h5mar.create_dataset('ifcounts', data=icounts)
h5ifc.attrs['qgrid']=qgrid
h5file.close()
def createh5_txtfiles(h5path, txtpath, headerlines=0, elstr='ABC'):
dirname, fname=os.path.split(txtpath)
a, b, fext=fname.rpartition('.')
ind=-1
while a[ind].isdigit():
ind-=1
    if ind==-1:
        print 'problem with file format. expecting name#.ext where # is an integer'
        return
rootname=a[:ind+1]
files=os.listdir(dirname)
file_num=[[f, eval(f.partition(rootname)[2].rpartition('.')[0])] for f in files if f.startswith(rootname) and f.endswith(fext)]
files=map(operator.itemgetter(0),sorted(file_num, key=operator.itemgetter(1)))
xvals=[]
yvals=[]
for path in files:
print 'reading: ', path
x, y, type=readplotso(os.path.join(dirname, path).replace('\\','/'), headerlines=headerlines)
xvals+=[x]
yvals+=[y]
nvals=numpy.uint16([len(x) for x in xvals])
nval=numpy.min(nvals)
if not numpy.all(nvals==nval):
        print 'WARNING - not all datasets were the same length - datasets will be truncated to the shortest regardless of the alignment of the measurement axes'
xvals=[x[:nval] for x in xvals]
yvals=[y[:nval] for y in yvals]
xvals=numpy.float32(xvals)
yvals=numpy.float32(yvals)
if len(xvals)>1 and not numpy.all(xvals[1:,:]==xvals[:-1,:]):
print 'WARNING: not all xvals are the same - just using first one'
qgrid=qgrid_minmaxnum(xvals[0, 0], xvals[0, -1], xvals.shape[1])
print 'assessed qgrid:', qgrid
pointind=range(xvals.shape[0])
grid=[-30., 60./len(pointind), len(pointind)]
cmd='a2scan'
wl=.02
cal=[0, 0, 500., 0, 0, 0]
al=46.
c=0.
b='min'
attrdict={'pointlist':pointind, 'command':cmd, 'xgrid':grid, 'zgrid':grid, 'wavelength':wl, 'cal':cal, 'alpha':al, 'counter':c, 'elements':elstr, 'bcknd':b, 'chessrunstr':'', 'imapstr':'', 'chimapstr':'', 'killmapstr':'', 'qimagestr':'', 'chiimagestr':'', 'dqchiimagestr':''}
h5file=h5py.File(h5path, mode='w')
h5groupstr='1'
h5file.attrs['defaultscan']=h5groupstr
h5grp=h5file.create_group(h5groupstr)
h5grp=h5grp.create_group('analysis')
h5mar=h5grp.create_group('mar345')
#h5depprof=h5grp.create_group('depprof')
h5file.close()
writeattr(h5path, h5groupstr, attrdict)
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
#h5depprof=h5file['/'.join((h5groupstr, 'analysis/depprof'))]
h5ic=h5mar.create_dataset('icounts', data=yvals)
h5ic.attrs['qgrid']=qgrid
h5file.close()
def exportpeaklist(h5path, h5groupstr, runpath):
pointlist, peakinfo=getpeaksinrange(h5path, h5groupstr, indlist=None, returnonlyq=False, returnonlytallest=False)
neighbors=getneighbors(h5path, h5groupstr)
nbool=not neighbors is None
if nbool:
        lines=['PointInd\tNeighborIndices\tQposn\tGaussSigma\tHeight']
else:
        lines=['PointInd\tQposn\tGaussSigma\tHeight']
for ind, pk in zip(pointlist, peakinfo):
lstr='%d' %ind
if nbool:
nstr=','.join(['%d' %n for n in neighbors[ind]])
lstr='\t'.join((lstr, nstr))
for n in pk[:3]:
lstr='\t'.join((lstr, numtostring(n, 4)))
lines+=[lstr]
writestr='\n'.join(lines)
savename='_'.join((os.path.split(h5path)[1][0:-3], h5groupstr, 'peaklist.txt'))
filename=os.path.join(runpath,savename).replace('\\','/')
fout = open(filename, "w")
fout.write(writestr)
fout.close()
def getneighbors(h5path, h5groupstr):
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if not 'neighbors' in h5analysis:
return None
neigharr=readh5pyarray(h5analysis['neighbors'])
neighlist=[]
for n in neigharr:
neighlist+=[list(n[n!=32767])]
return neighlist
def saveneighbors(h5path, h5groupstr, neighbors, pardict={}):#len(neighbors) should be the number of points in the experiment
maxnumneighs=max([len(n) for n in neighbors])
savearr=numpy.ones((len(neighbors), maxnumneighs), dtype='uint16')*32767
for count, n in enumerate(neighbors):
savearr[count, :len(n)]=numpy.uint16(n)
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
if 'neighbors' in h5analysis:
del h5analysis['neighbors']
h5neighbors=h5analysis.create_dataset('neighbors', data=savearr)
for k, v in pardict.iteritems():
print k, v, type(k), type(v)
h5neighbors.attrs[k]=v
h5file.close()
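#The neighbor lists are ragged, so saveneighbors pads each uint16 row with the
#sentinel 32767 and getneighbors strips it back out (this assumes real point
#indices stay below 32767). Hypothetical round trip:
#saveneighbors('data.h5', '1', [[1, 2], [0], [0, 1, 3]])
#getneighbors('data.h5', '1') -> [[1, 2], [0], [0, 1, 3]]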
def buildnewscan(h5path, h5groupstr, newscandict):
xrdname=newscandict['xrdname']
h5file=h5py.File(h5path, mode='r+')
h5root=h5file.create_group(h5groupstr)
h5analysis=h5root.create_group('analysis')
measpath='/'.join((h5groupstr,'measurement'))
measpath_copyfrom='/'.join((newscandict['sourcename'],'measurement'))
anpath_copyfrom='/'.join((newscandict['sourcename'],'analysis'))
h5file.copy(measpath_copyfrom, measpath)
h5measurement=h5file[measpath]
for k, v in h5file[anpath_copyfrom].attrs.iteritems():
h5analysis.attrs[k]=v
for ind, newname, newind in zip(newscandict['ind_tobereplaced'], newscandict['newimage_scanname'], newscandict['newimage_ind']):#"new" refers to the replacement
h5measnew=h5file['/'.join((newname,'measurement'))]
h5measurement[xrdname+'/counts'][ind, :, :]=h5measnew[xrdname+'/counts'][newind, :, :]
if 'MCA/counts' in h5measurement and 'MCA/counts' in h5measnew:
h5measurement['MCA/counts'][ind, :]=h5measnew['MCA/counts'][newind, :]
if 'scalar_data/Seconds' in h5measnew:
h5analysis.attrs['acquisition_time'][ind]=h5measnew['scalar_data/Seconds'][newind]
for item in h5measurement['scalar_data'].iterobjects():
itemname=item.name.rpartition('/')[2]
            if (not itemname in ['samx', 'samz']) and isinstance(item,h5py.Dataset) and len(item.shape)==1 and itemname in h5measnew['scalar_data']:#replace things like IC counts but not x,z position
item[ind]=h5measnew['scalar_data/%s' %itemname][newind]
for grpname, attr in zip(newscandict['appendscan_name'], newscandict['appendscan_attr']):
h5analysis.attrs['command']='USER-COMPILED'
        #xgrid and zgrid are not changed and are no longer valid
numappendpts=len(attr['x'])
h5measnew=h5file['/'.join((grpname,'measurement'))]
h5analysis.attrs['acquisition_shape']=(numpy.prod(numpy.uint16(h5analysis.attrs['acquisition_shape']))+numappendpts,)
h5analysis.attrs['x']=numpy.append(numpy.float32(h5analysis.attrs['x']), numpy.float32(attr['x']))
h5analysis.attrs['z']=numpy.append(numpy.float32(h5analysis.attrs['z']), numpy.float32(attr['z']))
if 'acquisition_time' in attr:
h5analysis.attrs['acquisition_time']=numpy.append(numpy.float32(h5analysis.attrs['acquisition_time']), numpy.float32(attr['acquisition_time']))
elif 'scalar_data/Seconds' in h5measnew:
h5analysis.attrs['acquisition_time']=numpy.append(numpy.float32(h5analysis.attrs['acquisition_time']), numpy.float32(h5measnew['scalar_data/Seconds'][:]))
else:
h5analysis.attrs['acquisition_time']=numpy.append(numpy.float32(h5analysis.attrs['acquisition_time']), numpy.ones(numappendpts, dtype='float32'))
h5mar=h5measurement[xrdname]
arr1=readh5pyarray(h5mar['counts'])
        del h5mar['counts']# this way it is deleted no matter what and it will be rewritten if there is data to append. this ensures that all arrays end up the right length.
if (xrdname+'/counts') in h5measnew:
arr2=readh5pyarray(h5measnew[xrdname+'/counts'])
arr1=numpy.append(arr1, arr2, axis=0)
h5mar.create_dataset('counts', data=arr1)
if 'MCA/counts' in h5measurement:
h5mca=h5measurement['MCA']
arr1=readh5pyarray(h5mca['counts'])
del h5mca['counts']
if 'MCA/counts' in h5measnew:
arr2=readh5pyarray(h5measnew['MCA/counts'])
arr1=numpy.append(arr1, arr2, axis=0)
h5mca.create_dataset('counts', data=arr1)
h5sd=h5measurement['scalar_data']
for item in h5sd.values():
itemname=item.name.rpartition('/')[2]
if isinstance(item,h5py.Dataset) and len(item.shape)==1:
del h5sd[itemname]
if itemname in h5measnew['scalar_data']:
arr1=readh5pyarray(item)
arr2=readh5pyarray(h5measnew['scalar_data/%s' %itemname])
arr1=numpy.append(arr1, arr2, axis=0)
h5sd.create_dataset(itemname, data=arr1)
h5file.close()
def initializescan(h5path, h5groupstr, bin=3, insituscalarname='IC3'):
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=h5file['/'.join((h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
h5analysis.attrs['bin']=bin
attrdict=getattr(h5path, h5groupstr)
if not ('bminanomf' in h5mar):
bminanomfinit=numpy.ones((numpts_attrdict(attrdict), 3), dtype='float32')*(-1.0)
h5mar.create_dataset('bminanomf', data=bminanomfinit)
print 'binning data'
pointlist=[]
if 'countsbin%d' %bin in h5mar:
del h5mar['countsbin%d' %bin]
countsbin=h5mar.create_dataset('countsbin%d' %bin, (h5marcounts.shape[0],h5marcounts.shape[1]//bin,h5marcounts.shape[2]//bin), dtype=h5marcounts.dtype)
z=numpy.zeros((h5marcounts.shape[1]//bin,h5marcounts.shape[2]//bin), dtype=h5marcounts.dtype)
for count, image in enumerate(h5marcounts):
data=image[:, :]
if data.max()>0:
pointlist+=[count]
countsbin[count, :, :]=binimage(data, bin)
else:
countsbin[count, :, :]=z[:, :]
h5analysis.attrs['pointlist']=pointlist
h5file.attrs['defaultscan']=str(h5groupstr)
    if 'min' in attrdict['bcknd'] or 'lin' in attrdict['bcknd']:
        initbcknd='min'
    else:
        initbcknd='ave'
if 'tseries 1' in attrdict['command'] and h5marcounts.shape[0]>1:# insitu experiment #TODO: what to do with acquisition_shape?
nimages=h5marcounts.shape[0]
g=h5file['/'.join((h5groupstr, 'measurement', 'scalar_data'))]
temp=[insituscalarname]
if insituscalarname!='Seconds':
temp+=['Seconds']
for nam in temp:
totic=g[nam]
ic=numpy.ones(nimages, dtype='float32')*totic/nimages
g.copy(nam, 'asimported_'+nam)
del g[nam]
g.create_dataset(nam, data=ic)
print 'calculating ', initbcknd, 'background - last step of data initialization'
calcbcknd(h5path=h5path, h5groupstr=h5groupstr, bcknd=initbcknd, bin=bin)
h5file.close()
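#Hypothetical first call after assembling a scan: initializescan('data.h5', '1', bin=3)
#bins every image 3x3, records the indices of nonzero images as the pointlist and
#seeds a 'min' or 'ave' background depending on the scan's 'bcknd' attribute.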
def readxrfinfodatabase():
f=open(XRFINFOpath(), mode='r')
lines=f.readlines()
f.close()
entries=[]
currentdict={}
currentval=None
    lines+=[' ']#to add the last dict to entries
for l in lines:
l=l.strip()
if len(l)==0:
if not currentval is None:
if len(currentval)==1:
currentval=currentval[0]
currentdict[k]=currentval
currentval=None
entries+=[copy.deepcopy(currentdict)]
currentdict={}
elif l.startswith('['):
if not currentval is None:
if len(currentval)==1:
currentval=currentval[0]
currentdict[k]=currentval
currentval=[]
k=l[1:].partition(']')[0]
else:
lineval=[]
c=l
while len(c)>0:
a, b, c=c.partition('\t')
a=a.strip()
c=c.strip()
try:
val=eval(a)
except:
val=a
lineval+=[val]
if len(lineval)==1:
lineval=lineval[0]
currentval+=[lineval]
return entries
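#The parsed file consists of "[key]" lines, each followed by tab-delimited value
#lines, with blank lines separating entries. A hypothetical snippet
#[symbol]
#Ru\tPt
#[M]
#101.07\t195.08
#(followed by a blank line) yields entries=[{'symbol':['Ru', 'Pt'], 'M':[101.07, 195.08]}].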
#getpeaksinrange('E:/CHESS2008/2008CHESSh5analysis/20081121bsub3RuPtX.dat.h5', '2', [11,19,20,21,29,30,31,39,40,41,49,50,51,59,60,61,69], 32, 32.5, performprint=True)
def myexpformat(x, pos=0):
for ndigs in range(6):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','')
if eval(lab)==x:
return lab
return lab
#ExpTickLabels=FuncFormatter(myexpformat)
#ax.xaxis.set_major_formatter(ExpTickLabels)
pointlist=[11,20,21,28,29,30,31,38,39,40,41,48,49,50,51,58,59,60,61,69]
def runme():
return testwavetrans1d('/mnt/SharedData/CHESS2008/2008CHESSh5analysis/20081121bsub3RuPtX.dat.h5','2','0.1,1.18,23_18,0.1,750_16,0.05,1.540e3')
def testwavetrans1d(h5path, h5groupstr, wavesetname):#wavetrans qgrid can be subset of icounts qgrid but not vice versa
# print "h5path='", h5path, "'"
# print "h5groupstr='", h5groupstr, "'"
# print "wavesetname='", wavesetname, "'"
h5wave=WAVESET1dFILE()
wavegrp=h5wave[wavesetname]
waveset=wavegrp['waveset'][:, :, :]
waveqgrid=wavegrp.attrs['qgrid']
print 'wave grid', waveqgrid
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
wtgrp=h5mar['wavetrans1d']
qscalegrid=wavegrp.attrs['qscalegrid']
qposngrid=wavegrp.attrs['qposngrid']
print 'wave grid', qscalegrid, qposngrid
h5wave.close()
pointlist=[11,20,21,28,29,30,31,38,39,40,41,48,49,50,51,58,59,60,61,69]
a, b, c =waveset.shape # num scales, num posn, length of data
#dfltarr=numpy.empty((a, b), dtype='float32')*numpy.nan
icountspoint=h5mar['icounts']
qgrid=icountspoint.attrs['qgrid']
print 'qgrid', qgrid
icind=numpy.array([qval in q_qgrid_ind(waveqgrid) for qval in q_qgrid_ind(qgrid)])
icounts=icountspoint[:, :]
ridgespoint=readh5pyarray(wtgrp['ridges'])
wt=numpy.zeros((icountspoint.shape[0], a, b), dtype='float32')
h5file.close()
TIMESTART=time.time()
#for ind in set(range(wt.shape[0]))-set(pointlist):
# for ind in range(wt.shape[0]):
# wt[ind, :, :]=dfltarr[:, :]
for pointind in pointlist:
data=icounts[pointind][icind]
#wt[pointind, :, :]=numpy.float32([[(vec*data).sum() for vec in arr] for arr in waveset])
wt[pointind, :, :]=numpy.float32([[(vec*data).sum()/scale for vec in arr] for arr, scale in zip(waveset, scale_scalegrid_ind(qscalegrid))])
minridgelength=1
noiselevel=20.
maxqscale_localmax=1.5
minridgewtsum=100.
verbose=False
# h5file=h5py.File(h5path, mode='r+')
# h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
# wtgrp=h5mar['wavetrans1d']
#qscalegrid=wtgrp.attrs['qscalegrid']
qscalevals=scale_scalegrid_ind(qscalegrid)
#qposngrid=wtgrp.attrs['qposngrid']
qposnint=qposngrid[1]
qsindlist=[2*max(int(numpy.ceil(1.*qs/qposnint)), 1) for qs in qscalevals[::-1]]
#pointlist=h5analysis.attrs['pointlist']
ridgeqscalevals=scale_scalegrid_ind(qscalegrid)[::-1] #ordered big->small
ridgescalecritind=numpy.where(ridgeqscalevals<=maxqscale_localmax)[0]
    ridgescalecritind=ridgescalecritind[0] #take the first index, i.e. the largest of the (descending) scale values that is at or below maxqscale_localmax
ridges_pointlist=[]
peaks_pointlist=[]
for pointind in pointlist:
        temp=wt[pointind, :, :]
        wtrev=temp[::-1, :]#reverse the scale index so that it goes from widest to smallest scale
perform_ridges_wavetrans1d(wtrev, qsindlist, noiselevel, numscalesskippedinaridge=1.5)
ridges=ridgespoint[pointind, :, :]
peaks_pointlist+=[perform_peaks_ridges1d(temp, ridges, ridgescalecritind=ridgescalecritind, minridgelength=minridgelength, minridgewtsum=minridgewtsum, verbose=verbose)]
TIMESTOP=time.time()
print 'time elapsed=', TIMESTOP-TIMESTART
print 'num peaks=', numpy.sum(numpy.array([len(b) for b in peaks_pointlist]))
return peaks_pointlist
def readpdffile(pdfentriespath):
fin = open(pdfentriespath, "r")
lines=fin.readlines()
fin.close()
pdfname=[]
pdflist=[]
for ln in lines:
name, garbage, liststr=ln.partition(':')
try:
temp=eval(liststr.strip())
if len(temp)==2:
temp=numpy.float32(temp).T
else:
temp=numpy.float32(temp)
temp[:, 1]/=numpy.max(temp[:, 1])
pdfname+=[name]
pdflist+=[temp]
except:
print 'format error in pdf entry ', liststr
return pdfname, pdflist
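#Each line is "name: <list>". A hypothetical entry
#"fcc-Pt: [[2.78, 3.21], [100., 46.]]" is transposed into (q, intensity) pairs and
#the intensities are rescaled so the strongest reflection is 1.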
def xrdraw_dezing_rescale(h5path, h5groupstr=None, h5grppath=None, dezingbool=False, normdsetname=None, multval=None, outlier_nieghbratio=None):
h5file=h5py.File(h5path, mode='r+')
if not h5groupstr is None:
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5marcounts=h5file['/'.join((h5groupstr,'measurement/'+getxrdname(h5analysis)+'/counts'))]
h5sd=h5file['/'.join((h5groupstr,'measurement', 'scalar_data'))]
else:
h5analysis=None
h5marcounts=h5file[h5grppath]['counts']
if not normdsetname is None:
h5sd=(h5file[h5grppath].parent)['scalar_data']
marcounts=readh5pyarray(h5marcounts)
if multval is None:
multval=1.
#multval=numpy.array([multval], dtype=marcounts.dtype)[0]
mvals=[]
for count, arr in enumerate(marcounts):
if not normdsetname is None:
m=multval/h5sd[normdsetname][count]
else:
m=multval
m*=1.
        if m==0.:
            m=1.
if dezingbool:
arr=dezing(arr, critval=arr.max())
if not outlier_nieghbratio is None:
arr=removesinglepixoutliers(arr, critratiotoneighbors=outlier_nieghbratio)
else:
outlier_nieghbratio=0.#this is just for saving as attr
marcounts[count, :, :]=numpy.array(arr*m, dtype=marcounts.dtype)#don't worry about rounding - assume 1 count doesn't matter
mvals+=[m]
mvals=numpy.array(mvals)
h5marcounts[:, :, :]=marcounts[:, :, :]
for k, v in zip(['mod_dezing', 'mod_normbyscalar', 'mod_multiplier', 'mod_outlier_neighbratio', 'mod_multiplierarray'], [dezingbool, normdsetname, multval, outlier_nieghbratio, mvals]):
print k, v
if v is None:
continue
h5marcounts.attrs[k]=v
if not h5analysis is None:
updatelog(h5analysis, ''.join(('raw XRD data modified using dezingbool=%s, normdsetname=%s, multval=%s' %(`dezingbool`, `normdsetname`, `multval`), '. finished ', time.ctime())))
h5file.close()
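#Hypothetical call normalizing each raw image by its IC3 counts and removing zingers:
#xrdraw_dezing_rescale('data.h5', h5groupstr='1', dezingbool=True, normdsetname='IC3')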
def CopyLinBckndData(h5path, h5groupstr, h5path_from, h5groupstr_from):
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5file_from=h5py.File(h5path_from, mode='r')
h5analysis_from=h5file_from['/'.join((h5groupstr_from, 'analysis'))]
h5mar_from=h5file_from['/'.join((h5groupstr_from, 'analysis', getxrdname(h5analysis_from)))]
dellist=[]
for pnt in h5mar.itervalues():
if isinstance(pnt,h5py.Dataset):
temp=pnt.name.rpartition('/')[2]
if temp.startswith('blin'):
dellist+=[temp]
for temp in dellist:
del h5mar[temp]
anycopied=False
for pnt in h5mar_from.itervalues():
if isinstance(pnt,h5py.Dataset) and (pnt.name.rpartition('/')[2]).startswith('blin'):
h5file_from.copy(pnt.name, h5mar, name=pnt.name.rpartition('/')[2])
anycopied=True
if anycopied:
updatelog(h5analysis, ''.join(('LinBcknd arrays and attrs copied from %s %s' %(h5path_from, h5groupstr_from), '. finished ', time.ctime())))
h5file.close()
h5file_from.close()
else:
h5file.close()
h5file_from.close()
        print 'CopyFailed: No LinBcknd arrays were found'
        return 'CopyFailed: No LinBcknd arrays were found'
def bckndsub1d_difffiles(h5path, h5groupstr, h5path_bcknd, h5groupstr_bcknd, specind_bcknd):#no gui interface
h5file=h5py.File(h5path_bcknd, mode='r')
h5analysis=h5file['/'.join((h5groupstr_bcknd, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr_bcknd, 'analysis', getxrdname(h5analysis)))]
icountspoint=h5mar['icounts']
if isinstance(specind_bcknd, int):
bcknddata=-1*icountspoint[specind_bcknd, :]
else:
bcknddata=[]
for i in specind_bcknd:
bcknddata+=[-1*icountspoint[i, :]]
h5file.close()
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
pointlist=h5analysis.attrs['pointlist']
icounts=readh5pyarray(h5mar['icounts'])
if isinstance(specind_bcknd, int):#don't worry about pointlist
newicounts=numpy.array([ic+bcknddata for ic in icounts], dtype=icounts.dtype)
elif len(bcknddata)!=len(pointlist):
        print 'ABORTED: the pointlist for the data contains %d points but the number of bcknd indices provided is %d' %(len(pointlist), len(bcknddata))
h5file.close()
return
else:
newicounts=numpy.zeros(icounts.shape, dtype=icounts.dtype)
newicounts[pointlist]=numpy.array([ic+bc for ic, bc in zip(icounts[pointlist], bcknddata)], dtype=icounts.dtype)
newicounts[newicounts<0.]=0.
h5mar['icounts'][:, :]=newicounts[:, :]
if 'asintegratedicounts' in h5mar:
del h5mar['asintegratedicounts']
        print 'WARNING: there should not have been an existing asintegratedicounts but it is being overwritten anyway'
icountsasint=h5mar.create_dataset('asintegratedicounts', data=icounts)
# icountsasint.attrs['bcknd1daddition']=bcknddata
asintattrs={'h5path_bcknd':h5path_bcknd, 'h5groupstr_bcknd':h5groupstr_bcknd, 'specind_bcknd': specind_bcknd}
for key, val in asintattrs.iteritems():
# if isinstance(val, list) and len(val)==0:
# continue
icountsasint.attrs[key]=val
if 'ibckndadd' in h5mar:
del h5mar['ibckndadd']
h5mar.create_dataset('ibckndadd', data=numpy.array(bcknddata))
# if 'ibminnew' in h5mar:
# del h5mar['ibminnew']
# if 'ibmin' in h5mar:
# h5mar.create_dataset('ibminnew', data=h5mar['ibmin'][:]-self.newadditionfrom1dbckndsubtraction[:])
h5file.close()
#def linbckndsub1d(h5path, h5groupstr, bckndinvname0, bckndinvname1, fraczeroed=0.003, fprecision=0.001, scalarname='IC3', refineduringloop=True, maxtries=100, singlepointind=None):#no gui interface
#
# #single image -> asintegratedicounts not saved and no attributes saved
# single=not (singlepointind is None)
# h5file=h5py.File(h5path, mode='r+')
# h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
# if 'asintegratedicounts' in h5mar:
# t=h5mar['asintegratedicounts'][:, :]
# if single:
# h5mar['icounts'][singlepointind, :]=t[singlepointind, :]
# else:
# h5mar['icounts'][:, :]=t
# h5file.close()
#
# h5file=h5py.File(h5path, mode='r')
# h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
# attrdict=getattr(h5path, h5groupstr)
# if single:
# pointlist=[singlepointind]
# else:
# pointlist=h5analysis.attrs['pointlist']
# icounts=readh5pyarray(h5mar['icounts'])
# h5marcounts=h5file['/'.join((h5groupstr, 'measurement', getxrdname(h5analysis), 'counts'))]
#
# ic=h5file['/'.join((h5groupstr, 'measurement', 'scalar_data', scalarname))][:]
# if 'subexposures' in h5marcounts.attrs.keys():
# subex=h5marcounts.attrs['subexposures']
# else:
# subex=1
#
# subex=numpy.float32(subex)
#
# if len(subex.shape)==0:#setup for future possiblity of different subexposures
# subex=numpy.ones(icounts.shape[0], dtype='float32')*subex
#
# if 'mod_multiplierarray' in h5marcounts.attrs.keys():
# m=h5marcounts.attrs['mod_multiplierarray']
# m[m==0.]=1.
# subex/=m
# ic/=m
#
# h5chess=CHESSRUNFILE()
# h5grp=h5chess[attrdict['chessrunstr']]
# bigrp=h5grp['BckndInventory']
# kms=attrdict['killmapstr'].rpartition('/')[2]
# ims=attrdict['imapstr'].rpartition('/')[2]
# if (ims in bigrp) and (kms in bigrp[ims]) and (bckndinvname0 in bigrp[ims][kms]) and (bckndinvname1 in bigrp[ims][kms]):
# pnt=bigrp[ims][kms][bckndinvname0]
# b0=readh5pyarray(pnt)
# if scalarname in pnt.attrs.keys():
# ic_b0=pnt.attrs[scalarname]
# else:
# ic_b0=1.
# if 'subexposures' in pnt.attrs.keys():
# subex_b0=pnt.attrs['subexposures']
# else:
# subex_b0=1.
# pnt=bigrp[ims][kms][bckndinvname1]
# b1=readh5pyarray(pnt)
# if scalarname in pnt.attrs.keys():
# ic_b1=pnt.attrs[scalarname]
# else:
# ic_b1=1.
# if 'subexposures' in pnt.attrs.keys():
# subex_b1=pnt.attrs['subexposures']
# else:
# subex_b1=1.
# h5chess.close()
# h5file.close()
## except:
## h5chess.close()
## h5file.close()
## print 'problem reading data from arrays - most likely cause is absence of attributes %s and %s' %(bckndinvname1, 'subexposures')
## return
#
# else:
# h5chess.close()
# h5file.close()
# print 'aborting because bcknd arrays not found or not matched with imap and killmap'
# return
#
#
#
# b0cpy=copy.copy(b0)
# b1cpy=copy.copy(b1)
#
# f0startlist=[]
# f1startlist=[]
# f0list=[]
# f1list=[]
# fraczlist=[]
# for pointind in pointlist:
# print pointind
# #print subex[pointind], ic[pointind], subex_b0, ic_b0, subex_b1, ic_b1
# f0, f1=f0_f1_exp_ic(subex[pointind], ic[pointind], subex_b0, ic_b0, subex_b1, ic_b1)
# #print f0, f1
# if isinstance(fprecision, list):
# for fp in fprecision[:-1]:
# blinclass=calc_blin_factors(icounts[pointind], b0, b1, f0=f0, f1=f1, fraczeroed=fraczeroed, factorprecision=fp, refineduringloop=refineduringloop, maxtries=maxtries)
# if blinclass.warning!='':
# print 'pointind %d, with fp=%s : %s' %(pointind, `fp`, blinclass.warning)
# f0=blinclass.f0
# f1=blinclass.f1
# fp=fprecision[-1]
# else:
# fp=fprecision
# blinclass=calc_blin_factors(icounts[pointind], b0, b1, f0=f0, f1=f1, fraczeroed=fraczeroed, factorprecision=fp, refineduringloop=refineduringloop, maxtries=maxtries)
# if blinclass.warning!='':
# print 'pointind %d, with fp=%s : %s', (pointind, `fp`, blinclass.warning)
# f0list+=[blinclass.f0]
# f1list+=[blinclass.f1]
# fraczlist+=[blinclass.fracz]
#
# f0=numpy.array(f0list)
# f1=numpy.array(f1list)
# fracz=numpy.array(fraczlist)
# bcknddata=-1.*numpy.dot(numpy.array([f0, f1]).T, numpy.array([b0cpy, b1cpy]))
#
# h5file=h5py.File(h5path, mode='r+')
# h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
#
# if single:
# newicounts=numpy.array([ic+bc for ic, bc in zip(icounts[pointlist], bcknddata)], dtype=icounts.dtype)
# newicounts[newicounts<0.]=0.
# h5mar['icounts'][singlepointind, :]=newicounts[0]
# else:
# newicounts=numpy.zeros(icounts.shape, dtype=icounts.dtype)
# newicounts[pointlist]=numpy.array([ic+bc for ic, bc in zip(icounts[pointlist], bcknddata)], dtype=icounts.dtype)
# newicounts[newicounts<0.]=0.
# if 'asintegratedicounts' in h5mar:
# del h5mar['asintegratedicounts']
# print 'WARNING:There should not have been an existing icounts_asintegrated but it is being overwritten anyway'
# icountsasint=h5mar.create_dataset('asintegratedicounts', data=icounts)
#
# h5mar['icounts'][:, :]=newicounts[:, :]
#
# w=numpy.zeros((icounts.shape[0], 2), dtype=f0.dtype)
# w[pointlist, :]=numpy.array([f0, f1]).T
# asintattrs={'weights':w, 'b0':b0cpy, 'b1':b1cpy, 'bckndinvname0':bckndinvname0, 'bckndinvname1':bckndinvname1, 'f0startvals':f0startlist, 'f1startvals':f1startlist, 'fraczeroed':fraczeroed, 'fprecision':fprecision, 'refineduringloop':numpy.uint8(refineduringloop)}
#
#
# if 'ibckndadd' in h5mar:
# del h5mar['ibckndadd']
# h5mar.create_dataset('ibckndadd', data=numpy.array(bcknddata))
#
# h5file.close()
def linbckndsub1d(h5path, h5groupstr, fraczeroed=0.003, fprecision=0.001, scalarname='IC3', refineduringloop=True, maxtries=100):#no gui interface
    #restore icounts from asintegratedicounts (if present) so the subtraction can be rerun from the as-integrated data
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
if 'asintegratedicounts' in h5mar:
t=h5mar['asintegratedicounts'][:, :]
h5mar['icounts'][:, :]=t[:, :]
h5file.close()
h5file=h5py.File(h5path, mode='r')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
attrdict=getattr(h5path, h5groupstr)
pointlist=h5analysis.attrs['pointlist']
icounts=readh5pyarray(h5mar['icounts'])
h5marcounts=h5file['/'.join((h5groupstr, 'measurement', getxrdname(h5analysis), 'counts'))]
ic=h5file['/'.join((h5groupstr, 'measurement', 'scalar_data', scalarname))][:]
if 'subexposures' in h5marcounts.attrs.keys():
subex=h5marcounts.attrs['subexposures']
else:
subex=1
subex=numpy.float32(subex)
    if len(subex.shape)==0:#setup for future possibility of different subexposures
subex=numpy.ones(icounts.shape[0], dtype='float32')*subex
if 'mod_multiplierarray' in h5marcounts.attrs.keys():
m=h5marcounts.attrs['mod_multiplierarray']
m[m==0.]=1.
subex/=m
ic/=m
kms=attrdict['killmapstr'].rpartition('/')[2]
ims=attrdict['imapstr'].rpartition('/')[2]
blin0_name=h5mar['blin0'].attrs['sourcename']
a, b, c=blin0_name.rpartition('/')
blin0_1dpath='/'.join((a, ims, kms, c))
blin0_inds=h5mar['blin0'].attrs['sourcearrayindex']
if isinstance(blin0_inds, int):
blin0_processtype=1
elif isinstance(blin0_inds, numpy.ndarray):
blin0_processtype=2
else:
blin0_processtype=0
blin1_name=h5mar['blin1'].attrs['sourcename']
a, b, c=blin1_name.rpartition('/')
blin1_1dpath='/'.join((a, ims, kms, c))
blin1_inds=h5mar['blin1'].attrs['sourcearrayindex']
if isinstance(blin1_inds, int):
blin1_processtype=1
elif isinstance(blin1_inds, numpy.ndarray):
blin1_processtype=2
else:
blin1_processtype=0
h5file.close()
h5chess=CHESSRUNFILE()
# h5grp=h5chess[attrdict['chessrunstr']]
# bigrp=h5grp['BckndInventory']
pnt=h5chess[blin0_1dpath]
b0=readh5pyarray(pnt)
if scalarname in pnt.attrs.keys():
ic_b0=pnt.attrs[scalarname]
else:
ic_b0=1.
if 'subexposures' in pnt.attrs.keys():
subex_b0=pnt.attrs['subexposures']
else:
subex_b0=1.
pnt=h5chess[blin1_1dpath]
b1=readh5pyarray(pnt)
if scalarname in pnt.attrs.keys():
ic_b1=pnt.attrs[scalarname]
else:
ic_b1=1.
if 'subexposures' in pnt.attrs.keys():
subex_b1=pnt.attrs['subexposures']
else:
subex_b1=1.
h5chess.close()
if blin0_processtype==1:
b0=b0[blin0_inds]
if isinstance(ic_b0, numpy.ndarray):
ic_b0=ic_b0[blin0_inds]
if isinstance(subex_b0, numpy.ndarray):
subex_b0=subex_b0[blin0_inds]
if blin1_processtype==1:
b1=b1[blin1_inds]
if isinstance(ic_b1, numpy.ndarray):
ic_b1=ic_b1[blin1_inds]
if isinstance(subex_b1, numpy.ndarray):
subex_b1=subex_b1[blin1_inds]
if blin0_processtype==2:
if isinstance(ic_b0, numpy.ndarray):
ic_b0arr=numpy.array([ic_b0[i] for i in blin1_inds])
else:
ic_b0arr=numpy.array([ic_b0]*len(blin1_inds))
if isinstance(subex_b0, numpy.ndarray):
subex_b0arr=numpy.array([subex_b0[i] for i in blin1_inds])
else:
subex_b0arr=numpy.array([subex_b0]*len(blin1_inds))
b0arr=numpy.array([b0[i] for i in blin0_inds])
else:
b0cpy=copy.copy(b0)
if blin1_processtype==2:
if isinstance(ic_b1, numpy.ndarray):
ic_b1arr=numpy.array([ic_b1[i] for i in blin1_inds])
else:
ic_b1arr=numpy.array([ic_b1]*len(blin1_inds))
if isinstance(subex_b1, numpy.ndarray):
subex_b1arr=numpy.array([subex_b1[i] for i in blin1_inds])
else:
subex_b1arr=numpy.array([subex_b1]*len(blin1_inds))
b1arr=numpy.array([b1[i] for i in blin1_inds])
else:
b1cpy=copy.copy(b1)
f0startlist=[]
f1startlist=[]
f0list=[]
f1list=[]
fraczlist=[]
for pointind in pointlist:
print pointind
if blin0_processtype==2:
ic_b0=ic_b0arr[pointind]
subex_b0=subex_b0arr[pointind]
b0=b0arr[pointind]
if blin1_processtype==2:
ic_b1=ic_b1arr[pointind]
subex_b1=subex_b1arr[pointind]
b1=b1arr[pointind]
f0, f1=f0_f1_exp_ic(subex[pointind], ic[pointind], subex_b0, ic_b0, subex_b1, ic_b1)
print f0, f1
f0startlist+=[f0]
f1startlist+=[f1]
if isinstance(fprecision, list):
for fp in fprecision[:-1]:
blinclass=calc_blin_factors(icounts[pointind], b0, b1, f0=f0, f1=f1, fraczeroed=fraczeroed, factorprecision=fp, refineduringloop=refineduringloop, maxtries=maxtries)
if blinclass.warning!='':
print 'pointind %d, with fp=%s : %s' %(pointind, `fp`, blinclass.warning)
f0=blinclass.f0
f1=blinclass.f1
print 'after fp=%.4f, f0=%.4f, f1=%.4f' %(fp, f0, f1)
fp=fprecision[-1]
else:
fp=fprecision
blinclass=calc_blin_factors(icounts[pointind], b0, b1, f0=f0, f1=f1, fraczeroed=fraczeroed, factorprecision=fp, refineduringloop=refineduringloop, maxtries=maxtries)
if blinclass.warning!='':
print 'pointind %d, with fp=%s : %s', (pointind, `fp`, blinclass.warning)
print 'after fp=%.4f, f0=%.4f, f1=%.4f' %(fp, blinclass.f0, blinclass.f1)
f0list+=[blinclass.f0]
f1list+=[blinclass.f1]
fraczlist+=[blinclass.fracz]
f0=numpy.array(f0list)
f1=numpy.array(f1list)
fracz=numpy.array(fraczlist)
if blin0_processtype==2:
bcknddata=-1.*numpy.array([f0v*b0arr[pointind] for pointind, f0v in zip(pointlist, f0)])
else:
bcknddata=-1.*numpy.array([f0v*b0cpy for f0v in f0])
if blin1_processtype==2:
bcknddata+=-1.*numpy.array([f1v*b1arr[pointind] for pointind, f1v in zip(pointlist, f1)])
else:
bcknddata+=-1.*numpy.array([f1v*b1cpy for f1v in f1])
#bcknddata=-1.*numpy.dot(numpy.array([f0, f1]).T, numpy.array([b0cpy, b1cpy]))
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
newicounts=numpy.zeros(icounts.shape, dtype=icounts.dtype)
newicounts[pointlist]=numpy.array([ic+bc for ic, bc in zip(icounts[pointlist], bcknddata)], dtype=icounts.dtype)
newicounts[newicounts<0.]=0.
if 'asintegratedicounts' in h5mar:
del h5mar['asintegratedicounts']
        print 'WARNING: there should not have been an existing asintegratedicounts but it is being overwritten anyway'
icountsasint=h5mar.create_dataset('asintegratedicounts', data=icounts)
h5mar['icounts'][:, :]=newicounts[:, :]
w=numpy.zeros((icounts.shape[0], 2), dtype=f0.dtype)
w[pointlist, :]=numpy.array([f0, f1]).T
saveattrs={'weights':w, 'blin0_h5chesspath':blin0_1dpath, 'blin1_h5chess':blin1_1dpath, 'f0startvals':f0startlist, 'f1startvals':f1startlist, 'fraczeroed':fraczeroed, 'fprecision':fprecision, 'refineduringloop':numpy.uint8(refineduringloop)}
if 'ibckndadd' in h5mar:
del h5mar['ibckndadd']
ds=h5mar.create_dataset('ibckndadd', data=numpy.array(bcknddata))
for k, v in saveattrs.iteritems():
ds.attrs[k]=v
    updatelog(h5analysis, ''.join(('1D lin bcknd subtraction, all icounts. Finished ', time.ctime())))
h5file.close()
#ACTIVEPATH='F:/CHESS2011_h5MAIN/2011Jun01a.h5'
#ACTIVEGRP='13'
#linbckndsub1d(ACTIVEPATH, ACTIVEGRP, fprecision=[.01, .0005], maxtries=2000)#, singlepointind=2)
#ACTIVEPATH='C:/Users/JohnnyG/Documents/CHESS/CHESS2010-12/h5analysis/AuSiCupnsc_assembledh5_1dnosample/AuSiCu_pnsc_notheated.dat.h5'
#ACTIVEGRP='7'
#linbckndsub1d(ACTIVEPATH, ACTIVEGRP, 'Dark_24imave','NoSample_NoPb_480s', fprecision=[.0005], maxtries=2000)#, singlepointind=2)
#print 'done'
#linbckndsub1d('F:/CHESS_2010DEC/AgRapid/Ag_rapidExpopens130ms_6dec_24imsum.h5', '24of130ms', 'Dark_24imave','NoSample_NoPb_10s', fprecision=0.00003, maxtries=1000)
def integratebckndinvimage(h5path=None, h5groupstr=None, kill_i_dqchi_mapstr=None, bckndinvname=None):#no gui interface
if not h5path is None and not h5groupstr is None:
attrdict=getattr(h5path, h5groupstr)
imap, qgrid=getimapqgrid(attrdict['imapstr'])
dqchiimage=getdqchiimage(attrdict['dqchiimagestr'])
killmap=getkillmap(attrdict['killmapstr'])
chessrun=attrdict['chessrunstr']
kms=attrdict['killmapstr'].rpartition('/')[2]
ims=attrdict['imapstr'].rpartition('/')[2]
elif not kill_i_dqchi_mapstr is None:
killmap=getkillmap(kill_i_dqchi_mapstr[0])
        imap, qgrid=getimapqgrid(kill_i_dqchi_mapstr[1])
dqchiimage=getdqchiimage(kill_i_dqchi_mapstr[2])
chessrun=kill_i_dqchi_mapstr[0].strip('/').partition('/')[0]
kms=kill_i_dqchi_mapstr[0].rpartition('/')[2]
ims=kill_i_dqchi_mapstr[1].rpartition('/')[2]
    slots=numpy.uint16(qgrid[2])
    normalizer=integrationnormalization(killmap, imap, dqchiimage, slots)
imap*=killmap
h5chess=CHESSRUNFILE(mode='r+')
h5grp=h5chess[chessrun]
bigrp=h5grp['BckndInventory']
if ims in bigrp:
savegrp=bigrp[ims]
else:
savegrp=bigrp.create_group(ims)
if kms in savegrp:
savegrp=savegrp[kms]
else:
savegrp=savegrp.create_group(kms)
if bckndinvname is None:
blist=[pnt.name.rpartition('/')[2] for pnt in bigrp.itervalues() if isinstance(pnt, h5py.Dataset)]
else:
blist=[bckndinvname]
for bn in blist:
b=readh5pyarray(bigrp[bn])
b1d=normalizer*intbyarray(b, imap, dqchiimage, slots)
ds=savegrp.create_dataset(bn, data=b1d)
for k, v in bigrp[bn].attrs.iteritems():
ds.attrs[k]=v
print 'created ', ds.name
h5chess.close()
def integrateallbckndinvimages(chessrun, imapname, killmapname):#no gui interface***
imap, qgrid=getimapqgrid('/'.join([chessrun, 'imap', imapname]))
slots=numpy.uint16(qgrid[2])
killmap=getkillmap('/'.join([chessrun, 'killmap', killmapname]))
dqchiimage=getdqchiimage('/'.join([chessrun, 'dqchiimage']))
normalizer=integrationnormalization(killmap, imap, dqchiimage, slots)
imap*=killmap
h5chess=CHESSRUNFILE(mode='r+')
h5grp=h5chess[chessrun]
bigrp=h5grp['BckndInventory']
if imapname in bigrp:
savegrp=bigrp[imapname]
else:
savegrp=bigrp.create_group(imapname)
if killmapname in savegrp:
savegrp=savegrp[killmapname]
else:
savegrp=savegrp.create_group(killmapname)
blist=[pnt.name.rpartition('/')[2] for pnt in bigrp.itervalues() if isinstance(pnt, h5py.Dataset)]
for bn in blist:
print bn
b=readh5pyarray(bigrp[bn])
if b.ndim==3:
b1d=numpy.array([normalizer*intbyarray(arr, imap, dqchiimage, slots) for arr in b])
else:
b1d=normalizer*intbyarray(b, imap, dqchiimage, slots)
if bn in savegrp:
del savegrp[bn]
ds=savegrp.create_dataset(bn, data=b1d)
for k, v in bigrp[bn].attrs.iteritems():
ds.attrs[k]=v
print 'created ', ds.name
h5chess.close()
#integratebckndinvimage(h5path='C:/Users/JohnnyG/Documents/CHESS/CHESS2010-12/calibrants/BckndXRD_3060/NoSample_30.dat.h5', h5groupstr='4')
#print 'done'
#def linbckndsub1d(h5path, h5groupstr, bckndinvname0, bckndinvname1, f0vals, f1vals, fraczeroed=0.005, rankfornorm=0.5, fprecision=0.002):#no gui interface
#
# h5file=h5py.File(h5path, mode='r')
# h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
# attrdict=getattr(h5path, h5groupstr)
# pointlist=h5analysis.attrs['pointlist']
# icounts=readh5pyarray(h5mar['icounts'])
#
# imap, qgrid=getimapqgrid(attrdict['imapstr'])
# dqchiimage=getdqchiimage(attrdict['dqchiimagestr'])
# slots=numpy.uint16(qgrid[2])
# killmap=getkillmap(attrdict['killmapstr'])
# normalizer=integrationnormalization(killmap, imap, dqchiimage, slots)
# imap*=killmap
# h5chess=CHESSRUNFILE()
# h5grp=h5chess[attrdict['chessrunstr']]
# b0=readh5pyarray(h5grp['/'.join(('BckndInventory', bckndinvname0))])
# b1=readh5pyarray(h5grp['/'.join(('BckndInventory', bckndinvname1))])
# h5chess.close()
# h5file.close()
#
# b0=normalizer*intbyarray(b0, imap, dqchiimage, slots)
# b1=normalizer*intbyarray(b1, imap, dqchiimage, slots)
#
# b0cpy=copy.copy(b0)
# b1cpy=copy.copy(b1)
## h5chess=CHESSRUNFILE()
## h5grp=h5chess[attrdict['chessrunstr']]
## imapname=attrdict['imapstr'].rpartition('/')[2]
## b0=readh5pyarray(h5grp['/'.join(('BckndInventory1d', imapname, bckndinvname0))])
## b1=readh5pyarray(h5grp['/'.join(('BckndInventory1d', imapname, bckndinvname1))])
## h5chess.close()
#
# f0, f1=FindLinearSumBcknd1d(icounts[pointlist], b0, b1, f0vals, f1vals, fraczeroed=fraczeroed, rankfornorm=rankfornorm, fprecision=fprecision)
# bcknddata=-1.*numpy.dot(numpy.array([f0, f1]).T, numpy.array([b0cpy, b1cpy]))
#
# newicounts=numpy.zeros(icounts.shape, dtype=icounts.dtype)
# newicounts[pointlist]=numpy.array([ic+bc for ic, bc in zip(icounts[pointlist], bcknddata)], dtype=icounts.dtype)
# newicounts[newicounts<0.]=0.
# h5file=h5py.File(h5path, mode='r+')
# h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
# h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
#
# if 'asintegratedicounts' in h5mar:
# del h5mar['asintegratedicounts']
# print 'WARNING:There should not have been an existing icounts_asintegrated but it is being overwritten anyway'
# icountsasint=h5mar.create_dataset('asintegratedicounts', data=icounts)
#
# h5mar['icounts'][:, :]=newicounts[:, :]
#
# w=numpy.zeros((icounts.shape[0], 2), dtype=f0.dtype)
# w[pointlist, :]=numpy.array([f0, f1]).T
# asintattrs={'weights':w, 'b0':b0cpy, 'b1':b1cpy, 'bckndinvname0':bckndinvname0, 'bckndinvname1':bckndinvname1, 'f0vals':f0vals, 'f1vals':f1vals, 'fraczeroed':fraczeroed, 'rankfornorm':rankfornorm, 'fprecision':fprecision}
# for key, val in asintattrs.iteritems():
# icountsasint.attrs[key]=val
#
# if 'ibckndadd' in h5mar:
# del h5mar['ibckndadd']
# h5mar.create_dataset('ibckndadd', data=numpy.array(bcknddata))
#
# h5file.close()
#
#def FindLinearSumBcknd1d(counts, b0, b1, f0vals, f1vals, fraczeroed=0.05, rankfornorm=0.5, fprecision=0.01):#takes the n x image counts and kill and bcknd images and using the f0vals,f1vals as guesses for the weights of the normalized bcknd images, find the f0,f1 that sum to maximum total counts while staying below fraczeroed pixels being zeroed
# vol0=b0.sum()
# vol1=b1.sum()
# b0=numpy.float32(b0)
# b0wt=AveArrUpToRank(b0, rank=rankfornorm)
# b0/=b0wt
# b1=numpy.float32(b1)
# b1wt=AveArrUpToRank(b1, rank=rankfornorm)
# b1/=b1wt
# nz=counts.shape[1]*fraczeroed
# print 'nz: ', nz, ' trials: ', len(f0vals)*len(f1vals)
# print 'The trial values are \nf0:', f0vals, '\nf1:', f1vals
# f0final=[]
# f1final=[]
# for counter, c in enumerate(counts):
# print 'Starting image ', counter
# c=numpy.float32(c)
# cwt=AveArrUpToRank(c, rank=rankfornorm)
# c/=cwt
# nzero_f0f1=numpy.float32([[((c-f0*b0-f1*b1)<0.).sum(dtype='float32') for f1 in f1vals] for f0 in f0vals])
# print 'num trials w too many zeroed: ', len(numpy.where(nzero_f0f1>nz)[0])
# garb, f0poss, f1poss=find2darrayzeros(f0vals, f1vals, nzero_f0f1-nz)
# print 'Zeros were found within the array of guesses?', garb
# f0mod=[]
# f1mod=[]
# print 'f0poss', f0poss
# print 'f1poss', f1poss
# for f0, f1 in zip(f0poss, f1poss):
# lowbool=((c-f0*b0-f1*b1)<0.).sum(dtype='float32')<nz
# newlowbool=lowbool
# fct=1.+(lowbool*2.-1.)*fprecision
# while lowbool==newlowbool:
# f0*=fct
# f1*=fct
# newlowbool=((c-f0*b0-f1*b1)<0.).sum(dtype='float32')<nz
# if lowbool: #use the factors that low-ball the nz
# f0/=fct
# f1/=fct
# f0mod+=[f0]
# f1mod+=[f1]
# #print 'f0mod', f0mod
# #print 'f1mod', f1mod
# f0mod=numpy.float32(f0mod)
# f1mod=numpy.float32(f1mod)
# print 'tot vol', vol0*f0mod/b0wt+vol1*f1mod/b1wt
# i=numpy.argmax(vol0*f0mod/b0wt+vol1*f1mod/b1wt) #vol0 and vol1 were calcuated before the biwt scaling so the wieghts have to be used here
# print 'f0poss, f1poss used', f0poss[i], f1poss[i]
# print 'f0mod, f1mod used', f0mod[i], f1mod[i]
# f0final+=[f0mod[i]*cwt/b0wt]
# f1final+=[f1mod[i]*cwt/b1wt]
## pylab.subplot(211)
## pylab.plot(c)
## pylab.plot(b0, label='b0')
## pylab.plot(b1, label='b1')
## pylab.plot(f0mod[i]*b0+f1mod[i]*b1, label='bcknd')
## pylab.legend()
# return numpy.float32(f0final), numpy.float32(f1final)
#
#
##bckndsub1d_difffiles('/home/gregoire/CHESS_2010-12/AuSiCu_mem_220C.dat.h5', '23cells', '/mnt/hgfs/HostDocuments/CHESS/CHESS2010-12/h5analysis/NoSample_40round2_noPb_25Jan.h5', '8', 0)
#
##bckndinvname0='Dark_24imave'
##bckndinvname1='NoSample_NoPb_480s'
##f0vals=numpy.sort(numpy.float32(numpy.append([-1., -.7, -.5, .5, .7, 1.], numpy.linspace(-.4,.4,20.))))
##f1vals=numpy.sort(numpy.float32(numpy.append([.00001, .01, .1, .2, .3, .4, 1.5, 1.7, 2.], numpy.linspace(.5,1.4, 20.))))
##
##f0vals=numpy.sort(numpy.float32(numpy.append(numpy.linspace(-.4,.4,6.), numpy.linspace(.41,1.2,40.))))
##f1vals=numpy.sort(numpy.float32(numpy.append([.00001, .12, .16, .2, .3, .7, 1., 1.5], numpy.linspace(.0001, .1, 30))))
##
##
##h5path='F:/CHESS_2010DEC/AgRapid/Ag_rapidExpopens130ms_6dec_nobcknd.h5'
##h5groupstr='130ms'
##h5file=h5py.File(h5path, mode='r+')
##h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
##h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
##t=h5mar['asintegratedicounts'][:, :]
##h5mar['icounts'][:, :]=t
##h5file.close()
##
###linbckndsub1d(h5path,h5groupstr, bckndinvname0, bckndinvname1, f0vals, f1vals, fraczeroed=0.008, rankfornorm=0.5, fprecision=0.002)
##
##
##fraczeroed=0.008
###rankfornorm=0.5
##rankfornorm=1.
##fprecision=0.0005
##
##
##h5file=h5py.File(h5path, mode='r')
##h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
##h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
##attrdict=getattr(h5path, h5groupstr)
##pointlist=h5analysis.attrs['pointlist']
##pointlist=[0]
##icounts=readh5pyarray(h5mar['icounts'])
##
###imap, qgrid=getimapqgrid(attrdict['imapstr'])
###dqchiimage=getdqchiimage(attrdict['dqchiimagestr'])
###slots=numpy.uint16(qgrid[2])
###killmap=getkillmap(attrdict['killmapstr'])
###normalizer=integrationnormalization(killmap, imap, dqchiimage, slots)
###imap*=killmap
###h5chess=CHESSRUNFILE()
###h5grp=h5chess[attrdict['chessrunstr']]
###b0=readh5pyarray(h5grp['/'.join(('BckndInventory', bckndinvname0))])
###b1=readh5pyarray(h5grp['/'.join(('BckndInventory', bckndinvname1))])
###h5chess.close()
##
##b0=h5mar['asintegratedicounts'].attrs['b0']
##b1=h5mar['asintegratedicounts'].attrs['b1']
##h5file.close()
##
###b0=normalizer*intbyarray(b0, imap, dqchiimage, slots)
###b1=normalizer*intbyarray(b1, imap, dqchiimage, slots)
##
##b0cpy=copy.copy(b0)
##b1cpy=copy.copy(b1)
##
##f0, f1=FindLinearSumBcknd1d(icounts[pointlist], b0, b1, f0vals, f1vals, fraczeroed=fraczeroed, rankfornorm=rankfornorm, fprecision=fprecision)
##bcknddata=-1.*numpy.dot(numpy.array([f0, f1]).T, numpy.array([b0cpy, b1cpy]))
##
##newicounts=numpy.zeros(icounts.shape, dtype=icounts.dtype)
##newicounts[pointlist]=numpy.array([ic+bc for ic, bc in zip(icounts[pointlist], bcknddata)], dtype=icounts.dtype)
##newicounts[newicounts<0.]=0.
##h5file=h5py.File(h5path, mode='r+')
##h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
##h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
##
###if 'asintegratedicounts' in h5mar:
### del h5mar['asintegratedicounts']
### print 'WARNING:There should not have been an existing icounts_asintegrated but it is being overwritten anyway'
###icountsasint=h5mar.create_dataset('asintegratedicounts', data=icounts)
##
###h5mar['icounts'][:, :]=newicounts[:, :]
##
##w=numpy.zeros((icounts.shape[0], 2), dtype=f0.dtype)
##w[pointlist, :]=numpy.array([f0, f1]).T
##asintattrs={'weights':w, 'b0':b0cpy, 'b1':b1cpy, 'bckndinvname0':bckndinvname0, 'bckndinvname1':bckndinvname1, 'f0vals':f0vals, 'f1vals':f1vals, 'fraczeroed':fraczeroed, 'rankfornorm':rankfornorm, 'fprecision':fprecision}
###for key, val in asintattrs.iteritems():
### icountsasint.attrs[key]=val
##
##if 'ibckndadd' in h5mar:
## del h5mar['ibckndadd']
##h5mar.create_dataset('ibckndadd', data=numpy.array(bcknddata))
##
##h5file.close()
##
##pylab.subplot(212)
##pylab.plot(icounts[0, :])
##pylab.plot(newicounts[0, :])
##pylab.plot(b0cpy, label='b0')
##pylab.plot(b1cpy, label='b1')
##pylab.plot(-1.*bcknddata[0], label='bcknd')
##h5file.close()
##pylab.legend()
##pylab.show()
##
###h5file=h5py.File(h5path, mode='r')
###h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
###h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
###import pylab
###pylab.plot(h5mar['asintegratedicounts'][9, :])
###pylab.plot(h5mar['icounts'][9, :])
###h5file.close()
###pylab.show()
##
##
##
##print 'done'
##
#
def darksubtract2dimages(h5path, h5groupstr, bckdninvname):#no GUI
h5file=h5py.File(h5path, mode='r+')
h5analysis=h5file['/'.join((h5groupstr, 'analysis'))]
h5mar=h5file['/'.join((h5groupstr, 'analysis', getxrdname(h5analysis)))]
h5marcounts=h5file['/'.join((h5groupstr,'measurement', getxrdname(h5analysis), 'counts'))]
pointlist=h5analysis.attrs['pointlist']
# except:
# h5marcounts=h5file['/'.join((h5groupstr,'measurement', 'area_detector', 'counts'))]
# pointlist=range(h5marcounts.shape[0])
#h5marcounts.parent.copy('counts', 'initcounts')
h5chess=CHESSRUNFILE()
h5grp=h5chess[h5analysis.attrs['chessrunstr']]
bipnt=h5grp['BckndInventory'][bckdninvname]
b0=readh5pyarray(bipnt)
subex_b0=bipnt.attrs['subexposures']
h5chess.close()
b0=numpy.float32(b0)
if 'mod_multiplierarray' in h5marcounts.attrs:
multarr=h5marcounts.attrs['mod_multiplierarray']
else:
multarr=numpy.ones(h5marcounts.shape[0], dtype='float32')
multarr*=h5marcounts.attrs['subexposures']*1./subex_b0
h5marcounts.attrs['DarkSub_BckndInv']=bckdninvname
h5marcounts.attrs['DarkSub_multarr']=multarr
for pointind in pointlist:
data=h5marcounts[pointind, :, :]
data=numpy.float32(data)-b0*multarr[pointind]
print (data<0.).sum(), ' pixels being zeroed in 2-d dark subtraction on image ', pointind
data[data<0.]=0.
data=numpy.array(data, dtype=h5marcounts.dtype)
h5marcounts[pointind, :, :]=data
h5file.close()
#p='E:/CHESS_2010DEC/Cornell_h5analysis/Anna_20100609XPdAu.dat.h5'
#g='3'
#darksubtract2dimages(p, g, 'Dark_24imave')
#integrateallbckndinvimages('2011', '8,0.1,630', 'killmap1')
|
johnmgregoire/vanDover_CHESS
|
xrd_fileIO_fcns.py
|
Python
|
bsd-3-clause
| 132,535
|
[
"Gaussian"
] |
b78e777d3a86e1590293f5caae91786967e7d23f7e92066ecf463e43c6098089
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2009-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Schema Update Wizard """
import platform
import sys
import glib
import gtk
from stoqlib.api import api
from stoqlib.gui.base.wizards import BaseWizard, BaseWizardStep
from stoqlib.gui.utils.logo import render_logo_pixbuf
from stoqlib.gui.widgets.processview import ProcessView
from stoqlib.lib.kiwilibrary import library
from stoqlib.lib.message import info
from stoqlib.lib.translation import stoqlib_gettext as _
import stoq
#
# Wizard Steps
#
class UpdateWelcomeStep(BaseWizardStep):
gladefile = 'UpdateWelcomeStep'
def post_init(self):
self.title_label.set_size('xx-large')
self.title_label.set_bold(True)
self.logo.set_from_pixbuf(render_logo_pixbuf('update'))
self.wizard.next_button.grab_focus()
def next_step(self):
return UpdateSchemaStep(None, self.wizard)
class UpdateSchemaStep(BaseWizardStep):
gladefile = 'UpdateSchemaStep'
#
# WizardStep
#
def post_init(self):
self._finished = False
self.process_view = ProcessView()
self.process_view.listen_stderr = True
self.process_view.connect('read-line', self._on_processview__readline)
self.process_view.connect('finished', self._on_processview__finished)
self.expander.add(self.process_view)
self._launch_stoqdbadmin()
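        # Pulse the progress bar every 50 ms until stoqdbadmin finishes.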
glib.timeout_add(50, self._on_timeout_add)
def has_next_step(self):
return False
# Private
def _parse_process_line(self, line):
# Errors and other messages thrown by stoqdbadmin are not displayed in
# this wizard. Using info here instead of error, so that the user can
        # still see the log.
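        # stoqdbadmin prefixes its status lines with markers: 'ERROR:' and,
        # after the log category, 'PATCH:', 'BACKUP-START:', 'RESTORE-START:'
        # or 'RESTORE-DONE:'; anything else is ignored.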
if line.startswith('ERROR:'):
msg = line[7:]
info(msg)
LOG_CATEGORY = 'stoqlib.database.create'
log_pos = line.find(LOG_CATEGORY)
if log_pos == -1:
return
line = line[log_pos + len(LOG_CATEGORY) + 1:]
longer = None
if line.startswith('PATCH:'):
patch = line.split(':', 1)[1]
text = _("Applying patch %s ...") % (patch, )
elif line.startswith('BACKUP-START:'):
text = _("Creating a database backup")
longer = _('Creating a database backup in case anything goes wrong.')
elif line.startswith('RESTORE-START:'):
text = _("Restoring database backup")
longer = _(
'Stoq update failed.\n\n'
'We will try to restore the current database.\n\n'
'This may take some time.')
elif line.startswith('RESTORE-DONE:'):
msg = line.split(':', 1)[1]
text = _("Database backup restored")
longer = _(
'Stoq database update failed but the database was restored.\n'
'An automatic crash report was submitted. Please, '
'enter in contact at <b>stoq-users@stoq.com.br</b> for '
'assistance in recovering your database and making it '
'possible to use Stoq %s again.\n\n'
'A backup database was created as <b>%s</b>') % (
stoq.version, msg, )
else:
return
self.progressbar.set_text(text)
if not longer:
longer = ''
self.label.set_markup(longer)
def _launch_stoqdbadmin(self):
self.wizard.disable_next()
if sys.argv[0].endswith('.egg'):
args = [sys.executable, sys.argv[0]]
elif platform.system() == 'Windows':
if library.uninstalled:
args = ['stoq.bat']
else:
args = ['stoq.exe']
else:
args = ['stoq']
args.extend(['dbadmin', 'updateschema', '-v'])
args.extend(api.db_settings.get_command_line_arguments())
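        # The assembled command is e.g. "stoq dbadmin updateschema -v"
        # plus the database settings arguments.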
self.process_view.execute_command(args)
self.progressbar.set_text(_('Applying database patches...'))
def _finish(self, returncode):
self._finished = True
if returncode:
self.wizard.cancel_button.set_label(gtk.STOCK_QUIT)
self.progressbar.set_fraction(0.0)
else:
# Migration may have changed some parameters, so clear the cache.
api.sysparam.clear_cache()
self.wizard.cancel_button.set_sensitive(True)
self.progressbar.set_text(_("Done. Click 'Forward' to continue"))
self.progressbar.set_fraction(1.0)
self.wizard.enable_next()
self.wizard.next_button.grab_focus()
# Callbacks
def _on_processview__readline(self, view, line):
self._parse_process_line(line)
def _on_processview__finished(self, view, returncode):
self._finish(returncode)
def _on_timeout_add(self):
if self._finished:
return False
self.progressbar.pulse()
return True
#
# Main wizard
#
class SchemaUpdateWizard(BaseWizard):
title = _("Updating Stoq")
size = (450, 300)
def __init__(self):
first_step = UpdateWelcomeStep(None, wizard=self)
BaseWizard.__init__(self, None, first_step, title=self.title)
# Disable back until #2771 is solved
self.previous_button.hide()
#
# WizardStep hooks
#
def finish(self):
self.retval = True
self.close()
|
andrebellafronte/stoq
|
stoq/gui/update.py
|
Python
|
gpl-2.0
| 6,253
|
[
"VisIt"
] |
0e35797558535ad3962d14759fd256f24cdefb871e08ac61f14ff43b3ebe5dfe
|
# pyeq2 is a collection of equations expressed as Python classes
#
# Copyright (C) 2012 James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
#
# email: zunzun@zunzun.com
# web: http://zunzun.com
#
# License: BSD-style (see LICENSE.txt in main source directory)
# Version info: $Id: Miscellaneous.py 1 2012-01-07 22:20:43Z zunzun.com@gmail.com $
import sys, os
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '..'))
import pyeq2
import numpy
numpy.seterr(over = 'raise', divide = 'raise', invalid = 'raise', under = 'ignore') # numpy raises warnings, convert to exceptions to trap them
import pyeq2.Model_3D_BaseClass
class GaryCler(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gary Cler's Custom Equation"
_HTML = 'z = a * x<sup>b</sup> * y<sup>c</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * numpy.power(x_in, b) * numpy.power(y_in, c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(x_in, b) * pow(y_in, c);\n"
return s
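# A minimal fitting sketch for the models in this module (hedged: it follows
# the usage shown in pyeq2's bundled examples, and the data values are made up):
#
# equation = GaryCler('SSQABS')
# rawData = '1.0 1.0 2.0\n2.0 1.0 8.1\n1.0 2.0 3.9\n2.0 2.0 16.2\n3.0 1.0 18.0'
# pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(rawData, equation, False)
# equation.Solve()
# print equation.solvedCoefficients  # fitted [a, b, c]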
class GaryCler_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gary Cler's Custom Equation Transform"
_HTML = 'z = a * (dx + f)<sup>b</sup> * (gy + h)<sup>c</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g', 'h']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
h = inCoeffs[6]
try:
temp = a * numpy.power(d * x_in + f, b) * numpy.power(g * y_in + h, c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(d * x_in + f, b) * pow(g * y_in + h, c);\n"
return s
class GaussianCurvatureOfParaboloid(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gaussian Curvature Of Paraboloid"
_HTML = 'z = 4a<sup>2</sup> / (1 + 4a<sup>2</sup> * (x<sup>2</sup> + y<sup>2</sup>))<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XSQPLUSYSQ(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XSQPLUSYSQ = inDataCacheDictionary['XSQPLUSYSQ'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = 4.0 * a * a / numpy.power(1.0 + 4.0 * a * a * XSQPLUSYSQ, 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = 4.0 * a * a / pow(1.0 + 4.0 * a * a * (x_in * x_in + y_in * y_in), 2.0);\n"
return s
class GaussianCurvatureOfRichmondsMinimalSurface(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gaussian Curvature Of Richmond's Minimal Surface"
_HTML = 'z = -1.0 * a * (x<sup>2</sup> + y<sup>2</sup>)<sup>3</sup> / (b + (x<sup>2</sup> + y<sup>2</sup>)<sup>2</sup>)<sup>4</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XSQPLUSYSQ(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XSQPLUSYSQ = inDataCacheDictionary['XSQPLUSYSQ'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = -1.0 * a * numpy.power(XSQPLUSYSQ, 3.0) / numpy.power(b + numpy.power(XSQPLUSYSQ, 2.0), 4.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = -1.0 * a * pow(x_in * x_in + y_in * y_in, 3.0) / pow(b + pow(x_in * x_in + y_in * y_in, 2.0), 4.0);\n"
return s
class GaussianCurvatureOfWhitneysUmbrellaA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gaussian Curvature Of Whitney's Umbrella A"
_HTML = 'z = -1.0 * a * y<sup>2</sup> / (x<sup>2</sup> + a * (y<sup>2</sup> + y<sup>4</sup>))<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[4.0]), [4.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
PowY2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
PowY4 = inDataCacheDictionary['PowY_4.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = -1.0 * a * PowY2 / numpy.power(PowX2 + a * (PowY2 + PowY4), 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = -1.0 * a * y_in * y_in / pow(x_in * x_in + a * (y_in * y_in + pow(y_in, 4.0)), 2.0);\n"
return s
class GaussianCurvatureOfWhitneysUmbrellaB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Gaussian Curvature Of Whitney's Umbrella B"
_HTML = 'z = -1.0 * a * x<sup>2</sup> / (y<sup>2</sup> + a * (x<sup>2</sup> + x<sup>4</sup>))<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[4.0]), [4.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowY2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
PowX4 = inDataCacheDictionary['PowX_4.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = -1.0 * a * PowX2 / numpy.power(PowY2 + a * (PowX2 + PowX4), 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = -1.0 * a * x_in * x_in / pow(y_in * y_in + a * (x_in * x_in + pow(x_in, 4.0)), 2.0);\n"
return s
class LipingZheng(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Liping Zheng's core loss coefficients"
_HTML = 'z = ax<sup>2</sup>y + bx<sup>2</sup>y<sup>2</sup> + cx<sup>1.5</sup>y<sup>1.5</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX_PowY(NameOrValueFlag=1, args=[2.0, 1.0]), [2.0, 1.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX_PowY(NameOrValueFlag=1, args=[2.0, 2.0]), [2.0, 2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX_PowY(NameOrValueFlag=1, args=[1.5, 1.5]), [1.5, 1.5]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_PowY21 = inDataCacheDictionary['PowX_PowY_2.01.0'] # only need to perform this dictionary look-up once
PowX_PowY22 = inDataCacheDictionary['PowX_PowY_2.02.0'] # only need to perform this dictionary look-up once
PowX_PowY15_15 = inDataCacheDictionary['PowX_PowY_1.51.5'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * PowX_PowY21 + b * PowX_PowY22 + c * PowX_PowY15_15
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * x_in * x_in * y_in + b * x_in * x_in * y_in * y_in + c * pow(x_in, 1.5) * pow(y_in, 1.5);\n"
return s
class MeanCurvatureOfParaboloid(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Mean Curvature Of Paraboloid"
_HTML = 'z = 2 * (a + 2a<sup>3</sup> * (x<sup>2</sup> + y<sup>2</sup>)) / (1 + 4a<sup>2</sup> * (x<sup>2</sup> + y<sup>2</sup>))<sup>1.5</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XSQPLUSYSQ(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XSQPLUSYSQ = inDataCacheDictionary['XSQPLUSYSQ'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = 2.0 * (a + 2.0 * numpy.power(a, 3.0) * XSQPLUSYSQ) / numpy.power(1.0 + 4.0 * a * a * XSQPLUSYSQ, 1.5)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = 2.0 * (a + 2.0 * pow(a, 3.0) * (x_in * x_in + y_in * y_in)) / pow(1.0 + 4.0 * a * a * (x_in * x_in + y_in * y_in), 1.5);\n"
return s
class MeanCurvatureOfWhitneysUmbrellaA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Mean Curvature Of Whitney's Umbrella A"
_HTML = 'z = -1.0 * x * (a + b * y<sup>2</sup>) / (x<sup>2</sup> + a * (y<sup>2</sup> + y<sup>4</sup>))<sup>1.5</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[4.0]), [4.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
PowY2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
PowY4 = inDataCacheDictionary['PowY_4.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = -1.0 * x_in * (a + b * PowY2) / numpy.power(PowX2 + a * (PowY2 + PowY4), 1.5)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = -1.0 * x_in * (a + b * y_in * y_in) / pow(x_in * x_in + a * (y_in * y_in + pow(y_in, 4.0)), 1.5);\n"
return s
class MeanCurvatureOfWhitneysUmbrellaB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Mean Curvature Of Whitney's Umbrella B"
_HTML = 'z = -1.0 * y * (a + b * x<sup>2</sup>) / (y<sup>2</sup> + a * (x<sup>2</sup> + x<sup>4</sup>))<sup>1.5</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[4.0]), [4.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
PowY2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
PowX2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
PowX4 = inDataCacheDictionary['PowX_4.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
            temp = -1.0 * y_in * (a + b * PowX2) / numpy.power(PowY2 + a * (PowX2 + PowX4), 1.5)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = -1.0 * y_in * (a + b * x_in * x_in) / pow(y_in * y_in + a * (x_in * x_in + pow(x_in, 4.0)), 1.5);\n"
return s
class MennSurfaceA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Menn's Surface A"
_HTML = 'z = ax<sup>4</sup> + bx<sup>2</sup>y - cy<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[4.0]), [4.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
PowX_4 = inDataCacheDictionary['PowX_4.0'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * PowX_4 + b * PowX_2 * y_in - c * PowY_2
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * x_in * x_in * x_in * x_in + b * x_in * x_in * y_in - c * y_in * y_in;\n"
return s
class MennSurfaceB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Menn's Surface B"
    _HTML = 'z = ay<sup>4</sup> + by<sup>2</sup>x - cx<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[4.0]), [4.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
PowY_4 = inDataCacheDictionary['PowY_4.0'] # only need to perform this dictionary look-up once
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a * PowY_4 + b * PowY_2 * x_in - c * PowX_2
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * y_in * y_in * y_in * y_in + b * y_in * y_in * x_in - c * x_in * x_in;\n"
return s
class MonkeySaddleA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Monkey Saddle A"
_HTML = 'z = ax<sup>3</sup> - bxy<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[3.0]), [3.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowX_3 = inDataCacheDictionary['PowX_3.0'] # only need to perform this dictionary look-up once
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * PowX_3 - b * x_in * PowY_2
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * x_in * x_in * x_in - b * x_in * y_in * y_in;\n"
return s
class MonkeySaddleB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Monkey Saddle B"
_HTML = 'z = ay<sup>3</sup> - byx<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[3.0]), [3.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
PowY_3 = inDataCacheDictionary['PowY_3.0'] # only need to perform this dictionary look-up once
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a * PowY_3 - b * y_in * PowX_2
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * y_in * y_in * y_in - b * y_in * x_in * x_in;\n"
return s
class MonkeySaddle_TransformA(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Monkey Saddle Transform A"
_HTML = 'z = a(cx + d)<sup>3</sup> - b(cx + d)(fy + g)<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.power(c * x_in + d, 3.0) - b * (c * x_in + d) * numpy.power(f * y_in + g, 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(c * x_in + d, 3.0) - b * (c * x_in + d) * pow(f * y_in + g, 2.0);\n"
return s
class MonkeySaddle_TransformB(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Monkey Saddle Transform B"
    _HTML = 'z = a(cy + d)<sup>3</sup> - b(cy + d)(fx + g)<sup>2</sup>'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f', 'g']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
g = inCoeffs[5]
try:
temp = a * numpy.power(c * y_in + d, 3.0) - b * (c * y_in + d) * numpy.power(f * x_in + g, 2.0)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * pow(c * x_in + d, 3.0) - b * (c * x_in + d) * pow(f * y_in + g, 2.0);\n"
return s
class Paraboloid(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Paraboloid"
_HTML = 'z = a * (x<sup>2</sup> + y<sup>2</sup>)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XSQPLUSYSQ(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XSQPLUSYSQ = inDataCacheDictionary['XSQPLUSYSQ'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
try:
temp = a * XSQPLUSYSQ
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * (x_in * x_in + y_in * y_in);\n"
return s
class Paraboloid_Transform(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Paraboloid Transform"
_HTML = 'z = a * ((bx + c)<sup>2</sup> + (dy + f)<sup>2</sup>)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c', 'd', 'f']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
d = inCoeffs[3]
f = inCoeffs[4]
try:
temp = a * (numpy.power(b * x_in + c, 2.0) + numpy.power(d * y_in + f, 2.0))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a * (pow(b * x_in + c, 2.0) + pow(d * y_in + f, 2.0));\n"
return s
class PaschensBreakdownFieldStrengthLaw(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Paschen's Law for Breakdown Field Strength"
_HTML = 'Ebreakdown = pressure * (a / (ln(pressure * distance) + b))'
_leftSideHTML = 'Ebreakdown'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
LogXY = inDataCacheDictionary['LogXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = x_in * (a / (LogXY + b))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = x_in * (a / (log(x_in * y_in) + b));\n"
return s
class PaschensBreakdownVoltageLaw(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Paschen's Law for Breakdown Voltage"
_HTML = 'Vbreakdown = a(pressure * distance) / (ln(pressure * distance) + b)'
_leftSideHTML = 'Vbreakdown'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogXY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
LogXY = inDataCacheDictionary['LogXY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = (a * XY) / (LogXY + b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = (a * x_in * y_in) / (log(x_in * y_in) + b);\n"
return s
class Simple_Equation_01(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 01"
_HTML = 'z = a*pow(x,b)*pow(y,c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b)*numpy.power(y_in,c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b)*pow(y_in,c);\n"
return s
class Simple_Equation_02(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 02"
_HTML = 'z = x/(a+b*y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = x_in/(a+b*y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = x_in/(a+b*y_in);\n"
return s
class Simple_Equation_03(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 03"
_HTML = 'z = y/(a+b*x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = y_in/(a+b*x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = y_in/(a+b*x_in);\n"
return s
class Simple_Equation_04(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 04"
_HTML = 'z = a*pow(x,b*y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*numpy.power(x_in,b*y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b*y_in);\n"
return s
class Simple_Equation_05(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 05"
_HTML = 'z = a*pow(y,b*x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*numpy.power(y_in,b*x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in,b*x_in);\n"
return s
class Simple_Equation_06(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 06"
_HTML = 'z = a*pow(x,b/y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*numpy.power(x_in,b/y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b/y_in);\n"
return s
class Simple_Equation_07(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 07"
_HTML = 'z = a*pow(y,b/x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*numpy.power(y_in,b/x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in,b/x_in);\n"
return s
class Simple_Equation_08(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 08"
_HTML = 'z = a*x+b*pow(y,2.0)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*x_in+b*PowY_2
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*x_in+b*pow(y_in,2.0);\n"
return s
class Simple_Equation_09(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 09"
_HTML = 'z = a*y+b*pow(x,2.0)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*y_in+b*PowX_2
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*y_in+b*pow(x_in,2.0);\n"
return s
class Simple_Equation_10(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 10"
_HTML = 'z = x/(a+b*pow(y,2.0))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = x_in/(a+b*PowY_2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = x_in/(a+b*pow(y_in,2.0));\n"
return s
class Simple_Equation_11(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 11"
_HTML = 'z = y/(a+b*pow(x,2.0))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = y_in/(a+b*PowX_2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = y_in/(a+b*pow(x_in,2.0));\n"
return s
class Simple_Equation_12(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 12"
_HTML = 'z = a*pow(b,x)*pow(y,c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(b,x_in)*numpy.power(y_in,c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(b,x_in)*pow(y_in,c);\n"
return s
class Simple_Equation_13(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 13"
_HTML = 'z = a*pow(b,y)*pow(x,c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(b,y_in)*numpy.power(x_in,c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(b,y_in)*pow(x_in,c);\n"
return s
class Simple_Equation_14(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 14"
_HTML = 'z = a*pow(x*y,b)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XY = inDataCacheDictionary['XY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*numpy.power(XY,b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in*y_in,b);\n"
return s
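# The bare try/except in every CalculateModelPredictions above returns an
# array of 1.0E300 so the fitter treats a failed coefficient set as having a
# huge error instead of crashing. A standalone sketch of that guard, using
# only numpy (illustrative; numpy overflow normally only warns, so
# errstate(all='raise') is needed here to make it actually trip the except):
def _guardedXYPower(a, b, x_in, y_in):
    try:
        with numpy.errstate(all='raise'):  # promote overflow/invalid to exceptions
            return a * numpy.power(x_in * y_in, b)  # same form as Simple Equation 14
    except Exception:
        return numpy.ones(len(x_in)) * 1.0E300  # sentinel: reject these coefficients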
class Simple_Equation_15(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 15"
_HTML = 'z = a*pow(x/y,b)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.XOVERY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
XOVERY = inDataCacheDictionary['XOVERY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
try:
temp = a*numpy.power(XOVERY,b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in/y_in,b);\n"
return s
class Simple_Equation_16(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 16"
_HTML = 'z = a*(pow(b,1.0/x))*pow(y,c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.RecipX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
RecipX = inDataCacheDictionary['RecipX'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*(numpy.power(b,RecipX))*numpy.power(y_in,c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*(pow(b,1.0/x_in))*pow(y_in,c);\n"
return s
class Simple_Equation_17(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 17"
_HTML = 'z = a*pow(b,1.0/y)*pow(x,c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.RecipY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
RecipY = inDataCacheDictionary['RecipY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(b,RecipY)*numpy.power(x_in,c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(b,1.0/y_in)*pow(x_in,c);\n"
return s
class Simple_Equation_18(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 18"
_HTML = 'z = a*pow(x/b,c)*exp(y/b)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in/b,c)*numpy.exp(y_in/b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in/b,c)*exp(y_in/b);\n"
return s
class Simple_Equation_19(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 19"
_HTML = 'z = a*pow(y/b,c)*exp(x/b)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in/b,c)*numpy.exp(x_in/b)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in/b,c)*exp(x_in/b);\n"
return s
class Simple_Equation_20(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 20"
_HTML = 'z = a*pow(x,b+c*y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b+c*y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b+c*y_in);\n"
return s
class Simple_Equation_21(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 21"
_HTML = 'z = a*pow(y,b+c*x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in,b+c*x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in,b+c*x_in);\n"
return s
class Simple_Equation_22(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 22"
_HTML = 'z = a*pow(x,b+c/y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b+c/y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b+c/y_in);\n"
return s
class Simple_Equation_23(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 23"
_HTML = 'z = a*pow(y,b+c/x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in,b+c/x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in,b+c/x_in);\n"
return s
class Simple_Equation_24(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 24"
_HTML = 'z = a*pow(x,b+c*ln(y))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
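    # ln(y) requires y > 0, hence both the zero and negative flags on the
    # second independent variable; pow(x, real exponent) requires x >= 0,
    # hence the negative flag on the first.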
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
LogY = inDataCacheDictionary['LogY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b+c*LogY)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
        s = "\ttemp = a*pow(x_in,b+c*log(y_in));\n"  # C/C++ spells natural log as log(), unlike the ln() shown in _HTML
return s
class Simple_Equation_25(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 25"
_HTML = 'z = a*pow(y,b+c*ln(x))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in,b+c*LogX)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
        s = "\ttemp = a*pow(y_in,b+c*log(x_in));\n"
return s
class Simple_Equation_26(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 26"
_HTML = 'z = a*pow(y,b+c/ln(x))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in,b+c/LogX)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
        s = "\ttemp = a*pow(y_in,b+c/log(x_in));\n"
return s
class Simple_Equation_27(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 27"
_HTML = 'z = a*pow(x,b+c/ln(y))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
LogY = inDataCacheDictionary['LogY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b+c/LogY)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
        s = "\ttemp = a*pow(x_in,b+c/log(y_in));\n"
return s
class Simple_Equation_28(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 28"
_HTML = 'z = a*exp(b*x+c*pow(y,2.0))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.exp(b*x_in+c*PowY_2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*exp(b*x_in+c*pow(y_in,2.0));\n"
return s
class Simple_Equation_29(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 29"
_HTML = 'z = a*exp(b*y+c*pow(x,2.0))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.exp(b*y_in+c*PowX_2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*exp(b*y_in+c*pow(x_in,2.0));\n"
return s
class Simple_Equation_30(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 30"
_HTML = 'z = a*exp(b/x+c*y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.exp(b/x_in+c*y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*exp(b/x_in+c*y_in);\n"
return s
class Simple_Equation_31(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 31"
_HTML = 'z = a*exp(b/y+c*x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.exp(b/y_in+c*x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*exp(b/y_in+c*x_in);\n"
return s
class Simple_Equation_32(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 32"
_HTML = 'z = (a+x)/(b+c*y)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = (a+x_in)/(b+c*y_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = (a+x_in)/(b+c*y_in);\n"
return s
class Simple_Equation_33(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 33"
_HTML = 'z = (a+y)/(b+c*x)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = (a+y_in)/(b+c*x_in)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = (a+y_in)/(b+c*x_in);\n"
return s
class Simple_Equation_34(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 34"
_HTML = 'z = (a+x)/(b+c*pow(y,2.0))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[2.0]), [2.0]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
PowY_2 = inDataCacheDictionary['PowY_2.0'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = (a+x_in)/(b+c*PowY_2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = (a+x_in)/(b+c*pow(y_in,2.0));\n"
return s
class Simple_Equation_35(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 35"
_HTML = 'z = (a+y)/(b+c*pow(x,2.0))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[2.0]), [2.0]])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
PowX_2 = inDataCacheDictionary['PowX_2.0'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = (a+y_in)/(b+c*PowX_2)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = (a+y_in)/(b+c*pow(x_in,2.0));\n"
return s
class Simple_Equation_36(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 36"
_HTML = 'z = a*(exp(b*x)-exp(c*y))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*(numpy.exp(b*x_in)-numpy.exp(c*y_in))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*(exp(b*x_in)-exp(c*y_in));\n"
return s
class Simple_Equation_37(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 37"
_HTML = 'z = a*pow(x,b*pow(y,c))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b*numpy.power(y_in,c))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b*pow(y_in,c));\n"
return s
class Simple_Equation_38(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 38"
_HTML = 'z = a*pow(y,b*pow(x,c))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in,b*numpy.power(x_in,c))
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in,b*pow(x_in,c));\n"
return s
class Simple_Equation_39(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 39"
_HTML = 'z = x/(a+b*y+c*pow(y,0.5))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowY(NameOrValueFlag=1, args=[0.5]), [0.5]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
PowY05 = inDataCacheDictionary['PowY_0.5'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = x_in/(a+b*y_in+c*PowY05)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = x_in/(a+b*y_in+c*pow(y_in,0.5));\n"
return s
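# Cache keys combine the DataCacheFunctions name with any power argument, so
# PowY(args=[0.5]) is looked up as 'PowY_0.5'. A hand-built cache dictionary
# (hypothetical test harness, bypassing pyeq2's DataCache machinery) is enough
# to call CalculateModelPredictions directly; the 'SSQABS' constructor argument
# and the default extended-version handler are assumptions from the base class:
def _exampleEvaluateSimpleEquation39(x, y, inCoeffs):
    equation = Simple_Equation_39('SSQABS')
    cache = {'X': numpy.asarray(x, dtype=float),
             'Y': numpy.asarray(y, dtype=float),
             'PowY_0.5': numpy.power(numpy.asarray(y, dtype=float), 0.5),
             'DependentData': numpy.zeros(len(x))}  # only its length is used, and only on failure
    return equation.CalculateModelPredictions(inCoeffs, cache)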
class Simple_Equation_40(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 40"
_HTML = 'z = y/(a+b*x+c*pow(x,0.5))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.PowX(NameOrValueFlag=1, args=[0.5]), [0.5]])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
PowX05 = inDataCacheDictionary['PowX_0.5'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = y_in/(a+b*x_in+c*PowX05)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = y_in/(a+b*x_in+c*pow(x_in,0.5));\n"
return s
class Simple_Equation_41(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 41"
_HTML = 'z = exp(a+b/x+c*ln(y))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogY(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
LogY = inDataCacheDictionary['LogY'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = numpy.exp(a+b/x_in+c*LogY)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
        s = "\ttemp = exp(a+b/x_in+c*log(y_in));\n"
return s
class Simple_Equation_42(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 42"
_HTML = 'z = exp(a+b/y+c*ln(x))'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = False
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = True
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = True
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.LogX(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
LogX = inDataCacheDictionary['LogX'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = numpy.exp(a+b/y_in+c*LogX)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = exp(a+b/y_in+c*ln(x_in));\n"
return s
class Simple_Equation_43(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 43"
_HTML = 'z = a*pow(x,b)*ln(y+c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = True
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = False
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(x_in,b)*numpy.log(y_in+c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(x_in,b)*ln(y_in+c);\n"
return s
class Simple_Equation_44(pyeq2.Model_3D_BaseClass.Model_3D_BaseClass):
_baseName = "Simple Equation 44"
_HTML = 'z = a*pow(y,b)*ln(x+c)'
_leftSideHTML = 'z'
_coefficientDesignators = ['a', 'b', 'c']
_canLinearSolverBeUsedForSSQABS = False
webReferenceURL = ''
baseEquationHasGlobalMultiplierOrDivisor_UsedInExtendedVersions = True
autoGenerateOffsetForm = True
autoGenerateReciprocalForm = True
autoGenerateInverseForms = True
autoGenerateGrowthAndDecayForms = True
independentData1CannotContainZeroFlag = False
independentData1CannotContainPositiveFlag = False
independentData1CannotContainNegativeFlag = False
independentData2CannotContainZeroFlag = False
independentData2CannotContainPositiveFlag = False
independentData2CannotContainNegativeFlag = True
def GetDataCacheFunctions(self):
functionList = []
functionList.append([pyeq2.DataCache.DataCacheFunctions.X(NameOrValueFlag=1), []])
functionList.append([pyeq2.DataCache.DataCacheFunctions.Y(NameOrValueFlag=1), []])
return self.extendedVersionHandler.GetAdditionalDataCacheFunctions(self, functionList)
def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once
y_in = inDataCacheDictionary['Y'] # only need to perform this dictionary look-up once
a = inCoeffs[0]
b = inCoeffs[1]
c = inCoeffs[2]
try:
temp = a*numpy.power(y_in,b)*numpy.log(x_in+c)
return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
except:
return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300
def SpecificCodeCPP(self):
s = "\ttemp = a*pow(y_in,b)*ln(x_in+c);\n"
return s
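# Hedged usage sketch (not part of the original module): fitting one of the
# models above with pyeq2's standard workflow. The dataConvertorService call
# and attribute names follow pyeq2's bundled examples; treat the exact
# signatures as assumptions if your pyeq2 version differs.
#
#   import pyeq2
#   equation = Simple_Equation_44('SSQABS')
#   data = '1.0 2.0 0.5\n2.0 3.0 1.1\n3.0 4.0 1.9\n4.0 5.0 2.5'
#   pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(data, equation, False)
#   equation.Solve()
#   print(equation.solvedCoefficients)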
|
JMoravec/unkRadnet
|
fitToCurve/pyeq2/Models_3D/Miscellaneous.py
|
Python
|
bsd-3-clause
| 128,597
|
[
"Gaussian"
] |
74f1139ad45dfe033f9152023a9917cde66e36208aa4e9ee5d0f5da62d8617bb
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
dataobject.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
from qgis.core import (QgsDataProvider,
QgsRasterLayer,
QgsWkbTypes,
QgsVectorLayer,
QgsProject,
QgsSettings,
QgsProcessingContext,
QgsFeatureRequest,
QgsExpressionContext,
QgsExpressionContextUtils,
QgsExpressionContextScope)
from qgis.gui import QgsSublayersDialog
from qgis.PyQt.QtCore import QCoreApplication
from qgis.utils import iface
from processing.core.ProcessingConfig import ProcessingConfig
ALL_TYPES = [-1]
TYPE_VECTOR_ANY = -1
TYPE_VECTOR_POINT = 0
TYPE_VECTOR_LINE = 1
TYPE_VECTOR_POLYGON = 2
TYPE_RASTER = 3
TYPE_FILE = 4
TYPE_TABLE = 5
def createContext(feedback=None):
"""
Creates a default processing context
:param feedback: Optional existing QgsProcessingFeedback object, or None to use a default feedback object
:type feedback: Optional[QgsProcessingFeedback]
:returns: New QgsProcessingContext object
:rtype: QgsProcessingContext
"""
context = QgsProcessingContext()
context.setProject(QgsProject.instance())
context.setFeedback(feedback)
invalid_features_method = ProcessingConfig.getSetting(ProcessingConfig.FILTER_INVALID_GEOMETRIES)
if invalid_features_method is None:
invalid_features_method = QgsFeatureRequest.GeometryAbortOnInvalid
context.setInvalidGeometryCheck(invalid_features_method)
settings = QgsSettings()
context.setDefaultEncoding(settings.value("/Processing/encoding", "System"))
context.setExpressionContext(createExpressionContext())
return context
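# Hedged usage sketch (not part of the original module): build a context with
# an explicit feedback object before running an algorithm. QgsProcessingFeedback
# is the stock QGIS feedback class; any subclass works the same way.
#
#   from qgis.core import QgsProcessingFeedback
#   feedback = QgsProcessingFeedback()
#   context = createContext(feedback)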
def createExpressionContext():
context = QgsExpressionContext()
context.appendScope(QgsExpressionContextUtils.globalScope())
context.appendScope(QgsExpressionContextUtils.projectScope(QgsProject.instance()))
if iface and iface.mapCanvas():
context.appendScope(QgsExpressionContextUtils.mapSettingsScope(iface.mapCanvas().mapSettings()))
processingScope = QgsExpressionContextScope()
if iface and iface.mapCanvas():
extent = iface.mapCanvas().fullExtent()
processingScope.setVariable('fullextent_minx', extent.xMinimum())
processingScope.setVariable('fullextent_miny', extent.yMinimum())
processingScope.setVariable('fullextent_maxx', extent.xMaximum())
processingScope.setVariable('fullextent_maxy', extent.yMaximum())
context.appendScope(processingScope)
return context
def load(fileName, name=None, crs=None, style=None, isRaster=False):
"""
Loads a layer/table into the current project, given its file.
.. deprecated:: 3.0
Do not use, will be removed in QGIS 4.0
"""
from warnings import warn
warn("processing.load is deprecated and will be removed in QGIS 4.0", DeprecationWarning)
if fileName is None:
return
prjSetting = None
settings = QgsSettings()
if crs is not None:
prjSetting = settings.value('/Projections/defaultBehavior')
settings.setValue('/Projections/defaultBehavior', '')
if name is None:
name = os.path.split(fileName)[1]
if isRaster:
qgslayer = QgsRasterLayer(fileName, name)
if qgslayer.isValid():
if crs is not None and qgslayer.crs() is None:
qgslayer.setCrs(crs, False)
if style is None:
style = ProcessingConfig.getSetting(ProcessingConfig.RASTER_STYLE)
qgslayer.loadNamedStyle(style)
QgsProject.instance().addMapLayers([qgslayer])
else:
if prjSetting:
settings.setValue('/Projections/defaultBehavior', prjSetting)
raise RuntimeError(QCoreApplication.translate('dataobject',
'Could not load layer: {0}\nCheck the processing framework log to look for errors.').format(
fileName))
else:
qgslayer = QgsVectorLayer(fileName, name, 'ogr')
if qgslayer.isValid():
if crs is not None and qgslayer.crs() is None:
qgslayer.setCrs(crs, False)
if style is None:
if qgslayer.geometryType() == QgsWkbTypes.PointGeometry:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POINT_STYLE)
elif qgslayer.geometryType() == QgsWkbTypes.LineGeometry:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_LINE_STYLE)
else:
style = ProcessingConfig.getSetting(ProcessingConfig.VECTOR_POLYGON_STYLE)
qgslayer.loadNamedStyle(style)
QgsProject.instance().addMapLayers([qgslayer])
if prjSetting:
settings.setValue('/Projections/defaultBehavior', prjSetting)
return qgslayer
def getRasterSublayer(path, param):
layer = QgsRasterLayer(path)
try:
# If the layer is a raster layer and has multiple sublayers, let the user chose one.
# Based on QgisApp::askUserForGDALSublayers
if layer and param.showSublayersDialog and layer.dataProvider().name() == "gdal" and len(layer.subLayers()) > 1:
layers = []
subLayerNum = 0
# simplify raster sublayer name
for subLayer in layer.subLayers():
# if netcdf/hdf use all text after filename
if bool(re.match('netcdf', subLayer, re.I)) or bool(re.match('hdf', subLayer, re.I)):
subLayer = subLayer.split(path)[1]
subLayer = subLayer[1:]
else:
# remove driver name and file name
                    subLayer = subLayer.replace(subLayer.split(QgsDataProvider.SUBLAYER_SEPARATOR)[0], "")
                    subLayer = subLayer.replace(path, "")
# remove any : or " left over
if subLayer.startswith(":"):
subLayer = subLayer[1:]
if subLayer.startswith("\""):
subLayer = subLayer[1:]
if subLayer.endswith(":"):
subLayer = subLayer[:-1]
if subLayer.endswith("\""):
subLayer = subLayer[:-1]
ld = QgsSublayersDialog.LayerDefinition()
ld.layerId = subLayerNum
ld.layerName = subLayer
layers.append(ld)
subLayerNum = subLayerNum + 1
# Use QgsSublayersDialog
# Would be good if QgsSublayersDialog had an option to allow only one sublayer to be selected
chooseSublayersDialog = QgsSublayersDialog(QgsSublayersDialog.Gdal, "gdal")
chooseSublayersDialog.populateLayerTable(layers)
if chooseSublayersDialog.exec_():
return layer.subLayers()[chooseSublayersDialog.selectionIndexes()[0]]
else:
# If user pressed cancel then just return the input path
return path
else:
# If the sublayers selection dialog is not to be shown then just return the input path
return path
except:
# If the layer is not a raster layer, then just return the input path
return path
|
m-kuhn/QGIS
|
python/plugins/processing/tools/dataobjects.py
|
Python
|
gpl-2.0
| 8,407
|
[
"NetCDF"
] |
633f23a2666b4b2105b1c78a3b9cb96dfc82402c8508f09cb4abc5b321d27e59
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # print all rows
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
print(len(cw154))
print(len(trito))
totalfiles = normalB + mcell + pcell + cd19cell + cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr3_"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG",
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
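# Convert the merged matrix to PHYLIP-style input: missing calls become '?',
# each sample's per-position calls are concatenated into a single string, and
# each output line is written as '<sample name> <sequence>'.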
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else "?")
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("total_chrom3.phy", header=None, index=None)
print(tott.shape)
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/total_chr03.py
|
Python
|
mit
| 32,997
|
[
"MCell"
] |
72e741284cfddb8e43c865af2519da4e1524ce026553043576113e9fb3c494cc
|
from game_api import *
class ToyTree :
def __init__(self, label=None, score=None) :
self.score = score
self.label = label
self.children = []
self.zipper = []
self.sibling_index = None
# sibling index records how many left siblings this node has.
self.sibling = None
def __eq__(self, other) :
return [self.score, self.label, self.children, self.zipper] == [other.score, other.label, other.children, other.zipper]
def __str__(self, tab=0) :
ret = ""
for x in self.children :
ret += x.__str__(tab+1)
ret = ("-" * 3 * tab) + (" " * (tab > 0) ) + (self.label or "node") + ("("+str(self.score)+")" if self.score is not None else "") + "\n" + ret
return ret
# __repr__ = __str__
def describe_previous_move(self) :
return "Took branch "+str(self.sibling_index) if self.sibling_index is not None else "[none]"
def get_score(self) :
return self.score
def set_score(self, score) :
self.score = score
return self
def append(self, child) :
"""Append a ToyTree child node to the end of the list of children."""
child.zipper = []
child.sibling_index = len(self.children)
self.children.append(child)
if len(self.children) > 1 :
self.children[-2].sibling = child
return self
def sub(self, label=None, value=None) :
return self.append(ToyTree(label, value))
    def is_leaf(self) :
        return not self.children
# moving around
def down(self) :
"""Visit the first child."""
child = self.children[0]
child.zipper = self.zipper + [self]
return child
def up(self) :
"""Visit parent."""
parent = self.zipper[-1]
parent.zipper = self.zipper[:-1]
return parent
def right(self) :
"""Visit sibling."""
assert self.sibling
self.sibling.zipper = self.zipper
return self.sibling
def top(self) :
"""Visit root."""
if self.zipper :
return self.zipper[0]
else :
return self
def create_toy_tree(score_dict, nested_list) :
"""Creates a toy tree from a dict (nodeName -> score), and a nested
list of node names from the dictionary. A well-formed nested list is a
pair whose first element is a node name and whose second element is a
(possibly empty) list containing well-formed nested lists.
"""
        # One-liner using reduce (equivalent to the loop below):
        # return reduce(lambda parent, child : parent.append(child),
        #               map(lambda x : create_toy_tree(score_dict, x), nested_list[1]),
        #               ToyTree(nested_list[0], score_dict.get(nested_list[0])))
        label, sublists = nested_list
        root = ToyTree(label, score_dict.get(label))
        children = [create_toy_tree(score_dict, x) for x in sublists]
        for x in children :
            root.append(x)
        return root
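# Illustrative example (names and scores are made up, not from the original
# lab): a root node with two scored leaf children, using the nested-list
# format [label, [child_lists...]] described in the docstring above.
#
#   scores = {"L": 4, "R": 7}
#   t = create_toy_tree(scores, ["root", [["L", []], ["R", []]]])
#   print(t)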
def wrapper_toytree(score_dict, nested_list) :
tree = create_toy_tree(score_dict, nested_list)
return AbstractGameState(snapshot = tree,
is_game_over_fn = toytree_is_game_over,
generate_next_states_fn = toytree_generate_next_states,
                             endgame_score_fn = toytree_endgame_score_fn)
# TREE FOR ALL SEARCHES
tree4 = ToyTree()
tree4.sub().sub().sub().sub()
tree4.down().sub(None,7).sub(None,11).sub(None, 3).sub(None, 10)
tree4.down().right().sub(None,4).sub(None,9).sub(None, 14).sub(None, 8)
tree4.down().right().right().sub(None,5).sub(None,2).sub(None, 12).sub(None, 16)
tree4.down().right().right().right().sub(None,15).sub(None,6).sub(None, 1).sub(None, 3)
# If max goes first, 4 is the minimax score, and alpha-beta prunes 3 nodes.
# If min goes first, 11 is the minimax score, and alpha-beta prunes 5 nodes.
# [[[7],[11],[3],[10]],[[4],[9],[14],[8]], [[5],[2],[12],[16]], [[15],[6],[1],[3]]]
def toytree_is_game_over(tree) :
return tree.children == []
def toytree_generate_next_states(tree) :
return tree.children
def toytree_endgame_score_fn(tree, is_current_player_maximizer) :
return tree.score
GAME1 = AbstractGameState(tree4,
toytree_is_game_over,
toytree_generate_next_states,
toytree_endgame_score_fn)
def toytree_heuristic_fn(tree, is_current_player_maximizer) :
return tree.score
# 2013 final part 1D
tree5 = ToyTree("A",10) # static values at all levels
tree5.sub("B",11).sub("C",2).sub("D",3).sub("E",6)
tree5.down().sub("F",10).sub("G",12)
tree5.down().down().right().sub("K",7).sub("L",11)
tree5.down().right().sub("H",9).sub("I",12)
tree5.down().right().down().right().sub("M",12).sub("N",13)
tree5.down().right().right().right().sub("J",7).down().sub("O",8)
GAME_STATIC_ALL_LEVELS = AbstractGameState(tree5,
toytree_is_game_over,
toytree_generate_next_states,
toytree_endgame_score_fn)
tree6 = ToyTree("A")
tree6.sub("B").sub("C")
tree6.down().sub("D").sub("E")
tree6.down().right().sub("F").sub("G")
tree6.down().down().sub("H").sub("I")
tree6.down().down().right().sub("J").sub("K")
tree6.down().right().down().sub("L").sub("M")
tree6.down().right().down().right().sub("N")
tree6.down().down().down().sub("O",3).sub("P",17)
tree6.down().down().down().right().sub("Q",2).sub("R",12)
tree6.down().down().right().down().sub("S",15)
tree6.down().down().right().down().right().sub("T",25).sub("U",0)
tree6.down().right().down().down().sub("V",2).sub("W",5)
tree6.down().right().down().down().right().sub("X",3)
tree6.down().right().down().right().down().sub("Y",2).sub("Z",14)
# A tree that checks exit condition of alpha = beta.
GAME_EQUALITY_PRUNING = AbstractGameState(tree6,
toytree_is_game_over,
toytree_generate_next_states,
toytree_endgame_score_fn)
|
jasonleaster/MIT_6.034_2015
|
lab3/toytree.py
|
Python
|
gpl-2.0
| 6,112
|
[
"VisIt"
] |
3deb7b8ad4a7b367027f69faa8eafcbca009fd3189d7c862fd97e4a3b408465a
|
"""
This module holds the Layer class and its various subclasses:
Layer: Base class for layers. Defines interface and shared methods
PerceptronLayer: Layer subclass with a perceptron activation function
BoltzmannMachineLayer: Layer subclass with a Boltzmann Machine activation function
"""
from brio.misc.utils import overrides
from .aux import LayerType
import numpy as np
np.seterr('raise')
class Layer(object):
"""
Base class for network layers.
Defines the interface for layers and implements some common functionality
To use, inheriting classes must override the async_activation and async_update methods
"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, n_dims, ltype=LayerType.unconstrained,
update_bias=True, allow_self_con=False, label=None):
""" Initialize Layer object
:param n_dims: the number of neurons in the layer
:param ltype: Enum holding constants particular to layer type.
unconstrainted, excitatory or inhibitory
:param update_bias: True if this layer should update its biases following foldiak's rule during traing
:param allow_self_con: True if neurons in this layer are allowed to connect to themselves
:param label: string used as repr for this Layer. By default a label
is generated from this Layer's parameters
:returns: a Layer object
:rtype: Layer
"""
self.n_dims = n_dims
self.bias = np.ones((self.n_dims, 1))
self.inputs = []
self.outputs = []
self.ltype = ltype
self.update_sign = 1
self.update_bias = update_bias
self.allow_self_con = allow_self_con
self.label = label
def set_up(self, network):
""" adds an attribute pointing to the parent network and sets up the
weighting used for computing firing rates
Also sets the target firing rate and the max history length
:param network: Network object. The parent network
:returns: None
:rtype: None
"""
self.params = network.params
# import params
self.max_history_length = network.params.layer_history_length
self.target_firing_rate = (self.ltype.firing_rate_multiplier *
network.params.baseline_firing_rate)
self.learning_rate = network.params.bias_learning_rate * network.params.baseline_lrate
self.stim_per_epoch = self.params.stimuli_per_epoch
self.update_cap = self.params.update_cap
# initialize attributes
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
self._history = [self.state.copy()]
self._firing_rates = self.state.copy()
self._fr_history = []
self._lfr_mean = np.ones(self.n_dims) * self.target_firing_rate
self._epoch_fr = np.zeros((self.n_dims, self.stim_per_epoch))
# additional set up for inheriting layers (if necessary)
self.aux_set_up()
@property
def firing_rates(self):
""" Property for firing rates
        :returns: _firing_rates
:rtype: array
"""
return self._firing_rates
@property
def fr_history(self):
""" Property for firing rate history
:returns: _fr_history trimmed to proper length
:rtype: array
"""
return np.array(self._fr_history[-self.params.presentations:])
@property
def history(self):
""" Property for history
:returns: _history trimmed to proper length
:rtype: array
"""
return np.array(self._history[:self.params.presentations])
@property
def lfr_mean(self):
""" Property for lfr_mean
:returns: _lfr_mean
:rtype: array
"""
return self._lfr_mean
@property
def epoch_fr(self):
""" Property for epoch_fr
:returns: _epoch_fr
:rtype: array
"""
return self._epoch_fr
@property
def prev_state(self):
""" Property that returns the previous state
:returns: history[0]
:rtype: array
"""
return self._history[0]
def aux_set_up(self):
""" This method is called after the main set up has finished executing
Intended to be overrided by inheriting classes and used to set up
Layer type specific state variables (such as potential)
:returns: None
:rtype: None
"""
pass
def sync_update(self):
""" Synchronously updates the state of all of the units in this layer
Must be implemented by inheriting class
:returns: None
:rtype: None
"""
raise NotImplementedError
def bias_update(self):
""" Update the unit biases for this layer
By default uses the homeostatic threshold rule from
Foldiak 1990
:returns: None
:rtype: None
"""
if self.update_bias:
# unit: timeunit / epoch
epoch_time_units = self.params.update_batch_size * self.params.timestep
# unit: spikes / timeunit
delta = (self.target_firing_rate - self.epoch_fr).reshape(-1, 1)
# unit: spikes / epoch
delta_b = (self.update_sign * self.learning_rate * delta * epoch_time_units)
if self.update_cap is not None:
delta_b[np.where(delta_b > self.update_cap)] = self.update_cap
delta_b[np.where(delta_b < -self.update_cap)] = -self.update_cap
self.bias += delta_b
def update_lifetime_mean(self):
""" Updates the lifetime mean firing rate for this layer
:returns: None
:rtype: None
"""
# unit: spikes / timestep
act_mean = np.mean(self._history[:self.params.layer_history_length], axis=(0, 2))
# unit: spikes / timestep
fr_mean = np.mean(self._fr_history[-self.params.layer_history_length:], axis=(0, 1))
# unit: spikes / timestep * timestep / timeunit = spikes / timeunit
self._epoch_fr = (act_mean / self.params.timestep)
# unit: spikes / timeunit
self._lfr_mean += self.params.ema_lfr * ((fr_mean / self.params.timestep) - self._lfr_mean)
def update_history(self):
""" appends the current state to the history
additionally updates the firing rates
:returns: None
:rtype: None
"""
self._firing_rates += self.params.ema_curr * (self.state - self._firing_rates)
self._history.insert(0, self.state.copy())
self._fr_history.append(self._firing_rates.copy().T)
def reset(self):
""" Reset the layer in anticipation of running
the next batch of stimuli
Clears the history and calls a method which resets the Layer type specific
state variables
:returns: None
:rtype: None
"""
self.reset_state_vars()
self._firing_rates = np.zeros((self.n_dims, self.stim_per_epoch))
self._fr_history = []
self._history = [self.state.copy()]
def reset_state_vars(self):
""" Reset the state variables for this layer such as state
or membrane potential
override this method when inheriting - NOT reset
:returns: None
:rtype: None
"""
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
def __repr__(self):
"""
A nicer string for this class
"""
if self.label is None:
return "{} {} of size {}".format(self.ltype.name, type(self).__name__, self.n_dims)
else:
return self.label
class LIFLayer(Layer):
"""
Implements a layer of leaky integrate and fire neurons
Manages to remain compatible with everything else by using state to reinterpret spikes
and defining auxiliary potential variables
"""
def __init__(self, *args, **kwargs):
super(LIFLayer, self).__init__(*args, **kwargs)
self.update_sign = -1
self.pot_history = []
# messy trick: inhibitory neurons have a faster firing rate and this gives them
# a faster rc time constant too
self.decay_scale = self.ltype.firing_rate_multiplier
@overrides(Layer)
def sync_update(self):
""" Implements synchronous state update for leaky integrate and fire neurons
:returns: None
:rtype: None
"""
        # update membrane potentials
# self.potentials *= np.exp(-self.decay_scale / float(self.params.steps_per_rc_time))
self.potentials *= np.exp(- self.params.timestep * self.decay_scale)
for input_connection in self.inputs:
multiplier = input_connection.weight_multiplier
weights = input_connection.weights.T
state = input_connection.presynaptic_layer.prev_state
self.potentials += multiplier * np.dot(weights, state)
# set state for neurons that cross threshold
fire_idxs = np.where(self.potentials >= self.bias)
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
self.state[fire_idxs] = 1
# reset membrane potential
self.potentials[fire_idxs] = 0
if self.params.keep_extra_history:
if len(self.pot_history) >= self.params.presentations:
self.pot_history = []
self.pot_history.append(self.potentials.copy())
@overrides(Layer)
def aux_set_up(self):
self.potentials = np.zeros((self.n_dims, self.stim_per_epoch))
@overrides(Layer)
def reset_state_vars(self):
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
self.potentials = np.zeros((self.n_dims, self.stim_per_epoch))
self.pot_history = []
class BoltzmannMachineLayer(Layer):
"""
Implements the Boltzman Machine async_activation function
"""
@overrides(Layer)
def sync_update(self):
""" Implements synchronous state update for Boltzmann Machines
:returns: None
:rtype: None
"""
delta_e = np.tile(self.bias, self.stim_per_epoch).reshape(-1, self.stim_per_epoch)
for input_connection in self.inputs:
multiplier = input_connection.weight_multiplier
weights = input_connection.weights.T
state = input_connection.presynaptic_layer.prev_state
delta_e += multiplier * np.dot(weights, state)
for output_connection in self.outputs:
multiplier = output_connection.weight_multiplier
weights = output_connection.weights
state = output_connection.postsynaptic_layer.prev_state
delta_e += multiplier * np.dot(weights, state)
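        # standard Boltzmann machine rule: each unit switches on independently
        # with probability sigmoid(delta_e)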
p_on = 1. / (1 + np.exp(-delta_e))
rand_p = np.random.random((self.n_dims, self.stim_per_epoch))
update_idxs = np.where(rand_p < p_on)
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
self.state[update_idxs] = 1
class PerceptronLayer(Layer):
"""
Simple feedforward perceptron with a hard threshold activation function
"""
@overrides(Layer)
def sync_update(self):
""" Implemented a synchronous state update for a perceptron layer
:returns: None
:rtype: None
"""
energy = np.tile(self.bias, self.stim_per_epoch).reshape(-1, self.stim_per_epoch)
for input_connection in self.inputs:
multiplier = input_connection.weight_multiplier
weights = input_connection.weights.T
state = input_connection.presynaptic_layer.prev_state
energy += multiplier * np.dot(weights, state)
update_idxs = np.where(energy > 0)
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
self.state[update_idxs] = 1
#######################
# Input layers #
#######################
class InputLayer(Layer):
"""
Input layer. Lacks async_update methods
"""
def __init__(self, *args, **kwargs):
super(InputLayer, self).__init__(*args, **kwargs)
self.update_bias = False
def set_state(self, state):
""" set state as the state of the input layer
:param state: array of shape (self.n_dims, self.stimuli_per_epoch)
:returns: None
:rtype: None
"""
assert state.shape == (self.n_dims, self.stim_per_epoch)
self.state = state.copy() * self.params.timestep
self._history.insert(0, self.state.copy())
@overrides(Layer)
def sync_update(self):
pass
class RasterInputLayer(InputLayer):
"""
An input layer that contains methods to rasterize scalar variables into spike trains
The range of the scalar variable is partitioned into equal bins from specified bounds and n_dims
Each bin is represented by a single neuron with independent poisson spiking behavior
For each stimulus value, the rate at which a particular neuron fires is computed as the integral
of a gaussian centered around the stimulus value across the bin that neuron codes for
"""
def __init__(self, n_dims, min_range, max_range, **kwargs):
super(RasterInputLayer, self).__init__(n_dims, **kwargs)
assert min_range < max_range
self.lower_bnd = min_range
self.upper_bnd = max_range
        # dividing by 1E5 produces a pretty wide distribution of rates,
        # probably a good starting point
# current variance of gaussian
self.var = (max_range - min_range) / 1E5
# overall scale of gaussian. 1 is normalized
self.scale = 3
# how long in each time bin
# also need to represent cooling schedule somehow
@overrides(InputLayer)
def aux_set_up(self):
self.sample_points = np.tile(np.linspace(self.lower_bnd, self.upper_bnd, self.n_dims),
self.stim_per_epoch).reshape(
self.n_dims, self.stim_per_epoch)
@overrides(InputLayer)
def set_state(self, scalar_value):
""" sets the state of this layer probabilistically according to the scheme
described in the class header doc
:param scalar_value: a scalar. must be in (min_range, max_range)
:returns: None
:rtype: None
"""
assert (self.lower_bnd < scalar_value).all()
assert (scalar_value < self.upper_bnd).all()
rates = self.rate_at_points(scalar_value)
p_fire_in_bin = 1 - np.exp(-rates)
rand_p = np.random.random((self.n_dims, self.stim_per_epoch))
firing_idx = np.where(rand_p < p_fire_in_bin)
self.state = np.zeros((self.n_dims, self.stim_per_epoch))
self.state[firing_idx] = 1
self._history.insert(0, self.state.copy())
def rate_at_points(self, scalar_value):
""" returns an array with the rates at each sample point
:param scalar_value: the value being coded for
:returns: rate array
:rtype: array
"""
        # right now we are not normalizing at all, which means that a larger variance
        # vastly increases the firing rate of the whole population; normalizing the
        # Gaussian would not keep population firing rates constant either - to do that
        # we would need to normalize with respect to the Poisson CDF
return self.scale * np.exp(- ((self.sample_points - scalar_value) ** 2) / (2 * self.var))
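# Illustrative sketch (parameter values are assumptions, not from the original
# code): a layer coding scalars in (0, 1) with 100 units places sample points
# on an even grid; rate_at_points then returns a Gaussian bump of rates
# centered on the stimulus value, so units tuned near the stimulus fire most
# often.
#
#   layer = RasterInputLayer(100, 0.0, 1.0)
#   # after layer.set_up(network) inside a Network:
#   # layer.set_state(np.full((1, layer.stim_per_epoch), 0.3))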
class SplitInput(InputLayer):
"""
An input layer that accepts multiple stimuli simultaneously
Must not have any outputs
"""
# pylint:disable=too-many-instance-attributes
def __init__(self, n_dims, n_children, **kwargs):
super(SplitInput, self).__init__(n_dims, **kwargs)
self.update_bias = False
self.children = [InputLayer(n_dims, **kwargs) for _ in xrange(n_children)]
@overrides(InputLayer)
def set_state(self, rolled_stimuli_set):
""" Set the state of all the child layers
:param rolled_stimuli_set: a list of stimuli of len n_children,
each element of the list is an array of rolled stimuli of shape (n_dims, stimuli_per_epoch)
:returns: None
:rtype: None
"""
n_stim_dims = self.children[0].n_dims
if len(rolled_stimuli_set) == len(self.children):
for stimulus, child_layer in zip(rolled_stimuli_set, self.children):
child_layer.set_state(stimulus)
elif rolled_stimuli_set.shape[0] % n_stim_dims == 0:
# all children must have the same dimension if stimulus must be implicitly decoded
assert (np.array([c.n_dims for c in self.children]) == n_stim_dims).all()
for idx, child_layer in enumerate(self.children):
child_layer.set_state(rolled_stimuli_set[n_stim_dims * idx: n_stim_dims * (idx + 1)])
else:
raise ValueError("Stimuli provided could not be implicitly decoded")
self.aux_update()
def aux_update(self):
""" auxiliary updates to perform after setting child state
to be implemented by inheriting classes
:returns: None
:rtype: None
"""
pass
@overrides(InputLayer)
def sync_update(self):
pass
@overrides(InputLayer)
def aux_set_up(self):
"""
Check that this layer is disconnected
"""
assert len(self.outputs) == 0
assert len(self.inputs) == 0
class GatedInput(SplitInput):
"""
An input layer that accepts multiple stimuli simultaneously
    State of this layer is set by gating input stimuli multiplicatively
    Output must be a ConstantConnection
    Sets firing rates to those of the postsynaptic layer to
    preserve weight rule functionality
"""
# pylint:disable=too-many-instance-attributes
def __init__(self, n_dims, input_n_dims, n_children, **kwargs):
super(GatedInput, self).__init__(n_dims, n_children, **kwargs)
self.update_bias = False
self.children = [InputLayer(input_n_dims, **kwargs) for _ in xrange(n_children)]
@overrides(SplitInput)
def aux_update(self):
update_state = np.ones((self.n_dims, self.stim_per_epoch))
for input_connection in self.inputs:
multiplier = input_connection.weight_multiplier
weights = input_connection.weights.T
state = input_connection.presynaptic_layer.prev_state
update_state *= multiplier * np.dot(weights, state)
self.state = update_state
self._history.insert(0, self.state.copy())
@overrides(InputLayer)
def aux_set_up(self):
"""
Set the firing rate and history as those of the postsynaptic neuron
"""
from brio.blocks.connection import ConstantConnection
assert len(self.outputs) == 1
assert len(self.inputs) == len(self.children)
assert isinstance(self.outputs[0], ConstantConnection)
self.parent_layer = self.outputs[0].postsynaptic_layer
@overrides(InputLayer)
def update_lifetime_mean(self):
pass
@overrides(InputLayer)
def update_history(self):
self._history.insert(0, self.state.copy())
@property
@overrides(InputLayer)
def firing_rates(self):
""" Property for firing rates
        :returns: _firing_rates
:rtype: array
"""
return self.parent_layer.firing_rates
@property
@overrides(InputLayer)
def fr_history(self):
""" Property for firing rate history
:returns: _fr_history trimmed to proper length
:rtype: array
"""
return self.parent_layer.fr_history
@property
@overrides(InputLayer)
def lfr_mean(self):
""" Property for lfr_mean
:returns: _lfr_mean
:rtype: array
"""
return self.parent_layer.lfr_mean
@property
@overrides(InputLayer)
def epoch_fr(self):
""" Property for epoch_fr
:returns: _epoch_fr
:rtype: array
"""
return self.parent_layer.epoch_fr
|
rueberger/brio
|
brio/blocks/layer.py
|
Python
|
gpl-2.0
| 20,103
|
[
"Gaussian",
"NEURON"
] |
a7f093bca5364d8a32525a21c5ce0e6189da469887aa58ed4fd6811102450370
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
class XSF(object):
"""
Class for parsing XCrysden files.
"""
def __init__(self, structure):
self.structure = structure
def to_string(self):
"""
Returns a string with the structure in XSF format
See http://www.xcrysden.org/doc/XSF.html
"""
lines = []
app = lines.append
app("CRYSTAL")
app("# Primitive lattice vectors in Angstrom")
app("PRIMVEC")
        cell = self.structure.lattice.matrix
for i in range(3):
app(' %.14f %.14f %.14f' % tuple(cell[i]))
cart_coords = self.structure.cart_coords
app("# Cartesian coordinates in Angstrom.")
app("PRIMCOORD")
app(" %d 1" % len(cart_coords))
for a in range(len(cart_coords)):
sp = "%d" % self.structure.atomic_numbers[a]
app(sp + ' %20.14f %20.14f %20.14f' % tuple(cart_coords[a]))
return "\n".join(lines)
@classmethod
    def from_string(self, input_string, cls=None):
        """
        Initialize a `Structure` object from a string with data in XSF format.
        Args:
            input_string: String with the structure in XSF format.
                See http://www.xcrysden.org/doc/XSF.html
            cls: Structure class to be created. default: pymatgen structure
"""
# CRYSTAL see (1)
# these are primitive lattice vectors (in Angstroms)
# PRIMVEC
# 0.0000000 2.7100000 2.7100000 see (2)
# 2.7100000 0.0000000 2.7100000
# 2.7100000 2.7100000 0.0000000
# these are conventional lattice vectors (in Angstroms)
# CONVVEC
# 5.4200000 0.0000000 0.0000000 see (3)
# 0.0000000 5.4200000 0.0000000
# 0.0000000 0.0000000 5.4200000
# these are atomic coordinates in a primitive unit cell (in Angstroms)
# PRIMCOORD
# 2 1 see (4)
# 16 0.0000000 0.0000000 0.0000000 see (5)
# 30 1.3550000 -1.3550000 -1.3550000
lattice, coords, species = [], [], []
lines = input_string.splitlines()
for i in range(len(lines)):
if "PRIMVEC" in lines[i]:
for j in range(i+1, i+4):
lattice.append([float(c) for c in lines[j].split()])
if "PRIMCOORD" in lines[i]:
num_sites = int(lines[i+1].split()[0])
for j in range(i+2, i+2+num_sites):
tokens = lines[j].split()
species.append(int(tokens[0]))
coords.append([float(j) for j in tokens[1:]])
break
else:
raise ValueError("Invalid XSF data")
if cls is None:
from pymatgen.core.structure import Structure
cls = Structure
s = cls(lattice, species, coords, coords_are_cartesian=True)
return XSF(s)
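# Round-trip sketch (assumes pymatgen is importable; the simple cubic Po cell
# below is made up for illustration):
#
#   from pymatgen.core.structure import Structure
#   s = Structure([[3.35, 0, 0], [0, 3.35, 0], [0, 0, 3.35]], ["Po"], [[0, 0, 0]])
#   print(XSF.from_string(XSF(s).to_string()).to_string())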
|
Bismarrck/pymatgen
|
pymatgen/io/xcrysden.py
|
Python
|
mit
| 4,160
|
[
"CRYSTAL",
"pymatgen"
] |
c34b2f2e57620e98832b19f2c4df68b6ae57ecdee0f0c56ddebc8edb58b07311
|
#!/usr/bin/env python
# Install.py tool to download, unpack, build, and link to the LATTE library
# used to automate the steps described in the README file in this dir
from __future__ import print_function
import sys,os,re,subprocess,hashlib
# help message
help = """
Syntax from src dir: make lib-latte args="-b"
or: make lib-latte args="-p /usr/local/latte"
or: make lib-latte args="-m gfortran"
or: make lib-latte args="-b -v 1.2.1"
Syntax from lib dir: python Install.py -b
or: python Install.py -p /usr/local/latte
or: python Install.py -m gfortran
or: python Install.py -v 1.2.1 -b
specify one or more options, order does not matter
-b = download and build the LATTE library
-p = specify folder of existing LATTE installation
-m = copy Makefile.lammps.suffix to Makefile.lammps
-v = set version of LATTE library to download and set up (default = 1.2.1)
Example:
make lib-latte args="-b -m gfortran" # download/build in lib/latte
make lib-latte args="-p $HOME/latte" # use existing LATTE installation
"""
# settings
version = '1.2.1'
# known checksums for different LATTE versions. used to validate the download.
checksums = { \
'1.1.0' : '533635721ee222d0ed2925a18fb5b294', \
'1.2.0' : '68bf0db879da5e068a71281020239ae7', \
'1.2.1' : '85ac414fdada2d04619c8f936344df14', \
}
# print error message or help
def error(str=None):
if not str: print(help)
else: print("ERROR",str)
sys.exit()
# expand to full path name
# process leading '~' or relative path
def fullpath(path):
return os.path.abspath(os.path.expanduser(path))
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def geturl(url,fname):
success = False
if which('curl') != None:
cmd = 'curl -L -o "%s" %s' % (fname,url)
try:
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
success = True
except subprocess.CalledProcessError as e:
print("Calling curl failed with: %s" % e.output.decode('UTF-8'))
if not success and which('wget') != None:
cmd = 'wget -O "%s" %s' % (fname,url)
try:
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
success = True
except subprocess.CalledProcessError as e:
print("Calling wget failed with: %s" % e.output.decode('UTF-8'))
if not success:
error("Failed to download source code with 'curl' or 'wget'")
return
def checkmd5sum(md5sum,fname):
with open(fname,'rb') as fh:
m = hashlib.md5()
while True:
data = fh.read(81920)
if not data:
break
m.update(data)
fh.close()
return m.hexdigest() == md5sum
# parse args
args = sys.argv[1:]
nargs = len(args)
if nargs == 0: error()
homepath = "."
buildflag = False
pathflag = False
suffixflag = False
linkflag = True
iarg = 0
while iarg < nargs:
if args[iarg] == "-p":
if iarg+2 > nargs: error()
lattedir = fullpath(args[iarg+1])
pathflag = True
iarg += 2
elif args[iarg] == "-b":
buildflag = True
iarg += 1
elif args[iarg] == "-m":
if iarg+2 > nargs: error()
suffix = args[iarg+1]
suffixflag = True
iarg += 2
elif args[iarg] == "-v":
if iarg+2 > nargs: error()
version = args[iarg+1]
iarg += 2
else: error()
homedir = "LATTE-%s" % version
if (buildflag and pathflag):
error("Cannot use the -b and -p flags at the same time")
if buildflag:
url = "https://github.com/lanl/LATTE/archive/v%s.tar.gz" % version
lattepath = fullpath(homepath)
lattedir = "%s/%s" % (lattepath,homedir)
if pathflag:
if not os.path.isdir(lattedir): error("LATTE path does not exist")
# download and unpack LATTE tarball
if buildflag:
print("Downloading LATTE ...")
geturl(url,"LATTE.tar.gz")
# verify downloaded archive integrity via md5 checksum, if known.
if version in checksums:
if not checkmd5sum(checksums[version],'LATTE.tar.gz'):
error("Checksum for LATTE library does not match")
print("Unpacking LATTE ...")
if os.path.exists(lattedir):
cmd = 'rm -rf "%s"' % lattedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = 'cd "%s"; tar zxvf LATTE.tar.gz' % lattepath
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
os.remove("%s/LATTE.tar.gz" % lattepath)
# build LATTE
if buildflag:
print("Building LATTE ...")
cmd = 'cd "%s"; make' % lattedir
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))
# create 3 links in lib/latte to LATTE dirs
# do this when -b or -p is set
if buildflag or pathflag:
print("Creating links to LATTE files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
os.remove("liblink")
if os.path.isfile("filelink.o") or os.path.islink("filelink.o"):
os.remove("filelink.o")
cmd = 'ln -s "%s/src" includelink' % lattedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = 'ln -s "%s" liblink' % lattedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = 'ln -s "%s/src/latte_c_bind.o" filelink.o' % lattedir
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
# copy Makefile.lammps.suffix to Makefile.lammps
if suffixflag:
print("Creating Makefile.lammps")
if os.path.exists("Makefile.lammps.%s" % suffix):
cmd = 'cp Makefile.lammps.%s Makefile.lammps' % suffix
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
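# Standalone sketch of the download-and-verify logic above (the file name is
# illustrative, and the download requires network access):
#
#   geturl("https://github.com/lanl/LATTE/archive/v1.2.1.tar.gz", "LATTE.tar.gz")
#   print(checkmd5sum(checksums['1.2.1'], 'LATTE.tar.gz'))  # True if the tarball is intact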
|
quang-ha/lammps
|
lib/latte/Install.py
|
Python
|
gpl-2.0
| 5,988
|
[
"LAMMPS"
] |
4ba57a315a079e01eb96fa62b284c9ed4a4a531ff3b6c3b09933dfda7aa10802
|
from selenium.common.exceptions import NoSuchElementException
from datetime import datetime
from urllib import urlencode
# ============================================================================
class PrefixHandler(object):
def __init__(self, prefix, desc='Url Prefix Archiving Handler'):
self.prefix = prefix
self.desc = desc
def __call__(self, browser, url):
log_results = browser.visit(self.prefix + url)
try:
error = self.get_error(log_results, browser, url)
except NoSuchElementException:
# no error
error = None
except Exception as e:
error = {'msg': str(e)}
results = {'time': str(datetime.utcnow())}
if error:
results['error'] = error
results['archived'] = False
else:
results['archived'] = True
results['actual_url'] = self.get_actual_url(browser)
self.set_success_results(browser, url, results)
results['browser_url'] = self.get_browser_url(browser)
for n in list(log_results.keys()):
if not self.is_archived_url(n):
del log_results[n]
results['log'] = log_results
return results
def set_success_results(self, browser, url, results):
pass
def get_error(self, log_results, browser, url):
return None
def is_archived_url(self, url):
return url.startswith(self.prefix)
def get_desc(self):
return self.desc
def get_browser_url(self, browser):
try:
return browser.driver.current_url
except:
return ''
def get_actual_url(self, browser):
url = self.get_browser_url(browser)
try:
inx = url[1:].index('/http')
url = url[inx + 2:]
except:
pass
return url
# ============================================================================
class SavePageNowHandler(PrefixHandler):
BLOCKED_MSGS = ('Sorry.', 'Page cannot be crawled or displayed due to robots.txt.')
def __init__(self, prefix='https://web.archive.org/save/',
desc='Internet Archive <a href="https://web.archive.org/web/">Save Page Now</a> Archiving'):
super(SavePageNowHandler, self).__init__(prefix, desc)
def set_success_results(self, browser, url, results):
cookie = browser.driver.get_cookie('webrecorder.session')
# not exact but close enough
results['replay_url'] = 'https://web.archive.org/web/' + url
def get_error(self, log_results, browser, url):
err_text = browser.driver.find_element_by_css_selector("div#positionHome #error h2").text
info = err_text + ' ' + browser.driver.find_element_by_css_selector("div#positionHome #error p").text
if err_text in self.BLOCKED_MSGS:
return {'msg': info, 'type': 'blocked'}
return {'msg': info}
# ============================================================================
class WebRecorderHandler(PrefixHandler):
def __init__(self, prefix='https://webrecorder.io/record/',
desc='<a href="https://webrecorder.io/">webrecorder.io</a> Archiving'):
super(WebRecorderHandler, self).__init__(prefix, desc)
def get_error(self, log_results, browser, url):
browser.driver.switch_to.frame('iframe')
err_elem = browser.driver.find_element_by_css_selector('div.webrec-error div.page-header span.h2')
if err_elem.text == 'WebRecorder.io error':
try:
msg = browser.driver.find_element_by_css_selector('div.webrec-error p.h4').text
if 'Name or service not known' in msg:
msg = 'This url could not be reached'
except:
msg = 'unknown'
return {'msg': msg}
return None
def set_success_results(self, browser, url, results):
cookie = browser.driver.get_cookie('webrecorder.session')
if cookie:
query = urlencode({'url': url, 'sesh': cookie['value']})
#results['download_session'] = cookie['name'] + '=' + cookie['value']
results['download_url'] = 'https://webrecorder.io/cmd/sesh_download?' + query
results['replay_url'] = 'https://webrecorder.io/cmd/setsesh?' + query
return results
def is_archived_url(self, url):
if url.startswith(self.prefix) and '_/' in url:
return True
return False
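# Usage sketch (hypothetical): each handler is a callable taking a browser
# wrapper -- anything exposing .visit() and a Selenium .driver, as the classes
# above assume -- plus the url to archive:
#
#   handler = SavePageNowHandler()
#   results = handler(browser, 'http://example.com/')
#   if results['archived']:
#       print(results['replay_url'])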
|
ikreymer/browsertrix
|
web/handlers.py
|
Python
|
mit
| 4,566
|
[
"VisIt"
] |
4f2312711318bd56b1a136465f07b4dbb6a5442f7f662eaeeba43704d909cf17
|
import numpy as np
import visit_writer
if __name__ == '__main__':
print('# Start')
output = 'run.py3'
np.random.seed(0)
grid = np.array([-10., 10., 10, -10., 10., 10., 0., 0., 1.])
grid = np.reshape(grid, (3,3)).T
grid_length = grid[1] - grid[0]
grid_points = np.array(grid[2], dtype=np.int32)
num_points = grid_points[0] * grid_points[1] * grid_points[2]
# Set grid coordinates
dx_grid = grid_length / grid_points
grid_x = np.array([grid[0,0] + dx_grid[0] * (x+0.5) for x in range(grid_points[0])])
grid_y = np.array([grid[0,1] + dx_grid[1] * (x+0.5) for x in range(grid_points[1])])
grid_z = np.array([grid[0,2] + dx_grid[2] * (x+0.5) for x in range(grid_points[2])])
# Be aware, x is the fast axis.
zz, yy, xx = np.meshgrid(grid_z, grid_y, grid_x, indexing = 'ij')
grid_coor = np.zeros((num_points, 3))
grid_coor[:,0] = np.reshape(xx, xx.size)
grid_coor[:,1] = np.reshape(yy, yy.size)
grid_coor[:,2] = np.reshape(zz, zz.size)
# Create velocity
grid_velocity = np.random.randn(grid_coor.shape[0], grid_coor.shape[1])
# Prepare data for the VTK writer
variables = [np.reshape(grid_velocity, grid_velocity.size)]
dims = np.array([grid_points[0]+1, grid_points[1]+1, grid_points[2]+1], dtype=np.int32)
nvars = 1
vardims = np.array([3])
centering = np.array([0])
varnames = ['velocity\0']
name = output + '.velocity_field.vtk'
grid_x = grid_x - dx_grid[0] * 0.5
grid_y = grid_y - dx_grid[1] * 0.5
grid_z = grid_z - dx_grid[2] * 0.5
grid_x = np.concatenate([grid_x, [grid[1,0]]])
grid_y = np.concatenate([grid_y, [grid[1,1]]])
grid_z = np.concatenate([grid_z, [grid[1,2]]])
# Write velocity field
visit_writer.boost_write_rectilinear_mesh(name, # File's name
0, # 0=ASCII, 1=Binary
dims, # {mx, my, mz}
grid_x, # xmesh
grid_y, # ymesh
grid_z, # zmesh
nvars, # Number of variables
vardims, # Size of each variable, 1=scalar, velocity=3*scalars
centering, # Write to cell centers or corners
varnames, # Variables' names
variables) # Variables
print('# End')
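# Optional sanity check (sketch): the writer emits a legacy-VTK file, so its
# opening lines should identify the dataset:
#
#   with open(name) as fh:
#       print(''.join(fh.readlines()[:5]))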
|
stochasticHydroTools/RotationalDiffusion
|
visit/test_visit.py
|
Python
|
gpl-3.0
| 2,516
|
[
"VTK"
] |
8cc788da19323f6e9cea7d0c93c1bcbb1ca4d738c1a6b34dc0b053e498055414
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please also keep the ones in
core/fromnumeric.py and core/defmatrix.py up to date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" are used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index :
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews :
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index :
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value :
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the Numpy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lambdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lambdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, fill
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major) or
Fortran (column-major) order in memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the given
shape, dtype, and order.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
.. versionadded:: 1.6.0
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
.. versionadded:: 1.6.0
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'count_nonzero',
"""
count_nonzero(a)
Counts the number of non-zero values in the array ``a``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
Returns
-------
count : int or array of int
Number of non-zero values in the array.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
5
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset; default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays together.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimensions of `a` and `b` have different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
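Examples
--------
A sketch, via the public ``np.set_string_function`` wrapper:
>>> np.set_string_function(lambda a: "<array of %d>" % a.size, repr=1)
>>> repr(np.arange(3))
'<array of 3>'
>>> np.set_string_function(None, repr=1) # None restores the default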
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print ind
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting = 'safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
Starting in NumPy 1.9, the can_cast function returns False in 'safe'
casting mode for integer/float dtype and string dtype if the string dtype
length is not long enough to store the max integer/float value converted
to a string. Previously can_cast in 'safe' mode returned True for
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it's the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'einsum',
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : data-type, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscript labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major or column-major order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as self._array_interface_['data'][0].
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the as parameter attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the astype method raises an error if the string
dtype to cast to is not long enough in 'safe' casting mode to hold the
maximum value of the integer/float array being cast. Previously the
casting was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements.
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(a_min, a_max, out=None)
Return an array whose values are limited to ``[a_min, a_max]``.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
NumPy 1.10 the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A'}, optional
Whether to flatten in C (row-major), Fortran (column-major) order,
or preserve the C/Fortran ordering from `a`.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the imaginary part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is interpreted
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat',
"""
a.setasflat(arr)
Equivalent to a.flat = arr.flat, but is generally more efficient.
This function does not check for overlap, so if ``arr`` and ``a``
are viewing the same data with different strides, the results will
be unpredictable.
Parameters
----------
arr : array_like
The array to copy into a.
Examples
--------
>>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a
array([[0, 1, 2],
[4, 5, 6]])
>>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b
array([[ 2., 5.],
[ 1., 4.],
[ 0., 3.]], dtype=float32)
>>> a.setasflat(b)
>>> a
array([[2, 5, 1],
[4, 0, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
Determine if two arrays can share memory.
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Parameters
----------
a, b : ndarray
Returns
-------
out : bool
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
above. `new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data, or if references or views to it
exist, and the data memory must be changed.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth values it will partition all of them
into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print type(y)
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print x
[(1, 20) (3, 4)]
Using a view to convert an array to a record array:
>>> z = x.view(np.recarray)
>>> z.a
array([1], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a Numpy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a Numpy universal function (``ufunc``) object.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array([012, 036, 0144], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['012', '036', '0144'],
dtype='|S4')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# lib._compiled_base functions
#
##############################################################################
add_newdoc('numpy.lib._compiled_base', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. Prior to Numpy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is closed in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.lib._compiled_base', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
.. versionadded:: 1.6.0
A minimum number of bins for the output array.
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.lib._compiled_base', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.lib._compiled_base', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
.. versionadded:: 1.6.0
Determines whether the indices should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.lib._compiled_base', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring raise a RuntimeError
If this routine does not know how to add a docstring to the object
raise a TypeError
""")
add_newdoc('numpy.lib._compiled_base', 'add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.lib._compiled_base', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An integer type array whose elements should be packed to bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.lib._compiled_base', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print np.exp.identity
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty(len(A),len(B))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
::
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
::
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Record, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Record, one field named 'f1', in itself containing a record with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Record, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
Array-interface compliant full description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print dt.fields
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the Numpy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
O object
S (byte-)string
U Unicode
V void
= ======================
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order
specifications below. The default value ('S') results in
swapping the current byte order.
`new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional list of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
""")
add_newdoc('numpy.core.multiarray', 'busday_count',
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
""")
##############################################################################
#
# nd_grid instances
#
##############################################################################
add_newdoc('numpy.lib.index_tricks', 'mgrid',
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
    -------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
""")
add_newdoc('numpy.lib.index_tricks', 'ogrid',
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
    -------
mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
    provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* 'S' - swap dtype from current to opposite endian
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for other scalar classes
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'bool_',
"""Numpy's Boolean type. Character code: ``?``. Alias: bool8""")
add_newdoc('numpy.core.numerictypes', 'complex64',
"""
Complex number type composed of two 32 bit floats. Character code: 'F'.
""")
add_newdoc('numpy.core.numerictypes', 'complex128',
"""
Complex number type composed of two 64 bit floats. Character code: 'D'.
Python complex compatible.
""")
add_newdoc('numpy.core.numerictypes', 'complex256',
"""
Complex number type composed of two 128-bit floats. Character code: 'G'.
""")
add_newdoc('numpy.core.numerictypes', 'float32',
"""
32-bit floating-point number. Character code 'f'. C float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float64',
"""
64-bit floating-point number. Character code 'd'. Python float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float96',
"""
""")
add_newdoc('numpy.core.numerictypes', 'float128',
"""
128-bit floating-point number. Character code: 'g'. C long float
compatible.
""")
add_newdoc('numpy.core.numerictypes', 'int8',
"""8-bit integer. Character code ``b``. C char compatible.""")
add_newdoc('numpy.core.numerictypes', 'int16',
"""16-bit integer. Character code ``h``. C short compatible.""")
add_newdoc('numpy.core.numerictypes', 'int32',
"""32-bit integer. Character code 'i'. C int compatible.""")
add_newdoc('numpy.core.numerictypes', 'int64',
"""64-bit integer. Character code 'l'. Python int compatible.""")
add_newdoc('numpy.core.numerictypes', 'object_',
"""Any Python object. Character code: 'O'.""")
|
larsmans/numpy
|
numpy/add_newdocs.py
|
Python
|
bsd-3-clause
| 217,825
|
[
"Brian"
] |
6f337abc09582f3ec5b88d222004e8dcea6d4407a754419737ea9fa9d419f098
|
"""
VTK shortcut functions
For more about VTK, visit:
http://www.vtk.org/
http://www.vtk.org/Wiki/VTK/Examples/Python
"""
import vtk
import sys
import numpy
from vtk.util.colors import peacock
def vtk_point_cloud(points, colors=[], point_size=2):
"""
Represent a point cloud in VTK
Parameters
----------
points : numpy array, each row is a point
colors : list of colors, one per point
point_size : rendering size for the points
Returns
-------
actor : vtkActor representing the point cloud
"""
    nb = len(points)
    vtk_points = vtk.vtkPoints()
    vtk_verts = vtk.vtkCellArray()
    if colors:
        vtk_colors = vtk.vtkUnsignedCharArray()
        vtk_colors.SetNumberOfComponents(3)
        vtk_colors.SetName("Colors")
for i in range(0,nb):
p = points[i]
        if len(p) >= 3:
            coords = [p[0],p[1],p[2]]
elif len(p) == 2:
coords = [p[0],p[1],0]
elif len(p) == 1:
coords = [p[0],0,0]
else:
print "**ERROR** wrong dimension"
sys.exit(1)
id = vtk_points.InsertNextPoint( *coords )
vtk_verts.InsertNextCell(1)
vtk_verts.InsertCellPoint(id)
if colors:
vtk_colors.InsertNextTuple3( *colors[i] )
poly = vtk.vtkPolyData()
poly.SetPoints(vtk_points)
poly.SetVerts(vtk_verts)
if colors:
poly.GetPointData().SetScalars(vtk_colors)
poly.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(poly)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
    actor.GetProperty().SetRepresentationToPoints()
actor.GetProperty().SetPointSize( point_size )
return actor
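def _demo_point_cloud():
    # Illustrative sketch only: 100 random red points rendered with the
    # helpers in this module (all names and values are examples, not
    # fixtures).
    pts = numpy.random.rand(100, 3) * 10
    cols = [(255, 0, 0) for _ in pts]
    vtk_basic([vtk_point_cloud(pts, cols, point_size=4)])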
def vtk_basic( actors, save="", magnification=3 ):
"""
Create a window, renderer, interactor, add the actors and start the thing
Parameters
----------
    actors : list of vtkActors
    save : if non-empty, write a PNG screenshot to this filename
           instead of starting an interactive window
    magnification : magnification factor applied to the screenshot
Returns
-------
nothing
"""
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600,600)
# ren.SetBackground( 1, 1, 1)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for a in actors:
# assign actor to the renderer
ren.AddActor(a )
# style = vtk.vtkInteractorStyleTerrain()
# iren.SetInteractorStyle( style )
# render
renWin.Render()
if save:
if not save.endswith('.png'):
save += '.png'
grabber = vtk.vtkWindowToImageFilter()
grabber.SetInput( renWin )
grabber.SetMagnification( magnification )
grabber.Update()
writer = vtk.vtkPNGWriter()
writer.SetInput( grabber.GetOutput() )
writer.SetFileName( save )
writer.Write()
else:
# enable user interface interactor
iren.Initialize()
iren.Start()
def vtk_Nviews( actors, split='h' ):
"""
Create a window, an interactor and one renderer per actor
Parameters
----------
    actors : list of vtkActors
    split : 'h' slices the window into horizontal strips, one per
            actor; any other value slices it into vertical strips
Returns
-------
nothing
"""
N = len(actors)
# create a rendering window and renderers
renderers = [vtk.vtkRenderer() for i in range(N)]
renWin = vtk.vtkRenderWindow()
renWin.SetSize( 600, 600 )
for i in range(N):
# split the viewport
if split == 'h':
renderers[i].SetViewport(0,float(N-i-1)/N,1,float(N-i)/N)
else:
renderers[i].SetViewport(float(N-i-1)/N,0,float(N-i)/N,1)
renderers[i].SetBackground( 1, 1, 1)
renderers[i].AddActor( actors[i] )
renWin.AddRenderer(renderers[i])
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
#enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
def vtk_show_points( points, colors=[] ):
"""
Display a point cloud
Parameters
----------
points : numpy array, each row is a point
colors : list of colors, one per point
Returns
-------
nothing
"""
point_cloud = vtk_point_cloud(points,colors)
vtk_basic( [point_cloud] )
def vtk_colored_graph(points, edges, colors=[], line_width=2):
"""
Represent a graph in VTK
Parameters
----------
points : numpy array, each row is a point
edges : numpy array of edges, each row is of the form
[ point_1, point_2, distance ]
colors : list of colors, one per point
line_width : rendering size for the lines
Returns
-------
actor : vtkActor representing the graph
"""
nb_points = len(points)
vtk_points = vtk.vtkPoints()
vtk_lines = vtk.vtkCellArray()
vtk_colors = vtk.vtkUnsignedCharArray()
vtk_colors.SetNumberOfComponents(3)
vtk_colors.SetName( "Colors")
    if len(colors) == 0:
        # build a fresh list rather than appending to the mutable default
        colors = [(0, 164, 180)] * len(edges)
for i in range(0,nb_points):
p = points[i]
if len(p) >= 3:
coords = [p[0],p[1],p[2]]
elif len(p) == 2:
coords = [p[0],p[1],0]
elif len(p) == 1:
coords = [p[0],0,0]
else:
print "**ERROR** wrong dimension"
sys.exit(1)
id = vtk_points.InsertNextPoint( *coords )
for i in range(0,len(edges)):
line = vtk.vtkLine()
line.GetPointIds().SetId(0,edges[i][0])
line.GetPointIds().SetId(1,edges[i][1])
vtk_lines.InsertNextCell(line)
vtk_colors.InsertNextTuple3( *colors[i] )
poly = vtk.vtkPolyData()
poly.SetPoints(vtk_points)
poly.SetLines(vtk_lines)
    poly.GetCellData().SetScalars(vtk_colors)
poly.Update()
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInput(poly)
tubes = vtk.vtkTubeFilter()
tubes.SetInputConnection(cleaner.GetOutputPort())
tubes.SetRadius(0.1)
tubes.SetNumberOfSides(6)
mapEdges = vtk.vtkPolyDataMapper()
mapEdges.SetInputConnection(tubes.GetOutputPort())
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(mapEdges)
edgeActor.GetProperty().SetSpecularColor(1, 1, 1)
edgeActor.GetProperty().SetSpecular(0.3)
edgeActor.GetProperty().SetSpecularPower(20)
edgeActor.GetProperty().SetAmbient(0.2)
edgeActor.GetProperty().SetDiffuse(0.8)
return edgeActor
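def _demo_colored_graph():
    # Illustrative sketch only: a triangular graph; each edge row is
    # [point_index_1, point_index_2, distance] as documented above.
    pts = [[0, 0, 0], [10, 0, 0], [5, 8, 0]]
    edges = [[0, 1, 10], [1, 2, 9], [2, 0, 9]]
    vtk_basic([vtk_colored_graph(pts, edges)])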
def vtk_triangles(points, triangles, colors=[]):
"""
Display triangles in VTK
Parameters
----------
points : numpy array, each row is a point
triangle : numpy array of vertices, each row is of the form
[ point_1, point_2, point_3 ]
colors : list of colors, one per triangle
Returns
-------
actor : vtkActor representing the triangles
"""
nb_points = len(points)
vtk_points = vtk.vtkPoints()
vtk_triangles = vtk.vtkCellArray()
vtk_colors = vtk.vtkUnsignedCharArray()
vtk_colors.SetNumberOfComponents(3)
vtk_colors.SetName( "Colors")
if (len(colors) ==0):
for i in range(0,nb_points):
vtk_colors.InsertNextTuple3(0, 164, 180)
else:
for i in range(0,nb_points):
vtk_colors.InsertNextTuple3( *colors[i] )
for i in range(0,nb_points):
p = points[i]
if len(p) >= 3:
coords = [p[0],p[1],p[2]]
elif len(p) == 2:
coords = [p[0],p[1],0]
elif len(p) == 1:
coords = [p[0],0,0]
else:
print "**ERROR** wrong dimension"
sys.exit(1)
id = vtk_points.InsertNextPoint( *coords )
for i in range(0,len(triangles)):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0,triangles[i][0])
triangle.GetPointIds().SetId(1,triangles[i][1])
triangle.GetPointIds().SetId(2,triangles[i][2])
vtk_triangles.InsertNextCell(triangle)
poly = vtk.vtkPolyData()
poly.SetPoints(vtk_points)
poly.SetPolys(vtk_triangles)
poly.GetPointData().SetScalars(vtk_colors)
poly.Update()
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInput(poly)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(cleaner.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
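def _demo_triangles():
    # Illustrative sketch only: a single triangle with one colour per
    # vertex; the vertex indices refer to rows of `pts`.
    pts = [[0, 0, 0], [10, 0, 0], [5, 8, 0]]
    tris = [[0, 1, 2]]
    cols = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    vtk_basic([vtk_triangles(pts, tris, cols)])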
from vtk.util.vtkConstants import *
def numpy2VTK(img,spacing=[1.0,1.0,1.0]):
importer = vtk.vtkImageImport()
img_data = img.astype('uint8')
    img_string = img_data.tostring()  # raw bytes of the uint8 volume
dim = img.shape
importer.CopyImportVoidPointer(img_string, len(img_string))
importer.SetDataScalarType(VTK_UNSIGNED_CHAR)
importer.SetNumberOfScalarComponents(1)
extent = importer.GetDataExtent()
importer.SetDataExtent(extent[0], extent[0] + dim[2] - 1,
extent[2], extent[2] + dim[1] - 1,
extent[4], extent[4] + dim[0] - 1)
importer.SetWholeExtent(extent[0], extent[0] + dim[2] - 1,
extent[2], extent[2] + dim[1] - 1,
extent[4], extent[4] + dim[0] - 1)
importer.SetDataSpacing( spacing[2], spacing[1], spacing[0])
importer.SetDataOrigin( 0,0,0 )
# flip = vtk.vtkImageFlip()
# flip.SetFilteredAxis(2)
# flip.SetInput(importer.GetOutput())
return importer
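def _demo_numpy2VTK():
    # Illustrative sketch only: wrap an 8-bit volume as a vtkImageImport.
    # The numpy array is indexed [z, y, x], which is why dim[2]/dim[1]/dim[0]
    # set the x/y/z extents above.
    img = (numpy.random.rand(16, 32, 64) * 255).astype('uint8')
    return numpy2VTK(img, spacing=[2.0, 1.0, 1.0])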
def volumeRender(img, tf=[],spacing=[1.0,1.0,1.0], box=False):
importer = numpy2VTK(img,spacing)
# Transfer Functions
opacity_tf = vtk.vtkPiecewiseFunction()
color_tf = vtk.vtkColorTransferFunction()
if len(tf) == 0:
tf.append([img.min(),0,0,0,0])
tf.append([img.max(),1,1,1,1])
for p in tf:
color_tf.AddRGBPoint(p[0], p[1], p[2], p[3])
opacity_tf.AddPoint(p[0], p[4])
volMapper = vtk.vtkGPUVolumeRayCastMapper()
# compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# compositeFunction.SetCompositeMethodToInterpolateFirst()
# volMapper.SetVolumeRayCastFunction(compositeFunction)
# pix_diag = 5.0
# volMapper.SetSampleDistance(pix_diag / 5.0)
volMapper.SetInputConnection(importer.GetOutputPort())
# The property describes how the data will look
volProperty = vtk.vtkVolumeProperty()
volProperty.SetColor(color_tf)
volProperty.SetScalarOpacity(opacity_tf)
volProperty.ShadeOn()
volProperty.SetInterpolationTypeToLinear()
#volProperty.SetScalarOpacityUnitDistance(pix_diag)
vol = vtk.vtkVolume()
vol.SetMapper(volMapper)
vol.SetProperty(volProperty)
if not box:
return [vol]
else:
bbox = outline(importer)
return [vol,bbox]
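def _demo_volumeRender():
    # Illustrative sketch only: each transfer-function row is
    # [scalar_value, red, green, blue, opacity], exactly as consumed by
    # the loop above.
    img = (numpy.random.rand(32, 32, 32) * 255).astype('uint8')
    tf = [[0, 0.0, 0.0, 0.0, 0.0],
          [255, 1.0, 1.0, 1.0, 0.8]]
    vtk_basic(volumeRender(img, tf=tf, box=True))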
def marchingCubes(img,spacing=[1.0,1.0,1.0],contours=[]):
importer = numpy2VTK(img,spacing)
if len(contours) == 0:
contours = [[img.max(),1.0,1.0,1.0,1.0]]
actors = []
for c in contours:
mc = vtk.vtkMarchingCubes()
mc.ComputeScalarsOff()
mc.ComputeGradientsOff()
mc.ComputeNormalsOff()
mc.SetValue( 0, c[0] )
mc.SetInput( importer.GetOutput())
# connectivityFilter = vtk.vtkPolyDataConnectivityFilter()
# connectivityFilter.SetInput(mc.GetOutput())
# connectivityFilter.ColorRegionsOff()
# connectivityFilter.SetExtractionModeToLargestRegion()
# tris = vtk.vtkTriangleFilter()
# tris.SetInput(mc.GetOutput())
# tris.GetOutput().ReleaseDataFlagOn()
# tris.Update()
# strip = vtk.vtkStripper()
# strip.SetInput(tris.GetOutput())
# strip.GetOutput().ReleaseDataFlagOn()
# strip.Update()
mapper = vtk.vtkDataSetMapper()
mapper.SetInput(mc.GetOutput() )
mapper.ImmediateModeRenderingOn()
# mapper.SetInput( connectivityFilter.GetOutput() )
actor = vtk.vtkActor()
actor.SetMapper( mapper)
actor.GetProperty().SetColor(c[1],c[2],c[3])
actor.GetProperty().SetOpacity(c[4])
actor.GetProperty().SetRepresentationToSurface()
actors.append(actor)
return actors
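def _demo_marchingCubes():
    # Illustrative sketch only: extract a semi-transparent red isosurface
    # at value 128; each contour row is [isovalue, r, g, b, opacity].
    img = (numpy.random.rand(32, 32, 32) * 255).astype('uint8')
    vtk_basic(marchingCubes(img, contours=[[128, 1.0, 0.0, 0.0, 0.5]]))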
def contours(img,spacing=[1.0,1.0,1.0],contours=[]):
importer = numpy2VTK(img,spacing)
if len(contours) == 0:
contours = [[img.max(),1.0,1.0,1.0,1.0]]
actors = []
for c in contours:
contourExtractor = vtk.vtkContourFilter()
contourExtractor.SetInputConnection(importer.GetOutputPort())
contourExtractor.SetValue(0, c[0])
# contourNormals = vtk.vtkPolyDataNormals()
# contourNormals.SetInputConnection(contourExtractor.GetOutputPort())
# contourNormals.SetFeatureAngle(60.0)
# contourStripper = vtk.vtkStripper()
# contourStripper.SetInputConnection(contourNormals.GetOutputPort())
deci = vtk.vtkDecimatePro()
deci.SetInputConnection(contourExtractor.GetOutputPort())
deci.SetTargetReduction(0.99)
deci.PreserveTopologyOn ()
# smoother = vtk.vtkSmoothPolyDataFilter()
# smoother.SetInputConnection(deci.GetOutputPort())
# smoother.SetNumberOfIterations(50)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(deci.GetOutputPort())
normals.FlipNormalsOn()
# smoothFilter = vtk.vtkSmoothPolyDataFilter()
# smoothFilter.SetInputConnection(contourStripper.GetOutputPort())
# smoothFilter.SetNumberOfIterations(50)
# smoothFilter.Update()
contourMapper = vtk.vtkPolyDataMapper()
# contourMapper.SetInputConnection(contourStripper.GetOutputPort())
contourMapper.SetInputConnection(normals.GetOutputPort())
contourMapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper( contourMapper)
actor.GetProperty().SetColor(c[1],c[2],c[3])
actor.GetProperty().SetOpacity(c[4])
actor.GetProperty().SetRepresentationToSurface()
# # An outline provides context around the data.
# outlineData = vtk.vtkOutlineFilter()
# outlineData.SetInputConnection(v16.GetOutputPort())
# mapOutline = vtk.vtkPolyDataMapper()
# mapOutline.SetInputConnection(outlineData.GetOutputPort())
# outline = vtk.vtkActor()
# outline.SetMapper(mapOutline)
# outline.GetProperty().SetColor(0, 0, 0)
actors.append(actor)
return actors
def axes(width=10):
pts = vtk.vtkPoints()
pts.InsertNextPoint([0.0, 0.0, 0.0])
pts.InsertNextPoint([width*10, 0.0, 0.0])
pts.InsertNextPoint([0.0, width*10, 0.0])
pts.InsertNextPoint([0.0, 0.0, width*10])
# http://vtk.org/gitweb?p=VTK.git;a=blob;f=Examples/Annotation/Tcl/textOrigin.tcl
# Xactor = vtk.vtkTextActor()
# Xactor.GetTextProperty().SetFontSize( 24 )
# Xactor.SetPosition( [width*10, 0.0, 0.0] )
# Xactor.SetInput( "X" )
# Xactor.GetTextProperty().SetColor( 1.0,0.0,0.0 )
# Setup the colors array
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
# Add the colors we created to the colors array
colors.InsertNextTuple3(255, 0, 0)
colors.InsertNextTuple3(0, 255, 0)
colors.InsertNextTuple3(0, 0, 255)
# Create the first line (between Origin and P0)
line0 = vtk.vtkLine()
line0.GetPointIds().SetId(0,0) #the second 0 is the index of the Origin in the vtkPoints
line0.GetPointIds().SetId(1,1) #the second 1 is the index of P0 in the vtkPoints
# Create the second line (between Origin and P1)
line1 = vtk.vtkLine()
line1.GetPointIds().SetId(0,0) #the second 0 is the index of the Origin in the vtkPoints
line1.GetPointIds().SetId(1,2) #2 is the index of P1 in the vtkPoints
# Create the third line (between Origin and P2)
line2 = vtk.vtkLine()
line2.GetPointIds().SetId(0,0) #the second 0 is the index of the Origin in the vtkPoints
line2.GetPointIds().SetId(1,3) #3 is the index of P2 in the vtkPoints
# Create a cell array to store the lines in and add the lines to it
lines = vtk.vtkCellArray()
lines.InsertNextCell(line0)
lines.InsertNextCell(line1)
lines.InsertNextCell(line2)
# Create a polydata to store everything in
linesPolyData = vtk.vtkPolyData()
# Add the points to the dataset
linesPolyData.SetPoints(pts)
# Add the lines to the dataset
linesPolyData.SetLines(lines)
# Color the lines - associate the first component (red) of the
# colors array with the first component of the cell array (line 0)
# and the second component (green) of the colors array with the
# second component of the cell array (line 1)
linesPolyData.GetCellData().SetScalars(colors)
# Visualize
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(linesPolyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
return actor#, Xactor
def move(actor, matrix):
transfo_mat = vtk.vtkMatrix4x4()
# for i in range(0,4):
# for j in range(0,4):
# transfo_mat.SetElement(i,j, matrix[i,j])
for k in range(0,4):
transfo_mat.SetElement(k,3, matrix[k,3])
for k in range(0,4):
transfo_mat.SetElement(k,0, matrix[k,2])
for k in range(0,4):
transfo_mat.SetElement(k,1, matrix[k,1])
for k in range(0,4):
transfo_mat.SetElement(k,2, matrix[k,0])
print matrix
print transfo_mat
actor.SetUserMatrix(transfo_mat)
def outline(source):
outline_filter = vtk.vtkOutlineFilter()
outline_filter.SetInput(source.GetOutput())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(outline_filter.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
|
sk1712/IRTK
|
wrapping/cython/irtk/ext/vtkTools.py
|
Python
|
bsd-3-clause
| 17,980
|
[
"VTK",
"VisIt"
] |
fee1641d6152e85d5255cf96804bd7292186f941d1828132cc3c776d4b9a7ffb
|
from scalars import *
def trig_test():
print(cos(Decimal('0.5')))
print(sin(Decimal('0.5')))
import math
print(math.cos(0.5))
print(math.sin(0.5))
def cos_test():
graph(lambda x: cos(pi*2*x))
#cos_test()
def v2f_test():
    print(graph(v2f([1,1,1,1,1])))  # should be a Dirac delta at 0
def integrate_test():
def f(x): return x*one
print(integrate(f))
print(integrate(f, 1000))
#integrate_test()
def DFT_test():
def c(x):
if x>one/7 and x<one*2/5: return 100
return 1
def d(x):
n=10**8
if x<one/20: return 25*n
if x<one/10: return 4*n
return 1*n
def e(x): return one*3/4-x
def f(x):
if dec_greater_than(x,one/2-one/5) and not dec_greater_than(x, one/2+one/5): return 50
#if x>one/2-one/20 and x<one/2+one/20: return 10
return 3
def g(x):
if x>0.5: return 100
return 0
def h(f):
v=DCT(f, 10)
a=v2f(v)
print(v)
print('\n\n')
graph(a, 100)
print('\n\n')
#print(integrate(a))
n=1000
b=cost_to_buy_shares([0]*len(v), v, n)
print(b)
import math
print(math.e**(float((a(0)-b)/n)))
h(f)
DFT_test()
|
AugurProject/augur-python
|
scalars_tests.py
|
Python
|
gpl-3.0
| 1,234
|
[
"DIRAC"
] |
aef849126dec254fa96e2679d03aea01f1cdccc7d5d96d603751476de5dd2478
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests and parser data tests for cclib."""
from data import *
|
Schamnad/cclib
|
test/__init__.py
|
Python
|
bsd-3-clause
| 268
|
[
"cclib"
] |
49b1c25f0fcc325ff5aa02bfba8583c043e41b9e93108aaeb34afc041888dce5
|
"""Twisted shell support.
XXX - This module is missing proper docs.
"""
# Tell nose to skip this module
__test__ = {}
import sys
from twisted.internet import reactor, threads
from IPython.ipmaker import make_IPython
from IPython.iplib import InteractiveShell
from IPython.ipstruct import Struct
import Queue,thread,threading,signal
from signal import signal, SIGINT
from IPython.genutils import Term,warn,error,flag_calls, ask_yes_no
import shellglobals
def install_gtk2():
""" Install gtk2 reactor, needs to be called bef """
from twisted.internet import gtk2reactor
gtk2reactor.install()
def hijack_reactor():
"""Modifies Twisted's reactor with a dummy so user code does
not block IPython. This function returns the original
'twisted.internet.reactor' that has been hijacked.
NOTE: Make sure you call this *AFTER* you've installed
the reactor of your choice.
"""
from twisted import internet
orig_reactor = internet.reactor
class DummyReactor(object):
def run(self):
pass
def __getattr__(self, name):
return getattr(orig_reactor, name)
def __setattr__(self, name, value):
return setattr(orig_reactor, name, value)
internet.reactor = DummyReactor()
return orig_reactor
class TwistedInteractiveShell(InteractiveShell):
"""Simple multi-threaded shell."""
# Threading strategy taken from:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65109, by Brian
# McErlean and John Finlay. Modified with corrections by Antoon Pardon,
# from the pygtk mailing list, to avoid lockups with system calls.
# class attribute to indicate whether the class supports threads or not.
# Subclasses with thread support should override this as needed.
isthreaded = True
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None,banner2='',**kw):
"""Similar to the normal InteractiveShell, but with threading control"""
InteractiveShell.__init__(self,name,usage,rc,user_ns,
user_global_ns,banner2)
# A queue to hold the code to be executed.
self.code_queue = Queue.Queue()
# Stuff to do at closing time
self._kill = None
on_kill = kw.get('on_kill', [])
# Check that all things to kill are callable:
for t in on_kill:
if not callable(t):
raise TypeError,'on_kill must be a list of callables'
self.on_kill = on_kill
# thread identity of the "worker thread" (that may execute code directly)
self.worker_ident = None
self.reactor_started = False
self.first_run = True
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Modified version of code.py's runsource(), to handle threading issues.
See the original for full docstring details."""
# If Ctrl-C was typed, we reset the flag and return right away
if shellglobals.KBINT:
shellglobals.KBINT = False
return False
if self._kill:
# can't queue new code if we are being killed
return True
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# shortcut - if we are in worker thread, or the worker thread is not running,
# execute directly (to allow recursion and prevent deadlock if code is run early
# in IPython construction)
if (not self.reactor_started or (self.worker_ident is None and not self.first_run)
or self.worker_ident == thread.get_ident() or shellglobals.run_in_frontend(source)):
InteractiveShell.runcode(self,code)
return
# Case 3
# Store code in queue, so the execution thread can handle it.
self.first_run = False
completed_ev, received_ev = threading.Event(), threading.Event()
self.code_queue.put((code,completed_ev, received_ev))
reactor.callLater(0.0,self.runcode)
received_ev.wait(5)
if not received_ev.isSet():
# the mainloop is dead, start executing code directly
print "Warning: Timeout for mainloop thread exceeded"
print "switching to nonthreaded mode (until mainloop wakes up again)"
self.worker_ident = None
else:
completed_ev.wait()
return False
def runcode(self):
"""Execute a code object.
Multithreaded wrapper around IPython's runcode()."""
# we are in worker thread, stash out the id for runsource()
self.worker_ident = thread.get_ident()
if self._kill:
print >>Term.cout, 'Closing threads...',
Term.cout.flush()
for tokill in self.on_kill:
tokill()
print >>Term.cout, 'Done.'
# allow kill() to return
self._kill.set()
return True
# Install SIGINT handler. We do it every time to ensure that if user
# code modifies it, we restore our own handling.
try:
signal(SIGINT,shellglobals.sigint_handler)
except SystemError:
# This happens under Windows, which seems to have all sorts
# of problems with signal handling. Oh well...
pass
        # Flush queue of pending code by calling the run method of the parent
# class with all items which may be in the queue.
code_to_run = None
while 1:
try:
code_to_run, completed_ev, received_ev = self.code_queue.get_nowait()
except Queue.Empty:
break
received_ev.set()
# Exceptions need to be raised differently depending on which
# thread is active. This convoluted try/except is only there to
# protect against asynchronous exceptions, to ensure that a shellglobals.KBINT
# at the wrong time doesn't deadlock everything. The global
# CODE_TO_RUN is set to true/false as close as possible to the
# runcode() call, so that the KBINT handler is correctly informed.
try:
try:
shellglobals.CODE_RUN = True
InteractiveShell.runcode(self,code_to_run)
except KeyboardInterrupt:
print "Keyboard interrupted in mainloop"
while not self.code_queue.empty():
code = self.code_queue.get_nowait()
break
finally:
shellglobals.CODE_RUN = False
# allow runsource() return from wait
completed_ev.set()
# This MUST return true for gtk threading to work
return True
def kill(self):
"""Kill the thread, returning when it has been shut down."""
self._kill = threading.Event()
reactor.callLater(0.0,self.runcode)
self._kill.wait()
class IPShellTwisted:
"""Run a Twisted reactor while in an IPython session.
Python commands can be passed to the thread where they will be
executed. This is implemented by periodically checking for
passed code using a Twisted reactor callback.
"""
    TIMEOUT = 0.01  # Interval between reactor runs, in seconds.
def __init__(self, argv=None, user_ns=None, debug=1,
shell_class=TwistedInteractiveShell):
from twisted.internet import reactor
self.reactor = hijack_reactor()
mainquit = self.reactor.stop
# Make sure IPython keeps going after reactor stop.
def reactorstop():
pass
self.reactor.stop = reactorstop
reactorrun_orig = self.reactor.run
self.quitting = False
def reactorrun():
            while not self.quitting:
reactorrun_orig()
self.reactor.run = reactorrun
self.IP = make_IPython(argv, user_ns=user_ns, debug=debug,
shell_class=shell_class,
on_kill=[mainquit])
# threading.Thread.__init__(self)
def run(self):
self.IP.mainloop()
self.quitting = True
self.IP.kill()
def mainloop(self):
def mainLoopThreadDeath(r):
print "mainLoopThreadDeath: ", str(r)
def spawnMainloopThread():
d=threads.deferToThread(self.run)
d.addBoth(mainLoopThreadDeath)
reactor.callWhenRunning(spawnMainloopThread)
self.IP.reactor_started = True
self.reactor.run()
print "mainloop ending...."
exists = True
if __name__ == '__main__':
# Sample usage.
# Create the shell object. This steals twisted.internet.reactor
# for its own purposes, to make sure you've already installed a
# reactor of your choice.
shell = IPShellTwisted(
argv=[],
user_ns={'__name__': '__example__',
'hello': 'world',
},
)
# Run the mainloop. This runs the actual reactor.run() method.
# The twisted.internet.reactor object at this point is a dummy
# object that passes through to the actual reactor, but prevents
# run() from being called on it again.
shell.mainloop()
# You must exit IPython to terminate your program.
print 'Goodbye!'
|
mastizada/kuma
|
vendor/packages/ipython/IPython/twshell.py
|
Python
|
mpl-2.0
| 9,871
|
[
"Brian"
] |
9242568195316f67aa5e27c18a33b5a1f8314825a52ec9955134610d33c8605d
|
# choco/_ast_util.py
# Copyright (C) 2006-2016 the Choco authors and contributors <see AUTHORS file>
#
# This module is part of Choco and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import * # noqa
from choco.compat import arg_stringname
BOOLOP_SYMBOLS = {
And: 'and',
Or: 'or'
}
BINOP_SYMBOLS = {
Add: '+',
Sub: '-',
Mult: '*',
Div: '/',
FloorDiv: '//',
Mod: '%',
LShift: '<<',
RShift: '>>',
BitOr: '|',
BitAnd: '&',
BitXor: '^'
}
CMPOP_SYMBOLS = {
Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
Invert: '~',
Not: 'not',
UAdd: '+',
USub: '-'
}
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
"""Parse an expression into an AST node."""
return compile(expr, filename, mode, PyCF_ONLY_AST)
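# Example (illustrative sketch):
#   tree = parse('x = 1 + 2')             # returns a Module node
#   code = compile(tree, '<ast>', 'exec')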
def to_source(node, indent_with=' ' * 4):
"""
This function can convert a node tree back into python sourcecode. This
is useful for debugging purposes, especially if you're dealing with custom
asts not generated by python itself.
It could be that the sourcecode is evaluable when the AST itself is not
compilable / evaluable. The reason for this is that the AST contains some
more data than regular sourcecode does, which is dropped during
conversion.
Each level of indentation is replaced with `indent_with`. Per default this
parameter is equal to four spaces as suggested by PEP 8, but it might be
adjusted to match the application's styleguide.
"""
generator = SourceGenerator(indent_with)
generator.visit(node)
return ''.join(generator.result)
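# Example round-trip (illustrative sketch):
#   src = to_source(parse('if x:\n    y = 1'))
#   # `src` now holds regenerated source text for the tree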
def dump(node):
"""
A very verbose representation of the node passed. This is useful for
debugging purposes.
"""
def _format(node):
if isinstance(node, AST):
return '%s(%s)' % (node.__class__.__name__,
', '.join('%s=%s' % (a, _format(b))
for a, b in iter_fields(node)))
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy the source location hint (`lineno` and `col_offset`) from the
old to the new node if possible and return the new one.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
Some nodes require a line number and the column offset. Without that
information the compiler will abort the compilation. Because it can be
a dull task to add appropriate line numbers and column offsets when
adding new nodes this function can help. It copies the line number and
column offset of the parent node to the child nodes without this
information.
    Unlike `copy_location` this works recursively and won't touch nodes that
    already have location information.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line numbers of all nodes by `n` if they have line number
attributes. This is useful to "move code" to a different location in a
file.
"""
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
"""Iterate over all fields of a node, only yielding existing fields."""
# CPython 2.5 compat
if not hasattr(node, '_fields') or not node._fields:
return
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def get_fields(node):
"""Like `iter_fiels` but returns a dict."""
return dict(iter_fields(node))
def iter_child_nodes(node):
"""Iterate over all child nodes or a node."""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_child_nodes(node):
"""Like `iter_child_nodes` but returns a list."""
return list(iter_child_nodes(node))
def get_compile_mode(node):
"""
Get the mode for `compile` of a given node. If the node is not a `mod`
node (`Expression`, `Module` etc.) a `TypeError` is thrown.
"""
if not isinstance(node, mod):
raise TypeError('expected mod node, got %r' % node.__class__.__name__)
return {
Expression: 'eval',
Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
"""
Return the docstring for the given node or `None` if no docstring can be
found. If the node provided does not accept docstrings a `TypeError`
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Str):
return node.body[0].s
def walk(node):
"""
Iterate over all nodes. This is useful if you only want to modify nodes in
    place and don't care about the context or the order in which the nodes are
    returned.
"""
from collections import deque
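    # Breadth-first traversal: children are appended on the right and nodes
    # are popped from the left of the deque.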
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
Walks the abstract syntax tree and call visitor functions for every node
found. The visitor functions may return values which will be forwarded
by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def get_visitor(self, node):
"""
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
    Here is an example transformer that rewrites all `foo` to `data['foo']`::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes
you must either transform the child nodes yourself or call the generic
visit function for the node first.
Nodes that were part of a collection of statements (that applies to
all statement nodes) may also return a list of nodes rather than just
a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
class SourceGenerator(NodeVisitor):
"""
    This visitor is able to transform a well-formed syntax tree into Python
    source code. For more details have a look at the docstring of the
    `to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
        self.newline()
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + arg_stringname(node.vararg))
if node.kwarg is not None:
write_comma()
self.write('**' + arg_stringname(node.kwarg))
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
            self.visit(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if getattr(node, "starargs", None):
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if getattr(node, "kwargs", None):
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
            else:
                if else_:
                    self.newline()
                    self.write('else:')
                    self.body(else_)
                break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
        for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline()
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if getattr(node, "starargs", None):
write_comma()
self.write('*')
self.visit(node.starargs)
if getattr(node, "kwargs", None):
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_NameConstant(self, node):
self.write(str(node.value))
def visit_arg(self, node):
self.write(node.arg)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
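    # Factory producing visitors for delimiter-wrapped element sequences;
    # the generated functions are bound below and the factory is then deleted.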
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(zip(node.keys, node.values)):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
if idx:
self.write(', ')
self.visit(item)
def visit_Yield(self, node):
self.write('yield ')
self.visit(node.value)
def visit_Lambda(self, node):
self.write('lambda ')
self.signature(node.args)
self.write(': ')
self.visit(node.body)
def visit_Ellipsis(self, node):
self.write('Ellipsis')
def generator_visit(left, right):
def visit(self, node):
self.write(left)
self.visit(node.elt)
for comprehension in node.generators:
self.visit(comprehension)
self.write(right)
return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit
def visit_DictComp(self, node):
self.write('{')
self.visit(node.key)
self.write(': ')
self.visit(node.value)
for comprehension in node.generators:
self.visit(comprehension)
self.write('}')
def visit_IfExp(self, node):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Starred(self, node):
self.write('*')
self.visit(node.value)
def visit_Repr(self, node):
# XXX: python 2.6 only
self.write('`')
self.visit(node.value)
self.write('`')
# Helper Nodes
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as ' + node.asname)
def visit_comprehension(self, node):
self.write(' for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
if node.ifs:
for if_ in node.ifs:
self.write(' if ')
self.visit(if_)
def visit_excepthandler(self, node):
self.newline()
self.write('except')
if node.type is not None:
self.write(' ')
self.visit(node.type)
if node.name is not None:
self.write(' as ')
self.visit(node.name)
self.write(':')
self.body(node.body)
|
whiteclover/Choco
|
choco/_ast_util.py
|
Python
|
mit
| 25,694
|
[
"VisIt"
] |
c5a707a2d32a9df3a1bd56ec1f5c047ef7ccd2f47517896f5b47d6e01a1992d6
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPhonopy(PythonPackage):
"""Phonopy is an open source package for phonon
calculations at harmonic and quasi-harmonic levels."""
homepage = "http://atztogo.github.io/phonopy/index.html"
url = "http://sourceforge.net/projects/phonopy/files/phonopy/phonopy-1.10/phonopy-1.10.0.tar.gz"
version('1.10.0', sha256='6b7c540bbbb033203c45b8472696db02a3a55913a0e5eb23de4dc9a3bee473f7')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
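    # Illustrative usage (not part of the package definition):
    #   spack install py-phonopy@1.10.0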
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-phonopy/package.py
|
Python
|
lgpl-2.1
| 835
|
[
"phonopy"
] |
3c8449d57bff308e77005c3bd23fa13e815d2a201e8b76ea612783522fc9942f
|
from __future__ import print_function
import sys
import numpy as np
msg = "\n'TypeError: array cannot be safely cast to required type'\n"
msg += "means you are probably using a broken ScientficPython, \n"
msg += "see: https://bugs.launchpad.net/ubuntu/+source/python-scientific/+bug/1041302\n"
import Scientific.IO.NetCDF as netcdf
import Scientific
version = Scientific.__version__.split(".")
print('Found ScientificPython version: ',Scientific.__version__)
if list(map(int,version)) < [2,8]:
print('ScientificPython 2.8 or greater required for numpy support in NetCDF')
    raise RuntimeError('ScientificPython version 2.8 or greater is required')
handle = netcdf.NetCDFFile("test.nc", "w")
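# Assigning a numpy array as a global attribute is what triggers the cast
# bug on broken ScientificPython builds.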
try:
handle.test = np.array([1.0])
except TypeError:
print(msg, file=sys.stderr)
raise
handle.close()
|
suttond/MODOI
|
ase/test/scientificpython_bug.py
|
Python
|
lgpl-3.0
| 814
|
[
"NetCDF"
] |
ebaf60ee75a175ce27277499797ebf0bedac779583e183b6b71250634bcef647
|
"""
This Code replaces usage of another pipeline that was also detected SNPs between the referrnce COI to sequncing data.
For backward-compatibility it outputs few NA columns
"""
from Bio.SeqIO import parse
from optparse import OptionParser
def Main():
parser = OptionParser()
parser.add_option("--db_file",
default="db.fa",
help="COI DB to calculate coverage")
parser.add_option("--blast",
default="coi.reads.blastn.COI.final",
help="suffix of blast res file")
parser.add_option("--hits",
default="pipe.blast.parsed.final",
help="Suffix of the mapping file between the reads to the COI from previous steps")
parser.add_option("--samples_file",
default="dirs",
help="A file with the names of all the direcetories")
parser.add_option("-o","--out",
help="Output file")
options, _ = parser.parse_args()
with open(options.samples_file) as f:
samples = f.readlines()
with open(options.db_file) as f:
cois = list(parse(f, "fasta"))
out_file = open(options.out, 'w')
out_file.write("Sample\tTaxonomy\tCOI\tCoveredBPs\tCOI_Length\tcoverage\tNA\tNA\n")
for sample in samples:
sample = sample.rstrip("\n")
coiToCoverdBases, coiTax = SampleCoverage("{}/{}.{}".format(sample,sample, options.hits), "{}/{}.{}".format(sample,sample, options.blast), cois)
for coi, coverage in coiToCoverdBases.iteritems():
covered_bps = len([bp for bp in coverage if bp])
if covered_bps:
out_file.write("{}\t{}\t{}\t{}\t{}\t{}\tNA\tNA\n".format(sample,coiTax[coi], coi, covered_bps, len(coverage), round(float(covered_bps)/len(coverage)*100,2)))
def SampleCoverage(hits_file, blast_file, cois):
coiToCoverdBases = {}
coiToTaxonomy = {}
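    # One boolean per reference base; flipped to True when a mapped read
    # covers that position.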
for coi in cois:
coiToCoverdBases[coi.id] = [False] * len(coi.seq)
with open(hits_file) as f:
hits = f.readlines()
readsToCOI = {}
    # Parse the hits file; the first row is the header.
for hit in hits[1:]:
read, COI, _, order, family, genus, species = hit.split("\t")
        # Hits starting with '*' are reads mapped to a different COI of the same species, so it is OK to count them.
read = read.replace("*", "", 1)
readsToCOI[read] = COI
taxon = "{}_{}_{}_{}".format(order, family, genus, species)
coiToTaxonomy[COI] = taxon.rstrip("\n")
with open(blast_file) as f:
blast_lines = f.readlines()
for blast_line in blast_lines:
query, subj, pid, aln_len, mismathces, gaps, qs, qe, ss, se, _, _ = blast_line.split("\t")
ss = int(ss)
se = int(se)
matching_COI = readsToCOI.get(query)
if not matching_COI or matching_COI != subj:
continue
covered_indices = xrange(ss - 1, se) if ss < se else xrange(se - 1, ss, 1)
for i in covered_indices:
coiToCoverdBases[matching_COI][i] = True
return coiToCoverdBases, coiToTaxonomy
if __name__ == "__main__":
Main()
|
omerzu/larvae_project
|
scripts/simple_coverage_calculator.py
|
Python
|
mit
| 3,160
|
[
"BLAST"
] |
9a67361053a9f2ab80d4782fa39b83ecf2e4862766268339f42127829f7c3dda
|
# gp_signals.py
"""Contains class factories for Gaussian Process (GP) signals.
GP signals are defined as the class of signals that have a basis
function matrix and basis prior vector.
"""
import functools
import itertools
import logging
import numpy as np
import scipy.sparse as sps
from sksparse.cholmod import cholesky
from enterprise.signals import parameter, selections, signal_base, utils
from enterprise.signals.parameter import function
from enterprise.signals.selections import Selection
from enterprise.signals.utils import KernelMatrix
# logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
def BasisGP(
priorFunction,
basisFunction,
coefficients=False,
combine=True,
selection=Selection(selections.no_selection),
name="",
):
"""Class factory for generic GPs with a basis matrix."""
class BasisGP(signal_base.Signal):
signal_type = "basis"
signal_name = name
signal_id = name
basis_combine = combine
def __init__(self, psr):
super(BasisGP, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
self._do_selection(psr, priorFunction, basisFunction, coefficients, selection)
def _do_selection(self, psr, priorfn, basisfn, coefficients, selection):
sel = selection(psr)
self._keys = sorted(sel.masks.keys())
self._masks = [sel.masks[key] for key in self._keys]
self._prior, self._bases = {}, {}
self._params, self._coefficients = {}, {}
for key, mask in zip(self._keys, self._masks):
pnames = [psr.name, name, key]
pname = "_".join([n for n in pnames if n])
self._prior[key] = priorfn(pname, psr=psr)
self._bases[key] = basisfn(pname, psr=psr)
for par in itertools.chain(self._prior[key]._params.values(), self._bases[key]._params.values()):
self._params[par.name] = par
if coefficients:
# we can only create GPCoefficients parameters if the basis
# can be constructed with default arguments
# (and does not change size)
self._construct_basis()
for key in self._keys:
pname = "_".join([n for n in [psr.name, name, key] if n])
chain = itertools.chain(self._prior[key]._params.values(), self._bases[key]._params.values())
priorargs = {par.name: self._params[par.name] for par in chain}
logprior = parameter.Function(functools.partial(self._get_coefficient_logprior, key), **priorargs)
size = self._slices[key].stop - self._slices[key].start
cpar = parameter.GPCoefficients(logprior=logprior, size=size)(pname + "_coefficients")
self._coefficients[key] = cpar
self._params[cpar.name] = cpar
@property
def basis_params(self):
"""Get any varying basis parameters."""
ret = []
for basis in self._bases.values():
ret.extend([pp.name for pp in basis.params])
return ret
# since this function has side-effects, it can only be cached
# with limit=1, so it will run again if called with params different
# than the last time
@signal_base.cache_call("basis_params", limit=1)
def _construct_basis(self, params={}):
basis, self._labels = {}, {}
for key, mask in zip(self._keys, self._masks):
basis[key], self._labels[key] = self._bases[key](params=params, mask=mask)
nc = sum(F.shape[1] for F in basis.values())
self._basis = np.zeros((len(self._masks[0]), nc))
# TODO: should this be defined here? it will cache phi
self._phi = KernelMatrix(nc)
self._slices = {}
nctot = 0
for key, mask in zip(self._keys, self._masks):
Fmat = basis[key]
nn = Fmat.shape[1]
self._basis[mask, nctot : nn + nctot] = Fmat
self._slices.update({key: slice(nctot, nn + nctot)})
nctot += nn
# this class does different things (and gets different method
# definitions) if the user wants it to model GP coefficients
# (e.g., for a hierarchical likelihood) or if they do not
if coefficients:
def _get_coefficient_logprior(self, key, c, **params):
self._construct_basis(params)
phi = self._prior[key](self._labels[key], params=params)
if phi.ndim == 1:
return -0.5 * np.sum(c * c / phi) - 0.5 * np.sum(np.log(phi)) - 0.5 * len(phi) * np.log(2 * np.pi)
# note: (2*pi)^(n/2) is not in signal_base likelihood
else:
# TO DO: this code could be embedded in KernelMatrix
phiinv, logdet = KernelMatrix(phi).inv(logdet=True)
return -0.5 * np.dot(c, np.dot(phiinv, c)) - 0.5 * logdet - 0.5 * phi.shape[0] * np.log(2 * np.pi)
# MV: could assign this to a data member at initialization
@property
def delay_params(self):
return [pp.name for pp in self.params if "_coefficients" in pp.name]
@signal_base.cache_call(["basis_params", "delay_params"])
def get_delay(self, params={}):
self._construct_basis(params)
c = np.zeros(self._basis.shape[1])
for key, slc in self._slices.items():
p = self._coefficients[key]
c[slc] = params[p.name] if p.name in params else p.value
return np.dot(self._basis, c)
def get_basis(self, params={}):
return None
def get_phi(self, params):
return None
def get_phiinv(self, params):
return None
else:
@property
def delay_params(self):
return []
def get_delay(self, params={}):
return 0
def get_basis(self, params={}):
self._construct_basis(params)
return self._basis
def get_phi(self, params):
self._construct_basis(params)
for key, slc in self._slices.items():
phislc = self._prior[key](self._labels[key], params=params)
self._phi = self._phi.set(phislc, slc)
return self._phi
def get_phiinv(self, params):
return self.get_phi(params).inv()
return BasisGP
def FourierBasisGP(
spectrum,
coefficients=False,
combine=True,
components=20,
selection=Selection(selections.no_selection),
Tspan=None,
modes=None,
name="red_noise",
pshift=False,
pseed=None,
):
"""Convenience function to return a BasisGP class with a
fourier basis."""
basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed)
BaseClass = BasisGP(spectrum, basis, coefficients=coefficients, combine=combine, selection=selection, name=name)
class FourierBasisGP(BaseClass):
signal_type = "basis"
signal_name = "red noise"
signal_id = name
return FourierBasisGP
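# Illustrative usage (a sketch; the parameter ranges are assumptions):
#   pl = utils.powerlaw(log10_A=parameter.Uniform(-18, -11),
#                       gamma=parameter.Uniform(0, 7))
#   rn = FourierBasisGP(spectrum=pl, components=30, Tspan=Tspan)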
def get_timing_model_basis(use_svd=False, normed=True):
if use_svd:
if normed is not True:
raise ValueError("use_svd == True requires normed == True")
return utils.svd_tm_basis()
elif normed is True:
return utils.normed_tm_basis()
elif normed is not False:
return utils.normed_tm_basis(norm=normed)
else:
return utils.unnormed_tm_basis()
def TimingModel(coefficients=False, name="linear_timing_model", use_svd=False, normed=True):
"""Class factory for marginalized linear timing model signals."""
basis = get_timing_model_basis(use_svd, normed)
prior = utils.tm_prior()
BaseClass = BasisGP(prior, basis, coefficients=coefficients, name=name)
class TimingModel(BaseClass):
signal_type = "basis"
signal_name = "linear timing model"
signal_id = name + "_svd" if use_svd else name
if coefficients:
def _get_coefficient_logprior(self, key, c, **params):
# MV: probably better to avoid this altogether
# than to use 1e40 as in get_phi
return 0
return TimingModel
@function
def ecorr_basis_prior(weights, log10_ecorr=-8):
"""Returns the ecorr prior.
    :param weights: A vector of weights for the ecorr prior.
"""
return weights * 10 ** (2 * log10_ecorr)
def EcorrBasisModel(
log10_ecorr=parameter.Uniform(-10, -5),
coefficients=False,
selection=Selection(selections.no_selection),
name="basis_ecorr",
):
"""Convenience function to return a BasisGP class with a
quantized ECORR basis."""
basis = utils.create_quantization_matrix()
prior = ecorr_basis_prior(log10_ecorr=log10_ecorr)
BaseClass = BasisGP(prior, basis, coefficients=coefficients, selection=selection, name=name)
class EcorrBasisModel(BaseClass):
signal_type = "basis"
signal_name = "basis ecorr"
signal_id = name
return EcorrBasisModel
def BasisCommonGP(priorFunction, basisFunction, orfFunction, coefficients=False, combine=True, name=""):
class BasisCommonGP(signal_base.CommonSignal):
signal_type = "common basis"
signal_name = "common"
signal_id = name
basis_combine = combine
_orf = orfFunction(name)
_prior = priorFunction(name)
def __init__(self, psr):
super(BasisCommonGP, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
pname = "_".join([psr.name, name])
self._bases = basisFunction(pname, psr=psr)
self._params, self._coefficients = {}, {}
for par in itertools.chain(
self._prior._params.values(), self._orf._params.values(), self._bases._params.values()
):
self._params[par.name] = par
self._psrpos = psr.pos
if coefficients:
self._construct_basis()
# if we're given an instantiated coefficient vector
# that's what we will use
if isinstance(coefficients, parameter.Parameter):
self._coefficients[""] = coefficients
self._params[coefficients.name] = coefficients
return
chain = itertools.chain(
self._prior._params.values(), self._orf._params.values(), self._bases._params.values()
)
priorargs = {par.name: self._params[par.name] for par in chain}
logprior = parameter.Function(self._get_coefficient_logprior, **priorargs)
size = self._basis.shape[1]
cpar = parameter.GPCoefficients(logprior=logprior, size=size)(pname + "_coefficients")
self._coefficients[""] = cpar
self._params[cpar.name] = cpar
@property
def basis_params(self):
"""Get any varying basis parameters."""
return [pp.name for pp in self._bases.params]
# since this function has side-effects, it can only be cached
# with limit=1, so it will run again if called with params different
# than the last time
@signal_base.cache_call("basis_params", limit=1)
def _construct_basis(self, params={}):
self._basis, self._labels = self._bases(params=params)
if coefficients:
def _get_coefficient_logprior(self, c, **params):
# MV: for correlated GPs, the prior needs to use
# the coefficients for all GPs together;
# this may require parameter groups
raise NotImplementedError("Need to implement common prior " + "for BasisCommonGP coefficients")
@property
def delay_params(self):
return [pp.name for pp in self.params if "_coefficients" in pp.name]
@signal_base.cache_call(["basis_params", "delay_params"])
def get_delay(self, params={}):
self._construct_basis(params)
p = self._coefficients[""]
c = params[p.name] if p.name in params else p.value
return np.dot(self._basis, c)
def get_basis(self, params={}):
return None
def get_phi(self, params):
return None
            @classmethod
            def get_phicross(cls, signal1, signal2, params):
return None
def get_phiinv(self, params):
return None
else:
@property
def delay_params(self):
return []
def get_delay(self, params={}):
return 0
def get_basis(self, params={}):
self._construct_basis(params)
return self._basis
def get_phi(self, params):
self._construct_basis(params)
prior = BasisCommonGP._prior(self._labels, params=params)
orf = BasisCommonGP._orf(self._psrpos, self._psrpos, params=params)
return prior * orf
@classmethod
def get_phicross(cls, signal1, signal2, params):
prior = BasisCommonGP._prior(signal1._labels, params=params)
orf = BasisCommonGP._orf(signal1._psrpos, signal2._psrpos, params=params)
return prior * orf
return BasisCommonGP
def FourierBasisCommonGP(
spectrum,
orf,
coefficients=False,
combine=True,
components=20,
Tspan=None,
modes=None,
name="common_fourier",
pshift=False,
pseed=None,
):
if coefficients and Tspan is None:
raise ValueError(
"With coefficients=True, FourierBasisCommonGP " + "requires that you specify Tspan explicitly."
)
basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed)
BaseClass = BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, combine=combine, name=name)
class FourierBasisCommonGP(BaseClass):
signal_type = "common basis"
signal_name = "common red noise"
signal_id = name
_Tmin, _Tmax = [], []
def __init__(self, psr):
super(FourierBasisCommonGP, self).__init__(psr)
if Tspan is None:
FourierBasisCommonGP._Tmin.append(psr.toas.min())
FourierBasisCommonGP._Tmax.append(psr.toas.max())
# since this function has side-effects, it can only be cached
# with limit=1, so it will run again if called with params different
# than the last time
@signal_base.cache_call("basis_params", 1)
def _construct_basis(self, params={}):
span = Tspan if Tspan is not None else max(FourierBasisCommonGP._Tmax) - min(FourierBasisCommonGP._Tmin)
self._basis, self._labels = self._bases(params=params, Tspan=span)
return FourierBasisCommonGP
# for simplicity, we currently do not handle Tspan automatically
def FourierBasisCommonGP_ephem(spectrum, components, Tspan, name="ephem_gp"):
basis = utils.createfourierdesignmatrix_ephem(nmodes=components, Tspan=Tspan)
orf = utils.monopole_orf()
return BasisCommonGP(spectrum, basis, orf, name=name)
def FourierBasisCommonGP_physicalephem(
frame_drift_rate=1e-9,
d_jupiter_mass=1.54976690e-11,
d_saturn_mass=8.17306184e-12,
d_uranus_mass=5.71923361e-11,
d_neptune_mass=7.96103855e-11,
jup_orb_elements=0.05,
sat_orb_elements=0.5,
model="setIII",
coefficients=False,
name="phys_ephem_gp",
):
"""
Class factory for physical ephemeris corrections as a common GP.
Individual perturbations can be excluded by setting the corresponding
prior sigma to None.
:param frame_drift_rate: Gaussian sigma for frame drift rate
:param d_jupiter_mass: Gaussian sigma for Jupiter mass perturbation
:param d_saturn_mass: Gaussian sigma for Saturn mass perturbation
:param d_uranus_mass: Gaussian sigma for Uranus mass perturbation
:param d_neptune_mass: Gaussian sigma for Neptune mass perturbation
:param jup_orb_elements: Gaussian sigma for Jupiter orbital elem. perturb.
:param sat_orb_elements: Gaussian sigma for Saturn orbital elem. perturb.
:param model: vector basis used by Jupiter and Saturn perturb.;
see PhysicalEphemerisSignal, defaults to "setIII"
:param coefficients: if True, treat GP coefficients as enterprise
parameters; if False, marginalize over them
:return: BasisCommonGP representing ephemeris perturbations
"""
basis = utils.createfourierdesignmatrix_physicalephem(
frame_drift_rate=frame_drift_rate,
d_jupiter_mass=d_jupiter_mass,
d_saturn_mass=d_saturn_mass,
d_uranus_mass=d_uranus_mass,
d_neptune_mass=d_neptune_mass,
jup_orb_elements=jup_orb_elements,
sat_orb_elements=sat_orb_elements,
model=model,
)
spectrum = utils.physicalephem_spectrum()
orf = utils.monopole_orf()
return BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, name=name)
def WidebandTimingModel(
dmefac=parameter.Uniform(pmin=0.1, pmax=10.0),
log10_dmequad=parameter.Uniform(pmin=-7.0, pmax=0.0),
dmjump=parameter.Uniform(pmin=-0.01, pmax=0.01),
dmefac_selection=Selection(selections.no_selection),
log10_dmequad_selection=Selection(selections.no_selection),
dmjump_selection=Selection(selections.no_selection),
dmjump_ref=None,
name="wideband_timing_model",
):
"""Class factory for marginalized linear timing model signals
that take wideband TOAs and DMs. Currently assumes DMX for DM model."""
basis = utils.unnormed_tm_basis() # will need to normalize phi otherwise
prior = utils.tm_prior() # standard
BaseClass = BasisGP(prior, basis, coefficients=False, name=name)
class WidebandTimingModel(BaseClass):
signal_type = "basis"
signal_name = "wideband timing model"
signal_id = name
basis_combine = False # should never need to be True
def __init__(self, psr):
super(WidebandTimingModel, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
# make selection for DMEFACs
dmefac_select = dmefac_selection(psr)
self._dmefac_keys = list(sorted(dmefac_select.masks.keys()))
self._dmefac_masks = [dmefac_select.masks[key] for key in self._dmefac_keys]
# make selection for DMEQUADs
log10_dmequad_select = log10_dmequad_selection(psr)
self._log10_dmequad_keys = list(sorted(log10_dmequad_select.masks.keys()))
self._log10_dmequad_masks = [log10_dmequad_select.masks[key] for key in self._log10_dmequad_keys]
# make selection for DMJUMPs
dmjump_select = dmjump_selection(psr)
self._dmjump_keys = list(sorted(dmjump_select.masks.keys()))
self._dmjump_masks = [dmjump_select.masks[key] for key in self._dmjump_keys]
if self._dmjump_keys == [""] and dmjump is not None:
raise ValueError("WidebandTimingModel: can only do DMJUMP with more than one selection.")
# collect parameters
self._params = {}
self._dmefacs = []
for key in self._dmefac_keys:
pname = "_".join([n for n in [psr.name, key, "dmefac"] if n])
param = dmefac(pname)
self._dmefacs.append(param)
self._params[param.name] = param
self._log10_dmequads = []
for key in self._log10_dmequad_keys:
pname = "_".join([n for n in [psr.name, key, "log10_dmequad"] if n])
param = log10_dmequad(pname)
self._log10_dmequads.append(param)
self._params[param.name] = param
self._dmjumps = []
if dmjump is not None:
for key in self._dmjump_keys:
pname = "_".join([n for n in [psr.name, key, "dmjump"] if n])
if dmjump_ref is not None:
if pname == psr.name + "_" + dmjump_ref + "_dmjump":
fixed_dmjump = parameter.Constant(val=0.0)
param = fixed_dmjump(pname)
else:
param = dmjump(pname)
else:
param = dmjump(pname)
self._dmjumps.append(param)
self._params[param.name] = param
# copy psr quantities
self._ntoas = len(psr.toas)
self._npars = len(psr.fitpars)
self._freqs = psr.freqs
# collect DMX information (will be used to make phi and delay)
self._dmpar = psr.dm
self._dm = np.array(psr.flags["pp_dm"], "d")
self._dmerr = np.array(psr.flags["pp_dme"], "d")
check = np.zeros_like(psr.toas, "i")
# assign TOAs to DMX bins
self._dmx, self._dmindex, self._dmwhich = [], [], []
for index, key in enumerate(sorted(psr.dmx)):
dmx = psr.dmx[key]
if not dmx["fit"]:
raise ValueError("WidebandTimingModel: all DMX parameters must be estimated.")
self._dmx.append(dmx["DMX"])
self._dmindex.append(psr.fitpars.index(key))
self._dmwhich.append((dmx["DMXR1"] <= psr.stoas / 86400) & (psr.stoas / 86400 < dmx["DMXR2"]))
check += self._dmwhich[-1]
if np.sum(check) != self._ntoas:
raise ValueError("WidebandTimingModel: cannot account for all TOAs in DMX intervals.")
if "DM" in psr.fitpars:
raise ValueError("WidebandTimingModel: DM must not be estimated.")
self._ndmx = len(self._dmx)
@property
def delay_params(self):
# cache parameters are all DMEFACS, DMEQUADS, and DMJUMPS
return (
[p.name for p in self._dmefacs]
+ [p.name for p in self._log10_dmequads]
+ [p.name for p in self._dmjumps]
)
@signal_base.cache_call(["delay_params"])
def get_phi(self, params):
"""Return wideband timing-model prior."""
# get DMEFAC- and DMEQUAD-adjusted DMX errors
dme = self.get_dme(params)
# initialize the timing-model "infinite" prior
phi = KernelMatrix(1e40 * np.ones(self._npars, "d"))
# fill the DMX slots with weighted errors
for index, which in zip(self._dmindex, self._dmwhich):
phi.set(1.0 / np.sum(1.0 / dme[which] ** 2), index)
return phi
def get_phiinv(self, params):
"""Return inverse prior (using KernelMatrix inv)."""
return self.get_phi(params).inv()
@signal_base.cache_call(["delay_params"])
def get_delay(self, params):
"""Return the weighted-mean DM correction that applies for each residual.
(Will be the same across each DM bin, before measurement-frequency weighting.)"""
dm_delay = np.zeros(self._ntoas, "d")
avg_dm = self.get_mean_dm(params)
for dmx, which in zip(self._dmx, self._dmwhich):
dm_delay[which] = avg_dm[which] - (self._dmpar + dmx)
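            # Convert the DM offset to a time delay: dt = dDM / (K * f**2),
            # with K = 2.41e-4 (f in MHz, dt in s, DM in pc cm^-3).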
return dm_delay / (2.41e-4 * self._freqs ** 2)
@signal_base.cache_call(["delay_params"])
def get_dm(self, params):
"""Return DMJUMP-adjusted DM measurements."""
return (
sum(
(params[jump.name] if jump.name in params else jump.value) * mask
for jump, mask in zip(self._dmjumps, self._dmjump_masks)
)
+ self._dm
)
@signal_base.cache_call(["delay_params"])
def get_dme(self, params):
"""Return EFAC- and EQUAD-weighted DM errors."""
return (
sum(
(params[efac.name] if efac.name in params else efac.value) * mask
for efac, mask in zip(self._dmefacs, self._dmefac_masks)
)
** 2
* self._dmerr ** 2
+ (
10
** sum(
(params[equad.name] if equad.name in params else equad.value) * mask
for equad, mask in zip(self._log10_dmequads, self._log10_dmequad_masks)
)
)
** 2
) ** 0.5
@signal_base.cache_call(["delay_params"])
def get_mean_dm(self, params):
"""Get weighted DMX estimates (distributed to TOAs)."""
mean_dm = np.zeros(self._ntoas, "d")
# DMEFAC- and DMJUMP-adjusted
dm, dme = self.get_dm(params), self.get_dme(params)
for which in self._dmwhich:
mean_dm[which] = np.sum(dm[which] / dme[which] ** 2) / np.sum(1.0 / dme[which] ** 2)
return mean_dm
@signal_base.cache_call(["delay_params"])
def get_mean_dme(self, params):
"""Get weighted DMX uncertainties (distributed to TOAs).
Note that get_phi computes these variances directly."""
mean_dme = np.zeros(self._ntoas, "d")
# DMEFAC- and DMJUMP-adjusted
dme = self.get_dme(params)
for which in self._dmwhich:
mean_dme[which] = np.sqrt(1.0 / np.sum(1.0 / dme[which] ** 2))
return mean_dme
@signal_base.cache_call(["delay_params"])
def get_logsignalprior(self, params):
"""Get an additional likelihood/prior term to cover terms that would not
affect optimization, were they not dependent on DMEFAC, DMEQUAD, and DMJUMP."""
dm, dme = self.get_dm(params), self.get_dme(params)
mean_dm, mean_dme = self.get_mean_dm(params), self.get_mean_dme(params)
# now this is a bit wasteful, because it makes copies of the mean DMX and DMXERR
# and only uses the first value, but it shouldn't cost us too much
expterm = -0.5 * np.sum(dm ** 2 / dme ** 2)
expterm += 0.5 * sum(mean_dm[which][0] ** 2 / mean_dme[which][0] ** 2 for which in self._dmwhich)
# sum_i [-0.5 * log(dmerr**2)] = -sum_i log dmerr; same for mean_dmerr
logterm = -np.sum(np.log(dme)) + sum(np.log(mean_dme[which][0]) for which in self._dmwhich)
return expterm + logterm
# these are for debugging, but should not enter the likelihood computation
def get_delta_dm(self, params, use_mean_dm=False): # DM - DMX
delta_dm = np.zeros(self._ntoas, "d")
if use_mean_dm:
dm = self.get_mean_dm(params)
else:
dm = self.get_dm(params) # DMJUMP-adjusted
for dmx, which in zip(self._dmx, self._dmwhich):
delta_dm[which] = dm[which] - (self._dmpar + dmx)
return delta_dm
        def get_dm_chi2(self, params, use_mean_dm=False): # 'DM' chi-squared
delta_dm = self.get_delta_dm(params, use_mean_dm=use_mean_dm)
if use_mean_dm:
dme = self.get_mean_dme(params)
chi2 = 0.0
for idmx, which in enumerate(self._dmwhich):
chi2 += (delta_dm[which][0] / dme[which][0]) ** 2
else:
dme = self.get_dme(params) # DMEFAC- and DMEQUAD-adjusted
chi2 = np.sum((delta_dm / dme) ** 2)
return chi2
return WidebandTimingModel
def MarginalizingTimingModel(name="marginalizing_linear_timing_model", use_svd=False, normed=True):
"""Class factory for marginalizing (fast-likelihood) linear timing model signals."""
basisFunction = get_timing_model_basis(use_svd, normed)
class TimingModel(signal_base.Signal):
signal_type = "white noise"
signal_name = "marginalizing linear timing model"
signal_id = name
def __init__(self, psr):
super(TimingModel, self).__init__(psr)
self.name = self.psrname + "_" + self.signal_id
pname = "_".join([psr.name, name])
self.Mmat = basisFunction(pname, psr=psr)
self._params = {}
@property
def ndiag_params(self):
return []
# there are none, but to be general...
@signal_base.cache_call("ndiag_params")
def get_ndiag(self, params):
return MarginalizingNmat(self.Mmat()[0])
return TimingModel
class MarginalizingNmat(object):
def __init__(self, Mmat, Nmat=0):
self.Mmat, self.Nmat = Mmat, Nmat
self.Mprior = Mmat.shape[1] * np.log(1e40)
def __add__(self, other):
if isinstance(other, MarginalizingNmat):
raise ValueError("Cannot combine multiple MarginalizingNmat objects.")
elif isinstance(other, np.ndarray) or hasattr(other, "solve"):
return MarginalizingNmat(self.Mmat, self.Nmat + other)
elif other == 0:
return self
else:
raise TypeError
def __radd__(self, other):
return self.__add__(other)
# in Python 3.8: @functools.cached_property
@property
@functools.lru_cache()
def cf(self):
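        # Sparse Cholesky factorization of M^T N^-1 M, cached via lru_cache
        # and reused by solve() below.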
MNM = sps.csc_matrix(self.Nmat.solve(self.Mmat, left_array=self.Mmat))
return cholesky(MNM)
@signal_base.simplememobyid
def MNr(self, res):
return self.Nmat.solve(res, left_array=self.Mmat)
@signal_base.simplememobyid
def MNF(self, T):
return self.Nmat.solve(T, left_array=self.Mmat)
@signal_base.simplememobyid
def MNMMNF(self, T):
return self.cf(self.MNF(T))
    # logdet=True is ignored in the two-dimensional cases below, which is OK here
def solve(self, right, left_array=None, logdet=False):
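        # A sketch of the math: this effectively applies the Woodbury identity
        # to (N + M Lambda M^T)^-1 in the improper-prior limit Lambda -> inf,
        # marginalizing the timing-model parameters analytically.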
if right.ndim == 1 and left_array is right:
res = right
rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=logdet)
MNr = self.MNr(res)
ret = rNr - np.dot(MNr, self.cf(MNr))
return (ret, logdet_N + self.cf.logdet() + self.Mprior) if logdet else ret
elif right.ndim == 1 and left_array is not None and left_array.ndim == 2:
res, T = right, left_array
TNr = self.Nmat.solve(res, left_array=T)
return TNr - np.tensordot(self.MNMMNF(T), self.MNr(res), (0, 0))
elif right.ndim == 2 and left_array is right:
T = right
TNT = self.Nmat.solve(T, left_array=T)
return TNT - np.tensordot(self.MNF(T), self.MNMMNF(T), (0, 0))
else:
raise ValueError("Incorrect arguments given to MarginalizingNmat.solve.")
|
nanograv/enterprise
|
enterprise/signals/gp_signals.py
|
Python
|
mit
| 31,619
|
[
"Gaussian"
] |
e59b37a99ebdba9b5f5fae3f9d762a5da97cba3732b534c77c22252802b97da8
|
from lettuce import *
from rapidsms.contrib.locations.models import *
from survey.features.page_objects.batches import AddBatchPage
from survey.features.page_objects.batches import AssignQuestionToBatchPage
from survey.features.page_objects.batches import BatchListPage
from survey.features.page_objects.batches import EditBatchPage
from survey.features.page_objects.question import BatchQuestionsListPage
from survey.features.page_objects.root import HomePage
from survey.investigator_configs import *
from survey.models import HouseholdMemberGroup, Survey
from survey.models.question import Question
from survey.models.batch import Batch, BatchLocationStatus
@step(u'And I have a batch')
def and_i_have_a_batch(step):
world.batch = Batch.objects.create(
order=1,
name="Batch A",
description='description',
survey=world.survey)
@step(u'And I have prime locations')
def and_i_have_prime_locations(step):
district = LocationType.objects.create(
name=PRIME_LOCATION_TYPE, slug=PRIME_LOCATION_TYPE)
world.districts = (
Location.objects.create(name="Kampala", type=district),
Location.objects.create(name="Abim", type=district),
)
@step(u'And I visit batches listing page')
def and_i_visit_batches_listing_page(step):
world.page = BatchListPage(world.browser, world.survey)
world.page.visit()
@step(u'And I visit the first batch listed')
def and_i_visit_the_first_batch_listed(step):
world.page = world.page.visit_batch(world.batch)
@step(u'Then I should see all the prime locations with open close toggles')
def then_i_should_see_all_the_prime_locations_with_open_close_toggles(step):
world.page.batch_closed_for_all_locations()
@step(u'And I open batch for a location')
@step(u'When I open batch for a location')
def when_i_open_batch_for_a_location(step):
world.page.open_batch_for(world.districts[1])
@step(u'Then I should see it is open for that location in db')
def then_i_should_see_it_is_open_for_that_location_in_db(step):
assert BatchLocationStatus.objects.filter(
location=world.districts[1]).count() == 1
assert BatchLocationStatus.objects.filter(
location=world.districts[0]).count() == 0
@step(u'When I close batch for a location')
def when_i_close_batch_for_a_location(step):
world.page.close_batch_for(world.districts[1])
@step(u'Then I should see it is closed for that location in db')
def then_i_should_see_it_is_closed_for_that_location_in_db(step):
assert BatchLocationStatus.objects.count() == 0
@step(u'And I click add batch button')
def and_i_click_add_batch_button(step):
world.page.click_add_batch_button()
@step(u'Then I should see a add batch page')
def then_i_should_see_a_add_batch_page(step):
world.page = AddBatchPage(world.browser, world.survey)
world.page.validate_url()
world.page.validate_fields_present(["New Batch", "Name", "Description"])
@step(u'When I fill the details for add batch form')
def when_i_fill_the_details_for_add_batch_form(step):
data = {'name': 'hritik batch',
'description': 'roshan'}
world.page.fill_valid_values(data)
@step(u'Then I should go back to batches listing page')
def then_i_should_go_back_to_batches_listing_page(step):
world.page = BatchListPage(world.browser, world.survey)
world.page.validate_url()
@step(u'And I should see batch successfully added message')
def and_i_should_see_batch_successfully_added_message(step):
world.page.see_success_message('Batch', 'added')
@step(u'And I visit add batch page')
def and_i_visit_add_batch_page(step):
world.page = AddBatchPage(world.browser, world.survey)
world.page.visit()
@step(u'Then I should see validation error messages')
def then_i_should_see_validation_error_messages(step):
world.page.validate_error_message_on_fields()
@step(u'And I have 100 batches')
def and_i_have_100_batches(step):
for i in xrange(100):
try:
Batch.objects.create(
order=i,
name="Batch %d" %
i,
description='description %d' %
i,
survey=world.survey)
except Exception:
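            # Ignore duplicate-name/order collisions while seeding test data.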
pass
@step(u'And I should see the batches list paginated')
def and_i_should_see_the_batches_list_paginated(step):
world.page.validate_fields()
world.page.validate_pagination()
world.page.validate_fields()
@step(u'And I click edit batch link')
def and_i_click_edit_batch_link(step):
world.page.click_link_by_text(' Edit')
@step(u'Then I should see edit batch page')
def then_i_should_see_edit_batch_page(step):
world.page = EditBatchPage(world.browser, world.batch, world.survey)
world.page.validate_url()
@step(u'When I fill the details for the batch')
def when_i_fill_the_details_for_the_batch(step):
data = {'name': 'hritik batch',
'description': 'roshan'}
world.page.fill_valid_values(data)
@step(u'And I should see the batch successfully edited')
def and_i_should_see_the_batch_successfully_edited(step):
world.page.see_success_message('Batch', 'edited')
@step(u'And I click delete batch link')
def and_i_click_delete_batch_link(step):
world.page.click_link_by_text(' Delete')
@step(u'Then I should see confirm delete batch')
def then_i_should_see_confirm_delete_batch(step):
world.page.see_confirm_modal_message(world.batch.name)
@step(u'And if I click yes')
def and_if_i_click_yes(step):
world.page.click_link_by_text('Yes')
@step(u'And I should see the batch successfully deleted')
def and_i_should_see_the_batch_successfully_deleted(step):
world.page.see_success_message('Batch', 'deleted')
@step(u'And I click on batch name')
def and_i_click_on_batch_name(step):
world.page.click_link_by_text(world.batch.name)
@step(u'Then I should be on the list of questions under the batch page')
def then_i_should_be_on_the_list_of_questions_under_the_batch_page(step):
world.page = BatchQuestionsListPage(world.browser, world.batch)
world.page.validate_url()
@step(u'And I click on assign question link')
def and_i_click_on_assign_question_link(step):
world.page.click_link_by_text("Select Question")
@step(u'Then I should see the assign question page of that batch')
def then_i_should_see_the_assign_question_page_of_that_batch(step):
world.page = AssignQuestionToBatchPage(world.browser, world.batch)
world.page.validate_url()
world.page.is_text_present(
"Select Questions for %s - %s" %
(world.survey.name.capitalize(),
world.batch.name.capitalize()))
@step(u'When I select some questions')
def when_i_select_some_questions(step):
world.page.select('questions', [world.question_1.pk, world.question_2.pk])
@step(u'Then I should see the questions successfully assigned to that batch')
def then_i_should_see_the_questions_successfully_assigned_to_that_batch(step):
world.page.see_success_message(
"Questions", "assigned to batch: %s" % world.batch.name.capitalize())
@step(u'And I have 2 questions')
def and_i_have_2_questions(step):
world.question_1 = Question.objects.create(
text="question1", answer_type=Question.NUMBER, order=1)
world.question_2 = Question.objects.create(
text="question2", answer_type=Question.TEXT, order=2)
@step(u'And I visit the assign question to page batch')
def and_i_visit_the_assign_question_to_page_batch(step):
world.page = AssignQuestionToBatchPage(world.browser, world.batch)
world.page.visit()
@step(u'When I select the group')
def when_i_select_the_group(step):
world.page.select('group', [world.household_member_group.id])
@step(u'Then I should see the question which belong to that group')
def then_i_should_see_the_question_which_belong_to_that_group(step):
world.page.see_the_question(True, world.question_1.id)
world.page.see_the_question(False, world.question_2.id)
def create_question_for_group(group):
return Question.objects.create(
text="question-group%s" %
group.name,
answer_type=Question.NUMBER,
group=group)
@step(u'And I have one question belonging to that group')
def and_i_have_one_question_belonging_to_that_group(step):
world.question_1 = create_question_for_group(world.household_member_group)
@step(u'And another question which does not')
def and_another_question_which_does_not(step):
world.question_2 = Question.objects.create(
text="question2", answer_type=Question.TEXT)
@step(u'And I click add batch modal button')
def and_i_click_add_batch_modal_button(step):
world.page.click_link_by_partial_href("#new_batch")
@step(u'Then I should see the add batch modal')
def then_i_should_see_the_add_batch_modal(step):
world.page.validate_page_got_survey_id()
world.page.validate_fields_present(["New Batch", "Name", "Description"])
@step(u'And I have 2 member groups')
def and_i_have_2_member_groups(step):
world.household_member_group = HouseholdMemberGroup.objects.create(
name='Age 4-5', order=1)
world.member_group_2 = HouseholdMemberGroup.objects.create(
name='Age 15-49', order=2)
@step(u'And I have questions belonging to those groups')
def and_i_have_questions_belonging_to_those_groups(step):
world.question_1_with_group_1 = create_question_for_group(
world.household_member_group)
world.question_2_with_group_1 = create_question_for_group(
world.household_member_group)
world.question_1_with_group_2 = create_question_for_group(
world.member_group_2)
world.question_2_with_group_2 = create_question_for_group(
world.member_group_2)
@step(u'And I select a question from the list')
def and_i_select_a_question_from_the_list(step):
world.page.select_multiple("#id_questions", world.question_1_with_group_2)
@step(u'Then I should see in selected list the question which belong to that group')
def then_i_should_see_in_selected_list_the_question_which_belong_to_that_group(
step):
world.page.see_the_question(True, world.question_1_with_group_1.id)
world.page.see_the_question(True, world.question_2_with_group_1.id)
world.page.see_the_question(False, world.question_2_with_group_2.id)
@step(u'And I should see the previously selected questions on the page')
def and_i_should_see_the_previously_selected_questions_on_the_page(step):
world.page.see_the_selected_question(
True, world.question_1_with_group_2.id)
@step(u'When I fill the same name of the batch')
def when_i_fill_the_same_name_of_the_batch(step):
world.page.fill('name', world.batch.name)
world.page.fill('description', 'some description')
@step(u'Then I should see batch name already exists error message')
def then_i_should_see_batch_name_already_exists_error_message(step):
world.page.is_text_present("Batch with the same name already exists.")
@step(u'And If I have an open batch in another survey in this location')
def and_if_i_have_an_open_batch_in_another_survey_in_this_location(step):
world.survey1 = Survey.objects.create(
name='another survey',
        description='another survey description',
type=False,
sample_size=10)
batch = Batch.objects.create(
order=1,
name="Batch B",
description='description',
survey=world.survey1)
batch.open_for_location(world.districts[1])
@step(u'Then I should see an error that another batch from another survey is already open')
def then_i_should_see_an_error_that_another_batch_from_another_survey_is_already_open(
step):
open_batch_error_message = "%s has already open batches from survey %s" % (
world.districts[1].name, world.survey1.name)
world.page.is_text_present(open_batch_error_message)
@step(u'And I should not be able to open this batch')
def and_i_should_not_be_able_to_open_this_batch(step):
world.page.is_disabled("open_close_switch_%s" % world.districts[1].id)
@step(u'When I activate non response for batch and location')
def when_i_activate_non_response_for_batch_and_location(step):
world.page.activate_non_response_for_batch_and(world.districts[1])
@step(u'Then I should see it is activated for that location in db')
def then_i_should_see_it_is_activated_for_that_location_in_db(step):
assert world.batch.non_response_is_activated_for(
world.districts[0]) is True
assert world.batch.non_response_is_activated_for(
world.districts[1]) is False
@step(u'When I deactivate non response for batch and location')
def when_i_deactivate_non_response_for_batch_and_location(step):
world.page.deactivate_non_response_for_batch_and(world.districts[0])
@step(u'Then I should see it is deactivated for that location in db')
def then_i_should_see_it_is_deactivated_for_that_location_in_db(step):
assert world.batch.non_response_is_activated_for(
world.districts[0]) is False
assert world.batch.non_response_is_activated_for(
world.districts[1]) is False
@step(u'Then I should see message batch is closed that location')
def then_i_should_see_message_batch_is_closed_that_location(step):
world.page.is_text_present("%s is not open for %s" %
(world.batch.name, world.districts[1]))
@step(u'And I should not be able to activate this batch')
def and_i_should_not_be_able_to_activate_this_batch(step):
world.page.is_disabled("open_close_switch_%s" % world.districts[1].id)
@step(u'When I open batch for a different location')
def when_i_open_batch_for_a_different_location(step):
world.batch.open_for_location(world.districts[0])
@step(u'And I activate non response for that location')
def and_i_activate_non_response_for_that_location(step):
world.page.activate_non_response_for_batch_and(world.districts[0])
@step(u'When I visit the home page')
def when_i_visit_the_home_page(step):
world.page = HomePage(world.browser)
world.page.visit()
@step(u'Then I should see that it is still activated')
def then_i_should_see_that_it_is_still_activated_for_that_location_in_db(step):
world.page.is_text_present("On")
@step(u'When I close the batch of the other survey')
def when_i_close_the_batch_of_the_other_survey(step):
world.batch.close_for_location(world.districts[1])
@step(u'Then the non-response switch for that location is active')
def then_the_non_response_switch_for_that_location_is_active(step):
world.page.is_disabled('activate_non_response_switch_%d' % world.batch.id)
world.page.is_text_present("%s is not open for %s" % (
world.batch.name, world.districts[1]), False)
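# A minimal sketch of the lettuce step pattern used throughout this file.
# The step text and field names below are illustrative, not part of the
# actual suite; regex groups in the step string are passed to the step
# function as extra arguments after `step`:
#
# @step(u'When I fill "([^"]*)" with "([^"]*)"')
# def when_i_fill_field_with_value(step, field, value):
#     world.page.fill(field, value)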
|
unicefuganda/uSurvey
|
survey/features/Batch-steps.py
|
Python
|
bsd-3-clause
| 14,663
|
[
"VisIt"
] |
20ade3d047b8fddda69e00d06984e4953ee4b736e871c58ebb19a393a819a31e
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import division, unicode_literals
"""
#TODO: Write module doc.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
warnings.warn("pymatgen.io.feffio has been moved pymatgen.io.feff. This stub "
"will be removed in pymatgen 4.0.")
from .feff import *
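# Migration sketch: imports through this stub keep working (with the
# warning above) thanks to the wildcard re-export, but new code should
# target the new package directly. `SomeClass` is a placeholder for
# whatever symbol you actually use:
#   from pymatgen.io.feffio import SomeClass   # old path, deprecated
#   from pymatgen.io.feff import SomeClass     # preferred path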
|
rousseab/pymatgen
|
pymatgen/io/feffio.py
|
Python
|
mit
| 484
|
[
"FEFF",
"pymatgen"
] |
ce1fb5ba93da7d42f332d8ef1e482b6aa20fcb7b8dcdfc056db88de1033c13bc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
from collections import OrderedDict
import itertools
import warnings
import re
from ast import literal_eval
import numpy as np
from monty.json import MSONable
from six import string_types
from ruamel.yaml import YAML
from pymatgen.util.io_utils import clean_lines
from pymatgen.core.structure import SiteCollection
from pymatgen import Molecule, Element
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
__author__ = "Kiran Mathew, Zhi Deng"
__email__ = "kmathew@lbl.gov, z4deng@eng.ucsd.edu"
__credits__ = "Brandon Wood"
SECTION_KEYWORDS = {"atom": ["Atoms", "Velocities", "Masses",
"Ellipsoids", "Lines", "Triangles", "Bodies"],
"molecule": ["Bonds", "Angles", "Dihedrals", "Impropers"],
"ff": ["Pair Coeffs", "PairIJ Coeffs", "Bond Coeffs",
"Angle Coeffs", "Dihedral Coeffs",
"Improper Coeffs"],
"class2": ["BondBond Coeffs", "BondAngle Coeffs",
"MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs", "AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs",
"BondBond13 Coeffs", "AngleAngle Coeffs"]}
ATOMS_LINE_FORMAT = {"angle": ["molecule-ID", "type", "x", "y", "z"],
"atomic": ["type", "x", "y", "z"],
"bond": ["molecule-ID", "type", "x", "y", "z"],
"charge": ["type", "q", "x", "y", "z"],
"full": ["molecule-ID", "type", "q", "x", "y", "z"],
"molecular": ["molecule-ID", "type", "x", "y", "z"]}
ATOMS_FLOATS = ["q", "x", "y", "z"]
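# For orientation: get_string() below writes each Atoms line as the atom
# id followed by the fields in ATOMS_LINE_FORMAT[atom_style]. For
# atom_style "full" that reads (values are illustrative):
#   id molecule-ID type q       x   y   z
#   1  1           1    -0.8476 0.0 0.0 0.0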
class LammpsData(MSONable):
"""
Object for representing the data in a LAMMPS data file.
"""
def __init__(self, masses, atoms, box_bounds, box_tilt=None,
velocities=None, force_field=None, topology=None,
atom_style="full"):
"""
This constructor is designed to work with parsed data from a
file. Not recommended to use directly.
Args:
masses ([dict]): Data for Masses section.
[{"id": 1, "mass": 1.008}, ...]
atoms ([dict]): Data for Atoms section. Keys in dicts
varies with atom_style.
[{"id": 1, "type": 1,
"x": 0.0, "y": 0.0, "z": 0.0, ...}, ...]
box_bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
box_tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
velocities ([dict]): Data for Velocities section. Default
to None. If not None, its length and ids should be
consistent with atoms.
[{"id": 1, "velocity": [0.0, 0.0, 0.0]}, ...]
force_field (dict): Data for force field sections.
Default to None. All six valid keys listed below are
optional.
{
"Pair Coeffs":
[{"id": 1, "coeffs": [coeff]}, ...],
"Pair IJ Coeffs":
[{"id1": 1, "id2": 1, "coeffs": [coeff]}, ...],
"Bond Coeffs":
[{"id": 1, "coeffs": [coeff]}, ...],
"Angle Coeffs":
[{"id": 1, "coeffs": [coeff]}, ...],
"Dihedral Coeffs":
[{"id": 1, "coeffs": [coeff]}, ...],
"Improper Coeffs":
[{"id": 1, "coeffs": [coeff]}, ...],
}
topology (dict): Data for topology sections. Default to
None. All four valid keys listed below are optional.
{
"Bonds":
[{"id": 1, "bond": [1, 2]}, ...],
"Angles":
[{"id": 1, "angle": [2, 1, 3]}, ...],
"Dihedrals":
[{"id": 1, "dihedral": [3, 1, 2, 4]}, ...],
"Impropes":
[{"id": 1, "improper": [3, 1, 2, 4]}, ...],
}
atom_style (str): Output atom_style. Default to "full".
"""
bounds_arr = np.array(box_bounds)
bounds_shape = bounds_arr.shape
assert bounds_shape == (3, 2), \
"Expecting a (3, 2) array for box_bounds," \
" got {}".format(bounds_shape)
box_bounds = bounds_arr.tolist()
if box_tilt is not None:
tilt_arr = np.array(box_tilt)
tilt_shape = tilt_arr.shape
assert tilt_shape == (3,),\
"Expecting a (3,) array for box_tilt," \
" got {}".format(tilt_shape)
box_tilt = tilt_arr.tolist()
if velocities:
assert len(velocities) == len(atoms),\
"Inconsistency found between atoms and velocities"
if force_field:
force_field = {k: force_field[k] for k in SECTION_KEYWORDS["ff"]
if k in force_field}
if topology:
topology = {k: topology[k] for k in SECTION_KEYWORDS["molecule"]
if k in topology}
self.masses = masses
self.atoms = atoms
self.box_bounds = box_bounds
self.box_tilt = box_tilt
self.velocities = velocities
self.force_field = force_field
self.topology = topology
self.atom_style = atom_style
def __str__(self):
return self.get_string()
def get_string(self, significant_figures=6):
"""
Returns the string representation of LammpsData, equivalent to
the string that is to be written into a file.
Args:
significant_figures (int): No. of significant figures of
                (changeable) quantities to output, default to 6.
Quantities include box bounds and tilt, coordinates,
charges and velocities. While other stationary
quantities, like masses and force field coefficients,
are displayed as-is.
Returns:
String representation
"""
template = """Generated by pymatgen.io.lammps.data.LammpsData
{stats}
{box}
{masses}
{ff_sections}
{atoms}
{velocities}
{topo_sections}
"""
contents = {}
float_ph = "{:.%df}" % significant_figures \
if significant_figures else "{}"
def _pretty_section(title, str_mat):
lens = [max(map(len, col)) for col in zip(*str_mat)]
fmt = " ".join("{:>%d}" % x for x in lens)
rows = [title, ""] + [fmt.format(*row) for row in str_mat]
sec = "\n".join(rows)
return sec
counts = OrderedDict([("atoms", len(self.atoms))])
types = OrderedDict([("atom", len(self.masses))])
# box
box_lines = []
for bound, d in zip(self.box_bounds, "xyz"):
fillers = bound + [d] * 2
bound_format = " ".join([float_ph] * 2 + ["{}lo {}hi"])
box_lines.append(bound_format.format(*fillers))
if self.box_tilt:
tilt_format = " ".join([float_ph] * 3 + ["xy xz yz"])
box_lines.append(tilt_format.format(*self.box_tilt))
contents["box"] = "\n".join(box_lines)
# masses
masses_mat = [["%d" % m["id"], "{:.4f}".format(m["mass"])]
for m in self.masses]
contents["masses"] = _pretty_section("Masses", masses_mat)
# ff_sections
contents["ff_sections"] = ""
if self.force_field:
ff_parts = []
for kw in self.force_field.keys():
if kw == "PairIJ Coeffs":
ff_mat = [[str(i) for i in
[d["id1"], d["id2"]] + d["coeffs"]]
for d in self.force_field[kw]]
else:
ff_mat = [[str(i) for i in [d["id"]] + d["coeffs"]]
for d in self.force_field[kw]]
if not kw.startswith("Pair"):
types[kw.lower()[:-7]] = len(ff_mat)
ff_parts.append(_pretty_section(kw, ff_mat))
contents["ff_sections"] = "\n\n".join(ff_parts)
# atoms
atom_format = ["id"] + ATOMS_LINE_FORMAT[self.atom_style]
if "nx" in self.atoms[0].keys():
atom_format.extend(["nx", "ny", "nz"])
map_str = lambda t: float_ph if t in ATOMS_FLOATS else "{}"
atoms_mat = []
for a in self.atoms:
atoms_mat.append([map_str(k).format(a[k]) for k in atom_format])
contents["atoms"] = _pretty_section("Atoms", atoms_mat)
# velocities
contents["velocities"] = ""
if self.velocities:
velocities_mat = []
for v in self.velocities:
vs = [float_ph.format(i) for i in v["velocity"]]
velocities_mat.append(["%d" % v["id"]] + vs)
contents["velocities"] = _pretty_section("Velocities",
velocities_mat)
# topo_sections
contents["topo_sections"] = ""
if self.topology:
topo_parts = []
topo_keys = [k for k in SECTION_KEYWORDS["molecule"]
if k in self.topology]
for kw in topo_keys:
skw = kw.lower()[:-1]
topo_mat = [["%d" % v for v in [d["id"], d["type"]] + d[skw]]
for d in self.topology[kw]]
counts[kw.lower()] = len(topo_mat)
topo_parts.append(_pretty_section(kw, topo_mat))
contents["topo_sections"] = "\n\n".join(topo_parts)
# stats
all_stats = list(counts.values()) + list(types.values())
line_fmt = "{:>%d} {}" % len(str(max(all_stats)))
count_lines = [line_fmt.format(v, k) for k, v in counts.items()]
type_lines = [line_fmt.format(v, k + " types")
for k, v in types.items()]
contents["stats"] = "\n".join(count_lines + [""] + type_lines)
return template.format(**contents)
def write_file(self, filename, significant_figures=6):
"""
Writes LammpsData to file.
Args:
filename (str): Filename to write to.
significant_figures (int): No. of significant figures of
                (changeable) quantities to output, default to 6.
Quantities include box bounds and tilt, coordinates,
charges and velocities. While other stationary
quantities, like masses and force field coefficients,
are displayed as-is.
"""
with open(filename, "w") as f:
f.write(self.get_string(significant_figures=significant_figures))
@classmethod
def from_file(cls, filename, atom_style="full", sort_id=False):
"""
Constructor from parsing a file.
Args:
filename (str): Filename to read.
atom_style (str): Associated atom_style. Default to "full".
            sort_id (bool): Whether to sort each section by id. Default to
                False.
"""
with open(filename) as f:
lines = f.readlines()
clines = list(clean_lines(lines))
section_marks = [i for i, l in enumerate(clines) if l
in itertools.chain(*SECTION_KEYWORDS.values())]
parts = np.split(clines, section_marks)
# First, parse header
float_group = r'([0-9eE.+-]+)'
header_pattern = {}
header_pattern["counts"] = r'^\s*(\d+)\s+([a-zA-Z]+)$'
header_pattern["types"] = r'^\s*(\d+)\s+([a-zA-Z]+)\s+types$'
header_pattern["bounds"] = r'^\s*{}$'.format(r'\s+'.join(
[float_group] * 2 + [r"([xyz])lo \3hi"]))
header_pattern["tilt"] = r'^\s*{}$'.format(r'\s+'.join(
[float_group] * 3 + ["xy xz yz"]))
header = {"counts": {}, "types": {}}
bounds = {}
for l in parts[0]:
match = None
for k, v in header_pattern.items():
match = re.match(v, l)
if match:
break
else:
continue
if match and k in ["counts", "types"]:
header[k][match.group(2)] = int(match.group(1))
elif match and k == "bounds":
g = match.groups()
bounds[g[2]] = [float(i) for i in g[:2]]
elif match and k == "tilt":
header["tilt"] = [float(i) for i in match.groups()]
header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]
# Then, parse each section
topo_sections = SECTION_KEYWORDS["molecule"]
def parse_section(single_section_lines):
kw = single_section_lines[0]
if kw in SECTION_KEYWORDS["ff"] and kw != "PairIJ Coeffs":
parse_line = lambda l: {"coeffs": [literal_eval(x)
for x in l[1:]]}
elif kw == "PairIJ Coeffs":
parse_line = lambda l: {"id1": int(l[0]), "id2": int(l[1]),
"coeffs": [literal_eval(x)
for x in l[2:]]}
elif kw in topo_sections:
n = {"Bonds": 2, "Angles": 3, "Dihedrals": 4, "Impropers": 4}
parse_line = lambda l: {"type": int(l[1]), kw[:-1].lower():
[int(x) for x in l[2:n[kw] + 2]]}
elif kw == "Atoms":
keys = ATOMS_LINE_FORMAT[atom_style][:]
sample_l = single_section_lines[1].split()
if len(sample_l) == len(keys) + 1:
pass
elif len(sample_l) == len(keys) + 4:
keys += ["nx", "ny", "nz"]
else:
warnings.warn("Atoms section format might be imcompatible"
" with atom_style %s." % atom_style)
float_keys = [k for k in keys if k in ATOMS_FLOATS]
parse_line = lambda l: {k: float(v) if k in float_keys
else int(v) for (k, v) in zip(keys, l[1:len(keys) + 1])}
elif kw == "Velocities":
parse_line = lambda l: {"velocity": [float(x)
for x in l[1:4]]}
elif kw == "Masses":
parse_line = lambda l: {"mass": float(l[1])}
else:
warnings.warn("%s section parser has not been implemented. "
"Skipping..." % kw)
return kw, []
section = []
splitted_lines = [l.split() for l in single_section_lines[1:]]
if sort_id and kw != "PairIJ Coeffs":
splitted_lines = sorted(splitted_lines,
key=lambda l: int(l[0]))
for l in splitted_lines:
line_data = parse_line(l)
if kw != "PairIJ Coeffs":
line_data["id"] = int(l[0])
section.append(line_data)
return kw, section
err_msg = "Bad LAMMPS data format where "
body = {}
seen_atoms = False
for part in parts[1:]:
name, section = parse_section(part)
if name == "Atoms":
seen_atoms = True
if name in ["Velocities"] + topo_sections and not seen_atoms:
raise RuntimeError(err_msg + "%s section appears before"
" Atoms section" % name)
body.update({name: section})
err_msg += "Nos. of {} do not match between header and {} section"
assert len(body["Masses"]) == header["types"]["atom"], \
err_msg.format("atom types", "Masses")
atom_sections = ["Atoms", "Velocities"] \
if body.get("Velocities") else ["Atoms"]
for s in atom_sections:
assert len(body[s]) == header["counts"]["atoms"], \
err_msg.format("atoms", s)
for s in topo_sections:
if header["counts"].get(s.lower(), 0) > 0:
assert len(body[s]) == header["counts"][s.lower()], \
err_msg.format(s.lower(), s)
items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
items["box_bounds"] = header["bounds"]
items["box_tilt"] = header.get("tilt")
items["velocities"] = body.get("Velocities")
ff_kws = [k for k in body.keys() if k in SECTION_KEYWORDS["ff"]]
items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws \
else None
topo_kws = [k for k in body.keys()
if k in SECTION_KEYWORDS["molecule"]]
items["topology"] = {k: body[k] for k in topo_kws} \
if topo_kws else None
items["atom_style"] = atom_style
return cls(**items)
@classmethod
def from_ff_and_topologies(cls, ff, topologies, box_bounds, box_tilt=None,
atom_style="full"):
"""
Constructor building LammpsData from a ForceField object and a
list of Topology objects.
Args:
ff (ForceField): ForceField object with data for Masses and
force field sections.
topologies ([Topology]): List of Topology objects with data
for Atoms, Velocities and topology sections.
box_bounds: A (3, 2) array/list of floats setting the
boundaries of simulation box.
box_tilt: A (3,) array/list of floats setting the tilt of
simulation box. Default to None, i.e., use an
orthogonal box.
atom_style (str): Output atom_style. Default to "full".
"""
atom_types = set(itertools.chain(*[t.types for t in topologies]))
assert atom_types.issubset(ff.atom_map.keys()),\
"Unknown atom type found in topologies"
items = {"box_bounds": box_bounds, "box_tilt": box_tilt,
"atom_style": atom_style}
items["masses"] = ff.masses
lookup = {"Atoms": ff.atom_map}
pair_coeffs = ff.get_pair_coeffs()
mol_coeffs = getattr(ff, "mol_coeffs")
force_field = {} if any((pair_coeffs, mol_coeffs)) else None
if pair_coeffs:
force_field.update(pair_coeffs)
if mol_coeffs:
for kw in mol_coeffs.keys():
coeffs, mapper = ff.get_coeffs_and_mapper(kw)
force_field.update(coeffs)
lookup[kw[:-7] + "s"] = mapper
items["force_field"] = force_field
atoms = []
velocities = [] if topologies[0].velocities else None
topology = {k: [] for k in SECTION_KEYWORDS["molecule"]}
stack = {k: 0 for k in ["Atoms"] + SECTION_KEYWORDS["molecule"]}
atom_format = ATOMS_LINE_FORMAT[atom_style]
for mid, topo in enumerate(topologies):
map_inds = lambda inds: tuple([topo.types[i] for i in inds])
topo_atoms = []
for aid, (s, t) in enumerate(zip(topo.sites, topo.types)):
d_atom = {"id": aid + 1 + stack["Atoms"],
"type": lookup["Atoms"][t]}
d_atom.update({k: getattr(s, k) for k in "xyz"})
if "molecule-ID" in atom_format:
d_atom["molecule-ID"] = mid + 1
topo_atoms.append(d_atom)
if "q" in atom_format:
charges = [0.0] * len(topo.sites) if not topo.charges \
else topo.charges
for d_atom, q in zip(topo_atoms, charges):
d_atom["q"] = q
atoms.extend(topo_atoms)
if isinstance(velocities, list):
velocities.extend({"id": aid + 1 + stack["Atoms"],
"velocity": v}
for aid, v in enumerate(topo.velocities))
if topo.topologies:
for kw in topo.topologies.keys():
topo_lookup = lookup[kw]
unfiltered_indices = np.array(topo.topologies[kw])
topo_topos = []
tid = stack[kw]
for inds in unfiltered_indices:
topo_type = topo_lookup.get(map_inds(inds))
if topo_type:
topo_inds = list(inds + stack["Atoms"] + 1)
topo_topos.append({"id": tid + 1,
"type": topo_type,
kw.lower()[:-1]: topo_inds})
tid += 1
topology[kw].extend(topo_topos)
stack[kw] = tid
stack["Atoms"] += len(topo_atoms)
topology = {k: v for k, v in topology.items() if len(v) > 0}
topology = None if len(topology) == 0 else topology
items.update({"atoms": atoms, "velocities": velocities,
"topology": topology})
return cls(**items)
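# A minimal round-trip sketch for LammpsData using only the methods
# defined above (file names are illustrative):
#   data = LammpsData.from_file("in.data", atom_style="full")
#   print(data.get_string(significant_figures=8))
#   data.write_file("out.data")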
class Topology(MSONable):
"""
Class carrying most data in Atoms, Velocities and molecular
topology sections for ONE single SiteCollection or its subclasses
(Molecule/Structure), or a plain list of Sites.
"""
def __init__(self, sites, atom_type=None, charges=None, velocities=None,
topologies=None):
"""
Args:
sites ([Site] or SiteCollection): A group of sites in a
list or as a Molecule/Structure.
atom_type (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
topologies (dict): Bonds, angles, dihedrals and improper
dihedrals defined by site indices. Default to None,
i.e., no additional topology. All four valid keys
listed below are optional.
{
"Bonds": [[i, j], ...],
"Angles": [[i, j, k], ...],
"Dihedrals": [[i, j, k, l], ...],
"Impropers": [[i, j, k, l], ...]
}
"""
if not isinstance(sites, SiteCollection):
sites = Molecule.from_sites(sites)
if atom_type:
types = sites.site_properties.get(atom_type)
else:
types = [site.species_string for site in sites]
# search for site property if not override
if charges is None:
charges = sites.site_properties.get("charge")
if velocities is None:
velocities = sites.site_properties.get("velocities")
# validate shape
if charges is not None:
charge_arr = np.array(charges)
assert charge_arr.shape == (len(sites),),\
"Wrong format for charges"
charges = charge_arr.tolist()
if velocities is not None:
velocities_arr = np.array(velocities)
assert velocities_arr.shape == (len(sites), 3), \
"Wrong format for velocities"
velocities = velocities_arr.tolist()
if topologies:
topologies = {k: topologies[k] for k in
SECTION_KEYWORDS["molecule"] if k in topologies}
self.sites = sites
self.atom_type = atom_type
self.types = types
self.charges = charges
self.velocities = velocities
self.topologies = topologies
@classmethod
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
atom_type=None, charges=None, velocities=None, tol=0.1):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
            bond (bool): Whether to find bonds. If set to False, angle and
                dihedral searching will be skipped. Default to True.
            angle (bool): Whether to find angles. Default to True.
            dihedral (bool): Whether to find dihedrals. Default to True.
atom_type (str): Site property key for labeling atoms of
different types. Default to None, i.e., use
site.species_string.
charges ([q, ...]): Charge of each site in a (n,)
array/list, where n is the No. of sites. Default to
None, i.e., search site property for charges.
velocities ([[vx, vy, vz], ...]): Velocity of each site in
a (n, 3) array/list, where n is the No. of sites.
Default to None, i.e., search site property for
velocities.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2]))
for b in real_bonds]
if not all((bond, bond_list)):
return cls(sites=molecule, atom_type=atom_type, charges=charges,
velocities=velocities)
else:
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)]
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = list(np.unique(bond_arr[ix]))
bonds.remove(hub)
hub_spokes[hub] = bonds
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in
itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons:
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k,l in
itertools.product(ks, ls)
if k != l])
topologies = {k: v for k, v
in zip(SECTION_KEYWORDS["molecule"][:3],
[bond_list, angle_list, dihedral_list])
if len(v) > 0}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, atom_type=atom_type, charges=charges,
velocities=velocities, topologies=topologies)
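# Sketch: automatic bond/angle detection for a methane-like Molecule
# (coordinates are rough and for illustration only):
#   mol = Molecule(["C", "H", "H", "H", "H"],
#                  [[0, 0, 0], [0.63, 0.63, 0.63], [-0.63, -0.63, 0.63],
#                   [-0.63, 0.63, -0.63], [0.63, -0.63, -0.63]])
#   topo = Topology.from_bonding(mol)
#   # topo.topologies now holds "Bonds" (4 C-H) and "Angles" (6 H-C-H)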
class ForceField(MSONable):
"""
Class carrying most data in Masses and force field sections.
"""
def __init__(self, mass_info, pair_coeffs=None, mol_coeffs=None):
"""
Args:
            mass_info (list): List of atomic mass info. Each item looks
                like a key, value pair in an OrderedDict. Elements,
                strings (symbols) and floats are all acceptable for the
                values, with the first two converted to the atomic mass
                of an element.
                [("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
            pair_coeffs ([coeffs]): List of pair or pairij coefficients,
                of which the sequence must be sorted according to the
                species in mass_info. Pair or PairIJ determined by the
                length of list.
mol_coeffs (dict): Dict with force field coefficients for
molecular topologies. Default to None, i.e., no
additional coefficients. All four valid keys listed
below are optional.
{
"Bond Coeffs":
[{"coeffs": [coeffs],
"types": [("C", "C"), ...]}, ...],
"Angle Coeffs":
[{"coeffs": [coeffs],
"types": [("H", "C", "H"), ...]}, ...],
"Dihedral Coeffs":
[{"coeffs": [coeffs],
"types": [("H", "C", "C", "H"), ...]}, ...],
"Improper Coeffs":
[{"coeffs": [coeffs],
"types": [("H", "C", "C", "H"), ...]}, ...],
}
"""
map_mass = lambda v: v.atomic_mass.real if isinstance(v, Element) \
else Element(v).atomic_mass.real if isinstance(v, string_types) \
else v
mass_info = [(k, map_mass(v)) for k, v in mass_info]
if pair_coeffs:
# validate No. of pair coeffs
npc = len(pair_coeffs)
nm = len(mass_info)
ncomb = nm * (nm + 1) / 2
if npc == nm:
self.pair_type = "pair"
elif npc == ncomb:
self.pair_type = "pairij"
else:
raise ValueError("Expecting {} Pair Coeffs or "
"{} PairIJ Coeffs for {} atom types,"
" got {}".format(nm, ncomb, nm, npc))
else:
self.pair_type = None
if mol_coeffs:
mol_coeffs = {k: mol_coeffs[k] for k in SECTION_KEYWORDS["ff"][2:]
if k in mol_coeffs}
complete_types = lambda l: l + [t[::-1] for t in l
if t[::-1] not in l]
for k, v in mol_coeffs.items():
for d in v:
d["types"] = complete_types(d["types"])
# No duplicated items under different types allowed
distinct_types = [set(d["types"]) for d in v]
if len(distinct_types) > 1:
assert set.intersection(*distinct_types) == set(),\
"Duplicated items found " \
"under different coefficients in %s" % k
# No undefined atom types allowed
atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
assert atoms.issubset([m[0] for m in mass_info]), \
"Undefined atom type found in %s" % k
self.mass_info = mass_info
self.pair_coeffs = pair_coeffs
self.mol_coeffs = mol_coeffs
masses_sec, self.atom_map = self.get_coeffs_and_mapper("Masses")
self.masses = masses_sec["Masses"]
def get_coeffs_and_mapper(self, section):
"""
Returns data for Masses or a force field section for molecular
topology, also returns a mapper dict ({type: id, ...}) for
labeling data in Atoms or the relative topology section.
Args:
section (str): Section title. Choose among "Masses",
"Bond Coeffs", "Angle Coeffs", "Dihedral Coeffs", and
"Improper Coeffs".
Returns:
Dict with section title as key for the usage of
LammpsData, and a mapper dict for labeling
{"XX Coeffs": [{"id": 1, "coeffs": coeffs}, ...]},
{"type1": 1, ...}
"""
data = []
mapper = {}
if section == "Masses":
for i, (k, v) in enumerate(self.mass_info):
data.append({"id": i + 1, "mass": v})
mapper[k] = i + 1
elif section in SECTION_KEYWORDS["ff"][2:]:
for i, d in enumerate(self.mol_coeffs[section]):
data.append({"id": i + 1, "coeffs": d["coeffs"]})
mapper.update({k: i + 1 for k in d["types"]})
else:
raise RuntimeError("Invalid coefficient section keyword")
return {section: data}, mapper
def get_pair_coeffs(self):
"""
Returns data for Pair(IJ) Coeffs section.
Returns:
Dict with section title as key for the usage of
LammpsData
{"Pair Coeffs": [{"id": 1, "coeffs": coeffs}, ...]} or
{"PairIJ Coeffs": [{"id1": 1, "id2": 1,
"coeffs": coeffs}, ...]}
"""
if self.pair_type == "pair":
return {"Pair Coeffs": [{"id": i + 1, "coeffs": c}
for i, c in enumerate(self.pair_coeffs)]}
elif self.pair_type == "pairij":
n = len(self.mass_info)
ids = itertools.combinations_with_replacement(range(1, n + 1), 2)
return {"PairIJ Coeffs": [{"id1": i[0], "id2": i[1], "coeffs": c}
for i, c in zip(ids, self.pair_coeffs)]}
def to_file(self, filename):
"""
Saves object to a file in YAML format.
Args:
filename (str): File name.
"""
d = {"mass_info": self.mass_info, "pair_coeffs": self.pair_coeffs,
"mol_coeffs": self.mol_coeffs}
yaml = YAML(typ="safe")
with open(filename, "w") as f:
yaml.dump(d, f)
@classmethod
def from_file(cls, filename):
"""
Constructor that reads in a file in YAML format.
Args:
filename (str): File name.
"""
yaml = YAML(typ="safe")
with open(filename, "r") as f:
d = yaml.load(f)
return cls.from_dict(d)
@classmethod
def from_dict(cls, d):
d["mass_info"] = [tuple(m) for m in d["mass_info"]]
if d.get("mol_coeffs"):
for v in d["mol_coeffs"].values():
for c in v:
c["types"] = [tuple(t) for t in c["types"]]
return cls(d["mass_info"], d["pair_coeffs"], d["mol_coeffs"])
|
Bismarrck/pymatgen
|
pymatgen/io/lammps/data.py
|
Python
|
mit
| 35,466
|
[
"LAMMPS",
"pymatgen"
] |
468c5cef801528034ca65e10b674d926f5dd08e740225665a8fc0a412ff60623
|
from nbodykit.base.mesh import MeshFilter
import numpy
class TopHat(MeshFilter):
""" A TopHat filter defined in Fourier space.
Notes
-----
    A Fourier-space filter is different from a configuration-space
    filter. The TopHat in Fourier space creates ringing effects
    due to the truncation / discretization of modes.
"""
kind = 'wavenumber'
mode = 'complex'
def __init__(self, r):
"""
Parameters
----------
r : float
radius of the TopHat filter
"""
self.r = r
def filter(self, k, v):
r = self.r
k = sum(ki ** 2 for ki in k) ** 0.5
kr = k * r
w = 3 * (numpy.sin(kr) / kr **3 - numpy.cos(kr) / kr ** 2)
w[k == 0] = 1.0
return w * v
class Gaussian(MeshFilter):
""" A gaussian filter
.. math ::
G(r) = exp(-0.5 k^2 r^2)
"""
kind = 'wavenumber'
mode = 'complex'
def __init__(self, r):
"""
Parameters
----------
r : float
radius of the Gaussian filter
"""
self.r = r
def filter(self, k, v):
r = self.r
k2 = sum(ki ** 2 for ki in k)
        return numpy.exp(- 0.5 * k2 * r**2) * v
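# Quick numerical sketch of both kernels; `k` arrives as a list of
# per-dimension wavenumber arrays, which is how MeshFilter supplies it
# (the values below are arbitrary):
#   kx = numpy.linspace(0.0, 1.0, 5)
#   k = [kx, numpy.zeros_like(kx), numpy.zeros_like(kx)]
#   v = numpy.ones_like(kx, dtype=complex)
#   TopHat(r=8.0).filter(k, v)    # 3 (sin(kr) - kr cos(kr)) / (kr)^3 * v
#   Gaussian(r=8.0).filter(k, v)  # exp(-0.5 k^2 r^2) * v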
|
nickhand/nbodykit
|
nbodykit/filters.py
|
Python
|
gpl-3.0
| 1,301
|
[
"Gaussian"
] |
9456001c4cddc52f61c86f4bb6a231d690197193e3e9cca0203a6a3de2513e52
|
# (c) 2015 - Jaguar Land Rover.
#
# Mozilla Public License 2.0
#
# Python-based life cycle manager PoC
import gtk
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
import sys
import time
import swm
import settings
import logging
import os
import getopt
import daemon
logger = logging.getLogger(settings.LOGGER)
#
# Lifecycle manager service
#
class LCMgrService(dbus.service.Object):
def __init__(self):
bus_name = dbus.service.BusName('org.genivi.LifecycleManager', dbus.SessionBus())
dbus.service.Object.__init__(self, bus_name, '/org/genivi/LifecycleManager')
@dbus.service.method('org.genivi.LifecycleManager',
async_callbacks=('send_reply', 'send_error'))
def startComponents(self,
transaction_id,
components,
send_reply,
send_error):
logger.debug('LifecycleManager.LCMgrService.startComponents(%s, %s): Called.', transaction_id, components)
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate starting components
for i in components:
logger.debug('LifecycleManager.LCMgrService.startComponents(): Starting: %s', i)
time.sleep(3.0)
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Started components {}".format(", ".join(components)))
return None
@dbus.service.method('org.genivi.LifecycleManager',
async_callbacks=('send_reply', 'send_error'))
def stopComponents(self,
transaction_id,
components,
send_reply,
send_error):
logger.debug('LifecycleManager.LCMgrService.stopComponents(%s, %s): Called.', transaction_id, components)
#
# Send back an immediate reply since DBUS
# doesn't like python dbus-invoked methods to do
# their own calls (nested calls).
#
send_reply(True)
# Simulate stopping components
for i in components:
logger.debug('LifecycleManager.LCMgrService.stopComponents(): Stopping: %s', i)
time.sleep(3.0)
swm.send_operation_result(transaction_id,
swm.SWMResult.SWM_RES_OK,
"Stopped components {}".format(", ".join(components)))
return None
class LCMgrDaemon(daemon.Daemon):
"""
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
super(LCMgrDaemon, self).__init__(pidfile, stdin, stdout, stderr)
def run(self):
DBusGMainLoop(set_as_default=True)
lc_mgr = LCMgrService()
while True:
gtk.main_iteration()
def usage():
print "Usage:", sys.argv[0], "foreground|start|stop|restart"
print
print " foreground Start in foreground"
print " start Start in background"
print " stop Stop daemon running in background"
print " restart Restart daemon running in background"
print
print "Example:", sys.argv[0],"foreground"
sys.exit(1)
if __name__ == "__main__":
logger.debug('Lifecycle Manager - Initializing')
pid_file = settings.PID_FILE_DIR + os.path.splitext(os.path.basename(__file__))[0] + '.pid'
try:
opts, args = getopt.getopt(sys.argv[1:], "")
except getopt.GetoptError:
print "Lifecycle Manager - Could not parse arguments."
usage()
lcmgr_daemon = LCMgrDaemon(pid_file, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null')
for a in args:
if a in ('foreground', 'fg'):
# in foreground we also log to the console
logger.addHandler(logging._handlers['console'])
logger.debug('Lifecycle Manager - Running')
lcmgr_daemon.run()
elif a in ('start', 'st'):
logger.debug('Lifecycle Manager - Starting')
lcmgr_daemon.start()
elif a in ('stop', 'sp'):
logger.debug('Lifecycle Manager - Stopping')
lcmgr_daemon.stop()
elif a in ('restart', 're'):
logger.debug('Lifecycle Manager - Restarting')
lcmgr_daemon.restart()
else:
print "Unknown command: {}".format(a)
usage()
sys.exit(1)
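# Client-side sketch (assumes the daemon above is running on the session
# bus; the transaction id and component list are illustrative):
#   bus = dbus.SessionBus()
#   lc = bus.get_object('org.genivi.LifecycleManager',
#                       '/org/genivi/LifecycleManager')
#   lc.startComponents('transaction-1', ['component_a'],
#                      dbus_interface='org.genivi.LifecycleManager')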
|
rstreif/genivi_software_management
|
lifecycle_manager/lifecycle_manager.py
|
Python
|
mpl-2.0
| 4,711
|
[
"Jaguar"
] |
d3662f5166712d01a35aff5af59d79b11d102009ea1ad184551450aa154f69f4
|
########################################################################
# File: RequestDB.py
# pylint: disable=no-member
########################################################################
""" Frontend for ReqDB
:mod: RequestDB
=======================
.. module: RequestDB
:synopsis: db holding Requests
db holding Request, Operation and File
"""
import errno
import random
import datetime
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import relationship, backref, sessionmaker, joinedload, mapper
from sqlalchemy.sql import update
from sqlalchemy import (
create_engine,
func,
Table,
Column,
MetaData,
ForeignKey,
Integer,
String,
DateTime,
Enum,
BLOB,
BigInteger,
distinct,
)
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.ConfigurationSystem.Client.Utilities import getDBParameters
# Metadata instance that is used to bind the engine, Object and tables
metadata = MetaData()
# Description of the file table
fileTable = Table(
"File",
metadata,
Column("FileID", Integer, primary_key=True),
Column("OperationID", Integer, ForeignKey("Operation.OperationID", ondelete="CASCADE"), nullable=False),
Column("Status", Enum("Waiting", "Done", "Failed", "Scheduled"), server_default="Waiting"),
Column("LFN", String(255), index=True),
Column("PFN", String(255)),
Column("ChecksumType", Enum("ADLER32", "MD5", "SHA1", ""), server_default=""),
Column("Checksum", String(255)),
Column("GUID", String(36)),
Column("Size", BigInteger),
Column("Attempt", Integer),
Column("Error", String(255)),
mysql_engine="InnoDB",
)
# Map the File object to the fileTable, with a few special attributes
mapper(
File,
fileTable,
properties={
"_Status": fileTable.c.Status,
"_LFN": fileTable.c.LFN,
"_ChecksumType": fileTable.c.ChecksumType,
"_GUID": fileTable.c.GUID,
},
)
# Description of the Operation table
operationTable = Table(
"Operation",
metadata,
Column("TargetSE", String(255)),
Column("CreationTime", DateTime),
Column("SourceSE", String(255)),
Column("Arguments", BLOB),
Column("Error", String(255)),
Column("Type", String(64), nullable=False),
Column("Order", Integer, nullable=False),
Column(
"Status",
Enum("Waiting", "Assigned", "Queued", "Done", "Failed", "Canceled", "Scheduled"),
server_default="Queued",
),
Column("LastUpdate", DateTime),
Column("SubmitTime", DateTime),
Column("Catalog", String(255)),
Column("OperationID", Integer, primary_key=True),
Column("RequestID", Integer, ForeignKey("Request.RequestID", ondelete="CASCADE"), nullable=False),
mysql_engine="InnoDB",
)
# Map the Operation object to the operationTable, with a few special attributes
mapper(
Operation,
operationTable,
properties={
"_CreationTime": operationTable.c.CreationTime,
"_Arguments": operationTable.c.Arguments,
"_Order": operationTable.c.Order,
"_Status": operationTable.c.Status,
"_LastUpdate": operationTable.c.LastUpdate,
"_SubmitTime": operationTable.c.SubmitTime,
"_Catalog": operationTable.c.Catalog,
"__files__": relationship(
File,
backref=backref("_parent", lazy="immediate"),
lazy="immediate",
passive_deletes=True,
cascade="all, delete-orphan",
),
},
)
# Description of the Request Table
requestTable = Table(
"Request",
metadata,
Column("DIRACSetup", String(32)),
Column("CreationTime", DateTime),
Column("JobID", Integer, server_default="0"),
Column("OwnerDN", String(255)),
Column("RequestName", String(255), nullable=False),
Column("Error", String(255)),
Column("Status", Enum("Waiting", "Assigned", "Done", "Failed", "Canceled", "Scheduled"), server_default="Waiting"),
Column("LastUpdate", DateTime),
Column("OwnerGroup", String(32)),
Column("SubmitTime", DateTime),
Column("RequestID", Integer, primary_key=True),
Column("SourceComponent", BLOB),
Column("NotBefore", DateTime),
mysql_engine="InnoDB",
)
# Map the Request object to the requestTable, with a few special attributes
mapper(
Request,
requestTable,
properties={
"_CreationTime": requestTable.c.CreationTime,
"_SourceComponent": requestTable.c.SourceComponent,
"_Status": requestTable.c.Status,
"_LastUpdate": requestTable.c.LastUpdate,
"_SubmitTime": requestTable.c.SubmitTime,
"_NotBefore": requestTable.c.NotBefore,
"__operations__": relationship(
Operation,
backref=backref("_parent", lazy="immediate"),
order_by=operationTable.c.Order,
lazy="immediate",
passive_deletes=True,
cascade="all, delete-orphan",
),
},
)
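# Sketch of what the classical mappings above provide (the engine URL is
# illustrative; RequestDB below builds the real one from the DIRAC CS):
#   engine = create_engine("mysql://user:passwd@host:3306/ReqDB")
#   session = sessionmaker(bind=engine)()
#   req = session.query(Request).filter(Request.RequestID == 1).one()
#   for op in req.__operations__:   # ordered by Operation.Order
#       for f in op.__files__:
#           print(f.LFN, f.Status)  # properties backed by _LFN / _Status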
########################################################################
class RequestDB(object):
"""
.. class:: RequestDB
db holding requests
"""
def __getDBConnectionInfo(self, fullname):
"""Collect from the CS all the info needed to connect to the DB.
This should be in a base class eventually
"""
result = getDBParameters(fullname)
if not result["OK"]:
raise Exception("Cannot get database parameters: %s" % result["Message"])
dbParameters = result["Value"]
self.dbHost = dbParameters["Host"]
self.dbPort = dbParameters["Port"]
self.dbUser = dbParameters["User"]
self.dbPass = dbParameters["Password"]
self.dbName = dbParameters["DBName"]
def __init__(self):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger("RequestDB")
# Initialize the connection info
self.__getDBConnectionInfo("RequestManagement/ReqDB")
runDebug = gLogger.getLevel() == "DEBUG"
self.engine = create_engine(
"mysql://%s:%s@%s:%s/%s" % (self.dbUser, self.dbPass, self.dbHost, self.dbPort, self.dbName),
echo=runDebug,
pool_recycle=3600,
)
metadata.bind = self.engine
self.DBSession = sessionmaker(bind=self.engine)
def createTables(self):
"""create tables"""
try:
metadata.create_all(self.engine)
except Exception as e:
return S_ERROR(e)
return S_OK()
def cancelRequest(self, requestID):
session = self.DBSession()
try:
updateRet = session.execute(
update(Request)
.where(Request.RequestID == requestID)
.values({Request._Status: "Canceled", Request._LastUpdate: datetime.datetime.utcnow()})
.execution_options(synchronize_session=False)
) # See FTS3DB for synchronize_session
session.commit()
# No row was changed
if not updateRet.rowcount:
return S_ERROR("No such request %s" % requestID)
return S_OK()
except Exception as e:
session.rollback()
self.log.exception("cancelRequest: unexpected exception", lException=e)
return S_ERROR("cancelRequest: unexpected exception %s" % e)
finally:
session.close()
def putRequest(self, request):
"""update or insert request into db
:param ~Request.Request request: Request instance
"""
session = self.DBSession(expire_on_commit=False)
try:
try:
if hasattr(request, "RequestID"):
status = session.query(Request._Status).filter(Request.RequestID == request.RequestID).one()
if status[0] == "Canceled":
self.log.info(
"Request %s(%s) was canceled, don't put it back" % (request.RequestID, request.RequestName)
)
return S_OK(request.RequestID)
except NoResultFound:
pass
# Since the object request is not attached to the session, we merge it to have an update
# instead of an insert with duplicate primary key
request = session.merge(request)
session.add(request)
session.commit()
session.expunge_all()
return S_OK(request.RequestID)
except Exception as e:
session.rollback()
self.log.exception("putRequest: unexpected exception", lException=e)
return S_ERROR("putRequest: unexpected exception %s" % e)
finally:
session.close()
def getScheduledRequest(self, operationID):
session = self.DBSession()
try:
requestID = (
session.query(Request.RequestID)
.join(Request.__operations__)
.filter(Operation.OperationID == operationID)
.one()
)
return self.getRequest(requestID[0])
except NoResultFound:
return S_OK()
finally:
session.close()
#
# def getRequestName( self, requestID ):
# """ get Request.RequestName for a given Request.RequestID """
#
# session = self.DBSession()
# try:
# requestName = session.query( Request.RequestName )\
# .filter( Request.RequestID == requestID )\
# .one()
# return S_OK( requestName[0] )
# except NoResultFound, e:
# return S_ERROR( "getRequestName: no request found for RequestID=%s" % requestID )
# finally:
# session.close()
def getRequest(self, reqID=0, assigned=True):
"""read request for execution
        :param reqID: request's ID (default 0). If 0, take a pseudo-random one.
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession(expire_on_commit=False)
log = self.log.getSubLogger("getRequest" if assigned else "peekRequest")
requestID = None
try:
if reqID:
requestID = reqID
log.verbose("selecting request '%s'%s" % (reqID, " (Assigned)" if assigned else ""))
status = None
try:
status = session.query(Request._Status).filter(Request.RequestID == reqID).one()
except NoResultFound:
return S_ERROR("getRequest: request '%s' not exists" % reqID)
if status and status == "Assigned" and assigned:
return S_ERROR(
"getRequest: status of request '%s' is 'Assigned', request cannot be selected" % reqID
)
else:
now = datetime.datetime.utcnow().replace(microsecond=0)
reqIDs = set()
try:
reqAscIDs = (
session.query(Request.RequestID)
.filter(Request._Status == "Waiting")
.filter(Request._NotBefore < now)
.order_by(Request._LastUpdate)
.limit(100)
.all()
)
reqIDs = set([reqID[0] for reqID in reqAscIDs])
reqDescIDs = (
session.query(Request.RequestID)
.filter(Request._Status == "Waiting")
.filter(Request._NotBefore < now)
.order_by(Request._LastUpdate.desc())
.limit(50)
.all()
)
reqIDs |= set([reqID[0] for reqID in reqDescIDs])
# No Waiting requests
except NoResultFound:
return S_OK()
if not reqIDs:
return S_OK()
reqIDs = list(reqIDs)
random.shuffle(reqIDs)
requestID = reqIDs[0]
# If we are here, the request MUST exist, so no try catch
# the joinedload is to force the non-lazy loading of all the attributes, especially _parent
request = (
session.query(Request)
.options(joinedload("__operations__").joinedload("__files__"))
.filter(Request.RequestID == requestID)
.one()
)
if not reqID:
log.verbose(
"selected request %s('%s')%s"
% (request.RequestID, request.RequestName, " (Assigned)" if assigned else "")
)
if assigned:
session.execute(
update(Request)
.where(Request.RequestID == requestID)
.values({Request._Status: "Assigned", Request._LastUpdate: datetime.datetime.utcnow()})
)
session.commit()
session.expunge_all()
return S_OK(request)
except Exception as e:
session.rollback()
log.exception("getRequest: unexpected exception", lException=e)
return S_ERROR("getRequest: unexpected exception : %s" % e)
finally:
session.close()
def getBulkRequests(self, numberOfRequest=10, assigned=True):
"""read as many requests as requested for execution
:param int numberOfRequest: Number of Request we want (default 10)
:param bool assigned: if True, the status of the selected requests are set to assign
:returns: a dictionary of Request objects indexed on the RequestID
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession(expire_on_commit=False)
log = self.log.getSubLogger("getBulkRequest" if assigned else "peekBulkRequest")
requestDict = {}
try:
# If we are here, the request MUST exist, so no try catch
# the joinedload is to force the non-lazy loading of all the attributes, especially _parent
try:
now = datetime.datetime.utcnow().replace(microsecond=0)
requestIDs = (
session.query(Request.RequestID)
.with_for_update()
.filter(Request._Status == "Waiting")
.filter(Request._NotBefore < now)
.order_by(Request._LastUpdate)
.limit(numberOfRequest)
.all()
)
requestIDs = [ridTuple[0] for ridTuple in requestIDs]
log.debug("Got request ids %s" % requestIDs)
requests = (
session.query(Request)
.options(joinedload("__operations__").joinedload("__files__"))
.filter(Request.RequestID.in_(requestIDs))
.all()
)
log.debug("Got %s Request objects " % len(requests))
requestDict = dict((req.RequestID, req) for req in requests)
# No Waiting requests
except NoResultFound:
pass
if assigned and requestDict:
session.execute(
update(Request)
.where(Request.RequestID.in_(requestDict.keys()))
.values({Request._Status: "Assigned", Request._LastUpdate: datetime.datetime.utcnow()})
)
session.commit()
session.expunge_all()
except Exception as e:
session.rollback()
log.exception("unexpected exception", lException=e)
return S_ERROR("getBulkRequest: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(requestDict)
def peekRequest(self, requestID):
"""get request (ro), no update on states
:param requestID: Request.RequestID
"""
return self.getRequest(requestID, False)
def getRequestIDsList(self, statusList=None, limit=None, since=None, until=None, getJobID=False):
"""select requests with status in :statusList:"""
statusList = statusList if statusList else list(Request.FINAL_STATES)
limit = limit if limit else 100
session = self.DBSession()
requestIDs = []
try:
if getJobID:
reqQuery = session.query(Request.RequestID, Request._Status, Request._LastUpdate, Request.JobID).filter(
Request._Status.in_(statusList)
)
else:
reqQuery = session.query(Request.RequestID, Request._Status, Request._LastUpdate).filter(
Request._Status.in_(statusList)
)
if since:
reqQuery = reqQuery.filter(Request._LastUpdate > since)
if until:
reqQuery = reqQuery.filter(Request._LastUpdate < until)
reqQuery = reqQuery.order_by(Request._LastUpdate).limit(limit)
requestIDs = [list(reqIDTuple) for reqIDTuple in reqQuery.all()]
except Exception as e:
session.rollback()
self.log.exception("getRequestIDsList: unexpected exception", lException=e)
return S_ERROR("getRequestIDsList: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(requestIDs)
def deleteRequest(self, requestID):
"""delete request given its ID
:param str requestID: request.RequestID
:param mixed connection: connection to use if any
"""
session = self.DBSession()
try:
session.query(Request).filter(Request.RequestID == requestID).delete()
session.commit()
except Exception as e:
session.rollback()
self.log.exception("deleteRequest: unexpected exception", lException=e)
return S_ERROR("deleteRequest: unexpected exception : %s" % e)
finally:
session.close()
return S_OK()
def getDBSummary(self):
"""get db summary"""
# # this will be returned
retDict = {"Request": {}, "Operation": {}, "File": {}}
session = self.DBSession()
try:
requestQuery = session.query(Request._Status, func.count(Request.RequestID)).group_by(Request._Status).all()
for status, count in requestQuery:
retDict["Request"][status] = count
operationQuery = (
session.query(Operation.Type, Operation._Status, func.count(Operation.OperationID))
.group_by(Operation.Type, Operation._Status)
.all()
)
for oType, status, count in operationQuery:
retDict["Operation"].setdefault(oType, {})[status] = count
fileQuery = session.query(File._Status, func.count(File.FileID)).group_by(File._Status).all()
for status, count in fileQuery:
retDict["File"][status] = count
except Exception as e:
self.log.exception("getDBSummary: unexpected exception", lException=e)
return S_ERROR("getDBSummary: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(retDict)
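    # Shape of the dict returned by getDBSummary (counts and the
    # operation type are illustrative):
    #   {"Request": {"Waiting": 10, "Done": 3},
    #    "Operation": {"ReplicateAndRegister": {"Waiting": 12}},
    #    "File": {"Waiting": 40, "Done": 7}}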
def getRequestSummaryWeb(self, selectDict, sortList, startItem, maxItems):
"""Returns a list of Request for the web portal
:param dict selectDict: parameter on which to restrain the query {key : Value}
key can be any of the Request columns, 'Type' (interpreted as Operation.Type)
and 'FromData' and 'ToData' are matched against the LastUpdate field
:param sortList: [sorting column, ASC/DESC]
:type sortList: python:list
:param int startItem: start item (for pagination)
:param int maxItems: max items (for pagination)
"""
parameterList = [
"RequestID",
"RequestName",
"JobID",
"OwnerDN",
"OwnerGroup",
"Status",
"Error",
"CreationTime",
"LastUpdate",
]
resultDict = {}
session = self.DBSession()
try:
summaryQuery = session.query(
Request.RequestID,
Request.RequestName,
Request.JobID,
Request.OwnerDN,
Request.OwnerGroup,
Request._Status,
Request.Error,
Request._CreationTime,
Request._LastUpdate,
)
for key, value in selectDict.items():
if key == "ToDate":
summaryQuery = summaryQuery.filter(Request._LastUpdate < value)
elif key == "FromDate":
summaryQuery = summaryQuery.filter(Request._LastUpdate > value)
else:
tableName = "Request"
if key == "Type":
summaryQuery = summaryQuery.join(Request.__operations__).group_by(
Request.RequestID,
Request.RequestName,
Request.JobID,
Request.OwnerDN,
Request.OwnerGroup,
Request._Status,
Request.Error,
Request._CreationTime,
Request._LastUpdate,
Operation.Type,
)
tableName = "Operation"
elif key == "Status":
key = "_Status"
if isinstance(value, list):
summaryQuery = summaryQuery.filter(eval("%s.%s.in_(%s)" % (tableName, key, value)))
else:
summaryQuery = summaryQuery.filter(eval("%s.%s" % (tableName, key)) == value)
if sortList:
summaryQuery = summaryQuery.order_by(eval("Request.%s.%s()" % (sortList[0][0], sortList[0][1].lower())))
try:
requestLists = summaryQuery.all()
except NoResultFound:
resultDict["ParameterNames"] = parameterList
resultDict["Records"] = []
return S_OK(resultDict)
except Exception as e:
return S_ERROR("Error getting the webSummary %s" % e)
nRequests = len(requestLists)
if startItem <= len(requestLists):
firstIndex = startItem
else:
return S_ERROR("getRequestSummaryWeb: Requested index out of range")
if (startItem + maxItems) <= len(requestLists):
secondIndex = startItem + maxItems
else:
secondIndex = len(requestLists)
records = []
for i in range(firstIndex, secondIndex):
row = requestLists[i]
records.append([str(x) for x in row])
resultDict["ParameterNames"] = parameterList
resultDict["Records"] = records
resultDict["TotalRecords"] = nRequests
return S_OK(resultDict)
#
except Exception as e:
self.log.exception("getRequestSummaryWeb: unexpected exception", lException=e)
return S_ERROR("getRequestSummaryWeb: unexpected exception : %s" % e)
finally:
session.close()
def getRequestCountersWeb(self, groupingAttribute, selectDict):
"""For the web portal.
Returns a dictionary {value : counts} for a given key.
The key can be any field from the RequestTable. or "Type",
which will be interpreted as 'Operation.Type'
"""
resultDict = {}
session = self.DBSession()
if groupingAttribute == "Type":
groupingAttribute = "Operation.Type"
elif groupingAttribute == "Status":
groupingAttribute = "Request._Status"
else:
groupingAttribute = "Request.%s" % groupingAttribute
try:
summaryQuery = session.query(eval(groupingAttribute), func.count(Request.RequestID))
for key, value in selectDict.items():
if key == "ToDate":
summaryQuery = summaryQuery.filter(Request._LastUpdate < value)
elif key == "FromDate":
summaryQuery = summaryQuery.filter(Request._LastUpdate > value)
else:
objectType = "Request"
if key == "Type":
summaryQuery = summaryQuery.join(Request.__operations__)
objectType = "Operation"
elif key == "Status":
key = "_Status"
if isinstance(value, list):
summaryQuery = summaryQuery.filter(eval("%s.%s.in_(%s)" % (objectType, key, value)))
else:
summaryQuery = summaryQuery.filter(eval("%s.%s" % (objectType, key)) == value)
summaryQuery = summaryQuery.group_by(eval(groupingAttribute))
try:
requestLists = summaryQuery.all()
resultDict = dict(requestLists)
except NoResultFound:
pass
except Exception as e:
return S_ERROR("Error getting the webCounters %s" % e)
return S_OK(resultDict)
except Exception as e:
self.log.exception("getRequestSummaryWeb: unexpected exception", lException=e)
return S_ERROR("getRequestSummaryWeb: unexpected exception : %s" % e)
finally:
session.close()
def getDistinctValues(self, tableName, columnName):
"""For a given table and a given field, return the list of of distinct values in the DB"""
session = self.DBSession()
distinctValues = []
if columnName == "Status":
columnName = "_Status"
try:
result = session.query(distinct(eval("%s.%s" % (tableName, columnName)))).all()
distinctValues = [dist[0] for dist in result]
except NoResultFound:
pass
except Exception as e:
self.log.exception("getDistinctValues: unexpected exception", lException=e)
return S_ERROR("getDistinctValues: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(distinctValues)
def getRequestIDsForJobs(self, jobIDs):
"""returns request ids for jobs given jobIDs
:param list jobIDs: list of jobIDs
:return: S_OK( "Successful" : { jobID1 : Request, jobID2: Request, ... }
"Failed" : { jobID3: "error message", ... } )
"""
self.log.debug("getRequestIDsForJobs: got %s jobIDs to check" % str(jobIDs))
if not jobIDs:
return S_ERROR("Must provide jobID list as argument.")
if isinstance(jobIDs, int):
jobIDs = [jobIDs]
jobIDs = set(jobIDs)
reqDict = {"Successful": {}, "Failed": {}}
session = self.DBSession()
try:
ret = session.query(Request.JobID, Request.RequestID).filter(Request.JobID.in_(jobIDs)).all()
reqDict["Successful"] = dict((jobId, reqID) for jobId, reqID in ret)
reqDict["Failed"] = dict((jobid, "Request not found") for jobid in jobIDs - set(reqDict["Successful"]))
except Exception as e:
self.log.exception("getRequestIDsForJobs: unexpected exception", lException=e)
return S_ERROR("getRequestIDsForJobs: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(reqDict)
def readRequestsForJobs(self, jobIDs=None):
"""read request for jobs
:param list jobIDs: list of JobIDs
:return: S_OK( "Successful" : { jobID1 : Request, jobID2: Request, ... }
"Failed" : { jobID3: "error message", ... } )
"""
self.log.debug("readRequestForJobs: got %s jobIDs to check" % str(jobIDs))
if not jobIDs:
return S_ERROR("Must provide jobID list as argument.")
if isinstance(jobIDs, int):
jobIDs = [jobIDs]
jobIDs = set(jobIDs)
reqDict = {"Successful": {}, "Failed": {}}
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession(expire_on_commit=False)
try:
ret = (
session.query(Request.JobID, Request)
.options(joinedload("__operations__").joinedload("__files__"))
.filter(Request.JobID.in_(jobIDs))
.all()
)
reqDict["Successful"] = dict((jobId, reqObj) for jobId, reqObj in ret)
reqDict["Failed"] = dict((jobid, "Request not found") for jobid in jobIDs - set(reqDict["Successful"]))
session.expunge_all()
except Exception as e:
self.log.exception("readRequestsForJobs: unexpected exception", lException=e)
return S_ERROR("readRequestsForJobs: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(reqDict)
def getRequestStatus(self, requestID):
"""get request status for a given request ID"""
self.log.debug("getRequestStatus: checking status for '%s' request" % requestID)
session = self.DBSession()
try:
status = session.query(Request._Status).filter(Request.RequestID == requestID).one()
except NoResultFound:
return S_ERROR(errno.ENOENT, "Request %s does not exist" % requestID)
finally:
session.close()
return S_OK(status[0])
def getRequestFileStatus(self, requestID, lfnList):
"""get status for files in request given its id
A single status is returned by file, which corresponds
to the most representative one. That is:
* Failed: if it has failed in any of the operation
* Scheduled: if it is Scheduled in any of the operation
* Waiting: if the process is ongoing
* Done: if everything was executed
:param str requestID: Request.RequestID
:param lfnList: list of LFNs
:type lfnList: python:list
"""
session = self.DBSession()
try:
res = dict.fromkeys(lfnList, "UNKNOWN")
requestRet = (
session.query(File._LFN, File._Status)
.join(Request.__operations__)
.join(Operation.__files__)
.filter(Request.RequestID == requestID)
.filter(File._LFN.in_(lfnList))
.order_by(Operation._Order)
.all()
)
for lfn, status in requestRet:
# If the file was in one of these two state in the previous
# operations, that's the one we want to return
if res.get(lfn) not in ("Failed", "Scheduled"):
res[lfn] = status
return S_OK(res)
except Exception as e:
self.log.exception("getRequestFileStatus: unexpected exception", lException=e)
return S_ERROR("getRequestFileStatus: unexpected exception : %s" % e)
finally:
session.close()
def getRequestInfo(self, requestID):
"""get request info given Request.RequestID"""
session = self.DBSession()
try:
requestInfoQuery = session.query(
Request.RequestID,
Request._Status,
Request.RequestName,
Request.JobID,
Request.OwnerDN,
Request.OwnerGroup,
Request.DIRACSetup,
Request._SourceComponent,
Request._CreationTime,
Request._SubmitTime,
Request._LastUpdate,
).filter(Request.RequestID == requestID)
try:
requestInfo = requestInfoQuery.one()
except NoResultFound:
return S_ERROR("No such request")
return S_OK(list(requestInfo))
except Exception as e:
self.log.exception("getRequestInfo: unexpected exception", lException=e)
return S_ERROR("getRequestInfo: unexpected exception : %s" % e)
finally:
session.close()
def getDigest(self, requestID):
"""get digest for request given its id
:param str requestName: request id
"""
self.log.debug("getDigest: will create digest for request '%s'" % requestID)
request = self.getRequest(requestID, False)
if not request["OK"]:
self.log.error("getDigest: %s" % request["Message"])
return request
request = request["Value"]
if not isinstance(request, Request):
self.log.info("getDigest: request '%s' not found")
return S_OK()
return request.getDigest()
def getRequestIDForName(self, requestName):
"""read request id for given name
if the name is not unique, an error is returned
:param requestName: name of the request
"""
session = self.DBSession()
reqID = 0
try:
ret = session.query(Request.RequestID).filter(Request.RequestName == requestName).all()
if not ret:
return S_ERROR("No such request %s" % requestName)
elif len(ret) > 1:
return S_ERROR("RequestName %s not unique (%s matches)" % (requestName, len(ret)))
reqID = ret[0][0]
except NoResultFound:
return S_ERROR("No such request")
except Exception as e:
self.log.exception("getRequestIDsForName: unexpected exception", lException=e)
return S_ERROR("getRequestIDsForName: unexpected exception : %s" % e)
finally:
session.close()
return S_OK(reqID)
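# --- Added illustrative sketch (not part of DIRAC) ---
# The per-LFN status reduction performed in getRequestFileStatus, extracted
# as a standalone helper: "Failed" and "Scheduled" are sticky, otherwise the
# status of the latest operation (ordered by Operation._Order) wins.
def _exampleReduceFileStatus(orderedStatuses):
    result = "UNKNOWN"
    for status in orderedStatuses:
        if result not in ("Failed", "Scheduled"):
            result = status
    return result
# e.g. _exampleReduceFileStatus(["Done", "Failed", "Done"]) == "Failed"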
|
DIRACGrid/DIRAC
|
src/DIRAC/RequestManagementSystem/DB/RequestDB.py
|
Python
|
gpl-3.0
| 34,747
|
[
"DIRAC"
] |
1593b298b86043f021eade8456c9d27b51d3baacccc840d2dda94ee0ec032fd9
|
# -*- coding: utf-8 -*-
#
# pymatgen documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 15 00:13:52 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../pymatgen'))
sys.path.insert(0, os.path.abspath('../..'))
from pymatgen import __version__, __author__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.viewcode', "sphinx.ext.mathjax"]
exclude_patterns = ['../**/tests*']
exclude_dirnames = ['../**/tests*']
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymatgen'
copyright = u'2011, ' + __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns += ['_build']  # keep the tests exclusion defined above
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'proBlue'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymatgendoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pymatgen.tex', u'pymatgen Documentation', __author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymatgen', u'pymatgen Documentation',
[__author__], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pymatgen', u'pymatgen Documentation',
__author__, 'pymatgen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pymatgen'
epub_author = __author__
epub_publisher = u'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Dan Gunter, William Davidson Richards'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
migueldiascosta/pymatgen
|
docs/conf.py
|
Python
|
mit
| 9,424
|
[
"pymatgen"
] |
edbb2e20dfd1f6fa94c98a97a0dae9d088ffd7b5446ce396858d0f55f30413e8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# submitjob - job submit page
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Page for submitting jobs"""
import cgi
import cgitb
cgitb.enable()
from shared.functionality.submitjob import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/submitjob.py
|
Python
|
gpl-2.0
| 1,098
|
[
"Brian"
] |
9cd2ebb01ef3e49bcc093d756438c6821e83b7edd34b1425abc50b2d0a0fccef
|
'''
Contains physical constants used in snow modeling.
@var a_gravity: Gravitational acceleration [m s-2]
@var eta0: Viscosity of snow at T=0C and density=0 [N s m-2 = kg m-1 s-1]
@var rho_air: Density of air [kg m-3], dry air at 0 C and 100 kPa
@var rho_water: Density of water [kg m-3]
@var rho_ice: Density of ice [kg m-3]
@var k_ice0: Thermal conductivity of ice [W m-1 K-1] at 0 C
@var k_ice10: Thermal conductivity of ice [W m-1 K-1] at -10 C
@var secperday: Seconds per day [s]
@var boltzmann: Boltzmann constant [J K-1].
The Boltzmann constant (k or kB) is the physical constant relating energy
at the particle level with temperature observed at the bulk level.
It is the gas constant R divided by the Avogadro constant NA: k = \frac{R}{N_{\rm A}}\,
It has the same units as entropy.
@var boltzmann_eV: Boltzmann constant [eV K-1]
@author: kmu
@since: 25 May 2010
'''
# gravitational acceleration [m s-2]
a_gravity = 9.81
# viscosity of snow at T=0C and density=0 [N s m-2 = kg m-1 s-1]
eta0 = 3.6e6
# Density of air [kg m-3], dry air at 0 C and 100 kPa
rho_air = 1.2754
# Density of water [kg m-3]
rho_water = 1000.0
# Density of ice [kg m-3]
rho_ice = 916.0
# Thermal conductivity of ice [W m-1 K-1]
k_ice0 = 2.22 # at 0 C
k_ice10 = 2.30 # at -10 C
# Seconds per day [s]
secperday = 86400.0
# Boltzmann constant [J K-1]
# The Boltzmann constant (k or kB) is the physical constant relating energy
# at the particle level with temperature observed at the bulk level.
# It is the gas constant R divided by the Avogadro constant NA:
# k = \frac{R}{N_{\rm A}}\,
# It has the same units as entropy.
boltzmann = 1.380650424e-23
boltzmann_eV = 8.61734315e-5 # [eV K-1]
# Stefan-Boltzmann constant [W m-2 K-4]
stefan_boltzmann = 5.67040004e-8
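# --- Added sanity check (illustrative, not part of the original module) ---
# Verifies the relation quoted above, k = R / N_A, using standard values for
# the gas constant R and the Avogadro constant N_A (assumed here).
_R = 8.314462618  # molar gas constant [J mol-1 K-1]
_N_A = 6.02214076e23  # Avogadro constant [mol-1]
assert abs(_R / _N_A - boltzmann) / boltzmann < 1e-3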
|
kmunve/pysenorge
|
pysenorge/constants.py
|
Python
|
gpl-3.0
| 1,844
|
[
"Avogadro"
] |
7fd2a550fa5f09255626bc147e12f325e49f6c2d0982079967b01f8a458c3597
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import traceback
from kivy.logger import Logger
from ORCA.App import ORCA_App
ORCA = None
try:
ORCA = ORCA_App()
if __name__ in ('__android__', '__main__'):
ORCA.run()
except Exception as exception:
    uMsg = 'ORCA: Unexpected error: ' + str(exception)
Logger.critical(uMsg)
uMsg = traceback.format_exc()
Logger.critical(uMsg)
if ORCA is not None:
ORCA.StopApp()
pass
|
thica/ORCA-Remote
|
src/main.py
|
Python
|
gpl-3.0
| 1,299
|
[
"ORCA"
] |
3e9a66e32dbbccabcbbf1e6bfaeafbdb2759a3bdb75fb6af31095afbeb189b7f
|
import matplotlib.pyplot as plt
from numpy import linspace
import sys
user_filename = raw_input("Please state filename of results: ")
f = open(user_filename,"r")
lines = f.readlines()
u = []
f_exact = []
v = []
epsilon = []
u2 = []
for line in lines:
    cols = line.split()
    u.append(float(cols[1]))
    f_exact.append(float(cols[2]))
    v.append(float(cols[3]))
    epsilon.append(float(cols[4]))
    u2.append(float(cols[5]))
f.close()
i = linspace(0,1,len(u))
plt.plot(i,u,'r-', label='Gaussian',)
plt.plot(i,f_exact, 'k-', label='Exact', )
plt.legend()
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.show()
plt.figure()
plt.plot(i,epsilon,'--')
plt.xlabel('$x$')
plt.ylabel('Relative error, logarithmic scale')
plt.xlim([-0.1,1.1])
plt.show()
|
linegpe/FYS3150
|
Project1/plot_results.py
|
Python
|
gpl-3.0
| 750
|
[
"Gaussian"
] |
d89dab0b4b5dbef12fd2fafe6c20a7eadfac9908d1a9e9120d0ee38ae11181fa
|
import sys
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sklearn.preprocessing import MinMaxScaler
from pystruct.models import StructuredModel
from pystruct.learners import OneSlackSSVM
sys.path.append('.')
from shared import LOG_SMALL, LOG_TRANSITION, FEATURES # noqa: E402
class SSVM:
"""Structured SVM wrapper"""
def __init__(self, inference_train, inference_pred, dat_obj, C=1.0, share_params=True,
multi_label=True, poi_info=None, debug=False):
assert(C > 0)
self.C = C
self.inference_train = inference_train
self.inference_pred = inference_pred
self.share_params = share_params
self.multi_label = multi_label
self.dat_obj = dat_obj
self.debug = debug
self.trained = False
if poi_info is None:
self.poi_info = None
else:
self.poi_info = poi_info
self.scaler_node = MinMaxScaler(feature_range=(-1, 1), copy=False)
self.scaler_edge = MinMaxScaler(feature_range=(-1, 1), copy=False)
def train(self, trajid_list, n_jobs=4):
if self.poi_info is None:
self.poi_info = self.dat_obj.calc_poi_info(trajid_list)
# build POI_ID <--> POI__INDEX mapping for POIs used to train CRF
# which means only POIs in traj such that len(traj) >= 2 are included
poi_set = {p for tid in trajid_list for p in self.dat_obj.traj_dict[tid]
if len(self.dat_obj.traj_dict[tid]) >= 2}
self.poi_list = sorted(poi_set)
self.poi_id_dict, self.poi_id_rdict = dict(), dict()
for idx, poi in enumerate(self.poi_list):
self.poi_id_dict[poi] = idx
self.poi_id_rdict[idx] = poi
# generate training data
train_traj_list = [self.dat_obj.traj_dict[k] for k in trajid_list if len(self.dat_obj.traj_dict[k]) >= 2]
node_features_list = Parallel(n_jobs=n_jobs)(delayed(calc_node_features)(
tr[0], len(tr), self.poi_list, self.poi_info, self.dat_obj) for tr in train_traj_list)
edge_features = calc_edge_features(trajid_list, self.poi_list, self.poi_info, self.dat_obj)
# feature scaling: node features
# should each example be flattened to one vector before scaling?
self.fdim_node = node_features_list[0].shape
X_node_all = np.vstack(node_features_list)
X_node_all = self.scaler_node.fit_transform(X_node_all)
X_node_all = X_node_all.reshape(-1, self.fdim_node[0], self.fdim_node[1])
# feature scaling: edge features
fdim_edge = edge_features.shape
edge_features = self.scaler_edge.fit_transform(edge_features.reshape(fdim_edge[0] * fdim_edge[1], -1))
self.edge_features = edge_features.reshape(fdim_edge)
assert(len(train_traj_list) == X_node_all.shape[0])
X_train = [(X_node_all[k, :, :],
self.edge_features.copy(),
(self.poi_id_dict[train_traj_list[k][0]], len(train_traj_list[k])))
for k in range(len(train_traj_list))]
y_train = [np.array([self.poi_id_dict[k] for k in tr]) for tr in train_traj_list]
assert(len(X_train) == len(y_train))
# train
sm = MyModel(inference_train=self.inference_train, inference_pred=self.inference_pred,
share_params=self.share_params, multi_label=self.multi_label)
if self.debug is True:
print('C:', self.C)
verbose = 1 if self.debug is True else 0
self.osssvm = OneSlackSSVM(model=sm, C=self.C, n_jobs=n_jobs, verbose=verbose)
try:
self.osssvm.fit(X_train, y_train, initialize=True)
self.trained = True
print('SSVM training finished.')
except ValueError:
# except:
self.trained = False
sys.stderr.write('SSVM training FAILED.\n')
# raise
return self.trained
def predict(self, startPOI, nPOI):
assert(self.trained is True)
if startPOI not in self.poi_list:
return None
X_node_test = calc_node_features(startPOI, nPOI, self.poi_list, self.poi_info, self.dat_obj)
# feature scaling
# should each example be flattened to one vector before scaling?
# X_node_test = X_node_test.reshape(1, -1) # flatten test example to a vector
X_node_test = self.scaler_node.transform(X_node_test)
# X_node_test = X_node_test.reshape(self.fdim)
X_test = [(X_node_test, self.edge_features, (self.poi_id_dict[startPOI], nPOI))]
y_hat_list = self.osssvm.predict(X_test)[0]
# print(y_hat_list)
return [np.array([self.poi_id_rdict[x] for x in y_hat]) for y_hat in y_hat_list]
class MyModel(StructuredModel):
"""A Sequence model"""
def __init__(self, inference_train, inference_pred, share_params, multi_label,
n_states=None, n_features=None, n_edge_features=None):
assert(type(share_params) == bool)
assert(type(multi_label) == bool)
self.inference_method = 'customized'
self.inference_train = inference_train
self.inference_pred = inference_pred
self.class_weight = None
self.inference_calls = 0
self.n_states = n_states
self.n_features = n_features
self.n_edge_features = n_edge_features
self.share_params = share_params
self.multi_label = multi_label
self._set_size_joint_feature()
self._set_class_weight()
def _set_size_joint_feature(self):
if None not in [self.n_states, self.n_features, self.n_edge_features]:
if self.share_params is True: # share params among POIs/transitions
self.size_joint_feature = self.n_features + self.n_edge_features
else:
self.size_joint_feature = self.n_states * self.n_features + \
self.n_states * self.n_states * self.n_edge_features
def loss(self, y, y_hat):
# return np.mean(np.asarray(y) != np.asarray(y_hat)) # hamming loss (normalised)
return np.sum(np.asarray(y) != np.asarray(y_hat)) # hamming loss
def initialize(self, X, Y):
assert(len(X) == len(Y))
n_features = X[0][0].shape[1]
if self.n_features is None:
self.n_features = n_features
else:
assert(self.n_features == n_features)
n_states = len(np.unique(np.hstack([y.ravel() for y in Y])))
if self.n_states is None:
self.n_states = n_states
else:
assert(self.n_states == n_states)
n_edge_features = X[0][1].shape[2]
if self.n_edge_features is None:
self.n_edge_features = n_edge_features
else:
assert(self.n_edge_features == n_edge_features)
self._set_size_joint_feature()
self._set_class_weight()
self.traj_group_dict = dict()
for i in range(len(X)):
query = X[i][2]
if query in self.traj_group_dict:
# NO duplication
if not np.any([np.all(np.asarray(Y[i]) == np.asarray(yj)) for yj in self.traj_group_dict[query]]):
self.traj_group_dict[query].append(Y[i])
else:
self.traj_group_dict[query] = [Y[i]]
def __repr__(self):
return ("%s(n_states: %d, inference_method: %s, n_features: %d, n_edge_features: %d)"
% (type(self).__name__, self.n_states, self.inference_method, self.n_features, self.n_edge_features))
def joint_feature(self, x, y):
assert(not isinstance(y, tuple))
unary_features = x[0] # unary features of all POIs: n_POIs x n_features
pw_features = x[1] # pairwise features of all transitions: n_POIs x n_POIs x n_edge_features
query = x[2] # query = (startPOI, length)
n_nodes = query[1]
# assert(unary_features.ndim == 2)
# assert(pw_features.ndim == 3)
# assert(len(query) == 3)
assert(n_nodes == len(y))
assert(unary_features.shape == (self.n_states, self.n_features))
assert(pw_features.shape == (self.n_states, self.n_states, self.n_edge_features))
if self.share_params is True:
node_features = np.zeros((self.n_features), dtype=np.float)
edge_features = np.zeros((self.n_edge_features), dtype=np.float)
node_features = unary_features[y[0], :]
for j in range(len(y) - 1):
ss, tt = y[j], y[j + 1]
node_features = node_features + unary_features[tt, :]
edge_features = edge_features + pw_features[ss, tt, :]
else:
node_features = np.zeros((self.n_states, self.n_features), dtype=np.float)
edge_features = np.zeros((self.n_states, self.n_states, self.n_edge_features), dtype=np.float)
node_features[y[0], :] = unary_features[y[0], :]
for j in range(len(y) - 1):
ss, tt = y[j], y[j + 1]
node_features[tt, :] = unary_features[tt, :]
edge_features[ss, tt, :] = pw_features[ss, tt, :]
joint_feature_vector = np.hstack([node_features.ravel(), edge_features.ravel()])
return joint_feature_vector
def loss_augmented_inference(self, x, y, w, relaxed=None):
# inference procedure for training: (x, y) from training set (with features already scaled)
#
# argmax_y_hat np.dot(w, joint_feature(x, y_hat)) + loss(y, y_hat)
#
# the loss function should be decomposible in order to use Viterbi decoding, here we use Hamming loss
#
# x[0]: (unscaled) unary features of all POIs: n_POIs x n_features
# x[1]: (unscaled) pairwise features of all transitions: n_POIs x n_POIs x n_edge_features
# x[2]: query = (startPOI, length)
unary_features = x[0]
pw_features = x[1]
query = x[2]
assert(unary_features.ndim == 2)
assert(pw_features.ndim == 3)
assert(len(query) == 2)
ps = query[0]
L = query[1]
M = unary_features.shape[0] # total number of POIs
self._check_size_w(w)
if self.share_params is True:
unary_params = w[:self.n_features]
pw_params = w[self.n_features:].reshape(self.n_edge_features)
# duplicate params so that inference procedures work the same way no matter params are shared or not
unary_params = np.tile(unary_params, (self.n_states, 1))
pw_params = np.tile(pw_params, (self.n_states, self.n_states, 1))
else:
unary_params = w[:self.n_states * self.n_features].reshape((self.n_states, self.n_features))
pw_params = w[self.n_states * self.n_features:].reshape(
(self.n_states, self.n_states, self.n_edge_features))
if self.multi_label is True:
y_true_list = self.traj_group_dict[query]
else:
y_true_list = [y]
y_hat = self.inference_train(ps, L, M, unary_params, pw_params, unary_features, pw_features,
y_true=y, y_true_list=y_true_list)
return y_hat
def inference(self, x, w, relaxed=False, return_energy=False):
# inference procedure for testing: x from test set (features needs to be scaled)
#
# argmax_y np.dot(w, joint_feature(x, y))
#
# x[0]: (unscaled) unary features of all POIs: n_POIs x n_features
# x[1]: (unscaled) pairwise features of all transitions: n_POIs x n_POIs x n_edge_features
# x[2]: query = (startPOI, length)
unary_features = x[0]
pw_features = x[1]
query = x[2]
assert(unary_features.ndim == 2)
assert(pw_features.ndim == 3)
assert(len(query) == 2)
ps = query[0]
L = query[1]
M = unary_features.shape[0] # total number of POIs
self._check_size_w(w)
if self.share_params is True:
unary_params = w[:self.n_features]
pw_params = w[self.n_features:].reshape(self.n_edge_features)
# duplicate params so that inference procedures work the same way no matter params shared or not
unary_params = np.tile(unary_params, (self.n_states, 1))
pw_params = np.tile(pw_params, (self.n_states, self.n_states, 1))
else:
unary_params = w[:self.n_states * self.n_features].reshape((self.n_states, self.n_features))
pw_params = w[self.n_states * self.n_features:].reshape(
(self.n_states, self.n_states, self.n_edge_features))
y_pred = self.inference_pred(ps, L, M, unary_params, pw_params, unary_features, pw_features)
return y_pred
def calc_node_features(startPOI, nPOI, poi_list, poi_info, dat_obj):
"""
Compute node features (singleton) for all POIs given query (startPOI, nPOI)
"""
columns = FEATURES.copy()
p0, trajLen = startPOI, nPOI
assert(p0 in poi_info.index)
# DEBUG: use uniform node features
# nrows = len(poi_list)
# ncols = len(columns) + len(dat_obj.POI_CAT_LIST) + len(dat_obj.POI_CLUSTER_LIST) - 2
# return np.ones((nrows, ncols), dtype=np.float)
# return np.zeros((nrows, ncols), dtype=np.float)
df_ = pd.DataFrame(index=poi_list, columns=columns)
for poi in poi_list:
# lon, lat = poi_info.loc[poi, 'poiLon'], poi_info.loc[poi, 'poiLat']
pop, nvisit = poi_info.loc[poi, 'popularity'], poi_info.loc[poi, 'nVisit']
cat, cluster = poi_info.loc[poi, 'poiCat'], dat_obj.POI_CLUSTERS.loc[poi, 'clusterID']
nphotos = poi_info.loc[poi, ['nPhotoTotal', 'nPhotoMean', 'nPhotoP10', 'nPhotoP50', 'nPhotoP90']].tolist()
durations = poi_info.loc[poi, ['durationTotal', 'durationMean', 'durationP10', 'durationP50', 'durationP90']]\
.tolist()
idx = poi
df_.set_value(idx, 'category', tuple((cat == np.array(dat_obj.POI_CAT_LIST)).astype(np.int) * 2 - 1))
df_.set_value(idx, 'neighbourhood',
tuple((cluster == np.array(dat_obj.POI_CLUSTER_LIST)).astype(np.int) * 2 - 1))
df_.loc[idx, 'popularity'] = LOG_SMALL if pop < 1 else np.log10(pop)
df_.loc[idx, 'nVisit'] = LOG_SMALL if nvisit < 1 else np.log10(nvisit)
df_.loc[idx, 'nPhotoTotal'] = LOG_SMALL if nphotos[0] < 1 else np.log10(nphotos[0])
df_.loc[idx, 'nPhotoMean'] = LOG_SMALL if nphotos[1] < 1 else np.log10(nphotos[1])
df_.loc[idx, 'nPhotoP10'] = LOG_SMALL if nphotos[2] < 1 else np.log10(nphotos[2])
df_.loc[idx, 'nPhotoP50'] = LOG_SMALL if nphotos[3] < 1 else np.log10(nphotos[3])
df_.loc[idx, 'nPhotoP90'] = LOG_SMALL if nphotos[4] < 1 else np.log10(nphotos[4])
df_.loc[idx, 'durationTotal'] = LOG_SMALL if durations[0] < 1 else np.log10(durations[0])
df_.loc[idx, 'durationMean'] = LOG_SMALL if durations[1] < 1 else np.log10(durations[1])
df_.loc[idx, 'durationP10'] = LOG_SMALL if durations[2] < 1 else np.log10(durations[2])
df_.loc[idx, 'durationP50'] = LOG_SMALL if durations[3] < 1 else np.log10(durations[3])
df_.loc[idx, 'durationP90'] = LOG_SMALL if durations[4] < 1 else np.log10(durations[4])
df_.loc[idx, 'trajLen'] = trajLen
df_.loc[idx, 'sameCategory'] = 1 if cat == poi_info.loc[p0, 'poiCat'] else -1
df_.loc[idx, 'sameNeighbourhood'] = 1 if cluster == dat_obj.POI_CLUSTERS.loc[p0, 'clusterID'] else -1
df_.loc[idx, 'diffPopularity'] = pop - poi_info.loc[p0, 'popularity']
df_.loc[idx, 'diffNVisit'] = nvisit - poi_info.loc[p0, 'nVisit']
df_.loc[idx, 'diffNPhotoTotal'] = nphotos[0] - poi_info.loc[p0, 'nPhotoTotal']
df_.loc[idx, 'diffNPhotoMean'] = nphotos[1] - poi_info.loc[p0, 'nPhotoMean']
df_.loc[idx, 'diffNPhotoP10'] = nphotos[2] - poi_info.loc[p0, 'nPhotoP10']
df_.loc[idx, 'diffNPhotoP50'] = nphotos[3] - poi_info.loc[p0, 'nPhotoP50']
df_.loc[idx, 'diffNPhotoP90'] = nphotos[4] - poi_info.loc[p0, 'nPhotoP90']
df_.loc[idx, 'diffDurationTotal'] = durations[0] - poi_info.loc[p0, 'durationTotal']
df_.loc[idx, 'diffDurationMean'] = durations[1] - poi_info.loc[p0, 'durationMean']
df_.loc[idx, 'diffDurationP10'] = durations[2] - poi_info.loc[p0, 'durationP10']
df_.loc[idx, 'diffDurationP50'] = durations[3] - poi_info.loc[p0, 'durationP50']
df_.loc[idx, 'diffDurationP90'] = durations[4] - poi_info.loc[p0, 'durationP90']
df_.loc[idx, 'distance'] = dat_obj.POI_DISTMAT.loc[poi, p0]
# features other than category and neighbourhood
# X = df_[sorted(set(df_.columns) - {'category', 'neighbourhood'})].values
X = df_[FEATURES[2:]].values
# boolean features: category (+1, -1)
cat_features = np.vstack([list(df_.loc[x, 'category']) for x in df_.index])
# boolean features: neighbourhood (+1, -1)
neigh_features = np.vstack([list(df_.loc[x, 'neighbourhood']) for x in df_.index])
return np.hstack([cat_features, neigh_features, X]).astype(np.float)
def calc_edge_features(trajid_list, poi_list, poi_info, dat_obj, log_transition=LOG_TRANSITION):
"""
    Compute edge features (transition / pairwise)
"""
feature_names = ['poiCat', 'popularity', 'nVisit', 'durationMean', 'clusterID']
n_features = len(feature_names)
# DEBUG: use uniform edge features
# return np.ones((len(poi_list), len(poi_list), n_features), dtype=np.float)
# return np.zeros((len(poi_list), len(poi_list), n_features), dtype=np.float)
transmat_cat = dat_obj.gen_transmat_cat(trajid_list, poi_info)
transmat_pop = dat_obj.gen_transmat_pop(trajid_list, poi_info)
transmat_visit = dat_obj.gen_transmat_visit(trajid_list, poi_info)
transmat_duration = dat_obj.gen_transmat_duration(trajid_list, poi_info)
transmat_neighbor = dat_obj.gen_transmat_neighbor(trajid_list, poi_info)
poi_features = pd.DataFrame(data=np.zeros((len(poi_list), len(feature_names))),
columns=feature_names, index=poi_list)
poi_features.index.name = 'poiID'
poi_features['poiCat'] = poi_info.loc[poi_list, 'poiCat']
poi_features['popularity'] = np.digitize(poi_info.loc[poi_list, 'popularity'], dat_obj.LOGBINS_POP)
poi_features['nVisit'] = np.digitize(poi_info.loc[poi_list, 'nVisit'], dat_obj.LOGBINS_VISIT)
poi_features['durationMean'] = np.digitize(poi_info.loc[poi_list, 'durationMean'], dat_obj.LOGBINS_DURATION)
poi_features['clusterID'] = dat_obj.POI_CLUSTERS.loc[poi_list, 'clusterID']
edge_features = np.zeros((len(poi_list), len(poi_list), n_features), dtype=np.float64)
for j in range(len(poi_list)): # NOTE: POI order
pj = poi_list[j]
cat, pop = poi_features.loc[pj, 'poiCat'], poi_features.loc[pj, 'popularity']
visit, cluster = poi_features.loc[pj, 'nVisit'], poi_features.loc[pj, 'clusterID']
duration = poi_features.loc[pj, 'durationMean']
for k in range(len(poi_list)): # NOTE: POI order
pk = poi_list[k]
edge_features[j, k, :] = np.array([
transmat_cat.loc[cat, poi_features.loc[pk, 'poiCat']],
transmat_pop.loc[pop, poi_features.loc[pk, 'popularity']],
transmat_visit.loc[visit, poi_features.loc[pk, 'nVisit']],
transmat_duration.loc[duration, poi_features.loc[pk, 'durationMean']],
transmat_neighbor.loc[cluster, poi_features.loc[pk, 'clusterID']]])
if log_transition is True:
return np.log10(edge_features)
else:
return edge_features
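# --- Added toy sketch (illustrative, not part of the original module) ---
# Shapes of one (x, y) example consumed by MyModel.joint_feature, assuming
# 3 POIs, 2 node features and 1 edge feature (all values hypothetical).
if __name__ == '__main__':
    unary = np.zeros((3, 2))  # n_POIs x n_features
    pairwise = np.zeros((3, 3, 1))  # n_POIs x n_POIs x n_edge_features
    x_toy = (unary, pairwise, (0, 2))  # query = (start POI index, length)
    y_toy = np.array([0, 2])  # trajectory as POI indices
    toy_model = MyModel(inference_train=None, inference_pred=None,
                        share_params=True, multi_label=False,
                        n_states=3, n_features=2, n_edge_features=1)
    # with shared params the joint feature has n_features + n_edge_features entries
    assert toy_model.joint_feature(x_toy, y_toy).shape == (3,)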
|
cdawei/digbeta
|
dchen/tour/src/ssvm_nf.py
|
Python
|
gpl-3.0
| 19,817
|
[
"VisIt"
] |
a8f6327b96bbdf740664577a1cb231ac82c9a00b179bf6d7d7456b194c039325
|
"""Base Tornado handlers for the notebook.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import datetime
import email.utils
import hashlib
import logging
import mimetypes
import os
import stat
import threading
from tornado import web
from tornado import websocket
try:
from tornado.log import app_log
except ImportError:
app_log = logging.getLogger()
from IPython.config import Application
from IPython.external.decorator import decorator
from IPython.utils.path import filefind
#-----------------------------------------------------------------------------
# Monkeypatch for Tornado <= 2.1.1 - Remove when no longer necessary!
#-----------------------------------------------------------------------------
# Google Chrome, as of release 16, changed its websocket protocol number. The
# parts tornado cares about haven't really changed, so it's OK to continue
# accepting Chrome connections, but as of Tornado 2.1.1 (the currently released
# version as of Oct 30/2011) the version check fails, see the issue report:
# https://github.com/facebook/tornado/issues/385
# This issue has been fixed in Tornado post 2.1.1:
# https://github.com/facebook/tornado/commit/84d7b458f956727c3b0d6710
# Here we manually apply the same patch as above so that users of IPython can
# continue to work with an officially released Tornado. We make the
# monkeypatch version check as narrow as possible to limit its effects; once
# Tornado 2.1.1 is no longer found in the wild we'll delete this code.
import tornado
if tornado.version_info <= (2,1,1):
def _execute(self, transforms, *args, **kwargs):
from tornado.websocket import WebSocketProtocol8, WebSocketProtocol76
self.open_args = args
self.open_kwargs = kwargs
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
self.ws_connection = WebSocketProtocol8(self)
self.ws_connection.accept_connection()
elif self.request.headers.get("Sec-WebSocket-Version"):
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 8\r\n\r\n"))
self.stream.close()
else:
self.ws_connection = WebSocketProtocol76(self)
self.ws_connection.accept_connection()
websocket.WebSocketHandler._execute = _execute
del _execute
#-----------------------------------------------------------------------------
# Top-level handlers
#-----------------------------------------------------------------------------
class RequestHandler(web.RequestHandler):
"""RequestHandler with default variable setting."""
def render(*args, **kwargs):
kwargs.setdefault('message', '')
return web.RequestHandler.render(*args, **kwargs)
class AuthenticatedHandler(RequestHandler):
"""A RequestHandler with an authenticated user."""
def clear_login_cookie(self):
self.clear_cookie(self.cookie_name)
def get_current_user(self):
user_id = self.get_secure_cookie(self.cookie_name)
# For now the user_id should not return empty, but it could eventually
if user_id == '':
user_id = 'anonymous'
if user_id is None:
# prevent extra Invalid cookie sig warnings:
self.clear_login_cookie()
if not self.login_available:
user_id = 'anonymous'
return user_id
@property
def cookie_name(self):
default_cookie_name = 'username-{host}'.format(
host=self.request.host,
).replace(':', '-')
return self.settings.get('cookie_name', default_cookie_name)
@property
def password(self):
"""our password"""
return self.settings.get('password', '')
@property
def logged_in(self):
"""Is a user currently logged in?
"""
user = self.get_current_user()
return (user and not user == 'anonymous')
@property
def login_available(self):
"""May a user proceed to log in?
This returns True if login capability is available, irrespective of
whether the user is already logged in or not.
"""
return bool(self.settings.get('password', ''))
class IPythonHandler(AuthenticatedHandler):
"""IPython-specific extensions to authenticated handling
Mostly property shortcuts to IPython-specific settings.
"""
@property
def config(self):
return self.settings.get('config', None)
@property
def log(self):
"""use the IPython log by default, falling back on tornado's logger"""
if Application.initialized():
return Application.instance().log
else:
return app_log
@property
def use_less(self):
"""Use less instead of css in templates"""
return self.settings.get('use_less', False)
#---------------------------------------------------------------
# URLs
#---------------------------------------------------------------
@property
def ws_url(self):
"""websocket url matching the current request
By default, this is just `''`, indicating that it should match
the same host, protocol, port, etc.
"""
return self.settings.get('websocket_url', '')
@property
def mathjax_url(self):
return self.settings.get('mathjax_url', '')
@property
def base_project_url(self):
return self.settings.get('base_project_url', '/')
@property
def base_kernel_url(self):
return self.settings.get('base_kernel_url', '/')
#---------------------------------------------------------------
# Manager objects
#---------------------------------------------------------------
@property
def kernel_manager(self):
return self.settings['kernel_manager']
@property
def notebook_manager(self):
return self.settings['notebook_manager']
@property
def cluster_manager(self):
return self.settings['cluster_manager']
@property
def project(self):
return self.notebook_manager.notebook_dir
#---------------------------------------------------------------
# template rendering
#---------------------------------------------------------------
def get_template(self, name):
"""Return the jinja template object for a given name"""
return self.settings['jinja2_env'].get_template(name)
def render_template(self, name, **ns):
ns.update(self.template_namespace)
template = self.get_template(name)
return template.render(**ns)
@property
def template_namespace(self):
return dict(
base_project_url=self.base_project_url,
base_kernel_url=self.base_kernel_url,
logged_in=self.logged_in,
login_available=self.login_available,
use_less=self.use_less,
)
class AuthenticatedFileHandler(IPythonHandler, web.StaticFileHandler):
"""static files should only be accessible when logged in"""
@web.authenticated
def get(self, path):
return web.StaticFileHandler.get(self, path)
#-----------------------------------------------------------------------------
# File handler
#-----------------------------------------------------------------------------
# to minimize subclass changes:
HTTPError = web.HTTPError
class FileFindHandler(web.StaticFileHandler):
"""subclass of StaticFileHandler for serving files from a search path"""
_static_paths = {}
# _lock is needed for tornado < 2.2.0 compat
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
if isinstance(path, basestring):
path = [path]
self.roots = tuple(
os.path.abspath(os.path.expanduser(p)) + os.path.sep for p in path
)
self.default_filename = default_filename
@classmethod
def locate_file(cls, path, roots):
"""locate a file to serve on our static file search path"""
with cls._lock:
if path in cls._static_paths:
return cls._static_paths[path]
try:
abspath = os.path.abspath(filefind(path, roots))
except IOError:
# empty string should always give exists=False
return ''
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (abspath + os.path.sep).startswith(roots):
raise HTTPError(403, "%s is not in root static directory", path)
cls._static_paths[path] = abspath
return abspath
def get(self, path, include_body=True):
path = self.parse_url_path(path)
# begin subclass override
abspath = self.locate_file(path, self.roots)
# end subclass override
if os.path.isdir(abspath) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect(self.request.path + "/")
return
abspath = os.path.join(abspath, self.default_filename)
if not os.path.exists(abspath):
raise HTTPError(404)
if not os.path.isfile(abspath):
raise HTTPError(403, "%s is not a file", path)
stat_result = os.stat(abspath)
modified = datetime.datetime.utcfromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
cache_time = self.get_cache_time(path, modified, mime_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() + \
datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age=" + str(cache_time))
else:
self.set_header("Cache-Control", "public")
self.set_extra_headers(path)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= modified:
self.set_status(304)
return
with open(abspath, "rb") as file:
data = file.read()
hasher = hashlib.sha1()
hasher.update(data)
self.set_header("Etag", '"%s"' % hasher.hexdigest())
if include_body:
self.write(data)
else:
assert self.request.method == "HEAD"
self.set_header("Content-Length", len(data))
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
This method may be overridden in subclasses (but note that it
is a class method rather than a static method). The default
implementation uses a hash of the file's contents.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
"""
# begin subclass override:
static_paths = settings['static_path']
if isinstance(static_paths, basestring):
static_paths = [static_paths]
roots = tuple(
os.path.abspath(os.path.expanduser(p)) + os.path.sep for p in static_paths
)
try:
abs_path = filefind(path, roots)
except IOError:
app_log.error("Could not find static file %r", path)
return None
# end subclass override
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
f = open(abs_path, "rb")
hashes[abs_path] = hashlib.md5(f.read()).hexdigest()
f.close()
except Exception:
app_log.error("Could not open static file %r", path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh[:5]
return None
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
#-----------------------------------------------------------------------------
# URL to handler mappings
#-----------------------------------------------------------------------------
default_handlers = []
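# --- Added usage sketch (illustrative, not part of IPython) ---
# Wiring FileFindHandler into a route spec with a two-directory search path;
# the directories below are hypothetical.
example_handlers = [
    (r"/static/(.*)", FileFindHandler,
     {'path': ['/srv/app/static', '/srv/shared/static']}),
]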
|
marcoantoniooliveira/labweb
|
oscar/lib/python2.7/site-packages/IPython/html/base/handlers.py
|
Python
|
bsd-3-clause
| 14,220
|
[
"Brian"
] |
8df8dda09efe3c8ee4411dbf83a9ebe868e76b507743cca6eb362e46c136ae75
|
# Python3 code
# Made by: Yan Zhiqiang & Tereza Jerabkova
# An example file that constructs the galaxy-wide IMF according to the input parameters in the file "input_parameters.txt"
# The outputs of this example are:
# - the comparison plot of galaxy-wide IMF, canonical IMF, and the histogram of stellar masses (optional);
# - the txt file containing the galaxy-wide IMF.
# - the txt file containing the number of stars in each mass bin (optional);
# --------------------------------------------------------------------------------------------------------------------------------
# Import modules and libraries
# --------------------------------------------------------------------------------------------------------------------------------
import galimf # galIMF containing IGIMF function and OSGIMF function and additional computational modules
import numpy as np
import math
import time
import sys
# --------------------------------------------------------------------------------------------------------------------------------
# Import parameters from file or inputted by hand:
# --------------------------------------------------------------------------------------------------------------------------------
if len(sys.argv) == 1:
print("Input parameters from the file 'input_parameters.txt':")
file = open('input_parameters.txt', 'r')
data_txt = file.readlines()
file.close()
data = [x for x in data_txt[0].split()]
SFR = float(data[0])
M_over_H = float(data[1])
gwIMF_model = data[2]
OSrequest = data[3]
M_str_L = float(data[4])
M_str_U = float(data[5])
elif len(sys.argv) < 7:
print("There needs to be none or 6 input arguments, being:\n"
"SFR, Metallicity, gwIMF model, OSGIMF, Lowest stellar mass, Highest stellar mass\n"
"You can input 'D' to apply the default parameter value:\n"
"1, 0, IGIMF_Z, 0, 0.08, 150\n"
"If there are no input parameters, the program will look for the input from file.\n")
else:
print("Input parameters:")
if sys.argv[1] == "D" or sys.argv[1] == "d":
SFR = 1
else:
SFR = float(sys.argv[1])
if sys.argv[2] == "D" or sys.argv[2] == "d":
M_over_H = 0
else:
M_over_H = float(sys.argv[2])
if sys.argv[3] == "D" or sys.argv[3] == "d":
gwIMF_model = "IGIMF_Z"
else:
gwIMF_model = sys.argv[3]
OSrequest = sys.argv[4]
if sys.argv[5] == "D" or sys.argv[5] == "d":
M_str_L = 0.08
else:
M_str_L = float(sys.argv[5])
if sys.argv[6] == "D" or sys.argv[6] == "d":
M_str_U = 150
else:
M_str_U = float(sys.argv[6])
print("SFR={}, M_over_H={}, gwIMF_model={}, OSrequest={}, M_str_L={}, M_str_U={}, ".format(SFR, M_over_H, gwIMF_model, OSrequest, M_str_L, M_str_U))
bindw = galimf.resolution_histogram_relative = 10 ** (max((0 - math.log(SFR, 10)), 0) ** 0.2 - 1.9)
# Automatically adjust the histogram resolution used for optimal sampling according to the SFR value.
if gwIMF_model == "IGIMF3":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 1 # 1 # see file 'galimf.py'
alpha1_model = 1 # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF_Z":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'Z' # 1 # see file 'galimf.py'
alpha1_model = 'Z' # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF2d5":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'IGIMF2.5' # 1 # see file 'galimf.py'
alpha1_model = 'IGIMF2.5' # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF2":
alpha3_model = 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 0 # 1 # see file 'galimf.py'
alpha1_model = 0 # 0 # see file 'galimf.py'
beta_model = 1
R14orNOT = False
elif gwIMF_model == "IGIMF_R14":
alpha3_model = 'R14' # 'R14' # 2 # 1 # IMF high-mass-end power-index model, see Function_alpha_3_change in file 'galimf.py'
alpha_2 = 2.3 # IMF middle-mass power-index
alpha_1 = 1.3 # IMF low-mass-end power-index
alpha2_model = 'R14' # 'R14' # 1 # see file 'galimf.py'
alpha1_model = 0 # 0 # see file 'galimf.py'
beta_model = 0
R14orNOT = True
# alpha3_model = 1 # IMF high-mass-end power-index model, see file 'galimf.py'
# alpha_2 = 2.3 # IMF middle-mass power-index
# alpha_1 = 1.3 # IMF low-mass-end power-index
# alpha2_model = 1 # see file 'galimf.py'
# alpha1_model = 1 # see file 'galimf.py'
# beta_model = 1
# M_str_L = 0.08 # star mass lower limit [solar mass]
# M_str_U = 150 # star mass upper limit [solar mass]
M_turn = 0.5 # IMF power-index breaking mass [solar mass]
M_turn2 = 1. # IMF power-index breaking mass [solar mass]
M_ecl_U = 10**9 # embedded cluster mass upper limit [solar mass]
M_ecl_L = 5. # embedded cluster mass lower limit [solar mass]
delta_t = 10. # star formation epoch [Myr]
I_ecl = 1. # normalization factor in the Optimal Sampling condition equation
I_str = 1. # normalization factor in the Optimal Sampling condition equation
# --------------------------------------------------------------------------------------------------------------------------------
# Construct IGIMF:
# --------------------------------------------------------------------------------------------------------------------------------
# print("\n Calculating galaxy-wide IMF......")
start_time = time.time()
galimf.function_galimf(
"I", # IorS ### "I" for IGIMF; "OS" for OSGIMF
'IGIMF', # 'R14'
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'galimf.py'
delta_t, # star formation epoch [Myr]
M_over_H,
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, # ECMF power-index model, see file 'galimf.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'galimf.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'galimf.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout=True # save the generated IMF
)
masses_igimf = np.array(galimf.List_M_str_for_xi_str)
igimf = np.array(galimf.List_xi)
# --------------------------------------------------------------------------------------------------------------------------------
# Normalization:
# --------------------------------------------------------------------------------------------------------------------------------
# igimf is normalized by default to a total mass formed in 10 Myr given the SFR
# to change the normalization follow the commented part of a code
# Norm = simps(igimf*masses_igimf,masses_igimf) #- normalization to a total mass
# Norm = simps(igimf,masses_igimf) #- normalization to number of stars
# Mtot1Myr = SFR*10*1.e6 #total mass formed in 10 Myr
# igimf = np.array(igimf)*Mtot1Myr/Norm
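# --- Added sketch (illustrative): a number-normalised copy of the gwIMF,
# following the commented recipe above; stored under a new name so the
# default mass normalisation used below is unchanged.
from scipy.integrate import simps
igimf_number_normalised = igimf / simps(igimf, masses_igimf)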
# --------------------------------------------------------------------------------------------------------------------------------
# Construct OSGIMF if required by interactive input:
# --------------------------------------------------------------------------------------------------------------------------------
if OSrequest == "y" or OSrequest == "Y" or OSrequest == "yes" or OSrequest == "Yes" or OSrequest == "1":
resol = 0.2/(10**(math.log(SFR, 10)/8))
galimf.resolution_histogram_relative = bindw / resol
start_time = time.time()
galimf.function_galimf(
"OS", # IorS ### "I" for IGIMF; "OS" for OSGIMF
'IGIMF', # 'R14'
SFR, # Star Formation Rate [solar mass / yr]
alpha3_model, # IMF high-mass-end power-index model, see file 'galimf.py'
delta_t, # star formation epoch [Myr]
M_over_H,
I_ecl, # normalization factor in the Optimal Sampling condition equation
M_ecl_U, # embedded cluster mass upper limit [solar mass]
M_ecl_L, # embedded cluster mass lower limit [solar mass]
beta_model, # ECMF power-index model, see file 'galimf.py'
I_str, # normalization factor in the Optimal Sampling condition equation
M_str_L, # star mass lower limit [solar mass]
alpha_1, # IMF low-mass-end power-index
alpha1_model, # see file 'galimf.py'
M_turn, # IMF power-index change point [solar mass]
alpha_2, # IMF middle-mass power-index
alpha2_model, # see file 'galimf.py'
M_turn2, # IMF power-index change point [solar mass]
M_str_U, # star mass upper limit [solar mass]
printout=True # save the generated OSGIMF
)
    # The number of stars in each mass bin under optimal sampling can be read from the following lists:
mass_range_center = galimf.mass_range_center
mass_range = galimf.mass_range
mass_range_upper_limit = galimf.mass_range_upper_limit
mass_range_lower_limit = galimf.mass_range_lower_limit
star_number = galimf.star_number
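    # sort all five parallel lists together, keyed on ascending mass-bin center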
mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number = zip(
*sorted(zip(mass_range_center, mass_range, mass_range_upper_limit, mass_range_lower_limit, star_number)))
masses_osgimf = np.array(galimf.List_mass_grid_x_axis) + 1.e-50
osgimf = np.array(galimf.List_star_number_in_mass_grid_y_axis) + 1.e-50
# --------------------------------------------------------------------------------------------------------------------------------
# # Plot
# --------------------------------------------------------------------------------------------------------------------------------
# import matplotlib.pyplot as plt # matplotlib for plotting
# from scipy.integrate import quad
#
# fig0 = plt.figure(figsize=(3.4, 2.5))
# plt.plot(np.log10(masses_igimf + 1.e-50), np.log10(igimf + 1.e-50), color='blue', lw=2.5, label='Galaxy-wide IMF')
# ylim_min = np.min(igimf + 1.e-50)
# ylim_max = np.max(igimf + 1.e-50)
# plt.ylim(np.log10(ylim_min), np.log10(ylim_max))
# if OSrequest == "y" or OSrequest == "Y" or OSrequest == "yes" or OSrequest == "Yes" or OSrequest == "1":
# plt.plot(np.log10(masses_osgimf), np.log10(osgimf), color='green', lw=2.5, label='Stellar mass histogram')
#
# for k in range(20):
# sal_IMF = masses_igimf ** (-2.3)
# plt.plot(np.log10(masses_igimf), np.log10((1.e5*np.max(igimf)/np.max(sal_IMF))*sal_IMF)-k, c='grey', lw=0.5)
#
# N = 100
# can_imf = np.zeros(N)
# masses_igimf = np.logspace(np.log10(0.08), np.log10(120), N, base=10)
#
# for i, m in enumerate(masses_igimf):
# if m <= 0.5:
# can_imf[i] = m ** (-1.3)
# else:
# can_imf[i] = 0.5*m ** (-2.3)
#
#
# def imf(mass, k_star, alpha):
# return k_star*mass*mass**(-alpha)
#
#
# Norm = quad(imf, 0.08, 0.5, args=(1, 1.3))[0] + quad(imf, 0.5, 120, args=(0.5, 2.3))[0]
# Mtot1Myr = SFR*10*1.e6 # total mass formed in 10 Myr
# can_imf = np.array(can_imf)*Mtot1Myr/Norm
# plt.plot(np.log10(masses_igimf), np.log10(can_imf), color='r', lw=2, label='canonical IMF')
#
# if ylim_max < np.max(can_imf):
# ylim_max = np.max(can_imf)
#
# plt.xlabel('$\log{(m\,[M_{\odot}])}$')
# plt.ylabel('$\log{(\\xi_{\mathrm{gal}}\,[M_{\odot}^{-1}])}$')
#
# plt.ylim(np.log10(ylim_min), np.log10(ylim_max))
# plt.xlim(math.log(0.06, 10), math.log(160, 10))
#
# plt.legend(loc='best', ncol=1, fancybox=True, prop={'size': 7})
# plt.tight_layout()
# fig0.savefig('galaxy_wide_IMF_plot.pdf', dpi=250)
#
# plt.show()
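# A hedged post-processing sketch: because igimf is normalized to the total
# mass formed during the 10 Myr star formation epoch, integrating xi(m) dm
# gives star counts. For instance, the number of stars above 8 solar masses
# (approximate supernova progenitors) could be estimated as:
#
#   sn_mask = masses_igimf > 8.
#   n_sn = np.trapz(igimf[sn_mask], masses_igimf[sn_mask])
#   print("Estimated number of stars above 8 Msun:", n_sn)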
|
Azeret/galIMF
|
igimf_calculator.py
|
Python
|
gpl-3.0
| 12,511
|
[
"Galaxy"
] |
d02b3b528b550b73283f64d32d4df1aa218f3c51cca438d9a8463c3b7474cd22
|
#! /usr/bin/env python
import sys
from optparse import OptionParser
from datetime import datetime
from itaps import iMesh, iBase
def get_centers_and_vals(filename, outfile, valtagname, errtagname, plotnum, \
header):
"""
"""
imesh = iMesh.Mesh()
imesh.load(filename)
cnt = 0
taltag = imesh.getTagHandle(valtagname)
errtag = imesh.getTagHandle(errtagname)
with open(outfile, 'w') as fw:
# write metadata/comment line
if header:
fw.write("# File created: {0} From: {1}\n".format( \
datetime.now().strftime("%D %H:%M"), filename))
fw.write("# {0:<12}\t{1:<12}\t{2:<12}\t{3:<12}\t{4:<12}\n".format( \
"x", "y", "z", "tally", "error"))
#
for tet in imesh.iterate(iBase.Type.region, iMesh.Topology.tetrahedron):
vtxs = imesh.getVtxCoords(imesh.getEntAdj(tet, iBase.Type.vertex))
# calculate centroid
centroid = list()
for idx in xrange(3):
centroid.append(sum([vtx[idx] for vtx in vtxs])/4.0)
# Write output
fw.write("{0:<12}\t{1:<12}\t{2:<12}\t{3:<12}\t{4:<12}\n".format( \
centroid[0], centroid[1], centroid[2], \
taltag[tet], errtag[tet]))
cnt += 1
# Optionally plot points of a tet for verification
if cnt == plotnum:
print "Example centroid...\n"
print "For tet with coords\n{0}\n".format(vtxs)
print "We find the centroid to be:\n{0}\n".format(centroid)
plot(vtxs, centroid)
print "{0} tetrahedrons found\nTet center coordinates and tally value/" \
"errors written to: {1}".format(cnt, outfile)
def plot(vtxs, centroid):
"""Creates 3D scatter plot of vertices and centroid of a single tetrahedron.
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = [vtx[0] for vtx in vtxs]
ys = [vtx[1] for vtx in vtxs]
zs = [vtx[2] for vtx in vtxs]
centX = centroid[0]
centY = centroid[1]
centZ = centroid[2]
# plotting
(c, m) = ('r', 'o')
ax.scatter(centX, centY, centZ, c=c, marker=m)
(c, m) = ('b', '.')
ax.scatter(xs, ys, zs, c=c, marker=m)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def main():
"""
"""
usage = "usage: %prog input-moab-file [options]\n\n" \
"Script reads in a moab file (e.g. .vtk or .h5m), and outputs a file " \
"listing the centers of all tetrahedron voxels, and the " \
"corresponding values (e.g. tally and error) tagged to the voxels."
parser = OptionParser(usage)
#
parser.add_option("-o","--output",action="store",dest="outfile", \
default="tet_centers_output",help="Output file name for " \
"resulting file. Default: %default")
#
parser.add_option("-t","--taltag",action="store",dest="taltag", \
default="TALLY_TAG",help="Tag name for tally values. " \
"Default: %default")
#
parser.add_option("-e","--errtag",action="store",dest="errtag", \
default="ERROR_TAG",help="Tag name for error values. " \
"Default: %default")
# Include header
parser.add_option("-H","--header",action="store_false",dest="header", \
default=True,help="Option disables header line at top of output. " \
"Default: %default")
# Visual inspection
parser.add_option("-p","--plot",action="store",dest="plotnum", \
default=-1,help="Plot the vertices and centroid of the " \
"n'th tet. Default: n=-1 (doesn't plot)")
(options, args) = parser.parse_args()
get_centers_and_vals(args[0], options.outfile, options.taltag, \
options.errtag, int(options.plotnum), options.header)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "must pass a filename."
else:
main()
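# Example invocation (file name 'flux_mesh.h5m' is illustrative; the mesh must
# carry the default TALLY_TAG/ERROR_TAG tags or the -t/-e options must be set):
#
#   python tet_vals_and_centroids.py flux_mesh.h5m -o tet_centers.txt -p 1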
|
svalinn/meshtools
|
tetmesh2points/tet_vals_and_centroids.py
|
Python
|
bsd-2-clause
| 4,156
|
[
"VTK"
] |
5be3df534926eeacb7e96f7154016c05d6e3eea1de28afa560eabc4687f4f5e9
|
import cellprofiler.icons
from cellprofiler.gui.help import PROTIP_RECOMEND_ICON, PROTIP_AVOID_ICON, TECH_NOTE_ICON
__doc__ = '''<b>Export To Database</b> exports data directly to a database, or in
database readable format, including an imported file
with column names and a CellProfiler Analyst properties file, if desired.
<hr>
This module exports measurements directly to a database or to a SQL-compatible format.
It allows you to create and import MySQL and associated data files into a
database and gives you the option of creating
a properties file for use with CellProfiler Analyst. Optionally, you can create
an SQLite database file if you do not have a server on which to run MySQL itself.
This module must be run at the end of a pipeline, or second to last if
you are using the <b>CreateBatchFiles</b> module. If you forget this module, you
can also run the <i>ExportDatabase</i> data tool after processing is complete;
its functionality is the same.
The database is set up with two primary tables. These tables are the
<i>Per_Image</i> table and the <i>Per_Object</i> table (which may have a prefix if you
specify):
<ul>
<li>The Per_Image table consists of all the per-image measurements made during the pipeline, plus
per-image population statistics (such as mean, median, and standard deviation) of the object measurements. There is one
per_image row for every "cycle" that CellProfiler processes (a cycle is usually a single field of view, and a single cycle
usually contains several image files, each representing a different channel of the same field of view). </li>
<li>The Per_Object table contains all the
measurements for individual objects. There is one row of object
measurements per object identified. The two tables are connected with the
primary key column <i>ImageNumber</i>, which indicates the image to which each object belongs. The Per_Object table has another primary
key called <i>ObjectNumber</i>, which is unique to each image. </li>
</ul>
Typically, if multiple types of objects are identified and measured in a pipeline,
the numbers of those objects are equal to each other. For example, in most pipelines, each nucleus has exactly one cytoplasm, so the first row
of the Per-Object table contains all of the information about object #1, including both nucleus- and cytoplasm-related measurements. If this
one-to-one correspondence is <i>not</i> the case for all objects in the pipeline (for example, if dozens of speckles are identified and
measured for each nucleus), then you must configure <b>ExportToDatabase</b> to export only objects that maintain the one-to-one correspondence
(for example, export only <i>Nucleus</i> and <i>Cytoplasm</i>, but omit <i>Speckles</i>).
If you have extracted "Plate" and "Well" metadata from image filenames or loaded "Plate" and "Well" metadata via the <b>Metadata</b>
or <b>LoadData</b> modules, you can ask CellProfiler to create a "Per_Well" table, which aggregates object measurements across wells.
This option will output a SQL file (regardless of whether you choose to write directly to the database)
that can be used to create the Per_Well table. At the secure shell where you normally log in to MySQL, type
the following, replacing the italics with references to your database and files:
<tt>mysql -h <i>hostname</i> -u <i>username</i> -p <i>databasename</i> < <i>pathtoimages/perwellsetupfile.SQL</i></tt>
The commands written by CellProfiler to create the Per_Well table will be executed.
Oracle is not fully supported at present; you can create your own Oracle DB using
the .csv output option and writing a simple script to upload to the database.
<h4>Available measurements</h4>
For details on the nomenclature used by CellProfiler for the exported measurements,
see <i>Help > General Help > How Measurements Are Named</i>.
See also <b>ExportToSpreadsheet</b>.
'''
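# A hedged sketch of joining the two primary tables described above (table
# names assume no user-specified prefix; the measurement column is illustrative):
#
#   SELECT i.ImageNumber, o.ObjectNumber, o.Nuclei_AreaShape_Area
#   FROM Per_Image i
#   JOIN Per_Object o ON i.ImageNumber = o.ImageNumber;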
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import csv
import datetime
import hashlib
import logging
import numpy as np
import os
import random
import re
import sys
import traceback
logger = logging.getLogger(__name__)
try:
import MySQLdb
from MySQLdb.cursors import SSCursor
HAS_MYSQL_DB=True
except:
logger.warning("MySQL could not be loaded.", exc_info=True)
HAS_MYSQL_DB=False
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.preferences as cpprefs
import cellprofiler.measurements as cpmeas
from cellprofiler.pipeline import GROUP_INDEX, M_MODIFICATION_TIMESTAMP
from identify import M_NUMBER_OBJECT_NUMBER
from cellprofiler.modules.loadimages import C_FILE_NAME, C_PATH_NAME
from cellprofiler.gui.help import USING_METADATA_TAGS_REF, USING_METADATA_HELP_REF, USING_METADATA_GROUPING_HELP_REF
from cellprofiler.preferences import \
standardize_default_folder_names, DEFAULT_INPUT_FOLDER_NAME, \
DEFAULT_OUTPUT_FOLDER_NAME, DEFAULT_INPUT_SUBFOLDER_NAME, \
DEFAULT_OUTPUT_SUBFOLDER_NAME, ABSOLUTE_FOLDER_NAME, \
IO_FOLDER_CHOICE_HELP_TEXT, IO_WITH_METADATA_HELP_TEXT
##############################################
#
# Keyword for the cached measurement columns
#
##############################################
D_MEASUREMENT_COLUMNS = "MeasurementColumns"
'''The column name for the image number column'''
C_IMAGE_NUMBER = "ImageNumber"
'''The column name for the object number column'''
C_OBJECT_NUMBER = "ObjectNumber"
D_IMAGE_SET_INDEX = "ImageSetIndex"
'''The thumbnail category'''
C_THUMBNAIL = "Thumbnail"
##############################################
#
# Database options for the db_type setting
#
##############################################
DB_MYSQL = "MySQL"
DB_ORACLE = "Oracle"
DB_SQLITE = "SQLite"
DB_MYSQL_CSV = "MySQL / CSV"
##############################################
#
# Choices for which objects to include
#
##############################################
'''Put all objects in the database'''
O_ALL = "All"
'''Don't put any objects in the database'''
O_NONE = "None"
'''Select the objects you want from a list'''
O_SELECT = "Select..."
##############################################
#
# Choices for properties file
#
##############################################
NONE_CHOICE = "None"
PLATE_TYPES = [NONE_CHOICE,"6","24","96","384","1536","5600"]
COLOR_ORDER = ["red", "green", "blue", "cyan", "magenta", "yellow", "gray", "none"]
GROUP_COL_DEFAULT = "ImageNumber, Image_Metadata_Plate, Image_Metadata_Well"
##############################################
#
# Choices for workspace file
#
##############################################
W_DENSITYPLOT = "DensityPlot"
W_HISTOGRAM = "Histogram"
W_SCATTERPLOT = "ScatterPlot"
W_PLATEVIEWER = "PlateViewer"
W_BOXPLOT = "BoxPlot"
W_DISPLAY_ALL = [W_SCATTERPLOT, W_HISTOGRAM, W_PLATEVIEWER, W_DENSITYPLOT, W_BOXPLOT]
W_INDEX = "Index"
W_TYPE_ALL = [cpmeas.IMAGE, cpmeas.OBJECT, W_INDEX]
W_INDEX_ALL = [C_IMAGE_NUMBER, GROUP_INDEX]
################################################
#
# Choices for overwrite
#
################################################
OVERWRITE_NEVER = "Never"
OVERWRITE_DATA = "Data only"
OVERWRITE_ALL = "Data and schema"
"""Offset of the image group count in the settings"""
SETTING_IMAGE_GROUP_COUNT = 29
"""Offset of the group specification group count in the settings"""
SETTING_GROUP_FIELD_GROUP_COUNT = 30
"""Offset of the filter specification group count in the settings"""
SETTING_FILTER_FIELD_GROUP_COUNT = 31
"""Offset of the workspace specification group count in the settings"""
SETTING_WORKSPACE_GROUP_COUNT = 32
SETTING_OFFSET_PROPERTIES_IMAGE_URL_PREPEND_V26 = 21
SETTING_FIXED_SETTING_COUNT_V21 = 33
SETTING_FIXED_SETTING_COUNT_V22 = 35
SETTING_FIXED_SETTING_COUNT_V23 = 36
SETTING_FIXED_SETTING_COUNT_V24 = 37
SETTING_FIXED_SETTING_COUNT_V25 = 38
SETTING_FIXED_SETTING_COUNT_V26 = 39
SETTING_FIXED_SETTING_COUNT = 39
##############################################
#
# Choices for the output directory
#
##############################################
DIR_CUSTOM = "Custom folder"
DIR_CUSTOM_WITH_METADATA = "Custom folder with metadata"
##############################################
#
# Choices for object table format
#
##############################################
OT_PER_OBJECT = "One table per object type"
OT_COMBINE = "Single object table"
OT_VIEW = "Single object view"
'''Index of the object table format choice in the settings'''
OT_IDX = 17
'''Use this dictionary to keep track of rewording of above if it happens'''
OT_DICTIONARY = {
"One table per object type": OT_PER_OBJECT,
"Single object table": OT_COMBINE,
"Single object view": OT_VIEW
}
from identify import C_PARENT
T_EXPERIMENT = "Experiment"
T_EXPERIMENT_PROPERTIES = "Experiment_Properties"
T_RELATIONSHIPS = "Relationships"
T_RELATIONSHIP_TYPES = "RelationshipTypes"
CONSTRAINT_RT_UNIQUE = "RelationshipTypesUnique"
FK_RELATIONSHIP_TYPE_ID = "RRTypeIdFK"
CONSTRAINT_R_UNIQUE = "RelationshipUnique"
V_RELATIONSHIPS = "RelationshipsView"
I_RELATIONSHIPS1 = "IRelationships1"
I_RELATIONSHIPS2 = "IRelationships2"
COL_RELATIONSHIP_TYPE_ID = "relationship_type_id"
COL_MODULE_NUMBER = "module_number"
COL_RELATIONSHIP = "relationship"
COL_OBJECT_NAME1 = "object_name1"
COL_OBJECT_NAME2 = "object_name2"
COL_IMAGE_NUMBER1 = "image_number1"
COL_IMAGE_NUMBER2 = "image_number2"
COL_OBJECT_NUMBER1 = "object_number1"
COL_OBJECT_NUMBER2 = "object_number2"
def execute(cursor, query, bindings = None, return_result=True):
if bindings == None:
cursor.execute(query)
else:
cursor.execute(query, bindings)
if return_result:
return get_results_as_list(cursor)
def get_results_as_list(cursor):
r = get_next_result(cursor)
l = []
while r:
l.append(r)
r = get_next_result(cursor)
return l
def get_next_result(cursor):
try:
return cursor.next()
except MySQLdb.Error, e:
raise Exception('Error retrieving next result from database: %s' % (e))
except StopIteration, e:
return None
def connect_mysql(host, user, pw, db):
'''Creates and returns a db connection and cursor.'''
connection = MySQLdb.connect(host=host, user=user, passwd=pw, db=db)
cursor = SSCursor(connection)
#
# Use utf-8 encoding for strings
#
connection.set_character_set('utf8')
execute(cursor, "set names 'utf8'")
execute(cursor, "set character set utf8")
execute(cursor, "set character_set_connection=utf8")
return connection, cursor
def connect_sqlite(db_file):
'''Creates and returns a db connection and cursor.'''
import sqlite3
connection = sqlite3.connect(db_file, timeout=30)
cursor = connection.cursor()
return connection, cursor
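# A minimal usage sketch for the connection helpers above (database file name
# and query are illustrative only):
#
#   connection, cursor = connect_sqlite("DefaultDB.db")
#   rows = execute(cursor, "SELECT ImageNumber FROM Per_Image")
#   connection.close()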
class DBContext(object):
'''A database context suitable for the "with" statement
Usage:
assert isinstance(self, ExportToDatabase)
with DBContext(self):
do stuff with self.connection & self.cursor
# cursor and connection are closed. Changes are either committed
# or rolled back depending on exception status
'''
def __init__(self, module):
assert isinstance(module, ExportToDatabase)
self.module = module
def __enter__(self):
if self.module.db_type == DB_MYSQL:
self.connection, self.cursor = connect_mysql(
self.module.db_host.value,
self.module.db_user.value,
self.module.db_passwd.value,
self.module.db_name.value)
elif self.module.db_type == DB_SQLITE:
db_file = self.module.make_full_filename(
self.module.sqlite_file.value)
self.connection, self.cursor = connect_sqlite(db_file)
return self.connection, self.cursor
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.connection.commit()
else:
self.connection.rollback()
self.connection.close()
class ExportToDatabase(cpm.CPModule):
module_name = "ExportToDatabase"
variable_revision_number = 26
category = ["File Processing","Data Tools"]
def create_settings(self):
db_choices = ([DB_MYSQL, DB_MYSQL_CSV, DB_SQLITE] if HAS_MYSQL_DB
else [DB_MYSQL_CSV, DB_SQLITE])
default_db = DB_MYSQL if HAS_MYSQL_DB else DB_MYSQL_CSV
self.db_type = cps.Choice(
"Database type",
db_choices, default_db, doc = """
Specify the type of database you want to use:
<ul>
<li><i>%(DB_MYSQL)s:</i> Writes the data directly to a MySQL
database. MySQL is open-source software; you may require help from
your local Information Technology group to set up a database
server.</li>
<li><i>%(DB_MYSQL_CSV)s:</i> Writes a script file that
contains SQL statements for creating a database and uploading the
Per_Image and Per_Object tables. This option will write out the Per_Image
and Per_Object table data to two CSV files; these files can be used
to import the data directly into an application
that accepts CSV data.</li>
<li><i>%(DB_SQLITE)s:</i> Writes SQLite files directly.
SQLite is simpler to set up than MySQL and
can more readily be run on your local computer rather than requiring a
database server. More information about SQLite can be found
<a href="http://www.sqlite.org/">here</a>. </li>
</ul>
<dl>
<dd><img src="memory:%(TECH_NOTE_ICON)s">
If running this module on a computing cluster, there are a few
considerations to note:
<ul>
<li>The <i>%(DB_MYSQL)s</i> option is well-suited for cluster use, since
multiple jobs can write to the database simultaneously.</li>
<li>The <i>%(DB_SQLITE)s</i> option is not
as appropriate; a SQLite database only allows access by one job at a time.</li>
</ul>
</dd>
</dl>"""%globals())
self.test_connection_button = cps.DoSomething(
"Press this button to test the connection to the remote server using the current settings",
"Test connection", self.test_connection, doc = """
This button tests the connection to the MySQL server specified using
the settings entered by the user.""")
self.db_name = cps.Text(
"Database name", "DefaultDB",doc = """
Select a name for the database you want to use""")
self.experiment_name = cps.Text(
"Experiment name", "MyExpt", doc = """
Select a name for the experiment. This name will be
registered in the database and linked to the tables that
<b>ExportToDatabase</b> creates. You will be able to select the experiment
by name in CellProfiler Analyst and will be able to find the
experiment's tables through database queries.""")
self.want_table_prefix = cps.Binary(
"Add a prefix to table names?", True, doc = """
Select whether you want to add a prefix to your table names. The
default table names are <i>Per_Image</i> for the per-image table and
<i>Per_Object</i> for the per-object table. Adding a prefix can be useful
for bookkeeping purposes.
<ul>
<li>Select <i>%(YES)s</i> to add a user-specified prefix to the default table names.
If you want to distinguish multiple sets of data written to the same
database, you probably want to use a prefix.</li>
<li>Select <i>%(NO)s</i> to use the default table names. For a one-time export of
data, this option is fine. </li>
</ul>
Whether you choose to use a prefix or not, CellProfiler will warn
you if your choice entails overwriting an existing table."""%globals())
self.table_prefix = cps.Text(
"Table prefix", "MyExpt_" , doc = """
<i>(Used if Add a prefix to table names?</i> is selected)<br>
Enter the table prefix you want to use.
<p>MySQL has a 64 character limit on the full name of the table.
If the combination of the table name and prefix exceeds this
limit, you will receive an error associated with this setting.</p>""")
self.sql_file_prefix = cps.Text(
"SQL file prefix", "SQL_", doc = """
<i>(Used if %(DB_MYSQL_CSV)s is selected as the database type)</i><br>
Enter the prefix to be used to name the SQL file."""%globals())
self.directory = cps.DirectoryPath(
"Output file location",
dir_choices = [
DEFAULT_OUTPUT_FOLDER_NAME, DEFAULT_INPUT_FOLDER_NAME,
ABSOLUTE_FOLDER_NAME, DEFAULT_OUTPUT_SUBFOLDER_NAME,
DEFAULT_INPUT_SUBFOLDER_NAME], doc="""
<i>(Used only when using a CSV or a SQLite database, and/or creating a properties or workspace file)</i><br>
This setting determines where the CSV files or SQLite database is saved if
you decide to write measurements to files instead of writing them
directly to the database. If you request a CellProfiler Analyst properties file
or workspace file, it will also be saved to this location. %(IO_FOLDER_CHOICE_HELP_TEXT)s
<p>%(IO_WITH_METADATA_HELP_TEXT)s %(USING_METADATA_TAGS_REF)s<br>
For instance, if you have a metadata tag named
"Plate", you can create a per-plate folder by selecting one of the subfolder options
and then specifying the subfolder name with the "Plate" metadata tag.
The module will substitute the metadata values for the last image set
processed for any metadata tags in the folder name. %(USING_METADATA_HELP_REF)s.</p>"""% globals())
self.directory.dir_choice = DEFAULT_OUTPUT_FOLDER_NAME
self.save_cpa_properties = cps.Binary(
"Create a CellProfiler Analyst properties file?",
False, doc = """
Select <i>%(YES)s</i> to generate a template properties file that will allow you to use
your new database with CellProfiler Analyst (a data
exploration tool which can also be downloaded from
<a href="http://www.cellprofiler.org/">http://www.cellprofiler.org/</a>).
The module will attempt to fill in as many of the entries as possible
based on the pipeline's settings, including the
server name, username and password if MySQL is used."""%globals())
self.location_object = cps.ObjectNameSubscriber(
"Which objects should be used for locations?", cps.NONE, doc = """
<i>(Used only if creating a properties file)</i><br>
CellProfiler Analyst displays cells during classification. This
setting determines which object centers will be used as the center
of the cells to be displayed. Choose one of the listed objects
and CellProfiler will save that object's location columns in
the properties file so that CellProfiler Analyst centers cells
using that object's center.
<p>You can manually change this choice in the properties file by
editing the <i>cell_x_loc</i> and <i>cell_y_loc</i> properties.
</p>
<p>Note that if there are no objects defined in the pipeline (e.g.
if only using MeasureImageQuality and/or Illumination Correction modules),
a warning will display until you choose <i>'None'</i> for the subsequent setting:
'Export measurements for all objects to the database?'.</p>
"""%globals())
self.wants_properties_image_url_prepend = cps.Binary(
"Access CPA images via URL?", False,
doc = """
<i>(Used only if creating a properties file)</i><br>
The image paths written to the database will be the absolute
paths to the image files on your computer. If you plan to make these
files accessible via the web, you can have CellProfiler Analyst prepend
a URL to your file name.
E.g., if an image is loaded from the path "/cellprofiler/images/" and you use
a url prepend of "http://mysite.com/", CellProfiler Analyst will look
for your file at "http://mysite.com/cellprofiler/images/"
""")
#
# Hack: if user is on Broad IP, then plug in the imageweb url prepend
#
import socket
try:
fqdn = socket.getfqdn()
except:
fqdn = '127.0.0.1'
default_prepend = ""
if 'broadinstitute' in fqdn.lower(): # Broad
default_prepend = "http://imageweb/images/CPALinks"
self.properties_image_url_prepend = cps.Text(
"Enter an image url prepend if you plan to access your files via http",
default_prepend, doc = """
<i>(Used only if accessing CellProfiler Analyst images via URL)</i><br>
The image paths written to the database will be the absolute
paths to the image files on your computer. If you plan to make these
files accessible via the web, you can enter a URL prefix here. E.g.,
if an image is loaded from the path "/cellprofiler/images/" and you use
a url prepend of "http://mysite.com/", CellProfiler Analyst will look
for your file at "http://mysite.com/cellprofiler/images/"
<p>If you are not using the web to access your files (i.e., they are locally
accessible by your computer), leave this setting blank.""")
self.properties_plate_type = cps.Choice(
"Select the plate type",
PLATE_TYPES,doc="""
<i>(Used only if creating a properties file)</i><br>
If you are using a multi-well plate or microarray, you can select the plate
type here. Supported types in CellProfiler Analyst are 96- and 384-well plates,
as well as 5600-spot microarrays. If you are not using a plate or microarray, select
<i>None</i>.""")
self.properties_plate_metadata = cps.Choice(
"Select the plate metadata",
["None"],choices_fn = self.get_metadata_choices,doc="""
<i>(Used only if creating a properties file)</i><br>
If you are using a multi-well plate or microarray, you can select the metadata corresponding
to the plate here. If there is no plate metadata associated with the image set, select
<i>None</i>.
<p>%(USING_METADATA_HELP_REF)s.</p>"""% globals())
self.properties_well_metadata = cps.Choice(
"Select the well metadata",
["None"],choices_fn = self.get_metadata_choices,doc="""
<i>(Used only if creating a properties file)</i><br>
If you are using a multi-well plate or microarray, you can select the metadata corresponding
to the well here. If there is no well metadata associated with the image set, select
<i>None</i>.
<p>%(USING_METADATA_HELP_REF)s.</p>"""% globals())
self.properties_export_all_image_defaults = cps.Binary(
"Include information for all images, using default values?", True,doc="""
<i>(Used only if creating a properties file)</i><br>
Select <i>%(YES)s</i> to include information in the properties file for all images. This
option will do the following:
<ul>
<li>All images loaded using the <b>Input</b> modules or saved in <b>SaveImages</b> will be included.</li>
<li>The CellProfiler image name will be used for the <i>image_name</i> field.</li>
<li>A channel color listed in the <i>image_channel_colors</i> field will be assigned to the image by default order.</li>
</ul>
<p>Select <i>%(NO)s</i> to specify which images should be included or to override the automatic values.</p>"""%globals())
self.image_groups = []
self.image_group_count = cps.HiddenCount(self.image_groups,"Properties image group count")
self.add_image_group(False)
self.add_image_button = cps.DoSomething("", "Add another image",
self.add_image_group)
self.properties_wants_groups = cps.Binary(
"Do you want to add group fields?", False,doc = """
<i>(Used only if creating a properties file)</i><br>
<b>Please note that "groups" as defined by CellProfiler Analyst has nothing to do with "grouping" as defined by
CellProfiler in the Groups module.</b>
<p>Select <i>%(YES)s</i> to define a "group" for your image data (for example, when several images represent the same experimental
sample), by mapping the column(s) that identify unique images (the <i>image key</i>) to another set of columns
(the <i>group key</i>).</p>
<p>The format for a group in CPA is:<br>
<code>group_SQL_<XXX> = <MySQL SELECT statement that returns image-key columns followed by group-key columns></code>
For example, if you wanted to be able to group your data by unique plate names, you could define a group called
<i>SQL_Plate</i> as follows:<br>
<code>group_SQL_Plate = SELECT ImageNumber, Image_Metadata_Plate FROM Per_Image</code></p>
<p>Grouping is useful, for example, when you want to aggregate counts for each class of object and their scores
on a per-group basis (e.g., per-well) instead of on a per-image basis when scoring with Classifier. It will
also provide new options in the Classifier fetch menu so you can fetch objects from images with specific
values for the group columns.</p>"""%globals())
self.group_field_groups = []
self.group_field_count = cps.HiddenCount(self.group_field_groups,"Properties group field count")
self.add_group_field_group(False)
self.add_group_field_button = cps.DoSomething("", "Add another group",
self.add_group_field_group)
self.properties_wants_filters = cps.Binary(
"Do you want to add filter fields?", False,doc =
"""<i>(Used only if creating a properties file)</i><br>
Select <i>%(YES)s</i> to specify a subset of the images in your experiment by defining a <i>filter</i>.
Filters are useful, for example, for fetching and scoring objects in Classifier or making graphs using the
plotting tools that satisfy a specific metadata constraint. """%globals())
self.create_filters_for_plates = cps.Binary(
"Automatically create a filter for each plate?",False, doc= """
<i>(Used only if creating a properties file and specifying an image data filter)</i><br>
If you have specified a plate metadata tag, select <i>%(YES)s</i> to create a set of filters
in the properties file, one for each plate."""%globals())
self.filter_field_groups = []
self.filter_field_count = cps.HiddenCount(self.filter_field_groups,"Properties filter field count")
self.add_filter_field_button = cps.DoSomething("", "Add another filter",
self.add_filter_field_group)
self.properties_class_table_name = cps.Text(
"Enter a phenotype class table name if using the classifier tool",
'', doc = """
<i>(Used only if creating a properties file)</i><br>
If you are using the machine-learning tool in CellProfiler Analyst,
you can create an additional table in your database which contains
the per-object phenotype labels. This table is produced after scoring
all the objects in your data set and will be named with the label given here.
Note that the actual class table will be named by prepending the table prefix
(if any) to what you enter here.
<p>You can manually change this choice in the properties file by
editing the <i>class_table</i> field. Leave this field blank if you are
not using the classifier or do not need the table written to the database.</p>""")
self.create_workspace_file = cps.Binary(
"Create a CellProfiler Analyst workspace file?", False, doc = """
Select <i>%(YES)s</i> to generate a workspace file for use with
CellProfiler Analyst, a data exploration tool which can
also be downloaded from <a href="http://www.cellprofiler.org/">
http://www.cellprofiler.org/</a>. A workspace file allows you
to open a selected set of measurements with the display tools
of your choice. This is useful, for example, if you want to examine a standard
set of quality control image measurements for outliers."""%globals())
self.divider = cps.Divider(line=True)
self.divider_props = cps.Divider(line=True)
self.divider_props_wkspace = cps.Divider(line=True)
self.divider_wkspace = cps.Divider(line=True)
self.workspace_measurement_groups = []
self.workspace_measurement_count = cps.HiddenCount(self.workspace_measurement_groups, "Workspace measurement count")
def add_workspace_measurement_group(can_remove = True):
self.add_workspace_measurement_group(can_remove)
add_workspace_measurement_group(False)
self.add_workspace_measurement_button = cps.DoSomething("", "Add another measurement", self.add_workspace_measurement_group)
self.mysql_not_available = cps.Divider("Cannot write to MySQL directly - CSV file output only", line=False,
doc= """The MySQLdb python module could not be loaded. MySQLdb is necessary for direct export.""")
self.db_host = cps.Text("Database host", "")
self.db_user = cps.Text("Username", "")
self.db_passwd = cps.Text("Password", "")
self.sqlite_file = cps.Text(
"Name the SQLite database file",
"DefaultDB.db", doc = """
<i>(Used if SQLite selected as database type)</i><br>
Enter the name of the SQLite database filename to which you want to write.""")
self.wants_agg_mean = cps.Binary(
"Calculate the per-image mean values of object measurements?", True, doc = """
Select <i>%(YES)s</i> for <b>ExportToDatabase</b> to calculate population statistics over all the objects in each image
and store the results in the database. For instance, if
you are measuring the area of the Nuclei objects and you check the box for this option, <b>ExportToDatabase</b> will create a column in the Per_Image
table called "Mean_Nuclei_AreaShape_Area".
<p>You may not want to use <b>ExportToDatabase</b> to calculate these population statistics if your pipeline generates
a large number of per-object measurements; doing so might exceed database
column limits. These columns can be created manually for selected measurements directly in MySQL.
For instance, the following SQL command creates the Mean_Nuclei_AreaShape_Area column:
<p><tt>ALTER TABLE Per_Image ADD (Mean_Nuclei_AreaShape_Area FLOAT);
UPDATE Per_Image SET Mean_Nuclei_AreaShape_Area =
(SELECT AVG(Nuclei_AreaShape_Area)
FROM Per_Object
WHERE Per_Image.ImageNumber = Per_Object.ImageNumber);</tt>"""%globals())
self.wants_agg_median = cps.Binary("Calculate the per-image median values of object measurements?", False)
self.wants_agg_std_dev = cps.Binary("Calculate the per-image standard deviation values of object measurements?", False)
self.wants_agg_mean_well = cps.Binary(
"Calculate the per-well mean values of object measurements?", False, doc = '''
Select <i>%(YES)s</i> for <b>ExportToDatabase</b> to calculate statistics over all the objects in each well
and store the results as columns in a "per-well" table in the database. For instance,
if you are measuring the area of the Nuclei objects and you check the aggregate
mean box in this module, <b>ExportToDatabase</b> will create a table in the database called
"Per_Well_avg", with a column called "Mean_Nuclei_AreaShape_Area". Selecting all three
aggregate measurements will create three per-well tables, one for each of the measurements.
<p>The per-well functionality will create the appropriate lines in a .SQL file, which can be
run on your Per-Image and Per-Object tables to create the desired per-well table.
<p><i>Note:</i> this option is only available if you have extracted plate and well metadata
from the filename using the <b>Metadata</b> or <b>LoadData</b> modules.
It will write out a .sql file with the statements necessary to create the Per_Well
table, regardless of the option chosen above. %(USING_METADATA_HELP_REF)s'''%globals())
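        # A hedged sketch of the kind of per-well aggregation SQL this option
        # emits (the exact statements CellProfiler writes may differ):
        #
        #   CREATE TABLE Per_Well_avg AS
        #   SELECT Image_Metadata_Plate, Image_Metadata_Well,
        #          AVG(Nuclei_AreaShape_Area) AS Mean_Nuclei_AreaShape_Area
        #   FROM Per_Image JOIN Per_Object USING (ImageNumber)
        #   GROUP BY Image_Metadata_Plate, Image_Metadata_Well;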
self.wants_agg_median_well = cps.Binary(
"Calculate the per-well median values of object measurements?", False, doc = '''
Select <i>%(YES)s</i> for <b>ExportToDatabase</b> to calculate statistics over all the objects in each well
and store the results as columns in a "per-well" table in the database. For instance,
if you are measuring the area of the Nuclei objects and you check the aggregate
median box in this module, <b>ExportToDatabase</b> will create a table in the database called
"Per_Well_median", with a column called "Median_Nuclei_AreaShape_Area". Selecting all
three aggregate measurements will create three per-well tables, one for each of the measurements.
<p>The per-well functionality will create the appropriate lines in a .SQL file, which can be run on your
Per-Image and Per-Object tables to create the desired per-well table.
<p><i>Note:</i> this option is only
available if you have extracted plate and well metadata from the filename using
the <b>Metadata</b> or <b>LoadData</b> modules.
It will write out a .sql file with the statements necessary to create the Per_Well
table, regardless of the option chosen above. %(USING_METADATA_HELP_REF)s'''%globals())
self.wants_agg_std_dev_well = cps.Binary(
"Calculate the per-well standard deviation values of object measurements?", False, doc = '''
Select <i>%(YES)s</i> for <b>ExportToDatabase</b> to calculate statistics over all the objects in each well
and store the results as columns in a "per-well" table in the database. For instance,
if you are measuring the area of the Nuclei objects and you check the aggregate
standard deviation box in this module, <b>ExportToDatabase</b> will create a table in the database called
"Per_Well_std", with a column called "Mean_Nuclei_AreaShape_Area". Selecting all
three aggregate measurements will create three per-well tables, one for each of the measurements.
<p>The per-well functionality will create the appropriate lines in a .SQL file, which can be run on your
Per-Image and Per-Object tables to create the desired per-well table.
<p><i>Note:</i> this option is only
available if you have extracted plate and well metadata from the filename
using the <b>Metadata</b> or <b>LoadData</b> modules.
It will write out a .sql file with the statements necessary to create the Per_Well
table, regardless of the option chosen above. %(USING_METADATA_HELP_REF)s'''%globals())
self.objects_choice = cps.Choice(
"Export measurements for all objects to the database?",
[O_ALL, O_NONE, O_SELECT], doc = """
This option lets you choose the objects whose measurements will be saved in the Per_Object and
Per_Well(s) database tables.
<ul>
<li><i>%(O_ALL)s:</i> Export measurements from all objects.</li>
<li><i>%(O_NONE)s:</i> Do not export data to a Per_Object table. Save only Per_Image or Per_Well
measurements (which nonetheless include population statistics from objects).</li>
<li><i>%(O_SELECT)s:</i> Select the objects you want to export from a list.</li>
</ul>"""%globals())
self.objects_list = cps.ObjectSubscriberMultiChoice(
"Select the objects", doc = """
<i>(Used only if Select is chosen for adding objects)</i><br>
Choose one or more objects from this list (click using shift or command keys to select
multiple objects). The list includes
the objects that were created by prior modules. If you choose an
object, its measurements will be written out to the Per_Object and/or
Per_Well(s) tables, otherwise, the object's measurements will be skipped.""")
self.wants_relationship_table_setting = cps.Binary(
"Export object relationships?", True, doc = """
<i>(Used only for pipelines which relate objects to each other)</i><br>
Select <i>%(YES)s</i> to export object relationships to the
RelationshipsView view. Only certain modules produce
relationships that can be exported by this setting; see
the <b>TrackObjects</b>, <b>RelateObjects</b>,
<b>MeasureObjectNeighbors</b> and the <b>Identify</b> modules
for more details.
<p>This view has the following columns:
<ul><li><i>%(COL_MODULE_NUMBER)s</i>: the module number of the
module that produced the relationship. The first module in the
pipeline is module #1, etc.</li>
<li><i>%(COL_RELATIONSHIP)s</i>: the relationship between the two
objects, for instance, "Parent".</li>
<li><i>%(COL_OBJECT_NAME1)s, %(COL_OBJECT_NAME2)s</i>:
the names of the two objects being related.</li>
<li><i>%(COL_IMAGE_NUMBER1)s, %(COL_OBJECT_NUMBER1)s</i>:
the image number and object number of the first object in the
relationship</li>
<li><i>%(COL_IMAGE_NUMBER2)s, %(COL_OBJECT_NUMBER2)s</i>:
the image number and object number of the second object in the
relationship</li>
</ul>
</p>""" % globals())
self.max_column_size = cps.Integer(
"Maximum # of characters in a column name", 64,
minval = 10, maxval = 64,doc="""
This setting limits the number of characters that can appear
in the name of a field in the database. MySQL has a limit of 64
characters per field, but also has an overall limit on the number of characters
in all of the columns of a table. <b>ExportToDatabase</b> will
shorten all of the column names by removing characters, at the
same time guaranteeing that no two columns have the same name.""")
self.separate_object_tables = cps.Choice(
"Create one table per object, a single object table or a single object view?",
[OT_COMBINE, OT_PER_OBJECT, OT_VIEW],doc = """
<b>ExportToDatabase</b> can create either one table
for each type of object exported or a single
object table.<br><ul>
<li><i>%(OT_PER_OBJECT)s</i> creates one
table for each object type you export. The table name will reflect
the name of your objects. The table will have one row for each
of your objects. You can write SQL queries that join tables using
the "Number_ObjectNumber" columns of parent objects (such as those
created by <b>IdentifyPrimaryObjects</b>) with the corresponding
"Parent_... column" of the child objects. Choose
<i>%(OT_PER_OBJECT)s</i> if parent objects can have more than
one child object, if you want a relational representation of
your objects in the database,
or if you need to split columns among different
tables and shorten column names because of database limitations.</li>
<li><i>%(OT_COMBINE)s</i> creates a single
database table that records the object measurements. <b>
ExportToDatabase</b> will prepend each column name with the
name of the object associated with that column's measurement.
Each row of the table will have measurements for all objects
that have the same image and object number. Choose
<i>%(OT_COMBINE)s</i> if parent objects have a single child,
or if you want a simple table structure in your database. You can
combine the measurements for all or selected objects in this way.</li>
<li><i>%(OT_VIEW)s</i> creates a single
database view to contain the object measurements. A <i>view</i> is a
virtual database table which can be used to package together multiple
per-object tables into a single structure that is accessed just like a
regular table. Choose <i>%(OT_VIEW)s</i> if you want to combine multiple
objects but using <i>%(OT_COMBINE)s</i> would produce a table that hits
the database size limitations. <br>
An important note is that only objects that are related as primary, secondary
or tertiary objects to each other should be combined in a view. This is
because the view expects a one-to-one relationship between the combined objects. If
you are selecting objects for the view, the module will warn you if they are
not related in this way.
</li>
</ul>""" % globals())
self.want_image_thumbnails = cps.Binary(
"Write image thumbnails directly to the database?", False, doc = """
<i>(Used only if %(DB_MYSQL)s or %(DB_SQLITE)s are selected as database type)</i><br>
Select %(YES)s if you'd like to write image thumbnails directly
into the database. This will slow down the writing step, but will
enable new functionality in CellProfiler Analyst such as quickly
viewing images in the Plate Viewer tool by selecting "thumbnail"
from the "Well display" dropdown."""%globals())
self.thumbnail_image_names = cps.ImageNameSubscriberMultiChoice(
"Select the images for which you want to save thumbnails", doc = """
<i>(Used only if %(DB_MYSQL)s or %(DB_SQLITE)s are selected as database type
and writing thumbnails is selected)</i><br>
Select the images that you wish to save as thumbnails to
the database. Make multiple selections by using Ctrl-Click (Windows)
or Command-Click (Mac).""" %globals())
self.auto_scale_thumbnail_intensities = cps.Binary(
"Auto-scale thumbnail pixel intensities?", True,
doc = """
<i>(Used only if %(DB_MYSQL)s or %(DB_SQLITE)s are selected as database type
and writing thumbnails is selected)</i><br>
Select <i>%(YES)s</i> if you'd like to automatically rescale
the thumbnail pixel intensities to the range 0-1, where 0 is
black/unsaturated, and 1 is white/saturated."""%globals())
self.allow_overwrite = cps.Choice(
"Overwrite without warning?",
[OVERWRITE_NEVER, OVERWRITE_DATA, OVERWRITE_ALL],
doc = """
<b>ExportToDatabase</b> creates tables and databases at the start
of a run when writing directly to a MySQL or SQLite database. It
writes SQL scripts and CSVs when not writing directly. It also
can write CellProfiler Analyst properties files. In some cases,
it is appropriate to run CellProfiler and append to or overwrite
the data in existing tables, for instance when running several
CellProfiler instances which each process a range of the experiment's
image sets. In other cases, such as when the measurements to be written
have changed, the data tables must be dropped completely.
<br>
You can choose from three options to control overwriting behavior:
<ul><li><i>%(OVERWRITE_NEVER)s:</i> <b>ExportToDatabase</b> will
ask before dropping and recreating tables. If running headless,
CellProfiler will exit when the tables already exist and this
option is chosen.</li>
<li><i>%(OVERWRITE_DATA)s:</i> <b>ExportToDatabase</b> will keep
the existing tables if present and will overwrite the data. Choose
<i>%(OVERWRITE_DATA)s</i> if you are breaking your experiment
into ranges of image sets and running each range on a separate
instance of CellProfiler.</li>
<li><i>%(OVERWRITE_ALL)s:</i> <b>ExportToDatabase</b> will
drop previous versions of tables at the start of a run. This option
is appropriate if you are using the <b>CreateBatchFiles</b> module;
your tables will be created by the run that creates the batch
data file. The actual analysis runs that utilize the <code>Batch_data</code>
file will use the existing tables without trying to recreate them.</li>
</ul>
"""%globals())
def add_image_group(self,can_remove = True):
group = cps.SettingsGroup()
group.can_remove = can_remove
group.append(
"image_cols", cps.Choice(
"Select an image to include",[cps.NONE],
choices_fn = self.get_property_file_image_choices, doc="""
<i>(Used only if creating a properties file and specifying the image information)</i><br>
Choose an image name to include in the properties file.
<p>The images in the drop-down correspond to images that have been:
<ul>
<li>Loaded using one of the <b>Load</b> modules.</li>
<li>Saved with the <b>SaveImages</b> module, with the corresponding file and path information stored.</li>
</ul>
If you do not see your desired image listed, check the settings on these modules.</p>"""))
group.append(
"wants_automatic_image_name", cps.Binary(
"Use the image name for the display?", True, doc=
"""<i>(Used only if creating a properties file and specifiying the image information)</i><br>
Select <i>%(YES)s</i> to use the image name as given above for the displayed name.
<p>Select <i>%(NO)s</i> to name the file yourself.</p>"""%globals()))
group.append(
"image_name", cps.Text(
"Image name", "Channel%d"%(len(self.image_groups)+1), doc=
"""<i>(Used only if creating a properties file, specifiying the image information and naming the image)</i><br>
Enter a name for the specified image"""))
default_color = (COLOR_ORDER[len(self.image_groups)]
if len(self.image_groups) < len(COLOR_ORDER)
else COLOR_ORDER[0])
group.append(
"image_channel_colors", cps.Choice(
"Channel color", COLOR_ORDER, default_color, doc="""
<i>(Used only if creating a properties file and specifying the image information)</i><br>
Select a color in which to display this channel."""))
group.append("remover", cps.RemoveSettingButton("", "Remove this image", self.image_groups, group))
group.append("divider", cps.Divider(line=False))
self.image_groups.append(group)
def add_group_field_group(self,can_remove = True):
group = cps.SettingsGroup()
group.can_remove = can_remove
group.append(
"group_name",cps.Text(
"Enter the name of the group",'',doc="""
<i>(Used only if creating a properties file and specifying an image data group)</i><br>
Enter a name for the group. Only alphanumeric characters and underscores are permitted."""))
group.append(
"group_statement", cps.Text(
"Enter the per-image columns which define the group, separated by commas",GROUP_COL_DEFAULT, doc="""
<i>(Used only if creating a properties file and specifying an image data group)</i><br>
To define a group, enter the image key columns followed by group key columns, each separated by commas.
<p>In CellProfiler, the image key column is always named <i>ImageNumber</i>; group keys
are typically metadata columns which are always prefixed with <i>Image_Metadata_</i>. For example, if you
wanted to be able to group your data by unique plate and well metadata tags, you could define a
group with the following MySQL statement:<br>
<code>group_SQL_Plate = SELECT ImageNumber, Image_Metadata_Plate, Image_Metadata_Well FROM Per_Image</code><br>
For this example, the columns to enter in this setting would be:<br>
<code>ImageNumber, Image_Metadata_Plate, Image_Metadata_Well</code></p>
<p>Groups are specified as MySQL statements in the properties file, but please note that the full SELECT and
FROM clauses will be added automatically, so there is no need to enter them here.</p>"""))
group.append("remover", cps.RemoveSettingButton("", "Remove this group", self.group_field_groups, group))
group.append("divider", cps.Divider(line=True))
self.group_field_groups.append(group)
def add_filter_field_group(self,can_remove = True):
group = cps.SettingsGroup()
group.can_remove = can_remove
group.append(
"filter_name",cps.Text(
"Enter the name of the filter",'',doc="""
<i>(Used only if creating a properties file and specifying an image data filter)</i><br>
Enter a name for the filter. Only alphanumeric characters and underscores are permitted."""))
group.append(
"filter_statement", cps.Text(
"Enter the MySQL WHERE clause to define a filter",'',doc="""
<i>(Used only if creating a properties file and specifying an image data filter)</i><br>
To define a filter, enter a MySQL <i>WHERE</i> clause that returns image-keys for images you want to include.
For example, here is a filter that returns only images from plate 1:<br>
<code>Image_Metadata_Plate = '1'</code><br>
Here is a filter that returns only images with a gene column that starts with CDK:
<code>Image_Metadata_Gene REGEXP 'CDK.*'</code><br>
<p>Filters are specified as MySQL statements in the properties file, but please note that the full SELECT and
FROM clauses (as well as the WHERE keyword) will be added automatically, so there is no need to enter them here.</p>"""))
group.append("remover", cps.RemoveSettingButton("", "Remove this filter", self.filter_field_groups, group))
group.append("divider", cps.Divider(line=True))
self.filter_field_groups.append(group)
def add_workspace_measurement_group(self, can_remove = True):
group = cps.SettingsGroup()
self.workspace_measurement_groups.append(group)
group.can_remove = can_remove
group.append("divider", cps.Divider(line=False))
group.append(
"measurement_display", cps.Choice(
"Select the measurement display tool",
W_DISPLAY_ALL, doc="""
<i>(Used only if creating a workspace file)</i><br>
Select which display tool in CPA you want to use to open the
measurements.
<ul>
<li>%(W_SCATTERPLOT)s</li>
<li>%(W_HISTOGRAM)s</li>
<li>%(W_DENSITYPLOT)s</li>
<li>%(W_PLATEVIEWER)s</li>
<li>%(W_BOXPLOT)s</li>
</ul>"""%globals()))
def measurement_type_help():
return """
<i>(Used only if creating a workspace file)</i><br>
You can plot two types of measurements:
<ul>
<li><i>Image:</i> For a per-image measurement, one numerical value is
recorded for each image analyzed.
Per-image measurements are produced by
many modules. Many have <b>MeasureImage</b> in the name but others do not
(e.g., the number of objects in each image is a per-image
measurement made by <b>IdentifyObject</b>
modules).</li>
<li><i>Object:</i> For a per-object measurement, each identified
object is measured, so there may be none or many
numerical values recorded for each image analyzed. These are usually produced by
modules with <b>MeasureObject</b> in the name.</li>
</ul>"""%globals()
def object_name_help():
return """<i>(Used only if creating a workspace file)</i><br>
Select the object that you want to measure from the list.
This should be an object created by a previous module such as
<b>IdentifyPrimaryObjects</b>, <b>IdentifySecondaryObjects</b>, or
<b>IdentifyTertiaryObjects</b>."""
def measurement_name_help():
return """<i>(Used only if creating a workspace file)</i><br>
Select the measurement to be plotted on the desired axis."""
def index_name_help():
return """<i>(Used only if creating a workspace file and an index is plotted)</i><br>
Select the index to be plotted on the selected axis. Two options are available:
<ul>
<li><i>%(C_IMAGE_NUMBER)s:</i> In CellProfiler, the unique identifier for each image
is always given this name. Selecting this option allows you to plot a single measurement
for each image indexed by the order it was processed.</li>
<li><i>%(GROUP_INDEX)s:</i> This identifier is used in cases where grouping is applied.
Each image in a group is given an index indicating the order it was processed. Selecting
this option allows you to plot a set of measurements grouped by a common index.
%(USING_METADATA_GROUPING_HELP_REF)s
</li>
</ul>"""%globals()
group.append(
"x_measurement_type", cps.Choice(
"Type of measurement to plot on the X-axis",
W_TYPE_ALL, doc = measurement_type_help()))
group.append(
"x_object_name", cps.ObjectNameSubscriber(
"Enter the object name",cps.NONE,
doc = object_name_help()))
def object_fn_x():
if group.x_measurement_type.value in ( cpmeas.IMAGE, cpmeas.EXPERIMENT ):
return group.x_measurement_type.value
elif group.x_measurement_type.value == cpmeas.OBJECT:
return group.x_object_name.value
else:
raise NotImplementedError("Measurement type %s is not supported"%
group.x_measurement_type.value)
group.append(
"x_measurement_name", cps.Measurement(
"Select the X-axis measurement", object_fn_x,
doc = measurement_name_help()))
group.append(
"x_index_name", cps.Choice(
"Select the X-axis index", W_INDEX_ALL,
doc = index_name_help()))
group.append(
"y_measurement_type", cps.Choice(
"Type of measurement to plot on the Y-axis",
W_TYPE_ALL, doc = measurement_type_help()))
group.append(
"y_object_name", cps.ObjectNameSubscriber(
"Enter the object name",cps.NONE,
doc=object_name_help()))
def object_fn_y():
if group.y_measurement_type.value == cpmeas.IMAGE:
return cpmeas.IMAGE
elif group.y_measurement_type.value == cpmeas.OBJECT:
return group.y_object_name.value
else:
raise NotImplementedError("Measurement type %s is not supported"%
group.y_measurement_type.value)
group.append(
"y_measurement_name", cps.Measurement(
"Select the Y-axis measurement", object_fn_y,
doc = measurement_name_help()))
group.append(
"y_index_name", cps.Choice(
"Select the Y-axis index", W_INDEX_ALL,
doc = index_name_help()))
if can_remove:
group.append("remove_button", cps.RemoveSettingButton(
"", "Remove this measurement", self.workspace_measurement_groups, group))
def get_metadata_choices(self,pipeline):
columns = pipeline.get_measurement_columns()
choices = ["None"]
for column in columns:
object_name, feature, coltype = column[:3]
choice = feature[(len(cpmeas.C_METADATA)+1):]
if (object_name == cpmeas.IMAGE and
feature.startswith(cpmeas.C_METADATA)):
choices.append(choice)
return choices
def get_property_file_image_choices(self,pipeline):
columns = pipeline.get_measurement_columns()
image_names = []
for column in columns:
object_name, feature, coltype = column[:3]
choice = feature[(len(C_FILE_NAME)+1):]
if (object_name == cpmeas.IMAGE and (feature.startswith(C_FILE_NAME))):
image_names.append(choice)
return image_names
def prepare_settings(self, setting_values):
# These check the groupings of settings available in properties and workspace file creation
for count, sequence, fn in\
((int(setting_values[SETTING_IMAGE_GROUP_COUNT]), self.image_groups, self.add_image_group),
(int(setting_values[SETTING_GROUP_FIELD_GROUP_COUNT]), self.group_field_groups, self.add_group_field_group),
(int(setting_values[SETTING_FILTER_FIELD_GROUP_COUNT]), self.filter_field_groups, self.add_filter_field_group),
(int(setting_values[SETTING_WORKSPACE_GROUP_COUNT]), self.workspace_measurement_groups, self.add_workspace_measurement_group)):
del sequence[count:]
while len(sequence) < count:
fn()
def visible_settings(self):
needs_default_output_directory =\
(self.db_type != DB_MYSQL or
self.save_cpa_properties.value or
self.create_workspace_file.value)
# # # # # # # # # # # # # # # # # #
#
# DB type and connection info
#
# # # # # # # # # # # # # # # # # #
result = [self.db_type, self.experiment_name]
if not HAS_MYSQL_DB:
result += [self.mysql_not_available]
if self.db_type == DB_MYSQL:
result += [self.db_name]
result += [self.db_host]
result += [self.db_user]
result += [self.db_passwd]
result += [self.test_connection_button]
elif self.db_type == DB_MYSQL_CSV:
result += [self.sql_file_prefix]
result += [self.db_name]
elif self.db_type == DB_SQLITE:
result += [self.sqlite_file]
elif self.db_type == DB_ORACLE:
result += [self.sql_file_prefix]
if self.db_type != DB_MYSQL_CSV:
result += [self.allow_overwrite]
# # # # # # # # # # # # # # # # # #
#
# Table names
#
# # # # # # # # # # # # # # # # # #
result += [self.want_table_prefix]
if self.want_table_prefix.value:
result += [self.table_prefix]
# # # # # # # # # # # # # # # # # #
#
# CPA properties file
#
# # # # # # # # # # # # # # # # # #
if self.save_cpa_properties.value:
result += [self.divider_props] # Put divider here to make things easier to read
result += [self.save_cpa_properties]
if self.save_cpa_properties.value:
if (self.objects_choice != O_NONE and
self.separate_object_tables in (OT_COMBINE, OT_VIEW)):
result += [self.location_object]
result += [self.wants_properties_image_url_prepend]
if self.wants_properties_image_url_prepend:
result += [self.properties_image_url_prepend]
result += [
self.properties_plate_type, self.properties_plate_metadata,
self.properties_well_metadata,
self.properties_export_all_image_defaults]
if not self.properties_export_all_image_defaults:
for group in self.image_groups:
if group.can_remove:
result += [group.divider]
result += [group.image_cols, group.wants_automatic_image_name]
if not group.wants_automatic_image_name:
result += [group.image_name]
result += [group.image_channel_colors]
if group.can_remove:
result += [group.remover]
result += [ self.add_image_button]
result += [self.properties_wants_groups]
if self.properties_wants_groups:
for group in self.group_field_groups:
if group.can_remove:
result += [group.divider]
result += [group.group_name, group.group_statement]
if group.can_remove:
result += [group.remover]
result += [ self.add_group_field_button ]
result += [self.properties_wants_filters]
if self.properties_wants_filters:
result += [self.create_filters_for_plates]
for group in self.filter_field_groups:
result += [group.filter_name, group.filter_statement]
if group.can_remove:
result += [group.remover]
result += [group.divider]
result += [ self.add_filter_field_button ]
result += [self.properties_class_table_name]
if self.save_cpa_properties.value or self.create_workspace_file.value : # Put divider here to make things easier to read
result += [self.divider_props_wkspace]
result += [self.create_workspace_file]
if self.create_workspace_file:
for workspace_group in self.workspace_measurement_groups:
result += self.workspace_visible_settings(workspace_group)
if workspace_group.can_remove:
result += [workspace_group.remove_button]
result += [self.add_workspace_measurement_button]
if self.create_workspace_file.value: # Put divider here to make things easier to read
result += [self.divider_wkspace]
if needs_default_output_directory:
result += [self.directory]
# # # # # # # # # # # # # # # # # #
#
# Aggregations
#
# # # # # # # # # # # # # # # # # #
result += [self.wants_agg_mean, self.wants_agg_median,
self.wants_agg_std_dev]
if self.db_type != DB_SQLITE:
# We don't write per-well tables to SQLite yet.
result += [self.wants_agg_mean_well, self.wants_agg_median_well,
self.wants_agg_std_dev_well]
# # # # # # # # # # # # # # # # # #
#
# Table choices (1 / separate object tables, etc)
#
# # # # # # # # # # # # # # # # # #
result += [self.objects_choice]
if self.objects_choice == O_SELECT:
result += [self.objects_list]
result += [self.wants_relationship_table_setting]
if self.objects_choice != O_NONE:
result += [self.separate_object_tables]
# # # # # # # # # # # # # # # # # #
#
# Misc (column size + image thumbnails)
#
# # # # # # # # # # # # # # # # # #
result += [self.max_column_size]
if self.db_type in (DB_MYSQL, DB_MYSQL_CSV, DB_SQLITE):
result += [self.want_image_thumbnails]
if self.want_image_thumbnails:
result += [self.thumbnail_image_names,
self.auto_scale_thumbnail_intensities]
return result
def workspace_visible_settings(self, workspace_group):
result = []
if workspace_group.can_remove:
result += [workspace_group.divider]
result += [workspace_group.measurement_display]
result += [workspace_group.x_measurement_type]
if workspace_group.x_measurement_type == W_INDEX:
result += [workspace_group.x_index_name]
elif workspace_group.x_measurement_type == cpmeas.OBJECT:
result += [workspace_group.x_object_name, workspace_group.x_measurement_name]
else:
result += [workspace_group.x_measurement_name]
if workspace_group.measurement_display.value in (W_SCATTERPLOT, W_DENSITYPLOT):
result += [workspace_group.y_measurement_type]
if workspace_group.y_measurement_type == W_INDEX:
result += [workspace_group.y_index_name]
elif workspace_group.y_measurement_type == cpmeas.OBJECT:
result += [workspace_group.y_object_name, workspace_group.y_measurement_name]
else:
result += [workspace_group.y_measurement_name]
return result
def settings(self):
result = [self.db_type, self.db_name, self.want_table_prefix,
self.table_prefix, self.sql_file_prefix,
self.directory,
self.save_cpa_properties,
self.db_host, self.db_user, self.db_passwd, self.sqlite_file,
self.wants_agg_mean, self.wants_agg_median,
self.wants_agg_std_dev, self.wants_agg_mean_well,
self.wants_agg_median_well, self.wants_agg_std_dev_well,
self.objects_choice, self.objects_list, self.max_column_size,
self.separate_object_tables, self.properties_image_url_prepend,
self.want_image_thumbnails,self.thumbnail_image_names,
self.auto_scale_thumbnail_intensities,self.properties_plate_type,
self.properties_plate_metadata, self.properties_well_metadata,
self.properties_export_all_image_defaults,
self.image_group_count, self.group_field_count, self.filter_field_count,
self.workspace_measurement_count, self.experiment_name,
self.location_object, self.properties_class_table_name,
self.wants_relationship_table_setting, self.allow_overwrite,
self.wants_properties_image_url_prepend]
# Properties: Image groups
for group in self.image_groups:
result += [group.image_cols, group.wants_automatic_image_name, group.image_name,
group.image_channel_colors]
result += [self.properties_wants_groups]
# Properties: Grouping fields
for group in self.group_field_groups:
result += [group.group_name, group.group_statement]
# Properties: Filter fields
result += [self.properties_wants_filters, self.create_filters_for_plates]
for group in self.filter_field_groups:
result += [group.filter_name, group.filter_statement]
# Workspace settings
result += [ self.create_workspace_file ]
for group in self.workspace_measurement_groups:
result += [ group.measurement_display,
group.x_measurement_type, group.x_object_name, group.x_measurement_name, group.x_index_name,
group.y_measurement_type, group.y_object_name, group.y_measurement_name, group.y_index_name]
return result
def help_settings(self):
return [self.db_type, self.experiment_name,
self.db_name, self.db_host, self.db_user, self.db_passwd,
self.sql_file_prefix, self.sqlite_file,
self.allow_overwrite,
self.want_table_prefix, self.table_prefix,
self.save_cpa_properties, self.location_object,
self.wants_properties_image_url_prepend,
self.properties_image_url_prepend,
self.properties_plate_type, self.properties_plate_metadata, self.properties_well_metadata,
self.properties_export_all_image_defaults,
self.image_groups[0].image_cols, self.image_groups[0].wants_automatic_image_name, self.image_groups[0].image_name,
self.image_groups[0].image_channel_colors,
self.properties_wants_groups,
self.group_field_groups[0].group_name, self.group_field_groups[0].group_statement,
self.properties_wants_filters, self.create_filters_for_plates,
self.properties_class_table_name,
self.directory,
self.create_workspace_file,
self.workspace_measurement_groups[0].measurement_display,
self.workspace_measurement_groups[0].x_measurement_type, self.workspace_measurement_groups[0].x_object_name, self.workspace_measurement_groups[0].x_measurement_name,
self.workspace_measurement_groups[0].y_measurement_type, self.workspace_measurement_groups[0].y_object_name, self.workspace_measurement_groups[0].y_measurement_name,
self.wants_agg_mean, self.wants_agg_median, self.wants_agg_std_dev,
self.wants_agg_mean_well, self.wants_agg_median_well, self.wants_agg_std_dev_well,
self.objects_choice, self.objects_list,
self.separate_object_tables,
self.max_column_size,
self.want_image_thumbnails,self.thumbnail_image_names, self.auto_scale_thumbnail_intensities]
def validate_module(self,pipeline):
if self.want_table_prefix.value:
if not re.match("^[A-Za-z][A-Za-z0-9_]+$",self.table_prefix.value):
raise cps.ValidationError("Invalid table prefix",self.table_prefix)
if self.db_type==DB_MYSQL:
if not re.match("^[A-Za-z0-9_]+$",self.db_name.value):
raise cps.ValidationError("The database name has invalid characters",self.db_name)
elif self.db_type==DB_SQLITE:
if not re.match("^[A-Za-z0-9_].*$",self.sqlite_file.value):
raise cps.ValidationError("The sqlite file name has invalid characters",self.sqlite_file)
if self.db_type == DB_MYSQL:
if not re.match("^[A-Za-z0-9_].*$",self.db_host.value):
raise cps.ValidationError("The database host name has invalid characters",self.db_host)
if not re.match("^[A-Za-z0-9_]+$",self.db_user.value):
raise cps.ValidationError("The database user name has invalid characters",self.db_user)
else:
if not re.match("^[A-Za-z][A-Za-z0-9_]+$", self.sql_file_prefix.value):
raise cps.ValidationError("Invalid SQL file prefix", self.sql_file_prefix)
if self.objects_choice == O_SELECT:
self.objects_list.load_choices(pipeline)
if len(self.objects_list.choices) == 0:
raise cps.ValidationError("Please choose at least one object", self.objects_choice)
if self.save_cpa_properties:
if self.properties_plate_metadata == NONE_CHOICE and (self.properties_wants_filters.value and self.create_filters_for_plates.value):
raise cps.ValidationError("You must specify the plate metadata",self.create_filters_for_plates)
if self.want_image_thumbnails:
if not self.thumbnail_image_names.get_selections():
raise cps.ValidationError("Please choose at least one image", self.thumbnail_image_names)
if self.want_table_prefix:
max_char = 64
table_name_lengths = [len(self.table_prefix.value + "Per_Image")]
table_name_lengths += [len(self.table_prefix.value + "Per_Object")] if self.objects_choice != O_NONE and self.separate_object_tables.value in (OT_COMBINE, OT_VIEW) else []
table_name_lengths += [len(self.table_prefix.value+"Per_"+x) for x in (self.objects_list.value).split(',')] if self.objects_choice != O_NONE and self.separate_object_tables == OT_PER_OBJECT else []
if np.any(np.array(table_name_lengths) > max_char):
msg = "A table name exceeds the %d character allowed by MySQL.\n"%max_char
msg += "Please shorten the prefix if using a single object table,\n"
msg += "and/or the object name if using separate tables."
raise cps.ValidationError(msg,self.table_prefix)
def validate_module_warnings(self, pipeline):
'''Warn user re: Test mode '''
if pipeline.test_mode:
raise cps.ValidationError("ExportToDatabase does not produce output in Test Mode",
self.db_type)
# Warn user if using SQLLite and CreateBatchFiles
if self.db_type == DB_SQLITE and pipeline.has_create_batch_module():
raise cps.ValidationError(
"Only one process can access a SQLite database at a time.\n"
"Database operations will fail if you run more than one copy\n"
"of CellProfiler simultaneously. You can run multiple copies\n"
"of CellProfiler if you choose to output a MySQL database.\n"
"ExportToDatabase will work in multiprocessing mode using a\n"
"SQLite database.", self.db_type)
'''Warn user that they will have to merge tables to use CPA'''
if self.objects_choice != O_NONE and self.separate_object_tables == OT_PER_OBJECT:
raise cps.ValidationError(
("You will have to merge the separate object tables in order\n"
"to use CellProfiler Analyst fully, or you will be restricted\n"
"to only one object's data at a time in CPA. Choose\n"
"%s to write a single object table.") % ("'%s' or '%s'"%(OT_COMBINE,OT_VIEW)), self.separate_object_tables)
'''Warn user re: bad characters in object used for center, filter/group names and class_table name'''
if self.save_cpa_properties:
warning_string = "CellProfiler Analyst will not recognize this %s because it contains invalid characters. Allowable characters are letters, numbers and underscores."
if not re.match("^[\w]*$",self.location_object.value):
raise cps.ValidationError(warning_string%"object",self.location_object)
if self.properties_wants_groups:
for group in self.group_field_groups:
if not re.match("^[\w]*$",group.group_name.value) or group.group_name.value == '':
raise cps.ValidationError(warning_string%"group name",group.group_name)
if self.properties_wants_filters:
for group in self.filter_field_groups:
if not re.match("^[\w]*$",group.filter_name.value) or group.filter_name.value == '':
raise cps.ValidationError(warning_string%"filter name",group.filter_name)
if not re.match("^[\w\s\"\'=]*$",group.filter_statement.value) or group.filter_statement.value == '':
raise cps.ValidationError(warning_string%"filter statement",group.filter_statement)
if self.properties_class_table_name:
if not re.match("^[\w]*$",self.properties_class_table_name.value):
raise cps.ValidationError(warning_string%"class table name",self.properties_class_table_name)
'''Warn user re: objects that are not 1:1 (i.e., primary/secondary/tertiary) if creating a view'''
if self.objects_choice != O_NONE and self.separate_object_tables in (OT_VIEW,OT_COMBINE):
if self.objects_choice == O_SELECT:
selected_objs = self.objects_list.value.rsplit(',')
elif self.objects_choice == O_ALL:
selected_objs = pipeline.get_provider_dictionary(cps.OBJECT_GROUP).keys()
if len(selected_objs) > 1:
# Check whether each selected object comes from an Identify module. If it does, look for its parent.
d = dict.fromkeys(selected_objs,None)
for obj in selected_objs:
for module in pipeline.modules():
if module.is_object_identification_module():# and module.get_measurements(pipeline,obj,C_PARENT):
parent = module.get_measurements(pipeline,obj,C_PARENT)
if len(parent) > 0:
d[obj] = parent[0]
# For objects with no parents (primary), use the object itself
d = dict([(key, key if value is None else value) for (key, value) in d.items()])
# Only those objects which have parents in common should be written together
if len(set(d.values())) > 1:
# Pick out the parent with the lowest representation in the selected object list
mismatched_parent = sorted(zip([d.values().count(item) for item in set(d.values())],set(d.values())))[0][1]
# Find the objects that this parent goes with
mismatched_objs = [key for (key,value) in d.items() if value == mismatched_parent]
msg = "%s is not in a 1:1 relationship with the other objects, which may cause downstream problems.\n "%",".join(mismatched_objs)
msg += "You may want to choose another object container"
msg += "." if self.objects_choice == O_ALL else " or de-select the object(s)."
raise cps.ValidationError(msg,self.separate_object_tables)
def test_connection(self):
'''Check to make sure the MySQL server is remotely accessible'''
import wx
error = None
try:
connection = connect_mysql(self.db_host.value,
self.db_user.value,
self.db_passwd.value,
self.db_name.value)
except MySQLdb.Error, error:
if error.args[0] == 1045:
msg = "Incorrect username or password"
elif error.args[0] == 1049:
msg = "The database does not exist."
else:
msg = "A connection error to the database host was returned: %s"%error.args[1]
if not error:
wx.MessageBox("Connection to database host successful.")
else:
wx.MessageBox("%s. Please check your settings."%msg)
def make_full_filename(self, file_name,
workspace = None, image_set_index = None):
"""Convert a file name into an absolute path
We do a few things here:
* apply metadata from an image set to the file name if an
image set is specified
* change the relative path into an absolute one using the "." and "&"
convention
* create any directories along the path
"""
if image_set_index is not None and workspace is not None:
file_name = workspace.measurements.apply_metadata(file_name,
image_set_index)
measurements = None if workspace is None else workspace.measurements
path_name = self.directory.get_absolute_path(measurements,
image_set_index)
file_name = os.path.join(path_name, file_name)
path, file = os.path.split(file_name)
if not os.path.isdir(path):
os.makedirs(path)
return os.path.join(path,file)
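# Illustrative sketch (assumptions, not taken from this module): with
# the default output folder set to /home/user/out, a file_name of
# r"sub_\g<Plate>/out.csv" and Metadata_Plate = "P1" for the given
# image set would resolve to /home/user/out/sub_P1/out.csv, creating
# the sub_P1 directory along the way.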
def prepare_run(self, workspace, as_data_tool = False):
'''Prepare to run the pipeline
Establish a connection to the database.'''
if not as_data_tool:
self.get_dictionary().clear()
pipeline = workspace.pipeline
image_set_list = workspace.image_set_list
if pipeline.test_mode:
return True
needs_close = False
try:
if self.db_type==DB_MYSQL:
self.connection, self.cursor = connect_mysql(self.db_host.value,
self.db_user.value,
self.db_passwd.value,
self.db_name.value)
needs_close = True
if self.wants_well_tables:
per_well = self.write_mysql_table_per_well(pipeline, image_set_list)
elif self.db_type==DB_SQLITE:
db_file = self.make_full_filename(self.sqlite_file.value)
self.connection, self.cursor = connect_sqlite(db_file)
needs_close = True
#
# This caches the list of measurement columns for the run,
# fixing the column order, etc.
#
self.get_pipeline_measurement_columns(pipeline, image_set_list)
if pipeline.in_batch_mode() or not cpprefs.get_allow_schema_write():
return True
if self.db_type == DB_ORACLE:
raise NotImplementedError("Writing to an Oracle database is not yet supported")
if self.db_type in (DB_MYSQL, DB_SQLITE):
tables = [self.get_table_name(cpmeas.IMAGE)]
if self.objects_choice != O_NONE:
if self.separate_object_tables == OT_COMBINE:
tables.append(self.get_table_name(cpmeas.OBJECT))
else:
for object_name in self.get_object_names(pipeline, image_set_list):
tables.append(self.get_table_name(object_name))
tables_that_exist = []
for table in tables:
try:
r = execute(self.cursor,
'SELECT * FROM %s LIMIT 1'%(table))
tables_that_exist.append(table)
except:
pass
if len(tables_that_exist) > 0:
if len(tables_that_exist) == 1:
table_msg = "%s table" % tables_that_exist[0]
else:
table_msg = "%s and %s tables" % (
", ".join(tables_that_exist[:-1]),
tables_that_exist[-1])
if cpprefs.get_headless():
if self.allow_overwrite == OVERWRITE_NEVER:
logger.error("%s already in database and overwrite not allowed. Exiting" % table_msg)
return False
elif self.allow_overwrite == OVERWRITE_DATA:
logger.warning("%s already in database, not creating" % table_msg)
return True
elif self.allow_overwrite in (OVERWRITE_NEVER, OVERWRITE_DATA):
import wx
message = (
"Do you want ExportToDatabase to drop the %s?\n\n"
'Choose "Yes" to drop and recreate the tables, '
'discarding all existing data.\n'
'Choose "No" to keep the existing tables and '
'overwrite data as necessary.\n'
'Choose "Cancel" to stop and leave the tables intact.') % table_msg
with wx.MessageDialog(
workspace.frame, message,
style=wx.YES|wx.NO|wx.CANCEL|wx.ICON_QUESTION) as dlg:
result = dlg.ShowModal()
if result == wx.ID_CANCEL:
return False
elif result != wx.ID_YES:
return True
mappings = self.get_column_name_mappings(pipeline, image_set_list)
column_defs = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
if self.objects_choice != O_ALL:
onames = [cpmeas.EXPERIMENT, cpmeas.IMAGE, cpmeas.NEIGHBORS]
if self.objects_choice == O_SELECT:
onames += self.objects_list.selections
column_defs = [column for column in column_defs
if column[0] in onames]
self.create_database_tables(self.cursor, workspace)
return True
finally:
if needs_close:
self.connection.commit()
self.connection.close()
self.connection = None
self.cursor = None
def prepare_to_create_batch(self, workspace, fn_alter_path):
'''Alter the output directory path for the remote batch host'''
self.directory.alter_for_create_batch_files(fn_alter_path)
return True
def get_measurement_columns(self, pipeline):
if self.want_image_thumbnails:
cols = []
for name in self.thumbnail_image_names.get_selections():
cols += [(cpmeas.IMAGE, C_THUMBNAIL + "_" +name,
cpmeas.COLTYPE_LONGBLOB)]
return cols
return []
def run_as_data_tool(self, workspace):
'''Run the module as a data tool
ExportToDatabase has two modes - writing CSVs and writing directly.
We write CSVs in post_run. We write directly in run.
'''
#
# The measurements may have been created by an old copy of CP. We
# have to hack our measurement column cache to circumvent this.
#
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
d = self.get_dictionary()
columns = m.get_measurement_columns()
for i, (object_name, feature_name, coltype) in enumerate(columns):
if (object_name == cpmeas.IMAGE and
feature_name.startswith(C_THUMBNAIL)):
columns[i] = (object_name, feature_name, cpmeas.COLTYPE_LONGBLOB)
columns = self.filter_measurement_columns(columns)
d[D_MEASUREMENT_COLUMNS] = columns
if not self.prepare_run(workspace, as_data_tool=True):
return
self.prepare_group(workspace, None, None)
if self.db_type != DB_MYSQL_CSV:
workspace.measurements.is_first_image = True
for i in range(workspace.measurements.image_set_count):
if i > 0:
workspace.measurements.next_image_set()
self.run(workspace)
else:
workspace.measurements.image_set_number = \
workspace.measurements.image_set_count
self.post_run(workspace)
def run(self, workspace):
if self.want_image_thumbnails:
import PIL.Image as Image
from StringIO import StringIO
measurements = workspace.measurements
image_set = workspace.image_set
for name in self.thumbnail_image_names.get_selections():
# For each desired channel, convert the pixel data into a PIL
# image and then save it as a PNG into a StringIO buffer.
# Finally read the raw data out of the buffer and add it
# as a measurement to be written as a blob.
pixels = image_set.get_image(name).pixel_data
if issubclass(pixels.dtype.type, np.floating) or pixels.dtype == np.bool:
factor = 255
if self.auto_scale_thumbnail_intensities:
pixels = (pixels - pixels.min()) / (pixels.max() - pixels.min())  # rescale to [0, 1]
else:
raise Exception('ExportToDatabase cannot write image thumbnails from images of type "%s".'%(str(pixels.dtype)))
if pixels.ndim == 2:
im = Image.fromarray((pixels * factor).astype('uint8'), 'L')
elif pixels.ndim == 3:
im = Image.fromarray((pixels * factor).astype('uint8'), 'RGB')
else:
raise Exception('ExportToDatabase only supports saving thumbnails of grayscale or 3-channel images. "%s" was neither.'%(name))
# resize the image so the major axis is 200px long
if im.size[0] == max(im.size):
w, h = (200, 200 * min(im.size) / max(im.size))
else:
h, w = (200, 200 * min(im.size) / max(im.size))
im = im.resize((w,h))
fd = StringIO()
im.save(fd, 'PNG')
blob = fd.getvalue()
fd.close()
measurements.add_image_measurement(
C_THUMBNAIL + "_" + name, blob.encode('base64'))
if workspace.pipeline.test_mode:
return
if self.db_type == DB_MYSQL and not workspace.pipeline.test_mode:
try:
self.connection, self.cursor = connect_mysql(
self.db_host.value,
self.db_user.value,
self.db_passwd.value,
self.db_name.value)
self.write_data_to_db(workspace)
finally:
self.connection.commit()
self.connection.close()
self.connection = None
self.cursor = None
elif self.db_type == DB_SQLITE and not workspace.pipeline.test_mode:
# For distributed, use the interaction handler to run the
# database commands on the server
#
self.connection = self.cursor = SQLiteCommands()
try:
self.write_data_to_db(workspace)
workspace.interaction_request(
self, self.INTERACTION_EXECUTE, self.connection.get_state())
except workspace.NoInteractionException:
# Assume that the interaction can be handled directly,
# for instance, in headless mode with no handler
#
self.handle_interaction(self.INTERACTION_EXECUTE,
self.connection.get_state())
finally:
self.connection = None
self.cursor = None
INTERACTION_EXECUTE = "Execute"
INTERACTION_GET_RELATIONSHIP_TYPES = "GetRelationshipTypes"
INTERACTION_ADD_RELATIONSHIP_TYPE = "AddRelationshipType"
def handle_interaction(self, command, *args, **kwargs):
'''Handle SQLite interactions from workers'''
if command == self.INTERACTION_EXECUTE:
return self.handle_interaction_execute(*args, **kwargs)
elif command == self.INTERACTION_GET_RELATIONSHIP_TYPES:
return self.handle_interaction_get_relationship_types(*args, **kwargs)
elif command == self.INTERACTION_ADD_RELATIONSHIP_TYPE:
return self.handle_interaction_add_relationship_type(*args, **kwargs)
else:
raise ValueError("No %s interaction" % command)
def handle_interaction_execute(self, state):
commands = SQLiteCommands()
commands.set_state(state)
db_file = self.make_full_filename(self.sqlite_file.value)
connection, cursor = connect_sqlite(db_file)
try:
commands.execute_all(cursor)
connection.commit()
except:
connection.rollback()
raise
finally:
cursor.close()
connection.close()
def handle_interaction_get_relationship_types(self):
'''Get the relationship types from the database
returns a dictionary whose key is
(module_number, relationship name, object_name1, object_name2) and
whose value is the relationship type ID for that relationship.
'''
db_file = self.make_full_filename(self.sqlite_file.value)
with DBContext(self) as (connection, cursor):
return self.get_relationship_types(cursor).items()
def grt_interaction_to_dict(self, json_struct):
'''Convert the JSON-mangled structure back into a dictionary
json_struct - the result of handle_interaction_get_relationship_types
after a round trip through JSON, which turns the tuple
keys into lists
'''
return dict([(tuple(k), v) for k,v in json_struct])
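# For example (illustrative), JSON turns
# [((5, 'Parent', 'Nuclei', 'Cells'), 1)] into
# [[[5, 'Parent', 'Nuclei', 'Cells'], 1]]; this helper restores
# {(5, 'Parent', 'Nuclei', 'Cells'): 1}.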
def get_relationship_types(self, cursor):
'''Get the relationship types from the database
returns a dictionary whose key is
(module_number, relationship name, object_name1, object_name2) and
whose value is the relationship type ID for that relationship.
'''
relationship_type_table = self.get_table_name(T_RELATIONSHIP_TYPES)
statement = "SELECT %s, %s, %s, %s, %s FROM %s" % (
COL_RELATIONSHIP_TYPE_ID, COL_RELATIONSHIP, COL_MODULE_NUMBER,
COL_OBJECT_NAME1, COL_OBJECT_NAME2, relationship_type_table)
return dict(
[((int(mn), r, o1, o2), int(rt_id))
for rt_id, r, mn, o1, o2 in
execute(cursor, statement)])
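# e.g. (illustrative) a pipeline whose module #5 records a "Parent"
# relationship between Nuclei and Cells might yield
# {(5, 'Parent', 'Nuclei', 'Cells'): 1}.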
def handle_interaction_add_relationship_type(
self, module_num, relationship, object_name1, object_name2):
'''Add a relationship type to the database
module_num, relationship, object_name1, object_name2: the key
to the relationship in the relationship type table
returns the relationship type ID
'''
with DBContext(self) as (connection, cursor):
return self.add_relationship_type(
module_num, relationship, object_name1, object_name2, cursor)
def add_relationship_type(self, module_num, relationship, object_name1,
object_name2, cursor):
'''Add a relationship type to the database
module_num, relationship, object_name1, object_name2: the key
to the relationship in the relationship type table
returns the relationship type ID
'''
logger.info("Adding missing relationship type:")
logger.info(" module #: %d" % module_num)
logger.info(" relationship: %s" % relationship)
logger.info(" object 1: %s" % object_name1)
logger.info(" object 2: %s" % object_name2)
#
# If the code reaches here, it's because:
# * some module has an absent or mis-coded get_relationship_columns
# * the user changed the pipeline after prepare_run was called.
#
relationship_type_table = self.get_table_name(T_RELATIONSHIP_TYPES)
#
# An insert guarantees that a record exists
#
# INSERT INTO <t> (...)
# SELECT * FROM (
# SELECT coalesce(max(relationship_type_id), -1) + 1, <module #>... FROM <t>
# ) AS mytable WHERE NOT EXISTS
# (SELECT 'x' FROM <t> WHERE module_number = <module #>...)
#
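# Concretely (illustrative only; assumes the column constants expand
# to their conventional names and a "MyExpt_" table prefix), module #5
# recording a "Parent" relationship between Nuclei and Cells yields
# roughly:
#
# INSERT INTO MyExpt_Per_RelationshipTypes
# (relationship_type_id, module_number, relationship,
# object_name1, object_name2)
# SELECT * FROM
# (SELECT coalesce(max(relationship_type_id), -1)+1, 5, 'Parent',
# 'Nuclei', 'Cells' FROM MyExpt_Per_RelationshipTypes) AS mytable
# WHERE NOT EXISTS (SELECT 'x' FROM MyExpt_Per_RelationshipTypes
# WHERE module_number = 5 AND relationship = 'Parent' AND ...)
#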
statement = \
"INSERT INTO %s (%s, %s, %s, %s, %s) " % (
relationship_type_table,
COL_RELATIONSHIP_TYPE_ID, COL_MODULE_NUMBER,
COL_RELATIONSHIP, COL_OBJECT_NAME1, COL_OBJECT_NAME2)
statement += "SELECT * FROM "
statement += \
"(SELECT coalesce(max(%s), -1)+1 as %s, %d as %s, '%s' as %s, '%s' as %s, '%s' as %s FROM %s)" % \
(COL_RELATIONSHIP_TYPE_ID, COL_RELATIONSHIP_TYPE_ID,
module_num, COL_MODULE_NUMBER,
relationship, COL_RELATIONSHIP,
object_name1, COL_OBJECT_NAME1,
object_name2, COL_OBJECT_NAME2, relationship_type_table)
statement += " AS mytable WHERE NOT EXISTS "
statement += "(SELECT 'x' FROM %s WHERE " % relationship_type_table
statement += "%s = %d AND " % (COL_MODULE_NUMBER, module_num)
statement += "%s = '%s' AND " % (COL_RELATIONSHIP, relationship)
statement += "%s = '%s' AND " % (COL_OBJECT_NAME1, object_name1)
statement += "%s = '%s')" % (COL_OBJECT_NAME2, object_name2)
cursor.execute(statement)
#
# Then we select and find it
#
select_statement = \
"SELECT min(%s) FROM %s WHERE %s = %d" % (
COL_RELATIONSHIP_TYPE_ID, relationship_type_table,
COL_MODULE_NUMBER, module_num)
for col, value in ((COL_RELATIONSHIP, relationship),
(COL_OBJECT_NAME1, object_name1),
(COL_OBJECT_NAME2, object_name2)):
select_statement += " AND %s = '%s'" % (col, value)
cursor.execute(select_statement)
result = cursor.fetchall()
if len(result) == 0 or result[0][0] is None:
raise ValueError(
"Failed to retrieve relationship_type_id for "
"module # %d, %s %s %s" %
(module_num, relationship, object_name1, object_name2))
return int(result[0][0])
def post_group(self, workspace, grouping):
'''Write out any columns that are only available post-group'''
if workspace.pipeline.test_mode:
return
if self.db_type not in (DB_MYSQL, DB_SQLITE):
return
try:
if self.db_type==DB_MYSQL:
self.connection, self.cursor = connect_mysql(
self.db_host.value,
self.db_user.value,
self.db_passwd.value,
self.db_name.value)
elif self.db_type==DB_SQLITE:
self.connection = self.cursor = SQLiteCommands()
#
# Process the image numbers in the current image's group
#
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
group_number = m[cpmeas.IMAGE, cpmeas.GROUP_NUMBER,
m.image_set_number]
all_image_numbers = m.get_image_numbers()
all_group_numbers = m[cpmeas.IMAGE, cpmeas.GROUP_NUMBER,
all_image_numbers]
group_image_numbers = \
all_image_numbers[all_group_numbers == group_number]
for image_number in group_image_numbers:
self.write_data_to_db(workspace,
post_group = True,
image_number = image_number)
if self.db_type == DB_SQLITE:
try:
workspace.interaction_request(
self, self.INTERACTION_EXECUTE,
self.connection.get_state())
except workspace.NoInteractionException:
# Assume that the interaction can be handled directly,
# for instance, in headless mode with no handler
#
self.handle_interaction(self.INTERACTION_EXECUTE,
self.connection.get_state())
finally:
self.connection.commit()
self.connection.close()
self.connection = None
self.cursor = None
def post_run(self, workspace):
if self.save_cpa_properties.value:
self.write_properties_file(workspace)
if self.create_workspace_file.value:
self.write_workspace_file(workspace)
if self.db_type == DB_MYSQL_CSV:
path = self.directory.get_absolute_path(None if workspace is None
else workspace.measurements)
if not os.path.isdir(path):
os.makedirs(path)
self.write_mysql_table_defs(workspace)
self.write_csv_data(workspace)
else:
self.write_post_run_measurements(workspace)
@property
def wants_well_tables(self):
'''Return true if user wants any well tables'''
if self.db_type == DB_SQLITE:
return False
else:
return (self.wants_agg_mean_well or self.wants_agg_median_well or
self.wants_agg_std_dev_well)
@property
def wants_relationship_table(self):
'''True to write relationships to the database'''
return self.wants_relationship_table_setting.value
def should_stop_writing_measurements(self):
'''Returns True so that subsequent modules do not write measurements'''
return True
def ignore_object(self,object_name, strict = False):
"""Ignore objects (other than 'Image') if this returns true
If strict is True, then we ignore objects based on the object selection
"""
if object_name in (cpmeas.EXPERIMENT, cpmeas.NEIGHBORS):
return True
if strict and self.objects_choice == O_NONE:
return True
if (strict and self.objects_choice == O_SELECT and
object_name != cpmeas.IMAGE):
return object_name not in self.objects_list.selections
return False
def ignore_feature(self, object_name, feature_name, measurements=None,
strict = False):
"""Return true if we should ignore a feature"""
if (self.ignore_object(object_name, strict) or
feature_name.startswith('Description_') or
feature_name.startswith('ModuleError_') or
feature_name.startswith('TimeElapsed_') or
feature_name.startswith('ExecutionTime_') or
(self.db_type not in (DB_MYSQL, DB_SQLITE) and feature_name.startswith('Thumbnail_'))
):
return True
return False
def get_column_name_mappings(self, pipeline, image_set_list):
"""Scan all the feature names in the measurements, creating column names"""
columns = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
mappings = ColumnNameMapping(self.max_column_size.value)
mappings.add(C_IMAGE_NUMBER)
mappings.add(C_OBJECT_NUMBER)
for column in columns:
object_name, feature_name, coltype = column[:3]
if self.ignore_feature(object_name, feature_name):
continue
mappings.add("%s_%s"%(object_name,feature_name))
if object_name != cpmeas.IMAGE:
for agg_name in self.agg_names:
mappings.add('%s_%s_%s'%(agg_name, object_name, feature_name))
return mappings
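# Illustratively, a Nuclei measurement AreaShape_Area contributes
# "Nuclei_AreaShape_Area" plus one name per selected aggregate, e.g.
# "Mean_Nuclei_AreaShape_Area"; ColumnNameMapping (defined elsewhere)
# presumably shortens any name longer than max_column_size.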
def get_aggregate_columns(self, pipeline, image_set_list, post_group = None):
'''Get object aggregate columns for the PerImage table
pipeline - the pipeline being run
image_set_list - for caching column data
post_group - true if only getting aggregates available post-group,
false for getting aggregates available after run,
None to get all
returns a list of tuples, one per aggregate column:
result[0] - object_name = name of object generating the aggregate
result[1] - feature name
result[2] - aggregation operation
result[3] - column name in Image database
'''
columns = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
mappings = self.get_column_name_mappings(pipeline, image_set_list)
ob_tables = self.get_object_names(pipeline, image_set_list)
result = []
for ob_table in ob_tables:
for column in columns:
if ((post_group is not None) and
not self.should_write(column, post_group)):
continue
obname, feature, ftype = column[:3]
if (obname==ob_table and
(not self.ignore_feature(obname, feature)) and
(not cpmeas.agg_ignore_feature(feature))):
feature_name = '%s_%s'%(obname, feature)
# create per_image aggregate column defs
result += [(obname, feature, aggname,
'%s_%s' % (aggname, feature_name))
for aggname in self.agg_names ]
return result
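# e.g. one returned entry (illustrative):
# ('Nuclei', 'AreaShape_Area', 'Mean', 'Mean_Nuclei_AreaShape_Area')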
def get_object_names(self, pipeline, image_set_list):
'''Get the names of the objects whose measurements are being taken'''
column_defs = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
obnames = set([c[0] for c in column_defs])
#
# In alphabetical order
#
obnames = sorted(obnames)
return [ obname for obname in obnames
if not self.ignore_object(obname, True) and
obname not in (cpmeas.IMAGE, cpmeas.EXPERIMENT,
cpmeas.NEIGHBORS)]
@property
def agg_names(self):
'''The list of selected aggregate names'''
return [name
for name, setting
in ((cpmeas.AGG_MEAN, self.wants_agg_mean),
(cpmeas.AGG_MEDIAN, self.wants_agg_median),
(cpmeas.AGG_STD_DEV, self.wants_agg_std_dev))
if setting.value]
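# For instance, with mean and median checked but std dev unchecked,
# this returns [cpmeas.AGG_MEAN, cpmeas.AGG_MEDIAN]
# (typically ['Mean', 'Median']).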
@property
def agg_well_names(self):
'''The list of selected aggregate names'''
return [name
for name, setting
in (('avg', self.wants_agg_mean_well),
('median', self.wants_agg_median_well),
('std', self.wants_agg_std_dev_well))
if setting.value]
#
# Create per_image and per_object tables in MySQL
#
def create_database_tables(self, cursor, workspace):
'''Creates empty image and object tables
Creates the MySQL database (if MySQL), drops existing tables of the
same name and creates the tables.
cursor - database cursor for creating the tables
workspace - the analysis workspace; its pipeline and image set list
supply the column definitions and feature-name mappings
'''
pipeline = workspace.pipeline
image_set_list = workspace.image_set_list
# Create the database
if self.db_type==DB_MYSQL:
#result = execute(cursor, "SHOW DATABASES LIKE '%s'" %
#self.db_name.value)
#if len(result) == 0:
execute(cursor, 'CREATE DATABASE IF NOT EXISTS %s' %
(self.db_name.value), return_result = False)
execute(cursor, 'USE %s'% self.db_name.value,
return_result = False)
columns = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
#
# Drop either the unified objects table or the view of it
#
object_table_name = self.get_table_name(cpmeas.OBJECT)
try:
execute(cursor, 'DROP TABLE IF EXISTS %s' %
self.get_table_name(cpmeas.OBJECT),
return_result = False)
except:
# MySQL is fine if the table is a view, but not SQLite
pass
try:
execute(cursor, 'DROP VIEW IF EXISTS %s' %
self.get_table_name(cpmeas.OBJECT),
return_result = False)
except:
pass
if self.objects_choice != O_NONE:
# Object table/view
if self.separate_object_tables == OT_COMBINE:
statement = self.get_create_object_table_statement(
None, pipeline, image_set_list)
execute(cursor, statement, return_result = False)
else:
for object_name in self.get_object_names(pipeline,
image_set_list):
execute(cursor, 'DROP TABLE IF EXISTS %s' %
self.get_table_name(object_name),
return_result = False)
statement = self.get_create_object_table_statement(
object_name, pipeline, image_set_list)
execute(cursor, statement, return_result=False)
if self.separate_object_tables == OT_VIEW:
statement = self.get_create_object_view_statement(
self.get_object_names(pipeline, image_set_list), pipeline, image_set_list)
execute(cursor, statement, return_result=False)
# Image table
execute(cursor, 'DROP TABLE IF EXISTS %s' %
self.get_table_name(cpmeas.IMAGE), return_result = False)
statement = self.get_create_image_table_statement(pipeline,
image_set_list)
execute(cursor, statement, return_result=False)
execute(cursor, 'DROP TABLE IF EXISTS %s' %
self.get_table_name(cpmeas.EXPERIMENT) )
for statement in self.get_experiment_table_statements(workspace):
execute(cursor, statement, return_result=False)
if self.wants_relationship_table:
for statement in self.get_create_relationships_table_statements(pipeline):
execute(cursor, statement, return_result=False)
cursor.connection.commit()
def get_experiment_table_statements(self, workspace):
statements = []
if self.db_type in (DB_MYSQL_CSV, DB_MYSQL):
autoincrement = "AUTO_INCREMENT"
need_text_size = True
else:
autoincrement = "AUTOINCREMENT"
need_text_size = False
create_experiment_table_statement = """
CREATE TABLE IF NOT EXISTS %s (
experiment_id integer primary key %s,
name text)""" % (T_EXPERIMENT, autoincrement)
statements.append(create_experiment_table_statement)
if need_text_size:
create_experiment_properties = """
CREATE TABLE IF NOT EXISTS %(T_EXPERIMENT_PROPERTIES)s (
experiment_id integer not null,
object_name text not null,
field text not null,
value longtext,
constraint %(T_EXPERIMENT_PROPERTIES)s_pk primary key
(experiment_id, object_name(200), field(200)))""" % globals()
else:
create_experiment_properties = """
CREATE TABLE IF NOT EXISTS %(T_EXPERIMENT_PROPERTIES)s (
experiment_id integer not null,
object_name text not null,
field text not null,
value longtext,
constraint %(T_EXPERIMENT_PROPERTIES)s_pk primary key (experiment_id, object_name, field))""" % globals()
statements.append(create_experiment_properties)
insert_into_experiment_statement = """
INSERT INTO %s (name) values ('%s')""" % (
T_EXPERIMENT, MySQLdb.escape_string(self.experiment_name.value))
statements.append(insert_into_experiment_statement)
properties = self.get_property_file_text(workspace)
for p in properties:
for k, v in p.properties.iteritems():
statement = """
INSERT INTO %s (experiment_id, object_name, field, value)
SELECT MAX(experiment_id), '%s', '%s', '%s' FROM %s""" % (
T_EXPERIMENT_PROPERTIES, p.object_name,
MySQLdb.escape_string(k),
MySQLdb.escape_string(v), T_EXPERIMENT)
statements.append(statement)
experiment_columns = filter(
lambda x:x[0] == cpmeas.EXPERIMENT,
workspace.pipeline.get_measurement_columns())
experiment_coldefs = [
"%s %s" % (x[1],
"TEXT" if x[2].startswith(cpmeas.COLTYPE_VARCHAR)
else x[2]) for x in experiment_columns]
create_per_experiment = """
CREATE TABLE %s (
%s)
""" % (self.get_table_name(cpmeas.EXPERIMENT),
",\n".join(experiment_coldefs))
statements.append(create_per_experiment)
column_names = []
values = []
for column in experiment_columns:
ftr = column[1]
column_names.append(ftr)
if ((len(column) > 3 and
column[3].get(cpmeas.MCA_AVAILABLE_POST_RUN, False)) or
not workspace.measurements.has_feature(cpmeas.EXPERIMENT, ftr)):
values.append("null")
continue
value = workspace.measurements.get_experiment_measurement(ftr)
if column[2].startswith(cpmeas.COLTYPE_VARCHAR):
if isinstance(value, unicode):
value = value.encode('utf-8')
if self.db_type != DB_SQLITE:
value = MySQLdb.escape_string(value)
else:
value = value.replace("'", "''")
value = "'"+value+"'"
else:
# Both MySQL and SQLite support blob literals of the style:
# X'0123456789ABCDEF'
#
value = "X'" + "".join(["%02X" % ord(x) for x in value]) + "'"
values.append(value)
experiment_insert_statement = "INSERT INTO %s (%s) VALUES (%s)" % (
self.get_table_name(cpmeas.EXPERIMENT),
",".join(column_names),
",".join(values))
statements.append(experiment_insert_statement)
return statements
def get_create_image_table_statement(self, pipeline, image_set_list):
'''Return a SQL statement that generates the image table'''
statement = 'CREATE TABLE '+ self.get_table_name(cpmeas.IMAGE) +' (\n'
statement += '%s INTEGER'%C_IMAGE_NUMBER
mappings = self.get_column_name_mappings(pipeline, image_set_list)
columns = self.get_pipeline_measurement_columns(
pipeline, image_set_list)
for column in columns:
obname, feature, ftype = column[:3]
if obname == cpmeas.IMAGE and not self.ignore_feature(obname, feature):
if ftype.startswith(cpmeas.COLTYPE_VARCHAR):
ftype = "TEXT"
feature_name = '%s_%s' % (obname, feature)
statement += ',\n%s %s'%(mappings[feature_name], ftype)
for column in self.get_aggregate_columns(pipeline, image_set_list):
statement += ',\n%s %s' % (mappings[column[3]],
cpmeas.COLTYPE_FLOAT)
statement += ',\nPRIMARY KEY (%s) )'%C_IMAGE_NUMBER
return statement
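# Illustrative output (assumes a "MyExpt_" table prefix and a mean
# aggregate over Nuclei AreaShape_Area):
#
# CREATE TABLE MyExpt_Per_Image (
# ImageNumber INTEGER,
# Image_Count_Nuclei INTEGER,
# Mean_Nuclei_AreaShape_Area FLOAT,
# PRIMARY KEY (ImageNumber) )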
def get_create_object_table_statement(self, object_name, pipeline,
image_set_list):
'''Get the "CREATE TABLE" statement for the given object table
object_name - None = PerObject, otherwise a specific table
'''
if object_name == None:
object_table = self.get_table_name(cpmeas.OBJECT)
else:
object_table = self.get_table_name(object_name)
statement = 'CREATE TABLE '+object_table+' (\n'
statement += '%s INTEGER\n'%C_IMAGE_NUMBER
if object_name == None:
statement += ',%s INTEGER'%C_OBJECT_NUMBER
object_pk = C_OBJECT_NUMBER
else:
object_pk = "_".join((object_name,M_NUMBER_OBJECT_NUMBER))
column_defs = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
mappings = self.get_column_name_mappings(pipeline, image_set_list)
if object_name is None:
ob_tables = self.get_object_names(pipeline, image_set_list)
else:
ob_tables = [object_name]
for ob_table in ob_tables:
for column_def in column_defs:
obname, feature, ftype = column_def[:3]
if obname==ob_table and not self.ignore_feature(obname, feature):
feature_name = '%s_%s'%(obname, feature)
statement += ',\n%s %s'%(mappings[feature_name], ftype)
statement += ',\nPRIMARY KEY (%s, %s) )' %(C_IMAGE_NUMBER, object_pk)
return statement
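# Illustrative per-object form (assumes a "MyExpt_" prefix and
# object_name = 'Nuclei'; the object number is one of the feature
# columns rather than a separate ObjectNumber column):
#
# CREATE TABLE MyExpt_Per_Nuclei (
# ImageNumber INTEGER,
# Nuclei_Number_Object_Number INTEGER,
# Nuclei_AreaShape_Area FLOAT,
# PRIMARY KEY (ImageNumber, Nuclei_Number_Object_Number) )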
def get_create_object_view_statement(self, object_names, pipeline,
image_set_list):
'''Get the "CREATE VIEW" statement for the given object view
object_names is the list of objects to be included into the view
'''
object_table = self.get_table_name(cpmeas.OBJECT)
# Produce a list of columns from each of the separate tables
list_of_columns = []
all_objects = dict(zip(object_names,[self.get_table_name(object_name) for object_name in object_names]))
column_defs = self.get_pipeline_measurement_columns(pipeline,image_set_list)
mappings = self.get_column_name_mappings(pipeline, image_set_list)
for (current_object,current_table) in all_objects.iteritems():
list_of_columns.append([])
for column_def in column_defs:
obname, feature, ftype = column_def[:3]
if obname == current_object and not self.ignore_feature(obname, feature):
feature_name = '%s_%s'%(obname, feature)
list_of_columns[-1] += [mappings[feature_name]]
all_columns = sum(list_of_columns,[])
selected_object = object_names[0]
all_columns = ["%s.%s"%(all_objects[selected_object],C_IMAGE_NUMBER),"%s_%s AS %s"%(selected_object, M_NUMBER_OBJECT_NUMBER, C_OBJECT_NUMBER)] + all_columns
# Create the new view
statement = "CREATE OR REPLACE VIEW " if self.db_type==DB_MYSQL else "CREATE VIEW "
statement += "%s AS SELECT %s FROM %s"%(object_table,",".join(all_columns), all_objects[selected_object])
object_table_pairs = all_objects.items()
object_table_pairs = [x for x in object_table_pairs if x[0] != selected_object]
for (current_object,current_table) in object_table_pairs:
statement = " ".join((statement,"INNER JOIN %s ON"%current_table,\
" AND ".join(("%s.%s = %s.%s"%(all_objects[selected_object], C_IMAGE_NUMBER, current_table, C_IMAGE_NUMBER),
"%s.%s_%s = %s.%s_%s"%(all_objects[selected_object], selected_object, M_NUMBER_OBJECT_NUMBER,
current_table, current_object, M_NUMBER_OBJECT_NUMBER)))))
return statement
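# Illustrative result for object_names = ['Nuclei', 'Cells'] on MySQL
# (table and column names assume the usual constants):
#
# CREATE OR REPLACE VIEW MyExpt_Per_Object AS
# SELECT MyExpt_Per_Nuclei.ImageNumber,
# Nuclei_Number_Object_Number AS ObjectNumber, <object columns>
# FROM MyExpt_Per_Nuclei INNER JOIN MyExpt_Per_Cells
# ON MyExpt_Per_Nuclei.ImageNumber = MyExpt_Per_Cells.ImageNumber
# AND MyExpt_Per_Nuclei.Nuclei_Number_Object_Number =
# MyExpt_Per_Cells.Cells_Number_Object_Number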
def get_create_relationships_table_statements(self, pipeline):
"""Get the statements to create the relationships table
Returns a list of statements to execute.
"""
statements = []
#
# View name + drop view if appropriate
#
relationship_view_name = self.get_table_name(V_RELATIONSHIPS)
statements.append(
"DROP VIEW IF EXISTS %s" % relationship_view_name)
#
# Table names + drop table if appropriate
#
relationship_type_table_name = self.get_table_name(T_RELATIONSHIP_TYPES)
relationship_table_name = self.get_table_name(T_RELATIONSHIPS)
statements += [
"DROP TABLE IF EXISTS %s" % x for x in
relationship_table_name, relationship_type_table_name]
#
# The relationship type table has the module #, relationship name
# and object names of every relationship reported by
# pipeline.get_relationship_columns()
#
columns = [COL_RELATIONSHIP_TYPE_ID, COL_MODULE_NUMBER,
COL_RELATIONSHIP, COL_OBJECT_NAME1, COL_OBJECT_NAME2]
types = ["integer primary key", "integer", "varchar(255)",
"varchar(255)", "varchar(255)"]
rtt_unique_name = self.get_table_name(CONSTRAINT_RT_UNIQUE)
statement = "CREATE TABLE %s " % relationship_type_table_name
statement += "(" + ", ".join(["%s %s" % (c, t)
for c, t in zip(columns, types)])
statement += ", CONSTRAINT %s UNIQUE ( " % rtt_unique_name
statement += ", ".join(columns) + " ))"
statements.append(statement)
#
# Create a row in this table for each relationship
#
d = self.get_dictionary()
if T_RELATIONSHIP_TYPES not in d:
d[T_RELATIONSHIP_TYPES] = {}
rd = d[T_RELATIONSHIP_TYPES]
for i, (module_num, relationship, o1, o2, when) in \
enumerate(pipeline.get_object_relationships()):
relationship_type_id = i+1
statement = "INSERT INTO %s " % relationship_type_table_name
statement += "( "+", ".join(columns) + ") "
statement += "VALUES(%d, %d, '%s', '%s', '%s')" % (
relationship_type_id, module_num, relationship, o1, o2)
statements.append(statement)
rd[module_num, relationship, o1, o2] = relationship_type_id
#
# Create the relationships table
#
columns = [ COL_RELATIONSHIP_TYPE_ID,
COL_IMAGE_NUMBER1, COL_OBJECT_NUMBER1,
COL_IMAGE_NUMBER2, COL_OBJECT_NUMBER2 ]
statement = "CREATE TABLE %s " % relationship_table_name
statement += "( " + ", ".join(["%s integer" % c for c in columns])
statement += " ,CONSTRAINT %s FOREIGN KEY ( %s ) " % (
self.get_table_name(FK_RELATIONSHIP_TYPE_ID),
COL_RELATIONSHIP_TYPE_ID)
statement += " REFERENCES %s ( %s )" % (
relationship_type_table_name, COL_RELATIONSHIP_TYPE_ID)
statement += " ,CONSTRAINT %s UNIQUE" % self.get_table_name(
CONSTRAINT_R_UNIQUE)
statement += " ( " + ", ".join(columns) + " ))"
statements.append(statement)
#
# Create indexes for both the first and second objects
#
for index_name, image_column, object_column in (
(I_RELATIONSHIPS1, COL_IMAGE_NUMBER1, COL_OBJECT_NUMBER1),
(I_RELATIONSHIPS2, COL_IMAGE_NUMBER2, COL_OBJECT_NUMBER2)):
statement = "CREATE INDEX %s ON %s ( %s, %s, %s )" % (
self.get_table_name(index_name),
relationship_table_name, image_column, object_column,
COL_RELATIONSHIP_TYPE_ID)
statements.append(statement)
#
# Create the relationship view
#
statement = "CREATE VIEW %s AS SELECT " % relationship_view_name
statement += ", ".join([
"T.%s" % col for col in (
COL_MODULE_NUMBER, COL_RELATIONSHIP,
COL_OBJECT_NAME1, COL_OBJECT_NAME2)]) + ", "
statement += ", ".join([
"R.%s" % col for col in (
COL_IMAGE_NUMBER1, COL_OBJECT_NUMBER1,
COL_IMAGE_NUMBER2, COL_OBJECT_NUMBER2)])
statement += " FROM %s T JOIN %s R ON " % (
relationship_type_table_name, relationship_table_name)
statement += " T.%s = R.%s" % (
COL_RELATIONSHIP_TYPE_ID, COL_RELATIONSHIP_TYPE_ID)
statements.append(statement)
return statements
def get_relationship_type_id(self, workspace, module_num, relationship,
object_name1, object_name2):
'''Get the relationship_type_id for the given relationship
workspace - the analysis workspace
module_num - the module number of the module that generated the
record
relationship - the name of the relationship
object_name1 - the name of the first object in the relationship
object_name2 - the name of the second object in the relationship
Returns the relationship_type_id that joins to the relationship
type record in the relationship types table.
NOTE: this should not be called for CSV databases.
'''
assert self.db_type != DB_MYSQL_CSV
d = self.get_dictionary()
if T_RELATIONSHIP_TYPES not in d:
if self.db_type == DB_SQLITE:
try:
json_result = workspace.interaction_request(
self, self.INTERACTION_GET_RELATIONSHIP_TYPES)
except workspace.NoInteractionException:
# Assume headless and call as if through ZMQ
json_result = \
self.handle_interaction_get_relationship_types()
d[T_RELATIONSHIP_TYPES] = \
self.grt_interaction_to_dict(json_result)
else:
d[T_RELATIONSHIP_TYPES] = \
self.get_relationship_types(self.cursor)
rd = d[T_RELATIONSHIP_TYPES]
key = (module_num, relationship, object_name1, object_name2)
if key not in rd:
if self.db_type == DB_SQLITE:
try:
rd[key] = workspace.interaction_request(
self, self.INTERACTION_ADD_RELATIONSHIP_TYPE, *key)
except workspace.NoInteractionException:
rd[key] = \
self.handle_interaction_add_relationship_type(*key)
else:
rd[key] = self.add_relationship_type(
module_num, relationship, object_name1, object_name2,
self.cursor)
return rd[key]
def write_mysql_table_defs(self, workspace):
"""Write the table definitions to the SETUP.SQL file
The column order here is the same as in get_pipeline_measurement_columns
with the aggregates following the regular image columns.
"""
pipeline = workspace.pipeline
image_set_list = workspace.image_set_list
measurements = workspace.measurements
m_cols = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
mappings = self.get_column_name_mappings(pipeline, image_set_list)
file_name_width, path_name_width = self.get_file_path_width(workspace)
metadata_name_width = 128
file_name = "%sSETUP.SQL"%(self.sql_file_prefix)
path_name = self.make_full_filename(file_name,workspace)
fid = open(path_name,"wt")
fid.write("CREATE DATABASE IF NOT EXISTS %s;\n"%(self.db_name.value))
fid.write("USE %s;\n"%(self.db_name.value))
fid.write(self.get_create_image_table_statement(pipeline,
image_set_list) + ";\n")
#
# Write out the per-object table
#
if self.objects_choice != O_NONE:
if self.separate_object_tables == OT_COMBINE:
data = [(None, cpmeas.OBJECT)]
else:
data = [ (x, x) for x in self.get_object_names(
pipeline, image_set_list)]
for gcot_name, object_name in data:
fid.write(self.get_create_object_table_statement(
gcot_name, pipeline, image_set_list) + ";\n")
else:
data = []
for statement in self.get_experiment_table_statements(workspace):
fid.write(statement + ";\n")
fid.write("""
LOAD DATA LOCAL INFILE '%s_%s.CSV' REPLACE INTO TABLE %s
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\\\';
""" %
(self.base_name(workspace), cpmeas.IMAGE, self.get_table_name(cpmeas.IMAGE)))
for gcot_name, object_name in data:
fid.write("""
LOAD DATA LOCAL INFILE '%s_%s.CSV' REPLACE INTO TABLE %s
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\\\';
""" % (self.base_name(workspace), object_name,
self.get_table_name(object_name)))
if self.objects_choice != O_NONE and self.separate_object_tables == OT_VIEW:
fid.write("\n" + self.get_create_object_view_statement(
[object_name for gcot_name, object_name in data], pipeline, image_set_list) + ";\n")
if self.wants_relationship_table:
for statement in self.get_create_relationships_table_statements(pipeline):
fid.write(statement + ";\n")
fid.write("""
LOAD DATA LOCAL INFILE '%s_%s.CSV' REPLACE INTO TABLE %s
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\\\';
""" % (self.base_name(workspace), T_RELATIONSHIPS,
self.get_table_name(T_RELATIONSHIPS)))
if self.wants_well_tables:
self.write_mysql_table_per_well(
workspace.pipeline, workspace.image_set_list, fid)
fid.close()
def write_mysql_table_per_well(self, pipeline, image_set_list, fid=None):
'''Write SQL statements to generate a per-well table
pipeline - the pipeline being run (to get feature names)
image_set_list - the image set list, used to look up cached column data
fid - file handle of file to write or None if statements
should be written to a separate file.
'''
if fid is None:
file_name = "%s_Per_Well_SETUP.SQL"%(self.sql_file_prefix)
path_name = self.make_full_filename(file_name)
fid = open(path_name,"wt")
needs_close = True
else:
needs_close = False
fid.write("USE %s;\n"%(self.db_name.value))
table_prefix = self.get_table_prefix()
#
# Do in two passes. Pass # 1 makes the column name mappings for the
# well table. Pass # 2 writes the SQL
#
mappings = self.get_column_name_mappings(pipeline, image_set_list)
object_names = self.get_object_names(pipeline, image_set_list)
columns = self.get_pipeline_measurement_columns(pipeline, image_set_list)
for aggname in self.agg_well_names:
well_mappings = ColumnNameMapping()
for do_mapping, do_write in ((True, False),(False, True)):
if do_write:
fid.write("CREATE TABLE %sPer_Well_%s AS SELECT " %
(self.get_table_prefix(), aggname))
for i, object_name in enumerate(object_names + [cpmeas.IMAGE]):
if object_name == cpmeas.IMAGE:
object_table_name = "IT"
elif self.separate_object_tables == OT_COMBINE:
object_table_name = "OT"
else:
object_table_name = "OT%d" % (i+1)
for column in columns:
column_object_name, feature, data_type = column[:3]
if column_object_name != object_name:
continue
if self.ignore_feature(object_name, feature):
continue
#
# Don't take an aggregate on a string column
#
if data_type.startswith(cpmeas.COLTYPE_VARCHAR):
continue
feature_name = "%s_%s"%(object_name,feature)
colname = mappings[feature_name]
well_colname = "%s_%s" % (aggname, colname)
if do_mapping:
well_mappings.add(well_colname)
if do_write:
fid.write("%s(%s.%s) as %s,\n" %
(aggname, object_table_name, colname,
well_mappings[well_colname]))
fid.write("IT.Image_Metadata_Plate, IT.Image_Metadata_Well "
"FROM %sPer_Image IT\n" % table_prefix)
if len(object_names) == 0:
pass
elif self.separate_object_tables == OT_COMBINE:
fid.write("JOIN %s OT ON IT.%s = OT.%s\n" %
(self.get_table_name(cpmeas.OBJECT),C_IMAGE_NUMBER,C_IMAGE_NUMBER))
elif len(object_names) == 1:
fid.write("JOIN %s OT1 ON IT.%s = OT1.%s\n" %
(self.get_table_name(object_names[0]),C_IMAGE_NUMBER,C_IMAGE_NUMBER))
else:
#
# We make up a table here that lists all of the possible
# image and object numbers from any of the object tables.
# We need this to do something other than a cartesian join
# between object tables.
#
fid.write(
"RIGHT JOIN (SELECT DISTINCT %s, %s FROM\n"%(C_IMAGE_NUMBER, C_OBJECT_NUMBER))
fid.write("(SELECT %s, %s_%s as %s FROM %s\n" %
(C_IMAGE_NUMBER, object_names[0], M_NUMBER_OBJECT_NUMBER, C_OBJECT_NUMBER,
self.get_table_name(object_names[0])))
for object_name in object_names[1:]:
fid.write("UNION SELECT %s, %s_%s as %s "
"FROM %s\n" %
(C_IMAGE_NUMBER, object_name, M_NUMBER_OBJECT_NUMBER, C_OBJECT_NUMBER,
self.get_table_name(object_name)))
fid.write(") N_INNER) N ON IT.%s = N.%s\n"%(C_IMAGE_NUMBER, C_IMAGE_NUMBER))
for i, object_name in enumerate(object_names):
fid.write("LEFT JOIN %s OT%d " %
(self.get_table_name(object_name), i+1))
fid.write("ON N.%s = OT%d.%s " % (C_IMAGE_NUMBER, i+1, C_IMAGE_NUMBER))
fid.write("AND N.%s = OT%d.%s_%s\n" %
(C_OBJECT_NUMBER, i+1, object_name, M_NUMBER_OBJECT_NUMBER))
fid.write("GROUP BY IT.Image_Metadata_Plate, "
"IT.Image_Metadata_Well;\n\n""")
if needs_close:
fid.close()
def write_oracle_table_defs(self, workspace):
raise NotImplementedError("Writing to an Oracle database is not yet supported")
def base_name(self,workspace):
"""The base for the output file name"""
m = workspace.measurements
first = m.image_set_start_number
last = m.image_set_number
return '%s%d_%d'%(self.sql_file_prefix, first, last)
def write_csv_data(self, workspace):
"""Write the data in the measurements out to the csv files
workspace - contains the measurements
"""
if self.show_window:
disp_header = ['Table','Filename']
disp_columns = []
zeros_for_nan = False
measurements = workspace.measurements
pipeline = workspace.pipeline
image_set_list = workspace.image_set_list
image_filename = self.make_full_filename('%s_%s.CSV'%(self.base_name(workspace),cpmeas.IMAGE),workspace)
fid_per_image = open(image_filename,"wb")
columns = self.get_pipeline_measurement_columns(pipeline,
image_set_list, remove_postgroup_key = True)
agg_columns = self.get_aggregate_columns(pipeline, image_set_list)
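        # Each per-image row written below is: ImageNumber, the per-image
        # measurements in column order, then the aggregate object
        # measurements (e.g. Mean/Median/StDev) listed in agg_columns.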
for image_number in measurements.get_image_numbers():
image_row = []
image_row.append(image_number)
for object_name, feature, coltype in columns:
if object_name != cpmeas.IMAGE:
continue
if self.ignore_feature(object_name, feature, measurements):
continue
feature_name = "%s_%s" % (object_name,feature)
if not measurements.has_feature(cpmeas.IMAGE, feature):
value = np.NaN
else:
value = measurements.get_measurement(
cpmeas.IMAGE, feature, image_number)
if isinstance(value, np.ndarray):
value = value[0]
if coltype.startswith(cpmeas.COLTYPE_VARCHAR):
if isinstance(value, str) or isinstance(value, unicode):
value = '"'+MySQLdb.escape_string(value)+'"'
elif value is None:
value = "NULL"
else:
value = '"'+MySQLdb.escape_string(value)+'"'
elif np.isnan(value) or np.isinf(value):
value = "NULL"
image_row.append(value)
#
# Add the aggregate measurements
#
agg_dict = measurements.compute_aggregate_measurements(
image_number, self.agg_names)
image_row += [agg_dict[col[3]] for col in agg_columns]
fid_per_image.write(','.join([str(x) for x in image_row])+"\n")
fid_per_image.close()
#
# Object tables
#
object_names = self.get_object_names(pipeline, image_set_list)
if len(object_names) == 0:
return
if self.separate_object_tables == OT_COMBINE:
data = [(cpmeas.OBJECT, object_names)]
else:
data = [(object_name, [object_name])
for object_name in object_names]
for file_object_name, object_list in data:
file_name = "%s_%s.CSV" % (self.base_name(workspace),
file_object_name)
file_name = self.make_full_filename(file_name)
fid = open(file_name, "wb")
csv_writer = csv.writer(fid, lineterminator='\n')
for image_number in measurements.get_image_numbers():
max_count = 0
for object_name in object_list:
count = measurements.get_measurement(
cpmeas.IMAGE, "Count_%s" % object_name, image_number)
max_count = max(max_count, int(count))
d = {}
for j in range(max_count):
object_row = [image_number]
if file_object_name == cpmeas.OBJECT:
# the object number
object_row.append(j+1)
#
# Write out in same order as in the column definition
for object_name in object_list:
for object_name_to_check, feature, coltype in columns:
if object_name_to_check != object_name:
continue
key = (object_name, feature)
if key not in d:
if not measurements.has_feature(
object_name, feature):
values = None
else:
values = measurements.get_measurement(
object_name, feature, image_number)
d[key] = values
else:
values = d[key]
if (values is None or len(values) <= j or
np.isnan(values[j]) or np.isinf(values[j])):
value = "NULL"
else:
value = values[j]
object_row.append(value)
csv_writer.writerow(object_row)
fid.close()
if self.show_window:
disp_columns.append((file_object_name,'Wrote %s'%file_name))
#
# Relationships table
#
# Note that the code here assumes that pipeline.get_object_relationships
# returns the rows in the same order every time it's called.
#
if self.wants_relationship_table:
file_name = "%s_%s.CSV" % (
self.base_name(workspace), T_RELATIONSHIPS)
file_name = self.make_full_filename(file_name)
with open(file_name, "wb") as fid:
csv_writer = csv.writer(fid, lineterminator='\n')
for i, (module_num, relationship,
object_number1, object_number2, when) \
in enumerate(pipeline.get_object_relationships()):
relationship_type_id = i+1
r = measurements.get_relationships(
module_num, relationship,
object_number1, object_number2)
for i1, o1, i2, o2 in r:
csv_writer.writerow((
relationship_type_id, i1, o1, i2, o2))
if self.show_window:
disp_columns.append((T_RELATIONSHIPS,'Wrote %s'%file_name))
if self.show_window:
workspace.display_data.header = disp_header
workspace.display_data.columns = disp_columns
@staticmethod
def should_write(column, post_group):
'''Determine if a column should be written in run or post_group
column - 3 or 4 tuple column from get_measurement_columns
post_group - True if in post_group, false if in run
returns True if column should be written
'''
if len(column) == 3:
return not post_group
if not hasattr(column[3], "has_key"):
return not post_group
if not column[3].has_key(cpmeas.MCA_AVAILABLE_POST_GROUP):
return not post_group
return (post_group if column[3][cpmeas.MCA_AVAILABLE_POST_GROUP]
else not post_group)
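    # Illustrative example of should_write (column values assumed, not taken
    # from this module): a 4-tuple column whose attribute dictionary sets
    # MCA_AVAILABLE_POST_GROUP is written only in the post-group pass:
    #   col = ("Nuclei", "Math_Ratio", cpmeas.COLTYPE_FLOAT,
    #          {cpmeas.MCA_AVAILABLE_POST_GROUP: True})
    #   should_write(col, post_group=True)   # -> True
    #   should_write(col, post_group=False)  # -> False
    # whereas a plain 3-tuple column is always written during the run pass.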
def write_data_to_db(self, workspace,
post_group = False,
image_number = None):
"""Write the data in the measurements out to the database
workspace - contains the measurements
mappings - map a feature name to a column name
image_number - image number for primary database key. Defaults to current.
"""
if self.show_window:
disp_header = ["Table", "Statement"]
disp_columns = []
try:
zeros_for_nan = False
measurements = workspace.measurements
assert isinstance(measurements, cpmeas.Measurements)
pipeline = workspace.pipeline
image_set_list = workspace.image_set_list
measurement_cols = self.get_pipeline_measurement_columns(pipeline,
image_set_list)
mapping = self.get_column_name_mappings(pipeline, image_set_list)
###########################################
#
# The experiment table
#
# Update the modification timestamp. This
# has a side-effect of synchronizing (and blocking
# writes to the database for transactional DB engines)
# by taking a lock on the single row of
# the experiment table
###########################################
stmt = "UPDATE %s SET %s='%s'" %\
(self.get_table_name(cpmeas.EXPERIMENT),
M_MODIFICATION_TIMESTAMP,
datetime.datetime.now().isoformat())
execute(self.cursor, stmt, return_result=False)
###########################################
#
# The image table
#
###########################################
if image_number is None:
image_number = measurements.image_set_number
image_row = []
if not post_group:
image_row += [(image_number, cpmeas.COLTYPE_INTEGER, C_IMAGE_NUMBER)]
feature_names = set(measurements.get_feature_names(cpmeas.IMAGE))
for m_col in measurement_cols:
if m_col[0] != cpmeas.IMAGE:
continue
if not self.should_write(m_col, post_group):
continue
#
# Skip if feature name not in measurements. This
                # can happen if an image set gets aborted or for some legacy
# measurement files.
#
if m_col[1] not in feature_names:
continue
feature_name = "%s_%s"%(cpmeas.IMAGE, m_col[1])
value = measurements.get_measurement(
cpmeas.IMAGE, m_col[1], image_number)
if isinstance(value, np.ndarray):
value=value[0]
if isinstance(value, float) and not np.isfinite(value) and zeros_for_nan:
value = 0
image_row.append((value, m_col[2], feature_name))
#
# Aggregates for the image table
#
agg_dict = measurements.compute_aggregate_measurements(
image_number, self.agg_names)
agg_columns = self.get_aggregate_columns(pipeline, image_set_list,
post_group)
image_row += [(agg_dict[agg[3]],
cpmeas.COLTYPE_FLOAT,
agg[3])
for agg in agg_columns]
#
# Delete any prior data for this image
#
# Useful if you rerun a partially-complete batch
#
if not post_group:
stmt = ('DELETE FROM %s WHERE %s=%d'%
(self.get_table_name(cpmeas.IMAGE),
C_IMAGE_NUMBER,
image_number))
execute(self.cursor, stmt, return_result=False)
#
# Delete relationships as well.
#
if self.wants_relationship_table:
for col in (COL_IMAGE_NUMBER1, COL_IMAGE_NUMBER2):
stmt = 'DELETE FROM %s WHERE %s=%d' % (
self.get_table_name(T_RELATIONSHIPS), col,
image_number)
execute(self.cursor, stmt, return_result=False)
########################################
#
# Object tables
#
########################################
object_names = self.get_object_names(pipeline, image_set_list)
if len(object_names) > 0:
if self.separate_object_tables == OT_COMBINE:
data = [(cpmeas.OBJECT, object_names)]
else:
data = [(object_name, [object_name])
for object_name in object_names]
for table_object_name, object_list in data:
table_name = self.get_table_name(table_object_name)
columns = [column for column in measurement_cols
if column[0] in object_list
and self.should_write(column, post_group)]
if post_group and len(columns) == 0:
continue
max_count = 0
for object_name in object_list:
ftr_count = "Count_%s" % object_name
count = measurements.get_measurement(
cpmeas.IMAGE, ftr_count, image_number)
max_count = max(max_count, int(count))
column_values = []
for column in columns:
object_name, feature, coltype = column[:3]
values = measurements.get_measurement(
object_name, feature, image_number)
if len(values) < max_count:
values = list(values) + [None] * (max_count - len(values))
values = [
None if v is None or np.isnan(v) or np.isinf(v)
else str(v)
for v in values]
column_values.append(values)
object_cols = []
if not post_group:
object_cols += [C_IMAGE_NUMBER]
if table_object_name == cpmeas.OBJECT:
object_number_column = C_OBJECT_NUMBER
if not post_group:
object_cols += [object_number_column]
object_numbers = np.arange(1, max_count+1)
else:
object_number_column = "_".join((object_name, M_NUMBER_OBJECT_NUMBER))
object_numbers = measurements.get_measurement(
object_name, M_NUMBER_OBJECT_NUMBER, image_number)
object_cols += [mapping["%s_%s" % (column[0], column[1])]
for column in columns]
object_rows = []
for j in range(max_count):
if not post_group:
object_row = [image_number]
if table_object_name == cpmeas.OBJECT:
# the object number
object_row.append(object_numbers[j])
else:
object_row = []
for column, values in zip(columns, column_values):
object_name, feature, coltype = column[:3]
object_row.append(values[j])
if post_group:
object_row.append(object_numbers[j])
object_rows.append(object_row)
#
# Delete any prior data for this image
#
if not post_group:
stmt = ('DELETE FROM %s WHERE %s=%d'%
(table_name, C_IMAGE_NUMBER, image_number))
execute(self.cursor, stmt, return_result=False)
#
# Write the object table data
#
stmt = ('INSERT INTO %s (%s) VALUES (%s)'%
(table_name,
','.join(object_cols),
','.join(['%s']*len(object_cols))))
else:
stmt = (
('UPDATE %s SET\n' % table_name) +
(',\n'.join([" %s=%%s" % c for c in object_cols])) +
('\nWHERE %s = %d' % (C_IMAGE_NUMBER, image_number)) +
('\nAND %s = %%s' % object_number_column))
if self.db_type == DB_MYSQL:
# Write 25 rows at a time (to get under the max_allowed_packet limit)
for i in range(0,len(object_rows), 25):
my_rows = object_rows[i:min(i+25, len(object_rows))]
self.cursor.executemany(stmt, my_rows)
if self.show_window and len(object_rows) > 0:
disp_columns.append((table_name,self.truncate_string_for_display(stmt%tuple(my_rows[0]))))
else:
for row in object_rows:
row = [ 'NULL' if x is None else x for x in row]
row_stmt = stmt % tuple(row)
execute(self.cursor, row_stmt, return_result=False)
if self.show_window and len(object_rows) > 0:
disp_columns.append(
(table_name,
self.truncate_string_for_display(row_stmt)))
image_table = self.get_table_name(cpmeas.IMAGE)
replacement = '%s' if self.db_type == DB_MYSQL else "?"
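            # Coerce each image measurement to the type its column declares:
            # non-finite floats become None (NULL), BLOB columns are wrapped
            # in buffer() so the driver binds them as binary data, and
            # everything else is passed through unchanged.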
image_row_values = [
None if field[0] is None
else None if ((field[1] == cpmeas.COLTYPE_FLOAT) and
(np.isnan(field[0]) or np.isinf(field[0])))
else float(field[0]) if (field[1] == cpmeas.COLTYPE_FLOAT)
else int(field[0]) if (field[1] == cpmeas.COLTYPE_INTEGER)
else buffer(field[0])
if field[1] in (cpmeas.COLTYPE_BLOB, cpmeas.COLTYPE_LONGBLOB,
cpmeas.COLTYPE_MEDIUMBLOB)
else field[0] for field in image_row]
if len(image_row) > 0:
if not post_group:
stmt = (
'INSERT INTO %s (%s) VALUES (%s)' %
(image_table,
','.join([mapping[colname] for val, dtype, colname in image_row]),
','.join([replacement] * len(image_row))))
else:
stmt = (
('UPDATE %s SET\n' % image_table) +
',\n'.join([" %s = %s" % (mapping[colname], replacement)
for val, dtype, colname in image_row]) +
('\nWHERE %s = %d' % (C_IMAGE_NUMBER, image_number)))
execute(self.cursor, stmt, image_row_values, return_result=False)
if self.show_window:
disp_columns.append((image_table,self.truncate_string_for_display(
stmt+" VALUES(%s)"%','.join(map(str,image_row_values))) if len(image_row) > 0 else ''))
if self.wants_relationship_table:
#
# Relationships table - for SQLite, check for previous existence
# but for MySQL use REPLACE INTO to do the same
#
rtbl_name = self.get_table_name(T_RELATIONSHIPS)
columns = [COL_RELATIONSHIP_TYPE_ID,
COL_IMAGE_NUMBER1, COL_OBJECT_NUMBER1,
COL_IMAGE_NUMBER2, COL_OBJECT_NUMBER2]
if self.db_type == DB_SQLITE:
stmt = "INSERT INTO %s (%s, %s, %s, %s, %s) " % \
tuple([rtbl_name]+columns)
stmt += " SELECT %d, %d, %d, %d, %d WHERE NOT EXISTS "
stmt += "(SELECT 'x' FROM %s WHERE " % rtbl_name
stmt += " AND ".join(["%s = %%d" % col for col in columns]) + ")"
else:
stmt = "REPLACE INTO %s (%s, %s, %s, %s, %s) " % \
tuple([rtbl_name]+columns)
stmt += "VALUES (%s, %s, %s, %s, %s)"
for module_num, relationship, object_name1, object_name2, when\
in pipeline.get_object_relationships():
if post_group != (when == cpmeas.MCA_AVAILABLE_POST_GROUP):
continue
r = measurements.get_relationships(
module_num, relationship, object_name1, object_name2,
image_numbers = [image_number])
rt_id = self.get_relationship_type_id(
workspace, module_num, relationship, object_name1,
object_name2)
if self.db_type == DB_MYSQL:
# max_allowed_packet is 16 MB by default
# 8 x 10 = 80/row -> 200K rows
row_values = [(rt_id, i1, o1, i2, o2)
for i1, o1, i2, o2 in r]
self.cursor.executemany(stmt, row_values)
if self.show_window:
disp_columns.append((rtbl_name,self.truncate_string_for_display(stmt%tuple(row_values[0]))))
else:
for i1, o1, i2, o2 in r:
row = (
rt_id, i1, o1, i2, o2, rt_id, i1, o1, i2, o2)
row_stmt = stmt % tuple(row)
execute(self.cursor, row_stmt, return_result=False)
if self.show_window and len(r) > 0:
disp_columns.append((rtbl_name,self.truncate_string_for_display(row_stmt)))
if self.show_window:
workspace.display_data.header = disp_header
workspace.display_data.columns = disp_columns
self.connection.commit()
except:
logger.error("Failed to write measurements to database", exc_info=True)
self.connection.rollback()
raise
def truncate_string_for_display(self,s,field_size=100):
''' Any string with more than this # of characters will
be truncated using an ellipsis.
'''
if len(s) > field_size:
            half = int((field_size - 3) / 2)
s = s[:half] + "..." + s[-half:]
return s
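    # For example, truncate_string_for_display("x" * 300) keeps the first and
    # last 48 characters joined by "...", i.e. a 99-character display string.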
def display(self, workspace, figure):
figure.set_subplots((1, 1,))
if workspace.pipeline.test_mode:
figure.subplot_table(
0, 0, [["Data not written to database in test mode"]])
else:
figure.subplot_table(0, 0,
workspace.display_data.columns,
col_labels = workspace.display_data.header)
def display_post_run(self, workspace, figure):
figure.set_subplots((1, 1,))
figure.subplot_table(0, 0,
workspace.display_data.columns,
col_labels = workspace.display_data.header)
def write_post_run_measurements(self, workspace):
'''Write any experiment measurements marked as post-run'''
columns = workspace.pipeline.get_measurement_columns()
columns = filter(
(lambda c:
c[0] == cpmeas.EXPERIMENT and len(c) > 3 and
c[3].get(cpmeas.MCA_AVAILABLE_POST_RUN, False)), columns)
if len(columns) > 0:
statement = "UPDATE %s SET " % self.get_table_name(cpmeas.EXPERIMENT)
assignments = []
for column in columns:
if workspace.measurements.has_feature(
cpmeas.EXPERIMENT, column[1]):
value = workspace.measurements[cpmeas.EXPERIMENT, column[1]]
if value is not None:
assignments.append("%s='%s'" % (column[1], value))
if len(assignments) > 0:
statement += ",".join(assignments)
with DBContext(self) as (connection, cursor):
cursor.execute(statement)
connection.commit()
def write_properties_file(self, workspace):
"""Write the CellProfiler Analyst properties file"""
all_properties = self.get_property_file_text(workspace)
for properties in all_properties:
fid = open(properties.file_name, "wt")
fid.write(properties.text)
fid.close()
def get_property_file_text(self, workspace):
'''Get the text for all property files
workspace - the workspace from prepare_run
Returns a list of Property objects which describe each property file
The Property object has the following attributes:
* object_name - the name of the object: "Object" if combining all tables,
otherwise the name of the relevant object.
* file_name - save text in this file
* text - the text to save
* properties - a key / value dictionary of the properties
'''
class Properties(object):
def __init__(self, object_name, file_name, text):
self.object_name = object_name
self.file_name = file_name
self.text = text
self.properties = {}
for line in text.split("\n"):
line = line.strip()
if line.startswith("#") or line.find("=") == -1:
continue
k, v = [ x.strip() for x in line.split("=", 1)]
self.properties[k] = v
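        # For example (illustrative only), a properties text containing
        #   db_type = sqlite
        #   # a comment line
        #   check_tables = yes
        # parses to properties == {"db_type": "sqlite", "check_tables": "yes"}.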
result = []
#
# Get appropriate object names
#
if self.objects_choice != O_NONE:
if self.separate_object_tables == OT_COMBINE:
object_names = [ self.location_object.value ]
elif self.separate_object_tables == OT_PER_OBJECT:
if self.objects_choice == O_SELECT:
object_names = (self.objects_list.value).split(',')
else:
object_names = [
object_name
for object_name in workspace.measurements.get_object_names()
if (object_name != cpmeas.IMAGE) and
(not self.ignore_object(object_name))]
elif self.separate_object_tables == OT_VIEW:
object_names = [None]
else:
object_names = [None]
default_image_names = []
# Find all images that have FileName and PathName
image_features = [
c[1] for c in workspace.pipeline.get_measurement_columns()
if c[0] == cpmeas.IMAGE]
for feature in image_features:
match = re.match('^%s_(.+)$'%C_FILE_NAME,feature)
if match:
default_image_names.append(match.groups()[0])
if not self.properties_export_all_image_defaults:
# Extract the user-specified images
user_image_names = []
for group in self.image_groups:
user_image_names.append(group.image_cols.value)
if self.db_type==DB_SQLITE:
name = os.path.splitext(self.sqlite_file.value)[0]
else:
name = self.db_name.value
tbl_prefix = self.get_table_prefix()
        if tbl_prefix != "":
if tbl_prefix.endswith('_'): tbl_prefix = tbl_prefix[:-1]
name = "_".join((name, tbl_prefix))
tblname = name
date = datetime.datetime.now().ctime()
db_type = (self.db_type == DB_MYSQL and 'mysql') or (self.db_type == DB_SQLITE and 'sqlite') or 'oracle_not_supported'
db_port = (self.db_type == DB_MYSQL and 3306) or (self.db_type == DB_ORACLE and 1521) or ''
db_host = self.db_host
db_pwd = self.db_passwd
db_name = self.db_name
db_user = self.db_user
db_sqlite_file = (self.db_type == DB_SQLITE and
self.make_full_filename(self.sqlite_file.value) ) or ''
if self.db_type == DB_MYSQL or self.db_type == DB_ORACLE:
db_info = 'db_type = %(db_type)s\n'%(locals())
db_info += 'db_port = %(db_port)d\n'%(locals())
db_info += 'db_host = %(db_host)s\n'%(locals())
db_info += 'db_name = %(db_name)s\n'%(locals())
db_info += 'db_user = %(db_user)s\n'%(locals())
db_info += 'db_passwd = %(db_pwd)s'%(locals())
elif self.db_type == DB_SQLITE:
db_info = 'db_type = %(db_type)s\n'%(locals())
db_info += 'db_sqlite_file = %(db_sqlite_file)s'%(locals())
elif self.db_type == DB_MYSQL_CSV:
db_info = 'db_type = mysql\n'
db_info += 'db_port = \n'
db_info += 'db_host = \n'
db_info += 'db_name = %(db_name)s\n'%(locals())
db_info += 'db_user = \n'
db_info += 'db_passwd = '
spot_tables = '%sPer_Image'%(self.get_table_prefix())
for object_name in object_names:
if object_name:
if self.objects_choice != O_NONE:
if self.separate_object_tables == OT_COMBINE :
cell_tables = '%sPer_Object'%(self.get_table_prefix())
object_id = C_OBJECT_NUMBER
filename = '%s.properties'%(tblname)
properties_object_name = "Object"
object_count = 'Image_Count_%s'%(self.location_object.value)
cell_x_loc = '%s_Location_Center_X'%(self.location_object.value)
cell_y_loc = '%s_Location_Center_Y'%(self.location_object.value)
elif self.separate_object_tables == OT_PER_OBJECT:
cell_tables = '%sPer_%s'%(self.get_table_prefix(),object_name)
object_id = '%s_Number_Object_Number'%(object_name)
filename = '%s_%s.properties'%(tblname,object_name)
properties_object_name = object_name
object_count = 'Image_Count_%s'%(object_name)
cell_x_loc = '%s_Location_Center_X'%(object_name)
cell_y_loc = '%s_Location_Center_Y'%(object_name)
else:
                # If object_name is None, it's either per_image only or a view
if self.objects_choice == O_NONE:
cell_tables = ''
object_id = ''
filename = '%s.properties'%(tblname)
properties_object_name = object_name
object_count = ''
cell_x_loc = ''
cell_y_loc = ''
elif self.separate_object_tables == OT_VIEW:
cell_tables = '%sPer_Object'%(self.get_table_prefix())
object_id = C_OBJECT_NUMBER
filename = '%s.properties'%(tblname)
properties_object_name = "Object"
object_count = 'Image_Count_%s'%(self.location_object.value)
cell_x_loc = '%s_Location_Center_X'%(self.location_object.value)
cell_y_loc = '%s_Location_Center_Y'%(self.location_object.value)
file_name = self.make_full_filename(filename, workspace)
unique_id = C_IMAGE_NUMBER
image_thumbnail_cols = ','.join(
['%s_%s_%s'%(cpmeas.IMAGE, C_THUMBNAIL, name)
for name in self.thumbnail_image_names.get_selections()]) \
if self.want_image_thumbnails else ''
if self.properties_export_all_image_defaults:
image_file_cols = ','.join(['%s_%s_%s'%(cpmeas.IMAGE,C_FILE_NAME,name) for name in default_image_names])
image_path_cols = ','.join(['%s_%s_%s'%(cpmeas.IMAGE,C_PATH_NAME,name) for name in default_image_names])
# Provide default colors
if len(default_image_names) == 1:
image_channel_colors = 'gray,'
else:
image_channel_colors = 'red, green, blue, cyan, magenta, yellow, gray, '+('none, ' * 10)
num_images = len(default_image_names)+len(set([name for name in self.thumbnail_image_names.get_selections()]).difference(default_image_names)) if self.want_image_thumbnails else 0
image_channel_colors = ','.join(image_channel_colors.split(',')[:num_images])
image_names_csl = ','.join(default_image_names) # Convert to comma-separated list
if self.want_image_thumbnails:
selected_thumbs = [name for name in self.thumbnail_image_names.get_selections()]
thumb_names = [name for name in default_image_names if name in selected_thumbs] + [name for name in selected_thumbs if name not in default_image_names]
image_thumbnail_cols = ','.join(
['%s_%s_%s'%(cpmeas.IMAGE, C_THUMBNAIL, name)
for name in thumb_names])
else:
image_thumbnail_cols = ''
else:
# Extract user-specified image names and colors
                user_image_names = []
image_channel_colors = []
selected_image_names = []
for group in self.image_groups:
selected_image_names += [group.image_cols.value]
if group.wants_automatic_image_name:
user_image_names += [group.image_cols.value]
else:
user_image_names += [group.image_name.value]
image_channel_colors += [group.image_channel_colors.value]
image_file_cols = ','.join(['%s_%s_%s'%(cpmeas.IMAGE,C_FILE_NAME,name) for name in selected_image_names])
image_path_cols = ','.join(['%s_%s_%s'%(cpmeas.IMAGE,C_PATH_NAME,name) for name in selected_image_names])
# Try to match thumbnail order to selected image order
if self.want_image_thumbnails:
selected_thumbs = [name for name in self.thumbnail_image_names.get_selections()]
thumb_names = [name for name in selected_image_names if name in selected_thumbs] + [name for name in selected_thumbs if name not in selected_image_names]
image_thumbnail_cols = ','.join(
['%s_%s_%s'%(cpmeas.IMAGE, C_THUMBNAIL, name)
for name in thumb_names])
else:
image_thumbnail_cols = ''
selected_thumbs = []
# Convert to comma-separated list
image_channel_colors = ','.join(image_channel_colors + ['none']*len(set(selected_thumbs).difference(selected_image_names)))
image_names_csl = ','.join(user_image_names)
group_statements = ''
if self.properties_wants_groups:
for group in self.group_field_groups:
group_statements += 'group_SQL_' + group.group_name.value + ' = SELECT ' + group.group_statement.value + ' FROM ' + spot_tables + '\n'
filter_statements = ''
if self.properties_wants_filters:
if self.create_filters_for_plates:
plate_key = self.properties_plate_metadata.value
metadata_groups = workspace.measurements.group_by_metadata([plate_key])
for metadata_group in metadata_groups:
plate_text = re.sub("[^A-Za-z0-9_]",'_',metadata_group.get(plate_key)) # Replace any odd characters with underscores
filter_name = 'Plate_%s'%plate_text
filter_statements += 'filter_SQL_' + filter_name + ' = SELECT ImageNumber'\
' FROM ' + spot_tables + \
' WHERE Image_Metadata_%s' \
' = "%s"\n'%(plate_key, metadata_group.get(plate_key))
for group in self.filter_field_groups:
filter_statements += 'filter_SQL_' + group.filter_name.value + ' = SELECT ImageNumber'\
' FROM ' + spot_tables + \
' WHERE ' + group.filter_statement.value + '\n'
image_url = self.properties_image_url_prepend.value \
if self.wants_properties_image_url_prepend else ""
plate_type = "" if self.properties_plate_type.value == NONE_CHOICE else self.properties_plate_type.value
plate_id = "" if self.properties_plate_metadata.value == NONE_CHOICE else "%s_%s_%s"%(cpmeas.IMAGE, cpmeas.C_METADATA, self.properties_plate_metadata.value)
well_id = "" if self.properties_well_metadata.value == NONE_CHOICE else "%s_%s_%s"%(cpmeas.IMAGE, cpmeas.C_METADATA, self.properties_well_metadata.value)
class_table = self.get_table_prefix() + self.properties_class_table_name.value
contents = """#%(date)s
# ==============================================
#
# CellProfiler Analyst 2.0 properties file
#
# ==============================================
# ==== Database Info ====
%(db_info)s
# ==== Database Tables ====
image_table = %(spot_tables)s
object_table = %(cell_tables)s
# ==== Database Columns ====
# Specify the database column names that contain unique IDs for images and
# objects (and optionally tables).
#
# table_id (OPTIONAL): This field lets Classifier handle multiple tables if
# you merge them into one and add a table_number column as a foreign
# key to your per-image and per-object tables.
# image_id: must be a foreign key column between your per-image and per-object
# tables
# object_id: the object key column from your per-object table
image_id = %(unique_id)s
object_id = %(object_id)s
plate_id = %(plate_id)s
well_id = %(well_id)s
# Also specify the column names that contain X and Y coordinates for each
# object within an image.
cell_x_loc = %(cell_x_loc)s
cell_y_loc = %(cell_y_loc)s
# ==== Image Path and File Name Columns ====
# Classifier needs to know where to find the images from your experiment.
# Specify the column names from your per-image table that contain the image
# paths and file names here.
#
# Individual image files are expected to be monochromatic and represent a single
# channel. However, any number of images may be combined by adding a new channel
# path and filename column to the per-image table of your database and then
# adding those column names here.
#
# NOTE: These lists must have equal length!
image_path_cols = %(image_path_cols)s
image_file_cols = %(image_file_cols)s
# CPA will now read image thumbnails directly from the database, if chosen in ExportToDatabase.
image_thumbnail_cols = %(image_thumbnail_cols)s
# Give short names for each of the channels (respectively)...
image_names = %(image_names_csl)s
# Specify a default color for each of the channels (respectively)
# Valid colors are: [red, green, blue, magenta, cyan, yellow, gray, none]
image_channel_colors = %(image_channel_colors)s
# ==== Image Access Info ====
image_url_prepend = %(image_url)s
# ==== Dynamic Groups ====
# Here you can define groupings to choose from when classifier scores your experiment. (eg: per-well)
# This is OPTIONAL, you may leave "groups = ".
# FORMAT:
# group_XXX = MySQL select statement that returns image-keys and group-keys. This will be associated with the group name "XXX" from above.
# EXAMPLE GROUPS:
# groups = Well, Gene, Well+Gene,
# group_SQL_Well = SELECT Per_Image_Table.TableNumber, Per_Image_Table.ImageNumber, Per_Image_Table.well FROM Per_Image_Table
# group_SQL_Gene = SELECT Per_Image_Table.TableNumber, Per_Image_Table.ImageNumber, Well_ID_Table.gene FROM Per_Image_Table, Well_ID_Table WHERE Per_Image_Table.well=Well_ID_Table.well
# group_SQL_Well+Gene = SELECT Per_Image_Table.TableNumber, Per_Image_Table.ImageNumber, Well_ID_Table.well, Well_ID_Table.gene FROM Per_Image_Table, Well_ID_Table WHERE Per_Image_Table.well=Well_ID_Table.well
%(group_statements)s
# ==== Image Filters ====
# Here you can define image filters to let you select objects from a subset of your experiment when training the classifier.
# FORMAT:
# filter_SQL_XXX = MySQL select statement that returns image keys you wish to filter out. This will be associated with the filter name "XXX" from above.
# EXAMPLE FILTERS:
# filters = EMPTY, CDKs,
# filter_SQL_EMPTY = SELECT TableNumber, ImageNumber FROM CPA_per_image, Well_ID_Table WHERE CPA_per_image.well=Well_ID_Table.well AND Well_ID_Table.Gene="EMPTY"
# filter_SQL_CDKs = SELECT TableNumber, ImageNumber FROM CPA_per_image, Well_ID_Table WHERE CPA_per_image.well=Well_ID_Table.well AND Well_ID_Table.Gene REGEXP 'CDK.*'
%(filter_statements)s
# ==== Meta data ====
# What are your objects called?
# FORMAT:
# object_name = singular object name, plural object name,
object_name = cell, cells,
# What size plates were used? 96, 384 or 5600? This is for use in the PlateViewer. Leave blank if none
plate_type = %(plate_type)s
# ==== Excluded Columns ====
# OPTIONAL
# Classifier uses columns in your per_object table to find rules. It will
# automatically ignore ID columns defined in table_id, image_id, and object_id
# as well as any columns that contain non-numeric data.
#
# Here you may list other columns in your per_object table that you wish the
# classifier to ignore when finding rules.
#
# You may also use regular expressions here to match more general column names.
#
# Example: classifier_ignore_columns = WellID, Meta_.*, .*_Position
# This will ignore any column named "WellID", any columns that start with
# "Meta_", and any columns that end in "_Position".
#
# A more restrictive example:
# classifier_ignore_columns = ImageNumber, ObjectNumber, .*Parent.*, .*Children.*, .*_Location_Center_.*,.*_Metadata_.*
classifier_ignore_columns = table_number_key_column, image_number_key_column, object_number_key_column
# ==== Other ====
# Specify the approximate diameter of your objects in pixels here.
image_tile_size = 50
# ======== Auto Load Training Set ========
# OPTIONAL
# You may enter the full path to a training set that you would like Classifier
# to automatically load when started.
training_set =
# ======== Area Based Scoring ========
# OPTIONAL
# You may specify a column in your per-object table which will be summed and
# reported in place of object-counts when scoring. The typical use for this
# is to report the areas of objects on a per-image or per-group basis.
area_scoring_column =
# ======== Output Per-Object Classes ========
# OPTIONAL
# Here you can specify a MySQL table in your Database where you would like
# Classifier to write out class information for each object in the
# object_table
class_table = %(class_table)s
# ======== Check Tables ========
# OPTIONAL
# [yes/no] You can ask classifier to check your tables for anomalies such
# as orphaned objects or missing column indices. Default is on.
# This check is run when Classifier starts and may take up to a minute if
# your object_table is extremely large.
check_tables = yes
"""%(locals())
result.append(Properties(properties_object_name,
file_name,
contents))
return result
    def write_workspace_file(self, workspace):
        '''If requested, write a workspace file with selected measurements'''
        from cellprofiler.utilities.version import version_number
if self.db_type==DB_SQLITE:
name = os.path.splitext(self.sqlite_file.value)[0]
else:
name = self.db_name.value
tbl_prefix = self.get_table_prefix()
        if tbl_prefix != "":
if tbl_prefix.endswith('_'): tbl_prefix = tbl_prefix[:-1]
name = "_".join((name, tbl_prefix))
filename = '%s.workspace'%(name)
file_name = self.make_full_filename(filename,workspace)
fd = open(file_name,"wb")
header_text = """CellProfiler Analyst workflow
version: 1
CP version : %d\n""" % version_number
fd.write(header_text)
display_tool_text = ""
for workspace_group in self.workspace_measurement_groups:
display_tool = workspace_group.measurement_display.value
# A couple of tools are named a bit differently
if workspace_group.measurement_display.value == W_SCATTERPLOT:
display_tool = "Scatter"
elif workspace_group.measurement_display.value == W_DENSITYPLOT:
display_tool = "Density"
display_tool_text += """
%s"""%display_tool
axis_text = "x-axis" if workspace_group.measurement_display.value != W_PLATEVIEWER else "measurement"
if workspace_group.x_measurement_type.value == cpmeas.IMAGE:
axis_meas = "_".join((cpmeas.IMAGE, workspace_group.x_measurement_name.value))
elif workspace_group.x_measurement_type.value == cpmeas.OBJECT:
axis_meas = "_".join((workspace_group.x_object_name.value, workspace_group.x_measurement_name.value))
elif workspace_group.x_measurement_type.value == W_INDEX:
axis_meas = workspace_group.x_index_name.value
axis_table = "x-table" if workspace_group.measurement_display.value in (W_SCATTERPLOT, W_DENSITYPLOT) else "table"
table_name = self.get_table_name(cpmeas.OBJECT if workspace_group.x_measurement_type.value == cpmeas.OBJECT else cpmeas.IMAGE)
display_tool_text += """
\t%s: %s
\t%s: %s"""%(axis_text, axis_meas, axis_table, table_name)
if workspace_group.measurement_display.value in (W_SCATTERPLOT, W_DENSITYPLOT):
if workspace_group.y_measurement_type.value == cpmeas.IMAGE:
axis_meas = "_".join((cpmeas.IMAGE, workspace_group.y_measurement_name.value))
elif workspace_group.y_measurement_type.value == cpmeas.OBJECT:
axis_meas = "_".join((workspace_group.y_object_name.value, workspace_group.y_measurement_name.value))
elif workspace_group.y_measurement_type.value == W_INDEX:
axis_meas = workspace_group.y_index_name.value
table_name = self.get_table_name(cpmeas.OBJECT if workspace_group.y_measurement_type.value == cpmeas.OBJECT else cpmeas.IMAGE)
display_tool_text += """
\ty-axis: %s
\ty-table: %s"""%(axis_meas, table_name)
display_tool_text += "\n"
fd.write(display_tool_text)
fd.close()
def get_file_path_width(self, workspace):
"""Compute the file name and path name widths needed in table defs"""
m = workspace.measurements
#
# Find the length for the file name and path name fields
#
FileNameWidth = 128
PathNameWidth = 128
image_features = m.get_feature_names(cpmeas.IMAGE)
for feature in image_features:
if feature.startswith(C_FILE_NAME):
names = [name
for name in m.get_all_measurements(cpmeas.IMAGE,feature)
if name is not None]
if len(names) > 0:
FileNameWidth = max(FileNameWidth, np.max(map(len,names)))
elif feature.startswith(C_PATH_NAME):
names = [name
for name in m.get_all_measurements(cpmeas.IMAGE,feature)
if name is not None]
if len(names) > 0:
PathNameWidth = max(PathNameWidth, np.max(map(len,names)))
return FileNameWidth, PathNameWidth
def get_table_prefix(self):
if self.want_table_prefix.value:
return self.table_prefix.value
return ""
def get_table_name(self, object_name):
'''Return the table name associated with a given object
object_name - name of object or "Image", "Object" or "Well"
'''
return self.get_table_prefix()+'Per_'+object_name
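    # For example, with a table prefix of "MyExpt_" the per-image table is
    # "MyExpt_Per_Image" and the combined per-object table "MyExpt_Per_Object".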
def get_pipeline_measurement_columns(self, pipeline, image_set_list, remove_postgroup_key = False):
'''Get the measurement columns for this pipeline, possibly cached'''
d = self.get_dictionary(image_set_list)
if not d.has_key(D_MEASUREMENT_COLUMNS):
d[D_MEASUREMENT_COLUMNS] = pipeline.get_measurement_columns()
d[D_MEASUREMENT_COLUMNS] = self.filter_measurement_columns(
d[D_MEASUREMENT_COLUMNS])
if remove_postgroup_key:
d[D_MEASUREMENT_COLUMNS] = [x[:3] for x in d[D_MEASUREMENT_COLUMNS]]
return d[D_MEASUREMENT_COLUMNS]
def filter_measurement_columns(self, columns):
'''Filter out and properly sort measurement columns'''
columns = [x for x in columns
if not self.ignore_feature(x[0], x[1], True)]
#
# put Image ahead of any other object
# put Number_ObjectNumber ahead of any other column
#
def cmpfn(x, y):
if x[0] != y[0]:
if x[0] == cpmeas.IMAGE:
return -1
elif y[0] == cpmeas.IMAGE:
return 1
else:
return cmp(x[0], y[0])
if x[1] == M_NUMBER_OBJECT_NUMBER:
return -1
if y[1] == M_NUMBER_OBJECT_NUMBER:
return 1
return cmp(x[1], y[1])
columns.sort(cmp=cmpfn)
#
# Remove all but the last duplicate
#
duplicate = [
c0[0] == c1[0] and c0[1] == c1[1]
for c0, c1 in zip(columns[:-1],
columns[1:])] + [ False ]
columns = [x for x, y in zip(columns, duplicate) if not y]
return columns
def obfuscate(self):
'''Erase sensitive information about the database
This is run on a copy of the pipeline, so it's ok to erase info.
'''
self.db_host.value = ''.join(['*'] * len(self.db_host.value))
self.db_user.value = ''.join(['*'] * len(self.db_user.value))
self.db_name.value = ''.join(['*'] * len(self.db_name.value))
self.db_passwd.value = ''.join(['*'] * len(self.db_passwd.value))
def upgrade_settings(self,setting_values,variable_revision_number,
module_name, from_matlab):
DIR_DEFAULT_OUTPUT = "Default output folder"
DIR_DEFAULT_IMAGE = "Default input folder"
if from_matlab and variable_revision_number == 4:
setting_values = setting_values + [cps.NO]
variable_revision_number = 5
if from_matlab and variable_revision_number == 5:
if setting_values[-1] == cps.YES:
setting_values = setting_values[:-1] + ["Yes - V1.0 format"]
variable_revision_number = 6
if from_matlab and variable_revision_number == 6:
new_setting_values = [setting_values[0],setting_values[1]]
if setting_values[2] == cps.DO_NOT_USE:
new_setting_values.append(cps.NO)
new_setting_values.append("MyExpt_")
else:
new_setting_values.append(cps.YES)
new_setting_values.append(setting_values[2])
new_setting_values.append(setting_values[3])
if setting_values[4] == '.':
new_setting_values.append(cps.YES)
new_setting_values.append(setting_values[4])
else:
new_setting_values.append(cps.NO)
new_setting_values.append(setting_values[4])
if setting_values[5][:3]==cps.YES:
new_setting_values.append(cps.YES)
else:
new_setting_values.append(cps.NO)
from_matlab = False
variable_revision_number = 6
setting_values = new_setting_values
if from_matlab and variable_revision_number == 7:
#
# Added object names
#
setting_values = (setting_values[:-1] + [cpmeas.IMAGE] +
[cps.DO_NOT_USE] * 3 + setting_values[-1:])
variable_revision_number = 8
if from_matlab and variable_revision_number == 8:
#
# Added more object names
#
setting_values = (setting_values[:-1] +
[cps.DO_NOT_USE] * 3 + setting_values[-1:])
variable_revision_number = 9
if from_matlab and variable_revision_number == 9:
#
# Per-well export
#
setting_values = (setting_values[:-1] +
[cps.NO, cps.DO_NOT_USE, cps.DO_NOT_USE] +
setting_values[-1:])
variable_revision_number = 10
if from_matlab and variable_revision_number == 10:
new_setting_values = setting_values[0:2]
if setting_values[2] == cps.DO_NOT_USE:
new_setting_values.append(cps.NO)
new_setting_values.append("MyExpt_")
else:
new_setting_values.append(cps.YES)
new_setting_values.append(setting_values[2])
new_setting_values.append(setting_values[3])
if setting_values[4] == '.':
new_setting_values.append(cps.YES)
new_setting_values.append(setting_values[4])
else:
new_setting_values.append(cps.NO)
new_setting_values.append(setting_values[4])
if setting_values[18][:3]==cps.YES:
new_setting_values.append(cps.YES)
else:
new_setting_values.append(cps.NO)
#
# store_csvs
#
new_setting_values.append(cps.YES)
#
# DB host / user / password
#
new_setting_values += [ 'imgdb01','cpuser','password']
#
# SQLite file name
#
new_setting_values += [ 'DefaultDB.db' ]
#
# Aggregate mean, median & std dev
wants_mean = cps.NO
wants_std_dev = cps.NO
wants_median = cps.NO
for setting in setting_values[5:8]:
if setting == "Median":
wants_median = cps.YES
elif setting == "Mean":
wants_mean = cps.YES
elif setting == "Standard deviation":
wants_std_dev = cps.YES
new_setting_values += [wants_mean, wants_median, wants_std_dev]
#
# Object export
#
if setting_values[8] == "All objects":
new_setting_values += [ O_ALL, ""]
else:
objects_list = []
for setting in setting_values[8:15]:
if setting not in (cpmeas.IMAGE, cps.DO_NOT_USE):
objects_list.append(setting)
if len(objects_list) > 0:
new_setting_values += [ O_SELECT, ",".join(objects_list)]
else:
new_setting_values += [ O_NONE, ""]
setting_values = new_setting_values
from_matlab = False
variable_revision_number = 9
if (not from_matlab) and variable_revision_number == 6:
# Append default values for store_csvs, db_host, db_user,
# db_passwd, and sqlite_file to update to revision 7
setting_values += [False, 'imgdb01', 'cpuser', '', 'DefaultDB.db']
variable_revision_number = 7
if (not from_matlab) and variable_revision_number == 7:
# Added ability to selectively turn on aggregate measurements
# which were all automatically calculated in version 7
setting_values = setting_values + [True, True, True]
variable_revision_number = 8
if (not from_matlab) and variable_revision_number == 8:
# Made it possible to choose objects to save
#
setting_values += [ O_ALL, ""]
variable_revision_number = 9
if (not from_matlab) and variable_revision_number == 9:
# Added aggregate per well choices
#
setting_values = (setting_values[:-2] +
[False, False, False] +
setting_values[-2:])
variable_revision_number = 10
if (not from_matlab) and variable_revision_number == 10:
#
# Added a directory choice instead of a checkbox
#
if setting_values[5] == cps.NO or setting_values[6] == '.':
directory_choice = DIR_DEFAULT_OUTPUT
elif setting_values[6] == '&':
directory_choice = DIR_DEFAULT_IMAGE
else:
directory_choice = DIR_CUSTOM
setting_values = (setting_values[:5] + [directory_choice] +
setting_values[6:])
variable_revision_number = 11
if (not from_matlab) and variable_revision_number == 11:
#
# Added separate "database type" of CSV files and removed
# "store_csvs" setting
#
db_type = setting_values[0]
store_csvs = (setting_values[8] == cps.YES)
if db_type == DB_MYSQL and store_csvs:
db_type = DB_MYSQL_CSV
setting_values = ([ db_type ] + setting_values[1:8] +
setting_values[9:])
variable_revision_number = 12
if (not from_matlab) and variable_revision_number == 12:
#
# Added maximum column size
#
setting_values = setting_values + ["64"]
variable_revision_number = 13
if (not from_matlab) and variable_revision_number == 13:
#
# Added single/multiple table choice
#
setting_values = setting_values + [OT_COMBINE]
variable_revision_number = 14
if (not from_matlab) and variable_revision_number == 14:
#
# Combined directory_choice and output_folder into directory
#
dir_choice, custom_directory = setting_values[5:7]
if dir_choice in (DIR_CUSTOM, DIR_CUSTOM_WITH_METADATA):
if custom_directory.startswith('.'):
dir_choice = DEFAULT_OUTPUT_SUBFOLDER_NAME
elif custom_directory.startswith('&'):
dir_choice = DEFAULT_INPUT_SUBFOLDER_NAME
custom_directory = '.'+custom_directory[1:]
else:
dir_choice = ABSOLUTE_FOLDER_NAME
directory = cps.DirectoryPath.static_join_string(dir_choice,
custom_directory)
setting_values = (setting_values[:5] + [directory] +
setting_values[7:])
variable_revision_number = 15
setting_values = list(setting_values)
setting_values[OT_IDX] = OT_DICTIONARY.get(setting_values[OT_IDX],
setting_values[OT_IDX])
# Standardize input/output directory name references
SLOT_DIRCHOICE = 5
directory = setting_values[SLOT_DIRCHOICE]
directory = cps.DirectoryPath.upgrade_setting(directory)
setting_values[SLOT_DIRCHOICE] = directory
if (not from_matlab) and variable_revision_number == 15:
#
# Added 3 new args: url_prepend and thumbnail options
#
setting_values = setting_values + ["", cps.NO, ""]
variable_revision_number = 16
if (not from_matlab) and variable_revision_number == 16:
#
# Added binary choice for auto-scaling thumbnail intensities
#
setting_values = setting_values + [cps.NO]
variable_revision_number = 17
if (not from_matlab) and variable_revision_number == 17:
#
# Added choice for plate type in properties file
#
setting_values = setting_values + [NONE_CHOICE]
variable_revision_number = 18
if (not from_matlab) and variable_revision_number == 18:
#
# Added choices for plate and well metadata in properties file
#
setting_values = setting_values + [NONE_CHOICE, NONE_CHOICE]
variable_revision_number = 19
if (not from_matlab) and variable_revision_number == 19:
#
# Added configuration of image information, groups, filters in properties file
#
setting_values = setting_values + [cps.YES, "1", "1", "0"] # Hidden counts
setting_values = setting_values + ["None", cps.YES, "None", "gray"] # Image info
setting_values = setting_values + [cps.NO, "", "ImageNumber, Image_Metadata_Plate, Image_Metadata_Well"] # Group specifications
setting_values = setting_values + [cps.NO, cps.NO] # Filter specifications
variable_revision_number = 20
if (not from_matlab) and variable_revision_number == 20:
#
# Added configuration of workspace file
#
setting_values = setting_values[:SETTING_WORKSPACE_GROUP_COUNT] + \
["1"] + \
setting_values[SETTING_WORKSPACE_GROUP_COUNT:] # workspace_measurement_count
setting_values += [ cps.NO] # create_workspace_file
setting_values += [ W_SCATTERPLOT, # measurement_display
cpmeas.IMAGE, cpmeas.IMAGE, "", C_IMAGE_NUMBER, # x_measurement_type, x_object_name, x_measurement_name, x_index_name
cpmeas.IMAGE, cpmeas.IMAGE, "", C_IMAGE_NUMBER] # y_measurement_type, y_object_name, y_measurement_name, y_index_name
variable_revision_number = 21
if (not from_matlab) and variable_revision_number == 21:
#
# Added experiment name and location object
#
setting_values = (
setting_values[:SETTING_FIXED_SETTING_COUNT_V21] +
[ "MyExpt", cps.NONE ] +
setting_values[SETTING_FIXED_SETTING_COUNT_V21:])
variable_revision_number = 22
if (not from_matlab) and variable_revision_number == 22:
#
# Added class table properties field
#
setting_values = (
setting_values[:SETTING_FIXED_SETTING_COUNT_V22] +
[ "" ] +
setting_values[SETTING_FIXED_SETTING_COUNT_V22:])
variable_revision_number = 23
if (not from_matlab) and variable_revision_number == 23:
#
# Added wants_relationships_table
#
setting_values = (
setting_values[:SETTING_FIXED_SETTING_COUNT_V23] +
[ cps.NO ] +
setting_values[SETTING_FIXED_SETTING_COUNT_V23:])
variable_revision_number = 24
if (not from_matlab) and variable_revision_number == 24:
#
# Added allow_overwrite
#
setting_values = (
setting_values[:SETTING_FIXED_SETTING_COUNT_V24] +
[ OVERWRITE_DATA ] +
setting_values[SETTING_FIXED_SETTING_COUNT_V24:])
variable_revision_number = 25
if (not from_matlab) and variable_revision_number == 25:
#
# added wants_properties_image_url_prepend setting
#
wants_urls = len(setting_values[SETTING_OFFSET_PROPERTIES_IMAGE_URL_PREPEND_V26]) > 0
setting_values =\
setting_values[:SETTING_FIXED_SETTING_COUNT_V25] +\
[cps.YES if wants_urls else cps.NO] + \
setting_values[SETTING_FIXED_SETTING_COUNT_V25:]
variable_revision_number = 26
# Added view creation to object table settings
setting_values[OT_IDX] = OT_DICTIONARY.get(setting_values[OT_IDX],
setting_values[OT_IDX])
return setting_values, variable_revision_number, from_matlab
class ColumnNameMapping:
"""Represents a mapping of feature name to column name"""
def __init__(self,max_len=64):
self.__dictionary = {}
self.__mapped = False
self.__max_len = max_len
def add(self,feature_name):
"""Add a feature name to the collection"""
self.__dictionary[feature_name] = feature_name
self.__mapped = False
def __getitem__(self,feature_name):
"""Return the column name for a feature"""
if not self.__mapped:
self.do_mapping()
return self.__dictionary[feature_name]
def keys(self):
return self.__dictionary.keys()
def values(self):
if not self.__mapped:
self.do_mapping()
return self.__dictionary.values()
def do_mapping(self):
"""Scan the dictionary for feature names > max_len and shorten"""
reverse_dictionary = {}
problem_names = []
seeded_random = False
valid_name_regexp = "^[0-9a-zA-Z_$]+$"
for key in sorted(self.__dictionary.keys()):
value = self.__dictionary[key]
reverse_dictionary[value] = key
if len(value) > self.__max_len:
problem_names.append(value)
elif not re.match(valid_name_regexp, value):
problem_names.append(value)
for name in problem_names:
key = reverse_dictionary[name]
orig_name = name
if not re.match(valid_name_regexp, name):
name = re.sub("[^0-9a-zA-Z_$]","_",name)
if reverse_dictionary.has_key(name):
i = 1
while reverse_dictionary.has_key(name + str(i)):
i += 1
name = name + str(i)
starting_name = name
starting_positions = [x for x in [name.find("_"), 0]
if x != -1]
for pos in starting_positions:
# remove vowels
to_remove = len(name)-self.__max_len
if to_remove > 0:
remove_count = 0
for to_drop in (('a','e','i','o','u'),
('b','c','d','f','g','h','j','k','l','m','n',
'p','q','r','s','t','v','w','x','y','z'),
('A','B','C','D','E','F','G','H','I','J','K',
'L','M','N','O','P','Q','R','S','T','U','V',
'W','X','Y','Z')):
for index in range(len(name)-1,pos-1,-1):
if name[index] in to_drop:
name = name[:index]+name[index+1:]
remove_count += 1
if remove_count == to_remove:
break
if remove_count == to_remove:
break
rng = None
while name in reverse_dictionary.keys():
# if, improbably, removing the vowels hit an existing name
# try deleting "random" characters. This has to be
# done in a very repeatable fashion, so I use a message
# digest to initialize a random # generator and then
# rehash the message digest to get the next
if rng is None:
rng = random_number_generator(starting_name)
name = starting_name
while len(name) > self.__max_len:
index = rng.next() % len(name)
name = name[:index]+name[index+1:]
reverse_dictionary.pop(orig_name)
reverse_dictionary[name] = key
self.__dictionary[key] = name
self.__mapped = True
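def _column_name_mapping_example():
    '''Illustrative sketch only, not part of the original module.
    Shows how ColumnNameMapping shortens an over-long feature name while
    keeping the mapping collision-free; the exact shortened string depends on
    the deterministic vowel/consonant removal implemented in do_mapping.
    '''
    mapping = ColumnNameMapping(max_len=20)
    long_name = "Nuclei_Intensity_IntegratedIntensity_OrigDNA"
    mapping.add(long_name)
    short = mapping[long_name]
    # the shortened column name fits within max_len and maps back to long_name
    assert len(short) <= 20
    return short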
def random_number_generator(seed):
'''This is a very repeatable pseudorandom number generator
seed - a string to seed the generator
yields integers in the range 0-65535 on iteration
'''
m = hashlib.md5()
m.update(seed)
while True:
digest = m.digest()
m.update(digest)
yield ord(digest[0]) + 256 * ord(digest[1])
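def _random_number_generator_example():
    '''Illustrative sketch only, not part of the original module.
    Demonstrates that the MD5-based generator is fully determined by its
    seed, which is what makes the column-name shortening above repeatable
    from run to run.
    '''
    first = random_number_generator("Nuclei_AreaShape_Area")
    second = random_number_generator("Nuclei_AreaShape_Area")
    values = [first.next() for _ in range(5)]
    # same seed -> same sequence, values are always in the 0-65535 range
    assert values == [second.next() for _ in range(5)]
    assert all(0 <= v <= 65535 for v in values)
    return values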
class SQLiteCommands(object):
'''This class ducktypes a connection and cursor to aggregate and bulk execute SQL'''
def __init__(self):
self.commands_and_bindings = []
def execute(self, query, bindings = None):
self.commands_and_bindings.append((query, bindings))
def commit(self):
pass
def close(self):
del self.commands_and_bindings
def rollback(self):
self.commands_and_bindings = []
def next(self):
raise NotImplementedError(
"The SQLite interaction handler can only write to the database")
def get_state(self):
return self.commands_and_bindings
def set_state(self, state):
self.commands_and_bindings = state
def execute_all(self, cursor):
for query, binding in self.commands_and_bindings:
execute(cursor, query, binding)
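def _sqlite_commands_example(cursor):
    '''Illustrative sketch only, not part of the original module.
    SQLiteCommands ducktypes a connection/cursor: statements issued during a
    run are buffered in memory and only hit the real database when
    execute_all() is called with a genuine SQLite cursor.
    '''
    buffered = SQLiteCommands()
    buffered.execute("SELECT 1")   # recorded, not executed yet
    buffered.commit()              # no-op for the in-memory buffer
    buffered.execute_all(cursor)   # replays the recorded statements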
|
LeeKamentsky/CellProfiler
|
cellprofiler/modules/exporttodatabase.py
|
Python
|
gpl-2.0
| 210,109
|
[
"CDK"
] |
02a67c0a2af6eb146f14c492471d03c2eb59b61968bda987813c04798ae5d86c
|
import hashlib
import os
import posixpath
import stat
import re
from fnmatch import filter as fnfilter
from six import string_types
from fabric.state import output, connections, env
from fabric.utils import warn
from fabric.context_managers import settings
# TODO: use self.sftp.listdir_iter on Paramiko 1.15+
def _format_local(local_path, local_is_path):
"""Format a path for log output"""
if local_is_path:
return local_path
else:
# This allows users to set a name attr on their StringIO objects
# just like an open file object would have
return getattr(local_path, 'name', '<file obj>')
class SFTP(object):
"""
SFTP helper class, which is also a facade for ssh.SFTPClient.
"""
def __init__(self, host_string):
self.ftp = connections[host_string].open_sftp()
# Recall that __getattr__ is the "fallback" attribute getter, and is thus
# pretty safe to use for facade-like behavior as we're doing here.
def __getattr__(self, attr):
return getattr(self.ftp, attr)
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.stat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
def glob(self, path):
from fabric.state import win32
dirpart, pattern = os.path.split(path)
rlist = self.ftp.listdir(dirpart)
names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
ret = [path]
if len(names):
s = '/'
ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
if not win32:
ret = [posixpath.join(dirpart, name) for name in names]
return ret
def walk(self, top, topdown=True, onerror=None, followlinks=False):
from os.path import join
# We may not have read permission for top, in which case we can't get a
# list of the files the directory contains. os.path.walk always
# suppressed the exception then, rather than blow up for a minor reason
# when (say) a thousand readable directories are still left to visit.
# That logic is copied here.
try:
# Note that listdir and error are globals in this module due to
# earlier import-*.
names = self.ftp.listdir(top)
except Exception as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if self.isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not self.islink(path):
for x in self.walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir "%s"' % path)
else:
self.ftp.mkdir(path)
def get(self, remote_path, local_path, use_sudo, local_is_path, rremote=None, temp_dir=""):
from fabric.api import sudo, hide
# rremote => relative remote path, so get(/var/log) would result in
# this function being called with
# remote_path=/var/log/apache2/access.log and
# rremote=apache2/access.log
rremote = rremote if rremote is not None else remote_path
# Handle format string interpolation (e.g. %(dirname)s)
path_vars = {
'host': env.host_string.replace(':', '-'),
'basename': os.path.basename(rremote),
'dirname': os.path.dirname(rremote),
'path': rremote
}
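        # For the example above (get("/var/log") fetching
        # /var/log/apache2/access.log), path_vars is roughly
        #   {'host': '<host>', 'basename': 'access.log',
        #    'dirname': 'apache2', 'path': 'apache2/access.log'}
        # so a local_path of "%(host)s/%(path)s" expands accordingly.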
if local_is_path:
# Naive fix to issue #711
escaped_path = re.sub(r'(%[^()]*\w)', r'%\1', local_path)
local_path = os.path.abspath(escaped_path % path_vars )
            # Ensure we give ssh.SFTPClient a file by prepending and/or
# creating local directories as appropriate.
dirpath, filepath = os.path.split(local_path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isdir(local_path):
local_path = os.path.join(local_path, path_vars['basename'])
if output.running:
print("[%s] download: %s <- %s" % (
env.host_string,
_format_local(local_path, local_is_path),
remote_path
))
# Warn about overwrites, but keep going
if local_is_path and os.path.exists(local_path):
msg = "Local file %s already exists and is being overwritten."
warn(msg % local_path)
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(cp) it.
if use_sudo:
target_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(target_path)
target_path = posixpath.join(temp_dir, hasher.hexdigest())
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
# (The target path has already been cwd-ified elsewhere.)
with settings(hide('everything'), cwd=""):
sudo('cp -p "%s" "%s"' % (remote_path, target_path))
# The user should always own the copied file.
sudo('chown %s "%s"' % (env.user, target_path))
# Only root and the user has the right to read the file
sudo('chmod %o "%s"' % (0o400, target_path))
remote_path = target_path
try:
# File-like objects: reset to file seek 0 (to ensure full overwrite)
# and then use Paramiko's getfo() directly
getter = self.ftp.get
if not local_is_path:
local_path.seek(0)
getter = self.ftp.getfo
getter(remote_path, local_path)
finally:
# try to remove the temporary file after the download
if use_sudo:
with settings(hide('everything'), cwd=""):
sudo('rm -f "%s"' % remote_path)
# Return local_path object for posterity. (If mutated, caller will want
# to know.)
return local_path
def get_dir(self, remote_path, local_path, use_sudo, temp_dir):
# Decide what needs to be stripped from remote paths so they're all
# relative to the given remote_path
if os.path.basename(remote_path):
strip = os.path.dirname(remote_path)
else:
strip = os.path.dirname(os.path.dirname(remote_path))
# Store all paths gotten so we can return them when done
result = []
# Use our facsimile of os.walk to find all files within remote_path
for context, dirs, files in self.walk(remote_path):
# Normalize current directory to be relative
# E.g. remote_path of /var/log and current dir of /var/log/apache2
# would be turned into just 'apache2'
lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
# Prepend local path to that to arrive at the local mirrored
# version of this directory. So if local_path was 'mylogs', we'd
# end up with 'mylogs/apache2'
lcontext = os.path.join(local_path, lcontext)
# Download any files in current directory
for f in files:
# Construct full and relative remote paths to this file
rpath = posixpath.join(context, f)
rremote = posixpath.join(rcontext, f)
# If local_path isn't using a format string that expands to
# include its remote path, we need to add it here.
if "%(path)s" not in local_path \
and "%(dirname)s" not in local_path:
lpath = os.path.join(lcontext, f)
# Otherwise, just passthrough local_path to self.get()
else:
lpath = local_path
# Now we can make a call to self.get() with specific file paths
# on both ends.
result.append(self.get(rpath, lpath, use_sudo, True, rremote, temp_dir))
return result
def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
local_is_path, temp_dir):
from fabric.api import sudo, hide
pre = self.ftp.getcwd()
pre = pre if pre else ''
if local_is_path and self.isdir(remote_path):
basename = os.path.basename(local_path)
remote_path = posixpath.join(remote_path, basename)
if output.running:
print("[%s] put: %s -> %s" % (
env.host_string,
_format_local(local_path, local_is_path),
posixpath.join(pre, remote_path)
))
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(mv) it later.
if use_sudo:
target_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(target_path)
remote_path = posixpath.join(temp_dir, hasher.hexdigest())
        # Read, ensuring we handle file-like objects correctly re: seek pointer
putter = self.ftp.put
if not local_is_path:
old_pointer = local_path.tell()
local_path.seek(0)
putter = self.ftp.putfo
rattrs = putter(local_path, remote_path)
if not local_is_path:
local_path.seek(old_pointer)
# Handle modes if necessary
if (local_is_path and mirror_local_mode) or (mode is not None):
lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
# Cast to octal integer in case of string
if isinstance(lmode, string_types):
lmode = int(lmode, 8)
lmode = lmode & 0o7777
rmode = rattrs.st_mode
# Only bitshift if we actually got an rmode
if rmode is not None:
rmode = (rmode & 0o7777)
if lmode != rmode:
if use_sudo:
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv
# command. (The target path has already been cwd-ified
# elsewhere.)
with settings(hide('everything'), cwd=""):
sudo('chmod %o \"%s\"' % (lmode, remote_path))
else:
self.ftp.chmod(remote_path, lmode)
if use_sudo:
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
# (The target path has already been cwd-ified elsewhere.)
with settings(hide('everything'), cwd=""):
sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
# Revert to original remote_path for return value's sake
remote_path = target_path
return remote_path
def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
mode, temp_dir):
if os.path.basename(local_path):
strip = os.path.dirname(local_path)
else:
strip = os.path.dirname(os.path.dirname(local_path))
remote_paths = []
for context, dirs, files in os.walk(local_path):
rcontext = context.replace(strip, '', 1)
# normalize pathname separators with POSIX separator
rcontext = rcontext.replace(os.sep, '/')
rcontext = rcontext.lstrip('/')
rcontext = posixpath.join(remote_path, rcontext)
if not self.exists(rcontext):
self.mkdir(rcontext, use_sudo)
for d in dirs:
n = posixpath.join(rcontext, d)
if not self.exists(n):
self.mkdir(n, use_sudo)
for f in files:
local_path = os.path.join(context, f)
n = posixpath.join(rcontext, f)
p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
True, temp_dir)
remote_paths.append(p)
return remote_paths
|
xLegoz/fabric
|
fabric/sftp.py
|
Python
|
bsd-2-clause
| 13,012
|
[
"VisIt"
] |
56c54554ea5befed29e670e7175a54e429841a40da8fef878e8219d13b29d1ac
|
#!/usr/bin/env python3
### VERY MUCH PYTHON 3 !!!
"""
Example for aiohttp.web basic server
Uses a background timer to read from a file to update a shared datastructure
Because it's going to be used as a simulator
Made available under the MIT license as follows:
Copyright 2017 Brian Bulkowski brian@bulkowski.org
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
py_ver = sys.version_info[0] + ( sys.version_info[1] / 10.0 )
if py_ver < 3.5:
raise "Must be using Python 3.5 or better"
import threading
import time
import datetime
import os
import traceback
import logging
import json
import argparse
import textwrap
from aiohttp import web
import aiohttp
import asyncio
import async_timeout
# import local shared code
from portal import Resonator, Portal
class Notification:
# app is my app, that has some interesting parameters
# payload is a string to be delivered JSON style
# URL is the remote endpoint to notify
def __init__(self, app, url, payload):
self.app = app
self.payload = payload
self.url = url
# queueing: if it's full just don't notify
def enqueue(self):
q = self.app['queue']
try:
q.put_nowait(self)
        except asyncio.QueueFull:
logger.warning(" can't queue for notification ")
pass
async def notify(self):
session = self.app['session']
logger.debug(" Notification: notify posting JSON to endpoint %s",self.url)
with async_timeout.timeout(1.0):
headers = {'content-type': 'application/json; charset=utf-8' }
async with session.post(self.url, data=self.payload) as resp:
# with session.post(self.url) as resp:
logger.debug("post notification: response code: %d",resp.status)
if resp.status != 200:
logger.warning(" post: response is not 200, is %d, ignoring",resp.status)
return
return
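# Minimal usage sketch (illustrative only; the URL and payload below are
# hypothetical and not taken from the configuration used elsewhere):
#
#     n = Notification(app, "http://localhost:8000/notify",
#                      json.dumps({"status": "ok"}))
#     n.enqueue()   # queued; notifier_task later awaits n.notify()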
#
# Task that asynchronously polls the Techthulu module
# and looks for changes.
# based on those changes it will notify the different worker units,
# and update the stored file
#
# write the portal to disk - since it's local, do it synchronously
def write_file( filename, portal_str, mode="w" ):
logger.debug(" writing file %s string %s",filename,portal_str)
# open the file, write it
    with open(filename, mode) as f:
        f.write(portal_str)
logger.debug(" wrote to file ")
#
# Post to an endpoint
#
async def post_json(session, url, json_str):
logger.debug(" posting JSON to endpoint %s",url)
with async_timeout.timeout(2):
headers = {'content-type': 'application/json; charset=utf-8' }
async with session.post(url, data=json_str, headers=headers) as resp:
#logger.debug("fetch: response code: %d",resp.status)
if resp.status != 200:
logger.warning(" fetch: response is not 200, is %d, ignoring",resp.status)
return None
return await resp.text()
# a template from the docs
# just used for basic health check ATM
async def fetch(session, url):
with async_timeout.timeout(2):
async with session.get(url) as resp:
#logger.debug("fetch: response code: %d",resp.status)
if resp.status != 200:
logger.warning(" fetch: response is not 200, is %d, ignoring",resp.status)
return None
return await resp.text()
# Actually make the JSON request to the simulator
# Since this is a co-routine, it can be called blocking or non-blocking
# We will do all the work here of updating the portal and calling the necessary handlers
async def portal_status(session, url, app):
# get urls to post responses to
g_config = app["config"]
driver_urls = []
for d in g_config["drivers"]:
driver_urls.append( g_config[d] )
with async_timeout.timeout(2):
async with session.get(url) as resp:
logger.debug(" response code: %s content-type %s",resp.status, resp.headers['Content-Type'])
if resp.status != 200:
return None
resp_type = resp.headers['Content-Type']
            if "text/plain" in resp_type:
                resp_text = await resp.text()
            elif "application/json" in resp_type:
resp_text = await resp.text()
else:
logger.warning(" unknown content type %s , will treat as text",resp_type)
resp_text = await resp.text()
# important log! What did TecThulu say???
logger.debug(" status: response text %s",resp_text)
# parse the json object
try:
status_obj = json.loads(resp_text)
except ValueError:
logger.warning("portal_status: could not decode JSON response string from thulu %s",resp_text)
return None
except Exception as ex:
logger.warning(" could not decode JSON string, exception %s ",str(ex))
return None
# logger.debug(" json load success ")
# Determine if there are differences between the old and new object
# what_changed is a dict with things that changed
portal = app['portal']
# this sets the status and returns what changed
actions, what_changed = portal.setStatusJson(status_obj, logger)
# If the object has changed and actions are known,
if actions and what_changed:
logger.info(" something changed! %s ", what_changed)
# get a timestamp
timestamp = str(datetime.datetime.now())
# get the encoded string only once
with portal.lock:
portal_str = str(portal)
logger.debug(" changed: got string %s ",portal_str)
# write to the file
write_file(g_config["portalfile"], portal_str)
logger.debug(" changed: updated file")
# Send a JSON request to the drivers
logger.debug(" Notifying following clients: drivers %s ",driver_urls)
for a, b in actions:
logger.info(" action: %s : %s ", a, b)
notify_msg = json.dumps({'status': portal_str,'action': [ a, b ] ,'what_changed': what_changed,'time': timestamp})
# write notifications to a file
if app['debug'] == "DEBUG":
write_file(g_config["tracefile"], notify_msg + "\n", "a")
for u in driver_urls:
n = Notification(app, u, notify_msg)
n.enqueue()
else:
logger.debug(" nothing changed ")
return status_obj
# work routine that actually polls in a loop
async def thulu_poller(app):
g_config = app['config']
period = g_config["tecthulu_poll"]
logger = app['log']
logger.info(" started poller routine, running every %f seconds",period)
# reuse this object, no reason to create lots of them
async with aiohttp.ClientSession(loop=loop) as session:
app['session'] = session
while True:
logger.debug(" hello says the poller! ")
# todo: don't bother with user agent, other headers? ( skip_auto_headers )
try:
# async with aiohttp.ClientSession(loop=loop) as session:
html = await fetch(session, g_config["tecthulu_url"])
logger.debug( "Poller: thulu is up, root received valid response")
# gets the status and decodes the json into an object
res = await portal_status(session, g_config['tecthulu_url'], app)
except aiohttp.ClientConnectionError as ex:
logger.error( "Poller: could not connect to server, reason %s ",ex)
except asyncio.TimeoutError as ex:
logger.error( "Poller: timed out fetching from server, trying again ")
except Exception as ex:
traceback.print_exc()
logger.error( "Poller: unknown exception type %s",type(ex).__name__)
await asyncio.sleep(period)
async def timer(app):
period = 1.0
logger = app['log']
# logger.info(" started timer routine, running every %f seconds",period)
while True:
# logger.info(" hello says the timer! ")
await asyncio.sleep(period)
async def notifier_task(app):
logger.info(" notifier task started")
queue = app['queue']
while True:
work = await queue.get()
try:
await work.notify()
except Exception as e:
#logger.error(" notification failed, %s",str(e))
pass
#
# A number of debug / demo endpoints
# Note to self: you create a "Response" object, then
# you manipulate it.
#
# this needs UTF8 because names might have utf8
async def statusJson(request):
portal = request.app['portal']
portal_str = ""
with portal.lock:
portal_str = str(portal)
return web.Response(text=portal_str , charset='utf-8', headers={'Access-Control-Allow-Origin':'*'})
async def hello(request):
return web.Response(text="Welcome to Magnus Flora Jarvis Server! Please replace me.")
async def health(request):
return web.Response(text="OK")
# background tasks are covered near the bottom of this:
# http://aiohttp.readthedocs.io/en/stable/web.html
# Whatever tasks you create here will be executed and cancelled properly
async def start_background_tasks(app):
app['timer_task'] = app.loop.create_task( timer(app))
app['poller_task'] = app.loop.create_task( thulu_poller(app))
app['notifier_task'] = app.loop.create_task( notifier_task(app))
async def cleanup_background_tasks(app):
app['log'].info(" cleaning up background tasks ")
app['timer_task'].cancel()
await app['timer_task']
app['poller_task'].cancel()
await app['poller_task']
app['notifier_task'].cancel()
await app['notifier_task']
def create_logger(args):
# create a logging object and add it to the app object
logger = logging.getLogger('MF_Jarvis')
logger.setLevel(args.debug)
# create a file output
fh = logging.FileHandler(args.log)
fh.setLevel(args.debug)
# create a console handler
ch = logging.StreamHandler()
ch.setLevel(args.debug)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
async def init(app, args, loop):
app['debug'] = args.debug
app.router.add_get('/', hello)
app.router.add_get('/health', health)
app.router.add_get('/status/json', statusJson)
# create a portal object and stash it, many will need it
app['portal'] = Portal(1, app['log'])
# worker queue for notifier tasks
app['queue'] = asyncio.Queue(loop = loop)
# background tasks are covered near the bottom of this:
# http://aiohttp.readthedocs.io/en/stable/web.html
app.on_startup.append(start_background_tasks)
app.on_cleanup.append(cleanup_background_tasks)
return
# Parse the command line options
parser = argparse.ArgumentParser(description="MagnusFlora Jarvis")
parser.add_argument('--config', '-c', help="JSON file with configuration", default="config.json", type=str)
parser.add_argument('--log', help="location of the log file", default="jarvis.log", type=str)
parser.add_argument('--debug', '-d', help=" debug level: CRITICAL ERROR WARNING INFO DEBUG", default="INFO", type=str)
args = parser.parse_args()
# Load config.json
try:
with open(args.config) as config_file:
g_config = json.load(config_file)
print(" g_config is: ",g_config)
except Exception as e:
print(" UNABLE TO OPEN CONFIGURATION FILE ",args.config)
print(e)
sys.exit(0)
logger = create_logger(args)
logger.info('starting MagnusFlora Jarvis: there will be %d cakes', 3 )
print("starting MagnusFlora Jarvis on port ",g_config["jarvis_port"])
# register all the async stuff
loop = asyncio.get_event_loop()
app = web.Application()
app['config'] = g_config
app['log'] = logger
loop.run_until_complete(init(app, args, loop))
# run the web server
web.run_app(app, port=g_config["jarvis_port"])
|
bbulkow/MagnusFlora
|
rest/jarvis.py
|
Python
|
mit
| 13,536
|
[
"Brian"
] |
5a21a78f0dc8ac0c24947efb9b3cc1dc24f724190843af9bb04b13952b956c3e
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import SimpleLazyObject # noqa
from django.utils.importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon.decorators import _current_component # noqa
from horizon.decorators import require_auth # noqa
from horizon.decorators import require_perms # noqa
from horizon import loaders
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except KeyError:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
class NotRegistered(Exception):
pass
@python_2_unicode_compatible
class HorizonComponent(object):
policy_rules = None
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __str__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return name
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
@access_cached
def can_access(self, context):
"""Return whether the user has role based access to this component.
This method is not intended to be overridden.
The result of the method is stored in per-session cache.
"""
return self.allowed(context)
def allowed(self, context):
"""Checks if the user is allowed to access this component.
This method should be overridden to return the result of
any policy checks required for the user to access this component
when more complex checks are required.
"""
return self._can_access(context['request'])
def _can_access(self, request):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        # this check is an OR check rather than the AND check that is the
        # default in the policy engine, so each rule is called individually
if policy_check and self.policy_rules:
for rule in self.policy_rules:
if policy_check((rule,), request):
return True
return False
# default to allowed
return True
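# Hedged sketch (hypothetical, not part of the original module): a component
# opts in to policy-based access by declaring policy_rules, which
# _can_access() evaluates one rule at a time via POLICY_CHECK_FUNCTION, e.g.
#
#     class AdminOnlyPanel(Panel):                     # hypothetical subclass
#         slug = "admin_only"
#         policy_rules = (("identity", "admin_required"),)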
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
"""A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing permission-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any permissions required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
.. staticmethod:: can_register
This optional static method can be used to specify conditions that
need to be satisfied to load this panel. Unlike ``permissions`` and
``allowed`` this method is intended to handle settings based
conditions rather than user based permission and policy checks.
The return value is boolean. If the method returns ``True``, then the
panel will be registered and available to user (if ``permissions`` and
``allowed`` runtime checks are also satisfied). If the method returns
``False``, then the panel will not be registered and will not be
available via normal navigation or direct URL access.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
"""Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
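# Hedged example (hypothetical names, not from the original source): a
# concrete panel is usually declared in a panel.py next to a urls.py that
# defines an "index" URL pattern, roughly as follows:
#
#     class Instances(Panel):
#         name = _("Instances")
#         slug = "instances"
#         permissions = ("openstack.services.compute",)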
@six.python_2_unicode_compatible
class PanelGroup(object):
"""A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", "default")
self.name = name or getattr(self, "name", None)
# Our panels must be mutable so it can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered as e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
"""A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing
permission-based access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any permissions required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this dashboard should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
"""Returns the specified :class:~horizon.PanelGroup
or None if not registered
"""
return self._panel_groups.get(slug)
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
if self._panel_groups is not None:
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return SortedDict(panel_groups)
def get_absolute_url(self):
"""Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except Exception:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
url_slug = panel.slug.replace('.', '/')
urlpatterns += patterns('',
url(r'^%s/' % url_slug,
include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'',
include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
"""Discovers panels to register from the current dashboard module."""
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, six.string_types) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
self._panel_groups = SortedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
"""Registers a :class:`~horizon.Panel` with this dashboard."""
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
"""Unregisters a :class:`~horizon.Panel` from this dashboard."""
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
def allowed(self, context):
"""Checks for role based access for this dashboard.
        Checks for access to any panels in the dashboard and to the
dashboard itself.
This method should be overridden to return the result of
any policy checks required for the user to access this dashboard
when more complex checks are required.
"""
# if the dashboard has policy rules, honor those above individual
# panels
if not self._can_access(context['request']):
return False
# check if access is allowed to a single panel,
# the default for each panel is True
for panel in self.get_panels():
if panel.can_access(context):
return True
return False
class Workflow(object):
pass
try:
from django.utils.functional import empty # noqa
except ImportError:
# Django 1.3 fallback
empty = None
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
def __len__(self):
if self._wrapped is empty:
self._setup()
return len(self._wrapped)
def __getitem__(self, idx):
if self._wrapped is empty:
self._setup()
return self._wrapped[idx]
class Site(Registry, HorizonComponent):
"""The overarching class which encompasses all dashboards and panels."""
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
return conf.HORIZON_CONFIG
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
"""Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
"""Unregisters a :class:`~horizon.Dashboard` from Horizon."""
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
"""Returns the specified :class:`~horizon.Dashboard` instance."""
return self._registered(dashboard)
def get_dashboards(self):
"""Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = sorted(registered.values())
dashboards.extend(extra)
return dashboards
else:
return sorted(self._registry.values())
def get_default_dashboard(self):
"""Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
"""Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, six.string_types):
# Assume we've got a URL if there's a slash in it
if '/' in user_home:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
"""Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug,
include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
"""Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
def _load_panel_customization(self):
"""Applies the plugin-based panel configurations.
This method parses the panel customization from the ``HORIZON_CONFIG``
and make changes to the dashboard accordingly.
It supports adding, removing and setting default panels on the
        dashboard. It also supports registering a panel group.
"""
panel_customization = self._conf.get("panel_customization", [])
# Process all the panel groups first so that they exist before panels
# are added to them and Dashboard._autodiscover() doesn't wipe out any
# panels previously added when its panel groups are instantiated.
panel_configs = []
for config in panel_customization:
if config.get('PANEL'):
panel_configs.append(config)
elif config.get('PANEL_GROUP'):
self._process_panel_group_configuration(config)
else:
LOG.warning("Skipping %s because it doesn't have PANEL or "
"PANEL_GROUP defined.", config.__name__)
# Now process the panels.
for config in panel_configs:
self._process_panel_configuration(config)
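    # Illustrative sketch of a panel_customization entry as consumed above
    # (hypothetical module paths and slugs, not taken from a real deployment):
    #
    #     HORIZON_CONFIG["panel_customization"] = [
    #         {"PANEL": "plugin_panel",
    #          "PANEL_DASHBOARD": "admin",
    #          "PANEL_GROUP": "default",
    #          "ADD_PANEL": "myplugin.content.plugin_panel.panel.PluginPanel"},
    #     ]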
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
                # test if the can_register method is present and, if so, call
                # it to determine whether the panel should be loaded
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
def _process_panel_group_configuration(self, config):
"""Adds a panel group to the dashboard."""
panel_group_slug = config.get('PANEL_GROUP')
try:
dashboard = config.get('PANEL_GROUP_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_DASHBOARD defined.", config.__name__)
return
dashboard_cls = self.get_dashboard(dashboard)
panel_group_name = config.get('PANEL_GROUP_NAME')
if not panel_group_name:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_NAME defined.", config.__name__)
return
# Create the panel group class
panel_group = type(panel_group_slug,
(PanelGroup, ),
{'slug': panel_group_slug,
'name': panel_group_name,
'panels': []},)
# Add the panel group to dashboard
panels = list(dashboard_cls.panels)
panels.append(panel_group)
dashboard_cls.panels = tuple(panels)
# Trigger the autodiscovery to completely load the new panel group
dashboard_cls._autodiscover_complete = False
dashboard_cls._autodiscover()
except Exception as e:
LOG.warning('Could not process panel group %(panel_group)s: '
'%(exc)s',
{'panel_group': panel_group_slug, 'exc': e})
class HorizonSite(Site):
"""A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
|
tqtran7/horizon
|
horizon/base.py
|
Python
|
apache-2.0
| 38,181
|
[
"VisIt"
] |
89d1bc02b32b8cf458ae62e02f80263d534c2f3196e396cd650fbb09b42f9a6a
|
#!/usr/bin/env python
import os
from StringIO import StringIO
import sys
import unittest
import pyrapi
import tseal.test_utils as test_utils
from seal.lib.aligner.hirapi import HiRapiAligner, HiRapiOpts
class TestHiRapiProperties(unittest.TestCase):
def setUp(self):
self.hi = HiRapiAligner('rapi_bwa')
def tearDown(self):
self.hi.release_resources()
def test_defaults(self):
self.assertTrue(self.hi.paired)
self.assertEqual(pyrapi.rapi.QENC_SANGER, self.hi.q_offset)
def test_get_plugin_info(self):
self.assertEquals('bwa-mem', self.hi.aligner_name)
self.assertTrue(self.hi.aligner_version)
self.assertTrue(self.hi.plugin_version)
def test_set_some_options(self):
self.hi.opts.n_threads = 11
self.assertEquals(11, self.hi.opts.n_threads)
self.hi.opts.mapq_min = 5
self.assertEquals(5, self.hi.opts.mapq_min)
self.hi.opts.isize_min = 250
self.assertEquals(250, self.hi.opts.isize_min)
self.hi.opts.isize_max = 500
self.assertEquals(500, self.hi.opts.isize_max)
class TestHiRapiAlignments(unittest.TestCase):
def setUp(self):
self.hi = HiRapiAligner('rapi_bwa')
self._align_mini_ref_seqs()
def tearDown(self):
self.hi.release_resources()
def test_load_reference_again(self):
# should "just work"
self.hi.load_ref(test_utils.MiniRefMemPath)
def test_sam(self):
io = StringIO()
self.hi.write_sam(io, include_header=False)
sam = io.getvalue()
expected_sam = test_utils.rapi_mini_ref_seqs_sam_no_header()
self.assertEquals(expected_sam, sam)
def _align_mini_ref_seqs(self):
self.hi.load_ref(test_utils.MiniRefMemPath)
reads = test_utils.get_mini_ref_seqs()
for row in reads:
if len(row) != 5:
raise RuntimeError("Unexpected number of fields in mini_ref read record")
self.hi.load_pair(*row)
self.hi.align_batch()
def test_multiple_batches(self):
io = StringIO()
# we clear the batch created by setUp and align more reads using the same instance.
# For each pair we clear the batch, load it, align it and generate sam.
reads = test_utils.get_mini_ref_seqs()
for row in reads:
if len(row) != 5:
raise RuntimeError("Unexpected number of fields in mini_ref read record")
self.hi.clear_batch()
self.hi.load_pair(*row)
self.hi.align_batch()
self.hi.write_sam(io, include_header=False)
io.write('\n')
sam = io.getvalue().rstrip('\n')
expected_sam = test_utils.rapi_mini_ref_seqs_sam_no_header()
self.assertEquals(expected_sam, sam)
class TestHiRapiBatch(unittest.TestCase):
def setUp(self):
self.hi = HiRapiAligner('rapi_bwa')
self.reads = test_utils.get_mini_ref_seqs()
for row in self.reads:
if len(row) != 5:
raise RuntimeError("Unexpected number of fields in mini_ref read record")
self.hi.load_pair(*row)
def tearDown(self):
self.hi.release_resources()
@unittest.skip("haven't decided whether we should support unicode input")
def test_unicode_strings(self):
self.hi.clear_batch()
self.hi.load_pair(
u'my_read_id',
u'AAAACTGACCCACACAGAAAAACTAATTGTGAGAACCAATATTATACTAAATTCATTTGA',
u'EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE',
u'CAAAAGTTAACCCATATGGAATGCAATGGAGGAAATCAATGACATATCAGATCTAGAAAC',
u'EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE')
frag = next(f for f in self.hi.ifragments())
reads = [ r for r in frag ]
self.assertEquals('my_read_id', reads[0].id)
self.assertEquals('my_read_id', reads[1].id)
self.assertEquals('AAAACTGACCCACACAGAAAAACTAATTGTGAGAACCAATATTATACTAAATTCATTTGA', reads[0].seq)
self.assertEquals('EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE', reads[0].qual)
def test_fragment_iteration(self):
read_id_counts = dict()
for frag in self.hi.ifragments():
for read in frag:
read_id = read.id
read_id_counts[read_id] = 1 + read_id_counts.get(read_id, 0)
# 5 pairs
self.assertEquals(5, len(read_id_counts))
unique_counts = set(read_id_counts.values())
# all ids appearing twice
self.assertEquals(1, len(unique_counts))
self.assertEquals(2, unique_counts.pop())
def test_batch_management(self):
self.assertEquals(10, self.hi.batch_size)
self.hi.clear_batch()
self.assertEquals(0, self.hi.batch_size)
self.hi.load_ref(test_utils.MiniRefMemPath)
self.hi.align_batch() # should not raise just because it's empty
for _ in self.hi.ifragments():
self.fail("iterating over an empty batch!")
def test_base_quality(self):
hi = HiRapiAligner('rapi_bwa', paired=False)
one_read = self.reads[0][0:3]
hi.q_offset = self.hi.Qenc_Sanger
hi.load_read('sanger_read', one_read[1], one_read[2])
# 64: Illumina base quality offset
# 33: Sanger base quality offset
ill_quality = ''.join( chr(ord(c) + (64-33)) for c in one_read[2] )
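        # Worked example (illustrative): the Sanger character '!' (ASCII 33)
        # becomes chr(33 + (64 - 33)) == '@' (ASCII 64) under the Illumina offset.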
hi.q_offset = self.hi.Qenc_Illumina
hi.load_read('illumina_read', one_read[1], ill_quality)
loaded_qualities = [ frag[0].qual for frag in hi.ifragments() ]
self.assertEquals(2, len(loaded_qualities))
self.assertEquals(loaded_qualities[0], loaded_qualities[1])
def suite():
"""Get a suite with all the tests from this module"""
s = unittest.TestLoader().loadTestsFromTestCase(TestHiRapiProperties)
s.addTests(unittest.TestLoader().loadTestsFromTestCase(TestHiRapiAlignments))
s.addTests(unittest.TestLoader().loadTestsFromTestCase(TestHiRapiBatch))
return s
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
crs4/seal
|
tests/tseal/lib/aligner/test_hirapi.py
|
Python
|
gpl-3.0
| 6,120
|
[
"BWA"
] |
d6903bf40e776ea82c53a5e2b0ba3e40e046cbaeb7b861176f4da9ffc4d78c2a
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.privatecatalog_v1beta1.services.private_catalog import (
PrivateCatalogAsyncClient,
)
from google.cloud.privatecatalog_v1beta1.services.private_catalog import (
PrivateCatalogClient,
)
from google.cloud.privatecatalog_v1beta1.services.private_catalog import pagers
from google.cloud.privatecatalog_v1beta1.services.private_catalog import transports
from google.cloud.privatecatalog_v1beta1.types import private_catalog
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
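# Illustrative behaviour (hedged, based only on the function above): a
# non-localhost default such as "cloudprivatecatalog.googleapis.com" is
# returned unchanged, while a localhost default endpoint is rewritten to
# "foo.googleapis.com" so mTLS endpoint derivation can be exercised in tests.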
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert PrivateCatalogClient._get_default_mtls_endpoint(None) is None
assert (
PrivateCatalogClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
PrivateCatalogClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
PrivateCatalogClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
PrivateCatalogClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
PrivateCatalogClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [PrivateCatalogClient, PrivateCatalogAsyncClient,]
)
def test_private_catalog_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "cloudprivatecatalog.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.PrivateCatalogGrpcTransport, "grpc"),
(transports.PrivateCatalogGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_private_catalog_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [PrivateCatalogClient, PrivateCatalogAsyncClient,]
)
def test_private_catalog_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "cloudprivatecatalog.googleapis.com:443"
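# get_transport_class should return the gRPC transport by default and when "grpc" is requested explicitly.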
def test_private_catalog_client_get_transport_class():
transport = PrivateCatalogClient.get_transport_class()
available_transports = [
transports.PrivateCatalogGrpcTransport,
]
assert transport in available_transports
transport = PrivateCatalogClient.get_transport_class("grpc")
assert transport == transports.PrivateCatalogGrpcTransport
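# Client construction should honor an injected transport, client_options such as api_endpoint and
# quota_project_id, and the GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE variables.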
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(PrivateCatalogClient, transports.PrivateCatalogGrpcTransport, "grpc"),
(
PrivateCatalogAsyncClient,
transports.PrivateCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
PrivateCatalogClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PrivateCatalogClient),
)
@mock.patch.object(
PrivateCatalogAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PrivateCatalogAsyncClient),
)
def test_private_catalog_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(PrivateCatalogClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(PrivateCatalogClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
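# With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the client should switch to the mTLS endpoint only when a
# client certificate is available and GOOGLE_API_USE_CLIENT_CERTIFICATE allows it.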
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(PrivateCatalogClient, transports.PrivateCatalogGrpcTransport, "grpc", "true"),
(
PrivateCatalogAsyncClient,
transports.PrivateCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(PrivateCatalogClient, transports.PrivateCatalogGrpcTransport, "grpc", "false"),
(
PrivateCatalogAsyncClient,
transports.PrivateCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
PrivateCatalogClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PrivateCatalogClient),
)
@mock.patch.object(
PrivateCatalogAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PrivateCatalogAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_private_catalog_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
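# get_mtls_endpoint_and_cert_source should combine client_options with the mTLS environment
# variables to select the endpoint and certificate source.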
@pytest.mark.parametrize(
"client_class", [PrivateCatalogClient, PrivateCatalogAsyncClient]
)
@mock.patch.object(
PrivateCatalogClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PrivateCatalogClient),
)
@mock.patch.object(
PrivateCatalogAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PrivateCatalogAsyncClient),
)
def test_private_catalog_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
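# Scopes supplied via client_options should be forwarded to the transport.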
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(PrivateCatalogClient, transports.PrivateCatalogGrpcTransport, "grpc"),
(
PrivateCatalogAsyncClient,
transports.PrivateCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_private_catalog_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
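# A credentials_file in client_options should be forwarded to the transport instead of explicit credentials.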
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
PrivateCatalogClient,
transports.PrivateCatalogGrpcTransport,
"grpc",
grpc_helpers,
),
(
PrivateCatalogAsyncClient,
transports.PrivateCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_private_catalog_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
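# client_options may also be supplied as a plain dict.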
def test_private_catalog_client_client_options_from_dict():
with mock.patch(
"google.cloud.privatecatalog_v1beta1.services.private_catalog.transports.PrivateCatalogGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = PrivateCatalogClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
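# Credentials loaded from a credentials file should be passed through to gRPC channel creation.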
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
PrivateCatalogClient,
transports.PrivateCatalogGrpcTransport,
"grpc",
grpc_helpers,
),
(
PrivateCatalogAsyncClient,
transports.PrivateCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_private_catalog_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Test that the credentials from the file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"cloudprivatecatalog.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="cloudprivatecatalog.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
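# SearchCatalogs RPC: sync/async unary calls, empty requests, routing headers, and pager behavior.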
@pytest.mark.parametrize("request_type", [private_catalog.SearchCatalogsRequest, dict,])
def test_search_catalogs(request_type, transport: str = "grpc"):
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = private_catalog.SearchCatalogsResponse(
next_page_token="next_page_token_value",
)
response = client.search_catalogs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchCatalogsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchCatalogsPager)
assert response.next_page_token == "next_page_token_value"
def test_search_catalogs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
client.search_catalogs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchCatalogsRequest()
@pytest.mark.asyncio
async def test_search_catalogs_async(
transport: str = "grpc_asyncio", request_type=private_catalog.SearchCatalogsRequest
):
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
private_catalog.SearchCatalogsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_catalogs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchCatalogsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchCatalogsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_catalogs_async_from_dict():
await test_search_catalogs_async(request_type=dict)
def test_search_catalogs_field_headers():
client = PrivateCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = private_catalog.SearchCatalogsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
call.return_value = private_catalog.SearchCatalogsResponse()
client.search_catalogs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_catalogs_field_headers_async():
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = private_catalog.SearchCatalogsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
private_catalog.SearchCatalogsResponse()
)
await client.search_catalogs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_search_catalogs_pager(transport_name: str = "grpc"):
client = PrivateCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchCatalogsResponse(
catalogs=[
private_catalog.Catalog(),
private_catalog.Catalog(),
private_catalog.Catalog(),
],
next_page_token="abc",
),
private_catalog.SearchCatalogsResponse(catalogs=[], next_page_token="def",),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(),], next_page_token="ghi",
),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(), private_catalog.Catalog(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", ""),)),
)
pager = client.search_catalogs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, private_catalog.Catalog) for i in results)
def test_search_catalogs_pages(transport_name: str = "grpc"):
client = PrivateCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalogs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchCatalogsResponse(
catalogs=[
private_catalog.Catalog(),
private_catalog.Catalog(),
private_catalog.Catalog(),
],
next_page_token="abc",
),
private_catalog.SearchCatalogsResponse(catalogs=[], next_page_token="def",),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(),], next_page_token="ghi",
),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(), private_catalog.Catalog(),],
),
RuntimeError,
)
pages = list(client.search_catalogs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_catalogs_async_pager():
    client = PrivateCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_catalogs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchCatalogsResponse(
catalogs=[
private_catalog.Catalog(),
private_catalog.Catalog(),
private_catalog.Catalog(),
],
next_page_token="abc",
),
private_catalog.SearchCatalogsResponse(catalogs=[], next_page_token="def",),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(),], next_page_token="ghi",
),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(), private_catalog.Catalog(),],
),
RuntimeError,
)
async_pager = await client.search_catalogs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, private_catalog.Catalog) for i in responses)
@pytest.mark.asyncio
async def test_search_catalogs_async_pages():
    client = PrivateCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_catalogs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchCatalogsResponse(
catalogs=[
private_catalog.Catalog(),
private_catalog.Catalog(),
private_catalog.Catalog(),
],
next_page_token="abc",
),
private_catalog.SearchCatalogsResponse(catalogs=[], next_page_token="def",),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(),], next_page_token="ghi",
),
private_catalog.SearchCatalogsResponse(
catalogs=[private_catalog.Catalog(), private_catalog.Catalog(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.search_catalogs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
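# SearchProducts RPC: sync/async unary calls, empty requests, routing headers, and pager behavior.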
@pytest.mark.parametrize("request_type", [private_catalog.SearchProductsRequest, dict,])
def test_search_products(request_type, transport: str = "grpc"):
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = private_catalog.SearchProductsResponse(
next_page_token="next_page_token_value",
)
response = client.search_products(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchProductsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchProductsPager)
assert response.next_page_token == "next_page_token_value"
def test_search_products_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
client.search_products()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchProductsRequest()
@pytest.mark.asyncio
async def test_search_products_async(
transport: str = "grpc_asyncio", request_type=private_catalog.SearchProductsRequest
):
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
private_catalog.SearchProductsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_products(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchProductsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchProductsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_products_async_from_dict():
await test_search_products_async(request_type=dict)
def test_search_products_field_headers():
client = PrivateCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = private_catalog.SearchProductsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
call.return_value = private_catalog.SearchProductsResponse()
client.search_products(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_products_field_headers_async():
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = private_catalog.SearchProductsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
private_catalog.SearchProductsResponse()
)
await client.search_products(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_search_products_pager(transport_name: str = "grpc"):
client = PrivateCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchProductsResponse(
products=[
private_catalog.Product(),
private_catalog.Product(),
private_catalog.Product(),
],
next_page_token="abc",
),
private_catalog.SearchProductsResponse(products=[], next_page_token="def",),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(),], next_page_token="ghi",
),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(), private_catalog.Product(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", ""),)),
)
pager = client.search_products(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, private_catalog.Product) for i in results)
def test_search_products_pages(transport_name: str = "grpc"):
client = PrivateCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_products), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchProductsResponse(
products=[
private_catalog.Product(),
private_catalog.Product(),
private_catalog.Product(),
],
next_page_token="abc",
),
private_catalog.SearchProductsResponse(products=[], next_page_token="def",),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(),], next_page_token="ghi",
),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(), private_catalog.Product(),],
),
RuntimeError,
)
pages = list(client.search_products(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_products_async_pager():
    client = PrivateCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_products), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchProductsResponse(
products=[
private_catalog.Product(),
private_catalog.Product(),
private_catalog.Product(),
],
next_page_token="abc",
),
private_catalog.SearchProductsResponse(products=[], next_page_token="def",),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(),], next_page_token="ghi",
),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(), private_catalog.Product(),],
),
RuntimeError,
)
async_pager = await client.search_products(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, private_catalog.Product) for i in responses)
@pytest.mark.asyncio
async def test_search_products_async_pages():
    client = PrivateCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_products), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchProductsResponse(
products=[
private_catalog.Product(),
private_catalog.Product(),
private_catalog.Product(),
],
next_page_token="abc",
),
private_catalog.SearchProductsResponse(products=[], next_page_token="def",),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(),], next_page_token="ghi",
),
private_catalog.SearchProductsResponse(
products=[private_catalog.Product(), private_catalog.Product(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.search_products(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
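# SearchVersions RPC: sync/async unary calls, empty requests, routing headers, and pager behavior.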
@pytest.mark.parametrize("request_type", [private_catalog.SearchVersionsRequest, dict,])
def test_search_versions(request_type, transport: str = "grpc"):
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = private_catalog.SearchVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.search_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchVersionsPager)
assert response.next_page_token == "next_page_token_value"
def test_search_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
client.search_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchVersionsRequest()
@pytest.mark.asyncio
async def test_search_versions_async(
transport: str = "grpc_asyncio", request_type=private_catalog.SearchVersionsRequest
):
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
private_catalog.SearchVersionsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == private_catalog.SearchVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_versions_async_from_dict():
await test_search_versions_async(request_type=dict)
def test_search_versions_field_headers():
client = PrivateCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = private_catalog.SearchVersionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
call.return_value = private_catalog.SearchVersionsResponse()
client.search_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_versions_field_headers_async():
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = private_catalog.SearchVersionsRequest()
request.resource = "resource/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
private_catalog.SearchVersionsResponse()
)
await client.search_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_search_versions_pager(transport_name: str = "grpc"):
client = PrivateCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchVersionsResponse(
versions=[
private_catalog.Version(),
private_catalog.Version(),
private_catalog.Version(),
],
next_page_token="abc",
),
private_catalog.SearchVersionsResponse(versions=[], next_page_token="def",),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(),], next_page_token="ghi",
),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(), private_catalog.Version(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", ""),)),
)
pager = client.search_versions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, private_catalog.Version) for i in results)
def test_search_versions_pages(transport_name: str = "grpc"):
client = PrivateCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchVersionsResponse(
versions=[
private_catalog.Version(),
private_catalog.Version(),
private_catalog.Version(),
],
next_page_token="abc",
),
private_catalog.SearchVersionsResponse(versions=[], next_page_token="def",),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(),], next_page_token="ghi",
),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(), private_catalog.Version(),],
),
RuntimeError,
)
pages = list(client.search_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_versions_async_pager():
    client = PrivateCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchVersionsResponse(
versions=[
private_catalog.Version(),
private_catalog.Version(),
private_catalog.Version(),
],
next_page_token="abc",
),
private_catalog.SearchVersionsResponse(versions=[], next_page_token="def",),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(),], next_page_token="ghi",
),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(), private_catalog.Version(),],
),
RuntimeError,
)
async_pager = await client.search_versions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, private_catalog.Version) for i in responses)
@pytest.mark.asyncio
async def test_search_versions_async_pages():
    client = PrivateCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
private_catalog.SearchVersionsResponse(
versions=[
private_catalog.Version(),
private_catalog.Version(),
private_catalog.Version(),
],
next_page_token="abc",
),
private_catalog.SearchVersionsResponse(versions=[], next_page_token="def",),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(),], next_page_token="ghi",
),
private_catalog.SearchVersionsResponse(
versions=[private_catalog.Version(), private_catalog.Version(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.search_versions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
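# Client construction should reject mutually exclusive combinations of credentials, credentials_file,
# api_key, scopes, and a pre-built transport instance.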
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.PrivateCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.PrivateCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PrivateCatalogClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.PrivateCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = PrivateCatalogClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = PrivateCatalogClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.PrivateCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PrivateCatalogClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.PrivateCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = PrivateCatalogClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.PrivateCatalogGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.PrivateCatalogGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.PrivateCatalogGrpcTransport,
transports.PrivateCatalogGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = PrivateCatalogClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.PrivateCatalogGrpcTransport,)
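# Base transport behavior: every RPC raises NotImplementedError, and credentials come from an
# explicit file or application default credentials (ADC).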
def test_private_catalog_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.PrivateCatalogTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_private_catalog_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.privatecatalog_v1beta1.services.private_catalog.transports.PrivateCatalogTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.PrivateCatalogTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"search_catalogs",
"search_products",
"search_versions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_private_catalog_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.privatecatalog_v1beta1.services.private_catalog.transports.PrivateCatalogTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PrivateCatalogTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_private_catalog_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.privatecatalog_v1beta1.services.private_catalog.transports.PrivateCatalogTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PrivateCatalogTransport()
adc.assert_called_once()
def test_private_catalog_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
PrivateCatalogClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.PrivateCatalogGrpcTransport,
transports.PrivateCatalogGrpcAsyncIOTransport,
],
)
def test_private_catalog_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.PrivateCatalogGrpcTransport, grpc_helpers),
(transports.PrivateCatalogGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_private_catalog_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"cloudprivatecatalog.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="cloudprivatecatalog.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
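# mTLS channel creation: ssl_channel_credentials takes precedence; otherwise
# client_cert_source_for_mtls is used to build the SSL credentials.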
@pytest.mark.parametrize(
"transport_class",
[
transports.PrivateCatalogGrpcTransport,
transports.PrivateCatalogGrpcAsyncIOTransport,
],
)
def test_private_catalog_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
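# The api_endpoint option controls the transport host; ":443" is appended when no port is given.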
def test_private_catalog_host_no_port():
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="cloudprivatecatalog.googleapis.com"
),
)
assert client.transport._host == "cloudprivatecatalog.googleapis.com:443"
def test_private_catalog_host_with_port():
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="cloudprivatecatalog.googleapis.com:8000"
),
)
assert client.transport._host == "cloudprivatecatalog.googleapis.com:8000"
def test_private_catalog_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.PrivateCatalogGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_private_catalog_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.PrivateCatalogGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.PrivateCatalogGrpcTransport,
transports.PrivateCatalogGrpcAsyncIOTransport,
],
)
def test_private_catalog_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.PrivateCatalogGrpcTransport,
transports.PrivateCatalogGrpcAsyncIOTransport,
],
)
def test_private_catalog_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
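# Resource path helpers should build canonical resource names and parse them back into components.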
def test_catalog_path():
catalog = "squid"
expected = "catalogs/{catalog}".format(catalog=catalog,)
actual = PrivateCatalogClient.catalog_path(catalog)
assert expected == actual
def test_parse_catalog_path():
expected = {
"catalog": "clam",
}
path = PrivateCatalogClient.catalog_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_catalog_path(path)
assert expected == actual
def test_product_path():
product = "whelk"
expected = "products/{product}".format(product=product,)
actual = PrivateCatalogClient.product_path(product)
assert expected == actual
def test_parse_product_path():
expected = {
"product": "octopus",
}
path = PrivateCatalogClient.product_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_product_path(path)
assert expected == actual
def test_version_path():
catalog = "oyster"
product = "nudibranch"
version = "cuttlefish"
expected = "catalogs/{catalog}/products/{product}/versions/{version}".format(
catalog=catalog, product=product, version=version,
)
actual = PrivateCatalogClient.version_path(catalog, product, version)
assert expected == actual
def test_parse_version_path():
expected = {
"catalog": "mussel",
"product": "winkle",
"version": "nautilus",
}
path = PrivateCatalogClient.version_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_version_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = PrivateCatalogClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = PrivateCatalogClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder,)
actual = PrivateCatalogClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = PrivateCatalogClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization,)
actual = PrivateCatalogClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = PrivateCatalogClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project,)
actual = PrivateCatalogClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = PrivateCatalogClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = PrivateCatalogClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = PrivateCatalogClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = PrivateCatalogClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.PrivateCatalogTransport, "_prep_wrapped_messages"
) as prep:
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.PrivateCatalogTransport, "_prep_wrapped_messages"
) as prep:
transport_class = PrivateCatalogClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = PrivateCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = PrivateCatalogClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(PrivateCatalogClient, transports.PrivateCatalogGrpcTransport),
(PrivateCatalogAsyncClient, transports.PrivateCatalogGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-private-catalog
|
tests/unit/gapic/privatecatalog_v1beta1/test_private_catalog.py
|
Python
|
apache-2.0
| 81,898
|
[
"Octopus"
] |
f629e3ab02ccad9d78af50b5968974166a7aee49d860bbaa696c2c7b5ef06f73
|
import pandas as pd
import mdtraj as md
from ace_x_y_nh2_parameters import *
larger = pd.read_csv("./data/larger_couplings.csv")
smaller = pd.read_csv("./data/smaller_couplings.csv")
reference = []
for aa in amino_acids:
value = smaller.ix["G"][aa]
xyz = ["G%s" % aa, 0, value]
reference.append(xyz)
value = larger.ix["G"][aa]
xyz = ["G%s" % aa, 1, value]
reference.append(xyz)
value = larger.ix[aa]["G"]
xyz = ["%sG" % aa, 0, value]
reference.append(xyz)
value = smaller.ix[aa]["G"]
xyz = ["%sG" % aa, 1, value]
reference.append(xyz)
reference = pd.DataFrame(reference, columns=["seq", "resSeq", "value"])
reference = reference.set_index(["seq", "resSeq"]).value
reference = reference.drop_duplicates()
data = []
for (ff, water, seq) in products:
try:
aa0, aa1 = seq.split("_")[1]
aa_string = "%s%s" % (aa0, aa1)
t = md.load("./dcd/%s_%s_%s.dcd" % (ff, water, seq), top="./pdbs/%s.pdb" % (seq))[1500:]
except:
continue
#phi = md.compute_phi(t)[1] * 180 / np.pi
#J0, J1 = scalar_couplings.J3_HN_HA(phi).mean(0)
J0, J1 = md.compute_J3_HN_HA(t)[1].mean(0)
data.append([ff, water, aa_string, 0, J0])
data.append([ff, water, aa_string, 1, J1])
data = pd.DataFrame(data, columns=["ff", "water", "seq", "resSeq", "value"])
X = data.pivot_table(cols=["seq", "resSeq"], rows=["ff", "water"], values="value")
delta = X - reference
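# Editor's note (assumption): the 0.36 below looks like the assumed uncertainty (in Hz) of
# the 3J(HN,HA) prediction, so Z is the deviation from the reference couplings in units of
# that uncertainty and rms_by_model is the RMS Z-score for each (ff, water) combination.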
Z = (delta / 0.36)
rms_by_model = (Z ** 2.).mean(1) ** 0.5
rms_by_model
|
choderalab/open-forcefield-group
|
nmr/ace_x_y_nh2/code/analyze_scalar_couplings.py
|
Python
|
gpl-2.0
| 1,531
|
[
"MDTraj"
] |
009772de2e27d7afdd97ca976992020201c204fd1aa35015567593e434d068f6
|
""" ResourceStatus
Module that acts as a helper for knowing the status of a resource.
It takes care of switching between the CS and the RSS.
The status is kept in the RSSCache object, which is a small wrapper on top of DictCache
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime, timedelta
import math
from time import sleep
import six
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Utilities.RSSCacheNoThread import RSSCache
from DIRAC.ResourceStatusSystem.Utilities.RssConfiguration import RssConfiguration
from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import getPoliciesThatApply
@six.add_metaclass(DIRACSingleton)
class ResourceStatus(object):
"""
ResourceStatus helper that connects to CS if RSS flag is not Active.
  It keeps the connection to the db / server as an object member, to avoid
  repeatedly creating new ones.
"""
def __init__(self, rssFlag=None):
"""
Constructor, initializes the rssClient.
"""
self.log = gLogger.getSubLogger(self.__class__.__name__)
self.rssConfig = RssConfiguration()
self.__opHelper = Operations()
self.rssClient = ResourceStatusClient()
self.rssFlag = rssFlag
if rssFlag is None:
self.rssFlag = self.__getMode()
cacheLifeTime = int(self.rssConfig.getConfigCache())
# RSSCache only affects the calls directed to RSS, if using the CS it is not used.
self.rssCache = RSSCache(cacheLifeTime, self.__updateRssCache)
def getElementStatus(self, elementName, elementType, statusType=None, default=None, vO=None):
"""
Helper function, tries to get information from the RSS for the given
Element, otherwise, it gets it from the CS.
:param elementName: name of the element or list of element names
:type elementName: str, list
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement)
:type statusType: None, str, list
    :param default: default value (meaningful only when rss is InActive)
:type default: str
:return: S_OK/S_ERROR
:rtype: dict
:Example:
>>> getElementStatus('CE42', 'ComputingElement')
S_OK( { 'CE42': { 'all': 'Active' } } } )
>>> getElementStatus('SE1', 'StorageElement', 'ReadAccess')
S_OK( { 'SE1': { 'ReadAccess': 'Banned' } } } )
>>> getElementStatus('SE1', 'ThisIsAWrongElementType', 'ReadAccess')
S_ERROR( xyz.. )
>>> getElementStatus('ThisIsAWrongName', 'StorageElement', 'WriteAccess')
S_ERROR( xyz.. )
>>> getElementStatus('A_file_catalog', 'FileCatalog')
S_OK( { 'A_file_catalog': { 'all': 'Active' } } } )
>>> getElementStatus('SE1', 'StorageElement', ['ReadAccess', 'WriteAccess'])
S_OK( { 'SE1': { 'ReadAccess': 'Banned' , 'WriteAccess': 'Active'} } } )
>>> getElementStatus('SE1', 'StorageElement')
S_OK( { 'SE1': { 'ReadAccess': 'Probing' ,
'WriteAccess': 'Active',
'CheckAccess': 'Degraded',
'RemoveAccess': 'Banned'} } } )
>>> getElementStatus(['CE1', 'CE2'], 'ComputingElement')
S_OK( {'CE1': {'all': 'Active'},
'CE2': {'all': 'Probing'}}}
"""
allowedParameters = ["StorageElement", "ComputingElement", "FTS", "Catalog"]
if elementType not in allowedParameters:
return S_ERROR("%s in not in the list of the allowed parameters: %s" % (elementType, allowedParameters))
# Apply defaults
if not statusType:
if elementType == "StorageElement":
statusType = ['ReadAccess', 'WriteAccess', 'CheckAccess', 'RemoveAccess']
elif elementType == "ComputingElement":
statusType = ['all']
elif elementType == "FTS":
statusType = ['all']
elif elementType == "Catalog":
statusType = ['all']
if self.rssFlag:
return self.__getRSSElementStatus(elementName, elementType, statusType, vO)
else:
return self.__getCSElementStatus(elementName, elementType, statusType, default)
def setElementStatus(self, elementName, elementType, statusType, status, reason=None, tokenOwner=None):
""" Tries set information in RSS and in CS.
:param elementName: name of the element
:type elementName: str
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement)
:type statusType: str
:param reason: reason for setting the status
:type reason: str
:param tokenOwner: owner of the token (meaningful only when rss is Active)
:type tokenOwner: str
:return: S_OK/S_ERROR
:rtype: dict
:Example:
>>> setElementStatus('CE42', 'ComputingElement', 'all', 'Active')
S_OK( xyz.. )
>>> setElementStatus('SE1', 'StorageElement', 'ReadAccess', 'Banned')
S_OK( xyz.. )
"""
if self.rssFlag:
return self.__setRSSElementStatus(elementName, elementType, statusType, status, reason, tokenOwner)
else:
return self.__setCSElementStatus(elementName, elementType, statusType, status)
################################################################################
def __updateRssCache(self):
""" Method used to update the rssCache.
It will try 5 times to contact the RSS before giving up
"""
meta = {'columns': ['Name', 'ElementType', 'StatusType', 'Status', 'VO']}
for ti in range(5):
rawCache = self.rssClient.selectStatusElement('Resource', 'Status', meta=meta)
if rawCache['OK']:
break
self.log.warn("Can't get resource's status", rawCache['Message'] + "; trial %d" % ti)
sleep(math.pow(ti, 2))
self.rssClient = ResourceStatusClient()
if not rawCache['OK']:
return rawCache
return S_OK(getCacheDictFromRawData(rawCache['Value']))
################################################################################
def __getRSSElementStatus(self, elementName, elementType, statusType, vO):
""" Gets from the cache or the RSS the Elements status. The cache is a
copy of the DB table. If it is not on the cache, most likely is not going
to be on the DB.
There is one exception: item just added to the CS, e.g. new Element.
    During the period between when it is added to the DB and when the changes are
    propagated to the cache, the result will be inconsistent, but not dangerous.
    Just wait <cacheLifeTime> minutes.
:param elementName: name of the element or list of element names
:type elementName: str, list
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement,
otherwise it is 'all' or ['all'])
:type statusType: str, list
"""
cacheMatch = self.rssCache.match(elementName, elementType, statusType, vO)
self.log.debug('__getRSSElementStatus')
self.log.debug(cacheMatch)
return cacheMatch
def __getCSElementStatus(self, elementName, elementType, statusType, default):
""" Gets from the CS the Element status
:param elementName: name of the element
:type elementName: str
:param elementType: type of the element (StorageElement, ComputingElement, FTS, Catalog)
:type elementType: str
:param statusType: type of the status (meaningful only when elementType==StorageElement)
:type statusType: str, list
    :param default: default value
:type default: None, str
"""
# DIRAC doesn't store the status of ComputingElements nor FTS in the CS, so here we can just return 'Active'
if elementType in ('ComputingElement', 'FTS'):
return S_OK({elementName: {'all': 'Active'}})
# If we are here it is because elementType is either 'StorageElement' or 'Catalog'
if elementType == 'StorageElement':
cs_path = "/Resources/StorageElements"
elif elementType == 'Catalog':
cs_path = "/Resources/FileCatalogs"
statusType = ['Status']
if not isinstance(elementName, list):
elementName = [elementName]
if not isinstance(statusType, list):
statusType = [statusType]
result = {}
for element in elementName:
for sType in statusType:
# Look in standard location, 'Active' by default
res = gConfig.getValue("%s/%s/%s" % (cs_path, element, sType), 'Active')
result.setdefault(element, {})[sType] = res
if result:
return S_OK(result)
if default is not None:
defList = [[el, statusType, default] for el in elementName]
return S_OK(getDictFromList(defList))
_msg = "Element '%s', with statusType '%s' is unknown for CS."
return S_ERROR(DErrno.ERESUNK, _msg % (elementName, statusType))
def __setRSSElementStatus(self, elementName, elementType, statusType, status, reason, tokenOwner):
"""
Sets on the RSS the Elements status
"""
expiration = datetime.utcnow() + timedelta(days=1)
self.rssCache.acquireLock()
try:
res = self.rssClient.addOrModifyStatusElement('Resource', 'Status', name=elementName,
elementType=elementType, status=status,
statusType=statusType, reason=reason,
tokenOwner=tokenOwner, tokenExpiration=expiration)
if res['OK']:
self.rssCache.refreshCache()
if not res['OK']:
_msg = 'Error updating Element (%s,%s,%s)' % (elementName, statusType, status)
gLogger.warn('RSS: %s' % _msg)
return res
finally:
# Release lock, no matter what.
self.rssCache.releaseLock()
def __setCSElementStatus(self, elementName, elementType, statusType, status):
"""
Sets on the CS the Elements status
"""
# DIRAC doesn't store the status of ComputingElements nor FTS in the CS, so here we can just do nothing
if elementType in ('ComputingElement', 'FTS'):
return S_OK()
# If we are here it is because elementType is either 'StorageElement' or 'Catalog'
statuses = self.rssConfig.getConfigStatusType(elementType)
if statusType not in statuses:
gLogger.error("%s is not a valid statusType" % statusType)
return S_ERROR("%s is not a valid statusType: %s" % (statusType, statuses))
if elementType == 'StorageElement':
cs_path = "/Resources/StorageElements"
elif elementType == 'Catalog':
cs_path = "/Resources/FileCatalogs"
# FIXME: This a probably outdated location (new one is in /Operations/[]/Services/Catalogs)
# but needs to be VO-aware
statusType = 'Status'
csAPI = CSAPI()
csAPI.setOption("%s/%s/%s/%s" % (cs_path, elementName, elementType, statusType), status)
res = csAPI.commitChanges()
if not res['OK']:
gLogger.warn('CS: %s' % res['Message'])
return res
def __getMode(self):
"""
Gets flag defined (or not) on the RSSConfiguration.
If defined as 'Active', we use RSS, if not, we use the CS when possible (and WMS for Sites).
"""
res = self.rssConfig.getConfigState()
if res == 'Active':
if self.rssClient is None:
self.rssClient = ResourceStatusClient()
return True
self.rssClient = None
return False
def isStorageElementAlwaysBanned(self, seName, statusType):
""" Checks if the AlwaysBanned policy is applied to the SE given as parameter
:param seName: string, name of the SE
    :param statusType: ReadAccess, WriteAccess, RemoveAccess, CheckAccess
:returns: S_OK(True/False)
"""
res = getPoliciesThatApply({'name': seName, 'statusType': statusType})
if not res['OK']:
self.log.error("isStorageElementAlwaysBanned: unable to get the information", res['Message'])
return res
isAlwaysBanned = 'AlwaysBanned' in [policy['type'] for policy in res['Value']]
return S_OK(isAlwaysBanned)
################################################################################
def getDictFromList(fromList):
"""
Auxiliary method that given a list returns a dictionary of dictionaries:
{ site1 : { statusType1 : st1, statusType2 : st2 }, ... }
"""
res = {}
for listElement in fromList:
site, sType, status = listElement
if site not in res:
res[site] = {}
res[site][sType] = status
return res
def getCacheDictFromRawData(rawList):
"""
  Formats the raw data list, which we know must have tuples of five elements
  ( element1, element2, element3, element4, element5 ), into a dictionary of tuples with the format
  { ( element1, element2, element3, element5 ): element4 }.
The resulting dictionary will be the new Cache.
It happens that element1 is elementName,
element2 is elementType,
element3 is statusType,
  element4 is status,
  element5 is vO.
:Parameters:
**rawList** - `list`
      list of five-element tuples [( element1, element2, element3, element4, element5 ),... ]
:return: dict of the form { ( elementName, elementType, statusType, vO ) : status, ... }
"""
res = {}
for entry in rawList:
res.update({(entry[0], entry[1], entry[2], entry[4]): entry[3]})
return res
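# Illustrative example (editor's addition, values are made up):
#   getCacheDictFromRawData([('CE42', 'ComputingElement', 'all', 'Active', 'lhcb')])
#   returns {('CE42', 'ComputingElement', 'all', 'lhcb'): 'Active'}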
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Client/ResourceStatus.py
|
Python
|
gpl-3.0
| 14,021
|
[
"DIRAC"
] |
2984bd3fbbd756e4ba012c2280a57e7c5b4a8812a3aceb37a460ace79401dacf
|
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.Qt import Qt
class WorldView(QtWidgets.QWidget):
def __init__(self, scale, parent):
super(WorldView, self).__init__(parent)
self.world = parent.world
self.resize(self.world.area[0], self.world.area[1])
self.scale = scale
self.food_scale = scale * 0.5
self.mooses = []
self.flip_mooses = []
self.food = None
self.load_images()
def load_images(self):
self.mooses.append(QtGui.QImage('img/moose1.png'))
self.mooses.append(QtGui.QImage('img/moose2.png'))
self.mooses.append(QtGui.QImage('img/moose3.png'))
self.mooses.append(QtGui.QImage('img/moose4.png'))
self.food = QtGui.QImage('img/food.png')
self.mooses = [x.scaled(int(x.width() * self.scale), int(x.height() * self.scale)) for x in self.mooses]
self.flip_mooses = [x.copy().mirrored(horizontal=True, vertical=False) for x in self.mooses]
self.food = self.food.scaled(int(self.food.width() * self.food_scale), int(self.food.height() * self.food_scale))
def paintEvent(self, event):
paint = QtGui.QPainter()
paint.begin(self)
paint.fillRect(QtCore.QRectF(QtCore.QPointF(0, 0), QtCore.QPointF(self.width(), self.height())), QtGui.QColor(0, 100, 0, 255))
for food in self.world.food:
if not food.active:
continue
paint.drawImage(QtCore.QPointF(food.x, food.y), self.food)
for moose in self.world.animals:
if not moose.item.alive:
continue
if moose.xspeed > 0:
moo = self.flip_mooses[int(moose.item.shape, 2)]
else:
moo = self.mooses[int(moose.item.shape, 2)]
paint.drawImage(QtCore.QPointF(moose.x, moose.y + moose.yshift), moo)
paint.end()
class QWorld(QtWidgets.QWidget):
def __init__(self, world, scale=1.0):
super(QWorld, self).__init__()
self.world = world
self._layout = QtWidgets.QHBoxLayout()
self._view = WorldView(scale, self)
self.resize(800, 600)
self.setWindowTitle('Mooses')
self._layout.addWidget(self._view)
self.setLayout(self._layout)
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self._view.update)
self.timer.start(100)
|
jroivas/moose-mating
|
qworld.py
|
Python
|
gpl-3.0
| 2,435
|
[
"MOOSE"
] |
6be9dec84d6a807336c5507913b33ebb487afba1325ff7ffedaf6a881daa827d
|
import sys
py_ver = sys.version_info[0] + ( sys.version_info[1] / 10.0 )
if py_ver < 3.5:
raise "Must be using Python 3.5 or better"
import os
import time
# this helps me know how long in time a file will play
import wave
import random
import platform
import subprocess
"""
A player of our sounds.
Sound files are in our audio directory.
This one uses 'Process' and launches outside processes to play sound;
on the RPi that should be 'aplay', which works asynchronously and nicely.
However, you can set different players.
Made available under the MIT license as follows:
Copyright 2017 Brian Bulkowski brian@bulkowski.org
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# why are these lists? Because it's cool to have a selection of different sounds
# this will randomly play one from the list
game_audio_files = {
'portal_neutralized': [ '../audio/portal_neutralized.wav' ],
'portal_online': [ '../audio/portal_online.wav' ],
'reso_deployed': [ '../audio/resonator_deployed.wav' ],
'reso_destroyed': [ '../audio/resonator_destroyed.wav'],
'under_attack': [ '../audio/under_attack.wav' ],
}
background_sounds = [
'../audio/violin-test-PCM16.wav' ]
# insert the filename at the command: so 1 is right after the command
#command_filename_offset = 3
#command_template = [ 'aplay', ' -f','cd' ]
# globals - how do I play a sound?
# this is kind of dumb. I should check for the existence of different commands
# instead of coding in something like this
if (platform.system() == 'Darwin'):
command_filename_offset = 1
command_template = [ "afplay" ]
elif (platform.system() == 'Linux'):
command_filename_offset = 1
command_template = [ "aplay" ]
else:
print ( "unknown operating system, can't play sounds ")
sys.exit(0)
# returns some kind of object to allow killing
def play_sound_start( filename ):
global command_template
global command_filename_offset
stat = os.stat( filename )
# let's check the length, in time
wf = wave.open(filename, 'rb')
bytes_per_second = wf.getnchannels() * wf.getframerate() * wf.getsampwidth()
sec = stat.st_size / bytes_per_second
print ("seconds is: ",sec)
ct = list(command_template)
ct.insert(command_filename_offset, filename)
print(" passing to popen: ", ct)
proc = subprocess.Popen( ct )
# print (" delaying ")
# time.sleep( sec - 1.0 )
# time.sleep( 2.0 )
# test: kill the sound, todo, pass back an object that can respond to a kill
return proc
def play_sound_end( proc ):
proc.kill()
# Playing sound one and two at the same time, as a test
# Although the RPI has the hardware capability of playing two sounds,
# other things don't. So this is a bad test.
#print( " starting two sounds " )
#s1 = play_sound_start( '../audio/portal_online.wav' )
#s2 = play_sound_start( '../audio/violin-test-PCM16.wav' )
#print ( " listen to the sounds! " )
#time.sleep(10.0)
#print ( " terminating the sounds " )
#play_sound_end( s1 )
#play_sound_end( s2 )
def loop_sounds():
for key in game_audio_files:
fl = game_audio_files[ key ]
if len(fl) == 0:
continue
elif len(fl) == 1:
fn = fl[0]
else:
fn = fl[ random.randint(0,len(fl)-1) ]
print (" playing audio file: ",fn )
# let's check the length
wf = wave.open(fn, 'rb')
print (" wave object: channels ",wf.getnchannels()," rate ",wf.getframerate()," samp width ",wf.getsampwidth() )
play_sound_start(fn)
loop_sounds()
|
bbulkow/MagnusFlora
|
samples/sound.cmd.py
|
Python
|
mit
| 4,356
|
[
"Brian"
] |
37d5261a67d9eb4b077811d190a2ae29ab5c07ea903afc0fc3ff6611d48fbac5
|
"""Ansible specific pylint plugin for checking format string usage."""
# (c) 2018, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers import utils
from pylint.checkers.utils import check_messages
try:
from pylint.checkers.utils import parse_format_method_string
except ImportError:
# noinspection PyUnresolvedReferences
from pylint.checkers.strings import parse_format_method_string
MSGS = {
'E9305': ("Format string contains automatic field numbering "
"specification",
"ansible-format-automatic-specification",
"Used when a PEP 3101 format string contains automatic "
"field numbering (e.g. '{}').",
{'minversion': (2, 6)}),
'E9390': ("bytes object has no .format attribute",
"ansible-no-format-on-bytestring",
"Used when a bytestring was used as a PEP 3101 format string "
"as Python3 bytestrings do not have a .format attribute",
{'minversion': (3, 0)}),
}
class AnsibleStringFormatChecker(BaseChecker):
"""Checks string formatting operations to ensure that the format string
is valid and the arguments match the format string.
"""
__implements__ = (IAstroidChecker,)
name = 'string'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
"""Visit a call node."""
func = utils.safe_infer(node.func)
if (isinstance(func, astroid.BoundMethod)
and isinstance(func.bound, astroid.Instance)
and func.bound.name in ('str', 'unicode', 'bytes')):
if func.name == 'format':
self._check_new_format(node, func)
def _check_new_format(self, node, func):
""" Check the new string formatting """
if (isinstance(node.func, astroid.Attribute)
and not isinstance(node.func.expr, astroid.Const)):
return
try:
strnode = next(func.bound.infer())
except astroid.InferenceError:
return
if not isinstance(strnode, astroid.Const):
return
if isinstance(strnode.value, bytes):
self.add_message('ansible-no-format-on-bytestring', node=node)
return
if not isinstance(strnode.value, str):
return
if node.starargs or node.kwargs:
return
try:
num_args = parse_format_method_string(strnode.value)[1]
except utils.IncompleteFormatString:
return
if num_args:
self.add_message('ansible-format-automatic-specification',
node=node)
return
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleStringFormatChecker(linter))
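# Illustrative only (editor's addition): a call such as
#   "{} and {}".format(1, 2)
# would be flagged as ansible-format-automatic-specification (E9305), while calling
# .format() on a bytes literal is what E9390 is meant to catch on Python 3.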
|
nitzmahone/ansible
|
test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py
|
Python
|
gpl-3.0
| 3,124
|
[
"VisIt"
] |
ff85b46047961e6ad1c1f825412b43f61d7602aacbe79914787d9871dbc61622
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import difflib
import os
import re
def _diff(orig, new, short, verbose):
    lines = list(difflib.unified_diff(orig, new))
if not lines:
return ''
return ''.join('%s %s' % (short, line) for line in lines)
def diff_files(orig, new, verbose=False):
"""Diff two files.
    @return: the unified diff as a string; empty if the files do not differ
    :rtype: str
"""
with open(orig) as f_orig:
with open(new) as f_new:
return _diff(f_orig.readlines(),
f_new.readlines(),
short=os.path.basename(orig),
verbose=verbose)
def diff_lines(orig_lines, new_lines, short='<stdin>', verbose=False):
"""Diff two files.
@return: True i the files differ otherwise False
:rtype: bool
"""
return _diff(orig_lines,
new_lines,
short=short,
verbose=verbose)
def diff_strings(orig, new, short='<input>', verbose=False):
"""Diff two strings.
    @return: the unified diff as a string; empty if the strings do not differ
    :rtype: str
"""
def _tolines(s):
        return [line + '\n' for line in s.split('\n')]
return _diff(_tolines(orig),
_tolines(new),
short=short,
verbose=verbose)
def diff_pdf_htmls(original_filename, filename):
for fname in [original_filename, filename]:
with open(fname) as f:
data = f.read()
# REPLACE all generated dates with %%DATE%%
data = re.sub(r'name="date" content="(.*)"',
r'name="date" content="%%DATE%%"', data)
# Remove poppler identifier and version
data = re.sub(r'<pdf2xml(.*)>',
r'<pdf2xml>', data)
with open(fname, 'w') as f:
f.write(data)
return diff_files(original_filename, filename)
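# Minimal usage sketch (editor's addition, not part of the original module):
#   from stoqlib.lib.diffutils import diff_strings
#   diff = diff_strings("a\nb", "a\nc")
#   if diff:
#       print(diff)   # a non-empty string means the inputs differ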
|
andrebellafronte/stoq
|
stoqlib/lib/diffutils.py
|
Python
|
gpl-2.0
| 2,742
|
[
"VisIt"
] |
7e9b73f476b97bc043c045cb21bda864db83e24cfbe855338fec8e0f0082aaab
|
# -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
import urllib
import xbmcgui
from resources.lib import Utils
from kodi65 import utils
from kodi65 import addon
from kodi65 import VideoItem
from kodi65 import ItemList
GOOGLE_PLACES_KEY = 'AIzaSyCgfpm7hE_ufKMoiSUhoH75bRmQqV8b7P4'
BASE_URL = 'https://maps.googleapis.com/maps/api/place/'
CATEGORIES = {"accounting": addon.LANG(32000),
"airport": addon.LANG(32035),
"amusement_park": addon.LANG(32036),
"aquarium": addon.LANG(32037),
"art_gallery": addon.LANG(32038),
"atm": addon.LANG(32039),
"bakery": addon.LANG(32040),
"bank": addon.LANG(32041),
"bar": addon.LANG(32042),
"beauty_salon": addon.LANG(32016),
"bicycle_store": addon.LANG(32017),
"book_store": addon.LANG(32018),
"bowling_alley": addon.LANG(32023),
"bus_station": addon.LANG(32033),
"cafe": addon.LANG(32043),
"campground": addon.LANG(32044),
"car_dealer": addon.LANG(32045),
"car_rental": addon.LANG(32046),
"car_repair": addon.LANG(32047),
"car_wash": addon.LANG(32048),
"casino": addon.LANG(32049),
"cemetery": addon.LANG(32050),
"church": addon.LANG(32051),
"city_hall": addon.LANG(32052),
"clothing_store": addon.LANG(32053),
"convenience_store": addon.LANG(32054),
"courthouse": addon.LANG(32055),
"dentist": addon.LANG(32056),
"department_store": addon.LANG(32057),
"doctor": addon.LANG(32058),
"electrician": addon.LANG(32059),
"electronics_store": addon.LANG(32060),
"embassy": addon.LANG(32061),
"establishment": addon.LANG(32062),
"finance": addon.LANG(29957),
"fire_station": addon.LANG(32063),
"florist": addon.LANG(32064),
"food": addon.LANG(32006),
"funeral_home": addon.LANG(32065),
"furniture_store": addon.LANG(32066),
"gas_station": addon.LANG(32067),
"general_contractor": addon.LANG(32068),
"grocery_or_supermarket": addon.LANG(32069),
"gym": addon.LANG(32070),
"hair_care": addon.LANG(32071),
"hardware_store": addon.LANG(32072),
"health": addon.LANG(32073),
"hindu_temple": addon.LANG(32074),
"home_goods_store": addon.LANG(32075),
"hospital": addon.LANG(32076),
"insurance_agency": addon.LANG(32077),
"jewelry_store": addon.LANG(32078),
"laundry": addon.LANG(32079),
"lawyer": addon.LANG(32080),
"library": addon.LANG(14022),
"liquor_store": addon.LANG(32081),
"local_government_office": addon.LANG(32082),
"locksmith": addon.LANG(32083),
"lodging": addon.LANG(32084),
"meal_delivery": addon.LANG(32085),
"meal_takeaway": addon.LANG(32086),
"mosque": addon.LANG(32087),
"movie_rental": addon.LANG(32088),
"movie_theater": addon.LANG(32089),
"moving_company": addon.LANG(32090),
"museum": addon.LANG(32091),
"night_club": addon.LANG(32092),
"painter": addon.LANG(32093),
"park": addon.LANG(32094),
"parking": addon.LANG(32095),
"pet_store": addon.LANG(32096),
"pharmacy": addon.LANG(32097),
"physiotherapist": addon.LANG(32098),
"place_of_worship": addon.LANG(32099),
"plumber": addon.LANG(32100),
"police": addon.LANG(32101),
"post_office": addon.LANG(32102),
"real_estate_agency": addon.LANG(32103),
"restaurant": addon.LANG(32104),
"roofing_contractor": addon.LANG(32105),
"rv_park": addon.LANG(32106),
"school": addon.LANG(32107),
"shoe_store": addon.LANG(32108),
"spa": addon.LANG(32109),
"stadium": addon.LANG(32110),
"storage": addon.LANG(154),
"store": addon.LANG(32111),
"subway_station": addon.LANG(32112),
"synagogue": addon.LANG(32113),
"taxi_stand": addon.LANG(32114),
"train_station": addon.LANG(32115),
"travel_agency": addon.LANG(32116),
"university": addon.LANG(32117),
"veterinary_care": addon.LANG(32118),
"zoo": addon.LANG(32119)
}
class GooglePlaces():
def __init__(self):
pass
def select_category(self):
modeselect = [addon.LANG(32120)]
        modeselect += list(CATEGORIES.values())
        index = xbmcgui.Dialog().select(addon.LANG(32121), modeselect)
        if index > 0:
            return list(CATEGORIES.keys())[index - 1]
elif index > -1:
return ""
else:
return None
def get_locations(self, lat, lon, radius, locationtype):
params = {"key": GOOGLE_PLACES_KEY,
"radius": min(30000, radius),
"location": "%s,%s" % (lat, lon),
"types": locationtype}
base_url = BASE_URL + 'nearbysearch/json?'
results = Utils.get_JSON_response(base_url + urllib.urlencode(params))
places = ItemList()
if "meta" in results and results['meta']['code'] == 400:
utils.log("LIMIT EXCEEDED")
return "", []
if "results" not in results:
return "", []
for place in results['results']:
try:
params = {"maxwidth": 400,
"photoreference": place['photos'][0]['photo_reference'],
"key": GOOGLE_PLACES_KEY}
photo = BASE_URL + 'photo?' + urllib.urlencode(params)
except:
photo = ""
description = place['vicinity'] if "vicinity" in place else place.get('formatted_address', "")
item = VideoItem(label=place['name'],
label2=" / ".join(place['types']))
item.set_artwork({"thumb": photo,
"icon": place['icon']})
item.set_info("rating", place['rating'] * 2.0 if "rating" in place else "")
item.set_properties({'description': description,
"lat": place['geometry']['location']['lat'],
"lon": place['geometry']['location']['lng']})
places.append(item)
return places
GP = GooglePlaces()
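# Usage sketch (editor's addition; coordinates and radius are illustrative values):
#   category = GP.select_category()   # a CATEGORIES key, "" for the first menu entry, or None if cancelled
#   if category is not None:
#       places = GP.get_locations(lat=48.2082, lon=16.3738, radius=5000, locationtype=category)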
|
phil65/script.maps.browser
|
resources/lib/GooglePlaces.py
|
Python
|
gpl-2.0
| 7,111
|
[
"CASINO"
] |
6ced6431afd4c3bc5fc71294b428dd380a39a00faef5f492137508181d2a64f5
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
import setuptools
# To use a consistent encoding
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setuptools.setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='program_versions', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.5.4', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Know version changes of your favourites projects in your terminal.', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
long_description_content_type="text/markdown",
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/dupgit/versions', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Olivier Delhomme', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='olivier.delhomme@free.fr', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Version Control',
# Pick your license as you wish
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='version-checker version follower rss atom command-line', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=['versions', 'configuration', 'common', 'bylist', 'byproject', 'caches'],
packages=setuptools.find_packages(exclude=['tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['feedparser>=5.1.3', 'PyYAML>=3.11'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'versions=versions.versions:main',
],
},
)
|
dupgit/versions
|
setup.py
|
Python
|
gpl-3.0
| 7,035
|
[
"VisIt"
] |
dcc9a7e379ba439fc386c71b727896361628700daf949a1952d00043c2688c26
|
import pytest
import random
from pyshare import group, party, expense, payment
# Create test data.
group_names = ["Alfa", "Bravo", "Charlie", "Delta", "Echo", "Foxtrot", "Golf", "Hotel", "India", 'Juliett',
"Kilo", "Lima", "Mike", "November", "Oscar", "Papa", "Quebec", "Romeo", "Sierra", "Tango",
"Uniform", "Victor", "Whisky", "XRay", "Yankee", "Zulu"]
party_names = ["Michael", "Callie", "Brian", "Robert", "Viki", "Daniel", "Colin", "Trudy", "Beth", 'Rajesh',
"Savita", "Lindsay", "Matthew", "Kiel", "Kyle", "Chris", "Carol", "Jason", "Lisa", "Guy",
"Kala", "Ricky", "Taylor", "Xavier", "Yvette", "Zane"]
expense_types = ["Rent", "Groceries", "Maintenance", "Hotel", "Cat Litter", "Cable", "Internet", "Credit Card"
"Power", "Dry Cleaning", "Diapers"]
currency_types = ["USD", "EUR", "BRL", "CAD", "GBP", "HRK", "JPY", "NOK", "NZD", "PHP", "RON", "RUB", "TRY", "ZAR"]
# Create helper functions for generating random test data.
def rand_group() -> group.Group:
return group.Group(name=random.choice(group_names), currency=random.choice(currency_types))
def rand_party() -> party.Party:
return party.Party(name=random.choice(party_names))
def rand_expense() -> expense.Expense:
return expense.Expense(paid_for=random.choice(expense_types), currency=random.choice(currency_types),
amount=random.uniform(1.00, 999.99))
def rand_payment(e: expense.Expense) -> payment.Payment:
return payment.Payment(expense=e, paid_by=random.choice(e.parties_involved), currency=e.currency,
amount=e.amount)
class TestGroup:
@classmethod
def setup_class(cls):
cls.test_group = group.Group(name="Test", currency="USD")
cls.not_a_party = "This is not a party.Party, it's a string."
cls.test_party = party.Party(name="Michael")
cls.not_an_expense = ["This is not an expense.Expense, it's a list."]
cls.test_expense = expense.Expense(paid_for="Impulse purchase", currency="USD", amount=153.19)
cls.not_a_payment = {"This is not a payment.Payment": "This is a dict."}
cls.test_payment = payment.Payment(expense=cls.test_expense, paid_by=cls.test_party, currency="USD", amount=10)
def test_assertion_error_if_add_not_party(self):
with pytest.raises(AssertionError):
self.test_group.add_party(self.not_a_party)
def test_no_error_if_add_party(self):
self.test_group.add_party(self.test_party)
assert len(self.test_group.parties) == 1
def test_assertion_error_if_add_not_expense(self):
with pytest.raises(AssertionError):
self.test_group.add_expense(self.not_an_expense)
def test_no_error_if_add_expense(self):
self.test_group.add_expense(self.test_expense)
assert len(self.test_group.expenses) == 1
def test_assertion_error_if_add_not_payment(self):
with pytest.raises(AssertionError):
self.test_group.add_payment(self.not_a_payment)
def test_no_error_if_add_payment(self):
self.test_group.add_payment(self.test_payment)
assert len(self.test_group.payments) == 1
def test_add_expense_to_group_also_adds_parties(self):
# Set up new data for this test and make sure no parties in group
new_group = rand_group()
new_expense = rand_expense()
new_party = rand_party()
assert not new_group.parties
# Add party to expense, then add expense to group
new_expense.add_party(new_party)
new_group.add_expense(new_expense)
# Ensure expense party added to group
assert len(new_group.parties) == 1
def test_standardize_group_expenses(self):
g = rand_group()
expenses = {rand_expense(), rand_expense(), rand_expense(), rand_expense()}
for e in expenses:
g.add_expense(e)
# Currencies should not match
assert not g.currencies_match()
g.standardize_group_expenses()
# Now, expense currencies should match that of the group
assert g.currencies_match()
def test_standardize_group_expenses_and_payments(self):
g = rand_group()
# Add random expenses and linked payments
for i in range(5):
e = rand_expense()
e.add_party(rand_party())
g.add_expense(e)
g.add_payment(rand_payment(e))
# Currencies should not match
assert not g.currencies_match()
g.standardize_group_expenses()
g.standardize_group_payments()
# Now, expense and payment currencies should match that of the group
assert g.currencies_match()
|
rosemichaele/pyshare
|
tests/test_group.py
|
Python
|
gpl-3.0
| 4,723
|
[
"Brian"
] |
79470b6a685bb38355914795890856197519d198aea26ff54a39cdc3ed35e465
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from sisl._internal import set_module
from ..sile import Sile, SileCDF, SileBin
__all__ = ['SileOpenMX', 'SileCDFOpenMX', 'SileBinOpenMX']
@set_module("sisl.io.openmx")
class SileOpenMX(Sile):
pass
@set_module("sisl.io.openmx")
class SileCDFOpenMX(SileCDF):
pass
@set_module("sisl.io.openmx")
class SileBinOpenMX(SileBin):
pass
|
zerothi/sisl
|
sisl/io/openmx/sile.py
|
Python
|
mpl-2.0
| 547
|
[
"OpenMX"
] |
8cf2972d445f53cc6bb1bf70315825f37f6f53e731e717d06c9b724e1826c198
|
#!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: package_facts
short_description: package information as facts
description:
- Return information about installed packages as facts
options:
manager:
description:
- The package manager used by the system so we can query the package information.
- Since 2.8 this is a list and can support multiple package managers per system.
- The 'portage' and 'pkg' options were added in version 2.8.
default: ['auto']
choices: ['auto', 'rpm', 'apt', 'portage', 'pkg']
required: False
type: list
strategy:
description:
- This option controls how the module queries the package managers on the system.
C(first) means it will return only information for the first supported package manager available.
C(all) will return information for all supported and available package managers on the system.
choices: ['first', 'all']
default: 'first'
version_added: "2.8"
version_added: "2.5"
requirements:
- For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
- For Debian-based systems C(python-apt) package must be installed on targeted hosts.
author:
- Matthew Jones (@matburt)
- Brian Coca (@bcoca)
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: Gather the rpm package facts
package_facts:
manager: auto
- name: Print the rpm package facts
debug:
var: ansible_facts.packages
- name: Check whether a package called foobar is installed
debug:
msg: "{{ ansible_facts.packages['foobar'] | length }} versions of foobar are installed!"
when: "'foobar' in ansible_facts.packages"
'''
RETURN = '''
ansible_facts:
description: facts to add to ansible_facts
returned: always
type: complex
contains:
packages:
description:
- Maps the package name to a non-empty list of dicts with package information.
- Every dict in the list corresponds to one installed version of the package.
- The fields described below are present for all package managers. Depending on the
package manager, there might be more fields for a package.
returned: when operating system level package manager is specified or auto detected manager
type: dict
contains:
name:
description: The package's name.
returned: always
type: str
version:
description: The package's version.
returned: always
type: str
source:
description: Where information on the package came from.
returned: always
type: str
sample: |-
{
"packages": {
"kernel": [
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
{
"name": "kernel",
"source": "rpm",
"version": "3.10.0",
...
},
...
],
"kernel-tools": [
{
"name": "kernel-tools",
"source": "rpm",
"version": "3.10.0",
...
}
],
...
}
}
sample_rpm:
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
sample_deb:
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
class RPM(LibMgr):
LIB = 'rpm'
def list_installed(self):
return self._lib.TransactionSet().dbMatch()
def get_package_details(self, package):
return dict(name=package[self._lib.RPMTAG_NAME],
version=package[self._lib.RPMTAG_VERSION],
release=package[self._lib.RPMTAG_RELEASE],
epoch=package[self._lib.RPMTAG_EPOCH],
arch=package[self._lib.RPMTAG_ARCH],)
def is_available(self):
''' we expect the python bindings installed, but this gives warning if they are missing and we have rpm cli'''
we_have_lib = super(RPM, self).is_available()
if not we_have_lib and get_bin_path('rpm'):
self.warnings.append('Found "rpm" but %s' % (missing_required_lib('rpm')))
return we_have_lib
class APT(LibMgr):
LIB = 'apt'
def __init__(self):
self._cache = None
super(APT, self).__init__()
@property
def pkg_cache(self):
if self._cache is not None:
return self._cache
self._cache = self._lib.Cache()
return self._cache
def is_available(self):
''' we expect the python bindings installed, but if there is apt/apt-get give warning about missing bindings'''
we_have_lib = super(APT, self).is_available()
if not we_have_lib:
for exe in ('apt', 'apt-get', 'aptitude'):
if get_bin_path(exe):
self.warnings.append('Found "%s" but %s' % (exe, missing_required_lib('apt')))
break
return we_have_lib
def list_installed(self):
# Store the cache to avoid running pkg_cache() for each item in the comprehension, which is very slow
cache = self.pkg_cache
return [pk for pk in cache.keys() if cache[pk].is_installed]
def get_package_details(self, package):
ac_pkg = self.pkg_cache[package].installed
return dict(name=package, version=ac_pkg.version, arch=ac_pkg.architecture, category=ac_pkg.section, origin=ac_pkg.origins[0].origin)
class PKG(CLIMgr):
CLI = 'pkg'
atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']
def list_installed(self):
rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
if rc != 0 or err:
raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
return out.splitlines()
def get_package_details(self, package):
pkg = dict(zip(self.atoms, package.split('\t')))
if 'arch' in pkg:
try:
pkg['arch'] = pkg['arch'].split(':')[2]
except IndexError:
pass
if 'automatic' in pkg:
pkg['automatic'] = bool(int(pkg['automatic']))
if 'category' in pkg:
pkg['category'] = pkg['category'].split('/', 1)[0]
if 'version' in pkg:
if ',' in pkg['version']:
pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
else:
pkg['port_epoch'] = 0
if '_' in pkg['version']:
pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
else:
pkg['revision'] = '0'
if 'vital' in pkg:
pkg['vital'] = bool(int(pkg['vital']))
return pkg
class PORTAGE(CLIMgr):
CLI = 'qlist'
atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
def list_installed(self):
rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
if rc != 0:
raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
return out.splitlines()
def get_package_details(self, package):
return dict(zip(self.atoms, package.split()))
def main():
# get supported pkg managers
PKG_MANAGERS = get_all_pkg_managers()
PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]
# start work
global module
module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'default': ['auto']},
strategy={'choices': ['first', 'all'], 'default': 'first'}),
supports_check_mode=True)
packages = {}
results = {'ansible_facts': {}}
managers = [x.lower() for x in module.params['manager']]
strategy = module.params['strategy']
if 'auto' in managers:
# keep order from user, we do dedupe below
managers.extend(PKG_MANAGER_NAMES)
managers.remove('auto')
unsupported = set(managers).difference(PKG_MANAGER_NAMES)
if unsupported:
if 'auto' in module.params['manager']:
msg = 'Could not auto detect a usable package manager, check warnings for details.'
else:
msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
module.fail_json(msg=msg)
found = 0
seen = set()
for pkgmgr in managers:
if found and strategy == 'first':
break
# dedupe as per above
if pkgmgr in seen:
continue
seen.add(pkgmgr)
try:
try:
# manager throws exception on init (calls self.test) if not usable.
manager = PKG_MANAGERS[pkgmgr]()
if manager.is_available():
found += 1
packages.update(manager.get_packages())
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
continue
for warning in getattr(manager, 'warnings', []):
module.warn(warning)
except Exception as e:
if pkgmgr in module.params['manager']:
module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))
if found == 0:
msg = ('Could not detect a supported package manager from the following list: %s, '
'or the required Python library is not installed. Check warnings for details.' % managers)
module.fail_json(msg=msg)
# Set the facts, this will override the facts in ansible_facts that might exist from previous runs
# when using operating system level or distribution package managers
results['ansible_facts']['packages'] = packages
module.exit_json(**results)
if __name__ == '__main__':
main()
|
Dhivyap/ansible
|
lib/ansible/modules/packaging/os/package_facts.py
|
Python
|
gpl-3.0
| 13,128
|
[
"Brian"
] |
b33c702511b9f588aace9a88a061321a111984a86abb5e958a71d5439a0dea0c
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 2015
Library for MG_Synth. Defines functions for reading motifs, generating
synthetic DNA sequences, computing likelihoods, etc
@author: ivanerill
"""
#The following code assumes that Biopython is installed
from Bio.Seq import Seq
from Bio import motifs
from Bio.Alphabet import IUPAC
#system calls
import sys
#random number generation
import random
#math functions
import math
import numpy as np
#------------------------------------------------------------------------------
def read_motif(motif_filename, verb=0):
"""Reads a motif as a collection of sites from a file
Reads a motif and uses the biopython.motifs class to store it. If the motif
is in FASTA format, it uses the parser directly. Otherwise, it loads and
reads a concatenated text file and creates the motif.
File type is determined by extension:
* FASTA for .fas, .fasta and .fa files
* One-per-line text file otherwise
Input:
* The motif filename; required
* Verbose mode (default=0)
Returns:
* the read motif
"""
#create file handler for reading file
try:
motif_file = open(motif_filename,"r")
except (IOError, OSError) as file_open_exception:
print "*** The file name provided:", motif_filename, " does not exist"
print "*** Error: ", file_open_exception.errno, " - ",\
file_open_exception.strerror
sys.exit()
#Figure out file type based on extension, read sites and create motif
extension = motif_filename.split('.')[-1]
if extension not in ['fas', 'fasta', 'fa']:
if verb: print 'Reading motif... raw text sequence mode assumed \
(one site per line, not FASTA parsing)'
sites = []
for line in motif_file:
sites.append(Seq(line.rstrip('\n\r'),IUPAC.unambiguous_dna))
mot = motifs.create(sites)
if verb: print mot.degenerate_consensus
else:
if verb: print 'Reading motif... attempting to parse FASTA file'
mot = motifs.read(motif_file,'sites')
motif_file.close()
return mot
#------------------------------------------------------------------------------
def sample_motif(mot, sampleN, verb=0):
"""
Samples sampleN instances from a motif, using the positional-frequencies
of the motif to generate proportional random instances of the motif
Input:
* the motif; required
* the number of samples to be generated; required
* Verbose mode (default=0)
Returns:
* the samples generated, as a list
"""
random.seed(None)
#the list to return
samples = []
#the column holder
cols = []
ind=0
#for each position in the motif
while ind<len(mot):
#get the frequency list (dict) for the column and sort descending
sorted_freqs=sorted(mot.pwm[:,ind].items(),key=lambda x: x[1],\
reverse=True)
cnt = 0
column = []
#for sample number to be drawn
while cnt < sampleN:
r = random.random()
cumsum = 0
#determine base to draw according to uniform random and freq list
for sf in sorted_freqs:
cumsum = cumsum + sf[1]
if (r<cumsum):
base = sf[0]
break
#append the base to the column
column.append(base)
cnt = cnt + 1
#append column to column list
#cols is a list of len(mot) elements, with each element
#being a list containing sampleN bases
#ex: [['A','A','G'],['G','T','T']] could be for sampleN=3 and length=2
#where a first position is dominated by A and the second by T
if verb: print column
cols.append(column)
ind=ind+1
#zip appended columns
#this will unpack (*) the elements of cols: ['A','A','G'] and ['G','T','T']
#and then zip them (which will merge the i-th element of each list and
#return it as a tuple): [('A', 'G'), ('A', 'T'), ('G', 'T')]
cols=zip(*cols)
#create samples
#this iterates through the list of tuples return by zip, and merges
#them into a string: ('A', 'G') becomes 'AG', the first motif instance
#'AT' the second, and 'GT' the third
for c in cols:
samples.append(Seq(''.join(c),IUPAC.unambiguous_dna))
#return samples
#['AG','AT','GT']
return samples
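# Example usage of sample_motif (hypothetical motif file name):
#   mot = read_motif('lexA_sites.fas')
#   instances = sample_motif(mot, 100)   # 100 sequences, one per sample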
#------------------------------------------------------------------------------
def random_DNA(length, freqs, N, verb=0):
"""
Generates N random DNA sequences using the provided %GC content
Input:
* the length of the sequence to be generated; required
* the frequencies of each base as dictionary [A C G T]; required
ex: {'A': 0.96, 'C': 0.0, 'G': 0.0, 'T': 0.04}
* the number of sequences to be generated
* Verbose mode (default=0)
Returns:
* the generated sequences as a list of biopython seq objects
"""
random.seed(None)
seqlist=[]
sorted_freqs=sorted(freqs.items(),key=lambda x: x[1],reverse=True)
#notify user if freqs do not add to 1
    if (abs(sum(freqs.values()) - 1) > 1e-6):
print "Frequencies do not add up to one!"
ind=0
#for each sequence to be generated
while (ind<N):
#for each base to be added
cnt=0
seq=''
while (cnt<length):
r = random.random()
cumsum = 0
for sf in sorted_freqs:
cumsum = cumsum + sf[1]
if (r<cumsum):
base = sf[0]
break
#concatenate chosen base to growing DNA sequence
seq=seq+base
cnt=cnt + 1
#make sequence object
dnaseq=Seq(seq,IUPAC.unambiguous_dna)
#append sequence object to list
seqlist.append(dnaseq)
ind = ind + 1
return seqlist
#------------------------------------------------------------------------------
def sfmax_score_seqs(seq_list, pssm, rpssm=None, verb=0, mode=1):
"""
Scores a list of sequences using provided pssm and reverse pssm, applies
softmax function to return a single score per position
Input:
* the sequence list; required
* the pssm to score with; required
* the reverse pssm; computed if not provided
* Verbose mode (default=0)
Returns:
* the softmax scores, as a list (one list of scores per sequence)
"""
#list of list of scores to return
scorelist=[]
#compute rpssm if not provided
if (rpssm==None): rpssm=pssm.reverse_complement()
#compute scores for each sequence
for s in seq_list:
sc = pssm.calculate(s)
rsc = rpssm.calculate(s)
#handle sequences of length=motif length (0-dim array)
if (sc.size==1):
sc=[sc]
rsc=[rsc]
#apply softmax
scores=map(lambda x: math.log(2.0**x[0]+2.0**x[1],2),zip(sc,rsc))
#append list of scores to overall list (for each sequence)
scorelist.append(scores)
return scorelist
#------------------------------------------------------------------------------
def esfmax_score_seqs(seq_list, pssm, rpssm=None, verb=0, mode=1):
"""
Scores a list of sequences using provided pssm and reverse pssm, applies
softmax function to return a single score per position
Uses the natural log (Boltzmann) derivation of the softmax
Input:
* the sequence list; required
* the pssm to score with; required
* the reverse pssm; computed if not provided
* Verbose mode (default=0)
Returns:
* the softmax scores, as a list (one list of scores per sequence)
"""
#list of list of scores to return
scorelist=[]
#compute rpssm if not provided
if (rpssm==None): rpssm=pssm.reverse_complement()
#compute scores for each sequence
for s in seq_list:
sc = pssm.calculate(s)
rsc = rpssm.calculate(s)
#handle sequences of length=motif length (0-dim array)
if (sc.size==1):
sc=[sc]
rsc=[rsc]
#apply softmax
scores = np.log(np.exp(sc)+np.exp(rsc))
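        # note: the np.exp calls above can overflow for large scores;
        # np.logaddexp(sc, rsc) computes log(exp(sc)+exp(rsc)) stably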
#append list of scores to overall list (for each sequence)
scorelist.append(scores)
return scorelist
#------------------------------------------------------------------------------
def ll_ratios(score_set, n_g, n_m, alpha, verb=0):
"""
Computes the likelihood ratio for a given set of score lists (each a score
list corresponding to one sequence), using provided background and
regulated mean/stdevs and mixing ratio and assuming a normal distribution
Input:
* the score list; required
* the background normal distribution; required
* the regulated normal distribution; required
* the mixing ratio; required
* Verbose mode (default=0)
Returns:
* the log likelihood ratio, as a list (one per sequence)
"""
#list of log-likelihood ratios to be returned, one per sequence/score list
llrs=[]
#for each score list
for score_list in score_set:
#compute the sum of log likelihood ratios for score array
lpd_b = n_g.logpdf(score_list)
lpd_f = np.log(alpha*n_m.pdf(score_list) + (1-alpha)*n_g.pdf(score_list))
sumlr = sum(lpd_b-lpd_f)
llrs.append(sumlr)
return (llrs)
#------------------------------------------------------------------------------
def PostP(LLR,PPR,mode=0,verb=0):
"""
Returns the posterior probability for a set of sequences, given their
log-likelihood ratios and the prior probability ratio
Input:
* the list of log-likelihood ratios; required
* the prior probability ratio; required
* mode: whether a list of posteriors or the aggregated is returned
* Verbose mode (default=0)
Returns:
* the posterior probabilities, as a list (one per sequence)
"""
if (mode):
#list to be returned
plist=[]
for loglr in LLR:
plist.append(1.0/(1.0+math.exp(loglr)*PPR))
return(plist)
else:
#return overall posterior
cumllr=0
for loglr in LLR:
cumllr=cumllr+loglr
return(1.0/(1.0+math.exp(cumllr)*PPR))
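# Worked check: with a single log-likelihood ratio of 0 and an even prior
# ratio, PostP([0.0], 1.0) = 1/(1 + e^0 * 1) = 0.5, i.e. the posterior
# stays at the prior when the data carry no evidence either way.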
#------------------------------------------------------------------------------
def ThetaTh(theta,n_m, verb=0):
"""
Returns the sensitivity cut-off (th), given a theta value expressed as the
number of standard deviations with respect to the mean for the distribution
of known sites
Input:
* the minimum score, theta expressed as stdevs from mean motif score
* the regulated normal distribution; required
* Verbose mode (default=0)
Returns:
* the cutoff value
"""
    #compute effective cut-off score (th) as theta stdevs from the mean
    #(a negative theta places the cutoff below the mean)
mmean, mvar = n_m.stats(moments='mv')
th = mmean + theta * math.sqrt(mvar)
return th
#------------------------------------------------------------------------------
def NormPriors(th,n_g,n_m,alpha,rprom,tprom, promlen=300.0, verb=0):
"""
Returns the normalized priors, given total and regulated number of promoters,
the thresholding value th, the regulated and background models and an
assumed average length for promoter sequences
The normalized priors are obtained as follows:
- The number of non-regulated promoters is revised, given the expectation of
those promoters making it through the filtering step (i.e. we multiply
the known prior times the probability of observing a sequence with score
above th (after the filtering step) given the background model). This
gives us the expected number of total promoters in the new scenario
- The number of regulated promoters is also revised, in accordance to the
probability of observing a sequence with score above th given the mixture
model. This gives us the expected number of regulated promoters in the new
scenario.
Input:
* the minimum score, th
* the background normal distribution; required
* the regulated normal distribution; required
* the mixing ratio; required
* the number of regulated promoters; required
* the total number of promoters; required
* the average promoter sequence length; default=300
* Verbose mode (default=0)
Returns:
* the normalized prior for regulation
"""
    #number of non-regulated promoters
    nrprom=tprom-rprom
    #probability of observing a promoter with no position scoring above th,
    #under the background model (Ub) and under the mixture model (Ur)
Ub = n_g.cdf(th) ** promlen
Ur = (alpha*n_m.cdf(th)+(1.0-alpha)*n_g.cdf(th)) ** promlen
if verb: print nrprom, " --> ", nrprom * (1-Ub)
if verb: print rprom, " --> ", rprom * (1-Ur)
#recompute the priors with the "expected" number of promoters after
#discarding all promoters with no scores above th
pr = (rprom * (1-Ur)) / ( (nrprom * (1-Ub)) + (rprom * (1-Ur)) )
pnr = 1 - pr
return [pr, pnr]
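# Usage sketch (hypothetical parameter values; assumes scipy.stats frozen
# normals for the background and motif score models, as used above):
#   from scipy.stats import norm
#   n_g, n_m = norm(-7.0, 2.0), norm(8.0, 1.5)
#   th = ThetaTh(-2, n_m)   # cutoff two stdevs below the motif mean
#   pr, pnr = NormPriors(th, n_g, n_m, alpha=0.5, rprom=50, tprom=4000)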
#------------------------------------------------------------------------------
def lNormFactor(score_set, th, n_g, n_m, alpha, verb=0):
"""
Returns, for a given set of scores, the log normalization ratios
with sensitivity adjustment for some minimum score th taken under
consideration (one per sequence)
These normalization ratios, which represent the log ratio between
the probability of observing a sequence (score_list) with at least one
score above the cutoff under the regulated and background models, will
be used to adjust the likelihood ratios in the computation of the posterior
Input:
* the list of scores for each sequence
    * the minimum score cutoff th (an absolute score, e.g. from ThetaTh)
* the background normal distribution; required
* the regulated normal distribution; required
* the mixing ratio; required
* Verbose mode (default=0)
Returns:
* the log normalization factors, as a list (one per sequence)
"""
#the list of ratios to be returned
lnormratios=[]
#for each list of scores (i.e. sequence)
for score_list in score_set:
#compute Ub/Ur (prob. of observing score_list with no scores above th)
#given background and regulated models
Ub = n_g.cdf(th) ** len(score_list)
Ur = (alpha*n_m.cdf(th)+(1.0-alpha)*n_g.cdf(th)) ** len(score_list)
lnormratios.append(math.log((1.0-Ur)/(1.0-Ub)))
return lnormratios
#------------------------------------------------------------------------------
def NormPostP(LLR,PPR,lnormf,mode=0,verb=0):
"""
Returns the posterior probability for a set of sequences, given their
log-likelihood ratios, the prior probability ratio and their normalization
factors
Input:
* the list of log-likelihood ratios; required
* the prior probability ratio; required
* the list of normalization factors; required
* mode: whether a list of posteriors or the aggregated is returned
* Verbose mode (default=0)
Returns:
* the posterior probabilities, as a list (one per sequence)
"""
if (mode):
#list to be returned
plist=[]
for loglr, lnorm in zip(LLR, lnormf):
plist.append(1.0/(1.0+math.exp(loglr+lnorm)*PPR))
return(plist)
else:
#return overall posterior
cumllr=0
for loglr, lnorm in zip(LLR, lnormf):
cumllr=cumllr+loglr+lnorm
return(1.0/(1.0+math.exp(cumllr)*PPR))
|
ErillLab/CogsNormalizedPosteriorProbabilityThetas
|
MGtest/MG_synth_lib.py
|
Python
|
gpl-3.0
| 16,106
|
[
"Biopython"
] |
30fc33e3b8fc9968278856c0f6d7e39327455eff124ca328a67bfd0bfab09206
|
from ase import Atoms
from gpaw import GPAW
from gpaw.wavefunctions.pw import PW
from gpaw.test import equal
for mode in ['fd', 'pw']:
print mode
hydrogen = Atoms('H',
cell=(2.5, 3, 3.5),
pbc=1,
calculator=GPAW(txt=None, mode=mode))
hydrogen.get_potential_energy()
dens = hydrogen.calc.density
ham = hydrogen.calc.hamiltonian
ham.poisson.eps = 1e-20
dens.interpolate()
dens.calculate_pseudo_charge()
ham.update(dens)
ham.get_energy(hydrogen.calc.occupations)
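    # finite-difference check: vt should be the functional derivative dE/dn,
    # so moving charge x from one grid point to another changes the energy
    # by about x*y; the central difference below verifies this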
y = (ham.vt_sG[0, 0, 0, 0] - ham.vt_sG[0, 0, 0, 1]) * ham.gd.dv
x = 0.0001
dens.nt_sG[0, 0, 0, 0] += x
dens.nt_sG[0, 0, 0, 1] -= x
dens.interpolate()
dens.calculate_pseudo_charge()
ham.update(dens)
e1 = ham.get_energy(hydrogen.calc.occupations) - ham.Ekin
dens.nt_sG[0, 0, 0, 0] -= 2 * x
dens.nt_sG[0, 0, 0, 1] += 2 * x
dens.interpolate()
dens.calculate_pseudo_charge()
ham.update(dens)
e2 = ham.get_energy(hydrogen.calc.occupations) - ham.Ekin
equal(y, (e1 - e2) / (2 * x), 2e-8)
|
ajylee/gpaw-rtxs
|
gpaw/test/potential.py
|
Python
|
gpl-3.0
| 1,104
|
[
"ASE",
"GPAW"
] |
e01bcb7bee845153601ed9795eb5d7088130650ccca1f0a8752b0b5e8a396326
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
try:
from setuptools import setup
except:
from distutils.core import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png"]]
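# Note: get_static_files yields glob patterns (e.g. 'static/css/*.css')
# rather than concrete paths; that is the form package_data expects.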
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'cached_property<2.0',
'pyparsing<3.0',
'tornado<5.0',
'python-daemon<3.0',
]
if os.environ.get('READTHEDOCS', None) == 'True':
install_requires.append('sqlalchemy')
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
setup(
name='luigi',
version='1.3.0',
description='Workflow mgmgt + task scheduling + dependency resolution',
long_description=long_description,
author='Erik Bernhardsson',
author_email='erikbern@spotify.com',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
]
},
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
)
|
drincruz/luigi
|
setup.py
|
Python
|
apache-2.0
| 2,826
|
[
"VisIt"
] |
85d34f837a4782c88291432751f757339cbfbf6d4d9206f58cd09b925f39ec64
|
import COPASI
import sys
dm = COPASI.CRootContainer.addDatamodel()
def checkModel(file_name):
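    # try loading as a native COPASI file first; fall back to SBML import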
if not dm.loadModel(file_name):
dm.importSBML(file_name)
a = COPASI.CModelAnalyzer(dm.getModel())
res = a.getReactionResults()
for result in res:
if result.hasIssue(): # only output problematic reactions
print(result.getResultString(False, True))
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: python checkModel <copasi | sbml filename)")
sys.exit(1)
checkModel(sys.argv[1])
|
jonasfoe/COPASI
|
copasi/bindings/python/examples/checkModel.py
|
Python
|
artistic-2.0
| 534
|
[
"COPASI"
] |
37fe328cfb96bf1b2a1ceaacbeb07f0343f7ac537588d8fe050a65ce015ba28d
|
# -*- coding: utf-8 -*-
from .NeuroML import NeuroML, loadNeuroML_L123
from .NetworkML import NetworkML
from .MorphML import MorphML
from .ChannelML import ChannelML
import tempfile
import logging
debug_ = False
if debug_:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='__moose.nml__.log'
)
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M'
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('moose.nml').addHandler(console)
_logger = logging.getLogger('moose.nml')
_logger.debug("Loading NML library")
|
dharmasam9/moose-core
|
python/moose/neuroml/__init__.py
|
Python
|
gpl-3.0
| 954
|
[
"MOOSE"
] |
48c16453b957c5ca9a6e5bcb18954fbb8843356f9a90fb7616b7e355e48f3b9e
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def offset_gaussian():
# Connect to a pre-existing cluster
insurance = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/insurance.csv"))
insurance["offset"] = insurance["Holders"].log()
from h2o.estimators.gbm import H2OGradientBoostingEstimator
gbm = H2OGradientBoostingEstimator(ntrees=600,
max_depth=1,
min_rows=1,
learn_rate=0.1,
distribution="gaussian")
gbm.train(x=range(3), y="Claims", training_frame=insurance, offset_column="offset")
predictions = gbm.predict(insurance)
# Comparison result generated from R's gbm:
# fit2 <- gbm(Claims ~ District + Group + Age+ offset(log(Holders)) , interaction.depth = 1,n.minobsinnode = 1,
# shrinkage = .1,bag.fraction = 1,train.fraction = 1,
# data = Insurance, distribution ="gaussian", n.trees = 600)
# pg = predict(fit2, newdata = Insurance, type = "response", n.trees=600)
# pr = pg - - log(Insurance$Holders)
assert abs(44.33016 - gbm._model_json['output']['init_f']) < 1e-5, "expected init_f to be {0}, but got {1}". \
format(44.33016, gbm._model_json['output']['init_f'])
assert abs(1491.135 - gbm.mse()) < 1e-2, "expected mse to be {0}, but got {1}".format(1491.135, gbm.mse())
assert abs(49.23438 - predictions.mean()) < 1e-2, "expected prediction mean to be {0}, but got {1}". \
format(49.23438, predictions.mean())
assert abs(-45.5720659304 - predictions.min()) < 1e-2, "expected prediction min to be {0}, but got {1}". \
format(-45.5720659304, predictions.min())
assert abs(207.387 - predictions.max()) < 1e-2, "expected prediction max to be {0}, but got {1}". \
format(207.387, predictions.max())
if __name__ == "__main__":
pyunit_utils.standalone_test(offset_gaussian)
else:
offset_gaussian()
|
pchmieli/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_offset_gaussian_gbm.py
|
Python
|
apache-2.0
| 1,982
|
[
"Gaussian"
] |
fce15197055c320955ae284ff3b780a105d53d949b0fd3d5bc96a3c7b889987b
|
#!/usr/bin/python
'''
Script to find a route through an SSW sector map
'''
# Copyright 2009, 2015-2016 Squiffle
from __future__ import absolute_import
from __future__ import print_function
import ssw_sector_map2 as ssw_sector_map
import ssw_map_utils, ssw_societies, ssw_utils
import operator, sys, getopt, datetime
version = 1.00
fout = sys.stdout
def usage(progname, map_file):
'''
Prints usage information
'''
print("Usage: %s [-d {a|e|i|o|t}] [-e] [-m] [map_filename] sector [sectors]" % progname)
print()
print(" Find route to visit the specified sectors")
print(" Looks for a route to the first sector from anywhere. If more")
print(" sectors are listed, looks for a route to travel through them.")
print(" Currently only tries to visit them in the specified order.")
print()
print(" -d|--drones {a|e|i|o|t} - avoid drones not belonging to the specified society")
print(" -e|--empire - assume that unexplored sectors contain Amaranth drones")
print(" -h|--help - print this usage messge")
print(" -m|--missing_links - dump the list of found missing links")
print(" map_filename defaults to %s" % map_file)
print()
print(" Version %.2f. Brought to you by Squiffle" % version)
def main(*arguments):
'''
Do whatever the user wants !
Returns the parsed map.
'''
# Defaults, changeable from the command line
default_map_file = "ssw_sector_map.htm"
map_file = default_map_file
society = None
unexplored_sector_society = None
sectors_to_visit = []
dump_missing_links = False
global fout
# Parse command-line options
try:
opts, args = getopt.getopt(arguments,"ed:hm",["empire","drones=","help","missing_links"])
except getopt.GetoptError:
usage(sys.argv[0], map_file)
sys.exit(2)
for arg in args:
try:
val = int(arg)
if val in ssw_sector_map.all_sectors:
sectors_to_visit.append(val)
else:
usage(sys.argv[0], default_map_file)
sys.exit(2)
        except ValueError:
if map_file == default_map_file:
map_file = arg
else:
usage(sys.argv[0], default_map_file)
sys.exit(2)
for opt,arg in opts:
if (opt == '-d') or (opt == '--drones'):
try:
society = ssw_societies.adjective(arg)
except ssw_societies.Invalid_Society:
print('Unrecognised society "%s" - should be one of %s' % (arg, ssw_societies.initials))
usage(sys.argv[0], map_file)
sys.exit(2)
elif (opt == '-e') or (opt == '--empire'):
unexplored_sector_society = ssw_societies.adjective('a')
elif (opt == '-h') or (opt == '--help'):
usage(sys.argv[0], default_map_file)
sys.exit(0)
elif (opt == '-m') or (opt == '--missing_links'):
dump_missing_links = True
if (len(sectors_to_visit) == 0) and not dump_missing_links:
usage(sys.argv[0], default_map_file)
sys.exit(2)
# Read and parse the sector map
page = open(map_file)
p = ssw_sector_map.SectorMapParser(page)
# Don't print warnings if we're extracting missing links,
# because there will likely be lots of "missing link" warnings
map_valid,reason = p.valid(dump_missing_links)
if not map_valid:
print("Sector map file is invalid - %s" % reason, file=fout)
sys.exit(2)
# Now add in any invariant information that we don't know
p.enhance_map()
if len(sectors_to_visit) > 0:
# Find and print the route
# TODO Find the best route through the listed sectors
# Note that first time through the loop, from_sector == to_sector,
# which means "find a route to this sector from anywhere"
from_sector = sectors_to_visit[0]
total_distance = 0
overall_route = []
drones = []
possible_drones = False
for to_sector in sectors_to_visit:
#print "Finding route from %d to %d" % (from_sector, to_sector)
(distance, route_str, drone_list, poss) = p.shortest_route(from_sector,
to_sector,
society,
unexplored_sector_society)
total_distance += distance
overall_route.append(route_str)
drones += drone_list
from_sector = to_sector
possible_drones = possible_drones or poss
print("Total distance is %d" % total_distance)
for route in overall_route:
print(route, end=' ')
print(ssw_utils.drones_str(drones, possible_drones))
if dump_missing_links:
var_str = "cycle_%d_links = " % p.cycle()
indent = len(var_str) + 1
indent_str = ' ' * indent
print(var_str, end=' ')
missing_link_str = str(p.missing_links)
# Split the very long string over multiple lines
start_idx = 0
end_idx = 0
while (end_idx < len(missing_link_str)):
idx = missing_link_str.find(']', end_idx + 1)
if (idx == -1):
#print "Got to the end (idx == -1)"
end_idx = len(missing_link_str)
print(indent_str + missing_link_str[start_idx-1:])
else:
total_len = indent + idx + 1 - start_idx
#print "idx = %d. Total_len = %d" % (idx, total_len)
if (total_len >= 79):
# We already printed the variable on the first line
if (start_idx > 0):
print(indent_str, end=' ')
# Include the following comma
print(missing_link_str[start_idx:end_idx+2])
start_idx = end_idx + 3
#print "Set start_idx to %d" % start_idx
end_idx = idx
#print "Set end_idx to %d" % end_idx
# Check that this is today's map
if not ssw_map_utils.is_todays(p):
print()
print("**** Map is more than 24 hours old")
# Check for unknown sectors with jellyfish
unknown_sectors_with_jellyfish = ssw_map_utils.unknown_sectors_with_jellyfish(p)
if len(unknown_sectors_with_jellyfish) > 0:
print()
print("**** Don't forget to feed the empaths at New Ceylon")
print("**** That will explore %d sector(s) : %s" % (len(unknown_sectors_with_jellyfish),
str(sorted(list(unknown_sectors_with_jellyfish)))))
# Return the parsed map, in case we're a mere utility
return p
if __name__ == '__main__':
main(*sys.argv[1:])
|
UEWBot/ssw-scripts
|
ssw_route.py
|
Python
|
gpl-3.0
| 6,954
|
[
"VisIt"
] |
e96305a567ee2bc91c3541ba715c19d9235cf25ce212ff8c84f830374e17dfdf
|
from distutils.core import setup
DESCRIPTION = "General tools for Astronomical Time Series in Python"
LONG_DESCRIPTION = """
nufftpy: Non-Uniform FFT in Python
==================================
This is a pure python implementation of the NUFFT.
For more information, visit http://github.com/jakevdp/nufftpy
"""
NAME = "nufftpy"
AUTHOR = "Jake VanderPlas"
AUTHOR_EMAIL = "jakevdp@uw.edu"
MAINTAINER = "Jake VanderPlas"
MAINTAINER_EMAIL = "jakevdp@uw.edu"
URL = 'http://github.com/jakevdp/nufftpy'
DOWNLOAD_URL = 'http://github.com/jakevdp/nufftpy'
LICENSE = 'BSD 3-clause'
import nufftpy
VERSION = nufftpy.__version__
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['nufftpy',
'nufftpy.tests',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'],
)
|
jakevdp/nufftpy
|
setup.py
|
Python
|
bsd-2-clause
| 1,442
|
[
"VisIt"
] |
67fc01cdf85738e2decb7cc1c461af2243f33a25975a0e2d10546f8de3669af7
|
#!/usr/bin/env python
# This example demonstrates how to use 2D Delaunay triangulation.
# We create a fancy image of a 2D Delaunay triangulation. Points are
# randomly generated.
import vtk
from vtk.util.colors import *
# Generate some random points
math = vtk.vtkMath()
points = vtk.vtkPoints()
for i in range(0, 50):
points.InsertPoint(i, math.Random(0, 1), math.Random(0, 1), 0.0)
# Create a polydata with the points we just created.
profile = vtk.vtkPolyData()
profile.SetPoints(points)
# Perform a 2D Delaunay triangulation on them.
delny = vtk.vtkDelaunay2D()
delny.SetInput(profile)
delny.SetTolerance(0.001)
mapMesh = vtk.vtkPolyDataMapper()
mapMesh.SetInputConnection(delny.GetOutputPort())
meshActor = vtk.vtkActor()
meshActor.SetMapper(mapMesh)
meshActor.GetProperty().SetColor(.1, .2, .4)
# We will now create a nice looking mesh by wrapping the edges in tubes,
# and putting fat spheres at the points.
extract = vtk.vtkExtractEdges()
extract.SetInputConnection(delny.GetOutputPort())
tubes = vtk.vtkTubeFilter()
tubes.SetInputConnection(extract.GetOutputPort())
tubes.SetRadius(0.01)
tubes.SetNumberOfSides(6)
mapEdges = vtk.vtkPolyDataMapper()
mapEdges.SetInputConnection(tubes.GetOutputPort())
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(mapEdges)
edgeActor.GetProperty().SetColor(peacock)
edgeActor.GetProperty().SetSpecularColor(1, 1, 1)
edgeActor.GetProperty().SetSpecular(0.3)
edgeActor.GetProperty().SetSpecularPower(20)
edgeActor.GetProperty().SetAmbient(0.2)
edgeActor.GetProperty().SetDiffuse(0.8)
ball = vtk.vtkSphereSource()
ball.SetRadius(0.025)
ball.SetThetaResolution(12)
ball.SetPhiResolution(12)
balls = vtk.vtkGlyph3D()
balls.SetInputConnection(delny.GetOutputPort())
balls.SetSourceConnection(ball.GetOutputPort())
mapBalls = vtk.vtkPolyDataMapper()
mapBalls.SetInputConnection(balls.GetOutputPort())
ballActor = vtk.vtkActor()
ballActor.SetMapper(mapBalls)
ballActor.GetProperty().SetColor(hot_pink)
ballActor.GetProperty().SetSpecularColor(1, 1, 1)
ballActor.GetProperty().SetSpecular(0.3)
ballActor.GetProperty().SetSpecularPower(20)
ballActor.GetProperty().SetAmbient(0.2)
ballActor.GetProperty().SetDiffuse(0.8)
# Create the rendering window, renderer, and interactive renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(ballActor)
ren.AddActor(edgeActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(150, 150)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
# Interact with the data.
iren.Initialize()
renWin.Render()
iren.Start()
|
naucoin/VTKSlicerWidgets
|
Examples/Modelling/Python/DelMesh.py
|
Python
|
bsd-3-clause
| 2,665
|
[
"VTK"
] |
38a0da65f02ac5353f0cc074eddd02d6ce01987eb4e997205bcf3fff03b181be
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/minimal-charged-particles.py", box_l=6.0, int_steps=100)
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/scripts/samples/test_minimal-charged-particles.py
|
Python
|
gpl-3.0
| 1,024
|
[
"ESPResSo"
] |
340779d1e053141e48281d9f1fbb0165d71d3acf31c88602b5706051626430a5
|
"""HTTPS module based on the GFAL2_StorageBase class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from DIRAC
from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase
from DIRAC import gLogger
class GFAL2_HTTPSStorage(GFAL2_StorageBase):
""" .. class:: GFAL2_HTTPSStorage
HTTP interface to StorageElement using gfal2
"""
# davs is for https with direct access + third party
_INPUT_PROTOCOLS = ['file', 'http', 'https', 'dav', 'davs']
_OUTPUT_PROTOCOLS = ['http', 'https', 'dav', 'davs']
def __init__(self, storageName, parameters):
""" c'tor
"""
# # init base class
super(GFAL2_HTTPSStorage, self).__init__(storageName, parameters)
self.srmSpecificParse = False
self.log = gLogger.getSubLogger("GFAL2_HTTPSStorage")
self.pluginName = 'GFAL2_HTTPS'
# We don't need extended attributes for metadata
self._defaultExtendedAttributes = None
|
yujikato/DIRAC
|
src/DIRAC/Resources/Storage/GFAL2_HTTPSStorage.py
|
Python
|
gpl-3.0
| 980
|
[
"DIRAC"
] |
d8979328223b14be9a3e6642460dd51193819591408d989ad52d5328bbafec6c
|
#!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: xattr
version_added: "1.3"
short_description: Manage user defined extended attributes
description:
- Manages filesystem user defined extended attributes, requires that they are enabled
on the target filesystem and that the setfattr/getfattr utilities are present.
options:
path:
description:
- The full path of the file/object to get the facts of.
- Before 2.3 this option was only usable as I(name).
aliases: [ name ]
required: true
namespace:
description:
- Namespace of the named name/key.
default: user
version_added: "2.7"
key:
description:
- The name of a specific Extended attribute key to set/retrieve.
value:
description:
- The value to set the named name/key to, it automatically sets the C(state) to 'set'.
state:
description:
      - Defines which action to perform.
C(read) retrieves the current value for a C(key) (default)
C(present) sets C(name) to C(value), default if value is set
C(all) dumps all data
C(keys) retrieves all keys
C(absent) deletes the key
choices: [ absent, all, keys, present, read ]
default: read
follow:
description:
- If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
otherwise acts on symlink itself.
type: bool
default: 'yes'
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
author:
- Brian Coca (@bcoca)
'''
EXAMPLES = '''
- name: Obtain the extended attributes of /etc/foo.conf
xattr:
path: /etc/foo.conf
- name: Set the key 'user.foo' to value 'bar'
xattr:
path: /etc/foo.conf
key: foo
value: bar
- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914'
xattr:
path: /mnt/bricks/brick1
namespace: trusted
key: glusterfs.volume-id
value: "0x817b94343f164f199e5b573b4ea1f914"
- name: Remove the key 'user.foo'
xattr:
path: /etc/foo.conf
key: foo
state: absent
- name: Remove the key 'trusted.glusterfs.volume-id'
xattr:
path: /mnt/bricks/brick1
namespace: trusted
key: glusterfs.volume-id
state: absent
'''
import os
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def get_xattr_keys(module, path, follow):
cmd = [module.get_bin_path('getfattr', True)]
# prevents warning and not sure why it's not default
cmd.append('--absolute-names')
if not follow:
cmd.append('-h')
cmd.append(path)
return _run_xattr(module, cmd)
def get_xattr(module, path, key, follow):
cmd = [module.get_bin_path('getfattr', True)]
# prevents warning and not sure why it's not default
cmd.append('--absolute-names')
if not follow:
cmd.append('-h')
if key is None:
cmd.append('-d')
else:
cmd.append('-n %s' % key)
cmd.append(path)
return _run_xattr(module, cmd, False)
def set_xattr(module, path, key, value, follow):
cmd = [module.get_bin_path('setfattr', True)]
if not follow:
cmd.append('-h')
cmd.append('-n %s' % key)
cmd.append('-v %s' % value)
cmd.append(path)
return _run_xattr(module, cmd)
def rm_xattr(module, path, key, follow):
cmd = [module.get_bin_path('setfattr', True)]
if not follow:
cmd.append('-h')
cmd.append('-x %s' % key)
cmd.append(path)
return _run_xattr(module, cmd, False)
def _run_xattr(module, cmd, check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception as e:
module.fail_json(msg="%s!" % to_native(e))
# result = {'raw': out}
result = {}
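    # getfattr output: '# file: ...' header lines are skipped; attribute
    # lines look like key="value", and bare key names (no '=') map to ''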
for line in out.splitlines():
if line.startswith('#') or line == '':
pass
elif '=' in line:
(key, val) = line.split('=')
result[key] = val.strip('"')
else:
result[line] = ''
return result
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['name']),
namespace=dict(type='str', default='user'),
key=dict(type='str'),
value=dict(type='str'),
state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
follow=dict(type='bool', default=True),
),
supports_check_mode=True,
)
path = module.params.get('path')
namespace = module.params.get('namespace')
key = module.params.get('key')
value = module.params.get('value')
state = module.params.get('state')
follow = module.params.get('follow')
if not os.path.exists(path):
module.fail_json(msg="path not found or not accessible!")
changed = False
msg = ""
res = {}
if key is None and state in ['absent', 'present']:
module.fail_json(msg="%s needs a key parameter" % state)
# Prepend the key with the namespace if defined
if (
key is not None and
namespace is not None and
len(namespace) > 0 and
not (namespace == 'user' and key.startswith('user.'))):
key = '%s.%s' % (namespace, key)
if (state == 'present' or value is not None):
current = get_xattr(module, path, key, follow)
if current is None or key not in current or value != current[key]:
if not module.check_mode:
res = set_xattr(module, path, key, value, follow)
changed = True
res = current
msg = "%s set to %s" % (key, value)
elif state == 'absent':
current = get_xattr(module, path, key, follow)
if current is not None and key in current:
if not module.check_mode:
res = rm_xattr(module, path, key, follow)
changed = True
res = current
msg = "%s removed" % (key)
elif state == 'keys':
res = get_xattr_keys(module, path, follow)
msg = "returning all keys"
elif state == 'all':
res = get_xattr(module, path, None, follow)
msg = "dumping all"
else:
res = get_xattr(module, path, key, follow)
msg = "returning %s" % key
module.exit_json(changed=changed, msg=msg, xattr=res)
if __name__ == '__main__':
main()
|
orgito/ansible
|
lib/ansible/modules/files/xattr.py
|
Python
|
gpl-3.0
| 6,842
|
[
"Brian"
] |
f05307811f326a943066cc1c3a9c1e168fb58716d31d38a4619497270876e556
|
##############################################################################
# pymbar: A Python Library for MBAR
#
# Copyright 2016-2017 University of Colorado Boulder,
# Copyright 2010-2017 Memorial Sloan-Kettering Cancer Center
# Portions of this software are Copyright (c) 2010-2016 University of Virginia
#
# Authors: Michael Shirts, John Chodera
# Contributors: Kyle Beauchamp
#
# pymbar is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
#
# You should have received a copy of the MIT License along with pymbar.
##############################################################################
"""
Please reference the following if you use this code in your research:
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from multiple equilibrium states.
J. Chem. Phys. 129:124105, 2008. http://dx.doi.org/10.1063/1.2978177
This module contains implementations of
* EXP - unidirectional estimator for free energy differences based on Zwanzig relation / exponential averaging
"""
#=============================================================================================
# * Fix computeBAR and computeEXP to be BAR() and EXP() to make them easier to find.
# * Make functions that don't need to be exported (like logsum) private by prefixing an underscore.
# * Make asymptotic covariance matrix computation more robust to over/underflow.
# * Double-check correspondence of comments to equation numbers once manuscript has been finalized.
# * Change self.nonzero_N_k_indices to self.states_with_samples
#=============================================================================================
__authors__ = "Michael R. Shirts and John D. Chodera."
__license__ = "MIT"
#=============================================================================================
# IMPORTS
#=============================================================================================
import numpy as np
from pymbar.utils import logsumexp
#=============================================================================================
# One-sided exponential averaging (EXP).
#=============================================================================================
def EXP(w_F, compute_uncertainty=True, is_timeseries=False, return_dict=False):
"""Estimate free energy difference using one-sided (unidirectional) exponential averaging (EXP).
Parameters
----------
w_F : np.ndarray, float
w_F[t] is the forward work value from snapshot t. t = 0...(T-1) Length T is deduced from vector.
compute_uncertainty : bool, optional, default=True
if False, will disable computation of the statistical uncertainty (default: True)
is_timeseries : bool, default=False
        if True, correlation in data is corrected for by estimation of statistical inefficiency (default: False)
Use this option if you are providing correlated timeseries data and have not subsampled the data to produce uncorrelated samples.
return_dict : bool, default False
If true, returns are a dictionary, else they are a tuple
Returns
-------
'Delta_f' : float
Free energy difference
If return_dict, key is 'Delta_f'
'dDelta_f': float
Estimated standard deviation of free energy difference
If return_dict, key is 'dDelta_f'
Notes
-----
    If you are providing correlated timeseries data, be sure to set the 'timeseries' flag to True
Examples
--------
Compute the free energy difference given a sample of forward work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> results = EXP(w_F, return_dict=True)
>>> print('Forward free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
Forward free energy difference is 1.088 +- 0.076 kT
>>> results = EXP(w_R, return_dict=True)
>>> print('Reverse free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
Reverse free energy difference is -1.073 +- 0.082 kT
"""
result_vals = dict()
result_list = []
# Get number of work measurements.
T = float(np.size(w_F)) # number of work measurements
# Estimate free energy difference by exponential averaging using DeltaF = - log < exp(-w_F) >
DeltaF = - (logsumexp(- w_F) - np.log(T))
if compute_uncertainty:
# Compute x_i = np.exp(-w_F_i - max_arg)
max_arg = np.max(-w_F) # maximum argument
x = np.exp(-w_F - max_arg)
# Compute E[x] = <x> and dx
Ex = x.mean()
# Compute effective number of uncorrelated samples.
g = 1.0 # statistical inefficiency
if is_timeseries:
# Estimate statistical inefficiency of x timeseries.
            from pymbar import timeseries
g = timeseries.statisticalInefficiency(x, x)
# Estimate standard error of E[x].
dx = np.std(x) / np.sqrt(T / g)
# dDeltaF = <x>^-1 dx
dDeltaF = (dx / Ex)
# Return estimate of free energy difference and uncertainty.
result_vals['Delta_f'] = DeltaF
result_vals['dDelta_f'] = dDeltaF
result_list.append(DeltaF)
result_list.append(dDeltaF)
else:
result_vals['Delta_f'] = DeltaF
result_list.append(DeltaF)
if return_dict:
return result_vals
return tuple(result_list)
#=============================================================================================
# Gaussian approximation to exponential averaging (Gauss).
#=============================================================================================
def EXPGauss(w_F, compute_uncertainty=True, is_timeseries=False, return_dict=False):
"""Estimate free energy difference using gaussian approximation to one-sided (unidirectional) exponential averaging.
Parameters
----------
w_F : np.ndarray, float
w_F[t] is the forward work value from snapshot t. t = 0...(T-1) Length T is deduced from vector.
compute_uncertainty : bool, optional, default=True
if False, will disable computation of the statistical uncertainty (default: True)
is_timeseries : bool, default=False
        if True, correlation in data is corrected for by estimation of statistical inefficiency (default: False)
Use this option if you are providing correlated timeseries data and have not subsampled the data to produce uncorrelated samples.
return_dict : bool, default False
If true, returns are a dictionary, else they are a tuple
Returns
-------
'Delta_f' : float
Free energy difference between the two states
If return_dict, key is 'Delta_f'
'dDelta_f': float
Estimated standard deviation of free energy difference between the two states.
If return_dict, key is 'dDelta_f'
Notes
-----
    If you are providing correlated timeseries data, be sure to set the 'timeseries' flag to True
Examples
--------
Compute the free energy difference given a sample of forward work values.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> results = EXPGauss(w_F, return_dict=True)
>>> print('Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
Forward Gaussian approximated free energy difference is 1.049 +- 0.089 kT
>>> results = EXPGauss(w_R, return_dict=True)
>>> print('Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
Reverse Gaussian approximated free energy difference is -1.073 +- 0.080 kT
"""
# Get number of work measurements.
T = float(np.size(w_F)) # number of work measurements
var = np.var(w_F)
# Estimate free energy difference by Gaussian approximation, dG = <U> - 0.5*var(U)
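    # (this is the second-order cumulant expansion of -ln<exp(-w)>; the
    # truncation is exact when the work distribution really is gaussian)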
DeltaF = np.average(w_F) - 0.5 * var
result_vals = dict()
result_list = []
if compute_uncertainty:
# Compute effective number of uncorrelated samples.
g = 1.0 # statistical inefficiency
T_eff = T
if is_timeseries:
# Estimate statistical inefficiency of x timeseries.
            from pymbar import timeseries
g = timeseries.statisticalInefficiency(w_F, w_F)
T_eff = T / g
# Estimate standard error of E[x].
dx2 = var / T_eff + 0.5 * var * var / (T_eff - 1)
dDeltaF = np.sqrt(dx2)
# Return estimate of free energy difference and uncertainty.
result_vals['Delta_f'] = DeltaF
result_vals['dDelta_f'] = dDeltaF
result_list.append(DeltaF)
result_list.append(dDeltaF)
else:
result_vals['Delta_f'] = DeltaF
result_list.append(DeltaF)
if return_dict:
return result_vals
return tuple(result_list)
#=============================================================================================
# For compatibility with 2.0.1-beta
#=============================================================================================
deprecation_warning = """
Warning
-------
This method name is deprecated, and provided for backward-compatibility only.
It may be removed in future versions.
"""
def computeEXP(*args, **kwargs):
return EXP(*args, **kwargs)
computeEXP.__doc__ = EXP.__doc__ + deprecation_warning
def computeEXPGauss(*args, **kwargs):
return EXPGauss(*args, **kwargs)
computeEXPGauss.__doc__ = EXPGauss.__doc__ + deprecation_warning
def _compatibilityDoctests():
"""
Backwards-compatibility doctests.
>>> from pymbar import testsystems
>>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
>>> [DeltaF, dDeltaF] = computeEXP(w_F)
>>> [DeltaF, dDeltaF] = computeEXPGauss(w_F)
"""
pass
|
choderalab/pymbar
|
pymbar/exp.py
|
Python
|
mit
| 10,191
|
[
"Gaussian"
] |
9c506d003422e77cb9d40c1c9da7536d08d7f07447559b06da92418c7739da6b
|
import numpy as np
import matplotlib.pyplot as plt
from iterative_policy_evaluation import print_values, print_policy
from grid import standard_grid, negative_grid
from monte_carlo_random import play_game, random_action, EPS, GAMMA, ALL_POSSIBLE_ACTIONS
LEARNING_RATE = 0.001
if __name__ == '__main__':
grid = standard_grid()
print('rewards')
print_values(grid.rewards, grid)
# define a policy
policy = {
(2, 0) : 'U',
(1, 0) : 'U',
(0, 0) : 'R',
(0, 1) : 'R',
(0, 2) : 'R',
(1, 2) : 'U',
(2, 1) : 'L',
(2, 2) : 'U',
(2, 3) : 'L'
}
theta = np.random.randn(4) / 2
def s2x(s):
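        # hand-crafted linear features for state s=(row, col): centered row,
        # centered column, their product, and a constant bias term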
return np.array([s[0] - 1, s[1] - 1.5, s[0] * s[1] - 3, 1])
deltas = []
t = 1.0
for it in range(20000):
if it % 100 == 0:
t += 0.01
alpha = LEARNING_RATE / t
biggest_change = 0
states_and_returns = play_game(grid, policy)
seen_states = set() # First-visit MC method
for s, G in states_and_returns:
if s not in seen_states:
old_theta = theta.copy()
# predict the return using parameters
x = s2x(s)
V_hat = theta.dot(x)
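                # gradient MC update: with linear V_hat = theta.x the
                # gradient w.r.t. theta is x, so step toward the return G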
theta += alpha * (G - V_hat) * x
biggest_change = max(biggest_change, np.abs(theta - old_theta).sum())
seen_states.add(s)
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
# obtain predicted values for V
V = {}
for s in grid.actions:
V[s] = theta.dot(s2x(s))
print('values')
print_values(V, grid)
print('policy')
print_policy(policy, grid)
|
InnovArul/codesmart
|
Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_approximation.py
|
Python
|
gpl-2.0
| 1,822
|
[
"VisIt"
] |
6f9d9200a4bb73e73d66a8892d2f86708cceacf887542d81267ee45199b2be8d
|
import os.path as osp
work_dir = "/home/salotz/Dropbox/devel/mastic/work/pi_stacking"
ref_benzene_PDB_path = osp.join(work_dir, "ref_benzene.pdb")
ref_benzene_MOL_path = osp.join(work_dir, "benzene.mol")
from rdkit import Chem
ref_benzene_PDB_rdkit = Chem.MolFromPDBFile(ref_benzene_PDB_path, removeHs=False, sanitize=False)
ref_benzene_MOL_rdkit = Chem.MolFromMolFile(ref_benzene_MOL_path, sanitize=True)
from mastic.interfaces.rdkit import AssignBondOrdersFromTemplate
ref_benzene_rdkit = AssignBondOrdersFromTemplate(ref_benzene_MOL_rdkit, ref_benzene_PDB_rdkit)
from mastic.interfaces.rdkit import RDKitMoleculeWrapper
benzene_rdkit_wrapper = RDKitMoleculeWrapper(ref_benzene_rdkit, mol_name="benzene")
ref_benzene_coords = benzene_rdkit_wrapper.get_conformer_coords(0)
Benzene_Molecule = benzene_rdkit_wrapper.make_molecule_type(find_features=True)
import os.path as osp
import mastic.system as masticsys
member_types = [Benzene_Molecule, Benzene_Molecule]
system_attrs = {'molecule_source' : 'rdkit'}
Benzene_Benzene_System = masticsys.SystemType("Benzene_Benzene_System",
member_types=member_types,
**system_attrs)
# when we make associations for assymmetric interactions we need to
# define an association of A -> B and B -> A so we define the receptor
# -> ligand interactions and ligand -> receptor interactions, this
# really only means the donors -> acceptors from the members.
selection_map_AB = [(0, None), (1, None)]
selection_types = [None, None]
assoc1_attrs = {'info' : 'benzene1-benzene2'}
Benzene1_Benzene2_Association = \
masticsys.AssociationType("Benzene1_Benzene2_Association",
system_type=Benzene_Benzene_System,
selection_map=selection_map_AB,
selection_types=selection_types,
**assoc1_attrs)
Benzene_Benzene_System.add_association_type(Benzene1_Benzene2_Association)
selection_map_BA = selection_map_AB[::-1]
assoc2_attrs = {'info' : 'benzene2-benzene1'}
Benzene2_Benzene1_Association = \
masticsys.AssociationType("Benzene2_Benzene1_Association",
system_type=Benzene_Benzene_System,
selection_map=selection_map_BA,
selection_types=selection_types,
**assoc2_attrs)
Benzene_Benzene_System.add_association_type(Benzene2_Benzene1_Association)
import pickle
system_pkl_path = osp.join(".", "Benzene_Benzene_SystemType.pkl")
with open(system_pkl_path, 'wb') as wf:
pickle.dump(Benzene_Benzene_System, wf)
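# Reloading the pickled SystemType later (sketch):
#   with open(system_pkl_path, 'rb') as rf:
#       Benzene_Benzene_System = pickle.load(rf)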
|
salotz/mast
|
work/pi_stacking/make_benzenebenzene_systemtype_pkl.py
|
Python
|
mit
| 2,758
|
[
"RDKit"
] |
53e94702c24e2b869404053e4316f7c8db1b978ffd6af92fd01ccafe633682d7
|
"""User API view functions"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from flask import (
Blueprint,
abort,
current_app,
jsonify,
make_response,
redirect,
request,
session,
url_for,
)
from flask_babel import force_locale
from flask_user import roles_required
from sqlalchemy import and_, func
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.exceptions import Unauthorized
from ..audit import auditable_event
from ..cache import cache
from ..database import db
from ..date_tools import FHIR_datetime
from ..extensions import oauth, user_manager
from ..models.app_text import MailResource, UserInviteEmail_ATMA, app_text
from ..models.audit import Audit
from ..models.auth import Token
from ..models.client import Client, client_event_dispatch
from ..models.communication import load_template_args
from ..models.group import Group
from ..models.intervention import Intervention
from ..models.message import EmailMessage
from ..models.organization import Organization
from ..models.questionnaire_bank import trigger_date
from ..models.qb_timeline import QB_StatusCacheKey, invalidate_users_QBT
from ..models.questionnaire_response import QuestionnaireResponse
from ..models.relationship import Relationship
from ..models.role import ROLE, Role
from ..models.table_preference import TablePreference
from ..models.url_token import url_token
from ..models.user import (
INVITE_PREFIX,
User,
UserRelationship,
current_user,
get_user,
permanently_delete_user,
validate_email,
)
from ..models.user_consent import UserConsent
from ..models.user_document import UserDocument
from ..type_tools import check_int
from .auth import logout
from .crossdomain import crossdomain
user_api = Blueprint('user_api', __name__, url_prefix='/api')
@user_api.route('/me')
@crossdomain()
@oauth.require_oauth()
def me():
"""Access basics for current user
returns authenticated user's id, username and email in JSON
---
tags:
- User
operationId: me
produces:
- application/json
responses:
200:
description: successful operation
schema:
id: user
required:
- id
- username
- email
properties:
id:
type: integer
format: int64
description: TrueNTH ID for user
username:
type: string
description: User's username - which will always match the email
email:
type: string
description: User's preferred email address, same as username
401:
description: if missing valid OAuth token
security:
- ServiceToken: []
"""
user = current_user()
if user.current_encounter().auth_method == 'url_authenticated':
return jsonify(id=user.id)
return jsonify(
id=user.id, username=user.username, email=user.email)
@user_api.route('/account', methods=('POST',))
@crossdomain()
@oauth.require_oauth() # for service token access, oauth must come first
@roles_required(
[ROLE.APPLICATION_DEVELOPER.value, ROLE.ADMIN.value, ROLE.SERVICE.value,
ROLE.STAFF.value, ROLE.STAFF_ADMIN.value])
def account():
"""Create a user account
Due to complicated rules with respect to staff users being able to edit
the account generated by this endpoint, all data necessary to secure edit
rights on the new account must be included in the initial call. This will
typically include `organizations`, `consents` and `roles`.
On success, a simple JSON object is returned defining the new user's id.
If the user creating the account doesn't provide adequate details to secure
edit rights, a 400 will be generated.
Beyond account creation, additional endpoints may be used to adjust the
account details including:
1. PUT /api/demographics/{id}, with known details for the new user
2. PUT /api/user/{id}/roles to grant additional user role(s)
3. PUT /api/intervention/{name} grants the user access to the intervention.
---
tags:
- User
operationId: createAccount
parameters:
- in: body
name: body
schema:
id: account_args
properties:
organizations:
type: array
items:
type: object
required:
- organization_id
properties:
organization_id:
type: string
description:
Optional organization identifier defining the
organization the new user will belong to.
consents:
type: array
items:
type: object
required:
- organization_id
- agreement_url
properties:
organization_id:
type: integer
format: int64
description:
Organization identifier defining with whom the consent
agreement applies
acceptance_date:
type: string
format: date-time
description:
                  optional UTC date-time for when the agreement was
                  accepted, defaults to utcnow
expires:
type: string
format: date-time
description:
optional UTC date-time for when the agreement expires,
defaults to utcnow plus 5 years
agreement_url:
type: string
description: URL pointing to agreement text
staff_editable:
type: boolean
description:
set True if consenting to enable account editing by staff
include_in_reports:
type: boolean
description:
set True if consenting to share data in reports
send_reminders:
type: boolean
description:
set True if consenting to receive reminders when
assessments are due
roles:
type: array
items:
type: object
required:
- name
properties:
name:
type: string
description:
Role name, always a lower case string
with no white space.
description:
type: string
description: Plain text describing the role.
produces:
- application/json
responses:
200:
description:
"Returns {user_id: id}"
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
acting_user = current_user()
if acting_user.has_role(ROLE.ADMIN.value, ROLE.SERVICE.value):
adequate_perms = True
else:
adequate_perms = False
error = ('without organizations, consents and roles, subsequent '
'calls on this user object will fail with a 401')
if not request.json:
abort(400, error)
if not all(required in request.json for required in (
'organizations', 'consents', 'roles')):
abort(400, error)
user = User()
db.session.add(user)
if request.json and 'organizations' in request.json:
try:
org_list = [Organization.query.filter_by(
id=org['organization_id']).one()
for org in request.json['organizations']]
user.update_orgs(org_list, acting_user=acting_user,
excuse_top_check=True)
if org_list:
user.timezone = org_list[0].timezone
except NoResultFound:
abort(
400,
"Organization in {} not found, check "
"/api/organization for existence.".format(
request.json['organizations']))
if request.json and 'consents' in request.json:
try:
consent_list = []
for consent in request.json['consents']:
if 'user_id' not in consent:
consent['user_id'] = user.id
elif consent['user_id'] != user.id:
raise ValueError("consent user_id differs from path")
if 'research_study_id' not in consent:
consent['research_study_id'] = 0
consent_list.append(UserConsent.from_json(consent))
user.update_consents(consent_list, acting_user=acting_user)
except ValueError as e:
abort(400, "ill formed consents:".format(e))
if request.json and 'roles' in request.json:
try:
role_list = [Role.query.filter_by(name=role.get('name')).one()
for role in request.json.get('roles')]
user.update_roles(role_list, acting_user=current_user())
except NoResultFound:
abort(400, "one or more roles ill defined "
"{}".format(request.json.get('roles')))
db.session.commit()
auditable_event(
"new account generated for {} <{}>".format(user, user._email),
user_id=current_user().id, subject_id=user.id,
context='account')
if not adequate_perms:
# Make sure acting user has permission to edit the newly
# created user, or generate a 400 and purge the user.
try:
acting_user.check_role('edit', other_id=user.id)
except Unauthorized:
permanently_delete_user(
username=user.username, user_id=user.id,
acting_user=acting_user)
abort(400, "Inaccessible user created - review consent and roles")
# Force a renewal of the visit / qb_status cache so the new user has
# accurate info. Pad by a second to get around microsecond floor problems
now = datetime.utcnow() + relativedelta(seconds=1)
QB_StatusCacheKey().update(now)
return jsonify(user_id=user.id)
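# Illustrative only (not from the source): a minimal POST body satisfying
# the required 'organizations', 'consents' and 'roles' keys documented
# above.  All ids, URLs and the role name are hypothetical:
#
#     {
#       "organizations": [{"organization_id": 123}],
#       "consents": [{
#         "organization_id": 123,
#         "agreement_url": "https://example.com/consent",
#         "staff_editable": true
#       }],
#       "roles": [{"name": "patient"}]
#     }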
@user_api.route('/user/<int:user_id>', methods=('DELETE',))
@crossdomain()
@roles_required([ROLE.ADMIN.value, ROLE.STAFF_ADMIN.value])
@oauth.require_oauth()
def delete_user(user_id):
"""Delete the named user from the system
Mark the given user as deleted. The user isn't actually deleted,
but marked as such to maintain the audit trail. After deletion,
all other operations on said user are prohibited.
---
tags:
- User
operationId: delete_user
parameters:
- name: user_id
in: path
description: TrueNTH user ID to delete
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description: successful operation
schema:
id: response_deleted
required:
- message
properties:
message:
type: string
description: Result, typically "deleted"
400:
description:
Invalid requests, such as deleting a user owning client applications.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if the user isn't found
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = get_user(user_id, 'edit')
try:
user.delete_user(acting_user=current_user())
except ValueError as v:
return jsonify(message=str(v))
return jsonify(message="deleted")
@user_api.route('/user/<int:user_id>/reactivate', methods=('POST',))
@crossdomain()
@roles_required([ROLE.ADMIN.value, ROLE.STAFF_ADMIN.value])
@oauth.require_oauth()
def reactivate_user(user_id):
"""Reactivate a previously deleted user
Reactivate a previously deleted user - brings the account back to
valid status.
---
tags:
- User
operationId: reactivate_user
parameters:
- name: user_id
in: path
description: TrueNTH user ID to reactivate
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description: successful operation
schema:
id: response_reactivated
required:
- message
properties:
message:
type: string
description: Result, typically "reactivated"
400:
description:
Invalid requests, such as reactivating a user that wasn't in a
deleted state.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if the user isn't found
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = get_user(user_id, permission='edit', include_deleted=True)
try:
user.reactivate_user(acting_user=current_user())
except ValueError as v:
response = jsonify(message="{}".format(v))
response.status_code = 400
return response
return jsonify(message="reactivated")
@user_api.route('/user/<int:user_id>/access_url')
@crossdomain()
@oauth.require_oauth() # for service token access, oauth must come first
@roles_required(
[ROLE.APPLICATION_DEVELOPER.value, ROLE.ADMIN.value, ROLE.SERVICE.value,
ROLE.STAFF.value, ROLE.STAFF_ADMIN.value])
def access_url(user_id):
"""Returns simple JSON with one-time, unique access URL for given user
Generates a single use access token for the given user as a
one click, weak authentication access to the system.
NB - user must be a member of the WRITE_ONLY role or ACCESS_ON_VERIFY,
and not a member of privileged roles, as a safeguard from abuse.
---
tags:
- User
operationId: access_url
parameters:
- name: user_id
in: path
description: TrueNTH user ID to grant access via unique URL
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description: successful operation
schema:
id: response_unique_URL
required:
- access_url
properties:
access_url:
type: string
description: The unique URL providing one time access
400:
description:
if the user has too many privileges for weak authentication
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
404:
description: if the user isn't found
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = get_user(user_id, permission='edit')
not_allowed = {
ROLE.ADMIN.value,
ROLE.APPLICATION_DEVELOPER.value,
ROLE.SERVICE.value}
has = {role.name for role in user.roles}
if not has.isdisjoint(not_allowed):
abort(400, "Access URL not provided for privileged accounts")
if {ROLE.ACCESS_ON_VERIFY.value, ROLE.WRITE_ONLY.value}.isdisjoint(has):
# KEEP this restriction. Weak authentication (which the
# returned URL provides) should only be available for these roles
abort(
400,
"Access URL restricted to ACCESS_ON_VERIFY or WRITE_ONLY roles")
# generate URL token
token = url_token(user_id)
url = url_for(
'portal.access_via_token', token=token, _external=True)
auditable_event("generated URL token {}".format(token),
user_id=current_user().id, subject_id=user.id,
context='authentication')
return jsonify(access_url=url)
@user_api.route('/user/<int:user_id>/consent')
@crossdomain()
@oauth.require_oauth()
def user_consents(user_id):
"""Returns simple JSON listing user's valid consent agreements
Returns the list of consent agreements between the requested user
and the respective organizations. Consents are ordered by
``acceptance_date``, most recent first.
NB does include deleted and expired consents. Deleted consents will
include audit details regarding the deletion. The expires timestamp in UTC
is also returned for all consents.
Consents include a number of options, each of which will only be in the
returned JSON if defined.
---
tags:
- User
- Consent
- Organization
operationId: user_consents
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the list of consent agreements for the requested user.
schema:
id: consents
properties:
consent_agreements:
type: array
items:
type: object
required:
- user_id
- organization_id
- acceptance_date
- recorded
- expires
- agreement_url
- research_study_id
properties:
user_id:
type: string
description:
User identifier defining with whom the consent agreement
applies
organization_id:
type: string
description:
Organization identifier defining with whom the consent
agreement applies
acceptance_date:
type: string
format: date-time
description:
Original UTC date-time from the moment the agreement was
signed or put in place by some other workflow
recorded:
$ref: "#/definitions/audits"
expires:
type: string
format: date-time
description:
UTC date-time for when the agreement expires, typically 5
years from the original signing date
agreement_url:
type: string
description: URL pointing to agreement text
staff_editable:
type: boolean
description:
True if consenting to enable account editing by staff
include_in_reports:
type: boolean
description:
True if consenting to share data in reports
send_reminders:
type: boolean
description:
True if consenting to receive reminders when
assessments are due
research_study_id:
type: string
description:
Research Study identifier to which the consent
agreement applies
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(user_id, 'view')
return jsonify(consent_agreements=[c.as_json() for c in
user.all_consents])
@user_api.route('/user/<int:user_id>/consent', methods=('POST',))
@crossdomain()
@oauth.require_oauth()
def set_user_consents(user_id):
"""Add a consent agreement for the user with named organization
    Used to add a consent agreement between a user and an organization.
    Assumed to have just been agreed to.  Include 'acceptance_date' and/or
    'expires' if necessary; they default to utcnow and utcnow plus 5
    years, respectively (both in UTC).
NB only one valid consent should be in place between a user and an
organization per research study. Therefore, if this POST would create
a second consent on the given (user, organization, research study), the
existing consent will be marked deleted.
Research Studies were added since the initial implementation of this API.
Therefore, exclusion of a ``research_study_id`` will implicitly use a value
of 0 (zero) as the research_study_id.
---
tags:
- User
- Consent
- Organization
operationId: post_user_consent
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- in: body
name: body
schema:
id: post_consent_agreement
required:
- organization_id
- agreement_url
properties:
organization_id:
type: integer
format: int64
description:
Organization identifier defining with whom the consent
agreement applies
acceptance_date:
type: string
format: date-time
description:
optional UTC date-time for when the agreement is initially
valid, defaults to utcnow. Dates in the future are not valid
expires:
type: string
format: date-time
description:
optional UTC date-time for when the agreement expires,
defaults to utcnow plus 5 years
agreement_url:
type: string
description: URL pointing to agreement text
staff_editable:
type: boolean
description:
set True if consenting to enable account editing by staff
include_in_reports:
type: boolean
description:
set True if consenting to share data in reports
send_reminders:
type: boolean
description:
set True if consenting to receive reminders when
assessments are due
research_study_id:
type: integer
format: int64
description:
Research Study identifier defining which research study the
consent agreement applies to. Include to override the default
value of 0 (zero).
responses:
200:
description: successful operation
schema:
id: response_ok
required:
- message
properties:
message:
type: string
description: Result, typically "ok"
400:
description: if the request includes invalid data
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if user_id doesn't exist
security:
- ServiceToken: []
"""
current_app.logger.debug('post user consent called w/: {}'.format(
request.json))
user = get_user(user_id, 'edit')
if not request.json:
abort(400, "Requires JSON with submission including "
"HEADER 'Content-Type: application/json'")
if ('acceptance_date' in request.json
and FHIR_datetime.parse(request.json['acceptance_date'])
> datetime.utcnow()):
abort(400, "Future `acceptance_date` not permitted")
request.json['user_id'] = user_id
try:
consent = UserConsent.from_json(request.json)
if 'research_study_id' not in request.json:
consent.research_study_id = 0
consent_list = [consent, ]
user.update_consents(
consent_list=consent_list, acting_user=current_user())
# Moving consent dates potentially invalidates
# (questionnaire_response: visit_name) associations.
cache.delete_memoized(trigger_date)
QuestionnaireResponse.purge_qb_relationship(
subject_id=user.id,
research_study_id=consent.research_study_id,
acting_user_id=current_user().id)
# The updated consent may have altered the cached assessment
# status - invalidate this user's data at this time.
invalidate_users_QBT(
user_id=user.id, research_study_id=consent.research_study_id)
except ValueError as e:
abort(400, str(e))
return jsonify(message="ok")
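# Illustrative only (not from the source): an example POST body for this
# endpoint.  The organization id and agreement URL are hypothetical;
# research_study_id defaults to 0 when omitted, as noted above:
#
#     {
#       "organization_id": 123,
#       "agreement_url": "https://example.com/consent",
#       "acceptance_date": "2020-01-01T00:00:00Z",
#       "send_reminders": true
#     }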
@user_api.route('/user/<int:user_id>/consent/withdraw',
methods=('POST', 'PUT'))
@crossdomain()
@oauth.require_oauth()
def withdraw_user_consent(user_id):
"""Withdraw existing consent agreement for the user with named organization
Used to withdraw a consent agreements between a user and an organization.
If a consent exists for the given user/org, the consent will be marked
deleted, and a matching consent (with new status/option values) will be
created in its place.
NB Invalid to request a withdrawal date prior to current consent.
---
tags:
- User
- Consent
- Organization
operationId: withdraw_user_consent
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- in: body
name: body
schema:
id: withdraw_consent_agreement
required:
- organization_id
properties:
acceptance_date:
type: string
format: date-time
description:
optional UTC date-time for when the withdrawal occurred if
other than the defaults of utcnow.
Dates in the future are not valid
organization_id:
type: integer
format: int64
description:
Organization identifier defining with whom the consent
agreement applies
research_study_id:
type: integer
format: int64
description:
Research Study identifier defining which research study the
consent agreement applies to. Include to override the default
value of 0 (zero).
responses:
200:
description: successful operation
schema:
id: response_ok
required:
- message
properties:
message:
type: string
description: Result, typically "ok"
400:
description: if the request includes invalid data
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description:
          if user_id doesn't exist, or no consent is found
          for the given user / org combination
security:
- ServiceToken: []
"""
current_app.logger.debug('withdraw user consent called w/: '
'{}'.format(request.json))
user = get_user(user_id, permission='edit')
if not request.json:
abort(400, "Requires JSON with submission including "
"HEADER 'Content-Type: application/json'")
org_id = request.json.get('organization_id')
if not org_id:
abort(400, "missing required organization ID")
research_study_id = request.json.get('research_study_id', 0)
acceptance_date = None
if 'acceptance_date' in request.json:
acceptance_date = FHIR_datetime.parse(request.json['acceptance_date'])
if acceptance_date > datetime.utcnow():
abort(400, "Future `acceptance_date` not permitted")
current_app.logger.debug('withdraw user consent called for user {} '
'and org {}'.format(user.id, org_id))
return withdraw_consent(
user=user, org_id=org_id, acceptance_date=acceptance_date,
acting_user=current_user(), research_study_id=research_study_id)
def withdraw_consent(
user, org_id, acceptance_date, acting_user, research_study_id):
"""execute consent withdrawal - view and test friendly function"""
uc = UserConsent.query.filter_by(
user_id=user.id, organization_id=org_id, status='consented',
research_study_id=research_study_id).first()
if not uc:
abort(
404,
"no UserConsent found for user ID {}, org ID {}, research study "
"ID {}".format(user.id, org_id, research_study_id))
try:
if not acceptance_date:
acceptance_date = datetime.utcnow()
if acceptance_date <= uc.acceptance_date:
raise ValueError(
"Can't suspend with acceptance date prior to existing consent")
suspended = UserConsent(
user_id=user.id, organization_id=org_id, status='suspended',
acceptance_date=acceptance_date, agreement_url=uc.agreement_url,
research_study_id=research_study_id)
suspended.send_reminders = False
suspended.include_in_reports = True
suspended.staff_editable = (not current_app.config.get('GIL'))
user.update_consents(
consent_list=[suspended], acting_user=acting_user)
# NB - we do NOT call QuestionnaireResponse.purge_qb_relationship()
# in this case, as the user is withdrawing, not altering initial
# consent dates. Doing so does alter the QB_timeline from point of
# withdrawal forward, so force QB_timeline renewal
invalidate_users_QBT(
user_id=user.id, research_study_id=research_study_id)
except ValueError as e:
abort(400, str(e))
return jsonify(suspended.as_json())
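# Sketch of calling the helper above directly, as a test might (all values
# hypothetical).  Passing acceptance_date=None suspends as of utcnow:
#
#     withdraw_consent(
#         user=patient, org_id=123, acceptance_date=None,
#         acting_user=staff_member, research_study_id=0)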
@user_api.route('/user/<int:user_id>/consent', methods=('DELETE',))
@crossdomain()
@oauth.require_oauth()
def delete_user_consents(user_id):
"""Delete a consent agreement between the user and the named organization
Used to delete consent agreements between a user and an organization.
---
tags:
- User
- Consent
- Organization
operationId: delete_user_consent
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- in: body
name: body
schema:
id: consent_agreement
required:
- organization_id
properties:
organization_id:
type: integer
format: int64
description:
Organization identifier defining with whom the consent
agreement applies
research_study_id:
type: integer
format: int64
description:
Research Study identifier defining which research study the
consent agreement applies to. Include to override the default
value of 0 (zero).
responses:
200:
description: successful operation
schema:
id: response_ok
required:
- message
properties:
message:
type: string
description: Result, typically "ok"
400:
description: if the request includes invalid data
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if user_id doesn't exist
security:
- ServiceToken: []
"""
from portal.trigger_states.empro_states import EMPRO_STUDY_ID
current_app.logger.debug('delete user consent called w/: {}'.format(
request.json))
user = get_user(user_id, 'edit')
remove_uc = None
research_study_id = request.json.get('research_study_id', 0)
try:
id_to_delete = int(request.json['organization_id'])
except ValueError:
abort(400, "requires integer value for `organization_id`")
for uc in user.valid_consents:
if (
uc.organization.id == id_to_delete and
uc.research_study_id == research_study_id):
remove_uc = uc
break
if not remove_uc:
abort(404, "matching user consent not found")
audit_comment = 'Deleted consent agreement'
if research_study_id == EMPRO_STUDY_ID:
audit_comment = 'Deleted EMPRO consent agreement'
remove_uc.deleted = Audit(
user_id=current_user().id, subject_id=user_id,
comment=audit_comment, context='consent')
remove_uc.status = 'deleted'
# The deleted consent may have altered the cached assessment
# status, even the qb assignments - force re-eval by invalidating now
cache.delete_memoized(trigger_date)
QuestionnaireResponse.purge_qb_relationship(
subject_id=user_id,
research_study_id=research_study_id,
acting_user_id=current_user().id)
invalidate_users_QBT(user_id=user_id, research_study_id=research_study_id)
db.session.commit()
return jsonify(message="ok")
@user_api.route('/user/<int:user_id>/encounter', methods=('GET',))
@crossdomain()
@oauth.require_oauth()
def current_encounter(user_id):
"""Return current/latest encounter for logged in user
    NB: the only expected use at this time is the current user; returns
    a 400 if called on another user, to avoid creating false,
    failsafe encounters.
---
tags:
- User
- Encounter
operationId: current_encounter
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the current encounter for the requested user. NB only
the ``current_user`` is supported at this time.
schema:
id: encounter
required:
- id
- status
- patient
- auth_method
properties:
id:
type: integer
format: int64
description:
Current encounter identifier
status:
description:
Plain text describing the encounter status,
expect ``in-progress`` for "current" encounter.
type: string
enum:
- planned
- arrived
- in-progress
- onleave
- finished
- cancelled
patient:
description: Reference to patient owning the encounter
$ref: "#/definitions/Reference"
auth_method:
description: Form of encounter authentication
type: string
enum:
- password_authenticated
- url_authenticated
- staff_authenticated
- staff_handed_to_patient
- service_token_authenticated
- url_authenticated_and_verified
- failsafe
400:
description:
Only supported for current user - any other will result in 400
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
"""
user = current_user()
if user_id != user.id:
abort(400, "Only current_user's encounter accessible")
return jsonify(user.current_encounter().as_fhir())
@user_api.route('/user/<int:user_id>/groups')
@crossdomain()
@oauth.require_oauth()
def user_groups(user_id):
"""Returns simple JSON defining user's groups
Returns the list of groups the requested user belongs to.
---
tags:
- User
- Group
operationId: user_groups
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the list of groups the requested user belongs to.
schema:
id: groups
required:
- name
- description
properties:
name:
type: string
description:
Group name, always a lower case string with no white space.
description:
type: string
description: Plain text describing the group.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(user_id, 'view')
return jsonify(groups=[g.as_json() for g in user.groups])
@user_api.route('/user/<int:user_id>/groups', methods=('PUT',))
@crossdomain()
@oauth.require_oauth()
def set_user_groups(user_id):
"""Set groups for user, returns simple JSON defining user groups
Used to set group assignments for a user. Include all groups
the user should be a member of. If user previously belonged to
groups not included, the missing groups will be deleted from the user.
Only the 'name' field of the groups is referenced. Must match
current groups in the system.
Returns a list of all groups user belongs to after change.
---
tags:
- User
- Group
operationId: set_user_groups
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- in: body
name: body
schema:
id: nested_groups
properties:
groups:
type: array
items:
type: object
required:
- name
properties:
name:
type: string
description:
The string defining the name of each group
the user should belong to. Must exist as an
available group in the system.
responses:
200:
description:
Returns a list of all groups user belongs to after change.
schema:
id: user_groups
required:
- name
- description
properties:
name:
type: string
description:
Group name, always a lower case string with no white space.
description:
type: string
description: Plain text describing the group.
400:
description: if the request includes an unknown group.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if user_id doesn't exist
security:
- ServiceToken: []
"""
user = get_user(user_id, 'edit')
if not request.json or 'groups' not in request.json:
abort(400, "Requires 'groups' list")
remove_if_not_requested = {group.id: group for group in user.groups}
requested_groups = [r['name'] for r in request.json['groups']]
matching_groups = Group.query.filter(Group.name.in_(
requested_groups)).all()
if len(matching_groups) != len(requested_groups):
abort(400, "One or more groups requested not available")
# Add any requested not already set on user
for requested_group in matching_groups:
if requested_group not in user.groups:
user.groups.append(requested_group)
auditable_event("added {} to user {}".format(
requested_group, user.id), user_id=current_user().id,
subject_id=user.id, context='group')
else:
remove_if_not_requested.pop(requested_group.id)
for stale_group in remove_if_not_requested.values():
user.groups.remove(stale_group)
auditable_event("deleted {} from user {}".format(
stale_group, user.id), user_id=current_user().id,
subject_id=user.id, context='group')
if user not in db.session:
db.session.add(user)
db.session.commit()
# Return user's updated group list
return jsonify(groups=[g.as_json() for g in user.groups])
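# Illustrative only: an example PUT body for this endpoint.  The group
# names are hypothetical and must already exist in the system:
#
#     {"groups": [{"name": "study_cohort_a"}, {"name": "newsletter"}]}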
@user_api.route('/relationships')
@crossdomain()
@oauth.require_oauth()
def system_relationships():
"""Returns simple JSON defining all system relationships
Returns a list of all known relationships.
---
tags:
- User
- Relationship
operationId: system_relationships
produces:
- application/json
responses:
200:
description: Returns a list of all known relationships.
schema:
id: relationships
required:
- name
- description
properties:
name:
type: string
description:
relationship name, a lower case string with no white space.
description:
type: string
description: Plain text describing the relationship.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view system relationships
security:
- ServiceToken: []
"""
results = [{'name': r.name, 'description': r.description}
for r in Relationship.query.all()]
return jsonify(relationships=results)
@user_api.route('/user/<int:user_id>/relationships')
@crossdomain()
@oauth.require_oauth()
def relationships(user_id):
"""Returns simple JSON defining user relationships
Relationships may exist between user accounts. A user may have
any number of relationships. The relationship
is a one-way definition defined to extend permissions to appropriate
users, such as intimate partners or service account sponsors.
The JSON returned includes all relationships for the given user both
as subject and as part of the relationship predicate.
---
tags:
- User
- Relationship
operationId: getrelationships
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the list of relationships the requested user belongs to.
schema:
id: user_relationships
required:
- user
- has the relationship
- with
properties:
user:
type: integer
format: int64
description: id of user acting as subject
has the relationship:
type: string
description:
The string defining the name of each relationship the user
should belong to. Must exist as an available relationship
in the system.
with:
type: integer
format: int64
description: id of user acting as part of predicate
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(user_id, 'view')
results = []
for r in user.relationships:
results.append({'user': r.user_id,
'has the relationship': r.relationship.name,
'with': r.other_user_id})
# add in any relationships where the user is on the predicate side
predicates = UserRelationship.query.filter_by(other_user_id=user.id)
for r in predicates:
results.append({'user': r.user_id,
'has the relationship': r.relationship.name,
'with': r.other_user_id})
return jsonify(relationships=results)
@user_api.route('/user/register-now')
@crossdomain()
@oauth.require_oauth()
def register_now():
"""Target for triggering registration of account
Some flows generate accounts that are not yet ``registered``,
such as when given the ``access_on_verify`` role.
When it's desirable to promote the user to a registered account
(eg when they've completed a flow like MUSIC P3P, where stakeholders
wanted to avoid the potential disruption of registration), redirect
to this endpoint to trigger promotion to a registered account.
Session variables capture the state, and redirect the user
through the common registration mechanism.
---
tags:
- User
operationId: registernow
produces:
- application/json
responses:
302:
description:
Redirects user-agent to user.registration after validation
and state storage.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
400:
description:
if user is already registered or not eligible for some reason
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = current_user()
if user.is_registered():
abort(400, "User already registered")
ready, reason = user.email_ready()
if not ready:
abort(400, reason)
# Need to logout current user, or the opportunity to register
# isn't available. This also clears the session, so do this
# step first
logout(
prevent_redirect=True,
reason='give un-registered chance to register new account')
user.mask_email()
db.session.commit()
session['invited_verified_user_id'] = user.id
return redirect(url_for('user.register', email=user.email))
@user_api.route('/user/<int:user_id>/relationships', methods=('PUT',))
@crossdomain()
@oauth.require_oauth()
def set_relationships(user_id):
"""Set relationships for user, returns JSON defining user relationships
Used to set relationship assignments for a user, both in a subject
and predicate role. The provided list of relationships will be definitive,
resulting in deletion of previously existing relationships omitted from
the given list (again where user_id is acting as the relationship
subject or part of predicate).
Returns a list of all relationships user belongs to after change.
---
tags:
- User
- Relationship
operationId: setrelationships
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- in: body
name: body
schema:
id: user_relationships
required:
- user
- has the relationship
- with
properties:
user:
type: integer
format: int64
description: id of user acting as subject
has the relationship:
type: string
description:
The string defining the name of each relationship the user
should belong to. Must exist as an available relationship
in the system.
with:
type: integer
format: int64
description: id of user acting as part of predicate
responses:
200:
description:
Returns a list of all relationships user belongs to after change.
schema:
id: user_relationships
required:
- user
- has the relationship
- with
properties:
user:
type: integer
format: int64
description: id of user acting as subject
has the relationship:
type: string
description:
The string defining the name of each relationship the user
should belong to. Must exist as an available relationship
in the system.
with:
type: integer
format: int64
description: id of user acting as part of predicate
400:
description: if the request includes an unknown relationship.
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(user_id, 'edit')
if not request.json or 'relationships' not in request.json:
abort(400, "Requires relationship list in JSON")
# First confirm all the data is valid and the user has permission
current_relationships = [r.name for r in Relationship.query]
for r in request.json['relationships']:
if r['has the relationship'] not in current_relationships:
abort(404, "Unknown relationship '{}' can't be added".format(
r['has the relationship']))
if r['user'] == r['with']:
abort(400, "Relationship must be between two different users")
if user_id not in (r['user'], r['with']):
abort(401, "Path user must be part of relationship")
subjects = [ur for ur in user.relationships]
predicates = [ur for ur in
UserRelationship.query.filter_by(other_user_id=user.id)]
remove_if_not_requested = {ur.id: ur for ur in subjects + predicates}
# Add any requested that don't exist, track what isn't mentioned for
# deletion.
audit_adds = [] # preserve till post commit
audit_dels = [] # preserve till post commit
for r in request.json['relationships']:
rel_id = Relationship.query.with_entities(
Relationship.id).filter_by(name=r['has the relationship']).first()
kwargs = {'user_id': r['user'],
'relationship_id': rel_id[0],
'other_user_id': r['with']}
existing = UserRelationship.query.filter_by(**kwargs).first()
if not existing:
user_relationship = UserRelationship(**kwargs)
db.session.add(user_relationship)
audit_adds.append(user_relationship)
else:
remove_if_not_requested.pop(existing.id)
for ur in remove_if_not_requested.values():
        audit_dels.append('{}'.format(ur))
db.session.delete(ur)
db.session.commit()
for ad in audit_adds:
auditable_event("added {}".format(ad),
user_id=current_user().id, subject_id=user.id,
context='relationship')
for ad in audit_dels:
auditable_event("deleted {}".format(ad),
user_id=current_user().id, subject_id=user.id,
context='relationship')
# Return user's updated relationship list
return relationships(user.id)
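# Illustrative only: an example PUT body for this endpoint.  The user ids
# and relationship name are hypothetical; the name must exist in the
# Relationship table and the path user must appear in each row:
#
#     {"relationships": [
#         {"user": 10, "has the relationship": "partner", "with": 11}]}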
@user_api.route('/user/<int:user_id>/email_ready')
@crossdomain()
@oauth.require_oauth()
def email_ready(user_id):
"""See if given user is 'email ready'
A user is considered email ready, if the account has adequate data
to 1.) send email (a valid email address) and 2.) attributes
required to finish a reset password process if initiated.
Returns JSON detailing if ready, and reason not if applicable.
---
tags:
- User
operationId: email_ready
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: ignore_preference
in: query
        description: Set for checks that should ignore a user's preference
          not to receive email, such as a password reset action
required: false
type: string
responses:
200:
description:
Returns JSON describing {ready=True} or
{ready=False; reason=description}
schema:
id: ready_result
required:
- ready
properties:
ready:
type: boolean
description: result of email ready check
reason:
type: string
description:
detailed description defined only if the user is NOT ready
to receive email
401:
description: if missing valid OAuth token
security:
- ServiceToken: []
"""
user = get_user(user_id, 'view')
ignore_preference = request.args.get('ignore_preference', False)
ready, reason = user.email_ready(ignore_preference)
if ready:
return jsonify(ready=ready)
else:
return jsonify(ready=ready, reason=reason)
@user_api.route('/unique_email')
@crossdomain()
def unique_email():
"""Confirm a given email is unique
For upfront validation of email addresses, determine if the given
email is unique - i.e. unknown to the system. If it is known, but
belongs to the authenticated user (or user_id if provided), it will
still be considered unique.
Returns json unique=True or unique=False
---
tags:
- User
operationId: unique_email
produces:
- application/json
parameters:
- name: email
in: query
description:
email to validate
required: true
type: string
- name: user_id
in: query
description:
optional user_id, defaults to current user, necessary for admins
editing other users.
required: false
type: string
responses:
200:
description:
Returns JSON describing unique=True or unique=False
schema:
id: unique_result
required:
- unique
properties:
unique:
type: boolean
description: result of unique check
400:
description: if email param is poorly defined
401:
description: if missing valid OAuth token
security:
- ServiceToken: []
"""
email = request.args.get('email')
validate_email(email)
# find matching account by email regardless of case
match = User.query.filter(func.lower(User.email) == email.lower())
if match.count() > 1:
current_app.logger.error(
'there are >1 emails that match {}'.format(email)
)
return jsonify(unique=False)
if match.count() == 1:
# If the user is the authenticated user or provided user_id,
# it still counts as unique
user_id = request.args.get('user_id')
if not user_id:
# Note the extra oauth verify step, so this method can also
# be used by unauth'd users (say during registration).
valid, req = oauth.verify_request(['email'])
if valid:
user_id = req.user.id
else:
user = current_user()
user_id = user.id if user else None
else:
user_id = check_int(user_id)
result = match.one()
if user_id != result.id:
return jsonify(unique=False)
# Look out for "masked" emails, as they'll create collisions down the road
masked = INVITE_PREFIX + email
match = User.query.filter(func.lower(User.email) == masked.lower())
if match.count():
return jsonify(unique=False)
return jsonify(unique=True)
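# Illustrative only: typical pre-registration check (hypothetical values):
#
#     GET /api/unique_email?email=someone%40example.com
#     -> {"unique": true}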
@user_api.route('/user/<int:user_id>/user_documents')
@crossdomain()
@oauth.require_oauth()
def user_documents(user_id):
"""Returns simple JSON defining user documents
Returns the list of the user's user documents.
---
tags:
- User
- User Document
operationId: get_user_documents
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: document_type
in: query
description:
optional document_type to filter results
required: false
type: string
produces:
- application/json
responses:
200:
description:
Returns the list of user documents for the requested user.
schema:
id: user_documents
properties:
user_documents:
type: array
items:
type: object
required:
- id
- user_id
- document_type
- uploaded_at
- filename
- filetype
properties:
id:
type: integer
format: int64
description: identifier for the user document
user_id:
type: integer
format: int64
description:
User identifier defining to whom the document belongs
document_type:
type: string
description:
Type of document uploaded (e.g. patient report pdf,
user avatar image, etc)
uploaded_at:
type: string
format: date-time
description:
Original UTC date-time from the moment the document was
uploaded to the portal
filename:
type: string
description: Filename of the uploaded document file
filetype:
type: string
description: Filetype of the uploaded document file
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(user_id, 'view')
doctype = request.args.get('document_type')
if doctype:
results = user.documents.filter_by(document_type=doctype)
else:
results = user.documents
return jsonify(user_documents=[d.as_json() for d in
results])
@user_api.route('/user/<int:user_id>/user_documents/<int:doc_id>')
@crossdomain()
@oauth.require_oauth()
def download_user_document(user_id, doc_id):
"""Download a user document belonging to a user
Used to download the file contents of a user document.
---
tags:
- User
- User Document
operationId: download_user_document
produces:
- application/pdf
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: doc_id
in: path
description: User Document ID
required: true
type: integer
format: int64
responses:
200:
description:
Returns the file contents of the requested user document
400:
description: if the request includes invalid data or references
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if user_id or doc_id doesn't exist
security:
- ServiceToken: []
"""
user = get_user(user_id, 'edit')
download_ud = None
for ud in user.documents:
if ud.id == doc_id:
download_ud = ud
break
if not download_ud:
abort(404, "matching user document not found")
    file_contents = None
    try:
        file_contents = download_ud.get_file_contents()
    except ValueError as e:
        abort(400, str(e))
    response = make_response(file_contents)
    response.headers["Content-Type"] = 'application/{}'.format(
        download_ud.filetype)
    response.headers["Content-Disposition"] = 'attachment; filename={}'.format(
        download_ud.filename)
return response
@user_api.route('/user/<int:user_id>/patient_report', methods=('POST',))
@crossdomain()
@oauth.require_oauth()
def upload_user_document(user_id):
"""Add a Patient Report for the user
(e.g. from WiserCare, P3P, Symptom Tracker, etc)
File must be included in the POST call, and must be a valid PDF file.
File will be stored on server using uuid as filename; file metadata
(including reference uuid) will be stored in the db.
---
tags:
- User
- User Document
- Patient Report
operationId: post_patient_report
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
properties:
file:
type: file
description: File to upload
responses:
200:
description: successful operation
schema:
id: response_ok
required:
- message
properties:
message:
type: string
description: Result, typically "ok"
400:
description: if the request includes invalid data
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if user_id doesn't exist
security:
- ServiceToken: []
"""
user = get_user(user_id, 'edit')
def posted_filename(req):
"""Return file regardless of POST convention
Depending on POST convention, filename is either the key or
the second part of the file tuple, not always available as 'file'.
:return: the posted file
"""
if not req.files or len(req.files) != 1:
abort(
400, "no file found - please POST a single file using "
"standard multipart/form-data parameters")
key = next(iter(req.files.keys())) # either 'file' or actual filename
return req.files[key]
filedata = posted_filename(request)
contributor = None
if 'Authorization' in request.headers:
token = request.headers['Authorization'].split()[1]
intervention = Intervention.query.join(Client).join(Token).filter(
and_(Token.access_token == token,
Token.client_id == Client.client_id,
Client.client_id == Intervention.client_id)).first()
else:
intervention = Intervention.query.join(Client).join(Token).filter(
and_(Token.user_id == current_user().id,
Token.client_id == Client.client_id,
Client.client_id == Intervention.client_id)).first()
if intervention:
contributor = intervention.description
data = {'user_id': user_id, 'document_type': "PatientReport",
'allowed_extensions': ['pdf'], 'contributor': contributor}
try:
doc = UserDocument.from_post(filedata, data)
except ValueError as e:
abort(400, str(e))
except OSError as e:
current_app.logger.error('patient_report post error - {}'.format(e))
abort(500, str(e))
db.session.add(doc)
db.session.commit()
auditable_event("patient report {} posted for user {}".format(
doc.uuid, user_id), user_id=current_user().id, subject_id=user.id,
context='assessment')
# This is a notifiable event; trigger any applicable notifications
data.update({"document_id": doc.id})
del data['allowed_extensions']
client_event_dispatch(event="user_document_upload", user=user, **data)
return jsonify(message="ok")
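# Illustrative only (not from the source): posting a PDF report with the
# `requests` library.  The host, user id, token and filename are all
# hypothetical:
#
#     import requests
#     with open('report.pdf', 'rb') as pdf:
#         requests.post(
#             'https://portal.example.com/api/user/10/patient_report',
#             headers={'Authorization': 'Bearer <token>'},
#             files={'file': pdf})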
@user_api.route('/user/<int:user_id>/password_reset', methods=('POST',))
@crossdomain()
@oauth.require_oauth() # for service token access, oauth must come first
@roles_required(
[ROLE.ADMIN.value, ROLE.STAFF_ADMIN.value, ROLE.STAFF.value,
ROLE.INTERVENTION_STAFF.value])
def trigger_password_reset_email(user_id):
"""Trigger a password reset email for the specified user
Allows admins, staff, etc to manually trigger password reset emails to
patients, allowing them to easily change their passwords and login without
knowing their current password or email.
---
tags:
- User
operationId: send_password_reset
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
responses:
200:
description: successful operation
schema:
id: response_ok
required:
- message
properties:
message:
type: string
description: Result, typically "ok"
400:
description: if the request includes invalid data
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
404:
description: if user_id doesn't exist
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = get_user(user_id, permission='edit')
    if '@' not in (getattr(user, 'email', '') or ''):
        abort(400, "invalid email address")
try:
with force_locale(user.locale_code):
user_manager.send_reset_password_email(user.email)
except ValueError as e:
current_app.logger.debug('failed to send reset password email to %s', user.email)
abort(400, str(e))
auditable_event(
"password reset email triggered for user {}".format(user_id),
user_id=current_user().id,
subject_id=user_id,
context='login',
)
return jsonify(message="ok")
@user_api.route('/user/<int:user_id>/table_preferences/<string:table_name>')
@crossdomain()
@oauth.require_oauth()
def get_table_preferences(user_id, table_name):
"""Returns simple JSON defining user table preferences
Returns the user's view preferences for the given table.
---
tags:
- User
operationId: get_table_preferences
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: table_name
in: path
description: Portal UI Table Name
required: true
type: string
format: int64
produces:
- application/json
responses:
200:
description:
Returns JSON of the user's table view preferences.
schema:
id: table_preferences
properties:
user_id:
type: integer
format: int64
description: TrueNTH user ID
table_name:
type: string
description: Name of table in portal UI
sort_field:
type: string
description: Field on which to sort the table
sort_order:
type: string
description: Method to use for sorting (asc or desc)
filters:
type: object
description: JSON describing filter fields and values
updated_at:
type: string
format: date-time
description: Last updated datetime
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
404:
description:
if no TablePreference found for given user_id and table_name
security:
- ServiceToken: []
"""
user = get_user(user_id, 'view')
pref = TablePreference.query.filter_by(
table_name=table_name, user_id=user.id).first()
# 404 case handled by get_user() above. Return
# empty list if no preferences yet exist.
if not pref:
return jsonify({})
return jsonify(pref.as_json())
@user_api.route(
'/user/<int:user_id>/table_preferences/<string:table_name>',
methods=('PUT', 'POST'))
@crossdomain()
@oauth.require_oauth()
def set_table_preferences(user_id, table_name):
"""Add a consent agreement for the user with named organization
Used to add a consent agreements between a user and an organization.
Assumed to have just been agreed to. Include 'expires' if
necessary, defaults to now and five years from now (both in UTC).
NB only one valid consent should be in place between a user and an
organization. Therefore, if this POST would create a second consent on the
given user / organization, the existing consent will be marked deleted.
---
tags:
- User
operationId: set_table_preferences
produces:
- application/json
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: table_name
in: path
description: Portal UI Table Name
required: true
type: string
format: int64
- in: body
name: body
schema:
id: set_preferences
properties:
sort_field:
type: string
description: Field on which to sort the table
sort_order:
type: string
description: Method to use for sorting (asc or desc)
filters:
type: object
description: JSON describing filter fields and values
responses:
200:
description:
Returns JSON of the user's table view preferences.
schema:
id: table_preferences
properties:
user_id:
type: integer
format: int64
description: TrueNTH user ID
table_name:
type: string
description: Name of table in portal UI
sort_field:
type: string
description: Field on which to sort the table
sort_order:
type: string
description: Method to use for sorting (asc or desc)
filters:
type: object
description: JSON describing filter fields and values
updated_at:
type: string
format: date-time
description: Last updated datetime
400:
description: if the request includes invalid data
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to edit requested user_id
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = get_user(user_id, 'view')
if not request.json:
abort(400, "no table preference data provided")
req = request.json
req['user_id'] = user.id
req['table_name'] = table_name
pref = TablePreference.from_json(req)
db.session.add(pref)
db.session.commit()
return jsonify(pref.as_json())
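# Illustrative only: an example PUT body persisting a sort and a filter
# for a table.  Field and filter names are hypothetical:
#
#     {"sort_field": "lastname", "sort_order": "asc",
#      "filters": {"organization": [123]}}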
@user_api.route('/user/<int:user_id>/invite', methods=('POST',))
@crossdomain()
@oauth.require_oauth() # for service token access, oauth must come first
@roles_required([ROLE.SERVICE.value])
def invite(user_id):
"""Send invite email message to given user
It is expected that the named user has the expected roles and
affiliations such as organization to determine the appropriate
email context to send.
Include query param `?preview=True`
to have the email content generated but not sent.
Only available via service token.
---
tags:
- User
operationId: user_invite
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: preview
in: query
description: Set to simply preview the message - don't send!
required: false
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
          Returns success of call (i.e. {message="okay"}), or JSON of the
          generated message if `preview` is set.
schema:
id: user_invite
properties:
sender:
type: string
description: Email message sender
recipients:
type: string
description: Email message recipients
subject:
type: string
description: Email message subject
body:
type: string
description: Email message body, includes footer
400:
description:
if given user lacks a legitimate looking email address
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
user = get_user(user_id, 'edit')
validate_email(user.email)
sender = current_app.config.get("MAIL_DEFAULT_SENDER")
org = user.first_top_organization()
org_name = org.name if org else None
name_key = UserInviteEmail_ATMA.name_key(org=org_name)
args = load_template_args(user=user)
mail = MailResource(
app_text(name_key), locale_code=user.locale_code, variables=args)
email = EmailMessage(
subject=mail.subject, body=mail.body, recipients=user.email,
sender=sender, user_id=user.id)
if request.args.get('preview'):
message = email.as_json()
else:
email.send_message()
db.session.add(email)
db.session.commit()
message = "okay"
return jsonify(message=message)
@user_api.route('/user/<int:user_id>/messages')
@crossdomain()
@oauth.require_oauth()
@roles_required(
[ROLE.ADMIN.value, ROLE.STAFF_ADMIN.value, ROLE.STAFF.value,
ROLE.CLINICIAN.value, ROLE.INTERVENTION_STAFF.value])
def get_user_messages(user_id):
"""Returns simple JSON defining user email messages
    Returns JSON of all messages where the recipient_id matches the given
    user.
---
tags:
- User
operationId: get_user_messages
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns JSON of the user's email messages.
schema:
id: user_messages
properties:
sender:
type: string
description: Email message sender
recipients:
type: string
description: Email message recipients
subject:
type: string
description: Email message subject
body:
type: string
description: Email message body
sent_at:
type: string
format: date-time
description: Datetime of when email message was sent
user_id:
type: integer
format: int64
description: TrueNTH user ID
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
- OAuth2AuthzFlow: []
"""
get_user(user_id, 'view')
messages = []
for em in EmailMessage.query.filter(
EmailMessage.recipient_id == user_id):
messages.append(em.as_json())
return jsonify(messages=messages)
@user_api.route('/user/<int:user_id>/questionnaire_bank')
@crossdomain()
@oauth.require_oauth()
def get_current_user_qb(user_id):
"""Returns JSON defining user's current QuestionnaireBank
Returns JSON of the user's current QuestionnaireBank. Date is
assumed as UTCnow, unless specific as-of date provided.
---
tags:
- User
operationId: get_current_user_qb
parameters:
- name: user_id
in: path
description: TrueNTH user ID
required: true
type: integer
format: int64
- name: research_study_id
in: query
description: research study id, defaults to 0
required: false
type: integer
- name: as_of_date
in: query
description: Optional datetime for user-specific QB (otherwise, now)
required: false
type: string
format: date-time
produces:
- application/json
responses:
200:
description:
Returns JSON of the user's current QB info
400:
description: invalid query parameters
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
from ..models.qb_status import QB_Status
user = get_user(user_id, 'view')
date = request.args.get('as_of_date')
# allow date and time info to be available
date = FHIR_datetime.parse(date) if date else datetime.utcnow()
research_study_id = int(request.args.get('research_study_id', 0))
qstats = QB_Status(
user=user, research_study_id=research_study_id, as_of_date=date)
qbd = qstats.current_qbd()
if not qbd:
qbd_json = {'questionnaire_bank': None}
else:
qbd_json = qbd.as_json()
expiry = qstats.expired_date
qbd_json['relative_expired'] = (
FHIR_datetime.as_fhir(expiry) if expiry else None)
return jsonify(qbd_json)
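# Illustrative client sketch (not part of this module): one hypothetical way a
# consumer could call the endpoint above; the base URL, user id and bearer
# token below are made-up placeholders.
def _example_fetch_current_qb():
    import requests  # assumed available in the client environment
    resp = requests.get(
        "https://example.org/api/user/42/questionnaire_bank",
        params={"research_study_id": 0},
        headers={"Authorization": "Bearer <token>"})
    # A populated response carries the QB JSON plus a 'relative_expired' field.
    return resp.json()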
|
uwcirg/true_nth_usa_portal
|
portal/views/user.py
|
Python
|
bsd-3-clause
| 79,010
|
[
"VisIt"
] |
a6eac7adcccf2bfdf6f73d8e08ac9bfa483e773c30b9173e4efadba6dae5ab4a
|
########################################################################
# This example demonstrates synaptic summation in a branched neuron.
# Copyright (C) Upinder S. Bhalla NCBS 2018
# Released under the terms of the GNU Public License V3.
########################################################################
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.widgets import Slider, Button, RadioButtons
import numpy as np
import warnings
import moose
import rdesigneur as rd
numDendSeg = 10 # Applies both to dend and to branches.
interval1 = 0.015
interval2 = 0.010
lines = []
tplot = []
axes = []
sliders = []
spikingProto = []
spikingDistrib = []
RM = 1.0
RA = 1.0
CM = 0.01
#length = 0.0005
#dia = 2e-6
runtime = 0.05
elecPlotDt = 0.0005
sliderMode = "Gbar"
rec = []
class lineWrapper():
def __init__( self ):
self.YdendLines = 0
self.Ybranch1Lines = 0
self.Ybranch2Lines = 0
class RecInfo():
def __init__(self, name, tau1 = 2e-3, inputFreq = 100.0, Ek = 0.0, geom = 500e-6 ):
self.name = name
self.Gbar = 10.0
self.tau1 = tau1
self.tau2 = 9e-3
self.Ek = Ek # Reversal potential
self.onset = 0.01
self.inputFreq = inputFreq
self.inputDuration = 0.01
self.geom = geom
self.setFunc = {'Gbar': self.setGbar, 'Onset': self.setOnset,
'Tau': self.setTau, 'Freq': self.setFreq,
'Duration': self.setDuration, 'Elec': self.setElec
}
self.getFunc = {'Gbar': (lambda x: x.Gbar),
'Onset': lambda x: 1000*x.onset,
'Tau': self.getTau,
'Freq': lambda x: x.inputFreq,
'Duration': lambda x: 1000*x.inputDuration,
'Elec': lambda x: x.geom*1e6 }
def setGbar( self, val ):
self.Gbar = val
#print "########### GBAR = ", val
updateDisplay()
def setOnset( self, val ):
self.onset = val/1000.0
updateDisplay()
def setFreq( self, val ):
self.inputFreq = val
updateDisplay()
def setDuration( self, val ):
self.inputDuration = val/1000.0
updateDisplay()
def setElec( self, val ): #messy because we set geom to handle both
        # length and dia. The correct values are picked up based on the rec
        # index in the field assignment during construction
self.geom = val/1e6
updateDisplay()
    def setTau( self, val ): # messy because one slider sets tau1, tau2 or Ek, depending on the channel.
#print self.name, val
if self.name == 'glu_Jn':
self.tau1 = val / 1000.0
elif self.name == 'glu_Br1':
self.tau2 = val / 1000.0
elif self.name == 'glu_Br2':
self.Ek = val / 1000.0
elif self.name == 'gaba_Jn':
self.tau1 = val / 1000.0
elif self.name == 'gaba_Br1':
self.tau2 = val / 1000.0
elif self.name == 'gaba_Br2':
self.Ek = val / 1000.0
updateDisplay()
    def getTau( self, dummy ): # messy because it reads back tau1, tau2 or Ek, depending on the channel.
#print self.name
if self.name == 'glu_Jn':
return self.tau1 * 1000
elif self.name == 'glu_Br1':
return self.tau2 * 1000
elif self.name == 'glu_Br2':
return self.Ek * 1000
elif self.name == 'gaba_Jn':
return self.tau1 * 1000
elif self.name == 'gaba_Br1':
return self.tau2 * 1000
elif self.name == 'gaba_Br2':
return self.Ek * 1000
def set( self, val ):
if sliderMode in self.setFunc:
self.setFunc[ sliderMode ]( val )
def get( self ):
if sliderMode in self.getFunc:
return self.getFunc[ sliderMode ](self)
return 0.0
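# Illustrative aside (not in the original tutorial): RecInfo's setFunc/getFunc
# dictionaries are a dispatch-table pattern, letting one slider callback serve
# whichever parameter the current sliderMode selects. The same idea in miniature:
def _dispatchDemo( mode, val ):
    state = { 'Gbar': 10.0, 'Onset': 0.01 }
    setters = { k: (lambda v, k=k: state.__setitem__( k, v )) for k in state }
    if mode in setters:
        setters[mode]( val )    # analogous to RecInfo.set() under sliderMode
    return state[mode]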
def makeYmodel():
length = rec[0].geom
dia = rec[3].geom
segLen = length / numDendSeg
cp = [['make_glu()', 'glu'],['make_GABA()', 'GABA']]
cp.extend( spikingProto )
cd = [
['glu', 'dend9', 'Gbar', str(rec[0].Gbar)],
['glu', 'branch1_9', 'Gbar', str(rec[1].Gbar)],
['glu', 'branch2_9', 'Gbar', str(rec[2].Gbar)],
['GABA', 'dend9', 'Gbar', str(rec[3].Gbar)],
['GABA', 'branch1_9', 'Gbar', str(rec[4].Gbar)],
['GABA', 'branch2_9', 'Gbar', str(rec[5].Gbar)]
]
cd.extend( spikingDistrib )
rdes = rd.rdesigneur(
elecPlotDt = elecPlotDt,
stealCellFromLibrary = True,
verbose = False,
#chanProto = [['make_glu()', 'glu'],['make_GABA()', 'GABA']],
chanProto = cp,
# cellProto syntax: ['ballAndStick', 'name', somaDia, somaLength, dendDia, dendLength, numDendSegments ]
# The numerical arguments are all optional
cellProto =
[['ballAndStick', 'cellBase', dia, segLen, dia, length, numDendSeg]],
passiveDistrib = [[ '#', 'RM', str(RM), 'CM', str(CM), 'RA', str(RA) ]],
chanDistrib = cd,
#chanDistrib = [
#['glu', 'dend9', 'Gbar', str(rec[0].Gbar)],
#['glu', 'branch1_9', 'Gbar', str(rec[1].Gbar)],
#['glu', 'branch2_9', 'Gbar', str(rec[2].Gbar)],
#['GABA', 'dend9', 'Gbar', str(rec[3].Gbar)],
#['GABA', 'branch1_9', 'Gbar', str(rec[4].Gbar)],
#['GABA', 'branch2_9', 'Gbar', str(rec[5].Gbar)],
#],
stimList = [
['dend9', '1','glu', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( rec[0].inputFreq, rec[0].onset, rec[0].onset + rec[0].inputDuration) ],
['branch1_9', '1','glu', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( rec[1].inputFreq, rec[1].onset, rec[1].onset + rec[1].inputDuration) ],
['branch2_9', '1','glu', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( rec[2].inputFreq, rec[2].onset, rec[2].onset + rec[2].inputDuration) ],
['dend9', '1','GABA', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( rec[3].inputFreq, rec[3].onset, rec[3].onset + rec[3].inputDuration) ],
['branch1_9', '1','GABA', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( rec[4].inputFreq, rec[4].onset, rec[4].onset + rec[4].inputDuration) ],
['branch2_9', '1','GABA', 'periodicsyn', '{}*(t>{:.3f} && t<{:.3f})'.format( rec[5].inputFreq, rec[5].onset, rec[5].onset + rec[5].inputDuration) ],
#['dend9,branch1_9,branch2_9', '1','glu', 'periodicsyn', '100*(t>0.01 && t<0.02)'],
],
plotList = [
['soma,dend9,branch1_9,branch2_9', '1','.', 'Vm'],
],
)
# Modify some library values based on the slider controls
glu = moose.element( '/library/glu' )
gaba = moose.element( '/library/GABA' )
glu.tau1 = rec[0].tau1
glu.tau2 = rec[1].tau2
glu.Ek = rec[2].Ek
gaba.tau1 = rec[3].tau1
gaba.tau2 = rec[4].tau2
gaba.Ek = rec[5].Ek
# Build the arms of the Y for a branching cell.
pa = moose.element( '/library/cellBase' )
x1 = length
x2 = length
y1 = 0.0
y2 = 0.0
dx1 = rec[1].geom / ( numDendSeg * np.sqrt(2.0) )
dx2 = rec[2].geom / ( numDendSeg * np.sqrt(2.0) )
dia1 = rec[4].geom
dia2 = rec[5].geom
dy1 = dx1
dy2 = -dx2
prevc1 = moose.element( '/library/cellBase/dend{}'.format( numDendSeg-1 ) )
prevc2 = prevc1
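    # Grow the two branches segment by segment, chaining axial<->raxial
    # messages so each new compartment hangs electrically off the previous one.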
for i in range( numDendSeg ):
c1 = rd.buildCompt( pa, 'branch1_{}'.format(i), RM = RM, CM = CM, RA = RA, dia = dia1, x=x1, y=y1, dx = dx1, dy = dy1 )
c2 = rd.buildCompt( pa, 'branch2_{}'.format(i), RM = RM, CM = CM, RA = RA, dia = dia2, x=x2, y=y2, dx = dx2, dy = dy2 )
moose.connect( prevc1, 'axial', c1, 'raxial' )
moose.connect( prevc2, 'axial', c2, 'raxial' )
prevc1 = c1
prevc2 = c2
x1 += dx1
y1 += dy1
x2 += dx2
y2 += dy2
rdes.elecid.buildSegmentTree() # rebuild it as we've added the branches
rdes.buildModel()
# Permit fast spiking input.
#for i in moose.wildcardFind( '/model/##[ISA=RandSpike]' ):
for i in moose.wildcardFind( '/model/elec/#/#/#/#/synInput_rs' ):
#print i.path, i.refractT
i.refractT = 0.002
'''
moose.le( '/model/elec/dend9/glu/sh/synapse/synInput_rs' )
moose.showmsg( '/model/elec/dend9/glu/sh/synapse/synInput_rs' )
moose.showfield( '/model/stims/stim0' )
'''
def main():
global rec
rec.append( RecInfo( 'glu_Jn' ) )
rec.append( RecInfo( 'glu_Br1' ) )
rec.append( RecInfo( 'glu_Br2' ) )
rec.append( RecInfo( 'gaba_Jn', tau1 = 4e-3, inputFreq =100.0, Ek=-0.07, geom = 2e-6 ) )
rec.append( RecInfo( 'gaba_Br1', tau1 = 4e-3, inputFreq=100.0, Ek=-0.07, geom = 2e-6 ) )
rec.append( RecInfo( 'gaba_Br2', tau1 = 4e-3, inputFreq=100.0, Ek=-0.07, geom = 2e-6 ) )
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
makeDisplay()
quit()
class stimToggle():
def __init__( self, toggle, ax ):
self.duration = 1
self.toggle = toggle
self.ax = ax
def click( self, event ):
global spikingProto
global spikingDistrib
if self.duration < 0.5:
self.duration = 1.0
self.toggle.label.set_text( "Spiking off" )
self.toggle.color = "yellow"
self.toggle.hovercolor = "yellow"
spikingProto = []
spikingDistrib = []
else:
self.duration = 0.001
self.toggle.label.set_text( "Spiking on" )
self.toggle.color = "orange"
self.toggle.hovercolor = "orange"
spikingProto = [['make_Na()', 'Na'], ['make_K_DR()', 'K_DR']]
spikingDistrib = [['Na', 'soma', 'Gbar', '300' ],['K_DR', 'soma', 'Gbar', '250' ]]
updateDisplay()
def printSomaVm():
print("This is somaVm" )
def updateDisplay():
makeYmodel()
tabvec = moose.wildcardFind( '/model/graphs/plot#' )
moose.element( '/model/elec/' ).name = 'Y'
vecYdend = moose.wildcardFind( '/model/Y/soma,/model/Y/dend#' )
vecYbranch1 = moose.wildcardFind( '/model/Y/branch1#' )
vecYbranch2 = moose.wildcardFind( '/model/Y/branch2#' )
moose.reinit()
dt = interval1
currtime = 0.0
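    # Step the simulation in interval-sized chunks, snapshotting Vm along the
    # soma, dendrite and both branches into each pre-built line per step.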
for i in lines:
moose.start( dt )
currtime += dt
#print "############## NumDendData = ", len( vecYdend )
i.YdendLines.set_ydata( [v.Vm*1000 for v in vecYdend] )
i.Ybranch1Lines.set_ydata( [v.Vm*1000 for v in vecYbranch1] )
i.Ybranch2Lines.set_ydata( [v.Vm*1000 for v in vecYbranch2] )
dt = interval2
moose.start( runtime - currtime )
#print "############## len (tabvec) = ", len( tabvec[0].vector )
for i, tab in zip( tplot, tabvec ):
i.set_ydata( tab.vector * 1000 )
moose.delete( '/model' )
moose.delete( '/library' )
def doQuit( event ):
quit()
def makeDisplay():
global lines
global tplot
global axes
global sliders
img = mpimg.imread( 'synapticSummation.png' )
fig = plt.figure( figsize=(10,12) )
png = fig.add_subplot(321)
imgplot = plt.imshow( img )
plt.axis('off')
ax1 = fig.add_subplot(322)
plt.ylabel( 'Vm (mV)' )
plt.ylim( -80, -20 )
plt.xlabel( 'time (ms)' )
plt.title( "Membrane potential vs time at 4 positions." )
t = np.arange( 0.0, runtime + elecPlotDt / 2.0, elecPlotDt ) * 1000 #ms
#print "############## len t = ", len(t), " numDendSeg = " , numDendSeg
for i,col,name in zip( range( 4 ), ['b-', 'g-', 'r-', 'm-' ], ['soma', 'jn', 'br1', 'br2'] ):
ln, = ax1.plot( t, np.zeros(len(t)), col, label='pos= ' + name )
tplot.append(ln)
plt.legend()
ax2 = fig.add_subplot(312)
plt.ylabel( 'Vm (mV)' )
plt.ylim( -70, 0.0 )
plt.xlabel( 'Position (microns)' )
#ax2.autoscale( enable = True, axis = 'y' )
plt.title( "Membrane potential as a function of position along cell." )
#for i,col in zip( range( 5 ), ['k', 'b', 'g', 'y', 'm' ] ):
lt = interval1
for i,col in zip( range( 3 ), ['g', 'b', 'k' ] ):
lw = lineWrapper()
lw.YdendLines, = ax2.plot( np.arange(0, numDendSeg+1, 1 ),
np.zeros(numDendSeg+1), col + '-', label = str(lt*1000) + " ms" )
lw.Ybranch1Lines, = ax2.plot( np.arange(0, numDendSeg, 1) + numDendSeg + 1,
np.zeros(numDendSeg), col + ':' )
lw.Ybranch2Lines, = ax2.plot( np.arange(0, numDendSeg, 1) + numDendSeg + 1,
            np.zeros(numDendSeg), col + '.' )
lines.append( lw )
lt += interval2
plt.legend()
ax = fig.add_subplot(313)
plt.axis('off')
axcolor = 'palegreen'
axStim = plt.axes( [0.02,0.005, 0.20,0.03], facecolor='green' )
axReset = plt.axes( [0.25,0.005, 0.30,0.03], facecolor='blue' )
axQuit = plt.axes( [0.60,0.005, 0.30,0.03], facecolor='blue' )
for x in np.arange( 0.05, 0.31, 0.05 ):
axes.append( plt.axes( [0.25, x, 0.65, 0.03], facecolor=axcolor ) )
#aInit = Slider( axAinit, 'A init conc', 0, 10, valinit=1.0, valstep=0.2)
rax = plt.axes([0.02, 0.05, 0.10, 0.28], facecolor="#EEEFFF")
mode = RadioButtons(rax, ('Gbar', 'Onset', 'Tau', 'Duration', 'Freq', 'Elec'))
stim = Button( axStim, 'Spiking off', color = 'yellow' )
stimObj = stimToggle( stim, axStim )
reset = Button( axReset, 'Reset', color = 'cyan' )
q = Button( axQuit, 'Quit', color = 'pink' )
for i in range( len( axes ) ):
sliders.append( Slider( axes[i], rec[i].name+" Gbar", 0.01, 100, valinit = 10) )
sliders[-1].on_changed( rec[i].set )
'''
sliders[3].facecolor = "red"
sliders[3].label.set_text( "abbaHippo")
print( "{}".format( sliders[3].poly ) )
sliders[3].poly.set_color( "red")
sliders[3].valmax = 1234
sliders[3].ax.set_xlim( 0, 1234.0)
'''
def resetParms( event ):
for i in sliders:
i.reset()
mh = modeHandler()
mode.on_clicked( mh.setMode )
stim.on_clicked( stimObj.click )
reset.on_clicked( resetParms )
q.on_clicked( doQuit )
updateDisplay()
plt.show()
class modeInfo():
def __init__( self, name = "glu_jn", fg = "blue",
vmin = 0.001, vmax = 100.0, default = 20.0 ):
self.name = name
self.fg = fg
self.vmin = vmin
self.vmax = vmax
self.default = default
self.current = default
class modeHandler():
def __init__(self):
gbarMode = [ modeInfo( name = i.name ) for i in rec ]
onsetMode = [ modeInfo( name = i.name, fg = "cyan",
vmin = 0.0, vmax = 40.0, default = 10.0 ) for i in rec ]
tauMode = [ modeInfo( name = 'Tau1 glu', fg = "yellow",
vmin = 0.1, vmax = 20.0, default = 2.0 ),
modeInfo( name = 'Tau2 glu', fg = "yellow",
vmin = 0.1, vmax = 20.0, default = 9.0 ),
modeInfo( name = 'Erev glu', fg = "yellow",
vmin = -40.0, vmax = 40.0, default = 0.0 ),
modeInfo( name = 'Tau1 GABA', fg = "yellow",
vmin = 0.1, vmax = 20.0, default = 4.0 ),
modeInfo( name = 'Tau2 GABA', fg = "yellow",
vmin = 0.1, vmax = 20.0, default = 9.0 ),
modeInfo( name = 'Erev GABA', fg = "yellow",
vmin = -80.0, vmax = -40.0, default = -65.0 )
]
freqMode = [ modeInfo( name = i.name, fg = "pink", vmin = 0.0,
vmax = 200.0, default = 100.0 ) for i in rec ]
durationMode = [ modeInfo( name = i.name, fg = "green", vmin = 0.0,
vmax = 40.0, default = 10.0 ) for i in rec ]
elecMode = [
modeInfo( name = 'Dend Length', fg = "maroon",
vmin = 50, vmax = 2000, default = 500 ),
modeInfo( name = 'Br1 Length', fg = "maroon",
vmin = 50, vmax = 2000, default = 500 ),
modeInfo( name = 'Br2 Length', fg = "maroon",
vmin = 50, vmax = 2000, default = 500 ),
modeInfo( name = 'Dend Dia', fg = "maroon",
vmin = 0.1, vmax = 10.0, default = 2.0 ),
modeInfo( name = 'Br1 Dia', fg = "maroon",
vmin = 0.1, vmax = 10.0, default = 1.26 ),
modeInfo( name = 'Br2 Dia', fg = "maroon",
vmin = 0.1, vmax = 10.0, default = 1.26 )
]
self.labels = { "Gbar": gbarMode, "Onset": onsetMode,
"Freq": freqMode, "Duration": durationMode,
"Tau": tauMode, "Elec": elecMode }
def setMode( self, label ):
global sliderMode
if label in self.labels:
modes = self.labels[label]
sliderMode = label
for s,m,r in zip( sliders, modes, rec ):
if label == "Tau" or label == "Elec":
s.label.set_text( m.name )
else:
s.label.set_text( label + " " + m.name )
s.valmin = m.vmin
s.valmax = m.vmax
s.poly.set_color( m.fg )
s.ax.set_xlim( m.vmin, m.vmax )
#print m.current, r.get()
s.set_val( r.get() )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
BhallaLab/moose-examples
|
tutorials/Electrophys/ephys3_synaptic_summation.py
|
Python
|
gpl-2.0
| 17,152
|
[
"MOOSE",
"NEURON"
] |
56d17e4035398e00f2a2d78285498c64fef822500d4a62995dbcda82f282f7ee
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from wideeditor import WideEditor
from customeditor import CustomEditor
from camelot.view.art import Icon
class RichTextEditor(CustomEditor, WideEditor):
def __init__(self, parent=None, **kwargs):
CustomEditor.__init__(self, parent)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setSpacing(0)
self.layout.setMargin(0)
self.setSizePolicy( QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding )
class CustomTextEdit(QtGui.QTextEdit):
"""A TextEdit editor that sends editingFinished events when the text was changed
and focus is lost
"""
editingFinished = QtCore.pyqtSignal()
def __init__(self, parent):
super(CustomTextEdit, self).__init__(parent)
self._changed = False
self.setTabChangesFocus( True )
self.textChanged.connect( self._handle_text_changed )
def focusOutEvent(self, event):
if self._changed:
self.editingFinished.emit()
super(CustomTextEdit, self).focusOutEvent( event )
def _handle_text_changed(self):
self._changed = True
def setTextChanged(self, state=True):
self._changed = state
def setHtml(self, html):
QtGui.QTextEdit.setHtml(self, html)
self._changed = False
self.textedit = CustomTextEdit(self)
self.textedit.editingFinished.connect( self.emit_editing_finished )
self.textedit.setAcceptRichText(True)
self.initButtons()
# #
# # Layout
# #
# self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.textedit)
self.setLayout(self.layout)
#
# Format
#
self.textedit.setFontWeight(QtGui.QFont.Normal)
self.textedit.setFontItalic(False)
self.textedit.setFontUnderline(False)
#self.textedit.setFocus(Qt.OtherFocusReason)
self.update_alignment()
self.textedit.currentCharFormatChanged.connect(self.update_format)
self.textedit.cursorPositionChanged.connect(self.update_text)
@QtCore.pyqtSlot()
def emit_editing_finished(self):
if self.textedit._changed:
self.editingFinished.emit()
def set_editable(self, editable):
        self.textedit.setReadOnly(not editable)
        self.toolbar.setShown(bool(editable))
def set_field_attributes(self, editable=True, background_color=None, **kwargs):
self.set_editable(editable)
self.set_background_color(background_color)
def initButtons(self):
self.toolbar = QtGui.QToolBar(self)
self.toolbar.setContentsMargins(0, 0, 0, 0)
self.bold_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/format-text-bold.png').getQIcon()
self.bold_button.setIcon(icon)
self.bold_button.setAutoRaise(True)
self.bold_button.setCheckable(True)
self.bold_button.setFocusPolicy( Qt.ClickFocus )
self.bold_button.setMaximumSize(QtCore.QSize(20, 20))
self.bold_button.setShortcut(QtGui.QKeySequence('Ctrl+B'))
self.bold_button.clicked.connect(self.set_bold)
self.italic_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/format-text-italic.png').getQIcon()
self.italic_button.setIcon(icon)
self.italic_button.setAutoRaise(True)
self.italic_button.setCheckable(True)
self.italic_button.setFocusPolicy( Qt.ClickFocus )
self.italic_button.setMaximumSize(QtCore.QSize(20, 20))
self.italic_button.setShortcut(QtGui.QKeySequence('Ctrl+I'))
self.italic_button.clicked.connect(self.set_italic)
self.underline_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/format-text-underline.png').getQIcon()
self.underline_button.setIcon(icon)
self.underline_button.setAutoRaise(True)
self.underline_button.setCheckable(True)
self.underline_button.setFocusPolicy( Qt.ClickFocus )
self.underline_button.setMaximumSize(QtCore.QSize(20, 20))
self.underline_button.setShortcut(QtGui.QKeySequence('Ctrl+U'))
self.underline_button.clicked.connect(self.set_underline)
self.copy_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/edit-copy.png').getQIcon()
self.copy_button.setIcon(icon)
self.copy_button.setAutoRaise(True)
self.copy_button.setMaximumSize(QtCore.QSize(20, 20))
self.copy_button.setFocusPolicy( Qt.ClickFocus )
self.copy_button.clicked.connect(self.textedit.copy)
self.cut_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/edit-cut.png').getQIcon()
self.cut_button.setIcon(icon)
self.cut_button.setAutoRaise(True)
self.cut_button.setMaximumSize(QtCore.QSize(20, 20))
self.cut_button.clicked.connect(self.textedit.cut)
self.cut_button.setFocusPolicy( Qt.ClickFocus )
self.paste_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/edit-paste.png').getQIcon()
self.paste_button.setIcon(icon)
self.paste_button.setAutoRaise(True)
self.paste_button.setMaximumSize(QtCore.QSize(20, 20))
self.paste_button.setFocusPolicy( Qt.ClickFocus )
self.paste_button.clicked.connect(self.textedit.paste)
self.alignleft_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/format-justify-left.png').getQIcon()
self.alignleft_button.setIcon(icon)
self.alignleft_button.setAutoRaise(True)
self.alignleft_button.setCheckable(True)
self.alignleft_button.setMaximumSize(QtCore.QSize(20, 20))
self.alignleft_button.setFocusPolicy( Qt.ClickFocus )
self.alignleft_button.clicked.connect(self.set_alignleft)
self.aligncenter_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/format-justify-center.png').getQIcon()
self.aligncenter_button.setIcon(icon)
self.aligncenter_button.setAutoRaise(True)
self.aligncenter_button.setCheckable(True)
self.aligncenter_button.setMaximumSize(QtCore.QSize(20, 20))
self.aligncenter_button.setFocusPolicy( Qt.ClickFocus )
self.aligncenter_button.clicked.connect(self.set_aligncenter)
self.alignright_button = QtGui.QToolButton(self)
icon = Icon('tango/16x16/actions/format-justify-right.png').getQIcon()
self.alignright_button.setIcon(icon)
self.alignright_button.setAutoRaise(True)
self.alignright_button.setCheckable(True)
self.alignright_button.setMaximumSize(QtCore.QSize(20, 20))
self.alignright_button.setFocusPolicy( Qt.ClickFocus )
self.alignright_button.clicked.connect(self.set_alignright)
self.color_button = QtGui.QToolButton(self)
self.color_button.setAutoRaise(True)
self.color_button.setMaximumSize(QtCore.QSize(20, 20))
self.color_button.setFocusPolicy( Qt.ClickFocus )
self.color_button.clicked.connect(self.set_color)
self.toolbar.addWidget(self.copy_button)
self.toolbar.addWidget(self.cut_button)
self.toolbar.addWidget(self.paste_button)
self.toolbar.addSeparator()
self.toolbar.addWidget(self.bold_button)
self.toolbar.addWidget(self.italic_button)
self.toolbar.addWidget(self.underline_button)
self.toolbar.addSeparator()
self.toolbar.addWidget(self.alignleft_button)
self.toolbar.addWidget(self.aligncenter_button)
self.toolbar.addWidget(self.alignright_button)
self.toolbar.addSeparator()
self.toolbar.addWidget(self.color_button)
#
# Layout
#
self.layout.addWidget(self.toolbar)
#
# Button methods
#
def set_bold(self):
if self.bold_button.isChecked():
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setFontWeight(QtGui.QFont.Bold)
else:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setFontWeight(QtGui.QFont.Normal)
    def set_italic(self, checked):
        if checked:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setFontItalic(True)
else:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setFontItalic(False)
    def set_underline(self, checked):
        if checked:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setFontUnderline(True)
else:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setFontUnderline(False)
    def set_alignleft(self, checked):
        if checked:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setAlignment(Qt.AlignLeft)
self.update_alignment(Qt.AlignLeft)
    def set_aligncenter(self, checked):
        if checked:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setAlignment(Qt.AlignCenter)
self.update_alignment(Qt.AlignCenter)
    def set_alignright(self, checked):
        if checked:
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setAlignment(Qt.AlignRight)
self.update_alignment(Qt.AlignRight)
def update_alignment(self, al=None):
if al is None:
al = self.textedit.alignment()
if al == Qt.AlignLeft:
self.alignleft_button.setChecked(True)
self.aligncenter_button.setChecked(False)
self.alignright_button.setChecked(False)
elif al == Qt.AlignCenter:
self.aligncenter_button.setChecked(True)
self.alignleft_button.setChecked(False)
self.alignright_button.setChecked(False)
elif al == Qt.AlignRight:
self.alignright_button.setChecked(True)
self.alignleft_button.setChecked(False)
self.aligncenter_button.setChecked(False)
def set_color(self):
color = QtGui.QColorDialog.getColor(self.textedit.textColor())
if color.isValid():
self.textedit.setFocus(Qt.OtherFocusReason)
self.textedit.setTextColor(color)
pixmap = QtGui.QPixmap(16, 16)
pixmap.fill(color)
self.color_button.setIcon(QtGui.QIcon(pixmap))
def update_color(self):
color = self.textedit.textColor()
pixmap = QtGui.QPixmap(16, 16)
pixmap.fill(color)
self.color_button.setIcon(QtGui.QIcon(pixmap))
def update_format(self, format):
font = format.font()
self.bold_button.setChecked(font.bold())
self.italic_button.setChecked(font.italic())
self.underline_button.setChecked(font.underline())
self.update_alignment(self.textedit.alignment())
def update_text(self):
self.update_alignment()
self.update_color()
def get_value(self):
from xml.dom import minidom
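        # Keep only the <body> children of the QTextEdit HTML so the stored
        # value omits Qt's surrounding <html>/<head> boilerplate.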
tree = minidom.parseString(unicode(self.textedit.toHtml()).encode('utf-8'))
        body = tree.getElementsByTagName('html')[0].getElementsByTagName('body')[0]
        value = u''.join(node.toxml() for node in body.childNodes)
return CustomEditor.get_value(self) or value
def set_value(self, value):
value = CustomEditor.set_value(self, value)
        if value is not None:
            if unicode(self.textedit.toHtml()) != value:
self.update_alignment()
self.textedit.setHtml(value)
self.update_color()
else:
self.textedit.clear()
|
kurtraschke/camelot
|
camelot/view/controls/editors/richtexteditor.py
|
Python
|
gpl-2.0
| 12,882
|
[
"VisIt"
] |
3e276677cd46ac547b1606c91489a9f2454e27fc57db62dee941c749fee0762d
|
"""Main entry point for distributed next-gen sequencing pipelines.
Handles running the full pipeline based on instructions
"""
from __future__ import print_function
from collections import defaultdict
import copy
import os
import sys
import resource
import tempfile
import toolz as tz
from bcbio import log, heterogeneity, hla, structural, utils
from bcbio.cwl.inspect import initialize_watcher
from bcbio.distributed import prun
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.log import logger, DEFAULT_LOG_DIR
from bcbio.ngsalign import alignprep
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import (archive, config_utils, disambiguate, region,
run_info, qcsummary, rnaseq)
from bcbio.provenance import profile, system
from bcbio.variation import (ensemble, genotype, population, validate, joint,
peddy)
from bcbio.chipseq import peaks, atac
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None,
parallel=None, workflow=None):
"""Run variant analysis, handling command line options.
"""
# Set environment to standard to use periods for decimals and avoid localization
locale_to_use = utils.get_locale()
os.environ["LC_ALL"] = locale_to_use
os.environ["LC"] = locale_to_use
os.environ["LANG"] = locale_to_use
workdir = utils.safe_makedir(os.path.abspath(workdir))
os.chdir(workdir)
config, config_file = config_utils.load_system_config(config_file, workdir)
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
logger.info(f"System YAML configuration: {os.path.abspath(config_file)}.")
logger.info(f"Locale set to {locale_to_use}.")
if config.get("log_dir", None) is None:
config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR)
if parallel["type"] in ["local", "clusterk"]:
_setup_resources()
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
elif parallel["type"] == "ipython":
assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)"
if parallel["scheduler"] != "sge":
assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)"
elif not parallel["queue"]:
parallel["queue"] = ""
_run_toplevel(config, config_file, workdir, parallel,
fc_dir, run_info_yaml)
else:
raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def _setup_resources():
"""Attempt to increase resource limits up to hard limits.
This allows us to avoid out of file handle limits where we can
move beyond the soft limit up to the hard limit.
"""
target_procs = 10240
cur_proc, max_proc = resource.getrlimit(resource.RLIMIT_NPROC)
target_proc = min(max_proc, target_procs) if max_proc > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NPROC, (max(cur_proc, target_proc), max_proc))
cur_hdls, max_hdls = resource.getrlimit(resource.RLIMIT_NOFILE)
target_hdls = min(max_hdls, target_procs) if max_hdls > 0 else target_procs
resource.setrlimit(resource.RLIMIT_NOFILE, (max(cur_hdls, target_hdls), max_hdls))
def _run_toplevel(config, config_file, work_dir, parallel,
fc_dir=None, run_info_yaml=None):
"""
Run toplevel analysis, processing a set of input files.
config_file -- Main YAML configuration file with system parameters
fc_dir -- Directory of fastq files to process
run_info_yaml -- YAML configuration file specifying inputs to process
"""
dirs = run_info.setup_directories(work_dir, fc_dir, config, config_file)
config_file = os.path.join(dirs["config"], os.path.basename(config_file))
pipelines, config = _pair_samples_with_pipelines(run_info_yaml, config)
system.write_info(dirs, parallel, config)
with tx_tmpdir(config if parallel.get("type") == "local" else None) as tmpdir:
tempfile.tempdir = tmpdir
for pipeline, samples in pipelines.items():
for xs in pipeline(config, run_info_yaml, parallel, dirs, samples):
pass
# ## Generic pipeline framework
def _wres(parallel, progs, fresources=None, ensure_mem=None):
"""Add resource information to the parallel environment on required programs and files.
Enables spinning up required machines and operating in non-shared filesystem
environments.
progs -- Third party tools used in processing
fresources -- Required file-based resources needed. These will be transferred on non-shared
filesystems.
ensure_mem -- Dictionary of required minimum memory for programs used. Ensures
enough memory gets allocated on low-core machines.
"""
parallel = copy.deepcopy(parallel)
parallel["progs"] = progs
if fresources:
parallel["fresources"] = fresources
if ensure_mem:
parallel["ensure_mem"] = ensure_mem
return parallel
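# For example, a caller wanting GATK available with a 4Gb memory floor might
# request (illustrative): _wres(parallel, ["gatk"], ensure_mem={"gatk": 4})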
def variant2pipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
# Assign GATK supplied memory if required for post-process recalibration
align_programs = ["aligner", "samtools", "sambamba"]
if any(tz.get_in(["algorithm", "recalibrate"], utils.to_single_data(d)) in [True, "gatk"] for d in samples):
align_programs.append("gatk")
with prun.start(_wres(parallel, align_programs,
(["reference", "fasta"], ["reference", "aligner"], ["files"])),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment preparation", dirs):
samples = run_parallel("prep_align_inputs", samples)
samples = run_parallel("disambiguate_split", [samples])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
samples = disambiguate.resolve(samples, run_parallel)
samples = alignprep.merge_split_alignments(samples, run_parallel)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = run_parallel("calculate_sv_bins", [samples])
samples = run_parallel("calculate_sv_coverage", samples)
samples = run_parallel("normalize_sv_coverage", [samples])
samples = region.clean_sample_data(samples)
with profile.report("hla typing", dirs):
samples = hla.run(samples, run_parallel)
## Variant calling on sub-regions of the input file (full cluster)
with prun.start(_wres(parallel, ["gatk", "picard", "variantcaller"]),
samples, config, dirs, "full",
multiplier=region.get_max_counts(samples), max_multicore=1) as run_parallel:
with profile.report("alignment post-processing", dirs):
samples = region.parallel_prep_region(samples, run_parallel)
with profile.report("variant calling", dirs):
samples = genotype.parallel_variantcall_region(samples, run_parallel)
with profile.report("joint squaring off/backfilling", dirs):
samples = joint.square_off(samples, run_parallel)
## Finalize variants, BAMs and population databases (per-sample multicore cluster)
with prun.start(_wres(parallel, ["gatk", "gatk-vqsr", "snpeff", "bcbio_variation",
"gemini", "samtools", "fastqc", "sambamba",
"bcbio-variation-recall", "qsignature",
"svcaller", "kraken", "preseq"]),
samples, config, dirs, "multicore2",
multiplier=structural.parallel_multiplier(samples)) as run_parallel:
with profile.report("variant post-processing", dirs):
samples = run_parallel("postprocess_variants", samples)
samples = run_parallel("split_variants_by_sample", samples)
with profile.report("prepped BAM merging", dirs):
samples = region.delayed_bamprep_merge(samples, run_parallel)
with profile.report("validation", dirs):
samples = run_parallel("compare_to_rm", samples)
samples = genotype.combine_multiple_callers(samples)
with profile.report("ensemble calling", dirs):
samples = ensemble.combine_calls_parallel(samples, run_parallel)
with profile.report("validation summary", dirs):
samples = validate.summarize_grading(samples)
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "initial")
with profile.report("structural variation", dirs):
samples = structural.run(samples, run_parallel, "standard")
with profile.report("structural variation ensemble", dirs):
samples = structural.run(samples, run_parallel, "ensemble")
with profile.report("structural variation validation", dirs):
samples = run_parallel("validate_sv", samples)
with profile.report("heterogeneity", dirs):
samples = heterogeneity.run(samples, run_parallel)
with profile.report("population database", dirs):
samples = population.prep_db_parallel(samples, run_parallel)
# after SV calling and SNV merging
with profile.report("create CNV PON", dirs):
samples = structural.create_cnv_pon(samples)
with profile.report("peddy check", dirs):
samples = peddy.run_peddy_parallel(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("archive", dirs):
samples = archive.compress(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def _debug_samples(i, samples):
print("---", i, len(samples))
for sample in (utils.to_single_data(x) for x in samples):
print(" ", sample["description"], sample.get("region"), \
utils.get_in(sample, ("config", "algorithm", "variantcaller")), \
utils.get_in(sample, ("config", "algorithm", "jointcaller")), \
utils.get_in(sample, ("metadata", "batch")), \
[x.get("variantcaller") for x in sample.get("variants", [])], \
sample.get("work_bam"), \
sample.get("vrn_file"))
def standardpipeline(config, run_info_yaml, parallel, dirs, samples):
## Alignment and preparation requiring the entire input file (multicore cluster)
with prun.start(_wres(parallel, ["aligner", "samtools", "sambamba"]),
samples, config, dirs, "multicore") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with profile.report("callable regions", dirs):
samples = run_parallel("prep_samples", [samples])
samples = run_parallel("postprocess_alignment", samples)
samples = run_parallel("combine_sample_regions", [samples])
samples = region.clean_sample_data(samples)
## Quality control
with prun.start(_wres(parallel, ["fastqc", "qsignature", "kraken", "gatk", "samtools", "preseq"]),
samples, config, dirs, "multicore2") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def rnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["samtools", "cufflinks"]),
samples, config, dirs, "rnaseqcount") as run_parallel:
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
with profile.report("transcript assembly", dirs):
samples = rnaseq.assemble_transcripts(run_parallel, samples)
with profile.report("estimate expression (threaded)", dirs):
samples = rnaseq.quantitate_expression_parallel(samples, run_parallel)
with prun.start(_wres(parallel, ["dexseq", "express"]), samples, config,
dirs, "rnaseqcount-singlethread", max_multicore=1) as run_parallel:
with profile.report("estimate expression (single threaded)", dirs):
samples = rnaseq.quantitate_expression_noparallel(samples, run_parallel)
samples = rnaseq.combine_files(samples)
with prun.start(_wres(parallel, ["gatk", "vardict"]), samples, config,
dirs, "rnaseq-variation") as run_parallel:
with profile.report("RNA-seq variant calling", dirs):
samples = rnaseq.rnaseq_variant_calling(samples, run_parallel)
with prun.start(_wres(parallel, ["samtools", "fastqc", "qualimap",
"kraken", "gatk", "preseq"], ensure_mem={"qualimap": 4}),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
with profile.report("bcbioRNAseq loading", dirs):
tools_on = dd.get_in_samples(samples, dd.get_tools_on)
bcbiornaseq_on = tools_on and "bcbiornaseq" in tools_on
if bcbiornaseq_on:
if len(samples) < 3:
logger.warn("bcbioRNASeq needs at least three samples total, skipping.")
elif len(samples) > 100:
logger.warn("Over 100 samples, skipping bcbioRNASeq.")
else:
run_parallel("run_bcbiornaseqload", [sample])
logger.info("Timing: finished")
return samples
def fastrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
ww = initialize_watcher(samples)
with prun.start(_wres(parallel, ["samtools"]), samples, config,
dirs, "fastrnaseq") as run_parallel:
with profile.report("fastrnaseq", dirs):
samples = rnaseq.fast_rnaseq(samples, run_parallel)
ww.report("fastrnaseq", samples)
samples = rnaseq.combine_files(samples)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
ww.report("qcsummary", samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
        for sample in samples:
            run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def singlecellrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["samtools", "rapmap"]), samples, config,
dirs, "singlecell-rnaseq") as run_parallel:
with profile.report("singlecell-rnaseq", dirs):
samples = rnaseq.singlecell_rnaseq(samples, run_parallel)
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
        for sample in samples:
            run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def smallrnaseqpipeline(config, run_info_yaml, parallel, dirs, samples):
# causes a circular import at the top level
from bcbio.srna.group import report as srna_report
samples = rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples)
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"bowtie": 8, "bowtie2": 8, "star": 2}),
[samples[0]], config, dirs, "alignment") as run_parallel:
with profile.report("prepare", dirs):
samples = run_parallel("seqcluster_prepare", [samples])
with profile.report("seqcluster alignment", dirs):
samples = run_parallel("srna_alignment", [samples])
with prun.start(_wres(parallel, ["aligner", "picard", "samtools"],
ensure_mem={"tophat": 10, "tophat2": 10, "star": 2, "hisat2": 8}),
samples, config, dirs, "alignment_samples",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ["picard", "miraligner"]),
samples, config, dirs, "annotation") as run_parallel:
with profile.report("small RNA annotation", dirs):
samples = run_parallel("srna_annotation", samples)
with prun.start(_wres(parallel, ["seqcluster", "mirge"],
ensure_mem={"seqcluster": 8}),
[samples[0]], config, dirs, "cluster") as run_parallel:
with profile.report("cluster", dirs):
samples = run_parallel("seqcluster_cluster", [samples])
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("report", dirs):
srna_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
return samples
def chipseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["aligner", "picard"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
with profile.report("alignment", dirs):
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_sample", samples)
samples = run_parallel("disambiguate_split", [samples])
samples = run_parallel("process_alignment", samples)
with profile.report("disambiguation", dirs):
samples = disambiguate.resolve(samples, run_parallel)
samples = run_parallel("clean_chipseq_alignment", samples)
with prun.start(_wres(parallel, ["peakcaller"]),
samples, config, dirs, "peakcalling",
multiplier = peaks._get_multiplier(samples)) as run_parallel:
with profile.report("peakcalling", dirs):
samples = peaks.peakcall_prepare(samples, run_parallel)
samples = peaks.call_consensus(samples)
samples = run_parallel("run_chipseq_count", samples)
samples = peaks.create_peaktable(samples)
with prun.start(_wres(parallel, ["picard", "fastqc"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
samples = atac.create_ataqv_report(samples)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def wgbsseqpipeline(config, run_info_yaml, parallel, dirs, samples):
with prun.start(_wres(parallel, ["fastqc", "picard"], ensure_mem={"fastqc" : 4}),
samples, config, dirs, "trimming") as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
samples = run_parallel("trim_bs_sample", samples)
with prun.start(_wres(parallel, ["aligner", "bismark", "picard", "samtools"]),
samples, config, dirs, "multicore",
multiplier=alignprep.parallel_multiplier(samples)) as run_parallel:
with profile.report("alignment", dirs):
samples = run_parallel("process_alignment", samples)
with prun.start(_wres(parallel, ['samtools']), samples, config, dirs,
'deduplication') as run_parallel:
with profile.report('deduplicate', dirs):
samples = run_parallel('deduplicate_bismark', samples)
with prun.start(_wres(parallel, ["caller"], ensure_mem={"caller": 5}),
samples, config, dirs, "multicore2",
multiplier=24) as run_parallel:
with profile.report("cpg calling", dirs):
samples = run_parallel("cpg_calling", samples)
with prun.start(_wres(parallel, ["picard", "fastqc", "samtools"]),
samples, config, dirs, "qc") as run_parallel:
with profile.report("quality control", dirs):
samples = qcsummary.generate_parallel(samples, run_parallel)
with profile.report("upload", dirs):
samples = run_parallel("upload_samples", samples)
for sample in samples:
run_parallel("upload_samples_project", [sample])
logger.info("Timing: finished")
return samples
def rnaseq_prep_samples(config, run_info_yaml, parallel, dirs, samples):
"""
    Organizes RNA-seq and small RNA-seq samples, converting from BAM and
    trimming reads as needed.
"""
pipeline = dd.get_in_samples(samples, dd.get_analysis)
trim_reads_set = any([tz.get_in(["algorithm", "trim_reads"], d) for d in dd.sample_data_iterator(samples)])
resources = ["picard"]
needs_trimming = (_is_smallrnaseq(pipeline) or trim_reads_set)
if needs_trimming:
resources.append("atropos")
with prun.start(_wres(parallel, resources),
samples, config, dirs, "trimming",
max_multicore=1 if not needs_trimming else None) as run_parallel:
with profile.report("organize samples", dirs):
samples = run_parallel("organize_samples", [[dirs, config, run_info_yaml,
[x[0]["description"] for x in samples]]])
samples = run_parallel("prepare_sample", samples)
if needs_trimming:
with profile.report("adapter trimming", dirs):
if _is_smallrnaseq(pipeline):
samples = run_parallel("trim_srna_sample", samples)
else:
samples = run_parallel("trim_sample", samples)
return samples
def _get_pipeline(item):
from bcbio.log import logger
analysis_type = item.get("analysis", "").lower()
if analysis_type not in SUPPORTED_PIPELINES:
logger.error("Cannot determine which type of analysis to run, "
"set in the run_info under details.")
sys.exit(1)
else:
return SUPPORTED_PIPELINES[analysis_type]
def _pair_samples_with_pipelines(run_info_yaml, config):
"""Map samples defined in input file to pipelines to run.
"""
samples = config_utils.load_config(run_info_yaml)
if isinstance(samples, dict):
resources = samples.pop("resources")
samples = samples["details"]
else:
resources = {}
ready_samples = []
for sample in samples:
if "files" in sample:
del sample["files"]
# add any resources to this item to recalculate global configuration
usample = copy.deepcopy(sample)
usample.pop("algorithm", None)
if "resources" not in usample:
usample["resources"] = {}
for prog, pkvs in resources.items():
if prog not in usample["resources"]:
usample["resources"][prog] = {}
if pkvs is not None:
for key, val in pkvs.items():
usample["resources"][prog][key] = val
config = config_utils.update_w_custom(config, usample)
sample["resources"] = {}
ready_samples.append(sample)
paired = [(x, _get_pipeline(x)) for x in ready_samples]
d = defaultdict(list)
for x in paired:
d[x[1]].append([x[0]])
return d, config
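# Illustrative shape of the returned mapping (hypothetical sample dicts):
#   {variant2pipeline: [[{"description": "s1", ...}], [{"description": "s2", ...}]]}
# i.e. each pipeline function keys a list of single-sample argument lists.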
SUPPORTED_PIPELINES = {"variant2": variant2pipeline,
"snp calling": variant2pipeline,
"variant": variant2pipeline,
"standard": standardpipeline,
"minimal": standardpipeline,
"rna-seq": rnaseqpipeline,
"smallrna-seq": smallrnaseqpipeline,
"chip-seq": chipseqpipeline,
"wgbs-seq": wgbsseqpipeline,
"fastrna-seq": fastrnaseqpipeline,
"scrna-seq": singlecellrnaseqpipeline}
def _is_smallrnaseq(pipeline):
return pipeline.lower() == "smallrna-seq"
|
chapmanb/bcbio-nextgen
|
bcbio/pipeline/main.py
|
Python
|
mit
| 27,572
|
[
"Bowtie"
] |
e0545f035a77d29895989c663deb2cc7e387549721f6c12da959858148c56b00
|
"""
Bok choy acceptance tests for problems in the LMS
"""
from textwrap import dedent
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.tests.helpers import UniqueCourseTest
from openedx.core.lib.tests import attr
class ProblemsTest(UniqueCourseTest):
"""
Base class for tests of problems in the LMS.
"""
def setUp(self):
super().setUp()
self.username = f"test_student_{self.unique_id[0:8]}"
self.email = f"{self.username}@example.com"
self.password = "keep it secret; keep it safe."
self.xqueue_grade_response = None
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with a hierarchy and problems
course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
problem = self.get_problem()
sequential = self.get_sequential()
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
sequential.add_children(problem)
)
).install()
# Auto-auth register for the course.
AutoAuthPage(
self.browser,
username=self.username,
email=self.email,
password=self.password,
course_id=self.course_id,
staff=True
).visit()
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
def get_sequential(self):
""" Subclasses can override this to add a sequential with metadata """
return XBlockFixtureDesc('sequential', 'Test Subsection')
class CAPAProblemA11yBaseTestMixin:
"""Base TestCase Class to verify CAPA problem accessibility."""
def test_a11y(self):
"""
Verifies that there are no accessibility issues for a particular problem type
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# Set the scope to the problem question
problem_page.a11y_audit.config.set_scope(
include=['.wrapper-problem-response']
)
# Run the accessibility audit.
problem_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class CAPAProblemChoiceA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for checkboxes and multiplechoice CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<choiceresponse>
<label>question 1 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<checkboxgroup>
<choice correct="true">True</choice>
<choice correct="false">False</choice>
</checkboxgroup>
</choiceresponse>
<multiplechoiceresponse>
<label>question 2 text here</label>
<description>description 2 text 1</description>
<description>description 2 text 2</description>
<choicegroup type="MultipleChoice">
<choice correct="false">Alpha <choicehint>A hint</choicehint></choice>
<choice correct="true">Beta</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemTextInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify TextInput problem accessibility."""
def get_problem(self):
"""
TextInput problem XML.
"""
xml = dedent("""
<problem>
<stringresponse answer="fight" type="ci">
<label>who wishes to _____ must first count the cost.</label>
<description>Appear weak when you are strong, and strong when you are weak.</description>
<description>In the midst of chaos, there is also opportunity.</description>
<textline size="40"/>
</stringresponse>
<stringresponse answer="force" type="ci">
<label>A leader leads by example not by _____.</label>
<description>The supreme art of war is to subdue the enemy without fighting.</description>
<description>Great results, can be achieved with small forces.</description>
<textline size="40"/>
</stringresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'TEXTINPUT PROBLEM', data=xml)
@attr('a11y')
class CAPAProblemDropDownA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""TestCase Class to verify accessibility for dropdowns(optioninput) CAPA problems."""
def get_problem(self):
"""
Problem structure.
"""
xml = dedent("""
<problem>
<optionresponse>
<p>You can use this template as a guide to the simple editor markdown and OLX markup to use for
dropdown problems. Edit this component to replace this template with your own assessment.</p>
<label>Which of the following is a fruit</label>
<description>Choose wisely</description>
<optioninput>
<option correct="False">radish</option>
<option correct="True">appple</option>
<option correct="False">carrot</option>
</optioninput>
</optionresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'Problem A11Y TEST', data=xml)
@attr('a11y')
class ProblemNumericalInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests NumericalInput accessibility."""
def get_problem(self):
"""NumericalInput problem XML."""
xml = dedent("""
<problem>
<numericalresponse answer="10*i">
<label>The square of what number is -100?</label>
<description>Use scientific notation to answer.</description>
<formulaequationinput/>
</numericalresponse>
</problem>""")
return XBlockFixtureDesc('problem', 'NUMERICALINPUT PROBLEM', data=xml)
@attr('a11y')
class ProblemMathExpressionInputA11yTest(CAPAProblemA11yBaseTestMixin, ProblemsTest):
"""Tests MathExpressionInput accessibility."""
def get_problem(self):
"""MathExpressionInput problem XML."""
xml = dedent(r"""
<problem>
<script type="loncapa/python">
derivative = "n*x^(n-1)"
</script>
<formularesponse type="ci" samples="x,n@1,2:3,4#10" answer="$derivative">
<label>Let \( x\) be a variable, and let \( n\) be an arbitrary constant. What is the derivative of \( x^n\)?</label>
<description>Enter the equation</description>
<responseparam type="tolerance" default="0.00001"/>
<formulaequationinput size="40"/>
</formularesponse>
</problem>""")
return XBlockFixtureDesc('problem', 'MATHEXPRESSIONINPUT PROBLEM', data=xml)
|
edx/edx-platform
|
common/test/acceptance/tests/lms/test_lms_problems.py
|
Python
|
agpl-3.0
| 7,716
|
[
"VisIt"
] |
f1725f2a2f86cc13afe29db48aac8f62e6134beb18bb82d74de258b655819a21
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of losses for disentanglement learning.
Implementation of VAE based models for unsupervised learning of disentangled
representations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from disentanglement_lib.methods.shared import architectures # pylint: disable=unused-import
from disentanglement_lib.methods.shared import losses # pylint: disable=unused-import
from disentanglement_lib.methods.shared import optimizers # pylint: disable=unused-import
from disentanglement_lib.methods.unsupervised import gaussian_encoder_model
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import gin.tf
from tensorflow.contrib import tpu as contrib_tpu
class BaseVAE(gaussian_encoder_model.GaussianEncoderModel):
"""Abstract base class of a basic Gaussian encoder model."""
def model_fn(self, features, labels, mode, params):
"""TPUEstimator compatible model function."""
del labels
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
data_shape = features.get_shape().as_list()[1:]
z_mean, z_logvar = self.gaussian_encoder(features, is_training=is_training)
z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
reconstructions = self.decode(z_sampled, data_shape, is_training)
per_sample_loss = losses.make_reconstruction_loss(features, reconstructions)
reconstruction_loss = tf.reduce_mean(per_sample_loss)
kl_loss = compute_gaussian_kl(z_mean, z_logvar)
regularizer = self.regularizer(kl_loss, z_mean, z_logvar, z_sampled)
loss = tf.add(reconstruction_loss, regularizer, name="loss")
elbo = tf.add(reconstruction_loss, kl_loss, name="elbo")
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = optimizers.make_vae_optimizer()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = optimizer.minimize(
loss=loss, global_step=tf.train.get_global_step())
train_op = tf.group([train_op, update_ops])
tf.summary.scalar("reconstruction_loss", reconstruction_loss)
tf.summary.scalar("elbo", -elbo)
logging_hook = tf.train.LoggingTensorHook({
"loss": loss,
"reconstruction_loss": reconstruction_loss,
"elbo": -elbo
},
every_n_iter=100)
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(make_metric_fn("reconstruction_loss", "elbo",
"regularizer", "kl_loss"),
[reconstruction_loss, -elbo, regularizer, kl_loss]))
else:
raise NotImplementedError("Eval mode not supported.")
def gaussian_encoder(self, input_tensor, is_training):
"""Applies the Gaussian encoder to images.
Args:
input_tensor: Tensor with the observations to be encoded.
is_training: Boolean indicating whether in training mode.
Returns:
Tuple of tensors with the mean and log variance of the Gaussian encoder.
"""
return architectures.make_gaussian_encoder(
input_tensor, is_training=is_training)
def decode(self, latent_tensor, observation_shape, is_training):
"""Decodes the latent_tensor to an observation."""
return architectures.make_decoder(
latent_tensor, observation_shape, is_training=is_training)
def shuffle_codes(z):
"""Shuffles latent variables across the batch.
Args:
z: [batch_size, num_latent] representation.
Returns:
shuffled: [batch_size, num_latent] shuffled representation across the batch.
"""
z_shuffle = []
for i in range(z.get_shape()[1]):
z_shuffle.append(tf.random_shuffle(z[:, i]))
shuffled = tf.stack(z_shuffle, 1, name="latent_shuffled")
return shuffled
def compute_gaussian_kl(z_mean, z_logvar):
"""Compute KL divergence between input Gaussian and Standard Normal."""
return tf.reduce_mean(
0.5 * tf.reduce_sum(
tf.square(z_mean) + tf.exp(z_logvar) - z_logvar - 1, [1]),
name="kl_loss")
def make_metric_fn(*names):
"""Utility function to report tf.metrics in model functions."""
def metric_fn(*args):
return {name: tf.metrics.mean(vec) for name, vec in zip(names, args)}
return metric_fn
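# --- Editor's note (not part of the original module): make_metric_fn pairs
# metric names with result tensors positionally, so `names` must be listed in
# the same order as the tensor list handed to TPUEstimatorSpec, e.g.
#
#     eval_metrics=(make_metric_fn("reconstruction_loss", "elbo"),
#                   [reconstruction_loss, -elbo])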
@gin.configurable("vae")
class BetaVAE(BaseVAE):
"""BetaVAE model."""
def __init__(self, beta=gin.REQUIRED):
"""Creates a beta-VAE model.
Implementing Eq. 4 of "beta-VAE: Learning Basic Visual Concepts with a
Constrained Variational Framework"
(https://openreview.net/forum?id=Sy2fzU9gl).
Args:
beta: Hyperparameter for the regularizer.
Returns:
model_fn: Model function for TPUEstimator.
"""
self.beta = beta
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
del z_mean, z_logvar, z_sampled
return self.beta * kl_loss
def anneal(c_max, step, iteration_threshold):
"""Anneal function for anneal_vae (https://arxiv.org/abs/1804.03599).
Args:
c_max: Maximum capacity.
step: Current step.
iteration_threshold: How many iterations to reach c_max.
Returns:
Capacity annealed linearly until c_max.
"""
return tf.math.minimum(c_max * 1.,
c_max * 1. * tf.to_float(step) / iteration_threshold)
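# --- Editor's sketch (not part of the original module): the schedule above is
# a linear ramp from 0 to c_max over iteration_threshold steps that then
# saturates at c_max. Illustrated with the hypothetical values c_max=25,
# iteration_threshold=1000:
def _anneal_np(c_max, step, iteration_threshold):
    return min(c_max, c_max * float(step) / iteration_threshold)

assert _anneal_np(25.0, 0, 1000) == 0.0
assert _anneal_np(25.0, 500, 1000) == 12.5
assert _anneal_np(25.0, 2000, 1000) == 25.0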
@gin.configurable("annealed_vae")
class AnnealedVAE(BaseVAE):
"""AnnealedVAE model."""
def __init__(self,
gamma=gin.REQUIRED,
c_max=gin.REQUIRED,
iteration_threshold=gin.REQUIRED):
"""Creates an AnnealedVAE model.
Implementing Eq. 8 of "Understanding disentangling in beta-VAE"
(https://arxiv.org/abs/1804.03599).
Args:
gamma: Hyperparameter for the regularizer.
c_max: Maximum capacity of the bottleneck.
iteration_threshold: How many iterations to reach c_max.
"""
self.gamma = gamma
self.c_max = c_max
self.iteration_threshold = iteration_threshold
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
del z_mean, z_logvar, z_sampled
c = anneal(self.c_max, tf.train.get_global_step(), self.iteration_threshold)
return self.gamma * tf.math.abs(kl_loss - c)
@gin.configurable("factor_vae")
class FactorVAE(BaseVAE):
"""FactorVAE model."""
def __init__(self, gamma=gin.REQUIRED):
"""Creates a FactorVAE model.
Implementing Eq. 2 of "Disentangling by Factorizing"
(https://arxiv.org/pdf/1802.05983).
Args:
gamma: Hyperparameter for the regularizer.
"""
self.gamma = gamma
def model_fn(self, features, labels, mode, params):
"""TPUEstimator compatible model function."""
del labels
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
data_shape = features.get_shape().as_list()[1:]
z_mean, z_logvar = self.gaussian_encoder(features, is_training=is_training)
z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
z_shuffle = shuffle_codes(z_sampled)
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
logits_z, probs_z = architectures.make_discriminator(
z_sampled, is_training=is_training)
_, probs_z_shuffle = architectures.make_discriminator(
z_shuffle, is_training=is_training)
reconstructions = self.decode(z_sampled, data_shape, is_training)
per_sample_loss = losses.make_reconstruction_loss(
features, reconstructions)
reconstruction_loss = tf.reduce_mean(per_sample_loss)
kl_loss = compute_gaussian_kl(z_mean, z_logvar)
standard_vae_loss = tf.add(reconstruction_loss, kl_loss, name="VAE_loss")
# tc = E[log(p_real)-log(p_fake)] = E[logit_real - logit_fake]
tc_loss_per_sample = logits_z[:, 0] - logits_z[:, 1]
tc_loss = tf.reduce_mean(tc_loss_per_sample, axis=0)
regularizer = kl_loss + self.gamma * tc_loss
factor_vae_loss = tf.add(
standard_vae_loss, self.gamma * tc_loss, name="factor_VAE_loss")
discr_loss = tf.add(
0.5 * tf.reduce_mean(tf.log(probs_z[:, 0])),
0.5 * tf.reduce_mean(tf.log(probs_z_shuffle[:, 1])),
name="discriminator_loss")
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer_vae = optimizers.make_vae_optimizer()
optimizer_discriminator = optimizers.make_discriminator_optimizer()
all_variables = tf.trainable_variables()
encoder_vars = [var for var in all_variables if "encoder" in var.name]
decoder_vars = [var for var in all_variables if "decoder" in var.name]
discriminator_vars = [var for var in all_variables \
if "discriminator" in var.name]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op_vae = optimizer_vae.minimize(
loss=factor_vae_loss,
global_step=tf.train.get_global_step(),
var_list=encoder_vars + decoder_vars)
train_op_discr = optimizer_discriminator.minimize(
loss=-discr_loss,
global_step=tf.train.get_global_step(),
var_list=discriminator_vars)
train_op = tf.group(train_op_vae, train_op_discr, update_ops)
tf.summary.scalar("reconstruction_loss", reconstruction_loss)
logging_hook = tf.train.LoggingTensorHook({
"loss": factor_vae_loss,
"reconstruction_loss": reconstruction_loss
},
every_n_iter=50)
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=factor_vae_loss,
train_op=train_op,
training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=factor_vae_loss,
eval_metrics=(make_metric_fn("reconstruction_loss", "regularizer",
"kl_loss"),
[reconstruction_loss, regularizer, kl_loss]))
else:
raise NotImplementedError("Eval mode not supported.")
def compute_covariance_z_mean(z_mean):
"""Computes the covariance of z_mean.
Uses cov(z_mean) = E[z_mean*z_mean^T] - E[z_mean]E[z_mean]^T.
Args:
z_mean: Encoder mean, tensor of size [batch_size, num_latent].
Returns:
cov_z_mean: Covariance of encoder mean, tensor of size [num_latent,
num_latent].
"""
expectation_z_mean_z_mean_t = tf.reduce_mean(
tf.expand_dims(z_mean, 2) * tf.expand_dims(z_mean, 1), axis=0)
expectation_z_mean = tf.reduce_mean(z_mean, axis=0)
cov_z_mean = tf.subtract(
expectation_z_mean_z_mean_t,
tf.expand_dims(expectation_z_mean, 1) * tf.expand_dims(
expectation_z_mean, 0))
return cov_z_mean
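# --- Editor's sketch (not part of the original module): the expression above
# is the biased (divide-by-N) covariance of z_mean over the batch, so it
# should agree with numpy.cov(..., bias=True) on random data.
import numpy as np

_rng = np.random.RandomState(0)
_z = _rng.randn(32, 5)
_cov_manual = ((_z[:, :, None] * _z[:, None, :]).mean(axis=0)
               - np.outer(_z.mean(axis=0), _z.mean(axis=0)))
assert np.allclose(_cov_manual, np.cov(_z, rowvar=False, bias=True))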
def regularize_diag_off_diag_dip(covariance_matrix, lambda_od, lambda_d):
"""Compute on and off diagonal regularizers for DIP-VAE models.
Penalize deviations of covariance_matrix from the identity matrix. Uses
different weights for the deviations of the diagonal and off diagonal entries.
Args:
covariance_matrix: Tensor of size [num_latent, num_latent] to regularize.
lambda_od: Weight of penalty for off diagonal elements.
lambda_d: Weight of penalty for diagonal elements.
Returns:
dip_regularizer: Regularized deviation from diagonal of covariance_matrix.
"""
covariance_matrix_diagonal = tf.diag_part(covariance_matrix)
covariance_matrix_off_diagonal = covariance_matrix - tf.diag(
covariance_matrix_diagonal)
dip_regularizer = tf.add(
lambda_od * tf.reduce_sum(covariance_matrix_off_diagonal**2),
lambda_d * tf.reduce_sum((covariance_matrix_diagonal - 1)**2))
return dip_regularizer
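# --- Editor's sketch (not part of the original module): for an identity
# covariance the DIP penalty above is exactly zero; off-diagonal mass is
# weighted by lambda_od and diagonal deviation from 1 by lambda_d.
import numpy as np

def _dip_penalty_np(cov, lambda_od, lambda_d):
    diag = np.diag(cov)
    off_diag = cov - np.diag(diag)
    return (lambda_od * np.sum(off_diag ** 2)
            + lambda_d * np.sum((diag - 1.0) ** 2))

assert _dip_penalty_np(np.eye(4), 10.0, 100.0) == 0.0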
@gin.configurable("dip_vae")
class DIPVAE(BaseVAE):
"""DIPVAE model."""
def __init__(self,
lambda_od=gin.REQUIRED,
lambda_d_factor=gin.REQUIRED,
dip_type="i"):
"""Creates a DIP-VAE model.
Based on Equation 6 and 7 of "Variational Inference of Disentangled Latent
Concepts from Unlabeled Observations"
(https://openreview.net/pdf?id=H1kG7GZAW).
Args:
lambda_od: Hyperparameter for off diagonal values of covariance matrix.
lambda_d_factor: Hyperparameter for diagonal values of covariance matrix
lambda_d = lambda_d_factor*lambda_od.
dip_type: "i" or "ii".
"""
self.lambda_od = lambda_od
self.lambda_d_factor = lambda_d_factor
self.dip_type = dip_type
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
cov_z_mean = compute_covariance_z_mean(z_mean)
lambda_d = self.lambda_d_factor * self.lambda_od
if self.dip_type == "i": # Eq 6 page 4
# mu = z_mean is [batch_size, num_latent]
# Compute cov_p(x) [mu(x)] = E[mu*mu^T] - E[mu]E[mu]^T
cov_dip_regularizer = regularize_diag_off_diag_dip(
cov_z_mean, self.lambda_od, lambda_d)
elif self.dip_type == "ii":
cov_enc = tf.matrix_diag(tf.exp(z_logvar))
expectation_cov_enc = tf.reduce_mean(cov_enc, axis=0)
cov_z = expectation_cov_enc + cov_z_mean
cov_dip_regularizer = regularize_diag_off_diag_dip(
cov_z, self.lambda_od, lambda_d)
else:
raise NotImplementedError("DIP variant not supported.")
return kl_loss + cov_dip_regularizer
def gaussian_log_density(samples, mean, log_var):
pi = tf.constant(math.pi)
normalization = tf.log(2. * pi)
inv_sigma = tf.exp(-log_var)
tmp = (samples - mean)
return -0.5 * (tmp * tmp * inv_sigma + log_var + normalization)
def total_correlation(z, z_mean, z_logvar):
"""Estimate of total correlation on a batch.
We need to compute the expectation over a batch of: E_j [log(q(z(x_j))) -
log(prod_l q(z(x_j)_l))]. We ignore the constants as they do not matter
for the minimization. The constant should be equal to (num_latents - 1) *
log(batch_size * dataset_size)
Args:
z: [batch_size, num_latents]-tensor with sampled representation.
z_mean: [batch_size, num_latents]-tensor with mean of the encoder.
z_logvar: [batch_size, num_latents]-tensor with log variance of the encoder.
Returns:
Total correlation estimated on a batch.
"""
# Compute log(q(z(x_j)|x_i)) for every sample in the batch, which is a
# tensor of size [batch_size, batch_size, num_latents]. In the following
# comments, [batch_size, batch_size, num_latents] are indexed by [j, i, l].
log_qz_prob = gaussian_log_density(
tf.expand_dims(z, 1), tf.expand_dims(z_mean, 0),
tf.expand_dims(z_logvar, 0))
# Compute log prod_l q(z(x_j)_l) = sum_l(log(sum_i(q(z(x_j)_l|x_i)))
# + constant) for each sample in the batch, which is a vector of size
# [batch_size,].
log_qz_product = tf.reduce_sum(
tf.reduce_logsumexp(log_qz_prob, axis=1, keepdims=False),
axis=1,
keepdims=False)
# Compute log(q(z(x_j))) as log(sum_i(q(z(x_j)|x_i))) + constant =
# log(sum_i(prod_l q(z(x_j)_l|x_i))) + constant.
log_qz = tf.reduce_logsumexp(
tf.reduce_sum(log_qz_prob, axis=2, keepdims=False),
axis=1,
keepdims=False)
return tf.reduce_mean(log_qz - log_qz_product)
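# --- Editor's sketch (not part of the original module): a NumPy mirror of the
# minibatch total-correlation estimator above, constants dropped as in the TF
# version. scipy is an extra dependency used here only for logsumexp.
import numpy as np
from scipy.special import logsumexp

def _total_correlation_np(z, z_mean, z_logvar):
    # log q(z(x_j)_l | x_i) for all pairs (j, i) and latents l:
    # shape [batch, batch, num_latents], indexed [j, i, l].
    log_qz_prob = -0.5 * (
        (z[:, None, :] - z_mean[None, :, :]) ** 2 * np.exp(-z_logvar[None, :, :])
        + z_logvar[None, :, :] + np.log(2.0 * np.pi))
    # sum_l log sum_i q(z(x_j)_l | x_i): the marginal-product term.
    log_qz_product = logsumexp(log_qz_prob, axis=1).sum(axis=1)
    # log sum_i prod_l q(z(x_j)_l | x_i): the joint term.
    log_qz = logsumexp(log_qz_prob.sum(axis=2), axis=1)
    return np.mean(log_qz - log_qz_product)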
@gin.configurable("beta_tc_vae")
class BetaTCVAE(BaseVAE):
"""BetaTCVAE model."""
def __init__(self, beta=gin.REQUIRED):
"""Creates a beta-TC-VAE model.
Based on Equation 4 with alpha = gamma = 1 of "Isolating Sources of
Disentanglement in Variational Autoencoders"
(https://arxiv.org/pdf/1802.04942).
If alpha = gamma = 1, Eq. 4 can be written as ELBO + (1 - beta) * TC.
Args:
beta: Hyperparameter for the total correlation term.
"""
self.beta = beta
def regularizer(self, kl_loss, z_mean, z_logvar, z_sampled):
tc = (self.beta - 1.) * total_correlation(z_sampled, z_mean, z_logvar)
return tc + kl_loss
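# --- Editor's usage note (not part of the original module): each model above
# is registered with gin, so constructor arguments marked gin.REQUIRED are
# bound from a gin config rather than passed explicitly. A minimal,
# hypothetical binding for the beta-VAE:
#
#     import gin
#     gin.parse_config("vae.beta = 4.0")
#     model = BetaVAE()  # beta resolved to 4.0 through the "vae" binding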
| google-research/disentanglement_lib | disentanglement_lib/methods/unsupervised/vae.py | Python | apache-2.0 | 16,472 | ["Gaussian"] | 921ed5aaecb9b035521cba2b801aaeeb51d9b75162fe313adc49859eddffff25 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Perform fragmentation of molecules.
"""
import logging
import copy
from monty.json import MSONable
from pymatgen.analysis.graphs import MoleculeGraph, MolGraphSplitError
from pymatgen.analysis.local_env import OpenBabelNN
from pymatgen.io.babel import BabelMolAdaptor
__author__ = "Samuel Blau"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Beta"
__date__ = "8/21/19"
logger = logging.getLogger(__name__)
class Fragmenter(MSONable):
"""
Molecule fragmenter class.
"""
def __init__(self, molecule, edges=None, depth=1, open_rings=False, use_metal_edge_extender=False,
opt_steps=10000, prev_unique_frag_dict=None, assume_previous_thoroughness=True):
"""
Standard constructor for molecule fragmentation
Args:
molecule (Molecule): The molecule to fragment.
edges (list): List of index pairs that define graph edges, aka molecule bonds. If not set,
edges will be determined with OpenBabel. Defaults to None.
depth (int): The number of levels of iterative fragmentation to perform, where each level
will include fragments obtained by breaking one bond of a fragment one level up.
Defaults to 1. However, if set to 0, instead all possible fragments are generated
using an alternative, non-iterative scheme.
open_rings (bool): Whether or not to open any rings encountered during fragmentation.
Defaults to False. If true, any bond that fails to yield disconnected graphs when
broken is instead removed and the entire structure is optimized with OpenBabel in
order to obtain a good initial guess for an opened geometry that can then be put
back into QChem to be optimized without the ring just reforming.
use_metal_edge_extender (bool): Whether or not to attempt to add additional edges from
O, N, F, or Cl to any Li or Mg atoms present that OpenBabel may have missed. Defaults
to False. Most important for ionic bonding. Note that additional metal edges may yield
new "rings" (e.g. -C-O-Li-O- in LiEC) that will not play nicely with ring opening.
opt_steps (int): Number of optimization steps when opening rings. Defaults to 10000.
prev_unique_frag_dict (dict): A dictionary of previously identified unique fragments.
Defaults to None. Typically only used when trying to find the set of unique fragments
that come from multiple molecules.
assume_previous_thoroughness (bool): Whether or not to assume that a molecule / fragment
provided in prev_unique_frag_dict has all of its unique subfragments also provided in
prev_unique_frag_dict. Defaults to True. This is an essential optimization when trying
to find the set of unique fragments that come from multiple molecules if all of those
molecules are being fully iteratively fragmented. However, if you're passing a
prev_unique_frag_dict which includes a molecule and its fragments that were generated
at insufficient depth to find all possible subfragments to a fragmentation calculation
of a different molecule that you aim to find all possible subfragments of and which has
common subfragments with the previous molecule, this optimization will cause you to
miss some unique subfragments.
"""
self.assume_previous_thoroughness = assume_previous_thoroughness
self.open_rings = open_rings
self.opt_steps = opt_steps
if edges is None:
self.mol_graph = MoleculeGraph.with_local_env_strategy(molecule, OpenBabelNN())
else:
edges = {(e[0], e[1]): None for e in edges}
self.mol_graph = MoleculeGraph.with_edges(molecule, edges)
if ("Li" in molecule.composition or "Mg" in molecule.composition) and use_metal_edge_extender:
self.mol_graph = metal_edge_extender(self.mol_graph)
self.prev_unique_frag_dict = prev_unique_frag_dict or {}
self.new_unique_frag_dict = {} # new fragments from the given molecule not contained in prev_unique_frag_dict
self.all_unique_frag_dict = {} # all fragments from just the given molecule
self.unique_frag_dict = {} # all fragments from both the given molecule and prev_unique_frag_dict
if depth == 0: # Non-iterative, find all possible fragments:
# Find all unique fragments besides those involving ring opening
self.all_unique_frag_dict = self.mol_graph.build_unique_fragments()
# Then, if self.open_rings is True, open all rings present in self.unique_fragments
# in order to capture all unique fragments that require ring opening.
if self.open_rings:
self._open_all_rings()
else: # Iterative fragment generation:
self.fragments_by_level = {}
# Loop through the number of levels,
for level in range(depth):
# If on the first level, perform one level of fragmentation on the principal molecule graph:
if level == 0:
self.fragments_by_level["0"] = self._fragment_one_level({str(
self.mol_graph.molecule.composition.alphabetical_formula) + " E" + str(
len(self.mol_graph.graph.edges())): [self.mol_graph]})
else:
num_frags_prev_level = 0
for key in self.fragments_by_level[str(level - 1)]:
num_frags_prev_level += len(self.fragments_by_level[str(level - 1)][key])
if num_frags_prev_level == 0:
# Nothing left to fragment, so exit the loop:
break
else: # If not on the first level, and there are fragments present in the previous level, then
# perform one level of fragmentation on all fragments present in the previous level:
self.fragments_by_level[str(level)] = self._fragment_one_level(
self.fragments_by_level[str(level-1)])
if self.prev_unique_frag_dict == {}:
self.new_unique_frag_dict = copy.deepcopy(self.all_unique_frag_dict)
else:
for frag_key in self.all_unique_frag_dict:
if frag_key not in self.prev_unique_frag_dict:
self.new_unique_frag_dict[frag_key] = copy.deepcopy(self.all_unique_frag_dict[frag_key])
else:
for fragment in self.all_unique_frag_dict[frag_key]:
found = False
for prev_frag in self.prev_unique_frag_dict[frag_key]:
if fragment.isomorphic_to(prev_frag):
found = True
if not found:
if frag_key not in self.new_unique_frag_dict:
self.new_unique_frag_dict[frag_key] = [fragment]
else:
self.new_unique_frag_dict[frag_key].append(fragment)
self.new_unique_fragments = 0
for frag_key in self.new_unique_frag_dict:
self.new_unique_fragments += len(self.new_unique_frag_dict[frag_key])
if self.prev_unique_frag_dict == {}:
self.unique_frag_dict = self.new_unique_frag_dict
self.total_unique_fragments = self.new_unique_fragments
else:
self.unique_frag_dict = copy.deepcopy(self.prev_unique_frag_dict)
for frag_key in self.new_unique_frag_dict:
if frag_key in self.unique_frag_dict:
for new_frag in self.new_unique_frag_dict[frag_key]:
self.unique_frag_dict[frag_key].append(new_frag)
else:
self.unique_frag_dict[frag_key] = copy.deepcopy(self.new_unique_frag_dict[frag_key])
self.total_unique_fragments = 0
for frag_key in self.unique_frag_dict:
self.total_unique_fragments += len(self.unique_frag_dict[frag_key])
def _fragment_one_level(self, old_frag_dict):
"""
Perform one step of iterative fragmentation on a list of molecule graphs. Loop through the graphs,
then loop through each graph's edges and attempt to remove that edge in order to obtain two
disconnected subgraphs, aka two new fragments. If successful, check to see if the new fragments
are already present in self.unique_fragments, and append them if not. If unsuccessful, we know
that edge belongs to a ring. If we are opening rings, do so with that bond, and then again
check if the resulting fragment is present in self.unique_fragments and add it if it is not.
"""
new_frag_dict = {}
for old_frag_key in old_frag_dict:
for old_frag in old_frag_dict[old_frag_key]:
for edge in old_frag.graph.edges:
bond = [(edge[0], edge[1])]
fragments = []
try:
fragments = old_frag.split_molecule_subgraphs(bond, allow_reverse=True)
except MolGraphSplitError:
if self.open_rings:
fragments = [open_ring(old_frag, bond, self.opt_steps)]
for fragment in fragments:
new_frag_key = str(fragment.molecule.composition.alphabetical_formula)+" E"+str(
len(fragment.graph.edges()))
proceed = True
if self.assume_previous_thoroughness and self.prev_unique_frag_dict != {}:
if new_frag_key in self.prev_unique_frag_dict:
for unique_fragment in self.prev_unique_frag_dict[new_frag_key]:
if unique_fragment.isomorphic_to(fragment):
proceed = False
break
if proceed:
if new_frag_key not in self.all_unique_frag_dict:
self.all_unique_frag_dict[new_frag_key] = [fragment]
new_frag_dict[new_frag_key] = [fragment]
else:
found = False
for unique_fragment in self.all_unique_frag_dict[new_frag_key]:
if unique_fragment.isomorphic_to(fragment):
found = True
break
if not found:
self.all_unique_frag_dict[new_frag_key].append(fragment)
if new_frag_key in new_frag_dict:
new_frag_dict[new_frag_key].append(fragment)
else:
new_frag_dict[new_frag_key] = [fragment]
return new_frag_dict
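# --- Editor's note (not part of the original module): fragment dictionary
# keys are "<alphabetical formula> E<edge count>", e.g. a water fragment with
# one O-H bond broken files under "H1 O1 E1"; the isomorphism checks within
# each key then deduplicate fragments that share a formula and edge count.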
def _open_all_rings(self):
"""
Having already generated all unique fragments that did not require ring opening,
now we want to also obtain fragments that do require opening. We achieve this by
looping through all unique fragments and opening each bond present in any ring
we find. We also temporarily add the principal molecule graph to self.unique_fragments
so that its rings are opened as well.
"""
mol_key = str(self.mol_graph.molecule.composition.alphabetical_formula) + " E" + str(
len(self.mol_graph.graph.edges()))
self.all_unique_frag_dict[mol_key] = [self.mol_graph]
new_frag_keys = {"0": []}
new_frag_key_dict = {}
for key in self.all_unique_frag_dict:
for fragment in self.all_unique_frag_dict[key]:
ring_edges = fragment.find_rings()
if ring_edges != []:
for bond in ring_edges[0]:
new_fragment = open_ring(fragment, [bond], self.opt_steps)
frag_key = str(new_fragment.molecule.composition.alphabetical_formula) + " E" + str(
len(new_fragment.graph.edges()))
if frag_key not in self.all_unique_frag_dict:
if frag_key not in new_frag_keys["0"]:
new_frag_keys["0"].append(copy.deepcopy(frag_key))
new_frag_key_dict[frag_key] = copy.deepcopy([new_fragment])
else:
found = False
for unique_fragment in new_frag_key_dict[frag_key]:
if unique_fragment.isomorphic_to(new_fragment):
found = True
break
if not found:
new_frag_key_dict[frag_key].append(copy.deepcopy(new_fragment))
else:
found = False
for unique_fragment in self.all_unique_frag_dict[frag_key]:
if unique_fragment.isomorphic_to(new_fragment):
found = True
break
if not found:
self.all_unique_frag_dict[frag_key].append(copy.deepcopy(new_fragment))
for key in new_frag_key_dict:
self.all_unique_frag_dict[key] = copy.deepcopy(new_frag_key_dict[key])
idx = 0
while len(new_frag_keys[str(idx)]) != 0:
new_frag_key_dict = {}
idx += 1
new_frag_keys[str(idx)] = []
for key in new_frag_keys[str(idx - 1)]:
for fragment in self.all_unique_frag_dict[key]:
ring_edges = fragment.find_rings()
if ring_edges != []:
for bond in ring_edges[0]:
new_fragment = open_ring(fragment, [bond], self.opt_steps)
frag_key = str(new_fragment.molecule.composition.alphabetical_formula) + " E" + str(
len(new_fragment.graph.edges()))
if frag_key not in self.all_unique_frag_dict:
if frag_key not in new_frag_keys[str(idx)]:
new_frag_keys[str(idx)].append(copy.deepcopy(frag_key))
new_frag_key_dict[frag_key] = copy.deepcopy([new_fragment])
else:
found = False
for unique_fragment in new_frag_key_dict[frag_key]:
if unique_fragment.isomorphic_to(new_fragment):
found = True
break
if not found:
new_frag_key_dict[frag_key].append(copy.deepcopy(new_fragment))
else:
found = False
for unique_fragment in self.all_unique_frag_dict[frag_key]:
if unique_fragment.isomorphic_to(new_fragment):
found = True
break
if not found:
self.all_unique_frag_dict[frag_key].append(copy.deepcopy(new_fragment))
for key in new_frag_key_dict:
self.all_unique_frag_dict[key] = copy.deepcopy(new_frag_key_dict[key])
self.all_unique_frag_dict.pop(mol_key)
def open_ring(mol_graph, bond, opt_steps):
"""
Function to actually open a ring using OpenBabel's local opt. Given a molecule
graph and a bond, convert the molecule graph into an OpenBabel molecule, remove
the given bond, perform the local opt with the number of steps determined by
self.steps, and then convert the resulting structure back into a molecule graph
to be returned.
"""
obmol = BabelMolAdaptor.from_molecule_graph(mol_graph)
obmol.remove_bond(bond[0][0] + 1, bond[0][1] + 1)
obmol.localopt(steps=opt_steps, forcefield='uff')
return MoleculeGraph.with_local_env_strategy(obmol.pymatgen_mol, OpenBabelNN())
def metal_edge_extender(mol_graph):
"""
Function to identify and add missed edges in ionic bonding of Li and Mg ions.
"""
metal_sites = {"Li": {}, "Mg": {}}
coordinators = ["O", "N", "F", "Cl"]
num_new_edges = 0
for idx in mol_graph.graph.nodes():
if mol_graph.graph.nodes()[idx]["specie"] in metal_sites:
metal_sites[mol_graph.graph.nodes()[idx]["specie"]][idx] = [site[2] for site in
mol_graph.get_connected_sites(idx)]
for metal in metal_sites:
for idx in metal_sites[metal]:
for ii, site in enumerate(mol_graph.molecule):
if ii != idx and ii not in metal_sites[metal][idx]:
if str(site.specie) in coordinators:
if site.distance(mol_graph.molecule[idx]) < 2.5:
mol_graph.add_edge(idx, ii)
num_new_edges += 1
metal_sites[metal][idx].append(ii)
total_metal_edges = 0
for metal in metal_sites:
for idx in metal_sites[metal]:
total_metal_edges += len(metal_sites[metal][idx])
if total_metal_edges == 0:
for metal in metal_sites:
for idx in metal_sites[metal]:
for ii, site in enumerate(mol_graph.molecule):
if ii != idx and ii not in metal_sites[metal][idx]:
if str(site.specie) in coordinators:
if site.distance(mol_graph.molecule[idx]) < 3.5:
mol_graph.add_edge(idx, ii)
num_new_edges += 1
metal_sites[metal][idx].append(ii)
total_metal_edges = 0
for metal in metal_sites:
for idx in metal_sites[metal]:
total_metal_edges += len(metal_sites[metal][idx])
return mol_graph
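# --- Editor's usage sketch (not part of the original module; the water
# geometry is hypothetical test input, everything else uses only names from
# this module or pymatgen):
#
#     from pymatgen.core.structure import Molecule
#     water = Molecule(["O", "H", "H"],
#                      [[0.0, 0.0, 0.0],
#                       [0.96, 0.0, 0.0],
#                       [-0.24, 0.93, 0.0]])
#     frag = Fragmenter(water, depth=1)  # bonds inferred via OpenBabelNN
#     print(frag.total_unique_fragments)
#     for key, graphs in frag.unique_frag_dict.items():
#         print(key, len(graphs))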
| mbkumar/pymatgen | pymatgen/analysis/fragmenter.py | Python | mit | 19,035 | ["pymatgen"] | ec9446be44af69e309afb3be7bc94ab76ad75f43a492035d1f45d66230b0ddc0 |
# coding=utf-8
import telegram
import telegram.ext
import config
import datetime
import utils
import urllib
import html
import logging
import time
import magic
import os
import re
import mimetypes
import pydub
import threading
import traceback
import base64
from . import db, speech
from .whitelisthandler import WhitelistHandler
from channel import EFBChannel, EFBMsg, MsgType, MsgSource, TargetType, ChannelType
from channelExceptions import EFBChatNotFound, EFBMessageTypeNotSupported, EFBMessageError
from .msgType import get_msg_type, TGMsgType
from moviepy.editor import VideoFileClip
class Flags:
# General Flags
CANCEL_PROCESS = "cancel"
# Chat linking
CONFIRM_LINK = 0x11
EXEC_LINK = 0x12
# Start a chat
START_CHOOSE_CHAT = 0x21
# Command
COMMAND_PENDING = 0x31
class TelegramChannel(EFBChannel):
"""
EFB Channel - Telegram (Master)
Requires python-telegram-bot
Author: Eana Hufwe <https://github.com/blueset>
External Services:
You may need API keys from following service providers to enjoy more functions.
Baidu Speech Recognition API: http://yuyin.baidu.com/
Bing Speech API: https://www.microsoft.com/cognitive-services/en-us/speech-api
Additional configs:
eh_telegram_master = {
"token": "Telegram bot token",
"admins": [12345678, 87654321],
"bing_speech_api": ["token1", "token2"],
"baidu_speech_api": {
"app_id": 123456,
"api_key": "APIkey",
"secret_key": "secret_key"
}
}
"""
# Meta Info
channel_name = "Telegram Master"
channel_emoji = "✈"
channel_id = "eh_telegram_master"
channel_type = ChannelType.Master
supported_message_types = {MsgType.Text, MsgType.File, MsgType.Audio,
MsgType.Command, MsgType.Image, MsgType.Link, MsgType.Location,
MsgType.Sticker, MsgType.Video}
# Data
slaves = None
bot = None
msg_status = {}
msg_storage = {}
me = None
_stop_polling = False
# Constants
TYPE_DICT = {
TGMsgType.Text: MsgType.Text,
TGMsgType.Audio: MsgType.Audio,
TGMsgType.Document: MsgType.File,
TGMsgType.Photo: MsgType.Image,
TGMsgType.Sticker: MsgType.Sticker,
TGMsgType.Video: MsgType.Video,
TGMsgType.Voice: MsgType.Audio,
TGMsgType.Location: MsgType.Location,
TGMsgType.Venue: MsgType.Location,
}
def __init__(self, queue, mutex, slaves):
"""
Initialization.
Args:
queue (queue.Queue): global message queue
slaves (dict): Dictionary of slaves
"""
super().__init__(queue, mutex)
self.slaves = slaves
try:
self.bot = telegram.ext.Updater(getattr(config, self.channel_id)['token'])
except (AttributeError, KeyError):
raise ValueError("Token is not properly defined. Please define it in `config.py`.")
mimetypes.init(files=["mimetypes"])
self.admins = getattr(config, self.channel_id)['admins']
self.logger = logging.getLogger("plugins.%s.TelegramChannel" % self.channel_id)
self.me = self.bot.bot.get_me()
self.bot.dispatcher.add_handler(WhitelistHandler(self.admins))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("link", self.link_chat_show_list, pass_args=True))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("chat", self.start_chat_list, pass_args=True))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("recog", self.recognize_speech, pass_args=True))
self.bot.dispatcher.add_handler(telegram.ext.CallbackQueryHandler(self.callback_query_dispatcher))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("start", self.start, pass_args=True))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("extra", self.extra_help))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("help", self.help))
self.bot.dispatcher.add_handler(telegram.ext.CommandHandler("unlink_all", self.unlink_all))
self.bot.dispatcher.add_handler(
telegram.ext.RegexHandler(r"^/(?P<id>[0-9]+)_(?P<command>[a-z0-9_-]+)", self.extra_call,
pass_groupdict=True))
self.bot.dispatcher.add_handler(telegram.ext.MessageHandler(
telegram.ext.Filters.text |
telegram.ext.Filters.photo |
telegram.ext.Filters.sticker |
telegram.ext.Filters.document |
telegram.ext.Filters.venue |
telegram.ext.Filters.location |
telegram.ext.Filters.audio |
telegram.ext.Filters.voice |
telegram.ext.Filters.video,
self.msg
))
self.bot.dispatcher.add_error_handler(self.error)
# Truncate string by bytes
# Written by Mark Tolonen
# http://stackoverflow.com/a/13738452/1989455
@staticmethod
def _utf8_lead_byte(b):
"""A UTF-8 intermediate byte starts with the bits 10xxxxxx."""
return (b & 0xC0) != 0x80
def _utf8_byte_truncate(self, text, max_bytes):
"""If text[max_bytes] is not a lead byte, back up until a lead byte is
found and truncate before that character."""
utf8 = text.encode('utf8')
if len(utf8) <= max_bytes:
return utf8.decode()
i = max_bytes
while i > 0 and not self._utf8_lead_byte(utf8[i]):
i -= 1
return utf8[:i].decode()
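# --- Editor's note (not part of the original module): truncation happens at a
# UTF-8 character boundary, so multi-byte characters are never split. With
# "héllo" (é encodes as two bytes, 0xC3 0xA9):
#
#     self._utf8_byte_truncate("héllo", 2)  # -> "h" (byte 2 would split é)
#     self._utf8_byte_truncate("héllo", 3)  # -> "hé" (byte 3 is a lead byte)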
def callback_query_dispatcher(self, bot, update):
"""
Dispatch a callback query based on the message session status.
Args:
bot (telegram.bot.Bot): bot
update (telegram.Update): update
"""
# Get essential information about the query
query = update.callback_query
chat_id = query.message.chat.id
text = query.data
msg_id = update.callback_query.message.message_id
msg_status = self.msg_status.get("%s.%s" % (chat_id, msg_id), None)
# dispatch the query
if msg_status in [Flags.CONFIRM_LINK]:
self.link_chat_confirm(bot, chat_id, msg_id, text)
elif msg_status in [Flags.EXEC_LINK]:
self.link_chat_exec(bot, chat_id, msg_id, text)
elif msg_status == Flags.START_CHOOSE_CHAT:
self.make_chat_head(bot, chat_id, msg_id, text)
elif msg_status == Flags.COMMAND_PENDING:
self.command_exec(bot, chat_id, msg_id, text)
else:
bot.editMessageText(text="Session expired. Please try again. (SE01)",
chat_id=chat_id,
message_id=msg_id)
@staticmethod
def _reply_error(bot, update, errmsg):
"""
A wrapper that directly replies to a message with error details.
Returns:
telegram.Message: Message sent
"""
return bot.send_message(update.message.chat.id, errmsg, reply_to_message_id=update.message.message_id)
def process_msg(self, msg):
"""
Process a message from slave channel and deliver it to the user.
Args:
msg (EFBMsg): The message.
"""
try:
xid = datetime.datetime.now().timestamp()
self.logger.debug("%s, Msg text: %s", xid, msg.text)
self.logger.debug("%s, process_msg_step_0", xid)
chat_uid = "%s.%s" % (msg.channel_id, msg.origin['uid'])
tg_chats = db.get_chat_assoc(slave_uid=chat_uid)
tg_chat = None
multi_slaves = False
if tg_chats:
tg_chat = tg_chats[0]
slaves = db.get_chat_assoc(master_uid=tg_chat)
if slaves and len(slaves) > 1:
multi_slaves = True
msg_prefix = "" # For group member name
tg_chat_assoced = False
if msg.source != MsgSource.Group:
msg.member = {"uid": -1, "name": "", "alias": ""}
# Generate chat text template & Decide type target
tg_dest = getattr(config, self.channel_id)['admins'][0]
self.logger.debug("%s, process_msg_step_1, tg_dest=%s, msg.origin=%s", xid, tg_dest, str(msg.origin))
if msg.source == MsgSource.Group:
self.logger.debug("msg.member: %s", str(msg.member))
msg_prefix = msg.member['name'] if msg.member['name'] == msg.member['alias'] or not msg.member['alias'] \
else "%s (%s)" % (msg.member['alias'], msg.member['name'])
if tg_chat: # if this chat is linked
tg_dest = int(tg_chat.split('.')[1])
tg_chat_assoced = True
if tg_chat and not multi_slaves: # if singly linked
if msg_prefix: # if group message
msg_template = "%s:\n%s" % (msg_prefix, "%s")
else:
msg_template = "%s"
elif msg.source == MsgSource.User:
emoji_prefix = msg.channel_emoji + utils.Emojis.get_source_emoji(msg.source)
name_prefix = msg.origin["name"] if msg.origin["alias"] == msg.origin["name"] or not msg.origin['alias'] \
else "%s (%s)" % (msg.origin["alias"], msg.origin["name"])
msg_template = "%s %s:\n%s" % (emoji_prefix, name_prefix, "%s")
elif msg.source == MsgSource.Group:
emoji_prefix = msg.channel_emoji + utils.Emojis.get_source_emoji(msg.source)
name_prefix = msg.origin["name"] if msg.origin["alias"] == msg.origin["name"] or not msg.origin['alias'] \
else "%s (%s)" % (msg.origin["alias"], msg.origin["name"])
msg_template = "%s %s [%s]:\n%s" % (emoji_prefix, msg_prefix, name_prefix, "%s")
elif msg.source == MsgSource.System:
emoji_prefix = msg.channel_emoji + utils.Emojis.get_source_emoji(msg.source)
name_prefix = msg.origin["name"] if msg.origin["alias"] == msg.origin["name"] or not msg.origin['alias'] \
else "%s (%s)" % (msg.origin["alias"], msg.origin["name"])
msg_template = "%s %s:\n%s" % (emoji_prefix, name_prefix, "%s")
else:
msg_template = "Unknown message source (%s)\n%s" % (msg.source, "%s")
# Type dispatching
self.logger.debug("%s, process_msg_step_2", xid)
append_last_msg = False
if msg.type == MsgType.Text:
parse_mode = "HTML" if self._flag("text_as_html", False) else None
if tg_chat_assoced:
last_msg = db.get_last_msg_from_chat(tg_dest)
if last_msg:
if last_msg.msg_type == "Text":
append_last_msg = str(last_msg.slave_origin_uid) == "%s.%s" % (msg.channel_id, msg.origin['uid'])
if msg.source == MsgSource.Group:
append_last_msg &= str(last_msg.slave_member_uid) == str(msg.member['uid'])
append_last_msg &= datetime.datetime.now() - last_msg.time <= datetime.timedelta(
seconds=self._flag('join_msg_threshold_secs', 15))
else:
append_last_msg = False
else:
append_last_msg = False
self.logger.debug("Text: Append last msg: %s", append_last_msg)
self.logger.debug("%s, process_msg_step_3_0, tg_dest = %s, tg_chat_assoced = %s, append_last_msg = %s",
xid, tg_dest, tg_chat_assoced, append_last_msg)
if tg_chat_assoced and append_last_msg:
self.logger.debug("%s, process_msg_step_3_0_1", xid)
msg.text = "%s\n%s" % (last_msg.text, msg.text)
try:
tg_msg = self.bot.bot.editMessageText(chat_id=tg_dest,
message_id=last_msg.master_msg_id.split(".", 1)[1],
text=msg_template % msg.text,
parse_mode=parse_mode)
except telegram.error.BadRequest:
tg_msg = self.bot.bot.editMessageText(chat_id=tg_dest,
message_id=last_msg.master_msg_id.split(".", 1)[1],
text=msg_template % msg.text)
else:
self.logger.debug("%s, process_msg_step_3_0_3", xid)
try:
tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % msg.text, parse_mode=parse_mode)
except telegram.error.BadRequest:
tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % msg.text)
self.logger.debug("%s, process_msg_step_3_0_4, tg_msg = %s", xid, tg_msg)
self.logger.debug("%s, process_msg_step_3_1", xid)
elif msg.type == MsgType.Link:
thumbnail = urllib.parse.quote(msg.attributes["image"] or "", safe="?=&#:/")
thumbnail = "<a href=\"%s\">🔗</a>" % thumbnail if thumbnail else "🔗"
text = "%s <a href=\"%s\">%s</a>\n%s" % \
(thumbnail,
urllib.parse.quote(msg.attributes["url"], safe="?=&#:/"),
html.escape(msg.attributes["title"] or msg.attributes["url"]),
html.escape(msg.attributes["description"] or ""))
if msg.text:
text += "\n\n" + msg.text
try:
tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % text, parse_mode="HTML")
except telegram.error.BadRequest:
text = "🔗 %s\n%s\n\n%s" % (html.escape(msg.attributes["title"] or ""),
html.escape(msg.attributes["description"] or ""),
urllib.parse.quote(msg.attributes["url"] or "", safe="?=&#:/"))
if msg.text:
text += "\n\n" + msg.text
tg_msg = self.bot.bot.send_message(tg_dest, text=msg_template % text)
elif msg.type in [MsgType.Image, MsgType.Sticker]:
self.logger.debug("%s, process_msg_step_3_2", xid)
self.logger.debug("Received %s\nPath: %s\nMIME: %s", msg.type, msg.path, msg.mime)
self.logger.debug("Path: %s\nSize: %s", msg.path, os.stat(msg.path).st_size)
if os.stat(msg.path).st_size == 0:
os.remove(msg.path)
tg_msg = self.bot.bot.send_message(tg_dest,
msg_template % ("Error: Empty %s received. (MS01)" % msg.type))
else:
if not msg.text:
if msg.type == MsgType.Image:
msg.text = "sent a picture."
elif msg.type == MsgType.Sticker:
msg.text = "sent a sticker."
if msg.mime == "image/gif":
tg_msg = self.bot.bot.sendDocument(tg_dest, msg.file, caption=msg_template % msg.text)
else:
try:
tg_msg = self.bot.bot.sendPhoto(tg_dest, msg.file, caption=msg_template % msg.text)
except telegram.error.BadRequest:
tg_msg = self.bot.bot.sendDocument(tg_dest, msg.file, caption=msg_template % msg.text)
os.remove(msg.path)
self.logger.debug("%s, process_msg_step_3_3", xid)
elif msg.type == MsgType.File:
if os.stat(msg.path).st_size == 0:
os.remove(msg.path)
tg_msg = self.bot.bot.send_message(tg_dest,
msg_template % ("Error: Empty %s received. (MS02)" % msg.type))
else:
if not msg.filename:
file_name = os.path.basename(msg.path)
msg.text = "sent a file."
else:
file_name = msg.filename
tg_msg = self.bot.bot.send_document(tg_dest, msg.file, caption=msg_template % msg.text,
filename=file_name)
os.remove(msg.path)
elif msg.type == MsgType.Audio:
if os.stat(msg.path).st_size == 0:
os.remove(msg.path)
return self.bot.bot.send_message(tg_dest,
msg_template % ("Error: Empty %s received. (MS03)" % msg.type))
msg.text = msg.text or ''
self.logger.debug("%s, process_msg_step_4_1, no_conversion = %s", xid,
self._flag("no_conversion", False))
if self._flag("no_conversion", False):
self.logger.debug("%s, process_msg_step_4_2, mime = %s", xid, msg.mime)
if msg.mime == "audio/mpeg":
tg_msg = self.bot.bot.sendAudio(tg_dest, msg.file, caption=msg_template % msg.text)
else:
tg_msg = self.bot.bot.sendDocument(tg_dest, msg.file, caption=msg_template % msg.text)
else:
pydub.AudioSegment.from_file(msg.file).export("%s.ogg" % msg.path,
format="ogg",
codec="libopus",
bitrate="65536",
parameters=["-vbr", "on", "-compression_level", "10"])
ogg_file = open("%s.ogg" % msg.path, 'rb')
tg_msg = self.bot.bot.sendVoice(tg_dest, ogg_file, caption=msg_template % msg.text)
os.remove("%s.ogg" % msg.path)
os.remove(msg.path)
elif msg.type == MsgType.Location:
self.logger.info("---\nsending venue\nlat: %s, long: %s\ntitle: %s\naddr: %s",
msg.attributes['latitude'], msg.attributes['longitude'], msg.text, msg_template % "")
tg_msg = self.bot.bot.sendVenue(tg_dest, latitude=msg.attributes['latitude'],
longitude=msg.attributes['longitude'], title=msg.text,
address=msg_template % "")
elif msg.type == MsgType.Video:
if os.stat(msg.path).st_size == 0:
os.remove(msg.path)
return self.bot.bot.send_message(tg_dest, msg_template % ("Error: Empty %s received" % msg.type))
if not msg.text:
msg.text = "sent a video."
tg_msg = self.bot.bot.sendVideo(tg_dest, msg.file, caption=msg_template % msg.text)
os.remove(msg.path)
elif msg.type == MsgType.Command:
buttons = []
for i, ival in enumerate(msg.attributes['commands']):
buttons.append([telegram.InlineKeyboardButton(ival['name'], callback_data=str(i))])
tg_msg = self.bot.bot.send_message(tg_dest, msg_template % msg.text,
reply_markup=telegram.InlineKeyboardMarkup(buttons))
self.msg_status["%s.%s" % (tg_dest, tg_msg.message_id)] = Flags.COMMAND_PENDING
self.msg_storage["%s.%s" % (tg_dest, tg_msg.message_id)] = {"channel": msg.channel_id,
"text": msg_template % msg.text,
"commands": msg.attributes['commands']}
else:
tg_msg = self.bot.bot.send_message(tg_dest, msg_template % "Unsupported incoming message type. (UT01)")
self.logger.debug("%s, process_msg_step_4", xid)
if msg.source in (MsgSource.User, MsgSource.Group):
msg_log = {"master_msg_id": "%s.%s" % (tg_msg.chat.id, tg_msg.message_id),
"text": msg.text or "Sent a %s." % msg.type,
"msg_type": msg.type,
"sent_to": "Master",
"slave_origin_uid": "%s.%s" % (msg.channel_id, msg.origin['uid']),
"slave_origin_display_name": msg.origin['alias'],
"slave_member_uid": msg.member['uid'],
"slave_member_display_name": msg.member['alias'],
"slave_message_uid": msg.uid}
if tg_chat_assoced and append_last_msg:
msg_log['update'] = True
db.add_msg_log(**msg_log)
self.logger.debug("%s, process_msg_step_5", xid)
except Exception as e:
self.logger.error(repr(e) + traceback.format_exc())
def slave_chats_pagination(self, storage_id, offset=0, filter=""):
"""
Generate a list of (list of) `InlineKeyboardButton`s of chats in slave channels,
based on the status of message located by `storage_id` and the paging from
`offset` value.
Args:
storage_id (str): Message_storage ID for generating the buttons list.
offset (int): Offset for pagination
Returns:
tuple (str, list of list of InlineKeyboardButton):
A tuple: legend, chat_btn_list
`legend` is the legend of all Emoji headings in the entire list.
`chat_btn_list` is a list which can be fit into `telegram.InlineKeyboardMarkup`.
"""
legend = [
"%s: Linked" % utils.Emojis.LINK_EMOJI,
"%s: User" % utils.Emojis.USER_EMOJI,
"%s: Group" % utils.Emojis.GROUP_EMOJI
]
if self.msg_storage.get(storage_id, None):
chats = self.msg_storage[storage_id]['chats']
channels = self.msg_storage[storage_id]['channels']
count = self.msg_storage[storage_id]['count']
else:
rfilter = re.compile(filter, re.DOTALL | re.IGNORECASE)
if filter:
self.logger.debug("Filter string: %s", filter)
chats = []
channels = {}
for slave_id in self.slaves:
slave = self.slaves[slave_id]
slave_chats = slave.get_chats()
channels[slave.channel_id] = {
"channel_name": slave.channel_name,
"channel_emoji": slave.channel_emoji
}
for chat in slave_chats:
c = {
"channel_id": slave.channel_id,
"channel_name": slave.channel_name,
"channel_emoji": slave.channel_emoji,
"chat_name": chat['name'],
"chat_alias": chat['alias'],
"chat_uid": chat['uid'],
"type": chat['type']
}
entry_string = "Channel: %s\nName: %s\nAlias: %s\nID: %s\nType: %s" \
% (c['channel_name'], c['chat_name'], c['chat_alias'], c['chat_uid'], c['type'])
if not filter or rfilter.search(entry_string):
chats.append(c)
count = len(chats)
self.msg_storage[storage_id] = {
"offset": offset,
"count": len(chats),
"chats": chats.copy(),
"channels": channels.copy()
}
for ch in channels:
legend.append("%s: %s" % (channels[ch]['channel_emoji'], channels[ch]['channel_name']))
# Build inline button list
chat_btn_list = []
chats_per_page = self._flag("chats_per_page", 10)
for i in range(offset, min(offset + chats_per_page, count)):
chat = chats[i]
linked = utils.Emojis.LINK_EMOJI if bool(
db.get_chat_assoc(slave_uid="%s.%s" % (chat['channel_id'], chat['chat_uid']))) else ""
chat_type = utils.Emojis.get_source_emoji(chat['type'])
chat_name = chat['chat_alias'] if chat['chat_name'] == chat['chat_alias'] else "%s (%s)" % (
chat['chat_alias'], chat['chat_name'])
button_text = "%s%s: %s %s" % (chat['channel_emoji'], chat_type, chat_name, linked)
button_callback = "chat %s" % i
chat_btn_list.append([telegram.InlineKeyboardButton(button_text, callback_data=button_callback)])
# Pagination
page_number_row = []
if offset - chats_per_page >= 0:
page_number_row.append(telegram.InlineKeyboardButton("< Prev", callback_data="offset %s" % (
offset - chats_per_page)))
page_number_row.append(telegram.InlineKeyboardButton("Cancel", callback_data=Flags.CANCEL_PROCESS))
if offset + chats_per_page < count:
page_number_row.append(telegram.InlineKeyboardButton("Next >", callback_data="offset %s" % (
offset + chats_per_page)))
chat_btn_list.append(page_number_row)
return legend, chat_btn_list
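# --- Editor's note (not part of the original module): with the default
# chats_per_page = 10, `offset` steps through the filtered chat list in pages
# of ten; the "Prev"/"Next" buttons re-enter this method with offset ± 10,
# and the cached msg_storage entry keeps the filtered list stable across pages.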
def link_chat_show_list(self, bot, update, args=None):
"""
Show the list of available chats for linking.
Triggered by `/link`.
Args:
bot: Telegram Bot instance
update: Message update
"""
args = args or []
self.link_chat_gen_list(bot, self.admins[0], filter=" ".join(args))
def link_chat_gen_list(self, bot, chat_id, message_id=None, offset=0, filter=""):
"""
Generate the list for chat linking, and update it to a message.
Args:
bot: Telegram Bot instance
chat_id: Chat ID
message_id: ID of message to be updated, None to send a new message.
offset: Offset for pagination.
"""
if not message_id:
message_id = bot.send_message(chat_id, "Processing...").message_id
msg_text = "Please choose the chat you want to link with ...\n\nLegend:\n"
legend, chat_btn_list = self.slave_chats_pagination("%s.%s" % (chat_id, message_id), offset, filter=filter)
for i in legend:
msg_text += "%s\n" % i
msg = bot.editMessageText(chat_id=chat_id, message_id=message_id, text=msg_text,
reply_markup=telegram.InlineKeyboardMarkup(chat_btn_list))
self.msg_status["%s.%s" % (chat_id, msg.message_id)] = Flags.CONFIRM_LINK
def link_chat_confirm(self, bot, tg_chat_id, tg_msg_id, callback_uid):
"""
Confirmation of chat linking. Triggered by callback message on status `Flags.CONFIRM_LINK`.
Args:
bot: Telegram Bot instance
tg_chat_id: Chat ID
tg_msg_id: Message ID triggered the callback
callback_uid: Callback message
"""
if callback_uid.split()[0] == "offset":
return self.link_chat_gen_list(bot, tg_chat_id, message_id=tg_msg_id, offset=int(callback_uid.split()[1]))
if callback_uid == Flags.CANCEL_PROCESS:
txt = "Cancelled."
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
return bot.editMessageText(text=txt,
chat_id=tg_chat_id,
message_id=tg_msg_id)
if callback_uid[:4] != "chat":
txt = "Invalid parameter (%s). (IP01)" % callback_uid
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
return bot.editMessageText(text=txt,
chat_id=tg_chat_id,
message_id=tg_msg_id)
callback_uid = int(callback_uid.split()[1])
chat = self.msg_storage["%s.%s" % (tg_chat_id, tg_msg_id)]['chats'][callback_uid]
chat_uid = "%s.%s" % (chat['channel_id'], chat['chat_uid'])
chat_display_name = chat['chat_name'] if chat['chat_name'] == chat['chat_alias'] else "%s (%s)" % (
chat['chat_alias'], chat['chat_name'])
chat_display_name = "'%s' from '%s %s'" % (chat_display_name, chat['channel_emoji'], chat['channel_name'])
linked = bool(db.get_chat_assoc(slave_uid=chat_uid))
self.msg_status["%s.%s" % (tg_chat_id, tg_msg_id)] = Flags.EXEC_LINK
self.msg_storage["%s.%s" % (tg_chat_id, tg_msg_id)] = {
"chat_uid": chat_uid,
"chat_display_name": chat_display_name,
"chats": [chat.copy()],
"tg_chat_id": tg_chat_id,
"tg_msg_id": tg_msg_id
}
txt = "You've selected chat %s." % chat_display_name
if linked:
txt += "\nThis chat has already linked to Telegram."
txt += "\nWhat would you like to do?"
link_url = "https://telegram.me/%s?startgroup=%s" % (
self.me.username, urllib.parse.quote(self.b64en("%s.%s" % (tg_chat_id, tg_msg_id))))
self.logger.debug("Telegram start trigger for linking chat: %s", link_url)
if linked:
btn_list = [telegram.InlineKeyboardButton("Relink", url=link_url),
telegram.InlineKeyboardButton("Unlink", callback_data="unlink 0")]
else:
btn_list = [telegram.InlineKeyboardButton("Link", url=link_url)]
btn_list.append(telegram.InlineKeyboardButton("Cancel", callback_data=Flags.CANCEL_PROCESS))
bot.editMessageText(text=txt,
chat_id=tg_chat_id,
message_id=tg_msg_id,
reply_markup=telegram.InlineKeyboardMarkup([btn_list]))
def link_chat_exec(self, bot, tg_chat_id, tg_msg_id, callback_uid):
"""
Action to link a chat. Triggered by callback message with status `Flags.EXEC_LINK`.
Args:
bot: Telegram Bot instance
tg_chat_id: Chat ID
tg_msg_id: Message ID triggered the callback
callback_uid: Callback message
"""
if callback_uid == Flags.CANCEL_PROCESS:
txt = "Cancelled."
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
return bot.editMessageText(text=txt,
chat_id=tg_chat_id,
message_id=tg_msg_id)
cmd, chat_lid = callback_uid.split()
chat = self.msg_storage["%s.%s" % (tg_chat_id, tg_msg_id)]['chats'][int(chat_lid)]
chat_uid = "%s.%s" % (chat['channel_id'], chat['chat_uid'])
chat_display_name = chat['chat_name'] if chat['chat_name'] == chat['chat_alias'] else "%s (%s)" % (
chat['chat_alias'], chat['chat_name'])
chat_display_name = "'%s' from '%s %s'" % (chat_display_name, chat['channel_emoji'], chat['channel_name']) \
if chat['channel_name'] else "'%s'" % chat_display_name
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
if cmd == "unlink":
db.remove_chat_assoc(slave_uid=chat_uid)
txt = "Chat %s is unlinked." % (chat_display_name)
return bot.editMessageText(text=txt, chat_id=tg_chat_id, message_id=tg_msg_id)
txt = "Command '%s' (%s) is not recognised, please try again" % (cmd, callback_uid)
bot.editMessageText(text=txt, chat_id=tg_chat_id, message_id=tg_msg_id)
def unlink_all(self, bot, update):
if update.message.chat.id == update.message.from_user.id:
return bot.send_message(update.message.chat.id, "Send `/unlink_all` to a group to unlink all remote chats "
"from it.",
parse_mode=telegram.ParseMode.MARKDOWN,
reply_to_message_id=update.message.message_id)
assocs = db.get_chat_assoc(master_uid="%s.%s" % (self.channel_id, update.message.chat.id))
if len(assocs) < 1:
return bot.send_message(update.message.chat.id, "No chat is linked to the group.",
reply_to_message_id=update.message.message_id)
else:
db.remove_chat_assoc(master_uid="%s.%s" % (self.channel_id, update.message.chat.id))
return bot.send_message(update.message.chat.id, "All chats has been unlinked from this group. (%s)" % len(assocs),
reply_to_message_id=update.message.message_id)
def start_chat_list(self, bot, update, args=None):
"""
Send a list of chats for chat head generation.
Triggered by `/chat`.
Args:
bot: Telegram Bot instance
update: Message update
args: Arguments from the command message
"""
args = args or []
msg_id = self.chat_head_req_generate(bot, self.admins[0], filter=" ".join(args))
self.msg_status["%s.%s" % (self.admins[0], msg_id)] = Flags.START_CHOOSE_CHAT
def chat_head_req_generate(self, bot, chat_id, message_id=None, offset=0, filter=""):
"""
Generate the list for chat head, and update it to a message.
Args:
bot: Telegram Bot instance
chat_id: Chat ID
message_id: ID of message to be updated, None to send a new message.
offset: Offset for pagination.
filter: Regex String used as a filter.
"""
if not message_id:
message_id = bot.send_message(chat_id, text="Processing...").message_id
legend, chat_btn_list = self.slave_chats_pagination("%s.%s" % (chat_id, message_id), offset, filter=filter)
msg_text = "Choose a chat you want to start with...\n\nLegend:\n"
for i in legend:
msg_text += "%s\n" % i
bot.editMessageText(text=msg_text,
chat_id=chat_id,
message_id=message_id,
reply_markup=telegram.InlineKeyboardMarkup(chat_btn_list))
return message_id
def make_chat_head(self, bot, tg_chat_id, tg_msg_id, callback_uid):
"""
Create a chat head. Triggered by callback message with status `Flags.START_CHOOSE_CHAT`.
Args:
bot: Telegram Bot instance
tg_chat_id: Chat ID
tg_msg_id: Message ID triggered the callback
callback_uid: Callback message
"""
if callback_uid.split()[0] == "offset":
return self.chat_head_req_generate(bot, tg_chat_id, message_id=tg_msg_id,
offset=int(callback_uid.split()[1]))
if callback_uid == Flags.CANCEL_PROCESS:
txt = "Cancelled."
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
return bot.editMessageText(text=txt,
chat_id=tg_chat_id,
message_id=tg_msg_id)
if callback_uid[:4] != "chat":
txt = "Invalid parameter. (%s)" % callback_uid
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
return bot.editMessageText(text=txt,
chat_id=tg_chat_id,
message_id=tg_msg_id)
callback_uid = int(callback_uid.split()[1])
chat = self.msg_storage["%s.%s" % (tg_chat_id, tg_msg_id)]['chats'][callback_uid]
chat_uid = "%s.%s" % (chat['channel_id'], chat['chat_uid'])
chat_display_name = chat['chat_name'] if chat['chat_name'] == chat['chat_alias'] else "%s (%s)" % (
chat['chat_alias'], chat['chat_name'])
chat_display_name = "'%s' from '%s %s'" % (chat_display_name, chat['channel_emoji'], chat['channel_name'])
self.msg_status.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
self.msg_storage.pop("%s.%s" % (tg_chat_id, tg_msg_id), None)
txt = "Reply to this message to chat with %s." % chat_display_name
msg_log = {"master_msg_id": "%s.%s" % (tg_chat_id, tg_msg_id),
"text": txt,
"msg_type": "Text",
"sent_to": "Master",
"slave_origin_uid": chat_uid,
"slave_origin_display_name": chat_display_name,
"slave_member_uid": None,
"slave_member_display_name": None}
db.add_msg_log(**msg_log)
bot.editMessageText(text=txt, chat_id=tg_chat_id, message_id=tg_msg_id)
def command_exec(self, bot, chat_id, message_id, callback):
"""
Run a command from a command message.
Triggered by callback message with status `Flags.COMMAND_PENDING`.
Args:
bot: Telegram Bot instance
chat_id: Chat ID
message_id: Message ID triggered the callback
callback: Callback message
"""
if not callback.isdecimal():
msg = "Invalid parameter: %s. (CE01)" % callback
self.msg_status.pop("%s.%s" % (chat_id, message_id), None)
self.msg_storage.pop("%s.%s" % (chat_id, message_id), None)
return bot.editMessageText(text=msg, chat_id=chat_id, message_id=message_id)
elif not (0 <= int(callback) < len(self.msg_storage["%s.%s" % (chat_id, message_id)])):
msg = "Index out of bound: %s. (CE02)" % callback
self.msg_status.pop("%s.%s" % (chat_id, message_id), None)
self.msg_storage.pop("%s.%s" % (chat_id, message_id), None)
return bot.editMessageText(text=msg, chat_id=chat_id, message_id=message_id)
callback = int(callback)
channel_id = self.msg_storage["%s.%s" % (chat_id, message_id)]['channel']
command = self.msg_storage["%s.%s" % (chat_id, message_id)]['commands'][callback]
msg = self.msg_storage["%s.%s" % (chat_id, message_id)]['text'] + "\n------\n" + getattr(
self.slaves[channel_id], command['callable'])(*command['args'], **command['kwargs'])
self.msg_status.pop("%s.%s" % (chat_id, message_id), None)
self.msg_storage.pop("%s.%s" % (chat_id, message_id), None)
return bot.editMessageText(text=msg, chat_id=chat_id, message_id=message_id)
def extra_help(self, bot, update):
"""
Show list of extra functions and their usage.
Triggered by `/extra`.
Args:
bot: Telegram Bot instance
update: Message update
"""
msg = "List of slave channel features:"
for n, i in enumerate(sorted(self.slaves)):
i = self.slaves[i]
msg += "\n\n<b>%s %s</b>" % (i.channel_emoji, i.channel_name)
xfns = i.get_extra_functions()
if xfns:
for j in xfns:
fn_name = "/%s_%s" % (n, j)
msg += "\n\n%s <b>(%s)</b>\n%s" % (
fn_name, xfns[j].name, xfns[j].desc.format(function_name=fn_name))
else:
msg += "No command found."
bot.send_message(update.message.chat.id, msg, parse_mode="HTML")
def extra_call(self, bot, update, groupdict=None):
"""
Call an extra function from slave channel.
Args:
bot: Telegram Bot instance
update: Message update
groupdict: Parameters offered by the message
"""
if int(groupdict['id']) >= len(self.slaves):
return self._reply_error(bot, update, "Invalid slave channel ID. (XC01)")
ch = self.slaves[sorted(self.slaves)[int(groupdict['id'])]]
fns = ch.get_extra_functions()
if groupdict['command'] not in fns:
return self._reply_error(bot, update, "Command not found in selected channel. (XC02)")
header = "%s %s: %s\n-------\n" % (ch.channel_emoji, ch.channel_name, fns[groupdict['command']].name)
msg = bot.send_message(update.message.chat.id, header + "Please wait...")
result = fns[groupdict['command']](" ".join(update.message.text.split(' ', 1)[1:]))
bot.editMessageText(text=header + result, chat_id=update.message.chat.id, message_id=msg.message_id)
def msg(self, bot, update):
"""
Process, wrap and deliver messages from user.
Args:
bot: Telegram Bot instance
update: Message update
"""
self.logger.debug("----\nMsg from tg user:\n%s", update.message.to_dict())
target = None
multi_slaves = False
assoc = None
if update.message.chat.id != update.message.from_user.id: # from group
assocs = db.get_chat_assoc(master_uid="%s.%s" % (self.channel_id, update.message.chat.id))
if len(assocs) == 1:
assoc = assocs[0]
elif len(assocs) > 1:
multi_slaves = True
reply_to = bool(getattr(update.message, "reply_to_message", None))
private_chat = update.message.chat.id == update.message.from_user.id
if private_chat:
if reply_to:
try:
assoc = db.get_msg_log("%s.%s" % (
update.message.reply_to_message.chat.id,
update.message.reply_to_message.message_id)).slave_origin_uid
except:
return self._reply_error(bot, update,
"Message is not found in database. Please try with another one. (UC03)")
else:
return self._reply_error(bot, update,
"Please reply to an incoming message. (UC04)")
else: # group chat
if multi_slaves:
if reply_to:
try:
assoc = db.get_msg_log("%s.%s" % (
update.message.reply_to_message.chat.id,
update.message.reply_to_message.message_id)).slave_origin_uid
except:
return self._reply_error(bot, update,
"Message is not found in database. "
"Please try with another one. (UC05)")
else:
return self._reply_error(bot, update,
"This group is linked to multiple remote chats. "
"Please reply to an incoming message. "
"To unlink all remote chats, please send /unlink_all . (UC06)")
elif assoc:
if reply_to:
try:
targetlog = db.get_msg_log(
"%s.%s" % (
update.message.reply_to_message.chat.id, update.message.reply_to_message.message_id))
target = targetlog.slave_origin_uid
targetChannel, targetUid = target.split('.', 1)
except:
return self._reply_error(bot, update,
"Message is not found in database. "
"Please try with another message. (UC07)")
else:
return self._reply_error(bot, update,
"This group is not linked to any chat. (UC06)")
self.logger.debug("Destination chat = %s", assoc)
        channel, uid = assoc.split('.', 1)  # uid may itself contain dots
if channel not in self.slaves:
return self._reply_error(bot, update, "Internal error: Channel not found.")
try:
m = EFBMsg(self)
m.uid = "%s.%s" % (update.message.chat.id, update.message.message_id)
mtype = get_msg_type(update.message)
# Chat and author related stuff
m.origin['uid'] = update.message.from_user.id
if getattr(update.message.from_user, "last_name", None):
m.origin['alias'] = "%s %s" % (update.message.from_user.first_name, update.message.from_user.last_name)
else:
m.origin['alias'] = update.message.from_user.first_name
if getattr(update.message.from_user, "username", None):
m.origin['name'] = "@%s" % update.message.from_user.id
else:
m.origin['name'] = m.origin['alias']
m.destination = {
'channel': channel,
'uid': uid,
'name': '',
'alias': ''
}
if target:
if targetChannel == channel:
trgtMsg = EFBMsg(self.slaves[targetChannel])
trgtMsg.type = MsgType.Text
trgtMsg.text = targetlog.text
trgtMsg.member = {
"name": targetlog.slave_member_display_name,
"alias": targetlog.slave_member_display_name,
"uid": targetlog.slave_member_uid
}
trgtMsg.origin = {
"name": targetlog.slave_origin_display_name,
"alias": targetlog.slave_origin_display_name,
"uid": targetlog.slave_origin_uid.split('.', 2)[1]
}
m.target = {
"type": TargetType.Message,
"target": trgtMsg
}
# Type specific stuff
self.logger.debug("Msg type: %s", mtype)
if self.TYPE_DICT.get(mtype, None):
m.type = self.TYPE_DICT[mtype]
else:
raise EFBMessageTypeNotSupported()
if m.type not in self.slaves[channel].supported_message_types:
raise EFBMessageTypeNotSupported()
if mtype == TGMsgType.Text:
m.type = MsgType.Text
m.text = update.message.text
elif mtype == TGMsgType.Photo:
m.type = MsgType.Image
m.text = update.message.caption
m.path, m.mime = self._download_file(update.message, update.message.photo[-1], m.type)
m.file = open(m.path, "rb")
elif mtype == TGMsgType.Sticker:
m.type = MsgType.Sticker
m.text = ""
m.path, m.mime = self._download_file(update.message, update.message.sticker, m.type)
m.file = open(m.path, "rb")
elif mtype == TGMsgType.Document:
m.text = update.message.caption
self.logger.debug("tg: Document file received")
m.filename = getattr(update.message.document, "file_name", None) or None
if update.message.document.mime_type == "video/mp4":
self.logger.debug("tg: Telegram GIF received")
m.type = MsgType.Image
m.path, m.mime = self._download_gif(update.message, update.message.document, m.type)
else:
m.type = MsgType.File
m.path, m.mime = self._download_file(update.message, update.message.document, m.type)
m.mime = update.message.document.mime_type or m.mime
m.file = open(m.path, "rb")
elif mtype == TGMsgType.Video:
m.type = MsgType.Video
m.text = update.message.caption
m.path, m.mime = self._download_file(update.message, update.message.video, m.type)
m.file = open(m.path, "rb")
elif mtype == TGMsgType.Audio:
m.type = MsgType.Audio
m.text = "%s - %s\n%s" % (
update.message.audio.title, update.message.audio.performer, update.message.caption)
                m.path, m.mime = self._download_file(update.message, update.message.audio, m.type)
                m.file = open(m.path, "rb")
elif mtype == TGMsgType.Voice:
m.type = MsgType.Audio
m.text = update.message.caption
                m.path, m.mime = self._download_file(update.message, update.message.voice, m.type)
                m.file = open(m.path, "rb")
elif mtype == TGMsgType.Location:
m.type = MsgType.Location
m.text = "Location"
m.attributes = {
"latitude": update.message.location.latitude,
"longitude": update.message.location.longitude
}
elif mtype == TGMsgType.Venue:
m.type = MsgType.Location
                m.text = update.message.venue.title + "\n" + update.message.venue.address
m.attributes = {
"latitude": update.message.venue.location.latitude,
"longitude": update.message.venue.location.longitude
}
else:
return self._reply_error(bot, update, "Message type not supported. (MN02)")
self.slaves[channel].send_message(m)
except EFBChatNotFound:
return self._reply_error(bot, update, "Chat is not reachable from the slave channel. (CN01)")
except EFBMessageTypeNotSupported:
return self._reply_error(bot, update, "Message type not supported. (MN01)")
except EFBMessageError as e:
return self._reply_error(bot, update, "Message is not sent. (MN01)\n\n%s" % str(e))
def _download_file(self, tg_msg, file_obj, msg_type):
"""
Download media file from telegram platform.
Args:
tg_msg: Telegram message instance
file_obj: File object
msg_type: Type of message
Returns:
tuple of str[2]: Full path of the file, MIME type
"""
path = os.path.join("storage", self.channel_id)
if not os.path.exists(path):
os.makedirs(path)
size = getattr(file_obj, "file_size", None)
file_id = file_obj.file_id
if size and size > 20 * 1024 ** 2:
raise EFBMessageError("Attachment is too large. Maximum 20 MB. (AT01)")
f = self.bot.bot.getFile(file_id)
fname = "%s_%s_%s_%s" % (msg_type, tg_msg.chat.id, tg_msg.message_id, int(time.time()))
fullpath = os.path.join(path, fname)
f.download(fullpath)
mime = getattr(file_obj, "mime_type", magic.from_file(fullpath, mime=True))
if type(mime) is bytes:
mime = mime.decode()
guess_ext = mimetypes.guess_extension(mime) or ".unknown"
if guess_ext == ".unknown":
self.logger.warning("File %s with mime %s has no matching extensions.", fullpath, mime)
ext = ".jpeg" if mime == "image/jpeg" else guess_ext
os.rename(fullpath, "%s%s" % (fullpath, ext))
fullpath = "%s%s" % (fullpath, ext)
return fullpath, mime
def _download_gif(self, tg_msg, file_id, msg_type):
"""
Download and convert GIF image.
Args:
tg_msg: Telegram message instance
file_id: File ID
msg_type: Type of message
Returns:
tuple of str[2]: Full path of the file, MIME type
"""
fullpath, mime = self._download_file(tg_msg, file_id, msg_type)
VideoFileClip(fullpath).write_gif(fullpath + ".gif", program="ffmpeg")
return fullpath + ".gif", "image/gif"
def start(self, bot, update, args=[]):
"""
Process bot command `/start`.
Args:
bot: Telegram Bot instance
update: Message update
args: Arguments from message
"""
if update.message.from_user.id != update.message.chat.id: # from group
try:
data = self.msg_storage[self.b64de(args[0])]
except KeyError:
update.message.reply_text("Session expired or unknown parameter. (SE02)")
chat_uid = data["chat_uid"]
chat_display_name = data["chat_display_name"]
slave_channel, slave_chat_uid = chat_uid.split('.', 1)
if slave_channel in self.slaves:
db.add_chat_assoc(master_uid="%s.%s" % (self.channel_id, update.message.chat.id),
slave_uid=chat_uid,
multiple_slave=self._flag("multiple_slave_chats", False))
txt = "Chat '%s' is now linked." % chat_display_name
unlink_btn = telegram.InlineKeyboardMarkup(
[[telegram.InlineKeyboardButton("Unlink", callback_data="unlink 0")]])
new_msg = bot.send_message(update.message.chat.id, text=txt, reply_markup=unlink_btn)
self.msg_status[args[0]] = \
self.msg_status["%s.%s" % (update.message.chat.id, new_msg.message_id)] = \
Flags.EXEC_LINK
self.msg_storage[args[0]] = \
self.msg_storage["%s.%s" % (update.message.chat.id, new_msg.message_id)] = \
{"chats": data['chats']}
bot.editMessageText(chat_id=data["tg_chat_id"],
message_id=data["tg_msg_id"],
text=txt,
reply_markup=unlink_btn)
self.msg_status.pop(args[0], False)
else:
txt = "Welcome to EH Forwarder Bot: EFB Telegram Master Channel.\n\n" \
"To learn more, please visit https://github.com/blueset/ehForwarderBot ."
bot.send_message(update.message.from_user.id, txt)
def help(self, bot, update):
txt = "EFB Telegram Master Channel\n" \
"/link\n" \
" Link a remote chat to an empty Telegram group.\n" \
" Followed by a regular expression to filter results.\n" \
"/chat\n" \
" Generate a chat head to start a conversation.\n" \
" Followed by a regular expression to filter results.\n" \
"/extra\n" \
" List all extra function from slave channels.\n" \
"/unlink_all\n" \
" Unlink all remote chats in this chat.\n" \
"/recog\n" \
" Reply to a voice message to convert it to text.\n" \
" Followed by a language code to choose a specific lanugage.\n" \
" You have to enable speech to text in the config file first.\n" \
"/help\n" \
" Print this command list."
bot.send_message(update.message.from_user.id, txt)
def recognize_speech(self, bot, update, args=[]):
"""
Recognise voice message. Triggered by `/recog`.
Args:
bot: Telegram Bot instance
update: Message update
args: Arguments from message
"""
class speechNotImplemented:
lang_list = []
def __init__(self, *args, **kwargs):
pass
def recognize(self, *args, **kwargs):
return ["Not enabled or error in configuration."]
if not getattr(update.message, "reply_to_message", None):
txt = "/recog [lang_code]\nReply to a voice with this command to recognize it.\n" \
"mples:\n/recog\n/recog zh\n/recog en\n(RS01)"
return self._reply_error(bot, update, txt)
if not getattr(update.message.reply_to_message, "voice"):
return self._reply_error(bot, update,
"Reply only to a voice with this command to recognize it. (RS02)")
try:
baidu_speech = speech.BaiduSpeech(getattr(config, self.channel_id)['baidu_speech_api'])
except:
baidu_speech = speechNotImplemented()
try:
bing_speech = speech.BingSpeech(getattr(config, self.channel_id)['bing_speech_api'])
except:
bing_speech = speechNotImplemented()
if len(args) > 0 and (args[0][:2] not in ['zh', 'en', 'ja'] and args[0] not in bing_speech.lang_list):
return self._reply_error(bot, update, "Language is not supported. Try with zh, ja or en. (RS03)")
if update.message.reply_to_message.voice.duration > 60:
return self._reply_error(bot, update, "Only voice shorter than 60s is supported. (RS04)")
path, mime = self._download_file(update.message, update.message.reply_to_message.voice, MsgType.Audio)
results = {}
if len(args) == 0:
results['Baidu (English)'] = baidu_speech.recognize(path, "en")
results['Baidu (Mandarin)'] = baidu_speech.recognize(path, "zh")
results['Bing (English)'] = bing_speech.recognize(path, "en-US")
results['Bing (Mandarin)'] = bing_speech.recognize(path, "zh-CN")
results['Bing (Japanese)'] = bing_speech.recognize(path, "ja-JP")
elif args[0][:2] == 'zh':
results['Baidu (Mandarin)'] = baidu_speech.recognize(path, "zh")
if args[0] in bing_speech.lang_list:
results['Bing (%s)' % args[0]] = bing_speech.recognize(path, args[0])
else:
results['Bing (Mandarin)'] = bing_speech.recognize(path, "zh-CN")
elif args[0][:2] == 'en':
results['Baidu (English)'] = baidu_speech.recognize(path, "en")
if args[0] in bing_speech.lang_list:
results['Bing (%s)' % args[0]] = bing_speech.recognize(path, args[0])
else:
results['Bing (English)'] = bing_speech.recognize(path, "en-US")
elif args[0][:2] == 'ja':
results['Bing (Japanese)'] = bing_speech.recognize(path, "ja-JP")
elif args[0][:2] == 'ct':
results['Baidu (Cantonese)'] = baidu_speech.recognize(path, "ct")
elif args[0] in bing_speech.lang_list:
results['Bing (%s)' % args[0]] = bing_speech.recognize(path, args[0])
msg = ""
for i in results:
msg += "\n*%s*:\n" % i
for j in results[i]:
msg += "%s\n" % j
msg = "Results:\n%s" % msg
bot.send_message(update.message.reply_to_message.chat.id, msg,
reply_to_message_id=update.message.reply_to_message.message_id,
parse_mode=telegram.ParseMode.MARKDOWN)
os.remove(path)
def poll(self):
"""
Message polling process.
"""
self.polling_from_tg()
while True:
try:
m = self.queue.get()
if m is None:
break
self.logger.info("Got message from queue\nType: %s\nText: %s\n----" % (m.type, m.text))
threading.Thread(target=self.process_msg, args=(m,)).start()
self.queue.task_done()
self.logger.info("Msg sent to TG, task_done marked.")
except Exception as e:
self.logger.error("Error occurred during message polling")
self.logger.error(repr(e))
self.bot.stop()
self.poll()
self.logger.debug("Gracefully stopping %s (%s).", self.channel_name, self.channel_id)
self.bot.stop()
self.logger.debug("%s (%s) gracefully stopped.", self.channel_name, self.channel_id)
def polling_from_tg(self):
"""
        Poll messages from the Telegram Bot API. Can be overridden to extend the polling behaviour.
"""
self.bot.start_polling(timeout=10)
def error(self, bot, update, error):
"""
Print error to console, and send error message to first admin.
Triggered by python-telegram-bot error callback.
"""
if "Conflict: terminated by other long poll or webhook (409)" in str(error):
msg = 'Please immediately turn off all EFB instances.\nAnother bot instance or web-hook detected.'
self.logger.error(msg)
bot.send_message(getattr(config, self.channel_id)['admins'][0], msg)
else:
try:
bot.send_message(getattr(config, self.channel_id)['admins'][0],
"EFB Telegram Master channel encountered error <code>%s</code> "
"caused by update <code>%s</code>.\n\n"
"Report issue: <a href=\"https://github.com/blueset/ehForwarderBot/issues/new\">GitHub Issue Page</a>" %
(html.escape(str(error)), html.escape(str(update))), parse_mode="HTML")
except:
bot.send_message(getattr(config, self.channel_id)['admins'][0],
"EFB Telegram Master channel encountered error\n%s\n"
"caused by update\n%s\n\n"
"Report issue: https://github.com/blueset/ehForwarderBot/issues/new" %
(html.escape(str(error)), html.escape(str(update))))
self.logger.error('ERROR! Update %s caused error %s' % (update, error))
def _flag(self, key, value):
"""
Retrieve value for experimental flags.
Args:
key: Key of the flag.
value: Default/fallback value.
Returns:
Value for the flag.
"""
return getattr(config, self.channel_id).get('flags', dict()).get(key, value)
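    # A minimal usage sketch (hedged; 'multiple_slave_chats' is the flag
    # used in start() above): with {'flags': {'multiple_slave_chats': True}}
    # in this channel's config, self._flag('multiple_slave_chats', False)
    # returns True; without a 'flags' dict it falls back to the default,
    # False.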
@property
def stop_polling(self):
return self._stop_polling
@stop_polling.setter
def stop_polling(self, val):
if val:
self.queue.put(None)
self._stop_polling = val
@staticmethod
def b64en(s):
return base64.b64encode(s.encode(), b"-_").decode().rstrip("=")
@staticmethod
def b64de(s):
return base64.b64decode((s + '=' * (- len(s) % 4)).encode(), b"-_").decode()
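    # A minimal round-trip sketch of the helpers above ("slave.123" is a
    # hypothetical chat UID): b64en uses the URL-safe alphabet "-_" and
    # strips "=" padding so the token is safe in a Telegram /start
    # deep-link; b64de restores it with '=' * (-len(s) % 4), which pads
    # back to a multiple of 4 characters.
    #   b64en("slave.123")     -> "c2xhdmUuMTIz"
    #   b64de("c2xhdmUuMTIz")  -> "slave.123"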
|
RoyXiang/ehForwarderBot
|
plugins/eh_telegram_master/__init__.py
|
Python
|
gpl-3.0
| 63,061
|
[
"VisIt"
] |
3c55b58da82807dd62bd5155b98e45db5b7da4ffa3995586d8675968bcc0a557
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
#!/usr/bin/env python
# Input:
# lattice
# incar dict?
# potenDir
# Output at outDir:
# 4 (or more?) files
# The cifMap represents the raw cif file.
# The icsdMap has a few simple computed values,
# and is retrieved by getIcsdMap, which calls mkIcsdMap.
# The vaspMap has more complex computed values,
# and is retrieved by getVaspMap, which calls mkVaspMap.
import datetime
import os
import re
import shlex
import sys
import traceback
import numpy as np
def badparms(msg):
print('\nError: %s' % (msg,))
print('Parms:')
print('')
print(' Specify either -inFile or (inDir and updateLog).')
print(' -buglev <int> debug level')
print(' -inFile <string> input cif file')
print('')
print(' -inDir <string> dir containing cif files')
print(' -updateLog <string> updated log of cif files processed')
print('')
print(' -inPotenDir <string> input dir containing pseudopotential subdirs')
print(' Or none.')
print(' -outPoscar <string> output POSCAR file')
print(' Or none.')
print(' -outPotcar <string> output POTCAR file')
print(' Or none.')
print('')
print('Example:')
print('./readCif.py -buglev 5 -inFile icsd_027856.cif -inPotenDir ~/vladan/td.pseudos/pseudos -outPoscar temp.poscar -outPotcar temp.potcar | less')
sys.exit(1)
#====================================================================
# xxx all xxx
#====================================================================
def main():
buglev = None
inFile = None
inDir = None
updateLog = None
inPotenDir = None
outPoscar = None
outPotcar = None
if len(sys.argv) % 2 != 1:
badparms('Parms must be key/value pairs')
for iarg in range(1, len(sys.argv), 2):
key = sys.argv[iarg]
val = sys.argv[iarg + 1]
if key == '-buglev':
buglev = int(val)
elif key == '-inFile':
inFile = val
elif key == '-inDir':
inDir = val
elif key == '-updateLog':
updateLog = val
elif key == '-inPotenDir':
inPotenDir = val
elif key == '-outPoscar':
outPoscar = val
elif key == '-outPotcar':
outPotcar = val
else:
badparms('unknown key: "%s"' % (key,))
if buglev == None:
badparms('parm not specified: -buglev')
if inFile != None:
if inDir != None or updateLog != None:
badparms('May specify either -inFile or (inDir and updateLog)')
else:
if inDir == None or updateLog == None:
badparms('May specify either -inFile or (inDir and updateLog)')
if inPotenDir == None:
badparms('parm not specified: -inPotenDir')
if outPoscar == None:
badparms('parm not specified: -outPoscar')
if outPotcar == None:
badparms('parm not specified: -outPotcar')
if inFile != None:
        cifRdr = CifReader(buglev, inFile)
icsdMap = cifRdr.getIcsdMap()
vaspMap = cifRdr.getVaspMap()
if outPoscar != 'none':
writePoscar(inFile, vaspMap, outPoscar)
if inPotenDir != 'none' and outPotcar != 'none':
writePotcar(vaspMap, inPotenDir, outPotcar)
else:
doneNames = []
fin = open(updateLog)
while True:
line = fin.readline()
if len(line) == 0:
break
line = line.strip()
if len(line) > 0 and not line.startswith('#'):
doneNames.append(line)
fin.close()
if buglev >= 2:
print('main: doneNames: %s' % (doneNames,))
doTree(buglev, doneNames, inDir, updateLog)
#====================================================================
def doTree(buglev, doneNames, inPath, updateLog):
if buglev >= 1:
print('doTree.entry: inPath: %s' % (inPath,))
if os.path.isfile(inPath):
if inPath.endswith('.cif'):
if inPath in doneNames:
if buglev >= 1:
print('main: already done: inPath: %s' % (inPath,))
else:
if buglev >= 1:
print('doTree: before inPath: %s' % (inPath,))
                from .. import error
                try:
                    cifRdr = CifReader(buglev, inPath)
                    icsdMap = cifRdr.getIcsdMap()
                except error.CifException as exc:
traceback.print_exc(None, sys.stdout)
print("main: caught: %s" % (exc,))
if buglev >= 1:
print('doTree: after inPath: %s' % (inPath,))
# Update the log
fout = open(updateLog, 'a')
print(inPath, file=fout)
fout.close()
else:
if buglev >= 1:
print('doTree: not a cif: %s' % (inPath,))
elif os.path.isdir(inPath):
fnames = sorted(os.listdir(inPath))
for fn in fnames:
subPath = '%s/%s' % (inPath, fn,)
            doTree(buglev, doneNames, subPath, updateLog)  # recursion
#====================================================================
class CifReader:
# Map: fieldName -> 'int' or 'float'
typeMap = {
'_database_code_ICSD': 'int', # xxx stg?
# '_citation_journal_volume' : 'listInt',
# '_citation_page_first' : 'listInt', # may be "174106-1"
# '_citation_page_last' : 'listInt',
# '_citation_year' : 'listInt',
'_cell_length_a': 'float',
'_cell_length_b': 'float',
'_cell_length_c': 'float',
'_cell_angle_alpha': 'float',
'_cell_angle_beta': 'float',
'_cell_angle_gamma': 'float',
'_cell_volume': 'float',
'_cell_formula_units_Z': 'int',
'_symmetry_Int_Tables_number': 'int',
'_atom_type_oxidation_number': 'listFloat',
'_atom_site_symmetry_multiplicity': 'listInt',
'_atom_site_fract_x': 'listFloat',
'_atom_site_fract_y': 'listFloat',
'_atom_site_fract_z': 'listFloat',
'_atom_site_occupancy': 'listFloat',
'_atom_site_attached_hydrogens': 'listInt',
'_atom_site_B_iso_or_equiv': 'listFloat',
'_symmetry_equiv_pos_site_id': 'listInt',
}
def __init__(self, buglev, inFile):
# Note: don't strip lines.
# We need to see the trailing spaces.
# See the kluge for icsd_159700.cif.
self.buglev = buglev
self.inFile = inFile
fin = open(inFile)
self.lines = fin.readlines()
fin.close()
self.nline = len(self.lines)
self.iline = 0 # current line num
if self.nline == 0:
self.line = None
else:
self.line = self.lines[0]
self.cifMapDone = False
self.icsdMapDone = False
self.vaspMapDone = False
self.errorList = [] # list of tuples: [(errorCode, errorMsg, iline)]
def throwerr(self, msg):
from .. import error
fullMsg = 'Error: %s iline: %d' % (msg, self.iline)
if self.line != None:
fullMsg += ' line: %s' % (repr(self.line),)
raise error.root(fullMsg)
def throwcif(self, msg):
from .. import error
fullMsg = 'Error: %s iline: %d' % (msg, self.iline)
if self.line != None:
fullMsg += ' line: %s' % (repr(self.line),)
raise error.CifException(fullMsg)
def noteError(self, errorCode, errorMsg):
self.errorList.append((errorCode, errorMsg, self.iline))
# Return tuple: (numErrorMsgs, formattedErrorMsg)
def formatErrorMsg(self):
useLongFormat = True
if useLongFormat:
msgs = ['%s:%d' % (tup[0], tup[2]) for tup in self.errorList]
msgJoin = ','.join(msgs)
else:
mmap = {}
for tup in self.errorList:
                if tup[0] not in mmap:
                    mmap[tup[0]] = 0
                mmap[tup[0]] += 1
keys = sorted(mmap.keys())
# Make list of strings like: 'msg:numOccur'
msgs = []
for key in keys:
msgs.append('%s:%d' % (key, mmap[key],))
msgJoin = ','.join(msgs)
return (len(self.errorList), msgJoin)
def advanceLine(self, msg):
self.iline += 1
if self.iline > self.nline:
self.throwerr('iline > nline')
if self.iline == self.nline:
self.line = None
else:
self.line = self.lines[self.iline]
if self.buglev >= 5 and msg != None:
self.printLine(msg)
def printLine(self, msg):
print('%s: iline: %d line: %s' % (msg, self.iline, repr(self.line),))
def getCifMap(self):
if not self.cifMapDone:
self.mkCifMap()
self.cifMapDone = True
return self.cifMap
def getIcsdMap(self):
if not self.icsdMapDone:
self.mkIcsdMap()
self.icsdMapDone = True
return self.icsdMap
def getVaspMap(self):
if not self.vaspMapDone:
self.mkVaspMap()
self.vaspMapDone = True
return self.vaspMap
#====================================================================
def mkCifMap(self):
if self.buglev >= 2:
print('mkCifMap: entry')
# Scan down for '_data'
while self.iline < self.nline and \
(self.line == '' or self.line.startswith('#')):
self.advanceLine('mkCifMap: hd')
if self.iline >= self.nline:
self.throwerr('file is empty')
if self.buglev >= 5:
self.printLine('mkCifMap.a')
self.cifMap = {} # name/value map
if not self.line.startswith('data_'):
self.throwcif('no data stmt at start')
dsname = self.line[5:].strip()
if len(dsname) == 0:
self.throwcif('dsname len == 0')
self.cifMap['dsname'] = dsname
self.advanceLine('mkCifMap: got dsname')
while self.iline < self.nline:
if len(self.line.strip()) == 0 or self.line.startswith('#'):
self.advanceLine('mkCifMap: skip comment')
elif self.line.rstrip() == 'loop_':
self.advanceLine('mkCifMap: start loop')
self.readLoop()
else:
self.readStandard()
(self.cifMap['numError'], self.cifMap['errorMsg']) \
= self.formatErrorMsg()
if self.buglev >= 2:
print('\nmkCifMap: cifMap:')
keys = sorted(self.cifMap.keys())
for key in keys:
print(' key: %s value: %s (%s)' \
% (key, repr(self.cifMap[key]), type(self.cifMap[key]).__name__,))
if self.buglev >= 2:
print('mkCifMap: exit')
#====================================================================
# Read a standard (not loop) cif item.
def readStandard(self):
if self.buglev >= 5:
self.printLine('readStandard.entry')
if not self.line.startswith('_'):
self.throwcif('invalid name')
ix = self.line.find(' ')
        # _name value or _name "value" or _name 'value'
if ix >= 0:
name = self.line[:ix].strip()
stg = self.line[ix:].strip()
if stg.startswith('"'):
if not stg.endswith('"'):
self.throwcif('no ending quote')
stg = stg[1:-1]
elif stg.startswith('\''):
if not stg.endswith('\''):
self.throwcif('no ending quote')
stg = stg[1:-1]
value = self.stripUncert(stg)
self.advanceLine('readStandard: got single value')
# _name\nvalue or _name\n;value ... ;
else:
name = self.line.strip()
self.advanceLine('readStandard: start multiline')
if self.iline >= self.nline:
self.throwcif('value past end of file')
# Value is delimited by ';'
if self.line.startswith(';'):
value = self.line[1:].strip() # get remainder of initial ';' line
self.advanceLine('readStandard: pass initial semicolon')
while True:
if self.iline >= self.nline:
self.throwcif('value past end of file')
if self.line.startswith(';'):
if self.line.rstrip('\r\n') != ';':
self.throwcif('invalid ending semicolon')
self.advanceLine('got last semicolon')
break
if len(value) > 0:
value += ' '
value += self.line.strip()
self.advanceLine('readStandard: multiline')
# No ';' -- entire value is on next line
else:
                value = self.stripUncert(self.line.strip())
                self.advanceLine('readStandard: got next-line value')
cleanVal = self.cleanString(value)
self.cifMap[name] = self.convertType(name, cleanVal)
if self.buglev >= 5:
self.printLine('readStandard.exit')
#====================================================================
# Read a loop cif item.
def readLoop(self):
if self.buglev >= 5:
self.printLine('readLoop.entry')
# Read the keys
keys = []
while True:
if self.iline >= self.nline:
self.throwcif('loop past end of file')
if self.buglev >= 5:
self.printLine('readLoop.a')
if not self.line.startswith('_'):
break
keys.append(self.line.strip())
self.advanceLine('readLoop: got key')
nkey = len(keys)
if self.buglev >= 5:
print(' keys: %s' % (keys,))
# Make list of empty sublists. One sublist per key.
valmat = []
for ii in range(nkey):
valmat.append([])
# Read values
loopDone = False
while not loopDone:
if self.buglev >= 5:
self.printLine('readLoop.b')
values = [] # values for this one line
while len(values) < nkey:
# Test for end
if self.line == None \
or self.line.startswith('loop_') \
or self.line.startswith('_') \
or self.line.startswith('#'):
loopDone = True
break
if self.iline >= self.nline:
self.throwcif('value past end of file')
if self.line.startswith(';'):
value = self.line[1:].strip() # get remainder of initial ';' line
self.advanceLine('readLoop: pass initial semicolon')
while True:
if self.iline >= self.nline:
self.throwcif('value past end of file')
if self.line.startswith(';'):
if self.line.rstrip('\r\n') != ';':
self.throwcif('invalid ending semicolon')
self.advanceLine('got last semicolon')
break
if len(value) > 0:
value += ' '
value += self.line.strip()
self.advanceLine('readLoop: multiline')
vals = [value]
if self.buglev >= 5:
print(' got semicolon value: ', vals)
else: # else no ';'
if self.iline >= self.nline:
self.throwcif('value past end of file')
tmpline = self.line
# Kluge. Sometimes have two spaces ' ' instead of ' 0 '.
# Example: icsd_418816.cif
if keys[0] == '_atom_site_label':
ix = tmpline.find(' ')
if ix >= 0:
tmpline = tmpline[:ix] + ' 0 ' + tmpline[(ix + 2):]
self.noteError('atomMiss0',
'readLoop: missing 0 in atom_site loop')
vals = shlex.split(tmpline) # split, retaining quoted substrings
# Kluge: some lines in the citation loop look like:
# 2 'Phase Transition' 1992 38- 127 220 PHTRDP
# The '38-' should be two tokens, '38' and '-',
# for the _citation_journal_volume and _citation_journal_issue.
# Or
# 1970 131- 139 146 ZEKGAX
# The '131-' should be two tokens, '131' and '-'.
if keys[0] == '_citation_id':
for ix in range(len(vals)):
if re.match('^[0-9]+-$', vals[ix]):
vals[ix] = vals[ix][:-1]
vals.insert(ix + 1, '-')
self.noteError('citeNoSpace',
'readLoop: no space before - in citation loop')
# Kluge: sometimes the _atom_site_attached_hydrogens
# is '-' instead of '0'.
# Example: icsd_163077.cif
if len(keys) >= 9 and keys[8] == '_atom_site_attached_hydrogens' \
and len(vals) >= 9 and vals[8] == '-':
vals[8] = '0'
self.noteError('hydroDash',
'readLoop: - instead of 0 for hydrogens')
self.advanceLine('readLoop: pass single line record')
if self.buglev >= 5:
print(' got line vals: ', vals)
values += vals
# end while len(values) < nkey
if self.buglev >= 5:
print(' nkey: %d nval: %s' % (nkey, len(values),))
print(' values: %s' % (values,))
# Kluge Fixups:
# icsd_108051.cif
# primary 'Zeitschrift fuer Metallkunde' 1983 74 358 389 8 ZEMTAE
# The '8' is spurious
if self.cifMap['_database_code_ICSD'] == '108051' \
and keys[0] == '_citation_id' \
and len(values) >= 7 \
and values[0] == 'primary' \
and values[6] == '8':
del values[6]
self.noteError('icsd108051', 'readLoop: fixup 108051')
        # icsd_170563.cif
# primary 'Journal of the American Chemical Society' 2004 126 38 11780 11780 11781 JACSAT
# The second '11780' is spurious
if self.cifMap['_database_code_ICSD'] == '170563' \
and keys[0] == '_citation_id' \
and len(values) >= 7 \
and values[0] == 'primary' \
and values[6] == '11780':
del values[6]
self.noteError('icsd170563', 'readLoop: fixup 170563')
# icsd_418816.cif
if self.cifMap['_database_code_ICSD'] == '418816' \
and keys[0] == '_atom_site_label' \
and len(values) >= 7 \
and values[0] == 'primary' \
and values[6] == '11780':
del values[6]
self.noteError('icsd418816', 'readLoop: fixup 418816')
if len(values) != 0:
if len(values) != nkey:
self.throwcif(
'wrong num values. nkey: %d nval: %d\n keys: %s\n values: %s'
% (nkey, len(values), keys, values,))
# Get a value for each column
for ii in range(nkey):
uval = self.stripUncert(values[ii])
cleanVal = self.cleanString(uval)
valmat[ii].append(cleanVal)
for ii in range(nkey):
tvec = self.convertType(keys[ii], valmat[ii])
self.cifMap[keys[ii]] = tvec
if self.buglev >= 5:
print('readLoop: loop final: key: %s values: %s' \
% (keys[ii], valmat[ii],))
if self.buglev >= 5:
self.printLine('readLoop.exit')
#====================================================================
# If value has a standard uncertainty, like '3.44(5)',
# strip off the uncertainty and discard it.
def stripUncert(self, stg):
# Kluge: allow entries with no ending right paren.
# For example in icsd_180377.cif,
# Si3 Si4+ 6 i 0.447403(11) -0.447403(11 0.30818(6) 0.8333 0 0.00624(8)
mat = re.match('^([-0-9.]+)\([0-9]+\)?$', stg)
if mat != None:
        if not stg.endswith(')'):  # If no ending paren, complain.
self.noteError('uncertParen',
'stripUncert: no R paren in uncertainty: "%s"' % (stg,))
stg = stg[: len(mat.group(1))] # strip off uncertainty
return stg
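    # Hedged examples of the behaviour above:
    #   stripUncert('0.30818(6)') -> '0.30818'   (uncertainty dropped)
    #   stripUncert('0.8333')     -> '0.8333'    (no parens, unchanged)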
#====================================================================
# Strip out illegal chars.
# Example: icsd_054779.cif
# _chemical_name_systematic 'Magnesium Silicide (5/6) - Beta<F0>'
def cleanString(self, stg):
hasError = False
ii = 0
while ii < len(stg):
ix = ord(stg[ii])
if not (ix >= 32 and ix <= 126):
tup = euroCharMap.get(ix, None)
if tup != None:
# Should an ISO_8859-15 char be an error? Probably not.
#hasError = True
stg = stg[:ii] + tup[1] + stg[(ii + 1):]
else:
hasError = True
stg = stg[:ii] + stg[(ii + 1):]
ii += 1
if hasError:
self.noteError('badChar',
'cleanString: invalid char dec %d in stg: "%s"' % (ix, stg,))
return stg
#====================================================================
# If name is in typeMap, convert value to the right type.
# Else leave it as a string.
def convertType(self, name, value):
        if name in self.typeMap:
ftype = self.typeMap[name]
if ftype == 'int':
try:
tvalue = int(value)
except Exception as exc:
self.throwcif('mkCifMap: invalid int for field "%s". value: "%s"'
% (name, value,))
elif ftype == 'float':
try:
tvalue = float(value)
except Exception as exc:
self.throwcif('mkCifMap: invalid float for field "%s". value: "%s"'
% (name, value,))
elif ftype == 'listInt':
if not isinstance(value, list):
                    self.throwerr('value not a list')
nval = len(value)
tvalue = nval * [None]
for ii in range(nval):
try:
tvalue[ii] = int(value[ii])
except Exception as exc:
self.throwcif('mkCifMap: invalid int for field "%s". value: "%s"'
% (name, value[ii],))
elif ftype == 'listFloat':
if not isinstance(value, list):
                    self.throwerr('value not a list')
nval = len(value)
tvalue = nval * [None]
for ii in range(nval):
try:
tvalue[ii] = float(value[ii])
except Exception as exc:
self.throwcif(
'mkCifMap: invalid float for field "%s". value: "%s"'
% (name, value[ii],))
else:
                self.throwerr('unknown fieldType: "%s"' % (ftype,))
else:
tvalue = value
return tvalue
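    # Hedged examples against typeMap above:
    #   convertType('_cell_length_a', '3.160')            -> 3.16 (float)
    #   convertType('_atom_site_occupancy', ['1', '.5'])  -> [1.0, 0.5]
    #   convertType('_chemical_formula_sum', 'Mo Se2')    -> 'Mo Se2' (untyped)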
#====================================================================
# Sets self.icsdMap
def mkIcsdMap(self):
timea = datetime.datetime.now()
if self.buglev >= 2:
print('mkIcsdMap: entry')
self.getCifMap() # insure self.cifMap is built
# Check for required fields in cifMap
requiredFields = [
'_database_code_ICSD',
'_chemical_name_systematic',
'_chemical_formula_structural',
'_chemical_formula_sum',
'_cell_length_a',
'_cell_length_b',
'_cell_length_c',
'_cell_angle_alpha',
'_cell_angle_beta',
'_cell_angle_gamma',
'_cell_volume',
'_cell_formula_units_Z',
'_symmetry_space_group_name_H-M',
'_symmetry_Int_Tables_number',
'_atom_type_symbol',
'_atom_type_oxidation_number',
'_atom_site_label',
'_atom_site_type_symbol',
'_atom_site_symmetry_multiplicity',
'_atom_site_Wyckoff_symbol',
'_atom_site_fract_x',
'_atom_site_fract_y',
'_atom_site_fract_z',
'_atom_site_occupancy',
'_atom_site_attached_hydrogens',
]
missFields = []
for fld in requiredFields:
if self.cifMap.get(fld, None) == None:
missFields.append(fld)
if len(missFields) > 0:
self.throwcif('icsdMap: missing required fields: %s' % (missFields,))
# Optional fields
optionalFields = [
'_chemical_name_mineral',
]
missFields = []
for fld in optionalFields:
if self.cifMap.get(fld, None) == None:
missFields.append(fld)
if len(missFields) > 0:
##print('icsdMap: missing optional fields: %s' % (missFields,))
pass
elements = [
'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca',
'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr',
'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',
'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb',
'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg',
'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm',
'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds',
'Rg', 'Cn', 'Uut', 'Fl', 'Uup', 'Lv', 'Uus', 'Uuo',
'D', # Deuterium
]
ferromagneticElements = ['Co', 'Cr', 'Fe', 'Ni']
icmap = self.cifMap.copy()
# Using the 'chemical_formula_sum' like 'Mo Se2',
# get formulaNames = ['Mo', 'Se'] and formulaNums = [1, 2].
# The formulaNums may be floats.
# Kluge: sometimes the formulas omit a space, like icsd_171803.cif
# '... Nb0.04 O20 Si4.49Ta0.01 Ti0.59'
# So instead of using toks = chemSum.split()
# we must find the tokens by scanning.
chemSum = icmap['_chemical_formula_sum'] # 'Mo Se2'
formulaNames = []
formulaNums = []
stg = chemSum
while True:
stg = stg.strip()
if len(stg) == 0:
break
# mata = re.match(r'^([a-zA-Z]+) *([.0-9]+)', stg) # 'Se2' or Ca1.37
mata = re.match(r'^([a-zA-Z]+)([.0-9]+)', stg) # 'Se2' or Ca1.37
if mata != None:
formulaNames.append(mata.group(1))
formulaNums.append(float(mata.group(2)))
stg = stg[len(mata.group(0)):]
# If no space before the next group, complain.
if len(stg) > 0 and not stg.startswith(' '):
self.noteError('chemSumSpace',
'mkIcsdMap: no space in chemSum: "%s"' % (chemSum,))
            else:
                matb = re.match(r'^([a-zA-Z]+)', stg)  # 'Mo'
                if matb != None:
                    formulaNames.append(matb.group(1))
                    formulaNums.append(1.0)
                    stg = stg[len(matb.group(0)):]
                else:
                    self.throwcif('unknown chem form sum: "%s"' % (chemSum,))
for nm in formulaNames:
if nm not in elements:
                self.throwcif('unknown element "%s" in chem form sum: "%s"'
                              % (nm, chemSum,))
icmap['formulaNames'] = formulaNames
icmap['formulaNums'] = formulaNums
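        # Hedged example of the scan above: chemSum 'Mo Se2' yields
        # formulaNames = ['Mo', 'Se'] and formulaNums = [1.0, 2.0].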
# Make a new field _atom_site_slabel = stripped atom_site_label.
# Strip arbitrary trailing integer from label 'Mo1' to get 'Mo'.
# Labels are sequential: Mg1, Mg2, Mg3, ...
labels = icmap['_atom_site_label']
nlabel = len(labels)
slabels = nlabel * [None]
for ii in range(nlabel):
slabels[ii] = re.sub('\d+$', '', labels[ii])
icmap['_atom_site_slabel'] = slabels
# Make a new field _atom_site_oxidation_num,
# which is the oxidation number for each atom in _atom_site_type_symbol.
syms = icmap['_atom_site_type_symbol']
nsite = len(syms)
ox_nums = nsite * [None]
for ii in range(nsite):
tsym = syms[ii]
jj = icmap['_atom_type_symbol'].index(tsym)
ox_nums[ii] = icmap['_atom_type_oxidation_number'][jj]
icmap['_atom_site_oxidation_num'] = ox_nums
# Set numCellAtom = total num atoms in cell
mults = icmap['_atom_site_symmetry_multiplicity']
icmap['numCellAtom'] = sum(mults)
# Set numCellFerro = total num ferromagnetic atoms in cell
numferro = 0
for ii in range(nlabel):
if slabels[ii] in ferromagneticElements:
numferro += mults[ii]
icmap['numCellFerro'] = numferro
# Max delta of formulaNums from integers
fnums = icmap['formulaNums']
fnumDelta = 0
for fnum in fnums:
dif = fnum % 1
fnumDelta = max(fnumDelta, min(dif, 1 - dif))
icmap['formulaDelta'] = fnumDelta
# Max delta of occupancies from integers
occs = icmap['_atom_site_occupancy']
occuDelta = 0
for occ in occs:
dif = occ % 1
occuDelta = max(occuDelta, min(dif, 1 - dif))
icmap['occuDelta'] = occuDelta
self.icsdMap = icmap
(self.icsdMap['numError'], self.icsdMap['errorMsg']) \
= self.formatErrorMsg()
if self.buglev >= 2:
print('\nmkIcsdMap: icsdMap:')
keys = sorted(self.icsdMap.keys())
for key in keys:
print(' key: %s value: %s (%s)' \
% (key, repr(self.icsdMap[key]), type(self.icsdMap[key]).__name__,))
timeb = datetime.datetime.now()
if self.buglev >= 1:
print('mkIcsdMap: icsd: %7d num: %4d chemSum: %s' \
% (self.icsdMap['_database_code_ICSD'],
self.icsdMap['numCellAtom'],
self.icsdMap['_chemical_formula_sum'],))
if self.buglev >= 2:
print('mkIcsdMap: %20s time: %10.5f' \
% ('all', (timeb - timea).total_seconds(),))
timea = timeb
if self.buglev >= 2:
print('mkIcsdMap: exit')
#====================================================================
# Sets self.vaspMap
def mkVaspMap(self):
timea = datetime.datetime.now()
if self.buglev >= 2:
print('mkVaspMap: entry')
self.getIcsdMap() # insure self.icsdMap is built
# Cell
# Adapted from crystal/read.py:icsd_cif
lena = self.icsdMap['_cell_length_a']
lenb = self.icsdMap['_cell_length_b']
lenc = self.icsdMap['_cell_length_c']
alpha = self.icsdMap['_cell_angle_alpha']
beta = self.icsdMap['_cell_angle_beta']
gamma = self.icsdMap['_cell_angle_gamma']
a1 = lena * np.array([1., 0., 0.])
a2 = lenb * np.array([np.cos(gamma * np.pi / 180.), np.sin(gamma * np.pi / 180.), 0.])
c1 = lenc * np.cos(beta * np.pi / 180.)
c2 = lenc / np.sin(gamma * np.pi / 180.) * (-np.cos(beta * np.pi / 180.)
* np.cos(gamma * np.pi / 180.) + np.cos(alpha * np.pi / 180.))
a3 = np.array([c1, c2, np.sqrt(lenc**2 - (c1**2 + c2**2))])
cell = np.array([a1, a2, a3]) # a1, a2, a3 are the rows.
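        # Hedged sanity check of the construction above: for a cubic cell,
        # e.g. a = b = c = 4.0 and alpha = beta = gamma = 90, the rows
        # reduce to a1 = [4, 0, 0], a2 = [0, 4, 0], a3 = [0, 0, 4]
        # (up to floating-point rounding in cos(90 deg)).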
if self.buglev >= 2:
print('mkVaspMap: a1: %s' % (a1,))
print('mkVaspMap: a2: %s' % (a2,))
print('mkVaspMap: a3: %s' % (a3,))
print('mkVaspMap: cell:\n%s' % (cell,))
# Symmetry ops: _symmetry_equiv_pos_as_xyz
# Set transMats = list of 3x4 transformation matrices
symStgs = self.icsdMap['_symmetry_equiv_pos_as_xyz']
if self.buglev >= 5:
print('\nmkVaspMap: symStgs: %s\n' % (symStgs,))
# ['x, x-y, -z+1/2', '-x+y, y, -z+1/2', ...]
transMats = []
for stg in symStgs:
transMat = []
specs = stg.split()
if len(specs) != 3:
                self.throwcif('bad symStg: %s' % (stg,))
for ii in range(len(specs)):
specs[ii] = specs[ii].rstrip(',') # get rid of trailing comma
values = symParser(specs[ii])
if self.buglev >= 5:
print('mkVaspMap: ii: %d spec: %s values: %s' \
% (ii, specs[ii], values,))
transMat.append(values)
transMat = np.array(transMat, dtype=float)
if self.buglev >= 10:
print('\nmkVaspMap: transMat:\n%s\n' % (transMat,))
transMats.append(transMat)
timeb = datetime.datetime.now()
if self.buglev >= 2:
print('mkVaspMap: %20s time: %10.5f' \
% ('symmetry ops', (timeb - timea).total_seconds(),))
timea = timeb
# atom_site positions
slabels = self.icsdMap['_atom_site_slabel'] # labels w/o numeric suffix
types = self.icsdMap['_atom_site_type_symbol']
symbols = self.icsdMap['_atom_site_Wyckoff_symbol']
fracxs = self.icsdMap['_atom_site_fract_x']
fracys = self.icsdMap['_atom_site_fract_y']
fraczs = self.icsdMap['_atom_site_fract_z']
occs = self.icsdMap['_atom_site_occupancy']
hyds = self.icsdMap['_atom_site_attached_hydrogens']
nlabel = len(slabels)
# Set wyckoffs = [
# ['Mo', ['0.3333', '0.6667', '0.25']],
# ['S', ['0.3333', '0.6667', '0.621']]
# ]
wyckoffs = []
for ii in range(nlabel):
wyc = [slabels[ii], [fracxs[ii], fracys[ii], fraczs[ii]]]
wyckoffs.append(wyc)
if self.buglev >= 2:
print('mkVaspMap: wyc: %s' % (wyc,))
# Get list of unique symbols: ['Mo', 'S']
syms = [ww[0] for ww in wyckoffs]
uniqueSyms = list(set(syms)) # unique values
if self.buglev >= 2:
print('mkVaspMap: uniqueSyms: %s' % (uniqueSyms,))
timeb = datetime.datetime.now()
if self.buglev >= 2:
print('mkVaspMap: %20s time: %10.5f' \
% ('unique syms', (timeb - timea).total_seconds(),))
timea = timeb
dtma = 0
dtmb = 0
dtmc = 0
dtmd = 0
# Set posVecs = list of sublists, one sublist per uniqueSym.
# Each sublist is a list of unique position vectors,
# generated by:
# posvec = np.dot( transMat, wycPos)
# where
# transmat is in transmats, generated by the symmetry ops above
# wycPos comes from wyckoffs.
if self.buglev >= 2:
print('\nmkVaspMap: Get posVecs')
posVecs = [] # parallel array with uniqueSyms
for ii in range(len(uniqueSyms)):
posVecs.append([])
for wyc in wyckoffs:
if self.buglev >= 2:
print(' wyc: ', wyc)
wycSym = wyc[0]
wycPos = wyc[1] + [1.0]
for transMat in transMats:
tma = datetime.datetime.now()
if self.buglev >= 10:
print(' transMat:\n%s' % (transMat,))
if self.buglev >= 10:
print(' wycPos: %s' % (wycPos,))
posVec = np.dot(transMat, wycPos)
if self.buglev >= 10:
print(' raw posVec: %s' % (posVec,))
# Insure posVec elements are in the unit cube
for ii in range(len(posVec)):
if posVec[ii] < 0:
posVec[ii] += 1
if posVec[ii] >= 1:
posVec[ii] -= 1
if self.buglev >= 10:
print(' final posVec: %s' % (posVec,))
tmb = datetime.datetime.now()
dtma += (tmb - tma).total_seconds()
tma = tmb
# If posVec is new, append posVec to positions for this symbol.
ix = uniqueSyms.index(wycSym)
tmb = datetime.datetime.now()
dtmb += (tmb - tma).total_seconds()
tma = tmb
if posVecs[ix] == []:
if self.buglev >= 5:
print(' append posVec a: %s' % (posVec,))
posVecs[ix].append(posVec)
else:
# If posVec is close to an ele already in posVecs, ignore posVec.
# The following is too slow: about 20 seconds for 2000 ions.
# norms = [np.linalg.norm( oldVec - posVec)
# for oldVec in posVecs[ix]]
# if min( norms) < 0.01: ...
#
# So find a faster way.
# The way used below takes about 15 seconds for 2000 ions.
#
# Perhaps in the future we could use a way that
# keeps each vector posVecs[ix] in sorted order.
# Then we can use a single binary search for dual purpose:
# to see if a duplicate exists, and find the insertion point.
                    minNorm = np.inf
for pv in posVecs[ix]:
norm = 0
for ii in range(len(posVec)):
delta = posVec[ii] - pv[ii]
norm += delta * delta
norm = np.sqrt(norm)
if norm < minNorm:
minNorm = norm
tmb = datetime.datetime.now()
dtmc += (tmb - tma).total_seconds()
tma = tmb
if minNorm < 0.01:
if self.buglev >= 5:
print(' duplicate posVec: %s' % (posVec,))
else:
if self.buglev >= 5:
print(' append posVec b: %s' % (posVec,))
posVecs[ix].append(posVec)
tmb = datetime.datetime.now()
dtmd += (tmb - tma).total_seconds()
tma = tmb
# Write posvecs to file 'tempplot'.
writePlot = False
if writePlot:
fout = open('tempplot', 'w')
for ii in range(len(uniqueSyms)):
print('mkVaspMap: posvecs for %s:' % (uniqueSyms[ii],))
for pvec in posVecs[ii]:
print(' %s' % (pvec,))
print('%s %s' % (ii, pvec,), file=fout)
fout.close()
timeb = datetime.datetime.now()
if self.buglev >= 2:
print('mkVaspMap: %20s time: %10.5f' \
% ('posVecs', (timeb - timea).total_seconds(),))
print('mkVaspMap: dtma time: %10.5f' % (dtma,))
print('mkVaspMap: dtmb time: %10.5f' % (dtmb,))
print('mkVaspMap: dtmc time: %10.5f' % (dtmc,))
print('mkVaspMap: dtmd time: %10.5f' % (dtmd,))
cellTrans = np.transpose(cell)
if self.buglev >= 5:
print('mkVaspMap: cell:\n%s' % (cell,))
print('mkVaspMap: cellTrans:\n%s' % (cellTrans,))
print('\nmkVaspMap: posVecs:')
for ii in range(len(uniqueSyms)):
print(' posVecs for ii: %d sym: %s' % (ii, uniqueSyms[ii],))
for posVec in posVecs[ii]:
print(' posVec: %s' % (posVec,))
atomVecs = []
for ii in range(len(uniqueSyms)):
atomVecs.append([])
for posVec in posVecs[ii]:
atomVec = np.dot(cellTrans, posVec)
atomVecs[-1].append(atomVec)
if self.buglev >= 5:
print('\nmkVaspMap: atomVecs:')
for ii in range(len(uniqueSyms)):
print(' atomVecs for ii: %d sym: %s' % (ii, uniqueSyms[ii],))
for atomVec in atomVecs[ii]:
print(' atomVec: %s' % (atomVec,))
self.vaspMap = {}
self.vaspMap['cellBasis'] = cell
self.vaspMap['uniqueSyms'] = uniqueSyms
self.vaspMap['posVecs'] = posVecs
self.vaspMap['posScaleFactor'] = 1.0
timeb = datetime.datetime.now()
if self.buglev >= 2:
print('mkVaspMap: %20s time: %10.5f' \
% ('finish', (timeb - timea).total_seconds(),))
timea = timeb
if self.buglev >= 2:
print('\nmkVaspMap: self.vaspMap:')
keys = sorted(self.vaspMap.keys())
for key in keys:
print(' key: %s value: %s (%s)' \
% (key, repr(self.vaspMap[key]),
type(self.vaspMap[key]).__name__,))
if self.buglev >= 2:
print('mkVaspMap: exit')
#====================================================================
def writePoscar(
sysName,
vaspMap,
outPoscar): # output file name
fout = open(outPoscar, 'w')
# Name
print(sysName, file=fout)
# Universal scaling factor == lattice constant
print('%g' % (vaspMap['posScaleFactor'],), file=fout)
# Lattice vectors == basis vectors (rows) of the unit cell
basis = vaspMap['cellBasis']
for ii in range(3):
for jj in range(3):
print(' %14.7g' % (basis[ii, jj],), end=' ', file=fout)
print('', file=fout) # newline
usyms = vaspMap['uniqueSyms'] # parallel array
posVecs = vaspMap['posVecs'] # parallel array
if len(usyms) != len(posVecs):
throwerr('writePoscar: len(usyms) != len( posVecs)', None, None)
# Num atoms of each species
for ii in range(len(posVecs)):
print('%d' % (len(posVecs[ii]),), end=' ', file=fout)
print('', file=fout)
# Cartesian vs direct coords
print('direct', file=fout)
# Atom positions
for ii in range(len(usyms)):
for posVec in posVecs[ii]:
msg = ''
for jj in range(len(posVec)):
msg += ' %14.7g' % (posVec[jj],)
print(msg, file=fout)
fout.close()
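#====================================================================
# Hedged sketch of the POSCAR layout written above (VASP 4 style:
# species counts, no element-symbol line):
#   <sysName>
#   1
#        a1x a1y a1z
#        a2x a2y a2z
#        a3x a3y a3z
#   n1 n2 ...
#   direct
#        x y z      (one line per atom, fractional coordinates)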
#====================================================================
def writePotcar(
vaspMap,
inPotenDir,
outPotcar): # output file name
usyms = vaspMap['uniqueSyms'] # parallel array
fout = open(outPotcar, 'w')
for usym in usyms:
fin = open(inPotenDir + '/' + usym + '/POTCAR')
content = fin.read()
fin.close()
fout.write(content)
    fout.close()
#====================================================================
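# The module-level functions writePoscar and symParser call
# throwerr/throwcif, which otherwise exist only as CifReader methods.
# A minimal module-level sketch (hedged; it reuses pylada's error
# module exactly as the CifReader methods above do, and the two extra
# arguments mirror the (msg, None, None) call sites and are ignored):
def throwerr(msg, line=None, iline=None):
    from .. import error
    raise error.root('Error: %s' % (msg,))
def throwcif(msg, line=None, iline=None):
    from .. import error
    raise error.CifException('Error: %s' % (msg,))
#====================================================================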
# Lexer: x+0.22*y ==> 'x', '+', 0.22, '*', 'y'
def symLexer(stg):
res = []
while True:
# Skip white space
while len(stg) > 0 and stg[0] == ' ':
            stg = stg[1:]
if len(stg) == 0:
break
numstg = ''
while len(stg) > 0 and (stg[0] >= '0' and stg[0] <= '9' or stg[0] == '.'):
numstg += stg[0]
stg = stg[1:]
if len(numstg) > 0:
res.append(float(numstg))
else:
res.append(stg[0])
stg = stg[1:]
return res
#====================================================================
# Parser: x+0.22*y ==> array with 4 elements: xCoeff, yCoeff, zCoeff, aconst
def symParser(stg):
toks = symLexer(stg)
names = 'xyz'
# Simple syntax checks.
# Essentially make sure operators are surrounded by operands,
# and operands are surrounded by operators.
tok = toks[0] # first token
    if not (tok in names or tok == '-' or isinstance(tok, float)):
throwcif('unknown syntax: "%s"' % (stg,), None, None)
tok = toks[-1] # last token
if not (isinstance(tok, float) or tok in names):
throwcif('unknown syntax: "%s"' % (stg,), None, None)
for ii in range(1, len(toks) - 1): # all other tokens
if isinstance(toks[ii], float):
if (toks[ii - 1] not in '-+/') or (toks[ii + 1] not in '-+/'):
throwcif('unknown syntax: "%s"' % (stg,), None, None)
elif toks[ii] in names:
if (toks[ii - 1] not in '-+') or (toks[ii + 1] not in '-+'):
throwcif('unknown syntax: "%s"' % (stg,), None, None)
elif toks[ii] == '/':
if not isinstance(toks[ii - 1], float) or not isinstance(toks[ii + 1], float):
throwcif('unknown syntax: "%s"' % (stg,), None, None)
elif toks[ii] in '-+':
if not isinstance(toks[ii - 1], float) and toks[ii - 1] not in names:
throwcif('unknown syntax: "%s"' % (stg,), None, None)
if not isinstance(toks[ii + 1], float) and toks[ii + 1] not in names:
throwcif('unknown syntax: "%s"' % (stg,), None, None)
else:
throwcif('unknown syntax: "%s"' % (stg,), None, None)
# Replace [vala, /, valb] with [vala/valb]
ii = 0
while ii < len(toks):
if toks[ii] == '/':
toks[ii - 1] = toks[ii - 1] / toks[ii + 1]
del toks[ii:(ii + 2)]
ii += 1
# Scan for our symbols
values = 4 * [None]
for ii in range(len(toks)):
tok = toks[ii]
if isinstance(tok, float):
if values[3] != None:
throwcif('unknown syntax: "%s"'
% (stg,), None, None)
values[3] = tok
elif tok in names:
inm = names.find(tok)
if values[inm] != None:
throwcif('unknown syntax: "%s"'
% (stg,), None, None)
if ii == 0:
values[inm] = 1.0 # leading 'x' or 'y' or 'z'
elif toks[ii - 1] == '-':
values[inm] = -1.0
elif toks[ii - 1] == '+':
values[inm] = 1.0
else:
throwcif('unknown syntax: "%s"' % (stg,), None, None)
elif tok in '-+':
pass
else:
throwcif('unknown syntax: "%s"' % (stg,), None, None)
for ii in range(len(values)):
if values[ii] == None:
values[ii] = 0.
return values
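# Hedged examples of symParser above (output is [xCoeff, yCoeff,
# zCoeff, const]):
#   symParser('-x+y')   -> [-1.0, 1.0, 0.0, 0.0]
#   symParser('-z+1/2') -> [0.0, 0.0, -1.0, 0.5]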
#====================================================================
# Convert from iso_8859-15 to ascii. See man iso_8859-15.
# The default Postgresql database allows only ascii.
#
# Another approach would be to change the entire database
# by initializing it with
# initdb -E LATIN9
# but that affects every table and column.
# We just need to handle 1 column of 1 table.
euroChars = [
[160, ' ', 'NO-BREAK SPACE'],
[161, '!', 'INVERTED EXCLAMATION MARK'],
[162, ' c ', 'CENT SIGN'],
[163, ' pound ', 'POUND SIGN'],
[164, ' euro ', 'EURO SIGN'],
[165, ' yen ', 'YEN SIGN'],
[166, 'S', 'LATIN CAPITAL LETTER S WITH CARON'],
[167, ' sect ', 'SECTION SIGN'],
[168, 's', 'LATIN SMALL LETTER S WITH CARON'],
[169, 'c', 'COPYRIGHT SIGN'],
[170, ' ', 'FEMININE ORDINAL INDICATOR'],
[171, ' ', 'LEFT-POINTING DOUBLE ANGLE QUOTATION MARK'],
[172, ' not ', 'NOT SIGN'],
[173, '-', 'SOFT HYPHEN'],
[174, ' reg ', 'REGISTERED SIGN'],
[175, '-', 'MACRON'],
[176, ' deg ', 'DEGREE SIGN'],
[177, ' +/- ', 'PLUS-MINUS SIGN'],
[178, ' 2 ', 'SUPERSCRIPT TWO'],
[179, ' 3 ', 'SUPERSCRIPT THREE'],
[180, 'Z', 'LATIN CAPITAL LETTER Z WITH CARON'],
[181, 'u', 'MICRO SIGN'],
[182, ' P ', 'PILCROW SIGN'],
[183, '.', 'MIDDLE DOT'],
[184, 'z', 'LATIN SMALL LETTER Z WITH CARON'],
[185, ' 1 ', 'SUPERSCRIPT ONE'],
[186, 'o', 'MASCULINE ORDINAL INDICATOR'],
[187, ' > ', 'RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK'],
[188, 'OE', 'LATIN CAPITAL LIGATURE OE'],
[189, 'oe', 'LATIN SMALL LIGATURE OE'],
[190, 'Y', 'LATIN CAPITAL LETTER Y WITH DIAERESIS'],
[191, '?', 'INVERTED QUESTION MARK'],
[192, 'A', 'LATIN CAPITAL LETTER A WITH GRAVE'],
[193, 'A', 'LATIN CAPITAL LETTER A WITH ACUTE'],
[194, 'A', 'LATIN CAPITAL LETTER A WITH CIRCUMFLEX'],
[195, 'A', 'LATIN CAPITAL LETTER A WITH TILDE'],
[196, 'A', 'LATIN CAPITAL LETTER A WITH DIAERESIS'],
[197, 'A', 'LATIN CAPITAL LETTER A WITH RING ABOVE'],
[198, 'AE', 'LATIN CAPITAL LETTER AE'],
[199, 'C', 'LATIN CAPITAL LETTER C WITH CEDILLA'],
[200, 'E', 'LATIN CAPITAL LETTER E WITH GRAVE'],
[201, 'E', 'LATIN CAPITAL LETTER E WITH ACUTE'],
[202, 'E', 'LATIN CAPITAL LETTER E WITH CIRCUMFLEX'],
[203, 'E', 'LATIN CAPITAL LETTER E WITH DIAERESIS'],
[204, 'I', 'LATIN CAPITAL LETTER I WITH GRAVE'],
[205, 'I', 'LATIN CAPITAL LETTER I WITH ACUTE'],
[206, 'I', 'LATIN CAPITAL LETTER I WITH CIRCUMFLEX'],
[207, 'I', 'LATIN CAPITAL LETTER I WITH DIAERESIS'],
[208, 'D', 'LATIN CAPITAL LETTER ETH'],
[209, 'N', 'LATIN CAPITAL LETTER N WITH TILDE'],
[210, 'O', 'LATIN CAPITAL LETTER O WITH GRAVE'],
[211, 'O', 'LATIN CAPITAL LETTER O WITH ACUTE'],
[212, 'O', 'LATIN CAPITAL LETTER O WITH CIRCUMFLEX'],
[213, 'O', 'LATIN CAPITAL LETTER O WITH TILDE'],
[214, 'O', 'LATIN CAPITAL LETTER O WITH DIAERESIS'],
[215, 'x', 'MULTIPLICATION SIGN'],
[216, 'O', 'LATIN CAPITAL LETTER O WITH STROKE'],
[217, 'U', 'LATIN CAPITAL LETTER U WITH GRAVE'],
[218, 'U', 'LATIN CAPITAL LETTER U WITH ACUTE'],
[219, 'U', 'LATIN CAPITAL LETTER U WITH CIRCUMFLEX'],
[220, 'U', 'LATIN CAPITAL LETTER U WITH DIAERESIS'],
[221, 'Y', 'LATIN CAPITAL LETTER Y WITH ACUTE'],
[222, ' ', 'LATIN CAPITAL LETTER THORN'],
[223, 's', 'LATIN SMALL LETTER SHARP S'],
[224, 'a', 'LATIN SMALL LETTER A WITH GRAVE'],
[225, 'a', 'LATIN SMALL LETTER A WITH ACUTE'],
[226, 'a', 'LATIN SMALL LETTER A WITH CIRCUMFLEX'],
[227, 'a', 'LATIN SMALL LETTER A WITH TILDE'],
[228, 'a', 'LATIN SMALL LETTER A WITH DIAERESIS'],
[229, 'a', 'LATIN SMALL LETTER A WITH RING ABOVE'],
[230, 'ae', 'LATIN SMALL LETTER AE'],
[231, 'c', 'LATIN SMALL LETTER C WITH CEDILLA'],
[232, 'e', 'LATIN SMALL LETTER E WITH GRAVE'],
[233, 'e', 'LATIN SMALL LETTER E WITH ACUTE'],
[234, 'e', 'LATIN SMALL LETTER E WITH CIRCUMFLEX'],
[235, 'e', 'LATIN SMALL LETTER E WITH DIAERESIS'],
[236, 'i', 'LATIN SMALL LETTER I WITH GRAVE'],
[237, 'i', 'LATIN SMALL LETTER I WITH ACUTE'],
[238, 'i', 'LATIN SMALL LETTER I WITH CIRCUMFLEX'],
[239, 'i', 'LATIN SMALL LETTER I WITH DIAERESIS'],
[240, 'o', 'LATIN SMALL LETTER ETH'],
[241, 'n', 'LATIN SMALL LETTER N WITH TILDE'],
[242, 'o', 'LATIN SMALL LETTER O WITH GRAVE'],
[243, 'o', 'LATIN SMALL LETTER O WITH ACUTE'],
[244, 'o', 'LATIN SMALL LETTER O WITH CIRCUMFLEX'],
[245, 'o', 'LATIN SMALL LETTER O WITH TILDE'],
[246, 'o', 'LATIN SMALL LETTER O WITH DIAERESIS'],
[247, '/', 'DIVISION SIGN'],
[248, 'o', 'LATIN SMALL LETTER O WITH STROKE'],
[249, 'u', 'LATIN SMALL LETTER U WITH GRAVE'],
[250, 'u', 'LATIN SMALL LETTER U WITH ACUTE'],
[251, 'u', 'LATIN SMALL LETTER U WITH CIRCUMFLEX'],
[252, 'u', 'LATIN SMALL LETTER U WITH DIAERESIS'],
[253, 'y', 'LATIN SMALL LETTER Y WITH ACUTE'],
[254, ' ', 'LATIN SMALL LETTER THORN'],
[255, 'y', 'LATIN SMALL LETTER Y WITH DIAERESIS'],
]
euroCharMap = {}
for tup in euroChars:
euroCharMap[tup[0]] = tup
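# Hedged example: euroCharMap[176] is [176, ' deg ', 'DEGREE SIGN'], so
# cleanString above replaces an ISO 8859-15 degree sign with ' deg '.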
#====================================================================
if __name__ == '__main__':
main()
|
pylada/pylada-light
|
src/pylada/crystal/readCif.py
|
Python
|
gpl-3.0
| 55,318
|
[
"CRYSTAL",
"VASP"
] |
887d43753a9a399658cb5ab598c5dcf4b69d8d2b68929b74dee3362005539c35
|
#!/usr/bin/env python
# VTK Viewer
# Written 2012-2013 Hal Canary <http://cs.unc.edu/~hal>
# Copyright 2012-2013 University of North Carolina at Chapel Hill.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# LICENSE.md in this repository or
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
CONTROLS:
left mouse button: rotation
right mouse button: zooming
middle mouse button: panning
ctrl + left mouse button: spinning
ctrl + shift + left mouse button: zooming
shift + left mouse button: panning
j: joystick
t: trackball
c: camera mode
a: actor mode
3: toggle stereo mode
e: exit the application
q: quit the application
f: fly to the picked point
r: reset the camera
p: pick
s: surface representation
w: wireframe representation
Set the STEREO_TYPE environment variable to control stereo type.
STEREO_TYPE=CRYSTAL_EYES
STEREO_TYPE=RED_BLUE
STEREO_TYPE=INTERLACED
STEREO_TYPE=LEFT
STEREO_TYPE=RIGHT
STEREO_TYPE=DRESDEN
STEREO_TYPE=ANAGLYPH
STEREO_TYPE=CHECKERBOARD
STEREO_TYPE=SPLITVIEWPORT_HORIZONTAL
Set the COLORMAP environment variable to change the scalar color map.
It should be the location of a ParaView-style xml colormap file.
"""
useage = """
Usage: vtkviewer.py FILE [MORE FILES...]
Supported File Formats:
*.vtk - VTK Legacy File
*.vtp - VTK Polygonal Data File
*.vtu - VTK Unstructured Grid Data File
*.ply - Stanford Polygon File
*.obj - Wavefront Object file
*.stl - Stereolithography File
*.pdb - Protein Data Bank File
Controls:
's' - surface
'w' - wireframe
'r' - reset and center camera
'q' - quit
'3' - toggle stereo mode
More Info:
https://github.com/HalCanary/vtkviewer
"""
import vtk
import sys
import os
import glob
import xml.etree.ElementTree
class VTKViewer(object):
def __init__(self):
self.renWin = vtk.vtkRenderWindow()
self.renWin.SetSize(800,600)
self.renWin.SetWindowName("VTK Viewer")
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.renWin)
self.renderer = vtk.vtkRenderer()
self.renWin.AddRenderer(self.renderer)
interactorStyle = self.iren.GetInteractorStyle()
interactorStyle.SetCurrentStyleToTrackballCamera()
if "STEREO_TYPE" in os.environ:
vtkStereoType = VTKViewer.GetVTKStereoType(
os.environ["STEREO_TYPE"])
if vtkStereoType is not None:
self.renWin.SetStereoType (vtkStereoType)
else:
                print '?%s?' % os.environ["STEREO_TYPE"]
def Start(self):
self.renWin.Render()
self.iren.Start()
@staticmethod
def GetDefaultColorMap(dataRange):
colorMap = vtk.vtkColorTransferFunction()
colorMap.SetColorSpaceToLab()
colorMap.AddRGBPoint(dataRange[0], 0.865, 0.865, 0.865)
colorMap.AddRGBPoint(dataRange[1], 0.706, 0.016, 0.150)
colorMap.Build()
return colorMap
@staticmethod
def LoadColorMap(file_name):
"""
ParaView has a XML colormap format:
<ColorMap space="RGB">
<Point x="0.0" r="0.0" g="0.0" b="0.0"/>
<Point x="0.4" r="0.901961" g="0.0" b="0.0"/>
<Point x="0.8" r="0.901961" g="0.901961" b="0.0"/>
<Point x="1.0" r="1.0" g="1.0" b="1.0"/>
<NaN r="0.0" g="0.498039" b="1.0"/>
</ColorMap>
"""
colorMap = vtk.vtkColorTransferFunction()
root = xml.etree.ElementTree.parse(file_name).getroot()
if root.tag != "ColorMap":
            raise Exception('Wrong Kind of XML File')
if "space" in root.attrib:
space = root.attrib["space"]
if space == "RGB":
colorMap.SetColorSpaceToRGB()
elif space == "Lab":
colorMap.SetColorSpaceToLab()
elif space == "Wrapped":
colorMap.SetColorSpaceToHSV()
colorMap.HSVWrapOn()
elif space == "Diverging":
colorMap.SetColorSpaceToDiverging()
else:
colorMap.SetColorSpaceToHSV()
else:
colorMap.SetColorSpaceToHSV()
point_found = False
for point in root:
if point.tag == "Point":
point_found = True
a, r,g,b, h,s,v = 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
if 'x' not in point.attrib:
continue
x = float(point.attrib['x'])
if 'o' in point.attrib:
a = float(point.attrib['o'])
if 'r' in point.attrib:
if 'g' not in point.attrib or 'b' not in point.attrib:
continue
r = float(point.attrib['r'])
g = float(point.attrib['g'])
b = float(point.attrib['b'])
colorMap.AddRGBPoint(x, r, g, b)
elif 'h' in point.attrib:
if 's' not in point.attrib or 'v' not in point.attrib:
continue
h = float(point.attrib['h'])
s = float(point.attrib['s'])
v = float(point.attrib['v'])
colorMap.AddHSVPoint(x, h, s, v)
else: ## 'r' or 'h' required
continue
elif point.tag == "NaN":
r, g, b = 0.25, 0.0, 0.0
if 'r' in point.attrib:
r = float(point.attrib['r'])
if 'g' in point.attrib:
g = float(point.attrib['g'])
if 'b' in point.attrib:
b = float(point.attrib['b'])
colorMap.SetNanColor(r, g, b)
## NAN doesn't support HSV. Why not?
if not point_found:
return None
colorMap.Build()
return colorMap
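    # Hedged usage sketch: given a ParaView-style colormap file like the XML shown in the
    # docstring above (the file name here is hypothetical),
    #   colorMap = VTKViewer.LoadColorMap("blue_to_red.xml")
    # returns a vtkColorTransferFunction, or None when the file defines no <Point> entries.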
def AddPolyData(self, polyData, colorMap=None):
"""
colorMap should be a vtkScalarsToColors (or derived class) object
"""
if colorMap is None:
colorMap = VTKViewer.GetDefaultColorMap(polyData.GetScalarRange())
polyDataMapper = vtk.vtkPolyDataMapper()
polyDataMapper.SetLookupTable(colorMap)
if polyData.GetPointData().GetNormals() is None:
polyDataNormals = vtk.vtkPolyDataNormals()
try:
polyDataNormals.SetInputData(polyData)
except:
polyDataNormals.SetInput(polyData)
polyDataNormals.SetFeatureAngle(90.0)
polyDataMapper.SetInputConnection(
polyDataNormals.GetOutputPort())
else:
try:
polyDataMapper.SetInputData(polyData)
except:
polyDataMapper.SetInput(polyData)
actor = vtk.vtkActor()
actor.GetProperty().SetPointSize(3)
actor.SetMapper(polyDataMapper)
self.renderer.AddActor(actor)
def AddFile(self, file_name, colorMap=None):
file_name_lower = file_name.lower()
if file_name_lower.endswith('.vtk'):
polyData = VTKViewer.ReadLegacyVTK(file_name)
elif file_name_lower.endswith(".vtp"):
polyData = VTKViewer.readPolyData(
file_name, vtk.vtkXMLPolyDataReader)
elif file_name_lower.endswith(".ply"):
polyData = VTKViewer.readPolyData(
file_name, vtk.vtkPLYReader)
elif file_name_lower.endswith(".obj"):
polyData = VTKViewer.readPolyData(
file_name, vtk.vtkOBJReader)
elif file_name_lower.endswith(".stl"):
polyData = VTKViewer.readPolyData(
file_name, vtk.vtkSTLReader)
elif file_name_lower.endswith(".vtu"):
polyData = VTKViewer.readDataSet(
file_name, vtk.vtkXMLUnstructuredGridReader)
elif file_name_lower.endswith(".pdb"):
polyData = VTKViewer.ReadPDB(file_name)
elif file_name_lower.endswith(".vti"):
polyData = VTKViewer.readDataSet(
file_name, vtk.vtkXMLImageDataReader)
elif file_name_lower.endswith(".vts"):
polyData = VTKViewer.readDataSet(
file_name, vtk.vtkXMLStructuredGridReader)
elif file_name_lower.endswith(".vtr"):
polyData = VTKViewer.readDataSet(
file_name, vtk.vtkXMLRectilinearGridReader)
else:
print file_name, ": BAD FILE NAME. Should end",
print "in VTK, VTP, PLY, OBJ, STL, VTU, or PDB."
raise Exception()
self.AddPolyData(polyData, colorMap)
return
@staticmethod
def ReadPDB(file_name):
pdb = vtk.vtkPDBReader()
pdb.SetFileName(file_name)
pdb.SetHBScale(1.0)
pdb.SetBScale(1.0)
pdb.Update()
sphere = vtk.vtkSphereSource()
sphere.SetCenter(0, 0, 0)
sphere.SetRadius(1)
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(pdb.GetOutputPort())
glyph.SetSourceConnection(sphere.GetOutputPort())
glyph.SetOrient(1)
glyph.SetColorMode(1)
glyph.SetScaleMode(2)
glyph.SetScaleFactor(.25)
glyph.Update()
tube = vtk.vtkTubeFilter()
tube.SetInputConnection(pdb.GetOutputPort())
tube.SetNumberOfSides(6)
tube.CappingOff()
tube.SetRadius(0.2)
tube.SetVaryRadius(0)
tube.SetRadiusFactor(10)
tube.Update()
tubeMesh = vtk.vtkPolyData()
tubeMesh.ShallowCopy(tube.GetOutput())
N = tubeMesh.GetNumberOfPoints()
rgb_colors = tubeMesh.GetPointData().GetArray("rgb_colors")
if rgb_colors is not None:
if rgb_colors.GetNumberOfComponents() == 3:
for i in xrange(N):
rgb_colors.SetTupleValue(i, (127,127,127))
appendFilter = vtk.vtkAppendPolyData()
appendFilter.AddInputConnection(glyph.GetOutputPort())
try:
appendFilter.AddInputData(tubeMesh)
except:
appendFilter.AddInput(tubeMesh)
appendFilter.Update()
polyData = vtk.vtkPolyData()
polyData.ShallowCopy(appendFilter.GetOutput())
return polyData
@staticmethod
def ConvertDataSetToSurface(algorithmOutputPort):
dataSetSurfaceFilter = vtk.vtkDataSetSurfaceFilter()
dataSetSurfaceFilter.SetInputConnection(algorithmOutputPort)
dataSetSurfaceFilter.Update()
polyData = vtk.vtkPolyData()
polyData.ShallowCopy(dataSetSurfaceFilter.GetOutput())
return polyData
@staticmethod
def readPolyData(file_name, readerType):
reader = readerType()
reader.SetFileName(file_name)
reader.Update()
polyData = vtk.vtkPolyData()
polyData.ShallowCopy(reader.GetOutput())
return polyData
@staticmethod
def readDataSet(file_name, readerType):
reader = readerType()
reader.SetFileName(file_name)
reader.Update()
return VTKViewer.ConvertDataSetToSurface(
reader.GetOutputPort())
@staticmethod
def ReadLegacyVTK(file_name):
reader = vtk.vtkDataSetReader()
reader.SetFileName(file_name)
reader.Update()
if None != reader.GetPolyDataOutput():
polyData = vtk.vtkPolyData()
polyData.ShallowCopy(reader.GetPolyDataOutput())
return polyData
if None != reader.GetUnstructuredGridOutput():
return VTKViewer.ConvertDataSetToSurface(reader.GetOutputPort())
if None != reader.GetStructuredPointsOutput():
return VTKViewer.ConvertDataSetToSurface(reader.GetOutputPort())
if None != reader.GetStructuredGridOutput():
return VTKViewer.ConvertDataSetToSurface(reader.GetOutputPort())
if None != reader.GetRectilinearGridOutput():
return VTKViewer.ConvertDataSetToSurface(reader.GetOutputPort())
else:
raise Exception("unsupported: ????????\n")
@staticmethod
def GetVTKStereoType(stereoType):
if (stereoType == "CRYSTAL_EYES"):
return (vtk.VTK_STEREO_CRYSTAL_EYES)
elif (stereoType == "RED_BLUE"):
return (vtk.VTK_STEREO_RED_BLUE)
elif (stereoType == "INTERLACED"):
return (vtk.VTK_STEREO_INTERLACED)
elif (stereoType == "LEFT"):
return (vtk.VTK_STEREO_LEFT)
elif (stereoType == "RIGHT"):
return (vtk.VTK_STEREO_RIGHT)
elif (stereoType == "DRESDEN"):
return (vtk.VTK_STEREO_DRESDEN)
elif (stereoType == "ANAGLYPH"):
return (vtk.VTK_STEREO_ANAGLYPH)
elif (stereoType == "CHECKERBOARD"):
return (vtk.VTK_STEREO_CHECKERBOARD)
elif (stereoType == "SPLITVIEWPORT_HORIZONTAL"):
return (vtk.VTK_STEREO_SPLITVIEWPORT_HORIZONTAL)
else:
return None
if __name__ == '__main__':
if len(sys.argv) == 1:
print useage
exit(1)
vtkviewer = VTKViewer()
if "COLORMAP" in os.environ:
colormap = VTKViewer.LoadColorMap(os.environ["COLORMAP"])
else:
colormap = None
for arg in sys.argv[1:]:
fileNames = glob.glob(arg)
if len(fileNames) == 0:
print "what:", arg
else:
for fileName in fileNames:
if os.path.isfile(fileName):
vtkviewer.AddFile(fileName,colormap)
else:
print "what:", fileName
vtkviewer.Start()
|
HalCanary/vtkviewer
|
vtkviewer.py
|
Python
|
apache-2.0
| 11,787
|
[
"ParaView",
"VTK"
] |
c73ad032d7af281494239de73fc0a2b297913e4adf04563e26287e68440e95e1
|
# Find regions that have zero coverage
# Copyright (C) 2015 Harold Pimentel
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import array
import math
import os
import pysam
import sys
from datetime import datetime, date, time
def right_now():
curr_time = datetime.now()
return curr_time.strftime("%c")
def print_log(aString):
print >> sys.stderr, "[%s] %s" % (right_now(), aString)
return
def status_bar(cur, total, barLength):
percent = float(cur) / float(total)
numStars = int(math.floor(percent * barLength))
sys.stdout.write("\r[")
for i in xrange(numStars):
sys.stdout.write("*")
for i in xrange(barLength - numStars):
sys.stdout.write(" ")
sys.stdout.write("]")
sys.stdout.write(" " + str(cur) + " / " + str(total))
sys.stdout.flush()
return
def unique(seq):
# Not order preserving
keys = {}
for e in seq:
keys[e] = 1
return keys.keys()
# make a dictionary of all transcripts; when hashed, it gives the array length of the transcript.
# for all reads, find the transcript each read maps to, then add 1 to its count
def readXprs(fileName, colName):
"""
fileName - a filename that points to a "results.xprs" file
colName - a column name (i.e. fpkm, eff_counts, etc...)
Returns: a dictionary that contains the fpkm
"""
fileHandle = open(fileName, "r")
firstLine = fileHandle.readline()
firstLine = firstLine.split()
# Find the column index
whichCol = -1
for colIdx in xrange(len(firstLine)):
if (firstLine[colIdx] == colName):
whichCol = colIdx
break
if whichCol == -1:
print_log("Error: " + colName + " is not a valid column name")
return
xprsDict = {}
lower_thresh = 5.0/1000.0
for line in fileHandle:
line = line.split()
if float(line[4]) != 0:
#xprsDict[line[1]] = float(line[whichCol])
# make sure the rate is bigger than the prior
rate = float(line[6]) / float(line[2])
if rate > lower_thresh:
# compute the rate: expected counts per effective base
# use the effective length for the rate used in the computations
# print >> sys.stderr, line[1], rate
xprsDict[line[1]] = float(line[6]) / float(line[3])
fileHandle.close()
return xprsDict
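# Hedged usage sketch (the path is hypothetical; the file is an eXpress "results.xprs" table):
#   xprsDict = readXprs("results.xprs", "fpkm")
# As written, the returned dict maps each kept target name (column 2 of the file) to
# column 7 divided by column 4, i.e. an estimated per-base rate, keeping only targets
# whose rate exceeds the 5/1000 prior threshold.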
# @profile
def computeZeroRegions(trans, rate):
# utrans = unique(trans)
utrans = trans.keys()
utrans.sort()
# utrans.sort()
ps = []
ranges = []
L = utrans[0] - 1
# if L >= 1:
# print trans
if L > 1:
# ps.append( math.exp(-rate*L) )
ps.append( -rate*L ) # taking the log
ranges.append( (1, utrans[0] - 1) )
for i in xrange(len(utrans) - 1):
L = utrans[i + 1] - utrans[i] - 1
# if L >= 1:
if L > 1:
# ps.append( math.exp(-rate*L) )
ps.append( -rate*L ) # taking the log pval
ranges.append( (utrans[i] + 1, utrans[i + 1] - 1) )
if len(ps) == 0:
return None
return (ranges, ps)
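# A hedged note on the statistic computed above: under a Poisson model with per-base
# read-start rate r, the probability that a gap of length L contains no read starts is
# exp(-r * L), so the stored log p-value is -r * L.  For example, r = 0.05 reads/base and
# a 100-base gap give log p = -5.0 (p is roughly 0.0067).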
def computeStartingPos(xprsResults, bamFile):
# create an empty list of lists
trans = [[] for x in xrange(len(bamFile.lengths))]
try:
while True:
read = bamFile.next()
if read.tid != -1:
# trans[read.tid].append(read.qstart)
trans[read.tid].append(read.pos)
except StopIteration:
pass
return trans
# faster than its counterpart
def computeStartingPos2(xprsResults, bamFile):
# create an empty list of lists
trans = [{} for x in xrange(len(bamFile.lengths))]
try:
while True:
read = bamFile.next()
if read.tid != -1:
# read.pos += 1 # make it 1 based
startPos = read.pos + 1 # make it 1 based
if read.is_reverse:
startPos = startPos + read.rlen - 1
if startPos in trans[read.tid]:
trans[read.tid][startPos] += 1
else:
trans[read.tid][startPos] = 1
except StopIteration:
pass
return trans
# @profile
def computeTests(trans, xprsResults, bamFile):
ntrans = len(trans)
tests = [None] * ntrans
for i in xrange(ntrans):
t = trans[i]
curTest = None
if i % 50 == 0:
status_bar(i, ntrans, 50)
if bamFile.getrname(i) in xprsResults:
curTest = computeZeroRegions(t, xprsResults[ bamFile.getrname(i) ])
tests[i] = curTest
print # clear the screen
return tests
# @profile
def printRegions(tests, outHandle, bamFile):
outHandle.write("reference\tstart\tend\tpvalue\n")
for tid in xrange(len(tests)):
# for each test per transcript
if tests[tid] is None or len(tests[tid]) != 2:
continue
status_bar(tid, len(tests), 50)
target = bamFile.getrname(tid)
for (range, p) in zip(tests[tid][0], tests[tid][1]):
outLine = target + "\t" + str(range[0]) + "\t" + str(range[1]) + "\t" + str(p) + "\n"
outHandle.write(outLine)
print
def main():
"""docstring for main"""
# read xprs data -- returns dict with fpkm
xprsFName = sys.argv[1]
print_log("Reading file " + xprsFName)
xprsResults = readXprs(xprsFName, "fpkm")
# read bam header -- returns hash with count array
# XXX: Only BAM currently
samFName = sys.argv[2]
print_log("Opening BAM file " + samFName)
samFile = pysam.Samfile(samFName, "rb")
print_log("Compute the number of reads starting at each position in the transcriptome")
trans = computeStartingPos2(xprsResults, samFile)
print_log("Computing Pr[zero region]")
tests = computeTests(trans, xprsResults, samFile)
outHandle = open(sys.argv[3], "w")
print_log("Printing...")
printRegions( tests, outHandle, samFile )
outHandle.close()
samFile.close()
if __name__ == '__main__':
main()
# python ~/zeroCoverage/zeroCoverage.py ~/er/human/ec-jl-794/Sample_Orthochromatic/xprs/results.xprs ~/er/human/ec-jl-794/Sample_Orthochromatic/align.bam ~/zeroCoverage/tests.out
# python -m cProfile ~/zeroCoverage/zeroCoverage.py ~/er/human/ec-jl-794/Sample_Orthochromatic/xprs/results.xprs ~/er/human/ec-jl-794/Sample_Orthochromatic/align.bam ~/zeroCoverage/tests.out
# kernprof.py -l -o zcHash.lprof ~/zeroCoverage/zeroCoverage.py ~/er/human/ec-jl-794/Sample_Orthochromatic/xprs/results.xprs ~/er/human/ec-jl-794/Sample_Orthochromatic/align.bam ~/zeroCoverage/tests.out.hash
|
pachterlab/kma
|
inst/pre-process/zeroCoverage.py
|
Python
|
gpl-2.0
| 7,172
|
[
"pysam"
] |
0e2a31f2154c33fee7370d2242bf2bab6105a3ce12710d5baf585c130f549e79
|
import socket
import sys
import base64
answers = {
'0264cf610d20d90ced78d4f1ca621763ea183234c20f12d00fe1171074e71ba3': 'nic Fronti',
'02a1deee284afc3acd59d1b68cf5c9ad40e4ccf47ba99db55e38df8f1136ef5e': 'g plans, dictating voicemail. P',
'02a72b19e546bc4ef56610c1f5c200ccef462907241a22909fbe341da20d92a2': 'assed it to me',
'03fa91cfa171866b0e8d36fe1695372933b841dff463ad600133299dbcbe2dc6': 'one is high-powered in ',
'041fe89c83218dc7266d3ee5c1a7f81f62e6dae31423c1c07417cb7223c24dbb': ' that I was in deep shit. Even Dan, sitting in',
'0472030383c575a8a86f25b01978837d7cc8c71b77365b491a8e3b192ee17bd7': 'Dozois, ',
'07bb1dee10d3e8cae56fd2cf1ccb731905e30af0d0cb422550d10f1c3e60aa2b': '-- fucking work it or call in',
'0949a34e3baad07cd7b19b30112fdd2f4fcaaf0a83dcfa88719aa0177c855ba5': 'going to wake up ',
'09528abc215d365971f2bc827b5e6bbe8e21e9047dc4a4093e8f38f2730a8f89': ' and scared and yes, pas',
'0a4def1cae72a724e81042f7565182ca70b7ffb2de9314bd6380b03c42e9bb84': 'elt his ',
'0c50e38e313c79633234998a12be09a1154f7baaaa7d9071ae6fdd01e1ce2353': ' to meet Tim Fung. H',
'0d14681cdb2a8d3bdcaee247d73e331803b2b97d636aafea596fee53bb1ca99a': 'd. It showed.',
'0dd119e9df7eedaa528040d0e88d619002e28711ac6779994e832e1beab59a61': ', "what\'s the deal?" I took her ha',
'0e3a65d6ac2d107b20ae0edd161f0cdbd6111ddb025f8803794078f9a4e54118': 'I take a ch',
'0ea35e7ea7c0d69e78336ebe4851d0ed4ab7c9fe787b4bf7a2fde4dc5321a0fa': 'es back for a second, then sai',
'0fd012430a9ee2b98716dca100cf20d0ccc791ef5767f3650f956323df5e3716': 'reland conference roo',
'125c75a74db7092b05d4e840fa5413fcb35456a0b7611173201b08e4121d3930': 'egan, then gave up and looked at Lil. Lil ',
'12c6a539a838c232281f8b5aa7fc645300c639ad4c92dbb30791b941d7fa02b8': 'hinking up my answer, he helped me to ',
'134347cceb9b328d4aad505f21411494e17aeeba74639ec97d68f94bee93dc28': 'lumgubbery!" ',
'135eb94f2796e763308d79332731d302a06caaf83f6116598d696ee10730c7b5': 'with me." I\'d rehearsed t',
'145e8e2a5f30c528b0a1b973fa2892a0364edd5640646cb879ab109742064d41': 'an call her instead. We met her up at Im',
'148383574f2ad598200f156721279479d1f0d15cd9012b3dfc2b6350a52b2aa4': 'im out of the room. _I guess I\'ll take what I ',
'16246fa5b37d0e34d5232438ff9d03671288c7e8f5c0892e6a7e60c1a2ad285d': ' slacks and tattered sp',
'17069dbca3643cdf219b89225c4f639002a3dd33b1ebf94d632c72cbbe093df8': ' visit, I\'d gotten stuck. Dan had unstuck ',
'171e9bf57c43816814b6e8164ae6136ef5cb13cbd69feb1718bca09f35f2051b': ' replaced -- and stay',
'1b89d03ad0d730a3e01fb38bd9882e82904e6ccddc4a56feda2e6098a5c23676': 'exciting stuff, huh?" he sai',
'1cba6c15a97e5d1c545f667ac26f55e99057de5da07b325303b83fc7edd54b04': 'totype. "This is it -- our upli',
'1d63bb7bbdb5b3f48ad515a28f7807aae90d782b319d9995393fc669fd68ba8d': 'ion systems to peer-to-peer adhocr',
'1db54b63cc9ddb34e97a622006571bd0111e3c0fe3767d27576a9cd688ac7aae': 'u can grow a',
'1fcb8da891478bdb2cadf2cdca5b6fcd417393daab0914ce0275bf97468cb22d': 'of the adversity they\'d',
'200a57808234a269380711e7246afa03102bcffa58d41feeca4554703f176acf': 'uneep\'s lab, jangling o',
'20785070b4b8c10418fede9f8e77136a0b5268f8aedca8df5a66b1e6050f9889': 'was neat and efficient in her ev',
'224db1248ed1d49b62a8c3f1ed2c5523d9606219f40a13e625cf9154fe6a3f17': 'nd he\'s not',
'2275b37479cf370f4c197a42032ed8dd7b8f8a75e857bafecdbf4f227e549f8a': ' about it?" Rita shook her head. "It\'s',
'231c3978a61f5593cf642670b2d2c21fde26dab4dd25a10f80a7be595b4cd5a1': 'home, as',
'23797eee348fb81de9feb5731d518fe810b0feb2513c590184778a682e4579ef': 'into inadvertent contac',
'25a37530e084b1cad644fd55cd59f54ea07e7c7e018fb7aa20a695d60a3f808f': '# This is how yo',
'28243e593f24c3ea0d20123294d89a1201280244521570e9d181480fe5251417': ' live, I\'d like to record you saying so, just i',
'28a257226295d80ff5dc9c11d1ae88f28dfe6130397b5d370c501d07f8e3e250': ' licenses terminated provided such individuals ',
'2a51dcba63a0233b639f8d112ed69daeac8003489bef436848336d573a2161f3': 'ifted to one of',
'2ac5707d2f9e5a8bde81b05fd2a3a13cfd172e742db43b208f5583078fdef4ee': 'isney World. "Oh, I don\'t think',
'2ae6c38bfc2fdc969b78843d079e7dfdcf8b2a69ad9027e1cfc435a7eb14da52': 'sons for being so attached',
'2b8018af6899206522a51e2fd77931ec58d7b68b21c8525035bd7c1c45c6318c': ' you to know." He ',
'2c91fb42bbd0adf7b517da8f065cd0b939d41074c20e4e15b240cab01059abdf': 'sibly ha',
'2cb3648de2a9aa1ceb51c8ad92012ada15e1913ae232f84c552063b8db0ce458': ' that happens, there\'s nothing',
'31e167c0f0b49633d3aa968a3088c034558acf784cb47758273304d21d5258c3': 'ccumulated more esteem f',
'333a44322cf93b248a2c573b237c11dc1e1c516af0370eb0a831409a858d1a3f': 'll here in ten thousand years, I\'m goi',
'374b3312595e129ef5a4e33924aef379e99e791b4e464ba78c8d82f11355d93c': 'p the stageside mic and thank',
'38efaeb7a96a0328f04063693bed4cba4d6d879d6e2e7d5aaa2f88bbc5704c00': ' to the Ima',
'3f349dcd55522fae7fc1c80f45790413a7b82f1236cef0bc6305f68558563382': 'n a. This License and the rights g',
'40e75225724686a7dfa5920bbad7e344ed12dd877a6c9d7c5f5cc9103587e165': 'ing spirits made t',
'4159ab6c10ed7aacf3f80ef2d39e1e801da95ee47c151a3b41d03b1d453724ba': ' I said. "Just going somewhere else, startin',
'4175383e2268faf37f5d1aae6a5e9abae72fdd369df816498cd0638bef40f32d': 'itment." E',
'423592f270eeb7abd5522fd976c2974a75c812b8ecb7414fa5d506265a3120db': ' there. Growing up in a mounta',
'4384d277186f7676b3f1f8834fb766a03ea9621f291ab70487b7f4fbbd68c4c2': 'n deadheading, b',
'43a9c357ea6db33807e805c949eb2bb0348769049978c21dd2c005a7a2d201b2': 'ribute, d',
'43edd896828826cc44673ecbc4f3624e5f1ebfc899f831b4a968d4e1359757be': 'trapped down, in full-on four-poi',
'43f84996e0a40eefc001a6539260c1e9183a8cf79d37c2463d7e3e48ef79b944': 'ember. Her parents -- ',
'4426dd7bebaf73bae6f469b817e03114d6e51ca35fdaa83cb30141ff35608b17': 'ban settings, and liked what I had to say ther',
'45a65ffdc7aa7dccbbe4061d510dc864c022d2b3c5fadb965a97096f334babe1': 'st before I left fo',
'460df3d1a1576affb2340ca3db0c108700daf71c7dbfd063da89980b6e222ee4': 'astern Standard Tribe http:/',
'468c64ef4622699d91470fa19366bed0041211b9fb6a9571b26536128699034d': ' a merciful gap in memory fr',
'4761047d78862fdc7c839bd57557f25aac5c62343d5c8d641a5cbdc8ac922aaa': 'for the geosynch broadband constellati',
'486e951f638426031ed5d8c6c605726addd8db153139910c3f938b0c4a7743d2': 'e other hand, were fascinating people, m',
'48d3cdd38b769d0c2107ecb5326c3c47df184c8e0e4910f8f3b7e18fa7c65568': 'ly digitally perform the W',
'4a56c86198905c81a2313d20b160fbb95acacb45d2f46847a9f2a95370d73c71': 'in on you -- you\'ve got all the Whuffie you ',
'4a9c68ddc436a7e3643c209c4ec9d50ff31934f26dfed267f763c7f72e297ee3': ' of getting into character. So',
'4ccc70698cab47b0523b999bebe3f62565160514135d700ac754aab89dc3a021': '\'d been staring into all afternoon, and',
'4dc1e2fd005e70daa984160175e58b1107710c295d6a1b017f77f4a41ec20dbf': 'ity poverty',
'4dc6a806613b4e8de33a3e1467432c3621189fbe9d062b42ffe62268e96fe125': 'll all be fine. ',
'4eaa7b8bc08cf497d57a4ec87771d3238e0606f1fe06e26c92c12b59921d8f8b': 'ay, I take a chance when I strike up a',
'511d23b162e7f63eb87d781fb4d6637de119eeaba78490611fb02a56fd8a654c': 'e box he was holding and headed to the M',
'52c5971130541da1f40b782995e0a3e8b5dd12ae1dd9a21f20cd41f3cc5b9d90': 'es she was co',
'52fa6e7a597ebce3f9d02e713410d6b6881e2e55612319d83ee818154ceef839': 'impose any terms ',
'53de6c30a3e668681a2a2599b9d9fd36f5162f84493518ea5ca936ebb6f23b78': 'd help." ',
'5658a640b9f14dc30f910a76e58e5fcb355a2c0fd330613dbe923ac7ad0fea4d': ' The Park became a touchstone for ',
'57a37730fbfe21b613be85bf73101314c39f627e96c2bb1e0e3ee62a010d8c2e': 'hrough me as I realized tha',
'5a1c21c952f862948b4430315635730b7ed74fda13c513ed51fdef191250965f': 'ontrol access or use of the Work in a mann',
'5ab1cb9c367fef492826326cb63d9e31761e1643d949d8a9ec6001d80f7fea42': 'ce the Bitchu',
'5c07a63629859487b913355cb56b6f52dea64a61306e95607d4d8aafe40e40f7': 'e to it that he gets it. "Next,',
'5c28784fd1e7e89499e8180be9325e0b8b10390ff85bb66d7ca5cdbf1c9b66d4': ' carefull',
'5da46ee5a5aac02ef9faf69cb215c9fd15c6aef15add7b4f05a5da843c7caff3': 'nd bitter, lea',
'5deecc86fa5522136ba68d9b120006e2c9872d970239bb3f6bc55c64281fab21': 'nd novel: Easte',
'5e55b3b5634229ff23d8c72f9416c5215f90bf4e5e80efce54f3cfdac323bde1': 'Boing (http://bo',
'65cee2d777b4f8c1a83ca5acd7314568a08ed56bb5e089518f092d7a621ef020': 'ced with the sterile, thou',
'66359bd5ff2ff64ff84b9dc707fc04dc11c5256d62af45a6cbfb2076d54421a9': '. It was a truism among the first-gen ad-hocs',
'6636803e173464976466c53d8a0a71d0be1e76eb52792001b7777ca530f3ed8f': 'ternately moping',
'6732114076e77b50cbace22c0316b275079c5ee27b69c53275994ff9dadf91de': 'ook my head, con',
'67ba70eddb28a56a8b3de659e3ca1e06859ba5fa3c406bd3762bbe1d1768ef1b': 'a success." Another castmember stood.',
'6b7670c2c34de6a0f8901dac76bc995f5f328bf92b113a25730837ceee709b1e': 'World. Identical sims would stack li',
'6bf430eca22487f7ac6625423651a54b01ae674cb1d8ca4a9a151ad57b01ac85': 'd them, _flump_, and I turne',
'6cc76c2234435124ba1d0ad8dd198c26cb4f51bfd33fcb450b01b031d796bcd0': 'im and the rest, as they say, is hi',
'6d416afb71f14bdf35458b00a4a3feefb839ac625e0113861cda56cdcbd1e8db': 'g a litt',
'6da0479dbec2fb922508e9c28364b347aee4606000e638691cabaf99469fe880': 'er-than-necessary clunk. Sometimes, Lil ',
'7079342ca58b0ebc39df29231edea9eaba441a3024fad3553f486d53f9e85da3': 'very allowance. He\'s',
'70b642c9650f15692d2eef8489c0162bacec8418c2533e74f8b341f6b1a2a8f4': 'issionary -- one of those fringe',
'70eabc764f38e020cf873084fe3f243aeb8c27828ea98cb1545d16fd0fa173a8': 's going to be a hit. Dan took a shot ',
'716376f8b468543a5287d8bed95298891ed860171d649896cfa1d3576e3ec7c5': 'least political in the Park',
'71e031d0b44f2e6be154a1c0f10cd26808d16a48df195f3249eadb527e5d2685': '# They were not happy to be returned from t',
'724502396b92d33fc879c0d4fb968b605bb8909d1d7de54958bcdf2dd582813b': 'r. Once you took backup-and-',
'72616b1e772ee73f2b216afa53c1a8e0cd2bed14780e1256c080d576657db04c': 'one was talking. "I was supposed to ',
'748b8778339a5b3aa0184964b05d3fe1e8d1e9274e9586cd8bc95eb5fbf56c8d': ' tear the Park down. D',
'74ee0b263a23d235aabcd1825662c8f809c90823e521dfbccf1ddbde0b53b45b': ' respect for the person she\'d been. Now s',
'75a136e8f4390de5dac5c53fa0712958a38b9d297d183053838e416b6dd2ef32': 'ked my Whuff',
'7a3b81311fe2dd0f757ede871e2057cb3f65a46b7eeb381d5a68f8ca2fc692a8': ' struck a chord in me. My person',
'7c6450f10676464c55b617f7dddbc4c48e0d4cba4aa3de753cd512a44fff3d02': 'for. Good custodi',
'8325033e1ebc256a8eb6e9a8c77e8638a9f18e2dc24fedc276a906505daa0e8b': 'ense Grant. Subject to the terms and condit',
'866bfb5feb59804b4953039af608c5b718d1830073162e8fe9a5c76604765a7a': ' stories from the Pirates project in Beijing',
'8c0d0e13b2ea0172ebf48a6c02cd03928a744b3a3d68ff6f66689e7ff5d12e48': 'but we got the job ',
'8c19a962c63c682311157a09583580cd4decc89671a445b65d226486aef74a28': ' pot to piss in. Or ',
'91d72f44a9e03ec34e1bd83ae5f0abcce30e2e3a9db214f5c4d3821768110af5': ' a speck with a tie-mic, droning o',
'93bcb7d0d95a0f103c03bf66e0601410e49b7e7e5317ee7766817bc263fcc4f5': 'ove with Lil again. To wake up to a M',
'95664a9ffc97d8b926bdeebd0c77ffb263ef34768f3e623cd1f2ac0230686a56': 'sleeping bag on the floor of the ',
'95ebccc89a29eca3af729add11eb2e8ae1891856f0c5d804e9d0efb665a31e21': 'ven out that night to co',
'970db110df07f54fc5dff12ec784690adaf8d3f37ccf3d0b86354751c0cf8809': 'no point. That\'s why we\'ve got you strapped do',
'9890477eda0c251dfa8d0e427e881cf6116e8b593417d2a7b846c0448d7fe033': 'Sociology Departmen',
'992d15462c08f6c4687e780aaf71457769437c56c1a9e869fd3b4c65c90c3a96': 'nd I stood awkwardly to the side',
'99e2b946c1bea4a6e4e7c4c45b64ef24e45612cbf8817634b5f93d1e4ed289f9': '"That\'s not really my area," Tim said. "I\'m a p',
'9ae220e24b03cfd800f8f3428ea21c5e19a43ae60b490395ff881304c60c959f': ' My nose itched. I tried to ignore it, ',
'9d31fd82a518ecf2b8a3ab1b51c79c58524a88d4fd257edf13f102c95c376a98': 'n a package I could slip in my shirt pocket',
'9dee5104e9e88495dfa7a9e744b8833a033506dd7b690c75737dd3d1b099988e': 'ringe the cop',
'9e3984e45953d7bab2a71851d71143c5bebc942ce69d173c55fc37ded0dade40': ' Nassau. "Well?" she asked. She looked rea',
'9fe6bedbacaa5db37395589099bac43dabf55b3a4eddcadf7962e7663b3a0032': ' one had actually written ',
'a1bf1862ee5abd940af4ef259de0cd9fa652ecf7e0876b8d18d324f5e07f816a': 'cense, and You must includ',
'a1ca1dc79f42cff39202467d11b19c1222ab8ab3b6a7de1a374b8c13e0d7282f': 'y pulled the trigger. ',
'a1cfc505a16c9021e8bce82963a79097e45f6eddbd77b940341f46e2d1ab8ebf': '"NO!" he shouted back. "You don\'t _have to_',
'a1e4e95b4c480acfbf28da55a4aa866c68a5f9f70aecbcc5b6909e630befa6cf': 'hat is repeated thro',
'a2823d9b70dd1f34cb453253fa1e0ae781c25c8f6c8645eb7b67b37b255d7c89': ' of our cul-de-sac. ',
'a3755825e2cc7dd6336f9ec496745e9cc208a679fd36e69568f412f38e2aab0d': ' streamlined process. The good part is',
'a3c84b9508a45bba36fdacd9596166217428d77d1fcdc9f96ba76f3accf6f80e': ' could hardly believe that this was my vibrant',
'a53eafae2cdd47adfb96003e24922db85d5db0e1e4fa6223db2c1b539201bc3d': ' in the Bit',
'a6c8d544a5e5371d838adb4ee8caa54d8c808c898877a5a993564fcd22fc9e66': 'ey do this somewhere else? Why did they have t',
'a7b895c73ec3a856dd094486da2526f7b348d0389a4adfb79259f1e556d05b1c': 'he total, taking th',
'a92a94b9add3dc09e4eb492c511ee257e5c3a815f2ddde279871096e02edcb79': '"It was part of the d',
'aaf006b87e19efb4aaf9486355b97e19c5478e595125da1cf5a5b51bb67a7ff4': ' ever develop goo',
'ada47f7db3024c933b332312308161abe8ef74345ee3200bb92bd8ebf3048569': 'de the thing?" "Be',
'af31f0ea0db5dddd63d1f146cfc626384a3094a24c1709d3e570e003892fc3ff': 'eing called son',
'b048edb0cbe9ef4d6a0482cf71dba1f9a9b8ff46c76d85b1bdafac6dfa0a0bdf': 'tle and mask',
'b3a8a90cc984eaa2e2bc470234866d8ea52d5dd4914170d9ee37f61564cd5b34': 'Once I am outside the door, point it at your h',
'b413395737489ef6c2b1f39ac63656477cc39e4569b3c7a0c752396a172c4b6b': 'the exclusive rights of the copyright owner ',
'b62ffa0a8ba7736a9ab5e9609a22b166019b717d2b79721442ff81c1b415ffbd': 'can help. Just stonewall them -- tell them ',
'b803de80e1540b373293d4214a9bea790cf7f13bfc239d211070ab92a6712955': 'bject to t',
'bbeaf93af50a23493931ea3930cf24d6d024c2093bddba8f3dc878b90720a92d': ' -- not derivative wo',
'bf79ba3932cc3c525d5f61bfb45d7b786f9b98ac7aba0392e8d853784e347545': 'as one of the Seven Sisters -',
'bfce1e3abc86103db88f223e5da1e5db808a1f26c48504d6a8cc7ae9ddc7aa72': 'll restore you from your last backup.',
'c052f1ef9319981bf2d18f80fcff66d3b438ad6e7175921d58b6fbe7741ce470': 'ecognizing',
'c07eb47e7f357eedd009008b97e9433a4b08933344aa1bcf66748768a68bb6c6': ' "Not really. . . I mean, not in so many',
'c0ba61e267c9b93f89239fe3edee3adea5fd5f400e8302f009a934d6ba429838': 'it was pure brain-reward, a jo',
'c1d17b455c1e7a31aa18f55b08f6fcb00702471d140b3b6a071960bcf29cf7d4': 'd by), and hard-faced anonym',
'c20c4ce2f69a9159585594373e4e8b6d59ef98e6eb0f5a034fd3ac97abb26467': 'as stated abo',
'c5ef21e41afa77aae0121c22ad126b47010f869540031c4ac2911eeea2889aa4': 'ne, I\'ll conced',
'c79cd3fa1bb74c951205c0882ec1b51a55de6d47dfd58600843d5c4acd807c52': 'important the Mansion was to the people we wer',
'c84a3713a4f6e49004aaed1d19ed0618652afde8dc94bab8628ccddcecb80629': 'on\'t want refresh because it would',
'c9246d3a03ce8c5f726e6c0508e14c7ee1b13f13d7b1b1818d85c376cae5e014': 's a single castmemb',
'cd4268d0ed4afad2e219e073145eb6d492fe0dc3de218e7312fee9437abe7338': ' I carefully examined their queuing styl',
'cd837728f86bee28c2adc278f2bab9acd3ae51cf9c9f344762c5b26c1c09d72c': 'hat having Juliu',
'ce4b27028d075e72a9ed4579a57b949f93e02501321d07151ddd3e60ec20041a': 'disclaimer of',
'ce59c0fd4173263b58588314474e79d972af9144dd33c7d2ae38dfabf611d439': 'ions, Julius," Debr',
'cfaf019392f7afa9d4faba1e08a9e6489338bf5e0b8374d3aac721eb78b639ca': 'uberant laugh I\'d remember forever. And i',
'd1f4ebb60808cea2f954a09c646ff3e2b033adb07743e3c7a52d202245842997': 'their while, a',
'd3e7d1dab995f94c2a747be9ddbbda3028e9c9ea1a0188c0a07e34ddafc5eda9': ' not going to let y',
'd6d51825c890b9fec910d7f37c1bcc57390d34640b9c40afbee7e1825553b3f7': ' mischief. Th',
'd713408923cb409b183697c8be69aac8335c0d191cd39fca5ebba750623ec076': 'lated over t',
'd7cde44440eb930615d8e450701eca29f17fb98dfe2ed9f997bfbbad6c643449': 'e up and running in no time. "Now, as far',
'd813e7644c246de260bac5f9073de0637fe4e6e61efde185be315c71a067ef93': ' finding the path of ',
'd82ced9d9bf9e6ffe62b660aa5dd83d12e55b5b201bcf20f8c1d8b9c0779905f': 'ant my backup was stale-dating, ',
'd8467c0127a992831c0907815061fa9ebb551abcfea0fa39e17061e44c4affe5': 'ere was a hollow, ro',
'd8da0c12fd8ab57d673f9ce830a4fc2fd83e436695a45b51c8f587464a52982f': 'he sent Dan out of the room and rolle',
'd91154067c3294e61ed39c18f54c9efb71d623f778cfdc1b4c2b4586ac4f8947': ' few minutes before the P',
'd96940b9d51c4d952208d3b5531904d6ccf6a349b3f42870a3cf45dd4c662a37': 'k." He waggled a ',
'db122e0b3472fbc14bb6e75953dada763259c41848000748f94f186c65d21982': 'lous ways, ',
'dcbcd9db76acbc3ee39df4f0330be7553f9b12ce7a52ad9e86f30bf4fda7217c': '============= A Place So Fo',
'e1d44a0860aba3ce3331cd7acbf704b35849f0882912cb80ee94266a49bbc712': 'remember." The smell of rejuve ',
'e2bdfb453b96fe420cda13ddcc2ab2faac4ae20604d184065222e61ce2849279': ' brought over',
'e2f6cc82e48fbcd5729bbfb8884df35683c7de860b8e0fee74b5f9a85c9385df': 'I\'d had your smarts at twenty-three," he said',
'e5d252b3cac8203111607d87b7a7f45eab1780cf0fcb28eb25e516bbdf057fac': 'tural relevance a',
'e61ef62844af68a95b2dbbd29ce7376796b3a421ae6047a023dfb6a1f5e0be6f': 'od point, Lisa. The offer we\'re making to ',
'e6aff3d486a217bb5b4896eb3791da4c4071233e419423ce155e57e2a13b6036': ' her hair. S',
'ea6473d754f717ef79f369e5c2b4250aecad02e9c46fe7a4e156cd1a38a07282': ' the revolut',
'eb0ddccbfc9438d8c371c11c01e8f74bc6abbfbe5afe4d9a91087691fbd3fb27': 'me I\'d long since forgotten but whose exuberan',
'eb2ce81d067cdcbb916168192f9d8660c330f510e0f598c51a422d2751f65d82': 'th him, the way he stuck with you',
'eb8567b2407172086e58811c2cf00fb89cdca5dddb90f87a29ee0e2e046e7520': ' too precisely, too tho',
'ec82c146c0492d34726ac3d268194454bc95fd81af1ac8ae2fcda450c323031c': 'ached for the safety bar, I felt that it',
'ed0a3fafd19895113102673dd69591d6390554fd45ba0ffb0d4859b3f5f00f2b': 'il\'s folks had taken over the run',
'edadb2154ce2087814496af4e27ac3fe095855cd06ec98255f618009852231a2': 'the Presidents, the Li',
'eeb4f76a92eda100ea7dbebd2c16136826b1614f3635f93488dafcda597af099': 'y visit this place for ',
'eed8ec078fa0b1c0051cb76d27f7d55fc609d3ba84d9f41147ac0aca041b9944': ' a coral-slow battle for a stool at the',
'eee4c0b921f8abc1b55e6d22ff6ed17d62f6f0c8f019f5ba19a38c90408baf9b': 'of voices f',
'f37ddb17c9c6be45fc3d3e2fc3bc47ecccbee8302161dc75a0835f0dcd7ef595': 'e-dumped in a few seconds. Debra sai',
'f48a625f4a3b3babdaec1543d10c165b599d3282a714e34a04574d993c8617fb': 'p in front of the gate, and were groanin',
'fa22849be95c77765c45fa88c6bd36fd07b2c04b3ae56b0e2a9b4d7771bba2ec': '," she said. "I mean, the worst possib',
'fb659864538aabf3ce94e24274a6357e873e25a1e69ae2ff4a8f0b5eaf38bef4': ' I\'m going to get Lil and her pe',
'fcdab3ffed0d6f1b9c0372f85f319fc57396d48b3af499d9bc38d79dec90d3ac': 'ming to the point. "We read you',
'fd78bede3034235947d68673b07f0f2d31ab105ebcf558f0fda1cd6e811e4231': 'red welts from the sting',
'fd9fefa2dd400378cb038df483572bd1b47f3ef4568436cef30cab231e592ef4': 'for a co',
'fde085f4b965f552af65d936e4918d309f8406d9c2e015c1ac242c25ac3a10df': ', and it wa',
'fe6166dbdb09203d04dd5f80980de8158eb8d9552a7b2eb6707d0d5f013cfea5': 'cense in summary: http://creati',
'fe7be52bb80bfba1ed469c358c7b13703d8a133960123869899af34c24e515c5': 'e people I\'d met when I was doi',
'ff7da981ed65400601558f7b4d65e7b70d8561b44f1b6af9bf8f8c87073fa8de': 'icced nervously as I watched my progress',
'ffc59b0732832dc3f9d077bd23a03dbdf3416236b64d065993a14487c0c9ea61': 'c research on stuff th'
}
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('cm2k-sorcery_13de8e6bf26e435fc43efaf46b488eae.quals.shallweplayaga.me', 12002)
sock.connect(server_address)
input = sock.recv(1024).strip().split('\n')[1]
print "File: ", input
answer = base64.b64encode(answers[input])
print "Answer: ", answer
sock.send(answer + '\n')
while True:
input = sock.recv(1024).strip()
if input.find('flag') != -1:
print "FLAG: ", input
exit(0)
print "File: ", input
answer = base64.b64encode(answers[input])
print "Answer: ", answer
sock.send(answer + '\n')
|
sinfocol/ctfs
|
writeups/2017/defcon-qualifiers/crackme-2000/sorcery.py
|
Python
|
gpl-3.0
| 20,475
|
[
"VisIt",
"exciting"
] |
08f5825df7e14f3f8892de1bc86f4c31d806dc740626d44a56567f29279a5ba1
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from functools import reduce
from pyscf import gto
from pyscf import scf
from pyscf import cc
from pyscf import ao2mo
from pyscf import fci
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.basis = '3-21g'
mol.build()
mf = scf.RHF(mol).run()
def tearDownModule():
global mol, mf
del mol, mf
def finger(a):
return numpy.dot(a.ravel(), numpy.cos(numpy.arange(a.size)))
class KnownValues(unittest.TestCase):
def test_ccsd(self):
mycc = cc.CCSD(mf)
ecc = mycc.kernel()[0]
norb = mf.mo_coeff.shape[1]
nelec = mol.nelec
h2e = ao2mo.restore(1, ao2mo.kernel(mf._eri, mf.mo_coeff), norb)
h1e = reduce(numpy.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
eci, civec = fci.direct_spin0.kernel(h1e, h2e, norb, nelec)
dm1ref = fci.direct_spin0.make_rdm1(civec, norb, nelec)
eci = eci + mol.energy_nuc() - mf.e_tot
self.assertAlmostEqual(eci, ecc, 7)
l1, l2 = mycc.solve_lambda()
self.assertAlmostEqual(finger(l1), 0.0106196828089, 5)
dm1 = mycc.make_rdm1()
self.assertAlmostEqual(abs(dm1ref-dm1).max(), 0, 5)
if __name__ == "__main__":
print("Full Tests for 2e CCSD")
unittest.main()
|
gkc1000/pyscf
|
pyscf/cc/test/test_2e.py
|
Python
|
apache-2.0
| 2,031
|
[
"PySCF"
] |
af22d8f74c6474e3bbab4cd8c02e89150cd2af68fa5461b9228b3405c5d062d7
|
# # #
# Current implementation of the cru ts31 (ts32) delta downscaling procedure
#
# Author: Michael Lindgren (malindgren@alaska.edu)
# # #
import numpy as np
def write_gtiff( output_arr, template_meta, output_filename, compress=True ):
'''
DESCRIPTION:
------------
output a GeoTiff given a numpy ndarray, rasterio-style
metadata dictionary, and and output_filename.
If a multiband file is to be processed, the Longitude
dimension is expected to be the right-most.
--> dimensions should be (band, latitude, longitude)
ARGUMENTS:
----------
output_arr = [numpy.ndarray] with longitude as the right-most dimension
template_meta = [dict] rasterio-style raster meta dictionary. Typically
found in a template raster by: rasterio.open( fn ).meta
output_filename = [str] path to and name of the output GeoTiff to be
created. currently only 'GTiff' is supported.
compress = [bool] if True (default) LZW-compression is applied to the
output GeoTiff. If False, no compression is applied.
* this can also be added (along with many other gdal creation options)
to the template meta as a key value pair template_meta.update( compress='lzw' ).
		See Rasterio documentation for more details. This is just a common one that is used here.
RETURNS:
--------
string path to the new output_filename created
'''
	import os
	import warnings
	if 'transform' in template_meta.keys():
		_ = template_meta.pop( 'transform' )
	if not output_filename.endswith( '.tif' ):
		warnings.warn( 'output_filename does not end with ".tif", it has been fixed for you.' )
		output_filename = os.path.splitext( output_filename )[0] + '.tif'
if output_arr.ndim == 2:
# add in a new dimension - can get you into trouble with very large rasters...
output_arr = output_arr[ np.newaxis, ... ]
elif output_arr.ndim < 2:
raise ValueError( 'output_arr must have at least 2 dimensions' )
nbands, nrows, ncols = output_arr.shape
if template_meta[ 'count' ] != nbands:
raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
if compress == True and 'compress' not in template_meta.keys():
template_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
for band in range( 1, nbands+1 ):
out.write( output_arr[ band-1, ... ], band )
return output_filename
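# Hedged usage sketch (paths are hypothetical; the metadata comes from an existing raster):
#   import rasterio
#   with rasterio.open( 'template.tif' ) as tmpl:
#       meta, arr = tmpl.meta, tmpl.read( 1 )
#   write_gtiff( arr, meta, 'output', compress=True )  # the extension is fixed to '.tif'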
def shiftgrid( lon0, datain, lonsin, start=True, cyclic=360.0 ):
import numpy as np
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
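# Hedged worked example: for a 0.5-degree pacific-centered grid with lonsin = 0.0 .. 359.5,
# shiftgrid( 180., data, lonsin, start=False ) returns longitudes running -180.0 .. 179.5
# with the data columns rolled accordingly (i.e. back to a Greenwich-centered grid).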
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def padded_bounds( rst, npixels, crs ):
'''
convert the extents of 2 overlapping rasters to a shapefile with
an expansion of the intersection of the rasters extents by npixels
rst1: rasterio raster object
rst2: rasterio raster object
npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
expand in each direction. for 5 pixels in each direction it would look like
this: (-5. -5. 5, 5) or just in the right and top directions like this:
(0,0,5,5).
crs: epsg code or proj4string defining the geospatial reference
system
output_shapefile: string full path to the newly created output shapefile
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
return new_bounds
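# Hedged worked example: for a raster with 1000 m pixels and npixels = (-5, -5, 5, 5),
# each bound is pushed outward by 5 * 1000 m, so (l, b, r, t) becomes
# (l - 5000, b - 5000, r + 5000, t + 5000).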
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
	method = one of 'cubic', 'nearest', 'linear' (passed to scipy.interpolate.griddata)
'''
import numpy as np
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
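# Hedged usage sketch mirroring how this is called below: the grid tuple comes from
# numpy.meshgrid, e.g.
#   xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
#   zi = xyz_to_grid( df['lon'].values, df['lat'].values, df['anom'].values, (xi, yi), method='cubic' )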
def generate_anomalies( df, meshgrid_tuple, lons_pcll, template_raster_fn, src_transform, src_crs, src_nodata, output_filename, *args, **kwargs ):
'''
run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
extent, resolution, origin (template_raster).
This function is intended to be used to run a pathos.multiprocessing Pool's map function
across a list of pre-computed arguments.
RETURNS:
[str] path to the output filename generated
'''
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
# update some meta configs
template_meta.update( crs={'init':'epsg:3338'} )
template_meta.update( compress='lzw' )
interp_arr = xyz_to_grid( np.array(df['lon'].tolist()), \
np.array(df['lat'].tolist()), \
np.array(df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
src_nodata = -9999.0 # nodata
interp_arr[ np.isnan( interp_arr ) ] = src_nodata
dat, lons = shiftgrid( 180., interp_arr, lons_pcll, start=False )
output_arr = np.empty_like( template_raster.read( 1 ) )
reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
dst_nodata=None, resampling=RESAMPLING.cubic_spline, num_threads=1, SOURCE_EXTRA=1000 )
# mask it with the internal mask in the template raster, where 0 is oob.
output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
output_arr.fill_value = template_meta[ 'nodata' ]
output_arr = output_arr.filled()
return write_gtiff( output_arr, template_meta, output_filename, compress=True )
def fn_month_grouper( x ):
'''
take a filename and return the month element of the naming convention
'''
return os.path.splitext(os.path.basename(x))[0].split( '_' )[5]
def downscale_cru_historical( file_list, cru_cl20_arr, output_path, downscaling_operation ):
'''
take a list of cru_historical anomalies filenames, groupby month,
then downscale with the cru_cl20 climatology as a numpy 2d ndarray
that is also on the same grid as the anomalies files.
(intended to be the akcan 1km/2km extent).
operation can be one of 'mult', 'add', 'div' and represents the
downscaling operation to be use to scale the anomalies on top of the baseline.
this is based on how the anomalies were initially calculated.
RETURNS:
output path location of the new downscaled files.
'''
from functools import partial
def f( anomaly_fn, baseline_arr, output_path, downscaling_operation ):
def add( cru, anom ):
return cru + anom
def mult( cru, anom ):
return cru * anom
def div( cru, anom ):
# return cru / anom
# this one may not be useful, but the placeholder is here
			raise NotImplementedError( 'div is not implemented as a downscaling operation' )
cru_ts31 = rasterio.open( anomaly_fn )
meta = cru_ts31.meta
meta.update( compress='lzw', crs={'init':'epsg:3338'} )
cru_ts31 = cru_ts31.read( 1 )
operation_switch = { 'add':add, 'mult':mult, 'div':div }
# this is hardwired stuff for this fairly hardwired script.
output_filename = os.path.basename( anomaly_fn ).replace( 'anom', 'downscaled' )
output_filename = os.path.join( output_path, output_filename )
# both files need to be masked here since we use a RIDICULOUS oob value...
# for both tas and cld, values less than -200 are out of the range of acceptable values and it
# grabs the -3.4... mask values. so lets mask using this
baseline_arr = np.ma.masked_where( baseline_arr < -200, baseline_arr )
cru_ts31 = np.ma.masked_where( cru_ts31 < -200, cru_ts31 )
output_arr = operation_switch[ downscaling_operation ]( baseline_arr, cru_ts31 )
output_arr[ np.isinf(output_arr) ] = meta[ 'nodata' ]
if 'transform' in meta.keys():
meta.pop( 'transform' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( output_arr, 1 )
return output_filename
partial_f = partial( f, baseline_arr=cru_cl20_arr, output_path=output_path, downscaling_operation=downscaling_operation )
cru_ts31 = file_list.apply( lambda fn: partial_f( anomaly_fn=fn ) )
return output_path
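# A hedged worked example of the downscaling arithmetic used above: with 'absolute'
# anomalies the operation is 'add' (downscaled = CL2.0 baseline + anomaly, e.g.
# -2.0 C + 1.5 C = -0.5 C); with 'relative' anomalies it is 'mult'
# (downscaled = baseline * anomaly, e.g. 60 pct cloud * 1.10 = 66 pct).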
if __name__ == '__main__':
import rasterio, xray, os, glob, affine
from rasterio.warp import reproject, RESAMPLING
import geopandas as gpd
import pandas as pd
import numpy as np
from collections import OrderedDict
from shapely.geometry import Point
from pathos import multiprocessing as mp
import argparse
# parse the commandline arguments
	parser = argparse.ArgumentParser( description='downscale CRU TS3.x historical anomalies (tas/cld) onto the CRU CL2.0 climatology' )
parser.add_argument( "-hi", "--cru_ts31", action='store', dest='cru_ts31', type=str, help="path to historical tas/cld CRU TS3.1 input NetCDF file" )
parser.add_argument( "-ci", "--cl20_path", action='store', dest='cl20_path', type=str, help="path to historical CRU TS2.0 Climatology input directory in single-band GTiff Format" )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="path to ALFRESCO Formatted template raster to match outputs to." )
parser.add_argument( "-base", "--base_path", action='store', dest='base_path', type=str, help="string path to the folder to put the output files into" )
parser.add_argument( "-bt", "--year_begin", action='store', dest='year_begin', type=int, help="string in format YYYY of the beginning year in the series" )
parser.add_argument( "-et", "--year_end", action='store', dest='year_end', type=int, help="string in format YYYY of the ending year in the series" )
parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYY of the beginning year of the climatology period" )
parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYY of the ending year of the climatology period" )
parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
parser.add_argument( "-dso", "--downscaling_operation", action='store', dest='downscaling_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string of the abbreviation used to identify the variable (i.e. cld)." )
# parse args
args = parser.parse_args()
# unpack args
ncores = args.ncores
base_path = args.base_path
cru_ts31 = args.cru_ts31
cl20_path = args.cl20_path
template_raster_fn = args.template_raster_fn
anomalies_calc_type = args.anomalies_calc_type
downscaling_operation = args.downscaling_operation
climatology_begin = args.climatology_begin
climatology_end = args.climatology_end
year_begin = args.year_begin
year_end = args.year_end
variable = args.variable
metric = args.metric
# make some output directories if they are not there already to dump
# our output files
anomalies_path = os.path.join( base_path, variable, 'anom' )
if not os.path.exists( anomalies_path ):
os.makedirs( anomalies_path )
downscaled_path = os.path.join( base_path, variable, 'downscaled' )
if not os.path.exists( downscaled_path ):
os.makedirs( downscaled_path )
# open with xray
cru_ts31 = xray.open_dataset( cru_ts31 )
# open template raster
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
template_meta.update( crs={'init':'epsg:3338'} )
# make a mask with values of 0=nodata and 1=data
template_raster_mask = template_raster.read_masks( 1 )
template_raster_mask[ template_raster_mask == 255 ] = 1
# calculate the anomalies
# this is temporary name change for the tmp (tas) data naming diff.
if variable == 'tas':
variable = 'tmp'
clim_ds = cru_ts31.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = clim_ds[ variable ].groupby( 'time.month' ).mean( 'time' )
if anomalies_calc_type == 'relative':
anomalies = cru_ts31[ variable ].groupby( 'time.month' ) / climatology
if anomalies_calc_type == 'absolute':
anomalies = cru_ts31[ variable ].groupby( 'time.month' ) - climatology
# reset the variable if tas
if variable == 'tmp':
variable = 'tas'
# rotate the anomalies to pacific centered latlong -- this is already in the greenwich latlong
dat_pcll, lons_pcll = shiftgrid( 0., anomalies, anomalies.lon.data )
# # generate an expanded extent (from the template_raster) to interpolate across
template_raster = rasterio.open( template_raster_fn )
# output_resolution = (1000.0, 1000.0) # hardwired, but we are building this for IEM which requires 1km
template_meta = template_raster.meta
# # interpolate to a new grid
# get longitudes and latitudes using meshgrid
lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ] # mesh the lons/lats
# convert into GeoDataFrame and drop all the NaNs
df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in dat_pcll ]
xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
# meshgrid_tuple = np.meshgrid( lons_pcll, anomalies.lat.data )
# argument setup
src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
src_crs = {'init':'epsg:4326'}
src_nodata = -9999.0
# output_filenames setup
years = np.arange( int(year_begin), int(year_end)+1, 1 ).astype( str ).tolist()
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
month_year = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( anomalies_path, '_'.join([ variable,metric,'cru_ts323_anom',month,year])+'.tif' )
for month, year in month_year ]
# build a list of keyword args to pass to the pool of workers.
args_list = [ {'df':df, 'meshgrid_tuple':(xi, yi), 'lons_pcll':lons_pcll, \
'template_raster_fn':template_raster_fn, 'src_transform':src_transform, \
'src_crs':src_crs, 'src_nodata':src_nodata, 'output_filename':fn } \
for df, fn in zip( df_list, output_filenames ) ]
# interpolate / reproject / resample the anomalies to match template_raster_fn
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: generate_anomalies( **args ), args_list )
pool.close()
# To Complete the CRU TS3.1 Downscaling we need the following:
# read in the pre-processed CL2.0 Cloud Climatology
l = sorted( glob.glob( os.path.join( cl20_path, '*.tif' ) ) ) # this could catch you.
cl20_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, l ) }
# group the data by months
out = pd.Series( out )
out_months = out.apply( fn_month_grouper )
months_grouped = out.groupby( out_months )
# unpack groups for parallelization and make a list of tuples of arguments to pass to the downscale function
mg = [(i,j) for i,j in months_grouped ]
args_list = [ ( i[1], cl20_dict[i[0]], downscaled_path, downscaling_operation ) for i in mg ]
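# (sketch, assuming fn_month_grouper keys on the month token in the filename)
# every anomaly ending in _01_<year>.tif lands in group '01' and is paired with
# the January CL2.0 grid cl20_dict['01'] in the downscale step below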
# downscale / write to disk
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: downscale_cru_historical( *args ), args_list )
pool.close()
# # # # # HOW TO RUN THE APPLICATION # # # # # # #
# # input args -- argparse it
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_ts31'
# cru_ts31 = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.cld.dat.nc' # 'cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative' # 'absolute'
# downscaling_operation = 'mult' # 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2009'
# variable = 'cld' # 'tas'
# metric = 'pct' # 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# # # # #TAS# # # # # # #
# # input args -- argparse it
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '5'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_ts31'
# cru_ts31 = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/tas/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'absolute'
# downscaling_operation = 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2009'
# variable = 'tas'
# metric = 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# # # CRU TS 3.23 -- update:
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
# cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.cld.dat.nc' # 'cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative' # 'absolute'
# downscaling_operation = 'mult' # 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2014'
# variable = 'cld' # 'tas'
# metric = 'pct' # 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
# # TAS
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
# cru_ts31 = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.tmp.dat.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/tas/akcan'
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'absolute'
# downscaling_operation = 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2014'
# variable = 'tas'
# metric = 'C'
# args_tuples = [ ('hi', cru_ts31), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -- tas_cld_cru_ts31_to_cl20_downscaling.py ' + args )
|
ua-snap/downscale
|
snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/tas_cld_cru_ts31_to_cl20_downscaling.py
|
Python
|
mit
| 23,577
|
[
"NetCDF"
] |
0707cb53dda80aec439d85642f4de50254d07b8fe8e3fd6e4a902b044f181893
|
#!/usr/bin/env python
import numpy as np
import tex2elas
import CijUtil
def read_texture_file(filename):
"""Read data from a theia texture file"""
# Deal with compressed files.
import os
if (os.path.splitext(filename)[1] == '.gz'):
import gzip
f = gzip.open(filename, 'rb')
else:
f = open(filename, 'r')
# Stuff everything into a dict and a list
# for now. Sort this out later (we will probably
# want to have objects at some point
header_data = {}
particles = []
header_lines = 5
particle_header_lines = 9
for line in f:
if header_lines == 5:
header_data['theia_lun'] = int(line)
header_lines = header_lines - 1
elif header_lines == 4:
header_data['npartsallo'] = int(line)
header_lines = header_lines - 1
elif header_lines == 3:
header_data['npartsused'] = int(line)
header_lines = header_lines - 1
elif header_lines == 2:
header_data['n_expected_particles'] = int(line)
header_lines = header_lines - 1
elif header_lines == 1:
header_data['nseen_particles'] = int(line)
header_lines = header_lines - 1
elif header_lines == 0:
if particle_header_lines == 9:
this_particle = {}
this_particle['process_id'] = int(line)
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 8:
this_particle['particle_id'] = int(line)
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 7:
this_particle['old_particle_id'] = int(line)
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 6:
this_particle['old_process_id'] = int(line)
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 5:
this_particle['particle_class'] = line.strip()
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 4:
this_particle['particle_position'] = np.array(
[line[0:12], line[12:24], line[24:36]])
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 3:
this_particle['idata_count'] = int(line)
if this_particle['idata_count'] > 0:
particle_header_lines = particle_header_lines - 1
else:
particle_header_lines = particle_header_lines - 2
elif particle_header_lines == 2:
this_particle['particle_idata'] = np.array(
[line.rstrip('\r\n')[i:i+12] for i in xrange(0, len(line.rstrip('\r\n')), 12)]
)
particle_header_lines = particle_header_lines - 1
elif particle_header_lines == 1:
this_particle['rdata_count'] = int(line)
if this_particle['rdata_count'] > 0:
particle_header_lines = particle_header_lines - 1
else:
particles.append(this_particle)
particle_header_lines = 9
elif particle_header_lines == 0:
this_particle['particle_rdata'] = np.array(
[line.rstrip('\r\n')[i:i+14] for i in xrange(0, len(line.rstrip('\r\n')), 14)]
)
particles.append(this_particle)
particle_header_lines = 9
f.close()
return header_data, particles
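def summarise_texture_file(filename):
    """Usage sketch (hypothetical filename, e.g. 'texture.dat.gz'): read a
    texture file with read_texture_file above and report the particle counts
    parsed from the header."""
    header, parts = read_texture_file(filename)
    print "%i particles read (%i expected)" % (len(parts),
                                               header['n_expected_particles'])
    return header, parts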
class drex_particle(object):
def __init__(self, position, pclass, rdata, idata):
self.position = position
self.pclass = pclass
self.num_grains = idata[0]
self._unpack_drex_rdata(rdata, self.num_grains)
def olivine_cij(self, scheme='Voigt'):
# Single crystal olivine elasticity from DRex - should
# check that this is up to date.
ol_cij_single = np.zeros((6,6))
ol_cij_single[0,0] = 320.71
ol_cij_single[1,1] = 197.25
ol_cij_single[2,2] = 234.32
ol_cij_single[0,1] = 69.84
ol_cij_single[1,0] = ol_cij_single[0,1]
ol_cij_single[2,0] = 71.22
ol_cij_single[0,2] = ol_cij_single[2,0]
ol_cij_single[1,2] = 74.80
ol_cij_single[2,1] = ol_cij_single[1,2]
ol_cij_single[3,3] = 63.77
ol_cij_single[4,4] = 77.67
ol_cij_single[5,5] = 78.36
ol_cij = tex2elas.calc_cij(ol_cij_single, self.g_ol,
self.volfrac_ol, scheme=scheme)
return ol_cij
def enstatite_cij(self, scheme='Voigt'):
# Single crystal enstatite elasticity from DRex - should
# check that this is up to date.
en_cij_single = np.zeros((6,6))
en_cij_single[0,0] = 236.9
en_cij_single[1,1] = 180.5
en_cij_single[2,2] = 230.4
en_cij_single[0,1] = 79.6
en_cij_single[1,0] = en_cij_single[0,1]
en_cij_single[2,0] = 63.2
en_cij_single[0,2] = en_cij_single[2,0]
en_cij_single[1,2] = 56.8
en_cij_single[2,1] = en_cij_single[1,2]
en_cij_single[3,3] = 84.3
en_cij_single[4,4] = 79.4
en_cij_single[5,5] = 80.1
en_cij = tex2elas.calc_cij(en_cij_single, self.g_en,
self.volfrac_en, scheme=scheme)
return en_cij
def bulk_cij(self, scheme='Voigt'):
if scheme=='Voigt':
cij = self.fraction_olivine*self.olivine_cij(scheme='Voigt') + (1.0 -
self.fraction_olivine)*self.enstatite_cij(scheme='Voigt')
elif scheme == 'Reuss':
cij = self.fraction_olivine*self.olivine_cij(scheme='Reuss') + (1.0 -
self.fraction_olivine)*self.enstatite_cij(scheme='Reuss')
elif scheme == 'Hill':
cij = (self.bulk_cij(scheme='Voigt') + self.bulk_cij(scheme='Reuss'))/2.0
else:
raise ValueError('Scheme argument must be one of Voigt, Reuss or Hill')
return cij
def bulk_anisotropy(self, scheme='Voigt'):
"""Calculate and return the universal anisotropy index
"""
Ua, Ua_sigma = CijUtil.uAniso(self.bulk_cij(scheme=scheme))
# We don't use the error (Ua_sigma)
# as we don't know the error on the
# single crystal elasticity.
return Ua
def _unpack_drex_rdata(self, data, ngr):
"""Data is a 1D numpy array. This function pulls out the usefull info"""
self.g_ol = np.copy(data[0:3*3*ngr].reshape((3,3,ngr)).T)
self.g_en = np.copy(data[3*3*ngr:2*3*3*ngr].reshape((3,3,ngr)).T)
self.volfrac_ol = np.copy(data[2*3*3*ngr:2*3*3*ngr+ngr])
self.volfrac_en = np.copy(data[2*3*3*ngr+ngr:2*3*3*ngr+2*ngr])
self.fraction_olivine = np.copy(data[3*3*ngr*2+ngr*4+10])
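# Layout of the DRex real-data vector unpacked by _unpack_drex_rdata above
# (inferred from the slice offsets, not documented in the source): ngr 3x3
# olivine orientation matrices, ngr 3x3 enstatite matrices, two ngr-long
# volume-fraction vectors, and fraction_olivine at flat offset 22*ngr + 10.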
def process_drex_particles(particles):
drex_particles = []
for particle in particles:
if particle['particle_class'] == 'drex':
try:
drex_particles.append(drex_particle(
particle['particle_position'].astype(np.float),
particle['particle_class'], particle['particle_rdata'].astype(np.float),
particle['particle_idata'].astype(np.int32)))
except:
print particle['particle_rdata']
print "Skipped this particle"
raise
return drex_particles
def assert_is_rotmat(rotmat):
"""Checks that a numpy matrix is a rotation matrix
Rotation matrices must be of shape (3,3), have a
determinant of 1 and have the property of the
inverse being equal to the transpose. We check
all three and raise an AssertionError if this
is not the case."""
assert rotmat.shape == (3,3)
np.testing.assert_array_almost_equal(np.linalg.det(rotmat), 1.0)
np.testing.assert_array_almost_equal(rotmat.transpose(), np.linalg.inv(rotmat))
def plot_particle_list(particle_list):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = []
ys = []
zs = []
ua = []
for particle in particle_list:
xs.append(particle.position[0])
ys.append(particle.position[1])
zs.append(particle.position[2])
ua.append(particle.bulk_anisotropy(scheme='Hill'))
ax.scatter(xs, ys, zs, c=ua)
plt.show()
def print_particle_list(particle_list):
for particle in particle_list:
print particle.position[0], particle.position[1], particle.position[2], particle.bulk_anisotropy(scheme='Hill')
if __name__ == '__main__':
import sys
import argparse
parser = argparse.ArgumentParser(description=
'Read data from Theia particle data files, convert it to elasticity, and print to STDOUT')
parser.add_argument('theiafile', help='Theia data file(s)', nargs='+')
parser.add_argument('-s', '--scheme', help='Elasticity averaging scheme',
choices=['Hill', 'Voigt', 'Reuss'], default='Hill')
parser.add_argument('-p', '--plot', action='store_true',
help='Plot the universal anisotropy index distribution in 3D')
#parser.add_argument('-o', '--outfile', help='Output plot to a file')
args = parser.parse_args()
# Read particles out of all files, concating output
all_particles = []
for this_file in args.theiafile:
sys.stderr.write("Reading data from {0}\n".format(this_file))
header_data, particles = read_texture_file(this_file)
all_particles.extend(particles)
sys.stderr.write("{0} particles read\n".format(len(all_particles)))
drex_particles = process_drex_particles(all_particles)
sys.stderr.write("{0} DRex particles processed\n".format(len(drex_particles)))
if args.plot:
plot_particle_list(drex_particles)
else:
print_particle_list(drex_particles)
|
andreww/theia_tools
|
theiaptfile.py
|
Python
|
mit
| 10,323
|
[
"CRYSTAL"
] |
a5c9c73a41082e0d7529aeb8f88abe416312552468a0065189a6fbd58ca9f233
|
'''
Import Guardian articles from JSON
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import json
import re
from urllib import unquote
from elasticsearch import helpers
sys.path.insert(0,'../')
from models.article import Article
from controllers.sentimentExtractor import SentimentExtractor
from elasticsearch import Elasticsearch, ConnectionTimeout
from elasticsearch_dsl.connections import connections
import random
connections.create_connection(hosts=['http://controcurator.org:80/ess'])
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
from controllers.guardian_scraper import GuardianScraper
gs = GuardianScraper()
# first we make an index of all social media items and the parents they belong to
# then for each parent we get the social media items of that parent and put them as comment in the article
articlelist = ['https://www.theguardian.com/football/live/2017/apr/11/borussia-dortmund-v-monaco-champions-league-quarter-final-first-leg-live',
'https://www.theguardian.com/business/live/2017/mar/22/markets-tumble-as-impatience-mounts-over-trumps-policies-business-live',
'https://www.theguardian.com/us-news/2017/apr/11/alabama-church-police-force-senate-vote',
'https://www.theguardian.com/uk-news/2017/mar/17/girl-aged-11-to-become-britains-youngest-mother',
'https://www.theguardian.com/music/2017/apr/12/john-geils-jr-founder-of-the-j-geils-band-dies-aged-71',
'https://www.theguardian.com/sport/2017/apr/11/sam-warburton-lions-doubt-knee-injury-six-weeks-rugby-union',
'https://www.theguardian.com/uk-news/2017/apr/11/homeless-man-guilty-of-murdering-hotel-worker',
'https://www.theguardian.com/society/2017/apr/12/two-in-five-gps-in-south-west-of-england-plan-to-quit-survey-finds',
'https://www.theguardian.com/sport/2017/mar/22/usa-japan-world-baseball-classic-semi-final-report',
'https://www.theguardian.com/business/2017/apr/12/tesco-profits-1bn-growth-supermarket',
'https://www.theguardian.com/commentisfree/2017/apr/11/donald-trump-russia-rex-tillersons-visit-syria',
'https://www.theguardian.com/uk-news/2017/mar/21/twins-found-white-cliffs-dover-parents-ashes-muriel-bernard-burgess-scott-enion',
'https://www.theguardian.com/business/2017/apr/11/developing-countries-demands-for-better-life-must-be-met-says-world-bank-head',
'https://www.theguardian.com/football/2017/apr/11/barcelona-neymar-clasico-ban',
'https://www.theguardian.com/politics/2017/apr/12/devon-and-cornwall-pcc-expenses-inquiry-prosecutors',
'https://www.theguardian.com/technology/2017/apr/11/gordon-ramsay-father-in-law-admits-hacking-company-computers',
'https://www.theguardian.com/business/2017/apr/11/trump-china-summit-xi-jinping-business',
'https://www.theguardian.com/science/2017/mar/22/face-medieval-cambridge-man-emerges-700-years-after-death',
'https://www.theguardian.com/us-news/2017/mar/22/donald-trump-president-impeached-liberals-history-process',
'https://www.theguardian.com/books/2017/apr/11/x-men-illustrator-alleged-anti-christian-messages-marvel-ardian-syaf',
'https://www.theguardian.com/business/2017/apr/12/burger-king-ok-google-commercial',
'https://www.theguardian.com/business/2017/apr/12/edf-customers-price-rise-electricity-gas-energy',
'https://www.theguardian.com/football/2017/apr/11/tony-adams-vows-to-give-granada-players-a-kick-up-the-arse',
'https://www.theguardian.com/football/2017/mar/22/football-transfer-rumours-jermain-defoe-back-to-west-ham',
'https://www.theguardian.com/global-development/2017/apr/11/india-acts-to-help-acid-attack-victims',
'https://www.theguardian.com/tv-and-radio/2017/mar/22/in-your-face-david-attenborough-grayson-perry-takes-home-rts-awards',
'https://www.theguardian.com/world/2017/mar/18/mafia-godfather-banned-bishop-sicily-michele-pennisi',
'https://www.theguardian.com/commentisfree/2017/apr/11/france-left-europe-jean-luc-melenchon-presidential-election',
'https://www.theguardian.com/commentisfree/2017/apr/11/sean-spicers-hitler-holocaust-speak-volumes',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-shock-team-bus-explosions',
'https://www.theguardian.com/football/2017/mar/22/which-football-manager-has-been-sacked-by-one-club-the-most-times',
'https://www.theguardian.com/sport/2017/apr/11/pennsylvania-woman-jail-threats-youth-football-league-officials',
'https://www.theguardian.com/sport/2017/apr/12/afl-players-expecting-bumper-new-pay-offer-as-end-to-dispute-nears',
'https://www.theguardian.com/sport/2017/apr/12/mark-cavendish-diagnosed-epstein-barr-virus-cycling',
'https://www.theguardian.com/sport/blog/2017/mar/22/talking-horses-best-wednesday-bets-for-warwick-and-newcastle',
'https://www.theguardian.com/uk-news/2017/apr/11/boris-johnson-full-support-failure-secure-sanctions-syria-russia',
'https://www.theguardian.com/world/2017/mar/22/brussels-unveil-terror-victims-memorial-one-year-after-attacks',
'https://www.theguardian.com/business/2017/mar/22/nervous-markets-take-fright-at-prospect-of-trump-failing-to-deliver',
'https://www.theguardian.com/commentisfree/2016/dec/21/i-lost-my-mum-seven-weeks-ago-our-readers-on-coping-with-grief-at-christmas',
'https://www.theguardian.com/fashion/2017/mar/22/fiorucci-why-the-disco-friendly-label-is-perfect-for-2017',
'https://www.theguardian.com/law/2017/apr/12/judge-sacked-over-online-posts-calling-his-critics-donkeys',
'https://www.theguardian.com/society/2017/apr/11/national-social-care-service-centralised-nhs',
'https://www.theguardian.com/sport/2017/apr/12/gb-team-pursuit-world-track-cycling-championships-bronze',
'https://www.theguardian.com/sport/2017/mar/17/toronto-london-broncos-challenge-cup-hull-fc-widnes-leeds-wakefield-super-league',
'https://www.theguardian.com/sport/2017/mar/22/nfl-game-speed-changes-officiating-fewer-commercials-roger-goodell',
'https://www.theguardian.com/sport/2017/mar/22/taumalolo-becomes-one-of-richest-players-in-rugby-league-with-10m-cowboys-deal',
'https://www.theguardian.com/tv-and-radio/2017/mar/22/n-word-taboo-tv-carmichael-show-atlanta-insecure-language',
'https://www.theguardian.com/us-news/2017/mar/22/fbi-muslim-employees-discrimination-religion-middle-east-travel',
'https://www.theguardian.com/us-news/2017/mar/22/zapier-pay-employees-move-silicon-valley-startup',
'https://www.theguardian.com/world/2017/mar/22/gay-clergyman-jeffrey-johns-turned-down-welsh-bishop-twice-before-claims',
'https://www.theguardian.com/world/2017/mar/22/israel-whisky-gin-bottles-first-world-war',
'https://www.theguardian.com/world/2017/mar/23/apple-paid-no-tax-in-new-zealand-for-at-least-a-decade-reports-say',
'https://www.theguardian.com/books/2017/mar/22/comics-chavez-redline-transformers-v-gi-joe',
'https://www.theguardian.com/business/2017/apr/11/uk-inflation-rate-stays-three-year-high',
'https://www.theguardian.com/commentisfree/2017/apr/12/charlie-gard-legal-aid',
'https://www.theguardian.com/commentisfree/2017/mar/22/rights-gig-economy-self-employed-worker',
'https://www.theguardian.com/lifeandstyle/2017/mar/22/using-the-pill-can-protect-women-from-certain-cancers-for-up-to-30-years',
'https://www.theguardian.com/music/2017/apr/11/michael-buble-wife-says-son-noah-is-recovering-from-cancer',
'https://www.theguardian.com/society/2017/apr/11/are-you-a-scout-or-scout-leader-tell-us-your-story',
'https://www.theguardian.com/society/2017/apr/11/bullying-and-violence-grip-out-of-control-guys-marsh-jail-dorset',
'https://www.theguardian.com/sport/2017/apr/11/boston-celtics-no1-seed-nba-eastern-conference-cleveland-cavaliers',
'https://www.theguardian.com/stage/2017/mar/22/trisha-brown-obituary',
'https://www.theguardian.com/us-news/2017/apr/11/us-universal-healthcare-single-payer-rallies',
'https://www.theguardian.com/us-news/2017/apr/12/manchester-by-the-sea-inspired-couple-kill-son-house-fire',
'https://www.theguardian.com/us-news/2017/mar/17/hillary-clinton-ready-to-return-politics',
'https://www.theguardian.com/us-news/2017/mar/22/us-border-agent-sexually-assaults-teenage-sisters-texas',
'https://www.theguardian.com/world/2017/apr/11/hundreds-of-refugees-missing-after-dunkirk-camp-fire',
'https://www.theguardian.com/world/2017/mar/22/unicef-condemns-sale-cambodian-breast-milk-us-mothers-firm-ambrosia-labs']
query = {
"query": {
"bool": {
"must_not": [
{
"term": {
"source": "vaccination"
}
},
{
"constant_score": {
"filter": {
"missing": {
"field": "parent.url"
}
}
}
},
{
"constant_score": {
"filter": {
"missing": {
"field": "source"
}
}
}
}
]
}
},
"from": 10000,
"size": 10000
}
query = {
"query": {
"bool": {
"must": [
{
"term": {
"parent.url": "controcuratorguardian"
}
}
],
"must_not": [],
"should": []
}
},
"from": 0,
"size": 10000
}
downloaded = {}
response = es.search(index="crowdynews", body=query)
data = response['hits']['hits']
actions = []
for socmed in data:
url = unquote(re.sub('\/v1\/(.*[a-z])\/controcuratorguardian\?q=','',str(socmed['_source']['parent']['url']),))
if url not in articlelist:
print "NOT INTERESTING"#, NEXT"
#continue
print url
query = {
"query": {
"constant_score": {
"filter": {
"term": {
"url": url
}
}
}
},
"from": 0,
"size": 1
}
response = es.search(index="controcurator", doc_type="article", body=query)
if len(response['hits']['hits']) == 0:
print "ARTICLE NOT FOUND"
if url not in downloaded:
guardianresult = gs.retrieveArticle(url,True)[0]
downloaded[url] = guardianresult
print "DOWNLOADED"
else:
guardianresult = downloaded[url]
print "FOUND IN CACHE"
if not guardianresult:
print "ARTICLE HAS NO CONTENT, NEXT"
continue
body = guardianresult['response']['content']['blocks']['body'][0]
id = guardianresult['response']['content']['blocks']['body'][0]['id']
url = guardianresult['response']['content']['webUrl']
dtype = guardianresult['response']['content']['type']
if dtype != 'article':
print "NOT AN ARTICLE, NEXT"
continue
# try to find thumbnail
# thumb = data['response']['content']['blocks']['body'][0]['elements']
webtitle = guardianresult['response']['content']['webTitle'].encode('UTF-8').split(' | ')[0]
section = guardianresult['response']['content']['sectionId']
pubdate = guardianresult['response']['content']['webPublicationDate']
comments = guardianresult['response']['content']['blocks']['comments']
try:
print len(comments)
except TypeError:
comments = {}
if len(comments) < 10:
print "TOO FEW COMMENTS",len(comments)
continue
if len(comments) > 1000:
print "MANY COMMENTS",len(comments)
#continue
html = guardianresult['response']['content']['blocks']['body'][0]['bodyHtml'].decode('ascii', 'ignore')
paragraphs = html.split('</p>')
stripHtml = lambda text: re.compile('(?!<p>|<br>|<br />)<.*?>').sub('', text)
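# e.g. (sketch) stripHtml('<p>Hi <b>there</b>') -> '<p>Hi there'; the negative
# lookahead keeps <p>, <br> and <br /> while every other tag is removed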
text = u'</p>'.join([stripHtml(p) for p in paragraphs]) + '</p>'
article = Article(meta={'id': id})
print id
article.url = url
article.type = "article"
article.source = "crowdynews"
article.published = pubdate
article.document = {"title": webtitle, 'text': text}
newcomments = []
for comment in comments:
if comment['reply-to'] == "":
comment['sentiment'] = SentimentExtractor.getSentiment(comment['text'])
newcomments.append(comment)
print "COMMENT SAVED"
article.comments = newcomments
article.features = {"controversy": {"random": random.random()},"socmed":len(newcomments)}
try:
article.save(index='controcurator')
print "SAVED ARTICLE"
except ConnectionTimeout:
es = Elasticsearch(['http://controcurator.org/ess/'],port=80)
article.save(index='controcurator')
print "SAVED ARTICLE"
# reload article from database
query = {
"query": {
"constant_score": {
"filter": {
"term": {
"url": url
}
}
}
},
"from": 0,
"size": 1
}
response = es.search(index="controcurator", doc_type="article", body=query)
if len(response['hits']['hits']) == 0:
print "ERROR: ARTICLE WAS NOT DOWNLOADED, NEXT"
continue
article = response['hits']['hits'][0]
print "ARTICLE FOUND IN DB"
if 'comments' not in article['_source']:
print "ARTICLE HAS NO COMMENTS"
article['_source']['comments'] = []
comment_ids = [comment['id'] for comment in article['_source']['comments']]
if socmed['_source']['id'] in comment_ids:
print "EXISTING SOCMED, NEXT"
continue
comment = {
"author-id": socmed['_source']['author']['userid'],
"author": socmed['_source']['author']['name'],
"type" : socmed['_source']['service'],
"timestamp": socmed['_source']['date'],
"reply-to": "",
"id": socmed['_source']['id'],
}
if 'raw_text' in socmed['_source']:
comment["text"] = socmed['_source']['raw_text']
else:
comment["text"] = socmed['_source']['text']
comment['sentiment'] = SentimentExtractor.getSentiment(comment['text'])
article['_source']['comments'].append(comment)
Article.get(id=article['_id']).update(comments=article['_source']['comments'])
print 'COMPLETED',article['_id']
|
ControCurator/controcurator
|
cronjobs/importSocMed.py
|
Python
|
mit
| 13,308
|
[
"VisIt"
] |
58254bc16cc4e37c6ec800e4c887224c35b255cfe3811c19a0cb87ae482794cf
|
#!/bin/env python3
import sys
import os
if len(sys.argv) < 2:
print('')
print('Usage: %s POSCAR gjf_file1 gjf_file2 ...' % sys.argv[0].split('/')[-1])
print('')
exit(1)
print('############### Replacing images with gvfile ###############')
print('')
os.system('gjf2vas.py %s > /dev/null' % (' '.join(sys.argv[1:])))
folder_id = 1
for gjf_file in sys.argv[2:]:
print('%28s → %02d' % (gjf_file, folder_id))
vasp_file = '%s.vasp' % gjf_file[:-4]
os.system('mv %s %02d/POSCAR' % (vasp_file, folder_id))
folder_id += 1
print('')
print(' ############### DONE ###############\n')
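# Example invocation (hypothetical filenames; the NEB image folders 01/, 02/,
# ... are assumed to exist already):
#   nebgjf2vas.py POSCAR image1.gjf image2.gjf
# converts each .gjf with gjf2vas.py and moves image1.vasp to 01/POSCAR and
# image2.vasp to 02/POSCAR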
|
Mabinogiysk/VASP-script
|
nebgjf2vas.py
|
Python
|
gpl-2.0
| 622
|
[
"VASP"
] |
7e33c5e17fde5b0e2ff92f542a9d741d3e9c41b16ecd40942c7dc7aec09477e2
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.deprecation import deprecated
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph, NearestNeighbors
from ..manifold import spectral_embedding
from ._kmeans import k_means
def discretize(
vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, default=20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components),
)
t_svd = vectors_discrete.T * vectors
try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # count the failed attempt, then re-randomize the rotation
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError("SVD did not converge")
return labels
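# Minimal usage sketch for discretize (embeddings would normally come from
# spectral_embedding rather than being hand-built as here):
#
#   emb = np.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])
#   labels = discretize(emb, random_state=0)  # one integer label per row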
def spectral_clustering(
affinity,
*,
n_clusters=8,
n_components=None,
eigen_solver=None,
random_state=None,
n_init=10,
eigen_tol=0.0,
assign_labels="kmeans",
verbose=False,
):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
Number of clusters to extract.
n_components : int, default=n_clusters
Number of eigenvectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigenvectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia. Only used if
``assign_labels='kmeans'``.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the Laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ("kmeans", "discretize"):
raise ValueError(
"The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given" % assign_labels
)
if isinstance(affinity, np.matrix):
raise TypeError(
"spectral_clustering does not support passing in affinity as an "
"np.matrix. Please convert to a numpy array with np.asarray. For "
"more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html", # noqa
)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
# We now obtain the real valued solution matrix to the
# relaxed Ncut problem, solving the eigenvalue problem
# L_sym x = lambda x and recovering u = D^-1/2 x.
# The first eigenvector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = spectral_embedding(
affinity,
n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol,
drop_first=False,
)
if verbose:
print(f"Computing label assignment using {assign_labels}")
if assign_labels == "kmeans":
_, labels, _ = k_means(
maps, n_clusters, random_state=random_state, n_init=n_init, verbose=verbose
)
else:
labels = discretize(maps, random_state=random_state)
return labels
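# Usage sketch for the functional interface with a toy precomputed affinity
# (two well-separated pairs; the SpectralClustering estimator below can build
# the affinity matrix from raw features instead):
#
#   aff = np.array([[1.0, 0.9, 0.1, 0.0],
#                   [0.9, 1.0, 0.0, 0.1],
#                   [0.1, 0.0, 1.0, 0.9],
#                   [0.0, 0.1, 0.9, 1.0]])
#   labels = spectral_clustering(aff, n_clusters=2, random_state=0)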
class SpectralClustering(ClusterMixin, BaseEstimator):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex, or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster, such as when clusters are
nested circles on the 2D plane.
If the affinity matrix is the adjacency matrix of a graph, this method
can be used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel with Euclidean
distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, a user-provided affinity matrix can be specified by
setting ``affinity='precomputed'``.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
n_clusters : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
n_components : int, default=n_clusters
Number of eigenvectors to use for the spectral embedding
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigenvectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
n_init : int, default=10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia. Only used if
``assign_labels='kmeans'``.
gamma : float, default=1.0
Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
Ignored for ``affinity='nearest_neighbors'``.
affinity : str or callable, default='rbf'
How to construct the affinity matrix.
- 'nearest_neighbors': construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf': construct the affinity matrix using a radial basis function
(RBF) kernel.
- 'precomputed': interpret ``X`` as a precomputed affinity matrix,
where larger values indicate greater similarity between instances.
- 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph
of precomputed distances, and construct a binary affinity matrix
from the ``n_neighbors`` nearest neighbors of each instance.
- one of the kernels supported by
:func:`~sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
n_neighbors : int, default=10
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when ``eigen_solver='arpack'``.
assign_labels : {'kmeans', 'discretize'}, default='kmeans'
The strategy for assigning labels in the embedding space. There are two
ways to assign labels after the Laplacian embedding. k-means is a
popular choice, but it can be sensitive to initialization.
Discretization is another approach which is less sensitive to random
initialization.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : dict of str to any, default=None
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, default=None
The number of parallel jobs to run when `affinity='nearest_neighbors'`
or `affinity='precomputed_nearest_neighbors'`. The neighbors search
will be done in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Attributes
----------
affinity_matrix_ : array-like of shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ : ndarray of shape (n_samples,)
Labels of each point
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.cluster import SpectralClustering
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = SpectralClustering(n_clusters=2,
... assign_labels='discretize',
... random_state=0).fit(X)
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
>>> clustering
SpectralClustering(assign_labels='discretize', n_clusters=2,
random_state=0)
Notes
-----
A distance matrix for which 0 indicates identical elements and high values
indicate very dissimilar elements can be transformed into an affinity /
similarity matrix that is well-suited for the algorithm by
applying the Gaussian (aka RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
An alternative is to take a symmetric version of the k-nearest neighbors
connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(
self,
n_clusters=8,
*,
eigen_solver=None,
n_components=None,
random_state=None,
n_init=10,
gamma=1.0,
affinity="rbf",
n_neighbors=10,
eigen_tol=0.0,
assign_labels="kmeans",
degree=3,
coef0=1,
kernel_params=None,
n_jobs=None,
verbose=False,
):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.n_components = n_components
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, similarities / affinities between
instances if ``affinity='precomputed'``, or distances between
            instances if ``affinity='precomputed_nearest_neighbors'``. If a
sparse matrix is provided in a format other than ``csr_matrix``,
``csc_matrix``, or ``coo_matrix``, it will be converted into a
sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
X = self._validate_data(
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
ensure_min_samples=2,
)
allow_squared = self.affinity in [
"precomputed",
"precomputed_nearest_neighbors",
]
if X.shape[0] == X.shape[1] and not allow_squared:
warnings.warn(
"The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``."
)
if self.affinity == "nearest_neighbors":
connectivity = kneighbors_graph(
X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs
)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == "precomputed":
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params["gamma"] = self.gamma
params["degree"] = self.degree
params["coef0"] = self.coef0
self.affinity_matrix_ = pairwise_kernels(
X, metric=self.affinity, filter_params=True, **params
)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(
self.affinity_matrix_,
n_clusters=self.n_clusters,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels,
verbose=self.verbose,
)
return self
def fit_predict(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix,
and return cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, similarities / affinities between
instances if ``affinity='precomputed'``, or distances between
            instances if ``affinity='precomputed_nearest_neighbors'``. If a
sparse matrix is provided in a format other than ``csr_matrix``,
``csc_matrix``, or ``coo_matrix``, it will be converted into a
sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
def _more_tags(self):
return {
"pairwise": self.affinity
in ["precomputed", "precomputed_nearest_neighbors"]
}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `_pairwise` was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26)."
)
@property
def _pairwise(self):
return self.affinity in ["precomputed", "precomputed_nearest_neighbors"]
|
huzq/scikit-learn
|
sklearn/cluster/_spectral.py
|
Python
|
bsd-3-clause
| 24,295
|
[
"Brian",
"Gaussian"
] |
5a4ad881c717f3db34b2b4abb57930d67e3707447b4cadb2861a3627fbddfbda
|
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import logging
import sys
import warnings
from mnemonic import Mnemonic
from . import MINIMUM_FIRMWARE_VERSION, exceptions, messages, tools
if sys.version_info.major < 3:
raise Exception("Trezorlib does not support Python 2 anymore.")
LOG = logging.getLogger(__name__)
VENDORS = ("bitcointrezor.com", "trezor.io")
MAX_PASSPHRASE_LENGTH = 50
DEPRECATION_ERROR = """
Incompatible Trezor library detected.
(Original error: {})
""".strip()
OUTDATED_FIRMWARE_ERROR = """
Your Trezor firmware is out of date. Update it with the following command:
trezorctl firmware-update
Or visit https://wallet.trezor.io/
""".strip()
def get_buttonrequest_value(code):
# Converts integer code to its string representation of ButtonRequestType
return [
k
for k in dir(messages.ButtonRequestType)
if getattr(messages.ButtonRequestType, k) == code
][0]
def get_default_client(path=None, ui=None, **kwargs):
"""Get a client for a connected Trezor device.
Returns a TrezorClient instance with minimum fuss.
If no path is specified, finds first connected Trezor. Otherwise performs
a prefix-search for the specified device. If no UI is supplied, instantiates
the default CLI UI.
"""
from .transport import get_transport
from .ui import ClickUI
transport = get_transport(path, prefix_search=True)
if ui is None:
ui = ClickUI()
return TrezorClient(transport, ui, **kwargs)
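# Usage sketch: connect to the first Trezor found and read a feature field
# (assumes a device is plugged in; the default ClickUI prompts on the
# terminal):
#
#   client = get_default_client()
#   print(client.features.device_id)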
class TrezorClient:
"""Trezor client, a connection to a Trezor device.
This class allows you to manage connection state, send and receive protobuf
messages, handle user interactions, and perform some generic tasks
(send a cancel message, initialize or clear a session, ping the device).
You have to provide a transport, i.e., a raw connection to the device. You can use
`trezorlib.transport.get_transport` to find one.
    You have to provide a UI implementation for the three kinds of interaction:
- button request (notify the user that their interaction is needed)
- PIN request (on T1, ask the user to input numbers for a PIN matrix)
- passphrase request (ask the user to enter a passphrase)
See `trezorlib.ui` for details.
You can supply a `state` you saved in the previous session. If you do,
the user might not need to enter their passphrase again.
"""
def __init__(self, transport, ui=None, state=None):
LOG.info("creating client instance for device: {}".format(transport.get_path()))
self.transport = transport
self.ui = ui
self.state = state
if ui is None:
warnings.warn("UI class not supplied. This will probably crash soon.")
self.session_counter = 0
self.init_device()
def open(self):
if self.session_counter == 0:
self.transport.begin_session()
self.session_counter += 1
def close(self):
if self.session_counter == 1:
self.transport.end_session()
self.session_counter -= 1
def cancel(self):
self._raw_write(messages.Cancel())
def call_raw(self, msg):
__tracebackhide__ = True # for pytest # pylint: disable=W0612
self._raw_write(msg)
return self._raw_read()
def _raw_write(self, msg):
__tracebackhide__ = True # for pytest # pylint: disable=W0612
self.transport.write(msg)
def _raw_read(self):
__tracebackhide__ = True # for pytest # pylint: disable=W0612
return self.transport.read()
def _callback_pin(self, msg):
try:
pin = self.ui.get_pin(msg.type)
except exceptions.Cancelled:
self.call_raw(messages.Cancel())
raise
if not pin.isdigit():
self.call_raw(messages.Cancel())
raise ValueError("Non-numeric PIN provided")
resp = self.call_raw(messages.PinMatrixAck(pin=pin))
if isinstance(resp, messages.Failure) and resp.code in (
messages.FailureType.PinInvalid,
messages.FailureType.PinCancelled,
messages.FailureType.PinExpected,
):
raise exceptions.PinException(resp.code, resp.message)
else:
return resp
def _callback_passphrase(self, msg):
if msg.on_device:
passphrase = None
else:
try:
passphrase = self.ui.get_passphrase()
except exceptions.Cancelled:
self.call_raw(messages.Cancel())
raise
passphrase = Mnemonic.normalize_string(passphrase)
if len(passphrase) > MAX_PASSPHRASE_LENGTH:
self.call_raw(messages.Cancel())
raise ValueError("Passphrase too long")
resp = self.call_raw(messages.PassphraseAck(passphrase=passphrase))
if isinstance(resp, messages.PassphraseStateRequest):
self.state = resp.state
return self.call_raw(messages.PassphraseStateAck())
else:
return resp
def _callback_button(self, msg):
__tracebackhide__ = True # for pytest # pylint: disable=W0612
# do this raw - send ButtonAck first, notify UI later
self._raw_write(messages.ButtonAck())
self.ui.button_request(msg.code)
return self._raw_read()
@tools.session
def call(self, msg):
self.check_firmware_version()
resp = self.call_raw(msg)
while True:
if isinstance(resp, messages.PinMatrixRequest):
resp = self._callback_pin(resp)
elif isinstance(resp, messages.PassphraseRequest):
resp = self._callback_passphrase(resp)
elif isinstance(resp, messages.ButtonRequest):
resp = self._callback_button(resp)
elif isinstance(resp, messages.Failure):
if resp.code == messages.FailureType.ActionCancelled:
raise exceptions.Cancelled
raise exceptions.TrezorFailure(resp)
else:
return resp
@tools.session
def init_device(self):
resp = self.call_raw(messages.Initialize(state=self.state))
if not isinstance(resp, messages.Features):
raise exceptions.TrezorException("Unexpected initial response")
else:
self.features = resp
if self.features.vendor not in VENDORS:
raise RuntimeError("Unsupported device")
# A side-effect of this is a sanity check for broken protobuf definitions.
# If the `vendor` field doesn't exist, you probably have a mismatched
# checkout of trezor-common.
self.version = (
self.features.major_version,
self.features.minor_version,
self.features.patch_version,
)
self.check_firmware_version(warn_only=True)
def is_outdated(self):
if self.features.bootloader_mode:
return False
model = self.features.model or "1"
required_version = MINIMUM_FIRMWARE_VERSION[model]
return self.version < required_version
def check_firmware_version(self, warn_only=False):
if self.is_outdated():
if warn_only:
warnings.warn(OUTDATED_FIRMWARE_ERROR, stacklevel=2)
else:
raise exceptions.OutdatedFirmwareError(OUTDATED_FIRMWARE_ERROR)
@tools.expect(messages.Success, field="message")
def ping(
self,
msg,
button_protection=False,
pin_protection=False,
passphrase_protection=False,
):
# We would like ping to work on any valid TrezorClient instance, but
# due to the protection modes, we need to go through self.call, and that will
# raise an exception if the firmware is too old.
# So we short-circuit the simplest variant of ping with call_raw.
if not button_protection and not pin_protection and not passphrase_protection:
# XXX this should be: `with self:`
try:
self.open()
return self.call_raw(messages.Ping(message=msg))
finally:
self.close()
msg = messages.Ping(
message=msg,
button_protection=button_protection,
pin_protection=pin_protection,
passphrase_protection=passphrase_protection,
)
return self.call(msg)
def get_device_id(self):
return self.features.device_id
@tools.expect(messages.Success, field="message")
@tools.session
def clear_session(self):
return self.call_raw(messages.ClearSession())
def MovedTo(where):
def moved_to(*args, **kwargs):
msg = "Function has been moved to " + where
raise RuntimeError(DEPRECATION_ERROR.format(msg))
return moved_to
class ProtocolMixin(object):
"""Fake mixin for old-style software that constructed TrezorClient class
from separate mixins.
Now it only simulates existence of original attributes to prevent some early
crashes, and raises errors when any of the attributes are actually called.
"""
def __init__(self, *args, **kwargs):
warnings.warn("TrezorClient mixins are not supported anymore")
self.tx_api = None # Electrum checks that this attribute exists
super().__init__(*args, **kwargs)
def set_tx_api(self, tx_api):
warnings.warn("set_tx_api is deprecated, use new arguments to sign_tx")
@staticmethod
def expand_path(n):
warnings.warn(
"expand_path is deprecated, use tools.parse_path",
DeprecationWarning,
stacklevel=2,
)
return tools.parse_path(n)
# Device functionality
wipe_device = MovedTo("device.wipe")
recovery_device = MovedTo("device.recover")
reset_device = MovedTo("device.reset")
backup_device = MovedTo("device.backup")
set_u2f_counter = MovedTo("device.set_u2f_counter")
apply_settings = MovedTo("device.apply_settings")
apply_flags = MovedTo("device.apply_flags")
change_pin = MovedTo("device.change_pin")
# Firmware functionality
firmware_update = MovedTo("firmware.update")
# BTC-like functionality
get_public_node = MovedTo("btc.get_public_node")
get_address = MovedTo("btc.get_address")
sign_tx = MovedTo("btc.sign_tx")
sign_message = MovedTo("btc.sign_message")
verify_message = MovedTo("btc.verify_message")
# CoSi functionality
cosi_commit = MovedTo("cosi.commit")
cosi_sign = MovedTo("cosi.sign")
# Ethereum functionality
ethereum_get_address = MovedTo("ethereum.get_address")
ethereum_sign_tx = MovedTo("ethereum.sign_tx")
ethereum_sign_message = MovedTo("ethereum.sign_message")
ethereum_verify_message = MovedTo("ethereum.verify_message")
# Lisk functionality
lisk_get_address = MovedTo("lisk.get_address")
lisk_get_public_key = MovedTo("lisk.get_public_key")
lisk_sign_message = MovedTo("lisk.sign_message")
lisk_verify_message = MovedTo("lisk.verify_message")
lisk_sign_tx = MovedTo("lisk.sign_tx")
# NEM functionality
nem_get_address = MovedTo("nem.get_address")
nem_sign_tx = MovedTo("nem.sign_tx")
# Stellar functionality
stellar_get_address = MovedTo("stellar.get_address")
stellar_sign_transaction = MovedTo("stellar.sign_tx")
# Miscellaneous cryptographic functionality
get_entropy = MovedTo("misc.get_entropy")
sign_identity = MovedTo("misc.sign_identity")
get_ecdh_session_key = MovedTo("misc.get_ecdh_session_key")
encrypt_keyvalue = MovedTo("misc.encrypt_keyvalue")
decrypt_keyvalue = MovedTo("misc.decrypt_keyvalue")
# Debug device functionality
load_device_by_mnemonic = MovedTo("debuglink.load_device_by_mnemonic")
load_device_by_xprv = MovedTo("debuglink.load_device_by_xprv")
class BaseClient:
"""Compatibility proxy for original BaseClient class.
Prevents early crash in Electrum forks and possibly other software.
"""
def __init__(self, *args, **kwargs):
warnings.warn("TrezorClient mixins are not supported anymore")
self.trezor_client = TrezorClient(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.trezor_client, key)
# further Electrum compatibility
proto = None
|
romanz/python-trezor
|
trezorlib/client.py
|
Python
|
lgpl-3.0
| 13,041
|
[
"VisIt"
] |
3b307c6816b661ce2abf4ce39391fc8d88e6a3e3d11a46902cdddf0f5e92cf2b
|
""" BHMM: A toolkit for Bayesian hidden Markov model analysis of single-molecule trajectories.
This project provides tools for estimating the number of metastable states, rate
constants between the states, equilibrium populations, distributions
characterizing the states, and distributions of these quantities from
single-molecule data. This data could be FRET data, single-molecule pulling
data, or any data where one or more observables are recorded as a function of
time. A Hidden Markov Model (HMM) is used to interpret the observed dynamics,
and a distribution of models that fit the data is sampled using Bayesian
inference techniques and Markov chain Monte Carlo (MCMC), allowing for both the
characterization of uncertainties in the model and modeling of the expected
information gain by new experiments.
"""
from __future__ import print_function
import os
from os.path import relpath, join
import versioneer
from setuptools import setup, Extension, find_packages
DOCLINES = __doc__.split("\n")
########################
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)
Programming Language :: Python
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
################################################################################
# USEFUL SUBROUTINES
################################################################################
def find_package_data(data_root, package_root):
files = []
for root, dirnames, filenames in os.walk(data_root):
for fn in filenames:
files.append(relpath(join(root, fn), package_root))
return files
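# Usage sketch (assumption, mirroring the package_data call below):
#     find_package_data('bhmm/tests/data', 'bhmm')
# walks the data tree and returns paths relative to the package root, e.g.
# 'tests/data/<file>', in the form setuptools expects for package_data.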
################################################################################
# SETUP
################################################################################
def extensions():
from Cython.Build import cythonize
from numpy import get_include
np_inc = get_include()
extensions = [
Extension('bhmm.hidden.impl_c.hidden',
sources = ['./bhmm/hidden/impl_c/hidden.pyx',
'./bhmm/hidden/impl_c/_hidden.c'],
                  include_dirs = ['./bhmm/hidden/impl_c/', np_inc]),
Extension('bhmm.output_models.impl_c.discrete',
sources = ['./bhmm/output_models/impl_c/discrete.pyx',
'./bhmm/output_models/impl_c/_discrete.c'],
                  include_dirs = ['./bhmm/output_models/impl_c/', np_inc]),
Extension('bhmm.output_models.impl_c.gaussian',
sources = ['./bhmm/output_models/impl_c/gaussian.pyx',
'./bhmm/output_models/impl_c/_gaussian.c'],
                  include_dirs = ['./bhmm/output_models/impl_c/', np_inc]),
Extension('bhmm._external.clustering.kmeans_clustering_64',
sources=['./bhmm/_external/clustering/src/clustering.c',
'./bhmm/_external/clustering/src/kmeans.c'],
include_dirs=['./bhmm/_external/clustering/include',
np_inc],
extra_compile_args=['-std=c99','-O3', '-DCLUSTERING_64']),
Extension('bhmm._external.clustering.kmeans_clustering_32',
sources=['./bhmm/_external/clustering/src/clustering.c',
'./bhmm/_external/clustering/src/kmeans.c'],
include_dirs=['./bhmm/_external/clustering/include',
np_inc],
extra_compile_args=['-std=c99','-O3']),
]
return cythonize(extensions)
class lazy_cythonize(list):
"""evaluates extension list lazyly.
pattern taken from http://tinyurl.com/qb8478q"""
def __init__(self, callback):
self._list, self.callback = None, callback
def c_list(self):
if self._list is None: self._list = self.callback()
return self._list
def __iter__(self):
for e in self.c_list(): yield e
def __getitem__(self, ii): return self.c_list()[ii]
def __len__(self): return len(self.c_list())
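# Editorial note: deferring cythonize() means Cython and numpy only need to be
# importable at build time, after setup_requires has installed them; the
# ext_modules=lazy_cythonize(extensions) argument below evaluates the
# extension list the first time setuptools iterates over it.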
setup(
name='bhmm',
author='John Chodera and Frank Noe',
author_email='john.chodera@choderalab.org',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='LGPL',
url='https://github.com/bhmm/bhmm',
platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
classifiers=CLASSIFIERS.splitlines(),
package_dir={'bhmm': 'bhmm'},
packages=find_packages(),
# NOTE: examples installs to bhmm.egg/examples/, NOT bhmm.egg/bhmm/examples/.
# You need to do utils.get_data_filename("../examples/*/setup/").
package_data={'bhmm': find_package_data('examples', 'bhmm') +
find_package_data('bhmm/tests/data', 'bhmm')},
zip_safe=False,
install_requires=[
'numpy',
'scipy',
'msmtools',
],
setup_requires=[
'cython',
'numpy',
],
ext_modules=lazy_cythonize(extensions),
)
|
bhmm/bhmm
|
setup.py
|
Python
|
lgpl-3.0
| 5,358
|
[
"Gaussian"
] |
c63723120319018d54e3c17fda31f37d555f86123df81a787f5ec6aaeda5ed73
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (C) 2013-2015 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals
import os, sys, datetime, getpass, collections, re, json, argparse, copy, hashlib, io, time, subprocess, glob, logging
import shlex # respects quoted substrings when splitting
import requests
logging.basicConfig(level=logging.INFO)
from ..compat import (USING_PYTHON2, basestring, str, input, wrap_stdio_in_codecs, decode_command_line_args,
unwrap_stream, sys_encoding)
wrap_stdio_in_codecs()
decode_command_line_args()
import dxpy
from ..cli import try_call, prompt_for_yn, INTERACTIVE_CLI
from ..cli import workflow as workflow_cli
from ..cli.cp import cp
from ..cli.download import (download_one_file, download)
from ..cli.parsers import (no_color_arg, delim_arg, env_args, stdout_args, all_arg, json_arg, parser_dataobject_args,
parser_single_dataobject_output_args, process_properties_args,
find_by_properties_and_tags_args, process_find_by_property_args, process_dataobject_args,
process_single_dataobject_output_args, find_executions_args, add_find_executions_search_gp,
set_env_from_args, extra_args, process_extra_args, DXParserError, exec_input_args,
instance_type_arg, process_instance_type_arg)
from ..cli.exec_io import (ExecutableInputs, format_choices_or_suggestions)
from ..cli.org import (get_org_invite_args, add_membership, remove_membership,
update_membership)
from ..exceptions import (err_exit, DXError, DXCLIError, DXAPIError, network_exceptions, default_expected_exceptions,
format_exception)
from ..utils import warn, group_array_by_field, normalize_timedelta, normalize_time_input
from ..app_categories import APP_CATEGORIES
from ..utils.printing import (CYAN, BLUE, YELLOW, GREEN, RED, WHITE, UNDERLINE, BOLD, ENDC, DNANEXUS_LOGO,
DNANEXUS_X, set_colors, set_delimiter, get_delimiter, DELIMITER, fill,
tty_rows, tty_cols, pager)
from ..utils.pretty_print import format_tree, format_table
from ..utils.resolver import (pick, paginate_and_pick, is_hashid, is_data_obj_id, is_container_id, is_job_id,
is_analysis_id, get_last_pos_of_char, resolve_container_id_or_name, resolve_path,
resolve_existing_path, get_app_from_path, resolve_app, get_exec_handler,
split_unescaped, ResolutionError, get_first_pos_of_char,
resolve_to_objects_or_project)
from ..utils.completer import (path_completer, DXPathCompleter, DXAppCompleter, LocalCompleter,
ListCompleter, MultiCompleter)
from ..utils.describe import (print_data_obj_desc, print_desc, print_ls_desc, get_ls_l_desc, print_ls_l_desc,
get_io_desc, get_find_executions_string)
try:
import colorama
colorama.init()
except:
pass
if '_ARGCOMPLETE' not in os.environ:
try:
# Hack: on some operating systems, like Mac, readline spews
# escape codes into the output at import time if TERM is set to
# xterm (or xterm-256color). This can be a problem if dx is
# being used noninteractively (e.g. --json) and its output will
# be redirected or parsed elsewhere.
#
# http://reinout.vanrees.org/weblog/2009/08/14/readline-invisible-character-hack.html
old_term_setting = None
if 'TERM' in os.environ and os.environ['TERM'].startswith('xterm'):
old_term_setting = os.environ['TERM']
os.environ['TERM'] = 'vt100'
import readline
if old_term_setting:
os.environ['TERM'] = old_term_setting
        if readline.__doc__ and 'libedit' in readline.__doc__:
print('Warning: incompatible readline module detected (libedit), tab completion disabled', file=sys.stderr)
except ImportError:
if os.name != 'nt':
print('Warning: readline module is not available, tab completion disabled', file=sys.stderr)
state = {"interactive": False,
"colors": "auto",
"delimiter": None,
"currentproj": None}
parser_map = {}
parser_categories_sorted = ["all", "session", "fs", "data", "metadata", "workflow", "exec", "other"]
parser_categories = {"all": {"desc": "\t\tAll commands",
"cmds": []},
"session": {"desc": "\tManage your login session",
"cmds": []},
"fs": {"desc": "\t\tNavigate and organize your projects and files",
"cmds": []},
"data": {"desc": "\t\tView, download, and upload data",
"cmds": []},
"metadata": {"desc": "\tView and modify metadata for projects, data, and executions",
"cmds": []},
"workflow": {"desc": "\tView and modify workflows",
"cmds": []},
"exec": {"desc": "\t\tManage and run apps, applets, and workflows",
"cmds": []},
"other": {"desc": "\t\tMiscellaneous advanced utilities",
"cmds": []}}
class ResultCounter():
def __init__(self):
self.counter = 0
def __call__(self):
self.counter += 1
return ('\n' if self.counter > 1 else '') + UNDERLINE() + 'Result ' + \
str(self.counter) + ':' + ENDC()
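# Usage sketch (illustrative): each call yields a numbered, underlined header.
#     get_result_str = ResultCounter()
#     print(get_result_str())  # "Result 1:"
#     print(get_result_str())  # leading newline, then "Result 2:"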
def get_json_from_stdin():
user_json_str = input('Type JSON here> ')
user_json = None
try:
user_json = json.loads(user_json_str)
except ValueError:
parser.exit(1, 'Error: user input could not be parsed as JSON\n')
return None
return user_json
def set_cli_colors(args=argparse.Namespace()):
if 'color' in args:
state['colors'] = args.color
if state['colors'] == 'auto':
set_colors(sys.stdout.isatty())
else:
set_colors(state['colors'] == 'on')
def set_delim(args=argparse.Namespace()):
if 'delimiter' in args:
state['delimiter'] = args.delimiter
else:
state['delimiter'] = None
set_delimiter(state['delimiter'])
# Loading command line arguments
args_list = sys.argv[1:]
# Loading other variables used for pretty-printing
if "LESS" in os.environ:
os.environ["LESS"] = os.environ["LESS"] + " -RS"
else:
os.environ["LESS"] = "-RS"
# This completer is for the command-line in the shell. It assumes the
# first word is always a subcommand and that if the first word is a
# subcommand with further subcommands, then the second word must be an
# appropriate sub-subcommand.
class DXCLICompleter():
subcommands = {'find': ['data ', 'projects ', 'apps ', 'jobs ', 'executions ', 'analyses '],
'new': ['record ', 'project ', 'workflow '],
'add': ['developers ', 'users ', 'stage '],
'remove': ['developers ', 'users ', 'stage '],
'update': ['stage ', 'workflow ']}
silent_commands = set(['import'])
def __init__(self):
self.commands = [subcmd + ' ' for subcmd in subparsers.choices.keys() if subcmd not in self.silent_commands]
self.matches = []
self.text = None
def get_command_matches(self, prefix):
self.matches = [cmd for cmd in self.commands if cmd.startswith(prefix)]
def get_subcommand_matches(self, command, prefix):
if command in self.subcommands:
self.matches = [command + ' ' + sub for sub in self.subcommands[command] if sub.startswith(prefix)]
def get_matches(self, text, want_prefix=False):
self.text = text
space_pos = get_last_pos_of_char(' ', text)
words = split_unescaped(' ', text)
if len(words) > 0 and space_pos == len(text) - 1:
words.append('')
num_words = len(words)
self.matches = []
if num_words == 0:
self.get_command_matches('')
elif num_words == 1:
self.get_command_matches(words[0])
elif num_words == 2 and words[0] in self.subcommands:
self.get_subcommand_matches(words[0], words[1])
else:
if words[0] == 'run':
path_matches = path_completer(words[-1],
classes=['applet', 'workflow'],
visibility="visible")
elif words[0] in ['cd', 'rmdir', 'mkdir', 'tree']:
path_matches = path_completer(words[-1],
expected='folder')
elif words[0] in ['export']:
path_matches = path_completer(words[-1],
classes=['gtable'])
elif words[0] in ['head']:
path_matches = path_completer(words[-1],
classes=['gtable', 'file'])
elif words[0] in ['cat', 'download']:
path_matches = path_completer(words[-1],
classes=['file'])
elif words[0] in ['ls', 'rm', 'mv', 'cp']:
path_matches = path_completer(words[-1])
elif words[0] in ['get_details', 'set_details', 'set_visibility', 'add_types', 'remove_types', 'close', 'get']:
path_matches = path_completer(words[-1])
elif words[0] in ['describe', 'rename', 'set_properties', 'unset_properties']:
path_matches = path_completer(words[-1], include_current_proj=True)
elif words[0] in ['rmproject', 'invite']:
path_matches = path_completer(words[-1], expected='project', include_current_proj=True)
else:
path_matches = []
if want_prefix:
self.matches = [text[:space_pos + 1] + match for match in path_matches]
else:
self.matches = path_matches
# Also find app name matches and append to
# self.matches, preferably a list of installed apps
if words[0] in ['run', 'install', 'uninstall']:
try:
initial_results = list(dxpy.find_apps(describe={"fields": {"name": True,
"installed": True}}))
if words[0] in ['run', 'uninstall']:
filtered_results = [result for result in initial_results if
result['describe']['installed']]
else:
filtered_results = [result for result in initial_results if
not result['describe']['installed']]
app_names = [result['describe']['name'] for result in filtered_results]
app_matches = [name for name in app_names if name.startswith(words[-1])]
if want_prefix:
self.matches += [text[:space_pos + 1] + match for match in app_matches]
else:
self.matches += app_matches
except:
pass
return self.matches
def complete(self, text, state):
if state == 0:
self.get_matches(text, want_prefix=True)
if state < len(self.matches):
return self.matches[state]
else:
return None
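# Illustrative registration (assumption; the actual wiring lives in the
# interactive-shell setup elsewhere in this file):
#     readline.set_completer(DXCLICompleter().complete)
#     readline.parse_and_bind('tab: complete')
# readline then invokes complete(text, state) with state = 0, 1, 2, ... until
# None is returned.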
def login(args):
if not state['interactive']:
args.save = True
default_authserver = 'https://auth.dnanexus.com'
using_default = False
if args.auth_token and not args.token:
args.token = args.auth_token
args.auth_token = None
# API server should have already been set up if --host or one of
# the --special-host flags has been set.
if args.token is None:
if args.host is not None or args.port is not None:
if args.host is None or args.port is None:
                parser.exit(2, fill('Error: Only one of --host and --port was provided; provide either both or neither of the values') + '\n')
protocol = args.protocol or ("https" if (args.port == 443) else "http")
authserver = protocol + '://' + args.host
authserver += ':' + str(args.port)
else:
authserver = default_authserver
using_default = authserver == default_authserver
def get_token(**data):
return dxpy.DXHTTPRequest(authserver+"/system/newAuthToken", data,
prepend_srv=False, auth=None, always_retry=True)
def get_credentials(reuse=None, get_otp=False):
if reuse:
username, password = reuse
else:
username = None
while not username:
if 'DX_USERNAME' in os.environ:
username = input('Username [' + os.environ['DX_USERNAME'] + ']: ') or os.environ['DX_USERNAME']
else:
username = input('Username: ')
dxpy.config.write("DX_USERNAME", username)
with unwrap_stream('stdin'):
password = getpass.getpass()
otp = input('Verification code: ') if get_otp else None
return dict(username=username, password=password, otp=otp)
print('Acquiring credentials from ' + authserver)
attempt, using_otp, reuse = 1, False, None
while attempt <= 3:
try:
credentials = get_credentials(reuse=reuse, get_otp=using_otp)
token_res = get_token(expires=normalize_time_input(args.timeout, future=True), **credentials)
break
except (KeyboardInterrupt, EOFError):
err_exit()
except dxpy.DXAPIError as e:
if e.name == 'OTPRequiredError':
using_otp = True
reuse = (credentials['username'], credentials['password'])
continue
elif e.name in ('UsernameOrPasswordError', 'OTPMismatchError'):
if attempt < 3:
if e.name == 'UsernameOrPasswordError':
warn("Incorrect username and/or password")
else:
warn("Incorrect verification code")
attempt += 1
continue
else:
err_exit("Incorrect username and/or password", arg_parser=parser)
else:
err_exit("Login error: {}".format(e), arg_parser=parser)
except Exception as e:
err_exit("Login error: {}".format(e), arg_parser=parser)
        sec_context = json.dumps({'auth_token': token_res["access_token"], 'auth_token_type': token_res["token_type"]})
if using_default:
set_api(dxpy.DEFAULT_APISERVER_PROTOCOL, dxpy.DEFAULT_APISERVER_HOST, dxpy.DEFAULT_APISERVER_PORT, args.save)
else:
sec_context = '{"auth_token":"' + args.token + '","auth_token_type":"Bearer"}'
# Ensure correct API server
if args.host is None:
set_api(dxpy.DEFAULT_APISERVER_PROTOCOL, dxpy.DEFAULT_APISERVER_HOST, dxpy.DEFAULT_APISERVER_PORT, args.save)
using_default = True
os.environ['DX_SECURITY_CONTEXT'] = sec_context
dxpy.set_security_context(json.loads(sec_context))
if args.save:
dxpy.config.write("DX_SECURITY_CONTEXT", sec_context)
# If login via token, obtain current username from auth server.
if args.token is not None:
host, port = None, None
if dxpy.APISERVER_HOST not in ['api.dnanexus.com', 'stagingapi.dnanexus.com']:
host, port = args.host, args.port
try:
dxpy.config.write("DX_USERNAME", dxpy.user_info(host, port)['username'])
except DXError as details:
# Consider failure to obtain username to be a non-fatal error.
print("Could not obtain username from auth server. Consider setting both --host and --port.", file=sys.stderr)
print(fill(str(details)), file=sys.stderr)
if using_default or args.staging:
try:
greeting = dxpy.api.system_greet({'client': 'dxclient', 'version': 'v'+dxpy.TOOLKIT_VERSION})
if greeting.get('messages'):
print(BOLD("New messages from ") + DNANEXUS_LOGO())
for message in greeting['messages']:
print(BOLD("Date: ") + datetime.datetime.fromtimestamp(message['date']/1000).ctime())
print(BOLD("Subject: ") + fill(message['title'], subsequent_indent=' '*9))
body = message['body'].splitlines()
if len(body) > 0:
print(BOLD("Message: ") + body[0])
for line in body[1:]:
print(' '*9 + line)
except Exception as e:
warn("Error while retrieving greet data: {}".format(e))
args.current = False
args.name = None
args.level = 'CONTRIBUTE'
args.public = False
if args.host is not None and not args.staging and not using_default:
setenv(args)
elif args.projects:
pick_and_set_project(args)
if args.save and not args.token:
msg = "You are now logged in. Your credentials are stored in {conf_dir} and will expire in {timeout}. {tip}"
tip = "Use " + BOLD("dx login --timeout") + " to control the expiration date, or " + BOLD("dx logout") + \
" to end this session."
print(fill(msg.format(conf_dir=dxpy.config.get_user_conf_dir(),
timeout=datetime.timedelta(seconds=normalize_time_input(args.timeout)/1000),
tip=tip)))
def logout(args):
if dxpy.AUTH_HELPER is not None:
authserver = dxpy.get_auth_server_name(args.host, args.port)
print("Deleting credentials from {}...".format(authserver))
token = dxpy.AUTH_HELPER.security_context["auth_token"]
try:
            token_sig = hashlib.sha256(token.encode('utf-8')).hexdigest()  # sha256 requires bytes, not str
response = dxpy.DXHTTPRequest(authserver + "/system/destroyAuthToken",
dict(tokenSignature=token_sig),
prepend_srv=False,
max_retries=1)
print("Deleted token with signature", token_sig)
except dxpy.DXAPIError as e:
print(format_exception(e))
except:
err_exit()
if state["interactive"]:
dxpy.AUTH_HELPER = None
else:
dxpy.config.write("DX_SECURITY_CONTEXT", None)
def set_api(protocol, host, port, write):
dxpy.config.update(DX_APISERVER_PROTOCOL=protocol,
DX_APISERVER_HOST=host,
DX_APISERVER_PORT=port)
if write:
dxpy.config.save()
def set_project(project, write, name=None):
if dxpy.JOB_ID is None:
dxpy.config["DX_PROJECT_CONTEXT_ID"] = project
dxpy.config["DX_PROJECT_CONTEXT_NAME"] = name
else:
dxpy.config["DX_WORKSPACE_ID"] = project
if write:
dxpy.config.save()
dxpy.set_workspace_id(project)
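# Editorial note: set_project() separates execution context from user context;
# inside a running job (dxpy.JOB_ID set) it updates the temporary workspace,
# while interactively it updates the saved project context, so the same helper
# backs both "dx select" and in-job reconfiguration.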
def set_wd(folder, write):
dxpy.config.update(DX_CLI_WD=folder)
if write:
dxpy.config.save()
# Will raise KeyboardInterrupt, EOFError
def prompt_for_env_var(prompt_str, env_var_str):
prompt = prompt_str
default = None
if env_var_str in os.environ:
default = os.environ[env_var_str]
prompt += ' [' + default + ']: '
else:
prompt += ': '
while True:
value = input(prompt)
if value != '':
return value
elif default is not None:
return default
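# Usage sketch (illustrative, hypothetical value): with
# DX_APISERVER_HOST=api.example.com in the environment,
#     prompt_for_env_var('API server host', 'DX_APISERVER_HOST')
# prompts "API server host [api.example.com]: " and returns the default when
# the user just presses Enter.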
def pick_and_set_project(args):
try:
result_generator = dxpy.find_projects(describe=True,
name=args.name, name_mode='glob',
level=('VIEW' if args.public else args.level),
explicit_perms=(not args.public if not args.public else None),
public=(args.public if args.public else None),
first_page_size=10)
except:
err_exit('Error while listing available projects')
any_results = False
first_pass = True
while True:
results = []
for _ in range(10):
try:
results.append(next(result_generator))
any_results = True
except StopIteration:
break
except:
err_exit('Error while listing available projects')
if not any_results:
parser.exit(0, '\n' + fill("No projects to choose from. You can create one with the command " +
BOLD("dx new project") + ". To pick from projects for which you only have " +
" VIEW permissions, use " + BOLD("dx select --level VIEW") + " or " +
BOLD("dx select --public") + ".") + '\n')
elif len(results) == 0:
parser.exit(1, 'No projects left to choose from.\n')
if first_pass:
if not args.public and args.level == "CONTRIBUTE":
print('')
print(fill("Note: Use " + BOLD("dx select --level VIEW") + " or " + BOLD("dx select --public") +
" to select from projects for which you only have VIEW permissions."))
first_pass = False
project_ids = [result['id'] for result in results]
# Eliminate current default if it is not a found project
try:
default = project_ids.index(dxpy.WORKSPACE_ID)
except:
default = None
print("")
if args.public:
print("Available public projects:")
else:
print("Available projects ({level} or higher):".format(level=args.level))
choice = try_call(pick,
[result['describe']['name'] + ' (' + result['level'] + ')' for result in results],
default,
more_choices=(len(results) == 10))
if choice == 'm':
continue
else:
print('Setting current project to: ' + BOLD(results[choice]['describe']['name']))
set_project(project_ids[choice], not state['interactive'] or args.save, name=results[choice]['describe']['name'])
state['currentproj'] = results[choice]['describe']['name']
set_wd('/', not state['interactive'] or args.save)
return
def whoami(args):
if dxpy.AUTH_HELPER is None:
parser.exit(3, 'You are not logged in; run "dx login" to obtain a token.\n')
user_id = dxpy.whoami()
if args.user_id:
print(user_id)
else:
print(dxpy.api.user_describe(user_id)['handle'])
def setenv(args):
if not state['interactive']:
args.save = True
if args.current:
dxpy.config.save()
else:
try:
api_protocol = prompt_for_env_var('API server protocol (choose "http" or "https")', 'DX_APISERVER_PROTOCOL')
api_host = prompt_for_env_var('API server host', 'DX_APISERVER_HOST')
api_port = prompt_for_env_var('API server port', 'DX_APISERVER_PORT')
set_api(api_protocol, api_host, api_port, args.save)
except:
parser.exit(1, '\n')
if args.projects:
args.name = None
args.public = False
args.current = False
args.level = 'CONTRIBUTE'
pick_and_set_project(args)
def clearenv(args):
if args.interactive:
print("The clearenv command is not available in the interactive shell")
return
dxpy.config.clear(reset=args.reset)
def env(args):
if args.bash:
if dxpy.AUTH_HELPER is not None:
print("export DX_SECURITY_CONTEXT='" + json.dumps(dxpy.AUTH_HELPER.security_context) + "'")
if dxpy.APISERVER_PROTOCOL is not None:
print("export DX_APISERVER_PROTOCOL=" + dxpy.APISERVER_PROTOCOL)
if dxpy.APISERVER_HOST is not None:
print("export DX_APISERVER_HOST=" + dxpy.APISERVER_HOST)
if dxpy.APISERVER_PORT is not None:
print("export DX_APISERVER_PORT=" + dxpy.APISERVER_PORT)
if dxpy.WORKSPACE_ID is not None:
print("export DX_PROJECT_CONTEXT_ID=" + dxpy.WORKSPACE_ID)
elif args.dx_flags:
flags_str = ''
if dxpy.AUTH_HELPER is not None:
token = dxpy.AUTH_HELPER.security_context.get('auth_token', None)
if token is not None:
flags_str += ' --auth-token ' + token
if dxpy.APISERVER_PROTOCOL is not None:
flags_str += ' --apiserver-protocol ' + dxpy.APISERVER_PROTOCOL
if dxpy.APISERVER_HOST is not None:
flags_str += ' --apiserver-host ' + dxpy.APISERVER_HOST
if dxpy.APISERVER_PORT is not None:
flags_str += ' --apiserver-port ' + dxpy.APISERVER_PORT
if dxpy.WORKSPACE_ID is not None:
flags_str += ' --project-context-id ' + dxpy.WORKSPACE_ID
print(flags_str)
else:
if dxpy.AUTH_HELPER is not None:
print("Auth token used\t\t" + dxpy.AUTH_HELPER.security_context.get("auth_token", "none"))
print("API server protocol\t" + dxpy.APISERVER_PROTOCOL)
print("API server host\t\t" + dxpy.APISERVER_HOST)
print("API server port\t\t" + dxpy.APISERVER_PORT)
print("Current workspace\t" + str(dxpy.WORKSPACE_ID))
if "DX_PROJECT_CONTEXT_NAME" in os.environ:
print(u'Current workspace name\t"{n}"'.format(n=dxpy.config.get("DX_PROJECT_CONTEXT_NAME")))
print("Current folder\t\t" + dxpy.config.get("DX_CLI_WD", "None"))
print("Current user\t\t" + str(os.environ.get("DX_USERNAME")))
def get_pwd():
pwd_str = None
if dxpy.WORKSPACE_ID is not None:
if state['currentproj'] is None:
try:
proj_name = dxpy.api.project_describe(dxpy.WORKSPACE_ID)['name']
state['currentproj'] = proj_name
except:
pass
if state['currentproj'] is not None:
pwd_str = state['currentproj'] + ':' + dxpy.config.get('DX_CLI_WD', u'/')
return pwd_str
def pwd(args):
pwd_str = get_pwd()
if pwd_str is not None:
print(pwd_str)
else:
parser.exit(1, 'Current project is not set\n')
def api(args):
json_input = json.loads(args.input_json)
if args.input is not None:
with (sys.stdin if args.input == '-' else open(args.input, 'r')) as fd:
data = fd.read()
try:
json_input = json.loads(data)
except ValueError:
parser.exit(1, 'Error: file contents could not be parsed as JSON\n')
resp = None
try:
resp = dxpy.DXHTTPRequest('/' + args.resource + '/' + args.method,
json_input)
except:
err_exit()
try:
print(json.dumps(resp, indent=4))
except ValueError:
parser.exit(1, 'Error: server response could not be parsed as JSON\n')
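# Illustrative invocation (assumed ID): the generic passthrough above lets
# users issue raw API calls from the shell, e.g.
#     dx api project-xxxx describe '{}'
# which POSTs the JSON to /project-xxxx/describe and pretty-prints the reply.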
def invite(args):
# If --project is a valid project (ID or name), then appending ":"
# should not hurt the path resolution.
if ':' not in args.project:
args.project += ':'
project, _none, _none = try_call(resolve_existing_path,
args.project, 'project')
    if args.invitee != 'PUBLIC' and '-' not in args.invitee and '@' not in args.invitee:
args.invitee = 'user-' + args.invitee.lower()
try:
resp = dxpy.api.project_invite(project, {"invitee": args.invitee, "level": args.level})
except:
err_exit()
print('Invited ' + args.invitee + ' to ' + project + ' (' + resp['state'] + ')')
def uninvite(args):
# If --project is a valid project (ID or name), then appending ":"
# should not hurt the path resolution.
if ':' not in args.project:
args.project += ':'
project, _none, _none = try_call(resolve_existing_path,
args.project, 'project')
    if args.entity != 'PUBLIC' and '-' not in args.entity:
args.entity = 'user-' + args.entity.lower()
try:
dxpy.api.project_decrease_permissions(project, {args.entity: None})
except:
err_exit()
print('Uninvited ' + args.entity + ' from ' + project)
def select(args):
if args.project is not None:
if get_last_pos_of_char(':', args.project) != -1:
args.path = args.project
else:
args.path = args.project + ':'
cd(args)
print("Selected project", split_unescaped(":", args.project)[0].replace("\\:", ":"))
else:
pick_and_set_project(args)
def cd(args):
# entity_result should be None because expected='folder'
project, folderpath = try_call(resolve_existing_path, args.path, 'folder')[:2]
if project is not None:
project_name = try_call(dxpy.get_handler(project).describe)['name']
# It is obvious what the project is
if project != dxpy.WORKSPACE_ID or 'DX_PROJECT_CONTEXT_NAME' not in os.environ:
# Cache ID and name if necessary
set_project(project, not state['interactive'], name=project_name)
state['currentproj'] = project_name
else:
parser.exit(1, 'Error: No current project was given\n')
# TODO: attempt to add caching later if it's an issue
# if project in cached_project_paths and folderpath in cached_project_paths[project]:
# set_wd(folderpath, not interactive)
try:
dxproj = dxpy.get_handler(dxpy.WORKSPACE_ID)
dxproj.list_folder(folder=folderpath)
except:
parser.exit(1, fill(folderpath + ': No such file or directory found in project ' + dxpy.WORKSPACE_ID) + '\n')
return
set_wd(folderpath, not state['interactive'])
def cmp_names(x):
return x['describe']['name'].lower()
def ls(args):
project, folderpath, entity_results = try_call(resolve_existing_path, # TODO: this needs to honor "ls -a" (all) (args.obj/args.folders/args.full)
args.path,
ask_to_resolve=False)
if project is None:
parser.exit(1, fill('Current project must be set or specified before any data can be listed') + '\n')
dxproj = dxpy.get_handler(project)
only = ""
if args.obj and not args.folders and not args.full:
only = "objects"
elif not args.obj and args.folders and not args.full:
only = "folders"
else:
only = "all"
resp = None
if entity_results is None:
try:
# Request the minimal set of describe fields possible
if args.brief:
describe_input = dict(fields={'id': True, 'name': True})
elif args.verbose:
describe_input = True
else:
describe_input = dict(fields={'id': True, 'class': True, 'name': True})
resp = dxproj.list_folder(folder=folderpath,
describe=describe_input,
only=only,
includeHidden=args.all)
# Listing the folder was successful
if args.verbose:
print(UNDERLINE('Project:') + ' ' + dxproj.describe()['name'] + ' (' + project + ')')
print(UNDERLINE('Folder :') + ' ' + folderpath)
if not args.obj:
folders_to_print = ['/.', '/..'] if args.all else []
folders_to_print += resp['folders']
for folder in folders_to_print:
if args.full:
print(BOLD() + BLUE() + folder + ENDC())
else:
print(BOLD() + BLUE() + os.path.basename(folder) + '/' + ENDC())
if not args.folders:
resp["objects"] = sorted(resp["objects"], key=cmp_names)
if args.verbose:
if len(resp['objects']) > 0:
print(BOLD() + 'State' + DELIMITER('\t') + 'Last modified' + DELIMITER(' ') + 'Size' + DELIMITER(' ') + 'Name' + DELIMITER(' (') + 'ID' + DELIMITER(')') + ENDC())
else:
print("No data objects found in the folder")
if not args.brief and not args.verbose:
name_counts = collections.Counter(obj['describe']['name'] for obj in resp['objects'])
for obj in resp['objects']:
if args.brief:
print(obj['id'])
elif args.verbose:
print_ls_l_desc(obj['describe'], include_project=False)
else:
                        print_ls_desc(obj['describe'], print_id=name_counts[obj['describe']['name']] > 1)
except:
err_exit()
else:
# We have results to describe
name_counts = collections.Counter(obj['describe']['name'] for obj in entity_results)
for result in entity_results:
# TODO: Figure out the right way to reason when to hide hidden files:
# if result['describe']['hidden'] and not args.all:
# continue
if result['describe']['project'] == project:
if args.brief:
print(result['id'])
elif args.verbose:
print_ls_l_desc(result['describe'], include_project=False)
else:
                    print_ls_desc(result['describe'], print_id=name_counts[result['describe']['name']] > 1)
def mkdir(args):
had_error = False
for path in args.paths:
# Resolve the path and add it to the list
try:
project, folderpath, _none = resolve_path(path, expected='folder')
except ResolutionError as details:
print(fill('Could not resolve "' + path + '": ' + str(details)))
had_error = True
continue
if project is None:
print(fill('Could not resolve the project of "' + path + '"'))
try:
dxpy.api.project_new_folder(project, {"folder": folderpath, "parents": args.parents})
except Exception as details:
print("Error while creating " + folderpath + " in " + project)
print(" " + str(details))
had_error = True
if had_error:
parser.exit(1)
def rmdir(args):
had_error = False
for path in args.paths:
try:
project, folderpath, _none = resolve_path(path, expected='folder')
except ResolutionError as details:
print(fill('Could not resolve "' + path + '": ' + str(details)))
had_error = True
continue
if project is None:
print(fill('Could not resolve the project of "' + path + '"'))
try:
dxpy.api.project_remove_folder(project, {"folder": folderpath})
except Exception as details:
print("Error while removing " + folderpath + " in " + project)
print(" " + str(details))
had_error = True
if had_error:
parser.exit(1)
def rm(args):
had_error = False
projects = {}
for path in args.paths:
# Resolve the path and add it to the list
try:
project, folderpath, entity_results = resolve_existing_path(path, allow_mult=True, all_mult=args.all)
except Exception as details:
print(fill('Could not resolve "' + path + '": ' + str(details)))
had_error = True
continue
if project is None:
had_error = True
print(fill('Could not resolve "' + path + '" to a project'))
continue
if project not in projects:
projects[project] = {"folders": [], "objects": []}
if entity_results is None:
if folderpath is not None:
if not args.recursive:
print(fill(u'Did not find "' + path + '" as a data object; if it is a folder, cannot remove it without setting the "-r" flag'))
had_error = True
continue
else:
projects[project]['folders'].append(folderpath)
else:
print(fill('Path ' + path + ' resolved to a project; cannot remove a project using "rm"'))
had_error = True
continue
else:
projects[project]['objects'] += [result['id'] for result in entity_results]
for project in projects:
for folder in projects[project]['folders']:
try:
# set force as true so the underlying API requests are idempotent
dxpy.api.project_remove_folder(project,
{"folder": folder, "recurse": True, "force": True},
always_retry=True)
except Exception as details:
print("Error while removing " + folder + " from " + project)
print(" " + str(details))
had_error = True
try:
# set force as true so the underlying API requests are idempotent
dxpy.api.project_remove_objects(project,
{"objects": projects[project]['objects'], "force": True},
always_retry=True)
except Exception as details:
print("Error while removing " + json.dumps(projects[project]['objects']) + " from " + project)
print(" " + str(details))
had_error = True
if had_error:
# TODO: 'dx rm' and related commands should separate out user error exceptions and internal code exceptions
parser.exit(1)
def rmproject(args):
had_error = False
for project in args.projects:
# Be forgiving if they offer an extraneous colon
substrings = split_unescaped(':', project)
if len(substrings) > 1 or (len(substrings) == 1 and project[0] == ':'):
print(fill('Unable to remove "' + project + '": a nonempty string was found to the right of an unescaped colon'))
had_error = True
continue
if len(substrings) == 0:
if project[0] == ':':
print(fill('Unable to remove ":": to remove the current project, use its name or ID'))
had_error = True
continue
proj_id = try_call(resolve_container_id_or_name, substrings[0])
if proj_id is None:
print(fill('Unable to remove "' + project + '": could not resolve to a project ID'))
had_error = True
continue
try:
proj_desc = dxpy.api.project_describe(proj_id)
if args.confirm:
value = input(fill('About to delete project "' + proj_desc['name'] + '" (' + proj_id + ')') + '\nPlease confirm [y/n]: ')
if len(value) == 0 or value.lower()[0] != 'y':
had_error = True
print(fill('Aborting deletion of project "' + proj_desc['name'] + '"'))
continue
try:
dxpy.api.project_destroy(proj_id, {"terminateJobs": not args.confirm})
except dxpy.DXAPIError as apierror:
if apierror.name == 'InvalidState':
value = input(fill('WARNING: there are still unfinished jobs in the project.') + '\nTerminate all jobs and delete the project? [y/n]: ')
if len(value) == 0 or value.lower()[0] != 'y':
had_error = True
print(fill('Aborting deletion of project "' + proj_desc['name'] + '"'))
continue
dxpy.api.project_destroy(proj_id, {"terminateJobs": True})
else:
raise apierror
if not args.quiet:
print(fill('Successfully deleted project "' + proj_desc['name'] + '"'))
except EOFError:
print('')
parser.exit(1)
except KeyboardInterrupt:
print('')
parser.exit(1)
except Exception as details:
print(fill('Was unable to remove ' + project + ', ' + str(details)))
had_error = True
if had_error:
parser.exit(1)
# ONLY for within the SAME project. Will exit fatally otherwise.
def mv(args):
dest_proj, dest_path, _none = try_call(resolve_path, args.destination, expected='folder')
try:
if dest_path is None:
raise ValueError()
dx_dest = dxpy.get_handler(dest_proj)
dx_dest.list_folder(folder=dest_path, only='folders')
except:
if dest_path is None:
parser.exit(1, 'Cannot move to a hash ID\n')
# Destination folder path is new => renaming
if len(args.sources) != 1:
# Can't rename more than one object
parser.exit(1, 'The destination folder does not exist\n')
last_slash_pos = get_last_pos_of_char('/', dest_path)
if last_slash_pos == 0:
dest_folder = '/'
else:
dest_folder = dest_path[:last_slash_pos]
        dest_name = dest_path[last_slash_pos + 1:].replace('\\/', '/')
try:
dx_dest.list_folder(folder=dest_folder, only='folders')
except:
parser.exit(1, 'The destination folder does not exist\n')
# Either rename the data object or rename the folder
src_proj, src_path, src_results = try_call(resolve_existing_path,
args.sources[0],
allow_mult=True, all_mult=args.all)
if src_proj != dest_proj:
parser.exit(1, fill('Error: Using "mv" for moving something from one project to another is unsupported.') + '\n')
if src_results is None:
if src_path == '/':
parser.exit(1, fill('Cannot rename root folder; to rename the project, please use the "dx rename" subcommand.') + '\n')
try:
dxpy.api.project_rename_folder(src_proj, {"folder": src_path, "newpath": dest_path})
return
except:
err_exit()
else:
try:
if src_results[0]['describe']['folder'] != dest_folder:
dxpy.api.project_move(src_proj,
{"objects": [result['id'] for result in src_results],
"destination": dest_folder})
for result in src_results:
dxpy.DXHTTPRequest('/' + result['id'] + '/rename',
{"project": src_proj,
"name": dest_name})
return
except:
err_exit()
if len(args.sources) == 0:
parser.exit(1, 'No sources provided to move\n')
src_objects = []
src_folders = []
for source in args.sources:
src_proj, src_folderpath, src_results = try_call(resolve_existing_path,
source,
allow_mult=True, all_mult=args.all)
if src_proj != dest_proj:
parser.exit(1, fill('Using "mv" for moving something from one project to another is unsupported. Please use "cp" and "rm" instead.') + '\n')
if src_results is None:
src_folders.append(src_folderpath)
else:
src_objects += [result['id'] for result in src_results]
try:
dxpy.api.project_move(src_proj,
{"objects": src_objects,
"folders": src_folders,
"destination": dest_path})
except:
err_exit()
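# Behavior sketch (illustrative): "dx mv foo.txt /archive/" moves the object
# within the same project, while "dx mv foo.txt bar.txt" (where the
# destination does not resolve to an existing folder) is treated as a rename
# of the single source.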
def tree(args):
project, folderpath, _none = try_call(resolve_existing_path, args.path,
expected='folder')
if project is None:
parser.exit(1, fill('Current project must be set or specified before any data can be listed') + '\n')
dxproj = dxpy.get_handler(project)
tree = collections.OrderedDict()
try:
folders = [folder for folder in dxproj.describe(input_params={"folders": True})['folders']
if folder.startswith((folderpath + '/') if folderpath != '/' else '/')]
        folders = [folder[len(folderpath):] for folder in folders]
for folder in folders:
subtree = tree
for path_element in folder.split("/"):
if path_element == "":
continue
path_element_desc = BOLD() + BLUE() + path_element + ENDC()
subtree.setdefault(path_element_desc, collections.OrderedDict())
subtree = subtree[path_element_desc]
for item in sorted(dxpy.find_data_objects(project=project, folder=folderpath,
recurse=True, describe=True),
key=cmp_names):
subtree = tree
for path_element in item['describe']['folder'][len(folderpath):].split("/"):
if path_element == "":
continue
path_element_desc = BOLD() + BLUE() + path_element + ENDC()
subtree = subtree[path_element_desc]
if args.long:
item_desc = get_ls_l_desc(item['describe'])
else:
item_desc = item['describe']['name']
if item['describe']['class'] in ['applet', 'workflow']:
item_desc = BOLD() + GREEN() + item_desc + ENDC()
subtree[item_desc] = None
print(format_tree(tree, root=(BOLD() + BLUE() + args.path + ENDC())))
except:
err_exit()
def describe(args):
try:
if len(args.path) == 0:
raise DXCLIError('Must provide a nonempty string to be described')
# Attempt to resolve name
# First, if it looks like a hash id, do that.
json_input = {}
json_input["properties"] = True
if args.name and (args.verbose or args.details or args.json):
raise DXCLIError('Cannot request --name in addition to one of --verbose, --details, or --json')
# Always retrieve details too (just maybe don't render them)
json_input["details"] = True
if is_data_obj_id(args.path):
# Should prefer the current project's version if possible
if dxpy.WORKSPACE_ID is not None:
try:
# But only put it in the JSON if you still have
# access.
dxpy.api.project_list_folder(dxpy.WORKSPACE_ID)
json_input['project'] = dxpy.WORKSPACE_ID
except dxpy.DXAPIError as details:
if details.code != requests.codes.not_found:
raise
# Otherwise, attempt to look for it as a data object or
# execution
try:
project, _folderpath, entity_results = resolve_existing_path(args.path,
expected='entity',
ask_to_resolve=False,
describe=json_input)
except ResolutionError as details:
# PermissionDenied or InvalidAuthentication
if str(details).endswith('code 401'):
# Surface permissions-related errors here (for data
# objects, jobs, and analyses). Other types of errors
# may be recoverable below.
#
# TODO: better way of obtaining the response code when
# the exception corresponds to an API error
raise DXCLIError(str(details))
project, entity_results = None, None
found_match = False
json_output = []
get_result_str = ResultCounter()
# Could be a project
json_input = {}
json_input['countObjects'] = True
json_input['properties'] = True
if args.verbose:
json_input["permissions"] = True
json_input['appCaches'] = True
if entity_results is None:
if args.path[-1] == ':' and project is not None:
# It is the project.
try:
desc = dxpy.api.project_describe(project, json_input)
found_match = True
if args.json:
json_output.append(desc)
elif args.name:
print(desc['name'])
else:
print(get_result_str())
print_desc(desc, args.verbose)
except dxpy.DXAPIError as details:
if details.code != requests.codes.not_found:
raise
elif is_container_id(args.path):
try:
desc = dxpy.api.project_describe(args.path, json_input)
found_match = True
if args.json:
json_output.append(desc)
elif args.name:
print(desc['name'])
else:
print(get_result_str())
print_desc(desc, args.verbose)
except dxpy.DXAPIError as details:
if details.code != requests.codes.not_found:
raise
# Found data object or is an id
if entity_results is not None:
if len(entity_results) > 0:
found_match = True
for result in entity_results:
if args.json:
json_output.append(result['describe'])
elif args.name:
print(result['describe']['name'])
else:
print(get_result_str())
print_desc(result['describe'], args.verbose or args.details)
if not is_hashid(args.path) and ':' not in args.path:
# Could be an app name
if args.path.startswith('app-'):
try:
desc = dxpy.api.app_describe(args.path)
if args.json:
json_output.append(desc)
elif args.name:
print(desc['name'])
else:
print(get_result_str())
print_desc(desc, args.verbose)
found_match = True
except dxpy.DXAPIError as details:
if details.code != requests.codes.not_found:
raise
else:
for result in dxpy.find_apps(name=args.path, describe=True):
if args.json:
json_output.append(result['describe'])
elif args.name:
print(result['describe']['name'])
else:
print(get_result_str())
print_desc(result['describe'], args.verbose)
found_match = True
if args.path.startswith('user-'):
# User
try:
desc = dxpy.api.user_describe(args.path, {"appsInstalled": True, "subscriptions": True})
found_match = True
if args.json:
json_output.append(desc)
elif args.name:
print(str(desc['first']) + ' ' + str(desc['last']))
else:
print(get_result_str())
print_desc(desc, args.verbose)
except dxpy.DXAPIError as details:
if details.code != requests.codes.not_found:
raise
elif args.path.startswith('org-') or args.path.startswith('team-'):
# Org or team
try:
desc = dxpy.DXHTTPRequest('/' + args.path + '/describe', {})
found_match = True
if args.json:
json_output.append(desc)
elif args.name:
print(desc['id'])
else:
print(get_result_str())
print_desc(desc, args.verbose)
except dxpy.DXAPIError as details:
if details.code != requests.codes.not_found:
raise
if args.json:
if args.multi:
print(json.dumps(json_output, indent=4))
elif len(json_output) > 1:
raise DXCLIError('More than one match found for ' + args.path + '; to get all of them in JSON format, also provide the --multi flag.')
elif len(json_output) == 0:
raise DXCLIError('No match found for ' + args.path)
else:
print(json.dumps(json_output[0], indent=4))
elif not found_match:
raise DXCLIError("No matches found for " + args.path)
except:
err_exit()
def _validate_new_user_input(args):
# TODO: Support interactive specification of `args.username`.
# TODO: Support interactive specification of `args.email`.
if args.org is None and len(DXNewUserOrgArgsAction.user_specified_opts) > 0:
raise DXCLIError("Cannot specify {opts} without specifying --org".format(
opts=DXNewUserOrgArgsAction.user_specified_opts
))
def _get_user_new_args(args):
"""
PRECONDITION: `_validate_new_user_input()` has been called on `args`.
"""
user_new_args = {"username": args.username,
"email": args.email}
if args.first is not None:
user_new_args["first"] = args.first
if args.last is not None:
user_new_args["last"] = args.last
if args.middle is not None:
user_new_args["middle"] = args.middle
if args.token_duration is not None:
user_new_args["tokenDuration"] = args.token_duration
if args.occupation is not None:
user_new_args["occupation"] = args.occupation
if args.set_bill_to is True:
user_new_args["billTo"] = args.org
return user_new_args
def new_user(args):
_validate_new_user_input(args)
# Create user account.
#
# We prevent retries here because authserver is closing the server-side
# connection in certain situations. We cannot simply set `always_retry` to
# False here because we receive a 504 error code from the server.
# TODO: Allow retries when authserver issue is resolved.
dxpy.DXHTTPRequest(dxpy.get_auth_server_name() + "/user/new",
_get_user_new_args(args),
prepend_srv=False,
max_retries=0)
if args.org is not None:
# Invite new user to org.
dxpy.api.org_invite(args.org, get_org_invite_args(args))
if args.brief:
print("user-" + args.username)
else:
print(fill("Created new user account (user-{u})".format(
u=args.username
)))
def new_project(args):
    if args.name is None:
if INTERACTIVE_CLI:
args.name = input("Enter name for new project: ")
else:
parser.exit(1, parser_new_project.format_help() +
fill("No project name supplied, and input is not interactive") + '\n')
try:
resp = dxpy.api.project_new({"name": args.name})
if args.brief:
print(resp['id'])
else:
print(fill('Created new project called "' + args.name + '" (' + resp['id'] + ')'))
if args.select or (INTERACTIVE_CLI and prompt_for_yn("Switch to new project now?", default=False)):
set_project(resp['id'], write=True, name=args.name)
set_wd('/', write=True)
except:
err_exit()
def new_record(args):
try_call(process_dataobject_args, args)
try_call(process_single_dataobject_output_args, args)
init_from = None
if args.init is not None:
init_project, _init_folder, init_result = try_call(resolve_existing_path,
args.init,
expected='entity')
init_from = dxpy.DXRecord(dxid=init_result['id'], project=init_project)
if args.output is None:
project = dxpy.WORKSPACE_ID
folder = dxpy.config.get('DX_CLI_WD', u'/')
name = None
else:
project, folder, name = resolve_path(args.output)
dxrecord = None
try:
dxrecord = dxpy.new_dxrecord(project=project, name=name,
tags=args.tags, types=args.types,
hidden=args.hidden, properties=args.properties,
details=args.details,
folder=folder,
close=args.close,
parents=args.parents, init_from=init_from)
if args.brief:
print(dxrecord.get_id())
else:
print_desc(dxrecord.describe(incl_properties=True, incl_details=True), args.verbose)
except:
err_exit()
def new_gtable(args):
try_call(process_dataobject_args, args)
try_call(process_single_dataobject_output_args, args)
if args.output is None:
project = dxpy.WORKSPACE_ID
folder = dxpy.config.get('DX_CLI_WD', u'/')
name = None
else:
project, folder, name = resolve_path(args.output)
args.columns = split_unescaped(',', args.columns)
for i in range(len(args.columns)):
if ':' in args.columns[i]:
try:
col_name, col_type = args.columns[i].split(':')
except ValueError:
parser.exit(1, 'Too many colons found in column spec ' + args.columns[i] + '\n')
if col_type.startswith('bool'):
col_type = 'boolean'
else:
col_name = args.columns[i]
col_type = 'string'
args.columns[i] = {'name': col_name, 'type': col_type}
args.indices = [] if args.indices is None else json.loads(args.indices)
if args.gri is not None:
args.indices.append(dxpy.DXGTable.genomic_range_index(args.gri[0], args.gri[1], args.gri[2]))
args.types = ['gri'] if args.types is None else args.types + ['gri']
try:
dxgtable = dxpy.new_dxgtable(project=project, name=name,
tags=args.tags, types=args.types,
hidden=args.hidden, properties=args.properties,
details=args.details,
folder=folder,
parents=args.parents,
columns=args.columns,
indices=args.indices)
if args.brief:
print(dxgtable.get_id())
else:
print_desc(dxgtable.describe(incl_properties=True, incl_details=True))
except:
err_exit()
def set_visibility(args):
had_error = False
# Attempt to resolve name
_project, _folderpath, entity_results = try_call(resolve_existing_path,
args.path,
expected='entity',
allow_mult=True, all_mult=args.all)
if entity_results is None:
parser.exit(1, fill('Could not resolve "' + args.path + '" to a name or ID') + '\n')
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/setVisibility',
{"hidden": (args.visibility == 'hidden')})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
def get_details(args):
# Attempt to resolve name
_project, _folderpath, entity_result = try_call(resolve_existing_path,
args.path, expected='entity')
if entity_result is None:
parser.exit(1, fill('Could not resolve "' + args.path + '" to a name or ID') + '\n')
try:
print(json.dumps(dxpy.DXHTTPRequest('/' + entity_result['id'] + '/getDetails', {}), indent=4))
except:
err_exit()
def set_details(args):
had_error = False
# Attempt to resolve name
_project, _folderpath, entity_results = try_call(resolve_existing_path,
args.path, expected='entity',
allow_mult=True, all_mult=args.all)
if entity_results is None:
err_exit(exception=ResolutionError('Could not resolve "' + args.path + '" to a name or ID'),
expected_exceptions=(ResolutionError,))
# Throw error if both -f/--details-file and details supplied.
if args.details is not None and args.details_file is not None:
err_exit(exception=DXParserError('Cannot provide both -f/--details-file and details'),
expected_exceptions=(DXParserError,))
elif args.details is not None:
try:
details = json.loads(args.details)
except ValueError as e:
err_exit('Error: Details could not be parsed as JSON', expected_exceptions=(ValueError,), exception=e)
elif args.details_file is not None:
with (sys.stdin if args.details_file == '-' else open(args.details_file, 'r')) as fd:
data = fd.read()
try:
details = json.loads(data)
except ValueError as e:
err_exit('Error: File contents could not be parsed as JSON', expected_exceptions=(ValueError,),
exception=e)
# Throw error if missing arguments.
else:
err_exit(exception=DXParserError('Must set one of -f/--details-file or details'),
expected_exceptions=(DXParserError,))
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/setDetails', details)
except (dxpy.DXAPIError,) + network_exceptions as exc_details:
print(format_exception(exc_details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
def add_types(args):
had_error = False
# Attempt to resolve name
_project, _folderpath, entity_results = try_call(resolve_existing_path,
args.path,
expected='entity',
allow_mult=True, all_mult=args.all)
if entity_results is None:
parser.exit(1, fill('Could not resolve "' + args.path + '" to a name or ID') + '\n')
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/addTypes',
{"types": args.types})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
def remove_types(args):
had_error = False
# Attempt to resolve name
_project, _folderpath, entity_results = try_call(resolve_existing_path,
args.path,
expected='entity',
allow_mult=True, all_mult=args.all)
if entity_results is None:
parser.exit(1, fill('Could not resolve "' + args.path + '" to a name or ID') + '\n')
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/removeTypes',
{"types": args.types})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
def add_tags(args):
had_error = False
# Attempt to resolve name
project, _folderpath, entity_results = try_call(resolve_to_objects_or_project,
args.path,
args.all)
if entity_results is not None:
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/addTags',
{"project": project,
"tags": args.tags})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
elif not project.startswith('project-'):
parser.exit(1, 'Cannot add tags to a non-project data container\n')
else:
try:
dxpy.DXHTTPRequest('/' + project + '/addTags',
{"tags": args.tags})
except:
err_exit()
def remove_tags(args):
had_error = False
# Attempt to resolve name
project, _folderpath, entity_results = try_call(resolve_to_objects_or_project,
args.path,
args.all)
if entity_results is not None:
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/removeTags',
{"project": project,
"tags": args.tags})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
elif not project.startswith('project-'):
parser.exit(1, 'Cannot remove tags from a non-project data container\n')
else:
try:
dxpy.DXHTTPRequest('/' + project + '/removeTags',
{"tags": args.tags})
except:
err_exit()
def rename(args):
had_error = False
# Attempt to resolve name
project, _folderpath, entity_results = try_call(resolve_to_objects_or_project,
args.path,
args.all)
if entity_results is not None:
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/rename',
{"project": project,
"name": args.name})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
elif not project.startswith('project-'):
parser.exit(1, 'Cannot rename a non-project data container\n')
else:
try:
dxpy.api.project_update(project, {"name": args.name})
except:
err_exit()
def set_properties(args):
had_error = False
# Attempt to resolve name
project, _folderpath, entity_results = try_call(resolve_to_objects_or_project,
args.path,
args.all)
try_call(process_properties_args, args)
if entity_results is not None:
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/setProperties',
{"project": project,
"properties": args.properties})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
elif not project.startswith('project-'):
parser.exit(1, 'Cannot set properties on a non-project data container\n')
else:
try:
dxpy.api.project_set_properties(project, {"properties": args.properties})
except:
err_exit()
def unset_properties(args):
had_error = False
# Attempt to resolve name
project, _folderpath, entity_results = try_call(resolve_to_objects_or_project,
args.path,
args.all)
properties = {}
for prop in args.properties:
properties[prop] = None
if entity_results is not None:
for result in entity_results:
try:
dxpy.DXHTTPRequest('/' + result['id'] + '/setProperties',
{"project": project,
"properties": properties})
except (dxpy.DXAPIError,) + network_exceptions as details:
print(format_exception(details), file=sys.stderr)
had_error = True
if had_error:
parser.exit(1)
elif not project.startswith('project-'):
parser.exit(1, 'Cannot unset properties on a non-project data container\n')
else:
try:
dxpy.api.project_set_properties(project, {"properties": properties})
except:
err_exit()
def make_download_url(args):
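    # Resolve args.path to a file object and print a preauthenticated URL for downloading it.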
project, _folderpath, entity_result = try_call(resolve_existing_path, args.path, expected='entity')
if entity_result is None:
parser.exit(1, fill('Could not resolve ' + args.path + ' to a data object') + '\n')
if entity_result['describe']['class'] != 'file':
        parser.exit(1, fill('Error: dx make_download_url can only be used with file objects') + '\n')
if args.filename is None:
args.filename = entity_result['describe']['name']
try:
dxfile = dxpy.DXFile(entity_result['id'], project=project)
url, _headers = dxfile.get_download_url(preauthenticated=True,
duration=normalize_timedelta(args.duration)/1000 if args.duration else 24*3600,
filename=args.filename,
project=project)
print(url)
except:
err_exit()
def get(args):
# Attempt to resolve name
project, _folderpath, entity_result = try_call(resolve_existing_path,
args.path, expected='entity')
if entity_result is None:
parser.exit(3, fill('Could not resolve ' + args.path + ' to a data object') + '\n')
if entity_result['describe']['class'] == 'file':
download_one_file(project, entity_result['describe'], entity_result['describe']['name'], args)
return
if entity_result['describe']['class'] not in ['record', 'applet']:
parser.exit(3, 'Error: The given object is of class ' + entity_result['describe']['class'] + ' but an object of class file, record, or applet was expected\n')
fd = None
if entity_result['describe']['class'] == 'applet':
if args.output == '-':
parser.exit(3, 'Error: An applet cannot be dumped to stdout, please specify a directory\n')
output_base = args.output or '.'
applet_name = entity_result['describe']['name'].replace('/', '%2F')
if os.path.isdir(output_base):
output_path = os.path.join(output_base, applet_name)
else:
output_path = output_base
if os.path.isfile(output_path):
if not args.overwrite:
parser.exit(3, fill('Error: path "' + output_path + '" already exists but -f/--overwrite was not set') + '\n')
os.unlink(output_path)
# Here, output_path either points to a directory or a nonexistent path
if not os.path.exists(output_path):
os.mkdir(output_path)
# Here, output_path points to a directory
if len(os.listdir(output_path)):
# For safety, refuse to remove an existing non-empty
# directory automatically.
parser.exit(3, fill('Error: path "' + output_path + '" is an existing directory. Please remove it and try again.') + '\n')
        # Now output_path points to an empty directory, so we're ready to
# go.
elif args.output == '-':
fd = sys.stdout
else:
filename = args.output
if filename is None:
filename = entity_result['describe']['name'].replace('/', '%2F')
if args.output is None and not args.no_ext:
if entity_result['describe']['class'] == 'record':
filename += '.json'
if not args.overwrite and os.path.exists(filename):
parser.exit(1, fill('Error: path "' + filename + '" already exists but -f/--overwrite was not set') + '\n')
try:
fd = open(filename, 'w')
except:
err_exit('Error opening destination file ' + filename)
if entity_result['describe']['class'] == 'record':
try:
details = dxpy.DXHTTPRequest('/' + entity_result['id'] + '/getDetails',
{})
except:
err_exit()
fd.write(json.dumps(details, indent=4))
elif entity_result['describe']['class'] == 'applet':
from dxpy.utils.app_unbuilder import dump_applet
dump_applet(dxpy.DXApplet(entity_result['id'], project=project), output_path)
if fd is not None and args.output != '-':
fd.close()
def cat(args):
for path in args.path:
project, _folderpath, entity_result = try_call(resolve_existing_path, path)
if entity_result is None:
parser.exit(1, fill('Could not resolve ' + path + ' to a data object') + '\n')
if entity_result['describe']['class'] != 'file':
parser.exit(1, fill('Error: expected a file object') + '\n')
try:
dxfile = dxpy.DXFile(entity_result['id'], project=project)
while True:
chunk = dxfile.read(1024*1024)
if len(chunk) == 0:
break
sys.stdout.buffer.write(chunk)
except:
err_exit()
def download_or_cat(args):
if args.output == '-':
cat(parser.parse_args(['cat'] + args.paths))
return
download(args)
def head(args):
# Attempt to resolve name
project, _folderpath, entity_result = try_call(resolve_existing_path,
args.path, expected='entity')
if entity_result is None:
parser.exit(1, fill('Could not resolve ' + args.path + ' to a data object') + '\n')
if not entity_result['describe']['class'] in ['gtable', 'file']:
parser.exit(1, 'Error: The given object is of class ' + entity_result['describe']['class'] + ' but an object of class gtable or file was expected\n')
handler = dxpy.get_handler(entity_result['id'], project=project)
counter = 0
if args.lines > 0:
try:
if handler._class == 'file':
handler._read_bufsize = 1024*32
for line in handler:
print(line)
counter += 1
if counter == args.lines:
break
else:
if args.gri is not None:
try:
lo = int(args.gri[1])
hi = int(args.gri[2])
except:
parser.exit(1, fill('Error: the LO and HI arguments to --gri must be integers') + '\n')
gri_query = dxpy.DXGTable.genomic_range_query(args.gri[0],
lo,
hi,
args.gri_mode,
args.gri_name)
table_text, table_rows, table_cols = format_table(list(handler.iterate_query_rows(query=gri_query, limit=args.lines)),
column_specs = entity_result['describe']['columns'],
report_dimensions=True,
max_col_width=args.max_col_width)
else:
table_text, table_rows, table_cols = format_table(list(handler.iterate_rows(start=args.starting,
end=args.starting + args.lines)),
column_specs = entity_result['describe']['columns'],
report_dimensions=True,
max_col_width=args.max_col_width)
more_rows = entity_result['describe']['length'] - args.starting - args.lines
if more_rows > 0:
table_text += "\n{nrows} more rows".format(nrows=more_rows)
if sys.stdout.isatty():
if tty_rows <= table_rows or tty_cols <= table_cols:
try:
pipe = os.popen('less -RS', 'w')
pipe.write(table_text.encode(sys_encoding) if USING_PYTHON2 else table_text)
pipe.close()
return
except:
pass
sys.stdout.write(table_text + '\n')
except StopIteration:
pass
except:
err_exit()
def upload(args, **kwargs):
if args.output is not None and args.path is not None:
raise DXParserError('Error: Cannot provide both the -o/--output and --path/--destination arguments')
elif args.path is None:
args.path = args.output
if len(args.filename) > 1 and args.path is not None and not args.path.endswith("/"):
# When called as "dx upload x --dest /y", we upload to "/y"; with --dest "/y/", we upload to "/y/x".
# Called as "dx upload x y --dest /z", z is implicitly a folder, so append a slash to avoid incorrect path
# resolution.
args.path += "/"
paths = copy.copy(args.filename)
for path in paths:
args.filename = path
upload_one(args, **kwargs)
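# Real paths already visited during a recursive upload; used to break directory symlink loops.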
upload_seen_paths = set()
def upload_one(args, **kwargs):
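    # Upload a single file (or stdin when the filename is '-'); with -r/--recursive,
    # directories are walked by re-invoking upload_one on each entry.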
try_call(process_dataobject_args, args)
args.show_progress = args.show_progress and not args.brief
if args.path is None:
project = dxpy.WORKSPACE_ID
folder = dxpy.config.get('DX_CLI_WD', u'/')
name = None if args.filename == '-' else os.path.basename(args.filename)
else:
project, folder, name = resolve_path(args.path)
if name is None and args.filename != '-':
name = os.path.basename(args.filename)
if os.path.isdir(args.filename):
if not args.recursive:
parser.exit("Error: {f} is a directory but the -r/--recursive option was not given".format(f=args.filename))
norm_path = os.path.realpath(args.filename)
if norm_path in upload_seen_paths:
print("Skipping {f}: directory loop".format(f=args.filename), file=sys.stderr)
return
else:
upload_seen_paths.add(norm_path)
dir_listing = os.listdir(args.filename)
if len(dir_listing) == 0: # Create empty folder
dxpy.api.project_new_folder(project, {"folder": os.path.join(folder, os.path.basename(args.filename)),
"parents": True})
else:
for f in dir_listing:
sub_args = copy.copy(args)
sub_args.mute = True
sub_args.filename = os.path.join(args.filename, f)
sub_args.path = u"{p}:{f}/{sf}/".format(p=project, f=folder, sf=os.path.basename(args.filename))
sub_args.parents = True
upload_one(sub_args)
else:
try:
dxfile = dxpy.upload_local_file(filename=(None if args.filename == '-' else args.filename),
file=(sys.stdin.buffer if args.filename == '-' else None),
name=name,
tags=args.tags,
types=args.types,
hidden=args.hidden,
project=project,
properties=args.properties,
details=args.details,
folder=folder,
parents=args.parents,
show_progress=args.show_progress)
if args.wait:
dxfile._wait_on_close()
if args.brief:
print(dxfile.get_id())
elif not args.mute:
print_desc(dxfile.describe(incl_properties=True, incl_details=True))
except:
err_exit()
def import_csv(args):
sys.argv = [sys.argv[0] + ' import csv'] + args.importer_args
from dxpy.scripts import dx_csv_to_gtable
dx_csv_to_gtable.main()
def import_tsv(args):
sys.argv = [sys.argv[0] + ' import tsv'] + args.importer_args
from dxpy.scripts import dx_tsv_to_gtable
dx_tsv_to_gtable.main()
importers = {
"tsv": import_tsv,
"csv": import_csv
}
def dximport(args):
if args.format.lower() not in importers:
parser.exit(1, fill('Unsupported format: "' + args.format + '". For a list of supported formats, run "dx help import"') + '\n')
importers[args.format.lower()](args)
def export_fastq(args):
sys.argv = [sys.argv[0] + ' export fastq'] + args.exporter_args
from dxpy.scripts import dx_reads_to_fastq
dx_reads_to_fastq.main()
def export_sam(args):
sys.argv = [sys.argv[0] + ' export sam'] + args.exporter_args
from dxpy.scripts import dx_mappings_to_sam
dx_mappings_to_sam.main()
def export_csv(args):
sys.argv = [sys.argv[0] + ' export csv'] + args.exporter_args
from dxpy.scripts import dx_gtable_to_csv
dx_gtable_to_csv.main()
def export_tsv(args):
sys.argv = [sys.argv[0] + ' export tsv'] + args.exporter_args
from dxpy.scripts import dx_gtable_to_tsv
dx_gtable_to_tsv.main()
def export_vcf(args):
sys.argv = [sys.argv[0] + ' export vcf'] + args.exporter_args
from dxpy.scripts import dx_variants_to_vcf
dx_variants_to_vcf.main()
exporters = {
"tsv": export_tsv,
"csv": export_csv,
"fastq": export_fastq,
"sam": export_sam,
"vcf": export_vcf,
}
def export(args):
if args.format.lower() not in exporters:
parser.exit(1, fill('Unsupported format: "' + args.format + '". For a list of supported formats, run "dx help export"') + '\n')
exporters[args.format.lower()](args)
def find_executions(args):
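    # Find jobs/analyses matching the query flags; unless --origin-jobs or --all-jobs
    # is given, matching executions are assembled and printed as trees.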
try_call(process_find_by_property_args, args)
if not (args.origin_jobs or args.all_jobs):
args.trees = True
if args.origin_jobs and args.parent is not None and args.parent != 'none':
return
project = dxpy.WORKSPACE_ID
origin = None
more_results = False
include_io = (args.verbose and args.json) or args.show_outputs
id_desc = None
# Now start parsing flags
if args.id is not None:
id_desc = try_call(dxpy.api.job_describe, args.id, {"io": False})
origin = id_desc.get('originJob', None)
if args.origin_jobs and args.id != origin:
return
if args.origin is not None and origin != args.origin:
return
project = None
args.user = None
else:
origin = args.origin
if args.project is not None:
if get_last_pos_of_char(':', args.project) == -1:
args.project = args.project + ':'
project, _none, _none = try_call(resolve_existing_path,
args.project, 'project')
if args.user is not None and args.user != 'self' and not args.user.startswith('user-'):
args.user = 'user-' + args.user.lower()
if args.all_projects:
project = None
query = {'classname': args.classname,
'launched_by': args.user,
'executable': args.executable,
'project': project,
'state': args.state,
'origin_job': origin,
'parent_job': "none" if args.origin_jobs else args.parent,
'describe': {"io": include_io},
'created_after': args.created_after,
'created_before': args.created_before,
'name': args.name,
'name_mode': 'glob',
'tags': args.tag,
'properties': args.properties,
'include_subjobs': False if args.no_subjobs else True,
'root_execution': args.root_execution}
if args.num_results < 1000 and not args.trees:
query['limit'] = args.num_results + 1
json_output = [] # for args.json
def build_tree(root, executions_by_parent, execution_descriptions, is_cached_result=False):
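        # Depth-first construction of an OrderedDict tree of executions rooted at root;
        # as a side effect, emits JSON or brief output for each node when requested.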
tree, root_string = {}, ''
if args.json:
json_output.append(execution_descriptions[root])
elif args.brief:
print(root)
else:
root_string = get_find_executions_string(execution_descriptions[root],
has_children=root in executions_by_parent,
show_outputs=args.show_outputs,
is_cached_result=is_cached_result)
tree[root_string] = collections.OrderedDict()
for child_execution in executions_by_parent.get(root, {}):
child_is_cached_result = is_cached_result or \
(root.startswith('analysis-') and \
execution_descriptions[child_execution].get('parentAnalysis') != root)
subtree, _subtree_root = build_tree(child_execution,
executions_by_parent,
execution_descriptions,
is_cached_result=child_is_cached_result)
if tree:
tree[root_string].update(subtree)
return tree, root_string
def process_tree(result, executions_by_parent, execution_descriptions):
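        # Build and print the formatted execution tree for a single root.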
tree, root = build_tree(result['id'], executions_by_parent, execution_descriptions)
if tree:
print(format_tree(tree[root], root))
try:
num_processed_results = 0
roots = collections.OrderedDict()
for execution_result in dxpy.find_executions(**query):
if args.trees:
if args.classname == 'job':
root = execution_result['describe']['originJob']
else:
root = execution_result['describe']['rootExecution']
if root not in roots:
num_processed_results += 1
else:
num_processed_results += 1
if (num_processed_results > args.num_results):
more_results = True
break
if args.json:
json_output.append(execution_result['describe'])
elif args.trees:
roots[root] = root
if args.classname == 'analysis' and root.startswith('job-'):
# Analyses in trees with jobs at their root found in "dx find analyses" are displayed unrooted,
# and only the last analysis found is displayed.
roots[root] = execution_result['describe']['id']
elif args.brief:
print(execution_result['id'])
elif not args.trees:
print(format_tree({}, get_find_executions_string(execution_result['describe'],
has_children=False,
single_result=True,
show_outputs=args.show_outputs)))
if args.trees:
executions_by_parent, descriptions = collections.defaultdict(list), {}
root_field = 'origin_job' if args.classname == 'job' else 'root_execution'
parent_field = 'masterJob' if args.no_subjobs else 'parentJob'
query = {'classname': args.classname,
'describe': {"io": include_io},
'include_subjobs': False if args.no_subjobs else True,
root_field: list(roots.keys())}
if not args.all_projects:
# If the query doesn't specify a project, the server finds all projects to which the user has explicit
# permissions, but doesn't search through public projects.
# In "all projects" mode, we don't specify a project in the initial query, and so don't need to specify
# one in the follow-up query here (because the initial query can't return any jobs in projects to which
# the user doesn't have explicit permissions).
# When searching in a specific project, we set a project in the query here, in case this is a public
# project and the user doesn't have explicit permissions (otherwise, the follow-up query would return
# empty results).
query['project'] = project
def process_execution_result(execution_result):
execution_desc = execution_result['describe']
parent = execution_desc.get(parent_field) or execution_desc.get('parentAnalysis')
descriptions[execution_result['id']] = execution_desc
if parent:
executions_by_parent[parent].append(execution_result['id'])
                # If this is an analysis with cached children, also insert those
if execution_desc['class'] == 'analysis':
for stage_desc in execution_desc['stages']:
if stage_desc['execution']['parentAnalysis'] != execution_result['id'] and \
(args.classname != 'analysis' or stage_desc['execution']['class'] == 'analysis'):
# this is a cached stage (with a different parent)
executions_by_parent[execution_result['id']].append(stage_desc['execution']['id'])
if stage_desc['execution']['id'] not in descriptions:
descriptions[stage_desc['execution']['id']] = stage_desc['execution']
# Short-circuit the find_execution API call(s) if there are
# no root executions (and therefore we would have gotten 0
# results anyway)
if len(roots.keys()) > 0:
for execution_result in dxpy.find_executions(**query):
process_execution_result(execution_result)
# ensure roots are sorted by their creation time
            sorted_roots = sorted(roots, key=lambda root: -descriptions[roots[root]]['created'])
for root in sorted_roots:
process_tree(descriptions[roots[root]], executions_by_parent, descriptions)
if args.json:
print(json.dumps(json_output, indent=4))
if more_results and get_delimiter() is None and not (args.brief or args.json):
print(fill("* More results not shown; use -n to increase number of results or --created-before to show older results", subsequent_indent=' '))
except:
err_exit()
def find_data(args):
# --folder deprecated to --path.
if args.folder is None and args.path is not None:
args.folder = args.path
elif args.folder is not None and args.path is not None:
err_exit(exception=DXParserError('Cannot supply both --folder and --path.'),
expected_exceptions=(DXParserError,))
try_call(process_find_by_property_args, args)
if args.all_projects:
args.project = None
args.folder = None
args.recurse = True
elif args.project is None:
args.project = dxpy.WORKSPACE_ID
else:
if get_last_pos_of_char(':', args.project) == -1:
args.project = args.project + ':'
if args.folder is not None and get_last_pos_of_char(':', args.folder) != -1:
err_exit(exception=DXParserError('Cannot supply both --project and --path PROJECTID:FOLDERPATH.'),
expected_exceptions=(DXParserError,))
args.project, _none, _none = try_call(resolve_existing_path,
args.project, 'project')
if args.folder is not None and not args.folder.startswith('/'):
args.project, args.folder, _none = try_call(resolve_path, args.folder, expected='folder')
try:
results = dxpy.find_data_objects(classname=args.classname,
state=args.state,
visibility=args.visibility,
properties=args.properties,
name=args.name,
name_mode='glob',
typename=args.type,
tags=args.tag, link=args.link,
project=args.project,
folder=args.folder,
recurse=(args.recurse if not args.recurse else None),
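                                          # None defers to find_data_objects' default (recursive) behavior;
                                          # False restricts the search to the given folder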
modified_after=args.mod_after,
modified_before=args.mod_before,
created_after=args.created_after,
created_before=args.created_before,
describe=(not args.brief))
if args.json:
print(json.dumps(list(results), indent=4))
return
if args.brief:
for result in results:
print(result['project'] + ':' + result['id'])
else:
for result in results:
if args.verbose:
print("")
print_data_obj_desc(result["describe"])
else:
print_ls_l_desc(result["describe"], include_folder=True, include_project=args.all_projects)
except:
err_exit()
def find_projects(args):
try_call(process_find_by_property_args, args)
try:
results = dxpy.find_projects(name=args.name, name_mode='glob',
properties=args.properties, tags=args.tag,
level=('VIEW' if args.public else args.level),
describe=(not args.brief),
explicit_perms=(not args.public if not args.public else None),
public=(args.public if args.public else None),
created_after=args.created_after,
created_before=args.created_before)
if args.json:
print(json.dumps(list(results), indent=4))
return
if args.brief:
for result in results:
print(result['id'])
else:
for result in results:
print(result["id"] + DELIMITER(" : ") + result['describe']['name'] +
DELIMITER(' (') + result["level"] + DELIMITER(')'))
except:
err_exit()
def find_apps(args):
def maybe_x(result):
return DNANEXUS_X() if result['describe']['billTo'] == 'org-dnanexus' else ' '
try:
raw_results = dxpy.find_apps(name=args.name, name_mode='glob', category=args.category,
all_versions=args.all,
published=(not args.unpublished),
billed_to=args.billed_to,
created_by=args.creator,
developer=args.developer,
created_after=args.created_after,
created_before=args.created_before,
modified_after=args.mod_after,
modified_before=args.mod_before,
describe={"fields": {"name": True,
"installed": args.installed,
"title": not args.brief,
"version": not args.brief,
"published": args.verbose,
"billTo": not args.brief}})
if args.installed:
maybe_filtered_by_install = (result for result in raw_results if result['describe']['installed'])
else:
maybe_filtered_by_install = raw_results
if args.brief:
results = ({"id": result['id']} for result in maybe_filtered_by_install)
else:
results = sorted(maybe_filtered_by_install, key=lambda result: result['describe']['name'])
if args.json:
print(json.dumps(list(results), indent=4))
return
if args.brief:
for result in results:
print(result['id'])
elif not args.verbose:
for result in results:
print(maybe_x(result) + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER("), v") + result["describe"]["version"])
else:
for result in results:
print(maybe_x(result) + DELIMITER(" ") + result["id"] + DELIMITER(" ") + result['describe'].get('title', result['describe']['name']) + DELIMITER(' (') + result["describe"]["name"] + DELIMITER('), v') + result['describe']['version'] + DELIMITER(" (") + ("published" if result["describe"].get("published", 0) > 0 else "unpublished") + DELIMITER(")"))
except:
err_exit()
def close(args):
if '_DX_FUSE' in os.environ:
from xattr import xattr
handlers = []
had_error = False
for path in args.path:
# Attempt to resolve name
try:
project, _folderpath, entity_results = resolve_existing_path(path,
expected='entity',
allow_mult=True,
all_mult=args.all)
except:
project, entity_results = None, None
if entity_results is None:
print(fill('Could not resolve "' + path + '" to a name or ID'))
had_error = True
else:
for result in entity_results:
try:
obj = dxpy.get_handler(result['id'], project=project)
if '_DX_FUSE' in os.environ:
xattr(path)['state'] = 'closed'
else:
obj.close()
handlers.append(obj)
except Exception as details:
print(fill(str(details)))
if args.wait:
for handler in handlers:
handler._wait_on_close()
if had_error:
parser.exit(1)
def wait(args):
had_error = False
for path in args.path:
if is_job_id(path) or is_analysis_id(path):
dxexecution = dxpy.get_handler(path)
print("Waiting for " + path + " to finish running...")
try_call(dxexecution.wait_on_done)
print("Done")
else:
# Attempt to resolve name
try:
project, _folderpath, entity_result = resolve_existing_path(path, expected='entity')
except:
project, entity_result = None, None
if entity_result is None:
print(fill('Could not resolve ' + path + ' to a data object'))
had_error = True
else:
handler = dxpy.get_handler(entity_result['id'], project=project)
print("Waiting for " + path + " to close...")
try_call(handler._wait_on_close)
print("Done")
if had_error:
parser.exit(1)
def build(args):
from dxpy.scripts import dx_build_app
sys.argv = ['dx build'] + sys.argv[2:]
dx_build_app.main()
def process_list_of_usernames(thing):
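    # Normalize bare names to user IDs (e.g. "alice" -> "user-alice"), leaving
    # "PUBLIC" and entries already prefixed with "org-" or "user-" untouched.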
return ['user-' + name.lower() if name != 'PUBLIC' and
not name.startswith('org-') and
not name.startswith('user-')
else name
for name in thing]
def add_users(args):
app_desc = try_call(resolve_app, args.app)
args.users = process_list_of_usernames(args.users)
try:
dxpy.api.app_add_authorized_users(app_desc['id'], input_params={"authorizedUsers": args.users})
except:
err_exit()
def remove_users(args):
app_desc = try_call(resolve_app, args.app)
args.users = process_list_of_usernames(args.users)
try:
dxpy.api.app_remove_authorized_users(app_desc['id'], input_params={"authorizedUsers": args.users})
except:
err_exit()
def list_users(args):
app_desc = try_call(resolve_app, args.app)
for user in app_desc['authorizedUsers']:
print(user)
def add_developers(args):
app_desc = try_call(resolve_app, args.app)
args.developers = process_list_of_usernames(args.developers)
if any(entity.startswith('org-') for entity in args.developers):
        err_exit('Error: organizations as developers of an app are currently unsupported', code=3)
try:
dxpy.api.app_add_developers(app_desc['id'], input_params={"developers": args.developers})
except:
err_exit()
def list_developers(args):
app_desc = try_call(resolve_app, args.app)
try:
for user in dxpy.api.app_list_developers(app_desc['id'])['developers']:
print(user)
except:
err_exit()
def remove_developers(args):
app_desc = try_call(resolve_app, args.app)
args.developers = process_list_of_usernames(args.developers)
try:
dxpy.api.app_remove_developers(app_desc['id'], input_params={"developers": args.developers})
except:
err_exit()
def install(args):
app_desc = try_call(resolve_app, args.app)
try:
dxpy.api.app_install(app_desc['id'])
print('Installed the ' + app_desc['name'] + ' app')
except:
err_exit()
def uninstall(args):
app_desc = get_app_from_path(args.app)
if app_desc is None:
parser.exit(1, 'Could not find the app\n')
else:
try:
dxpy.api.app_uninstall(app_desc['id'])
print('Uninstalled the ' + app_desc['name'] + ' app')
except:
err_exit()
def run_one(args, executable, dest_proj, dest_path, preset_inputs=None, input_name_prefix=None,
is_the_only_job=True):
# following may throw if the executable is a workflow with no
# input spec available (because a stage is inaccessible)
exec_inputs = try_call(ExecutableInputs, executable, input_name_prefix=input_name_prefix)
if args.input_json is None and args.filename is None:
# --input-json and --input-json-file completely override input
# from the cloned job
exec_inputs.update(args.input_from_clone, strip_prefix=False)
if args.sys_reqs_from_clone and not isinstance(args.instance_type, basestring):
args.instance_type = dict({stage: reqs['instanceType'] for stage, reqs in args.sys_reqs_from_clone.items()},
**(args.instance_type or {}))
if preset_inputs is not None:
exec_inputs.update(preset_inputs, strip_prefix=False)
try_call(exec_inputs.update_from_args, args)
input_json = exec_inputs.inputs
run_kwargs = {
"project": dest_proj,
"folder": dest_path,
"name": args.name,
"tags": args.tags,
"properties": args.properties,
"details": args.details,
"depends_on": args.depends_on or None,
"allow_ssh": args.allow_ssh,
"debug": {"debugOn": args.debug_on} if args.debug_on else None,
"delay_workspace_destruction": args.delay_workspace_destruction,
"priority": ("high" if args.watch else args.priority),
"instance_type": args.instance_type,
"stage_instance_types": args.stage_instance_types,
"stage_folders": args.stage_folders,
"rerun_stages": args.rerun_stages,
"extra_args": args.extra_args
}
if not args.brief:
print()
print('Using input JSON:')
print(json.dumps(input_json, indent=4))
print()
if isinstance(executable, dxpy.DXWorkflow):
try:
dry_run = dxpy.api.workflow_dry_run(executable.get_id(),
executable._get_run_input(input_json, **run_kwargs))
# print which stages are getting rerun
# Note: information may be out of date if the dryRun
# is performed too soon after the candidate execution
# has been constructed (and the jobs have not yet been
# created in the system); this errs on the side of
# assuming such stages will be re-run.
num_cached_stages = len([stage for stage in dry_run['stages'] if
'parentAnalysis' in stage['execution'] and
stage['execution']['parentAnalysis'] != dry_run['id']])
if num_cached_stages > 0:
print(fill('The following ' + str(num_cached_stages) + ' stage(s) will reuse results from a previous analysis:'))
for i, stage in enumerate(dry_run['stages']):
if 'parentAnalysis' in stage['execution'] and \
stage['execution']['parentAnalysis'] != dry_run['id']:
stage_name = stage['execution']['name']
print(' Stage ' + str(i) + ': ' + stage_name + \
' (' + stage['execution']['id'] + ')')
print()
except DXAPIError:
# Just don't print anything for now if the dryRun
# method is not yet available
pass
if args.priority == "normal" and not args.brief:
special_access = set()
executable_desc = executable.describe()
write_perms = ['UPLOAD', 'CONTRIBUTE', 'ADMINISTER']
def check_for_special_access(access_spec):
if not access_spec:
return
if access_spec.get('developer'):
special_access.add('access to apps as a developer')
if access_spec.get('network'):
special_access.add('Internet access')
if access_spec.get('project') in write_perms or \
access_spec.get('allProjects') in write_perms:
special_access.add('write access to one or more projects')
if isinstance(executable, dxpy.DXWorkflow):
for stage_desc in executable_desc['stages']:
stage_exec_desc = dxpy.describe(stage_desc['executable'])
check_for_special_access(stage_exec_desc.get('access'))
else:
check_for_special_access(executable_desc.get('access'))
if special_access:
print(fill(BOLD("WARNING") + ": You have requested that jobs be run under " +
BOLD("normal") +
" priority, which may cause them to be restarted at any point, but " +
"the executable you are trying to run has " +
"requested extra permissions (" + ", ".join(sorted(special_access)) + "). " +
"Unexpected side effects or failures may occur if the executable has not " +
"been written to behave well when restarted."))
print()
# Ask for confirmation if a tty and if input was not given as a
# single JSON.
if args.confirm and INTERACTIVE_CLI:
if not prompt_for_yn('Confirm running the executable with this input', default=True):
parser.exit(0)
if not args.brief:
print(fill("Calling " + executable.get_id() + " with output destination " + dest_proj + ":" + dest_path,
subsequent_indent=' ') + '\n')
try:
dxexecution = executable.run(input_json, **run_kwargs)
if not args.brief:
print(dxexecution._class.capitalize() + " ID: " + dxexecution.get_id())
else:
print(dxexecution.get_id())
sys.stdout.flush()
if args.wait and is_the_only_job:
dxexecution.wait_on_done()
elif args.confirm and INTERACTIVE_CLI and not (args.watch or args.ssh) and isinstance(dxexecution, dxpy.DXJob):
answer = input("Watch launched job now? [Y/n] ")
if len(answer) == 0 or answer.lower()[0] == 'y':
args.watch = True
if is_the_only_job and isinstance(dxexecution, dxpy.DXJob):
if args.watch:
watch_args = parser.parse_args(['watch', dxexecution.get_id()])
print('')
print('Job Log')
print('-------')
watch(watch_args)
elif args.ssh:
ssh_args = parser.parse_args(['ssh', dxexecution.get_id()])
ssh(ssh_args, ssh_config_verified=True)
except Exception:
err_exit()
return dxexecution
def print_run_help(executable="", alias=None):
if executable == "":
parser_map['run'].print_help()
else:
exec_help = 'usage: dx run ' + executable + ('' if alias is None else ' --alias ' + alias)
handler = try_call(get_exec_handler, executable, alias)
try:
exec_desc = handler.describe()
except:
err_exit()
exec_help += ' [-iINPUT_NAME=VALUE ...]\n\n'
if isinstance(handler, dxpy.bindings.DXApp):
exec_help += BOLD("App: ")
exec_details = exec_desc['details']
else:
exec_help += BOLD(exec_desc['class'].capitalize() + ": ")
exec_details = handler.get_details()
advanced_inputs = exec_details.get("advancedInputs", []) if isinstance(exec_details, dict) else []
exec_help += exec_desc.get('title', exec_desc['name']) + '\n\n'
summary = exec_desc.get('summary', '')
if summary != '':
exec_help += fill(summary) + "\n\n"
# Contact URL here
if isinstance(handler, dxpy.bindings.DXApp):
exec_help += "See the app page for more information:\n https://platform.dnanexus.com/app/" + exec_desc['name'] +"\n\n"
exec_help += BOLD("Inputs:")
advanced_inputs_help = "Advanced Inputs:"
if 'inputSpec' in exec_desc:
if len(exec_desc['inputSpec']) == 0:
exec_help += " <none>\n"
else:
for group, params in group_array_by_field(exec_desc['inputSpec']).items():
if group is not None:
exec_help += "\n " + BOLD(group)
for param in params:
param_string = "\n "
param_string += UNDERLINE(param.get('label', param['name'])) + ": "
param_string += get_io_desc(param, app_help_version=True) + "\n"
helpstring = param.get('help', '')
stanzas = []
if 'choices' in param:
stanzas.append(format_choices_or_suggestions('Choices:',
param['choices'],
param['class']))
if helpstring != '':
stanzas.append(fill(helpstring, initial_indent=' ', subsequent_indent=' '))
if param.get('suggestions'):
stanzas.append(format_choices_or_suggestions('Suggestions:',
param['suggestions'],
param['class']))
param_string += "\n\n".join(stanzas) + ("\n" if stanzas else "")
if param['name'] in advanced_inputs:
advanced_inputs_help += param_string
else:
exec_help += param_string
if len(advanced_inputs) > 0:
exec_help += "\n" + advanced_inputs_help
else:
exec_help += " no specification provided"
exec_help += "\n"
exec_help += BOLD("Outputs:")
if 'outputSpec' in exec_desc:
if len(exec_desc['outputSpec']) == 0:
exec_help += " <none>\n"
else:
for param in exec_desc['outputSpec']:
exec_help += "\n "
exec_help += UNDERLINE(param.get('label', param['name'])) + ": "
exec_help += get_io_desc(param) + "\n"
helpstring = param.get('help', '')
if helpstring != '':
exec_help += fill(helpstring,
initial_indent=' ',
subsequent_indent=' ') + "\n"
else:
exec_help += " no specification provided"
pager(exec_help)
parser.exit(0)
def print_run_input_help():
print('Help: Specifying input for dx run\n')
print(fill('There are several ways to specify inputs. In decreasing order of precedence, they are:'))
print('''
1) inputs given in the interactive mode
2) inputs listed individually with the -i/--input command line argument
3) JSON given in --input-json
4) JSON given in --input-json-file
5) if cloning a job with --clone, the input that the job was run with
(this will get overridden completely if -j/--input-json or
-f/--input-json-file are provided)
6) default values set in a workflow or an executable's input spec
''')
print('SPECIFYING INPUTS BY NAME\n\n' + fill('Use the -i/--input flag to specify each input field by ' + BOLD('name') + ' and ' + BOLD('value') + '.', initial_indent=' ', subsequent_indent=' '))
print('''
Syntax : -i<input name>=<input value>
Example: dx run myApp -inum=34 -istr=ABC -igtables=reads1 -igtables=reads2
''')
print(fill('The example above runs an app called "myApp" with 3 inputs called num (class int), str (class string), and gtables (class array:gtable). (For this method to work, the app must have an input spec so inputs can be interpreted correctly.) The same input field can be used multiple times if the input class is an array.', initial_indent=' ', subsequent_indent=' '))
print('\n' + fill(BOLD('Job-based object references') + ' can also be provided using the <job id>:<output name> syntax:', initial_indent=' ', subsequent_indent=' '))
print('''
Syntax : -i<input name>=<job id>:<output name>
Example: dx run mapper -ireads=job-B0fbxvGY00j9jqGQvj8Q0001:reads
''')
print(fill('You can ' + BOLD('extract an element of an array output') +
' using the <job id>:<output name>.<element> syntax:',
initial_indent=' ', subsequent_indent=' '))
print('''
Syntax : -i<input name>=<job id>:<output name>.<element>
Example: dx run mapper -ireadsfile=job-B0fbxvGY00j9jqGQvj8Q0001:reads.1
# Extracts second element of array output
''')
print(fill('When executing ' + BOLD('workflows') + ', stage inputs can be specified using the <stage key>.<input name>=<value> syntax:', initial_indent=' ', subsequent_indent=' '))
print('''
Syntax : -i<stage key>.<input name>=<input value>
Example: dx run my_workflow -i1.reads="My reads file"
SPECIFYING JSON INPUT
''')
print(fill('JSON input can be used directly using the -j/--input-json or -f/--input-json-file flags. When running an ' + BOLD('app') + ' or ' + BOLD('applet') + ', the keys should be the input field names for the app or applet. When running a ' + BOLD('workflow') + ', the keys should be the input field names for each stage, prefixed by the stage key and a period, e.g. "1.reads" for the "reads" input of stage "1".', initial_indent=' ', subsequent_indent=' ') + '\n')
parser.exit(0)
def run(args):
if args.help:
print_run_help(args.executable, args.alias)
if args.allow_ssh is not None:
args.allow_ssh = [i for i in args.allow_ssh if i is not None]
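    # A bare --allow-ssh, or --ssh/--debug-on without an explicit allowlist, permits SSH from any address.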
if args.allow_ssh == [] or ((args.ssh or args.debug_on) and not args.allow_ssh):
args.allow_ssh = ['*']
if args.ssh or args.allow_ssh or args.debug_on:
verify_ssh_config()
try_call(process_extra_args, args)
try_call(process_properties_args, args)
if args.clone is None and args.executable == "":
parser.exit(2, parser_map['run'].format_help() +
fill("Error: Either the executable must be specified, or --clone must be used to indicate a job or analysis to clone") + "\n")
args.input_from_clone, args.sys_reqs_from_clone = {}, {}
dest_proj, dest_path = None, None
if args.project is not None:
if args.folder is not None and not args.clone:
err_exit(exception=DXCLIError(
"Options --project and --folder/--destination cannot be specified together"
))
dest_proj = args.project
if args.folder is not None:
dest_proj, dest_path, _none = try_call(resolve_existing_path,
args.folder,
expected='folder')
# at this point, allow the --clone options to set the destination
# project and path if available
# Process the --stage-output-folder and
# --stage-relative-output-folder options if provided
if args.stage_output_folder or args.stage_relative_output_folder:
stage_folders = {}
for stage, stage_folder in args.stage_output_folder:
_proj, stage_folder, _none = try_call(resolve_existing_path,
stage_folder,
expected='folder')
stage_folders[stage] = stage_folder
for stage, stage_folder in args.stage_relative_output_folder:
stage_folders[stage] = stage_folder.lstrip('/')
if stage_folders:
args.stage_folders = stage_folders
clone_desc = None
if args.clone is not None:
# Resolve job ID or name
if is_job_id(args.clone) or is_analysis_id(args.clone):
clone_desc = dxpy.api.job_describe(args.clone)
else:
iterators = []
if ":" in args.clone:
colon_pos = args.clone.find(":")
try:
# Resolve args.clone[:args.clone.find(":")] to a project name or ID
# And find jobs in that with that name
proj_id = resolve_container_id_or_name(args.clone[:colon_pos])
if proj_id is not None:
execution_name_or_id = args.clone[colon_pos + 1:]
if is_job_id(execution_name_or_id) or is_analysis_id(execution_name_or_id):
clone_desc = dxpy.api.job_describe(execution_name_or_id)
else:
iterators.append(dxpy.find_executions(name=execution_name_or_id,
describe={"io": False},
project=proj_id))
except:
pass
if clone_desc is None:
if dxpy.WORKSPACE_ID is not None:
try:
iterators.append(dxpy.find_jobs(name=args.clone,
describe={"io": False},
project=dxpy.WORKSPACE_ID))
except:
pass
import itertools
result_choice = paginate_and_pick(itertools.chain(*iterators),
(lambda result:
get_find_executions_string(result["describe"],
has_children=False,
single_result=True)))
if result_choice == "none found":
parser.exit(1, "dx run --clone: No matching execution found. Please use a valid job or analysis name or ID.\n")
elif result_choice == "none picked":
parser.exit(1)
else:
clone_desc = dxpy.api.job_describe(result_choice["id"])
if args.folder is None:
dest_proj = dest_proj or clone_desc["project"]
dest_path = clone_desc["folder"]
# set name, tags, properties, and priority from the cloned
# execution if the options have not been explicitly set
if args.name is None:
            match_obj = re.search(r"\(re-run\)$", clone_desc["name"])
if match_obj is None:
args.name = clone_desc["name"] + " (re-run)"
else:
args.name = clone_desc["name"]
for metadata in 'tags', 'properties', 'priority':
if getattr(args, metadata) is None:
setattr(args, metadata, clone_desc.get(metadata))
if clone_desc['class'] == 'job':
if args.executable == "":
args.executable = clone_desc.get("applet", clone_desc.get("app", ""))
args.input_from_clone = clone_desc["runInput"]
args.sys_reqs_from_clone = clone_desc["systemRequirements"]
if args.details is None:
args.details = {
"clonedFrom": {
"id": clone_desc["id"],
"executable": clone_desc.get("applet", clone_desc.get("app", "")),
"project": clone_desc["project"],
"folder": clone_desc["folder"],
"name": clone_desc["name"],
"runInput": clone_desc["runInput"],
"systemRequirements": clone_desc["systemRequirements"]
}
}
else:
# make a temporary workflow
args.executable = dxpy.api.workflow_new({"project": dest_proj,
"initializeFrom": {"id": clone_desc["id"]},
"temporary": True})["id"]
handler = try_call(get_exec_handler, args.executable, args.alias)
if args.depends_on and isinstance(handler, dxpy.DXWorkflow):
err_exit(exception=DXParserError("-d/--depends-on cannot be supplied when running workflows."),
expected_exceptions=(DXParserError,))
# if the destination project has still not been set, use the
# current project
if dest_proj is None:
dest_proj = dxpy.WORKSPACE_ID
if dest_proj is None:
err_exit(exception=DXCLIError(
'Unable to find project to run the app in. ' +
'Please run "dx select" to set the working project, or use --folder=project:path'
))
is_workflow = isinstance(handler, dxpy.DXWorkflow)
# if the destination path has still not been set, use the current
# directory as the default; but only do this if not running a
# workflow with outputFolder already set
if dest_path is None:
if is_workflow:
dest_path = getattr(handler, 'outputFolder', None)
if dest_path is None:
dest_path = dxpy.config.get('DX_CLI_WD', u'/')
process_instance_type_arg(args, is_workflow)
run_one(args, handler, dest_proj, dest_path)
def terminate(args):
for jobid in args.jobid:
try:
dxpy.api.job_terminate(jobid)
except:
err_exit()
def shell(orig_args):
if orig_args.filename is not None:
try:
with io.open(orig_args.filename, 'rb') as script:
for line in script:
args = [word.decode(sys_encoding) for word in shlex.split(line)]
parsed_args = parser.parse_args(args)
set_cli_colors(parsed_args)
                    parsed_args.func(parsed_args)
exit(0)
except:
err_exit()
elif not INTERACTIVE_CLI:
for line in sys.stdin.read().splitlines():
if len(line) > 0:
args = [word.decode('utf-8') for word in shlex.split(line.encode('utf-8'))]
parsed_args = parser.parse_args(args)
set_cli_colors(parsed_args)
parsed_args.func(parsed_args)
exit(0)
if state['interactive']:
return
state['interactive'] = True
    # WARNING: The readline setup below may not be platform-independent and
    # should be made so.
try:
import rlcompleter
readline.parse_and_bind("tab: complete")
readline.set_completer_delims("")
readline.set_completer(DXCLICompleter().complete)
except:
pass
while True:
# Reset the completer once we're done grabbing input
try:
if readline.get_completer() is None:
readline.set_completer(DXCLICompleter().complete)
readline.clear_history()
readline.read_history_file(os.path.expanduser('~/.dnanexus_config/.dx_history'))
except:
pass
try:
prompt = '> '
pwd_str = get_pwd()
if pwd_str is not None:
prompt = pwd_str + prompt
cmd = input(prompt)
except EOFError:
print("")
exit(0)
except KeyboardInterrupt:
print("")
continue
if cmd == '':
continue
try:
sys.argv[1:] = [word.decode('utf-8') for word in shlex.split(cmd.encode('utf-8'))]
args = parser.parse_args(sys.argv[1:])
set_cli_colors(args)
set_delim(args)
if args.func == clearenv:
args.interactive = True
args.func(args)
except StopIteration:
exit(0)
except BaseException as details:
if not isinstance(details, SystemExit):
print(str(details) + '\n')
def watch(args):
level_colors = {level: RED() for level in ("EMERG", "ALERT", "CRITICAL", "ERROR")}
level_colors.update({level: YELLOW() for level in ("WARNING", "STDERR")})
level_colors.update({level: GREEN() for level in ("NOTICE", "INFO", "DEBUG", "STDOUT")})
msg_callback, log_client = None, None
if args.get_stdout:
args.levels = ['STDOUT']
args.format = "{msg}"
args.job_info = False
elif args.get_stderr:
args.levels = ['STDERR']
args.format = "{msg}"
args.job_info = False
elif args.get_streams:
args.levels = ['STDOUT', 'STDERR']
args.format = "{msg}"
args.job_info = False
elif args.format is None:
if args.job_ids:
args.format = BLUE("{job_name} ({job})") + " {level_color}{level}" + ENDC() + " {msg}"
else:
args.format = BLUE("{job_name}") + " {level_color}{level}" + ENDC() + " {msg}"
if args.timestamps:
args.format = u"{timestamp} " + args.format
def msg_callback(message):
message['timestamp'] = str(datetime.datetime.fromtimestamp(message.get('timestamp', 0)/1000))
message['level_color'] = level_colors.get(message.get('level', ''), '')
message['job_name'] = log_client.seen_jobs[message['job']]['name'] if message['job'] in log_client.seen_jobs else message['job']
print(args.format.format(**message))
from dxpy.utils.job_log_client import DXJobLogStreamClient
input_params = {"numRecentMessages": args.num_recent_messages,
"recurseJobs": args.tree,
"tail": args.tail}
if args.levels:
input_params['levels'] = args.levels
if not re.match("^job-[0-9a-zA-Z]{24}$", args.jobid):
err_exit(args.jobid + " does not look like a DNAnexus job ID")
log_client = DXJobLogStreamClient(args.jobid, input_params=input_params, msg_callback=msg_callback,
msg_output_format=args.format, print_job_info=args.job_info)
# Note: currently, the client is synchronous and blocks until the socket is closed.
# If this changes, some refactoring may be needed below
try:
if not args.quiet:
print("Watching job %s%s. Press Ctrl+C to stop." % (args.jobid, (" and sub-jobs" if args.tree else "")), file=sys.stderr)
log_client.connect()
except Exception as details:
parser.exit(3, fill(str(details)) + '\n')
def ssh_config(args):
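    # Register (or, with --revoke, remove) an SSH public key on the user's account,
    # generating or reusing a local key pair as needed.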
user_id = try_call(dxpy.whoami)
if args.revoke:
dxpy.api.user_update(user_id, {"sshPublicKey": None})
print(fill("SSH public key has been revoked"))
else:
dnanexus_conf_dir = dxpy.config.get_user_conf_dir()
if not os.path.exists(dnanexus_conf_dir):
msg = "The DNAnexus configuration directory {d} does not exist. Use {c} to create it."
err_exit(msg.format(d=dnanexus_conf_dir, c=BOLD("dx login")))
print(fill("Select an SSH key pair to use when connecting to DNAnexus jobs. The public key will be saved to your " +
"DNAnexus account (readable only by you). The private key will remain on this computer.") + "\n")
key_dest = os.path.join(dnanexus_conf_dir, 'ssh_id')
pub_key_dest = key_dest + ".pub"
if os.path.exists(os.path.realpath(key_dest)) and os.path.exists(os.path.realpath(pub_key_dest)):
print(BOLD("dx") + " is already configured to use the SSH key pair at:\n {}\n {}".format(key_dest,
pub_key_dest))
if pick(["Use this SSH key pair", "Select or create another SSH key pair..."]) == 1:
os.remove(key_dest)
os.remove(pub_key_dest)
else:
update_pub_key(user_id, pub_key_dest)
return
elif os.path.exists(key_dest) or os.path.exists(pub_key_dest):
os.remove(key_dest)
os.remove(pub_key_dest)
keys = [k for k in glob.glob(os.path.join(os.path.expanduser("~/.ssh"), "*.pub")) if os.path.exists(k[:-4])]
choices = ['Generate a new SSH key pair using ssh-keygen'] + keys + ['Select another SSH key pair...']
choice = pick(choices, default=0)
if choice == 0:
try:
subprocess.check_call(['ssh-keygen', '-f', key_dest] + args.ssh_keygen_args)
except subprocess.CalledProcessError:
err_exit("Unable to generate a new SSH key pair", expected_exceptions=(subprocess.CalledProcessError, ))
else:
if choice == len(choices) - 1:
key_src = input('Enter the location of your SSH key: ')
pub_key_src = key_src + ".pub"
if os.path.exists(key_src) and os.path.exists(pub_key_src):
print("Using {} and {} as the key pair".format(key_src, pub_key_src))
elif key_src.endswith(".pub") and os.path.exists(key_src[:-4]) and os.path.exists(key_src):
key_src, pub_key_src = key_src[:-4], key_src
print("Using {} and {} as the key pair".format(key_src, pub_key_src))
else:
err_exit("Unable to find {k} and {k}.pub".format(k=key_src))
else:
key_src, pub_key_src = choices[choice][:-4], choices[choice]
os.symlink(key_src, key_dest)
os.symlink(pub_key_src, pub_key_dest)
update_pub_key(user_id, pub_key_dest)
def update_pub_key(user_id, pub_key_file):
with open(pub_key_file) as fh:
pub_key = fh.read()
dxpy.api.user_update(user_id, {"sshPublicKey": pub_key})
print("Updated public key for user {}".format(user_id))
print(fill("Your account has been configured for use with SSH. Use " + BOLD("dx run") + " with the --allow-ssh, " +
"--ssh, or --debug-on options to launch jobs and connect to them."))
def verify_ssh_config():
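    # Check that the locally configured public key matches the one stored on the
    # user's account; on mismatch, prompt whether to continue anyway.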
try:
with open(os.path.join(dxpy.config.get_user_conf_dir(), 'ssh_id.pub')) as fh:
user_desc = try_call(dxpy.api.user_describe, try_call(dxpy.whoami))
if 'sshPublicKey' not in user_desc:
raise DXError("User's SSH public key is not set")
if fh.read() != user_desc['sshPublicKey']:
raise DXError("Public key mismatch")
except Exception as e:
msg = RED("Warning:") + " Unable to verify configuration of your account for SSH connectivity: {}".format(e) + \
". SSH connection will likely fail. To set up your account for SSH, quit this command and run " + \
BOLD("dx ssh_config") + ". Continue with the current command?"
if not prompt_for_yn(fill(msg), default=False):
err_exit(expected_exceptions=(IOError, DXError))
def ssh(args, ssh_config_verified=False):
if not re.match("^job-[0-9a-zA-Z]{24}$", args.job_id):
err_exit(args.job_id + " does not look like a DNAnexus job ID")
job_desc = try_call(dxpy.describe, args.job_id)
if job_desc['state'] in ['done', 'failed', 'terminated']:
err_exit(args.job_id + " is in a terminal state, and you cannot connect to it")
if not ssh_config_verified:
verify_ssh_config()
sys.stdout.write("Waiting for {} to start...".format(args.job_id))
sys.stdout.flush()
while job_desc['state'] not in ['running', 'debug_hold']:
time.sleep(1)
job_desc = dxpy.describe(args.job_id)
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.write("Resolving job hostname and SSH host key...")
sys.stdout.flush()
host, host_key = None, None
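    # Poll the job description (1s intervals, up to 90 tries) until its hostname
    # and SSH host key become available.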
for i in range(90):
host = job_desc.get('host')
host_key = job_desc.get('sshHostKey') or job_desc['properties'].get('ssh_host_rsa_key')
if host and host_key:
break
else:
time.sleep(1)
job_desc = dxpy.describe(args.job_id)
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
if not (host and host_key):
        msg = "Cannot resolve hostname or host key for {}. Please check your permissions and run settings."
err_exit(msg.format(args.job_id))
known_hosts_file = os.path.expanduser('~/.dnanexus_config/ssh_known_hosts')
with open(known_hosts_file, 'a') as fh:
fh.write("{job_id}.dnanex.us {key}\n".format(job_id=args.job_id, key=host_key.rstrip()))
import socket
connected = False
sys.stdout.write("Checking connectivity to {}...".format(host))
sys.stdout.flush()
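    # Probe TCP port 22 (up to 12 attempts) before handing off to the ssh client.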
for i in range(12):
try:
socket.create_connection((host, 22), timeout=5)
connected = True
break
except Exception:
time.sleep(2)
sys.stdout.write(".")
sys.stdout.flush()
if connected:
sys.stdout.write(GREEN("OK") + "\n")
else:
msg = "Failed to connect to {h}. Please check your connectivity and try {cmd} again."
err_exit(msg.format(h=host, cmd=BOLD("dx ssh {}".format(args.job_id))),
exception=DXCLIError())
print("Connecting to", host)
ssh_args = ['ssh', '-i', os.path.expanduser('~/.dnanexus_config/ssh_id'),
'-o', 'HostKeyAlias={}.dnanex.us'.format(args.job_id),
'-o', 'UserKnownHostsFile={}'.format(known_hosts_file),
'-l', 'dnanexus', host]
ssh_args += args.ssh_args
exit_code = subprocess.call(ssh_args)
try:
job_desc = dxpy.describe(args.job_id)
if job_desc['state'] == 'running':
msg = "Job {job_id} is still running. Terminate now?".format(job_id=args.job_id)
if prompt_for_yn(msg, default=False):
dxpy.api.job_terminate(args.job_id)
print("Terminated {}.".format(args.job_id))
except default_expected_exceptions as e:
tip = "Unable to check the state of {job_id}. Please check it and use " + BOLD("dx terminate {job_id}") + \
" to stop it if necessary."
print(fill(tip.format(job_id=args.job_id)))
exit(exit_code)
def upgrade(args):
if len(args.args) == 0:
try:
greeting = dxpy.api.system_greet({'client': 'dxclient', 'version': 'v'+dxpy.TOOLKIT_VERSION}, auth=None)
if greeting['update']['available']:
recommended_version = greeting['update']['version']
else:
err_exit("Your SDK is up to date.", code=0)
except default_expected_exceptions as e:
print(e)
recommended_version = "current"
print("Upgrading to", recommended_version)
args.args = [recommended_version]
try:
cmd = os.path.join(os.environ['DNANEXUS_HOME'], 'build', 'upgrade.sh')
args.args.insert(0, cmd)
os.execv(cmd, args.args)
except:
err_exit()
def print_help(args):
if args.command_or_category is None:
parser_help.print_help()
elif args.command_or_category in parser_categories:
print('dx ' + args.command_or_category + ': ' + parser_categories[args.command_or_category]['desc'].lstrip())
print('\nCommands:\n')
for cmd in parser_categories[args.command_or_category]['cmds']:
print(' ' + cmd[0] + ' '*(18-len(cmd[0])) + fill(cmd[1], width_adjustment=-20, subsequent_indent=' '*20))
elif args.command_or_category not in parser_map:
parser.exit(1, 'Unrecognized command: ' + args.command_or_category + '\n')
elif args.command_or_category == 'export' and args.subcommand is not None:
if args.subcommand not in exporters:
parser.exit(1, 'Unsupported format for dx export: ' + args.subcommand + '\n')
new_args = argparse.Namespace()
setattr(new_args, 'exporter_args', ['-h'])
exporters[args.subcommand](new_args)
elif args.command_or_category == 'import' and args.subcommand is not None:
if args.subcommand not in importers:
parser.exit(1, 'Unsupported format for dx import: ' + args.subcommand + '\n')
new_args = argparse.Namespace()
setattr(new_args, 'importer_args', ['-h'])
importers[args.subcommand](new_args)
elif args.command_or_category == 'run':
if args.subcommand is None:
parser_map[args.command_or_category].print_help()
else:
print_run_help(args.subcommand)
elif args.subcommand is None:
parser_map[args.command_or_category].print_help()
elif (args.command_or_category + ' ' + args.subcommand) not in parser_map:
parser.exit(1, 'Unrecognized command and subcommand combination: ' + args.command_or_category + ' ' + args.subcommand + '\n')
else:
parser_map[args.command_or_category + ' ' + args.subcommand].print_help()
def exit_shell(args):
if state['interactive']:
raise StopIteration()
class runHelp(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if namespace.executable is None:
setattr(namespace, 'executable', '')
setattr(namespace, 'help', True)
class runInputHelp(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print_run_input_help()
class SetStagingEnv(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, 'host', 'stagingauth.dnanexus.com')
setattr(namespace, 'port', '443')
setattr(namespace, 'protocol', 'https')
setattr(namespace, 'staging', True)
set_api(protocol='https', host='stagingapi.dnanexus.com', port='443',
write=(not state['interactive'] or namespace.save))
class PrintDXVersion(argparse.Action):
# Prints to stdout instead of the default stderr that argparse
# uses (note: default changes to stdout in 3.4)
def __call__(self, parser, namespace, values, option_string=None):
print('dx v%s' % (dxpy.TOOLKIT_VERSION,))
parser.exit(0)
class PrintCategoryHelp(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print('usage: ' + parser.prog + ' --category CATEGORY')
print()
print(fill('List only the apps that belong to a particular category by providing a category name.'))
print()
print('Common category names include:')
print(' ' + '\n '.join(sorted(APP_CATEGORIES)))
parser.exit(0)
# Callable "action" class used by the "dx new user" parser for org-related
# arguments to allow us to distinguish between user-specified arguments and
# default arguments. If an argument has a `default` that is a bool, then its
# `nargs` will be 0.
#
# PRECONDITION: If an argument has a `default` that is a bool, then specifying
# that argument on the command-line must imply the logical opposite of its
# `default`.
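# Illustrative example (added for clarity, not part of the original code):
# "--no-email" below is declared with default=False, so specifying
# "--no-email" on the command-line stores True, i.e. the logical opposite of
# its default, as required by the precondition above.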
class DXNewUserOrgArgsAction(argparse.Action):
user_specified_opts = []
def __init__(self, option_strings, dest, required=False, default=None,
nargs=None, **kwargs):
if isinstance(default, bool):
nargs = 0
super(DXNewUserOrgArgsAction, self).__init__(
option_strings=option_strings, dest=dest, required=required,
default=default, nargs=nargs, **kwargs
)
# __call__ is only invoked when the user specifies this `option_string` on
# the command-line.
def __call__(self, parser, namespace, values, option_string):
DXNewUserOrgArgsAction.user_specified_opts.append(option_string)
if isinstance(self.default, bool):
setattr(namespace, self.dest, not self.default)
else:
setattr(namespace, self.dest, values)
class DXArgumentParser(argparse.ArgumentParser):
def _print_message(self, message, file=None):
if message:
pager(message, file=file)
def _check_value(self, action, value):
# Override argparse.ArgumentParser._check_value to eliminate "u'x'" strings in output that result from repr()
# calls in the original, and to line wrap the output
# converted value must be one of the choices (if specified)
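# Illustrative example (assumption, not from the original code): a mistyped
# top-level command such as "dx descrbe" fails this check, and if the spelling
# corrector maps it to a valid choice, the error message ends with, e.g.:
#   Did you mean: dx describe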
if action.choices is not None and value not in action.choices:
choices = fill("(choose from {})".format(", ".join(action.choices)))
msg = "invalid choice: {choice}\n{choices}".format(choice=value, choices=choices)
if len(args_list) == 1:
from dxpy.utils import spelling_corrector
suggestion = spelling_corrector.correct(value, action.choices)
if suggestion in action.choices:
msg += "\n\nDid you mean: " + BOLD("dx " + suggestion)
err = argparse.ArgumentError(action, msg)
if USING_PYTHON2:
err.message = err.message.encode(sys_encoding)
if err.argument_name is not None:
err.argument_name = err.argument_name.encode(sys_encoding)
raise err
def exit(self, status=0, message=None):
if isinstance(status, basestring):
message = message + status if message else status
status = 1
if message:
self._print_message(message, sys.stderr)
sys.exit(status)
def error(self, message):
if USING_PYTHON2:
message = message.decode(sys_encoding)
self.exit(2, '{help}\n{prog}: error: {msg}\n'.format(help=self.format_help(),
prog=self.prog,
msg=message))
def register_subparser(subparser, subparsers_action=None, categories=('other', )):
name = re.sub('^dx ', '', subparser.prog)
if subparsers_action is None:
subparsers_action = subparsers
if isinstance(categories, basestring):
categories = (categories, )
if subparsers_action == subparsers:
subparsers_action_name = subparsers_action._choices_actions[-1].dest
else:
subparsers_action_name = subparsers._choices_actions[-1].dest + ' ' + subparsers_action._choices_actions[-1].dest
parser_map[name] = subparser
# Some subparsers may not have help associated with them. Those that lack
# help, will not have an item in the _choices_actions list. So, to determine
# if the present subparser has a help, we'll get the name for this subparser,
# compare it to the name in the last _choices_actions list, and only if
# they match can we be confident that it has a help.
if subparsers_action_name == name:
_help = subparsers_action._choices_actions[-1].help
parser_categories['all']['cmds'].append((name, _help))
for category in categories:
parser_categories[category]['cmds'].append((name, _help))
parser = DXArgumentParser(description=DNANEXUS_LOGO() + ' Command-Line Client, API v%s, client v%s' % (dxpy.API_VERSION, dxpy.TOOLKIT_VERSION) + '\n\n' + fill('dx is a command-line client for interacting with the DNAnexus platform. You can log in, navigate, upload, organize and share your data, launch analyses, and more. For a quick tour of what the tool can do, see') + '\n\n https://wiki.dnanexus.com/Command-Line-Client/Quickstart\n\n' + fill('For a breakdown of dx commands by category, run "dx help".') + '\n\n' + fill('dx exits with exit code 3 if invalid input is provided or an invalid operation is requested, and exit code 1 if an internal error is encountered. The latter usually indicates a bug in dx; please report it at') + "\n\n https://github.com/dnanexus/dx-toolkit/issues",
formatter_class=argparse.RawTextHelpFormatter,
parents=[env_args],
usage='%(prog)s [-h] [--version] command ...')
parser.add_argument('--version', action=PrintDXVersion, nargs=0, help="show program's version number and exit")
subparsers = parser.add_subparsers(help=argparse.SUPPRESS, dest='command')
subparsers.metavar = 'command'
parser_login = subparsers.add_parser('login', help='Log in (interactively or with an existing API token)',
description='Log in interactively and acquire credentials. Use "--token" to log in with an existing API token.',
prog='dx login', parents=[env_args])
parser_login.add_argument('--token', help='Authentication token to use')
host_action = parser_login.add_argument('--host', help='Log into the given auth server host (port must also be given)')
port_action = parser_login.add_argument('--port', type=int, help='Log into the given auth server port (host must also be given)')
protocol_action = parser_login.add_argument('--protocol', help='Use the given protocol to contact auth server (by default, the correct protocol is guessed based on --port)')
host_action.help = port_action.help = protocol_action.help = argparse.SUPPRESS
parser_login.add_argument('--noprojects', dest='projects', help='Do not print available projects', action='store_false')
parser_login.add_argument('--save', help='Save token and other environment variables for future sessions',
action='store_true')
parser_login.add_argument('--timeout', default='30d',
help='Timeout for this login token (in seconds, or use suffix s, m, h, d, w, M, y)')
parser_login.add_argument('--staging', nargs=0, help=argparse.SUPPRESS, action=SetStagingEnv)
parser_login.set_defaults(staging=False, func=login)
register_subparser(parser_login, categories='session')
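# Illustrative usage of the login parser defined above (token value is hypothetical):
#   dx login --token $DX_API_TOKEN --save --timeout 7d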
parser_logout = subparsers.add_parser('logout',
help='Log out and remove credentials',
description='Log out and remove credentials',
prog='dx logout',
parents=[env_args])
parser_logout.add_argument('--host', help='Log out of the given auth server host (port must also be given)')
parser_logout.add_argument('--port', type=int, help='Log out of the given auth server port (host must also be given)')
parser_logout.set_defaults(func=logout)
register_subparser(parser_logout, categories='session')
parser_shell = subparsers.add_parser('sh', help='dx shell interpreter',
description='When run with no arguments, this command launches an interactive shell. Otherwise, it will load the filename provided and interpret each nonempty line as a command to execute. In both cases, the leading "dx" is expected to be omitted from each command or line.',
prog='dx sh',
parents=[env_args])
parser_shell.add_argument('filename', help='File of dx commands to execute', nargs='?', default=None)
parser_shell.set_defaults(func=shell)
register_subparser(parser_shell, categories='session')
parser_exit = subparsers.add_parser('exit', help='Exit out of the interactive shell',
description='Exit out of the interactive shell', prog='dx exit')
parser_exit.set_defaults(func=exit_shell)
register_subparser(parser_exit, categories='session')
parser_whoami = subparsers.add_parser('whoami', help='Print the username of the current user',
description='Print the username of the current user, ' +
'in the form "user-USERNAME"',
prog='dx whoami',
parents=[env_args])
parser_whoami.add_argument('--id', help='Print user ID instead of username', action='store_true', dest='user_id')
parser_whoami.set_defaults(func=whoami)
register_subparser(parser_whoami, categories='session')
parser_env = subparsers.add_parser('env', help='Print all environment variables in use',
description=fill('Prints all environment variables in use as they have been resolved from environment variables and configuration files. For more details, see') + '\n\nhttps://wiki.dnanexus.com/Command-Line-Client/Environment-Variables',
formatter_class=argparse.RawTextHelpFormatter, prog='dx env',
parents=[env_args])
parser_env.add_argument('--bash', help=fill('Prints a list of bash commands to export the environment variables', width_adjustment=-14),
action='store_true')
parser_env.add_argument('--dx-flags', help=fill('Prints the dx options to override the environment variables', width_adjustment=-14),
action='store_true')
parser_env.set_defaults(func=env)
register_subparser(parser_env, categories='session')
parser_setenv = subparsers.add_parser('setenv',
help='Sets environment variables for the session',
description='Sets environment variables for communication with the API server',
prog='dx setenv')
parser_setenv.add_argument('--noprojects', dest='projects', help='Do not print available projects', action='store_false')
parser_setenv.add_argument('--save', help='Save settings for future sessions. Only one set of settings can be saved at a time. Always set to true if login is run in a non-interactive session',
action='store_true')
parser_setenv.add_argument('--current', help='Do not prompt for new values and just save current settings for future sessions. Overrides --save to be true.',
action='store_true')
parser_setenv.set_defaults(func=setenv)
register_subparser(parser_setenv, categories='other')
parser_clearenv = subparsers.add_parser('clearenv', help='Clears all environment variables set by dx',
description='Clears all environment variables set by dx. More specifically, it removes local state stored in ~/.dnanexus_config/environment. Does not affect the environment variables currently set in your shell.', prog='dx clearenv')
parser_clearenv.add_argument('--reset', help='Reset dx environment variables to empty values. Use this to avoid interference between multiple dx sessions when using shell environment variables.',
action='store_true')
parser_clearenv.set_defaults(func=clearenv, interactive=False)
register_subparser(parser_clearenv, categories='session')
parser_invite = subparsers.add_parser('invite',
help='Invite another user to a project or make it public',
description='Invite a DNAnexus entity to a project. Use "PUBLIC" as the invitee and "VIEW" as the level to make the project public. If the invitee is not recognized as a DNAnexus ID or is not "PUBLIC", it will be treated as a username, i.e. "dx invite alice : VIEW" is equivalent to inviting the user with user ID "user-alice" to view your current default project.',
prog='dx invite',
parents=[env_args])
parser_invite.add_argument('invitee', help='Entity to invite')
parser_invite.add_argument('project', help='Project to invite the invitee to', default=':', nargs='?')
parser_invite.add_argument('level', help='Permissions level the new member should have',
choices=['VIEW', 'UPLOAD', 'CONTRIBUTE', 'ADMINISTER'], default='VIEW', nargs='?')
parser_invite.set_defaults(func=invite)
# parser_invite.completer = TODO
register_subparser(parser_invite, categories='other')
parser_uninvite = subparsers.add_parser('uninvite',
help='Revoke others\' permissions on a project you administer',
description='Revoke others\' permissions on a project you administer. Use "PUBLIC" as the entity to make the project no longer public. If the entity is not recognized as a DNAnexus ID or is not "PUBLIC", it will be treated as a username, i.e. "dx uninvite alice :" is equivalent to revoking the permissions of the user with user ID "user-alice" to your current default project.',
prog='dx uninvite',
parents=[env_args])
parser_uninvite.add_argument('entity', help='Entity to uninvite')
parser_uninvite.add_argument('project', help='Project to revoke permissions from', default=':', nargs='?')
parser_uninvite.set_defaults(func=uninvite)
register_subparser(parser_uninvite, categories='other')
parser_ls = subparsers.add_parser('ls', help='List folders and/or objects in a folder',
description='List folders and/or objects in a folder',
parents=[no_color_arg, delim_arg, env_args, stdout_args],
prog='dx ls')
parser_ls.add_argument('-a', '--all', help='show hidden files', action='store_true')
ls_output_args = parser_ls.add_mutually_exclusive_group()
ls_output_args.add_argument('-l', '--long', dest='verbose', help='Alias for "verbose"', action='store_true')
parser_ls.add_argument('--obj', help='show only objects', action='store_true')
parser_ls.add_argument('--folders', help='show only folders', action='store_true')
parser_ls.add_argument('--full', help='show full paths of folders', action='store_true')
ls_path_action = parser_ls.add_argument('path', help='Folder (possibly in another project) to list the contents of, default is the current directory in the current project. Syntax: projectID:/folder/path',
nargs='?', default='.')
ls_path_action.completer = DXPathCompleter()
parser_ls.set_defaults(func=ls)
register_subparser(parser_ls, categories='fs')
parser_tree = subparsers.add_parser('tree', help='List folders and objects in a tree',
description='List folders and objects in a tree',
parents=[no_color_arg, env_args],
prog='dx tree')
parser_tree.add_argument('-a', '--all', help='show hidden files', action='store_true')
parser_tree.add_argument('-l', '--long', help='use a long listing format', action='store_true')
tree_path_action = parser_tree.add_argument('path', help='Folder (possibly in another project) to list the contents of, default is the current directory in the current project. Syntax: projectID:/folder/path',
nargs='?', default='.')
tree_path_action.completer = DXPathCompleter(expected='folder')
parser_tree.set_defaults(func=tree)
register_subparser(parser_tree, categories='fs')
parser_pwd = subparsers.add_parser('pwd', help='Print current working directory',
description='Print current working directory',
prog='dx pwd',
parents=[env_args])
parser_pwd.set_defaults(func=pwd)
register_subparser(parser_pwd, categories='fs')
parser_select = subparsers.add_parser('select', help='List and select a project to switch to',
description='Interactively list and select a project to switch to. By default, only lists projects for which you have at least CONTRIBUTE permissions. Use --public to see the list of public projects.',
prog='dx select',
parents=[env_args])
select_project_action = parser_select.add_argument('project', help='Name or ID of a project to switch to; if not provided a list will be provided for you',
nargs='?', default=None)
select_project_action.completer = DXPathCompleter(expected='project', include_current_proj=False)
parser_select.add_argument('--name', help='Name of the project (wildcard patterns supported)')
parser_select.add_argument('--level', choices=['VIEW', 'UPLOAD', 'CONTRIBUTE', 'ADMINISTER'],
help='Minimum level of permissions expected', default='CONTRIBUTE')
parser_select.add_argument('--public', help='Include ONLY public projects (will automatically set --level to VIEW)',
action='store_true')
parser_select.set_defaults(func=select, save=False)
register_subparser(parser_select, categories='fs')
parser_cd = subparsers.add_parser('cd', help='Change the current working directory',
description='Change the current working directory', prog='dx cd',
parents=[env_args])
cd_path_action = parser_cd.add_argument('path', nargs='?', default='/',
help='Folder (possibly in another project) to which to change the current working directory, default is "/" in the current project')
cd_path_action.completer = DXPathCompleter(expected='folder')
parser_cd.set_defaults(func=cd)
register_subparser(parser_cd, categories='fs')
parser_cp = subparsers.add_parser('cp', help='Copy objects and/or folders between different projects',
formatter_class=argparse.RawTextHelpFormatter,
description=fill('Copy objects and/or folders between different projects. Folders will automatically be copied recursively. To specify which project to use as a source or destination, prepend the path or ID of the object/folder with the project ID or name and a colon.') + '''
EXAMPLES
''' + fill('The first example copies a gtable in a project called "FirstProj" to the current directory of the current project. The second example copies the object named "reads" in the current directory to the folder /folder/path in the project with ID "project-B0VK6F6gpqG6z7JGkbqQ000Q", and renames it to "newname".', width_adjustment=-2, subsequent_indent=' ') + '''
$ dx cp FirstProj:gtable-B0XBQFygpqGK8ZPjbk0Q000q .
$ dx cp reads project-B0VK6F6gpqG6z7JGkbqQ000Q:/folder/path/newname
''',
prog='dx cp',
parents=[env_args, all_arg])
cp_sources_action = parser_cp.add_argument('sources', help='Objects and/or folder names to copy', metavar='source',
nargs='+')
cp_sources_action.completer = DXPathCompleter()
parser_cp.add_argument('destination', help=fill('Folder into which to copy the sources or new pathname (if only one source is provided). Must be in a different project/container than all source paths.', width_adjustment=-15))
parser_cp.set_defaults(func=cp)
register_subparser(parser_cp, categories='fs')
parser_mv = subparsers.add_parser('mv', help='Move or rename objects and/or folders inside a project',
formatter_class=argparse.RawTextHelpFormatter,
description=fill('Move or rename data objects and/or folders inside a single project. To copy data between different projects, use \'dx cp\' instead.'),
prog='dx mv',
parents=[env_args, all_arg])
mv_sources_action = parser_mv.add_argument('sources', help='Objects and/or folder names to move', metavar='source',
nargs='+')
mv_sources_action.completer = DXPathCompleter()
parser_mv.add_argument('destination', help=fill('Folder into which to move the sources or new pathname (if only one source is provided). Must be in the same project/container as all source paths.', width_adjustment=-15))
parser_mv.set_defaults(func=mv)
register_subparser(parser_mv, categories='fs')
parser_mkdir = subparsers.add_parser('mkdir', help='Create a new folder',
description='Create a new folder', prog='dx mkdir',
parents=[env_args])
parser_mkdir.add_argument('-p', '--parents', help='no error if existing, create parent directories as needed',
action='store_true')
mkdir_paths_action = parser_mkdir.add_argument('paths', help='Paths to folders to create', metavar='path', nargs='+')
mkdir_paths_action.completer = DXPathCompleter(expected='folder')
parser_mkdir.set_defaults(func=mkdir)
register_subparser(parser_mkdir, categories='fs')
parser_rmdir = subparsers.add_parser('rmdir', help='Remove a folder',
description='Remove a folder', prog='dx rmdir',
parents=[env_args])
rmdir_paths_action = parser_rmdir.add_argument('paths', help='Paths to folders to remove', metavar='path', nargs='+')
rmdir_paths_action.completer = DXPathCompleter(expected='folder')
parser_rmdir.set_defaults(func=rmdir)
register_subparser(parser_rmdir, categories='fs')
parser_rm = subparsers.add_parser('rm', help='Remove data objects and folders',
description='Remove data objects and folders.', prog='dx rm',
parents=[env_args, all_arg])
rm_paths_action = parser_rm.add_argument('paths', help='Paths to remove', metavar='path', nargs='+')
rm_paths_action.completer = DXPathCompleter()
parser_rm.add_argument('-r', '--recursive', help='Recurse into a directory', action='store_true')
parser_rm.set_defaults(func=rm)
register_subparser(parser_rm, categories='fs')
# data
parser_describe = subparsers.add_parser('describe', help='Describe a remote object',
description=fill('Describe a DNAnexus entity. Use this command to describe data objects by name or ID, jobs, apps, users, organizations, etc. If using the "--json" flag, it will throw an error if more than one match is found (but if you would like a JSON array of the describe hashes of all matches, then provide the "--multi" flag). Otherwise, it will always display all results it finds.') + '\n\nNOTES:\n\n- ' + fill('The project found in the path is used as a HINT when you are using an object ID; you may still get a result if you have access to a copy of the object in some other project, but if it exists in the specified project, its description will be returned.') + '\n\n- ' + fill('When describing apps or applets, options marked as advanced inputs will be hidden unless --verbose is provided'),
formatter_class=argparse.RawTextHelpFormatter,
parents=[json_arg, no_color_arg, delim_arg, env_args],
prog='dx describe')
parser_describe.add_argument('--details', help='Include details of data objects', action='store_true')
parser_describe.add_argument('--verbose', help='Include all possible metadata', action='store_true')
parser_describe.add_argument('--name', help='Only print the matching names, one per line', action='store_true')
parser_describe.add_argument('--multi', help=fill('If the flag --json is also provided, then returns a JSON array of describe hashes of all matching results', width_adjustment=-24),
action='store_true')
describe_path_action = parser_describe.add_argument('path', help=fill('Object ID or path to an object (possibly in another project) to describe.', width_adjustment=-24))
describe_path_action.completer = DXPathCompleter()
parser_describe.set_defaults(func=describe)
register_subparser(parser_describe, categories=('data', 'metadata'))
parser_upload = subparsers.add_parser('upload', help='Upload file(s) or directory',
description='Upload local file(s) or directory. If "-" is provided, stdin will be used instead. By default, the filename will be used as its new name. If --path/--destination is provided with a path ending in a slash, the filename will be used, and the folder path will be used as a destination. If it does not end in a slash, then it will be used as the final name.',
parents=[parser_dataobject_args, stdout_args, env_args],
prog="dx upload")
upload_filename_action = parser_upload.add_argument('filename', nargs='+',
help='Local file or directory to upload ("-" indicates stdin input); provide multiple times to upload multiple files or directories')
upload_filename_action.completer = LocalCompleter()
parser_upload.add_argument('-o', '--output', help=argparse.SUPPRESS) # deprecated; equivalent to --path/--destination
parser_upload.add_argument('--path', '--destination',
help=fill('DNAnexus path to upload file(s) to (default uses current project and folder if not provided)', width_adjustment=-24),
nargs='?')
parser_upload.add_argument('-r', '--recursive', help='Upload directories recursively', action='store_true')
parser_upload.add_argument('--wait', help='Wait until the file has finished closing', action='store_true')
parser_upload.add_argument('--no-progress', help='Do not show a progress bar', dest='show_progress',
action='store_false', default=sys.stderr.isatty())
parser_upload.set_defaults(func=upload, mute=False)
register_subparser(parser_upload, categories='data')
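# Illustrative usage of the upload parser defined above (file and project are hypothetical):
#   dx upload reads.fastq --path project-xxxx:/data/ --wait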
parser_download = subparsers.add_parser('download', help='Download file(s)',
description='Download the contents of a file object or multiple objects. Use "-o -" to direct the output to stdout.',
prog='dx download',
parents=[env_args])
parser_download_paths_arg = parser_download.add_argument('paths', help='Data object ID or name, or folder to download',
nargs='+', metavar='path')
parser_download_paths_arg.completer = DXPathCompleter(classes=['file'])
parser_download.add_argument('-o', '--output', help='Local filename or directory to be used ("-" indicates stdout output); if not supplied or a directory is given, the object\'s name on the platform will be used, along with any applicable extensions')
parser_download.add_argument('-f', '--overwrite', help='Overwrite the local file if necessary', action='store_true')
parser_download.add_argument('-r', '--recursive', help='Download folders recursively', action='store_true')
parser_download.add_argument('-a', '--all', help='If multiple objects match the input, download all of them',
action='store_true')
parser_download.add_argument('--no-progress', help='Do not show a progress bar', dest='show_progress',
action='store_false', default=sys.stderr.isatty())
parser_download.set_defaults(func=download_or_cat)
register_subparser(parser_download, categories='data')
parser_make_download_url = subparsers.add_parser('make_download_url', help='Create a file download link for sharing',
description='Creates a pre-authenticated link that can be used to download a file without logging in.',
prog='dx make_download_url')
path_action = parser_make_download_url.add_argument('path', help='Data object ID or name to access')
path_action.completer = DXPathCompleter(classes=['file'])
parser_make_download_url.add_argument('--duration', help='Time for which the URL will remain valid (in seconds, or use suffix s, m, h, d, w, M, y). Default: 1 day')
parser_make_download_url.add_argument('--filename', help='Name that the server will instruct the client to save the file as (default is the filename)')
parser_make_download_url.set_defaults(func=make_download_url)
register_subparser(parser_make_download_url, categories='data')
parser_cat = subparsers.add_parser('cat', help='Print file(s) to stdout', prog='dx cat',
parents=[env_args])
cat_path_action = parser_cat.add_argument('path', help='File ID or name(s) to print to stdout', nargs='+')
cat_path_action.completer = DXPathCompleter(classes=['file'])
parser_cat.set_defaults(func=cat)
register_subparser(parser_cat, categories='data')
parser_head = subparsers.add_parser('head',
help='Print part of a file or gtable',
description='Print the first part of a file or a gtable. By default, prints the first 10 lines or rows, respectively. Additional query parameters can be provided in the case of gtables. The output for gtables is formatted for human-readability; to print rows in a machine-readable format, see "dx export tsv".',
parents=[no_color_arg, env_args],
prog='dx head')
parser_head.add_argument('-n', '--lines', type=int, metavar='N', help='Print the first N lines or rows (default 10)',
default=10)
head_gtable_args = parser_head.add_argument_group(title='GTable-specific options')
head_gtable_args.add_argument('-w', '--max-col-width', type=int, help='Maximum width of each column to display',
default=32)
head_gtable_args.add_argument('--starting', type=int, help='Specify starting row ID', default=0)
head_gtable_args.add_argument('--gri', nargs=3, metavar=('CHR', 'LO', 'HI'), help='Specify chromosome name, low coordinate, and high coordinate for Genomic Range Index')
head_gtable_args.add_argument('--gri-mode',
help='Specify the mode of the GRI query (\'overlap\' or \'enclose\'; default \'overlap\')',
default="overlap")
head_gtable_args.add_argument('--gri-name',
help='Override the default name of the Genomic Range Index (default: "gri")',
default="gri")
head_path_action = parser_head.add_argument('path', help='File or gtable ID or name to access')
head_path_action.completer = DXPathCompleter(classes=['file', 'gtable'])
parser_head.set_defaults(func=head)
register_subparser(parser_head, categories='data')
parser_import = subparsers.add_parser('import',
help='Import (convert and upload) a local table or genomic file',
description=fill('Import a local file to the DNAnexus platform as a GenomicTable.') + '\n\n' + fill('For more details on how to import from a particular format, run ') + '\n $ dx help import <format>' + '\n\nSupported formats:\n\n ' + '\n '.join(sorted(importers)),
formatter_class=argparse.RawTextHelpFormatter,
prog='dx import',
parents=[env_args])
parser_import.add_argument('format', help='Format to import from')
import_args_action = parser_import.add_argument('importer_args', help=fill('Arguments passed to the importer', width_adjustment=-24),
nargs=argparse.REMAINDER)
import_args_action.completer = LocalCompleter()
parser_import.set_defaults(func=dximport)
register_subparser(parser_import, categories='data')
parser_export = subparsers.add_parser('export',
help='Export (download and convert) a gtable into a local file',
description=fill('Export a GenomicTable into a local file with a particular file format.') + '\n\n' + fill('For more details on how to convert into a particular format, run ') + '\n $ dx help export <format>' + '\n\nSupported formats:\n\n ' + '\n '.join(sorted(exporters)),
formatter_class=argparse.RawTextHelpFormatter,
prog='dx export',
parents=[env_args])
parser_export.add_argument('format', help='Format to export to')
parser_export.add_argument('exporter_args', help=fill('Arguments passed to the exporter', width_adjustment=-24),
nargs=argparse.REMAINDER)
parser_export.set_defaults(func=export)
register_subparser(parser_export, categories='data')
from dxpy.scripts.dx_build_app import parser as build_parser
build_parser.prog = 'dx build'
build_parser.set_defaults(mode="applet")
parser_build = subparsers.add_parser('build', help='Upload and build a new applet/app',
description='Build an applet or app object from a local source directory. You can use ' + BOLD("dx-app-wizard") + ' to generate a skeleton directory with the necessary files.',
prog='dx build',
add_help=False,
parents=[build_parser, env_args]
)
parser_build.set_defaults(func=build)
register_subparser(parser_build, categories='exec')
parser_add = subparsers.add_parser('add', help='Add one or more items to a list',
description='Use this command with one of the available subcommands to perform various actions such as adding other users to the list of developers or authorized users of an app.',
prog='dx add')
subparsers_add = parser_add.add_subparsers(parser_class=DXArgumentParser)
subparsers_add.metavar = 'list_type'
register_subparser(parser_add, categories=())
parser_add_users = subparsers_add.add_parser('users', help='Add authorized users for an app',
description='Add users or orgs to the list of authorized users of an app. Published versions of the app will only be accessible to users represented by this list and to developers of the app. Unpublished versions are restricted to the developers.',
prog='dx add users', parents=[env_args])
parser_add_users.add_argument('app', help='Name or ID of an app').completer = DXAppCompleter(installed=True)
parser_add_users.add_argument('users', metavar='authorizedUser',
help='One or more users or orgs to add',
nargs='+')
parser_add_users.set_defaults(func=add_users)
register_subparser(parser_add_users, subparsers_action=subparsers_add, categories='exec')
parser_add_developers = subparsers_add.add_parser('developers', help='Add developers for an app',
description='Add users to the list of developers for an app. Developers are able to build and publish new versions of the app, and add or remove others from the list of developers and authorized users.',
prog='dx add developers', parents=[env_args])
parser_add_developers.add_argument('app', help='Name or ID of an app').completer = DXAppCompleter(installed=True)
parser_add_developers.add_argument('developers', metavar='developer', help='One or more users to add',
nargs='+')
parser_add_developers.set_defaults(func=add_developers)
register_subparser(parser_add_developers, subparsers_action=subparsers_add, categories='exec')
parser_add_stage = subparsers_add.add_parser('stage', help='Add a stage to a workflow',
description='Add a stage to a workflow. Default inputs for the stage can also be set at the same time.',
parents=[exec_input_args, stdout_args, env_args,
instance_type_arg],
prog='dx add stage')
parser_add_stage.add_argument('workflow', help='Name or ID of a workflow').completer = DXPathCompleter(classes=['workflow'])
parser_add_stage.add_argument('executable', help='Name or ID of an executable to add as a stage in the workflow').completer = MultiCompleter([DXAppCompleter(),
DXPathCompleter(classes=['applet'])])
parser_add_stage.add_argument('--alias', '--version', '--tag', dest='alias',
help='Tag or version of the app to add if the executable is an app (default: "default" if an app)')
parser_add_stage.add_argument('--name', help='Stage name')
add_stage_folder_args = parser_add_stage.add_mutually_exclusive_group()
add_stage_folder_args.add_argument('--output-folder', help='Path to the output folder for the stage (interpreted as an absolute path)')
add_stage_folder_args.add_argument('--relative-output-folder', help='A relative folder path for the stage (interpreted as relative to the workflow\'s output folder)')
parser_add_stage.set_defaults(func=workflow_cli.add_stage)
register_subparser(parser_add_stage, subparsers_action=subparsers_add, categories='workflow')
parser_add_member = subparsers_add.add_parser("member", help="Grant a user membership to an org", description="Grant a user membership to an org", prog="dx add member", parents=[stdout_args, env_args])
parser_add_member.add_argument("org_id", help="ID of the org")
parser_add_member.add_argument("username", help="Username")
parser_add_member.add_argument("--level", required=True, choices=["ADMIN", "MEMBER"], help="Org membership level that will be granted to the specified user")
parser_add_member.add_argument("--allow-billable-activities", default=False, action="store_true", help='Grant the specified user "createProjectsAndApps" in the org')
parser_add_member.add_argument("--no-app-access", default=True, action="store_false", dest="app_access", help='Disable "appAccess" for the specified user in the org')
parser_add_member.add_argument("--project-access", choices=["ADMINISTER", "CONTRIBUTE", "UPLOAD", "VIEW", "NONE"], default="CONTRIBUTE", help='The default implicit maximum permission the specified user will receive to projects explicitly shared with the org; default CONTRIBUTE')
parser_add_member.add_argument("--no-email", default=False, action="store_true", help="Disable org invitation email notification to the specified user")
parser_add_member.set_defaults(func=add_membership)
register_subparser(parser_add_member, subparsers_action=subparsers_add, categories="other")
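# Illustrative usage of the "dx add member" parser defined above (org and username are hypothetical):
#   dx add member org-demo alice --level MEMBER --project-access VIEW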
parser_list = subparsers.add_parser('list', help='Print the members of a list',
description='Use this command with one of the available subcommands to perform various actions such as printing the list of developers or authorized users of an app.',
prog='dx list')
subparsers_list = parser_list.add_subparsers(parser_class=DXArgumentParser)
subparsers_list.metavar = 'list_type'
register_subparser(parser_list, categories=())
parser_list_users = subparsers_list.add_parser('users', help='List authorized users for an app',
description='List the authorized users of an app. Published versions of the app will only be accessible to users represented by this list and to developers of the app. Unpublished versions are restricted to the developers',
prog='dx list users', parents=[env_args])
parser_list_users.add_argument('app', help='Name or ID of an app').completer = DXAppCompleter(installed=True)
parser_list_users.set_defaults(func=list_users)
register_subparser(parser_list_users, subparsers_action=subparsers_list, categories='exec')
parser_list_developers = subparsers_list.add_parser('developers', help='List developers for an app',
description='List the developers for an app. Developers are able to build and publish new versions of the app, and add or remove others from the list of developers and authorized users.',
prog='dx list developers', parents=[env_args])
parser_list_developers.add_argument('app', help='Name or ID of an app').completer = DXAppCompleter(installed=True)
parser_list_developers.set_defaults(func=list_developers)
register_subparser(parser_list_developers, subparsers_action=subparsers_list, categories='exec')
parser_list_stages = subparsers_list.add_parser('stages', help='List the stages in a workflow',
description='List the stages in a workflow.',
parents=[env_args],
prog='dx list stages')
parser_list_stages.add_argument('workflow', help='Name or ID of a workflow').completer = DXPathCompleter(classes=['workflow'])
parser_list_stages.set_defaults(func=workflow_cli.list_stages)
register_subparser(parser_list_stages, subparsers_action=subparsers_list, categories='workflow')
parser_remove = subparsers.add_parser('remove', help='Remove one or more items from a list',
description='Use this command with one of the available subcommands to perform various actions such as removing other users from the list of developers or authorized users of an app.',
prog='dx remove')
subparsers_remove = parser_remove.add_subparsers(parser_class=DXArgumentParser)
subparsers_remove.metavar = 'list_type'
register_subparser(parser_remove, categories=())
parser_remove_users = subparsers_remove.add_parser('users', help='Remove authorized users for an app',
description='Remove users or orgs from the list of authorized users of an app. Published versions of the app will only be accessible to users represented by this list and to developers of the app. Unpublished versions are restricted to the developers',
prog='dx remove users', parents=[env_args])
parser_remove_users.add_argument('app', help='Name or ID of an app').completer = DXAppCompleter(installed=True)
parser_remove_users.add_argument('users', metavar='authorizedUser',
help='One or more users or orgs to remove',
nargs='+')
parser_remove_users.set_defaults(func=remove_users)
register_subparser(parser_remove_users, subparsers_action=subparsers_remove, categories='exec')
parser_remove_developers = subparsers_remove.add_parser('developers', help='Remove developers for an app',
description='Remove users from the list of developers for an app. Developers are able to build and publish new versions of the app, and add or remove others from the list of developers and authorized users.',
prog='dx remove developers', parents=[env_args])
parser_remove_developers.add_argument('app', help='Name or ID of an app').completer = DXAppCompleter(installed=True)
parser_remove_developers.add_argument('developers', metavar='developer', help='One or more users to remove',
nargs='+')
parser_remove_developers.set_defaults(func=remove_developers)
register_subparser(parser_remove_developers, subparsers_action=subparsers_remove, categories='exec')
parser_remove_stage = subparsers_remove.add_parser('stage', help='Remove a stage from a workflow',
description='Remove a stage from a workflow. The stage should be indicated either by an integer (0-indexed, i.e. "0" for the first stage), or a stage ID.',
parents=[stdout_args, env_args],
prog='dx remove stage')
parser_remove_stage.add_argument('workflow', help='Name or ID of a workflow').completer = DXPathCompleter(classes=['workflow'])
parser_remove_stage.add_argument('stage', help='Stage (index or ID) of the workflow to remove')
parser_remove_stage.set_defaults(func=workflow_cli.remove_stage)
register_subparser(parser_remove_stage, subparsers_action=subparsers_remove, categories='workflow')
parser_remove_member = subparsers_remove.add_parser("member", help="Revoke the org membership of a user", description="Revoke the org membership of a user", prog="dx remove member", parents=[stdout_args, env_args])
parser_remove_member.add_argument("org_id", help="ID of the org")
parser_remove_member.add_argument("username", help="Username")
parser_remove_member.add_argument("--keep-explicit-project-permissions", default=True, action="store_false", dest="revoke_project_permissions", help="Disable revocation of explicit project permissions of the specified user to projects billed to the org; implicit project permissions (i.e. those granted to the specified user via his membership in this org) will always be revoked")
parser_remove_member.add_argument("--keep-explicit-app-permissions", default=True, action="store_false", dest="revoke_app_permissions", help="Disable revocation of explicit app developer and user permissions of the specified user to apps billed to the org; implicit app permissions (i.e. those granted to the specified user via his membership in this org) will always be revoked")
parser_remove_member.set_defaults(func=remove_membership)
register_subparser(parser_remove_member, subparsers_action=subparsers_remove, categories="other")
parser_update = subparsers.add_parser('update', help='Update certain types of metadata',
description='''
Use this command with one of the available targets listed below to update
metadata that is not covered by the other subcommands.''',
prog='dx update')
subparsers_update = parser_update.add_subparsers(parser_class=DXArgumentParser)
subparsers_update.metavar = 'target'
register_subparser(parser_update, categories=())
parser_update_workflow = subparsers_update.add_parser('workflow', help='Update the metadata for a workflow',
description='Update the metadata for an existing workflow',
parents=[stdout_args, env_args],
prog='dx update workflow')
parser_update_workflow.add_argument('workflow', help='Name or ID of a workflow').completer = DXPathCompleter(classes=['workflow'])
update_workflow_title_args = parser_update_workflow.add_mutually_exclusive_group()
update_workflow_title_args.add_argument('--title', help='Workflow title')
update_workflow_title_args.add_argument('--no-title', help='Unset the workflow title', action='store_true')
parser_update_workflow.add_argument('--summary', help='Workflow summary')
parser_update_workflow.add_argument('--description', help='Workflow description')
update_workflow_output_folder_args = parser_update_workflow.add_mutually_exclusive_group()
update_workflow_output_folder_args.add_argument('--output-folder', help='Default output folder for the workflow')
update_workflow_output_folder_args.add_argument('--no-output-folder', help='Unset the default output folder for the workflow', action='store_true')
parser_update_workflow.set_defaults(func=workflow_cli.update_workflow)
register_subparser(parser_update_workflow, subparsers_action=subparsers_update, categories='workflow')
parser_update_stage = subparsers_update.add_parser('stage', help='Update the metadata for a stage in a workflow',
description='Update the metadata for a stage in a workflow',
parents=[exec_input_args, stdout_args, env_args,
instance_type_arg],
prog='dx update stage')
parser_update_stage.add_argument('workflow', help='Name or ID of a workflow').completer = DXPathCompleter(classes=['workflow'])
parser_update_stage.add_argument('stage', help='Stage (index or ID) of the workflow to update')
parser_update_stage.add_argument('--executable', help='Name or ID of an executable to replace in the stage').completer = MultiCompleter([DXAppCompleter(),
DXPathCompleter(classes=['applet'])])
parser_update_stage.add_argument('--alias', '--version', '--tag', dest='alias',
help='Tag or version of the app to use if replacing the stage executable with an app (default: "default" if an app)')
parser_update_stage.add_argument('--force',
help='Whether to replace the executable even if the new one cannot be verified as compatible with the previous version',
action='store_true')
update_stage_name_args = parser_update_stage.add_mutually_exclusive_group()
update_stage_name_args.add_argument('--name', help='Stage name')
update_stage_name_args.add_argument('--no-name', help='Unset the stage name', action='store_true')
update_stage_folder_args = parser_update_stage.add_mutually_exclusive_group()
update_stage_folder_args.add_argument('--output-folder', help='Path to the output folder for the stage (interpreted as an absolute path)')
update_stage_folder_args.add_argument('--relative-output-folder', help='A relative folder path for the stage (interpreted as relative to the workflow\'s output folder)')
parser_update_stage.set_defaults(func=workflow_cli.update_stage)
register_subparser(parser_update_stage, subparsers_action=subparsers_update, categories='workflow')
parser_update_member = subparsers_update.add_parser("member", help="Update the membership of a user in an org", description="Update the membership of a user in an org", prog="dx update member", parents=[stdout_args, env_args])
parser_update_member.add_argument("org_id", help="ID of the org")
parser_update_member.add_argument("username", help="Username")
parser_update_member.add_argument("--level", required=True, choices=["ADMIN", "MEMBER"], help="The new org membership level of the specified user")
parser_update_member.add_argument("--allow-billable-activities", choices=["true", "false"], help='The new "createProjectsAndApps" membership permission of the specified user in the org')
parser_update_member.add_argument("--app-access", choices=["true", "false"], help='The new "appAccess" membership permission of the specified user in the org')
parser_update_member.add_argument("--project-access", choices=["ADMINISTER", "CONTRIBUTE", "UPLOAD", "VIEW", "NONE"], help='The new default implicit maximum permission the specified user will receive to projects explicitly shared with the org')
parser_update_member.set_defaults(func=update_membership)
register_subparser(parser_update_member, subparsers_action=subparsers_update, categories="other")
parser_install = subparsers.add_parser('install', help='Install an app',
description='Install an app by name. To see a list of apps you can install, hit <TAB> twice after "dx install" or run "' + BOLD('dx find apps') + '" to see a list of available apps.', prog='dx install',
parents=[env_args])
install_app_action = parser_install.add_argument('app', help='ID or name of app to install')
install_app_action.completer = DXAppCompleter(installed=False)
parser_install.set_defaults(func=install)
register_subparser(parser_install, categories='exec')
parser_uninstall = subparsers.add_parser('uninstall', help='Uninstall an app',
description='Uninstall an app by name.', prog='dx uninstall',
parents=[env_args])
uninstall_app_action = parser_uninstall.add_argument('app', help='ID or name of app to uninstall')
uninstall_app_action.completer = DXAppCompleter(installed=True)
parser_uninstall.set_defaults(func=uninstall)
register_subparser(parser_uninstall, categories='exec')
parser_run = subparsers.add_parser('run', help='Run an applet, app, or workflow', add_help=False,
description=(fill('Run an applet, app, or workflow. To see a list of executables you can run, hit <TAB> twice after "dx run" or run "' + BOLD('dx find apps') + '" to see a list of available apps.') + '\n\n' + fill('If any inputs are required but not specified, an interactive mode for selecting inputs will be launched. Inputs can be set in multiple ways. Run "' + BOLD('dx run --input-help') + '" for more details.') + '\n\n' + fill('Run "' + BOLD('dx run --instance-type-help') + '" to see a list of specifications for computers available to run executables.')),
prog='dx run',
formatter_class=argparse.RawTextHelpFormatter,
parents=[exec_input_args, stdout_args, env_args, extra_args,
instance_type_arg])
run_executable_action = parser_run.add_argument('executable',
help=fill('Name or ID of an applet, app, or workflow to run; must be provided if --clone is not set', width_adjustment=-24),
nargs="?", default="")
run_executable_action.completer = MultiCompleter([DXAppCompleter(),
DXPathCompleter(classes=['applet', 'workflow'], visibility="visible")])
parser_run.add_argument('-d', '--depends-on',
help=fill('ID of job, analysis, or data object that must be in the "done" or ' +
'"closed" state, as appropriate, before this executable can be run; ' +
'repeat as necessary (e.g. "--depends-on id1 ... --depends-on idN"). ' +
'Cannot be supplied when running workflows',
width_adjustment=-24),
action='append', type=str)
parser_run.add_argument('-h', '--help', help='show this help message and exit', nargs=0, action=runHelp)
parser_run.add_argument('--clone', help=fill('Job or analysis ID or name to use as a source of default options (will use the exact same executable ID, destination project and folder, job input, instance type requests, and a similar name unless explicitly overridden by command-line arguments)', width_adjustment=-24))
parser_run.add_argument('--alias', '--version', dest='alias',
help=fill('Alias (tag) or version of the app to run (default: "default" if an app)', width_adjustment=-24))
parser_run.add_argument('--destination', '--folder', metavar='PATH', dest='folder', help=fill('The full project:folder path in which to output the results. By default, the current working directory will be used.', width_adjustment=-24))
parser_run.add_argument('--project', metavar='PROJECT',
help=fill('Project name or ID in which to run the executable. This can also ' +
'be specified together with the output folder in --destination.',
width_adjustment=-24))
parser_run.add_argument('--stage-output-folder', metavar=('STAGE_ID', 'FOLDER'),
help=fill('A stage identifier (ID, name, or index), and a folder path to ' +
'use as its output folder',
width_adjustment=-24),
nargs=2,
action='append',
default=[])
parser_run.add_argument('--stage-relative-output-folder', metavar=('STAGE_ID', 'FOLDER'),
help=fill('A stage identifier (ID, name, or index), and a relative folder ' +
'path to the workflow output folder to use as the output folder',
width_adjustment=-24),
nargs=2,
action='append',
default=[])
parser_run.add_argument('--rerun-stage', metavar='STAGE_ID', dest='rerun_stages',
help=fill('A stage (using its ID, name, or index) to rerun, or "*" to ' +
'indicate all stages should be rerun; repeat as necessary',
width_adjustment=-24),
action='append')
parser_run.add_argument('--name', help=fill('Name for the job (default is the app or applet name)', width_adjustment=-24))
parser_run.add_argument('--property', dest='properties', metavar='KEY=VALUE',
help=(fill('Key-value pair to add as a property; repeat as necessary,',
width_adjustment=-24) + '\n' +
fill('e.g. "--property key1=val1 --property key2=val2"',
width_adjustment=-24, initial_indent=' ', subsequent_indent=' ',
break_on_hyphens=False)),
action='append')
parser_run.add_argument('--tag', metavar='TAG', dest='tags', help=fill('Tag for the resulting execution; repeat as necessary,', width_adjustment=-24) + '\n' + fill('e.g. "--tag tag1 --tag tag2"', width_adjustment=-24, break_on_hyphens=False, initial_indent=' ', subsequent_indent=' '), action='append')
parser_run.add_argument('--delay-workspace-destruction',
help=fill('Whether to keep the job\'s temporary workspace around for debugging purposes for 3 days after it succeeds or fails', width_adjustment=-24),
action='store_true')
parser_run.add_argument('--priority',
choices=['normal', 'high'],
help='Request a scheduling priority for all resulting jobs')
parser_run.add_argument('-y', '--yes', dest='confirm', help='Do not ask for confirmation', action='store_false')
parser_run.add_argument('--wait', help='Wait until the job is done before returning', action='store_true')
parser_run.add_argument('--watch', help="Watch the job after launching it; sets --priority high", action='store_true')
parser_run.add_argument('--allow-ssh', action='append', nargs='?', metavar='ADDRESS',
help=fill('Configure the job to allow SSH access; sets --priority high. If an argument is ' +
'supplied, it is interpreted as an IP or hostname mask to allow connections from, ' +
'e.g. "--allow-ssh 1.2.3.4 --allow-ssh berkeley.edu"',
width_adjustment=-24))
parser_run.add_argument('--ssh',
help=fill("Configure the job to allow SSH access and connect to it after launching; " +
"sets --priority high",
width_adjustment=-24),
action='store_true')
parser_run.add_argument('--debug-on', action='append', choices=['AppError', 'AppInternalError', 'ExecutionError'],
help=fill("Configure the job to hold for debugging when any of the listed errors occur",
width_adjustment=-24))
parser_run.add_argument('--input-help',
help=fill('Print help and examples for how to specify inputs',
width_adjustment=-24),
action=runInputHelp, nargs=0)
parser_run.set_defaults(func=run, verbose=False, help=False, details=None,
stage_instance_types=None, stage_folders=None)
register_subparser(parser_run, categories='exec')
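# Illustrative usage of the run parser defined above (applet and project names are
# hypothetical); all flags shown are defined in this block:
#   dx run my_applet --destination project-xxxx:/results --name "test run" --tag nightly --watch -y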
parser_watch = subparsers.add_parser('watch', help='Watch logs of a job and its subjobs', prog='dx watch',
description='Monitors logging output from a running job',
parents=[env_args, no_color_arg])
parser_watch.add_argument('jobid', help='ID of the job to watch')
# .completer = TODO
parser_watch.add_argument('-n', '--num-recent-messages', help='Number of recent messages to get',
type=int, default=1024*256)
parser_watch.add_argument('--tree', help='Include the entire job tree', action='store_true')
parser_watch.add_argument('-l', '--levels', action='append', choices=["EMERG", "ALERT", "CRITICAL", "ERROR", "WARNING",
"NOTICE", "INFO", "DEBUG", "STDERR", "STDOUT"])
parser_watch.add_argument('--get-stdout', help='Extract stdout only from this job', action='store_true')
parser_watch.add_argument('--get-stderr', help='Extract stderr only from this job', action='store_true')
parser_watch.add_argument('--get-streams', help='Extract only stdout and stderr from this job', action='store_true')
parser_watch.add_argument('--no-timestamps', help='Omit timestamps from messages', action='store_false',
dest='timestamps')
parser_watch.add_argument('--job-ids', help='Print job ID in each message', action='store_true')
parser_watch.add_argument('--no-job-info', help='Omit job info and status updates', action='store_false',
dest='job_info')
parser_watch.add_argument('-q', '--quiet', help='Do not print extra info messages', action='store_true')
parser_watch.add_argument('-f', '--format', help='Message format. Available fields: job, level, msg, date')
parser_watch.add_argument('--no-wait', '--no-follow', action='store_false', dest='tail',
help='Exit after the first new message is received, instead of waiting for all logs')
parser_watch.set_defaults(func=watch)
register_subparser(parser_watch, categories='exec')
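# Illustrative usage of the watch parser defined above (job ID is hypothetical):
#   dx watch job-xxxx --tree --levels ERROR --levels STDERR --no-timestamps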
parser_ssh_config = subparsers.add_parser('ssh_config', help='Configure SSH keys for your DNAnexus account',
description='Configure SSH access credentials for your DNAnexus account',
prog='dx ssh_config',
parents=[env_args])
parser_ssh_config.add_argument('ssh_keygen_args', help='Command-line arguments to pass to ssh-keygen',
nargs=argparse.REMAINDER)
parser_ssh_config.add_argument('--revoke', help='Revoke SSH public key associated with your DNAnexus account; you will no longer be able to SSH into any jobs.', action='store_true')
parser_ssh_config.set_defaults(func=ssh_config)
register_subparser(parser_ssh_config, categories='exec')
parser_ssh = subparsers.add_parser('ssh', help='Connect to a running job via SSH',
description='Use an SSH client to connect to a job being executed on the DNAnexus ' +
'platform. The job must be launched using "dx run --allow-ssh" or ' +
'equivalent API options. Use "dx ssh_config" or the Profile page on ' +
'the DNAnexus website to configure SSH for your DNAnexus account.',
prog='dx ssh',
parents=[env_args])
parser_ssh.add_argument('job_id', help='Name of job to connect to')
parser_ssh.add_argument('ssh_args', help='Command-line arguments to pass to the SSH client', nargs=argparse.REMAINDER)
parser_ssh.set_defaults(func=ssh)
register_subparser(parser_ssh, categories='exec')
parser_terminate = subparsers.add_parser('terminate', help='Terminate job(s)',
description='Terminate a job or jobs that have not yet finished',
prog='dx terminate',
parents=[env_args])
parser_terminate.add_argument('jobid', help='ID of the job to terminate', nargs='+')
parser_terminate.set_defaults(func=terminate)
parser_map['terminate'] = parser_terminate
parser_categories['all']['cmds'].append((subparsers._choices_actions[-1].dest, subparsers._choices_actions[-1].help))
parser_categories['exec']['cmds'].append((subparsers._choices_actions[-1].dest, subparsers._choices_actions[-1].help))
parser_rmproject = subparsers.add_parser('rmproject', help='Delete a project',
description='Delete projects and all their associated data',
prog='dx rmproject',
parents=[env_args])
projects_action = parser_rmproject.add_argument('projects', help='Projects to remove', metavar='project', nargs='+')
projects_action.completer = DXPathCompleter(expected='project', include_current_proj=True)
parser_rmproject.add_argument('-y', '--yes', dest='confirm', help='Do not ask for confirmation', action='store_false')
parser_rmproject.add_argument('-q', '--quiet', help='Do not print purely informational messages', action='store_true')
parser_rmproject.set_defaults(func=rmproject)
register_subparser(parser_rmproject, categories='fs')
parser_new = subparsers.add_parser('new', help='Create a new project or data object',
description='Use this command with one of the available subcommands (classes) to create a new project or data object from scratch. Not all data types are supported. See \'dx upload\' for files and \'dx build\' for applets.',
prog="dx new")
subparsers_new = parser_new.add_subparsers(parser_class=DXArgumentParser)
subparsers_new.metavar = 'class'
register_subparser(parser_new, categories='data')
parser_new_user = subparsers_new.add_parser("user", help="Create a new user account", description="Create a new user account", parents=[stdout_args, env_args], prog="dx new user")
parser_new_user_user_opts = parser_new_user.add_argument_group("User options")
parser_new_user_user_opts.add_argument("-u", "--username", required=True, help="Username")
parser_new_user_user_opts.add_argument("--email", required=True, help="Email address")
parser_new_user_user_opts.add_argument("--first", help="First name")
parser_new_user_user_opts.add_argument("--middle", help="Middle name")
parser_new_user_user_opts.add_argument("--last", help="Last name")
parser_new_user_user_opts.add_argument("--token-duration", type=int, help="Time duration (ms) for which the newly generated auth token for the new user will be valid")
parser_new_user_user_opts.add_argument("--occupation", help="Occupation")
parser_new_user_org_opts = parser_new_user.add_argument_group("Org options", "Optionally invite the new user to an org with the specified parameters")
parser_new_user_org_opts.add_argument("--org", help="ID of the org")
parser_new_user_org_opts.add_argument("--level", choices=["ADMIN", "MEMBER"], default="MEMBER", action=DXNewUserOrgArgsAction, help="Org membership level that will be granted to the new user; default MEMBER")
parser_new_user_org_opts.add_argument("--set-bill-to", default=False, action=DXNewUserOrgArgsAction, help='Set the default "billTo" field of the new user to the org; implies --allow-billable-activities')
parser_new_user_org_opts.add_argument("--allow-billable-activities", default=False, action=DXNewUserOrgArgsAction, help='Grant the new user "createProjectsAndApps" in the org')
parser_new_user_org_opts.add_argument("--no-app-access", default=True, action=DXNewUserOrgArgsAction, dest="app_access", help='Disable "appAccess" for the new user in the org')
parser_new_user_org_opts.add_argument("--project-access", choices=["ADMINISTER", "CONTRIBUTE", "UPLOAD", "VIEW", "NONE"], default="CONTRIBUTE", action=DXNewUserOrgArgsAction, help='The "projectAccess" to grant the new user in the org; default CONTRIBUTE')
parser_new_user_org_opts.add_argument("--no-email", default=False, action=DXNewUserOrgArgsAction, help="Disable org invitation email notification to the new user")
parser_new_user.set_defaults(func=new_user)
register_subparser(parser_new_user, subparsers_action=subparsers_new,
categories="other")
parser_new_project = subparsers_new.add_parser('project', help='Create a new project',
description='Create a new project',
parents=[stdout_args, env_args],
prog='dx new project')
parser_new_project.add_argument('name', help='Name of the new project', nargs='?')
parser_new_project.add_argument('-s', '--select', help='Select the new project as current after creating',
action='store_true')
parser_new_project.set_defaults(func=new_project)
register_subparser(parser_new_project, subparsers_action=subparsers_new, categories='fs')
parser_new_record = subparsers_new.add_parser('record', help='Create a new record',
description='Create a new record',
parents=[parser_dataobject_args, parser_single_dataobject_output_args,
stdout_args, env_args],
formatter_class=argparse.RawTextHelpFormatter,
prog='dx new record')
init_action = parser_new_record.add_argument('--init', help='Path to record from which to initialize all metadata')
parser_new_record.add_argument('--close', help='Close the record immediately after creating it', action='store_true')
init_action.completer = DXPathCompleter(classes=['record'])
parser_new_record.set_defaults(func=new_record)
register_subparser(parser_new_record, subparsers_action=subparsers_new, categories='fs')
parser_new_workflow = subparsers_new.add_parser('workflow', help='Create a new workflow',
description='Create a new workflow',
parents=[parser_dataobject_args, parser_single_dataobject_output_args,
stdout_args, env_args],
formatter_class=argparse.RawTextHelpFormatter,
prog='dx new workflow')
parser_new_workflow.add_argument('--title', help='Workflow title')
parser_new_workflow.add_argument('--summary', help='Workflow summary')
parser_new_workflow.add_argument('--description', help='Workflow description')
parser_new_workflow.add_argument('--output-folder', help='Default output folder for the workflow')
init_action = parser_new_workflow.add_argument('--init', help=fill('Path to workflow or an analysis ID from which to initialize all metadata', width_adjustment=-24))
init_action.completer = DXPathCompleter(classes=['workflow'])
parser_new_workflow.set_defaults(func=workflow_cli.new_workflow)
register_subparser(parser_new_workflow, subparsers_action=subparsers_new, categories='workflow')
parser_new_gtable = subparsers_new.add_parser('gtable', add_help=False, #help='Create a new gtable',
description='Create a new gtable from scratch. See \'dx import\' for importing special file formats (e.g. csv, fastq) into GenomicTables.',
parents=[parser_dataobject_args, parser_single_dataobject_output_args,
stdout_args, env_args],
formatter_class=argparse.RawTextHelpFormatter,
prog='dx new gtable')
parser_new_gtable.add_argument('--columns',
help=fill('Comma-separated list of column names to use, e.g. "col1,col2,col3"; columns with non-string types can be specified using "name:type" syntax, e.g. "col1:int,col2:boolean". If not given, the first line of the file will be used to infer column names.', width_adjustment=-24),
required=True)
new_gtable_indices_args = parser_new_gtable.add_mutually_exclusive_group()
new_gtable_indices_args.add_argument('--gri', nargs=3, metavar=('CHR', 'LO', 'HI'),
help=fill('Specify column names to be used as chromosome, lo, and hi columns for a genomic range index (name will be set to "gri"); will also add the type "gri"', width_adjustment=-24))
new_gtable_indices_args.add_argument('--indices', help='JSON for specifying any other indices')
parser_new_gtable.set_defaults(func=new_gtable)
#parser_new_gtable.completer = DXPathCompleter(classes=['gtable'])
register_subparser(parser_new_gtable, subparsers_action=subparsers_new, categories='fs')
parser_get_details = subparsers.add_parser('get_details', help='Get details of a data object',
description='Get the JSON details of a data object.', prog="dx get_details",
parents=[env_args])
parser_get_details.add_argument('path', help='ID or path to data object to get details for').completer = DXPathCompleter()
parser_get_details.set_defaults(func=get_details)
register_subparser(parser_get_details, categories='metadata')
parser_set_details = subparsers.add_parser('set_details', help='Set details on a data object',
description='Set the JSON details of a data object.', prog="dx set_details",
parents=[env_args, all_arg])
parser_set_details.add_argument('path', help='ID or path to data object to modify').completer = DXPathCompleter()
parser_set_details.add_argument('details', help='JSON to store as details', nargs='?')
parser_set_details.add_argument('-f', '--details-file', help='Path to local file containing JSON to store as details')
parser_set_details.set_defaults(func=set_details)
register_subparser(parser_set_details, categories='metadata')
parser_set_visibility = subparsers.add_parser('set_visibility', help='Set visibility on a data object',
description='Set visibility on a data object.', prog='dx set_visibility',
parents=[env_args, all_arg])
parser_set_visibility.add_argument('path', help='ID or path to data object to modify').completer = DXPathCompleter()
parser_set_visibility.add_argument('visibility', choices=['hidden', 'visible'],
help='Visibility that the object should have')
parser_set_visibility.set_defaults(func=set_visibility)
register_subparser(parser_set_visibility, categories='metadata')
parser_add_types = subparsers.add_parser('add_types', help='Add types to a data object',
description='Add types to a data object. See https://wiki.dnanexus.com/pages/Types/ for a list of DNAnexus types.',
prog='dx add_types',
parents=[env_args, all_arg])
parser_add_types.add_argument('path', help='ID or path to data object to modify').completer = DXPathCompleter()
parser_add_types.add_argument('types', nargs='+', metavar='type', help='Types to add')
parser_add_types.set_defaults(func=add_types)
register_subparser(parser_add_types, categories='metadata')
parser_remove_types = subparsers.add_parser('remove_types', help='Remove types from a data object',
description='Remove types from a data object. See https://wiki.dnanexus.com/pages/Types/ for a list of DNAnexus types.',
prog='dx remove_types',
parents=[env_args, all_arg])
parser_remove_types.add_argument('path', help='ID or path to data object to modify').completer = DXPathCompleter()
parser_remove_types.add_argument('types', nargs='+', metavar='type', help='Types to remove')
parser_remove_types.set_defaults(func=remove_types)
register_subparser(parser_remove_types, categories='metadata')
parser_tag = subparsers.add_parser('tag', help='Tag a project, data object, or execution', prog='dx tag',
description='Tag a project, data object, or execution. Note that a project context must be either set or specified for data object IDs or paths.',
parents=[env_args, all_arg])
parser_tag.add_argument('path', help='ID or path to project, data object, or execution to modify').completer = DXPathCompleter()
parser_tag.add_argument('tags', nargs='+', metavar='tag', help='Tags to add')
parser_tag.set_defaults(func=add_tags)
register_subparser(parser_tag, categories='metadata')
parser_untag = subparsers.add_parser('untag', help='Untag a project, data object, or execution', prog='dx untag',
description='Untag a project, data object, or execution. Note that a project context must be either set or specified for data object IDs or paths.',
parents=[env_args, all_arg])
parser_untag.add_argument('path', help='ID or path to project, data object, or execution to modify').completer = DXPathCompleter()
parser_untag.add_argument('tags', nargs='+', metavar='tag', help='Tags to remove')
parser_untag.set_defaults(func=remove_tags)
register_subparser(parser_untag, categories='metadata')
parser_rename = subparsers.add_parser('rename',
help='Rename a project or data object',
description='Rename a project or data object. To rename folders, use \'dx mv\' instead. Note that a project context must be either set or specified to rename a data object. To specify a project or a project context, append a colon character ":" after the project ID or name.',
prog='dx rename',
parents=[env_args, all_arg])
path_action = parser_rename.add_argument('path', help='Path to project or data object to rename')
path_action.completer = DXPathCompleter(include_current_proj=True)
parser_rename.add_argument('name', help='New name')
parser_rename.set_defaults(func=rename)
register_subparser(parser_rename, categories='metadata')
parser_set_properties = subparsers.add_parser('set_properties', help='Set properties of a project, data object, or execution',
description='Set properties of a project, data object, or execution. Note that a project context must be either set or specified for data object IDs or paths.', prog='dx set_properties',
parents=[env_args, all_arg])
parser_set_properties.add_argument('path', help='ID or path to project, data object, or execution to modify').completer = DXPathCompleter()
parser_set_properties.add_argument('properties', nargs='+', metavar='propertyname=value',
help='Key-value pairs of property names and their new values')
parser_set_properties.set_defaults(func=set_properties)
register_subparser(parser_set_properties, categories='metadata')
parser_unset_properties = subparsers.add_parser('unset_properties', help='Unset properties of a project, data object, or execution',
description='Unset properties of a project, data object, or execution. Note that a project context must be either set or specified for data object IDs or paths.',
prog='dx unset_properties',
parents=[env_args, all_arg])
path_action = parser_unset_properties.add_argument('path', help='ID or path to project, data object, or execution to modify')
path_action.completer = DXPathCompleter()
parser_unset_properties.add_argument('properties', nargs='+', metavar='propertyname', help='Property names to unset')
parser_unset_properties.set_defaults(func=unset_properties)
register_subparser(parser_unset_properties, categories='metadata')
parser_close = subparsers.add_parser('close', help='Close data object(s)',
description='Close a remote data object or set of objects.',
prog='dx close',
parents=[env_args, all_arg])
parser_close.add_argument('path', help='Path to a data object to close', nargs='+').completer = DXPathCompleter()
parser_close.add_argument('--wait', help='Wait for the object(s) to close', action='store_true')
parser_close.set_defaults(func=close)
register_subparser(parser_close, categories=('data', 'metadata'))
parser_wait = subparsers.add_parser('wait', help='Wait for data object(s) to close or job(s) to finish',
description='Polls the state of specified data object(s) or job(s) until they are all in the desired state. Waits until the "closed" state for a data object, and for any terminal state for a job ("terminated", "failed", or "done"). Exits with a non-zero code if a job reaches a terminal state that is not "done".',
prog='dx wait',
parents=[env_args])
path_action = parser_wait.add_argument('path', help='Path to a data object or job ID to wait for', nargs='+')
path_action.completer = DXPathCompleter()
parser_wait.set_defaults(func=wait)
register_subparser(parser_wait, categories=('data', 'metadata', 'exec'))
parser_get = subparsers.add_parser('get', help='Download records, applets, and files',
description='Download the contents of some types of data (records, applets, and files). For gtables, see "dx export". Downloading an applet will attempt to reconstruct a source directory that can be used to rebuild the app with "dx build". Use "-o -" to direct the output to stdout.',
prog='dx get',
parents=[env_args])
parser_get.add_argument('path', help='Data object ID or name to access').completer = DXPathCompleter(classes=['file', 'record', 'applet'])
parser_get.add_argument('-o', '--output', help='local file path where the data is to be saved ("-" indicates stdout output for objects of class file and record). If not supplied, the object\'s name on the platform will be used, along with any applicable extensions. For applets, if OUTPUT does not exist, an applet source directory will be created there; if OUTPUT is an existing directory, a new directory with the applet\'s name will be created inside it.')
parser_get.add_argument('--no-ext', help='If -o is not provided, do not add an extension to the filename', action='store_true')
parser_get.add_argument('-f', '--overwrite', help='Overwrite the local file if necessary', action='store_true')
parser_get.set_defaults(func=get)
register_subparser(parser_get, categories='data')
parser_find = subparsers.add_parser('find', help='Search functionality over various DNAnexus entities',
description='Search functionality over various DNAnexus entities.',
prog='dx find')
subparsers_find = parser_find.add_subparsers(parser_class=DXArgumentParser)
subparsers_find.metavar = 'category'
register_subparser(parser_find, categories=())
parser_find_apps = subparsers_find.add_parser('apps', help='List available apps',
description='Finds apps with the given search parameters. Use --category to restrict by a category; common categories are available as tab completions and can be listed with --category-help.',
parents=[stdout_args, json_arg, delim_arg, env_args],
prog='dx find apps')
parser_find_apps.add_argument('--name', help='Name of the app')
parser_find_apps.add_argument('--category', help='Category of the app').completer = ListCompleter(APP_CATEGORIES)
parser_find_apps.add_argument('--category-help',
help='Print a list of common app categories',
nargs=0,
action=PrintCategoryHelp)
parser_find_apps.add_argument('-a', '--all', help='Return all versions of each app', action='store_true')
parser_find_apps.add_argument('--unpublished', help='Return only unpublished apps (if omitted, returns only published apps)', action='store_true')
parser_find_apps.add_argument('--installed', help='Return only installed apps', action='store_true')
parser_find_apps.add_argument('--billed-to', help='User or organization responsible for the app')
parser_find_apps.add_argument('--creator', help='Creator of the app version')
parser_find_apps.add_argument('--developer', help='Developer of the app')
parser_find_apps.add_argument('--created-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the app version was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.add_argument('--created-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the app version was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.add_argument('--mod-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the app was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.add_argument('--mod-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the app was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_apps.set_defaults(func=find_apps)
register_subparser(parser_find_apps, subparsers_action=subparsers_find, categories='exec')
parser_find_jobs = subparsers_find.add_parser('jobs', help='List jobs in your project',
description=fill('Finds jobs with the given search parameters. By default, output is formatted to show the last several job trees that you\'ve run in the current project.') + '''
EXAMPLES
''' + fill('The following will show the full job tree containing the job ID given (it does not have to be the origin job).', subsequent_indent=' ') + '''
$ dx find jobs --id job-B13f83KgpqG0PB8P0xkQ000X
''' + fill('The following will find all jobs that start with the string "bwa"', subsequent_indent=' ') + '''
$ dx find jobs --name bwa*
''',
parents=[find_executions_args, stdout_args, json_arg, no_color_arg,
delim_arg, env_args, find_by_properties_and_tags_args],
formatter_class=argparse.RawTextHelpFormatter,
conflict_handler='resolve',
prog='dx find jobs')
add_find_executions_search_gp(parser_find_jobs)
parser_find_jobs.set_defaults(func=find_executions, classname='job')
parser_find_jobs.completer = DXPathCompleter(expected='project')
register_subparser(parser_find_jobs, subparsers_action=subparsers_find, categories='exec')
parser_find_analyses = subparsers_find.add_parser('analyses', help='List analyses in your project',
description=fill('Finds analyses with the given search parameters. By default, output is formatted to show the last several job trees that you\'ve run in the current project.'),
parents=[find_executions_args, stdout_args, json_arg, no_color_arg,
delim_arg, env_args, find_by_properties_and_tags_args],
formatter_class=argparse.RawTextHelpFormatter,
conflict_handler='resolve',
prog='dx find analyses')
add_find_executions_search_gp(parser_find_analyses)
parser_find_analyses.set_defaults(func=find_executions, classname='analysis')
parser_find_analyses.completer = DXPathCompleter(expected='project')
register_subparser(parser_find_analyses, subparsers_action=subparsers_find, categories='exec')
parser_find_executions = subparsers_find.add_parser('executions', help='List executions (jobs and analyses) in your project',
description=fill('Finds executions (jobs and analyses) with the given search parameters. By default, output is formatted to show the last several job trees that you\'ve run in the current project.'),
parents=[find_executions_args, stdout_args, json_arg, no_color_arg,
delim_arg, env_args, find_by_properties_and_tags_args],
formatter_class=argparse.RawTextHelpFormatter,
conflict_handler='resolve',
prog='dx find executions')
add_find_executions_search_gp(parser_find_executions)
parser_find_executions.set_defaults(func=find_executions, classname=None)
parser_find_executions.completer = DXPathCompleter(expected='project')
register_subparser(parser_find_executions, subparsers_action=subparsers_find, categories='exec')
parser_find_data = subparsers_find.add_parser('data', help='Find data objects',
description='Finds data objects with the given search parameters. By' +
' default, restricts the search to the current project if set. To ' +
'search over all projects (excludes public projects), use ' +
'--all-projects (overrides --path and --norecurse).',
parents=[stdout_args, json_arg, no_color_arg, delim_arg, env_args,
find_by_properties_and_tags_args],
prog='dx find data')
parser_find_data.add_argument('--class', dest='classname', choices=['record', 'file', 'gtable', 'applet', 'workflow'], help='Data object class')
parser_find_data.add_argument('--state', choices=['open', 'closing', 'closed', 'any'], help='State of the object')
parser_find_data.add_argument('--visibility', choices=['hidden', 'visible', 'either'], default='visible', help='Whether the object is hidden or not')
parser_find_data.add_argument('--name', help='Name of the object')
parser_find_data.add_argument('--type', help='Type of the data object')
parser_find_data.add_argument('--link', help='Object ID that the data object links to')
parser_find_data.add_argument('--all-projects', '--allprojects', help='Extend search to all projects (excluding public projects)', action='store_true')
parser_find_data.add_argument('--project', help=argparse.SUPPRESS)
parser_find_data.add_argument('--folder', help=argparse.SUPPRESS).completer = DXPathCompleter(expected='folder')
parser_find_data.add_argument('--path', help='Project and/or folder in which to restrict the results',
metavar='PROJECT:FOLDER').completer = DXPathCompleter(expected='folder')
parser_find_data.add_argument('--norecurse', dest='recurse', help='Do not recurse into subfolders', action='store_false')
parser_find_data.add_argument('--mod-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the object was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--mod-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the object was last modified (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--created-after', help='Date (e.g. 2012-01-01) or integer timestamp after which the object was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.add_argument('--created-before', help='Date (e.g. 2012-01-01) or integer timestamp before which the object was created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_data.set_defaults(func=find_data)
register_subparser(parser_find_data, subparsers_action=subparsers_find, categories=('data', 'metadata'))
parser_find_projects = subparsers_find.add_parser('projects', help='Find projects',
description='Finds projects with the given search parameters. Use the --public flag to list all public projects.',
parents=[stdout_args, json_arg, delim_arg, env_args, find_by_properties_and_tags_args],
prog='dx find projects')
parser_find_projects.add_argument('--name', help='Name of the project')
parser_find_projects.add_argument('--level', choices=['VIEW', 'UPLOAD', 'CONTRIBUTE', 'ADMINISTER'],
help='Minimum level of permissions expected')
parser_find_projects.add_argument('--public',
help='Include ONLY public projects (will automatically set --level to VIEW)',
action='store_true')
parser_find_projects.add_argument('--created-after',
help='Date (e.g. 2012-01-01) or integer timestamp after which the project was ' +
'created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_projects.add_argument('--created-before',
                                  help='Date (e.g. 2012-01-01) or integer timestamp before which the project was ' +
'created (negative number means ms in the past, or use suffix s, m, h, d, w, M, y)')
parser_find_projects.set_defaults(func=find_projects)
register_subparser(parser_find_projects, subparsers_action=subparsers_find, categories='data')
parser_api = subparsers.add_parser('api', help='Call an API method',
formatter_class=argparse.RawTextHelpFormatter,
description=fill('Call an API method directly. The JSON response from the API server will be returned if successful. No name resolution is performed; DNAnexus IDs must always be provided. The API specification can be found at') + '''
https://wiki.dnanexus.com/API-Specification-v1.0.0/Introduction
EXAMPLE
In the following example, a project's description is changed.
$ dx api project-B0VK6F6gpqG6z7JGkbqQ000Q update '{"description": "desc"}'
{
"id": "project-B0VK6F6gpqG6z7JGkbqQ000Q"
}
''',
prog='dx api',
parents=[env_args])
parser_api.add_argument('resource', help=fill('One of "system", a class name (e.g. "record"), or an entity ID such as "record-xxxx". Use "app-name/1.0.0" to refer to version "1.0.0" of the app named "name".', width_adjustment=-17))
parser_api.add_argument('method', help=fill('Method name for the resource as documented by the API specification', width_adjustment=-17))
parser_api.add_argument('input_json', nargs='?', default="{}", help='JSON input for the method (if not given, "{}" is used)')
parser_api.add_argument('--input', help=fill('Load JSON input from FILENAME ("-" to use stdin)', width_adjustment=-17))
parser_api.set_defaults(func=api)
# parser_api.completer = TODO
register_subparser(parser_api)
parser_upgrade = subparsers.add_parser('upgrade', help='Upgrade dx-toolkit (the DNAnexus SDK and this program)',
description='Upgrades dx-toolkit (the DNAnexus SDK and this program) to the latest recommended version, or to a specified version and platform.',
prog='dx upgrade')
parser_upgrade.add_argument('args', nargs='*')
parser_upgrade.set_defaults(func=upgrade)
register_subparser(parser_upgrade)
category_list = '\n '.join([category + parser_categories[category]['desc'] for category in parser_categories_sorted])
parser_help = subparsers.add_parser('help', help='Display help messages and dx commands by category',
description=fill('Displays the help message for the given command (and subcommand if given), or displays the list of all commands in the given category.') + '\n\nCATEGORIES\n\n ' + category_list + '''
EXAMPLE
''' + fill('To find all commands related to running and monitoring a job and then display the help message for the command "run", run', subsequent_indent=' ') + '''
$ dx help exec
<list of all execution-related dx commands>
$ dx help run
<help message for dx run>
''', formatter_class=argparse.RawTextHelpFormatter, prog='dx help')
parser_help.add_argument('command_or_category', help=fill('Display the help message for the given command, or the list of all available commands for the given category', width_adjustment=-24), nargs='?', default=None)
parser_help.add_argument('subcommand', help=fill('Display the help message for the given subcommand of the command', width_adjustment=-23), nargs='?', default=None)
parser_help.set_defaults(func=print_help)
# TODO: make this completer conditional on whether "help run" is in args
# parser_help.completer = MultiCompleter([DXAppCompleter(),
# DXPathCompleter(classes=['applet'])])
parser_map['help'] = parser_help # TODO: a special help completer
parser_map['help run'] = parser_help
for category in parser_categories:
parser_categories[category]['cmds'].append(('help', subparsers._choices_actions[-1].help))
parser_categories['all']['cmds'].sort()
def main():
# Bash argument completer hook
if '_ARGCOMPLETE' in os.environ:
import argcomplete
argcomplete.autocomplete(parser,
always_complete_options=False,
exclude=['import', 'gtable'],
output_stream=sys.stdout if '_DX_ARC_DEBUG' in os.environ else None)
if len(args_list) > 0:
args = parser.parse_args(args_list)
dxpy.USER_AGENT += " {prog}-{command}".format(prog=parser.prog, command=getattr(args, 'command', ''))
set_cli_colors(args)
set_delim(args)
set_env_from_args(args)
try:
args.func(args)
# Flush buffered data in stdout before interpreter shutdown to ignore broken pipes
sys.stdout.flush()
except:
err_exit()
else:
parser.print_help()
sys.exit(1)
if __name__ == '__main__':
main()
|
andyshinn/dx-toolkit
|
src/python/dxpy/scripts/dx.py
|
Python
|
apache-2.0
| 245,324
|
[
"BWA"
] |
55211f24834520f871836ebfc9beb1455d7d71ef4aaeeac9e7b07ecd89aafaa1
|
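The subcommand wiring above repeats one pattern for every command: build a sub-parser, attach its arguments, bind a handler with set_defaults(func=...), and let main() dispatch through args.func(args). The following is a minimal, self-contained sketch of that pattern using only the standard argparse module; the dx-specific pieces (register_subparser, the completers, env_args, the real API handlers) are deliberately left out, and cmd_terminate is a made-up stand-in, not the real dx handler.
import argparse
import sys

def cmd_terminate(args):
    # Stand-in handler; the real dx command calls the platform API for each job ID.
    for jobid in args.jobid:
        print("would terminate", jobid)

parser = argparse.ArgumentParser(prog="dx-sketch")
subparsers = parser.add_subparsers(metavar="command")

parser_terminate = subparsers.add_parser("terminate", help="Terminate job(s)")
parser_terminate.add_argument("jobid", nargs="+", help="ID of the job to terminate")
parser_terminate.set_defaults(func=cmd_terminate)

def main(argv=None):
    args = parser.parse_args(argv)
    if not hasattr(args, "func"):
        parser.print_help()
        sys.exit(1)
    # dispatch to whatever handler the chosen sub-parser bound via set_defaults
    args.func(args)

if __name__ == "__main__":
    main(["terminate", "job-0001", "job-0002"])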
########################################################################
# $HeadURL$
########################################################################
""" PieGraph represents a pie graph
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import numpy, math, time
from matplotlib.patches import Wedge, Shadow
from matplotlib.cbook import is_string_like
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
class PieGraph( PlotBase ):
def __init__( self, data, ax, prefs, *args, **kw ):
PlotBase.__init__( self, data, ax, prefs, *args, **kw )
self.pdata = data
def pie( self, explode = None,
colors = None,
autopct = None,
pctdistance = 0.6,
shadow = False
):
start = time.time()
labels = self.pdata.getLabels()
if labels[0][0] == "NoLabels":
try:
self.pdata.initialize(key_type='string')
self.pdata.sortLabels()
labels = self.pdata.getLabels()
nLabels = self.pdata.getNumberOfLabels()
explode = [0.] * nLabels
if nLabels > 0:
explode[0] = 0.1
except Exception,x:
print "PieGraph Error: can not interpret data for the plot"
#labels.reverse()
values = [l[1] for l in labels]
x = numpy.array( values, numpy.float64 )
self.legendData = labels
sx = float( numpy.sum( x ) )
if sx > 1: x = numpy.divide( x, sx )
labels = [l[0] for l in labels]
if explode is None: explode = [0] * len( x )
assert( len( x ) == len( labels ) )
assert( len( x ) == len( explode ) )
plot_axis_labels = self.prefs.get( 'plot_axis_labels', True )
center = 0, 0
radius = 1.1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in zip( x, labels, explode ):
x, y = center
theta2 = theta1 + frac
thetam = 2 * math.pi * 0.5 * ( theta1 + theta2 )
x += expl * math.cos( thetam )
y += expl * math.sin( thetam )
color = self.palette.getColor( label )
w = Wedge( ( x, y ), radius, 360. * theta1, 360. * theta2,
facecolor = color,
lw = pixelToPoint( 0.5, self.dpi ),
edgecolor = '#999999' )
slices.append( w )
self.ax.add_patch( w )
w.set_label( label )
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = Shadow( w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder( 0.9 * w.get_zorder() )
self.ax.add_patch( shad )
if plot_axis_labels:
if frac > 0.03:
xt = x + 1.05 * radius * math.cos( thetam )
yt = y + 1.05 * radius * math.sin( thetam )
thetam %= 2 * math.pi
if 0 < thetam and thetam < math.pi:
valign = 'bottom'
elif thetam == 0 or thetam == math.pi:
valign = 'center'
else:
valign = 'top'
if thetam > math.pi / 2.0 and thetam < 3.0 * math.pi / 2.0:
halign = 'right'
elif thetam == math.pi / 2.0 or thetam == 3.0 * math.pi / 2.0:
halign = 'center'
else:
halign = 'left'
t = self.ax.text( xt, yt, label,
size = pixelToPoint( self.prefs['subtitle_size'], self.dpi ),
horizontalalignment = halign,
verticalalignment = valign )
t.set_family( self.prefs['font_family'] )
t.set_fontname( self.prefs['font'] )
t.set_size( pixelToPoint( self.prefs['text_size'], self.dpi ) )
texts.append( t )
if autopct is not None:
xt = x + pctdistance * radius * math.cos( thetam )
yt = y + pctdistance * radius * math.sin( thetam )
if is_string_like( autopct ):
s = autopct % ( 100. * frac )
elif callable( autopct ):
s = autopct( 100. * frac )
else:
raise TypeError( 'autopct must be callable or a format string' )
t = self.ax.text( xt, yt, s,
horizontalalignment = 'center',
verticalalignment = 'center' )
t.set_family( self.prefs['font_family'] )
t.set_fontname( self.prefs['font'] )
t.set_size( pixelToPoint( self.prefs['text_size'], self.dpi ) )
autotexts.append( t )
theta1 = theta2
i += 1
self.legendData.reverse()
self.ax.set_xlim( ( -1.25, 1.25 ) )
self.ax.set_ylim( ( -1.25, 1.25 ) )
self.ax.set_axis_off()
if autopct is None: return slices, texts
else: return slices, texts, autotexts
min_amount = .1
def getLegendData( self ):
return self.legendData
def draw( self ):
self.ylabel = ''
self.prefs['square_axis'] = True
PlotBase.draw( self )
def my_display( x ):
if x > 100 * self.min_amount:
return '%.1f' % x + '%'
else:
return ""
nLabels = self.pdata.getNumberOfLabels()
explode = [0.] * nLabels
if nLabels > 0:
explode[0] = 0.1
self.wedges, text_labels, percent = self.pie( explode = explode, autopct = my_display )
|
sposs/DIRAC
|
Core/Utilities/Graphs/PieGraph.py
|
Python
|
gpl-3.0
| 5,528
|
[
"DIRAC"
] |
75adbc95de466f1ff3fbf4de0d8afdde0a0cfb0e35574ce8ec11af2eecd608c1
|
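The pie() method above walks the normalized fractions around the circle: each slice spans theta1..theta2 expressed as a fraction of a full turn (converted to degrees for Wedge), and its label is anchored at the mid-angle, 1.05*radius out, using cos/sin. The sketch below reproduces only that geometry with plain matplotlib; the fractions, labels, and output filename are made up, and none of the DIRAC plumbing (GraphData, palettes, prefs) is used.
import math
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge

fracs = [0.5, 0.3, 0.2]          # fractions, already normalized to sum to 1
labels = ["a", "b", "c"]
radius = 1.1
fig, ax = plt.subplots()
theta1 = 0.0
for frac, label in zip(fracs, labels):
    theta2 = theta1 + frac
    thetam = 2 * math.pi * 0.5 * (theta1 + theta2)   # mid-angle in radians
    # Wedge takes its angles in degrees, hence the 360.* factors
    ax.add_patch(Wedge((0, 0), radius, 360. * theta1, 360. * theta2))
    ax.text(1.05 * radius * math.cos(thetam), 1.05 * radius * math.sin(thetam),
            label, ha="center", va="center")
    theta1 = theta2
ax.set_xlim(-1.25, 1.25)
ax.set_ylim(-1.25, 1.25)
ax.set_axis_off()
fig.savefig("pie_sketch.png")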
import struct
import array
import string
import numpy
import datetime
from netCDF4 import Dataset
from os import listdir
from os import system
import os.path
import os
user = os.environ['USER']
lock_fname = 'lock_tower_merge_for_{}'.format(user)
if os.path.isfile(lock_fname):
print('ERROR: cannot run tower_merge.py, locked by {}'.format(lock_fname))
else :
command = 'touch {}'.format(lock_fname)
system('touch {}'.format(lock_fname) )
#quit()
def read_record(file, mode):
    # Read one Fortran unformatted sequential record: a 4-byte length marker,
    # the payload, and the length marker repeated. Use the file handle that was
    # passed in rather than relying on the global 'f'.
    reclen, = struct.unpack(mode[0]+'i', file.read(4))
    if mode[1] == 'i' or mode[1] == 'f':
        size = int(reclen/4)
    elif mode[1] == 'd':
        size = int(reclen/8)
    else:
        print('ERROR: unsupported data type for FORTRAN RECORD READ')
        exit(1)
    print(mode, size)
    read_string = mode[0]+mode[1]*size
    data = struct.unpack(read_string, file.read(reclen))
    recend, = struct.unpack(mode[0]+'i', file.read(4))
    if reclen != recend:
        print('ERROR: FORTRAN RECORD READ - end tag does not match record size')
    return data
# SETUP DIRECTORY WHERE TO PUT PROCESSED DATA
#
timestamp=datetime.datetime.now().strftime("%Y%m%d-%I%M%S")
dump_dir='towerdump_'+timestamp
sys_command='mkdir ' + dump_dir
system(sys_command)
# ##########################################################################################
# GET GRID AND TOWER STRIDE INFORMATION
# ##########################################################################################
f = open('dns.ini','r')
stride = -1
for line in f:
if 'Stride' in line:
stride = [int(i) for i in (line.split('=', 2)[1].split(','))]
if ( stride==-1) :
print('ERROR: Keyword stride not found in dns.ini')
exit(1)
f.close()
f = open('grid','rb')
stg=f.read(4)
i1,= struct.unpack('>i',stg) # READ AS BIG ENDIAN
i2,= struct.unpack('<i',stg) # READ AS LITTLE ENDIAN
if ( i1 == 12 ):
endian='>'
print('BIG ENDIAN DATA')
reclen = i1
elif ( i2 == 12):
endian='<'
print('LITTLE ENDIAN DATA')
reclen = i2
else:
print('ERROR: Cannot determine Endianness from first Integer in grid file')
print(' Assuming BIG gives:', i1)
print(' Assuming LITTLE gives:', i2)
    exit(1)  # stop here: endianness could not be determined
iread=endian+'i'
fread=endian+'f'
dread=endian+'d'
f.seek(0)
griddims = [i for i in read_record(f,iread)]
gridsize = [d for d in read_record(f,dread)]
xgrid = [x for x in read_record(f,dread)]
ygrid = [y for y in read_record(f,dread)]
zgrid = [z for z in read_record(f,dread)]
print('GRID: ', griddims)
print(' ', gridsize)
print('STRIDE:', stride)
# ##########################################################################################
# CONSTRUCT TOWER INFORMATION
# ##########################################################################################
xtower = xgrid[0:griddims[0]:stride[0]]
ytower = ygrid[0:griddims[1]:stride[1]]
ztower = zgrid[0:griddims[2]:stride[2]]
itowerx = range(1,griddims[0]+1,stride[0])
itowery = range(1,griddims[1]+1,stride[1])
itowerz = range(1,griddims[2]+1,stride[2])
ntowerx = len(itowerx)
ntowery = len(itowery)
ntowerz = len(itowerz)
# ##########################################################################################
# INITIALIZE PROCESSING OF TOWERS
# ##########################################################################################
# Find out which time slices to process
#
slices = []
varcount = -1
from glob import glob
for f in glob('tower.mean.*.?'):
dummy = f.split('.',4)
print(dummy)
[start,end] = dummy[2].split('-')
ivar = int(dummy[3])
if ( ivar > varcount ):
varcount = ivar
if [start,end] not in slices:
slices.append([start,end])
end = sorted([int(i[1]) for i in slices], key=int)
start=sorted([int(i[0]) for i in slices], key=int)
slices=[ [start[i],end[i] ] for i in range(len(slices))]
start_iteration = min(start)
end_iteration = max(end)
ntimes = max(end) - min(start) + 1
print('NTIMES:', ntimes, 'TOWER SIZE:', ntowery, 'SLICES:', len(end))
# ##########################################################################################
# BUILD NETCDF FILE
# ##########################################################################################
for slc in slices:
ncfile = Dataset('tower_new.nc','w',format='NETCDF4')
nc_xdim = ncfile.createDimension('x',ntowerx)
nc_ydim = ncfile.createDimension('y',ntowery)
nc_zdim = ncfile.createDimension('z',ntowerz)
nc_tdim = ncfile.createDimension('t',slc[1]-slc[0]+1)
nc_xvar = ncfile.createVariable('x','f4',('x',) )
    nc_xvar.long_name='streamwise distance normalized by Rossby Radius'
nc_xvar.units='m'
nc_xvar.axis='x'
nc_yvar = ncfile.createVariable('y','f4',('y',) )
nc_yvar.long_name='height normalized by Rossby Radius'
nc_yvar.positive='up'
nc_yvar.units='m'
nc_zvar = ncfile.createVariable('z','f4',('z',) )
nc_zvar.long_name = 'spanwise distance normalized by Rossby Radius'
nc_zvar.units='m'
nc_zvar.axis='z'
nc_tvar = ncfile.createVariable('t','f4',('t',) )
nc_tvar.long_name='time'
nc_tvar.units='days since 0001-01-01 00:00'
nc_tvar.calendar='none'
nc_tvar.axis='T'
nc_itvar= ncfile.createVariable('it','i4',('t',) )
    nc_itvar.long_name='Iteration'
    nc_itvar.units='1'
nc_uvar = ncfile.createVariable('u','f4',('t','z','x','y',),zlib=False,least_significant_digit=4)
nc_uvar.description = 'Streamwise Velocity U, instantaneous'
nc_uvar.units = 'm s^-1; normalized by reference velocity'
nc_vvar = ncfile.createVariable('v','f4',('t','z','x','y',),zlib=False,least_significant_digit=4)
    nc_vvar.description = 'Vertical Velocity W, instantaneous'
nc_vvar.units = 'm s^-1; normalized by reference velocity'
nc_wvar = ncfile.createVariable('w','f4',('t','z','x','y',),zlib=False,least_significant_digit=4)
nc_wvar.description = 'Spanwise Velocity V, instantaneous'
nc_wvar.units = 'm s^-1; normalized by reference velocity'
nc_pvar = ncfile.createVariable('p','f4',('t','z','x','y',),zlib=False,least_significant_digit=4)
nc_pvar.description = 'Local Ageostrophic pressure, instantaneous'
nc_pvar.units = 'kg s^-2 m^-1; normalized by reference pressure'
nc_svar = ncfile.createVariable('s','f4',('t','z','x','y',),zlib=False,least_significant_digit=4)
nc_svar.description = 'Scalar concentration S, instantaneous'
nc_svar.units = '1; normalized by reference value at the boundary'
nc_umvar = ncfile.createVariable('uM','f4',('t','y',),zlib=False,least_significant_digit=4)
nc_umvar.description = 'Streamwise Velocity U, averaged'
nc_umvar.units = 'm s^-1; normalized by reference velocity'
nc_vmvar = ncfile.createVariable('vM','f4',('t','y',),zlib=False,least_significant_digit=4)
    nc_vmvar.description = 'Vertical Velocity W, averaged'
nc_vmvar.units = 'm s^-1; normalized by reference velocity'
nc_wmvar = ncfile.createVariable('wM','f4',('t','y',),zlib=False,least_significant_digit=4)
nc_wmvar.description = 'Spanwise Velocity V, averaged'
nc_wmvar.units = 'm s^-1; normalized by reference velocity'
nc_pmvar = ncfile.createVariable('pM','f4',('t','y',),zlib=False,least_significant_digit=4)
nc_pmvar.description = 'Local Ageostrophic pressure, averaged'
nc_pmvar.units = 'kg s^-2 m^-1; normalized by reference pressure'
nc_smvar = ncfile.createVariable('sM','f4',('t','y',),zlib=False,least_significant_digit=4)
nc_smvar.description = 'Scalar concentration S, averaged'
nc_smvar.units = '1; normalized by reference value at the boundary'
# ##########################################################################################
# PUT GRID DATA
# ##########################################################################################
nc_xvar[:] = xtower
nc_yvar[:] = ytower
nc_zvar[:] = ztower
# ##########################################################################################
# PROCESS TOWERS
# ##########################################################################################
slc_string = '.'+str(slc[0]).zfill(6) + '-' + str(slc[1]).zfill(6)
files = []
vname = ['','u','v','w','p','s']
print(datetime.datetime.now().strftime("%Y%m%d %I:%M:%S%p") ,\
': processing time slice', slc[0],'-',slc[1],'/',end_iteration)
for ivar in range(1,int(varcount)+1):
name = 'tower.mean' + slc_string + '.' + str(ivar)
try:
with open(name):
files.append(name)
except IOError:
print('ERROR problem opening file ', name)
for i in itowerx:
for k in itowerz:
name = 'tower.'+str(i).zfill(6)+'x'+str(k).zfill(6) \
+ slc_string + '.' + str(ivar).zfill(1)
try:
with open(name):
files.append(name)
except IOError:
print('ERROR problem opening file ', name)
tower_jmax = len(itowery)
ntimes_loc = slc[1] - slc[0] + 1
iteration_rt=array.array('d',(-1. for i in range(0,ntimes_loc)))
iteration_it=array.array('i',(-1 for i in range(0,ntimes_loc)))
for f in files:
datf = open(f,'rb')
ivar = int(f.split('.',4)[3])
position = f.split('.',2)[1]
if ( position == 'mean'):
[xpos,zpos] = [-1,-1]
idx_i = -1
idx_k = -1
else:
[xpos,zpos] = position.split('x',2)
            # use integer division so the results can serve as array indices below
            idx_i = int(xpos)//stride[0]
            idx_k = int(zpos)//stride[2]
times_loc = slc[1] - slc[0] + 1
data_raw = datf.read((tower_jmax+2)*8*times_loc)
read_string = endian+ 'd'*(tower_jmax+2)*times_loc
data_flt = struct.unpack(read_string,data_raw)
data = numpy.reshape(data_flt,[times_loc,tower_jmax+2])
# MOVE FILE TO dump directory for this batch
datf.close()
sys_command='mv ' + f + ' ' + dump_dir
system(sys_command)
time_it = data[:,1]
time_rt = data[:,0]
t_index = [int(it-slc[0]+1) for it in time_it]
its=min(t_index); ite=max(t_index)+1;
data = data[:,2:tower_jmax+2]
for i in range(its,ite):
if ( iteration_it[i] > 0 and int(time_it[i-its]) != iteration_it[i] ):
print('ERROR: Timestamp in file', f, '(', int(time_it[i-its]),')')
print(' Does not agree with expected time:', iteration_it[i])
exit(1)
iteration_rt[i] = time_rt[i-its]
iteration_it[i] = int(time_it[i-its])
if ( idx_i >= 0 and idx_k >= 0 ):
if ( ivar == 1 ) :
nc_uvar[its:ite,idx_k,idx_i,:] = data
if ( ivar == 2 ) :
nc_vvar[its:ite,idx_k,idx_i,:] = data
if ( ivar == 3 ) :
nc_wvar[its:ite,idx_k,idx_i,:] = data
if ( ivar == 4 ) :
nc_pvar[its:ite,idx_k,idx_i,:] = data
if ( ivar == 5 ) :
nc_svar[its:ite,idx_k,idx_i,:] = data
else:
if ( ivar == 1 ) :
nc_umvar[its:ite,0:tower_jmax] = data
if ( ivar == 2 ) :
nc_vmvar[its:ite,0:tower_jmax] = data
if ( ivar == 3 ) :
nc_wmvar[its:ite,0:tower_jmax] = data
if ( ivar == 4 ) :
nc_pmvar[its:ite,0:tower_jmax] = data
if ( ivar == 5 ) :
nc_smvar[its:ite,0:tower_jmax] = data
# Write real time and iteration data
nc_tvar[:] = iteration_rt
nc_itvar[:]= iteration_it
ncfile.close()
sys_command='mv tower_new.nc tower'+str(slc[0]).zfill(6)+'-'+str(slc[1]).zfill(6)+'.nc'
system(sys_command)
system('rm {}'.format(lock_fname))
exit
|
zrick/tlab
|
scripts/python/tower2nc.py
|
Python
|
gpl-3.0
| 12,284
|
[
"NetCDF"
] |
8d7145aebd8460a46c452b8a900f69a128f59466a38bc323e474da4f7a224f3e
|
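read_record above parses Fortran "unformatted sequential" records: a 4-byte record length, the payload, and the same length repeated as an end tag. The round-trip sketch below builds one such record with struct and reads it back the same way; it assumes little-endian byte order and 8-byte floats, matching the dread case, and uses made-up values.
import struct

values = (1.0, 2.0, 3.0)
payload = struct.pack("<" + "d" * len(values), *values)
# record = length marker + payload + repeated length marker
record = struct.pack("<i", len(payload)) + payload + struct.pack("<i", len(payload))

# Read it back the same way read_record does
reclen, = struct.unpack("<i", record[:4])
data = struct.unpack("<" + "d" * (reclen // 8), record[4:4 + reclen])
recend, = struct.unpack("<i", record[4 + reclen:8 + reclen])
assert reclen == recend and data == values
print(data)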
## INFO ########################################################################
## ##
## formbuilder ##
## =========== ##
## ##
## Online Form Building Application ##
## Version: 0.3.01.446 (20150119) ##
## File: formdict.py ##
## ##
## For more information about the project, visit ##
## <https://github.com/petervaro/formbuilder>. ##
## Copyright (C) 2014 Peter Varo ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
#------------------------------------------------------------------------------#
class FormDict:
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
@property
def is_locked(self):
return self._lock
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, data, lock=False):
self._data = data
self._lock = lock
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def release(self):
self._lock = False
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def lock(self):
self._lock = True
|
petervaro/formbuilder
|
formdict.py
|
Python
|
gpl-3.0
| 3,166
|
[
"VisIt"
] |
60ed60e8b7832db9f090d3201ad68ef69953ba67a7b4258ff9924d203fb973ba
|
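FormDict is a small wrapper pairing a data payload with a lock flag. A short usage sketch follows, assuming the class can be imported from the formdict module shown above; the payload is made up.
from formdict import FormDict

form = FormDict({"title": "survey"}, lock=True)
assert form.is_locked
form.release()                      # unlock before editing
form.data = {"title": "survey", "fields": []}
form.lock()
print(form.data, form.is_locked)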
import numpy as np
import numpy.linalg as la
class Kernel(object):
"""Implements list of kernels from
http://en.wikipedia.org/wiki/Support_vector_machine
"""
@staticmethod
def linear():
return lambda x, y: np.inner(x, y)
@staticmethod
def gaussian(sigma):
return lambda x, y: \
np.exp(-np.sqrt(la.norm(x-y) ** 2 / (2 * sigma ** 2)))
@staticmethod
def _polykernel(dimension, offset):
return lambda x, y: (offset + np.inner(x, y)) ** dimension
@classmethod
def inhomogenous_polynomial(cls, dimension):
return cls._polykernel(dimension=dimension, offset=1.0)
@classmethod
def homogenous_polynomial(cls, dimension):
return cls._polykernel(dimension=dimension, offset=0.0)
@staticmethod
def hyperbolic_tangent(kappa, c):
return lambda x, y: np.tanh(kappa * np.dot(x, y) + c)
|
zhoucen/ml
|
Kernel.py
|
Python
|
mit
| 897
|
[
"Gaussian"
] |
c1c9375e9dad55852b172be0a6cca7fa4fb4169a200bec0f29073403e3b11c98
|
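Each static/class method above returns a kernel as a closure k(x, y), so a Gram matrix is just that closure evaluated over all pairs of points. A usage sketch follows, assuming the class is importable as shown and using made-up sample points; note that gaussian() keeps an np.sqrt around the squared norm, so it decays with the distance itself rather than with the squared distance of the textbook RBF kernel exp(-||x-y||^2 / (2*sigma^2)).
import numpy as np
from Kernel import Kernel

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
k = Kernel.gaussian(sigma=1.0)

# Gram matrix K[i, j] = k(X[i], X[j])
K = np.array([[k(xi, xj) for xj in X] for xi in X])
print(K)

lin = Kernel.linear()
print(lin(X[1], X[2]))   # plain inner product, here 0.0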
"""
This module listens for welcome messages. Use it as an example.
"""
class WelcomeMessage:
def __init__(self, bot):
self.bot = bot
self.words = ['hello', 'hi', 'hola', 'que ase', 'que pasa', 'buenas']
def listenTo(self, channel, message, event):
return any([word in message.lower() for word in self.words])
def reactTo(self, channel, message, event):
self.bot.sendMessage(
channel, 'Hi my friend, trust me and tell me what to estimate (i.e. estimate UIF-233)')
|
jonthebeach/estimator-bot
|
src/messages/welcome_message.py
|
Python
|
mit
| 521
|
[
"ASE"
] |
0ea87b309d3240ea25e83fbab320f02d7c0ca1a0093797f4fcd08018e6747934
|
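The handler above exposes the two-method contract listenTo/reactTo and only needs a bot object with a sendMessage(channel, text) method. A usage sketch with a stub bot follows; the import path is an assumption based on the file location and the message text is made up.
from welcome_message import WelcomeMessage

class StubBot:
    def sendMessage(self, channel, text):
        # Stand-in for the real bot transport; just print the outgoing message.
        print("[{}] {}".format(channel, text))

handler = WelcomeMessage(StubBot())
msg = "Hello team!"
if handler.listenTo("#general", msg, event=None):
    handler.reactTo("#general", msg, event=None)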
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a class to sample data along an elliptical path.
"""
import copy
import numpy as np
from .geometry import EllipseGeometry
from .integrator import INTEGRATORS
__all__ = ['EllipseSample']
class EllipseSample:
"""
Class to sample image data along an elliptical path.
The image intensities along the elliptical path can be extracted
using a selection of integration algorithms.
The ``geometry`` attribute describes the geometry of the elliptical
path.
Parameters
----------
image : 2D `~numpy.ndarray`
The input image.
sma : float
The semimajor axis length in pixels.
x0, y0 : float, optional
The (x, y) coordinate of the ellipse center.
astep : float, optional
The step value for growing/shrinking the semimajor axis. It can
be expressed either in pixels (when ``linear_growth=True``) or
as a relative value (when ``linear_growth=False``). The default
is 0.1.
eps : float, optional
The ellipticity of the ellipse. The default is 0.2.
    position_angle : float, optional
The position angle of ellipse in relation to the positive x axis
of the image array (rotating towards the positive y axis). The
default is 0.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
The number of sigma-clip iterations. Set to zero to skip
sigma-clipping. The default is 0.
linear_growth : bool, optional
The semimajor axis growing/shrinking mode. The default is
`False`.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
geometry : `~photutils.isophote.EllipseGeometry` instance or `None`
The geometry that describes the ellipse. This can be used in
lieu of the explicit specification of parameters ``sma``,
``x0``, ``y0``, ``eps``, etc. In any case, the
`~photutils.isophote.EllipseGeometry` instance becomes an
attribute of the `~photutils.isophote.EllipseSample` object.
The default is `None`.
Attributes
----------
values : 2D `~numpy.ndarray`
The sampled values as a 2D array, where the rows contain the
angles, radii, and extracted intensity values, respectively.
mean : float
The mean intensity along the elliptical path.
geometry : `~photutils.isophote.EllipseGeometry` instance
The geometry of the elliptical path.
gradient : float
The local radial intensity gradient.
gradient_error : float
The error associated with the local radial intensity gradient.
gradient_relative_error : float
The relative error associated with the local radial intensity
gradient.
sector_area : float
The average area of the sectors along the elliptical path from
which the sample values were integrated.
total_points : int
The total number of sample values that would cover the entire
elliptical path.
actual_points : int
The actual number of sample values that were taken from the
image. It can be smaller than ``total_points`` when the ellipse
encompasses regions outside the image, or when sigma-clipping
removed some of the points.
"""
def __init__(self, image, sma, x0=None, y0=None, astep=0.1, eps=0.2,
position_angle=0., sclip=3., nclip=0, linear_growth=False,
integrmode='bilinear', geometry=None):
self.image = image
self.integrmode = integrmode
if geometry:
# when the geometry is inherited from somewhere else,
# its sma attribute must be replaced by the value
# explicitly passed to the constructor.
self.geometry = copy.deepcopy(geometry)
self.geometry.sma = sma
else:
# if no center was specified, assume it's roughly
# coincident with the image center
_x0 = x0
_y0 = y0
if not _x0 or not _y0:
_x0 = image.shape[1] / 2
_y0 = image.shape[0] / 2
self.geometry = EllipseGeometry(_x0, _y0, sma, eps,
position_angle, astep,
linear_growth)
# sigma-clip parameters
self.sclip = sclip
self.nclip = nclip
# extracted values associated with this sample.
self.values = None
self.mean = None
self.gradient = None
self.gradient_error = None
self.gradient_relative_error = None
self.sector_area = None
# total_points reports the total number of pairs angle-radius that
# were attempted. actual_points reports the actual number of sampled
# pairs angle-radius that resulted in valid values.
self.total_points = 0
self.actual_points = 0
def extract(self):
"""
Extract sample data by scanning an elliptical path over the
image array.
Returns
-------
result : 2D `~numpy.ndarray`
The rows of the array contain the angles, radii, and
extracted intensity values, respectively.
"""
# the sample values themselves are kept cached to prevent
# multiple calls to the integrator code.
if self.values is not None:
return self.values
else:
s = self._extract()
self.values = s
return s
def _extract(self, phi_min=0.05):
# Here the actual sampling takes place. This is called only once
# during the life of an EllipseSample instance, because it's an
# expensive calculation. This method should not be called from
# external code.
# If one wants to force it to re-run, then do:
#
# sample.values = None
#
# before calling sample.extract()
# individual extracted sample points will be stored in here
angles = []
radii = []
intensities = []
sector_areas = []
# reset counters
self.total_points = 0
self.actual_points = 0
# build integrator
integrator = INTEGRATORS[self.integrmode](self.image, self.geometry,
angles, radii, intensities)
# initialize walk along elliptical path
radius = self.geometry.initial_polar_radius
phi = self.geometry.initial_polar_angle
# In case of an area integrator, ask the integrator to deliver a
# hint of how much area the sectors will have. In case of too
# small areas, tests showed that the area integrators (mean,
# median) won't perform properly. In that case, we override the
# caller's selection and use the bilinear integrator regardless.
if integrator.is_area():
integrator.integrate(radius, phi)
area = integrator.get_sector_area()
# this integration that just took place messes up with the
# storage arrays and the constructors. We have to build a new
# integrator instance from scratch, even if it is the same
# kind as originally selected by the caller.
angles = []
radii = []
intensities = []
if area < 1.0:
integrator = INTEGRATORS['bilinear'](
self.image, self.geometry, angles, radii, intensities)
else:
integrator = INTEGRATORS[self.integrmode](self.image,
self.geometry,
angles, radii,
intensities)
# walk along elliptical path, integrating at specified
# places defined by polar vector. Need to go a bit beyond
# full circle to ensure full coverage.
while phi <= np.pi*2. + phi_min:
# do the integration at phi-radius position, and append
# results to the angles, radii, and intensities lists.
integrator.integrate(radius, phi)
# store sector area locally
sector_areas.append(integrator.get_sector_area())
# update total number of points
self.total_points += 1
# update angle and radius to be used to define
# next polar vector along the elliptical path
phistep_ = integrator.get_polar_angle_step()
phi += min(phistep_, 0.5)
radius = self.geometry.radius(phi)
# average sector area is calculated after the integrator had
# the opportunity to step over the entire elliptical path.
self.sector_area = np.mean(np.array(sector_areas))
# apply sigma-clipping.
angles, radii, intensities = self._sigma_clip(angles, radii,
intensities)
# actual number of sampled points, after sigma-clip removed outliers.
self.actual_points = len(angles)
# pack results in 2-d array
result = np.array([np.array(angles), np.array(radii),
np.array(intensities)])
return result
def _sigma_clip(self, angles, radii, intensities):
if self.nclip > 0:
for i in range(self.nclip):
# do not use list.copy()! must be python2-compliant.
angles, radii, intensities = self._iter_sigma_clip(
angles[:], radii[:], intensities[:])
return np.array(angles), np.array(radii), np.array(intensities)
def _iter_sigma_clip(self, angles, radii, intensities):
# Can't use scipy or astropy tools because they use masked arrays.
# Also, they operate on a single array, and we need to operate on
# three arrays simultaneously. We need something that physically
# removes the clipped points from the arrays, since that is what
# the remaining of the `ellipse` code expects.
r_angles = []
r_radii = []
r_intensities = []
values = np.array(intensities)
mean = np.mean(values)
sig = np.std(values)
lower = mean - self.sclip * sig
upper = mean + self.sclip * sig
count = 0
for k in range(len(intensities)):
if intensities[k] >= lower and intensities[k] < upper:
r_angles.append(angles[k])
r_radii.append(radii[k])
r_intensities.append(intensities[k])
count += 1
return r_angles, r_radii, r_intensities
def update(self, fixed_parameters=None):
"""
Update this `~photutils.isophote.EllipseSample` instance.
This method calls the
:meth:`~photutils.isophote.EllipseSample.extract` method to get
the values that match the current ``geometry`` attribute, and
        then computes the mean intensity, local gradient, and other
associated quantities.
"""
if fixed_parameters is None:
fixed_parameters = np.array([False, False, False, False])
self.geometry.fix = fixed_parameters
step = self.geometry.astep
# Update the mean value first, using extraction from main sample.
s = self.extract()
self.mean = np.mean(s[2])
# Get sample with same geometry but at a different distance from
# center. Estimate gradient from there.
gradient, gradient_error = self._get_gradient(step)
# Check for meaningful gradient. If no meaningful gradient, try
# another sample, this time using larger radius. Meaningful
# gradient means something shallower, but still close to within
# a factor 3 from previous gradient estimate. If no previous
# estimate is available, guess it by adding the error to the
# current gradient.
previous_gradient = self.gradient
if not previous_gradient:
previous_gradient = gradient + gradient_error
# solution adopted before 08/12/2019
# previous_gradient = -0.05 # good enough, based on usage
if gradient >= (previous_gradient / 3.): # gradient is negative!
gradient, gradient_error = self._get_gradient(2 * step)
# If still no meaningful gradient can be measured, try with
# previous one, slightly shallower. A factor 0.8 is not too far
# from what is expected from geometrical sampling steps of 10-20%
# and a deVaucouleurs law or an exponential disk (at least at its
# inner parts, r <~ 5 req). Gradient error is meaningless in this
# case.
if gradient >= (previous_gradient / 3.):
gradient = previous_gradient * 0.8
gradient_error = None
self.gradient = gradient
self.gradient_error = gradient_error
if gradient_error and gradient < 0.:
self.gradient_relative_error = gradient_error / np.abs(gradient)
else:
self.gradient_relative_error = None
def _get_gradient(self, step):
gradient_sma = (1. + step) * self.geometry.sma
gradient_sample = EllipseSample(
self.image, gradient_sma, x0=self.geometry.x0,
y0=self.geometry.y0, astep=self.geometry.astep, sclip=self.sclip,
nclip=self.nclip, eps=self.geometry.eps,
position_angle=self.geometry.pa,
linear_growth=self.geometry.linear_growth,
integrmode=self.integrmode)
sg = gradient_sample.extract()
mean_g = np.mean(sg[2])
gradient = (mean_g - self.mean) / self.geometry.sma / step
s = self.extract()
sigma = np.std(s[2])
sigma_g = np.std(sg[2])
gradient_error = (np.sqrt(sigma**2 / len(s[2]) +
sigma_g**2 / len(sg[2])) /
self.geometry.sma / step)
return gradient, gradient_error
def coordinates(self):
"""
Return the (x, y) coordinates associated with each sampled
point.
Returns
-------
x, y : 1D `~numpy.ndarray`
The x and y coordinate arrays.
"""
angles = self.values[0]
radii = self.values[1]
x = np.zeros(len(angles))
y = np.zeros(len(angles))
for i in range(len(x)):
x[i] = (radii[i] * np.cos(angles[i] + self.geometry.pa) +
self.geometry.x0)
y[i] = (radii[i] * np.sin(angles[i] + self.geometry.pa) +
self.geometry.y0)
return x, y
class CentralEllipseSample(EllipseSample):
"""
An `~photutils.isophote.EllipseSample` subclass designed to handle
the special case of the central pixel in the galaxy image.
"""
def update(self, fixed_parameters):
"""
Update this `~photutils.isophote.EllipseSample` instance with
the intensity integrated at the (x0, y0) center position using
bilinear integration. The local gradient is set to `None`.
'fixed_parameters' is ignored in this subclass.
"""
s = self.extract()
self.mean = s[2][0]
self.gradient = None
self.gradient_error = None
self.gradient_relative_error = None
def _extract(self):
angles = []
radii = []
intensities = []
integrator = INTEGRATORS['bilinear'](self.image, self.geometry,
angles, radii, intensities)
integrator.integrate(0.0, 0.0)
self.total_points = 1
self.actual_points = 1
return np.array([np.array(angles), np.array(radii),
np.array(intensities)])
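# Illustrative usage sketch (not part of the original module and never called):
# a minimal driver for EllipseSample, based only on calls visible in this file.
# The constructor arguments mirror those used in _get_gradient, and update() and
# coordinates() are the methods defined above. The image array, center and ellipse
# parameters are arbitrary placeholders, and it is assumed that extract() caches
# its result in ``self.values`` so that coordinates() has data to work with.
def _example_sample_usage():
    image = np.random.random((128, 128))              # placeholder image array
    sample = EllipseSample(image, 20., x0=64., y0=64., eps=0.2,
                           position_angle=0.5, integrmode='bilinear')
    sample.update(fixed_parameters=None)              # extract + mean + gradient
    x, y = sample.coordinates()                       # sampled (x, y) positions
    return sample.mean, sample.gradient, x, y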
|
astropy/photutils
|
photutils/isophote/sample.py
|
Python
|
bsd-3-clause
| 16,048
|
[
"Galaxy"
] |
9504f506c127704df6d11860246fce7811817469e40f98ab987dd9cdc9217456
|
#!/usr/bin/python
#=============================================================================================
# Test MBAR by performing statistical tests on a set of of 1D harmonic oscillators, for which
# the true free energy differences can be computed analytically.
#
# A number of replications of an experiment in which i.i.d. samples are drawn from a set of
# K harmonic oscillators are produced. For each replicate, we estimate the dimensionless free
# energy differences and mean-square displacements (an observable), as well as their uncertainties.
#
# For a 1D harmonic oscillator, the potential is given by
# V(x;K) = (K/2) * (x-x_0)**2
# where K denotes the spring constant.
#
# The equilibrium distribution is given analytically by
# p(x;beta,K) = sqrt[(beta K) / (2 pi)] exp[-beta K (x-x_0)**2 / 2]
# The dimensionless free energy is therefore
# f(beta,K) = - (1/2) * ln[ (2 pi) / (beta K) ]
#
#=============================================================================================
#=============================================================================================
# IMPORTS
#=============================================================================================
from __future__ import print_function
import sys
import numpy
from pymbar import testsystems, EXP, EXPGauss, BAR, MBAR
from pymbar.utils import ParameterError
#=============================================================================================
# HELPER FUNCTIONS
#=============================================================================================
def stddev_away(namex,errorx,dx):
if dx > 0:
print("%s differs by %.3f standard deviations from analytical" % (namex,errorx/dx))
else:
print("%s differs by an undefined number of standard deviations" % (namex))
def GetAnalytical(beta,K,O,observables):
# For a harmonic oscillator with spring constant K,
# x ~ Normal(x_0, sigma^2), where sigma = 1/sqrt(beta K)
# Compute the absolute dimensionless free energies of each oscillator analytically.
# f = - ln(sqrt((2 pi)/(beta K)) )
print('Computing dimensionless free energies analytically...')
sigma = (beta * K)**-0.5
f_k_analytical = - numpy.log(numpy.sqrt(2 * numpy.pi) * sigma )
Delta_f_ij_analytical = numpy.matrix(f_k_analytical) - numpy.matrix(f_k_analytical).transpose()
A_k_analytical = dict()
A_ij_analytical = dict()
for observe in observables:
if observe == 'RMS displacement':
      A_k_analytical[observe] = sigma # RMS displacement (the Gaussian sigma)
if observe == 'potential energy':
A_k_analytical[observe] = 1/(2*beta)*numpy.ones(len(K),float) # By equipartition
if observe == 'position':
A_k_analytical[observe] = O # observable is the position
if observe == 'position^2':
A_k_analytical[observe] = (1+ beta*K*O**2)/(beta*K) # observable is the position^2
A_ij_analytical[observe] = A_k_analytical[observe] - numpy.transpose(numpy.matrix(A_k_analytical[observe]))
return f_k_analytical, Delta_f_ij_analytical, A_k_analytical, A_ij_analytical
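# Illustrative sketch (not called anywhere in this script): a brute-force numerical
# cross-check of the analytical result used above, f(beta,K) = -(1/2) ln[(2 pi)/(beta K)],
# obtained by integrating the Boltzmann factor on a grid. The grid range, spacing and
# tolerance are arbitrary example choices.
def _check_f_analytical(beta=1.0, K=4.0):
    x = numpy.linspace(-10.0, 10.0, 200001)
    Z = numpy.trapz(numpy.exp(-beta * K * x**2 / 2.0), x)  # configurational partition function
    f_numerical = -numpy.log(Z)
    f_analytical = -0.5 * numpy.log(2.0 * numpy.pi / (beta * K))
    return numpy.isclose(f_numerical, f_analytical, atol=1e-5)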
#=============================================================================================
# PARAMETERS
#=============================================================================================
K_k = numpy.array([25, 16, 9, 4, 1, 1]) # spring constants for each state
O_k = numpy.array([0, 1, 2, 3, 4, 5]) # offsets for spring constants
N_k = 10*numpy.array([1000, 1000, 1000, 1000, 0, 1000]) # number of samples from each state (can be zero for some states)
Nk_ne_zero = (N_k!=0)
beta = 1.0 # inverse temperature for all simulations
K_extra = numpy.array([20, 12, 6, 2, 1])
O_extra = numpy.array([ 0.5, 1.5, 2.5, 3.5, 4.5])
observables = ['position','position^2','potential energy','RMS displacement']
seed = None
# Uncomment the following line to seed the random number generated to produce reproducible output.
seed = 0
numpy.random.seed(seed)
#=============================================================================================
# MAIN
#=============================================================================================
# Determine number of simulations.
K = numpy.size(N_k)
if numpy.shape(K_k) != numpy.shape(N_k):
raise ParameterError("K_k (%d) and N_k (%d) must have same dimensions." % (numpy.shape(K_k), numpy.shape(N_k)))
if numpy.shape(O_k) != numpy.shape(N_k):
raise ParameterError("O_k (%d) and N_k (%d) must have same dimensions." % (numpy.shape(K_k), numpy.shape(N_k)))
# Determine maximum number of samples to be drawn for any state.
N_max = numpy.max(N_k)
(f_k_analytical, Delta_f_ij_analytical, A_k_analytical, A_ij_analytical) = GetAnalytical(beta,K_k,O_k,observables)
print("This script will draw samples from %d harmonic oscillators." % (K))
print("The harmonic oscillators have equilibrium positions")
print(O_k)
print("and spring constants")
print(K_k)
print("and the following number of samples will be drawn from each (can be zero if no samples drawn):")
print(N_k)
print("")
#=============================================================================================
# Generate independent data samples from K one-dimensional harmonic oscillators centered at q = 0.
#=============================================================================================
print('generating samples...')
randomsample = testsystems.harmonic_oscillators.HarmonicOscillatorsTestCase(O_k=O_k, K_k=K_k, beta=beta)
[x_kn,u_kln,N_k] = randomsample.sample(N_k,mode='u_kln')
# get the unreduced energies
U_kln = u_kln/beta
#=============================================================================================
# Estimate free energies and expectations.
#=============================================================================================
print("======================================")
print(" Initializing MBAR ")
print("======================================")
# Estimate free energies from simulation using MBAR.
print("Estimating relative free energies from simulation (this may take a while)...")
# Initialize the MBAR class, determining the free energies.
mbar = MBAR(u_kln, N_k, relative_tolerance=1.0e-10, verbose=True)
# Get matrix of dimensionless free energy differences and uncertainty estimate.
print("=============================================")
print(" Testing getFreeEnergyDifferences ")
print("=============================================")
results = mbar.getFreeEnergyDifferences(return_dict=True)
Delta_f_ij_estimated = results['Delta_f']
dDelta_f_ij_estimated = results['dDelta_f']
# Compute error from analytical free energy differences.
Delta_f_ij_error = Delta_f_ij_estimated - Delta_f_ij_analytical
print("Error in free energies is:")
print(Delta_f_ij_error)
print("Uncertainty in free energies is:")
print(dDelta_f_ij_estimated)
print("Standard deviations away is:")
# mathematical manipulation to avoid dividing by zero errors; we don't care
# about the diagonals, since they are identically zero.
df_ij_mod = dDelta_f_ij_estimated + numpy.identity(K)
stdevs = numpy.abs(Delta_f_ij_error/df_ij_mod)
for k in range(K):
stdevs[k,k] = 0
print(stdevs)
print("==============================================")
print(" Testing computeBAR ")
print("==============================================")
nonzero_indices = numpy.array(list(range(K)))[Nk_ne_zero]
Knon = len(nonzero_indices)
for i in range(Knon-1):
k = nonzero_indices[i]
k1 = nonzero_indices[i+1]
w_F = u_kln[k, k1, 0:N_k[k]] - u_kln[k, k, 0:N_k[k]] # forward work
w_R = u_kln[k1, k, 0:N_k[k1]] - u_kln[k1, k1, 0:N_k[k1]] # reverse work
results = BAR(w_F, w_R, return_dict=True)
df_bar = results['Delta_f']
ddf_bar = results['dDelta_f']
bar_analytical = (f_k_analytical[k1]-f_k_analytical[k])
bar_error = bar_analytical - df_bar
print("BAR estimator for reduced free energy from states %d to %d is %f +/- %f" % (k,k1,df_bar,ddf_bar))
stddev_away("BAR estimator",bar_error,ddf_bar)
print("==============================================")
print(" Testing computeEXP ")
print("==============================================")
print("EXP forward free energy")
for k in range(K-1):
if N_k[k] != 0:
w_F = u_kln[k, k+1, 0:N_k[k]] - u_kln[k, k, 0:N_k[k]] # forward work
results = EXP(w_F, return_dict=True)
df_exp = results['Delta_f']
ddf_exp = results['dDelta_f']
exp_analytical = (f_k_analytical[k+1]-f_k_analytical[k])
exp_error = exp_analytical - df_exp
print("df from states %d to %d is %f +/- %f" % (k,k+1,df_exp,ddf_exp))
stddev_away("df",exp_error,ddf_exp)
print("EXP reverse free energy")
for k in range(1,K):
if N_k[k] != 0:
w_R = u_kln[k, k-1, 0:N_k[k]] - u_kln[k, k, 0:N_k[k]] # reverse work
      results = EXP(w_R, return_dict=True)
      df_exp = -results['Delta_f']
      ddf_exp = results['dDelta_f']
exp_analytical = (f_k_analytical[k]-f_k_analytical[k-1])
exp_error = exp_analytical - df_exp
print("df from states %d to %d is %f +/- %f" % (k,k-1,df_exp,ddf_exp))
stddev_away("df",exp_error,ddf_exp)
print("==============================================")
print(" Testing computeGauss ")
print("==============================================")
print("Gaussian forward estimate")
for k in range(K-1):
if N_k[k] != 0:
w_F = u_kln[k, k+1, 0:N_k[k]] - u_kln[k, k, 0:N_k[k]] # forward work
results = EXPGauss(w_F, return_dict=True)
df_gauss = results['Delta_f']
ddf_gauss = results['dDelta_f']
gauss_analytical = (f_k_analytical[k+1]-f_k_analytical[k])
gauss_error = gauss_analytical - df_gauss
print("df for reduced free energy from states %d to %d is %f +/- %f" % (k,k+1,df_gauss,ddf_gauss))
stddev_away("df",gauss_error,ddf_gauss)
print("Gaussian reverse estimate")
for k in range(1,K):
if N_k[k] != 0:
w_R = u_kln[k, k-1, 0:N_k[k]] - u_kln[k, k, 0:N_k[k]] # reverse work
results = EXPGauss(w_R, return_dict=True)
df_gauss = results['Delta_f']
ddf_gauss = results['dDelta_f']
gauss_analytical = (f_k_analytical[k]-f_k_analytical[k-1])
gauss_error = gauss_analytical - df_gauss
print("df for reduced free energy from states %d to %d is %f +/- %f" % (k,k-1,df_gauss,ddf_gauss))
stddev_away("df",gauss_error,ddf_gauss)
print("======================================")
print(" Testing computeExpectations")
print("======================================")
A_kn_all = dict()
A_k_estimated_all = dict()
A_kl_estimated_all = dict()
N = numpy.sum(N_k)
for observe in observables:
print("============================================")
print(" Testing observable %s" % (observe))
print("============================================")
if observe == 'RMS displacement':
state_dependent = True
A_kn = numpy.zeros([K,N], dtype = numpy.float64)
n = 0
for k in range(0,K):
for nk in range(0,N_k[k]):
A_kn[:,n] = (x_kn[k,nk] - O_k[:])**2 # observable is the squared displacement
n += 1
# observable is the potential energy, a 3D array since the potential energy is a function of
# thermodynamic state
elif observe == 'potential energy':
state_dependent = True
A_kn = numpy.zeros([K,N], dtype = numpy.float64)
n = 0
for k in range(0,K):
for nk in range(0,N_k[k]):
A_kn[:,n] = U_kln[k,:,nk]
n += 1
# observable for estimation is the position
elif observe == 'position':
state_dependent = False
A_kn = numpy.zeros([K,N_max], dtype = numpy.float64)
for k in range(0,K):
A_kn[k,0:N_k[k]] = x_kn[k,0:N_k[k]]
# observable for estimation is the position^2
elif observe == 'position^2':
state_dependent = False
A_kn = numpy.zeros([K,N_max], dtype = numpy.float64)
for k in range(0,K):
A_kn[k,0:N_k[k]] = x_kn[k,0:N_k[k]]**2
results = mbar.computeExpectations(A_kn, state_dependent = state_dependent, return_dict=True)
A_k_estimated = results['mu']
dA_k_estimated = results['sigma']
# need to additionally transform to get the square root
if observe == 'RMS displacement':
A_k_estimated = numpy.sqrt(A_k_estimated)
# Compute error from analytical observable estimate.
dA_k_estimated = dA_k_estimated/(2*A_k_estimated)
As_k_estimated = numpy.zeros([K],numpy.float64)
dAs_k_estimated = numpy.zeros([K],numpy.float64)
# 'standard' expectation averages - not defined if no samples
nonzeros = numpy.arange(K)[Nk_ne_zero]
totaln = 0
for k in nonzeros:
if (observe == 'position') or (observe == 'position^2'):
As_k_estimated[k] = numpy.average(A_kn[k,0:N_k[k]])
dAs_k_estimated[k] = numpy.sqrt(numpy.var(A_kn[k,0:N_k[k]])/(N_k[k]-1))
elif (observe == 'RMS displacement' ) or (observe == 'potential energy'):
totalp = totaln + N_k[k]
As_k_estimated[k] = numpy.average(A_kn[k,totaln:totalp])
dAs_k_estimated[k] = numpy.sqrt(numpy.var(A_kn[k,totaln:totalp])/(N_k[k]-1))
totaln = totalp
if observe == 'RMS displacement':
As_k_estimated[k] = numpy.sqrt(As_k_estimated[k])
dAs_k_estimated[k] = dAs_k_estimated[k]/(2*As_k_estimated[k])
A_k_error = A_k_estimated - A_k_analytical[observe]
As_k_error = As_k_estimated - A_k_analytical[observe]
print("------------------------------")
print("Now testing 'averages' mode")
print("------------------------------")
print("Analytical estimator of %s is" % (observe))
print(A_k_analytical[observe])
print("MBAR estimator of the %s is" % (observe))
print(A_k_estimated)
print("MBAR estimators differ by X standard deviations")
stdevs = numpy.abs(A_k_error/dA_k_estimated)
print(stdevs)
print("Standard estimator of %s is (states with samples):" % (observe))
print(As_k_estimated[Nk_ne_zero])
print("Standard estimators differ by X standard deviations (states with samples)")
stdevs = numpy.abs(As_k_error[Nk_ne_zero]/dAs_k_estimated[Nk_ne_zero])
print(stdevs)
results = mbar.computeExpectations(A_kn, state_dependent = state_dependent, output = 'differences', return_dict=True)
A_kl_estimated = results['mu']
dA_kl_estimated = results['sigma']
print("------------------------------")
print("Now testing 'differences' mode")
print("------------------------------")
if 'RMS displacement' != observe: # can't test this, because we're actually computing the expectation of
# the mean square displacement, and so the differences are <a_i^2> - <a_j^2>,
# not sqrt<a_i>^2 - sqrt<a_j>^2
A_kl_analytical = numpy.matrix(A_k_analytical[observe]) - numpy.matrix(A_k_analytical[observe]).transpose()
A_kl_error = A_kl_estimated - A_kl_analytical
print("Analytical estimator of differences of %s is" % (observe))
print(A_kl_analytical)
print("MBAR estimator of the differences of %s is" % (observe))
print(A_kl_estimated)
print("MBAR estimators differ by X standard deviations")
stdevs = numpy.abs(A_kl_error/(dA_kl_estimated+numpy.identity(K)))
for k in range(K):
stdevs[k,k] = 0
print(stdevs)
# save up the A_k for use in computeMultipleExpectations
A_kn_all[observe] = A_kn
A_k_estimated_all[observe] = A_k_estimated
A_kl_estimated_all[observe] = A_kl_estimated
print("=============================================")
print(" Testing computeMultipleExpectations")
print("=============================================")
# have to exclude the potential and RMS displacement for now, not functions of a single state
observables_single = ['position','position^2']
A_ikn = numpy.zeros([len(observables_single), K, N_k.max()], numpy.float64)
for i,observe in enumerate(observables_single):
A_ikn[i,:,:] = A_kn_all[observe]
for i in range(K):
results = mbar.computeMultipleExpectations(A_ikn, u_kln[:,i,:], compute_covariance=True, return_dict=True)
A_i = results['mu']
dA_ij = results['sigma']
Ca_ij = results['covariances']
print("Averages for state %d" % (i))
print(A_i)
print("Uncertainties for state %d" % (i))
print(dA_ij)
print("Correlation matrix between observables for state %d" % (i))
print(Ca_ij)
print("============================================")
print(" Testing computeEntropyAndEnthalpy")
print("============================================")
results = mbar.computeEntropyAndEnthalpy(u_kn = u_kln, verbose = True, return_dict=True)
Delta_f_ij = results['Delta_f']
dDelta_f_ij = results['dDelta_f']
Delta_u_ij = results['Delta_u']
dDelta_u_ij = results['dDelta_u']
Delta_s_ij = results['Delta_s']
dDelta_s_ij = results['dDelta_s']
print("Free energies")
print(Delta_f_ij)
print(dDelta_f_ij)
diffs1 = Delta_f_ij - Delta_f_ij_estimated
print("maximum difference between values computed here and in computeFreeEnergies is %g" % (numpy.max(diffs1)))
if (numpy.max(numpy.abs(diffs1)) > 1.0e-10):
print("Difference in values from computeFreeEnergies")
print(diffs1)
diffs2 = dDelta_f_ij - dDelta_f_ij_estimated
print("maximum difference between uncertainties computed here and in computeFreeEnergies is %g" % (numpy.max(diffs2)))
if (numpy.max(numpy.abs(diffs2)) > 1.0e-10):
print("Difference in expectations from computeFreeEnergies")
print(diffs2)
print("Energies")
print(Delta_u_ij)
print(dDelta_u_ij)
U_k = numpy.matrix(A_k_estimated_all['potential energy'])
expectations = U_k - U_k.transpose()
diffs1 = Delta_u_ij - expectations
print("maximum difference between values computed here and in computeExpectations is %g" % (numpy.max(diffs1)))
if (numpy.max(numpy.abs(diffs1)) > 1.0e-10):
print("Difference in values from computeExpectations")
print(diffs1)
print("Entropies")
print(Delta_s_ij)
print(dDelta_s_ij)
#analytical entropy estimate
s_k_analytical = numpy.matrix(0.5 / beta - f_k_analytical)
Delta_s_ij_analytical = s_k_analytical - s_k_analytical.transpose()
Delta_s_ij_error = Delta_s_ij_analytical - Delta_s_ij
print("Error in entropies is:")
print(Delta_s_ij_error)
print("Standard deviations away is:")
# mathematical manipulation to avoid dividing by zero errors; we don't care
# about the diagonals, since they are identically zero.
ds_ij_mod = dDelta_s_ij + numpy.identity(K)
stdevs = numpy.abs(Delta_s_ij_error/ds_ij_mod)
for k in range(K):
stdevs[k,k] = 0
print(stdevs)
print("============================================")
print(" Testing computePerturbedFreeEnergies")
print("============================================")
L = numpy.size(K_extra)
(f_k_analytical, Delta_f_ij_analytical, A_k_analytical, A_ij_analytical) = GetAnalytical(beta,K_extra,O_extra,observables)
if numpy.size(O_extra) != numpy.size(K_extra):
raise ParameterError("O_extra (%d) and K_extra (%d) must have the same dimensions." % (numpy.shape(K_k), numpy.shape(N_k)))
unew_kln = numpy.zeros([K,L,numpy.max(N_k)],numpy.float64)
for k in range(K):
for l in range(L):
unew_kln[k,l,0:N_k[k]] = (K_extra[l]/2.0) * (x_kn[k,0:N_k[k]]-O_extra[l])**2
results = mbar.computePerturbedFreeEnergies(unew_kln, return_dict=True)
Delta_f_ij_estimated = results['Delta_f']
dDelta_f_ij_estimated = results['dDelta_f']
Delta_f_ij_error = Delta_f_ij_estimated - Delta_f_ij_analytical
print("Error in free energies is:")
print(Delta_f_ij_error)
print("Standard deviations away is:")
# mathematical manipulation to avoid dividing by zero errors; we don't care
# about the diagonals, since they are identically zero.
df_ij_mod = dDelta_f_ij_estimated + numpy.identity(L)
stdevs = numpy.abs(Delta_f_ij_error/df_ij_mod)
for l in range(L):
stdevs[l,l] = 0
print(stdevs)
print("============================================")
print(" Testing computeExpectation (new states) ")
print("============================================")
nth = 3
# test the nth "extra" states, O_extra[nth] & K_extra[nth]
for observe in observables:
print("============================================")
print(" Testing observable %s" % (observe))
print("============================================")
if observe == 'RMS displacement':
state_dependent = True
A_kn = numpy.zeros([K,1,N_max], dtype = numpy.float64)
for k in range(0,K):
A_kn[k,0,0:N_k[k]] = (x_kn[k,0:N_k[k]] - O_extra[nth])**2 # observable is the squared displacement
# observable is the potential energy, a 3D array since the potential energy is a function of
# thermodynamic state
elif observe == 'potential energy':
state_dependent = True
A_kn = unew_kln[:,[nth],:]/beta
# position and position^2 can use the same observables
# observable for estimation is the position
elif observe == 'position':
state_dependent = False
A_kn = A_kn_all['position']
elif observe == 'position^2':
state_dependent = False
A_kn = A_kn_all['position^2']
results = mbar.computeExpectations(A_kn,unew_kln[:,[nth],:],state_dependent=state_dependent, return_dict=True)
A_k_estimated = results['mu']
dA_k_estimated = results['sigma']
# need to additionally transform to get the square root
if observe == 'RMS displacement':
A_k_estimated = numpy.sqrt(A_k_estimated)
dA_k_estimated = dA_k_estimated/(2*A_k_estimated)
A_k_error = A_k_estimated - A_k_analytical[observe][nth]
print("Analytical estimator of %s is" % (observe))
print(A_k_analytical[observe][nth])
print("MBAR estimator of the %s is" % (observe))
print(A_k_estimated)
print("MBAR estimators differ by X standard deviations")
stdevs = numpy.abs(A_k_error/dA_k_estimated)
print(stdevs)
print("============================================")
print(" Testing computeOverlap ")
print("============================================")
results = mbar.computeOverlap(return_dict=True)
O = results['scalar']
O_i = results['eigenvalues']
O_ij = results['matrix']
print("Overlap matrix output")
print(O_ij)
for k in range(K):
print("Sum of row %d is %f (should be 1)," % (k,numpy.sum(O_ij[k,:])), end=' ')
if (numpy.abs(numpy.sum(O_ij[k,:])-1)<1.0e-10):
print("looks like it is.")
else:
print("but it's not.")
print("Overlap eigenvalue output")
print(O_i)
print("Overlap scalar output")
print(O)
print("============================================")
print(" Testing computeEffectiveSampleNumber ")
print("============================================")
N_eff = mbar.computeEffectiveSampleNumber(verbose = True)
print("Effective Sample number")
print(N_eff)
print("Compare stanadrd estimate of <x> with the MBAR estimate of <x>")
print("We should have that with MBAR, err_MBAR = sqrt(N_k/N_eff)*err_standard,")
print("so standard (scaled) results should be very close to MBAR results.")
print("No standard estimate exists for states that are not sampled.")
A_kn = x_kn
results = mbar.computeExpectations(A_kn, return_dict=True)
val_mbar = results['mu']
err_mbar = results['sigma']
err_standard = numpy.zeros([K],dtype = numpy.float64)
err_scaled = numpy.zeros([K],dtype = numpy.float64)
for k in range(K):
if N_k[k] != 0:
# use position
err_standard[k] = numpy.std(A_kn[k,0:N_k[k]])/numpy.sqrt(N_k[k]-1)
err_scaled[k] = numpy.std(A_kn[k,0:N_k[k]])/numpy.sqrt(N_eff[k]-1)
print(" ", end=' ')
for k in range(K):
print(" %d " %(k), end=' ')
print("")
print("MBAR :", end=' ')
print(err_mbar)
print("standard :", end=' ')
print(err_standard)
print("sqrt N_k/N_eff :", end=' ')
print(numpy.sqrt(N_k/N_eff))
print("Standard (scaled):", end=' ')
print(err_standard * numpy.sqrt(N_k/N_eff))
print("============================================")
print(" Testing computePMF ")
print("============================================")
# For 2-D, The equilibrium distribution is given analytically by
# p(x;beta,K) = sqrt[(beta K) / (2 pi)] exp[-beta K [(x-mu)^2] / 2]
#
# The dimensionless free energy is therefore
# f(beta,K) = - (1/2) * ln[ (2 pi) / (beta K) ]
#
# In this problem, we are investigating the sum of two Gaussians, once
# centered at 0, and others centered at grid points.
#
# V(x;K) = (K0/2) * [(x-x_0)^2]
#
# For 1-D, The equilibrium distribution is given analytically by
# p(x;beta,K) = 1/N exp[-beta (K0 [x^2] / 2 + KU [(x-mu)^2] / 2)]
# Where N is the normalization constant.
#
# The dimensionless free energy is the integral of this, and can be computed as:
# f(beta,K) = - ln [ (2*numpy.pi/(Ko+Ku))^(d/2) exp[ -Ku*Ko mu' mu / 2(Ko +Ku)]
# f(beta,K) - fzero = -Ku*Ko / 2(Ko+Ku) = 1/(1/(Ku/2) + 1/(K0/2))
def generate_pmf_data(ndim=1, nbinsperdim=15, nsamples = 1000, K0=20.0, Ku = 100.0, gridscale=0.2, xrange = [[-3,3]]):
x0 = numpy.zeros([ndim], numpy.float64) # center of base potential
numbrellas = 1
nperdim = numpy.zeros([ndim],int)
for d in range(ndim):
nperdim[d] = xrange[d][1] - xrange[d][0] + 1
numbrellas *= nperdim[d]
print("There are a total of %d umbrellas." % numbrellas)
# Enumerate umbrella centers, and compute the analytical free energy of that umbrella
print("Constructing umbrellas...")
ksum = (Ku+K0)/beta
kprod = (Ku*K0)/(beta*beta)
f_k_analytical = numpy.zeros(numbrellas, numpy.float64);
xu_i = numpy.zeros([numbrellas, ndim], numpy.float64) # xu_i[i,:] is the center of umbrella i
dp = numpy.zeros(ndim,int)
dp[0] = 1
for d in range(1,ndim):
dp[d] = nperdim[d]*dp[d-1]
umbrella_zero = 0
for i in range(numbrellas):
center = []
for d in range(ndim):
val = gridscale*((int(i//dp[d])) % nperdim[d] + xrange[d][0])
center.append(val)
center = numpy.array(center)
xu_i[i,:] = center
mu2 = numpy.dot(center,center)
f_k_analytical[i] = numpy.log((ndim*numpy.pi/ksum)**(3.0/2.0) *numpy.exp(-kprod*mu2/(2.0*ksum)))
if numpy.all(center==0.0): # assumes that we have one state that is at the zero.
umbrella_zero = i
i += 1
f_k_analytical -= f_k_analytical[umbrella_zero]
print("Generating %d samples for each of %d umbrellas..." % (nsamples, numbrellas))
x_n = numpy.zeros([numbrellas * nsamples, ndim], numpy.float64)
for i in range(numbrellas):
for dim in range(ndim):
# Compute mu and sigma for this dimension for sampling from V0(x) + Vu(x).
# Product of Gaussians: N(x ; a, A) N(x ; b, B) = N(a ; b , A+B) x N(x ; c, C) where
# C = 1/(1/A + 1/B)
# c = C(a/A+b/B)
# A = 1/K0, B = 1/Ku
sigma = 1.0 / (K0 + Ku)
mu = sigma * (x0[dim]*K0 + xu_i[i,dim]*Ku)
# Generate normal deviates for this dimension.
x_n[i*nsamples:(i+1)*nsamples,dim] = numpy.random.normal(mu, numpy.sqrt(sigma), [nsamples])
u_kn = numpy.zeros([numbrellas, nsamples*numbrellas], numpy.float64)
# Compute reduced potential due to V0.
u_n = beta*(K0/2)*numpy.sum((x_n[:,:] - x0)**2, axis=1)
for k in range(numbrellas):
uu = beta*(Ku/2)*numpy.sum((x_n[:,:] - xu_i[k,:])**2, axis=1) # reduced potential due to umbrella k
u_kn[k,:] = u_n + uu
return u_kn, u_n, x_n, f_k_analytical
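# Illustrative sketch (not called anywhere in this script): a direct 1D check of the
# analytical free energy quoted in the comments above for a single umbrella centered
# at mu, f = -ln[ sqrt(2 pi/(beta (K0+Ku))) exp(-beta Ku K0 mu^2 / (2 (K0+Ku))) ],
# obtained by numerically integrating exp(-beta (V0+Vu)). The grid limits, spacing
# and tolerance are arbitrary example choices.
def _check_umbrella_free_energy(K0=20.0, Ku=100.0, mu=0.4, beta=1.0):
    x = numpy.linspace(-5.0, 5.0, 400001)
    V = 0.5 * K0 * x**2 + 0.5 * Ku * (x - mu)**2   # base potential plus umbrella
    Z = numpy.trapz(numpy.exp(-beta * V), x)
    f_numerical = -numpy.log(Z)
    ksum = K0 + Ku
    f_analytical = -numpy.log(numpy.sqrt(2.0 * numpy.pi / (beta * ksum))
                              * numpy.exp(-beta * Ku * K0 * mu**2 / (2.0 * ksum)))
    return numpy.isclose(f_numerical, f_analytical, atol=1e-5)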
nbinsperdim = 15
gridscale = 0.2
nsamples = 1000
ndim = 1
K0 = 20.0
Ku = 100.0
print("============================================")
print(" Test 1: 1D PMF ")
print("============================================")
xrange = [[-3,3]]
ndim = 1
u_kn, u_n, x_n, f_k_analytical = generate_pmf_data(K0 = K0, Ku = Ku, ndim=ndim, nbinsperdim = nbinsperdim, nsamples = nsamples, gridscale = gridscale, xrange=xrange)
numbrellas = (numpy.shape(u_kn))[0]
N_k = nsamples*numpy.ones([numbrellas], int)
print("Solving for free energies of state ...")
mbar = MBAR(u_kn, N_k)
# Histogram bins are indexed using the scheme:
# index = 1 + numpy.floor((x[0] - xmin)/dx) + nbins*numpy.floor((x[1] - xmin)/dy)
# index = 0 is reserved for samples outside of the allowed domain
xmin = gridscale*(numpy.min(xrange[0][0])-1/2.0)
xmax = gridscale*(numpy.max(xrange[0][1])+1/2.0)
dx = (xmax-xmin)/nbinsperdim
nbins = 1 + nbinsperdim**ndim
bin_centers = numpy.zeros([nbins,ndim],numpy.float64)
ibin = 1
pmf_analytical = numpy.zeros([nbins],numpy.float64)
minmu2 = 1000000
zeroindex = 0
# construct the bins and the pmf
for i in range(nbinsperdim):
xbin = xmin + dx * (i + 0.5)
bin_centers[ibin,0] = xbin
mu2 = xbin*xbin
if (mu2 < minmu2):
minmu2 = mu2
zeroindex = ibin
pmf_analytical[ibin] = K0*mu2/2.0
ibin += 1
fzero = pmf_analytical[zeroindex]
pmf_analytical -= fzero
pmf_analytical[0] = 0
bin_n = numpy.zeros([numbrellas*nsamples], int)
# Determine indices of those within bounds.
within_bounds = (x_n[:,0] >= xmin) & (x_n[:,0] < xmax)
# Determine states for these.
bin_n[within_bounds] = 1 + numpy.floor((x_n[within_bounds,0]-xmin)/dx)
# Determine indices of bins that are not empty.
bin_counts = numpy.zeros([nbins], int)
for i in range(nbins):
bin_counts[i] = (bin_n == i).sum()
# Compute PMF.
print("Computing PMF ...")
results = mbar.computePMF(u_n, bin_n, nbins, uncertainties = 'from-specified', pmf_reference = zeroindex, return_dict=True)
f_i = results['f_i']
df_i = results['df_i']
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("1D PMF:")
print("%d counts out of %d counts not in any bin" % (bin_counts[0],numbrellas*nsamples))
print("%8s %6s %8s %10s %10s %10s %10s %8s" % ('bin', 'x', 'N', 'f', 'true','error','df','sigmas'))
for i in range(1,nbins):
if (i == zeroindex):
    error = 0
    stdevs = 0
    df_i[i] = 0
else:
error = pmf_analytical[i]-f_i[i]
stdevs = numpy.abs(error)/df_i[i]
print('%8d %6.2f %8d %10.3f %10.3f %10.3f %10.3f %8.2f' % (i, bin_centers[i,0], bin_counts[i], f_i[i], pmf_analytical[i], error, df_i[i], stdevs))
print("============================================")
print(" Test 2: 2D PMF ")
print("============================================")
xrange = [[-3,3],[-3,3]]
ndim = 2
nsamples = 300
u_kn, u_n, x_n, f_k_analytical = generate_pmf_data(K0 = K0, Ku = Ku, ndim=ndim, nbinsperdim = nbinsperdim, nsamples = nsamples, gridscale = gridscale, xrange=xrange)
numbrellas = (numpy.shape(u_kn))[0]
N_k = nsamples*numpy.ones([numbrellas], int)
print("Solving for free energies of state ...")
mbar = MBAR(u_kn, N_k)
# The dimensionless free energy is the integral of this, and can be computed as:
# f(beta,K) = - ln [ (2*numpy.pi/(Ko+Ku))^(d/2) exp[ -Ku*Ko mu' mu / 2(Ko +Ku)]
# f(beta,K) - fzero = -Ku*Ko / 2(Ko+Ku) = 1/(1/(Ku/2) + 1/(K0/2))
# for computing harmonic samples
#Can compare the free energies computed with MBAR if desired with f_k_analytical
# Histogram bins are indexed using the scheme:
# index = 1 + numpy.floor((x[0] - xmin)/dx) + nbins*numpy.floor((x[1] - xmin)/dy)
# index = 0 is reserved for samples outside of the allowed domain
xmin = gridscale*(numpy.min(xrange[0][0])-1/2.0)
xmax = gridscale*(numpy.max(xrange[0][1])+1/2.0)
ymin = gridscale*(numpy.min(xrange[1][0])-1/2.0)
ymax = gridscale*(numpy.max(xrange[1][1])+1/2.0)
dx = (xmax-xmin)/nbinsperdim
dy = (ymax-ymin)/nbinsperdim
nbins = 1 + nbinsperdim**ndim
bin_centers = numpy.zeros([nbins,ndim],numpy.float64)
ibin = 1 # first reserved for something outside.
pmf_analytical = numpy.zeros([nbins],numpy.float64)
minmu2 = 1000000
zeroindex = 0
# construct the bins and the pmf
for i in range(nbinsperdim):
xbin = xmin + dx * (i + 0.5)
for j in range(nbinsperdim):
# Determine (x,y) of bin center.
ybin = ymin + dy * (j + 0.5)
bin_centers[ibin,0] = xbin
bin_centers[ibin,1] = ybin
mu2 = xbin*xbin+ybin*ybin
if (mu2 < minmu2):
minmu2 = mu2
zeroindex = ibin
pmf_analytical[ibin] = K0*mu2/2.0
ibin += 1
fzero = pmf_analytical[zeroindex]
pmf_analytical -= fzero
pmf_analytical[0] = 0
bin_n = numpy.zeros([numbrellas * nsamples], int)
# Determine indices of those within bounds.
within_bounds = (x_n[:,0] >= xmin) & (x_n[:,0] < xmax) & (x_n[:,1] >= ymin) & (x_n[:,1] < ymax)
# Determine states for these.
xgrid = (x_n[within_bounds,0]-xmin)/dx
ygrid = (x_n[within_bounds,1]-ymin)/dy
bin_n[within_bounds] = 1 + xgrid.astype(int) + nbinsperdim*ygrid.astype(int)
# Determine indices of bins that are not empty.
bin_counts = numpy.zeros([nbins], int)
for i in range(nbins):
bin_counts[i] = (bin_n == i).sum()
# Compute PMF.
print("Computing PMF ...")
results = mbar.computePMF(u_n, bin_n, nbins, uncertainties = 'from-specified', pmf_reference = zeroindex, return_dict=True)
f_i = results['f_i']
df_i = results['df_i']
# Show free energy and uncertainty of each occupied bin relative to lowest free energy
print("2D PMF:")
print("%d counts out of %d counts not in any bin" % (bin_counts[0],numbrellas*nsamples))
print("%8s %6s %6s %8s %10s %10s %10s %10s %8s" % ('bin', 'x', 'y', 'N', 'f', 'true','error','df','sigmas'))
for i in range(1,nbins):
if (i == zeroindex):
    error = 0
    stdevs = 0
    df_i[i] = 0
else:
error = pmf_analytical[i]-f_i[i]
stdevs = numpy.abs(error)/df_i[i]
print('%8d %6.2f %6.2f %8d %10.3f %10.3f %10.3f %10.3f %8.2f' % (i, bin_centers[i,0], bin_centers[i,1] , bin_counts[i], f_i[i], pmf_analytical[i], error, df_i[i], stdevs))
#=============================================================================================
# TERMINATE
#=============================================================================================
# Signal successful execution.
sys.exit(0)
|
choderalab/pymbar
|
examples/harmonic-oscillators/harmonic-oscillators.py
|
Python
|
mit
| 33,579
|
[
"Gaussian"
] |
1431f1bd479ff02ce1a06e7b3b51f1561ca05d2a31c7a98b5221172543321398
|
#!/usr/bin/python3
########################################################################
# #
# Cyprium is a multifunction cryptographic, steganographic and #
# cryptanalysis tool developped by members of The Hackademy. #
# French White Hat Hackers Community! #
# www.thehackademy.fr #
# Copyright © 2012 #
# Authors: SAKAROV, Madhatter, mont29, Luxerails, PauseKawa, fred, #
# afranck64, Tyrtamos. #
# Contact: cyprium@thehackademy.fr, sakarov@thehackademy.fr, #
# madhatter@thehackademy.fr, mont29@thehackademy.fr, #
# irc.thehackademy.fr #cyprium, irc.thehackademy.fr #hackademy #
# #
# Cyprium is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but without any warranty; without even the implied warranty of #
# merchantability or fitness for a particular purpose. See the #
# GNU General Public License for more details. #
# #
# The terms of the GNU General Public License is detailed in the #
# COPYING attached file. If not, see : http://www.gnu.org/licenses #
# #
########################################################################
import sys
import os
# In case we directly run that file, we need to add the kernel to path,
# to get access to generic stuff in kernel.utils!
if __name__ == '__main__':
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
"..", "..", "..")))
import kernel.utils as utils
__version__ = "0.6.1"
__date__ = "2012/02/02"
__python__ = "3.x" # Required Python version
__about__ = "" \
"""===== About Octopus =====
Octopus is a simple numbers/text converter. It allows you to cypher and
decypher text to/from binary, octal, decimal or hexadecimal.
It can also cut the output into bytes separated by spaces.
You can use special characters and accents, if you specify a compatible
encoding (e.g. default one, utf-8).
You can also choose ASCII 7bit to get binary encoded over 7 bits instead of 8.
Note: when decyphering, most of the time you can let Octopus auto-detect which
base to use. However, sometimes this will fail (e.g. a decimal-cyphered text
with no '8' nor '9' will be detected as octal…).
Cyprium.Octopus version {} ({}).
Licence GPL3
Software distributed on the site: http://thehackademy.fr
Current execution context:
Operating System: {}
Python version: {}
""".format(__version__, __date__, utils.__pf__, utils.__pytver__)
ASCII7 = utils.ASCII7
ASCII = utils.ASCII
DEFAULT = utils.UTF8
EBCDIC = utils.EBCDIC
N_DIGITS = {2: 8, 8: 3, 10: 3, 16: 2}
D_ALLOWED = utils.BASE_DIGITS_ALLOWED
def do_cypher(text, codec=DEFAULT, bases=(2,), sep=""):
"""
Function to convert some text to binary/octal/decimal/hexadecimal text.
"""
# Create a dict mapping all chars to their binary representation,
# in the given codec (might be more than one byte!).
n_digits = {k: v for k, v in N_DIGITS.items()}
if codec == ASCII7:
codec = ASCII
n_digits[2] = 7
ret = []
MAP = dict.fromkeys(text)
if 2 in bases:
for c in MAP:
b = c.encode(codec)
MAP[c] = (sep.join(("{{:0>{}b}}".format(n_digits[2]),) *
len(b))).format(*b)
ret.append(sep.join((MAP[c] for c in text)))
if 8 in bases:
for c in MAP:
b = c.encode(codec)
MAP[c] = (sep.join(("{{:0>{}o}}".format(n_digits[8]),) *
len(b))).format(*b)
ret.append(sep.join((MAP[c] for c in text)))
if 10 in bases:
for c in MAP:
b = c.encode(codec)
MAP[c] = (sep.join(("{{:0>{}d}}".format(n_digits[10]),) *
len(b))).format(*b)
ret.append(sep.join((MAP[c] for c in text)))
if 16 in bases:
for c in MAP:
b = c.encode(codec)
MAP[c] = (sep.join(("{{:0>{}X}}".format(n_digits[16]),) *
len(b))).format(*b)
ret.append(sep.join((MAP[c] for c in text)))
return ret
def cypher(text, codec=DEFAULT, bases=(2,), sep=""):
"""Just a wrapper around do_cypher, with some checks."""
# Check that text can be encoded with that codec.
chars = set(text)
try:
cdc = codec
if cdc == ASCII7:
cdc = ASCII
"".join(chars).encode(cdc)
except Exception as e:
raise ValueError("The text could not be cyphered into given '{}' "
"encoding ({})".format(cdc, str(e)))
# Check for valid bases.
b_data = set(bases)
b_allowed = set(N_DIGITS.keys())
if not (b_data <= b_allowed):
raise ValueError("Only {} bases are allowed, no '{}'!"
.format(sorted(N_DIGITS.keys()),
"', '".join(b_data - b_allowed)))
return do_cypher(text, codec, bases, sep)
def do_decypher(text, codec=DEFAULT, base=2):
"""
Function to convert binary/octal/decimal/hexadecimal text into text.
Note: expect "unspaced" text as input!
"""
n_digits = {k: v for k, v in N_DIGITS.items()}
if codec == ASCII7:
codec = ASCII
n_digits[2] = 7
if base != 16:
ints = (int(''.join(p), base)
for p in utils.grouper(text, n_digits[base], ''))
byts = utils.int8_to_bytes(ints)
else:
byts = bytes.fromhex(text)
return byts.decode(codec)
def decypher(text, codec=DEFAULT, base=None):
"""Just a wrapper around do_decypher, with some checks."""
if base and base not in N_DIGITS:
raise ValueError("Invalid base value ({})!.".format(base))
# Test length (*without* the spaces!).
text = text.replace(' ', '')
c_data = set(text)
base_names = {2: "binary", 8: "octal", 10: "decimal", 16: "hexadecimal"}
n_digits = {k: v for k, v in N_DIGITS.items()}
if codec == ASCII7:
n_digits[2] = 7
if base is None:
base = utils.base_autodetect(text, n_digits,
sorted(n_digits.keys(), reverse=True))
if len(text) % n_digits[base] != 0:
raise ValueError("No integer number of bytes, please add some "
"digits, to get a total length multiple of {}."
"".format(n_digits[base]))
# Get allowed digits.
c_allowed = utils.get_allowed_digits(base)
if not (c_data <= c_allowed):
raise ValueError("Only {} digits and spaces are allowed, no '{}'!"
.format(base_names[base],
"', '".join(sorted(c_data - c_allowed))))
return do_decypher(text, codec, base)
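# Illustrative sketch (never called by this module): a cypher/decypher round-trip
# through the two wrappers above, assuming the default UTF-8 codec. cypher()
# returns one output string per requested base, in the order the bases were given;
# the sample text and base below are arbitrary example values.
def _example_roundtrip(text="Hack", base=16):
    cyphered = cypher(text, codec=DEFAULT, bases=(base,), sep="")[0]
    return decypher(cyphered, codec=DEFAULT, base=base) == text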
def main():
# The argparse is much nicer than directly using sys.argv...
# Try 'program.py -h' to see! ;)
# Helper func.
_bases = {'b': 2, 'o': 8, 'd': 10, 'x': 16}
def _2ibase(b):
return _bases.get(b, None)
import argparse
parser = argparse.ArgumentParser(description="Cypher/decypher some text "
"in binary/octal/decimal/"
"hexadecimal form.")
parser.add_argument('--debug', action="store_true", default=False,
help="Enable debug mode.")
sparsers = parser.add_subparsers(dest="command")
cparser = sparsers.add_parser('cypher', help="Cypher data in binary/octal/"
"decimal/hexadecimal.")
cparser.add_argument('-i', '--ifile', type=argparse.FileType('r'),
help="A file containing the text to cypher.")
cparser.add_argument('-o', '--ofile', type=argparse.FileType('w'),
help="A file into which write the cyphered text.")
cparser.add_argument('-d', '--data',
help="The text to cypher.")
cparser.add_argument('-c', '--codec', default=DEFAULT,
help="The codec to use for cyphering.")
cparser.add_argument('-a7', '--ascii7', action="store_true",
help="Use ASCII codec for cyphering, and output "
"7-bits “bytes” in binary (overrides --codec).")
cparser.add_argument('-b', '--bases', nargs="*", type=_2ibase,
choices=_bases.values(), default=(2,),
help="In which base(s) ouput the cyphered text "
"([b]inary, [o]ctal, [d]ecimal, he[x]adecimal, "
"default to binary if none chosen).")
dparser = sparsers.add_parser('decypher',
help="Decypher binary to text.")
dparser.add_argument('-i', '--ifile', type=argparse.FileType('r'),
help="A file containing the text to decypher.")
dparser.add_argument('-o', '--ofile', type=argparse.FileType('w'),
help="A file into which write the decyphered text.")
dparser.add_argument('-d', '--data', help="The text to decypher.")
dparser.add_argument('-c', '--codec', default=DEFAULT,
help="The codec to use for decyphering.")
dparser.add_argument('-a7', '--ascii7', action="store_true",
help="Use ASCII codec for decyphering, assuming "
"7-bits “bytes” (overrides --codec).")
dparser.add_argument('-b', '--base', type=_2ibase,
choices=_bases.values(), default=None,
help="In which base(s) ouput the cyphered text "
"([b]inary, [o]ctal, [d]ecimal, he[x]adecimal, "
"default for auto-detection).")
sparsers.add_parser('about', help="About Octopus…")
args = parser.parse_args()
utils.DEBUG = args.debug
if args.command == "cypher":
try:
data = args.data
if args.ifile:
data = args.ifile.read()
if args.ascii7:
args.codec = ASCII7
out = cypher(data, args.codec, args.bases)
out = "\n".join(out)
if args.ofile:
args.ofile.write(out)
else:
print(out)
except Exception as e:
if utils.DEBUG:
raise e
print(e, "\n\n")
finally:
if args.ifile:
args.ifile.close()
if args.ofile:
args.ofile.close()
return
elif args.command == "decypher":
try:
data = args.data
if args.ifile:
data = args.ifile.read()
if args.ascii7:
args.codec = ASCII7
out = decypher(data, args.codec, args.base)
if args.ofile:
args.ofile.write(out)
else:
print(out)
except Exception as e:
if utils.DEBUG:
raise e
print(e, "\n\n")
finally:
if args.ifile:
args.ifile.close()
if args.ofile:
args.ofile.close()
return
elif args.command == "about":
print(__about__)
return
if __name__ == "__main__":
main()
|
underloki/Cyprium
|
kernel/crypto/text/octopus.py
|
Python
|
gpl-3.0
| 12,134
|
[
"Octopus"
] |
eef7a64a32c93960828a1f6e427452f8bc6c706a958a5c3674ffc711190c813d
|
"""
network.py
Defines Network class which contains cell objects and network-realated methods
Contributors: salvadordura@gmail.com
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from ..specs import ODict
from neuron import h # import NEURON
class Network (object):
# -----------------------------------------------------------------------------
# initialize variables
# -----------------------------------------------------------------------------
def __init__ (self, params = None):
self.params = params
# params that can be expressed using string-based functions in connections
self.connStringFuncParams = ['weight', 'delay', 'synsPerConn', 'loc']
# params that can be expressed using string-based functions in stims
self.stimStringFuncParams = ['delay', 'dur', 'amp', 'gain', 'rstim', 'tau1', 'tau2',
'onset', 'tau', 'gmax', 'e', 'i', 'interval', 'rate', 'number', 'start', 'noise']
# list of h.Random() methods allowed in string-based functions (both for conns and stims)
self.stringFuncRandMethods = ['binomial', 'discunif', 'erlang', 'geometric', 'hypergeo',
'lognormal', 'negexp', 'normal', 'poisson', 'uniform', 'weibull']
self.rand = h.Random() # random number generator
self.pops = ODict() # list to store populations ('Pop' objects)
self.cells = [] # list to store cells ('Cell' objects)
self.gid2lid = {} # Empty dict for storing GID -> local index (key = gid; value = local id) -- ~x6 faster than .index()
self.lastGid = 0 # keep track of last cell gid
self.lastGapId = 0 # keep track of last gap junction gid
# -----------------------------------------------------------------------------
# Set network params
# -----------------------------------------------------------------------------
def setParams (self, params):
self.params = params
# -----------------------------------------------------------------------------
# Instantiate network populations (objects of class 'Pop')
# -----------------------------------------------------------------------------
def createPops (self):
from .. import sim
for popLabel, popParam in self.params.popParams.items(): # for each set of population paramseters
self.pops[popLabel] = sim.Pop(popLabel, popParam) # instantiate a new object of class Pop and add to list pop
return self.pops
# -----------------------------------------------------------------------------
# Create Cells
# -----------------------------------------------------------------------------
def createCells (self):
from .. import sim
sim.pc.barrier()
sim.timing('start', 'createTime')
if sim.rank==0:
print(("\nCreating network of %i cell populations on %i hosts..." % (len(self.pops), sim.nhosts)))
for ipop in list(self.pops.values()): # For each pop instantiate the network cells (objects of class 'Cell')
newCells = ipop.createCells() # create cells for this pop using Pop method
self.cells.extend(newCells) # add to list of cells
sim.pc.barrier()
if sim.rank==0 and sim.cfg.verbose: print(('Instantiated %d cells of population %s'%(len(newCells), ipop.tags['pop'])))
if self.params.defineCellShapes: self.defineCellShapes()
print((' Number of cells on node %i: %i ' % (sim.rank,len(self.cells))))
sim.pc.barrier()
sim.timing('stop', 'createTime')
if sim.rank == 0 and sim.cfg.timing: print((' Done; cell creation time = %0.2f s.' % sim.timingData['createTime']))
return self.cells
# -----------------------------------------------------------------------------
# Import stim methods
# -----------------------------------------------------------------------------
from .stim import addStims, _addCellStim, _stimStrToFunc
# -----------------------------------------------------------------------------
# Import conn methods
# -----------------------------------------------------------------------------
from .conn import connectCells, _findPrePostCellsCondition, _connStrToFunc, \
fullConn, generateRandsPrePost, probConn, randUniqueInt, convConn, divConn, fromListConn, \
_addCellConn, _disynapticBiasProb, _disynapticBiasProb2
# -----------------------------------------------------------------------------
# Import subconn methods
# -----------------------------------------------------------------------------
from .subconn import fromtodistance, _posFromLoc, _interpolateSegmentSigma, subcellularConn
# -----------------------------------------------------------------------------
# Import rxd methods
# -----------------------------------------------------------------------------
from .netrxd import addRxD, _addRegions, _addExtracellularRegion, _addSpecies, \
_addStates, _addReactions, _addRates, _replaceRxDStr
# -----------------------------------------------------------------------------
# Import shape methods
# -----------------------------------------------------------------------------
from .shape import calcSegCoords, defineCellShapes
# -----------------------------------------------------------------------------
# Import modify methods
# -----------------------------------------------------------------------------
from .modify import modifyCells, modifySynMechs, modifyConns, modifyStims
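# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module and never called):
# the typical build order for this class, assuming `params` is a netParams-like
# specification object that provides the attributes used above (e.g. popParams,
# defineCellShapes). This only restates the call sequence visible in this file;
# the helper name is hypothetical.
# -----------------------------------------------------------------------------
def _example_build_network(params):
    net = Network(params=params)   # store the parameter specification
    net.createPops()               # instantiate Pop objects from params.popParams
    net.createCells()              # create Cell objects for every population
    return net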
|
thekerrlab/netpyne
|
netpyne/network/network.py
|
Python
|
mit
| 5,784
|
[
"NEURON"
] |
cc1cae87dc8c05f4bc6cd6590fa3a2536d5945d29e21e0953b7e0aecb1da3019
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.magic.prepare_image Prepare an image with PTS.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import argparse
# Import the relevant PTS classes and modules
from pts.magic.prepare.preparer import ImagePreparer
from pts.core.tools import logging, time, tables, parsing
from pts.core.tools import filesystem as fs
from pts.magic.core.image import Image
from pts.magic.basics.region import Region
from pts.magic.misc.calibration import CalibrationError
from pts.magic.misc.extinction import GalacticExtinction
from pts.core.basics.filter import Filter
from pts.magic.misc.kernels import AnianoKernels, aniano_names, variable_fwhms
#from pts.core.basics.configuration import ConfigurationDefinition, ConfigurationReader
# -----------------------------------------------------------------
# Create the command-line parser
parser = argparse.ArgumentParser()
# Basic
parser.add_argument("image", type=str, help="the name/path of the image for which to run the preparation")
parser.add_argument("convolve_to", type=str, help="the name of the band to convolve the image to")
parser.add_argument("rebin_to", type=str, nargs='?', help="the name/path of the reference image to which the image is rebinned")
# Advanced options
parser.add_argument("--sky_annulus_outer", type=float, help="the factor to which the ellipse describing the principal galaxy should be multiplied to represent the outer edge of the sky annulus")
parser.add_argument("--sky_annulus_inner", type=float, help="the factor to which the ellipse describing the principal galaxy should be multiplied to represent the inner edge of the sky annulus")
parser.add_argument("--convolution_remote", type=str, help="the name of the remote host to be used for the convolution step")
parser.add_argument("--sky_region", type=str, help="the name/path of a file with manually selected regions for the sky estimation (not apertures but extended regions of any shape and number) (in sky coordinates!)")
parser.add_argument("--error_frames", type=parsing.string_list, help="the names of planes in the input image which have to be regarded as error maps (seperated by commas)")
# Input and output
parser.add_argument("--input", type=str, help="the input path (output of find_sources step)")
parser.add_argument("--output", type=str, help="the output path")
# Logging
parser.add_argument("--debug", action="store_true", help="enable debug logging mode")
parser.add_argument("--report", action='store_true', help="write a report file")
parser.add_argument("--steps", action="store_true", help="write the results of intermediate steps")
parser.add_argument("--config", type=str, help="the name of a configuration file")
# Visualisation
parser.add_argument("--visualise", action="store_true", help="make visualisations")
# Parse the command line arguments
arguments = parser.parse_args()
# -----------------------------------------------------------------
# Determine the full input and output paths
if arguments.output is None: arguments.output = fs.cwd()
if arguments.input is None: arguments.input = fs.cwd()
arguments.input = fs.absolute(arguments.input)
arguments.output = fs.absolute(arguments.output)
# -----------------------------------------------------------------
# Determine the log file path
logfile_path = fs.join(arguments.output, time.unique_name("log") + ".txt") if arguments.report else None
# Determine the log level
level = "DEBUG" if arguments.debug else "INFO"
# Initialize the logger
log = logging.setup_log(level=level, path=logfile_path)
log.start("Starting prepare_image ...")
# -----------------------------------------------------------------
# Determine the path to the input image
image_path = fs.absolute(arguments.image)
# Load the image
image = Image.from_file(image_path)
# -----------------------------------------------------------------
# Inform the user
log.info("Loading regions ...")
# Determine the path to the galaxy region
galaxy_region_path = fs.join(arguments.input, "galaxies.reg")
# Load the galaxy region
galaxy_region = Region.from_file(galaxy_region_path)
# Determine the path to the star region
star_region_path = fs.join(arguments.input, "stars.reg")
# Load the star region
star_region = Region.from_file(star_region_path) if fs.is_file(star_region_path) else None
# Determine the path to the saturation region
saturation_region_path = fs.join(arguments.input, "saturation.reg")
# Load the saturation region
saturation_region = Region.from_file(saturation_region_path) if fs.is_file(saturation_region_path) else None
# Determine the path to the region of other sources
other_region_path = fs.join(arguments.input, "other_sources.reg")
# Load the region of other sources
other_region = Region.from_file(other_region_path) if fs.is_file(other_region_path) else None
# Inform the user
log.debug("Loading segmentation frames ...")
# Load the image with segmentation maps
segments_path = fs.join(arguments.input, "segments.fits")
segments = Image.from_file(segments_path, no_filter=True)
# Get the segmentation maps
galaxy_segments = segments.frames.galaxies
star_segments = segments.frames.stars
other_segments = segments.frames.other_sources
# Load the statistics file
statistics_path = fs.join(arguments.input, "statistics.dat")
# Inform the user
log.debug("Loading the FWHM ...")
# Get the FWHM from the statistics file
fwhm = None
with open(statistics_path) as statistics_file:
for line in statistics_file:
if "FWHM" in line: fwhm = parsing.get_quantity(line.split("FWHM: ")[1].replace("\n", ""))
# -----------------------------------------------------------------
# Get the center coordinate of the frame
center_coordinate = image.coordinate_range[0]
# Get the filter name
if image.filter is None: raise RuntimeError("Filter not recognized!")
filter_name = str(image.filter)
# -----------------------------------------------------------------
# Debugging
log.debug("Getting galactic extinction ...")
# Get the galactic extinction for this image
arguments.attenuation = GalacticExtinction(center_coordinate).extinction_for_filter(image.filter)
# -----------------------------------------------------------------
# Get the calibration error
arguments.calibration = CalibrationError.from_filter(image.filter)
# -----------------------------------------------------------------
# If visualisation is enabled, set the visualisation path (=output path)
if arguments.visualise: visualisation_path = arguments.output
else: visualisation_path = None
# -----------------------------------------------------------------
# Inform the user
log.info("Looking up the necessary kernel file ...")
# Get the filter to which to convolve to
convolve_to_filter = Filter.from_string(arguments.convolve_to)
# Create an AnianoKernels instance
kernels = AnianoKernels()
# Get the path to the appropriate convolution kernel
kernel_path = kernels.get_kernel_path(image.filter, convolve_to_filter, fwhm=fwhm)
# Set the kernel path
arguments.kernel = kernel_path
# -----------------------------------------------------------------
# Determine the absolute path to the reference image
arguments.rebin_to = fs.absolute(arguments.rebin_to)
# Determine the full path to the sky region file
if arguments.sky_region is not None: arguments.sky_region = fs.absolute(arguments.sky_region)
# -----------------------------------------------------------------
# Create an ImagePreparer instance
preparer = ImagePreparer.from_arguments(arguments)
# Run the image preparation
preparer.run(image, galaxy_region, star_region, saturation_region, other_region, galaxy_segments, star_segments, other_segments, visualisation_path)
# -----------------------------------------------------------------
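# Illustrative invocation (hypothetical file names and band label, shown only as a
# usage sketch of the command-line interface defined above):
#
#   python prepare_image.py NGC1234_PACS160.fits PACS_160 reference.fits \
#       --input find_sources_output --output prepared --report --steps
#
# -----------------------------------------------------------------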
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/do/magic/prepare_image.py
|
Python
|
mit
| 8,194
|
[
"Galaxy"
] |
6105489e51541a42a8dd5ac6894d10676332d9297ae3e5ab216b74a9878045ef
|