| code | docstring |
|---|---|
def get_settings_json(self):
"""Convert generator settings to JSON.
Returns
-------
`dict`
JSON data.
"""
return {
'scanner': None if self.scanner is None else self.scanner.save(),
'parser': None if self.parser is None else self.parser.save()
}
|
Convert generator settings to JSON-serialisable data.
Returns
-------
`dict`
JSON data.
|
def isTagEqual(self, other):
'''
isTagEqual - Compare whether a tag has the same tag name and attributes as another tag,
i.e. whether everything between the < and > parts of this tag is the same.
Does NOT compare children, etc. Does NOT compare whether these are the exact same tag in the html (use the regular == operator for that).
So for example:
tag1 = document.getElementById('something')
tag2 = copy.copy(tag1)
tag1 == tag2 # This is False
tag1.isTagEqual(tag2) # This is True
@return bool - True if tags have the same name and attributes, otherwise False
'''
# if type(other) != type(self):
# return False
# NOTE: Instead of type check,
# just see if we can get the needed attributes in case subclassing
try:
if self.tagName != other.tagName:
return False
myAttributes = self._attributes
otherAttributes = other._attributes
attributeKeysSelf = list(myAttributes.keys())
attributeKeysOther = list(otherAttributes.keys())
except AttributeError:
return False
# Check that we have all the same attribute names
if set(attributeKeysSelf) != set(attributeKeysOther):
return False
for key in attributeKeysSelf:
if myAttributes.get(key) != otherAttributes.get(key):
return False
return True
|
isTagEqual - Compare whether a tag has the same tag name and attributes as another tag,
i.e. whether everything between the < and > parts of this tag is the same.
Does NOT compare children, etc. Does NOT compare whether these are the exact same tag in the html (use the regular == operator for that).
So for example:
tag1 = document.getElementById('something')
tag2 = copy.copy(tag1)
tag1 == tag2 # This is False
tag1.isTagEqual(tag2) # This is True
@return bool - True if tags have the same name and attributes, otherwise False
|
def ackermann_naive(m: int, n: int) -> int:
"""Ackermann number.
"""
if m == 0:
return n + 1
elif n == 0:
return ackermann_naive(m - 1, 1)
else:
return ackermann_naive(m - 1, ackermann_naive(m, n - 1))
|
Ackermann function (naive recursive implementation).
|
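A quick sanity check on the corrected recursion (small arguments only; the naive version's recursion depth explodes for m >= 4):
assert ackermann_naive(0, 5) == 6    # m == 0 -> n + 1
assert ackermann_naive(1, 3) == 5    # A(1, n) == n + 2
assert ackermann_naive(2, 3) == 9    # A(2, n) == 2*n + 3
assert ackermann_naive(3, 3) == 61   # A(3, n) == 2**(n + 3) - 3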
def hsvToRGB(h, s, v):
"""
Convert HSV (hue, saturation, value) color space to RGB (red, green, blue)
color space.
**Parameters**
**h** : float
Hue, a number in [0, 360].
**s** : float
Saturation, a number in [0, 1].
**v** : float
Value, a number in [0, 1].
**Returns**
**r** : float
Red, a number in [0, 1].
**g** : float
Green, a number in [0, 1].
**b** : float
Blue, a number in [0, 1].
"""
hi = math.floor(h / 60.0) % 6
f = (h / 60.0) - math.floor(h / 60.0)
p = v * (1.0 - s)
q = v * (1.0 - (f * s))
t = v * (1.0 - ((1.0 - f) * s))
D = {0: (v, t, p), 1: (q, v, p), 2: (p, v, t), 3: (p, q, v), 4: (t, p, v),
5: (v, p, q)}
return D[hi]
|
Convert HSV (hue, saturation, value) color space to RGB (red, green, blue)
color space.
**Parameters**
**h** : float
Hue, a number in [0, 360].
**s** : float
Saturation, a number in [0, 1].
**v** : float
Value, a number in [0, 1].
**Returns**
**r** : float
Red, a number in [0, 1].
**g** : float
Green, a number in [0, 1].
**b** : float
Blue, a number in [0, 1].
|
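A minimal check of the sector arithmetic on the primary colors (the function assumes `math` is imported):
assert hsvToRGB(0, 1.0, 1.0) == (1.0, 0.0, 0.0)    # hi = 0 -> red
assert hsvToRGB(120, 1.0, 1.0) == (0.0, 1.0, 0.0)  # hi = 2 -> green
assert hsvToRGB(240, 1.0, 1.0) == (0.0, 0.0, 1.0)  # hi = 4 -> blue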
def blockType(self, kind):
"""Read block type switch descriptor for given kind of blockType."""
NBLTYPES = self.verboseRead(TypeCountAlphabet(
'BT#'+kind[0].upper(),
description='{} block types'.format(kind),
))
self.numberOfBlockTypes[kind] = NBLTYPES
if NBLTYPES >= 2:
self.blockTypeCodes[kind] = self.readPrefixCode(
BlockTypeAlphabet('BT'+kind[0].upper(), NBLTYPES))
self.blockCountCodes[kind] = self.readPrefixCode(
BlockCountAlphabet('BC'+kind[0].upper()))
blockCount = self.verboseRead(self.blockCountCodes[kind])
else:
# only one block type: the block never switches, so use an effectively unbounded count
blockCount = 1 << 24
self.currentBlockCounts[kind] = blockCount
|
Read block type switch descriptor for given kind of blockType.
|
def _initAsteriskVersion(self):
"""Query Asterisk Manager Interface for Asterisk Version to configure
system for compatibility with multiple versions
.
CLI Command - core show version
"""
if self._ami_version > util.SoftwareVersion('1.0'):
cmd = "core show version"
else:
cmd = "show version"
cmdresp = self.executeCommand(cmd)
mobj = re.match(r'Asterisk\s*(SVN-branch-|\s)(\d+(\.\d+)*)', cmdresp)
if mobj:
self._asterisk_version = util.SoftwareVersion(mobj.group(2))
else:
raise Exception('Asterisk version cannot be determined.')
|
Query Asterisk Manager Interface for the Asterisk version to configure
the system for compatibility with multiple versions.
CLI Command - core show version
|
def set_back(self, x: int, y: int, r: int, g: int, b: int) -> None:
"""Set the background color of one cell.
Args:
x (int): X position to change.
y (int): Y position to change.
r (int): Red background color, from 0 to 255.
g (int): Green background color, from 0 to 255.
b (int): Blue background color, from 0 to 255.
"""
i = self.width * y + x
self.back_r[i] = r
self.back_g[i] = g
self.back_b[i] = b
|
Set the background color of one cell.
Args:
x (int): X position to change.
y (int): Y position to change.
r (int): Red background color, from 0 to 255.
g (int): Green background color, from 0 to 255.
b (int): Blue background color, from 0 to 255.
|
def _save_np(obj, pathfileext, compressed=False):
"""Save a tofu object to a numpy .npz file, dispatching on obj.Id.Cls."""
func = np.savez_compressed if compressed else np.savez
dId = obj.Id._todict()
# tofu.geom
if obj.Id.Cls=='Ves':
func(pathfileext, Id=dId, arrayorder=obj._arrayorder, Clock=obj._Clock,
Poly=obj.Poly, Lim=obj.Lim, Sino_RefPt=obj.sino['RefPt'],
Sino_NP=obj.sino['NP'])
elif obj.Id.Cls=='Struct':
func(pathfileext, Id=dId, arrayorder=obj._arrayorder, Clock=obj._Clock,
Poly=obj.Poly, Lim=obj.Lim, mobile=obj._mobile)
elif obj.Id.Cls in ['Rays','LOS','LOSCam1D','LOSCam2D']:
func(pathfileext, Id=dId, extra=obj._extra,
geom=obj.geom, sino=obj.sino, dchans=obj.dchans)
elif obj.Id.Cls in ['Data','Data1D','Data2D']:
dsave = obj._todict()
if dsave['geom'] is not None and dsave['geom']['LCam'] is not None:
LCam = []
for cc in dsave['geom']['LCam']:
pathS = cc['Id']['SavePath']
pathN = cc['Id']['SaveName']
LCam.append(os.path.join(pathS,pathN+'.npz'))
dsave['geom'] = LCam
elif dsave['geom'] is not None:
geom = []
if dsave['geom']['Ves'] is not None:
pathS = dsave['geom']['Ves']['Id']['SavePath']
pathN = dsave['geom']['Ves']['Id']['SaveName']
Ves = os.path.join(pathS,pathN+'.npz')
geom += [Ves]
if dsave['geom']['LStruct'] is not None:
for ss in dsave['geom']['LStruct']:
sf = os.path.join(ss['Id']['SavePath'],
ss['Id']['SaveName']+'.npz')
geom += [sf]
dsave['geom'] = geom
func(pathfileext, **dsave)
"""
elif obj.Id.Cls=='GLOS':
LIdLOS = [ll.Id.todict() for ll in obj.LLOS]
LDs, Lus = np.array([ll.D for ll in obj.LLOS]).T, np.array([ll.u for ll in obj.LLOS]).T
func(pathfileext, Idsave=Idsave, LIdLOS=LIdLOS, LDs=LDs, Lus=Lus, Sino_RefPt=obj.Sino_RefPt, arrayorder=obj._arrayorder, Clock=obj._Clock)
elif obj.Id.Cls=='Lens':
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, O=obj.O, nIn=obj.nIn, Rad=[obj.Rad], F1=[obj.F1], F2=[obj.F2], R1=[obj.R1], R2=[obj.R2], dd=[obj.dd])
elif obj.Id.Cls=='Apert':
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, Poly=obj.Poly)
elif obj.Id.Cls=='Detect':
LOSprops, Sino, Span, Cone, SAng, SynthDiag, Res, Optics = _convert_Detect2Ldict(obj)
VesCalc = {'SavePath':None} if (not hasattr(obj,'_VesCalc') or obj._VesCalc is None) else {'SavePath':obj._VesCalc.Id.SavePath, 'SaveName':obj._VesCalc.Id.SaveName}
func(pathfileext, Idsave=Idsave, Poly=obj.Poly, Rad=obj.Rad, BaryS=obj.BaryS, nIn=obj.nIn, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt, LOSNP=[obj._LOS_NP],
LOSprops=[LOSprops], Sino=[Sino], Span=[Span], Cone=[Cone], SAng=[SAng], SynthDiag=[SynthDiag], Res=[Res], Optics=[Optics], VesCalc=[VesCalc])
elif obj.Id.Cls=='GDetect':
LDetsave, LDetSynthRes = [], []
for ii in range(0,obj.nDetect):
ddIdsave = obj.LDetect[ii].Id.todict()
LOSprops, Sino, Span, Cone, SAng, SynthDiag, Res, Optics = _convert_Detect2Ldict(obj.LDetect[ii])
VesCalc = {'SavePath':None} if (not hasattr(obj.LDetect[ii],'_VesCalc') or obj.LDetect[ii]._VesCalc is None) else {'SavePath':obj.LDetect[ii]._VesCalc.Id.SavePath, 'SaveName':obj.LDetect[ii]._VesCalc.Id.SaveName}
dd = dict(Idsave=ddIdsave, Poly=obj.LDetect[ii].Poly, Rad=obj.LDetect[ii].Rad, BaryS=obj.LDetect[ii].BaryS, nIn=obj.LDetect[ii].nIn, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt,
LOSNP=[obj.LDetect[ii]._LOS_NP], LOSprops=[LOSprops], Sino=[Sino], Span=[Span], Cone=[Cone], SAng=[SAng], Optics=[Optics], VesCalc=[VesCalc])
LDetsave.append(dd)
LDetSynthRes.append({'SynthDiag':[SynthDiag],'Res':[Res]})
Res, lAttr = {}, dir(obj)
for pp in lAttr:
if not inspect.ismethod(getattr(obj,pp)) and '_Res' in pp:
Res[pp] = getattr(obj,pp)
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt, LOSRef=obj._LOSRef, Res=[Res], LDetsave=LDetsave, LDetSynthRes=LDetSynthRes)
# tofu.Eq
elif obj.Id.Cls=='Eq2D':
np.savez(pathfileext, Idsave=Idsave, **obj._Tab)
# tofu.mesh
elif obj.Id.Cls=='Mesh1D':
func(pathfileext, Idsave=Idsave, Knots=obj.Knots)
elif obj.Id.Cls=='Mesh2D':
SubMinds = [{'Name':kk, 'ind':obj._SubMesh[kk]['ind']} for kk in obj._SubMesh.keys()]
func(pathfileext, Idsave=Idsave, Knots=[obj.MeshX1.Knots,obj.MeshX2.Knots], SubMinds=SubMinds, IndBg=obj._get_CentBckg()[1])
elif obj.Id.Cls=='BF2D':
Id = np.array(['BF2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
IdMesh = np.array(['Mesh2D',obj.Mesh.Id.Name,obj.Mesh.Id.SaveName,obj.Mesh.Id.SavePath,obj.Mesh.Id._dtFormat],dtype=str)
dtime, dtimeMesh = np.array([obj.Id._dtime],dtype=object), np.array([obj.Mesh.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
func(pathfileext, Id=Id, IdMesh=IdMesh, dtime=dtime, IdUSR=USR, dtimeMesh=dtimeMesh, KnotsR=obj.Mesh.MeshR.Knots, KnotsZ=obj.Mesh.MeshZ.Knots, Deg=np.array([obj.Deg],dtype=int), Ind=obj.Mesh._get_CentBckg()[1])
# tofu.matcomp
elif obj.Id.Cls=='GMat2D':
Id = np.array(['GMat2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
dtime = np.array([obj.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
IdObj, IdObjUSR = save_np_IdObj(obj.Id)
CompParamVal = np.array([obj._Mat_epsrel, obj._Mat_SubP, obj._Mat_SubTheta, obj._indMat_SubP, obj._MatLOS_epsrel, obj._MatLOS_SubP, int(obj._Mat_Fast)])
CompParamStr = np.array([obj._Mat_Mode, obj._Mat_SubMode, obj._Mat_SubThetaMode, obj._MatLOS_Mode, obj._MatLOS_SubMode])
func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, Ves=IdObj[2], VesUSR=IdObjUSR[2], LDetect=IdObj[1], BF2=IdObj[0], BF2USR=IdObjUSR[0], LDetectUSR=IdObjUSR[1], CompParamVal=CompParamVal,
CompParamStr=CompParamStr, indMat=obj._indMat, Matdata=obj._Mat_csr.data, Matind=obj._Mat_csr.indices, Matindpr=obj._Mat_csr.indptr, Matshape=obj._Mat_csr.shape,
MatLOSdata=obj._MatLOS_csr.data, MatLOSind=obj._MatLOS_csr.indices, MatLOSindpr=obj._MatLOS_csr.indptr, MatLOSshape=obj._MatLOS_csr.shape,
BF2Par=np.array([obj._BF2_Deg,obj._BF2_NFunc,obj._BF2_NCents]), LD_nD=obj._LD_nDetect)
# tofu.treat
elif obj.Id.Cls=='PreData':
Init, Update = _convert_PreData2Ldict(obj)
func(pathfileext, Idsave=Idsave, Init=[Init], Update=[Update])
#Id = np.array(['PreData',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
#dtime = np.array([obj.Id._dtime],dtype=object)
#USR = np.asarray(obj.Id.USRdict)
#IdObj, IdObjUSR = save_np_IdObj(obj.Id)
#StrPar = np.asarray([obj._Exp, obj._interpkind])
#func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, LDetect=IdObj[0], LDetectUSR=IdObjUSR[0],
# DLPar=obj._DLPar, shot=obj._shot, StrPar=StrPar, Dt=obj._Dt, DtMarg=obj._DtMargin, MovMeanfreq=obj._MovMeanfreq, Resamp=obj._Resamp,
# indOut=obj._indOut, indCorr=obj._indCorr, PhysNoise=obj._PhysNoise, NoiseMod=obj._NoiseModel, interp_lt=obj._interp_lt, interp_lN=obj._interp_lNames)
# tofu.inv
elif obj.Id.Cls=='Sol2D':
Id = np.array(['Sol2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
dtime = np.array([obj.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
IdObj, IdObjUSR = save_np_IdObj(obj.Id)
try:
timing = obj._timing
except Exception:
timing = obj._t2
func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, PreData=IdObj[2], PreDataUSR=IdObjUSR[2], GMat2D=IdObj[1], GMatUSR=IdObjUSR[1], BF2D=IdObj[0], BF2DUSR=IdObjUSR[0],
InvParam=obj.InvParam, shot=obj.shot, LNames=obj._LNames, Run=obj._run,
LOS=obj._LOS, data=obj._data, t=obj._t, Coefs=obj._Coefs, sigma=obj._sigma, Mu=obj._Mu, Chi2N=obj._Chi2N, R = obj._R, Nit=obj._Nit, Spec=obj._Spec, t2=timing, PostTreat=obj._PostTreat)
"""
|
elif obj.Id.Cls=='GLOS':
LIdLOS = [ll.Id.todict() for ll in obj.LLOS]
LDs, Lus = np.array([ll.D for ll in obj.LLOS]).T, np.array([ll.u for ll in obj.LLOS]).T
func(pathfileext, Idsave=Idsave, LIdLOS=LIdLOS, LDs=LDs, Lus=Lus, Sino_RefPt=obj.Sino_RefPt, arrayorder=obj._arrayorder, Clock=obj._Clock)
elif obj.Id.Cls=='Lens':
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, O=obj.O, nIn=obj.nIn, Rad=[obj.Rad], F1=[obj.F1], F2=[obj.F2], R1=[obj.R1], R2=[obj.R2], dd=[obj.dd])
elif obj.Id.Cls=='Apert':
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, Poly=obj.Poly)
elif obj.Id.Cls=='Detect':
LOSprops, Sino, Span, Cone, SAng, SynthDiag, Res, Optics = _convert_Detect2Ldict(obj)
VesCalc = {'SavePath':None} if (not hasattr(obj,'_VesCalc') or obj._VesCalc is None) else {'SavePath':obj._VesCalc.Id.SavePath, 'SaveName':obj._VesCalc.Id.SaveName}
func(pathfileext, Idsave=Idsave, Poly=obj.Poly, Rad=obj.Rad, BaryS=obj.BaryS, nIn=obj.nIn, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt, LOSNP=[obj._LOS_NP],
LOSprops=[LOSprops], Sino=[Sino], Span=[Span], Cone=[Cone], SAng=[SAng], SynthDiag=[SynthDiag], Res=[Res], Optics=[Optics], VesCalc=[VesCalc])
elif obj.Id.Cls=='GDetect':
LDetsave, LDetSynthRes = [], []
for ii in range(0,obj.nDetect):
ddIdsave = obj.LDetect[ii].Id.todict()
LOSprops, Sino, Span, Cone, SAng, SynthDiag, Res, Optics = _convert_Detect2Ldict(obj.LDetect[ii])
VesCalc = {'SavePath':None} if (not hasattr(obj.LDetect[ii],'_VesCalc') or obj.LDetect[ii]._VesCalc is None) else {'SavePath':obj.LDetect[ii]._VesCalc.Id.SavePath, 'SaveName':obj.LDetect[ii]._VesCalc.Id.SaveName}
dd = dict(Idsave=ddIdsave, Poly=obj.LDetect[ii].Poly, Rad=obj.LDetect[ii].Rad, BaryS=obj.LDetect[ii].BaryS, nIn=obj.LDetect[ii].nIn, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt,
LOSNP=[obj.LDetect[ii]._LOS_NP], LOSprops=[LOSprops], Sino=[Sino], Span=[Span], Cone=[Cone], SAng=[SAng], Optics=[Optics], VesCalc=[VesCalc])
LDetsave.append(dd)
LDetSynthRes.append({'SynthDiag':[SynthDiag],'Res':[Res]})
Res, lAttr = {}, dir(obj)
for pp in lAttr:
if not inspect.ismethod(getattr(obj,pp)) and '_Res' in pp:
Res[pp] = getattr(obj,pp)
func(pathfileext, Idsave=Idsave, arrayorder=obj._arrayorder, Clock=obj._Clock, Sino_RefPt=obj.Sino_RefPt, LOSRef=obj._LOSRef, Res=[Res], LDetsave=LDetsave, LDetSynthRes=LDetSynthRes)
# tofu.Eq
elif obj.Id.Cls=='Eq2D':
np.savez(pathfileext, Idsave=Idsave, **obj._Tab)
# tofu.mesh
elif obj.Id.Cls=='Mesh1D':
func(pathfileext, Idsave=Idsave, Knots=obj.Knots)
elif obj.Id.Cls=='Mesh2D':
SubMinds = [{'Name':kk, 'ind':obj._SubMesh[kk]['ind']} for kk in obj._SubMesh.keys()]
func(pathfileext, Idsave=Idsave, Knots=[obj.MeshX1.Knots,obj.MeshX2.Knots], SubMinds=SubMinds, IndBg=obj._get_CentBckg()[1])
elif obj.Id.Cls=='BF2D':
Id = np.array(['BF2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
IdMesh = np.array(['Mesh2D',obj.Mesh.Id.Name,obj.Mesh.Id.SaveName,obj.Mesh.Id.SavePath,obj.Mesh.Id._dtFormat],dtype=str)
dtime, dtimeMesh = np.array([obj.Id._dtime],dtype=object), np.array([obj.Mesh.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
func(pathfileext, Id=Id, IdMesh=IdMesh, dtime=dtime, IdUSR=USR, dtimeMesh=dtimeMesh, KnotsR=obj.Mesh.MeshR.Knots, KnotsZ=obj.Mesh.MeshZ.Knots, Deg=np.array([obj.Deg],dtype=int), Ind=obj.Mesh._get_CentBckg()[1])
# tofu.matcomp
elif obj.Id.Cls=='GMat2D':
Id = np.array(['GMat2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
dtime = np.array([obj.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
IdObj, IdObjUSR = save_np_IdObj(obj.Id)
CompParamVal = np.array([obj._Mat_epsrel, obj._Mat_SubP, obj._Mat_SubTheta, obj._indMat_SubP, obj._MatLOS_epsrel, obj._MatLOS_SubP, int(obj._Mat_Fast)])
CompParamStr = np.array([obj._Mat_Mode, obj._Mat_SubMode, obj._Mat_SubThetaMode, obj._MatLOS_Mode, obj._MatLOS_SubMode])
func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, Ves=IdObj[2], VesUSR=IdObjUSR[2], LDetect=IdObj[1], BF2=IdObj[0], BF2USR=IdObjUSR[0], LDetectUSR=IdObjUSR[1], CompParamVal=CompParamVal,
CompParamStr=CompParamStr, indMat=obj._indMat, Matdata=obj._Mat_csr.data, Matind=obj._Mat_csr.indices, Matindpr=obj._Mat_csr.indptr, Matshape=obj._Mat_csr.shape,
MatLOSdata=obj._MatLOS_csr.data, MatLOSind=obj._MatLOS_csr.indices, MatLOSindpr=obj._MatLOS_csr.indptr, MatLOSshape=obj._MatLOS_csr.shape,
BF2Par=np.array([obj._BF2_Deg,obj._BF2_NFunc,obj._BF2_NCents]), LD_nD=obj._LD_nDetect)
# tofu.treat
elif obj.Id.Cls=='PreData':
Init, Update = _convert_PreData2Ldict(obj)
func(pathfileext, Idsave=Idsave, Init=[Init], Update=[Update])
#Id = np.array(['PreData',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
#dtime = np.array([obj.Id._dtime],dtype=object)
#USR = np.asarray(obj.Id.USRdict)
#IdObj, IdObjUSR = save_np_IdObj(obj.Id)
#StrPar = np.asarray([obj._Exp, obj._interpkind])
#func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, LDetect=IdObj[0], LDetectUSR=IdObjUSR[0],
# DLPar=obj._DLPar, shot=obj._shot, StrPar=StrPar, Dt=obj._Dt, DtMarg=obj._DtMargin, MovMeanfreq=obj._MovMeanfreq, Resamp=obj._Resamp,
# indOut=obj._indOut, indCorr=obj._indCorr, PhysNoise=obj._PhysNoise, NoiseMod=obj._NoiseModel, interp_lt=obj._interp_lt, interp_lN=obj._interp_lNames)
# tofu.inv
elif obj.Id.Cls=='Sol2D':
Id = np.array(['Sol2D',obj.Id.Name,obj.Id.SaveName,obj.Id.SavePath,obj.Id._dtFormat,obj.Id._Diag,str(obj.Id._shot), [obj.Id.Type], obj.Id.Exp],dtype=str)
dtime = np.array([obj.Id._dtime],dtype=object)
USR = np.asarray(obj.Id.USRdict)
IdObj, IdObjUSR = save_np_IdObj(obj.Id)
try:
timing = obj._timing
except Exception:
timing = obj._t2
func(pathfileext, Id=Id, dtime=dtime, IdUSR=USR, PreData=IdObj[2], PreDataUSR=IdObjUSR[2], GMat2D=IdObj[1], GMatUSR=IdObjUSR[1], BF2D=IdObj[0], BF2DUSR=IdObjUSR[0],
InvParam=obj.InvParam, shot=obj.shot, LNames=obj._LNames, Run=obj._run,
LOS=obj._LOS, data=obj._data, t=obj._t, Coefs=obj._Coefs, sigma=obj._sigma, Mu=obj._Mu, Chi2N=obj._Chi2N, R = obj._R, Nit=obj._Nit, Spec=obj._Spec, t2=timing, PostTreat=obj._PostTreat)
|
def check_authorization(self):
"""
Check for the presence of a basic auth Authorization header and
whether the credentials contained within it are valid.
:return: Whether or not the credentials are valid.
:rtype: bool
"""
try:
store = self.__config.get('basic_auth')
if store is None:
return True
auth_info = self.headers.get('Authorization')
if not auth_info:
return False
auth_info = auth_info.split()
if len(auth_info) != 2 or auth_info[0] != 'Basic':
return False
auth_info = base64.b64decode(auth_info[1]).decode(sys.getdefaultencoding())
username = auth_info.split(':')[0]
password = ':'.join(auth_info.split(':')[1:])
password_bytes = password.encode(sys.getdefaultencoding())
if hasattr(self, 'custom_authentication'):
if self.custom_authentication(username, password):
self.basic_auth_user = username
return True
return False
if username not in store:
self.server.logger.warning('received invalid username: ' + username)
return False
password_data = store[username]
if password_data['type'] == 'plain':
if password == password_data['value']:
self.basic_auth_user = username
return True
elif hashlib.new(password_data['type'], password_bytes).digest() == password_data['value']:
self.basic_auth_user = username
return True
self.server.logger.warning('received invalid password from user: ' + username)
except Exception:
# fail closed: any error while parsing or checking credentials denies access
pass
return False
|
Check for the presence of a basic auth Authorization header and
whether the credentials contained within it are valid.
:return: Whether or not the credentials are valid.
:rtype: bool
|
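A sketch of the inputs this handler expects, assuming `store` maps usernames to password records as used above (all names and values here are illustrative):
import base64
import hashlib
store = {
    'alice': {'type': 'plain', 'value': 'secret'},
    'bob': {'type': 'sha256', 'value': hashlib.sha256(b'hunter2').digest()},
}
# The client sends: Authorization: Basic base64("user:password")
header = 'Basic ' + base64.b64encode(b'bob:hunter2').decode('ascii')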
def surface_nodes(self):
"""
Build a gridded surface node from the points in `self.mesh`.
:returns: a Node of kind 'griddedSurface'
"""
line = []
for point in self.mesh:
line.append(point.longitude)
line.append(point.latitude)
line.append(point.depth)
return [Node('griddedSurface', nodes=[Node('gml:posList', {}, line)])]
|
Build a gridded surface node from the points in `self.mesh`.
:returns: a Node of kind 'griddedSurface'
|
def _post_deactivate_injection(self):
"""
Runs injected cleanup after the deactivation routine of child classes has been called
:return: None
"""
# Let's be sure that active is really set to False.
self.active = False
self.app.signals.send("plugin_deactivate_post", self)
# After all receivers are handled. We start to clean up signals and receivers of this plugin
# Attention: This signal cleanup must not be called via a signal (like in other patterns),
# because the call order of receivers is not defined and a signal/receiver cleanup would prevent the call
# of all "later" receivers.
self.signals.deactivate_plugin_signals()
|
Runs injected cleanup after the deactivation routine of child classes has been called
:return: None
|
def omegac(self,R):
"""
NAME:
omegac
PURPOSE:
calculate the circular angular speed at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
OUTPUT:
circular angular speed
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
return nu.sqrt(-self.Rforce(R,use_physical=False)/R)
|
NAME:
omegac
PURPOSE:
calculate the circular angular speed at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
OUTPUT:
circular angular speed
HISTORY:
2011-10-09 - Written - Bovy (IAS)
|
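Since omegac(R) = sqrt(-Rforce(R)/R), a point-mass (Kepler) potential with Rforce = -GM/R^2 should give the Keplerian frequency sqrt(GM/R^3). A quick sketch, assuming galpy's KeplerPotential in natural units:
from galpy.potential import KeplerPotential
kp = KeplerPotential(amp=1.)  # GM = 1 in natural units
print(kp.omegac(1.))          # ~1.0 == sqrt(GM / R**3) at R = 1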
def log(self, level, prefix=''):
"""Writes the contents of the Extension to the logging system.
"""
logging.log(level, "%sname: %s", prefix, self.__name)
logging.log(level, "%soptions: %s", prefix, self.__options)
|
Writes the contents of the Extension to the logging system.
|
def _parse_sid_response(res):
"""Parse response format for request for new channel SID.
Example format (after parsing JS):
[ [0,["c","SID_HERE","",8]],
[1,[{"gsid":"GSESSIONID_HERE"}]]]
Returns (SID, gsessionid) tuple.
"""
res = json.loads(list(ChunkParser().get_chunks(res))[0])
sid = res[0][1][1]
gsessionid = res[1][1][0]['gsid']
return (sid, gsessionid)
|
Parse response format for request for new channel SID.
Example format (after parsing JS):
[ [0,["c","SID_HERE","",8]],
[1,[{"gsid":"GSESSIONID_HERE"}]]]
Returns (SID, gsessionid) tuple.
|
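A sketch of the indexing, applied to the documented response shape directly (bypassing ChunkParser for illustration):
res = [[0, ["c", "SID_HERE", "", 8]],
       [1, [{"gsid": "GSESSIONID_HERE"}]]]
sid = res[0][1][1]                  # -> 'SID_HERE'
gsessionid = res[1][1][0]['gsid']   # -> 'GSESSIONID_HERE'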
def gettext(ui_file_path):
"""
Lets you use gettext instead of the Qt tools for i18n
"""
with open(ui_file_path, 'r') as fin:
content = fin.read()
# replace ``_translate("context", `` by ``_(``
content = re.sub(r'_translate\(".*",\s', '_(', content)
content = content.replace(
' _translate = QtCore.QCoreApplication.translate', '')
with open(ui_file_path, 'w') as fout:
fout.write(content)
|
Lets you use gettext instead of the Qt tools for i18n
|
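A sketch of what the substitution does to one line of a pyuic-generated file (hypothetical input line):
import re
line = 'self.label.setText(_translate("MainWindow", "Hello"))'
print(re.sub(r'_translate\(".*",\s', '_(', line))
# -> self.label.setText(_("Hello"))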
def get_perceel_by_id_and_sectie(self, id, sectie):
'''
Get a `perceel`.
:param id: An id for a `perceel`.
:param sectie: The :class:`Sectie` that contains the perceel.
:rtype: :class:`Perceel`
'''
sid = sectie.id
aid = sectie.afdeling.id
gid = sectie.afdeling.gemeente.id
sectie.clear_gateway()
def creator():
url = self.base_url + '/municipality/%s/department/%s/section/%s/parcel/%s' % (
gid, aid, sid, id)
h = self.base_headers
p = {
'geometry': 'full',
'srs': '31370',
'data': 'adp'
}
res = capakey_rest_gateway_request(url, h, p).json()
return Perceel(
res['perceelnummer'],
sectie,
res['capakey'],
Perceel.get_percid_from_capakey(res['capakey']),
None,
None,
self._parse_centroid(res['geometry']['center']),
self._parse_bounding_box(res['geometry']['boundingBox']),
res['geometry']['shape']
)
if self.caches['short'].is_configured:
key = 'get_perceel_by_id_and_sectie_rest#%s#%s#%s' % (id, sectie.id, sectie.afdeling.id)
perceel = self.caches['short'].get_or_create(key, creator)
else:
perceel = creator()
perceel.set_gateway(self)
return perceel
|
Get a `perceel`.
:param id: An id for a `perceel`.
:param sectie: The :class:`Sectie` that contains the perceel.
:rtype: :class:`Perceel`
|
def download_wiki():
"""Download WikiPedia pages of ambiguous units."""
ambiguous = [i for i in l.UNITS.items() if len(i[1]) > 1]
ambiguous += [i for i in l.DERIVED_ENT.items() if len(i[1]) > 1]
pages = set([(j.name, j.uri) for i in ambiguous for j in i[1]])
print()
objs = []
for num, page in enumerate(pages):
obj = {'url': page[1]}
obj['_id'] = obj['url'].replace('https://en.wikipedia.org/wiki/', '')
obj['clean'] = obj['_id'].replace('_', ' ')
print('---> Downloading %s (%d of %d)' %
(obj['clean'], num + 1, len(pages)))
obj['text'] = wikipedia.page(obj['clean']).content
obj['unit'] = page[0]
objs.append(obj)
path = os.path.join(l.TOPDIR, 'wiki.json')
if os.path.exists(path):
os.remove(path)
with open(path, 'w') as wiki_file:
json.dump(objs, wiki_file, indent=4, sort_keys=True)
print('\n---> All done.\n')
|
Download WikiPedia pages of ambiguous units.
|
def from_tree(cls, repo, *treeish, **kwargs):
"""Merge the given treeish revisions into a new index which is returned.
The original index will remain unaltered
:param repo:
The repository treeish are located in.
:param treeish:
One, two or three Tree Objects, Commits or 40 byte hexshas. The result
changes according to the amount of trees.
If 1 Tree is given, it will just be read into a new index
If 2 Trees are given, they will be merged into a new index using a
two way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other'
one. It behaves like a fast-forward.
If 3 Trees are given, a 3-way merge will be performed with the first tree
being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' tree,
tree 3 is the 'other' one
:param kwargs:
Additional arguments passed to git-read-tree
:return:
New IndexFile instance. It will point to a temporary index location which
does not exist anymore. If you intend to write such a merged Index, supply
an alternate file_path to its 'write' method.
:note:
In the three-way merge case, --aggressive will be specified to automatically
resolve more cases in a commonly correct manner. Specify trivial=True as kwarg
to override that.
As the underlying git-read-tree command takes the current index into account,
it will be temporarily moved out of the way to ensure there is no unexpected
interference."""
if len(treeish) == 0 or len(treeish) > 3:
raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish))
arg_list = []
# ignore that working tree and index possibly are out of date
if len(treeish) > 1:
# drop unmerged entries when reading our index and merging
arg_list.append("--reset")
# handle non-trivial cases the way a real merge does
arg_list.append("--aggressive")
# END merge handling
# tmp file created in git home directory to be sure renaming
# works - /tmp/ dirs could be on another device
tmp_index = tempfile.mktemp('', '', repo.git_dir)
arg_list.append("--index-output=%s" % tmp_index)
arg_list.extend(treeish)
# move current index out of the way - otherwise the merge may fail
# as it considers existing entries. moving it essentially clears the index.
# Unfortunately there is no 'soft' way to do it.
# The TemporaryFileSwap assures the original file gets put back
index_handler = TemporaryFileSwap(join_path_native(repo.git_dir, 'index'))
try:
repo.git.read_tree(*arg_list, **kwargs)
index = cls(repo, tmp_index)
index.entries # force it to read the file as we will delete the temp-file
del index_handler # release as soon as possible
finally:
if osp.exists(tmp_index):
os.remove(tmp_index)
# END index merge handling
return index
|
Merge the given treeish revisions into a new index which is returned.
The original index will remain unaltered
:param repo:
The repository treeish are located in.
:param treeish:
One, two or three Tree Objects, Commits or 40 byte hexshas. The result
changes according to the amount of trees.
If 1 Tree is given, it will just be read into a new index
If 2 Trees are given, they will be merged into a new index using a
two way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other'
one. It behaves like a fast-forward.
If 3 Trees are given, a 3-way merge will be performed with the first tree
being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' tree,
tree 3 is the 'other' one
:param kwargs:
Additional arguments passed to git-read-tree
:return:
New IndexFile instance. It will point to a temporary index location which
does not exist anymore. If you intend to write such a merged Index, supply
an alternate file_path to its 'write' method.
:note:
In the three-way merge case, --aggressive will be specified to automatically
resolve more cases in a commonly correct manner. Specify trivial=True as kwarg
to override that.
As the underlying git-read-tree command takes the current index into account,
it will be temporarily moved out of the way to ensure there is no unexpected
interference.
|
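A hedged usage sketch, assuming a GitPython `Repo` named `repo` whose refs 'base', 'HEAD' and 'feature' all resolve to trees:
import os.path as osp
# Three-way merge: 'base' is the common ancestor of 'HEAD' and 'feature'
merged = IndexFile.from_tree(repo, 'base', 'HEAD', 'feature')
# The temporary index is already gone; persist the result to a real path before use
merged.write(osp.join(repo.git_dir, 'merged-index'))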
def get_chat_ids(self):
"""Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list
"""
updates = self.get_updates()
chat_ids = []
if updates:
for update in updates:
message = update.get('message', {})
if message.get('text') == '/start':
chat_ids.append(message['chat']['id'])
return list(set(chat_ids))
|
Returns unique chat IDs from `/start` command messages sent to our bot by users.
Those chat IDs can be used to send messages to chats.
:rtype: list
|
def fetch_ensembl_exons(build='37'):
"""Fetch the ensembl genes
Args:
build(str): ['37', '38']
"""
LOG.info("Fetching ensembl exons build %s ...", build)
if build == '37':
url = 'http://grch37.ensembl.org'
else:
url = 'http://www.ensembl.org'
dataset_name = 'hsapiens_gene_ensembl'
dataset = pybiomart.Dataset(name=dataset_name, host=url)
attributes = [
'chromosome_name',
'ensembl_gene_id',
'ensembl_transcript_id',
'ensembl_exon_id',
'exon_chrom_start',
'exon_chrom_end',
'5_utr_start',
'5_utr_end',
'3_utr_start',
'3_utr_end',
'strand',
'rank'
]
filters = {
'chromosome_name': CHROMOSOMES,
}
result = dataset.query(
attributes=attributes,
filters=filters
)
return result
|
Fetch the ensembl exons
Args:
build(str): ['37', '38']
|
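A minimal usage sketch (requires network access and pybiomart; `Dataset.query` returns a pandas DataFrame):
exons = fetch_ensembl_exons(build='38')
print(exons.shape)    # one row per (transcript, exon) pair
print(exons.columns)  # chromosome, gene/transcript/exon ids, coordinates, UTRs, strand, rank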
def make(world_name, gl_version=GL_VERSION.OPENGL4, window_res=None, cam_res=None, verbose=False):
"""Creates a holodeck environment using the supplied world name.
Args:
world_name (str): The name of the world to load as an environment. Must match the name of a world in an
installed package.
gl_version (int, optional): The OpenGL version to use (Linux only). Defaults to GL_VERSION.OPENGL4.
window_res ((int, int), optional): The resolution to load the game window at. Defaults to (512, 512).
cam_res ((int, int), optional): The resolution to load the pixel camera sensors at. Defaults to (256, 256).
verbose (bool): Whether to run in verbose mode. Defaults to False.
Returns:
HolodeckEnvironment: A holodeck environment instantiated with all the settings necessary for the specified
world, and other supplied arguments.
"""
holodeck_worlds = _get_worlds_map()
if world_name not in holodeck_worlds:
raise HolodeckException("Invalid World Name")
param_dict = copy(holodeck_worlds[world_name])
param_dict["start_world"] = True
param_dict["uuid"] = str(uuid.uuid4())
param_dict["gl_version"] = gl_version
param_dict["verbose"] = verbose
if window_res is not None:
param_dict["window_width"] = window_res[0]
param_dict["window_height"] = window_res[1]
if cam_res is not None:
param_dict["camera_width"] = cam_res[0]
param_dict["camera_height"] = cam_res[1]
return HolodeckEnvironment(**param_dict)
|
Creates a holodeck environment using the supplied world name.
Args:
world_name (str): The name of the world to load as an environment. Must match the name of a world in an
installed package.
gl_version (int, optional): The OpenGL version to use (Linux only). Defaults to GL_VERSION.OPENGL4.
window_res ((int, int), optional): The resolution to load the game window at. Defaults to (512, 512).
cam_res ((int, int), optional): The resolution to load the pixel camera sensors at. Defaults to (256, 256).
verbose (bool): Whether to run in verbose mode. Defaults to False.
Returns:
HolodeckEnvironment: A holodeck environment instantiated with all the settings necessary for the specified
world, and other supplied arguments.
|
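A hedged usage sketch, assuming a world named "UrbanCity" from an installed holodeck package:
env = make("UrbanCity", window_res=(1024, 768), cam_res=(256, 256), verbose=True)
state = env.reset()  # standard HolodeckEnvironment entry point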
def format_bytes_size(val):
"""
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
"""
if not val:
return '0 bytes'
for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
if val < 1024.0:
return "{0:.2f} {1}".format(val, sz_name)
val /= 1024.0
raise OverflowError()
|
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
|
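Worked examples of the divide-by-1024 loop:
assert format_bytes_size(0) == '0 bytes'
assert format_bytes_size(512) == '512.00 bytes'
assert format_bytes_size(1536) == '1.50 KB'           # one division
assert format_bytes_size(5 * 1024 ** 3) == '5.00 GB'  # three divisions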
def inspect_members(self):
"""
Returns the list of all troposphere members we are able to
construct
"""
if not self._inspect_members:
TemplateGenerator._inspect_members = \
self._import_all_troposphere_modules()
return self._inspect_members
|
Returns the list of all troposphere members we are able to
construct
|
def create_machine_group(self, project_name, group_detail):
""" create machine group in a project
An unsuccessful operation will raise a LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: CreateMachineGroupResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/machinegroups"
headers['Content-Type'] = 'application/json'
body = six.b(json.dumps(group_detail.to_json()))
headers['x-log-bodyrawsize'] = str(len(body))
(resp, headers) = self._send("POST", project_name, body, resource, params, headers)
return CreateMachineGroupResponse(headers, resp)
|
create machine group in a project
An unsuccessful operation will raise a LogException.
:type project_name: string
:param project_name: the Project name
:type group_detail: MachineGroupDetail
:param group_detail: the machine group detail config
:return: CreateMachineGroupResponse
:raise: LogException
|
async def send_tokens(payment_handle: int, tokens: int, address: str) -> str:
"""
Sends tokens to an address
payment_handle is always 0
:param payment_handle: Integer
:param tokens: Integer
:param address: String
Example:
payment_handle = 0
amount = 1000
address = await Wallet.create_payment_address('00000000000000000000000001234567')
await Wallet.send_tokens(payment_handle, amount, address)
:return:
"""
logger = logging.getLogger(__name__)
if not hasattr(Wallet.send_tokens, "cb"):
logger.debug("vcx_wallet_send_tokens: Creating callback")
Wallet.send_tokens.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_payment_handle = c_uint32(payment_handle)
c_tokens = c_char_p(str(tokens).encode('utf-8'))
c_address = c_char_p(address.encode('utf-8'))
result = await do_call('vcx_wallet_send_tokens',
c_payment_handle,
c_tokens,
c_address,
Wallet.send_tokens.cb)
logger.debug("vcx_wallet_send_tokens completed")
return result
|
Sends tokens to an address
payment_handle is always 0
:param payment_handle: Integer
:param tokens: Integer
:param address: String
Example:
payment_handle = 0
amount = 1000
address = await Wallet.create_payment_address('00000000000000000000000001234567')
await Wallet.send_tokens(payment_handle, amount, address)
:return:
|
def execute(self, eopatch=None, bbox=None, time_interval=None):
"""
Creates OGC (WMS or WCS) request, downloads requested data and stores it together
with valid data mask in newly created EOPatch. Returns the EOPatch.
:param eopatch:
:type eopatch: EOPatch or None
:param bbox: specifies the bounding box of the requested image. Coordinates must be in
the specified coordinate reference system. Required.
:type bbox: BBox
:param time_interval: time or time range for which to return the results, in ISO8601 format
(year-month-date, for example: ``2016-01-01``, or year-month-dateThours:minutes:seconds
format, i.e. ``2016-01-01T16:31:21``). When a single time is specified the request will
return data for that specific date, if it exists. If a time range is specified the result
is a list of all scenes between the specified dates conforming to the cloud coverage
criteria. Most recent acquisition being first in the list. For the latest acquisition use
``latest``. Examples: ``latest``, ``'2016-01-01'``, or ``('2016-01-01', '2016-01-31')``
:type time_interval: datetime.datetime, str, or tuple of datetime.datetime/str
"""
if eopatch is None:
eopatch = EOPatch()
request_params, service_type = self._prepare_request_data(eopatch, bbox, time_interval)
request = {ServiceType.WMS: WmsRequest,
ServiceType.WCS: WcsRequest}[service_type](**request_params)
request_dates = request.get_dates()
if not eopatch.timestamp:
eopatch.timestamp = request_dates
download_frames = None
if self.feature_type.is_time_dependent():
download_frames = get_common_timestamps(request_dates, eopatch.timestamp)
images = request.get_data(raise_download_errors=self.raise_download_errors, data_filter=download_frames)
if not self.raise_download_errors:
bad_data = [idx for idx, value in enumerate(images) if value is None]
for idx in reversed(bad_data):
LOGGER.warning('Data from %s could not be downloaded for %s!', str(request_dates[idx]), self.layer)
del images[idx]
del request_dates[idx]
for removed_frame in eopatch.consolidate_timestamps(request_dates):
LOGGER.warning('Removed data for frame %s from EOPatch '
'due to unavailability of %s!', str(removed_frame), self.layer)
self._add_data(eopatch, np.asarray(images))
self._add_meta_info(eopatch, request_params, service_type)
return eopatch
|
Creates OGC (WMS or WCS) request, downloads requested data and stores it together
with valid data mask in newly created EOPatch. Returns the EOPatch.
:param eopatch:
:type eopatch: EOPatch or None
:param bbox: specifies the bounding box of the requested image. Coordinates must be in
the specified coordinate reference system. Required.
:type bbox: BBox
:param time_interval: time or time range for which to return the results, in ISO8601 format
(year-month-date, for example: ``2016-01-01``, or year-month-dateThours:minutes:seconds
format, i.e. ``2016-01-01T16:31:21``). When a single time is specified the request will
return data for that specific date, if it exists. If a time range is specified the result
is a list of all scenes between the specified dates conforming to the cloud coverage
criteria. Most recent acquisition being first in the list. For the latest acquisition use
``latest``. Examples: ``latest``, ``'2016-01-01'``, or ``('2016-01-01', '2016-01-31')``
:type time_interval: datetime.datetime, str, or tuple of datetime.datetime/str
|
def check_base_suggested_attributes(self, dataset):
'''
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF)
'''
suggested_ctx = TestCtx(BaseCheck.LOW, 'Suggested global attributes')
# Do any of the variables define platform ?
platform_name = getattr(dataset, 'platform', '')
suggested_ctx.assert_true(platform_name != '', 'platform should exist and point to a term in :platform_vocabulary.')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
suggested_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
# Parse dates, check for ISO 8601
for attr in ['date_modified', 'date_issued', 'date_metadata_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
suggested_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
suggested_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: 2017-04-11T00:00:00Z), currently: {}'.format(attr, attr_value))
units = getattr(dataset, 'geospatial_lat_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))
units = getattr(dataset, 'geospatial_lon_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))
contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
names = contributor_name.split(',')
roles = contributor_role.split(',')
suggested_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
suggested_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
suggested_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
return suggested_ctx.to_result()
|
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF)
|
def _display_token(self):
"""
Display token information or redirect to login prompt if none is
available.
"""
if self.token is None:
return "301 Moved", "", {"Location": "/login"}
return ("200 OK",
self.TOKEN_TEMPLATE.format(
access_token=self.token["access_token"]),
{"Content-Type": "text/html"})
|
Display token information or redirect to login prompt if none is
available.
|
def yahoo(base, target):
"""Parse data from Yahoo."""
api_url = 'http://download.finance.yahoo.com/d/quotes.csv'
resp = requests.get(
api_url,
params={
'e': '.csv',
'f': 'sl1d1t1',
's': '{0}{1}=X'.format(base, target)
},
timeout=1,
)
value = resp.text.split(',', 2)[1]
return decimal.Decimal(value)
|
Parse data from Yahoo.
|
def degree_circle(self,EdgeAttribute=None,network=None,NodeAttribute=None,\
nodeList=None,singlePartition=None,verbose=None):
"""
Execute the Degree Sorted Circle Layout on a network.
:param EdgeAttribute (string, optional): The name of the edge column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix
is not used, the NAME column is matched by default. A list of
COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can
be used to match multiple values.
:param singlePartition (string, optional): Don't partition graph before
layout; boolean values only, true or false; defaults to false
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(['EdgeAttribute','network','NodeAttribute','nodeList',\
'singlePartition'],[EdgeAttribute,network,NodeAttribute,nodeList,\
singlePartition])
response=api(url=self.__url+"/degree-circle", PARAMS=PARAMS, method="POST", verbose=verbose)
return response
|
Execute the Degree Sorted Circle Layout on a network.
:param EdgeAttribute (string, optional): The name of the edge column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param network (string, optional): Specifies a network by name, or by SUID
if the prefix SUID: is used. The keyword CURRENT, or a blank value can
also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix
is not used, the NAME column is matched by default. A list of
COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can
be used to match multiple values.
:param singlePartition (string, optional): Don't partition graph before
layout; boolean values only, true or false; defaults to false
|
def to_python(cls, value, **kwargs):
"""String deserialisation just return the value as a string"""
if not value:
return ''
try:
return str(value)
except Exception:
pass
try:
return value.encode('utf-8')
except Exception:
pass
raise cls.exception("Cannot deserialize value {0} to string".format(value))
|
String deserialisation: just return the value as a string.
|
def BuildAdGroupCriterionOperations(adgroup_operations, number_of_keywords=1):
"""Builds the operations adding a Keyword Criterion to each AdGroup.
Args:
adgroup_operations: a list containing the operations that will add AdGroups.
number_of_keywords: an int defining the number of Keywords to be created.
Returns:
a list containing the operations that will create a new Keyword Criterion
associated with each provided AdGroup.
"""
criterion_operations = [
{
# The xsi_type of the operation can usually be guessed by the API
# because a given service only handles one type of operation.
# However, batch jobs process operations of different types, so
# the xsi_type must always be explicitly defined for these
# operations.
'xsi_type': 'AdGroupCriterionOperation',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': adgroup_operation['operand']['id'],
'criterion': {
'xsi_type': 'Keyword',
# Make 50% of keywords invalid to demonstrate error handling.
'text': 'mars%s%s' % (i, '!!!' if i % 2 == 0 else ''),
'matchType': 'BROAD'
}
},
'operator': 'ADD'
}
for adgroup_operation in adgroup_operations
for i in range(number_of_keywords)]
return criterion_operations
|
Builds the operations adding a Keyword Criterion to each AdGroup.
Args:
adgroup_operations: a list containing the operations that will add AdGroups.
number_of_keywords: an int defining the number of Keywords to be created.
Returns:
a list containing the operations that will create a new Keyword Criterion
associated with each provided AdGroup.
|
def get_remote_url(self, remote='origin', cached=True):
"""Get a git remote URL for this instance."""
if hasattr(self.__class__, '_remote_url') and cached:
url = self.__class__._remote_url
else:
r = self.get_remote(remote)
try:
url = list(r.urls)[0]
except GitCommandError as ex:
if 'correct access rights' in str(ex):
# If ssh is not setup to access this repository
cmd = ['git', 'config', '--get', 'remote.%s.url' % r.name]
url = Git().execute(cmd)
else:
raise ex
except AttributeError:
url = None
if url is not None and url.startswith('git@'):
domain = url.split('@')[1].split(':')[0]
path = url.split(':')[1]
url = "http://%s/%s" % (domain, path)
self.__class__._remote_url = url
return url
|
Get a git remote URL for this instance.
|
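A minimal stand-alone sketch of the ssh-to-http rewrite step performed above, outside the class; the sample URL is hypothetical.
url = 'git@github.com:user/repo.git'
if url.startswith('git@'):
    domain = url.split('@')[1].split(':')[0]  # 'github.com'
    path = url.split(':')[1]                  # 'user/repo.git'
    url = "http://%s/%s" % (domain, path)
print(url)  # http://github.com/user/repo.git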
def _get_ensemble_bed_files(items):
"""
get all ensemble structural BED file calls, skipping any normal samples from
tumor/normal calls
"""
bed_files = []
for data in items:
for sv in data.get("sv", []):
if sv["variantcaller"] == "sv-ensemble":
if ("vrn_file" in sv and not vcfutils.get_paired_phenotype(data) == "normal"
and file_exists(sv["vrn_file"])):
bed_files.append(sv["vrn_file"])
return bed_files
|
get all ensemble structural BED file calls, skipping any normal samples from
tumor/normal calls
|
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
cb_ = _get_connection()
try:
jid_doc = cb_.get(six.text_type(jid))
except couchbase.exceptions.NotFoundError:
return {}
ret = {}
try:
ret = jid_doc.value['load']
ret['Minions'] = jid_doc.value['minions']
except KeyError as e:
log.error(e)
return ret
|
Return the load data that marks a specified jid
|
def vlink(s_expnum, s_ccd, s_version, s_ext,
l_expnum, l_ccd, l_version, l_ext, s_prefix=None, l_prefix=None):
"""make a link between two version of a file.
@param s_expnum:
@param s_ccd:
@param s_version:
@param s_ext:
@param l_expnum:
@param l_ccd:
@param l_version:
@param l_ext:
@param s_prefix:
@param l_prefix:
@return:
"""
source_uri = get_uri(s_expnum, ccd=s_ccd, version=s_version, ext=s_ext, prefix=s_prefix)
link_uri = get_uri(l_expnum, ccd=l_ccd, version=l_version, ext=l_ext, prefix=l_prefix)
return client.link(source_uri, link_uri)
|
make a link between two versions of a file.
@param s_expnum: source exposure number
@param s_ccd: source CCD number
@param s_version: source version tag
@param s_ext: source file extension
@param l_expnum: link exposure number
@param l_ccd: link CCD number
@param l_version: link version tag
@param l_ext: link file extension
@param s_prefix: source filename prefix
@param l_prefix: link filename prefix
@return: result of client.link for the two URIs
|
def hover_pixmap(self, value):
"""
Setter for **self.__hover_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap
"""
if value is not None:
assert type(value) is QPixmap, "'{0}' attribute: '{1}' type is not 'QPixmap'!".format(
"hover_pixmap", value)
self.__hover_pixmap = value
|
Setter for **self.__hover_pixmap** attribute.
:param value: Attribute value.
:type value: QPixmap
|
def omim_terms(case_obj):
"""Extract all OMIM phenotypes available for the case
Args:
case_obj(dict): a scout case object
Returns:
disorders(list): a list of OMIM disorder objects
"""
LOG.info("Collecting OMIM disorders for case {}".format(case_obj.get('display_name')))
disorders = []
case_disorders = case_obj.get('diagnosis_phenotypes') # array of OMIM terms
if case_disorders:
for disorder in case_disorders:
disorder_obj = {
"id" : ':'.join([ 'MIM', str(disorder)])
}
disorders.append(disorder_obj)
return disorders
|
Extract all OMIM phenotypes available for the case
Args:
case_obj(dict): a scout case object
Returns:
disorders(list): a list of OMIM disorder objects
|
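A minimal sketch of the same MIM-id mapping without scout, assuming a case dict with integer OMIM numbers in 'diagnosis_phenotypes' (the sample values are hypothetical).
case_obj = {'display_name': 'demo', 'diagnosis_phenotypes': [615349, 121210]}
disorders = [{'id': ':'.join(['MIM', str(d)])}
             for d in case_obj.get('diagnosis_phenotypes') or []]
print(disorders)  # [{'id': 'MIM:615349'}, {'id': 'MIM:121210'}]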
def get_streaming(self, path, stype="M3U8_AUTO_480", **kwargs):
"""获得视频的m3u8列表
:param path: 视频文件路径
:param stype: 返回stream类型, 已知有``M3U8_AUTO_240``/``M3U8_AUTO_480``/``M3U8_AUTO_720``
.. warning::
M3U8_AUTO_240会有问题, 目前480P是最稳定的, 也是百度网盘默认的
:return: str 播放(列表)需要的信息
"""
params = {
'path': path,
'type': stype
}
url = 'https://{0}/rest/2.0/pcs/file'.format(BAIDUPCS_SERVER)
while True:
ret = self._request('file', 'streaming', url=url, extra_params=params, **kwargs)
if not ret.ok:
logging.debug('get_streaming ret_status_code %s' % ret.status_code)
jdata = json.loads(ret.content)
if jdata['error_code'] == 31345:
                # try again
continue
elif jdata['error_code'] == 31066:
                # file does not exist
return 31066
elif jdata['error_code'] == 31304:
                # unsupported file type
return 31304
elif jdata['error_code'] == 31023:
# params error
return 31023
return ret.content
|
Get the m3u8 playlist for a video.
:param path: path to the video file
:param stype: stream type to return; known values are ``M3U8_AUTO_240``/``M3U8_AUTO_480``/``M3U8_AUTO_720``
.. warning::
    M3U8_AUTO_240 is known to be problematic; 480P is currently the most stable and is also the Baidu Pan default
:return: str, the information needed for playback (the playlist)
|
def create_html(self, filename=None):
"""Create a circle visual from a geojson data source"""
if isinstance(self.style, str):
style = "'{}'".format(self.style)
else:
style = self.style
options = dict(
gl_js_version=GL_JS_VERSION,
accessToken=self.access_token,
div_id=self.div_id,
style=style,
center=list(self.center),
zoom=self.zoom,
geojson_data=json.dumps(self.data, ensure_ascii=False),
belowLayer=self.below_layer,
opacity=self.opacity,
minzoom=self.min_zoom,
maxzoom=self.max_zoom,
pitch=self.pitch,
bearing=self.bearing,
boxZoomOn=json.dumps(self.box_zoom_on),
doubleClickZoomOn=json.dumps(self.double_click_zoom_on),
scrollZoomOn=json.dumps(self.scroll_zoom_on),
touchZoomOn=json.dumps(self.touch_zoom_on),
popupOpensOnHover=self.popup_open_action=='hover',
includeSnapshotLinks=self.add_snapshot_links,
preserveDrawingBuffer=json.dumps(self.add_snapshot_links),
showScale=self.scale,
scaleUnits=self.scale_unit_system,
scaleBorderColor=self.scale_border_color,
scalePosition=self.scale_position,
scaleFillColor=self.scale_background_color,
scaleTextColor=self.scale_text_color,
)
if self.legend:
if all([self.legend, self.legend_gradient, self.legend_function == 'radius']):
raise LegendError(' '.join(['Gradient legend format not compatible with a variable radius legend.',
'Please either change `legend_gradient` to False or `legend_function` to "color".']))
options.update(
showLegend=self.legend,
legendLayout=self.legend_layout,
legendFunction=self.legend_function,
legendStyle=self.legend_style, # reserve for custom CSS
legendGradient=json.dumps(self.legend_gradient),
legendFill=self.legend_fill,
legendHeaderFill=self.legend_header_fill,
legendTextColor=self.legend_text_color,
legendNumericPrecision=json.dumps(self.legend_text_numeric_precision),
legendTitleHaloColor=self.legend_title_halo_color,
legendKeyShape=self.legend_key_shape,
legendKeyBordersOn=json.dumps(self.legend_key_borders_on)
)
if self.vector_source:
options.update(
vectorUrl=self.vector_url,
vectorLayer=self.vector_layer_name,
vectorJoinDataProperty=self.vector_join_property,
joinData=json.dumps(False),
dataJoinProperty=self.data_join_property,
enableDataJoin=not self.disable_data_join
)
data = geojson_to_dict_list(self.data)
if bool(data):
options.update(joinData=json.dumps(data, ensure_ascii=False))
if self.label_property is None:
options.update(labelProperty=None)
else:
options.update(labelProperty='{' + self.label_property + '}')
options.update(
labelColor=self.label_color,
labelSize=self.label_size,
labelHaloColor=self.label_halo_color,
labelHaloWidth=self.label_halo_width
)
self.add_unique_template_variables(options)
if filename:
html = templates.format(self.template, **options)
with codecs.open(filename, "w", "utf-8-sig") as f:
f.write(html)
return None
else:
return templates.format(self.template, **options)
|
Create a circle visual from a geojson data source
|
def make_conditional(
self, request_or_environ, accept_ranges=False, complete_length=None
):
"""Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
For optimal performance when handling range requests, it's recommended
that your response data object implements `seekable`, `seek` and `tell`
methods as described by :py:class:`io.IOBase`. Objects returned by
:meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
:param accept_ranges: This parameter dictates the value of
`Accept-Ranges` header. If ``False`` (default),
the header is not set. If ``True``, it will be set
to ``"bytes"``. If ``None``, it will be set to
``"none"``. If it's a string, it will use this
value.
:param complete_length: Will be used only in valid Range Requests.
It will set `Content-Range` complete length
value and compute `Content-Length` real value.
This parameter is mandatory for successful
Range Requests completion.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
"""
environ = _get_environ(request_or_environ)
if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
# if the date is not in the headers, add it now. We however
# will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers including
# wsgiref.
if "date" not in self.headers:
self.headers["Date"] = http_date()
accept_ranges = _clean_accept_ranges(accept_ranges)
is206 = self._process_range_request(environ, complete_length, accept_ranges)
if not is206 and not is_resource_modified(
environ,
self.headers.get("etag"),
None,
self.headers.get("last-modified"),
):
if parse_etags(environ.get("HTTP_IF_MATCH")):
self.status_code = 412
else:
self.status_code = 304
if (
self.automatically_set_content_length
and "content-length" not in self.headers
):
length = self.calculate_content_length()
if length is not None:
self.headers["Content-Length"] = length
return self
|
Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
For optimal performance when handling range requests, it's recommended
that your response data object implements `seekable`, `seek` and `tell`
methods as described by :py:class:`io.IOBase`. Objects returned by
:meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
:param accept_ranges: This parameter dictates the value of
`Accept-Ranges` header. If ``False`` (default),
the header is not set. If ``True``, it will be set
to ``"bytes"``. If ``None``, it will be set to
``"none"``. If it's a string, it will use this
value.
:param complete_length: Will be used only in valid Range Requests.
It will set `Content-Range` complete length
value and compute `Content-Length` real value.
This parameter is mandatory for successful
Range Requests completion.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
|
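A usage sketch with werkzeug's public test helpers; the response body is arbitrary.
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Response

environ = EnvironBuilder(method='GET').get_environ()
resp = Response('hello world')
resp.add_etag()                        # give make_conditional an etag to compare against
resp = resp.make_conditional(environ)
print(resp.status_code)                # 200 here; 304 if the client had sent a matching If-None-Match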
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
plural_expr_assignment = None
variables = {}
trimmed = None
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
trimmed = name.value == 'trimmed'
continue
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
if trimmed is None:
trimmed = self.environment.policies['ext.i18n.trimmed']
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
|
Parse a translatable tag.
|
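A usage sketch of the {% trans %} tag this method parses, using jinja2's i18n extension with null translations.
from jinja2 import Environment

env = Environment(extensions=['jinja2.ext.i18n'])
env.install_null_translations()
tmpl = env.from_string(
    '{% trans count=n %}{{ count }} item{% pluralize %}'
    '{{ count }} items{% endtrans %}')
print(tmpl.render(n=1))  # 1 item
print(tmpl.render(n=3))  # 3 items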
def apply_patch(self):
"""
Fix default socket lib to handle client disconnection while receiving data (Broken pipe)
"""
if sys.version_info >= (3, 0):
# No patch for python >= 3.0
pass
else:
from .patch.socket import socket as patch
socket.socket = patch
|
Fix default socket lib to handle client disconnection while receiving data (Broken pipe)
|
def boundary_polygon(self, time):
"""
Get coordinates of object boundary in counter-clockwise order
"""
ti = np.where(time == self.times)[0][0]
com_x, com_y = self.center_of_mass(time)
# If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.
# But if all perimeter points are masked, find_boundaries() does not find the object.
# Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.
padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)
chull = convex_hull_image(padded_mask)
boundary_image = find_boundaries(chull, mode='inner', background=0)
# Now remove the padding.
boundary_image = boundary_image[1:-1,1:-1]
boundary_x = self.x[ti].ravel()[boundary_image.ravel()]
boundary_y = self.y[ti].ravel()[boundary_image.ravel()]
r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)
theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360
polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)], dtype=[('r', 'f4'), ('theta', 'f4')])
coord_order = np.argsort(polar_coords, order=['theta', 'r'])
ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])
return ordered_coords
|
Get coordinates of object boundary in counter-clockwise order
|
def to_value_list(original_strings, corenlp_values=None):
"""Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
"""
assert isinstance(original_strings, (list, tuple, set))
if corenlp_values is not None:
assert isinstance(corenlp_values, (list, tuple, set))
assert len(original_strings) == len(corenlp_values)
return list(set(to_value(x, y) for (x, y)
in zip(original_strings, corenlp_values)))
else:
return list(set(to_value(x) for x in original_strings))
|
Convert a list of strings to a list of Values
Args:
original_strings (list[basestring])
corenlp_values (list[basestring or None])
Returns:
list[Value]
|
def name(object):
"Try to find some reasonable name for the object."
return (getattr(object, 'name', 0) or getattr(object, '__name__', 0)
or getattr(getattr(object, '__class__', 0), '__name__', 0)
or str(object))
|
Try to find some reasonable name for the object.
|
def getVariable(dbg, thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
            # If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = dbg.find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
    # str.replace returns a new string, so store the result back into the list
    attrList = [attr.replace("@_@TAB_CHAR@_@", '\t') for attr in attrList]
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
var = evaluate_expression(dbg, frame, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
|
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
|
def copy_ecu_with_frames(ecu_or_glob, source_db, target_db):
# type: (typing.Union[cm.Ecu, str], cm.CanMatrix, cm.CanMatrix) -> None
"""
Copy ECU(s) identified by Name or as Object from source CAN matrix to target CAN matrix.
    This function additionally copies all relevant Frames and Defines.
:param ecu_or_glob: Ecu instance or glob pattern for Ecu name
:param source_db: Source CAN matrix
:param target_db: Destination CAN matrix
"""
# check whether ecu_or_glob is object or symbolic name
if isinstance(ecu_or_glob, cm.Ecu):
ecu_list = [ecu_or_glob]
else:
ecu_list = source_db.glob_ecus(ecu_or_glob)
for ecu in ecu_list:
logger.info("Copying ECU " + ecu.name)
target_db.add_ecu(copy.deepcopy(ecu))
# copy tx-frames
for frame in source_db.frames:
if ecu.name in frame.transmitters:
copy_frame(frame.arbitration_id, source_db, target_db)
# copy rx-frames
for frame in source_db.frames:
for signal in frame.signals:
if ecu.name in signal.receivers:
copy_frame(frame.arbitration_id, source_db, target_db)
break
# copy all ECU defines
for attribute in ecu.attributes:
if attribute not in target_db.ecu_defines:
target_db.add_ecu_defines(
copy.deepcopy(attribute), copy.deepcopy(source_db.ecu_defines[attribute].definition))
target_db.add_define_default(
copy.deepcopy(attribute), copy.deepcopy(source_db.ecu_defines[attribute].defaultValue))
# update enum-data types if needed:
if source_db.ecu_defines[attribute].type == 'ENUM':
temp_attr = ecu.attribute(attribute, db=source_db)
if temp_attr not in target_db.ecu_defines[attribute].values:
target_db.ecu_defines[attribute].values.append(copy.deepcopy(temp_attr))
target_db.ecu_defines[attribute].update()
|
Copy ECU(s) identified by Name or as Object from source CAN matrix to target CAN matrix.
This function additionally copies all relevant Frames and Defines.
:param ecu_or_glob: Ecu instance or glob pattern for Ecu name
:param source_db: Source CAN matrix
:param target_db: Destination CAN matrix
|
def use(self, func, when='whenever'):
''' Append a middleware to the algorithm '''
#NOTE A middleware Object ?
# self.use() is usually called from initialize(), so no logger yet
print('registering middleware {}'.format(func.__name__))
self.middlewares.append({
'call': func,
'name': func.__name__,
'args': func.func_code.co_varnames,
'when': when
})
|
Append a middleware to the algorithm
|
def get_descriptor_defaults(self, api_info, hostname=None, x_google_api_name=False):
"""Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
"""
hostname = (hostname or util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
util.is_running_on_devserver()) else 'https'
base_path = api_info.base_path
if base_path != '/':
base_path = base_path.rstrip('/')
defaults = {
'swagger': '2.0',
'info': {
'version': api_info.api_version,
'title': api_info.name
},
'host': hostname,
'consumes': ['application/json'],
'produces': ['application/json'],
'schemes': [protocol],
'basePath': base_path,
}
if x_google_api_name:
defaults['x-google-api-name'] = _validate_api_name(api_info.name)
return defaults
|
Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
|
def dev_get_chunk(dev_name, state, pugrp=None, punit=None):
"""
Get a chunk-descriptor for the first chunk in the given state.
    If the pugrp and punit are set, then search only that pugrp/punit
@returns the first chunk in the given state if one exists, None otherwise
"""
rprt = dev_get_rprt(dev_name, pugrp, punit)
if not rprt:
return None
return next((d for d in rprt if d["cs"] == state), None)
|
Get a chunk-descriptor for the first chunk in the given state.
If the pugrp and punit are set, then search only that pugrp/punit
@returns the first chunk in the given state if one exists, None otherwise
|
def generate(env):
"Add RPCGEN Builders and construction variables for an Environment."
client = Builder(action=rpcgen_client, suffix='_clnt.c', src_suffix='.x')
header = Builder(action=rpcgen_header, suffix='.h', src_suffix='.x')
service = Builder(action=rpcgen_service, suffix='_svc.c', src_suffix='.x')
xdr = Builder(action=rpcgen_xdr, suffix='_xdr.c', src_suffix='.x')
env.Append(BUILDERS={'RPCGenClient' : client,
'RPCGenHeader' : header,
'RPCGenService' : service,
'RPCGenXDR' : xdr})
env['RPCGEN'] = 'rpcgen'
env['RPCGENFLAGS'] = SCons.Util.CLVar('')
env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('')
env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('')
env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('')
env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('')
|
Add RPCGEN Builders and construction variables for an Environment.
|
def _compile_int_g(self):
"""Time Domain Simulation - update algebraic equations and Jacobian"""
string = '"""\n'
# evaluate the algebraic equations g
string += 'system.dae.init_g()\n'
for gcall, call in zip(self.gcall, self.gcalls):
if gcall:
string += call
string += '\n'
string += 'system.dae.reset_small_g()\n'
# handle islands
string += self.gisland
# rebuild constant Jacobian elements if needed
string += 'if system.dae.factorize:\n'
string += ' system.dae.init_jac0()\n'
for jac0, call in zip(self.jac0, self.jac0s):
if jac0:
string += ' ' + call
string += ' system.dae.temp_to_spmatrix(\'jac0\')\n'
# evaluate Jacobians Gy
string += 'system.dae.setup_Gy()\n'
for gycall, call in zip(self.gycall, self.gycalls):
if gycall:
string += call
string += '\n'
string += self.gyisland
string += 'system.dae.temp_to_spmatrix(\'jac\')\n'
string += '"""'
self.int_g = compile(eval(string), '', 'exec')
|
Time Domain Simulation - update algebraic equations and Jacobian
|
def createFromSource(cls, vs, name, registry):
''' returns a registry component for anything that's a valid package
name (this does not guarantee that the component actually exists in
the registry: use availableVersions() for that).
'''
# we deliberately allow only lowercase, hyphen, and (unfortunately)
# numbers in package names, to reduce the possibility of confusingly
# similar names: if the name doesn't match this then escalate to make
# the user fix it. Targets also allow +
if registry == 'targets':
name_match = re.match('^[a-z]+[a-z0-9+-]*$', name)
if not name_match:
raise access_common.AccessException(
'Target name "%s" is not valid (must contain only lowercase letters, hyphen, plus, and numbers)' % name
)
else:
name_match = re.match('^[a-z]+[a-z0-9-]*$', name)
if not name_match:
raise access_common.AccessException(
'Module name "%s" is not valid (must contain only lowercase letters, hyphen, and numbers)' % name
)
assert(vs.semantic_spec)
return RegistryThing(name, vs.semantic_spec, registry)
|
returns a registry component for anything that's a valid package
name (this does not guarantee that the component actually exists in
the registry: use availableVersions() for that).
|
def _get_build_prefix():
""" Returns a safe build_prefix """
path = os.path.join(
tempfile.gettempdir(),
'pip_build_%s' % __get_username().replace(' ', '_')
)
if WINDOWS:
""" on windows(tested on 7) temp dirs are isolated """
return path
try:
os.mkdir(path)
write_delete_marker_file(path)
except OSError:
file_uid = None
try:
# raises OSError for symlinks
# https://github.com/pypa/pip/pull/935#discussion_r5307003
file_uid = get_path_uid(path)
except OSError:
file_uid = None
if file_uid != os.geteuid():
msg = (
"The temporary folder for building (%s) is either not owned by"
" you, or is a symlink." % path
)
print(msg)
print(
"pip will not work until the temporary folder is either "
"deleted or is a real directory owned by your user account."
)
raise exceptions.InstallationError(msg)
return path
|
Returns a safe build_prefix
|
def entropy_H(self, data):
"""Calculate the entropy of a chunk of data."""
if len(data) == 0:
return 0.0
occurences = array.array('L', [0]*256)
for x in data:
occurences[ord(x)] += 1
entropy = 0
for x in occurences:
if x:
p_x = float(x) / len(data)
entropy -= p_x*math.log(p_x, 2)
return entropy
|
Calculate the entropy of a chunk of data.
|
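A worked example of the Shannon entropy computed above, rewritten as a stand-alone function (the original is a method, so self is dropped); 'aabb' has two symbols at p=0.5 each, hence exactly 1 bit.
import math
from collections import Counter

def entropy(data):
    counts = Counter(data)
    n = float(len(data))
    return -sum((c / n) * math.log(c / n, 2) for c in counts.values())

print(entropy('aabb'))  # 1.0
print(entropy('abcd'))  # 2.0
print(entropy('aaaa'))  # -0.0 (a single repeated symbol carries no information)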
def _newproject(command, path, name, settings):
"""
Helper to create new project.
"""
key = None
title = _get_project_title()
template = _get_template(settings)
# Init repo
git = sh.git.bake(_cwd=path)
puts(git.init())
if template.get("url"):
# Create submodule
puts(git.submodule.add(template['url'], '_blueprint'))
puts(git.submodule.update(*['--init']))
# Create spreadsheet
key = _create_spreadsheet(name, title, path, settings)
# Copy html files
puts(colored.green("\nCopying html files..."))
files = glob.iglob(os.path.join(path, "_blueprint", "*.html"))
for file in files:
if os.path.isfile(file):
dir, filename = os.path.split(file)
if not filename.startswith("_") and not filename.startswith("."):
puts("Copying {0} to {1}".format(filename, path))
shutil.copy2(file, path)
ignore = os.path.join(path, "_blueprint", ".gitignore")
if os.path.isfile(ignore):
shutil.copy2(ignore, path)
else:
empty_index_path = os.path.join(path, "index.html")
open(empty_index_path, "w")
# Create config file
_copy_config_template(name, title, template, path, key, settings)
# Commit
puts(colored.green("\nInitial commit"))
puts(git.add('.'))
puts(git.commit(m='Created {0} from {1}'.format(name, template['name'])))
_install_requirements(path)
# Get site, run hook
with ensure_project(command, args, path) as site:
site.call_hook("newproject", site, git)
# Messages
puts("\nAll done! To preview your new project, type:\n")
puts("{0} {1}".format(colored.green("tarbell switch"), colored.green(name)))
puts("\nor\n")
puts("{0}".format(colored.green("cd %s" % path)))
puts("{0}".format(colored.green("tarbell serve\n")))
puts("\nYou got this!\n")
|
Helper to create new project.
|
def get_text_stream(name, encoding=None, errors='strict'):
"""Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts on Python 3
for already correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
"""
opener = text_streams.get(name)
if opener is None:
raise TypeError('Unknown standard stream %r' % name)
return opener(encoding, errors)
|
Returns a system stream for text processing. This usually returns
a wrapped stream around a binary stream returned from
:func:`get_binary_stream` but it also can take shortcuts on Python 3
for already correctly configured streams.
:param name: the name of the stream to open. Valid names are ``'stdin'``,
``'stdout'`` and ``'stderr'``
:param encoding: overrides the detected default encoding.
:param errors: overrides the default error mode.
|
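A usage sketch, assuming this is the helper exposed as click.get_text_stream.
import click

out = click.get_text_stream('stdout')
out.write(u'hello\n')
err = click.get_text_stream('stderr', errors='replace')
err.write(u'warning\n')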
def update(self, response, **kwargs):
'''
If a record matching the instance already exists in the database, update
both the column and venue column attributes, else create a new record.
'''
response_cls = super(
LocationResponseClassLegacyAccessor, self)._get_instance(**kwargs)
if response_cls:
setattr(response_cls, self.column, self.accessor(response))
setattr(
response_cls, self.venue_column, self.venue_accessor(response))
_action_and_commit(response_cls, session.add)
|
If a record matching the instance already exists in the database, update
both the column and venue column attributes, else create a new record.
|
def from_Solis(filepath, name=None, parent=None, verbose=True) -> Data:
"""Create a data object from Andor Solis software (ascii exports).
Parameters
----------
filepath : path-like
Path to .txt file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
data
New data object.
"""
# parse filepath
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
if not ".asc" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create data
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
axis0 = []
arr = []
attrs = {}
while True:
line = f.readline().strip()[:-1]
if len(line) == 0:
break
else:
line = line.split(",")
line = [float(x) for x in line]
axis0.append(line.pop(0))
arr.append(line)
i = 0
while i < 3:
line = f.readline().strip()
if len(line) == 0:
i += 1
else:
try:
key, val = line.split(":", 1)
except ValueError:
pass
else:
attrs[key.strip()] = val.strip()
f.close()
created = attrs["Date and Time"] # is this UTC?
created = time.strptime(created, "%a %b %d %H:%M:%S %Y")
created = timestamp.TimeStamp(time.mktime(created)).RFC3339
kwargs = {"name": name, "kind": "Solis", "source": filestr, "created": created}
if parent is None:
data = Data(**kwargs)
else:
data = parent.create_data(**kwargs)
arr = np.array(arr)
arr /= float(attrs["Exposure Time (secs)"])
# signal has units of Hz because time normalized
arr = data.create_channel(name="signal", values=arr, signed=False, units="Hz")
axis0 = np.array(axis0)
if float(attrs["Grating Groove Density (l/mm)"]) == 0:
xname = "xindex"
xunits = None
else:
xname = "wm"
xunits = "nm"
data.create_variable(name=xname, values=axis0[:, None], units=xunits)
data.create_variable(name="yindex", values=np.arange(arr.shape[1])[None, :], units=None)
data.transform(data.variables[0].natural_name, "yindex")
for key, val in attrs.items():
data.attrs[key] = val
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" axes: {0}".format(data.axis_names))
print(" shape: {0}".format(data.shape))
return data
|
Create a data object from Andor Solis software (ascii exports).
Parameters
----------
filepath : path-like
Path to .txt file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
data
New data object.
|
def parse_plays_stream(self):
"""Generate and yield a stream of parsed plays. Useful for per play processing."""
lx_doc = self.html_doc()
if lx_doc is not None:
parser = PlayParser(self.game_key.season, self.game_key.game_type)
plays = lx_doc.xpath('//tr[@class = "evenColor"]')
for p in plays:
p_obj = parser.build_play(p)
self.plays.append(p_obj)
yield p_obj
|
Generate and yield a stream of parsed plays. Useful for per play processing.
|
def accpro20_summary(self, cutoff):
"""Parse the ACCpro output file and return a summary of percent exposed/buried residues based on a cutoff.
Below the cutoff = buried
Equal to or greater than cutoff = exposed
The default cutoff used in accpro is 25%.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Args:
cutoff (float): Cutoff for defining a buried or exposed residue.
Returns:
dict: Percentage of buried and exposed residues
"""
summary = {}
        if cutoff < 1:
            # assume the cutoff was given as a fraction and convert it to a percentage
            cutoff *= 100
records = read_accpro20(self.out_accpro20)
for k,v in records.items():
seq_summary = {}
exposed = 0
buried = 0
for s in v:
                if s >= cutoff:
exposed += 1
else:
buried += 1
seq_summary['percent_exposed-accpro20'] = exposed / float(len(v))
seq_summary['percent_buried-accpro20'] = buried / float(len(v))
summary[k] = seq_summary
return summary
|
Parse the ACCpro output file and return a summary of percent exposed/buried residues based on a cutoff.
Below the cutoff = buried
Equal to or greater than cutoff = exposed
The default cutoff used in accpro is 25%.
The output file is just a FASTA formatted file, so you can get residue level
information by parsing it like a normal sequence file.
Args:
cutoff (float): Cutoff for defining a buried or exposed residue.
Returns:
dict: Percentage of buried and exposed residues
|
def embedded_preview(src_path):
''' Returns path to temporary copy of embedded QuickLook preview, if it exists '''
try:
assert(exists(src_path) and isdir(src_path))
        preview_list = glob(join(src_path, '[Qq]uicklook', '[Pp]review.*'))
assert(preview_list) # Assert there's at least one preview file
preview_path = preview_list[0] # Simplistically, assume there's only one
with NamedTemporaryFile(prefix='pyglass', suffix=extension(preview_path), delete=False) as tempfileobj:
dest_path = tempfileobj.name
shutil.copy(preview_path, dest_path)
assert(exists(dest_path))
return dest_path
    except Exception:
return None
|
Returns path to temporary copy of embedded QuickLook preview, if it exists
|
def solve(self):
"""Start (or re-start) optimisation. This method implements the
framework for the iterations of a FISTA algorithm. There is
sufficient flexibility in overriding the component methods that
it calls that it is usually not necessary to override this method
        in derived classes.
If option ``Verbose`` is ``True``, the progress of the
optimisation is displayed at every iteration. At termination
of this method, attribute :attr:`itstat` is a list of tuples
representing statistics of each iteration, unless option
``FastSolve`` is ``True`` and option ``Verbose`` is ``False``.
Attribute :attr:`timer` is an instance of :class:`.util.Timer`
that provides the following labelled timers:
``init``: Time taken for object initialisation by
:meth:`__init__`
``solve``: Total time taken by call(s) to :meth:`solve`
``solve_wo_func``: Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics
``solve_wo_rsdl`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
        value and related iteration statistics as well as time taken
to compute residuals
``solve_wo_btrack`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
        value and related iteration statistics as well as time taken
to compute residuals and implemented ``BackTrack`` mechanism
"""
# Open status display
fmtstr, nsep = self.display_start()
# Start solve timer
self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Main optimisation iterations
for self.k in range(self.k, self.k + self.opt['MaxMainIter']):
# Update record of X from previous iteration
self.store_prev()
# Compute backtracking
if self.opt['BackTrack', 'Enabled'] and self.k >= 0:
self.timer.stop('solve_wo_btrack')
# Compute backtracking
self.backtracking()
self.timer.start('solve_wo_btrack')
else:
# Compute just proximal step
self.proximal_step()
# Update by combining previous iterates
self.combination_step()
# Compute residuals and stopping thresholds
self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])
if not self.opt['FastSolve']:
frcxd, adapt_tol = self.compute_residuals()
self.timer.start('solve_wo_rsdl')
# Compute and record other iteration statistics and
# display iteration stats if Verbose option enabled
self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
if not self.opt['FastSolve']:
itst = self.iteration_stats(self.k, frcxd)
self.itstat.append(itst)
self.display_status(fmtstr, itst)
self.timer.start(['solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Call callback function if defined
if self.opt['Callback'] is not None:
if self.opt['Callback'](self):
break
# Stop if residual-based stopping tolerances reached
if not self.opt['FastSolve']:
if frcxd < adapt_tol:
break
# Increment iteration count
self.k += 1
# Record solve time
self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',
'solve_wo_btrack'])
# Print final separator string if Verbose option enabled
self.display_end(nsep)
return self.getmin()
|
Start (or re-start) optimisation. This method implements the
framework for the iterations of a FISTA algorithm. There is
sufficient flexibility in overriding the component methods that
it calls that it is usually not necessary to override this method
in derived classes.
If option ``Verbose`` is ``True``, the progress of the
optimisation is displayed at every iteration. At termination
of this method, attribute :attr:`itstat` is a list of tuples
representing statistics of each iteration, unless option
``FastSolve`` is ``True`` and option ``Verbose`` is ``False``.
Attribute :attr:`timer` is an instance of :class:`.util.Timer`
that provides the following labelled timers:
``init``: Time taken for object initialisation by
:meth:`__init__`
``solve``: Total time taken by call(s) to :meth:`solve`
``solve_wo_func``: Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics
``solve_wo_rsdl`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics as well as time taken
to compute residuals
``solve_wo_btrack`` : Total time taken by call(s) to
:meth:`solve`, excluding time taken to compute functional
value and related iteration statistics as well as time taken
to compute residuals and implemented ``BackTrack`` mechanism
|
def attach_volume_to_device(self, volume_id, device_id):
"""Attaches the created Volume to a Device.
"""
try:
volume = self.manager.get_volume(volume_id)
volume.attach(device_id)
except packet.baseapi.Error as msg:
raise PacketManagerException(msg)
return volume
|
Attaches the created Volume to a Device.
|
def ctcBeamSearch(mat, classes, lm, k, beamWidth):
"""
beam search as described by the paper of Hwang et al. and the paper of Graves et al.
"""
blankIdx = len(classes)
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
try:
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
except FloatingPointError:
prNonBlank = 0
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
            curr.entries[labeling].prText = last.entries[labeling].prText # beam-labeling not changed, therefore also LM score unchanged from last time-step
curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
for c in range(maxC - 1):
# add new char to current beam-labeling
newLabeling = labeling + (c,)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
# sort by probability
bestLabelings = last.sort()[:k] # get most probable labeling
output = []
for bestLabeling in bestLabelings:
# map labels to chars
res = ''
for l in bestLabeling:
res += classes[l]
output.append(res)
return output
|
beam search as described by the paper of Hwang et al. and the paper of Graves et al.
|
def _search_dirs(self, dirs, basename, extension=""):
"""Search a list of directories for a given filename or directory name.
        Iterates over the supplied directories, returning the first file
found with the supplied name and extension.
:param dirs: a list of directories
:param basename: the filename
:param extension: the file extension, for example '.conf'
:returns: the path to a matching file, or None
"""
for d in dirs:
path = os.path.join(d, '%s%s' % (basename, extension))
if os.path.exists(path):
return path
return None
|
Search a list of directories for a given filename or directory name.
Iterates over the supplied directories, returning the first file
found with the supplied name and extension.
:param dirs: a list of directories
:param basename: the filename
:param extension: the file extension, for example '.conf'
:returns: the path to a matching file, or None
|
def parse_scale(x):
"""Splits a "%s:%d" string and returns the string and number.
:return: A ``(string, int)`` pair extracted from ``x``.
:raise ValueError: the string ``x`` does not respect the input format.
"""
match = re.match(r'^(.+?):(\d+)$', x)
if not match:
raise ValueError('Invalid scale "%s".' % x)
return match.group(1), int(match.group(2))
|
Splits a "%s:%d" string and returns the string and number.
:return: A ``(string, int)`` pair extracted from ``x``.
:raise ValueError: the string ``x`` does not respect the input format.
|
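A usage sketch, assuming parse_scale (and re) from above are in scope; the labels are arbitrary examples.
print(parse_scale('cpu:4'))         # ('cpu', 4)
print(parse_scale('web.front:12'))  # ('web.front', 12)
try:
    parse_scale('no-number')
except ValueError as err:
    print(err)                      # Invalid scale "no-number".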
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))):
r""" Return a list of 2-tuples with a level integer for the heading levels
>>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n')
[(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n')
[(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2)
[(2, 'bad')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1)
[]
"""
if isinstance(levels, (int, float, basestring, str, bytes)):
levels = [float(levels)]
levels = set([int(i) for i in levels])
if isinstance(lines, basestring):
lines = lines.splitlines()
level_lines = []
for line in lines:
level_line = None
if 0 in levels:
level_line = (0, line)
lstripped = line.lstrip()
for i in range(6, 1, -1):
if lstripped.startswith('#' * i):
level_line = (i, lstripped[i:].lstrip())
break
if level_line and level_line[0] in levels:
level_lines.append(level_line)
return level_lines
|
r""" Return a list of 2-tuples with a level integer for the heading levels
>>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n')
[(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n')
[(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2)
[(2, 'bad')]
>>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1)
[]
|
def _compute_geometric_decay_term(self, C, mag, dists):
"""
Compute and return geometric decay term in equation 3,
page 970.
"""
c1 = self.CONSTS['c1']
return (
(C['b4'] + C['b5'] * (mag - c1)) *
np.log(np.sqrt(dists.rjb ** 2.0 + C['b6'] ** 2.0))
)
|
Compute and return geometric decay term in equation 3,
page 970.
|
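A stand-alone numeric check of the decay term above; every coefficient below is hypothetical, not a value from the published GMPE tables.
from types import SimpleNamespace
import numpy as np

C = {'b4': -1.0, 'b5': 0.1, 'b6': 7.5}   # hypothetical coefficients
c1 = 6.75                                # hypothetical CONSTS['c1']
dists = SimpleNamespace(rjb=np.array([10.0, 50.0]))
mag = 6.0
term = (C['b4'] + C['b5'] * (mag - c1)) * np.log(np.sqrt(dists.rjb ** 2.0 + C['b6'] ** 2.0))
print(term)  # one decay value per distance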
def calculate_ef_var(tpf, fpf):
"""
determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple
"""
    efvara = tpf * (1 - tpf)
    efvard = fpf * (1 - fpf)
    ef = tpf / fpf
    if fpf == 1:
        return (0, 0, 0)
    else:
        s = ef * (1 + (np.log(ef) / np.log(fpf)))
        s2 = s * s
        return (efvara, efvard, s2)
|
determine variance due to actives (efvar_a) decoys (efvar_d) and s2, the slope of the ROC curve tangent to the
fpf @ which the enrichment factor was calculated
:param tpf: float tpf @ which the enrichment factor was calculated
:param fpf: float fpf @ which the enrichment factor was calculated
:return efvara, efvard, s2: tuple
|
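Worked numbers for the variance terms above; the tpf/fpf values are arbitrary examples.
import numpy as np

tpf, fpf = 0.3, 0.1
efvara = tpf * (1 - tpf)                 # 0.21
efvard = fpf * (1 - fpf)                 # 0.09
ef = tpf / fpf                           # 3.0
s = ef * (1 + np.log(ef) / np.log(fpf))  # tangent slope, ~1.57
print(efvara, efvard, s * s)             # 0.21 0.09 ~2.46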
def makeringlatticeCIJ(n, k, seed=None):
'''
This function generates a directed lattice network with toroidal
    boundary conditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
'''
rng = get_rng(seed)
# initialize
CIJ = np.zeros((n, n))
CIJ1 = np.ones((n, n))
kk = 0
count = 0
seq = range(1, n)
seq2 = range(n - 1, 0, -1)
# fill in
while kk < k:
count += 1
dCIJ = np.triu(CIJ1, seq[count]) - np.triu(CIJ1, seq[count] + 1)
dCIJ2 = np.triu(CIJ1, seq2[count]) - np.triu(CIJ1, seq2[count] + 1)
dCIJ = dCIJ + dCIJ.T + dCIJ2 + dCIJ2.T
CIJ += dCIJ
kk = int(np.sum(CIJ))
# remove excess connections
overby = kk - k
if overby:
i, j = np.where(dCIJ)
rp = rng.permutation(np.size(i))
for ii in range(overby):
CIJ[i[rp[ii]], j[rp[ii]]] = 0
return CIJ
|
This function generates a directed lattice network with toroidal
boundary conditions (i.e. with ring-like "wrapping around").
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
The lattice is made by placing connections as close as possible
to the main diagonal, with wrapping around. No connections are made
on the main diagonal. In/Outdegree is kept approx. constant at K/N.
|
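A usage sketch, assuming this is bctpy's makeringlatticeCIJ.
import numpy as np
from bct import makeringlatticeCIJ

CIJ = makeringlatticeCIJ(10, 20, seed=42)
print(int(CIJ.sum()))  # 20 -- exactly K edges after trimming the excess
print(np.trace(CIJ))   # 0.0 -- no connections on the main diagonal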
def read_records(file):
"""Eagerly read a collection of amazon Record protobuf objects from file."""
records = []
for record_data in read_recordio(file):
record = Record()
record.ParseFromString(record_data)
records.append(record)
return records
|
Eagerly read a collection of amazon Record protobuf objects from file.
|
def list_users(verbose=True, hashes=False):
'''
List user accounts
verbose : boolean
return all information
hashes : boolean
include NT HASH and LM HASH in verbose output
CLI Example:
.. code-block:: bash
salt '*' pdbedit.list
'''
users = {} if verbose else []
if verbose:
# parse detailed user data
res = __salt__['cmd.run_all'](
'pdbedit --list --verbose {hashes}'.format(hashes="--smbpasswd-style" if hashes else ""),
)
if res['retcode'] > 0:
log.error(res['stderr'] if 'stderr' in res else res['stdout'])
return users
user_data = {}
for user in res['stdout'].splitlines():
if user.startswith('-'):
if 'unix username' in user_data:
users[user_data['unix username']] = user_data
user_data = {}
elif ':' in user:
label = user[:user.index(':')].strip().lower()
data = user[(user.index(':')+1):].strip()
user_data[label] = data
if user_data:
users[user_data['unix username']] = user_data
else:
# list users
res = __salt__['cmd.run_all']('pdbedit --list')
if res['retcode'] > 0:
return {'Error': res['stderr'] if 'stderr' in res else res['stdout']}
for user in res['stdout'].splitlines():
if ':' not in user:
continue
user_data = user.split(':')
if len(user_data) >= 3:
users.append(user_data[0])
return users
|
List user accounts
verbose : boolean
return all information
hashes : boolean
include NT HASH and LM HASH in verbose output
CLI Example:
.. code-block:: bash
salt '*' pdbedit.list
|
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
|
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
|
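A stand-alone single-term evaluation of the formula above with numpy; the parameter values are hypothetical.
import numpy as np

f = np.logspace(-2, 3, 6)                # frequencies in Hz
w = 2 * np.pi * f
rho0, m, tau, c = 100.0, 0.1, 0.04, 0.6  # hypothetical Cole-Cole parameters
rcomplex = rho0 * (1 - m * (1 - 1 / (1 + (1j * w * tau) ** c)))
print(rcomplex)  # complex resistivity at each frequency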
def authenticate(devices, params, facet, check_only):
"""
Interactively authenticates a AuthenticateRequest using an attached U2F
device.
"""
for device in devices[:]:
try:
device.open()
        except Exception:
devices.remove(device)
try:
prompted = False
while devices:
removed = []
for device in devices:
try:
return u2f.authenticate(device, params, facet, check_only)
except exc.APDUError as e:
if e.code == APDU_USE_NOT_SATISFIED:
if check_only:
sys.stderr.write('\nCorrect U2F device present!\n')
sys.exit(0)
if not prompted:
sys.stderr.write('\nTouch the flashing U2F device '
'to authenticate...\n')
prompted = True
else:
removed.append(device)
except exc.DeviceError:
removed.append(device)
devices = [d for d in devices if d not in removed]
for d in removed:
d.close()
time.sleep(0.25)
finally:
for device in devices:
device.close()
sys.stderr.write('\nThe required U2F device is not present!\n')
sys.exit(1)
|
Interactively authenticates an AuthenticateRequest using an attached U2F
device.
|
def set_server_callback(self, handle):
"""
Set up on_change events for bokeh server interactions.
"""
if self.on_events:
for event in self.on_events:
handle.on_event(event, self.on_event)
if self.on_changes:
for change in self.on_changes:
if change in ['patching', 'streaming']:
# Patch and stream events do not need handling on server
continue
handle.on_change(change, self.on_change)
|
Set up on_change events for bokeh server interactions.
|
def _ExecuteTransaction(self, transaction):
"""Get connection from pool and execute transaction."""
def Action(connection):
connection.cursor.execute("START TRANSACTION")
for query in transaction:
connection.cursor.execute(query["query"], query["args"])
connection.cursor.execute("COMMIT")
return connection.cursor.fetchall()
return self._RetryWrapper(Action)
|
Get connection from pool and execute transaction.
|
def rate(self):
"""Report the insertion rate in records per second"""
end = self._end_time if self._end_time else time.time()
return self._count / (end - self._start_time)
|
Report the insertion rate in records per second
|
def adjustSize( self ):
"""
Adjusts the size of this node to support the length of its contents.
"""
cell = self.scene().cellWidth() * 2
minheight = cell
minwidth = 2 * cell
# fit to the grid size
metrics = QFontMetrics(QApplication.font())
width = metrics.width(self.displayName()) + 20
width = ((width/cell) * cell) + (cell % width)
height = self.rect().height()
# adjust for the icon
icon = self.icon()
if icon and not icon.isNull():
width += self.iconSize().width() + 2
height = max(height, self.iconSize().height() + 2)
w = max(width, minwidth)
h = max(height, minheight)
max_w = self.maximumWidth()
max_h = self.maximumHeight()
if max_w is not None:
w = min(w, max_w)
if max_h is not None:
h = min(h, max_h)
self.setMinimumWidth(w)
self.setMinimumHeight(h)
self.rebuild()
|
Adjusts the size of this node to support the length of its contents.
|
def __op(name, val, fmt=None, const=False, consume=0, produce=0):
"""
provides sensible defaults for a code, and registers it with the
__OPTABLE for lookup.
"""
name = name.lower()
# fmt can either be a str representing the struct to unpack, or a
# callable to do more complex unpacking. If it's a str, create a
# callable for it.
if isinstance(fmt, str):
fmt = partial(_unpack, compile_struct(fmt))
operand = (name, val, fmt, consume, produce, const)
assert(name not in __OPTABLE)
assert(val not in __OPTABLE)
__OPTABLE[name] = operand
__OPTABLE[val] = operand
return val
|
provides sensible defaults for a code, and registers it with the
__OPTABLE for lookup.
|
def bubble_sizes_ref(self, series):
"""
The Excel worksheet reference to the range containing the bubble
sizes for *series* (not including the column heading cell).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$C$%d:$C$%d" % (top_row, bottom_row)
|
The Excel worksheet reference to the range containing the bubble
sizes for *series* (not including the column heading cell).
|
def _do_request(self, url, params=None, data=None, headers=None):
"""
        Performs the various requests using the requests library,
        handling exceptions generically.
"""
if not headers:
headers = {'content-type': 'application/json'}
try:
response = requests.get(
url, params=params, data=data, headers=headers)
        except Exception:
return None
if response.status_code == 200:
return response
|
Performs the various requests using the requests library,
handling exceptions generically.
|
def pb2dict(obj):
"""
    Takes a ProtoBuf Message obj and converts it to a dict.
"""
adict = {}
if not obj.IsInitialized():
return None
for field in obj.DESCRIPTOR.fields:
if not getattr(obj, field.name):
continue
if not field.label == FD.LABEL_REPEATED:
if not field.type == FD.TYPE_MESSAGE:
adict[field.name] = getattr(obj, field.name)
else:
value = pb2dict(getattr(obj, field.name))
if value:
adict[field.name] = value
else:
if field.type == FD.TYPE_MESSAGE:
adict[field.name] = \
[pb2dict(v) for v in getattr(obj, field.name)]
else:
adict[field.name] = [v for v in getattr(obj, field.name)]
return adict
|
Takes a ProtoBuf Message obj and converts it to a dict.
|
def get_log_entry_log_assignment_session(self, proxy):
"""Gets the session for assigning log entry to log mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogEntryLogAssignmentSession) - a
``LogEntryLogAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_entry_log_assignment()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_entry_log_assignment()`` is ``true``.*
"""
if not self.supports_log_entry_log_assignment():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.LogEntryLogAssignmentSession(proxy=proxy, runtime=self._runtime)
|
Gets the session for assigning log entry to log mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.logging.LogEntryLogAssignmentSession) - a
``LogEntryLogAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_log_entry_log_assignment()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_log_entry_log_assignment()`` is ``true``.*
|
def download(self, location, local_dir='.'):
'''Download content from bucket/prefix/location.
Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)
If location is a directory, all files in the directory are
downloaded. If it is a file, then that file is downloaded.
Args:
location (str): S3 location within prefix.
local_dir (str): Local directory where file(s) will be stored.
Default is here.
'''
self.logger.debug('Getting S3 info')
bucket = self.info['bucket']
prefix = self.info['prefix']
self.logger.debug('Connecting to S3')
s3conn = self.client
# remove head and/or trail backslash from location
location = location.strip('/')
self.logger.debug('Downloading contents')
objects = s3conn.list_objects(Bucket=bucket, Prefix=(prefix+'/'+location))
if 'Contents' not in objects:
raise ValueError('Download target {}/{}/{} was not found or inaccessible.'.format(bucket, prefix, location))
for s3key in objects['Contents']:
key = s3key['Key']
# skip directory keys
if not key or key.endswith('/'):
continue
# get path to each file
filepath = key.replace(prefix+'/'+location, '', 1).lstrip('/')
filename = key.split('/')[-1]
#self.logger.debug(filename)
file_dir = filepath.split('/')[:-1]
file_dir = '/'.join(file_dir)
full_dir = os.path.join(local_dir, file_dir)
# make sure directory exists
if not os.path.isdir(full_dir):
os.makedirs(full_dir)
# download file
s3conn.download_file(bucket, key, os.path.join(full_dir, filename))
self.logger.debug('Done!')
|
Download content from bucket/prefix/location.
Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)
If location is a directory, all files in the directory are
downloaded. If it is a file, then that file is downloaded.
Args:
location (str): S3 location within prefix.
local_dir (str): Local directory where file(s) will be stored.
Default is here.
|
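A minimal sketch of the same list-and-download loop using boto3 directly; bucket and prefix names are placeholders:
import os
import boto3

s3 = boto3.client('s3')
resp = s3.list_objects(Bucket='my-bucket', Prefix='my-prefix/my_dir')
for obj in resp.get('Contents', []):
    key = obj['Key']
    if key.endswith('/'):  # skip directory keys
        continue
    s3.download_file('my-bucket', key, os.path.join('.', os.path.basename(key)))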
def delete(self, request, *args, **kwargs):
"""
Processes deletion of the specified instance.
:param request: the request instance.
:rtype: django.http.HttpResponse.
"""
#noinspection PyAttributeOutsideInit
self.object = self.get_object()
success_url = self.get_success_url()
meta = getattr(self.object, '_meta')
object_repr = str(self.object)  # capture the display string before deletion
self.object.delete()
messages.success(
request,
_(u'{0} "{1}" deleted.').format(
meta.verbose_name.lower(),
object_repr
)
)
return redirect(success_url)
|
Processes deletion of the specified instance.
:param request: the request instance.
:rtype: django.http.HttpResponse.
|
def get_log(self, offset, count=10, callback=None):
'''
Retrieve log records from camera.
cmd: getLog
param:
offset: log offset for first record
count: number of records to return
'''
params = {'offset': offset, 'count': count}
return self.execute_command('getLog', params, callback=callback)
|
Retrieve log records from camera.
cmd: getLog
param:
offset: log offset for first record
count: number of records to return
|
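Hypothetical usage, assuming `camera` is an instance of the client class this method belongs to:
camera.get_log(offset=0, count=10, callback=lambda response: print(response))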
def _build_query_url(self, page=None, verbose=False):
"""
Builds the URL to call.
"""
query = []
# build the filters
if len(self.filters) > 0:
query.append(urlencode(self.filters))
if self.sort:
query_str = u"%s=%s" % (u"sort", self.sort)
query.append(query_str)
if self.sort_by:
query_str = u"%s=%s" % (u"sort_by", self.sort_by)
query.append(query_str)
if self.per_page:
query_str = u"%s=%s" % (u"per_page", self.per_page)
query.append(query_str)
if page:
query_str = u"%s=%s" % (u"page", page)
query.append(query_str)
query = u"?%s" % (u"&".join(query))
url = u"%s%s" % (self.get_list_endpoint()['href'],query)
url = u"%s%s%s" % (self.__api__.base_url, API_BASE_PATH, url)
msg = "_build_query_url: url:%s" % url
log.debug(msg)
if verbose:
print(msg)
return url
|
Builds the URL to call.
|
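A worked example of the query assembly with illustrative values (Python 3's urllib is shown; the original appears to target Python 2):
from urllib.parse import urlencode

query = [urlencode({'status': 'open'}),   # filters
         'sort=desc', 'sort_by=created', 'per_page=20', 'page=2']
print('?' + '&'.join(query))
# ?status=open&sort=desc&sort_by=created&per_page=20&page=2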
def reboot(self, timeout=1):
"""Reboot the device"""
namespace = System.getServiceType("reboot")
uri = self.getControlURL(namespace)
self.execute(uri, namespace, "Reboot", timeout=timeout)
|
Reboot the device
|
def select_token(request, scopes='', new=False):
"""
Presents the user with a selection of applicable tokens for the requested view.
"""
@tokens_required(scopes=scopes, new=new)
def _token_list(r, tokens):
context = {
'tokens': tokens,
'base_template': app_settings.ESI_BASE_TEMPLATE,
}
return render(r, 'esi/select_token.html', context=context)
return _token_list(request)
|
Presents the user with a selection of applicable tokens for the requested view.
|
def google_storage_url(self, sat):
"""
Returns a google storage url that contains the scene provided.
:param sat:
Expects an object created by scene_interpreter method
:type sat:
dict
:returns:
(String) The URL to a google storage file
"""
filename = sat['scene'] + '.tar.bz'
return url_builder([self.google, sat['sat'], sat['path'], sat['row'], filename])
|
Returns a google storage url that contains the scene provided.
:param sat:
Expects an object created by scene_interpreter method
:type sat:
dict
:returns:
(String) The URL to a google storage file
|
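An illustrative input and output, assuming url_builder joins its segments with '/':
sat = {'scene': 'LC80390222013076EDC00', 'sat': 'L8', 'path': '039', 'row': '022'}
# url_builder would then produce: <google base url>/L8/039/022/LC80390222013076EDC00.tar.bz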
def updateUserRole(self,
user,
role):
"""
The Update User Role operation allows the administrator of an
organization to update the role of a user within a portal.
Inputs:
role - Sets the user's role.
Roles are the following:
org_user - Ability to add items, create groups, and
share in the organization.
org_publisher - Same privileges as org_user plus the
ability to publish hosted services from ArcGIS for
Desktop and ArcGIS Online.
org_admin - In addition to add, create, share, and publish
capabilities, an org_admin administers and customizes
the organization.
Example: role=org_publisher
user - The username whose role you want to change.
"""
url = self._url + "/updateuserrole"
params = {
"f" : "json",
"user" : user,
"role" : role
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
The Update User Role operation allows the administrator of an
organization to update the role of a user within a portal.
Inputs:
role - Sets the user's role.
Roles are the following:
org_user - Ability to add items, create groups, and
share in the organization.
org_publisher - Same privileges as org_user plus the
ability to publish hosted services from ArcGIS for
Desktop and ArcGIS Online.
org_admin - In addition to add, create, share, and publish
capabilities, an org_admin administers and customizes
the organization.
Example: role=org_publisher
user - The username whose role you want to change.
|
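Hypothetical call, assuming `admin` is an instance of the administration class this method belongs to:
admin.updateUserRole(user='jdoe', role='org_publisher')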
def _one_projector(args: Dict[str, Any], index: int) -> Union[int, np.ndarray]:
"""Returns a projector onto the |1> subspace of the index-th qubit."""
num_shard_qubits = args['num_shard_qubits']
shard_num = args['shard_num']
if index >= num_shard_qubits:
return _kth_bit(shard_num, index - num_shard_qubits)
return _zero_one_vects(args)[index]
|
Returns a projector onto the |1> subspace of the index-th qubit.
|
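For reference, the dense single-qubit projector onto |1> is diag(0, 1); the shard-based code above avoids materializing it as a full matrix:
import numpy as np

one_projector = np.diag([0.0, 1.0])  # |1><1| for a single qubit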
def rate_limits(self):
"""Returns a list of rate limit details."""
if not self._rate_limits:
self._rate_limits = utilities.get_rate_limits(self.response)
return self._rate_limits
|
Returns a list of rate limit details.
|
def read_val(self, key: str) -> Union[List[float], Tuple[List[float], List[float]]]:
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
|
Read a hyperparameter `key` in the optimizer dictionary.
|
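An illustration of the stride-2 read with a stand-in list of param groups (fastai pairs groups, so every other entry is skipped):
param_groups = [{'lr': 0.1}, {'lr': 0.1}, {'lr': 0.01}, {'lr': 0.01}]
val = [pg['lr'] for pg in param_groups[::2]]
print(val)  # [0.1, 0.01]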
def execute_no_results(self, sock_info, generator):
"""Execute all operations, returning no results (w=0).
"""
# Cannot have both unacknowledged write and bypass document validation.
if self.bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
coll = self.collection
# If ordered is True we have to send GLE or use write
# commands so we can abort on the first error.
write_concern = WriteConcern(w=int(self.ordered))
op_id = _randint()
for run in generator:
try:
if run.op_type == _INSERT:
coll._insert(
sock_info,
run.ops,
self.ordered,
write_concern=write_concern,
op_id=op_id,
bypass_doc_val=self.bypass_doc_val)
elif run.op_type == _UPDATE:
for operation in run.ops:
doc = operation['u']
check_keys = True
if doc and next(iter(doc)).startswith('$'):
check_keys = False
coll._update(
sock_info,
operation['q'],
doc,
operation['upsert'],
check_keys,
operation['multi'],
write_concern=write_concern,
op_id=op_id,
ordered=self.ordered,
bypass_doc_val=self.bypass_doc_val)
else:
for operation in run.ops:
coll._delete(sock_info,
operation['q'],
not operation['limit'],
write_concern,
op_id,
self.ordered)
except OperationFailure:
if self.ordered:
break
|
Execute all operations, returning no results (w=0).
|
def stop_all(self):
"""
Stop all nodes
"""
pool = Pool(concurrency=3)
for node in self.nodes.values():
pool.append(node.stop)
yield from pool.join()
|
Stop all nodes
|
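A minimal asyncio stand-in for the bounded-concurrency pool above (the original Pool class is project-specific; node.stop is assumed to be a coroutine function):
import asyncio

async def stop_all(nodes, concurrency=3):
    sem = asyncio.Semaphore(concurrency)

    async def stop(node):
        async with sem:
            await node.stop()

    await asyncio.gather(*(stop(node) for node in nodes))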
def array_type(data_types, field):
"""
Allows conversion of Django ArrayField to SQLAlchemy Array.
Takes care of mapping the type of the array element.
"""
from sqlalchemy.dialects import postgresql
internal_type = field.base_field.get_internal_type()
# currently no support for multi-dimensional arrays
if internal_type in data_types and internal_type != 'ArrayField':
sub_type = data_types[internal_type](field)
if not isinstance(sub_type, (list, tuple)):
sub_type = [sub_type]
else:
raise RuntimeError('Unsupported array element type')
return postgresql.ARRAY(sub_type)
|
Allows conversion of Django ArrayField to SQLAlchemy Array.
Takes care of mapping the type of the array element.
|
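A standalone illustration of the resulting column type for an integer array field (names are illustrative):
from sqlalchemy import Column, Integer
from sqlalchemy.dialects import postgresql

tags = Column('tags', postgresql.ARRAY(Integer))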
def asof_locs(self, where, mask):
"""
Find the locations (indices) of the labels from the index for
every entry in the `where` argument.
As in the `asof` function, if the label (a particular entry in
`where`) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in `where`,
-1 is returned.
`mask` is used to ignore NA values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : array-like
Array of booleans denoting where values in the original
data are not NA.
Returns
-------
numpy.ndarray
An array of locations (indices) of the labels from the Index
which correspond to the return values of the `asof` function
for every element in `where`.
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where.values < self.values[first])] = -1
return result
|
Find the locations (indices) of the labels from the index for
every entry in the `where` argument.
As in the `asof` function, if the label (a particular entry in
`where`) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in `where`,
-1 is returned.
`mask` is used to ignore NA values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : array-like
Array of booleans denoting where values in the original
data are not NA.
Returns
-------
numpy.ndarray
An array of locations (indices) of the labels from the Index
which correspond to the return values of the `asof` function
for every element in `where`.
|
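A worked example of the core searchsorted step: each target maps to the latest label at or before it:
import numpy as np

values = np.array([1, 3, 5])          # index labels (all non-NA)
where = np.array([2, 4, 6])           # targets
locs = values.searchsorted(where, side='right')  # [1, 2, 3]
locs = np.where(locs > 0, locs - 1, 0)           # [0, 1, 2]
# the targets 2, 4, 6 resolve to the labels 1, 3, 5 respectively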