code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _set_properties(self, value):
"""Set the properties for the current object.
:type value: dict or :class:`google.cloud.storage.batch._FutureDict`
:param value: The properties to be set.
"""
self._label_removals.clear()
return super(Bucket, self)._set_properties(value) | def function[_set_properties, parameter[self, value]]:
constant[Set the properties for the current object.
:type value: dict or :class:`google.cloud.storage.batch._FutureDict`
:param value: The properties to be set.
]
call[name[self]._label_removals.clear, parameter[]]
return[call[call[name[super], parameter[name[Bucket], name[self]]]._set_properties, parameter[name[value]]]] | keyword[def] identifier[_set_properties] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[_label_removals] . identifier[clear] ()
keyword[return] identifier[super] ( identifier[Bucket] , identifier[self] ). identifier[_set_properties] ( identifier[value] ) | def _set_properties(self, value):
"""Set the properties for the current object.
:type value: dict or :class:`google.cloud.storage.batch._FutureDict`
:param value: The properties to be set.
"""
self._label_removals.clear()
return super(Bucket, self)._set_properties(value) |
def set_data(self, vol, clim=None):
""" Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
"""
# Check volume
if not isinstance(vol, np.ndarray):
raise ValueError('Volume visual needs a numpy array.')
if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)):
raise ValueError('Volume visual needs a 3D image.')
# Handle clim
if clim is not None:
clim = np.array(clim, float)
if not (clim.ndim == 1 and clim.size == 2):
raise ValueError('clim must be a 2-element array-like')
self._clim = tuple(clim)
if self._clim is None:
self._clim = vol.min(), vol.max()
# Apply clim
vol = np.array(vol, dtype='float32', copy=False)
if self._clim[1] == self._clim[0]:
if self._clim[0] != 0.:
vol *= 1.0 / self._clim[0]
else:
vol -= self._clim[0]
vol /= self._clim[1] - self._clim[0]
# Apply to texture
self._tex.set_data(vol) # will be efficient if vol is same shape
self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1],
vol.shape[0])
shape = vol.shape[:3]
if self._vol_shape != shape:
self._vol_shape = shape
self._need_vertex_update = True
self._vol_shape = shape
# Get some stats
self._kb_for_texture = np.prod(self._vol_shape) / 1024 | def function[set_data, parameter[self, vol, clim]]:
constant[ Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
]
if <ast.UnaryOp object at 0x7da18dc9ae90> begin[:]
<ast.Raise object at 0x7da18dc99870>
if <ast.UnaryOp object at 0x7da18dc99810> begin[:]
<ast.Raise object at 0x7da18dc9a080>
if compare[name[clim] is_not constant[None]] begin[:]
variable[clim] assign[=] call[name[np].array, parameter[name[clim], name[float]]]
if <ast.UnaryOp object at 0x7da1b0ebca00> begin[:]
<ast.Raise object at 0x7da1b0ebd510>
name[self]._clim assign[=] call[name[tuple], parameter[name[clim]]]
if compare[name[self]._clim is constant[None]] begin[:]
name[self]._clim assign[=] tuple[[<ast.Call object at 0x7da1b0ebdba0>, <ast.Call object at 0x7da1b0ebccd0>]]
variable[vol] assign[=] call[name[np].array, parameter[name[vol]]]
if compare[call[name[self]._clim][constant[1]] equal[==] call[name[self]._clim][constant[0]]] begin[:]
if compare[call[name[self]._clim][constant[0]] not_equal[!=] constant[0.0]] begin[:]
<ast.AugAssign object at 0x7da1b0ebe800>
call[name[self]._tex.set_data, parameter[name[vol]]]
call[name[self].shared_program][constant[u_shape]] assign[=] tuple[[<ast.Subscript object at 0x7da1b0ebc1c0>, <ast.Subscript object at 0x7da1b0ebf520>, <ast.Subscript object at 0x7da1b0ebdae0>]]
variable[shape] assign[=] call[name[vol].shape][<ast.Slice object at 0x7da1b0ebc2e0>]
if compare[name[self]._vol_shape not_equal[!=] name[shape]] begin[:]
name[self]._vol_shape assign[=] name[shape]
name[self]._need_vertex_update assign[=] constant[True]
name[self]._vol_shape assign[=] name[shape]
name[self]._kb_for_texture assign[=] binary_operation[call[name[np].prod, parameter[name[self]._vol_shape]] / constant[1024]] | keyword[def] identifier[set_data] ( identifier[self] , identifier[vol] , identifier[clim] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[vol] , identifier[np] . identifier[ndarray] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] (( identifier[vol] . identifier[ndim] == literal[int] ) keyword[or] ( identifier[vol] . identifier[ndim] == literal[int] keyword[and] identifier[vol] . identifier[shape] [- literal[int] ]<= literal[int] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[clim] keyword[is] keyword[not] keyword[None] :
identifier[clim] = identifier[np] . identifier[array] ( identifier[clim] , identifier[float] )
keyword[if] keyword[not] ( identifier[clim] . identifier[ndim] == literal[int] keyword[and] identifier[clim] . identifier[size] == literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_clim] = identifier[tuple] ( identifier[clim] )
keyword[if] identifier[self] . identifier[_clim] keyword[is] keyword[None] :
identifier[self] . identifier[_clim] = identifier[vol] . identifier[min] (), identifier[vol] . identifier[max] ()
identifier[vol] = identifier[np] . identifier[array] ( identifier[vol] , identifier[dtype] = literal[string] , identifier[copy] = keyword[False] )
keyword[if] identifier[self] . identifier[_clim] [ literal[int] ]== identifier[self] . identifier[_clim] [ literal[int] ]:
keyword[if] identifier[self] . identifier[_clim] [ literal[int] ]!= literal[int] :
identifier[vol] *= literal[int] / identifier[self] . identifier[_clim] [ literal[int] ]
keyword[else] :
identifier[vol] -= identifier[self] . identifier[_clim] [ literal[int] ]
identifier[vol] /= identifier[self] . identifier[_clim] [ literal[int] ]- identifier[self] . identifier[_clim] [ literal[int] ]
identifier[self] . identifier[_tex] . identifier[set_data] ( identifier[vol] )
identifier[self] . identifier[shared_program] [ literal[string] ]=( identifier[vol] . identifier[shape] [ literal[int] ], identifier[vol] . identifier[shape] [ literal[int] ],
identifier[vol] . identifier[shape] [ literal[int] ])
identifier[shape] = identifier[vol] . identifier[shape] [: literal[int] ]
keyword[if] identifier[self] . identifier[_vol_shape] != identifier[shape] :
identifier[self] . identifier[_vol_shape] = identifier[shape]
identifier[self] . identifier[_need_vertex_update] = keyword[True]
identifier[self] . identifier[_vol_shape] = identifier[shape]
identifier[self] . identifier[_kb_for_texture] = identifier[np] . identifier[prod] ( identifier[self] . identifier[_vol_shape] )/ literal[int] | def set_data(self, vol, clim=None):
""" Set the volume data.
Parameters
----------
vol : ndarray
The 3D volume.
clim : tuple | None
Colormap limits to use. None will use the min and max values.
"""
# Check volume
if not isinstance(vol, np.ndarray):
raise ValueError('Volume visual needs a numpy array.') # depends on [control=['if'], data=[]]
if not (vol.ndim == 3 or (vol.ndim == 4 and vol.shape[-1] <= 4)):
raise ValueError('Volume visual needs a 3D image.') # depends on [control=['if'], data=[]]
# Handle clim
if clim is not None:
clim = np.array(clim, float)
if not (clim.ndim == 1 and clim.size == 2):
raise ValueError('clim must be a 2-element array-like') # depends on [control=['if'], data=[]]
self._clim = tuple(clim) # depends on [control=['if'], data=['clim']]
if self._clim is None:
self._clim = (vol.min(), vol.max()) # depends on [control=['if'], data=[]]
# Apply clim
vol = np.array(vol, dtype='float32', copy=False)
if self._clim[1] == self._clim[0]:
if self._clim[0] != 0.0:
vol *= 1.0 / self._clim[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
vol -= self._clim[0]
vol /= self._clim[1] - self._clim[0]
# Apply to texture
self._tex.set_data(vol) # will be efficient if vol is same shape
self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1], vol.shape[0])
shape = vol.shape[:3]
if self._vol_shape != shape:
self._vol_shape = shape
self._need_vertex_update = True # depends on [control=['if'], data=['shape']]
self._vol_shape = shape
# Get some stats
self._kb_for_texture = np.prod(self._vol_shape) / 1024 |
def ImportUcsBackup(self, path=None, merge=False, dumpXml=False):
"""
Imports backUp.
This operation will upload the UCSM backup taken earlier via GUI or BackupUcs operation for all configuration, system configuration, and
logical configuration files. User can perform an import while the system is up and running.
- path specifies path of the backup file.
- merge specifies whether to merge the backup configuration with the existing UCSM configuration.
"""
from UcsBase import WriteUcsWarning, UcsUtils, ManagedObject, WriteObject, UcsUtils, UcsException, \
UcsValidationException
from Ucs import ConfigConfig
from Mos import MgmtImporter
from datetime import datetime
if (self._transactionInProgress):
raise UcsValidationException(
"UCS transaction in progress. Cannot execute ImportUcsBackup. Complete or Undo UCS transaction.")
# raise Exception("UCS transaction in progress. Cannot execute ImportUcsBackup. Complete or Undo UCS transaction.")
if not path:
raise UcsValidationException("path parameter is not provided.")
# raise Exception("Please provide path")
if not os.path.exists(path):
raise UcsValidationException("Backup File not found <%s>" % (path))
# raise Exception("Backup File not found <%s>" %(path))
dn = None
filePath = path
localFile = os.path.basename(filePath)
topSystem = ManagedObject(NamingId.TOP_SYSTEM)
mgmtImporter = ManagedObject(NamingId.MGMT_IMPORTER)
mgmtImporter.Hostname = os.environ['COMPUTERNAME'].lower() + datetime.now().strftime('%Y%m%d%H%M')
dn = UcsUtils.MakeDn([topSystem.MakeRn(), mgmtImporter.MakeRn()])
mgmtImporter.Dn = dn
mgmtImporter.Status = Status.CREATED
mgmtImporter.RemoteFile = filePath
mgmtImporter.Proto = MgmtImporter.CONST_PROTO_HTTP
mgmtImporter.AdminState = MgmtImporter.CONST_ADMIN_STATE_ENABLED
if merge:
mgmtImporter.Action = MgmtImporter.CONST_ACTION_MERGE
else:
mgmtImporter.Action = MgmtImporter.CONST_ACTION_REPLACE
inConfig = ConfigConfig()
inConfig.AddChild(mgmtImporter)
uri = "%s/operations/file-%s/importconfig.txt" % (self.Uri(), localFile)
if sys.version_info < (2, 6):
uploadFileHandle = open(filePath, 'rb')
stream = uploadFileHandle.read()
else:
progress = Progress()
stream = file_with_callback(filePath, 'rb', progress.update, filePath)
request = urllib2.Request(uri)
request.add_header('Cookie', 'ucsm-cookie=%s' % (self._cookie))
request.add_data(stream)
response = urllib2.urlopen(request).read()
if not response:
raise UcsValidationException("Unable to upload properly.")
# WriteUcsWarning("Unable to upload properly.")
ccm = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml)
if (ccm.errorCode != 0):
raise UcsException(ccm.errorCode, ccm.errorDescr)
# raise Exception('[Error]: BackupUcs [Code]:' + ccm.errorCode + ' [Description]:' + ccm.errorDescr)
return ccm.OutConfig.GetChild() | def function[ImportUcsBackup, parameter[self, path, merge, dumpXml]]:
constant[
Imports backUp.
This operation will upload the UCSM backup taken earlier via GUI or BackupUcs operation for all configuration, system configuration, and
logical configuration files. User can perform an import while the system is up and running.
- path specifies path of the backup file.
- merge specifies whether to merge the backup configuration with the existing UCSM configuration.
]
from relative_module[UcsBase] import module[WriteUcsWarning], module[UcsUtils], module[ManagedObject], module[WriteObject], module[UcsUtils], module[UcsException], module[UcsValidationException]
from relative_module[Ucs] import module[ConfigConfig]
from relative_module[Mos] import module[MgmtImporter]
from relative_module[datetime] import module[datetime]
if name[self]._transactionInProgress begin[:]
<ast.Raise object at 0x7da1b25ea1a0>
if <ast.UnaryOp object at 0x7da1b25e9120> begin[:]
<ast.Raise object at 0x7da1b25eb970>
if <ast.UnaryOp object at 0x7da1b25e8a00> begin[:]
<ast.Raise object at 0x7da1b25e8340>
variable[dn] assign[=] constant[None]
variable[filePath] assign[=] name[path]
variable[localFile] assign[=] call[name[os].path.basename, parameter[name[filePath]]]
variable[topSystem] assign[=] call[name[ManagedObject], parameter[name[NamingId].TOP_SYSTEM]]
variable[mgmtImporter] assign[=] call[name[ManagedObject], parameter[name[NamingId].MGMT_IMPORTER]]
name[mgmtImporter].Hostname assign[=] binary_operation[call[call[name[os].environ][constant[COMPUTERNAME]].lower, parameter[]] + call[call[name[datetime].now, parameter[]].strftime, parameter[constant[%Y%m%d%H%M]]]]
variable[dn] assign[=] call[name[UcsUtils].MakeDn, parameter[list[[<ast.Call object at 0x7da1b25e92d0>, <ast.Call object at 0x7da1b25ee3b0>]]]]
name[mgmtImporter].Dn assign[=] name[dn]
name[mgmtImporter].Status assign[=] name[Status].CREATED
name[mgmtImporter].RemoteFile assign[=] name[filePath]
name[mgmtImporter].Proto assign[=] name[MgmtImporter].CONST_PROTO_HTTP
name[mgmtImporter].AdminState assign[=] name[MgmtImporter].CONST_ADMIN_STATE_ENABLED
if name[merge] begin[:]
name[mgmtImporter].Action assign[=] name[MgmtImporter].CONST_ACTION_MERGE
variable[inConfig] assign[=] call[name[ConfigConfig], parameter[]]
call[name[inConfig].AddChild, parameter[name[mgmtImporter]]]
variable[uri] assign[=] binary_operation[constant[%s/operations/file-%s/importconfig.txt] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b246b8e0>, <ast.Name object at 0x7da1b246b7c0>]]]
if compare[name[sys].version_info less[<] tuple[[<ast.Constant object at 0x7da1b246a7a0>, <ast.Constant object at 0x7da1b246a7d0>]]] begin[:]
variable[uploadFileHandle] assign[=] call[name[open], parameter[name[filePath], constant[rb]]]
variable[stream] assign[=] call[name[uploadFileHandle].read, parameter[]]
variable[request] assign[=] call[name[urllib2].Request, parameter[name[uri]]]
call[name[request].add_header, parameter[constant[Cookie], binary_operation[constant[ucsm-cookie=%s] <ast.Mod object at 0x7da2590d6920> name[self]._cookie]]]
call[name[request].add_data, parameter[name[stream]]]
variable[response] assign[=] call[call[name[urllib2].urlopen, parameter[name[request]]].read, parameter[]]
if <ast.UnaryOp object at 0x7da1b2468df0> begin[:]
<ast.Raise object at 0x7da1b2468d90>
variable[ccm] assign[=] call[name[self].ConfigConfMo, parameter[]]
if compare[name[ccm].errorCode not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b246afe0>
return[call[name[ccm].OutConfig.GetChild, parameter[]]] | keyword[def] identifier[ImportUcsBackup] ( identifier[self] , identifier[path] = keyword[None] , identifier[merge] = keyword[False] , identifier[dumpXml] = keyword[False] ):
literal[string]
keyword[from] identifier[UcsBase] keyword[import] identifier[WriteUcsWarning] , identifier[UcsUtils] , identifier[ManagedObject] , identifier[WriteObject] , identifier[UcsUtils] , identifier[UcsException] , identifier[UcsValidationException]
keyword[from] identifier[Ucs] keyword[import] identifier[ConfigConfig]
keyword[from] identifier[Mos] keyword[import] identifier[MgmtImporter]
keyword[from] identifier[datetime] keyword[import] identifier[datetime]
keyword[if] ( identifier[self] . identifier[_transactionInProgress] ):
keyword[raise] identifier[UcsValidationException] (
literal[string] )
keyword[if] keyword[not] identifier[path] :
keyword[raise] identifier[UcsValidationException] ( literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[raise] identifier[UcsValidationException] ( literal[string] %( identifier[path] ))
identifier[dn] = keyword[None]
identifier[filePath] = identifier[path]
identifier[localFile] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filePath] )
identifier[topSystem] = identifier[ManagedObject] ( identifier[NamingId] . identifier[TOP_SYSTEM] )
identifier[mgmtImporter] = identifier[ManagedObject] ( identifier[NamingId] . identifier[MGMT_IMPORTER] )
identifier[mgmtImporter] . identifier[Hostname] = identifier[os] . identifier[environ] [ literal[string] ]. identifier[lower] ()+ identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] )
identifier[dn] = identifier[UcsUtils] . identifier[MakeDn] ([ identifier[topSystem] . identifier[MakeRn] (), identifier[mgmtImporter] . identifier[MakeRn] ()])
identifier[mgmtImporter] . identifier[Dn] = identifier[dn]
identifier[mgmtImporter] . identifier[Status] = identifier[Status] . identifier[CREATED]
identifier[mgmtImporter] . identifier[RemoteFile] = identifier[filePath]
identifier[mgmtImporter] . identifier[Proto] = identifier[MgmtImporter] . identifier[CONST_PROTO_HTTP]
identifier[mgmtImporter] . identifier[AdminState] = identifier[MgmtImporter] . identifier[CONST_ADMIN_STATE_ENABLED]
keyword[if] identifier[merge] :
identifier[mgmtImporter] . identifier[Action] = identifier[MgmtImporter] . identifier[CONST_ACTION_MERGE]
keyword[else] :
identifier[mgmtImporter] . identifier[Action] = identifier[MgmtImporter] . identifier[CONST_ACTION_REPLACE]
identifier[inConfig] = identifier[ConfigConfig] ()
identifier[inConfig] . identifier[AddChild] ( identifier[mgmtImporter] )
identifier[uri] = literal[string] %( identifier[self] . identifier[Uri] (), identifier[localFile] )
keyword[if] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ):
identifier[uploadFileHandle] = identifier[open] ( identifier[filePath] , literal[string] )
identifier[stream] = identifier[uploadFileHandle] . identifier[read] ()
keyword[else] :
identifier[progress] = identifier[Progress] ()
identifier[stream] = identifier[file_with_callback] ( identifier[filePath] , literal[string] , identifier[progress] . identifier[update] , identifier[filePath] )
identifier[request] = identifier[urllib2] . identifier[Request] ( identifier[uri] )
identifier[request] . identifier[add_header] ( literal[string] , literal[string] %( identifier[self] . identifier[_cookie] ))
identifier[request] . identifier[add_data] ( identifier[stream] )
identifier[response] = identifier[urllib2] . identifier[urlopen] ( identifier[request] ). identifier[read] ()
keyword[if] keyword[not] identifier[response] :
keyword[raise] identifier[UcsValidationException] ( literal[string] )
identifier[ccm] = identifier[self] . identifier[ConfigConfMo] ( identifier[dn] = identifier[dn] , identifier[inConfig] = identifier[inConfig] , identifier[inHierarchical] = identifier[YesOrNo] . identifier[FALSE] , identifier[dumpXml] = identifier[dumpXml] )
keyword[if] ( identifier[ccm] . identifier[errorCode] != literal[int] ):
keyword[raise] identifier[UcsException] ( identifier[ccm] . identifier[errorCode] , identifier[ccm] . identifier[errorDescr] )
keyword[return] identifier[ccm] . identifier[OutConfig] . identifier[GetChild] () | def ImportUcsBackup(self, path=None, merge=False, dumpXml=False):
"""
Imports backUp.
This operation will upload the UCSM backup taken earlier via GUI or BackupUcs operation for all configuration, system configuration, and
logical configuration files. User can perform an import while the system is up and running.
- path specifies path of the backup file.
- merge specifies whether to merge the backup configuration with the existing UCSM configuration.
"""
from UcsBase import WriteUcsWarning, UcsUtils, ManagedObject, WriteObject, UcsUtils, UcsException, UcsValidationException
from Ucs import ConfigConfig
from Mos import MgmtImporter
from datetime import datetime
if self._transactionInProgress:
raise UcsValidationException('UCS transaction in progress. Cannot execute ImportUcsBackup. Complete or Undo UCS transaction.') # depends on [control=['if'], data=[]] # raise Exception("UCS transaction in progress. Cannot execute ImportUcsBackup. Complete or Undo UCS transaction.")
if not path:
raise UcsValidationException('path parameter is not provided.') # depends on [control=['if'], data=[]] # raise Exception("Please provide path")
if not os.path.exists(path):
raise UcsValidationException('Backup File not found <%s>' % path) # depends on [control=['if'], data=[]] # raise Exception("Backup File not found <%s>" %(path))
dn = None
filePath = path
localFile = os.path.basename(filePath)
topSystem = ManagedObject(NamingId.TOP_SYSTEM)
mgmtImporter = ManagedObject(NamingId.MGMT_IMPORTER)
mgmtImporter.Hostname = os.environ['COMPUTERNAME'].lower() + datetime.now().strftime('%Y%m%d%H%M')
dn = UcsUtils.MakeDn([topSystem.MakeRn(), mgmtImporter.MakeRn()])
mgmtImporter.Dn = dn
mgmtImporter.Status = Status.CREATED
mgmtImporter.RemoteFile = filePath
mgmtImporter.Proto = MgmtImporter.CONST_PROTO_HTTP
mgmtImporter.AdminState = MgmtImporter.CONST_ADMIN_STATE_ENABLED
if merge:
mgmtImporter.Action = MgmtImporter.CONST_ACTION_MERGE # depends on [control=['if'], data=[]]
else:
mgmtImporter.Action = MgmtImporter.CONST_ACTION_REPLACE
inConfig = ConfigConfig()
inConfig.AddChild(mgmtImporter)
uri = '%s/operations/file-%s/importconfig.txt' % (self.Uri(), localFile)
if sys.version_info < (2, 6):
uploadFileHandle = open(filePath, 'rb')
stream = uploadFileHandle.read() # depends on [control=['if'], data=[]]
else:
progress = Progress()
stream = file_with_callback(filePath, 'rb', progress.update, filePath)
request = urllib2.Request(uri)
request.add_header('Cookie', 'ucsm-cookie=%s' % self._cookie)
request.add_data(stream)
response = urllib2.urlopen(request).read()
if not response:
raise UcsValidationException('Unable to upload properly.') # depends on [control=['if'], data=[]] # WriteUcsWarning("Unable to upload properly.")
ccm = self.ConfigConfMo(dn=dn, inConfig=inConfig, inHierarchical=YesOrNo.FALSE, dumpXml=dumpXml)
if ccm.errorCode != 0:
raise UcsException(ccm.errorCode, ccm.errorDescr) # depends on [control=['if'], data=[]] # raise Exception('[Error]: BackupUcs [Code]:' + ccm.errorCode + ' [Description]:' + ccm.errorDescr)
return ccm.OutConfig.GetChild() |
def set_ard_time(self, us):
""" Set the ACK retry delay for radio communication """
# Auto Retransmit Delay:
# 0000 - Wait 250uS
# 0001 - Wait 500uS
# 0010 - Wait 750uS
# ........
# 1111 - Wait 4000uS
# Round down, to value representing a multiple of 250uS
t = int((us / 250) - 1)
if (t < 0):
t = 0
if (t > 0xF):
t = 0xF
_send_vendor_setup(self.handle, SET_RADIO_ARD, t, 0, ()) | def function[set_ard_time, parameter[self, us]]:
constant[ Set the ACK retry delay for radio communication ]
variable[t] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[us] / constant[250]] - constant[1]]]]
if compare[name[t] less[<] constant[0]] begin[:]
variable[t] assign[=] constant[0]
if compare[name[t] greater[>] constant[15]] begin[:]
variable[t] assign[=] constant[15]
call[name[_send_vendor_setup], parameter[name[self].handle, name[SET_RADIO_ARD], name[t], constant[0], tuple[[]]]] | keyword[def] identifier[set_ard_time] ( identifier[self] , identifier[us] ):
literal[string]
identifier[t] = identifier[int] (( identifier[us] / literal[int] )- literal[int] )
keyword[if] ( identifier[t] < literal[int] ):
identifier[t] = literal[int]
keyword[if] ( identifier[t] > literal[int] ):
identifier[t] = literal[int]
identifier[_send_vendor_setup] ( identifier[self] . identifier[handle] , identifier[SET_RADIO_ARD] , identifier[t] , literal[int] ,()) | def set_ard_time(self, us):
""" Set the ACK retry delay for radio communication """
# Auto Retransmit Delay:
# 0000 - Wait 250uS
# 0001 - Wait 500uS
# 0010 - Wait 750uS
# ........
# 1111 - Wait 4000uS
# Round down, to value representing a multiple of 250uS
t = int(us / 250 - 1)
if t < 0:
t = 0 # depends on [control=['if'], data=['t']]
if t > 15:
t = 15 # depends on [control=['if'], data=['t']]
_send_vendor_setup(self.handle, SET_RADIO_ARD, t, 0, ()) |
def _get_projection(self):
"""Get projection from the NetCDF4 attributes"""
try:
proj_str = self.nc.attrs['gdal_projection']
except TypeError:
proj_str = self.nc.attrs['gdal_projection'].decode()
# Check the a/b/h units
radius_a = proj_str.split('+a=')[-1].split()[0]
if float(radius_a) > 10e3:
units = 'm'
scale = 1.0
else:
units = 'km'
scale = 1e3
if 'units' not in proj_str:
proj_str = proj_str + ' +units=' + units
area_extent = (float(self.nc.attrs['gdal_xgeo_up_left']) / scale,
float(self.nc.attrs['gdal_ygeo_low_right']) / scale,
float(self.nc.attrs['gdal_xgeo_low_right']) / scale,
float(self.nc.attrs['gdal_ygeo_up_left']) / scale)
return proj_str, area_extent | def function[_get_projection, parameter[self]]:
constant[Get projection from the NetCDF4 attributes]
<ast.Try object at 0x7da1b22a7b50>
variable[radius_a] assign[=] call[call[call[call[name[proj_str].split, parameter[constant[+a=]]]][<ast.UnaryOp object at 0x7da1b22a4820>].split, parameter[]]][constant[0]]
if compare[call[name[float], parameter[name[radius_a]]] greater[>] constant[10000.0]] begin[:]
variable[units] assign[=] constant[m]
variable[scale] assign[=] constant[1.0]
if compare[constant[units] <ast.NotIn object at 0x7da2590d7190> name[proj_str]] begin[:]
variable[proj_str] assign[=] binary_operation[binary_operation[name[proj_str] + constant[ +units=]] + name[units]]
variable[area_extent] assign[=] tuple[[<ast.BinOp object at 0x7da1b22a7d90>, <ast.BinOp object at 0x7da1b2254b50>, <ast.BinOp object at 0x7da1b22545e0>, <ast.BinOp object at 0x7da1b2256350>]]
return[tuple[[<ast.Name object at 0x7da1b2254280>, <ast.Name object at 0x7da1b2257040>]]] | keyword[def] identifier[_get_projection] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[proj_str] = identifier[self] . identifier[nc] . identifier[attrs] [ literal[string] ]
keyword[except] identifier[TypeError] :
identifier[proj_str] = identifier[self] . identifier[nc] . identifier[attrs] [ literal[string] ]. identifier[decode] ()
identifier[radius_a] = identifier[proj_str] . identifier[split] ( literal[string] )[- literal[int] ]. identifier[split] ()[ literal[int] ]
keyword[if] identifier[float] ( identifier[radius_a] )> literal[int] :
identifier[units] = literal[string]
identifier[scale] = literal[int]
keyword[else] :
identifier[units] = literal[string]
identifier[scale] = literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[proj_str] :
identifier[proj_str] = identifier[proj_str] + literal[string] + identifier[units]
identifier[area_extent] =( identifier[float] ( identifier[self] . identifier[nc] . identifier[attrs] [ literal[string] ])/ identifier[scale] ,
identifier[float] ( identifier[self] . identifier[nc] . identifier[attrs] [ literal[string] ])/ identifier[scale] ,
identifier[float] ( identifier[self] . identifier[nc] . identifier[attrs] [ literal[string] ])/ identifier[scale] ,
identifier[float] ( identifier[self] . identifier[nc] . identifier[attrs] [ literal[string] ])/ identifier[scale] )
keyword[return] identifier[proj_str] , identifier[area_extent] | def _get_projection(self):
"""Get projection from the NetCDF4 attributes"""
try:
proj_str = self.nc.attrs['gdal_projection'] # depends on [control=['try'], data=[]]
except TypeError:
proj_str = self.nc.attrs['gdal_projection'].decode() # depends on [control=['except'], data=[]]
# Check the a/b/h units
radius_a = proj_str.split('+a=')[-1].split()[0]
if float(radius_a) > 10000.0:
units = 'm'
scale = 1.0 # depends on [control=['if'], data=[]]
else:
units = 'km'
scale = 1000.0
if 'units' not in proj_str:
proj_str = proj_str + ' +units=' + units # depends on [control=['if'], data=['proj_str']]
area_extent = (float(self.nc.attrs['gdal_xgeo_up_left']) / scale, float(self.nc.attrs['gdal_ygeo_low_right']) / scale, float(self.nc.attrs['gdal_xgeo_low_right']) / scale, float(self.nc.attrs['gdal_ygeo_up_left']) / scale)
return (proj_str, area_extent) |
def _check_scan_dir(self, fs, path, info, depth):
# type: (FS, Text, Info, int) -> bool
"""Check if a directory contents should be scanned."""
if self.max_depth is not None and depth >= self.max_depth:
return False
return self.check_scan_dir(fs, path, info) | def function[_check_scan_dir, parameter[self, fs, path, info, depth]]:
constant[Check if a directory contents should be scanned.]
if <ast.BoolOp object at 0x7da1b16098d0> begin[:]
return[constant[False]]
return[call[name[self].check_scan_dir, parameter[name[fs], name[path], name[info]]]] | keyword[def] identifier[_check_scan_dir] ( identifier[self] , identifier[fs] , identifier[path] , identifier[info] , identifier[depth] ):
literal[string]
keyword[if] identifier[self] . identifier[max_depth] keyword[is] keyword[not] keyword[None] keyword[and] identifier[depth] >= identifier[self] . identifier[max_depth] :
keyword[return] keyword[False]
keyword[return] identifier[self] . identifier[check_scan_dir] ( identifier[fs] , identifier[path] , identifier[info] ) | def _check_scan_dir(self, fs, path, info, depth):
# type: (FS, Text, Info, int) -> bool
'Check if a directory contents should be scanned.'
if self.max_depth is not None and depth >= self.max_depth:
return False # depends on [control=['if'], data=[]]
return self.check_scan_dir(fs, path, info) |
def is_collection_aligned(self, data_collection):
"""Check if this Data Collection is aligned with another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collection: The Data Collection which you want to test if this
collection is aligned with.
Return:
True if collections are aligned, False if not aligned
"""
if self._collection_type != data_collection._collection_type:
return False
elif len(self.values) != len(data_collection.values):
return False
elif self.datetimes != data_collection.datetimes:
return False
else:
return True | def function[is_collection_aligned, parameter[self, data_collection]]:
constant[Check if this Data Collection is aligned with another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collection: The Data Collection which you want to test if this
collection is aligned with.
Return:
True if collections are aligned, False if not aligned
]
if compare[name[self]._collection_type not_equal[!=] name[data_collection]._collection_type] begin[:]
return[constant[False]] | keyword[def] identifier[is_collection_aligned] ( identifier[self] , identifier[data_collection] ):
literal[string]
keyword[if] identifier[self] . identifier[_collection_type] != identifier[data_collection] . identifier[_collection_type] :
keyword[return] keyword[False]
keyword[elif] identifier[len] ( identifier[self] . identifier[values] )!= identifier[len] ( identifier[data_collection] . identifier[values] ):
keyword[return] keyword[False]
keyword[elif] identifier[self] . identifier[datetimes] != identifier[data_collection] . identifier[datetimes] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def is_collection_aligned(self, data_collection):
"""Check if this Data Collection is aligned with another.
Aligned Data Collections are of the same Data Collection class, have the
same number of values and have matching datetimes.
Args:
data_collection: The Data Collection which you want to test if this
collection is aligned with.
Return:
True if collections are aligned, False if not aligned
"""
if self._collection_type != data_collection._collection_type:
return False # depends on [control=['if'], data=[]]
elif len(self.values) != len(data_collection.values):
return False # depends on [control=['if'], data=[]]
elif self.datetimes != data_collection.datetimes:
return False # depends on [control=['if'], data=[]]
else:
return True |
def process_dynamic_completion(self, completion):
""" how to validate and generate completion for dynamic params """
if len(completion.split()) > 1:
completion = '\"' + completion + '\"'
if self.validate_completion(completion):
yield Completion(completion, -len(self.unfinished_word)) | def function[process_dynamic_completion, parameter[self, completion]]:
constant[ how to validate and generate completion for dynamic params ]
if compare[call[name[len], parameter[call[name[completion].split, parameter[]]]] greater[>] constant[1]] begin[:]
variable[completion] assign[=] binary_operation[binary_operation[constant["] + name[completion]] + constant["]]
if call[name[self].validate_completion, parameter[name[completion]]] begin[:]
<ast.Yield object at 0x7da1b22adf60> | keyword[def] identifier[process_dynamic_completion] ( identifier[self] , identifier[completion] ):
literal[string]
keyword[if] identifier[len] ( identifier[completion] . identifier[split] ())> literal[int] :
identifier[completion] = literal[string] + identifier[completion] + literal[string]
keyword[if] identifier[self] . identifier[validate_completion] ( identifier[completion] ):
keyword[yield] identifier[Completion] ( identifier[completion] ,- identifier[len] ( identifier[self] . identifier[unfinished_word] )) | def process_dynamic_completion(self, completion):
""" how to validate and generate completion for dynamic params """
if len(completion.split()) > 1:
completion = '"' + completion + '"' # depends on [control=['if'], data=[]]
if self.validate_completion(completion):
yield Completion(completion, -len(self.unfinished_word)) # depends on [control=['if'], data=[]] |
def match(self, props=None, rng=None, offset=None):
"""
Provide any of the args and match or dont.
:param props: Should be a subset of my props.
:param rng: Exactly match my range.
:param offset: I start after this offset.
:returns: True if all the provided predicates match or are None
"""
if rng:
s, e = rng
else:
e = s = None
return ((e is None or self.end == e) and
(s is None or self.start == s)) and \
(props is None or props.issubset(self.props)) and \
(offset is None or self.start >= offset) | def function[match, parameter[self, props, rng, offset]]:
constant[
Provide any of the args and match or dont.
:param props: Should be a subset of my props.
:param rng: Exactly match my range.
:param offset: I start after this offset.
:returns: True if all the provided predicates match or are None
]
if name[rng] begin[:]
<ast.Tuple object at 0x7da1b14d2140> assign[=] name[rng]
return[<ast.BoolOp object at 0x7da2054a5510>] | keyword[def] identifier[match] ( identifier[self] , identifier[props] = keyword[None] , identifier[rng] = keyword[None] , identifier[offset] = keyword[None] ):
literal[string]
keyword[if] identifier[rng] :
identifier[s] , identifier[e] = identifier[rng]
keyword[else] :
identifier[e] = identifier[s] = keyword[None]
keyword[return] (( identifier[e] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[end] == identifier[e] ) keyword[and]
( identifier[s] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[start] == identifier[s] )) keyword[and] ( identifier[props] keyword[is] keyword[None] keyword[or] identifier[props] . identifier[issubset] ( identifier[self] . identifier[props] )) keyword[and] ( identifier[offset] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[start] >= identifier[offset] ) | def match(self, props=None, rng=None, offset=None):
"""
Provide any of the args and match or dont.
:param props: Should be a subset of my props.
:param rng: Exactly match my range.
:param offset: I start after this offset.
:returns: True if all the provided predicates match or are None
"""
if rng:
(s, e) = rng # depends on [control=['if'], data=[]]
else:
e = s = None
return ((e is None or self.end == e) and (s is None or self.start == s)) and (props is None or props.issubset(self.props)) and (offset is None or self.start >= offset) |
def get_neighbor_sentence_ngrams(
mention, d=1, attrib="words", n_min=1, n_max=1, lower=True
):
"""Get the ngrams that are in the neighoring Sentences of the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention whose neighbor Sentences are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
"""
spans = _to_spans(mention)
for span in spans:
for ngram in chain.from_iterable(
[
tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
)
for sentence in span.sentence.document.sentences
if abs(sentence.position - span.sentence.position) <= d
and sentence != span.sentence
]
):
yield ngram | def function[get_neighbor_sentence_ngrams, parameter[mention, d, attrib, n_min, n_max, lower]]:
constant[Get the ngrams that are in the neighoring Sentences of the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention whose neighbor Sentences are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
]
variable[spans] assign[=] call[name[_to_spans], parameter[name[mention]]]
for taget[name[span]] in starred[name[spans]] begin[:]
for taget[name[ngram]] in starred[call[name[chain].from_iterable, parameter[<ast.ListComp object at 0x7da1b26afa90>]]] begin[:]
<ast.Yield object at 0x7da204623c70> | keyword[def] identifier[get_neighbor_sentence_ngrams] (
identifier[mention] , identifier[d] = literal[int] , identifier[attrib] = literal[string] , identifier[n_min] = literal[int] , identifier[n_max] = literal[int] , identifier[lower] = keyword[True]
):
literal[string]
identifier[spans] = identifier[_to_spans] ( identifier[mention] )
keyword[for] identifier[span] keyword[in] identifier[spans] :
keyword[for] identifier[ngram] keyword[in] identifier[chain] . identifier[from_iterable] (
[
identifier[tokens_to_ngrams] (
identifier[getattr] ( identifier[sentence] , identifier[attrib] ), identifier[n_min] = identifier[n_min] , identifier[n_max] = identifier[n_max] , identifier[lower] = identifier[lower]
)
keyword[for] identifier[sentence] keyword[in] identifier[span] . identifier[sentence] . identifier[document] . identifier[sentences]
keyword[if] identifier[abs] ( identifier[sentence] . identifier[position] - identifier[span] . identifier[sentence] . identifier[position] )<= identifier[d]
keyword[and] identifier[sentence] != identifier[span] . identifier[sentence]
]
):
keyword[yield] identifier[ngram] | def get_neighbor_sentence_ngrams(mention, d=1, attrib='words', n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the neighoring Sentences of the given Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
:param mention: The Mention whose neighbor Sentences are being searched
:param attrib: The token attribute type (e.g. words, lemmas, poses)
:param n_min: The minimum n of the ngrams that should be returned
:param n_max: The maximum n of the ngrams that should be returned
:param lower: If True, all ngrams will be returned in lower case
:rtype: a *generator* of ngrams
"""
spans = _to_spans(mention)
for span in spans:
for ngram in chain.from_iterable([tokens_to_ngrams(getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower) for sentence in span.sentence.document.sentences if abs(sentence.position - span.sentence.position) <= d and sentence != span.sentence]):
yield ngram # depends on [control=['for'], data=['ngram']] # depends on [control=['for'], data=['span']] |
def set_present(name, set_type, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Verify the set exists.
name
A user-defined set name.
set_type
The type for the set.
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
set_check = __salt__['ipset.check_set'](name)
if set_check is True:
ret['result'] = True
ret['comment'] = ('ipset set {0} already exists for {1}'
.format(name, family))
return ret
if __opts__['test']:
ret['comment'] = 'ipset set {0} would be added for {1}'.format(
name,
family)
return ret
command = __salt__['ipset.new_set'](name, set_type, family, **kwargs)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = ('ipset set {0} created successfully for {1}'
.format(name, family))
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to create set {0} for {2}: {1}'.format(
name,
command.strip(),
family
)
return ret | def function[set_present, parameter[name, set_type, family]]:
constant[
.. versionadded:: 2014.7.0
Verify the set exists.
name
A user-defined set name.
set_type
The type for the set.
family
Networking family, either ipv4 or ipv6
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b20445e0>, <ast.Constant object at 0x7da1b2044b20>, <ast.Constant object at 0x7da1b2047f70>, <ast.Constant object at 0x7da1b2044190>], [<ast.Name object at 0x7da1b2044b80>, <ast.Dict object at 0x7da1b2045f30>, <ast.Constant object at 0x7da1b2046bc0>, <ast.Constant object at 0x7da1b2047820>]]
variable[set_check] assign[=] call[call[name[__salt__]][constant[ipset.check_set]], parameter[name[name]]]
if compare[name[set_check] is constant[True]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[ipset set {0} already exists for {1}].format, parameter[name[name], name[family]]]
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[ipset set {0} would be added for {1}].format, parameter[name[name], name[family]]]
return[name[ret]]
variable[command] assign[=] call[call[name[__salt__]][constant[ipset.new_set]], parameter[name[name], name[set_type], name[family]]]
if compare[name[command] is constant[True]] begin[:]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da207f99b70>], [<ast.Name object at 0x7da207f9a710>]]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[ipset set {0} created successfully for {1}].format, parameter[name[name], name[family]]]
return[name[ret]] | keyword[def] identifier[set_present] ( identifier[name] , identifier[set_type] , identifier[family] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None] ,
literal[string] : literal[string] }
identifier[set_check] = identifier[__salt__] [ literal[string] ]( identifier[name] )
keyword[if] identifier[set_check] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]=( literal[string]
. identifier[format] ( identifier[name] , identifier[family] ))
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[family] )
keyword[return] identifier[ret]
identifier[command] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[set_type] , identifier[family] ,** identifier[kwargs] )
keyword[if] identifier[command] keyword[is] keyword[True] :
identifier[ret] [ literal[string] ]={ literal[string] : identifier[name] }
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]=( literal[string]
. identifier[format] ( identifier[name] , identifier[family] ))
keyword[return] identifier[ret]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] (
identifier[name] ,
identifier[command] . identifier[strip] (),
identifier[family]
)
keyword[return] identifier[ret] | def set_present(name, set_type, family='ipv4', **kwargs):
"""
.. versionadded:: 2014.7.0
Verify the set exists.
name
A user-defined set name.
set_type
The type for the set.
family
Networking family, either ipv4 or ipv6
"""
ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
set_check = __salt__['ipset.check_set'](name)
if set_check is True:
ret['result'] = True
ret['comment'] = 'ipset set {0} already exists for {1}'.format(name, family)
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['comment'] = 'ipset set {0} would be added for {1}'.format(name, family)
return ret # depends on [control=['if'], data=[]]
command = __salt__['ipset.new_set'](name, set_type, family, **kwargs)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'ipset set {0} created successfully for {1}'.format(name, family)
return ret # depends on [control=['if'], data=[]]
else:
ret['result'] = False
ret['comment'] = 'Failed to create set {0} for {2}: {1}'.format(name, command.strip(), family)
return ret |
def from_dict(data, ctx):
"""
Instantiate a new PositionSide from a dict (generally from loading a
JSON response). The data used to instantiate the PositionSide is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('units') is not None:
data['units'] = ctx.convert_decimal_number(
data.get('units')
)
if data.get('averagePrice') is not None:
data['averagePrice'] = ctx.convert_decimal_number(
data.get('averagePrice')
)
if data.get('pl') is not None:
data['pl'] = ctx.convert_decimal_number(
data.get('pl')
)
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(
data.get('unrealizedPL')
)
if data.get('resettablePL') is not None:
data['resettablePL'] = ctx.convert_decimal_number(
data.get('resettablePL')
)
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(
data.get('financing')
)
if data.get('guaranteedExecutionFees') is not None:
data['guaranteedExecutionFees'] = ctx.convert_decimal_number(
data.get('guaranteedExecutionFees')
)
return PositionSide(**data) | def function[from_dict, parameter[data, ctx]]:
constant[
Instantiate a new PositionSide from a dict (generally from loading a
JSON response). The data used to instantiate the PositionSide is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
]
variable[data] assign[=] call[name[data].copy, parameter[]]
if compare[call[name[data].get, parameter[constant[units]]] is_not constant[None]] begin[:]
call[name[data]][constant[units]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[units]]]]]
if compare[call[name[data].get, parameter[constant[averagePrice]]] is_not constant[None]] begin[:]
call[name[data]][constant[averagePrice]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[averagePrice]]]]]
if compare[call[name[data].get, parameter[constant[pl]]] is_not constant[None]] begin[:]
call[name[data]][constant[pl]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[pl]]]]]
if compare[call[name[data].get, parameter[constant[unrealizedPL]]] is_not constant[None]] begin[:]
call[name[data]][constant[unrealizedPL]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[unrealizedPL]]]]]
if compare[call[name[data].get, parameter[constant[resettablePL]]] is_not constant[None]] begin[:]
call[name[data]][constant[resettablePL]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[resettablePL]]]]]
if compare[call[name[data].get, parameter[constant[financing]]] is_not constant[None]] begin[:]
call[name[data]][constant[financing]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[financing]]]]]
if compare[call[name[data].get, parameter[constant[guaranteedExecutionFees]]] is_not constant[None]] begin[:]
call[name[data]][constant[guaranteedExecutionFees]] assign[=] call[name[ctx].convert_decimal_number, parameter[call[name[data].get, parameter[constant[guaranteedExecutionFees]]]]]
return[call[name[PositionSide], parameter[]]] | keyword[def] identifier[from_dict] ( identifier[data] , identifier[ctx] ):
literal[string]
identifier[data] = identifier[data] . identifier[copy] ()
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[if] identifier[data] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[ctx] . identifier[convert_decimal_number] (
identifier[data] . identifier[get] ( literal[string] )
)
keyword[return] identifier[PositionSide] (** identifier[data] ) | def from_dict(data, ctx):
"""
Instantiate a new PositionSide from a dict (generally from loading a
JSON response). The data used to instantiate the PositionSide is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately.
"""
data = data.copy()
if data.get('units') is not None:
data['units'] = ctx.convert_decimal_number(data.get('units')) # depends on [control=['if'], data=[]]
if data.get('averagePrice') is not None:
data['averagePrice'] = ctx.convert_decimal_number(data.get('averagePrice')) # depends on [control=['if'], data=[]]
if data.get('pl') is not None:
data['pl'] = ctx.convert_decimal_number(data.get('pl')) # depends on [control=['if'], data=[]]
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(data.get('unrealizedPL')) # depends on [control=['if'], data=[]]
if data.get('resettablePL') is not None:
data['resettablePL'] = ctx.convert_decimal_number(data.get('resettablePL')) # depends on [control=['if'], data=[]]
if data.get('financing') is not None:
data['financing'] = ctx.convert_decimal_number(data.get('financing')) # depends on [control=['if'], data=[]]
if data.get('guaranteedExecutionFees') is not None:
data['guaranteedExecutionFees'] = ctx.convert_decimal_number(data.get('guaranteedExecutionFees')) # depends on [control=['if'], data=[]]
return PositionSide(**data) |
def mget(self, *keys):
""" -> #list of values at the specified @keys """
return list(map(
self._loads, self._client.hmget(self.key_prefix, *keys))) | def function[mget, parameter[self]]:
constant[ -> #list of values at the specified @keys ]
return[call[name[list], parameter[call[name[map], parameter[name[self]._loads, call[name[self]._client.hmget, parameter[name[self].key_prefix, <ast.Starred object at 0x7da1b28f4b20>]]]]]]] | keyword[def] identifier[mget] ( identifier[self] ,* identifier[keys] ):
literal[string]
keyword[return] identifier[list] ( identifier[map] (
identifier[self] . identifier[_loads] , identifier[self] . identifier[_client] . identifier[hmget] ( identifier[self] . identifier[key_prefix] ,* identifier[keys] ))) | def mget(self, *keys):
""" -> #list of values at the specified @keys """
return list(map(self._loads, self._client.hmget(self.key_prefix, *keys))) |
def batch_norm(attrs, inputs, proto_obj):
"""Batch normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps',
'is_test': 'fix_gamma'})
new_attrs = translation_utils._remove_attributes(new_attrs,
['spatial', 'consumed_inputs'])
# Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5)
cudnn_min_eps = 1e-5
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
# in test mode "fix_gamma" should be unset.
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return 'BatchNorm', new_attrs, inputs | def function[batch_norm, parameter[attrs, inputs, proto_obj]]:
constant[Batch normalization.]
variable[new_attrs] assign[=] call[name[translation_utils]._fix_attribute_names, parameter[name[attrs], dictionary[[<ast.Constant object at 0x7da1b202b790>, <ast.Constant object at 0x7da1b2029780>], [<ast.Constant object at 0x7da1b202b9d0>, <ast.Constant object at 0x7da1b202bdc0>]]]]
variable[new_attrs] assign[=] call[name[translation_utils]._remove_attributes, parameter[name[new_attrs], list[[<ast.Constant object at 0x7da1b202b7c0>, <ast.Constant object at 0x7da1b2029ff0>]]]]
variable[cudnn_min_eps] assign[=] constant[1e-05]
variable[cudnn_off] assign[=] <ast.IfExp object at 0x7da1b202bf40>
variable[new_attrs] assign[=] call[name[translation_utils]._add_extra_attributes, parameter[name[new_attrs], dictionary[[<ast.Constant object at 0x7da1b202b9a0>], [<ast.Name object at 0x7da1b2029b40>]]]]
call[name[new_attrs]][constant[fix_gamma]] assign[=] <ast.UnaryOp object at 0x7da1b20293c0>
return[tuple[[<ast.Constant object at 0x7da1b2064730>, <ast.Name object at 0x7da1b2067190>, <ast.Name object at 0x7da1b2064460>]]] | keyword[def] identifier[batch_norm] ( identifier[attrs] , identifier[inputs] , identifier[proto_obj] ):
literal[string]
identifier[new_attrs] = identifier[translation_utils] . identifier[_fix_attribute_names] ( identifier[attrs] ,{ literal[string] : literal[string] ,
literal[string] : literal[string] })
identifier[new_attrs] = identifier[translation_utils] . identifier[_remove_attributes] ( identifier[new_attrs] ,
[ literal[string] , literal[string] ])
identifier[cudnn_min_eps] = literal[int]
identifier[cudnn_off] = literal[int] keyword[if] identifier[attrs] . identifier[get] ( literal[string] , identifier[cudnn_min_eps] )>= identifier[cudnn_min_eps] keyword[else] literal[int]
identifier[new_attrs] = identifier[translation_utils] . identifier[_add_extra_attributes] ( identifier[new_attrs] ,{ literal[string] : identifier[cudnn_off] })
identifier[new_attrs] [ literal[string] ]= keyword[not] identifier[attrs] . identifier[get] ( literal[string] , literal[int] )
keyword[return] literal[string] , identifier[new_attrs] , identifier[inputs] | def batch_norm(attrs, inputs, proto_obj):
"""Batch normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps', 'is_test': 'fix_gamma'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['spatial', 'consumed_inputs'])
# Disable cuDNN BN only if epsilon from model is < than minimum cuDNN eps (1e-5)
cudnn_min_eps = 1e-05
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
# in test mode "fix_gamma" should be unset.
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return ('BatchNorm', new_attrs, inputs) |
def get_model_details(self, model_name):
"""Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details.
"""
full_name = model_name
if not model_name.startswith('projects/'):
full_name = ('projects/%s/models/%s' % (self._project_id, model_name))
return self._api.projects().models().get(name=full_name).execute() | def function[get_model_details, parameter[self, model_name]]:
constant[Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details.
]
variable[full_name] assign[=] name[model_name]
if <ast.UnaryOp object at 0x7da1b1120850> begin[:]
variable[full_name] assign[=] binary_operation[constant[projects/%s/models/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1122260>, <ast.Name object at 0x7da1b1120820>]]]
return[call[call[call[call[name[self]._api.projects, parameter[]].models, parameter[]].get, parameter[]].execute, parameter[]]] | keyword[def] identifier[get_model_details] ( identifier[self] , identifier[model_name] ):
literal[string]
identifier[full_name] = identifier[model_name]
keyword[if] keyword[not] identifier[model_name] . identifier[startswith] ( literal[string] ):
identifier[full_name] =( literal[string] %( identifier[self] . identifier[_project_id] , identifier[model_name] ))
keyword[return] identifier[self] . identifier[_api] . identifier[projects] (). identifier[models] (). identifier[get] ( identifier[name] = identifier[full_name] ). identifier[execute] () | def get_model_details(self, model_name):
"""Get details of the specified model from CloudML Service.
Args:
model_name: the name of the model. It can be a model full name
("projects/[project_id]/models/[model_name]") or just [model_name].
Returns: a dictionary of the model details.
"""
full_name = model_name
if not model_name.startswith('projects/'):
full_name = 'projects/%s/models/%s' % (self._project_id, model_name) # depends on [control=['if'], data=[]]
return self._api.projects().models().get(name=full_name).execute() |
def listlist_and_matrix_to_listdict(graph, weight=None):
"""Transforms the weighted adjacency list representation of a graph
of type listlist + optional weight matrix
into the listdict representation
:param graph: in listlist representation
:param weight: optional weight matrix
:returns: graph in listdict representation
:complexity: linear
"""
if weight:
return [{v:weight[u][v] for v in graph[u]} for u in range(len(graph))]
else:
return [{v:None for v in graph[u]} for u in range(len(graph))] | def function[listlist_and_matrix_to_listdict, parameter[graph, weight]]:
constant[Transforms the weighted adjacency list representation of a graph
of type listlist + optional weight matrix
into the listdict representation
:param graph: in listlist representation
:param weight: optional weight matrix
:returns: graph in listdict representation
:complexity: linear
]
if name[weight] begin[:]
return[<ast.ListComp object at 0x7da1b07f7a90>] | keyword[def] identifier[listlist_and_matrix_to_listdict] ( identifier[graph] , identifier[weight] = keyword[None] ):
literal[string]
keyword[if] identifier[weight] :
keyword[return] [{ identifier[v] : identifier[weight] [ identifier[u] ][ identifier[v] ] keyword[for] identifier[v] keyword[in] identifier[graph] [ identifier[u] ]} keyword[for] identifier[u] keyword[in] identifier[range] ( identifier[len] ( identifier[graph] ))]
keyword[else] :
keyword[return] [{ identifier[v] : keyword[None] keyword[for] identifier[v] keyword[in] identifier[graph] [ identifier[u] ]} keyword[for] identifier[u] keyword[in] identifier[range] ( identifier[len] ( identifier[graph] ))] | def listlist_and_matrix_to_listdict(graph, weight=None):
"""Transforms the weighted adjacency list representation of a graph
of type listlist + optional weight matrix
into the listdict representation
:param graph: in listlist representation
:param weight: optional weight matrix
:returns: graph in listdict representation
:complexity: linear
"""
if weight:
return [{v: weight[u][v] for v in graph[u]} for u in range(len(graph))] # depends on [control=['if'], data=[]]
else:
return [{v: None for v in graph[u]} for u in range(len(graph))] |
def create_zone(domain, profile, type='master', ttl=None):
'''
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param profile: The profile key
:type profile: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.create_zone google.com profile1
'''
conn = _get_driver(profile=profile)
zone = conn.create_record(domain, type=type, ttl=ttl)
return _simple_zone(zone) | def function[create_zone, parameter[domain, profile, type, ttl]]:
constant[
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param profile: The profile key
:type profile: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.create_zone google.com profile1
]
variable[conn] assign[=] call[name[_get_driver], parameter[]]
variable[zone] assign[=] call[name[conn].create_record, parameter[name[domain]]]
return[call[name[_simple_zone], parameter[name[zone]]]] | keyword[def] identifier[create_zone] ( identifier[domain] , identifier[profile] , identifier[type] = literal[string] , identifier[ttl] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_get_driver] ( identifier[profile] = identifier[profile] )
identifier[zone] = identifier[conn] . identifier[create_record] ( identifier[domain] , identifier[type] = identifier[type] , identifier[ttl] = identifier[ttl] )
keyword[return] identifier[_simple_zone] ( identifier[zone] ) | def create_zone(domain, profile, type='master', ttl=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com)
:type domain: ``str``
:param profile: The profile key
:type profile: ``str``
:param type: Zone type (master / slave).
:type type: ``str``
:param ttl: TTL for new records. (optional)
:type ttl: ``int``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.create_zone google.com profile1
"""
conn = _get_driver(profile=profile)
zone = conn.create_record(domain, type=type, ttl=ttl)
return _simple_zone(zone) |
def wait(self, timeout=None):
"""
Calls following snippet so you don't have to remember what import. See
:py:obj:`WebDriverWait <selenium.webdriver.support.wait.WebDriverWait>` for more
information. Detault timeout is `~.default_wait_timeout`.
.. code-block:: python
selenium.webdriver.support.wait.WebDriverWait(driver, timeout)
Example:
.. code-block:: python
driver.wait().until(lambda driver: len(driver.find_element_by_id('elm')) > 10)
If you need to wait for element, consider using
:py:meth:`~._WebdriverWrapper.wait_for_element` instead.
"""
if not timeout:
timeout = self.default_wait_timeout
return WebDriverWait(self, timeout) | def function[wait, parameter[self, timeout]]:
constant[
Calls following snippet so you don't have to remember what import. See
:py:obj:`WebDriverWait <selenium.webdriver.support.wait.WebDriverWait>` for more
information. Detault timeout is `~.default_wait_timeout`.
.. code-block:: python
selenium.webdriver.support.wait.WebDriverWait(driver, timeout)
Example:
.. code-block:: python
driver.wait().until(lambda driver: len(driver.find_element_by_id('elm')) > 10)
If you need to wait for element, consider using
:py:meth:`~._WebdriverWrapper.wait_for_element` instead.
]
if <ast.UnaryOp object at 0x7da1b0efb2b0> begin[:]
variable[timeout] assign[=] name[self].default_wait_timeout
return[call[name[WebDriverWait], parameter[name[self], name[timeout]]]] | keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[timeout] :
identifier[timeout] = identifier[self] . identifier[default_wait_timeout]
keyword[return] identifier[WebDriverWait] ( identifier[self] , identifier[timeout] ) | def wait(self, timeout=None):
"""
Calls following snippet so you don't have to remember what import. See
:py:obj:`WebDriverWait <selenium.webdriver.support.wait.WebDriverWait>` for more
information. Detault timeout is `~.default_wait_timeout`.
.. code-block:: python
selenium.webdriver.support.wait.WebDriverWait(driver, timeout)
Example:
.. code-block:: python
driver.wait().until(lambda driver: len(driver.find_element_by_id('elm')) > 10)
If you need to wait for element, consider using
:py:meth:`~._WebdriverWrapper.wait_for_element` instead.
"""
if not timeout:
timeout = self.default_wait_timeout # depends on [control=['if'], data=[]]
return WebDriverWait(self, timeout) |
def change_db(self, db, user=None):
"""Change connect database."""
# Get original config and change database key
config = self._config
config['database'] = db
if user:
config['user'] = user
self.database = db
# Close current database connection
self._disconnect()
# Reconnect to the new database
self._connect(config) | def function[change_db, parameter[self, db, user]]:
constant[Change connect database.]
variable[config] assign[=] name[self]._config
call[name[config]][constant[database]] assign[=] name[db]
if name[user] begin[:]
call[name[config]][constant[user]] assign[=] name[user]
name[self].database assign[=] name[db]
call[name[self]._disconnect, parameter[]]
call[name[self]._connect, parameter[name[config]]] | keyword[def] identifier[change_db] ( identifier[self] , identifier[db] , identifier[user] = keyword[None] ):
literal[string]
identifier[config] = identifier[self] . identifier[_config]
identifier[config] [ literal[string] ]= identifier[db]
keyword[if] identifier[user] :
identifier[config] [ literal[string] ]= identifier[user]
identifier[self] . identifier[database] = identifier[db]
identifier[self] . identifier[_disconnect] ()
identifier[self] . identifier[_connect] ( identifier[config] ) | def change_db(self, db, user=None):
"""Change connect database."""
# Get original config and change database key
config = self._config
config['database'] = db
if user:
config['user'] = user # depends on [control=['if'], data=[]]
self.database = db
# Close current database connection
self._disconnect()
# Reconnect to the new database
self._connect(config) |
def update(self, title=None, description=None):
"""Update the image with a new title and/or description."""
url = (self._imgur._base_url + "/3/image/"
"{0}".format(self._delete_or_id_hash))
is_updated = self._imgur._send_request(url, params=locals(),
method='POST')
if is_updated:
self.title = title or self.title
self.description = description or self.description
return is_updated | def function[update, parameter[self, title, description]]:
constant[Update the image with a new title and/or description.]
variable[url] assign[=] binary_operation[name[self]._imgur._base_url + call[constant[/3/image/{0}].format, parameter[name[self]._delete_or_id_hash]]]
variable[is_updated] assign[=] call[name[self]._imgur._send_request, parameter[name[url]]]
if name[is_updated] begin[:]
name[self].title assign[=] <ast.BoolOp object at 0x7da2045678b0>
name[self].description assign[=] <ast.BoolOp object at 0x7da2045663e0>
return[name[is_updated]] | keyword[def] identifier[update] ( identifier[self] , identifier[title] = keyword[None] , identifier[description] = keyword[None] ):
literal[string]
identifier[url] =( identifier[self] . identifier[_imgur] . identifier[_base_url] + literal[string]
literal[string] . identifier[format] ( identifier[self] . identifier[_delete_or_id_hash] ))
identifier[is_updated] = identifier[self] . identifier[_imgur] . identifier[_send_request] ( identifier[url] , identifier[params] = identifier[locals] (),
identifier[method] = literal[string] )
keyword[if] identifier[is_updated] :
identifier[self] . identifier[title] = identifier[title] keyword[or] identifier[self] . identifier[title]
identifier[self] . identifier[description] = identifier[description] keyword[or] identifier[self] . identifier[description]
keyword[return] identifier[is_updated] | def update(self, title=None, description=None):
"""Update the image with a new title and/or description."""
url = self._imgur._base_url + '/3/image/{0}'.format(self._delete_or_id_hash)
is_updated = self._imgur._send_request(url, params=locals(), method='POST')
if is_updated:
self.title = title or self.title
self.description = description or self.description # depends on [control=['if'], data=[]]
return is_updated |
def calculate(self, T, method):
r'''Method to calculate low-pressure liquid thermal conductivity at
tempearture `T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature of the liquid, [K]
method : str
Name of the method to use
Returns
-------
kl : float
Thermal conductivity of the liquid at T and a low pressure, [W/m/K]
'''
if method == SHEFFY_JOHNSON:
kl = Sheffy_Johnson(T, self.MW, self.Tm)
elif method == SATO_RIEDEL:
kl = Sato_Riedel(T, self.MW, self.Tb, self.Tc)
elif method == GHARAGHEIZI_L:
kl = Gharagheizi_liquid(T, self.MW, self.Tb, self.Pc, self.omega)
elif method == NICOLA:
kl = Nicola(T, self.MW, self.Tc, self.Pc, self.omega)
elif method == NICOLA_ORIGINAL:
kl = Nicola_original(T, self.MW, self.Tc, self.omega, self.Hfus)
elif method == LAKSHMI_PRASAD:
kl = Lakshmi_Prasad(T, self.MW)
elif method == BAHADORI_L:
kl = Bahadori_liquid(T, self.MW)
elif method == DIPPR_PERRY_8E:
kl = EQ100(T, *self.Perrys2_315_coeffs)
elif method == VDI_PPDS:
kl = horner(self.VDI_PPDS_coeffs, T)
elif method == COOLPROP:
kl = CoolProp_T_dependent_property(T, self.CASRN, 'L', 'l')
elif method in self.tabular_data:
kl = self.interpolate(T, method)
return kl | def function[calculate, parameter[self, T, method]]:
constant[Method to calculate low-pressure liquid thermal conductivity at
tempearture `T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature of the liquid, [K]
method : str
Name of the method to use
Returns
-------
kl : float
Thermal conductivity of the liquid at T and a low pressure, [W/m/K]
]
if compare[name[method] equal[==] name[SHEFFY_JOHNSON]] begin[:]
variable[kl] assign[=] call[name[Sheffy_Johnson], parameter[name[T], name[self].MW, name[self].Tm]]
return[name[kl]] | keyword[def] identifier[calculate] ( identifier[self] , identifier[T] , identifier[method] ):
literal[string]
keyword[if] identifier[method] == identifier[SHEFFY_JOHNSON] :
identifier[kl] = identifier[Sheffy_Johnson] ( identifier[T] , identifier[self] . identifier[MW] , identifier[self] . identifier[Tm] )
keyword[elif] identifier[method] == identifier[SATO_RIEDEL] :
identifier[kl] = identifier[Sato_Riedel] ( identifier[T] , identifier[self] . identifier[MW] , identifier[self] . identifier[Tb] , identifier[self] . identifier[Tc] )
keyword[elif] identifier[method] == identifier[GHARAGHEIZI_L] :
identifier[kl] = identifier[Gharagheizi_liquid] ( identifier[T] , identifier[self] . identifier[MW] , identifier[self] . identifier[Tb] , identifier[self] . identifier[Pc] , identifier[self] . identifier[omega] )
keyword[elif] identifier[method] == identifier[NICOLA] :
identifier[kl] = identifier[Nicola] ( identifier[T] , identifier[self] . identifier[MW] , identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[omega] )
keyword[elif] identifier[method] == identifier[NICOLA_ORIGINAL] :
identifier[kl] = identifier[Nicola_original] ( identifier[T] , identifier[self] . identifier[MW] , identifier[self] . identifier[Tc] , identifier[self] . identifier[omega] , identifier[self] . identifier[Hfus] )
keyword[elif] identifier[method] == identifier[LAKSHMI_PRASAD] :
identifier[kl] = identifier[Lakshmi_Prasad] ( identifier[T] , identifier[self] . identifier[MW] )
keyword[elif] identifier[method] == identifier[BAHADORI_L] :
identifier[kl] = identifier[Bahadori_liquid] ( identifier[T] , identifier[self] . identifier[MW] )
keyword[elif] identifier[method] == identifier[DIPPR_PERRY_8E] :
identifier[kl] = identifier[EQ100] ( identifier[T] ,* identifier[self] . identifier[Perrys2_315_coeffs] )
keyword[elif] identifier[method] == identifier[VDI_PPDS] :
identifier[kl] = identifier[horner] ( identifier[self] . identifier[VDI_PPDS_coeffs] , identifier[T] )
keyword[elif] identifier[method] == identifier[COOLPROP] :
identifier[kl] = identifier[CoolProp_T_dependent_property] ( identifier[T] , identifier[self] . identifier[CASRN] , literal[string] , literal[string] )
keyword[elif] identifier[method] keyword[in] identifier[self] . identifier[tabular_data] :
identifier[kl] = identifier[self] . identifier[interpolate] ( identifier[T] , identifier[method] )
keyword[return] identifier[kl] | def calculate(self, T, method):
"""Method to calculate low-pressure liquid thermal conductivity at
tempearture `T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature of the liquid, [K]
method : str
Name of the method to use
Returns
-------
kl : float
Thermal conductivity of the liquid at T and a low pressure, [W/m/K]
"""
if method == SHEFFY_JOHNSON:
kl = Sheffy_Johnson(T, self.MW, self.Tm) # depends on [control=['if'], data=[]]
elif method == SATO_RIEDEL:
kl = Sato_Riedel(T, self.MW, self.Tb, self.Tc) # depends on [control=['if'], data=[]]
elif method == GHARAGHEIZI_L:
kl = Gharagheizi_liquid(T, self.MW, self.Tb, self.Pc, self.omega) # depends on [control=['if'], data=[]]
elif method == NICOLA:
kl = Nicola(T, self.MW, self.Tc, self.Pc, self.omega) # depends on [control=['if'], data=[]]
elif method == NICOLA_ORIGINAL:
kl = Nicola_original(T, self.MW, self.Tc, self.omega, self.Hfus) # depends on [control=['if'], data=[]]
elif method == LAKSHMI_PRASAD:
kl = Lakshmi_Prasad(T, self.MW) # depends on [control=['if'], data=[]]
elif method == BAHADORI_L:
kl = Bahadori_liquid(T, self.MW) # depends on [control=['if'], data=[]]
elif method == DIPPR_PERRY_8E:
kl = EQ100(T, *self.Perrys2_315_coeffs) # depends on [control=['if'], data=[]]
elif method == VDI_PPDS:
kl = horner(self.VDI_PPDS_coeffs, T) # depends on [control=['if'], data=[]]
elif method == COOLPROP:
kl = CoolProp_T_dependent_property(T, self.CASRN, 'L', 'l') # depends on [control=['if'], data=[]]
elif method in self.tabular_data:
kl = self.interpolate(T, method) # depends on [control=['if'], data=['method']]
return kl |
def _cache_ops_associate(protocol, msgtype):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111.
Positional arguments:
protocol -- Netlink protocol (integer).
msgtype -- Netlink message type (integer).
Returns:
nl_cache_ops instance with matching protocol containing matching msgtype or None.
"""
ops = cache_ops
while ops: # Loop until `ops` is None.
if ops.co_protocol == protocol:
for co_msgtype in ops.co_msgtypes:
if co_msgtype.mt_id == msgtype:
return ops
ops = ops.co_next
return None | def function[_cache_ops_associate, parameter[protocol, msgtype]]:
constant[https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111.
Positional arguments:
protocol -- Netlink protocol (integer).
msgtype -- Netlink message type (integer).
Returns:
nl_cache_ops instance with matching protocol containing matching msgtype or None.
]
variable[ops] assign[=] name[cache_ops]
while name[ops] begin[:]
if compare[name[ops].co_protocol equal[==] name[protocol]] begin[:]
for taget[name[co_msgtype]] in starred[name[ops].co_msgtypes] begin[:]
if compare[name[co_msgtype].mt_id equal[==] name[msgtype]] begin[:]
return[name[ops]]
variable[ops] assign[=] name[ops].co_next
return[constant[None]] | keyword[def] identifier[_cache_ops_associate] ( identifier[protocol] , identifier[msgtype] ):
literal[string]
identifier[ops] = identifier[cache_ops]
keyword[while] identifier[ops] :
keyword[if] identifier[ops] . identifier[co_protocol] == identifier[protocol] :
keyword[for] identifier[co_msgtype] keyword[in] identifier[ops] . identifier[co_msgtypes] :
keyword[if] identifier[co_msgtype] . identifier[mt_id] == identifier[msgtype] :
keyword[return] identifier[ops]
identifier[ops] = identifier[ops] . identifier[co_next]
keyword[return] keyword[None] | def _cache_ops_associate(protocol, msgtype):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111.
Positional arguments:
protocol -- Netlink protocol (integer).
msgtype -- Netlink message type (integer).
Returns:
nl_cache_ops instance with matching protocol containing matching msgtype or None.
"""
ops = cache_ops
while ops: # Loop until `ops` is None.
if ops.co_protocol == protocol:
for co_msgtype in ops.co_msgtypes:
if co_msgtype.mt_id == msgtype:
return ops # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['co_msgtype']] # depends on [control=['if'], data=[]]
ops = ops.co_next # depends on [control=['while'], data=[]]
return None |
def _map_query_path_to_location_info(query_metadata_table):
"""Create a map from each query path to a LocationInfo at that path.
Args:
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Returns:
Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
"""
query_path_to_location_info = {}
for location, location_info in query_metadata_table.registered_locations:
if not isinstance(location, Location):
continue
if location.query_path in query_path_to_location_info:
# make sure the stored location information equals the new location information
# for the fields the SQL backend requires.
equivalent_location_info = query_path_to_location_info[location.query_path]
if not _location_infos_equal(location_info, equivalent_location_info):
raise AssertionError(
u'Differing LocationInfos at query_path {} between {} and {}. Expected '
u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth '
u'and types to be equal for LocationInfos sharing the same query path.'.format(
location.query_path, location_info, equivalent_location_info))
query_path_to_location_info[location.query_path] = location_info
return query_path_to_location_info | def function[_map_query_path_to_location_info, parameter[query_metadata_table]]:
constant[Create a map from each query path to a LocationInfo at that path.
Args:
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Returns:
Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
]
variable[query_path_to_location_info] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b16441f0>, <ast.Name object at 0x7da1b1647a00>]]] in starred[name[query_metadata_table].registered_locations] begin[:]
if <ast.UnaryOp object at 0x7da1b1644880> begin[:]
continue
if compare[name[location].query_path in name[query_path_to_location_info]] begin[:]
variable[equivalent_location_info] assign[=] call[name[query_path_to_location_info]][name[location].query_path]
if <ast.UnaryOp object at 0x7da1b1645ff0> begin[:]
<ast.Raise object at 0x7da1b1644be0>
call[name[query_path_to_location_info]][name[location].query_path] assign[=] name[location_info]
return[name[query_path_to_location_info]] | keyword[def] identifier[_map_query_path_to_location_info] ( identifier[query_metadata_table] ):
literal[string]
identifier[query_path_to_location_info] ={}
keyword[for] identifier[location] , identifier[location_info] keyword[in] identifier[query_metadata_table] . identifier[registered_locations] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[location] , identifier[Location] ):
keyword[continue]
keyword[if] identifier[location] . identifier[query_path] keyword[in] identifier[query_path_to_location_info] :
identifier[equivalent_location_info] = identifier[query_path_to_location_info] [ identifier[location] . identifier[query_path] ]
keyword[if] keyword[not] identifier[_location_infos_equal] ( identifier[location_info] , identifier[equivalent_location_info] ):
keyword[raise] identifier[AssertionError] (
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[location] . identifier[query_path] , identifier[location_info] , identifier[equivalent_location_info] ))
identifier[query_path_to_location_info] [ identifier[location] . identifier[query_path] ]= identifier[location_info]
keyword[return] identifier[query_path_to_location_info] | def _map_query_path_to_location_info(query_metadata_table):
"""Create a map from each query path to a LocationInfo at that path.
Args:
query_metadata_table: QueryMetadataTable, object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
Returns:
Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
"""
query_path_to_location_info = {}
for (location, location_info) in query_metadata_table.registered_locations:
if not isinstance(location, Location):
continue # depends on [control=['if'], data=[]]
if location.query_path in query_path_to_location_info:
# make sure the stored location information equals the new location information
# for the fields the SQL backend requires.
equivalent_location_info = query_path_to_location_info[location.query_path]
if not _location_infos_equal(location_info, equivalent_location_info):
raise AssertionError(u'Differing LocationInfos at query_path {} between {} and {}. Expected parent_location.query_path, optional_scopes_depth, recursive_scopes_depth and types to be equal for LocationInfos sharing the same query path.'.format(location.query_path, location_info, equivalent_location_info)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['query_path_to_location_info']]
query_path_to_location_info[location.query_path] = location_info # depends on [control=['for'], data=[]]
return query_path_to_location_info |
def available():
'''
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
'''
ret = []
for path in __salt__['file.find']('/boot/kernel', name='*.ko$'):
bpath = os.path.basename(path)
comps = bpath.split('.')
if 'ko' in comps:
# This is a kernel module, return it without the .ko extension
ret.append('.'.join(comps[:comps.index('ko')]))
return ret | def function[available, parameter[]]:
constant[
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
]
variable[ret] assign[=] list[[]]
for taget[name[path]] in starred[call[call[name[__salt__]][constant[file.find]], parameter[constant[/boot/kernel]]]] begin[:]
variable[bpath] assign[=] call[name[os].path.basename, parameter[name[path]]]
variable[comps] assign[=] call[name[bpath].split, parameter[constant[.]]]
if compare[constant[ko] in name[comps]] begin[:]
call[name[ret].append, parameter[call[constant[.].join, parameter[call[name[comps]][<ast.Slice object at 0x7da1b1c0cca0>]]]]]
return[name[ret]] | keyword[def] identifier[available] ():
literal[string]
identifier[ret] =[]
keyword[for] identifier[path] keyword[in] identifier[__salt__] [ literal[string] ]( literal[string] , identifier[name] = literal[string] ):
identifier[bpath] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )
identifier[comps] = identifier[bpath] . identifier[split] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[comps] :
identifier[ret] . identifier[append] ( literal[string] . identifier[join] ( identifier[comps] [: identifier[comps] . identifier[index] ( literal[string] )]))
keyword[return] identifier[ret] | def available():
"""
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
"""
ret = []
for path in __salt__['file.find']('/boot/kernel', name='*.ko$'):
bpath = os.path.basename(path)
comps = bpath.split('.')
if 'ko' in comps:
# This is a kernel module, return it without the .ko extension
ret.append('.'.join(comps[:comps.index('ko')])) # depends on [control=['if'], data=['comps']] # depends on [control=['for'], data=['path']]
return ret |
def get_uid(self, idx):
"""
Return the `uid` of the elements with the given `idx`
:param list, matrix idx: external indices
:type idx: list, matrix
:return: a matrix of uid
"""
assert idx is not None
if isinstance(idx, (int, float, str)):
return self.uid[idx]
ret = []
for i in idx:
tmp = self.uid.get(i, None)
assert tmp is not None, (
'Model <{}> does not have element <{}>'.format(self._name, i))
ret.append(self.uid[i])
return ret | def function[get_uid, parameter[self, idx]]:
constant[
Return the `uid` of the elements with the given `idx`
:param list, matrix idx: external indices
:type idx: list, matrix
:return: a matrix of uid
]
assert[compare[name[idx] is_not constant[None]]]
if call[name[isinstance], parameter[name[idx], tuple[[<ast.Name object at 0x7da2044c2a40>, <ast.Name object at 0x7da2044c22f0>, <ast.Name object at 0x7da2044c2020>]]]] begin[:]
return[call[name[self].uid][name[idx]]]
variable[ret] assign[=] list[[]]
for taget[name[i]] in starred[name[idx]] begin[:]
variable[tmp] assign[=] call[name[self].uid.get, parameter[name[i], constant[None]]]
assert[compare[name[tmp] is_not constant[None]]]
call[name[ret].append, parameter[call[name[self].uid][name[i]]]]
return[name[ret]] | keyword[def] identifier[get_uid] ( identifier[self] , identifier[idx] ):
literal[string]
keyword[assert] identifier[idx] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[isinstance] ( identifier[idx] ,( identifier[int] , identifier[float] , identifier[str] )):
keyword[return] identifier[self] . identifier[uid] [ identifier[idx] ]
identifier[ret] =[]
keyword[for] identifier[i] keyword[in] identifier[idx] :
identifier[tmp] = identifier[self] . identifier[uid] . identifier[get] ( identifier[i] , keyword[None] )
keyword[assert] identifier[tmp] keyword[is] keyword[not] keyword[None] ,(
literal[string] . identifier[format] ( identifier[self] . identifier[_name] , identifier[i] ))
identifier[ret] . identifier[append] ( identifier[self] . identifier[uid] [ identifier[i] ])
keyword[return] identifier[ret] | def get_uid(self, idx):
"""
Return the `uid` of the elements with the given `idx`
:param list, matrix idx: external indices
:type idx: list, matrix
:return: a matrix of uid
"""
assert idx is not None
if isinstance(idx, (int, float, str)):
return self.uid[idx] # depends on [control=['if'], data=[]]
ret = []
for i in idx:
tmp = self.uid.get(i, None)
assert tmp is not None, 'Model <{}> does not have element <{}>'.format(self._name, i)
ret.append(self.uid[i]) # depends on [control=['for'], data=['i']]
return ret |
def parse(url):
"""
Parses out the information for this url, returning its components
expanded out to Python objects.
:param url | <str>
:return (<str> path, <dict> query, <str> fragment)
"""
result = urlparse.urlparse(nstr(url))
path = result.scheme + '://' + result.netloc
if result.path:
path += result.path
query = {}
# extract the python information from the query
if result.query:
url_query = urlparse.parse_qs(result.query)
for key, value in url_query.items():
if type(value) == list and len(value) == 1:
value = value[0]
query[key] = value
return path, query, result.fragment | def function[parse, parameter[url]]:
constant[
Parses out the information for this url, returning its components
expanded out to Python objects.
:param url | <str>
:return (<str> path, <dict> query, <str> fragment)
]
variable[result] assign[=] call[name[urlparse].urlparse, parameter[call[name[nstr], parameter[name[url]]]]]
variable[path] assign[=] binary_operation[binary_operation[name[result].scheme + constant[://]] + name[result].netloc]
if name[result].path begin[:]
<ast.AugAssign object at 0x7da20e955ab0>
variable[query] assign[=] dictionary[[], []]
if name[result].query begin[:]
variable[url_query] assign[=] call[name[urlparse].parse_qs, parameter[name[result].query]]
for taget[tuple[[<ast.Name object at 0x7da20e955d80>, <ast.Name object at 0x7da20e954be0>]]] in starred[call[name[url_query].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20e957df0> begin[:]
variable[value] assign[=] call[name[value]][constant[0]]
call[name[query]][name[key]] assign[=] name[value]
return[tuple[[<ast.Name object at 0x7da20e957a00>, <ast.Name object at 0x7da20e9562f0>, <ast.Attribute object at 0x7da20e9579a0>]]] | keyword[def] identifier[parse] ( identifier[url] ):
literal[string]
identifier[result] = identifier[urlparse] . identifier[urlparse] ( identifier[nstr] ( identifier[url] ))
identifier[path] = identifier[result] . identifier[scheme] + literal[string] + identifier[result] . identifier[netloc]
keyword[if] identifier[result] . identifier[path] :
identifier[path] += identifier[result] . identifier[path]
identifier[query] ={}
keyword[if] identifier[result] . identifier[query] :
identifier[url_query] = identifier[urlparse] . identifier[parse_qs] ( identifier[result] . identifier[query] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[url_query] . identifier[items] ():
keyword[if] identifier[type] ( identifier[value] )== identifier[list] keyword[and] identifier[len] ( identifier[value] )== literal[int] :
identifier[value] = identifier[value] [ literal[int] ]
identifier[query] [ identifier[key] ]= identifier[value]
keyword[return] identifier[path] , identifier[query] , identifier[result] . identifier[fragment] | def parse(url):
"""
Parses out the information for this url, returning its components
expanded out to Python objects.
:param url | <str>
:return (<str> path, <dict> query, <str> fragment)
"""
result = urlparse.urlparse(nstr(url))
path = result.scheme + '://' + result.netloc
if result.path:
path += result.path # depends on [control=['if'], data=[]]
query = {}
# extract the python information from the query
if result.query:
url_query = urlparse.parse_qs(result.query)
for (key, value) in url_query.items():
if type(value) == list and len(value) == 1:
value = value[0] # depends on [control=['if'], data=[]]
query[key] = value # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return (path, query, result.fragment) |
def insert_column(self, index, header, column):
"""Insert a column before `index` in the table.
If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.
Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.
Parameters
----------
index : int
List index rules apply.
header : str
Title of the column.
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `header` is not of type `str`.
ValueError:
If length of `column` is shorter than number of rows.
"""
if self._column_count == 0:
self.column_headers = HeaderData(self, [header])
self._table = [RowData(self, [i]) for i in column]
else:
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
column_length = 0
for i, (row, new_item) in enumerate(zip(self._table, column)):
row._insert(index, new_item)
column_length = i
if column_length == len(self._table) - 1:
self._column_count += 1
self._column_headers._insert(index, header)
self._column_alignments._insert(index, self.default_alignment)
self._column_widths._insert(index, 0)
self._left_padding_widths._insert(index, self.default_padding)
self._right_padding_widths._insert(index, self.default_padding)
else:
# Roll back changes so that table remains in consistent state
for j in range(column_length, -1, -1):
self._table[j]._pop(index)
raise ValueError(("length of 'column' should be atleast {}, "
"got {}").format(len(self._table),
column_length + 1)) | def function[insert_column, parameter[self, index, header, column]]:
constant[Insert a column before `index` in the table.
If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.
Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.
Parameters
----------
index : int
List index rules apply.
header : str
Title of the column.
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `header` is not of type `str`.
ValueError:
If length of `column` is shorter than number of rows.
]
if compare[name[self]._column_count equal[==] constant[0]] begin[:]
name[self].column_headers assign[=] call[name[HeaderData], parameter[name[self], list[[<ast.Name object at 0x7da1b06261d0>]]]]
name[self]._table assign[=] <ast.ListComp object at 0x7da1b0627760> | keyword[def] identifier[insert_column] ( identifier[self] , identifier[index] , identifier[header] , identifier[column] ):
literal[string]
keyword[if] identifier[self] . identifier[_column_count] == literal[int] :
identifier[self] . identifier[column_headers] = identifier[HeaderData] ( identifier[self] ,[ identifier[header] ])
identifier[self] . identifier[_table] =[ identifier[RowData] ( identifier[self] ,[ identifier[i] ]) keyword[for] identifier[i] keyword[in] identifier[column] ]
keyword[else] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[header] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[column_length] = literal[int]
keyword[for] identifier[i] ,( identifier[row] , identifier[new_item] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[self] . identifier[_table] , identifier[column] )):
identifier[row] . identifier[_insert] ( identifier[index] , identifier[new_item] )
identifier[column_length] = identifier[i]
keyword[if] identifier[column_length] == identifier[len] ( identifier[self] . identifier[_table] )- literal[int] :
identifier[self] . identifier[_column_count] += literal[int]
identifier[self] . identifier[_column_headers] . identifier[_insert] ( identifier[index] , identifier[header] )
identifier[self] . identifier[_column_alignments] . identifier[_insert] ( identifier[index] , identifier[self] . identifier[default_alignment] )
identifier[self] . identifier[_column_widths] . identifier[_insert] ( identifier[index] , literal[int] )
identifier[self] . identifier[_left_padding_widths] . identifier[_insert] ( identifier[index] , identifier[self] . identifier[default_padding] )
identifier[self] . identifier[_right_padding_widths] . identifier[_insert] ( identifier[index] , identifier[self] . identifier[default_padding] )
keyword[else] :
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[column_length] ,- literal[int] ,- literal[int] ):
identifier[self] . identifier[_table] [ identifier[j] ]. identifier[_pop] ( identifier[index] )
keyword[raise] identifier[ValueError] (( literal[string]
literal[string] ). identifier[format] ( identifier[len] ( identifier[self] . identifier[_table] ),
identifier[column_length] + literal[int] )) | def insert_column(self, index, header, column):
"""Insert a column before `index` in the table.
If length of column is bigger than number of rows, lets say
`k`, only the first `k` values of `column` is considered.
If column is shorter than 'k', ValueError is raised.
Note that Table remains in consistent state even if column
is too short. Any changes made by this method is rolled back
before raising the exception.
Parameters
----------
index : int
List index rules apply.
header : str
Title of the column.
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If `header` is not of type `str`.
ValueError:
If length of `column` is shorter than number of rows.
"""
if self._column_count == 0:
self.column_headers = HeaderData(self, [header])
self._table = [RowData(self, [i]) for i in column] # depends on [control=['if'], data=[]]
else:
if not isinstance(header, basestring):
raise TypeError('header must be of type str') # depends on [control=['if'], data=[]]
column_length = 0
for (i, (row, new_item)) in enumerate(zip(self._table, column)):
row._insert(index, new_item)
column_length = i # depends on [control=['for'], data=[]]
if column_length == len(self._table) - 1:
self._column_count += 1
self._column_headers._insert(index, header)
self._column_alignments._insert(index, self.default_alignment)
self._column_widths._insert(index, 0)
self._left_padding_widths._insert(index, self.default_padding)
self._right_padding_widths._insert(index, self.default_padding) # depends on [control=['if'], data=[]]
else:
# Roll back changes so that table remains in consistent state
for j in range(column_length, -1, -1):
self._table[j]._pop(index) # depends on [control=['for'], data=['j']]
raise ValueError("length of 'column' should be atleast {}, got {}".format(len(self._table), column_length + 1)) |
def _read_body(stream, decoder, strict=False, logger=None):
"""
Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response}
"""
def _read_args():
# we have to go through this insanity because it seems that amf0
# does not keep the array of args in the object references lookup
type_byte = stream.peek(1)
if type_byte == '\x11':
if not decoder.use_amf3:
raise pyamf.DecodeError(
"Unexpected AMF3 type with incorrect message type")
return decoder.readElement()
if type_byte != '\x0a':
raise pyamf.DecodeError("Array type required for request body")
stream.read(1)
x = stream.read_ulong()
return [decoder.readElement() for i in xrange(x)]
target = stream.read_utf8_string(stream.read_ushort())
response = stream.read_utf8_string(stream.read_ushort())
status = STATUS_OK
is_request = True
for code, s in STATUS_CODES.iteritems():
if not target.endswith(s):
continue
is_request = False
status = code
target = target[:0 - len(s)]
if logger:
logger.debug('Remoting target: %r' % (target,))
data_len = stream.read_ulong()
pos = stream.tell()
if is_request:
data = _read_args()
else:
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError("Data read from stream does not match body "
"length (%d != %d)" % (pos + data_len, stream.tell(),))
if is_request:
return response, Request(target, body=data)
if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
data = get_fault(data)
return target, Response(data, status) | def function[_read_body, parameter[stream, decoder, strict, logger]]:
constant[
Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response}
]
def function[_read_args, parameter[]]:
variable[type_byte] assign[=] call[name[stream].peek, parameter[constant[1]]]
if compare[name[type_byte] equal[==] constant[]] begin[:]
if <ast.UnaryOp object at 0x7da18f09f9a0> begin[:]
<ast.Raise object at 0x7da18f09f9d0>
return[call[name[decoder].readElement, parameter[]]]
if compare[name[type_byte] not_equal[!=] constant[
]] begin[:]
<ast.Raise object at 0x7da18f09d9c0>
call[name[stream].read, parameter[constant[1]]]
variable[x] assign[=] call[name[stream].read_ulong, parameter[]]
return[<ast.ListComp object at 0x7da18f09dd20>]
variable[target] assign[=] call[name[stream].read_utf8_string, parameter[call[name[stream].read_ushort, parameter[]]]]
variable[response] assign[=] call[name[stream].read_utf8_string, parameter[call[name[stream].read_ushort, parameter[]]]]
variable[status] assign[=] name[STATUS_OK]
variable[is_request] assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da18f09d300>, <ast.Name object at 0x7da18f09d030>]]] in starred[call[name[STATUS_CODES].iteritems, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da18f09cb80> begin[:]
continue
variable[is_request] assign[=] constant[False]
variable[status] assign[=] name[code]
variable[target] assign[=] call[name[target]][<ast.Slice object at 0x7da18f09cd30>]
if name[logger] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[Remoting target: %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2041d8c70>]]]]]
variable[data_len] assign[=] call[name[stream].read_ulong, parameter[]]
variable[pos] assign[=] call[name[stream].tell, parameter[]]
if name[is_request] begin[:]
variable[data] assign[=] call[name[_read_args], parameter[]]
if <ast.BoolOp object at 0x7da2041da800> begin[:]
<ast.Raise object at 0x7da2041da7a0>
if name[is_request] begin[:]
return[tuple[[<ast.Name object at 0x7da2041d85b0>, <ast.Call object at 0x7da2041d9630>]]]
if <ast.BoolOp object at 0x7da2041d91e0> begin[:]
variable[data] assign[=] call[name[get_fault], parameter[name[data]]]
return[tuple[[<ast.Name object at 0x7da2041da320>, <ast.Call object at 0x7da2041daf50>]]] | keyword[def] identifier[_read_body] ( identifier[stream] , identifier[decoder] , identifier[strict] = keyword[False] , identifier[logger] = keyword[None] ):
literal[string]
keyword[def] identifier[_read_args] ():
identifier[type_byte] = identifier[stream] . identifier[peek] ( literal[int] )
keyword[if] identifier[type_byte] == literal[string] :
keyword[if] keyword[not] identifier[decoder] . identifier[use_amf3] :
keyword[raise] identifier[pyamf] . identifier[DecodeError] (
literal[string] )
keyword[return] identifier[decoder] . identifier[readElement] ()
keyword[if] identifier[type_byte] != literal[string] :
keyword[raise] identifier[pyamf] . identifier[DecodeError] ( literal[string] )
identifier[stream] . identifier[read] ( literal[int] )
identifier[x] = identifier[stream] . identifier[read_ulong] ()
keyword[return] [ identifier[decoder] . identifier[readElement] () keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[x] )]
identifier[target] = identifier[stream] . identifier[read_utf8_string] ( identifier[stream] . identifier[read_ushort] ())
identifier[response] = identifier[stream] . identifier[read_utf8_string] ( identifier[stream] . identifier[read_ushort] ())
identifier[status] = identifier[STATUS_OK]
identifier[is_request] = keyword[True]
keyword[for] identifier[code] , identifier[s] keyword[in] identifier[STATUS_CODES] . identifier[iteritems] ():
keyword[if] keyword[not] identifier[target] . identifier[endswith] ( identifier[s] ):
keyword[continue]
identifier[is_request] = keyword[False]
identifier[status] = identifier[code]
identifier[target] = identifier[target] [: literal[int] - identifier[len] ( identifier[s] )]
keyword[if] identifier[logger] :
identifier[logger] . identifier[debug] ( literal[string] %( identifier[target] ,))
identifier[data_len] = identifier[stream] . identifier[read_ulong] ()
identifier[pos] = identifier[stream] . identifier[tell] ()
keyword[if] identifier[is_request] :
identifier[data] = identifier[_read_args] ()
keyword[else] :
identifier[data] = identifier[decoder] . identifier[readElement] ()
keyword[if] identifier[strict] keyword[and] identifier[pos] + identifier[data_len] != identifier[stream] . identifier[tell] ():
keyword[raise] identifier[pyamf] . identifier[DecodeError] ( literal[string]
literal[string] %( identifier[pos] + identifier[data_len] , identifier[stream] . identifier[tell] (),))
keyword[if] identifier[is_request] :
keyword[return] identifier[response] , identifier[Request] ( identifier[target] , identifier[body] = identifier[data] )
keyword[if] identifier[status] == identifier[STATUS_ERROR] keyword[and] identifier[isinstance] ( identifier[data] , identifier[pyamf] . identifier[ASObject] ):
identifier[data] = identifier[get_fault] ( identifier[data] )
keyword[return] identifier[target] , identifier[Response] ( identifier[data] , identifier[status] ) | def _read_body(stream, decoder, strict=False, logger=None):
"""
Read an AMF message body from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is `False`.
@param logger: Used to log interesting events whilst reading a remoting
body.
@type logger: A C{logging.Logger} instance or C{None}.
@return: A C{tuple} containing the C{id} of the request and the L{Request}
or L{Response}
"""
def _read_args():
# we have to go through this insanity because it seems that amf0
# does not keep the array of args in the object references lookup
type_byte = stream.peek(1)
if type_byte == '\x11':
if not decoder.use_amf3:
raise pyamf.DecodeError('Unexpected AMF3 type with incorrect message type') # depends on [control=['if'], data=[]]
return decoder.readElement() # depends on [control=['if'], data=[]]
if type_byte != '\n':
raise pyamf.DecodeError('Array type required for request body') # depends on [control=['if'], data=[]]
stream.read(1)
x = stream.read_ulong()
return [decoder.readElement() for i in xrange(x)]
target = stream.read_utf8_string(stream.read_ushort())
response = stream.read_utf8_string(stream.read_ushort())
status = STATUS_OK
is_request = True
for (code, s) in STATUS_CODES.iteritems():
if not target.endswith(s):
continue # depends on [control=['if'], data=[]]
is_request = False
status = code
target = target[:0 - len(s)] # depends on [control=['for'], data=[]]
if logger:
logger.debug('Remoting target: %r' % (target,)) # depends on [control=['if'], data=[]]
data_len = stream.read_ulong()
pos = stream.tell()
if is_request:
data = _read_args() # depends on [control=['if'], data=[]]
else:
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError('Data read from stream does not match body length (%d != %d)' % (pos + data_len, stream.tell())) # depends on [control=['if'], data=[]]
if is_request:
return (response, Request(target, body=data)) # depends on [control=['if'], data=[]]
if status == STATUS_ERROR and isinstance(data, pyamf.ASObject):
data = get_fault(data) # depends on [control=['if'], data=[]]
return (target, Response(data, status)) |
def MINEIGVAL(T0, T, TOL):
"""Finds the minimum eigenvalue of a Hermitian Toeplitz matrix
The classical power method is used together with a fast Toeplitz
equation solution routine. The eigenvector is normalized to unit length.
:param T0: Scalar corresponding to real matrix element t(0)
:param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix
:param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.
:return:
* EVAL - Real scalar denoting the minimum eigenvalue of matrix
* EVEC - Array of M complex eigenvector elements associated
.. note::
* External array T must be dimensioned >= M
* array EVEC must be >= M+1
* Internal array E must be dimensioned >= M+1 .
* **dependencies**
* :meth:`spectrum.toeplitz.HERMTOEP`
"""
M = len(T)
eigval = 10
eigvalold = 1
eigvec = numpy.zeros(M+1, dtype=complex)
for k in range(0,M+1):
eigvec[k] = 1+0j
it=0
#print 'initialisation',T0, T, eigval, eigvec
maxit = 15
while abs(eigvalold-eigval)>TOL*eigvalold and it<maxit:
it=it+1
eigvalold = eigval
#print 'iteration ',it, 'eigvalold=',eigvalold, 'eigval=', eigval
eig = toeplitz.HERMTOEP(T0, T, eigvec)
SUM = 0
save =0.+0j
for k in range(0, M+1):
SUM = SUM + eig[k].real**2+eig[k].imag**2
save = save +eig[k]*eigvec[k].conjugate()
SUM=1./SUM
eigval = save.real*SUM
for k in range(0,M+1):
eigvec[k] = SUM * eig[k]
if it==maxit:
print('warning reached max number of iteration (%s)' % maxit)
return eigval, eigvec | def function[MINEIGVAL, parameter[T0, T, TOL]]:
constant[Finds the minimum eigenvalue of a Hermitian Toeplitz matrix
The classical power method is used together with a fast Toeplitz
equation solution routine. The eigenvector is normalized to unit length.
:param T0: Scalar corresponding to real matrix element t(0)
:param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix
:param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.
:return:
* EVAL - Real scalar denoting the minimum eigenvalue of matrix
* EVEC - Array of M complex eigenvector elements associated
.. note::
* External array T must be dimensioned >= M
* array EVEC must be >= M+1
* Internal array E must be dimensioned >= M+1 .
* **dependencies**
* :meth:`spectrum.toeplitz.HERMTOEP`
]
variable[M] assign[=] call[name[len], parameter[name[T]]]
variable[eigval] assign[=] constant[10]
variable[eigvalold] assign[=] constant[1]
variable[eigvec] assign[=] call[name[numpy].zeros, parameter[binary_operation[name[M] + constant[1]]]]
for taget[name[k]] in starred[call[name[range], parameter[constant[0], binary_operation[name[M] + constant[1]]]]] begin[:]
call[name[eigvec]][name[k]] assign[=] binary_operation[constant[1] + constant[0j]]
variable[it] assign[=] constant[0]
variable[maxit] assign[=] constant[15]
while <ast.BoolOp object at 0x7da1b016dcc0> begin[:]
variable[it] assign[=] binary_operation[name[it] + constant[1]]
variable[eigvalold] assign[=] name[eigval]
variable[eig] assign[=] call[name[toeplitz].HERMTOEP, parameter[name[T0], name[T], name[eigvec]]]
variable[SUM] assign[=] constant[0]
variable[save] assign[=] binary_operation[constant[0.0] + constant[0j]]
for taget[name[k]] in starred[call[name[range], parameter[constant[0], binary_operation[name[M] + constant[1]]]]] begin[:]
variable[SUM] assign[=] binary_operation[binary_operation[name[SUM] + binary_operation[call[name[eig]][name[k]].real ** constant[2]]] + binary_operation[call[name[eig]][name[k]].imag ** constant[2]]]
variable[save] assign[=] binary_operation[name[save] + binary_operation[call[name[eig]][name[k]] * call[call[name[eigvec]][name[k]].conjugate, parameter[]]]]
variable[SUM] assign[=] binary_operation[constant[1.0] / name[SUM]]
variable[eigval] assign[=] binary_operation[name[save].real * name[SUM]]
for taget[name[k]] in starred[call[name[range], parameter[constant[0], binary_operation[name[M] + constant[1]]]]] begin[:]
call[name[eigvec]][name[k]] assign[=] binary_operation[name[SUM] * call[name[eig]][name[k]]]
if compare[name[it] equal[==] name[maxit]] begin[:]
call[name[print], parameter[binary_operation[constant[warning reached max number of iteration (%s)] <ast.Mod object at 0x7da2590d6920> name[maxit]]]]
return[tuple[[<ast.Name object at 0x7da1b010baf0>, <ast.Name object at 0x7da1b0108160>]]] | keyword[def] identifier[MINEIGVAL] ( identifier[T0] , identifier[T] , identifier[TOL] ):
literal[string]
identifier[M] = identifier[len] ( identifier[T] )
identifier[eigval] = literal[int]
identifier[eigvalold] = literal[int]
identifier[eigvec] = identifier[numpy] . identifier[zeros] ( identifier[M] + literal[int] , identifier[dtype] = identifier[complex] )
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[M] + literal[int] ):
identifier[eigvec] [ identifier[k] ]= literal[int] + literal[int]
identifier[it] = literal[int]
identifier[maxit] = literal[int]
keyword[while] identifier[abs] ( identifier[eigvalold] - identifier[eigval] )> identifier[TOL] * identifier[eigvalold] keyword[and] identifier[it] < identifier[maxit] :
identifier[it] = identifier[it] + literal[int]
identifier[eigvalold] = identifier[eigval]
identifier[eig] = identifier[toeplitz] . identifier[HERMTOEP] ( identifier[T0] , identifier[T] , identifier[eigvec] )
identifier[SUM] = literal[int]
identifier[save] = literal[int] + literal[int]
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[M] + literal[int] ):
identifier[SUM] = identifier[SUM] + identifier[eig] [ identifier[k] ]. identifier[real] ** literal[int] + identifier[eig] [ identifier[k] ]. identifier[imag] ** literal[int]
identifier[save] = identifier[save] + identifier[eig] [ identifier[k] ]* identifier[eigvec] [ identifier[k] ]. identifier[conjugate] ()
identifier[SUM] = literal[int] / identifier[SUM]
identifier[eigval] = identifier[save] . identifier[real] * identifier[SUM]
keyword[for] identifier[k] keyword[in] identifier[range] ( literal[int] , identifier[M] + literal[int] ):
identifier[eigvec] [ identifier[k] ]= identifier[SUM] * identifier[eig] [ identifier[k] ]
keyword[if] identifier[it] == identifier[maxit] :
identifier[print] ( literal[string] % identifier[maxit] )
keyword[return] identifier[eigval] , identifier[eigvec] | def MINEIGVAL(T0, T, TOL):
"""Finds the minimum eigenvalue of a Hermitian Toeplitz matrix
The classical power method is used together with a fast Toeplitz
equation solution routine. The eigenvector is normalized to unit length.
:param T0: Scalar corresponding to real matrix element t(0)
:param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix
:param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number.
:return:
* EVAL - Real scalar denoting the minimum eigenvalue of matrix
* EVEC - Array of M complex eigenvector elements associated
.. note::
* External array T must be dimensioned >= M
* array EVEC must be >= M+1
* Internal array E must be dimensioned >= M+1 .
* **dependencies**
* :meth:`spectrum.toeplitz.HERMTOEP`
"""
M = len(T)
eigval = 10
eigvalold = 1
eigvec = numpy.zeros(M + 1, dtype=complex)
for k in range(0, M + 1):
eigvec[k] = 1 + 0j # depends on [control=['for'], data=['k']]
it = 0
#print 'initialisation',T0, T, eigval, eigvec
maxit = 15
while abs(eigvalold - eigval) > TOL * eigvalold and it < maxit:
it = it + 1
eigvalold = eigval
#print 'iteration ',it, 'eigvalold=',eigvalold, 'eigval=', eigval
eig = toeplitz.HERMTOEP(T0, T, eigvec)
SUM = 0
save = 0.0 + 0j
for k in range(0, M + 1):
SUM = SUM + eig[k].real ** 2 + eig[k].imag ** 2
save = save + eig[k] * eigvec[k].conjugate() # depends on [control=['for'], data=['k']]
SUM = 1.0 / SUM
eigval = save.real * SUM
for k in range(0, M + 1):
eigvec[k] = SUM * eig[k] # depends on [control=['for'], data=['k']] # depends on [control=['while'], data=[]]
if it == maxit:
print('warning reached max number of iteration (%s)' % maxit) # depends on [control=['if'], data=['maxit']]
return (eigval, eigvec) |
def db_query(self, client_id, msg):
"""Perform a raw query on the task record database."""
content = msg['content']
query = content.get('query', {})
keys = content.get('keys', None)
buffers = []
empty = list()
try:
records = self.db.find_records(query, keys)
except Exception as e:
content = error.wrap_exception()
else:
# extract buffers from reply content:
if keys is not None:
buffer_lens = [] if 'buffers' in keys else None
result_buffer_lens = [] if 'result_buffers' in keys else None
else:
buffer_lens = None
result_buffer_lens = None
for rec in records:
# buffers may be None, so double check
b = rec.pop('buffers', empty) or empty
if buffer_lens is not None:
buffer_lens.append(len(b))
buffers.extend(b)
rb = rec.pop('result_buffers', empty) or empty
if result_buffer_lens is not None:
result_buffer_lens.append(len(rb))
buffers.extend(rb)
content = dict(status='ok', records=records, buffer_lens=buffer_lens,
result_buffer_lens=result_buffer_lens)
# self.log.debug (content)
self.session.send(self.query, "db_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers) | def function[db_query, parameter[self, client_id, msg]]:
constant[Perform a raw query on the task record database.]
variable[content] assign[=] call[name[msg]][constant[content]]
variable[query] assign[=] call[name[content].get, parameter[constant[query], dictionary[[], []]]]
variable[keys] assign[=] call[name[content].get, parameter[constant[keys], constant[None]]]
variable[buffers] assign[=] list[[]]
variable[empty] assign[=] call[name[list], parameter[]]
<ast.Try object at 0x7da204567940>
call[name[self].session.send, parameter[name[self].query, constant[db_reply]]] | keyword[def] identifier[db_query] ( identifier[self] , identifier[client_id] , identifier[msg] ):
literal[string]
identifier[content] = identifier[msg] [ literal[string] ]
identifier[query] = identifier[content] . identifier[get] ( literal[string] ,{})
identifier[keys] = identifier[content] . identifier[get] ( literal[string] , keyword[None] )
identifier[buffers] =[]
identifier[empty] = identifier[list] ()
keyword[try] :
identifier[records] = identifier[self] . identifier[db] . identifier[find_records] ( identifier[query] , identifier[keys] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[content] = identifier[error] . identifier[wrap_exception] ()
keyword[else] :
keyword[if] identifier[keys] keyword[is] keyword[not] keyword[None] :
identifier[buffer_lens] =[] keyword[if] literal[string] keyword[in] identifier[keys] keyword[else] keyword[None]
identifier[result_buffer_lens] =[] keyword[if] literal[string] keyword[in] identifier[keys] keyword[else] keyword[None]
keyword[else] :
identifier[buffer_lens] = keyword[None]
identifier[result_buffer_lens] = keyword[None]
keyword[for] identifier[rec] keyword[in] identifier[records] :
identifier[b] = identifier[rec] . identifier[pop] ( literal[string] , identifier[empty] ) keyword[or] identifier[empty]
keyword[if] identifier[buffer_lens] keyword[is] keyword[not] keyword[None] :
identifier[buffer_lens] . identifier[append] ( identifier[len] ( identifier[b] ))
identifier[buffers] . identifier[extend] ( identifier[b] )
identifier[rb] = identifier[rec] . identifier[pop] ( literal[string] , identifier[empty] ) keyword[or] identifier[empty]
keyword[if] identifier[result_buffer_lens] keyword[is] keyword[not] keyword[None] :
identifier[result_buffer_lens] . identifier[append] ( identifier[len] ( identifier[rb] ))
identifier[buffers] . identifier[extend] ( identifier[rb] )
identifier[content] = identifier[dict] ( identifier[status] = literal[string] , identifier[records] = identifier[records] , identifier[buffer_lens] = identifier[buffer_lens] ,
identifier[result_buffer_lens] = identifier[result_buffer_lens] )
identifier[self] . identifier[session] . identifier[send] ( identifier[self] . identifier[query] , literal[string] , identifier[content] = identifier[content] ,
identifier[parent] = identifier[msg] , identifier[ident] = identifier[client_id] ,
identifier[buffers] = identifier[buffers] ) | def db_query(self, client_id, msg):
"""Perform a raw query on the task record database."""
content = msg['content']
query = content.get('query', {})
keys = content.get('keys', None)
buffers = []
empty = list()
try:
records = self.db.find_records(query, keys) # depends on [control=['try'], data=[]]
except Exception as e:
content = error.wrap_exception() # depends on [control=['except'], data=[]]
else:
# extract buffers from reply content:
if keys is not None:
buffer_lens = [] if 'buffers' in keys else None
result_buffer_lens = [] if 'result_buffers' in keys else None # depends on [control=['if'], data=['keys']]
else:
buffer_lens = None
result_buffer_lens = None
for rec in records:
# buffers may be None, so double check
b = rec.pop('buffers', empty) or empty
if buffer_lens is not None:
buffer_lens.append(len(b))
buffers.extend(b) # depends on [control=['if'], data=['buffer_lens']]
rb = rec.pop('result_buffers', empty) or empty
if result_buffer_lens is not None:
result_buffer_lens.append(len(rb))
buffers.extend(rb) # depends on [control=['if'], data=['result_buffer_lens']] # depends on [control=['for'], data=['rec']]
content = dict(status='ok', records=records, buffer_lens=buffer_lens, result_buffer_lens=result_buffer_lens)
# self.log.debug (content)
self.session.send(self.query, 'db_reply', content=content, parent=msg, ident=client_id, buffers=buffers) |
def get(self):
"""
convert json env variable if set to list
"""
self._cast = type([])
source_value = os.getenv(self.env_name)
# set the environment if it is not set
if source_value is None:
os.environ[self.env_name] = json.dumps(self.default)
return self.default
try:
val = json.loads(source_value)
except JSONDecodeError as e:
click.secho(str(e), err=True, color='red')
sys.exit(1)
except ValueError as e:
click.secho(e.message, err=True, color='red')
sys.exit(1)
if self.validator:
val = self.validator(val)
return val | def function[get, parameter[self]]:
constant[
convert json env variable if set to list
]
name[self]._cast assign[=] call[name[type], parameter[list[[]]]]
variable[source_value] assign[=] call[name[os].getenv, parameter[name[self].env_name]]
if compare[name[source_value] is constant[None]] begin[:]
call[name[os].environ][name[self].env_name] assign[=] call[name[json].dumps, parameter[name[self].default]]
return[name[self].default]
<ast.Try object at 0x7da2041daef0>
if name[self].validator begin[:]
variable[val] assign[=] call[name[self].validator, parameter[name[val]]]
return[name[val]] | keyword[def] identifier[get] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_cast] = identifier[type] ([])
identifier[source_value] = identifier[os] . identifier[getenv] ( identifier[self] . identifier[env_name] )
keyword[if] identifier[source_value] keyword[is] keyword[None] :
identifier[os] . identifier[environ] [ identifier[self] . identifier[env_name] ]= identifier[json] . identifier[dumps] ( identifier[self] . identifier[default] )
keyword[return] identifier[self] . identifier[default]
keyword[try] :
identifier[val] = identifier[json] . identifier[loads] ( identifier[source_value] )
keyword[except] identifier[JSONDecodeError] keyword[as] identifier[e] :
identifier[click] . identifier[secho] ( identifier[str] ( identifier[e] ), identifier[err] = keyword[True] , identifier[color] = literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[click] . identifier[secho] ( identifier[e] . identifier[message] , identifier[err] = keyword[True] , identifier[color] = literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[self] . identifier[validator] :
identifier[val] = identifier[self] . identifier[validator] ( identifier[val] )
keyword[return] identifier[val] | def get(self):
"""
convert json env variable if set to list
"""
self._cast = type([])
source_value = os.getenv(self.env_name)
# set the environment if it is not set
if source_value is None:
os.environ[self.env_name] = json.dumps(self.default)
return self.default # depends on [control=['if'], data=[]]
try:
val = json.loads(source_value) # depends on [control=['try'], data=[]]
except JSONDecodeError as e:
click.secho(str(e), err=True, color='red')
sys.exit(1) # depends on [control=['except'], data=['e']]
except ValueError as e:
click.secho(e.message, err=True, color='red')
sys.exit(1) # depends on [control=['except'], data=['e']]
if self.validator:
val = self.validator(val) # depends on [control=['if'], data=[]]
return val |
def select(self, timeout):
'''wait for up to timeout seconds for more data'''
if self.fd is None:
time.sleep(min(timeout,0.5))
return True
try:
(rin, win, xin) = select.select([self.fd], [], [], timeout)
except select.error:
return False
return len(rin) == 1 | def function[select, parameter[self, timeout]]:
constant[wait for up to timeout seconds for more data]
if compare[name[self].fd is constant[None]] begin[:]
call[name[time].sleep, parameter[call[name[min], parameter[name[timeout], constant[0.5]]]]]
return[constant[True]]
<ast.Try object at 0x7da1b2345a80>
return[compare[call[name[len], parameter[name[rin]]] equal[==] constant[1]]] | keyword[def] identifier[select] ( identifier[self] , identifier[timeout] ):
literal[string]
keyword[if] identifier[self] . identifier[fd] keyword[is] keyword[None] :
identifier[time] . identifier[sleep] ( identifier[min] ( identifier[timeout] , literal[int] ))
keyword[return] keyword[True]
keyword[try] :
( identifier[rin] , identifier[win] , identifier[xin] )= identifier[select] . identifier[select] ([ identifier[self] . identifier[fd] ],[],[], identifier[timeout] )
keyword[except] identifier[select] . identifier[error] :
keyword[return] keyword[False]
keyword[return] identifier[len] ( identifier[rin] )== literal[int] | def select(self, timeout):
"""wait for up to timeout seconds for more data"""
if self.fd is None:
time.sleep(min(timeout, 0.5))
return True # depends on [control=['if'], data=[]]
try:
(rin, win, xin) = select.select([self.fd], [], [], timeout) # depends on [control=['try'], data=[]]
except select.error:
return False # depends on [control=['except'], data=[]]
return len(rin) == 1 |
def make_choice2(cls, choices, transform=None, strict=None):
"""
Creates a type converter to select one item from a list of strings.
The type converter function returns a tuple (index, choice_text).
:param choices: List of strings as choice.
:param transform: Optional, initial transform function for parsed text.
:return: Type converter function object for this choices.
"""
choices = cls._normalize_choices(choices, transform)
if strict is None:
strict = cls.default_strict
def convert_choice2(text):
if transform:
text = transform(text)
if strict and not (text in convert_choice2.choices):
values = ", ".join(convert_choice2.choices)
raise ValueError("%s not in: %s" % (text, values))
index = convert_choice2.choices.index(text)
return index, text
convert_choice2.pattern = r"|".join(choices)
convert_choice2.choices = choices
return convert_choice2 | def function[make_choice2, parameter[cls, choices, transform, strict]]:
constant[
Creates a type converter to select one item from a list of strings.
The type converter function returns a tuple (index, choice_text).
:param choices: List of strings as choice.
:param transform: Optional, initial transform function for parsed text.
:return: Type converter function object for this choices.
]
variable[choices] assign[=] call[name[cls]._normalize_choices, parameter[name[choices], name[transform]]]
if compare[name[strict] is constant[None]] begin[:]
variable[strict] assign[=] name[cls].default_strict
def function[convert_choice2, parameter[text]]:
if name[transform] begin[:]
variable[text] assign[=] call[name[transform], parameter[name[text]]]
if <ast.BoolOp object at 0x7da1b2456920> begin[:]
variable[values] assign[=] call[constant[, ].join, parameter[name[convert_choice2].choices]]
<ast.Raise object at 0x7da1b24573d0>
variable[index] assign[=] call[name[convert_choice2].choices.index, parameter[name[text]]]
return[tuple[[<ast.Name object at 0x7da1b2457ac0>, <ast.Name object at 0x7da1b24575b0>]]]
name[convert_choice2].pattern assign[=] call[constant[|].join, parameter[name[choices]]]
name[convert_choice2].choices assign[=] name[choices]
return[name[convert_choice2]] | keyword[def] identifier[make_choice2] ( identifier[cls] , identifier[choices] , identifier[transform] = keyword[None] , identifier[strict] = keyword[None] ):
literal[string]
identifier[choices] = identifier[cls] . identifier[_normalize_choices] ( identifier[choices] , identifier[transform] )
keyword[if] identifier[strict] keyword[is] keyword[None] :
identifier[strict] = identifier[cls] . identifier[default_strict]
keyword[def] identifier[convert_choice2] ( identifier[text] ):
keyword[if] identifier[transform] :
identifier[text] = identifier[transform] ( identifier[text] )
keyword[if] identifier[strict] keyword[and] keyword[not] ( identifier[text] keyword[in] identifier[convert_choice2] . identifier[choices] ):
identifier[values] = literal[string] . identifier[join] ( identifier[convert_choice2] . identifier[choices] )
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[text] , identifier[values] ))
identifier[index] = identifier[convert_choice2] . identifier[choices] . identifier[index] ( identifier[text] )
keyword[return] identifier[index] , identifier[text]
identifier[convert_choice2] . identifier[pattern] = literal[string] . identifier[join] ( identifier[choices] )
identifier[convert_choice2] . identifier[choices] = identifier[choices]
keyword[return] identifier[convert_choice2] | def make_choice2(cls, choices, transform=None, strict=None):
"""
Creates a type converter to select one item from a list of strings.
The type converter function returns a tuple (index, choice_text).
:param choices: List of strings as choice.
:param transform: Optional, initial transform function for parsed text.
:return: Type converter function object for this choices.
"""
choices = cls._normalize_choices(choices, transform)
if strict is None:
strict = cls.default_strict # depends on [control=['if'], data=['strict']]
def convert_choice2(text):
if transform:
text = transform(text) # depends on [control=['if'], data=[]]
if strict and (not text in convert_choice2.choices):
values = ', '.join(convert_choice2.choices)
raise ValueError('%s not in: %s' % (text, values)) # depends on [control=['if'], data=[]]
index = convert_choice2.choices.index(text)
return (index, text)
convert_choice2.pattern = '|'.join(choices)
convert_choice2.choices = choices
return convert_choice2 |
def ensure_unicode(text):
u"""helper to ensure that text passed to WriteConsoleW is unicode"""
if isinstance(text, str):
try:
return text.decode(pyreadline_codepage, u"replace")
except (LookupError, TypeError):
return text.decode(u"ascii", u"replace")
return text | def function[ensure_unicode, parameter[text]]:
constant[helper to ensure that text passed to WriteConsoleW is unicode]
if call[name[isinstance], parameter[name[text], name[str]]] begin[:]
<ast.Try object at 0x7da18f723460>
return[name[text]] | keyword[def] identifier[ensure_unicode] ( identifier[text] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[text] , identifier[str] ):
keyword[try] :
keyword[return] identifier[text] . identifier[decode] ( identifier[pyreadline_codepage] , literal[string] )
keyword[except] ( identifier[LookupError] , identifier[TypeError] ):
keyword[return] identifier[text] . identifier[decode] ( literal[string] , literal[string] )
keyword[return] identifier[text] | def ensure_unicode(text):
u"""helper to ensure that text passed to WriteConsoleW is unicode"""
if isinstance(text, str):
try:
return text.decode(pyreadline_codepage, u'replace') # depends on [control=['try'], data=[]]
except (LookupError, TypeError):
return text.decode(u'ascii', u'replace') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return text |
def write_shiftfile(image_list, filename, outwcs='tweak_wcs.fits'):
""" Write out a shiftfile for a given list of input Image class objects
"""
rows = ''
nrows = 0
for img in image_list:
row = img.get_shiftfile_row()
if row is not None:
rows += row
nrows += 1
if nrows == 0: # If there are no fits to report, do not write out a file
return
# write out reference WCS now
if os.path.exists(outwcs):
os.remove(outwcs)
p = fits.HDUList()
p.append(fits.PrimaryHDU())
p.append(createWcsHDU(image_list[0].refWCS))
p.writeto(outwcs)
# Write out shiftfile to go with reference WCS
with open(filename, 'w') as f:
f.write('# frame: output\n')
f.write('# refimage: %s[wcs]\n' % outwcs)
f.write('# form: delta\n')
f.write('# units: pixels\n')
f.write(rows)
print('Writing out shiftfile :', filename) | def function[write_shiftfile, parameter[image_list, filename, outwcs]]:
constant[ Write out a shiftfile for a given list of input Image class objects
]
variable[rows] assign[=] constant[]
variable[nrows] assign[=] constant[0]
for taget[name[img]] in starred[name[image_list]] begin[:]
variable[row] assign[=] call[name[img].get_shiftfile_row, parameter[]]
if compare[name[row] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1a44640>
<ast.AugAssign object at 0x7da1b1a46c50>
if compare[name[nrows] equal[==] constant[0]] begin[:]
return[None]
if call[name[os].path.exists, parameter[name[outwcs]]] begin[:]
call[name[os].remove, parameter[name[outwcs]]]
variable[p] assign[=] call[name[fits].HDUList, parameter[]]
call[name[p].append, parameter[call[name[fits].PrimaryHDU, parameter[]]]]
call[name[p].append, parameter[call[name[createWcsHDU], parameter[call[name[image_list]][constant[0]].refWCS]]]]
call[name[p].writeto, parameter[name[outwcs]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[f].write, parameter[constant[# frame: output
]]]
call[name[f].write, parameter[binary_operation[constant[# refimage: %s[wcs]
] <ast.Mod object at 0x7da2590d6920> name[outwcs]]]]
call[name[f].write, parameter[constant[# form: delta
]]]
call[name[f].write, parameter[constant[# units: pixels
]]]
call[name[f].write, parameter[name[rows]]]
call[name[print], parameter[constant[Writing out shiftfile :], name[filename]]] | keyword[def] identifier[write_shiftfile] ( identifier[image_list] , identifier[filename] , identifier[outwcs] = literal[string] ):
literal[string]
identifier[rows] = literal[string]
identifier[nrows] = literal[int]
keyword[for] identifier[img] keyword[in] identifier[image_list] :
identifier[row] = identifier[img] . identifier[get_shiftfile_row] ()
keyword[if] identifier[row] keyword[is] keyword[not] keyword[None] :
identifier[rows] += identifier[row]
identifier[nrows] += literal[int]
keyword[if] identifier[nrows] == literal[int] :
keyword[return]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[outwcs] ):
identifier[os] . identifier[remove] ( identifier[outwcs] )
identifier[p] = identifier[fits] . identifier[HDUList] ()
identifier[p] . identifier[append] ( identifier[fits] . identifier[PrimaryHDU] ())
identifier[p] . identifier[append] ( identifier[createWcsHDU] ( identifier[image_list] [ literal[int] ]. identifier[refWCS] ))
identifier[p] . identifier[writeto] ( identifier[outwcs] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] % identifier[outwcs] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( literal[string] )
identifier[f] . identifier[write] ( identifier[rows] )
identifier[print] ( literal[string] , identifier[filename] ) | def write_shiftfile(image_list, filename, outwcs='tweak_wcs.fits'):
""" Write out a shiftfile for a given list of input Image class objects
"""
rows = ''
nrows = 0
for img in image_list:
row = img.get_shiftfile_row()
if row is not None:
rows += row
nrows += 1 # depends on [control=['if'], data=['row']] # depends on [control=['for'], data=['img']]
if nrows == 0: # If there are no fits to report, do not write out a file
return # depends on [control=['if'], data=[]]
# write out reference WCS now
if os.path.exists(outwcs):
os.remove(outwcs) # depends on [control=['if'], data=[]]
p = fits.HDUList()
p.append(fits.PrimaryHDU())
p.append(createWcsHDU(image_list[0].refWCS))
p.writeto(outwcs)
# Write out shiftfile to go with reference WCS
with open(filename, 'w') as f:
f.write('# frame: output\n')
f.write('# refimage: %s[wcs]\n' % outwcs)
f.write('# form: delta\n')
f.write('# units: pixels\n')
f.write(rows) # depends on [control=['with'], data=['f']]
print('Writing out shiftfile :', filename) |
def stop_regularly_cleanup(self, only_read=False):
"""
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.stop_regularly_cleanup()
True
>>> cache.stop_regularly_cleanup()
False
"""
if hasattr(self, 'cleanup_supervisor') and self.cleanup_supervisor is not None:
self.cleanup_supervisor.stop()
self.logger.debug('Regularly cleanup thread %s is closed' % self.cleanup_supervisor.name)
self.cleanup_supervisor = None
return True
else:
self.logger.warning('Current not have a regularly cleanup thread is existent')
return False | def function[stop_regularly_cleanup, parameter[self, only_read]]:
constant[
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.stop_regularly_cleanup()
True
>>> cache.stop_regularly_cleanup()
False
]
if <ast.BoolOp object at 0x7da1b258ac20> begin[:]
call[name[self].cleanup_supervisor.stop, parameter[]]
call[name[self].logger.debug, parameter[binary_operation[constant[Regularly cleanup thread %s is closed] <ast.Mod object at 0x7da2590d6920> name[self].cleanup_supervisor.name]]]
name[self].cleanup_supervisor assign[=] constant[None]
return[constant[True]] | keyword[def] identifier[stop_regularly_cleanup] ( identifier[self] , identifier[only_read] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[cleanup_supervisor] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[cleanup_supervisor] . identifier[stop] ()
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[cleanup_supervisor] . identifier[name] )
identifier[self] . identifier[cleanup_supervisor] = keyword[None]
keyword[return] keyword[True]
keyword[else] :
identifier[self] . identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False] | def stop_regularly_cleanup(self, only_read=False):
"""
>>> cache = Cache(log_level=logging.WARNING)
>>> cache.stop_regularly_cleanup()
True
>>> cache.stop_regularly_cleanup()
False
"""
if hasattr(self, 'cleanup_supervisor') and self.cleanup_supervisor is not None:
self.cleanup_supervisor.stop()
self.logger.debug('Regularly cleanup thread %s is closed' % self.cleanup_supervisor.name)
self.cleanup_supervisor = None
return True # depends on [control=['if'], data=[]]
else:
self.logger.warning('Current not have a regularly cleanup thread is existent')
return False |
def retrieve_sources():
"""Retrieve sources using spectool
"""
spectool = find_executable('spectool')
if not spectool:
log.warn('spectool is not installed')
return
try:
specfile = spec_fn()
except Exception:
return
cmd = [spectool, "-g", specfile]
output = subprocess.check_output(' '.join(cmd), shell=True)
log.warn(output) | def function[retrieve_sources, parameter[]]:
constant[Retrieve sources using spectool
]
variable[spectool] assign[=] call[name[find_executable], parameter[constant[spectool]]]
if <ast.UnaryOp object at 0x7da1b1b683d0> begin[:]
call[name[log].warn, parameter[constant[spectool is not installed]]]
return[None]
<ast.Try object at 0x7da1b1b6b5b0>
variable[cmd] assign[=] list[[<ast.Name object at 0x7da1b1b6a170>, <ast.Constant object at 0x7da1b1b698a0>, <ast.Name object at 0x7da1b1b6b3d0>]]
variable[output] assign[=] call[name[subprocess].check_output, parameter[call[constant[ ].join, parameter[name[cmd]]]]]
call[name[log].warn, parameter[name[output]]] | keyword[def] identifier[retrieve_sources] ():
literal[string]
identifier[spectool] = identifier[find_executable] ( literal[string] )
keyword[if] keyword[not] identifier[spectool] :
identifier[log] . identifier[warn] ( literal[string] )
keyword[return]
keyword[try] :
identifier[specfile] = identifier[spec_fn] ()
keyword[except] identifier[Exception] :
keyword[return]
identifier[cmd] =[ identifier[spectool] , literal[string] , identifier[specfile] ]
identifier[output] = identifier[subprocess] . identifier[check_output] ( literal[string] . identifier[join] ( identifier[cmd] ), identifier[shell] = keyword[True] )
identifier[log] . identifier[warn] ( identifier[output] ) | def retrieve_sources():
"""Retrieve sources using spectool
"""
spectool = find_executable('spectool')
if not spectool:
log.warn('spectool is not installed')
return # depends on [control=['if'], data=[]]
try:
specfile = spec_fn() # depends on [control=['try'], data=[]]
except Exception:
return # depends on [control=['except'], data=[]]
cmd = [spectool, '-g', specfile]
output = subprocess.check_output(' '.join(cmd), shell=True)
log.warn(output) |
def gradient_summaries(grad_vars, groups=None, scope='gradients'):
"""Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variables names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
"""
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for grad, var in grad_vars:
if grad is None:
continue
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(grad)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
for name, grads in grouped.items():
grads = [tf.reshape(grad, [-1]) for grad in grads]
grads = tf.concat(grads, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, grads))
return tf.summary.merge(summaries) | def function[gradient_summaries, parameter[grad_vars, groups, scope]]:
constant[Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variables names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
]
variable[groups] assign[=] <ast.BoolOp object at 0x7da1b26acf10>
variable[grouped] assign[=] call[name[collections].defaultdict, parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da1b26af460>, <ast.Name object at 0x7da1b26aeef0>]]] in starred[name[grad_vars]] begin[:]
if compare[name[grad] is constant[None]] begin[:]
continue
for taget[tuple[[<ast.Name object at 0x7da1b26af610>, <ast.Name object at 0x7da1b26ade40>]]] in starred[call[name[groups].items, parameter[]]] begin[:]
if call[name[re].match, parameter[name[pattern], name[var].name]] begin[:]
variable[name] assign[=] call[name[re].sub, parameter[name[pattern], name[name], name[var].name]]
call[call[name[grouped]][name[name]].append, parameter[name[grad]]]
for taget[name[name]] in starred[name[groups]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[grouped]] begin[:]
call[name[tf].logging.warn, parameter[call[constant[No variables matching '{}' group.].format, parameter[name[name]]]]]
variable[summaries] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c796b30>, <ast.Name object at 0x7da20c795c60>]]] in starred[call[name[grouped].items, parameter[]]] begin[:]
variable[grads] assign[=] <ast.ListComp object at 0x7da20c795330>
variable[grads] assign[=] call[name[tf].concat, parameter[name[grads], constant[0]]]
call[name[summaries].append, parameter[call[name[tf].summary.histogram, parameter[binary_operation[binary_operation[name[scope] + constant[/]] + name[name]], name[grads]]]]]
return[call[name[tf].summary.merge, parameter[name[summaries]]]] | keyword[def] identifier[gradient_summaries] ( identifier[grad_vars] , identifier[groups] = keyword[None] , identifier[scope] = literal[string] ):
literal[string]
identifier[groups] = identifier[groups] keyword[or] { literal[string] : literal[string] }
identifier[grouped] = identifier[collections] . identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[grad] , identifier[var] keyword[in] identifier[grad_vars] :
keyword[if] identifier[grad] keyword[is] keyword[None] :
keyword[continue]
keyword[for] identifier[name] , identifier[pattern] keyword[in] identifier[groups] . identifier[items] ():
keyword[if] identifier[re] . identifier[match] ( identifier[pattern] , identifier[var] . identifier[name] ):
identifier[name] = identifier[re] . identifier[sub] ( identifier[pattern] , identifier[name] , identifier[var] . identifier[name] )
identifier[grouped] [ identifier[name] ]. identifier[append] ( identifier[grad] )
keyword[for] identifier[name] keyword[in] identifier[groups] :
keyword[if] identifier[name] keyword[not] keyword[in] identifier[grouped] :
identifier[tf] . identifier[logging] . identifier[warn] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[summaries] =[]
keyword[for] identifier[name] , identifier[grads] keyword[in] identifier[grouped] . identifier[items] ():
identifier[grads] =[ identifier[tf] . identifier[reshape] ( identifier[grad] ,[- literal[int] ]) keyword[for] identifier[grad] keyword[in] identifier[grads] ]
identifier[grads] = identifier[tf] . identifier[concat] ( identifier[grads] , literal[int] )
identifier[summaries] . identifier[append] ( identifier[tf] . identifier[summary] . identifier[histogram] ( identifier[scope] + literal[string] + identifier[name] , identifier[grads] ))
keyword[return] identifier[tf] . identifier[summary] . identifier[merge] ( identifier[summaries] ) | def gradient_summaries(grad_vars, groups=None, scope='gradients'):
"""Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variables names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
"""
groups = groups or {'all': '.*'}
grouped = collections.defaultdict(list)
for (grad, var) in grad_vars:
if grad is None:
continue # depends on [control=['if'], data=[]]
for (name, pattern) in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(grad) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name)) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['name']]
summaries = []
for (name, grads) in grouped.items():
grads = [tf.reshape(grad, [-1]) for grad in grads]
grads = tf.concat(grads, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, grads)) # depends on [control=['for'], data=[]]
return tf.summary.merge(summaries) |
def alter(self, id_option_vip, tipo_opcao, nome_opcao_txt):
"""Change Option VIP from by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null and invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(
u'The identifier of Option VIP is invalid or was not informed.')
optionvip_map = dict()
optionvip_map['tipo_opcao'] = tipo_opcao
optionvip_map['nome_opcao_txt'] = nome_opcao_txt
url = 'optionvip/' + str(id_option_vip) + '/'
code, xml = self.submit({'option_vip': optionvip_map}, 'PUT', url)
return self.response(code, xml) | def function[alter, parameter[self, id_option_vip, tipo_opcao, nome_opcao_txt]]:
constant[Change Option VIP from by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null and invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
]
if <ast.UnaryOp object at 0x7da20c7ca9b0> begin[:]
<ast.Raise object at 0x7da20c7c98a0>
variable[optionvip_map] assign[=] call[name[dict], parameter[]]
call[name[optionvip_map]][constant[tipo_opcao]] assign[=] name[tipo_opcao]
call[name[optionvip_map]][constant[nome_opcao_txt]] assign[=] name[nome_opcao_txt]
variable[url] assign[=] binary_operation[binary_operation[constant[optionvip/] + call[name[str], parameter[name[id_option_vip]]]] + constant[/]]
<ast.Tuple object at 0x7da1b2345390> assign[=] call[name[self].submit, parameter[dictionary[[<ast.Constant object at 0x7da1b2344eb0>], [<ast.Name object at 0x7da1b23458d0>]], constant[PUT], name[url]]]
return[call[name[self].response, parameter[name[code], name[xml]]]] | keyword[def] identifier[alter] ( identifier[self] , identifier[id_option_vip] , identifier[tipo_opcao] , identifier[nome_opcao_txt] ):
literal[string]
keyword[if] keyword[not] identifier[is_valid_int_param] ( identifier[id_option_vip] ):
keyword[raise] identifier[InvalidParameterError] (
literal[string] )
identifier[optionvip_map] = identifier[dict] ()
identifier[optionvip_map] [ literal[string] ]= identifier[tipo_opcao]
identifier[optionvip_map] [ literal[string] ]= identifier[nome_opcao_txt]
identifier[url] = literal[string] + identifier[str] ( identifier[id_option_vip] )+ literal[string]
identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ({ literal[string] : identifier[optionvip_map] }, literal[string] , identifier[url] )
keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] ) | def alter(self, id_option_vip, tipo_opcao, nome_opcao_txt):
"""Change Option VIP from by the identifier.
:param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\\_-]
:param nome_opcao_txt: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\\_-]
:return: None
:raise InvalidParameterError: Option VIP identifier is null and invalid.
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise OptionVipNotFoundError: Option VIP not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_option_vip):
raise InvalidParameterError(u'The identifier of Option VIP is invalid or was not informed.') # depends on [control=['if'], data=[]]
optionvip_map = dict()
optionvip_map['tipo_opcao'] = tipo_opcao
optionvip_map['nome_opcao_txt'] = nome_opcao_txt
url = 'optionvip/' + str(id_option_vip) + '/'
(code, xml) = self.submit({'option_vip': optionvip_map}, 'PUT', url)
return self.response(code, xml) |
def retrieve():
"""RETRIEVE Section 9.3.20"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x1c) # 00011100
packet = a / b
return packet | def function[retrieve, parameter[]]:
constant[RETRIEVE Section 9.3.20]
variable[a] assign[=] call[name[TpPd], parameter[]]
variable[b] assign[=] call[name[MessageType], parameter[]]
variable[packet] assign[=] binary_operation[name[a] / name[b]]
return[name[packet]] | keyword[def] identifier[retrieve] ():
literal[string]
identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] )
identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] )
identifier[packet] = identifier[a] / identifier[b]
keyword[return] identifier[packet] | def retrieve():
"""RETRIEVE Section 9.3.20"""
a = TpPd(pd=3)
b = MessageType(mesType=28) # 00011100
packet = a / b
return packet |
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key]) | def function[select_scheme, parameter[self, name]]:
constant[Sets the install directories by applying the install schemes.]
variable[scheme] assign[=] call[name[INSTALL_SCHEMES]][name[name]]
for taget[name[key]] in starred[name[SCHEME_KEYS]] begin[:]
variable[attrname] assign[=] binary_operation[constant[install_] + name[key]]
if compare[call[name[getattr], parameter[name[self], name[attrname]]] is constant[None]] begin[:]
call[name[setattr], parameter[name[self], name[attrname], call[name[scheme]][name[key]]]] | keyword[def] identifier[select_scheme] ( identifier[self] , identifier[name] ):
literal[string]
identifier[scheme] = identifier[INSTALL_SCHEMES] [ identifier[name] ]
keyword[for] identifier[key] keyword[in] identifier[SCHEME_KEYS] :
identifier[attrname] = literal[string] + identifier[key]
keyword[if] identifier[getattr] ( identifier[self] , identifier[attrname] ) keyword[is] keyword[None] :
identifier[setattr] ( identifier[self] , identifier[attrname] , identifier[scheme] [ identifier[key] ]) | def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] |
def remove(self, key):
"""
Clear a column value in the object. Note that this happens immediately:
it does not wait for save() to be called.
"""
payload = {
key: {
'__op': 'Delete'
}
}
self.__class__.PUT(self._absolute_url, **payload)
del self.__dict__[key] | def function[remove, parameter[self, key]]:
constant[
Clear a column value in the object. Note that this happens immediately:
it does not wait for save() to be called.
]
variable[payload] assign[=] dictionary[[<ast.Name object at 0x7da20c993e20>], [<ast.Dict object at 0x7da20c991960>]]
call[name[self].__class__.PUT, parameter[name[self]._absolute_url]]
<ast.Delete object at 0x7da1b26af130> | keyword[def] identifier[remove] ( identifier[self] , identifier[key] ):
literal[string]
identifier[payload] ={
identifier[key] :{
literal[string] : literal[string]
}
}
identifier[self] . identifier[__class__] . identifier[PUT] ( identifier[self] . identifier[_absolute_url] ,** identifier[payload] )
keyword[del] identifier[self] . identifier[__dict__] [ identifier[key] ] | def remove(self, key):
"""
Clear a column value in the object. Note that this happens immediately:
it does not wait for save() to be called.
"""
payload = {key: {'__op': 'Delete'}}
self.__class__.PUT(self._absolute_url, **payload)
del self.__dict__[key] |
def transform_describe_body(self, body, group_var):
"""
Transform the body of an ``ExampleGroup``.
``body`` is the body.
``group_var`` is the name bound to the example group in the context
manager (usually "it").
"""
for node in body:
withitem, = node.items
context_expr = withitem.context_expr
name = context_expr.args[0].s
context_var = withitem.optional_vars.id
yield self.transform_example(node, name, context_var, group_var) | def function[transform_describe_body, parameter[self, body, group_var]]:
constant[
Transform the body of an ``ExampleGroup``.
``body`` is the body.
``group_var`` is the name bound to the example group in the context
manager (usually "it").
]
for taget[name[node]] in starred[name[body]] begin[:]
<ast.Tuple object at 0x7da20c794070> assign[=] name[node].items
variable[context_expr] assign[=] name[withitem].context_expr
variable[name] assign[=] call[name[context_expr].args][constant[0]].s
variable[context_var] assign[=] name[withitem].optional_vars.id
<ast.Yield object at 0x7da20c794f10> | keyword[def] identifier[transform_describe_body] ( identifier[self] , identifier[body] , identifier[group_var] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[body] :
identifier[withitem] ,= identifier[node] . identifier[items]
identifier[context_expr] = identifier[withitem] . identifier[context_expr]
identifier[name] = identifier[context_expr] . identifier[args] [ literal[int] ]. identifier[s]
identifier[context_var] = identifier[withitem] . identifier[optional_vars] . identifier[id]
keyword[yield] identifier[self] . identifier[transform_example] ( identifier[node] , identifier[name] , identifier[context_var] , identifier[group_var] ) | def transform_describe_body(self, body, group_var):
"""
Transform the body of an ``ExampleGroup``.
``body`` is the body.
``group_var`` is the name bound to the example group in the context
manager (usually "it").
"""
for node in body:
(withitem,) = node.items
context_expr = withitem.context_expr
name = context_expr.args[0].s
context_var = withitem.optional_vars.id
yield self.transform_example(node, name, context_var, group_var) # depends on [control=['for'], data=['node']] |
def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = []
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return "OK"
else:
return self._modifyItemTag(item.id, 'a', tag) | def function[addItemTag, parameter[self, item, tag]]:
constant[
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
]
if name[self].inItemTagTransaction begin[:]
if <ast.UnaryOp object at 0x7da18c4cebf0> begin[:]
call[name[self].addTagBacklog][name[tag]] assign[=] list[[]]
call[call[name[self].addTagBacklog][name[tag]].append, parameter[dictionary[[<ast.Constant object at 0x7da18c4ccc10>, <ast.Constant object at 0x7da18c4ce710>], [<ast.Attribute object at 0x7da18c4cf670>, <ast.Attribute object at 0x7da18c4cfe50>]]]]
return[constant[OK]] | keyword[def] identifier[addItemTag] ( identifier[self] , identifier[item] , identifier[tag] ):
literal[string]
keyword[if] identifier[self] . identifier[inItemTagTransaction] :
keyword[if] keyword[not] identifier[tag] keyword[in] identifier[self] . identifier[addTagBacklog] :
identifier[self] . identifier[addTagBacklog] [ identifier[tag] ]=[]
identifier[self] . identifier[addTagBacklog] [ identifier[tag] ]. identifier[append] ({ literal[string] : identifier[item] . identifier[id] , literal[string] : identifier[item] . identifier[parent] . identifier[id] })
keyword[return] literal[string]
keyword[else] :
keyword[return] identifier[self] . identifier[_modifyItemTag] ( identifier[item] . identifier[id] , literal[string] , identifier[tag] ) | def addItemTag(self, item, tag):
"""
Add a tag to an individal item.
tag string must be in form "user/-/label/[tag]"
"""
if self.inItemTagTransaction:
# XXX: what if item's parent is not a feed?
if not tag in self.addTagBacklog:
self.addTagBacklog[tag] = [] # depends on [control=['if'], data=[]]
self.addTagBacklog[tag].append({'i': item.id, 's': item.parent.id})
return 'OK' # depends on [control=['if'], data=[]]
else:
return self._modifyItemTag(item.id, 'a', tag) |
def to_array_list(df, length=None, by_id=True):
"""Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
:return:
"""
if by_id:
assert 'id' in df.columns
# if `id` is the only column, don't sort it (and don't remove it)
if len(df.columns) == 1:
by_id = False
idx = df.index.unique()
if length is None:
length = max(idx) + 1
l = [np.empty(0) for _ in xrange(length)]
for i in idx:
a = df.loc[i]
if by_id:
if isinstance(a, pd.Series):
a = a[1:]
else:
a = a.copy().set_index('id').sort_index()
l[i] = a.values.reshape((-1, a.shape[-1]))
return np.asarray(l) | def function[to_array_list, parameter[df, length, by_id]]:
constant[Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
:return:
]
if name[by_id] begin[:]
assert[compare[constant[id] in name[df].columns]]
if compare[call[name[len], parameter[name[df].columns]] equal[==] constant[1]] begin[:]
variable[by_id] assign[=] constant[False]
variable[idx] assign[=] call[name[df].index.unique, parameter[]]
if compare[name[length] is constant[None]] begin[:]
variable[length] assign[=] binary_operation[call[name[max], parameter[name[idx]]] + constant[1]]
variable[l] assign[=] <ast.ListComp object at 0x7da18f813580>
for taget[name[i]] in starred[name[idx]] begin[:]
variable[a] assign[=] call[name[df].loc][name[i]]
if name[by_id] begin[:]
if call[name[isinstance], parameter[name[a], name[pd].Series]] begin[:]
variable[a] assign[=] call[name[a]][<ast.Slice object at 0x7da18f811ff0>]
call[name[l]][name[i]] assign[=] call[name[a].values.reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da18f813e20>, <ast.Subscript object at 0x7da18f812650>]]]]
return[call[name[np].asarray, parameter[name[l]]]] | keyword[def] identifier[to_array_list] ( identifier[df] , identifier[length] = keyword[None] , identifier[by_id] = keyword[True] ):
literal[string]
keyword[if] identifier[by_id] :
keyword[assert] literal[string] keyword[in] identifier[df] . identifier[columns]
keyword[if] identifier[len] ( identifier[df] . identifier[columns] )== literal[int] :
identifier[by_id] = keyword[False]
identifier[idx] = identifier[df] . identifier[index] . identifier[unique] ()
keyword[if] identifier[length] keyword[is] keyword[None] :
identifier[length] = identifier[max] ( identifier[idx] )+ literal[int]
identifier[l] =[ identifier[np] . identifier[empty] ( literal[int] ) keyword[for] identifier[_] keyword[in] identifier[xrange] ( identifier[length] )]
keyword[for] identifier[i] keyword[in] identifier[idx] :
identifier[a] = identifier[df] . identifier[loc] [ identifier[i] ]
keyword[if] identifier[by_id] :
keyword[if] identifier[isinstance] ( identifier[a] , identifier[pd] . identifier[Series] ):
identifier[a] = identifier[a] [ literal[int] :]
keyword[else] :
identifier[a] = identifier[a] . identifier[copy] (). identifier[set_index] ( literal[string] ). identifier[sort_index] ()
identifier[l] [ identifier[i] ]= identifier[a] . identifier[values] . identifier[reshape] ((- literal[int] , identifier[a] . identifier[shape] [- literal[int] ]))
keyword[return] identifier[np] . identifier[asarray] ( identifier[l] ) | def to_array_list(df, length=None, by_id=True):
"""Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
:return:
"""
if by_id:
assert 'id' in df.columns
# if `id` is the only column, don't sort it (and don't remove it)
if len(df.columns) == 1:
by_id = False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
idx = df.index.unique()
if length is None:
length = max(idx) + 1 # depends on [control=['if'], data=['length']]
l = [np.empty(0) for _ in xrange(length)]
for i in idx:
a = df.loc[i]
if by_id:
if isinstance(a, pd.Series):
a = a[1:] # depends on [control=['if'], data=[]]
else:
a = a.copy().set_index('id').sort_index() # depends on [control=['if'], data=[]]
l[i] = a.values.reshape((-1, a.shape[-1])) # depends on [control=['for'], data=['i']]
return np.asarray(l) |
def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from) | def function[resume, parameter[config_path, restore_from, cl_arguments, output_root]]:
constant[
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
]
variable[config] assign[=] constant[None]
<ast.Try object at 0x7da20c6aa830>
call[name[run], parameter[]] | keyword[def] identifier[resume] ( identifier[config_path] : identifier[str] , identifier[restore_from] : identifier[Optional] [ identifier[str] ], identifier[cl_arguments] : identifier[Iterable] [ identifier[str] ], identifier[output_root] : identifier[str] )-> keyword[None] :
literal[string]
identifier[config] = keyword[None]
keyword[try] :
identifier[config_path] = identifier[find_config] ( identifier[config_path] )
identifier[restore_from] = identifier[restore_from] keyword[or] identifier[path] . identifier[dirname] ( identifier[config_path] )
identifier[config] = identifier[load_config] ( identifier[config_file] = identifier[config_path] , identifier[additional_args] = identifier[cl_arguments] )
identifier[validate_config] ( identifier[config] )
identifier[logging] . identifier[debug] ( literal[string] , identifier[config] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[fallback] ( literal[string] , identifier[ex] )
identifier[run] ( identifier[config] = identifier[config] , identifier[output_root] = identifier[output_root] , identifier[restore_from] = identifier[restore_from] ) | def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config) # depends on [control=['try'], data=[]]
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex) # depends on [control=['except'], data=['ex']]
run(config=config, output_root=output_root, restore_from=restore_from) |
def parse_single_report(f):
""" Parse a samtools idxstats idxstats """
parsed_data = OrderedDict()
for l in f.splitlines():
s = l.split("\t")
try:
parsed_data[s[0]] = int(s[2])
except (IndexError, ValueError):
pass
return parsed_data | def function[parse_single_report, parameter[f]]:
constant[ Parse a samtools idxstats idxstats ]
variable[parsed_data] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[l]] in starred[call[name[f].splitlines, parameter[]]] begin[:]
variable[s] assign[=] call[name[l].split, parameter[constant[ ]]]
<ast.Try object at 0x7da18f00eec0>
return[name[parsed_data]] | keyword[def] identifier[parse_single_report] ( identifier[f] ):
literal[string]
identifier[parsed_data] = identifier[OrderedDict] ()
keyword[for] identifier[l] keyword[in] identifier[f] . identifier[splitlines] ():
identifier[s] = identifier[l] . identifier[split] ( literal[string] )
keyword[try] :
identifier[parsed_data] [ identifier[s] [ literal[int] ]]= identifier[int] ( identifier[s] [ literal[int] ])
keyword[except] ( identifier[IndexError] , identifier[ValueError] ):
keyword[pass]
keyword[return] identifier[parsed_data] | def parse_single_report(f):
""" Parse a samtools idxstats idxstats """
parsed_data = OrderedDict()
for l in f.splitlines():
s = l.split('\t')
try:
parsed_data[s[0]] = int(s[2]) # depends on [control=['try'], data=[]]
except (IndexError, ValueError):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['l']]
return parsed_data |
def phi(self):
"""Polar angle / inclination of this vector in radians
Based on sperical coordinate space
returns angle between this vector and the positive z-azis
range: (0 <= phi <= pi)
"""
return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z) | def function[phi, parameter[self]]:
constant[Polar angle / inclination of this vector in radians
Based on sperical coordinate space
returns angle between this vector and the positive z-azis
range: (0 <= phi <= pi)
]
return[call[name[np].arctan2, parameter[call[name[np].sqrt, parameter[binary_operation[binary_operation[name[self].x ** constant[2]] + binary_operation[name[self].y ** constant[2]]]]], name[self].z]]] | keyword[def] identifier[phi] ( identifier[self] ):
literal[string]
keyword[return] identifier[np] . identifier[arctan2] ( identifier[np] . identifier[sqrt] ( identifier[self] . identifier[x] ** literal[int] + identifier[self] . identifier[y] ** literal[int] ), identifier[self] . identifier[z] ) | def phi(self):
"""Polar angle / inclination of this vector in radians
Based on sperical coordinate space
returns angle between this vector and the positive z-azis
range: (0 <= phi <= pi)
"""
return np.arctan2(np.sqrt(self.x ** 2 + self.y ** 2), self.z) |
def _get_url_datafiles(url_db_view, url_db_content,
mrio_regex, access_cookie=None):
""" Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
"""
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content',
['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff
for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls) | def function[_get_url_datafiles, parameter[url_db_view, url_db_content, mrio_regex, access_cookie]]:
constant[ Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
]
variable[returnvalue] assign[=] call[name[namedtuple], parameter[constant[url_content], list[[<ast.Constant object at 0x7da1b0493490>, <ast.Constant object at 0x7da1b0493460>]]]]
variable[url_text] assign[=] call[name[requests].post, parameter[name[url_db_view]]].text
variable[data_urls] assign[=] <ast.ListComp object at 0x7da1b0491570>
return[call[name[returnvalue], parameter[]]] | keyword[def] identifier[_get_url_datafiles] ( identifier[url_db_view] , identifier[url_db_content] ,
identifier[mrio_regex] , identifier[access_cookie] = keyword[None] ):
literal[string]
identifier[returnvalue] = identifier[namedtuple] ( literal[string] ,
[ literal[string] , literal[string] ])
identifier[url_text] = identifier[requests] . identifier[post] ( identifier[url_db_view] , identifier[cookies] = identifier[access_cookie] ). identifier[text]
identifier[data_urls] =[ identifier[url_db_content] + identifier[ff]
keyword[for] identifier[ff] keyword[in] identifier[re] . identifier[findall] ( identifier[mrio_regex] , identifier[url_text] )]
keyword[return] identifier[returnvalue] ( identifier[raw_text] = identifier[url_text] , identifier[data_urls] = identifier[data_urls] ) | def _get_url_datafiles(url_db_view, url_db_content, mrio_regex, access_cookie=None):
""" Urls of mrio files by parsing url content for mrio_regex
Parameters
----------
url_db_view: url str
Url which shows the list of mrios in the db
url_db_content: url str
Url which needs to be appended before the url parsed from the
url_db_view to get a valid download link
mrio_regex: regex str
Regex to parse the mrio datafile from url_db_view
access_cookie: dict, optional
If needed, cookie to access the database
Returns
-------
Named tuple:
.raw_text: content of url_db_view for later use
.data_urls: list of url
"""
# Use post here - NB: get could be necessary for some other pages
# but currently works for wiod and eora
returnvalue = namedtuple('url_content', ['raw_text', 'data_urls'])
url_text = requests.post(url_db_view, cookies=access_cookie).text
data_urls = [url_db_content + ff for ff in re.findall(mrio_regex, url_text)]
return returnvalue(raw_text=url_text, data_urls=data_urls) |
def parse_formula(formula):
"""Parse a chemical formula and construct a :class:`PyComposition` object
Parameters
----------
formula : :class:`str`
Returns
-------
:class:`PyComposition`
Raises
------
ValueError
If the formula doesn't match the expected pattern
"""
if not formula_pattern.match(formula):
raise ValueError("%r does not look like a formula" % (formula,))
composition = PyComposition()
for elem, isotope, number in atom_pattern.findall(formula):
composition[_make_isotope_string(elem, int(isotope) if isotope else 0)] += int(number)
return composition | def function[parse_formula, parameter[formula]]:
constant[Parse a chemical formula and construct a :class:`PyComposition` object
Parameters
----------
formula : :class:`str`
Returns
-------
:class:`PyComposition`
Raises
------
ValueError
If the formula doesn't match the expected pattern
]
if <ast.UnaryOp object at 0x7da18fe907c0> begin[:]
<ast.Raise object at 0x7da18fe93970>
variable[composition] assign[=] call[name[PyComposition], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18fe90a90>, <ast.Name object at 0x7da18fe93370>, <ast.Name object at 0x7da18fe938b0>]]] in starred[call[name[atom_pattern].findall, parameter[name[formula]]]] begin[:]
<ast.AugAssign object at 0x7da18fe91540>
return[name[composition]] | keyword[def] identifier[parse_formula] ( identifier[formula] ):
literal[string]
keyword[if] keyword[not] identifier[formula_pattern] . identifier[match] ( identifier[formula] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[formula] ,))
identifier[composition] = identifier[PyComposition] ()
keyword[for] identifier[elem] , identifier[isotope] , identifier[number] keyword[in] identifier[atom_pattern] . identifier[findall] ( identifier[formula] ):
identifier[composition] [ identifier[_make_isotope_string] ( identifier[elem] , identifier[int] ( identifier[isotope] ) keyword[if] identifier[isotope] keyword[else] literal[int] )]+= identifier[int] ( identifier[number] )
keyword[return] identifier[composition] | def parse_formula(formula):
"""Parse a chemical formula and construct a :class:`PyComposition` object
Parameters
----------
formula : :class:`str`
Returns
-------
:class:`PyComposition`
Raises
------
ValueError
If the formula doesn't match the expected pattern
"""
if not formula_pattern.match(formula):
raise ValueError('%r does not look like a formula' % (formula,)) # depends on [control=['if'], data=[]]
composition = PyComposition()
for (elem, isotope, number) in atom_pattern.findall(formula):
composition[_make_isotope_string(elem, int(isotope) if isotope else 0)] += int(number) # depends on [control=['for'], data=[]]
return composition |
def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value | def function[get, parameter[self, section, key]]:
constant[Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
]
variable[line] assign[=] call[name[self]._make_line, parameter[name[key]]]
for taget[name[line]] in starred[call[name[self].get_line, parameter[name[section], name[line]]]] begin[:]
<ast.Yield object at 0x7da20c6aae30> | keyword[def] identifier[get] ( identifier[self] , identifier[section] , identifier[key] ):
literal[string]
identifier[line] = identifier[self] . identifier[_make_line] ( identifier[key] )
keyword[for] identifier[line] keyword[in] identifier[self] . identifier[get_line] ( identifier[section] , identifier[line] ):
keyword[yield] identifier[line] . identifier[value] | def get(self, section, key):
"""Return the 'value' of all lines matching the section/key.
Yields:
values for matching lines.
"""
line = self._make_line(key)
for line in self.get_line(section, line):
yield line.value # depends on [control=['for'], data=['line']] |
def verify_exp_list(self, obj):
"""Verify each expression in a list."""
# A tad harder. This is a list of expressions each of which could be
# the head of a tree. We need to recursively walk each of these and
# ensure that any Id elements resolve to the current stack.
#
# I believe we only have to look at the current symtab.
if obj.children is not None:
for children in obj.children:
if isinstance(children, node.Id):
if children.name in self.external_functions:
continue
if children.name not in self.current_symtab:
raise QasmError("Argument '" + children.name
+ "' in expression cannot be "
+ "found, line", str(children.line),
"file", children.file)
else:
if hasattr(children, "children"):
self.verify_exp_list(children) | def function[verify_exp_list, parameter[self, obj]]:
constant[Verify each expression in a list.]
if compare[name[obj].children is_not constant[None]] begin[:]
for taget[name[children]] in starred[name[obj].children] begin[:]
if call[name[isinstance], parameter[name[children], name[node].Id]] begin[:]
if compare[name[children].name in name[self].external_functions] begin[:]
continue
if compare[name[children].name <ast.NotIn object at 0x7da2590d7190> name[self].current_symtab] begin[:]
<ast.Raise object at 0x7da1b03a42e0> | keyword[def] identifier[verify_exp_list] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[if] identifier[obj] . identifier[children] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[children] keyword[in] identifier[obj] . identifier[children] :
keyword[if] identifier[isinstance] ( identifier[children] , identifier[node] . identifier[Id] ):
keyword[if] identifier[children] . identifier[name] keyword[in] identifier[self] . identifier[external_functions] :
keyword[continue]
keyword[if] identifier[children] . identifier[name] keyword[not] keyword[in] identifier[self] . identifier[current_symtab] :
keyword[raise] identifier[QasmError] ( literal[string] + identifier[children] . identifier[name]
+ literal[string]
+ literal[string] , identifier[str] ( identifier[children] . identifier[line] ),
literal[string] , identifier[children] . identifier[file] )
keyword[else] :
keyword[if] identifier[hasattr] ( identifier[children] , literal[string] ):
identifier[self] . identifier[verify_exp_list] ( identifier[children] ) | def verify_exp_list(self, obj):
"""Verify each expression in a list."""
# A tad harder. This is a list of expressions each of which could be
# the head of a tree. We need to recursively walk each of these and
# ensure that any Id elements resolve to the current stack.
#
# I believe we only have to look at the current symtab.
if obj.children is not None:
for children in obj.children:
if isinstance(children, node.Id):
if children.name in self.external_functions:
continue # depends on [control=['if'], data=[]]
if children.name not in self.current_symtab:
raise QasmError("Argument '" + children.name + "' in expression cannot be " + 'found, line', str(children.line), 'file', children.file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif hasattr(children, 'children'):
self.verify_exp_list(children) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['children']] # depends on [control=['if'], data=[]] |
def start_process(self, process, args=None, cwd=None, env=None):
"""
Starts a process interactively.
:param process: Process to run
:type process: str
:param args: List of arguments (list of str)
:type args: list
:param cwd: Working directory
:type cwd: str
:param env: environment variables (dict).
"""
self.setReadOnly(False)
if env is None:
env = {}
if args is None:
args = []
if not self._running:
self.process = QProcess()
self.process.finished.connect(self._on_process_finished)
self.process.started.connect(self.process_started.emit)
self.process.error.connect(self._write_error)
self.process.readyReadStandardError.connect(self._on_stderr)
self.process.readyReadStandardOutput.connect(self._on_stdout)
if cwd:
self.process.setWorkingDirectory(cwd)
e = self.process.systemEnvironment()
ev = QProcessEnvironment()
for v in e:
values = v.split('=')
ev.insert(values[0], '='.join(values[1:]))
for k, v in env.items():
ev.insert(k, v)
self.process.setProcessEnvironment(ev)
self._running = True
self._process_name = process
self._args = args
if self._clear_on_start:
self.clear()
self._user_stop = False
self._write_started()
self.process.start(process, args)
self.process.waitForStarted()
else:
_logger().warning('a process is already running') | def function[start_process, parameter[self, process, args, cwd, env]]:
constant[
Starts a process interactively.
:param process: Process to run
:type process: str
:param args: List of arguments (list of str)
:type args: list
:param cwd: Working directory
:type cwd: str
:param env: environment variables (dict).
]
call[name[self].setReadOnly, parameter[constant[False]]]
if compare[name[env] is constant[None]] begin[:]
variable[env] assign[=] dictionary[[], []]
if compare[name[args] is constant[None]] begin[:]
variable[args] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da204567100> begin[:]
name[self].process assign[=] call[name[QProcess], parameter[]]
call[name[self].process.finished.connect, parameter[name[self]._on_process_finished]]
call[name[self].process.started.connect, parameter[name[self].process_started.emit]]
call[name[self].process.error.connect, parameter[name[self]._write_error]]
call[name[self].process.readyReadStandardError.connect, parameter[name[self]._on_stderr]]
call[name[self].process.readyReadStandardOutput.connect, parameter[name[self]._on_stdout]]
if name[cwd] begin[:]
call[name[self].process.setWorkingDirectory, parameter[name[cwd]]]
variable[e] assign[=] call[name[self].process.systemEnvironment, parameter[]]
variable[ev] assign[=] call[name[QProcessEnvironment], parameter[]]
for taget[name[v]] in starred[name[e]] begin[:]
variable[values] assign[=] call[name[v].split, parameter[constant[=]]]
call[name[ev].insert, parameter[call[name[values]][constant[0]], call[constant[=].join, parameter[call[name[values]][<ast.Slice object at 0x7da2045662f0>]]]]]
for taget[tuple[[<ast.Name object at 0x7da204564b20>, <ast.Name object at 0x7da204564f40>]]] in starred[call[name[env].items, parameter[]]] begin[:]
call[name[ev].insert, parameter[name[k], name[v]]]
call[name[self].process.setProcessEnvironment, parameter[name[ev]]]
name[self]._running assign[=] constant[True]
name[self]._process_name assign[=] name[process]
name[self]._args assign[=] name[args]
if name[self]._clear_on_start begin[:]
call[name[self].clear, parameter[]]
name[self]._user_stop assign[=] constant[False]
call[name[self]._write_started, parameter[]]
call[name[self].process.start, parameter[name[process], name[args]]]
call[name[self].process.waitForStarted, parameter[]] | keyword[def] identifier[start_process] ( identifier[self] , identifier[process] , identifier[args] = keyword[None] , identifier[cwd] = keyword[None] , identifier[env] = keyword[None] ):
literal[string]
identifier[self] . identifier[setReadOnly] ( keyword[False] )
keyword[if] identifier[env] keyword[is] keyword[None] :
identifier[env] ={}
keyword[if] identifier[args] keyword[is] keyword[None] :
identifier[args] =[]
keyword[if] keyword[not] identifier[self] . identifier[_running] :
identifier[self] . identifier[process] = identifier[QProcess] ()
identifier[self] . identifier[process] . identifier[finished] . identifier[connect] ( identifier[self] . identifier[_on_process_finished] )
identifier[self] . identifier[process] . identifier[started] . identifier[connect] ( identifier[self] . identifier[process_started] . identifier[emit] )
identifier[self] . identifier[process] . identifier[error] . identifier[connect] ( identifier[self] . identifier[_write_error] )
identifier[self] . identifier[process] . identifier[readyReadStandardError] . identifier[connect] ( identifier[self] . identifier[_on_stderr] )
identifier[self] . identifier[process] . identifier[readyReadStandardOutput] . identifier[connect] ( identifier[self] . identifier[_on_stdout] )
keyword[if] identifier[cwd] :
identifier[self] . identifier[process] . identifier[setWorkingDirectory] ( identifier[cwd] )
identifier[e] = identifier[self] . identifier[process] . identifier[systemEnvironment] ()
identifier[ev] = identifier[QProcessEnvironment] ()
keyword[for] identifier[v] keyword[in] identifier[e] :
identifier[values] = identifier[v] . identifier[split] ( literal[string] )
identifier[ev] . identifier[insert] ( identifier[values] [ literal[int] ], literal[string] . identifier[join] ( identifier[values] [ literal[int] :]))
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[env] . identifier[items] ():
identifier[ev] . identifier[insert] ( identifier[k] , identifier[v] )
identifier[self] . identifier[process] . identifier[setProcessEnvironment] ( identifier[ev] )
identifier[self] . identifier[_running] = keyword[True]
identifier[self] . identifier[_process_name] = identifier[process]
identifier[self] . identifier[_args] = identifier[args]
keyword[if] identifier[self] . identifier[_clear_on_start] :
identifier[self] . identifier[clear] ()
identifier[self] . identifier[_user_stop] = keyword[False]
identifier[self] . identifier[_write_started] ()
identifier[self] . identifier[process] . identifier[start] ( identifier[process] , identifier[args] )
identifier[self] . identifier[process] . identifier[waitForStarted] ()
keyword[else] :
identifier[_logger] (). identifier[warning] ( literal[string] ) | def start_process(self, process, args=None, cwd=None, env=None):
"""
Starts a process interactively.
:param process: Process to run
:type process: str
:param args: List of arguments (list of str)
:type args: list
:param cwd: Working directory
:type cwd: str
:param env: environment variables (dict).
"""
self.setReadOnly(False)
if env is None:
env = {} # depends on [control=['if'], data=['env']]
if args is None:
args = [] # depends on [control=['if'], data=['args']]
if not self._running:
self.process = QProcess()
self.process.finished.connect(self._on_process_finished)
self.process.started.connect(self.process_started.emit)
self.process.error.connect(self._write_error)
self.process.readyReadStandardError.connect(self._on_stderr)
self.process.readyReadStandardOutput.connect(self._on_stdout)
if cwd:
self.process.setWorkingDirectory(cwd) # depends on [control=['if'], data=[]]
e = self.process.systemEnvironment()
ev = QProcessEnvironment()
for v in e:
values = v.split('=')
ev.insert(values[0], '='.join(values[1:])) # depends on [control=['for'], data=['v']]
for (k, v) in env.items():
ev.insert(k, v) # depends on [control=['for'], data=[]]
self.process.setProcessEnvironment(ev)
self._running = True
self._process_name = process
self._args = args
if self._clear_on_start:
self.clear() # depends on [control=['if'], data=[]]
self._user_stop = False
self._write_started()
self.process.start(process, args)
self.process.waitForStarted() # depends on [control=['if'], data=[]]
else:
_logger().warning('a process is already running') |
def from_jsonf(cls, fpath: str, encoding: str='utf8',
force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> T:
"""From json file path to instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(util.load_jsonf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) | def function[from_jsonf, parameter[cls, fpath, encoding, force_snake_case, force_cast, restrict]]:
constant[From json file path to instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
]
return[call[name[cls].from_dict, parameter[call[name[util].load_jsonf, parameter[name[fpath], name[encoding]]]]]] | keyword[def] identifier[from_jsonf] ( identifier[cls] , identifier[fpath] : identifier[str] , identifier[encoding] : identifier[str] = literal[string] ,
identifier[force_snake_case] = keyword[True] , identifier[force_cast] : identifier[bool] = keyword[False] , identifier[restrict] : identifier[bool] = keyword[False] )-> identifier[T] :
literal[string]
keyword[return] identifier[cls] . identifier[from_dict] ( identifier[util] . identifier[load_jsonf] ( identifier[fpath] , identifier[encoding] ),
identifier[force_snake_case] = identifier[force_snake_case] ,
identifier[force_cast] = identifier[force_cast] ,
identifier[restrict] = identifier[restrict] ) | def from_jsonf(cls, fpath: str, encoding: str='utf8', force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> T:
"""From json file path to instance
:param fpath: Json file path
:param encoding: Json file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
"""
return cls.from_dict(util.load_jsonf(fpath, encoding), force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict) |
def get_user_ip(self, request):
"""
get the client IP address bassed on a HTTPRequest
"""
client_ip_address = None
# searching the IP address
for key in self.configuration.network.ip_meta_precedence_order:
ip_meta_value = request.META.get(key, '').strip()
if ip_meta_value != '':
ips = [ip.strip().lower() for ip in ip_meta_value.split(',')]
for ip_str in ips:
if ip_str and is_valid_ip(ip_str):
if not ip_str.startswith(self.configuration.network.non_public_ip_prefixes):
return ip_str
elif not self.configuration.network.real_ip_only:
loopback = ('127.0.0.1', '::1')
if client_ip_address is None:
client_ip_address = ip_str
elif client_ip_address in loopback and ip_str not in loopback:
client_ip_address = ip_str
if client_ip_address is None and settings.DEBUG:
raise DobermanImproperlyConfigured(
"Unknown IP, maybe you are working on localhost/development, "
"so please set in your setting: DOBERMAN_REAL_IP_ONLY=False"
)
return client_ip_address | def function[get_user_ip, parameter[self, request]]:
constant[
get the client IP address bassed on a HTTPRequest
]
variable[client_ip_address] assign[=] constant[None]
for taget[name[key]] in starred[name[self].configuration.network.ip_meta_precedence_order] begin[:]
variable[ip_meta_value] assign[=] call[call[name[request].META.get, parameter[name[key], constant[]]].strip, parameter[]]
if compare[name[ip_meta_value] not_equal[!=] constant[]] begin[:]
variable[ips] assign[=] <ast.ListComp object at 0x7da2054a4160>
for taget[name[ip_str]] in starred[name[ips]] begin[:]
if <ast.BoolOp object at 0x7da2054a4ac0> begin[:]
if <ast.UnaryOp object at 0x7da2054a6680> begin[:]
return[name[ip_str]]
if <ast.BoolOp object at 0x7da20cabcd00> begin[:]
<ast.Raise object at 0x7da20cabda80>
return[name[client_ip_address]] | keyword[def] identifier[get_user_ip] ( identifier[self] , identifier[request] ):
literal[string]
identifier[client_ip_address] = keyword[None]
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[configuration] . identifier[network] . identifier[ip_meta_precedence_order] :
identifier[ip_meta_value] = identifier[request] . identifier[META] . identifier[get] ( identifier[key] , literal[string] ). identifier[strip] ()
keyword[if] identifier[ip_meta_value] != literal[string] :
identifier[ips] =[ identifier[ip] . identifier[strip] (). identifier[lower] () keyword[for] identifier[ip] keyword[in] identifier[ip_meta_value] . identifier[split] ( literal[string] )]
keyword[for] identifier[ip_str] keyword[in] identifier[ips] :
keyword[if] identifier[ip_str] keyword[and] identifier[is_valid_ip] ( identifier[ip_str] ):
keyword[if] keyword[not] identifier[ip_str] . identifier[startswith] ( identifier[self] . identifier[configuration] . identifier[network] . identifier[non_public_ip_prefixes] ):
keyword[return] identifier[ip_str]
keyword[elif] keyword[not] identifier[self] . identifier[configuration] . identifier[network] . identifier[real_ip_only] :
identifier[loopback] =( literal[string] , literal[string] )
keyword[if] identifier[client_ip_address] keyword[is] keyword[None] :
identifier[client_ip_address] = identifier[ip_str]
keyword[elif] identifier[client_ip_address] keyword[in] identifier[loopback] keyword[and] identifier[ip_str] keyword[not] keyword[in] identifier[loopback] :
identifier[client_ip_address] = identifier[ip_str]
keyword[if] identifier[client_ip_address] keyword[is] keyword[None] keyword[and] identifier[settings] . identifier[DEBUG] :
keyword[raise] identifier[DobermanImproperlyConfigured] (
literal[string]
literal[string]
)
keyword[return] identifier[client_ip_address] | def get_user_ip(self, request):
"""
get the client IP address bassed on a HTTPRequest
"""
client_ip_address = None
# searching the IP address
for key in self.configuration.network.ip_meta_precedence_order:
ip_meta_value = request.META.get(key, '').strip()
if ip_meta_value != '':
ips = [ip.strip().lower() for ip in ip_meta_value.split(',')]
for ip_str in ips:
if ip_str and is_valid_ip(ip_str):
if not ip_str.startswith(self.configuration.network.non_public_ip_prefixes):
return ip_str # depends on [control=['if'], data=[]]
elif not self.configuration.network.real_ip_only:
loopback = ('127.0.0.1', '::1')
if client_ip_address is None:
client_ip_address = ip_str # depends on [control=['if'], data=['client_ip_address']]
elif client_ip_address in loopback and ip_str not in loopback:
client_ip_address = ip_str # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ip_str']] # depends on [control=['if'], data=['ip_meta_value']] # depends on [control=['for'], data=['key']]
if client_ip_address is None and settings.DEBUG:
raise DobermanImproperlyConfigured('Unknown IP, maybe you are working on localhost/development, so please set in your setting: DOBERMAN_REAL_IP_ONLY=False') # depends on [control=['if'], data=[]]
return client_ip_address |
def write_all(self, data):
"""
:param list[int] data:
"""
self._set_cursor_x(0)
self._set_cursor_y(0)
self.DC.on()
self._write(data) | def function[write_all, parameter[self, data]]:
constant[
:param list[int] data:
]
call[name[self]._set_cursor_x, parameter[constant[0]]]
call[name[self]._set_cursor_y, parameter[constant[0]]]
call[name[self].DC.on, parameter[]]
call[name[self]._write, parameter[name[data]]] | keyword[def] identifier[write_all] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[_set_cursor_x] ( literal[int] )
identifier[self] . identifier[_set_cursor_y] ( literal[int] )
identifier[self] . identifier[DC] . identifier[on] ()
identifier[self] . identifier[_write] ( identifier[data] ) | def write_all(self, data):
"""
:param list[int] data:
"""
self._set_cursor_x(0)
self._set_cursor_y(0)
self.DC.on()
self._write(data) |
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError() | def function[result, parameter[self, timeout]]:
constant[Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
]
with name[self]._condition begin[:]
if compare[name[self]._state in list[[<ast.Name object at 0x7da1b26ac0d0>, <ast.Name object at 0x7da1b26ad060>]]] begin[:]
<ast.Raise object at 0x7da1b26af4c0>
call[name[self]._condition.wait, parameter[name[timeout]]]
if compare[name[self]._state in list[[<ast.Name object at 0x7da1b26ac460>, <ast.Name object at 0x7da1b26af640>]]] begin[:]
<ast.Raise object at 0x7da1b26aedd0> | keyword[def] identifier[result] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[with] identifier[self] . identifier[_condition] :
keyword[if] identifier[self] . identifier[_state] keyword[in] [ identifier[CANCELLED] , identifier[CANCELLED_AND_NOTIFIED] ]:
keyword[raise] identifier[CancelledError] ()
keyword[elif] identifier[self] . identifier[_state] == identifier[FINISHED] :
keyword[return] identifier[self] . identifier[__get_result] ()
identifier[self] . identifier[_condition] . identifier[wait] ( identifier[timeout] )
keyword[if] identifier[self] . identifier[_state] keyword[in] [ identifier[CANCELLED] , identifier[CANCELLED_AND_NOTIFIED] ]:
keyword[raise] identifier[CancelledError] ()
keyword[elif] identifier[self] . identifier[_state] == identifier[FINISHED] :
keyword[return] identifier[self] . identifier[__get_result] ()
keyword[else] :
keyword[raise] identifier[TimeoutError] () | def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError() # depends on [control=['if'], data=[]]
elif self._state == FINISHED:
return self.__get_result() # depends on [control=['if'], data=[]]
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError() # depends on [control=['if'], data=[]]
elif self._state == FINISHED:
return self.__get_result() # depends on [control=['if'], data=[]]
else:
raise TimeoutError() # depends on [control=['with'], data=[]] |
def normalize(self, inplace=True):
"""
Normalizes the pdf of the distribution so that it
integrates to 1 over all the variables.
Parameters
----------
inplace: boolean
If inplace=True it will modify the distribution itself, else would return
a new distribution.
Returns
-------
CustomDistribution or None:
if inplace=True (default) returns None
if inplace=False returns a new CustomDistribution instance.
Examples
--------
>>> from pgmpy.factors.distributions import CustomDistribution
>>> from scipy.stats import multivariate_normal
>>> normal_pdf_x2 = lambda x1, x2: 2 * multivariate_normal.pdf(
... x=[x1, x2], mean=[0, 0], cov=[[1, 0], [0, 1]])
>>> normal_dist_x2 = CustomDistribution(variables=['x1', 'x2'],
... distribution=normal_pdf_x2)
>>> normal_dist_x2.assignment(1, 1)
0.117099663049
>>> normal_dist = normal_dist_x2.normalize(inplace=False))
>>> normal_dist.assignment(1, 1)
0.0585498315243
"""
phi = self if inplace else self.copy()
pdf = self.pdf
pdf_mod = integrate.nquad(pdf, [[-np.inf, np.inf] for var in self.variables])[0]
phi._pdf = lambda *args: pdf(*args) / pdf_mod
if not inplace:
return phi | def function[normalize, parameter[self, inplace]]:
constant[
Normalizes the pdf of the distribution so that it
integrates to 1 over all the variables.
Parameters
----------
inplace: boolean
If inplace=True it will modify the distribution itself, else would return
a new distribution.
Returns
-------
CustomDistribution or None:
if inplace=True (default) returns None
if inplace=False returns a new CustomDistribution instance.
Examples
--------
>>> from pgmpy.factors.distributions import CustomDistribution
>>> from scipy.stats import multivariate_normal
>>> normal_pdf_x2 = lambda x1, x2: 2 * multivariate_normal.pdf(
... x=[x1, x2], mean=[0, 0], cov=[[1, 0], [0, 1]])
>>> normal_dist_x2 = CustomDistribution(variables=['x1', 'x2'],
... distribution=normal_pdf_x2)
>>> normal_dist_x2.assignment(1, 1)
0.117099663049
>>> normal_dist = normal_dist_x2.normalize(inplace=False))
>>> normal_dist.assignment(1, 1)
0.0585498315243
]
variable[phi] assign[=] <ast.IfExp object at 0x7da20c6aa440>
variable[pdf] assign[=] name[self].pdf
variable[pdf_mod] assign[=] call[call[name[integrate].nquad, parameter[name[pdf], <ast.ListComp object at 0x7da20c6a9c00>]]][constant[0]]
name[phi]._pdf assign[=] <ast.Lambda object at 0x7da18f00c280>
if <ast.UnaryOp object at 0x7da20c6a8220> begin[:]
return[name[phi]] | keyword[def] identifier[normalize] ( identifier[self] , identifier[inplace] = keyword[True] ):
literal[string]
identifier[phi] = identifier[self] keyword[if] identifier[inplace] keyword[else] identifier[self] . identifier[copy] ()
identifier[pdf] = identifier[self] . identifier[pdf]
identifier[pdf_mod] = identifier[integrate] . identifier[nquad] ( identifier[pdf] ,[[- identifier[np] . identifier[inf] , identifier[np] . identifier[inf] ] keyword[for] identifier[var] keyword[in] identifier[self] . identifier[variables] ])[ literal[int] ]
identifier[phi] . identifier[_pdf] = keyword[lambda] * identifier[args] : identifier[pdf] (* identifier[args] )/ identifier[pdf_mod]
keyword[if] keyword[not] identifier[inplace] :
keyword[return] identifier[phi] | def normalize(self, inplace=True):
"""
Normalizes the pdf of the distribution so that it
integrates to 1 over all the variables.
Parameters
----------
inplace: boolean
If inplace=True it will modify the distribution itself, else would return
a new distribution.
Returns
-------
CustomDistribution or None:
if inplace=True (default) returns None
if inplace=False returns a new CustomDistribution instance.
Examples
--------
>>> from pgmpy.factors.distributions import CustomDistribution
>>> from scipy.stats import multivariate_normal
>>> normal_pdf_x2 = lambda x1, x2: 2 * multivariate_normal.pdf(
... x=[x1, x2], mean=[0, 0], cov=[[1, 0], [0, 1]])
>>> normal_dist_x2 = CustomDistribution(variables=['x1', 'x2'],
... distribution=normal_pdf_x2)
>>> normal_dist_x2.assignment(1, 1)
0.117099663049
>>> normal_dist = normal_dist_x2.normalize(inplace=False))
>>> normal_dist.assignment(1, 1)
0.0585498315243
"""
phi = self if inplace else self.copy()
pdf = self.pdf
pdf_mod = integrate.nquad(pdf, [[-np.inf, np.inf] for var in self.variables])[0]
phi._pdf = lambda *args: pdf(*args) / pdf_mod
if not inplace:
return phi # depends on [control=['if'], data=[]] |
def _record(self, obj):
"""
Construct a `DomainRecord` object belonging to the domain's `doapi`
object. ``obj`` may be a domain record ID, a dictionary of domain
record fields, or another `DomainRecord` object (which will be
shallow-copied). The resulting `DomainRecord` will only contain the
information in ``obj``; no data will be sent to or from the API
endpoint.
:type obj: integer, `dict`, or `DomainRecord`
:rtype: DomainRecord
"""
return DomainRecord(obj, domain=self, doapi_manager=self.doapi_manager) | def function[_record, parameter[self, obj]]:
constant[
Construct a `DomainRecord` object belonging to the domain's `doapi`
object. ``obj`` may be a domain record ID, a dictionary of domain
record fields, or another `DomainRecord` object (which will be
shallow-copied). The resulting `DomainRecord` will only contain the
information in ``obj``; no data will be sent to or from the API
endpoint.
:type obj: integer, `dict`, or `DomainRecord`
:rtype: DomainRecord
]
return[call[name[DomainRecord], parameter[name[obj]]]] | keyword[def] identifier[_record] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[return] identifier[DomainRecord] ( identifier[obj] , identifier[domain] = identifier[self] , identifier[doapi_manager] = identifier[self] . identifier[doapi_manager] ) | def _record(self, obj):
"""
Construct a `DomainRecord` object belonging to the domain's `doapi`
object. ``obj`` may be a domain record ID, a dictionary of domain
record fields, or another `DomainRecord` object (which will be
shallow-copied). The resulting `DomainRecord` will only contain the
information in ``obj``; no data will be sent to or from the API
endpoint.
:type obj: integer, `dict`, or `DomainRecord`
:rtype: DomainRecord
"""
return DomainRecord(obj, domain=self, doapi_manager=self.doapi_manager) |
def _compat_rem_str_id_from_index(self, indexedField, pk, val, conn=None):
'''
_compat_rem_str_id_from_index - Used in compat_convertHashedIndexes to remove the old string repr of a field,
in order to later add the hashed value,
'''
if conn is None:
conn = self._get_connection()
conn.srem(self._compat_get_str_key_for_index(indexedField, val), pk) | def function[_compat_rem_str_id_from_index, parameter[self, indexedField, pk, val, conn]]:
constant[
_compat_rem_str_id_from_index - Used in compat_convertHashedIndexes to remove the old string repr of a field,
in order to later add the hashed value,
]
if compare[name[conn] is constant[None]] begin[:]
variable[conn] assign[=] call[name[self]._get_connection, parameter[]]
call[name[conn].srem, parameter[call[name[self]._compat_get_str_key_for_index, parameter[name[indexedField], name[val]]], name[pk]]] | keyword[def] identifier[_compat_rem_str_id_from_index] ( identifier[self] , identifier[indexedField] , identifier[pk] , identifier[val] , identifier[conn] = keyword[None] ):
literal[string]
keyword[if] identifier[conn] keyword[is] keyword[None] :
identifier[conn] = identifier[self] . identifier[_get_connection] ()
identifier[conn] . identifier[srem] ( identifier[self] . identifier[_compat_get_str_key_for_index] ( identifier[indexedField] , identifier[val] ), identifier[pk] ) | def _compat_rem_str_id_from_index(self, indexedField, pk, val, conn=None):
"""
_compat_rem_str_id_from_index - Used in compat_convertHashedIndexes to remove the old string repr of a field,
in order to later add the hashed value,
"""
if conn is None:
conn = self._get_connection() # depends on [control=['if'], data=['conn']]
conn.srem(self._compat_get_str_key_for_index(indexedField, val), pk) |
def ping(self, timeout=0, **kwargs):
"""THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS"""
# http://stackoverflow.com/a/2257449/5006
def rand_id(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
payload = rand_id()
self.ws.ping(payload)
opcode, data = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kwargs)
if data != payload:
raise IOError("Pinged server but did not receive correct pong") | def function[ping, parameter[self, timeout]]:
constant[THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS]
def function[rand_id, parameter[size, chars]]:
return[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18f58c220>]]]
variable[payload] assign[=] call[name[rand_id], parameter[]]
call[name[self].ws.ping, parameter[name[payload]]]
<ast.Tuple object at 0x7da18f58d090> assign[=] call[name[self].recv_raw, parameter[name[timeout], list[[<ast.Attribute object at 0x7da18f58e200>]]]]
if compare[name[data] not_equal[!=] name[payload]] begin[:]
<ast.Raise object at 0x7da18f58dc90> | keyword[def] identifier[ping] ( identifier[self] , identifier[timeout] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[rand_id] ( identifier[size] = literal[int] , identifier[chars] = identifier[string] . identifier[ascii_uppercase] + identifier[string] . identifier[digits] ):
keyword[return] literal[string] . identifier[join] ( identifier[random] . identifier[choice] ( identifier[chars] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[size] ))
identifier[payload] = identifier[rand_id] ()
identifier[self] . identifier[ws] . identifier[ping] ( identifier[payload] )
identifier[opcode] , identifier[data] = identifier[self] . identifier[recv_raw] ( identifier[timeout] ,[ identifier[websocket] . identifier[ABNF] . identifier[OPCODE_PONG] ],** identifier[kwargs] )
keyword[if] identifier[data] != identifier[payload] :
keyword[raise] identifier[IOError] ( literal[string] ) | def ping(self, timeout=0, **kwargs):
"""THIS DOES NOT WORK, UWSGI DOES NOT RESPOND TO PINGS"""
# http://stackoverflow.com/a/2257449/5006
def rand_id(size=8, chars=string.ascii_uppercase + string.digits):
return ''.join((random.choice(chars) for _ in range(size)))
payload = rand_id()
self.ws.ping(payload)
(opcode, data) = self.recv_raw(timeout, [websocket.ABNF.OPCODE_PONG], **kwargs)
if data != payload:
raise IOError('Pinged server but did not receive correct pong') # depends on [control=['if'], data=[]] |
def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
"""
Match forecast and observed tracks.
Args:
model_tracks:
obs_tracks:
unique_matches:
closest_matches:
Returns:
"""
if unique_matches:
pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
else:
pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
return pairings | def function[match_tracks, parameter[self, model_tracks, obs_tracks, unique_matches, closest_matches]]:
constant[
Match forecast and observed tracks.
Args:
model_tracks:
obs_tracks:
unique_matches:
closest_matches:
Returns:
]
if name[unique_matches] begin[:]
variable[pairings] assign[=] call[name[self].track_matcher.match_tracks, parameter[name[model_tracks], name[obs_tracks]]]
return[name[pairings]] | keyword[def] identifier[match_tracks] ( identifier[self] , identifier[model_tracks] , identifier[obs_tracks] , identifier[unique_matches] = keyword[True] , identifier[closest_matches] = keyword[False] ):
literal[string]
keyword[if] identifier[unique_matches] :
identifier[pairings] = identifier[self] . identifier[track_matcher] . identifier[match_tracks] ( identifier[model_tracks] , identifier[obs_tracks] , identifier[closest_matches] = identifier[closest_matches] )
keyword[else] :
identifier[pairings] = identifier[self] . identifier[track_matcher] . identifier[neighbor_matches] ( identifier[model_tracks] , identifier[obs_tracks] )
keyword[return] identifier[pairings] | def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
"""
Match forecast and observed tracks.
Args:
model_tracks:
obs_tracks:
unique_matches:
closest_matches:
Returns:
"""
if unique_matches:
pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches) # depends on [control=['if'], data=[]]
else:
pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
return pairings |
def pufunc(func):
"""
Called by pfunc to convert NumPy ufuncs to deterministic factories.
"""
def dtrm_generator(*args):
if len(args) != func.nin:
raise ValueError('invalid number of arguments')
name = func.__name__ + '(' + '_'.join(
[str(arg) for arg in list(args)]) + ')'
doc_str = 'A deterministic returning %s(%s)' % (
func.__name__,
', '.join([str(arg) for arg in args]))
parents = {}
for i in xrange(func.nin):
parents['in%i' % i] = args[i]
def wrapper(**kwargs):
return func(*[kwargs['in%i' % i] for i in xrange(func.nin)])
return pm.Deterministic(
wrapper, doc_str, name, parents, trace=False, plot=False)
dtrm_generator.__name__ = func.__name__ + '_deterministic_generator'
dtrm_generator.__doc__ = """
Deterministic-generating wrapper for %s. Original docstring:
%s
%s
""" % (func.__name__, '_' * 60, func.__doc__)
return dtrm_generator | def function[pufunc, parameter[func]]:
constant[
Called by pfunc to convert NumPy ufuncs to deterministic factories.
]
def function[dtrm_generator, parameter[]]:
if compare[call[name[len], parameter[name[args]]] not_equal[!=] name[func].nin] begin[:]
<ast.Raise object at 0x7da2041db7c0>
variable[name] assign[=] binary_operation[binary_operation[binary_operation[name[func].__name__ + constant[(]] + call[constant[_].join, parameter[<ast.ListComp object at 0x7da2041d8820>]]] + constant[)]]
variable[doc_str] assign[=] binary_operation[constant[A deterministic returning %s(%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2041d8bb0>, <ast.Call object at 0x7da2041d9810>]]]
variable[parents] assign[=] dictionary[[], []]
for taget[name[i]] in starred[call[name[xrange], parameter[name[func].nin]]] begin[:]
call[name[parents]][binary_operation[constant[in%i] <ast.Mod object at 0x7da2590d6920> name[i]]] assign[=] call[name[args]][name[i]]
def function[wrapper, parameter[]]:
return[call[name[func], parameter[<ast.Starred object at 0x7da1b17b7b20>]]]
return[call[name[pm].Deterministic, parameter[name[wrapper], name[doc_str], name[name], name[parents]]]]
name[dtrm_generator].__name__ assign[=] binary_operation[name[func].__name__ + constant[_deterministic_generator]]
name[dtrm_generator].__doc__ assign[=] binary_operation[constant[
Deterministic-generating wrapper for %s. Original docstring:
%s
%s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b17b43d0>, <ast.BinOp object at 0x7da1b17b4730>, <ast.Attribute object at 0x7da1b17b49a0>]]]
return[name[dtrm_generator]] | keyword[def] identifier[pufunc] ( identifier[func] ):
literal[string]
keyword[def] identifier[dtrm_generator] (* identifier[args] ):
keyword[if] identifier[len] ( identifier[args] )!= identifier[func] . identifier[nin] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[name] = identifier[func] . identifier[__name__] + literal[string] + literal[string] . identifier[join] (
[ identifier[str] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[list] ( identifier[args] )])+ literal[string]
identifier[doc_str] = literal[string] %(
identifier[func] . identifier[__name__] ,
literal[string] . identifier[join] ([ identifier[str] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[args] ]))
identifier[parents] ={}
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[func] . identifier[nin] ):
identifier[parents] [ literal[string] % identifier[i] ]= identifier[args] [ identifier[i] ]
keyword[def] identifier[wrapper] (** identifier[kwargs] ):
keyword[return] identifier[func] (*[ identifier[kwargs] [ literal[string] % identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[func] . identifier[nin] )])
keyword[return] identifier[pm] . identifier[Deterministic] (
identifier[wrapper] , identifier[doc_str] , identifier[name] , identifier[parents] , identifier[trace] = keyword[False] , identifier[plot] = keyword[False] )
identifier[dtrm_generator] . identifier[__name__] = identifier[func] . identifier[__name__] + literal[string]
identifier[dtrm_generator] . identifier[__doc__] = literal[string] %( identifier[func] . identifier[__name__] , literal[string] * literal[int] , identifier[func] . identifier[__doc__] )
keyword[return] identifier[dtrm_generator] | def pufunc(func):
"""
Called by pfunc to convert NumPy ufuncs to deterministic factories.
"""
def dtrm_generator(*args):
if len(args) != func.nin:
raise ValueError('invalid number of arguments') # depends on [control=['if'], data=[]]
name = func.__name__ + '(' + '_'.join([str(arg) for arg in list(args)]) + ')'
doc_str = 'A deterministic returning %s(%s)' % (func.__name__, ', '.join([str(arg) for arg in args]))
parents = {}
for i in xrange(func.nin):
parents['in%i' % i] = args[i] # depends on [control=['for'], data=['i']]
def wrapper(**kwargs):
return func(*[kwargs['in%i' % i] for i in xrange(func.nin)])
return pm.Deterministic(wrapper, doc_str, name, parents, trace=False, plot=False)
dtrm_generator.__name__ = func.__name__ + '_deterministic_generator'
dtrm_generator.__doc__ = '\nDeterministic-generating wrapper for %s. Original docstring:\n%s\n\n%s\n ' % (func.__name__, '_' * 60, func.__doc__)
return dtrm_generator |
def add_reference(self, subj: Node, val: str) -> None:
"""
Add a fhir:link and RDF type arc if it can be determined
:param subj: reference subject
:param val: reference value
"""
match = FHIR_RESOURCE_RE.match(val)
ref_uri_str = res_type = None
if match:
ref_uri_str = val if match.group(FHIR_RE_BASE) else (self._base_uri + urllib.parse.quote(val))
res_type = match.group(FHIR_RE_RESOURCE)
elif '://' in val:
ref_uri_str = val
res_type = "Resource"
elif self._base_uri and not val.startswith('#') and not val.startswith('/'):
ref_uri_str = self._base_uri + urllib.parse.quote(val)
res_type = val.split('/', 1)[0] if '/' in val else "Resource"
if ref_uri_str:
ref_uri = URIRef(ref_uri_str)
self.add(subj, FHIR.link, ref_uri)
self.add(ref_uri, RDF.type, FHIR[res_type]) | def function[add_reference, parameter[self, subj, val]]:
constant[
Add a fhir:link and RDF type arc if it can be determined
:param subj: reference subject
:param val: reference value
]
variable[match] assign[=] call[name[FHIR_RESOURCE_RE].match, parameter[name[val]]]
variable[ref_uri_str] assign[=] constant[None]
if name[match] begin[:]
variable[ref_uri_str] assign[=] <ast.IfExp object at 0x7da20c6c64a0>
variable[res_type] assign[=] call[name[match].group, parameter[name[FHIR_RE_RESOURCE]]]
if name[ref_uri_str] begin[:]
variable[ref_uri] assign[=] call[name[URIRef], parameter[name[ref_uri_str]]]
call[name[self].add, parameter[name[subj], name[FHIR].link, name[ref_uri]]]
call[name[self].add, parameter[name[ref_uri], name[RDF].type, call[name[FHIR]][name[res_type]]]] | keyword[def] identifier[add_reference] ( identifier[self] , identifier[subj] : identifier[Node] , identifier[val] : identifier[str] )-> keyword[None] :
literal[string]
identifier[match] = identifier[FHIR_RESOURCE_RE] . identifier[match] ( identifier[val] )
identifier[ref_uri_str] = identifier[res_type] = keyword[None]
keyword[if] identifier[match] :
identifier[ref_uri_str] = identifier[val] keyword[if] identifier[match] . identifier[group] ( identifier[FHIR_RE_BASE] ) keyword[else] ( identifier[self] . identifier[_base_uri] + identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[val] ))
identifier[res_type] = identifier[match] . identifier[group] ( identifier[FHIR_RE_RESOURCE] )
keyword[elif] literal[string] keyword[in] identifier[val] :
identifier[ref_uri_str] = identifier[val]
identifier[res_type] = literal[string]
keyword[elif] identifier[self] . identifier[_base_uri] keyword[and] keyword[not] identifier[val] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[val] . identifier[startswith] ( literal[string] ):
identifier[ref_uri_str] = identifier[self] . identifier[_base_uri] + identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[val] )
identifier[res_type] = identifier[val] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ] keyword[if] literal[string] keyword[in] identifier[val] keyword[else] literal[string]
keyword[if] identifier[ref_uri_str] :
identifier[ref_uri] = identifier[URIRef] ( identifier[ref_uri_str] )
identifier[self] . identifier[add] ( identifier[subj] , identifier[FHIR] . identifier[link] , identifier[ref_uri] )
identifier[self] . identifier[add] ( identifier[ref_uri] , identifier[RDF] . identifier[type] , identifier[FHIR] [ identifier[res_type] ]) | def add_reference(self, subj: Node, val: str) -> None:
"""
Add a fhir:link and RDF type arc if it can be determined
:param subj: reference subject
:param val: reference value
"""
match = FHIR_RESOURCE_RE.match(val)
ref_uri_str = res_type = None
if match:
ref_uri_str = val if match.group(FHIR_RE_BASE) else self._base_uri + urllib.parse.quote(val)
res_type = match.group(FHIR_RE_RESOURCE) # depends on [control=['if'], data=[]]
elif '://' in val:
ref_uri_str = val
res_type = 'Resource' # depends on [control=['if'], data=['val']]
elif self._base_uri and (not val.startswith('#')) and (not val.startswith('/')):
ref_uri_str = self._base_uri + urllib.parse.quote(val)
res_type = val.split('/', 1)[0] if '/' in val else 'Resource' # depends on [control=['if'], data=[]]
if ref_uri_str:
ref_uri = URIRef(ref_uri_str)
self.add(subj, FHIR.link, ref_uri)
self.add(ref_uri, RDF.type, FHIR[res_type]) # depends on [control=['if'], data=[]] |
def visit_file(self, item, parent):
"""
Add file to the report if it was sent.
:param item: LocalFile file to possibly add.
:param parent: LocalFolder/LocalContent not used here
"""
if item.sent_to_remote:
self._add_report_item(item.path, item.remote_id, item.size, item.get_hash_value()) | def function[visit_file, parameter[self, item, parent]]:
constant[
Add file to the report if it was sent.
:param item: LocalFile file to possibly add.
:param parent: LocalFolder/LocalContent not used here
]
if name[item].sent_to_remote begin[:]
call[name[self]._add_report_item, parameter[name[item].path, name[item].remote_id, name[item].size, call[name[item].get_hash_value, parameter[]]]] | keyword[def] identifier[visit_file] ( identifier[self] , identifier[item] , identifier[parent] ):
literal[string]
keyword[if] identifier[item] . identifier[sent_to_remote] :
identifier[self] . identifier[_add_report_item] ( identifier[item] . identifier[path] , identifier[item] . identifier[remote_id] , identifier[item] . identifier[size] , identifier[item] . identifier[get_hash_value] ()) | def visit_file(self, item, parent):
"""
Add file to the report if it was sent.
:param item: LocalFile file to possibly add.
:param parent: LocalFolder/LocalContent not used here
"""
if item.sent_to_remote:
self._add_report_item(item.path, item.remote_id, item.size, item.get_hash_value()) # depends on [control=['if'], data=[]] |
def reset(self, **kwargs):
"""Resets the aug_state and the LMEngine"""
self.aug_state.reset()
super(LMAugmentedState, self).reset(**kwargs) | def function[reset, parameter[self]]:
constant[Resets the aug_state and the LMEngine]
call[name[self].aug_state.reset, parameter[]]
call[call[name[super], parameter[name[LMAugmentedState], name[self]]].reset, parameter[]] | keyword[def] identifier[reset] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[aug_state] . identifier[reset] ()
identifier[super] ( identifier[LMAugmentedState] , identifier[self] ). identifier[reset] (** identifier[kwargs] ) | def reset(self, **kwargs):
"""Resets the aug_state and the LMEngine"""
self.aug_state.reset()
super(LMAugmentedState, self).reset(**kwargs) |
def digest(dna, restriction_enzyme):
'''Restriction endonuclease reaction.
:param dna: DNA template to digest.
:type dna: coral.DNA
:param restriction_site: Restriction site to use.
:type restriction_site: RestrictionSite
:returns: list of digested DNA fragments.
:rtype: coral.DNA list
'''
pattern = restriction_enzyme.recognition_site
located = dna.locate(pattern)
if not located[0] and not located[1]:
return [dna]
# Bottom strand indices are relative to the bottom strand 5' end.
# Convert to same type as top strand
pattern_len = len(pattern)
r_indices = [len(dna) - index - pattern_len for index in
located[1]]
# If sequence is palindrome, remove redundant results
if pattern.is_palindrome():
r_indices = [index for index in r_indices if index not in
located[0]]
# Flatten cut site indices
cut_sites = sorted(located[0] + r_indices)
# Go through each cut site starting at highest one
# Cut remaining template once, generating remaining + new
current = [dna]
for cut_site in cut_sites[::-1]:
new = _cut(current, cut_site, restriction_enzyme)
current.append(new[1])
current.append(new[0])
current.reverse()
# Combine first and last back together if digest was circular
if dna.circular:
current[0] = current.pop() + current[0]
return current | def function[digest, parameter[dna, restriction_enzyme]]:
constant[Restriction endonuclease reaction.
:param dna: DNA template to digest.
:type dna: coral.DNA
:param restriction_site: Restriction site to use.
:type restriction_site: RestrictionSite
:returns: list of digested DNA fragments.
:rtype: coral.DNA list
]
variable[pattern] assign[=] name[restriction_enzyme].recognition_site
variable[located] assign[=] call[name[dna].locate, parameter[name[pattern]]]
if <ast.BoolOp object at 0x7da1b06df250> begin[:]
return[list[[<ast.Name object at 0x7da1b06dd900>]]]
variable[pattern_len] assign[=] call[name[len], parameter[name[pattern]]]
variable[r_indices] assign[=] <ast.ListComp object at 0x7da1b06df610>
if call[name[pattern].is_palindrome, parameter[]] begin[:]
variable[r_indices] assign[=] <ast.ListComp object at 0x7da1b06dc0a0>
variable[cut_sites] assign[=] call[name[sorted], parameter[binary_operation[call[name[located]][constant[0]] + name[r_indices]]]]
variable[current] assign[=] list[[<ast.Name object at 0x7da1b06df8e0>]]
for taget[name[cut_site]] in starred[call[name[cut_sites]][<ast.Slice object at 0x7da1b06dc670>]] begin[:]
variable[new] assign[=] call[name[_cut], parameter[name[current], name[cut_site], name[restriction_enzyme]]]
call[name[current].append, parameter[call[name[new]][constant[1]]]]
call[name[current].append, parameter[call[name[new]][constant[0]]]]
call[name[current].reverse, parameter[]]
if name[dna].circular begin[:]
call[name[current]][constant[0]] assign[=] binary_operation[call[name[current].pop, parameter[]] + call[name[current]][constant[0]]]
return[name[current]] | keyword[def] identifier[digest] ( identifier[dna] , identifier[restriction_enzyme] ):
literal[string]
identifier[pattern] = identifier[restriction_enzyme] . identifier[recognition_site]
identifier[located] = identifier[dna] . identifier[locate] ( identifier[pattern] )
keyword[if] keyword[not] identifier[located] [ literal[int] ] keyword[and] keyword[not] identifier[located] [ literal[int] ]:
keyword[return] [ identifier[dna] ]
identifier[pattern_len] = identifier[len] ( identifier[pattern] )
identifier[r_indices] =[ identifier[len] ( identifier[dna] )- identifier[index] - identifier[pattern_len] keyword[for] identifier[index] keyword[in]
identifier[located] [ literal[int] ]]
keyword[if] identifier[pattern] . identifier[is_palindrome] ():
identifier[r_indices] =[ identifier[index] keyword[for] identifier[index] keyword[in] identifier[r_indices] keyword[if] identifier[index] keyword[not] keyword[in]
identifier[located] [ literal[int] ]]
identifier[cut_sites] = identifier[sorted] ( identifier[located] [ literal[int] ]+ identifier[r_indices] )
identifier[current] =[ identifier[dna] ]
keyword[for] identifier[cut_site] keyword[in] identifier[cut_sites] [::- literal[int] ]:
identifier[new] = identifier[_cut] ( identifier[current] , identifier[cut_site] , identifier[restriction_enzyme] )
identifier[current] . identifier[append] ( identifier[new] [ literal[int] ])
identifier[current] . identifier[append] ( identifier[new] [ literal[int] ])
identifier[current] . identifier[reverse] ()
keyword[if] identifier[dna] . identifier[circular] :
identifier[current] [ literal[int] ]= identifier[current] . identifier[pop] ()+ identifier[current] [ literal[int] ]
keyword[return] identifier[current] | def digest(dna, restriction_enzyme):
"""Restriction endonuclease reaction.
:param dna: DNA template to digest.
:type dna: coral.DNA
:param restriction_site: Restriction site to use.
:type restriction_site: RestrictionSite
:returns: list of digested DNA fragments.
:rtype: coral.DNA list
"""
pattern = restriction_enzyme.recognition_site
located = dna.locate(pattern)
if not located[0] and (not located[1]):
return [dna] # depends on [control=['if'], data=[]]
# Bottom strand indices are relative to the bottom strand 5' end.
# Convert to same type as top strand
pattern_len = len(pattern)
r_indices = [len(dna) - index - pattern_len for index in located[1]]
# If sequence is palindrome, remove redundant results
if pattern.is_palindrome():
r_indices = [index for index in r_indices if index not in located[0]] # depends on [control=['if'], data=[]]
# Flatten cut site indices
cut_sites = sorted(located[0] + r_indices)
# Go through each cut site starting at highest one
# Cut remaining template once, generating remaining + new
current = [dna]
for cut_site in cut_sites[::-1]:
new = _cut(current, cut_site, restriction_enzyme)
current.append(new[1])
current.append(new[0]) # depends on [control=['for'], data=['cut_site']]
current.reverse()
# Combine first and last back together if digest was circular
if dna.circular:
current[0] = current.pop() + current[0] # depends on [control=['if'], data=[]]
return current |
def ParseZeitgeistEventRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a zeitgeist event row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = ZeitgeistActivityEventData()
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data) | def function[ParseZeitgeistEventRow, parameter[self, parser_mediator, query, row]]:
constant[Parses a zeitgeist event row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
]
variable[query_hash] assign[=] call[name[hash], parameter[name[query]]]
variable[event_data] assign[=] call[name[ZeitgeistActivityEventData], parameter[]]
name[event_data].offset assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[id]]]
name[event_data].query assign[=] name[query]
name[event_data].subject_uri assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[subj_uri]]]
variable[timestamp] assign[=] call[name[self]._GetRowValue, parameter[name[query_hash], name[row], constant[timestamp]]]
variable[date_time] assign[=] call[name[dfdatetime_java_time].JavaTime, parameter[]]
variable[event] assign[=] call[name[time_events].DateTimeValuesEvent, parameter[name[date_time], name[definitions].TIME_DESCRIPTION_UNKNOWN]]
call[name[parser_mediator].ProduceEventWithEventData, parameter[name[event], name[event_data]]] | keyword[def] identifier[ParseZeitgeistEventRow] (
identifier[self] , identifier[parser_mediator] , identifier[query] , identifier[row] ,** identifier[unused_kwargs] ):
literal[string]
identifier[query_hash] = identifier[hash] ( identifier[query] )
identifier[event_data] = identifier[ZeitgeistActivityEventData] ()
identifier[event_data] . identifier[offset] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[event_data] . identifier[query] = identifier[query]
identifier[event_data] . identifier[subject_uri] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[timestamp] = identifier[self] . identifier[_GetRowValue] ( identifier[query_hash] , identifier[row] , literal[string] )
identifier[date_time] = identifier[dfdatetime_java_time] . identifier[JavaTime] ( identifier[timestamp] = identifier[timestamp] )
identifier[event] = identifier[time_events] . identifier[DateTimeValuesEvent] (
identifier[date_time] , identifier[definitions] . identifier[TIME_DESCRIPTION_UNKNOWN] )
identifier[parser_mediator] . identifier[ProduceEventWithEventData] ( identifier[event] , identifier[event_data] ) | def ParseZeitgeistEventRow(self, parser_mediator, query, row, **unused_kwargs):
"""Parses a zeitgeist event row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = ZeitgeistActivityEventData()
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.subject_uri = self._GetRowValue(query_hash, row, 'subj_uri')
timestamp = self._GetRowValue(query_hash, row, 'timestamp')
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def ng(self, wavelength):
'''
The group index with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the group
index will be evaluated at.
Returns:
float, list: The group index at the target wavelength(s).
'''
return self.n(wavelength) - (wavelength*1.e-9)*self.nDer1(wavelength) | def function[ng, parameter[self, wavelength]]:
constant[
The group index with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the group
index will be evaluated at.
Returns:
float, list: The group index at the target wavelength(s).
]
return[binary_operation[call[name[self].n, parameter[name[wavelength]]] - binary_operation[binary_operation[name[wavelength] * constant[1e-09]] * call[name[self].nDer1, parameter[name[wavelength]]]]]] | keyword[def] identifier[ng] ( identifier[self] , identifier[wavelength] ):
literal[string]
keyword[return] identifier[self] . identifier[n] ( identifier[wavelength] )-( identifier[wavelength] * literal[int] )* identifier[self] . identifier[nDer1] ( identifier[wavelength] ) | def ng(self, wavelength):
"""
The group index with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the group
index will be evaluated at.
Returns:
float, list: The group index at the target wavelength(s).
"""
return self.n(wavelength) - wavelength * 1e-09 * self.nDer1(wavelength) |
def sum(self, default=None):
"""
Calculate the sum of all the values in the times series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
"""
return numpy.asscalar(numpy.sum(self.values)) if self.values else default | def function[sum, parameter[self, default]]:
constant[
Calculate the sum of all the values in the times series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
]
return[<ast.IfExp object at 0x7da1b23448e0>] | keyword[def] identifier[sum] ( identifier[self] , identifier[default] = keyword[None] ):
literal[string]
keyword[return] identifier[numpy] . identifier[asscalar] ( identifier[numpy] . identifier[sum] ( identifier[self] . identifier[values] )) keyword[if] identifier[self] . identifier[values] keyword[else] identifier[default] | def sum(self, default=None):
"""
Calculate the sum of all the values in the times series.
:param default: Value to return as a default should the calculation not be possible.
:return: Float representing the sum or `None`.
"""
return numpy.asscalar(numpy.sum(self.values)) if self.values else default |
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront. Requires the M2Crypto
library be installed.
"""
try:
from M2Crypto import EVP
except ImportError:
raise NotImplementedError("Boto depends on the python M2Crypto "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# if private_key_file is a file object read the key string from there
if isinstance(private_key_file, file):
private_key_string = private_key_file.read()
# Now load key and calculate signature
if private_key_string:
key = EVP.load_key_string(private_key_string)
else:
key = EVP.load_key(private_key_file)
key.reset_context(md='sha1')
key.sign_init()
key.sign_update(str(message))
signature = key.sign_final()
return signature | def function[_sign_string, parameter[message, private_key_file, private_key_string]]:
constant[
Signs a string for use with Amazon CloudFront. Requires the M2Crypto
library be installed.
]
<ast.Try object at 0x7da1b26176d0>
if <ast.BoolOp object at 0x7da1b2615600> begin[:]
<ast.Raise object at 0x7da1b26145e0>
if <ast.BoolOp object at 0x7da1b2617190> begin[:]
<ast.Raise object at 0x7da1b2614670>
if call[name[isinstance], parameter[name[private_key_file], name[file]]] begin[:]
variable[private_key_string] assign[=] call[name[private_key_file].read, parameter[]]
if name[private_key_string] begin[:]
variable[key] assign[=] call[name[EVP].load_key_string, parameter[name[private_key_string]]]
call[name[key].reset_context, parameter[]]
call[name[key].sign_init, parameter[]]
call[name[key].sign_update, parameter[call[name[str], parameter[name[message]]]]]
variable[signature] assign[=] call[name[key].sign_final, parameter[]]
return[name[signature]] | keyword[def] identifier[_sign_string] ( identifier[message] , identifier[private_key_file] = keyword[None] , identifier[private_key_string] = keyword[None] ):
literal[string]
keyword[try] :
keyword[from] identifier[M2Crypto] keyword[import] identifier[EVP]
keyword[except] identifier[ImportError] :
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[private_key_file] keyword[and] identifier[private_key_string] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[private_key_file] keyword[and] keyword[not] identifier[private_key_string] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[private_key_file] , identifier[file] ):
identifier[private_key_string] = identifier[private_key_file] . identifier[read] ()
keyword[if] identifier[private_key_string] :
identifier[key] = identifier[EVP] . identifier[load_key_string] ( identifier[private_key_string] )
keyword[else] :
identifier[key] = identifier[EVP] . identifier[load_key] ( identifier[private_key_file] )
identifier[key] . identifier[reset_context] ( identifier[md] = literal[string] )
identifier[key] . identifier[sign_init] ()
identifier[key] . identifier[sign_update] ( identifier[str] ( identifier[message] ))
identifier[signature] = identifier[key] . identifier[sign_final] ()
keyword[return] identifier[signature] | def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront. Requires the M2Crypto
library be installed.
"""
try:
from M2Crypto import EVP # depends on [control=['try'], data=[]]
except ImportError:
raise NotImplementedError('Boto depends on the python M2Crypto library to generate signed URLs for CloudFront') # depends on [control=['except'], data=[]]
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError('Only specify the private_key_file or the private_key_string not both') # depends on [control=['if'], data=[]]
if not private_key_file and (not private_key_string):
raise ValueError('You must specify one of private_key_file or private_key_string') # depends on [control=['if'], data=[]]
# if private_key_file is a file object read the key string from there
if isinstance(private_key_file, file):
private_key_string = private_key_file.read() # depends on [control=['if'], data=[]]
# Now load key and calculate signature
if private_key_string:
key = EVP.load_key_string(private_key_string) # depends on [control=['if'], data=[]]
else:
key = EVP.load_key(private_key_file)
key.reset_context(md='sha1')
key.sign_init()
key.sign_update(str(message))
signature = key.sign_final()
return signature |
def fs_str(string):
"""Encodes a string into the proper filesystem encoding
Borrowed from pip-tools
"""
if isinstance(string, str):
return string
assert not isinstance(string, bytes)
return string.encode(_fs_encoding) | def function[fs_str, parameter[string]]:
constant[Encodes a string into the proper filesystem encoding
Borrowed from pip-tools
]
if call[name[isinstance], parameter[name[string], name[str]]] begin[:]
return[name[string]]
assert[<ast.UnaryOp object at 0x7da18f00ebc0>]
return[call[name[string].encode, parameter[name[_fs_encoding]]]] | keyword[def] identifier[fs_str] ( identifier[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[string] , identifier[str] ):
keyword[return] identifier[string]
keyword[assert] keyword[not] identifier[isinstance] ( identifier[string] , identifier[bytes] )
keyword[return] identifier[string] . identifier[encode] ( identifier[_fs_encoding] ) | def fs_str(string):
"""Encodes a string into the proper filesystem encoding
Borrowed from pip-tools
"""
if isinstance(string, str):
return string # depends on [control=['if'], data=[]]
assert not isinstance(string, bytes)
return string.encode(_fs_encoding) |
def slicenet_params1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 1024
hparams.hidden_size = 768
hparams.dropout = 0.5
hparams.symbol_dropout = 0.2
hparams.label_smoothing = 0.1
hparams.clip_grad_norm = 2.0
hparams.num_hidden_layers = 4
hparams.kernel_height = 3
hparams.kernel_width = 1
hparams.norm_type = "layer"
hparams.learning_rate_decay_scheme = "exp"
hparams.learning_rate = 0.05
hparams.learning_rate_warmup_steps = 3000
hparams.initializer_gain = 1.0
hparams.weight_decay = 3.0
hparams.num_sampled_classes = 0
hparams.sampling_method = "argmax"
hparams.optimizer_adam_epsilon = 1e-6
hparams.optimizer_adam_beta1 = 0.85
hparams.optimizer_adam_beta2 = 0.997
hparams.add_hparam("large_kernel_size", 15) # New ones are added like this.
hparams.add_hparam("separability", -2)
# A dilation scheme, one of _DILATION_SCHEMES.
hparams.add_hparam("dilation_scheme", "1.1.1.1")
# A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size.
hparams.add_hparam("kernel_scheme", "3.7.15.31")
hparams.add_hparam("audio_compression", 8)
# attention-related flags
hparams.add_hparam("attention_type", "simple")
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("sim_loss_mult", 0.0) # Try 10.0 for experiments.
hparams.add_hparam("attention_dropout", 0.2)
hparams.shared_embedding_and_softmax_weights = True
return hparams | def function[slicenet_params1, parameter[]]:
constant[Set of hyperparameters.]
variable[hparams] assign[=] call[name[common_hparams].basic_params1, parameter[]]
name[hparams].batch_size assign[=] constant[1024]
name[hparams].hidden_size assign[=] constant[768]
name[hparams].dropout assign[=] constant[0.5]
name[hparams].symbol_dropout assign[=] constant[0.2]
name[hparams].label_smoothing assign[=] constant[0.1]
name[hparams].clip_grad_norm assign[=] constant[2.0]
name[hparams].num_hidden_layers assign[=] constant[4]
name[hparams].kernel_height assign[=] constant[3]
name[hparams].kernel_width assign[=] constant[1]
name[hparams].norm_type assign[=] constant[layer]
name[hparams].learning_rate_decay_scheme assign[=] constant[exp]
name[hparams].learning_rate assign[=] constant[0.05]
name[hparams].learning_rate_warmup_steps assign[=] constant[3000]
name[hparams].initializer_gain assign[=] constant[1.0]
name[hparams].weight_decay assign[=] constant[3.0]
name[hparams].num_sampled_classes assign[=] constant[0]
name[hparams].sampling_method assign[=] constant[argmax]
name[hparams].optimizer_adam_epsilon assign[=] constant[1e-06]
name[hparams].optimizer_adam_beta1 assign[=] constant[0.85]
name[hparams].optimizer_adam_beta2 assign[=] constant[0.997]
call[name[hparams].add_hparam, parameter[constant[large_kernel_size], constant[15]]]
call[name[hparams].add_hparam, parameter[constant[separability], <ast.UnaryOp object at 0x7da1b2346cb0>]]
call[name[hparams].add_hparam, parameter[constant[dilation_scheme], constant[1.1.1.1]]]
call[name[hparams].add_hparam, parameter[constant[kernel_scheme], constant[3.7.15.31]]]
call[name[hparams].add_hparam, parameter[constant[audio_compression], constant[8]]]
call[name[hparams].add_hparam, parameter[constant[attention_type], constant[simple]]]
call[name[hparams].add_hparam, parameter[constant[num_heads], constant[8]]]
call[name[hparams].add_hparam, parameter[constant[attention_key_channels], constant[0]]]
call[name[hparams].add_hparam, parameter[constant[attention_value_channels], constant[0]]]
call[name[hparams].add_hparam, parameter[constant[sim_loss_mult], constant[0.0]]]
call[name[hparams].add_hparam, parameter[constant[attention_dropout], constant[0.2]]]
name[hparams].shared_embedding_and_softmax_weights assign[=] constant[True]
return[name[hparams]] | keyword[def] identifier[slicenet_params1] ():
literal[string]
identifier[hparams] = identifier[common_hparams] . identifier[basic_params1] ()
identifier[hparams] . identifier[batch_size] = literal[int]
identifier[hparams] . identifier[hidden_size] = literal[int]
identifier[hparams] . identifier[dropout] = literal[int]
identifier[hparams] . identifier[symbol_dropout] = literal[int]
identifier[hparams] . identifier[label_smoothing] = literal[int]
identifier[hparams] . identifier[clip_grad_norm] = literal[int]
identifier[hparams] . identifier[num_hidden_layers] = literal[int]
identifier[hparams] . identifier[kernel_height] = literal[int]
identifier[hparams] . identifier[kernel_width] = literal[int]
identifier[hparams] . identifier[norm_type] = literal[string]
identifier[hparams] . identifier[learning_rate_decay_scheme] = literal[string]
identifier[hparams] . identifier[learning_rate] = literal[int]
identifier[hparams] . identifier[learning_rate_warmup_steps] = literal[int]
identifier[hparams] . identifier[initializer_gain] = literal[int]
identifier[hparams] . identifier[weight_decay] = literal[int]
identifier[hparams] . identifier[num_sampled_classes] = literal[int]
identifier[hparams] . identifier[sampling_method] = literal[string]
identifier[hparams] . identifier[optimizer_adam_epsilon] = literal[int]
identifier[hparams] . identifier[optimizer_adam_beta1] = literal[int]
identifier[hparams] . identifier[optimizer_adam_beta2] = literal[int]
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] ,- literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[string] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[add_hparam] ( literal[string] , literal[int] )
identifier[hparams] . identifier[shared_embedding_and_softmax_weights] = keyword[True]
keyword[return] identifier[hparams] | def slicenet_params1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 1024
hparams.hidden_size = 768
hparams.dropout = 0.5
hparams.symbol_dropout = 0.2
hparams.label_smoothing = 0.1
hparams.clip_grad_norm = 2.0
hparams.num_hidden_layers = 4
hparams.kernel_height = 3
hparams.kernel_width = 1
hparams.norm_type = 'layer'
hparams.learning_rate_decay_scheme = 'exp'
hparams.learning_rate = 0.05
hparams.learning_rate_warmup_steps = 3000
hparams.initializer_gain = 1.0
hparams.weight_decay = 3.0
hparams.num_sampled_classes = 0
hparams.sampling_method = 'argmax'
hparams.optimizer_adam_epsilon = 1e-06
hparams.optimizer_adam_beta1 = 0.85
hparams.optimizer_adam_beta2 = 0.997
hparams.add_hparam('large_kernel_size', 15) # New ones are added like this.
hparams.add_hparam('separability', -2)
# A dilation scheme, one of _DILATION_SCHEMES.
hparams.add_hparam('dilation_scheme', '1.1.1.1')
# A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size.
hparams.add_hparam('kernel_scheme', '3.7.15.31')
hparams.add_hparam('audio_compression', 8)
# attention-related flags
hparams.add_hparam('attention_type', 'simple')
hparams.add_hparam('num_heads', 8)
hparams.add_hparam('attention_key_channels', 0)
hparams.add_hparam('attention_value_channels', 0)
hparams.add_hparam('sim_loss_mult', 0.0) # Try 10.0 for experiments.
hparams.add_hparam('attention_dropout', 0.2)
hparams.shared_embedding_and_softmax_weights = True
return hparams |
def _upload(self, project_id, updating, file_path, language_code=None,
overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [
self.UPDATING_TERMS,
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if updating not in options:
raise POEditorArgsException(
'Updating arg must be in {}'.format(options)
)
options = [
self.UPDATING_TERMS_TRANSLATIONS,
self.UPDATING_TRANSLATIONS
]
if language_code is None and updating in options:
raise POEditorArgsException(
'Language code is required only if updating is '
'terms_translations or translations)'
)
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(
url_path="projects/upload",
id=project_id,
language=language_code,
file=local_file,
updating=updating,
tags=tags,
sync_terms=sync_terms,
overwrite=overwrite,
fuzzy_trigger=fuzzy_trigger
)
return data['result'] | def function[_upload, parameter[self, project_id, updating, file_path, language_code, overwrite, sync_terms, tags, fuzzy_trigger]]:
constant[
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
]
variable[options] assign[=] list[[<ast.Attribute object at 0x7da2041d8730>, <ast.Attribute object at 0x7da2041db820>, <ast.Attribute object at 0x7da2041dbd00>]]
if compare[name[updating] <ast.NotIn object at 0x7da2590d7190> name[options]] begin[:]
<ast.Raise object at 0x7da2041d81f0>
variable[options] assign[=] list[[<ast.Attribute object at 0x7da2041da9b0>, <ast.Attribute object at 0x7da2041db460>]]
if <ast.BoolOp object at 0x7da2041da860> begin[:]
<ast.Raise object at 0x7da2041d99c0>
if compare[name[updating] equal[==] name[self].UPDATING_TRANSLATIONS] begin[:]
variable[tags] assign[=] constant[None]
variable[sync_terms] assign[=] constant[None]
variable[tags] assign[=] <ast.BoolOp object at 0x7da2041d98d0>
variable[language_code] assign[=] <ast.BoolOp object at 0x7da2041d99f0>
variable[sync_terms] assign[=] <ast.IfExp object at 0x7da2041db010>
variable[overwrite] assign[=] <ast.IfExp object at 0x7da2041da410>
variable[fuzzy_trigger] assign[=] <ast.IfExp object at 0x7da2041dabf0>
variable[project_id] assign[=] call[name[str], parameter[name[project_id]]]
with call[name[open], parameter[name[file_path], constant[r+b]]] begin[:]
variable[data] assign[=] call[name[self]._run, parameter[]]
return[call[name[data]][constant[result]]] | keyword[def] identifier[_upload] ( identifier[self] , identifier[project_id] , identifier[updating] , identifier[file_path] , identifier[language_code] = keyword[None] ,
identifier[overwrite] = keyword[False] , identifier[sync_terms] = keyword[False] , identifier[tags] = keyword[None] , identifier[fuzzy_trigger] = keyword[None] ):
literal[string]
identifier[options] =[
identifier[self] . identifier[UPDATING_TERMS] ,
identifier[self] . identifier[UPDATING_TERMS_TRANSLATIONS] ,
identifier[self] . identifier[UPDATING_TRANSLATIONS]
]
keyword[if] identifier[updating] keyword[not] keyword[in] identifier[options] :
keyword[raise] identifier[POEditorArgsException] (
literal[string] . identifier[format] ( identifier[options] )
)
identifier[options] =[
identifier[self] . identifier[UPDATING_TERMS_TRANSLATIONS] ,
identifier[self] . identifier[UPDATING_TRANSLATIONS]
]
keyword[if] identifier[language_code] keyword[is] keyword[None] keyword[and] identifier[updating] keyword[in] identifier[options] :
keyword[raise] identifier[POEditorArgsException] (
literal[string]
literal[string]
)
keyword[if] identifier[updating] == identifier[self] . identifier[UPDATING_TRANSLATIONS] :
identifier[tags] = keyword[None]
identifier[sync_terms] = keyword[None]
identifier[tags] = identifier[tags] keyword[or] literal[string]
identifier[language_code] = identifier[language_code] keyword[or] literal[string]
identifier[sync_terms] = literal[string] keyword[if] identifier[sync_terms] keyword[else] literal[string]
identifier[overwrite] = literal[string] keyword[if] identifier[overwrite] keyword[else] literal[string]
identifier[fuzzy_trigger] = literal[string] keyword[if] identifier[fuzzy_trigger] keyword[else] literal[string]
identifier[project_id] = identifier[str] ( identifier[project_id] )
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[local_file] :
identifier[data] = identifier[self] . identifier[_run] (
identifier[url_path] = literal[string] ,
identifier[id] = identifier[project_id] ,
identifier[language] = identifier[language_code] ,
identifier[file] = identifier[local_file] ,
identifier[updating] = identifier[updating] ,
identifier[tags] = identifier[tags] ,
identifier[sync_terms] = identifier[sync_terms] ,
identifier[overwrite] = identifier[overwrite] ,
identifier[fuzzy_trigger] = identifier[fuzzy_trigger]
)
keyword[return] identifier[data] [ literal[string] ] | def _upload(self, project_id, updating, file_path, language_code=None, overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):
"""
Internal: updates terms / translations
File uploads are limited to one every 30 seconds
"""
options = [self.UPDATING_TERMS, self.UPDATING_TERMS_TRANSLATIONS, self.UPDATING_TRANSLATIONS]
if updating not in options:
raise POEditorArgsException('Updating arg must be in {}'.format(options)) # depends on [control=['if'], data=['options']]
options = [self.UPDATING_TERMS_TRANSLATIONS, self.UPDATING_TRANSLATIONS]
if language_code is None and updating in options:
raise POEditorArgsException('Language code is required only if updating is terms_translations or translations)') # depends on [control=['if'], data=[]]
if updating == self.UPDATING_TRANSLATIONS:
tags = None
sync_terms = None # depends on [control=['if'], data=[]]
# Special content type:
tags = tags or ''
language_code = language_code or ''
sync_terms = '1' if sync_terms else '0'
overwrite = '1' if overwrite else '0'
fuzzy_trigger = '1' if fuzzy_trigger else '0'
project_id = str(project_id)
with open(file_path, 'r+b') as local_file:
data = self._run(url_path='projects/upload', id=project_id, language=language_code, file=local_file, updating=updating, tags=tags, sync_terms=sync_terms, overwrite=overwrite, fuzzy_trigger=fuzzy_trigger) # depends on [control=['with'], data=['local_file']]
return data['result'] |
def corrcoef(time, crossf, integration_window=0.):
"""
Calculate the correlation coefficient for given auto- and crosscorrelation
functions. Standard settings yield the zero lag correlation coefficient.
Setting integration_window > 0 yields the correlation coefficient of
integrated auto- and crosscorrelation functions. The correlation coefficient
between a zero signal with any other signal is defined as 0.
Parameters
----------
time : numpy.ndarray
1 dim array of times corresponding to signal.
crossf : numpy.ndarray
Crosscorrelation functions, 1st axis first unit, 2nd axis second unit,
3rd axis times.
integration_window: float
Size of the integration window.
Returns
-------
cc : numpy.ndarray
2 dim array of correlation coefficient between two units.
"""
N = len(crossf)
cc = np.zeros(np.shape(crossf)[:-1])
tbin = abs(time[1] - time[0])
lim = int(integration_window / tbin)
if len(time)%2 == 0:
mid = len(time)/2-1
else:
mid = np.floor(len(time)/2.)
for i in range(N):
ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1])
offset_autoi = np.mean(crossf[i,i][:mid-1])
for j in range(N):
cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1])
offset_cross = np.mean(crossf[i,j][:mid-1])
aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1])
offset_autoj = np.mean(crossf[j,j][:mid-1])
if ai > 0. and aj > 0.:
cc[i, j] = (cij-offset_cross) / np.sqrt((ai-offset_autoi) * \
(aj-offset_autoj))
else:
cc[i, j] = 0.
return cc | def function[corrcoef, parameter[time, crossf, integration_window]]:
constant[
Calculate the correlation coefficient for given auto- and crosscorrelation
functions. Standard settings yield the zero lag correlation coefficient.
Setting integration_window > 0 yields the correlation coefficient of
integrated auto- and crosscorrelation functions. The correlation coefficient
between a zero signal with any other signal is defined as 0.
Parameters
----------
time : numpy.ndarray
1 dim array of times corresponding to signal.
crossf : numpy.ndarray
Crosscorrelation functions, 1st axis first unit, 2nd axis second unit,
3rd axis times.
integration_window: float
Size of the integration window.
Returns
-------
cc : numpy.ndarray
2 dim array of correlation coefficient between two units.
]
variable[N] assign[=] call[name[len], parameter[name[crossf]]]
variable[cc] assign[=] call[name[np].zeros, parameter[call[call[name[np].shape, parameter[name[crossf]]]][<ast.Slice object at 0x7da1b0b57010>]]]
variable[tbin] assign[=] call[name[abs], parameter[binary_operation[call[name[time]][constant[1]] - call[name[time]][constant[0]]]]]
variable[lim] assign[=] call[name[int], parameter[binary_operation[name[integration_window] / name[tbin]]]]
if compare[binary_operation[call[name[len], parameter[name[time]]] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
variable[mid] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[time]]] / constant[2]] - constant[1]]
for taget[name[i]] in starred[call[name[range], parameter[name[N]]]] begin[:]
variable[ai] assign[=] call[name[np].sum, parameter[call[call[name[crossf]][tuple[[<ast.Name object at 0x7da1b0b560b0>, <ast.Name object at 0x7da1b0b553f0>]]]][<ast.Slice object at 0x7da1b0b56080>]]]
variable[offset_autoi] assign[=] call[name[np].mean, parameter[call[call[name[crossf]][tuple[[<ast.Name object at 0x7da1b0b574f0>, <ast.Name object at 0x7da1b0b54be0>]]]][<ast.Slice object at 0x7da1b0b54c10>]]]
for taget[name[j]] in starred[call[name[range], parameter[name[N]]]] begin[:]
variable[cij] assign[=] call[name[np].sum, parameter[call[call[name[crossf]][tuple[[<ast.Name object at 0x7da1b0b546a0>, <ast.Name object at 0x7da1b0b55f90>]]]][<ast.Slice object at 0x7da1b0b54760>]]]
variable[offset_cross] assign[=] call[name[np].mean, parameter[call[call[name[crossf]][tuple[[<ast.Name object at 0x7da1b0b57610>, <ast.Name object at 0x7da1b0b55510>]]]][<ast.Slice object at 0x7da1b0b551b0>]]]
variable[aj] assign[=] call[name[np].sum, parameter[call[call[name[crossf]][tuple[[<ast.Name object at 0x7da1b0b545e0>, <ast.Name object at 0x7da1b0b54af0>]]]][<ast.Slice object at 0x7da1b0b56e00>]]]
variable[offset_autoj] assign[=] call[name[np].mean, parameter[call[call[name[crossf]][tuple[[<ast.Name object at 0x7da1b0b56ec0>, <ast.Name object at 0x7da1b0b54190>]]]][<ast.Slice object at 0x7da1b0b56f20>]]]
if <ast.BoolOp object at 0x7da1b0b569e0> begin[:]
call[name[cc]][tuple[[<ast.Name object at 0x7da1b0b56e90>, <ast.Name object at 0x7da1b0b55600>]]] assign[=] binary_operation[binary_operation[name[cij] - name[offset_cross]] / call[name[np].sqrt, parameter[binary_operation[binary_operation[name[ai] - name[offset_autoi]] * binary_operation[name[aj] - name[offset_autoj]]]]]]
return[name[cc]] | keyword[def] identifier[corrcoef] ( identifier[time] , identifier[crossf] , identifier[integration_window] = literal[int] ):
literal[string]
identifier[N] = identifier[len] ( identifier[crossf] )
identifier[cc] = identifier[np] . identifier[zeros] ( identifier[np] . identifier[shape] ( identifier[crossf] )[:- literal[int] ])
identifier[tbin] = identifier[abs] ( identifier[time] [ literal[int] ]- identifier[time] [ literal[int] ])
identifier[lim] = identifier[int] ( identifier[integration_window] / identifier[tbin] )
keyword[if] identifier[len] ( identifier[time] )% literal[int] == literal[int] :
identifier[mid] = identifier[len] ( identifier[time] )/ literal[int] - literal[int]
keyword[else] :
identifier[mid] = identifier[np] . identifier[floor] ( identifier[len] ( identifier[time] )/ literal[int] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[N] ):
identifier[ai] = identifier[np] . identifier[sum] ( identifier[crossf] [ identifier[i] , identifier[i] ][ identifier[mid] - identifier[lim] : identifier[mid] + identifier[lim] + literal[int] ])
identifier[offset_autoi] = identifier[np] . identifier[mean] ( identifier[crossf] [ identifier[i] , identifier[i] ][: identifier[mid] - literal[int] ])
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[N] ):
identifier[cij] = identifier[np] . identifier[sum] ( identifier[crossf] [ identifier[i] , identifier[j] ][ identifier[mid] - identifier[lim] : identifier[mid] + identifier[lim] + literal[int] ])
identifier[offset_cross] = identifier[np] . identifier[mean] ( identifier[crossf] [ identifier[i] , identifier[j] ][: identifier[mid] - literal[int] ])
identifier[aj] = identifier[np] . identifier[sum] ( identifier[crossf] [ identifier[j] , identifier[j] ][ identifier[mid] - identifier[lim] : identifier[mid] + identifier[lim] + literal[int] ])
identifier[offset_autoj] = identifier[np] . identifier[mean] ( identifier[crossf] [ identifier[j] , identifier[j] ][: identifier[mid] - literal[int] ])
keyword[if] identifier[ai] > literal[int] keyword[and] identifier[aj] > literal[int] :
identifier[cc] [ identifier[i] , identifier[j] ]=( identifier[cij] - identifier[offset_cross] )/ identifier[np] . identifier[sqrt] (( identifier[ai] - identifier[offset_autoi] )*( identifier[aj] - identifier[offset_autoj] ))
keyword[else] :
identifier[cc] [ identifier[i] , identifier[j] ]= literal[int]
keyword[return] identifier[cc] | def corrcoef(time, crossf, integration_window=0.0):
"""
Calculate the correlation coefficient for given auto- and crosscorrelation
functions. Standard settings yield the zero lag correlation coefficient.
Setting integration_window > 0 yields the correlation coefficient of
integrated auto- and crosscorrelation functions. The correlation coefficient
between a zero signal with any other signal is defined as 0.
Parameters
----------
time : numpy.ndarray
1 dim array of times corresponding to signal.
crossf : numpy.ndarray
Crosscorrelation functions, 1st axis first unit, 2nd axis second unit,
3rd axis times.
integration_window: float
Size of the integration window.
Returns
-------
cc : numpy.ndarray
2 dim array of correlation coefficient between two units.
"""
N = len(crossf)
cc = np.zeros(np.shape(crossf)[:-1])
tbin = abs(time[1] - time[0])
lim = int(integration_window / tbin)
if len(time) % 2 == 0:
mid = len(time) / 2 - 1 # depends on [control=['if'], data=[]]
else:
mid = np.floor(len(time) / 2.0)
for i in range(N):
ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1])
offset_autoi = np.mean(crossf[i, i][:mid - 1])
for j in range(N):
cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1])
offset_cross = np.mean(crossf[i, j][:mid - 1])
aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1])
offset_autoj = np.mean(crossf[j, j][:mid - 1])
if ai > 0.0 and aj > 0.0:
cc[i, j] = (cij - offset_cross) / np.sqrt((ai - offset_autoi) * (aj - offset_autoj)) # depends on [control=['if'], data=[]]
else:
cc[i, j] = 0.0 # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
return cc |
def get_remote_button_list(self):
"""
Description:
Get remote button list
Returns an list of all available remote buttons
"""
remote_buttons = []
for key in self.command['remote']:
if self.command['remote'][key] != '':
remote_buttons.append(key)
return remote_buttons | def function[get_remote_button_list, parameter[self]]:
constant[
Description:
Get remote button list
Returns an list of all available remote buttons
]
variable[remote_buttons] assign[=] list[[]]
for taget[name[key]] in starred[call[name[self].command][constant[remote]]] begin[:]
if compare[call[call[name[self].command][constant[remote]]][name[key]] not_equal[!=] constant[]] begin[:]
call[name[remote_buttons].append, parameter[name[key]]]
return[name[remote_buttons]] | keyword[def] identifier[get_remote_button_list] ( identifier[self] ):
literal[string]
identifier[remote_buttons] =[]
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[command] [ literal[string] ]:
keyword[if] identifier[self] . identifier[command] [ literal[string] ][ identifier[key] ]!= literal[string] :
identifier[remote_buttons] . identifier[append] ( identifier[key] )
keyword[return] identifier[remote_buttons] | def get_remote_button_list(self):
"""
Description:
Get remote button list
Returns an list of all available remote buttons
"""
remote_buttons = []
for key in self.command['remote']:
if self.command['remote'][key] != '':
remote_buttons.append(key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']]
return remote_buttons |
def vq_body(x,
codebook_size,
beta=0.25,
decay=0.999,
epsilon=1e-5,
soft_em=False,
num_samples=10,
temperature=None,
do_update=True):
"""Discretize each x into one of codebook_size codes."""
x_shape = common_layers.shape_list(x)
hidden_size = x_shape[-1]
means, ema_means, ema_count = get_vq_codebook(codebook_size, hidden_size)
x = tf.reshape(x, [-1, hidden_size])
x_means_hot, e_loss, distances = vq_nearest_neighbor(
x, means, soft_em=soft_em, num_samples=num_samples,
temperature=temperature)
def loss_with_update():
"""Update the ema variables and return loss triggering the update."""
updated_ema_count = moving_averages.assign_moving_average(
ema_count,
tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]),
axis=0),
decay,
zero_debias=False)
dw = tf.matmul(x_means_hot, x, transpose_a=True)
updated_ema_means = tf.identity(
moving_averages.assign_moving_average(
ema_means, dw, decay, zero_debias=False))
n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
updated_ema_count = (
(updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n)
updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = means.assign(updated_ema_means)
with tf.control_dependencies([update_means]):
return beta * e_loss
# Loss, also do update if requested.
if do_update:
loss = loss_with_update()
else:
loss = tf.cond(do_update, loss_with_update, lambda: beta * e_loss)
d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size])
return d, loss, distances | def function[vq_body, parameter[x, codebook_size, beta, decay, epsilon, soft_em, num_samples, temperature, do_update]]:
constant[Discretize each x into one of codebook_size codes.]
variable[x_shape] assign[=] call[name[common_layers].shape_list, parameter[name[x]]]
variable[hidden_size] assign[=] call[name[x_shape]][<ast.UnaryOp object at 0x7da1b20fbfd0>]
<ast.Tuple object at 0x7da1b20f90f0> assign[=] call[name[get_vq_codebook], parameter[name[codebook_size], name[hidden_size]]]
variable[x] assign[=] call[name[tf].reshape, parameter[name[x], list[[<ast.UnaryOp object at 0x7da1b20891b0>, <ast.Name object at 0x7da1b208afe0>]]]]
<ast.Tuple object at 0x7da1b2089300> assign[=] call[name[vq_nearest_neighbor], parameter[name[x], name[means]]]
def function[loss_with_update, parameter[]]:
constant[Update the ema variables and return loss triggering the update.]
variable[updated_ema_count] assign[=] call[name[moving_averages].assign_moving_average, parameter[name[ema_count], call[name[tf].reduce_sum, parameter[call[name[tf].reshape, parameter[name[x_means_hot]]]]], name[decay]]]
variable[dw] assign[=] call[name[tf].matmul, parameter[name[x_means_hot], name[x]]]
variable[updated_ema_means] assign[=] call[name[tf].identity, parameter[call[name[moving_averages].assign_moving_average, parameter[name[ema_means], name[dw], name[decay]]]]]
variable[n] assign[=] call[name[tf].reduce_sum, parameter[name[updated_ema_count]]]
variable[updated_ema_count] assign[=] binary_operation[binary_operation[binary_operation[name[updated_ema_count] + name[epsilon]] / binary_operation[name[n] + binary_operation[name[codebook_size] * name[epsilon]]]] * name[n]]
<ast.AugAssign object at 0x7da1b20b34f0>
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da1b20b1000>]]]] begin[:]
variable[update_means] assign[=] call[name[means].assign, parameter[name[updated_ema_means]]]
with call[name[tf].control_dependencies, parameter[list[[<ast.Name object at 0x7da1b1f38940>]]]] begin[:]
return[binary_operation[name[beta] * name[e_loss]]]
if name[do_update] begin[:]
variable[loss] assign[=] call[name[loss_with_update], parameter[]]
variable[d] assign[=] call[name[tf].reshape, parameter[name[x_means_hot], binary_operation[call[name[x_shape]][<ast.Slice object at 0x7da1b1f39210>] + list[[<ast.Name object at 0x7da1b1f38850>]]]]]
return[tuple[[<ast.Name object at 0x7da1b1f3beb0>, <ast.Name object at 0x7da1b1f3bb50>, <ast.Name object at 0x7da1b1f39990>]]] | keyword[def] identifier[vq_body] ( identifier[x] ,
identifier[codebook_size] ,
identifier[beta] = literal[int] ,
identifier[decay] = literal[int] ,
identifier[epsilon] = literal[int] ,
identifier[soft_em] = keyword[False] ,
identifier[num_samples] = literal[int] ,
identifier[temperature] = keyword[None] ,
identifier[do_update] = keyword[True] ):
literal[string]
identifier[x_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[x] )
identifier[hidden_size] = identifier[x_shape] [- literal[int] ]
identifier[means] , identifier[ema_means] , identifier[ema_count] = identifier[get_vq_codebook] ( identifier[codebook_size] , identifier[hidden_size] )
identifier[x] = identifier[tf] . identifier[reshape] ( identifier[x] ,[- literal[int] , identifier[hidden_size] ])
identifier[x_means_hot] , identifier[e_loss] , identifier[distances] = identifier[vq_nearest_neighbor] (
identifier[x] , identifier[means] , identifier[soft_em] = identifier[soft_em] , identifier[num_samples] = identifier[num_samples] ,
identifier[temperature] = identifier[temperature] )
keyword[def] identifier[loss_with_update] ():
literal[string]
identifier[updated_ema_count] = identifier[moving_averages] . identifier[assign_moving_average] (
identifier[ema_count] ,
identifier[tf] . identifier[reduce_sum] ( identifier[tf] . identifier[reshape] ( identifier[x_means_hot] , identifier[shape] =[- literal[int] , identifier[codebook_size] ]),
identifier[axis] = literal[int] ),
identifier[decay] ,
identifier[zero_debias] = keyword[False] )
identifier[dw] = identifier[tf] . identifier[matmul] ( identifier[x_means_hot] , identifier[x] , identifier[transpose_a] = keyword[True] )
identifier[updated_ema_means] = identifier[tf] . identifier[identity] (
identifier[moving_averages] . identifier[assign_moving_average] (
identifier[ema_means] , identifier[dw] , identifier[decay] , identifier[zero_debias] = keyword[False] ))
identifier[n] = identifier[tf] . identifier[reduce_sum] ( identifier[updated_ema_count] , identifier[axis] =- literal[int] , identifier[keepdims] = keyword[True] )
identifier[updated_ema_count] =(
( identifier[updated_ema_count] + identifier[epsilon] )/( identifier[n] + identifier[codebook_size] * identifier[epsilon] )* identifier[n] )
identifier[updated_ema_means] /= identifier[tf] . identifier[expand_dims] ( identifier[updated_ema_count] , identifier[axis] =- literal[int] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[e_loss] ]):
identifier[update_means] = identifier[means] . identifier[assign] ( identifier[updated_ema_means] )
keyword[with] identifier[tf] . identifier[control_dependencies] ([ identifier[update_means] ]):
keyword[return] identifier[beta] * identifier[e_loss]
keyword[if] identifier[do_update] :
identifier[loss] = identifier[loss_with_update] ()
keyword[else] :
identifier[loss] = identifier[tf] . identifier[cond] ( identifier[do_update] , identifier[loss_with_update] , keyword[lambda] : identifier[beta] * identifier[e_loss] )
identifier[d] = identifier[tf] . identifier[reshape] ( identifier[x_means_hot] , identifier[x_shape] [:- literal[int] ]+[ identifier[codebook_size] ])
keyword[return] identifier[d] , identifier[loss] , identifier[distances] | def vq_body(x, codebook_size, beta=0.25, decay=0.999, epsilon=1e-05, soft_em=False, num_samples=10, temperature=None, do_update=True):
"""Discretize each x into one of codebook_size codes."""
x_shape = common_layers.shape_list(x)
hidden_size = x_shape[-1]
(means, ema_means, ema_count) = get_vq_codebook(codebook_size, hidden_size)
x = tf.reshape(x, [-1, hidden_size])
(x_means_hot, e_loss, distances) = vq_nearest_neighbor(x, means, soft_em=soft_em, num_samples=num_samples, temperature=temperature)
def loss_with_update():
"""Update the ema variables and return loss triggering the update."""
updated_ema_count = moving_averages.assign_moving_average(ema_count, tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]), axis=0), decay, zero_debias=False)
dw = tf.matmul(x_means_hot, x, transpose_a=True)
updated_ema_means = tf.identity(moving_averages.assign_moving_average(ema_means, dw, decay, zero_debias=False))
n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
updated_ema_count = (updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n
updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = means.assign(updated_ema_means)
with tf.control_dependencies([update_means]):
return beta * e_loss # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
# Loss, also do update if requested.
if do_update:
loss = loss_with_update() # depends on [control=['if'], data=[]]
else:
loss = tf.cond(do_update, loss_with_update, lambda : beta * e_loss)
d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size])
return (d, loss, distances) |
def from_db(cls, db, force=False):
"""Make instance from database.
For performance, this caches the episode types for the database. The
`force` parameter can be used to bypass this.
"""
if force or db not in cls._cache:
cls._cache[db] = cls._new_from_db(db)
return cls._cache[db] | def function[from_db, parameter[cls, db, force]]:
constant[Make instance from database.
For performance, this caches the episode types for the database. The
`force` parameter can be used to bypass this.
]
if <ast.BoolOp object at 0x7da18dc078b0> begin[:]
call[name[cls]._cache][name[db]] assign[=] call[name[cls]._new_from_db, parameter[name[db]]]
return[call[name[cls]._cache][name[db]]] | keyword[def] identifier[from_db] ( identifier[cls] , identifier[db] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[force] keyword[or] identifier[db] keyword[not] keyword[in] identifier[cls] . identifier[_cache] :
identifier[cls] . identifier[_cache] [ identifier[db] ]= identifier[cls] . identifier[_new_from_db] ( identifier[db] )
keyword[return] identifier[cls] . identifier[_cache] [ identifier[db] ] | def from_db(cls, db, force=False):
"""Make instance from database.
For performance, this caches the episode types for the database. The
`force` parameter can be used to bypass this.
"""
if force or db not in cls._cache:
cls._cache[db] = cls._new_from_db(db) # depends on [control=['if'], data=[]]
return cls._cache[db] |
def update_ext(self, path, id, body=None):
"""Client extension hook for update."""
return self.put(path % id, body=body) | def function[update_ext, parameter[self, path, id, body]]:
constant[Client extension hook for update.]
return[call[name[self].put, parameter[binary_operation[name[path] <ast.Mod object at 0x7da2590d6920> name[id]]]]] | keyword[def] identifier[update_ext] ( identifier[self] , identifier[path] , identifier[id] , identifier[body] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[put] ( identifier[path] % identifier[id] , identifier[body] = identifier[body] ) | def update_ext(self, path, id, body=None):
"""Client extension hook for update."""
return self.put(path % id, body=body) |
def pre_dissect(self, s):
"""
We need to parse the padding and type as soon as possible,
else we won't be able to parse the message list...
"""
if len(s) < 1:
raise Exception("Invalid InnerPlaintext (too short).")
tmp_len = len(s) - 1
if s[-1] != b"\x00":
msg_len = tmp_len
else:
n = 1
while s[-n] != b"\x00" and n < tmp_len:
n += 1
msg_len = tmp_len - n
self.fields_desc[0].length_from = lambda pkt: msg_len
self.type = struct.unpack("B", s[msg_len:msg_len + 1])[0]
return s | def function[pre_dissect, parameter[self, s]]:
constant[
We need to parse the padding and type as soon as possible,
else we won't be able to parse the message list...
]
if compare[call[name[len], parameter[name[s]]] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da1b21be140>
variable[tmp_len] assign[=] binary_operation[call[name[len], parameter[name[s]]] - constant[1]]
if compare[call[name[s]][<ast.UnaryOp object at 0x7da1b21bf6d0>] not_equal[!=] constant[b'\x00']] begin[:]
variable[msg_len] assign[=] name[tmp_len]
call[name[self].fields_desc][constant[0]].length_from assign[=] <ast.Lambda object at 0x7da1b21b8a30>
name[self].type assign[=] call[call[name[struct].unpack, parameter[constant[B], call[name[s]][<ast.Slice object at 0x7da1b21bb010>]]]][constant[0]]
return[name[s]] | keyword[def] identifier[pre_dissect] ( identifier[self] , identifier[s] ):
literal[string]
keyword[if] identifier[len] ( identifier[s] )< literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[tmp_len] = identifier[len] ( identifier[s] )- literal[int]
keyword[if] identifier[s] [- literal[int] ]!= literal[string] :
identifier[msg_len] = identifier[tmp_len]
keyword[else] :
identifier[n] = literal[int]
keyword[while] identifier[s] [- identifier[n] ]!= literal[string] keyword[and] identifier[n] < identifier[tmp_len] :
identifier[n] += literal[int]
identifier[msg_len] = identifier[tmp_len] - identifier[n]
identifier[self] . identifier[fields_desc] [ literal[int] ]. identifier[length_from] = keyword[lambda] identifier[pkt] : identifier[msg_len]
identifier[self] . identifier[type] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[s] [ identifier[msg_len] : identifier[msg_len] + literal[int] ])[ literal[int] ]
keyword[return] identifier[s] | def pre_dissect(self, s):
"""
We need to parse the padding and type as soon as possible,
else we won't be able to parse the message list...
"""
if len(s) < 1:
raise Exception('Invalid InnerPlaintext (too short).') # depends on [control=['if'], data=[]]
tmp_len = len(s) - 1
if s[-1] != b'\x00':
msg_len = tmp_len # depends on [control=['if'], data=[]]
else:
n = 1
while s[-n] != b'\x00' and n < tmp_len:
n += 1 # depends on [control=['while'], data=[]]
msg_len = tmp_len - n
self.fields_desc[0].length_from = lambda pkt: msg_len
self.type = struct.unpack('B', s[msg_len:msg_len + 1])[0]
return s |
def demote(self):
"""Demote the bootstrap code to the end of the `sys.path` so it is found last.
:return: The list of un-imported bootstrap modules.
:rtype: list of :class:`types.ModuleType`
"""
import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.
unimported_modules = []
for name, module in reversed(sorted(sys.modules.items())):
if self.imported_from_bootstrap(module):
unimported_modules.append(sys.modules.pop(name))
sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]
sys.path.append(self._sys_path_entry)
return unimported_modules | def function[demote, parameter[self]]:
constant[Demote the bootstrap code to the end of the `sys.path` so it is found last.
:return: The list of un-imported bootstrap modules.
:rtype: list of :class:`types.ModuleType`
]
import module[sys]
variable[unimported_modules] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204620f40>, <ast.Name object at 0x7da204620eb0>]]] in starred[call[name[reversed], parameter[call[name[sorted], parameter[call[name[sys].modules.items, parameter[]]]]]]] begin[:]
if call[name[self].imported_from_bootstrap, parameter[name[module]]] begin[:]
call[name[unimported_modules].append, parameter[call[name[sys].modules.pop, parameter[name[name]]]]]
call[name[sys].path][<ast.Slice object at 0x7da204620910>] assign[=] <ast.ListComp object at 0x7da204623f40>
call[name[sys].path.append, parameter[name[self]._sys_path_entry]]
return[name[unimported_modules]] | keyword[def] identifier[demote] ( identifier[self] ):
literal[string]
keyword[import] identifier[sys]
identifier[unimported_modules] =[]
keyword[for] identifier[name] , identifier[module] keyword[in] identifier[reversed] ( identifier[sorted] ( identifier[sys] . identifier[modules] . identifier[items] ())):
keyword[if] identifier[self] . identifier[imported_from_bootstrap] ( identifier[module] ):
identifier[unimported_modules] . identifier[append] ( identifier[sys] . identifier[modules] . identifier[pop] ( identifier[name] ))
identifier[sys] . identifier[path] [:]=[ identifier[path] keyword[for] identifier[path] keyword[in] identifier[sys] . identifier[path] keyword[if] identifier[os] . identifier[path] . identifier[realpath] ( identifier[path] )!= identifier[self] . identifier[_realpath] ]
identifier[sys] . identifier[path] . identifier[append] ( identifier[self] . identifier[_sys_path_entry] )
keyword[return] identifier[unimported_modules] | def demote(self):
"""Demote the bootstrap code to the end of the `sys.path` so it is found last.
:return: The list of un-imported bootstrap modules.
:rtype: list of :class:`types.ModuleType`
"""
import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.
unimported_modules = []
for (name, module) in reversed(sorted(sys.modules.items())):
if self.imported_from_bootstrap(module):
unimported_modules.append(sys.modules.pop(name)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]
sys.path.append(self._sys_path_entry)
return unimported_modules |
async def props_del(self):
""" Fix me after support 204 code in aiohttp """
endpoint = "{bucket}/props".format(bucket=self.path)
try:
async with self._client.delete(endpoint,
headers=self.headers) as r:
if r.status != 204:
raise Error("Bucket {} not found}".format(self.bucket))
return True
except ContentEncodingError:
""" Not error really """
return True
except:
raise | <ast.AsyncFunctionDef object at 0x7da18f00f820> | keyword[async] keyword[def] identifier[props_del] ( identifier[self] ):
literal[string]
identifier[endpoint] = literal[string] . identifier[format] ( identifier[bucket] = identifier[self] . identifier[path] )
keyword[try] :
keyword[async] keyword[with] identifier[self] . identifier[_client] . identifier[delete] ( identifier[endpoint] ,
identifier[headers] = identifier[self] . identifier[headers] ) keyword[as] identifier[r] :
keyword[if] identifier[r] . identifier[status] != literal[int] :
keyword[raise] identifier[Error] ( literal[string] . identifier[format] ( identifier[self] . identifier[bucket] ))
keyword[return] keyword[True]
keyword[except] identifier[ContentEncodingError] :
literal[string]
keyword[return] keyword[True]
keyword[except] :
keyword[raise] | async def props_del(self):
""" Fix me after support 204 code in aiohttp """
endpoint = '{bucket}/props'.format(bucket=self.path)
try:
async with self._client.delete(endpoint, headers=self.headers) as r:
if r.status != 204:
raise Error('Bucket {} not found}'.format(self.bucket)) # depends on [control=['if'], data=[]]
return True # depends on [control=['try'], data=[]]
except ContentEncodingError:
' Not error really '
return True # depends on [control=['except'], data=[]]
except:
raise # depends on [control=['except'], data=[]] |
def migrate_passwords_to_leader_storage(self, excludes=None):
"""Migrate any passwords storage on disk to leader storage."""
if not is_leader():
log("Skipping password migration as not the lead unit",
level=DEBUG)
return
dirname = os.path.dirname(self.root_passwd_file_template)
path = os.path.join(dirname, '*.passwd')
for f in glob.glob(path):
if excludes and f in excludes:
log("Excluding %s from leader storage migration" % (f),
level=DEBUG)
continue
key = os.path.basename(f)
with open(f, 'r') as passwd:
_value = passwd.read().strip()
try:
leader_set(settings={key: _value})
if self.delete_ondisk_passwd_file:
os.unlink(f)
except ValueError:
# NOTE cluster relation not yet ready - skip for now
pass | def function[migrate_passwords_to_leader_storage, parameter[self, excludes]]:
constant[Migrate any passwords storage on disk to leader storage.]
if <ast.UnaryOp object at 0x7da207f00f40> begin[:]
call[name[log], parameter[constant[Skipping password migration as not the lead unit]]]
return[None]
variable[dirname] assign[=] call[name[os].path.dirname, parameter[name[self].root_passwd_file_template]]
variable[path] assign[=] call[name[os].path.join, parameter[name[dirname], constant[*.passwd]]]
for taget[name[f]] in starred[call[name[glob].glob, parameter[name[path]]]] begin[:]
if <ast.BoolOp object at 0x7da207f02b60> begin[:]
call[name[log], parameter[binary_operation[constant[Excluding %s from leader storage migration] <ast.Mod object at 0x7da2590d6920> name[f]]]]
continue
variable[key] assign[=] call[name[os].path.basename, parameter[name[f]]]
with call[name[open], parameter[name[f], constant[r]]] begin[:]
variable[_value] assign[=] call[call[name[passwd].read, parameter[]].strip, parameter[]]
<ast.Try object at 0x7da207f00220> | keyword[def] identifier[migrate_passwords_to_leader_storage] ( identifier[self] , identifier[excludes] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[is_leader] ():
identifier[log] ( literal[string] ,
identifier[level] = identifier[DEBUG] )
keyword[return]
identifier[dirname] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[root_passwd_file_template] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirname] , literal[string] )
keyword[for] identifier[f] keyword[in] identifier[glob] . identifier[glob] ( identifier[path] ):
keyword[if] identifier[excludes] keyword[and] identifier[f] keyword[in] identifier[excludes] :
identifier[log] ( literal[string] %( identifier[f] ),
identifier[level] = identifier[DEBUG] )
keyword[continue]
identifier[key] = identifier[os] . identifier[path] . identifier[basename] ( identifier[f] )
keyword[with] identifier[open] ( identifier[f] , literal[string] ) keyword[as] identifier[passwd] :
identifier[_value] = identifier[passwd] . identifier[read] (). identifier[strip] ()
keyword[try] :
identifier[leader_set] ( identifier[settings] ={ identifier[key] : identifier[_value] })
keyword[if] identifier[self] . identifier[delete_ondisk_passwd_file] :
identifier[os] . identifier[unlink] ( identifier[f] )
keyword[except] identifier[ValueError] :
keyword[pass] | def migrate_passwords_to_leader_storage(self, excludes=None):
"""Migrate any passwords storage on disk to leader storage."""
if not is_leader():
log('Skipping password migration as not the lead unit', level=DEBUG)
return # depends on [control=['if'], data=[]]
dirname = os.path.dirname(self.root_passwd_file_template)
path = os.path.join(dirname, '*.passwd')
for f in glob.glob(path):
if excludes and f in excludes:
log('Excluding %s from leader storage migration' % f, level=DEBUG)
continue # depends on [control=['if'], data=[]]
key = os.path.basename(f)
with open(f, 'r') as passwd:
_value = passwd.read().strip() # depends on [control=['with'], data=['passwd']]
try:
leader_set(settings={key: _value})
if self.delete_ondisk_passwd_file:
os.unlink(f) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
# NOTE cluster relation not yet ready - skip for now
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['f']] |
def wait(self, *, block=True, timeout=None):
"""Signal that a party has reached the barrier.
Warning:
Barrier blocking is currently only supported by the stub and
Redis backends.
Warning:
Re-using keys between blocking calls may lead to undefined
behaviour. Make sure your barrier keys are always unique
(use a UUID).
Parameters:
block(bool): Whether or not to block while waiting for the
other parties.
timeout(int): The maximum number of milliseconds to wait for
the barrier to be cleared.
Returns:
bool: Whether or not the barrier has been reached by all parties.
"""
cleared = not self.backend.decr(self.key, 1, 1, self.ttl)
if cleared:
self.backend.wait_notify(self.key_events, self.ttl)
return True
if block:
return self.backend.wait(self.key_events, timeout)
return False | def function[wait, parameter[self]]:
constant[Signal that a party has reached the barrier.
Warning:
Barrier blocking is currently only supported by the stub and
Redis backends.
Warning:
Re-using keys between blocking calls may lead to undefined
behaviour. Make sure your barrier keys are always unique
(use a UUID).
Parameters:
block(bool): Whether or not to block while waiting for the
other parties.
timeout(int): The maximum number of milliseconds to wait for
the barrier to be cleared.
Returns:
bool: Whether or not the barrier has been reached by all parties.
]
variable[cleared] assign[=] <ast.UnaryOp object at 0x7da1b163bd90>
if name[cleared] begin[:]
call[name[self].backend.wait_notify, parameter[name[self].key_events, name[self].ttl]]
return[constant[True]]
if name[block] begin[:]
return[call[name[self].backend.wait, parameter[name[self].key_events, name[timeout]]]]
return[constant[False]] | keyword[def] identifier[wait] ( identifier[self] ,*, identifier[block] = keyword[True] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[cleared] = keyword[not] identifier[self] . identifier[backend] . identifier[decr] ( identifier[self] . identifier[key] , literal[int] , literal[int] , identifier[self] . identifier[ttl] )
keyword[if] identifier[cleared] :
identifier[self] . identifier[backend] . identifier[wait_notify] ( identifier[self] . identifier[key_events] , identifier[self] . identifier[ttl] )
keyword[return] keyword[True]
keyword[if] identifier[block] :
keyword[return] identifier[self] . identifier[backend] . identifier[wait] ( identifier[self] . identifier[key_events] , identifier[timeout] )
keyword[return] keyword[False] | def wait(self, *, block=True, timeout=None):
"""Signal that a party has reached the barrier.
Warning:
Barrier blocking is currently only supported by the stub and
Redis backends.
Warning:
Re-using keys between blocking calls may lead to undefined
behaviour. Make sure your barrier keys are always unique
(use a UUID).
Parameters:
block(bool): Whether or not to block while waiting for the
other parties.
timeout(int): The maximum number of milliseconds to wait for
the barrier to be cleared.
Returns:
bool: Whether or not the barrier has been reached by all parties.
"""
cleared = not self.backend.decr(self.key, 1, 1, self.ttl)
if cleared:
self.backend.wait_notify(self.key_events, self.ttl)
return True # depends on [control=['if'], data=[]]
if block:
return self.backend.wait(self.key_events, timeout) # depends on [control=['if'], data=[]]
return False |
def assert_subset(self, subset, superset, failure_message='Expected collection "{}" to be a subset of "{}'):
"""
Asserts that a superset contains all elements of a subset
"""
assertion = lambda: set(subset).issubset(set(superset))
failure_message = unicode(failure_message).format(superset, subset)
self.webdriver_assert(assertion, failure_message) | def function[assert_subset, parameter[self, subset, superset, failure_message]]:
constant[
Asserts that a superset contains all elements of a subset
]
variable[assertion] assign[=] <ast.Lambda object at 0x7da1b10a58a0>
variable[failure_message] assign[=] call[call[name[unicode], parameter[name[failure_message]]].format, parameter[name[superset], name[subset]]]
call[name[self].webdriver_assert, parameter[name[assertion], name[failure_message]]] | keyword[def] identifier[assert_subset] ( identifier[self] , identifier[subset] , identifier[superset] , identifier[failure_message] = literal[string] ):
literal[string]
identifier[assertion] = keyword[lambda] : identifier[set] ( identifier[subset] ). identifier[issubset] ( identifier[set] ( identifier[superset] ))
identifier[failure_message] = identifier[unicode] ( identifier[failure_message] ). identifier[format] ( identifier[superset] , identifier[subset] )
identifier[self] . identifier[webdriver_assert] ( identifier[assertion] , identifier[failure_message] ) | def assert_subset(self, subset, superset, failure_message='Expected collection "{}" to be a subset of "{}'):
"""
Asserts that a superset contains all elements of a subset
"""
assertion = lambda : set(subset).issubset(set(superset))
failure_message = unicode(failure_message).format(superset, subset)
self.webdriver_assert(assertion, failure_message) |
def fold(self):
"""Folds the Register in accordance with it's dimensions.
If the register is dimensionless, the returned list just
contains the register itself unchanged. In case the register
name looks like a C array, the returned list contains the register
itself, where nothing else than the '%s' placeholder in it's name
has been replaced with value of the dim element.
"""
if self.dim is None:
return [self]
if self.name.endswith("[%s]"): # C array like
self.name = self.name.replace("%s", str(self.dim))
return [self]
registers = []
for offset, index in enumerate(self.dimIndex):
reg = self.copy()
reg.name = self.name.replace("%s", str(index))
reg.addressOffset += offset * reg.dimIncrement
reg.fields = [field.copy() for field in reg.fields]
for field in reg.fields:
field.parent = reg
reg.dim = reg.dimIndex = reg.dimIncrement = None # Dimensionless
registers.append(reg)
return registers | def function[fold, parameter[self]]:
constant[Folds the Register in accordance with it's dimensions.
If the register is dimensionless, the returned list just
contains the register itself unchanged. In case the register
name looks like a C array, the returned list contains the register
itself, where nothing else than the '%s' placeholder in it's name
has been replaced with value of the dim element.
]
if compare[name[self].dim is constant[None]] begin[:]
return[list[[<ast.Name object at 0x7da1b0b5a5f0>]]]
if call[name[self].name.endswith, parameter[constant[[%s]]]] begin[:]
name[self].name assign[=] call[name[self].name.replace, parameter[constant[%s], call[name[str], parameter[name[self].dim]]]]
return[list[[<ast.Name object at 0x7da204344670>]]]
variable[registers] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da204344640>, <ast.Name object at 0x7da204345090>]]] in starred[call[name[enumerate], parameter[name[self].dimIndex]]] begin[:]
variable[reg] assign[=] call[name[self].copy, parameter[]]
name[reg].name assign[=] call[name[self].name.replace, parameter[constant[%s], call[name[str], parameter[name[index]]]]]
<ast.AugAssign object at 0x7da2043443d0>
name[reg].fields assign[=] <ast.ListComp object at 0x7da204346770>
for taget[name[field]] in starred[name[reg].fields] begin[:]
name[field].parent assign[=] name[reg]
name[reg].dim assign[=] constant[None]
call[name[registers].append, parameter[name[reg]]]
return[name[registers]] | keyword[def] identifier[fold] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[dim] keyword[is] keyword[None] :
keyword[return] [ identifier[self] ]
keyword[if] identifier[self] . identifier[name] . identifier[endswith] ( literal[string] ):
identifier[self] . identifier[name] = identifier[self] . identifier[name] . identifier[replace] ( literal[string] , identifier[str] ( identifier[self] . identifier[dim] ))
keyword[return] [ identifier[self] ]
identifier[registers] =[]
keyword[for] identifier[offset] , identifier[index] keyword[in] identifier[enumerate] ( identifier[self] . identifier[dimIndex] ):
identifier[reg] = identifier[self] . identifier[copy] ()
identifier[reg] . identifier[name] = identifier[self] . identifier[name] . identifier[replace] ( literal[string] , identifier[str] ( identifier[index] ))
identifier[reg] . identifier[addressOffset] += identifier[offset] * identifier[reg] . identifier[dimIncrement]
identifier[reg] . identifier[fields] =[ identifier[field] . identifier[copy] () keyword[for] identifier[field] keyword[in] identifier[reg] . identifier[fields] ]
keyword[for] identifier[field] keyword[in] identifier[reg] . identifier[fields] :
identifier[field] . identifier[parent] = identifier[reg]
identifier[reg] . identifier[dim] = identifier[reg] . identifier[dimIndex] = identifier[reg] . identifier[dimIncrement] = keyword[None]
identifier[registers] . identifier[append] ( identifier[reg] )
keyword[return] identifier[registers] | def fold(self):
"""Folds the Register in accordance with it's dimensions.
If the register is dimensionless, the returned list just
contains the register itself unchanged. In case the register
name looks like a C array, the returned list contains the register
itself, where nothing else than the '%s' placeholder in it's name
has been replaced with value of the dim element.
"""
if self.dim is None:
return [self] # depends on [control=['if'], data=[]]
if self.name.endswith('[%s]'): # C array like
self.name = self.name.replace('%s', str(self.dim))
return [self] # depends on [control=['if'], data=[]]
registers = []
for (offset, index) in enumerate(self.dimIndex):
reg = self.copy()
reg.name = self.name.replace('%s', str(index))
reg.addressOffset += offset * reg.dimIncrement
reg.fields = [field.copy() for field in reg.fields]
for field in reg.fields:
field.parent = reg # depends on [control=['for'], data=['field']]
reg.dim = reg.dimIndex = reg.dimIncrement = None # Dimensionless
registers.append(reg) # depends on [control=['for'], data=[]]
return registers |
def manual_run(turn_into_run=True, store_meta_data=True, clean_up=True):
"""Can be used to decorate a function as a manual run function.
This can be helpful if you want the run functionality without using an environment.
:param turn_into_run:
If the trajectory should become a `single run` with more specialized functionality
during a single run.
:param store_meta_data:
If meta-data like runtime should be automatically stored
:param clean_up:
If all data added during the single run should be removed, only works
if ``turn_into_run=True``.
"""
def wrapper(func):
@functools.wraps(func)
def new_func(traj, *args, **kwargs):
do_wrap = not traj._run_by_environment
if do_wrap:
traj.f_start_run(turn_into_run=turn_into_run)
result = func(traj, *args, **kwargs)
if do_wrap:
traj.f_finalize_run(store_meta_data=store_meta_data,
clean_up=clean_up)
return result
return new_func
return wrapper | def function[manual_run, parameter[turn_into_run, store_meta_data, clean_up]]:
constant[Can be used to decorate a function as a manual run function.
This can be helpful if you want the run functionality without using an environment.
:param turn_into_run:
If the trajectory should become a `single run` with more specialized functionality
during a single run.
:param store_meta_data:
If meta-data like runtime should be automatically stored
:param clean_up:
If all data added during the single run should be removed, only works
if ``turn_into_run=True``.
]
def function[wrapper, parameter[func]]:
def function[new_func, parameter[traj]]:
variable[do_wrap] assign[=] <ast.UnaryOp object at 0x7da18f811ae0>
if name[do_wrap] begin[:]
call[name[traj].f_start_run, parameter[]]
variable[result] assign[=] call[name[func], parameter[name[traj], <ast.Starred object at 0x7da18f812560>]]
if name[do_wrap] begin[:]
call[name[traj].f_finalize_run, parameter[]]
return[name[result]]
return[name[new_func]]
return[name[wrapper]] | keyword[def] identifier[manual_run] ( identifier[turn_into_run] = keyword[True] , identifier[store_meta_data] = keyword[True] , identifier[clean_up] = keyword[True] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[func] ):
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[new_func] ( identifier[traj] ,* identifier[args] ,** identifier[kwargs] ):
identifier[do_wrap] = keyword[not] identifier[traj] . identifier[_run_by_environment]
keyword[if] identifier[do_wrap] :
identifier[traj] . identifier[f_start_run] ( identifier[turn_into_run] = identifier[turn_into_run] )
identifier[result] = identifier[func] ( identifier[traj] ,* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[do_wrap] :
identifier[traj] . identifier[f_finalize_run] ( identifier[store_meta_data] = identifier[store_meta_data] ,
identifier[clean_up] = identifier[clean_up] )
keyword[return] identifier[result]
keyword[return] identifier[new_func]
keyword[return] identifier[wrapper] | def manual_run(turn_into_run=True, store_meta_data=True, clean_up=True):
"""Can be used to decorate a function as a manual run function.
This can be helpful if you want the run functionality without using an environment.
:param turn_into_run:
If the trajectory should become a `single run` with more specialized functionality
during a single run.
:param store_meta_data:
If meta-data like runtime should be automatically stored
:param clean_up:
If all data added during the single run should be removed, only works
if ``turn_into_run=True``.
"""
def wrapper(func):
@functools.wraps(func)
def new_func(traj, *args, **kwargs):
do_wrap = not traj._run_by_environment
if do_wrap:
traj.f_start_run(turn_into_run=turn_into_run) # depends on [control=['if'], data=[]]
result = func(traj, *args, **kwargs)
if do_wrap:
traj.f_finalize_run(store_meta_data=store_meta_data, clean_up=clean_up) # depends on [control=['if'], data=[]]
return result
return new_func
return wrapper |
def update(self, key=values.unset, value=values.unset):
"""
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
"""
data = values.of({'Key': key, 'Value': value, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return VariableInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
environment_sid=self._solution['environment_sid'],
sid=self._solution['sid'],
) | def function[update, parameter[self, key, value]]:
constant[
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
]
variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da1b1eef520>, <ast.Constant object at 0x7da1b1eee3b0>], [<ast.Name object at 0x7da1b1eee770>, <ast.Name object at 0x7da1b1eedf00>]]]]
variable[payload] assign[=] call[name[self]._version.update, parameter[constant[POST], name[self]._uri]]
return[call[name[VariableInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[key] = identifier[values] . identifier[unset] , identifier[value] = identifier[values] . identifier[unset] ):
literal[string]
identifier[data] = identifier[values] . identifier[of] ({ literal[string] : identifier[key] , literal[string] : identifier[value] ,})
identifier[payload] = identifier[self] . identifier[_version] . identifier[update] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[data] = identifier[data] ,
)
keyword[return] identifier[VariableInstance] (
identifier[self] . identifier[_version] ,
identifier[payload] ,
identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[environment_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ],
) | def update(self, key=values.unset, value=values.unset):
"""
Update the VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Updated VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
"""
data = values.of({'Key': key, 'Value': value})
payload = self._version.update('POST', self._uri, data=data)
return VariableInstance(self._version, payload, service_sid=self._solution['service_sid'], environment_sid=self._solution['environment_sid'], sid=self._solution['sid']) |
def _Bound_TP(T, P):
"""Region definition for input T and P
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.3
"""
region = None
if 1073.15 < T <= 2273.15 and Pmin <= P <= 50:
region = 5
elif Pmin <= P <= Ps_623:
Tsat = _TSat_P(P)
if 273.15 <= T <= Tsat:
region = 1
elif Tsat < T <= 1073.15:
region = 2
elif Ps_623 < P <= 100:
T_b23 = _t_P(P)
if 273.15 <= T <= 623.15:
region = 1
elif 623.15 < T < T_b23:
region = 3
elif T_b23 <= T <= 1073.15:
region = 2
return region | def function[_Bound_TP, parameter[T, P]]:
constant[Region definition for input T and P
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.3
]
variable[region] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b06d3460> begin[:]
variable[region] assign[=] constant[5]
return[name[region]] | keyword[def] identifier[_Bound_TP] ( identifier[T] , identifier[P] ):
literal[string]
identifier[region] = keyword[None]
keyword[if] literal[int] < identifier[T] <= literal[int] keyword[and] identifier[Pmin] <= identifier[P] <= literal[int] :
identifier[region] = literal[int]
keyword[elif] identifier[Pmin] <= identifier[P] <= identifier[Ps_623] :
identifier[Tsat] = identifier[_TSat_P] ( identifier[P] )
keyword[if] literal[int] <= identifier[T] <= identifier[Tsat] :
identifier[region] = literal[int]
keyword[elif] identifier[Tsat] < identifier[T] <= literal[int] :
identifier[region] = literal[int]
keyword[elif] identifier[Ps_623] < identifier[P] <= literal[int] :
identifier[T_b23] = identifier[_t_P] ( identifier[P] )
keyword[if] literal[int] <= identifier[T] <= literal[int] :
identifier[region] = literal[int]
keyword[elif] literal[int] < identifier[T] < identifier[T_b23] :
identifier[region] = literal[int]
keyword[elif] identifier[T_b23] <= identifier[T] <= literal[int] :
identifier[region] = literal[int]
keyword[return] identifier[region] | def _Bound_TP(T, P):
"""Region definition for input T and P
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [MPa]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.3
"""
region = None
if 1073.15 < T <= 2273.15 and Pmin <= P <= 50:
region = 5 # depends on [control=['if'], data=[]]
elif Pmin <= P <= Ps_623:
Tsat = _TSat_P(P)
if 273.15 <= T <= Tsat:
region = 1 # depends on [control=['if'], data=[]]
elif Tsat < T <= 1073.15:
region = 2 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['P']]
elif Ps_623 < P <= 100:
T_b23 = _t_P(P)
if 273.15 <= T <= 623.15:
region = 1 # depends on [control=['if'], data=[]]
elif 623.15 < T < T_b23:
region = 3 # depends on [control=['if'], data=[]]
elif T_b23 <= T <= 1073.15:
region = 2 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['P']]
return region |
def get_message_voice(self, msgid):
"""
根据消息 ID 获取语音消息内容
:param msgid: 消息 ID
:return: 二进制 MP3 音频字符串, 可直接作为 File Object 中 write 的参数
:raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据
:raises ValueError: 参数出错, 错误原因直接打印异常即可, 错误内容: ``voice message not exist``: msg参数无效
"""
url = 'https://mp.weixin.qq.com/cgi-bin/getvoicedata?msgid={msgid}&fileid=&token={token}&lang=zh_CN'.format(
msgid=msgid,
token=self.__token,
)
headers = {
'x-requested-with': 'XMLHttpRequest',
'referer': 'https://mp.weixin.qq.com/cgi-bin/message?t=message/list&token={token}&count=20&day=7'.format(
token=self.__token,
),
'cookie': self.__cookies,
}
r = requests.get(url, headers=headers, stream=True)
# 检测会话是否超时
if r.headers.get('content-type', None) == 'text/html; charset=UTF-8':
raise NeedLoginError(r.text)
# 检测语音是否存在
if not r.raw.data:
raise ValueError('voice message not exist')
return r.raw.data | def function[get_message_voice, parameter[self, msgid]]:
constant[
根据消息 ID 获取语音消息内容
:param msgid: 消息 ID
:return: 二进制 MP3 音频字符串, 可直接作为 File Object 中 write 的参数
:raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据
:raises ValueError: 参数出错, 错误原因直接打印异常即可, 错误内容: ``voice message not exist``: msg参数无效
]
variable[url] assign[=] call[constant[https://mp.weixin.qq.com/cgi-bin/getvoicedata?msgid={msgid}&fileid=&token={token}&lang=zh_CN].format, parameter[]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b021f250>, <ast.Constant object at 0x7da1b021f460>, <ast.Constant object at 0x7da1b021e890>], [<ast.Constant object at 0x7da1b021c250>, <ast.Call object at 0x7da1b021cd30>, <ast.Attribute object at 0x7da1b021f6a0>]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[call[name[r].headers.get, parameter[constant[content-type], constant[None]]] equal[==] constant[text/html; charset=UTF-8]] begin[:]
<ast.Raise object at 0x7da1b021d0c0>
if <ast.UnaryOp object at 0x7da1b021e830> begin[:]
<ast.Raise object at 0x7da1b021cca0>
return[name[r].raw.data] | keyword[def] identifier[get_message_voice] ( identifier[self] , identifier[msgid] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[msgid] = identifier[msgid] ,
identifier[token] = identifier[self] . identifier[__token] ,
)
identifier[headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string] . identifier[format] (
identifier[token] = identifier[self] . identifier[__token] ,
),
literal[string] : identifier[self] . identifier[__cookies] ,
}
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[stream] = keyword[True] )
keyword[if] identifier[r] . identifier[headers] . identifier[get] ( literal[string] , keyword[None] )== literal[string] :
keyword[raise] identifier[NeedLoginError] ( identifier[r] . identifier[text] )
keyword[if] keyword[not] identifier[r] . identifier[raw] . identifier[data] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[r] . identifier[raw] . identifier[data] | def get_message_voice(self, msgid):
"""
根据消息 ID 获取语音消息内容
:param msgid: 消息 ID
:return: 二进制 MP3 音频字符串, 可直接作为 File Object 中 write 的参数
:raises NeedLoginError: 操作未执行成功, 需要再次尝试登录, 异常内容为服务器返回的错误数据
:raises ValueError: 参数出错, 错误原因直接打印异常即可, 错误内容: ``voice message not exist``: msg参数无效
"""
url = 'https://mp.weixin.qq.com/cgi-bin/getvoicedata?msgid={msgid}&fileid=&token={token}&lang=zh_CN'.format(msgid=msgid, token=self.__token)
headers = {'x-requested-with': 'XMLHttpRequest', 'referer': 'https://mp.weixin.qq.com/cgi-bin/message?t=message/list&token={token}&count=20&day=7'.format(token=self.__token), 'cookie': self.__cookies}
r = requests.get(url, headers=headers, stream=True)
# 检测会话是否超时
if r.headers.get('content-type', None) == 'text/html; charset=UTF-8':
raise NeedLoginError(r.text) # depends on [control=['if'], data=[]]
# 检测语音是否存在
if not r.raw.data:
raise ValueError('voice message not exist') # depends on [control=['if'], data=[]]
return r.raw.data |
def create_ensemble(ncfiles, mf_flag=False):
"""Create an xarray datset of ensemble of climate simulation from a list of netcdf files. Input data is
concatenated along a newly created data dimension ('realization')
Returns a xarray dataset object containing input data from the list of netcdf files concatenated along
a new dimension (name:'realization'). In the case where input files have unequal time dimensions output
ensemble dataset is created for overlapping time-steps common to all input files
Parameters
----------
ncfiles : sequence
List of netcdf file paths. If mf_flag is true ncfiles should be a list of lists where
each sublist contains input .nc files of a multifile dataset
mf_flag : Boolean . If true climate simulations are treated as multifile datasets before concatenation
Returns
-------
xarray dataset containing concatenated data from all input files
Notes
-----
Input netcdf files require equal spatial dimension size (e.g. lon, lat dimensions)
If input data contains multiple cftime calendar types they must be at monthly or coarser frequency
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*.nc')
>>> ens = utils.create_ensemble(ncfiles)
>>> print(ens)
Using multifile datasets:
simulation 1 is a list of .nc files (e.g. separated by time)
>>> ncfiles = glob.glob('dir/*.nc')
simulation 2 is also a list of .nc files
>>> ens = utils.create_ensemble(ncfiles)
"""
dim = 'realization'
ds1 = []
start_end_flag = True
print('finding common time-steps')
for n in ncfiles:
if mf_flag:
ds = xr.open_mfdataset(n, concat_dim='time', decode_times=False, chunks={'time': 10})
ds['time'] = xr.open_mfdataset(n).time
else:
ds = xr.open_dataset(n, decode_times=False)
ds['time'] = xr.decode_cf(ds).time
# get times - use common
time1 = pd.to_datetime({'year': ds.time.dt.year, 'month': ds.time.dt.month, 'day': ds.time.dt.day})
if start_end_flag:
start1 = time1.values[0]
end1 = time1.values[-1]
start_end_flag = False
if time1.values.min() > start1:
start1 = time1.values.min()
if time1.values.max() < end1:
end1 = time1.values.max()
for n in ncfiles:
print('accessing file ', ncfiles.index(n) + 1, ' of ', len(ncfiles))
if mf_flag:
ds = xr.open_mfdataset(n, concat_dim='time', decode_times=False, chunks={'time': 10})
ds['time'] = xr.open_mfdataset(n).time
else:
ds = xr.open_dataset(n, decode_times=False, chunks={'time': 10})
ds['time'] = xr.decode_cf(ds).time
ds['time'].values = pd.to_datetime({'year': ds.time.dt.year, 'month': ds.time.dt.month, 'day': ds.time.dt.day})
ds = ds.where((ds.time >= start1) & (ds.time <= end1), drop=True)
ds1.append(ds.drop('time'))
print('concatenating files : adding dimension ', dim, )
ens = xr.concat(ds1, dim=dim)
# assign time coords
ens = ens.assign_coords(time=ds.time.values)
return ens | def function[create_ensemble, parameter[ncfiles, mf_flag]]:
constant[Create an xarray datset of ensemble of climate simulation from a list of netcdf files. Input data is
concatenated along a newly created data dimension ('realization')
Returns a xarray dataset object containing input data from the list of netcdf files concatenated along
a new dimension (name:'realization'). In the case where input files have unequal time dimensions output
ensemble dataset is created for overlapping time-steps common to all input files
Parameters
----------
ncfiles : sequence
List of netcdf file paths. If mf_flag is true ncfiles should be a list of lists where
each sublist contains input .nc files of a multifile dataset
mf_flag : Boolean . If true climate simulations are treated as multifile datasets before concatenation
Returns
-------
xarray dataset containing concatenated data from all input files
Notes
-----
Input netcdf files require equal spatial dimension size (e.g. lon, lat dimensions)
If input data contains multiple cftime calendar types they must be at monthly or coarser frequency
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*.nc')
>>> ens = utils.create_ensemble(ncfiles)
>>> print(ens)
Using multifile datasets:
simulation 1 is a list of .nc files (e.g. separated by time)
>>> ncfiles = glob.glob('dir/*.nc')
simulation 2 is also a list of .nc files
>>> ens = utils.create_ensemble(ncfiles)
]
variable[dim] assign[=] constant[realization]
variable[ds1] assign[=] list[[]]
variable[start_end_flag] assign[=] constant[True]
call[name[print], parameter[constant[finding common time-steps]]]
for taget[name[n]] in starred[name[ncfiles]] begin[:]
if name[mf_flag] begin[:]
variable[ds] assign[=] call[name[xr].open_mfdataset, parameter[name[n]]]
call[name[ds]][constant[time]] assign[=] call[name[xr].open_mfdataset, parameter[name[n]]].time
variable[time1] assign[=] call[name[pd].to_datetime, parameter[dictionary[[<ast.Constant object at 0x7da1b23d1f30>, <ast.Constant object at 0x7da1b23d05e0>, <ast.Constant object at 0x7da1b23d0460>], [<ast.Attribute object at 0x7da1b23d2bc0>, <ast.Attribute object at 0x7da1b23d34f0>, <ast.Attribute object at 0x7da1b23d0400>]]]]
if name[start_end_flag] begin[:]
variable[start1] assign[=] call[name[time1].values][constant[0]]
variable[end1] assign[=] call[name[time1].values][<ast.UnaryOp object at 0x7da2044c2770>]
variable[start_end_flag] assign[=] constant[False]
if compare[call[name[time1].values.min, parameter[]] greater[>] name[start1]] begin[:]
variable[start1] assign[=] call[name[time1].values.min, parameter[]]
if compare[call[name[time1].values.max, parameter[]] less[<] name[end1]] begin[:]
variable[end1] assign[=] call[name[time1].values.max, parameter[]]
for taget[name[n]] in starred[name[ncfiles]] begin[:]
call[name[print], parameter[constant[accessing file ], binary_operation[call[name[ncfiles].index, parameter[name[n]]] + constant[1]], constant[ of ], call[name[len], parameter[name[ncfiles]]]]]
if name[mf_flag] begin[:]
variable[ds] assign[=] call[name[xr].open_mfdataset, parameter[name[n]]]
call[name[ds]][constant[time]] assign[=] call[name[xr].open_mfdataset, parameter[name[n]]].time
call[name[ds]][constant[time]].values assign[=] call[name[pd].to_datetime, parameter[dictionary[[<ast.Constant object at 0x7da1b23ec880>, <ast.Constant object at 0x7da1b23efdf0>, <ast.Constant object at 0x7da1b23ed030>], [<ast.Attribute object at 0x7da1b23ef220>, <ast.Attribute object at 0x7da1b23edbd0>, <ast.Attribute object at 0x7da1b23eca30>]]]]
variable[ds] assign[=] call[name[ds].where, parameter[binary_operation[compare[name[ds].time greater_or_equal[>=] name[start1]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[ds].time less_or_equal[<=] name[end1]]]]]
call[name[ds1].append, parameter[call[name[ds].drop, parameter[constant[time]]]]]
call[name[print], parameter[constant[concatenating files : adding dimension ], name[dim]]]
variable[ens] assign[=] call[name[xr].concat, parameter[name[ds1]]]
variable[ens] assign[=] call[name[ens].assign_coords, parameter[]]
return[name[ens]] | keyword[def] identifier[create_ensemble] ( identifier[ncfiles] , identifier[mf_flag] = keyword[False] ):
literal[string]
identifier[dim] = literal[string]
identifier[ds1] =[]
identifier[start_end_flag] = keyword[True]
identifier[print] ( literal[string] )
keyword[for] identifier[n] keyword[in] identifier[ncfiles] :
keyword[if] identifier[mf_flag] :
identifier[ds] = identifier[xr] . identifier[open_mfdataset] ( identifier[n] , identifier[concat_dim] = literal[string] , identifier[decode_times] = keyword[False] , identifier[chunks] ={ literal[string] : literal[int] })
identifier[ds] [ literal[string] ]= identifier[xr] . identifier[open_mfdataset] ( identifier[n] ). identifier[time]
keyword[else] :
identifier[ds] = identifier[xr] . identifier[open_dataset] ( identifier[n] , identifier[decode_times] = keyword[False] )
identifier[ds] [ literal[string] ]= identifier[xr] . identifier[decode_cf] ( identifier[ds] ). identifier[time]
identifier[time1] = identifier[pd] . identifier[to_datetime] ({ literal[string] : identifier[ds] . identifier[time] . identifier[dt] . identifier[year] , literal[string] : identifier[ds] . identifier[time] . identifier[dt] . identifier[month] , literal[string] : identifier[ds] . identifier[time] . identifier[dt] . identifier[day] })
keyword[if] identifier[start_end_flag] :
identifier[start1] = identifier[time1] . identifier[values] [ literal[int] ]
identifier[end1] = identifier[time1] . identifier[values] [- literal[int] ]
identifier[start_end_flag] = keyword[False]
keyword[if] identifier[time1] . identifier[values] . identifier[min] ()> identifier[start1] :
identifier[start1] = identifier[time1] . identifier[values] . identifier[min] ()
keyword[if] identifier[time1] . identifier[values] . identifier[max] ()< identifier[end1] :
identifier[end1] = identifier[time1] . identifier[values] . identifier[max] ()
keyword[for] identifier[n] keyword[in] identifier[ncfiles] :
identifier[print] ( literal[string] , identifier[ncfiles] . identifier[index] ( identifier[n] )+ literal[int] , literal[string] , identifier[len] ( identifier[ncfiles] ))
keyword[if] identifier[mf_flag] :
identifier[ds] = identifier[xr] . identifier[open_mfdataset] ( identifier[n] , identifier[concat_dim] = literal[string] , identifier[decode_times] = keyword[False] , identifier[chunks] ={ literal[string] : literal[int] })
identifier[ds] [ literal[string] ]= identifier[xr] . identifier[open_mfdataset] ( identifier[n] ). identifier[time]
keyword[else] :
identifier[ds] = identifier[xr] . identifier[open_dataset] ( identifier[n] , identifier[decode_times] = keyword[False] , identifier[chunks] ={ literal[string] : literal[int] })
identifier[ds] [ literal[string] ]= identifier[xr] . identifier[decode_cf] ( identifier[ds] ). identifier[time]
identifier[ds] [ literal[string] ]. identifier[values] = identifier[pd] . identifier[to_datetime] ({ literal[string] : identifier[ds] . identifier[time] . identifier[dt] . identifier[year] , literal[string] : identifier[ds] . identifier[time] . identifier[dt] . identifier[month] , literal[string] : identifier[ds] . identifier[time] . identifier[dt] . identifier[day] })
identifier[ds] = identifier[ds] . identifier[where] (( identifier[ds] . identifier[time] >= identifier[start1] )&( identifier[ds] . identifier[time] <= identifier[end1] ), identifier[drop] = keyword[True] )
identifier[ds1] . identifier[append] ( identifier[ds] . identifier[drop] ( literal[string] ))
identifier[print] ( literal[string] , identifier[dim] ,)
identifier[ens] = identifier[xr] . identifier[concat] ( identifier[ds1] , identifier[dim] = identifier[dim] )
identifier[ens] = identifier[ens] . identifier[assign_coords] ( identifier[time] = identifier[ds] . identifier[time] . identifier[values] )
keyword[return] identifier[ens] | def create_ensemble(ncfiles, mf_flag=False):
"""Create an xarray datset of ensemble of climate simulation from a list of netcdf files. Input data is
concatenated along a newly created data dimension ('realization')
Returns a xarray dataset object containing input data from the list of netcdf files concatenated along
a new dimension (name:'realization'). In the case where input files have unequal time dimensions output
ensemble dataset is created for overlapping time-steps common to all input files
Parameters
----------
ncfiles : sequence
List of netcdf file paths. If mf_flag is true ncfiles should be a list of lists where
each sublist contains input .nc files of a multifile dataset
mf_flag : Boolean . If true climate simulations are treated as multifile datasets before concatenation
Returns
-------
xarray dataset containing concatenated data from all input files
Notes
-----
Input netcdf files require equal spatial dimension size (e.g. lon, lat dimensions)
If input data contains multiple cftime calendar types they must be at monthly or coarser frequency
Examples
--------
>>> from xclim import utils
>>> import glob
>>> ncfiles = glob.glob('/*.nc')
>>> ens = utils.create_ensemble(ncfiles)
>>> print(ens)
Using multifile datasets:
simulation 1 is a list of .nc files (e.g. separated by time)
>>> ncfiles = glob.glob('dir/*.nc')
simulation 2 is also a list of .nc files
>>> ens = utils.create_ensemble(ncfiles)
"""
dim = 'realization'
ds1 = []
start_end_flag = True
print('finding common time-steps')
for n in ncfiles:
if mf_flag:
ds = xr.open_mfdataset(n, concat_dim='time', decode_times=False, chunks={'time': 10})
ds['time'] = xr.open_mfdataset(n).time # depends on [control=['if'], data=[]]
else:
ds = xr.open_dataset(n, decode_times=False)
ds['time'] = xr.decode_cf(ds).time
# get times - use common
time1 = pd.to_datetime({'year': ds.time.dt.year, 'month': ds.time.dt.month, 'day': ds.time.dt.day})
if start_end_flag:
start1 = time1.values[0]
end1 = time1.values[-1]
start_end_flag = False # depends on [control=['if'], data=[]]
if time1.values.min() > start1:
start1 = time1.values.min() # depends on [control=['if'], data=['start1']]
if time1.values.max() < end1:
end1 = time1.values.max() # depends on [control=['if'], data=['end1']] # depends on [control=['for'], data=['n']]
for n in ncfiles:
print('accessing file ', ncfiles.index(n) + 1, ' of ', len(ncfiles))
if mf_flag:
ds = xr.open_mfdataset(n, concat_dim='time', decode_times=False, chunks={'time': 10})
ds['time'] = xr.open_mfdataset(n).time # depends on [control=['if'], data=[]]
else:
ds = xr.open_dataset(n, decode_times=False, chunks={'time': 10})
ds['time'] = xr.decode_cf(ds).time
ds['time'].values = pd.to_datetime({'year': ds.time.dt.year, 'month': ds.time.dt.month, 'day': ds.time.dt.day})
ds = ds.where((ds.time >= start1) & (ds.time <= end1), drop=True)
ds1.append(ds.drop('time')) # depends on [control=['for'], data=['n']]
print('concatenating files : adding dimension ', dim)
ens = xr.concat(ds1, dim=dim)
# assign time coords
ens = ens.assign_coords(time=ds.time.values)
return ens |
def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.',1)[-1].split("'")[0]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
args, _, _, kwargs = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '',
'function_name': frm.f_code.co_name,
'file': file_,
'path': os.path.split(file_)[0],
'basename': os.path.basename(file_).split('.')[0],
'line_number': line_number or frm.f_lineno,
'globals': frm.f_globals,
'locals': frm.f_locals,
'arguments': args,
'kwargs': kwargs,
'frame': frm} | def function[function_info, parameter[function_index, function_name, line_number]]:
constant[
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
]
variable[frm] assign[=] call[name[func_frame], parameter[binary_operation[name[function_index] + constant[1]], name[function_name]]]
variable[file_] assign[=] call[name[os].path.abspath, parameter[name[frm].f_code.co_filename]]
variable[class_name] assign[=] call[name[frm].f_locals.get, parameter[constant[self], constant[None]]]
if compare[name[class_name] is_not constant[None]] begin[:]
variable[class_name] assign[=] call[call[call[call[call[name[str], parameter[call[name[type], parameter[name[class_name]]]]].split, parameter[constant[.], constant[1]]]][<ast.UnaryOp object at 0x7da1b15956f0>].split, parameter[constant[']]]][constant[0]]
<ast.Tuple object at 0x7da1b1596380> assign[=] call[name[inspect].getargvalues, parameter[name[frm]]]
variable[line_number] assign[=] <ast.BoolOp object at 0x7da1b15951e0>
return[dictionary[[<ast.Constant object at 0x7da1b1595c90>, <ast.Constant object at 0x7da1b15953f0>, <ast.Constant object at 0x7da1b1594100>, <ast.Constant object at 0x7da1b1595420>, <ast.Constant object at 0x7da1b1595e70>, <ast.Constant object at 0x7da1b15979d0>, <ast.Constant object at 0x7da1b1596320>, <ast.Constant object at 0x7da1b1596230>, <ast.Constant object at 0x7da1b1596530>, <ast.Constant object at 0x7da1b15975b0>, <ast.Constant object at 0x7da1b1594c10>], [<ast.BoolOp object at 0x7da1b1595b70>, <ast.Attribute object at 0x7da1b1594d00>, <ast.Name object at 0x7da1b15958d0>, <ast.Subscript object at 0x7da1b15963b0>, <ast.Subscript object at 0x7da1b15941f0>, <ast.BoolOp object at 0x7da1b1596410>, <ast.Attribute object at 0x7da1b1595b10>, <ast.Attribute object at 0x7da1b1595300>, <ast.Name object at 0x7da1b1595f90>, <ast.Name object at 0x7da1b1597250>, <ast.Name object at 0x7da1b1595570>]]] | keyword[def] identifier[function_info] ( identifier[function_index] = literal[int] , identifier[function_name] = keyword[None] , identifier[line_number] = keyword[None] ):
literal[string]
identifier[frm] = identifier[func_frame] ( identifier[function_index] + literal[int] , identifier[function_name] )
identifier[file_] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[frm] . identifier[f_code] . identifier[co_filename] )
identifier[class_name] = identifier[frm] . identifier[f_locals] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[class_name] keyword[is] keyword[not] keyword[None] :
identifier[class_name] = identifier[str] ( identifier[type] ( identifier[class_name] )). identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
identifier[args] , identifier[_] , identifier[_] , identifier[kwargs] = identifier[inspect] . identifier[getargvalues] ( identifier[frm] )
identifier[line_number] = identifier[line_number] keyword[or] identifier[frm] . identifier[f_lineno]
keyword[return] { literal[string] : identifier[class_name] keyword[or] literal[string] ,
literal[string] : identifier[frm] . identifier[f_code] . identifier[co_name] ,
literal[string] : identifier[file_] ,
literal[string] : identifier[os] . identifier[path] . identifier[split] ( identifier[file_] )[ literal[int] ],
literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[file_] ). identifier[split] ( literal[string] )[ literal[int] ],
literal[string] : identifier[line_number] keyword[or] identifier[frm] . identifier[f_lineno] ,
literal[string] : identifier[frm] . identifier[f_globals] ,
literal[string] : identifier[frm] . identifier[f_locals] ,
literal[string] : identifier[args] ,
literal[string] : identifier[kwargs] ,
literal[string] : identifier[frm] } | def function_info(function_index=1, function_name=None, line_number=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:param line_number: int, some times the user may want to override
this for testing purposes
:return tuple: ('cls_name','func_name',line_number,globals())
"""
frm = func_frame(function_index + 1, function_name)
file_ = os.path.abspath(frm.f_code.co_filename)
class_name = frm.f_locals.get('self', None)
if class_name is not None: # and not skip_class:
class_name = str(type(class_name)).split('.', 1)[-1].split("'")[0] # depends on [control=['if'], data=['class_name']]
# try:
# class_name = str(class_name).split(None, 1)[1]
# class_name = class_name.split('.')[-1].replace(')', '')
# except:
# class_name = repr(class_name).split()[0].split('.')[-1]
# if 'object at' in str(class_name):
# class_name = str(class_name).split(' object at')[0].split('.')[-1]
(args, _, _, kwargs) = inspect.getargvalues(frm)
line_number = line_number or frm.f_lineno
return {'class_name': class_name or '', 'function_name': frm.f_code.co_name, 'file': file_, 'path': os.path.split(file_)[0], 'basename': os.path.basename(file_).split('.')[0], 'line_number': line_number or frm.f_lineno, 'globals': frm.f_globals, 'locals': frm.f_locals, 'arguments': args, 'kwargs': kwargs, 'frame': frm} |
def _round_magmoms(magmoms, round_magmoms_mode: Union[int, float]):
"""
If round_magmoms_mode is an integer, simply round to that number
of decimal places, else if set to a float will try and round
intelligently by grouping magmoms.
"""
if isinstance(round_magmoms_mode, int):
# simple rounding to number of decimal places
magmoms = np.around(magmoms, decimals=round_magmoms_mode)
elif isinstance(round_magmoms_mode, float):
try:
# get range of possible magmoms, pad by 50% just to be safe
range_m = max([max(magmoms), abs(min(magmoms))]) * 1.5
# construct kde, here "round_magmoms_mode" is the width of the kde
kernel = gaussian_kde(magmoms, bw_method=round_magmoms_mode)
# with a linearly spaced grid 1000x finer than width
xgrid = np.linspace(
-range_m, range_m, 1000 * range_m / round_magmoms_mode
)
# and evaluate the kde on this grid, extracting the maxima of the kde peaks
kernel_m = kernel.evaluate(xgrid)
extrema = xgrid[argrelextrema(kernel_m, comparator=np.greater)]
# round magmoms to these extrema
magmoms = [extrema[(np.abs(extrema - m)).argmin()] for m in magmoms]
except Exception as e:
# TODO: typically a singular matrix warning, investigate this
warnings.warn(
"Failed to round magmoms intelligently, "
"falling back to simple rounding."
)
warnings.warn(e)
# and finally round roughly to the number of significant figures in our kde width
num_decimals = len(str(round_magmoms_mode).split(".")[1]) + 1
magmoms = np.around(magmoms, decimals=num_decimals)
return magmoms | def function[_round_magmoms, parameter[magmoms, round_magmoms_mode]]:
constant[
If round_magmoms_mode is an integer, simply round to that number
of decimal places, else if set to a float will try and round
intelligently by grouping magmoms.
]
if call[name[isinstance], parameter[name[round_magmoms_mode], name[int]]] begin[:]
variable[magmoms] assign[=] call[name[np].around, parameter[name[magmoms]]]
return[name[magmoms]] | keyword[def] identifier[_round_magmoms] ( identifier[magmoms] , identifier[round_magmoms_mode] : identifier[Union] [ identifier[int] , identifier[float] ]):
literal[string]
keyword[if] identifier[isinstance] ( identifier[round_magmoms_mode] , identifier[int] ):
identifier[magmoms] = identifier[np] . identifier[around] ( identifier[magmoms] , identifier[decimals] = identifier[round_magmoms_mode] )
keyword[elif] identifier[isinstance] ( identifier[round_magmoms_mode] , identifier[float] ):
keyword[try] :
identifier[range_m] = identifier[max] ([ identifier[max] ( identifier[magmoms] ), identifier[abs] ( identifier[min] ( identifier[magmoms] ))])* literal[int]
identifier[kernel] = identifier[gaussian_kde] ( identifier[magmoms] , identifier[bw_method] = identifier[round_magmoms_mode] )
identifier[xgrid] = identifier[np] . identifier[linspace] (
- identifier[range_m] , identifier[range_m] , literal[int] * identifier[range_m] / identifier[round_magmoms_mode]
)
identifier[kernel_m] = identifier[kernel] . identifier[evaluate] ( identifier[xgrid] )
identifier[extrema] = identifier[xgrid] [ identifier[argrelextrema] ( identifier[kernel_m] , identifier[comparator] = identifier[np] . identifier[greater] )]
identifier[magmoms] =[ identifier[extrema] [( identifier[np] . identifier[abs] ( identifier[extrema] - identifier[m] )). identifier[argmin] ()] keyword[for] identifier[m] keyword[in] identifier[magmoms] ]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[warnings] . identifier[warn] (
literal[string]
literal[string]
)
identifier[warnings] . identifier[warn] ( identifier[e] )
identifier[num_decimals] = identifier[len] ( identifier[str] ( identifier[round_magmoms_mode] ). identifier[split] ( literal[string] )[ literal[int] ])+ literal[int]
identifier[magmoms] = identifier[np] . identifier[around] ( identifier[magmoms] , identifier[decimals] = identifier[num_decimals] )
keyword[return] identifier[magmoms] | def _round_magmoms(magmoms, round_magmoms_mode: Union[int, float]):
"""
If round_magmoms_mode is an integer, simply round to that number
of decimal places, else if set to a float will try and round
intelligently by grouping magmoms.
"""
if isinstance(round_magmoms_mode, int):
# simple rounding to number of decimal places
magmoms = np.around(magmoms, decimals=round_magmoms_mode) # depends on [control=['if'], data=[]]
elif isinstance(round_magmoms_mode, float):
try:
# get range of possible magmoms, pad by 50% just to be safe
range_m = max([max(magmoms), abs(min(magmoms))]) * 1.5
# construct kde, here "round_magmoms_mode" is the width of the kde
kernel = gaussian_kde(magmoms, bw_method=round_magmoms_mode)
# with a linearly spaced grid 1000x finer than width
xgrid = np.linspace(-range_m, range_m, 1000 * range_m / round_magmoms_mode)
# and evaluate the kde on this grid, extracting the maxima of the kde peaks
kernel_m = kernel.evaluate(xgrid)
extrema = xgrid[argrelextrema(kernel_m, comparator=np.greater)]
# round magmoms to these extrema
magmoms = [extrema[np.abs(extrema - m).argmin()] for m in magmoms] # depends on [control=['try'], data=[]]
except Exception as e:
# TODO: typically a singular matrix warning, investigate this
warnings.warn('Failed to round magmoms intelligently, falling back to simple rounding.')
warnings.warn(e) # depends on [control=['except'], data=['e']]
# and finally round roughly to the number of significant figures in our kde width
num_decimals = len(str(round_magmoms_mode).split('.')[1]) + 1
magmoms = np.around(magmoms, decimals=num_decimals) # depends on [control=['if'], data=[]]
return magmoms |
def area(self, cc_index=None):
"""
Returns the area of the selected polygon if index is provided or of all polygons if it's not.
"""
if cc_index is not None:
return self.areas[cc_index]
return np.sum(self.areas) | def function[area, parameter[self, cc_index]]:
constant[
Returns the area of the selected polygon if index is provided or of all polygons if it's not.
]
if compare[name[cc_index] is_not constant[None]] begin[:]
return[call[name[self].areas][name[cc_index]]]
return[call[name[np].sum, parameter[name[self].areas]]] | keyword[def] identifier[area] ( identifier[self] , identifier[cc_index] = keyword[None] ):
literal[string]
keyword[if] identifier[cc_index] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[areas] [ identifier[cc_index] ]
keyword[return] identifier[np] . identifier[sum] ( identifier[self] . identifier[areas] ) | def area(self, cc_index=None):
"""
Returns the area of the selected polygon if index is provided or of all polygons if it's not.
"""
if cc_index is not None:
return self.areas[cc_index] # depends on [control=['if'], data=['cc_index']]
return np.sum(self.areas) |
def unreduce_like(array, original_array, axis, keepdims):
"""Reverse summing over a dimension.
Args:
array: The array that was reduced.
original_array: An array whose shape to unreduce to.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array.
"""
atype = type(array)
unreducer = unreducers[atype]
shape = shape_functions[atype]
return unreducer(array, shape(original_array), axis, keepdims) | def function[unreduce_like, parameter[array, original_array, axis, keepdims]]:
constant[Reverse summing over a dimension.
Args:
array: The array that was reduced.
original_array: An array whose shape to unreduce to.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array.
]
variable[atype] assign[=] call[name[type], parameter[name[array]]]
variable[unreducer] assign[=] call[name[unreducers]][name[atype]]
variable[shape] assign[=] call[name[shape_functions]][name[atype]]
return[call[name[unreducer], parameter[name[array], call[name[shape], parameter[name[original_array]]], name[axis], name[keepdims]]]] | keyword[def] identifier[unreduce_like] ( identifier[array] , identifier[original_array] , identifier[axis] , identifier[keepdims] ):
literal[string]
identifier[atype] = identifier[type] ( identifier[array] )
identifier[unreducer] = identifier[unreducers] [ identifier[atype] ]
identifier[shape] = identifier[shape_functions] [ identifier[atype] ]
keyword[return] identifier[unreducer] ( identifier[array] , identifier[shape] ( identifier[original_array] ), identifier[axis] , identifier[keepdims] ) | def unreduce_like(array, original_array, axis, keepdims):
"""Reverse summing over a dimension.
Args:
array: The array that was reduced.
original_array: An array whose shape to unreduce to.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
An array with axes broadcast to match the shape of the original array.
"""
atype = type(array)
unreducer = unreducers[atype]
shape = shape_functions[atype]
return unreducer(array, shape(original_array), axis, keepdims) |
def sam_send(sock, line_and_data):
"""Send a line to the SAM controller, but don't read it"""
if isinstance(line_and_data, tuple):
line, data = line_and_data
else:
line, data = line_and_data, b''
line = bytes(line, encoding='ascii') + b' \n'
# print('-->', line, data)
sock.sendall(line + data) | def function[sam_send, parameter[sock, line_and_data]]:
constant[Send a line to the SAM controller, but don't read it]
if call[name[isinstance], parameter[name[line_and_data], name[tuple]]] begin[:]
<ast.Tuple object at 0x7da1b09109a0> assign[=] name[line_and_data]
variable[line] assign[=] binary_operation[call[name[bytes], parameter[name[line]]] + constant[b' \n']]
call[name[sock].sendall, parameter[binary_operation[name[line] + name[data]]]] | keyword[def] identifier[sam_send] ( identifier[sock] , identifier[line_and_data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[line_and_data] , identifier[tuple] ):
identifier[line] , identifier[data] = identifier[line_and_data]
keyword[else] :
identifier[line] , identifier[data] = identifier[line_and_data] , literal[string]
identifier[line] = identifier[bytes] ( identifier[line] , identifier[encoding] = literal[string] )+ literal[string]
identifier[sock] . identifier[sendall] ( identifier[line] + identifier[data] ) | def sam_send(sock, line_and_data):
"""Send a line to the SAM controller, but don't read it"""
if isinstance(line_and_data, tuple):
(line, data) = line_and_data # depends on [control=['if'], data=[]]
else:
(line, data) = (line_and_data, b'')
line = bytes(line, encoding='ascii') + b' \n'
# print('-->', line, data)
sock.sendall(line + data) |
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs | def function[_merge_expressions, parameter[self, other]]:
constant[
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
]
variable[new_inputs] assign[=] call[name[tuple], parameter[call[call[name[set], parameter[name[self].inputs]].union, parameter[name[other].inputs]]]]
variable[new_self_expr] assign[=] call[name[self]._rebind_variables, parameter[name[new_inputs]]]
variable[new_other_expr] assign[=] call[name[other]._rebind_variables, parameter[name[new_inputs]]]
return[tuple[[<ast.Name object at 0x7da1b1e8ed70>, <ast.Name object at 0x7da1b1e8ddb0>, <ast.Name object at 0x7da1b1e8fb80>]]] | keyword[def] identifier[_merge_expressions] ( identifier[self] , identifier[other] ):
literal[string]
identifier[new_inputs] = identifier[tuple] ( identifier[set] ( identifier[self] . identifier[inputs] ). identifier[union] ( identifier[other] . identifier[inputs] ))
identifier[new_self_expr] = identifier[self] . identifier[_rebind_variables] ( identifier[new_inputs] )
identifier[new_other_expr] = identifier[other] . identifier[_rebind_variables] ( identifier[new_inputs] )
keyword[return] identifier[new_self_expr] , identifier[new_other_expr] , identifier[new_inputs] | def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return (new_self_expr, new_other_expr, new_inputs) |
def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if 'MSGINIT' in env:
return env['MSGINIT']
msginit = env.Detect('msginit');
if msginit:
return msginit
raise SCons.Errors.StopError(MsginitNotFound, "Could not detect msginit")
return None | def function[_detect_msginit, parameter[env]]:
constant[ Detects *msginit(1)* program. ]
if compare[constant[MSGINIT] in name[env]] begin[:]
return[call[name[env]][constant[MSGINIT]]]
variable[msginit] assign[=] call[name[env].Detect, parameter[constant[msginit]]]
if name[msginit] begin[:]
return[name[msginit]]
<ast.Raise object at 0x7da18fe91060>
return[constant[None]] | keyword[def] identifier[_detect_msginit] ( identifier[env] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[env] :
keyword[return] identifier[env] [ literal[string] ]
identifier[msginit] = identifier[env] . identifier[Detect] ( literal[string] );
keyword[if] identifier[msginit] :
keyword[return] identifier[msginit]
keyword[raise] identifier[SCons] . identifier[Errors] . identifier[StopError] ( identifier[MsginitNotFound] , literal[string] )
keyword[return] keyword[None] | def _detect_msginit(env):
""" Detects *msginit(1)* program. """
if 'MSGINIT' in env:
return env['MSGINIT'] # depends on [control=['if'], data=['env']]
msginit = env.Detect('msginit')
if msginit:
return msginit # depends on [control=['if'], data=[]]
raise SCons.Errors.StopError(MsginitNotFound, 'Could not detect msginit')
return None |
def create_router(name, ext_network=None,
admin_state_up=True, profile=None):
'''
Creates a new router
CLI Example:
.. code-block:: bash
salt '*' neutron.create_router new-router-name
:param name: Name of router to create (must be first)
:param ext_network: ID or name of the external for the gateway (Optional)
:param admin_state_up: Set admin state up to true or false,
default:true (Optional)
:param profile: Profile to build on (Optional)
:return: Created router information
'''
conn = _auth(profile)
return conn.create_router(name, ext_network, admin_state_up) | def function[create_router, parameter[name, ext_network, admin_state_up, profile]]:
constant[
Creates a new router
CLI Example:
.. code-block:: bash
salt '*' neutron.create_router new-router-name
:param name: Name of router to create (must be first)
:param ext_network: ID or name of the external for the gateway (Optional)
:param admin_state_up: Set admin state up to true or false,
default:true (Optional)
:param profile: Profile to build on (Optional)
:return: Created router information
]
variable[conn] assign[=] call[name[_auth], parameter[name[profile]]]
return[call[name[conn].create_router, parameter[name[name], name[ext_network], name[admin_state_up]]]] | keyword[def] identifier[create_router] ( identifier[name] , identifier[ext_network] = keyword[None] ,
identifier[admin_state_up] = keyword[True] , identifier[profile] = keyword[None] ):
literal[string]
identifier[conn] = identifier[_auth] ( identifier[profile] )
keyword[return] identifier[conn] . identifier[create_router] ( identifier[name] , identifier[ext_network] , identifier[admin_state_up] ) | def create_router(name, ext_network=None, admin_state_up=True, profile=None):
"""
Creates a new router
CLI Example:
.. code-block:: bash
salt '*' neutron.create_router new-router-name
:param name: Name of router to create (must be first)
:param ext_network: ID or name of the external for the gateway (Optional)
:param admin_state_up: Set admin state up to true or false,
default:true (Optional)
:param profile: Profile to build on (Optional)
:return: Created router information
"""
conn = _auth(profile)
return conn.create_router(name, ext_network, admin_state_up) |
def set_int_param(params, name, value, min=None, max=None):
"""
Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None``
"""
if value is None:
return
try:
value = int(str(value))
except:
raise ValueError(
"Parameter '%s' must be an integer (or a string representation of"
" an integer) or None, got %r." % (name, value))
if min is not None and value < min:
raise ValueError(
"Parameter '%s' must not be less than %r, got %r." % (
name, min, value))
if max is not None and value > max:
raise ValueError(
"Parameter '%s' must not be greater than %r, got %r." % (
name, min, value))
params[name] = str(value) | def function[set_int_param, parameter[params, name, value, min, max]]:
constant[
Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None``
]
if compare[name[value] is constant[None]] begin[:]
return[None]
<ast.Try object at 0x7da1b168f790>
if <ast.BoolOp object at 0x7da1b168d720> begin[:]
<ast.Raise object at 0x7da1b168f0a0>
if <ast.BoolOp object at 0x7da1b168e6b0> begin[:]
<ast.Raise object at 0x7da1b168cc10>
call[name[params]][name[name]] assign[=] call[name[str], parameter[name[value]]] | keyword[def] identifier[set_int_param] ( identifier[params] , identifier[name] , identifier[value] , identifier[min] = keyword[None] , identifier[max] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return]
keyword[try] :
identifier[value] = identifier[int] ( identifier[str] ( identifier[value] ))
keyword[except] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] %( identifier[name] , identifier[value] ))
keyword[if] identifier[min] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] < identifier[min] :
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[name] , identifier[min] , identifier[value] ))
keyword[if] identifier[max] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] > identifier[max] :
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[name] , identifier[min] , identifier[value] ))
identifier[params] [ identifier[name] ]= identifier[str] ( identifier[value] ) | def set_int_param(params, name, value, min=None, max=None):
"""
Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None``
"""
if value is None:
return # depends on [control=['if'], data=[]]
try:
value = int(str(value)) # depends on [control=['try'], data=[]]
except:
raise ValueError("Parameter '%s' must be an integer (or a string representation of an integer) or None, got %r." % (name, value)) # depends on [control=['except'], data=[]]
if min is not None and value < min:
raise ValueError("Parameter '%s' must not be less than %r, got %r." % (name, min, value)) # depends on [control=['if'], data=[]]
if max is not None and value > max:
raise ValueError("Parameter '%s' must not be greater than %r, got %r." % (name, min, value)) # depends on [control=['if'], data=[]]
params[name] = str(value) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.